diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000000..45232b80ed8cd --- /dev/null +++ b/.clang-format @@ -0,0 +1,161 @@ +--- +Language: Cpp +AlignAfterOpenBracket: Align +AlignArrayOfStructures: Left +AlignConsecutiveAssignments: AcrossComments +AlignConsecutiveBitFields: AcrossComments +AlignConsecutiveDeclarations: AcrossComments +AlignConsecutiveMacros: AcrossComments +# AlignConsecutiveShortCaseStatements: AcrossComments +AlignEscapedNewlines: Left # LeftWithLastLine +AlignOperands: Align +AlignTrailingComments: + Kind: Always + OverEmptyLines: 1 +AllowAllArgumentsOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: false +# AllowBreakBeforeNoexceptSpecifier: OnlyWithParen +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Inline +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: Inline +AllowShortLoopsOnASingleLine: false +AlwaysBreakBeforeMultilineStrings: true +BinPackArguments: true +BinPackParameters: true # OnePerLine +BitFieldColonSpacing: Both +BreakBeforeBraces: Custom # Attach +BraceWrapping: + AfterCaseLabel: true + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: false + SplitEmptyRecord: false + SplitEmptyNamespace: false +# BreakAdjacentStringLiterals: true +BreakAfterAttributes: Never +BreakBeforeBinaryOperators: None +BreakBeforeInlineASMColon: OnlyMultiline +BreakBeforeTernaryOperators: false +# BreakBinaryOperations: Never +BreakConstructorInitializers: AfterColon +# BreakFunctionDefinitionParameters: false +BreakInheritanceList: AfterComma +BreakStringLiterals: true +# BreakTemplateDeclarations: Yes +ColumnLimit: 120 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: false +DerivePointerAlignment: false +DisableFormat: false +EmptyLineBeforeAccessModifier: Leave +EmptyLineAfterAccessModifier: Never +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +IncludeBlocks: Regroup +IncludeCategories: + - Regex: '^<.*\.h>' + Priority: 1 + SortPriority: 0 + - Regex: '^<.*' + Priority: 2 + SortPriority: 0 + - Regex: '.*' + Priority: 3 + SortPriority: 0 +IncludeIsMainRegex: '([-_](test|unittest))?$' +IncludeIsMainSourceRegex: '' +IndentAccessModifiers: false +IndentCaseBlocks: true +IndentCaseLabels: true +IndentExternBlock: NoIndent +IndentGotoLabels: false +IndentPPDirectives: AfterHash +IndentWidth: 4 +IndentWrappedFunctionNames: false +InsertBraces: true # NOTE: may lead to incorrect formatting +InsertNewlineAtEOF: true +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +LambdaBodyIndentation: Signature +LineEnding: LF +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Auto +ObjCBlockIndentWidth: 4 +ObjCSpaceAfterProperty: true +ObjCSpaceBeforeProtocolList: true +PPIndentWidth: -1 +PackConstructorInitializers: CurrentLine +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 
1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PointerAlignment: Middle +QualifierAlignment: Left +#QualifierOrder: ['static', 'inline', 'friend', 'constexpr', 'const', 'volatile', 'type', 'restrict'] +RawStringFormats: + - Language: Cpp + Delimiters: + - cc + - CC + - cpp + - Cpp + - CPP + - 'c++' + - 'C++' + CanonicalDelimiter: '' +ReferenceAlignment: Middle +ReflowComments: false # IndentOnly +SeparateDefinitionBlocks: Always +SortIncludes: CaseInsensitive +SortUsingDeclarations: LexicographicNumeric +SpaceAfterCStyleCast: true +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: Never +SpacesInContainerLiterals: true +SpacesInLineCommentPrefix: + Minimum: 1 + Maximum: -1 +SpacesInParentheses: false +SpacesInSquareBrackets: false +SpaceBeforeSquareBrackets: false +Standard: c++17 +TabWidth: 4 +UseTab: Never +WhitespaceSensitiveMacros: ['STRINGIZE'] +... + diff --git a/.clang-tidy b/.clang-tidy index 952c0cca82580..310c3d182c8f2 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -17,8 +17,10 @@ Checks: > -clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling, performance-*, portability-*, + -portability-simd-intrinsics, misc-*, -misc-const-correctness, -misc-non-private-member-variables-in-classes, -misc-no-recursion, + -misc-use-anonymous-namespace, FormatStyle: none diff --git a/.devops/full-musa.Dockerfile b/.devops/full-musa.Dockerfile index 575e81b486d13..3193fea1e9ae5 100644 --- a/.devops/full-musa.Dockerfile +++ b/.devops/full-musa.Dockerfile @@ -6,6 +6,9 @@ ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_V FROM ${BASE_MUSA_DEV_CONTAINER} AS build +# MUSA architecture to build for (defaults to all supported archs) +ARG MUSA_DOCKER_ARCH=default + RUN apt-get update && \ apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1 @@ -19,7 +22,11 @@ WORKDIR /app COPY . . -RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ +# Use the default MUSA archs if not specified +RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ + export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ + fi && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ cmake --build build --config Release -j$(nproc) && \ cp build/bin/* . diff --git a/.devops/full.Dockerfile b/.devops/full.Dockerfile index 2a06f82b738ae..d93c0be6a70c0 100644 --- a/.devops/full.Dockerfile +++ b/.devops/full.Dockerfile @@ -3,23 +3,36 @@ ARG UBUNTU_VERSION=22.04 FROM ubuntu:$UBUNTU_VERSION AS build RUN apt-get update && \ - apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1 + apt-get install -y build-essential git cmake libcurl4-openssl-dev + +WORKDIR /app + +COPY . . -COPY requirements.txt requirements.txt -COPY requirements requirements +RUN cmake -S . 
-B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \ + cmake --build build -j $(nproc) && \ + mkdir -p /app/lib && \ + find build -name "*.so" -exec cp {} /app/lib/ \; -RUN pip install --upgrade pip setuptools wheel \ - && pip install -r requirements.txt +FROM ubuntu:$UBUNTU_VERSION AS runtime WORKDIR /app -COPY . . +RUN apt-get update && \ + apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1 -ENV LLAMA_CURL=1 +COPY requirements.txt /app/requirements.txt +COPY requirements /app/requirements +COPY .devops/tools.sh /app/tools.sh +RUN pip install --upgrade pip setuptools wheel && \ + pip install -r /app/requirements.txt -RUN make -j$(nproc) +COPY --from=build /app/build/bin/ /app/ +COPY --from=build /app/lib/ /app/ +COPY --from=build /app/convert_hf_to_gguf.py /app/ +COPY --from=build /app/gguf-py /app/gguf-py ENV LC_ALL=C.utf8 -ENTRYPOINT ["/app/.devops/tools.sh"] +ENTRYPOINT ["/app/tools.sh"] diff --git a/.devops/llama-cli-musa.Dockerfile b/.devops/llama-cli-musa.Dockerfile index 3372749bee70e..e7c75af20e265 100644 --- a/.devops/llama-cli-musa.Dockerfile +++ b/.devops/llama-cli-musa.Dockerfile @@ -8,6 +8,9 @@ ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU FROM ${BASE_MUSA_DEV_CONTAINER} AS build +# MUSA architecture to build for (defaults to all supported archs) +ARG MUSA_DOCKER_ARCH=default + RUN apt-get update && \ apt-get install -y build-essential git cmake @@ -15,7 +18,11 @@ WORKDIR /app COPY . . -RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ +# Use the default MUSA archs if not specified +RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ + export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ + fi && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ cmake --build build --config Release --target llama-cli -j$(nproc) && \ mkdir -p /app/lib && \ find build -name "*.so" -exec cp {} /app/lib \; diff --git a/.devops/llama-cli.Dockerfile b/.devops/llama-cli.Dockerfile index 7f741aa46ecf0..be234d55dce5c 100644 --- a/.devops/llama-cli.Dockerfile +++ b/.devops/llama-cli.Dockerfile @@ -3,21 +3,27 @@ ARG UBUNTU_VERSION=22.04 FROM ubuntu:$UBUNTU_VERSION AS build RUN apt-get update && \ - apt-get install -y build-essential git + apt-get install -y build-essential git cmake libcurl4-openssl-dev WORKDIR /app COPY . . -RUN make -j$(nproc) llama-cli +RUN cmake -S . 
-B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \ + cmake --build build -j $(nproc) && \ + mkdir -p /app/lib && \ + find build -name "*.so" -exec cp {} /app/lib/ \; FROM ubuntu:$UBUNTU_VERSION AS runtime +WORKDIR /app + RUN apt-get update && \ - apt-get install -y libgomp1 + apt-get install -y libcurl4-openssl-dev libgomp1 curl -COPY --from=build /app/llama-cli /llama-cli +COPY --from=build /app/build/bin/llama-cli /app/ +COPY --from=build /app/lib/ /app/ ENV LC_ALL=C.utf8 -ENTRYPOINT [ "/llama-cli" ] +ENTRYPOINT [ "/app/llama-cli" ] diff --git a/.devops/llama-server-musa.Dockerfile b/.devops/llama-server-musa.Dockerfile index eb67201c18775..cebe51d42fa95 100644 --- a/.devops/llama-server-musa.Dockerfile +++ b/.devops/llama-server-musa.Dockerfile @@ -8,6 +8,9 @@ ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU FROM ${BASE_MUSA_DEV_CONTAINER} AS build +# MUSA architecture to build for (defaults to all supported archs) +ARG MUSA_DOCKER_ARCH=default + RUN apt-get update && \ apt-get install -y build-essential git cmake libcurl4-openssl-dev @@ -15,7 +18,11 @@ WORKDIR /app COPY . . -RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ +# Use the default MUSA archs if not specified +RUN if [ "${MUSA_DOCKER_ARCH}" != "default" ]; then \ + export CMAKE_ARGS="-DMUSA_ARCHITECTURES=${MUSA_DOCKER_ARCH}"; \ + fi && \ + cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \ cmake --build build --config Release --target llama-server -j$(nproc) && \ mkdir -p /app/lib && \ find build -name "*.so" -exec cp {} /app/lib \; diff --git a/.devops/llama-server.Dockerfile b/.devops/llama-server.Dockerfile index 02accc85e1368..72ccde2feaeb9 100644 --- a/.devops/llama-server.Dockerfile +++ b/.devops/llama-server.Dockerfile @@ -3,22 +3,26 @@ ARG UBUNTU_VERSION=22.04 FROM ubuntu:$UBUNTU_VERSION AS build RUN apt-get update && \ - apt-get install -y build-essential git libcurl4-openssl-dev + apt-get install -y build-essential git cmake libcurl4-openssl-dev WORKDIR /app COPY . . -ENV LLAMA_CURL=1 - -RUN make -j$(nproc) llama-server +RUN cmake -S . -B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \ + cmake --build build -j $(nproc) && \ + mkdir -p /app/lib && \ + find build -name "*.so" -exec cp {} /app/lib/ \; FROM ubuntu:$UBUNTU_VERSION AS runtime +WORKDIR /app + RUN apt-get update && \ apt-get install -y libcurl4-openssl-dev libgomp1 curl -COPY --from=build /app/llama-server /llama-server +COPY --from=build /app/build/bin/llama-server /app/ +COPY --from=build /app/lib/ /app/ ENV LC_ALL=C.utf8 # Must be set to 0.0.0.0 so it can listen to requests from host machine @@ -26,4 +30,4 @@ ENV LLAMA_ARG_HOST=0.0.0.0 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ] -ENTRYPOINT [ "/llama-server" ] +ENTRYPOINT [ "/app/llama-server" ] diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix index b88e6ca809f1f..043c4364b956a 100644 --- a/.devops/nix/package.nix +++ b/.devops/nix/package.nix @@ -31,6 +31,7 @@ # Increases the runtime closure size by ~700M useMpi ? false, useRocm ? config.rocmSupport, + rocmGpuTargets ? builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets, enableCurl ? true, useVulkan ? false, llamaVersion ? 
"0.0.0", # Arbitrary version, substituted by the flake @@ -188,7 +189,7 @@ effectiveStdenv.mkDerivation (finalAttrs: { ] ++ optionals useRocm [ (cmakeFeature "CMAKE_HIP_COMPILER" "${rocmPackages.llvm.clang}/bin/clang") - (cmakeFeature "CMAKE_HIP_ARCHITECTURES" (builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets)) + (cmakeFeature "CMAKE_HIP_ARCHITECTURES" rocmGpuTargets) ] ++ optionals useMetalKit [ (lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1") diff --git a/.devops/nix/python-scripts.nix b/.devops/nix/python-scripts.nix index 392e9ffe41bf5..56ea182788764 100644 --- a/.devops/nix/python-scripts.nix +++ b/.devops/nix/python-scripts.nix @@ -34,7 +34,7 @@ let # server tests openai - behave + pytest prometheus-client ]; in diff --git a/.devops/tools.sh b/.devops/tools.sh index 24dcfd35079cb..9a86e6ea0185d 100755 --- a/.devops/tools.sh +++ b/.devops/tools.sh @@ -8,11 +8,11 @@ arg1="$1" shift if [[ "$arg1" == '--convert' || "$arg1" == '-c' ]]; then - python3 ./convert_hf_to_gguf.py "$@" + exec python3 ./convert_hf_to_gguf.py "$@" elif [[ "$arg1" == '--quantize' || "$arg1" == '-q' ]]; then - ./llama-quantize "$@" + exec ./llama-quantize "$@" elif [[ "$arg1" == '--run' || "$arg1" == '-r' ]]; then - ./llama-cli "$@" + exec ./llama-cli "$@" elif [[ "$arg1" == '--all-in-one' || "$arg1" == '-a' ]]; then echo "Converting PTH to GGML..." for i in `ls $1/$2/ggml-model-f16.bin*`; do @@ -20,11 +20,11 @@ elif [[ "$arg1" == '--all-in-one' || "$arg1" == '-a' ]]; then echo "Skip model quantization, it already exists: ${i/f16/q4_0}" else echo "Converting PTH to GGML: $i into ${i/f16/q4_0}..." - ./llama-quantize "$i" "${i/f16/q4_0}" q4_0 + exec ./llama-quantize "$i" "${i/f16/q4_0}" q4_0 fi done elif [[ "$arg1" == '--server' || "$arg1" == '-s' ]]; then - ./llama-server "$@" + exec ./llama-server "$@" else echo "Unknown command: $arg1" echo "Available commands: " diff --git a/.github/ISSUE_TEMPLATE/01-bug-low.yml b/.github/ISSUE_TEMPLATE/01-bug-low.yml deleted file mode 100644 index 54785854f776e..0000000000000 --- a/.github/ISSUE_TEMPLATE/01-bug-low.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Low Severity Bugs -description: Used to report low severity bugs in llama.cpp (e.g. cosmetic issues, non critical UI glitches) -title: "Bug: " -labels: ["bug-unconfirmed", "low severity"] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to fill out this bug report! - Please include information about your system, the steps to reproduce the bug, - and the version of llama.cpp that you are using. - If possible, please provide a minimal code example that reproduces the bug. - - type: textarea - id: what-happened - attributes: - label: What happened? - description: Also tell us, what did you expect to happen? - placeholder: Tell us what you see! - validations: - required: true - - type: textarea - id: version - attributes: - label: Name and Version - description: Which executable and which version of our software are you running? (use `--version` to get a version string) - placeholder: | - $./llama-cli --version - version: 2999 (42b4109e) - built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu - validations: - required: true - - type: dropdown - id: operating-system - attributes: - label: What operating system are you seeing the problem on? - multiple: true - options: - - Linux - - Mac - - Windows - - BSD - - Other? 
(Please let us know in description) - validations: - required: false - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. - render: shell diff --git a/.github/ISSUE_TEMPLATE/010-bug-compilation.yml b/.github/ISSUE_TEMPLATE/010-bug-compilation.yml new file mode 100644 index 0000000000000..f10b3a2b22648 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/010-bug-compilation.yml @@ -0,0 +1,77 @@ +name: Bug (compilation) +description: Something goes wrong when trying to compile llama.cpp. +title: "Compile bug: " +labels: ["bug-unconfirmed", "compilation"] +body: + - type: markdown + attributes: + value: > + Thanks for taking the time to fill out this bug report! + This issue template is intended for bug reports where the compilation of llama.cpp fails. + Before opening an issue, please confirm that the compilation still fails with `-DGGML_CCACHE=OFF`. + If the compilation succeeds with ccache disabled you should be able to permanently fix the issue + by clearing `~/.cache/ccache` (on Linux). + - type: textarea + id: commit + attributes: + label: Git commit + description: Which commit are you trying to compile? + placeholder: | + $git rev-parse HEAD + 84a07a17b1b08cf2b9747c633a2372782848a27f + validations: + required: true + - type: dropdown + id: operating-system + attributes: + label: Operating systems + description: Which operating systems do you know to be affected? + multiple: true + options: + - Linux + - Mac + - Windows + - BSD + - Other? (Please let us know in description) + validations: + required: true + - type: dropdown + id: backends + attributes: + label: GGML backends + description: Which GGML backends do you know to be affected? + options: [AMX, BLAS, CPU, CUDA, HIP, Kompute, Metal, Musa, RPC, SYCL, Vulkan] + multiple: true + validations: + required: true + - type: textarea + id: info + attributes: + label: Problem description & steps to reproduce + description: > + Please give us a summary of the problem and tell us how to reproduce it. + If you can narrow down the bug to specific compile flags, that information would be very much appreciated by us. + placeholder: > + I'm trying to compile llama.cpp with CUDA support on a fresh install of Ubuntu and get error XY. + Here are the exact commands that I used: ... + validations: + required: true + - type: textarea + id: first_bad_commit + attributes: + label: First Bad Commit + description: > + If the bug was not present on an earlier version: when did it start appearing? + If possible, please do a git bisect and identify the exact commit that introduced the bug. + validations: + required: false + - type: textarea + id: logs + attributes: + label: Relevant log output + description: > + Please copy and paste any relevant log output, including the command that you entered and any generated text. + This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/011-bug-results.yml b/.github/ISSUE_TEMPLATE/011-bug-results.yml new file mode 100644 index 0000000000000..1ccef0793d45e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/011-bug-results.yml @@ -0,0 +1,101 @@ +name: Bug (model use) +description: Something goes wrong when using a model (in general, not specific to a single llama.cpp module). 
+title: "Eval bug: " +labels: ["bug-unconfirmed", "model evaluation"] +body: + - type: markdown + attributes: + value: > + Thanks for taking the time to fill out this bug report! + This issue template is intended for bug reports where the model evaluation results + (i.e. the generated text) are incorrect or llama.cpp crashes during model evaluation. + If you encountered the issue while using an external UI (e.g. ollama), + please reproduce your issue using one of the examples/binaries in this repository. + The `llama-cli` binary can be used for simple and reproducible model inference. + - type: textarea + id: version + attributes: + label: Name and Version + description: Which version of our software are you running? (use `--version` to get a version string) + placeholder: | + $./llama-cli --version + version: 2999 (42b4109e) + built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu + validations: + required: true + - type: dropdown + id: operating-system + attributes: + label: Operating systems + description: Which operating systems do you know to be affected? + multiple: true + options: + - Linux + - Mac + - Windows + - BSD + - Other? (Please let us know in description) + validations: + required: true + - type: dropdown + id: backends + attributes: + label: GGML backends + description: Which GGML backends do you know to be affected? + options: [AMX, BLAS, CPU, CUDA, HIP, Kompute, Metal, Musa, RPC, SYCL, Vulkan] + multiple: true + validations: + required: true + - type: textarea + id: hardware + attributes: + label: Hardware + description: Which CPUs/GPUs are you using? + placeholder: > + e.g. Ryzen 5950X + 2x RTX 4090 + validations: + required: true + - type: textarea + id: model + attributes: + label: Models + description: > + Which model(s) at which quantization were you using when encountering the bug? + If you downloaded a GGUF file off of Huggingface, please provide a link. + placeholder: > + e.g. Meta LLaMA 3.1 Instruct 8b q4_K_M + validations: + required: false + - type: textarea + id: info + attributes: + label: Problem description & steps to reproduce + description: > + Please give us a summary of the problem and tell us how to reproduce it. + If you can narrow down the bug to specific hardware, compile flags, or command line arguments, + that information would be very much appreciated by us. + placeholder: > + e.g. when I run llama-cli with -ngl 99 I get garbled outputs. + When I use -ngl 0 it works correctly. + Here are the exact commands that I used: ... + validations: + required: true + - type: textarea + id: first_bad_commit + attributes: + label: First Bad Commit + description: > + If the bug was not present on an earlier version: when did it start appearing? + If possible, please do a git bisect and identify the exact commit that introduced the bug. + validations: + required: false + - type: textarea + id: logs + attributes: + label: Relevant log output + description: > + Please copy and paste any relevant log output, including the command that you entered and any generated text. + This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/019-bug-misc.yml b/.github/ISSUE_TEMPLATE/019-bug-misc.yml new file mode 100644 index 0000000000000..d157ea307e50f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/019-bug-misc.yml @@ -0,0 +1,81 @@ +name: Bug (misc.) +description: Something is not working the way it should (and it's not covered by any of the above cases). 
+title: "Misc. bug: " +labels: ["bug-unconfirmed"] +body: + - type: markdown + attributes: + value: > + Thanks for taking the time to fill out this bug report! + This issue template is intended for miscellaneous bugs that don't fit into any other category. + If you encountered the issue while using an external UI (e.g. ollama), + please reproduce your issue using one of the examples/binaries in this repository. + - type: textarea + id: version + attributes: + label: Name and Version + description: Which version of our software is affected? (You can use `--version` to get a version string.) + placeholder: | + $./llama-cli --version + version: 2999 (42b4109e) + built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu + validations: + required: true + - type: dropdown + id: operating-system + attributes: + label: Operating systems + description: Which operating systems do you know to be affected? + multiple: true + options: + - Linux + - Mac + - Windows + - BSD + - Other? (Please let us know in description) + validations: + required: false + - type: dropdown + id: module + attributes: + label: Which llama.cpp modules do you know to be affected? + multiple: true + options: + - Documentation/Github + - libllama (core library) + - llama-cli + - llama-server + - llama-bench + - llama-quantize + - Python/Bash scripts + - Test code + - Other (Please specify in the next section) + validations: + required: false + - type: textarea + id: info + attributes: + label: Problem description & steps to reproduce + description: > + Please give us a summary of the problem and tell us how to reproduce it (if applicable). + validations: + required: true + - type: textarea + id: first_bad_commit + attributes: + label: First Bad Commit + description: > + If the bug was not present on an earlier version and it's not trivial to track down: when did it start appearing? + If possible, please do a git bisect and identify the exact commit that introduced the bug. + validations: + required: false + - type: textarea + id: logs + attributes: + label: Relevant log output + description: > + If applicable, please copy and paste any relevant log output, including the command that you entered and any generated text. + This will be automatically formatted into code, so no need for backticks. + render: shell + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/02-bug-medium.yml b/.github/ISSUE_TEMPLATE/02-bug-medium.yml deleted file mode 100644 index a6285c6f05bac..0000000000000 --- a/.github/ISSUE_TEMPLATE/02-bug-medium.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Medium Severity Bug -description: Used to report medium severity bugs in llama.cpp (e.g. Malfunctioning Features but generally still useable) -title: "Bug: " -labels: ["bug-unconfirmed", "medium severity"] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to fill out this bug report! - Please include information about your system, the steps to reproduce the bug, - and the version of llama.cpp that you are using. - If possible, please provide a minimal code example that reproduces the bug. - - type: textarea - id: what-happened - attributes: - label: What happened? - description: Also tell us, what did you expect to happen? - placeholder: Tell us what you see! - validations: - required: true - - type: textarea - id: version - attributes: - label: Name and Version - description: Which executable and which version of our software are you running? 
(use `--version` to get a version string) - placeholder: | - $./llama-cli --version - version: 2999 (42b4109e) - built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu - validations: - required: true - - type: dropdown - id: operating-system - attributes: - label: What operating system are you seeing the problem on? - multiple: true - options: - - Linux - - Mac - - Windows - - BSD - - Other? (Please let us know in description) - validations: - required: false - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. - render: shell diff --git a/.github/ISSUE_TEMPLATE/05-enhancement.yml b/.github/ISSUE_TEMPLATE/020-enhancement.yml similarity index 97% rename from .github/ISSUE_TEMPLATE/05-enhancement.yml rename to .github/ISSUE_TEMPLATE/020-enhancement.yml index 58fca73183d41..02dd4f575a686 100644 --- a/.github/ISSUE_TEMPLATE/05-enhancement.yml +++ b/.github/ISSUE_TEMPLATE/020-enhancement.yml @@ -1,5 +1,5 @@ name: Enhancement -description: Used to request enhancements for llama.cpp +description: Used to request enhancements for llama.cpp. title: "Feature Request: " labels: ["enhancement"] body: diff --git a/.github/ISSUE_TEMPLATE/03-bug-high.yml b/.github/ISSUE_TEMPLATE/03-bug-high.yml deleted file mode 100644 index ff816b93769c3..0000000000000 --- a/.github/ISSUE_TEMPLATE/03-bug-high.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: High Severity Bug -description: Used to report high severity bugs in llama.cpp (e.g. Malfunctioning features hindering important common workflow) -title: "Bug: " -labels: ["bug-unconfirmed", "high severity"] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to fill out this bug report! - Please include information about your system, the steps to reproduce the bug, - and the version of llama.cpp that you are using. - If possible, please provide a minimal code example that reproduces the bug. - - type: textarea - id: what-happened - attributes: - label: What happened? - description: Also tell us, what did you expect to happen? - placeholder: Tell us what you see! - validations: - required: true - - type: textarea - id: version - attributes: - label: Name and Version - description: Which executable and which version of our software are you running? (use `--version` to get a version string) - placeholder: | - $./llama-cli --version - version: 2999 (42b4109e) - built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu - validations: - required: true - - type: dropdown - id: operating-system - attributes: - label: What operating system are you seeing the problem on? - multiple: true - options: - - Linux - - Mac - - Windows - - BSD - - Other? (Please let us know in description) - validations: - required: false - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 
- render: shell diff --git a/.github/ISSUE_TEMPLATE/06-research.yml b/.github/ISSUE_TEMPLATE/030-research.yml similarity index 97% rename from .github/ISSUE_TEMPLATE/06-research.yml rename to .github/ISSUE_TEMPLATE/030-research.yml index 3ae4e9f8caaa4..18975dbbfd0fe 100644 --- a/.github/ISSUE_TEMPLATE/06-research.yml +++ b/.github/ISSUE_TEMPLATE/030-research.yml @@ -1,5 +1,5 @@ name: Research -description: Track new technical research area +description: Track new technical research area. title: "Research: " labels: ["research šŸ”¬"] body: diff --git a/.github/ISSUE_TEMPLATE/04-bug-critical.yml b/.github/ISSUE_TEMPLATE/04-bug-critical.yml deleted file mode 100644 index 7af42a80b3b93..0000000000000 --- a/.github/ISSUE_TEMPLATE/04-bug-critical.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Critical Severity Bug -description: Used to report critical severity bugs in llama.cpp (e.g. Crashing, Corrupted, Dataloss) -title: "Bug: " -labels: ["bug-unconfirmed", "critical severity"] -body: - - type: markdown - attributes: - value: | - Thanks for taking the time to fill out this bug report! - Please include information about your system, the steps to reproduce the bug, - and the version of llama.cpp that you are using. - If possible, please provide a minimal code example that reproduces the bug. - - type: textarea - id: what-happened - attributes: - label: What happened? - description: Also tell us, what did you expect to happen? - placeholder: Tell us what you see! - validations: - required: true - - type: textarea - id: version - attributes: - label: Name and Version - description: Which executable and which version of our software are you running? (use `--version` to get a version string) - placeholder: | - $./llama-cli --version - version: 2999 (42b4109e) - built with cc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0 for x86_64-linux-gnu - validations: - required: true - - type: dropdown - id: operating-system - attributes: - label: What operating system are you seeing the problem on? - multiple: true - options: - - Linux - - Mac - - Windows - - BSD - - Other? (Please let us know in description) - validations: - required: false - - type: textarea - id: logs - attributes: - label: Relevant log output - description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. - render: shell diff --git a/.github/ISSUE_TEMPLATE/07-refactor.yml b/.github/ISSUE_TEMPLATE/040-refactor.yml similarity index 95% rename from .github/ISSUE_TEMPLATE/07-refactor.yml rename to .github/ISSUE_TEMPLATE/040-refactor.yml index 3a68d3d5355d6..b6e6ab36defd6 100644 --- a/.github/ISSUE_TEMPLATE/07-refactor.yml +++ b/.github/ISSUE_TEMPLATE/040-refactor.yml @@ -1,5 +1,5 @@ name: Refactor (Maintainers) -description: Used to track refactoring opportunities +description: Used to track refactoring opportunities. 
title: "Refactor: " labels: ["refactor"] body: diff --git a/.github/labeler.yml b/.github/labeler.yml index 89436740d1ffb..1b47bc96885c4 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -3,19 +3,18 @@ Kompute: - changed-files: - any-glob-to-any-file: - ggml/include/ggml-kompute.h - - ggml/src/ggml-kompute.cpp + - ggml/src/ggml-kompute/** - README-kompute.md Apple Metal: - changed-files: - any-glob-to-any-file: - ggml/include/ggml-metal.h - - ggml/src/ggml-metal.cpp + - ggml/src/ggml-metal/** - README-metal.md SYCL: - changed-files: - any-glob-to-any-file: - ggml/include/ggml-sycl.h - - ggml/src/ggml-sycl.cpp - ggml/src/ggml-sycl/** - docs/backend/SYCL.md - examples/sycl/** @@ -27,8 +26,8 @@ Nvidia GPU: Vulkan: - changed-files: - any-glob-to-any-file: - - ggml/ggml_vk_generate_shaders.py - - ggml/src/ggml-vulkan* + - ggml/include/ggml-vulkan.h + - ggml/src/ggml-vulkan/** documentation: - changed-files: - any-glob-to-any-file: @@ -75,11 +74,7 @@ server: ggml: - changed-files: - any-glob-to-any-file: - - ggml/include/ggml*.h - - ggml/src/ggml*.c - - ggml/src/ggml*.cpp - - ggml/src/ggml*.h - - ggml-cuda/** + - ggml/** nix: - changed-files: - any-glob-to-any-file: diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 997c6d9d05397..d9f5bdc235a00 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,7 +1 @@ - - -- [x] I have read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md) -- Self-reported review complexity: - - [ ] Low - - [ ] Medium - - [ ] High +*Make sure to read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md) before submitting a PR* diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6ef0770f3fa2f..a377eff38fbe0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -160,66 +160,6 @@ jobs: path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip name: llama-bin-macos-x64.zip - ubuntu-focal-make: - runs-on: ubuntu-20.04 - env: - LLAMA_NODE_AVAILABLE: true - LLAMA_PYTHON_AVAILABLE: true - - steps: - - name: Clone - id: checkout - uses: actions/checkout@v4 - - - name: Dependencies - id: depends - run: | - sudo apt-get update - sudo apt-get install build-essential gcc-8 - - - uses: actions/setup-node@v4 - with: - node-version: "20" - - - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Build - id: make_build - env: - LLAMA_FATAL_WARNINGS: 1 - run: | - CC=gcc-8 make -j $(nproc) - - - name: Test - id: make_test - run: | - CC=gcc-8 make tests -j $(nproc) - make test -j $(nproc) - - ubuntu-focal-make-curl: - runs-on: ubuntu-20.04 - - steps: - - name: Clone - id: checkout - uses: actions/checkout@v4 - - - name: Dependencies - id: depends - run: | - sudo apt-get update - sudo apt-get install build-essential gcc-8 libcurl4-openssl-dev - - - name: Build - id: make_build - env: - LLAMA_FATAL_WARNINGS: 1 - LLAMA_CURL: 1 - run: | - CC=gcc-8 make -j $(nproc) - ubuntu-latest-cmake: runs-on: ubuntu-latest @@ -377,7 +317,7 @@ jobs: wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo apt-key add - sudo wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list sudo apt-get update -y - sudo apt-get install -y build-essential vulkan-sdk + sudo apt-get install -y build-essential mesa-vulkan-drivers vulkan-sdk - name: Build id: cmake_build @@ -387,6 +327,12 @@ jobs: cmake 
-DGGML_VULKAN=ON .. cmake --build . --config Release -j $(nproc) + - name: Test + id: cmake_test + run: | + cd build + ctest -L main --verbose --timeout 900 + ubuntu-22-cmake-hip: runs-on: ubuntu-22.04 container: rocm/dev-ubuntu-22.04:6.0.2 @@ -517,36 +463,6 @@ jobs: cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON .. cmake --build . --config Release -j $(nproc) - # TODO: build with GGML_NO_METAL because test-backend-ops fail on "Apple Paravirtual device" and I don't know - # how to debug it. - # ref: https://github.com/ggerganov/llama.cpp/actions/runs/7131777249/job/19420981052#step:5:1124 - macOS-latest-make: - runs-on: macos-latest - - steps: - - name: Clone - id: checkout - uses: actions/checkout@v4 - - - name: Dependencies - id: depends - continue-on-error: true - run: | - brew update - - - name: Build - id: make_build - env: - LLAMA_FATAL_WARNINGS: 1 - run: | - GGML_NO_METAL=1 make -j $(sysctl -n hw.logicalcpu) - - - name: Test - id: make_test - run: | - GGML_NO_METAL=1 make tests -j $(sysctl -n hw.logicalcpu) - GGML_NO_METAL=1 make test -j $(sysctl -n hw.logicalcpu) - # TODO: build with GGML_METAL=OFF because test-backend-ops fail on "Apple Paravirtual device" and I don't know # how to debug it. # ref: https://github.com/ggerganov/llama.cpp/actions/runs/7132125951/job/19422043567?pr=4359#step:5:6584 @@ -660,15 +576,26 @@ jobs: run: | brew update - - name: xcodebuild for swift package - id: xcodebuild + - name: Build llama.cpp with CMake + id: cmake_build run: | - xcodebuild -scheme llama -destination "${{ matrix.destination }}" + sysctl -a + mkdir build + cd build + cmake -G Xcode .. \ + -DGGML_METAL_USE_BF16=ON \ + -DGGML_METAL_EMBED_LIBRARY=ON \ + -DLLAMA_BUILD_EXAMPLES=OFF \ + -DLLAMA_BUILD_TESTS=OFF \ + -DLLAMA_BUILD_SERVER=OFF \ + -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" + cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) + sudo cmake --install . 
--config Release - - name: Build Swift Example - id: make_build_swift_example + - name: xcodebuild for swift package + id: xcodebuild run: | - make swift + xcodebuild -scheme llama-Package -destination "${{ matrix.destination }}" windows-msys2: runs-on: windows-latest @@ -695,21 +622,6 @@ jobs: mingw-w64-${{matrix.env}}-cmake mingw-w64-${{matrix.env}}-openblas - - name: Build using make - shell: msys2 {0} - run: | - make -j $(nproc) - - - name: Clean after building using make - shell: msys2 {0} - run: | - make clean - - - name: Build using make w/ OpenBLAS - shell: msys2 {0} - run: | - make GGML_OPENBLAS=1 -j $(nproc) - - name: Build using CMake shell: msys2 {0} run: | @@ -728,7 +640,7 @@ jobs: cmake --build build --config ${{ matrix.build }} -j $(nproc) windows-latest-cmake: - runs-on: windows-2019 + runs-on: windows-latest env: OPENBLAS_VERSION: 0.3.23 @@ -756,6 +668,8 @@ jobs: defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' - build: 'msvc-arm64' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + - build: 'llvm-arm64-opencl-adreno' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON' steps: - name: Clone @@ -797,6 +711,28 @@ jobs: run: | choco install ninja + - name: Install OpenCL Headers and Libs + id: install_opencl + if: ${{ matrix.build == 'llvm-arm64-opencl-adreno' }} + run: | + git clone https://github.com/KhronosGroup/OpenCL-Headers + cd OpenCL-Headers + mkdir build && cd build + cmake .. ` + -DBUILD_TESTING=OFF ` + -DOPENCL_HEADERS_BUILD_TESTING=OFF ` + -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF ` + -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release" + cmake --build . --target install + git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader + cd OpenCL-ICD-Loader + mkdir build-arm64-release && cd build-arm64-release + cmake .. ` + -A arm64 ` + -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" ` + -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release" + cmake --build . --target install --config release + - name: Build id: cmake_build run: | @@ -826,7 +762,7 @@ jobs: - name: Test id: cmake_test # not all machines have native AVX-512 - if: ${{ matrix.build != 'msvc-arm64' && matrix.build != 'llvm-arm64' && matrix.build != 'kompute-x64' && matrix.build != 'vulkan-x64' && (matrix.build != 'avx512-x64' || env.HAS_AVX512F == '1') }} + if: ${{ matrix.build != 'msvc-arm64' && matrix.build != 'llvm-arm64' && matrix.build != 'llvm-arm64-opencl-adreno' && matrix.build != 'kompute-x64' && matrix.build != 'vulkan-x64' && (matrix.build != 'avx512-x64' || env.HAS_AVX512F == '1') }} run: | cd build ctest -L main -C Release --verbose --timeout 900 @@ -871,12 +807,33 @@ jobs: path: llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip name: llama-bin-win-${{ matrix.build }}.zip - windows-latest-cmake-cuda: + ubuntu-latest-cmake-cuda: + runs-on: ubuntu-latest + container: nvidia/cuda:12.6.2-devel-ubuntu24.04 + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Install dependencies + env: + DEBIAN_FRONTEND: noninteractive + run: | + apt update + apt install -y cmake build-essential ninja-build libgomp1 git + + - name: Build with CMake + run: | + cmake -S . 
-B build -G Ninja -DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES=89-real -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined -DLLAMA_FATAL_WARNINGS=ON + cmake --build build + + windows-2019-cmake-cuda: runs-on: windows-2019 strategy: matrix: - cuda: ['12.2.0', '11.7.1'] + cuda: ['12.4', '11.7'] build: ['cuda'] steps: @@ -884,24 +841,83 @@ jobs: id: checkout uses: actions/checkout@v4 with: - fetch-depth: 0 - - - name: Install CUDA toolkit - id: cuda-toolkit - uses: Jimver/cuda-toolkit@v0.2.15 + fetch-depth: 0 + + - name: Install Cuda Toolkit 11.7 + if: ${{ matrix.cuda == '11.7' }} + run: | + mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" + choco install unzip -y + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-11.7.99-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-11.7.99-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-11.7.99-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-11.7.4.6-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-11.7.91-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-11.7.91-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-11.7.101-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-11.7.91-archive.zip" + unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_cudart-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvcc-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvrtc-windows-x86_64-11.7.99-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\libcublas-windows-x86_64-11.7.4.6-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvtx-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\visual_studio_integration-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_nvprof-windows-x86_64-11.7.101-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\cuda_cccl-windows-x86_64-11.7.91-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" /E /I /H /Y + echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\bin" | Out-File -FilePath $env:GITHUB_PATH 
-Encoding utf8 -Append + echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + echo "CUDA_PATH_V11_7=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.7" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + + - name: Install Cuda Toolkit 12.4 + if: ${{ matrix.cuda == '12.4' }} + run: | + mkdir -p "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" + choco install unzip -y + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cudart/windows-x86_64/cuda_cudart-windows-x86_64-12.4.127-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvcc/windows-x86_64/cuda_nvcc-windows-x86_64-12.4.131-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvrtc/windows-x86_64/cuda_nvrtc-windows-x86_64-12.4.127-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/libcublas/windows-x86_64/libcublas-windows-x86_64-12.4.5.8-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvtx/windows-x86_64/cuda_nvtx-windows-x86_64-12.4.127-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_profiler_api/windows-x86_64/cuda_profiler_api-windows-x86_64-12.4.127-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/visual_studio_integration/windows-x86_64/visual_studio_integration-windows-x86_64-12.4.127-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_nvprof/windows-x86_64/cuda_nvprof-windows-x86_64-12.4.127-archive.zip" + curl -O "https://developer.download.nvidia.com/compute/cuda/redist/cuda_cccl/windows-x86_64/cuda_cccl-windows-x86_64-12.4.127-archive.zip" + unzip '*.zip' -d "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_cudart-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvcc-windows-x86_64-12.4.131-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvrtc-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libcublas-windows-x86_64-12.4.5.8-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvtx-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_profiler_api-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\visual_studio_integration-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\cuda_nvprof-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + xcopy "C:\Program Files\NVIDIA GPU Computing 
Toolkit\CUDA\v12.4\cuda_cccl-windows-x86_64-12.4.127-archive\*" "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" /E /I /H /Y + echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4\libnvvp" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + echo "CUDA_PATH_V12_4=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + + - name: Install ccache + uses: hendrikmuhs/ccache-action@v1.2 with: - cuda: ${{ matrix.cuda }} - method: 'network' - sub-packages: '["nvcc", "cudart", "cublas", "cublas_dev", "thrust", "visual_studio_integration"]' + key: ${{ github.job }}-${{ matrix.cuda }}-${{ matrix.build }} + + - name: Install Ninja + id: install_ninja + run: | + choco install ninja - name: Build id: cmake_build + shell: cmd run: | - mkdir build - cd build - cmake .. -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DBUILD_SHARED_LIBS=ON -DGGML_RPC=ON - cmake --build . --config Release -j $((${env:NUMBER_OF_PROCESSORS} - 1)) -t ggml - cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS} + call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat" + cmake -S . -B build -G "Ninja Multi-Config" -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DBUILD_SHARED_LIBS=ON -DGGML_RPC=ON + set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1 + cmake --build build --config Release -j %NINJA_JOBS% -t ggml + cmake --build build --config Release - name: Determine tag name id: tag @@ -930,10 +946,12 @@ jobs: name: llama-bin-win-cu${{ matrix.cuda }}-x64.zip - name: Copy and pack Cuda runtime + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }} run: | - echo "Cuda install location: ${{steps.cuda-toolkit.outputs.CUDA_PATH}}" + echo "Cuda install location: ${{ env.CUDA_PATH }}" $dst='.\build\bin\cudart\' - robocopy "${{steps.cuda-toolkit.outputs.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll + robocopy "${{env.CUDA_PATH}}\bin" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll + robocopy "${{env.CUDA_PATH}}\lib" $dst cudart64_*.dll cublas64_*.dll cublasLt64_*.dll 7z a cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip $dst\* - name: Upload Cuda runtime @@ -952,7 +970,7 @@ jobs: env: WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/b380d914-366b-4b77-a74a-05e3c38b3514/intel-oneapi-base-toolkit-2025.0.0.882_offline.exe - WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel + WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel:intel.oneapi.win.dnnl:intel.oneapi.win.tbb.devel ONEAPI_ROOT: "C:/Program Files (x86)/Intel/oneAPI" steps: - name: Clone @@ -962,7 +980,8 @@ jobs: fetch-depth: 0 - name: Install - run: scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL + run: | + scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL - name: Build id: cmake_build @@ -981,25 +1000,33 @@ jobs: echo "name=${SAFE_NAME}-b${BUILD_NUMBER}-${SHORT_HASH}" >> $GITHUB_OUTPUT fi - - name: Pack artifacts + - name: Build the release package id: pack_artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 
'true' }} run: | echo "cp oneAPI running time dll files in ${{ env.ONEAPI_ROOT }} to ./build/bin" - cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.4.dll" ./build/bin + + cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_sycl_blas.5.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_core.2.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/mkl/latest/bin/mkl_tbb_thread.2.dll" ./build/bin - cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/pi_win_proxy_loader.dll" ./build/bin - cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/pi_level_zero.dll" ./build/bin - cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl7.dll" ./build/bin + cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_level_zero.dll" ./build/bin + cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_adapter_opencl.dll" ./build/bin + cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_loader.dll" ./build/bin + cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/ur_win_proxy_loader.dll" ./build/bin + + cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/sycl8.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/svml_dispmd.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libmmd.dll" ./build/bin cp "${{ env.ONEAPI_ROOT }}/compiler/latest/bin/libiomp5md.dll" ./build/bin + + cp "${{ env.ONEAPI_ROOT }}/dnnl/latest/bin/dnnl.dll" ./build/bin + cp "${{ env.ONEAPI_ROOT }}/tbb/latest/bin/tbb12.dll" ./build/bin + echo "cp oneAPI running time dll files to ./build/bin done" 7z a llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip ./build/bin/* - - name: Upload artifacts + - name: Upload the release package if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: @@ -1030,6 +1057,11 @@ jobs: run: | & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version + - name: Install ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + key: ${{ github.job }} + - name: Build id: cmake_build run: | @@ -1050,6 +1082,8 @@ jobs: - name: Clone id: checkout uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Install id: depends @@ -1109,6 +1143,29 @@ jobs: - name: Checkout code uses: actions/checkout@v4 + - name: Build + id: cmake_build + run: | + sysctl -a + mkdir build + cd build + cmake -G Xcode .. \ + -DGGML_METAL_USE_BF16=ON \ + -DGGML_METAL_EMBED_LIBRARY=ON \ + -DLLAMA_BUILD_EXAMPLES=OFF \ + -DLLAMA_BUILD_TESTS=OFF \ + -DLLAMA_BUILD_SERVER=OFF \ + -DCMAKE_SYSTEM_NAME=iOS \ + -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \ + -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml + cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO + sudo cmake --install . 
--config Release + + - name: xcodebuild for swift package + id: xcodebuild + run: | + xcodebuild -scheme llama-Package -destination 'generic/platform=iOS' + - name: Build Xcode project run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build @@ -1136,35 +1193,16 @@ jobs: ./gradlew build --no-daemon -# freeBSD-latest: -# runs-on: macos-12 -# steps: -# - name: Clone -# uses: actions/checkout@v4 -# -# - name: Build -# uses: cross-platform-actions/action@v0.19.0 -# with: -# operating_system: freebsd -# version: '13.2' -# hypervisor: 'qemu' -# run: | -# sudo pkg update -# sudo pkg install -y gmake automake autoconf pkgconf llvm15 openblas -# gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j `sysctl -n hw.ncpu` - release: if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} runs-on: ubuntu-latest needs: - - ubuntu-focal-make - ubuntu-latest-cmake - - macOS-latest-make - macOS-latest-cmake - windows-latest-cmake - - windows-latest-cmake-cuda + - windows-2019-cmake-cuda - windows-latest-cmake-hip-release - macOS-latest-cmake-arm64 - macOS-latest-cmake-x64 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a953cdac907ae..bc2e5020de25c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -10,12 +10,10 @@ name: Publish Docker image on: - #pull_request: - push: - branches: - - master - paths: ['.github/workflows/docker.yml', '.devops/*.Dockerfile', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal'] - workflow_dispatch: # allows manual triggering, useful for debugging + workflow_dispatch: # allows manual triggering + schedule: + # Rebuild daily rather than on every push because it is expensive + - cron: '12 4 * * *' concurrency: group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} @@ -29,7 +27,6 @@ permissions: jobs: push_to_registry: name: Push Docker image to Docker Hub - #if: github.event.pull_request.draft == false runs-on: ubuntu-latest env: @@ -117,7 +114,7 @@ jobs: swap-storage: true - name: Build and push Docker image (tagged + versioned) - if: github.event_name == 'push' + if: ${{ github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} uses: docker/build-push-action@v6 with: context: . diff --git a/.github/workflows/nix-ci-aarch64.yml b/.github/workflows/nix-ci-aarch64.yml deleted file mode 100644 index 0da6acdf1c81e..0000000000000 --- a/.github/workflows/nix-ci-aarch64.yml +++ /dev/null @@ -1,72 +0,0 @@ -name: Nix aarch64 builds - -on: - workflow_dispatch: # allows manual triggering - schedule: - # Rebuild daily rather than on every push because QEMU is expensive (e.g. - # 1.5h instead of minutes with the cold cache). 
- # - # randint(0, 59), randint(0, 23) - - cron: '26 12 * * *' - # But also rebuild if we touched any of the Nix expressions: - push: - branches: - - master - paths: ['**/*.nix', 'flake.lock'] - pull_request: - types: [opened, synchronize, reopened] - paths: ['**/*.nix', 'flake.lock'] - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} - cancel-in-progress: true - -# Fine-grant permission -# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token -permissions: - # https://github.com/DeterminateSystems/nix-installer-action?tab=readme-ov-file#with-flakehub - id-token: write - contents: read - -jobs: - nix-build-aarch64: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Install QEMU - # Copy-paste from https://github.com/orgs/community/discussions/8305#discussioncomment-5888654 - run: | - sudo apt-get update - sudo apt-get install -y qemu-user-static qemu-system-aarch64 - sudo usermod -a -G kvm $USER - - name: Install Nix - uses: DeterminateSystems/nix-installer-action@v9 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - extra-conf: | - extra-platforms = aarch64-linux - extra-system-features = nixos-test kvm - extra-substituters = https://llama-cpp.cachix.org https://cuda-maintainers.cachix.org - extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= - - uses: DeterminateSystems/magic-nix-cache-action@v2 - with: - upstream-cache: https://${{ matrix.cachixName }}.cachix.org - - name: Set-up cachix to push the results to - uses: cachix/cachix-action@v13 - with: - authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' - name: llama-cpp - - name: Show all output paths - run: > - nix run github:nix-community/nix-eval-jobs - -- --gc-roots-dir gcroot - --flake - ".#packages.aarch64-linux" - - name: Build - run: > - nix run github:Mic92/nix-fast-build - -- --skip-cached --no-nom - --systems aarch64-linux - --flake - ".#checks.aarch64-linux" diff --git a/.github/workflows/nix-ci.yml b/.github/workflows/nix-ci.yml deleted file mode 100644 index 8ecbbe53b4ed1..0000000000000 --- a/.github/workflows/nix-ci.yml +++ /dev/null @@ -1,79 +0,0 @@ -name: Nix CI - -on: - workflow_dispatch: # allows manual triggering - push: - branches: - - master - pull_request: - types: [opened, synchronize, reopened] - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} - cancel-in-progress: true - -# Fine-grant permission -# https://docs.github.com/en/actions/security-for-github-actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token -permissions: - # https://github.com/DeterminateSystems/nix-installer-action?tab=readme-ov-file#with-flakehub - id-token: write - contents: read - -jobs: - nix-eval: - strategy: - fail-fast: false - matrix: - os: [ ubuntu-latest, macos-latest ] - runs-on: ${{ matrix.os }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Install Nix - uses: DeterminateSystems/nix-installer-action@v9 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - extra-conf: | - extra-substituters = https://llama-cpp.cachix.org https://cuda-maintainers.cachix.org - extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= 
cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= - - uses: DeterminateSystems/magic-nix-cache-action@v2 - with: - upstream-cache: https://${{ matrix.cachixName }}.cachix.org - - name: List all flake outputs - run: nix flake show --all-systems - - name: Show all output paths - run: > - nix run github:nix-community/nix-eval-jobs - -- --gc-roots-dir gcroot - --flake - ".#packages.$(nix eval --raw --impure --expr builtins.currentSystem)" - nix-build: - strategy: - fail-fast: false - matrix: - os: [ ubuntu-latest, macos-latest ] - runs-on: ${{ matrix.os }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Install Nix - uses: DeterminateSystems/nix-installer-action@v9 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - extra-conf: | - extra-substituters = https://llama-cpp.cachix.org https://cuda-maintainers.cachix.org - extra-trusted-public-keys = llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc= cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= - - uses: DeterminateSystems/magic-nix-cache-action@v2 - with: - upstream-cache: https://${{ matrix.cachixName }}.cachix.org - - name: Set-up cachix to push the results to - uses: cachix/cachix-action@v13 - with: - authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' - name: llama-cpp - - name: Build - run: > - nix run github:Mic92/nix-fast-build - -- --skip-cached --no-nom - --flake - ".#checks.$(nix eval --raw --impure --expr builtins.currentSystem)" diff --git a/.github/workflows/nix-flake-update.yml b/.github/workflows/nix-flake-update.yml deleted file mode 100644 index 3a6a96e263e59..0000000000000 --- a/.github/workflows/nix-flake-update.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: update-flake-lock -on: - workflow_dispatch: - schedule: - - cron: '0 0 * * 0' # runs weekly on Sunday at 00:00 - -jobs: - lockfile: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Install Nix - uses: DeterminateSystems/nix-installer-action@main - - name: Update flake.lock - uses: DeterminateSystems/update-flake-lock@main - with: - pr-title: "nix: update flake.lock" - pr-labels: | - nix - pr-reviewers: philiptaron,SomeoneSerge - token: ${{ secrets.FLAKE_TOKEN }} diff --git a/.github/workflows/nix-publish-flake.yml b/.github/workflows/nix-publish-flake.yml deleted file mode 100644 index 2c3c1ebdaeff1..0000000000000 --- a/.github/workflows/nix-publish-flake.yml +++ /dev/null @@ -1,36 +0,0 @@ -# Make the flake discoverable on https://flakestry.dev and https://flakehub.com/flakes -name: "Publish a flake to flakestry & flakehub" -on: - push: - tags: - - "*" - workflow_dispatch: - inputs: - tag: - description: "The existing tag to publish" - type: "string" - required: true -jobs: - flakestry-publish: - runs-on: ubuntu-latest - permissions: - id-token: "write" - contents: "read" - steps: - - uses: flakestry/flakestry-publish@main - with: - version: "${{ inputs.tag || github.ref_name }}" - flakehub-publish: - runs-on: "ubuntu-latest" - permissions: - id-token: "write" - contents: "read" - steps: - - uses: "actions/checkout@v4" - with: - ref: "${{ (inputs.tag != null) && format('refs/tags/{0}', inputs.tag) || '' }}" - - uses: "DeterminateSystems/nix-installer-action@main" - - uses: "DeterminateSystems/flakehub-push@main" - with: - visibility: "public" - tag: "${{ inputs.tag }}" diff --git a/.github/workflows/python-lint.yml b/.github/workflows/python-lint.yml index a8d46f31dd4f5..ddfdf73b8fce2 100644 --- a/.github/workflows/python-lint.yml 
+++ b/.github/workflows/python-lint.yml @@ -1,6 +1,13 @@ name: flake8 Lint -on: [push, pull_request] +on: + push: + branches: + - master + paths: ['.github/workflows/python-lint.yml', '**/*.py'] + pull_request: + types: [opened, synchronize, reopened] + paths: ['.github/workflows/python-lint.yml', '**/*.py'] concurrency: group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} diff --git a/.github/workflows/server.yml b/.github/workflows/server.yml index 699ac095d6c83..671fe595cdfcb 100644 --- a/.github/workflows/server.yml +++ b/.github/workflows/server.yml @@ -76,20 +76,26 @@ jobs: run: | pip install -r examples/server/tests/requirements.txt - - name: Verify server deps - id: verify_server_deps + # Setup nodejs (to be used for verifying bundled index.html) + - uses: actions/setup-node@v4 + with: + node-version: '22.11.0' + + - name: Verify bundled index.html + id: verify_server_index_html run: | git config --global --add safe.directory $(realpath .) - cd examples/server - git ls-files --others --modified + cd examples/server/webui git status - ./deps.sh + npm ci + npm run build git status - not_ignored_files="$(git ls-files --others --modified)" - echo "Modified files: ${not_ignored_files}" - if [ -n "${not_ignored_files}" ]; then - echo "Repository is dirty or server deps are not built as expected" - echo "${not_ignored_files}" + modified_files="$(git status -s)" + echo "Modified files: ${modified_files}" + if [ -n "${modified_files}" ]; then + echo "Repository is dirty or server/webui is not built as expected" + echo "Hint: You may need to follow Web UI build guide in server/README.md" + echo "${modified_files}" exit 1 fi @@ -122,14 +128,14 @@ jobs: id: server_integration_tests run: | cd examples/server/tests - PORT=8888 ./tests.sh + ./tests.sh - name: Slow tests id: server_integration_tests_slow if: ${{ (github.event.schedule || github.event.inputs.slow_tests == 'true') && matrix.build_type == 'Release' }} run: | cd examples/server/tests - PORT=8888 ./tests.sh --stop --no-skipped --no-capture --tags slow + SLOW_TESTS=1 ./tests.sh server-windows: @@ -180,11 +186,12 @@ jobs: run: | cd examples/server/tests $env:PYTHONIOENCODING = ":replace" - behave.exe --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags llama.cpp + pytest -v -x - name: Slow tests id: server_integration_tests_slow if: ${{ (github.event.schedule || github.event.inputs.slow_tests == 'true') && matrix.build_type == 'Release' }} run: | cd examples/server/tests - behave.exe --stop --no-skipped --no-capture --tags slow + $env:SLOW_TESTS = "1" + pytest -v -x diff --git a/.gitignore b/.gitignore index 307c065f793e6..1df7cf4a15ecd 100644 --- a/.gitignore +++ b/.gitignore @@ -104,6 +104,10 @@ examples/server/*.mjs.hpp !examples/sycl/*.bat !examples/sycl/*.sh +# Server Web UI temporary files +node_modules +examples/server/webui/dist + # Python /.venv diff --git a/AUTHORS b/AUTHORS index 1bd36158a72f4..2eb60806ad058 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,4 +1,4 @@ -# date: Wed Jun 26 19:36:34 EEST 2024 +# date: Thu Nov 28 20:46:15 EET 2024 # this file is auto-generated by scripts/gen-authors.sh 0cc4m @@ -7,6 +7,7 @@ 2f38b454 3ooabkhxtn <31479382+3ooabkhxtn@users.noreply.github.com> 44670 <44670@users.noreply.github.com> +65a <10104049+65a@users.noreply.github.com> AN Long AT Aarni Koskela @@ -19,20 +20,28 @@ Adithya Balaji AdithyanI Adrian Adrian Hesketh +Ahmad Tameem <113388789+Tameem-10xE@users.noreply.github.com> Ahmet Zeer AidanBeltonS 
<87009434+AidanBeltonS@users.noreply.github.com> +AidanBeltonS Aisuko +Akarshan Biswas Akarshan Biswas +Al Mochkin <14274697+amochkin@users.noreply.github.com> Albert Jin Alberto <57916483+albbus-stack@users.noreply.github.com> +Alberto Cabrera PĆ©rez +Alberto Cabrera PĆ©rez Alex Alex Azarov Alex Azarov Alex Klinkhamer Alex Klinkhamer Alex Nguyen +Alex O'Connell <35843486+acon96@users.noreply.github.com> Alex Petenchea Alex Renda +Alex Tuddenham <61622354+AlexsCode@users.noreply.github.com> Alex von Gluck IV Alexey Parfenov Ali Chraghi <63465728+alichraghi@users.noreply.github.com> @@ -45,18 +54,25 @@ AmirAli Mirian <37371367+amiralimi@users.noreply.github.com> Ananta Bastola Anas Ahouzi <112881240+aahouzi@users.noreply.github.com> AndrĆ”s Salamon +Andreas (Andi) Kunar Andrei Andrew Canis Andrew Downing Andrew Duffy Andrew Godfrey +Andrew Minh Nguyen <40281306+amqdn@users.noreply.github.com> +Andy Salerno Andy Tai +Anthony Van de Gejuchte +Antonis Makropoulos Arik Poznanski +Armen Kaleshian Artem Artem Zinnatullin Artyom Lebedev AsbjĆørn Olling Ɓsgeir Bjarni Ingvarsson +Asghar Ghorbani Ashish <1856117+ashishdatta@users.noreply.github.com> Ashok Gelal <401055+ashokgelal@users.noreply.github.com> Ashraful Islam @@ -76,12 +92,16 @@ Ben Williams Benjamin Findley <39356821+Kartoffelsaft@users.noreply.github.com> Benjamin Lecaillon <84293038+blecaillon@users.noreply.github.com> Bernat Vadell +Bert Wagner Bingan <70050083+binganao@users.noreply.github.com> +Bjarke ViksĆøe <164612031+bviksoe@users.noreply.github.com> Bodo Graumann Bono Lv Borislav Stanimirov Branden Butler +Brandon Squizzato <35474886+bsquizz@users.noreply.github.com> Brian +Brian Cunnie Bruce MacDonald Bryan Honof CJ Pais @@ -90,32 +110,47 @@ Calvin Laurenson Cameron Cameron Kaiser Carolinabanana <140120812+Carolinabanana@users.noreply.github.com> +CarryFun <76023481+CarryFun@users.noreply.github.com> +Carsten Kragelund JĆørgensen +CarterLi999 <664681047@qq.com> Casey Primozic Casey Primozic CausalLM <148736309+CausalLM@users.noreply.github.com> Cebtenzzre Chad Brewbaker +Changyeon Kim Chao Jiang +Charles Xu <63788048+chaxu01@users.noreply.github.com> +Charles Xu +Chen Xi +Chen Xi Cheng Shao +Chenguang Li <87689256+noemotiovon@users.noreply.github.com> Chris Elrod Chris Kuehl Christian Demsar Christian Demsar Christian Falch <875252+chrfalch@users.noreply.github.com> Christian Kƶgler +Christian Kƶhnenkamp Christian Zhou-Zheng <59622928+christianazinn@users.noreply.github.com> Clark Saben <76020733+csaben@users.noreply.github.com> Clint Herron +Conrad Kramer CrispStrobe <154636388+CrispStrobe@users.noreply.github.com> +Csaba Kecskemeti Cuong Trinh Manh DANā„¢ Damian Stewart +Dan Johansson <164997844+eddnjjn@users.noreply.github.com> +Dan Johansson Dane Madsen DaniAndTheWeb <57776841+DaniAndTheWeb@users.noreply.github.com> Daniel Bevenius Daniel Drake Daniel Hiltgen Daniel Illescas Romero +Daniel Kleine <53251018+d-kleine@users.noreply.github.com> Daniele <57776841+daniandtheweb@users.noreply.github.com> DannyDaemonic Dat Quoc Nguyen <2412555+datquocnguyen@users.noreply.github.com> @@ -129,19 +164,28 @@ David Pflug David Renshaw David Sommers <12738+databyte@users.noreply.github.com> David Yang +DavidKorczynski Dawid Potocki Dawid Wysocki <62249621+TortillaZHawaii@users.noreply.github.com> Dean Deins +Denis Spasyuk <34203011+dspasyuk@users.noreply.github.com> +Derrick T. 
Woolworth Deven Mistry <31466137+deven367@users.noreply.github.com> +Dibakar Gope Didzis Gosko +Diego Devesa +Diogo Teles Sant'Anna Djip007 Don Mahurin DooWoong Lee (David) Doomsdayrs <38189170+Doomsdayrs@users.noreply.github.com> +Dou Xinpeng <15529241576@163.com> +Dou Xinpeng <81913537+Dou-Git@users.noreply.github.com> Douglas Hanley Dr. Tom Murphy VII Ph.D <499244+tom7@users.noreply.github.com> Ebey Abraham +Echo Nolan Ed Lee Ed Lepedus Eddie-Wang @@ -151,10 +195,13 @@ Elbios <141279586+Elbios@users.noreply.github.com> Elton Kola Engininja2 <139037756+Engininja2@users.noreply.github.com> Equim +Eric Curtin +Eric Curtin Eric Sommerlade Eric Zhang <34133756+EZForever@users.noreply.github.com> Erik Garrison Erik Scholz +Esko Toivonen Ettore Di Giacinto Evan Jones Evan Miller @@ -166,19 +213,26 @@ FK Fabian Fabio R. Sluzala Faez Shakil +Faisal Zaghloul +Faisal Zaghloul +Fan Shupei FantasyGmm <16450052+FantasyGmm@users.noreply.github.com> +Farbod Bijary <110523279+farbodbj@users.noreply.github.com> Fattire <528174+fat-tire@users.noreply.github.com> Felix Finn Voorhees Firat +FirstTimeEZ <179362031+FirstTimeEZ@users.noreply.github.com> Folko-Ven <71110216+Folko-Ven@users.noreply.github.com> Foul-Tarnished <107711110+Foul-Tarnished@users.noreply.github.com> Francisco Melo <43780565+francis2tm@users.noreply.github.com> Frank Mai FrankHB +Frankie Robertson Fred Douglas <43351173+fredlas@users.noreply.github.com> Frederik Vogel Gabe Goodhart +Gabe Goodhart GainLee Galunid Gary Linscott @@ -187,11 +241,13 @@ Gavin Zhao Genkagaku.GPT Georgi Gerganov Gilad S +Gilad S. <7817232+giladgd@users.noreply.github.com> Giuseppe Scrivano GiviMAD Govlzkoy Guillaume "Vermeille" Sanchez Guillaume Wenzek +Guoliang Hua <32868157+nbcsm@users.noreply.github.com> Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Gustavo Rocha Dias <91472747+gustrd@users.noreply.github.com> Haggai Nuchi @@ -213,11 +269,14 @@ Hong Bo PENG Hongyu Ouyang <96765450+casavaca@users.noreply.github.com> Howard Su Hua Jiang +Huang Qi Huawei Lin Hugo Roussel +Huifeng Ou <79071290+ho2103@users.noreply.github.com> Ian Bull Ian Bull Ian Scrivener +Icecream95 Ido S IgnacioFDM Igor Okulist @@ -226,11 +285,15 @@ Ilya Kurdyukov <59548320+ilyakurdyukov@users.noreply.github.com> Ionoclast Laboratories Isaac McFadyen IsaacDynamo <61521674+IsaacDynamo@users.noreply.github.com> +Ivan +Ivan Filipov <159561759+vanaka11@users.noreply.github.com> Ivan Komarov Ivan Stepanov JH23X <165871467+JH23X@users.noreply.github.com> +Jack Mousseau Jack Mousseau JackJollimore <130917767+JackJollimore@users.noreply.github.com> +Jaeden Amero Jaemin Son Jag Chadha Jakub N @@ -243,10 +306,14 @@ Jannis Schƶnleber Jared Van Bortel Jared Van Bortel Jason McCartney +Jason Stillerman Jean-Christophe Hoelt Jean-MichaĆ«l Celerier Jed Fox +Jeff Bolz +Jeffrey Morgan Jeffrey Quesnelle +Jeroen Mostert Jesse Jojo Johnson Jeximo Jhen-Jie Hong @@ -258,6 +325,9 @@ Jiří PodivĆ­n <66251151+jpodivin@users.noreply.github.com> Jiří Sejkora Joan Fontanals Joan Fontanals +JoĆ£o Dinis Ferreira +Joe Eli McIlvain +Joe Todd Johan Johannes Gäßler Johannes Rudolph @@ -274,7 +344,9 @@ Joyce Juan Calderon-Perez <835733+gaby@users.noreply.github.com> Judd Julius Arkenberg +Jun Hee Yoo Jun Jie <71215065+junnjiee16@users.noreply.github.com> +Junil Kim Junyang Lin Juraj Bednar Justin Parker @@ -292,12 +364,14 @@ Karthik Sethuraman Kasumi <90275229+kasumi-1@users.noreply.github.com> Kawrakow <48489457+ikawrakow@users.noreply.github.com> Keiichi Tabata +Keke Han Kenvix ⭐ Kerfuffle 
<44031344+KerfuffleV2@users.noreply.github.com> Kevin Gibbons Kevin Ji <1146876+kevinji@users.noreply.github.com> Kevin Kwok Kevin Lo +Kevin Wang Kolen Cheung Konstantin Herud Konstantin Zhuravlyov @@ -315,22 +389,29 @@ LeonEricsson <70749762+LeonEricsson@users.noreply.github.com> Leonardo Neumann Li Tan Linwei Wang +Liu Jia <109258120+Septa2112@users.noreply.github.com> +Liu Jia LoganDark +LoĆÆc CarrĆØre LostRuins <39025047+LostRuins@users.noreply.github.com> Luciano Luo Tian Lyle Dean +M-A M. Yusuf Sarıgƶz +Ma Mingfei Maarten ter Huurne Mack Straight MaĆ«l Kerbiriou MaggotHATE +Mahesh Madhav <67384846+heshpdx@users.noreply.github.com> Manuel <44313466+makuche@users.noreply.github.com> Marc Kƶhlbrugge Marco Matthies <71844+marcom@users.noreply.github.com> Marcus Dunn <51931484+MarcusDunn@users.noreply.github.com> Marian Cepok Mark Fairbairn +Mark Zhuang Marko Tasic Markus Tavenrath Martin Delille @@ -342,11 +423,15 @@ MasterYi1024 <39848311+MasterYi1024@users.noreply.github.com> Mateusz Charytoniuk Matheus C. FranƧa Matheus Gabriel Alves Silva +Mathieu Geli Mathieu Nayrolles +Mathijs Henquet Mathijs de Bruin Matt Clayton <156335168+mattjcly@users.noreply.github.com> Matt Pulver +Matt Stephenson Matteo Boschini <12133566+mbosc@users.noreply.github.com> +Matteo Mortari Mattheus Chediak Matthew Tejo Matvey Soloviev @@ -356,8 +441,10 @@ Maxime <672982+maximegmd@users.noreply.github.com> Maximilian Winter Meng Zhang Meng, Hengyu +Mengqing Cao Merrick Christensen Michael Coppola +Michael Francis Michael Hueschen Michael Kesper Michael Klimenko @@ -365,41 +452,57 @@ Michael Podvitskiy Michael Potter Michael de Gans MichaĆ«l de Vries +Michał Tuszyński Mihai Mike Mikko Juola Minsoo Cheong <54794500+mscheong01@users.noreply.github.com> +Minsoo Cheong Mirko185 Mirror Azure <54669636+MirrorAzure@users.noreply.github.com> +MistApproach <98988043+MistApproach@users.noreply.github.com> Miwa / Ensan <63481257+ensan-hcl@users.noreply.github.com> Mohammadreza Hendiani Mohammadreza Hendiani +Molly Sophia +MorganRO8 <47795945+MorganRO8@users.noreply.github.com> Murilo Santana Musab Gultekin Nam D. 
Tran <42194884+namtranase@users.noreply.github.com> Nathan Epstein +Natsu NawafAlansari <72708095+NawafAlansari@users.noreply.github.com> Nebula Neo Zhang <14088817+arthw@users.noreply.github.com> Neo Zhang Neo Zhang Jianyu Neuman Vong +Nexes the Old <124105151+Nexesenex@users.noreply.github.com> Nexesenex <124105151+Nexesenex@users.noreply.github.com> Niall Coates <1349685+Niall-@users.noreply.github.com> +Nicholai Tukanov +Nico Bosshard Nicolai Weitkemper NicolĆ”s PĆ©rez Nigel Bosch Niklas Korz +NikolaiLyssogor <59844691+NikolaiLyssogor@users.noreply.github.com> Nikolas <127742645+nneubacher@users.noreply.github.com> Nindaleth +OSecret <135510162+OLSecret@users.noreply.github.com> Oleksandr Nikitin Oleksii Maryshchenko Olivier Chafik Ondřej ČertĆ­k Ouadie EL FAROUKI +PAB +Pablo Duboue +Pascal Patry Patrice Ferlet Paul Tsochantaris +Pavel Zloi Pavol Rusnak +Paweł Wodnicki <151604+32bitmicro@users.noreply.github.com> Pedro Cuenca Peter Sugihara Phil H <5756783+phiharri@users.noreply.github.com> @@ -407,10 +510,15 @@ Philip Taron Phillip Kravtsov Pierre Alexandre SCHEMBRI Pierrick Hymbert +Pieter Ouwerkerk +Plamen Minev +Prashant Vithule <119530321+Vithulep@users.noreply.github.com> Przemysław Pawełczyk Qin Yue Chen <71813199+chenqiny@users.noreply.github.com> Qingyou Meng Qu Zongfu <43257352+yancaoweidaode@users.noreply.github.com> +R0CKSTAR +R0CKSTAR RJ Adriaansen Radoslav Gerganov Radosław Gryta @@ -419,11 +527,13 @@ Raj Hammeer Singh Hada Ralph Soika Rand Xie Randall Fitzgerald +Random Fly Reinforce-II Ren Xuancheng Rene Leonhardt <65483435+reneleonhardt@users.noreply.github.com> RhinoDevel Riceball LEE +Rich Dougherty Richard Kiss Richard Roberson Rick G <26732651+TheFlipbook@users.noreply.github.com> @@ -439,21 +549,30 @@ Robey Holderith Robyn Roger Meier Roland <14355895+rbur0425@users.noreply.github.com> +Romain Biessy Romain D <90720+Artefact2@users.noreply.github.com> Romain Neutron Roman Parykin Ron Evans Ron Jailall +Roni Ronny Brendel Ronsor Rowan Hart +Ruchira Hasaranga +Ruixin Huang <18860020911@163.com> Rune <43761327+Rune-AI@users.noreply.github.com> +RunningLeon +RunningLeon Ryan Landay Ryder Wishart Ryuei Rőczey BarnabĆ”s <31726601+An0nie@users.noreply.github.com> +SRHMorris <69468379+SRHMorris@users.noreply.github.com> +SXX SakuraUmi Salvador E. 
Tropea +Salvatore Mesoraca Sam Spilsbury Sami Farin <3876865+Safari77@users.noreply.github.com> Samuel Maynard @@ -463,23 +582,29 @@ SebastiĆ”n A SebastianApel <13675545+SebastianApel@users.noreply.github.com> Senemu <10880819+Senemu@users.noreply.github.com> Sergey Alirzaev +Sergio López Sergio López SertaƧ Ɩzercan <852750+sozercan@users.noreply.github.com> SeungWon Jeong <65549245+redlion0929@users.noreply.github.com> ShadovvBeast Shakhar Dasgupta +Shane A Shangning Xu <32517059+xushangning@users.noreply.github.com> +Shankar +Shanshan Shen <467638484@qq.com> Shijie <821898965@qq.com> Shintarou Okada Shouzheng Liu <61452103+lshzh-ww@users.noreply.github.com> Shouzheng Liu Shuichi Tsutsumi +Shupei Fan SigbjĆørn SkjƦret Simon Willison Siwen Yu Sky Yan Slaren <2141330+slaren@users.noreply.github.com> Slava Primenko +Small Grass Forest SoftwareRenderer <138734813+SoftwareRenderer@users.noreply.github.com> Someone Someone Serge @@ -491,12 +616,15 @@ Stefan Sydow Steffen Rƶcker Stephan Walter Stephen Nichols +Steve Bonds Steve Grubb Steven Prichard Steven Roussey Steward Garcia <57494570+FSSRepo@users.noreply.github.com> +StrangeBytesDev <141275258+StrangeBytesDev@users.noreply.github.com> Suaj Carrot <72162667+SuajCarrot@users.noreply.github.com> SuperUserNameMan +Sutou Kouhei Tai Duc Nguyen Taikono-Himazin Tameem <113388789+AhmadTameem@users.noreply.github.com> @@ -507,7 +635,9 @@ Theia Vogel ThĆ©rence <13496987+Royalphax@users.noreply.github.com> Thibault Terrasson Thomas Klausner +Thorsten Sommer Tim Miller +Tim Wang Timmy Knight Timothy Cronin <40186632+4imothy@users.noreply.github.com> Ting Lou @@ -517,24 +647,31 @@ Tom C Tom Jobbins <784313+TheBloke@users.noreply.github.com> Tomas TomÔŔ Pazdiora +Tony Wasserka <4840017+neobrain@users.noreply.github.com> Tristan Druyen Tristan Ross +Trivikram Kamat <16024985+trivikr@users.noreply.github.com> Tungsten842 <886724vf@anonaddy.me> Tungsten842 Tushar UEXTM.com <84163508+uextm@users.noreply.github.com> +Ujjawal Panchal <31011628+Ujjawal-K-Panchal@users.noreply.github.com> Ulrich Drepper Uzo Nweke Vaibhav Srivastav Val Kharitonov Valentin Konovalov Valentyn Bezshapkin <61702053+valentynbez@users.noreply.github.com> +Vali Malinoiu <0x4139@gmail.com> Victor Nogueira Victor Z. 
Peng +Viet-Anh NGUYEN (Andrew) +Vinesh Janarthanan <36610342+VJHack@users.noreply.github.com> Vlad Vladimir Vladimir Malyutin Vladimir Zorin +VoidIsVoid <343750470@qq.com> Volodymyr Vitvitskyi <72226+signalpillar@users.noreply.github.com> WangHaoranRobin <56047610+WangHaoranRobin@users.noreply.github.com> Weird Constructor @@ -551,15 +688,22 @@ Xiang (Kevin) Li Xiao-Yong Jin XiaotaoChen Xiaoyi Chen +Xie Yanbo Xingchen Song(å®‹ę˜Ÿč¾°) +Xinpeng Dou <81913537+Dou-Git@users.noreply.github.com> Xuan Son Nguyen +Yaiko Yann Follet <131855179+YannFollet@users.noreply.github.com> Yaroslav Yazan Agha-Schrader Yiming Cui Yishuo Wang +Yoshi Suhara +Yoshi Suhara +Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Yueh-Po Peng <94939112+y10ab1@users.noreply.github.com> Yui +Yuri Khrustalev Yusuf Kağan Hanoğlu Yuval Peled <31162840+Yuval-Peled@users.noreply.github.com> ZHAOKAI WANG @@ -568,6 +712,8 @@ Zay <95888118+isaiahbjork@users.noreply.github.com> Zenix Zhang Peiyuan Zheng.Deng <32841220+dengzheng-cloud@users.noreply.github.com> +Zhenwei Jin <109658203+kylo5aby@users.noreply.github.com> +Zhiyuan Li ZhouYuChen Ziad Ben Hadj-Alouane Ziang Wu <97337387+ZiangWu-77@users.noreply.github.com> @@ -581,6 +727,7 @@ alexpinel <93524949+alexpinel@users.noreply.github.com> alonfaraj alwqx amd-lalithnc +amritahs-ibm andrijdavid anon998 <131767832+anon998@users.noreply.github.com> anzz1 @@ -588,14 +735,18 @@ apaz apcameron <37645737+apcameron@users.noreply.github.com> arch-btw <57669023+arch-btw@users.noreply.github.com> arcrank +ardfork <134447697+ardfork@users.noreply.github.com> arlo-phoenix <140345165+arlo-phoenix@users.noreply.github.com> at8u <129688334+at8u@users.noreply.github.com> automaticcat +awatuna <23447591+awatuna@users.noreply.github.com> +b4b4o bandoti <141645996+bandoti@users.noreply.github.com> beiller bhubbb <79117352+bhubbb@users.noreply.github.com> bmwl bobqianic <129547291+bobqianic@users.noreply.github.com> +brucepro bryanSwk <93190252+bryanSwk@users.noreply.github.com> bsilvereagle bssrdf @@ -614,10 +765,14 @@ cpumaxx <163466046+cpumaxx@users.noreply.github.com> crasm crasm daboe01 +daghanerdonmez <44506702+daghanerdonmez@users.noreply.github.com> +daminho <37615795+daminho@users.noreply.github.com> david raistrick ddh0 ddpasa <112642920+ddpasa@users.noreply.github.com> deepdiffuser <112834445+deepdiffuser@users.noreply.github.com> +devojony <61173062+devojony@users.noreply.github.com> +ditsuke divinity76 dm4 dotpy314 <33351922+dotpy314@users.noreply.github.com> @@ -629,14 +784,18 @@ ebraminio eiery <19350831+eiery@users.noreply.github.com> eric8607242 fairydreaming <166155368+fairydreaming@users.noreply.github.com> +fengerhu1 <2748250768@qq.com> fraxy-v <65565042+fraxy-v@users.noreply.github.com> github-actions[bot] gliptic goerch grahameth <96447521+grahameth@users.noreply.github.com> +gtygo gwjr <502526+gwjr@users.noreply.github.com> h-h-h-h <13482553+h-h-h-h@users.noreply.github.com> hankcs +haopeng <657407891@qq.com> +hipudding hoangmit hongbo.mo <352280764@qq.com> hopkins385 <98618192+hopkins385@users.noreply.github.com> @@ -649,12 +808,14 @@ hxer7963 hydai iSma iacore <74560659+iacore@users.noreply.github.com> +icppWorld <124377669+icppWorld@users.noreply.github.com> igarnier intelmatt <61025942+intelmatt@users.noreply.github.com> iohub jacobi petrucciani <8117202+jpetrucciani@users.noreply.github.com> jaime-m-p <167997752+jaime-m-p@users.noreply.github.com> jameswu2014 <545426914@qq.com> +jdomke <28772296+jdomke@users.noreply.github.com> jiez <373447296@qq.com> jneem 
joecryptotoo <80373433+joecryptotoo@users.noreply.github.com> @@ -677,28 +838,35 @@ klosax <131523366+klosax@users.noreply.github.com> kunal-vaishnavi <115581922+kunal-vaishnavi@users.noreply.github.com> kunnis kuronekosaiko +kustaaya <58045274+kustaaya@users.noreply.github.com> kuvaus <22169537+kuvaus@users.noreply.github.com> kwin1412 <42286931+kwin1412@users.noreply.github.com> l3utterfly +laik ldwang le.chang leejet +leo-pony limitedAtonement liuwei-git <14815172+liuwei-git@users.noreply.github.com> lon <114724657+longregen@users.noreply.github.com> loonerin <132926317+loonerin@users.noreply.github.com> +ltoniazzi <61414566+ltoniazzi@users.noreply.github.com> luoyu-intel m3ndax maddes8cht <55592906+maddes8cht@users.noreply.github.com> makomk manikbhandari maor-ps <154728172+maor-ps@users.noreply.github.com> +matiaslin <45382001+matiaslin@users.noreply.github.com> +matteo mdrokz mgroeber9110 <45620825+mgroeber9110@users.noreply.github.com> minarchist mj-shifu <77107165+mj-shifu@users.noreply.github.com> mmyjona momonga <115213907+mmnga@users.noreply.github.com> +momonga <146910567+mmngays@users.noreply.github.com> moritzbrantner <31051084+moritzbrantner@users.noreply.github.com> mzcu nanahi <130121847+na-na-hi@users.noreply.github.com> @@ -716,8 +884,10 @@ omahs <73983677+omahs@users.noreply.github.com> oobabooga <112222186+oobabooga@users.noreply.github.com> opparco ostix360 <55257054+ostix360@users.noreply.github.com> +pculliton pengxin99 perserk +piDack <104877312+piDack@users.noreply.github.com> pmysl postmasters pudepiedj @@ -733,6 +903,7 @@ runfuture sandyiscool sasha0552 semidark +serhii-nakon <57632032+serhii-nakon@users.noreply.github.com> sharpHL <132747147+sharpHL@users.noreply.github.com> shibe2 singularity <12184989+singularity-s0@users.noreply.github.com> @@ -741,42 +912,55 @@ sjxx <63994076+ylsdamxssjxxdd@users.noreply.github.com> slaren <2141330+slaren@users.noreply.github.com> slaren snadampal <87143774+snadampal@users.noreply.github.com> +standby24x7 staviq stduhpf strawberrymelonpanda <152940198+strawberrymelonpanda@users.noreply.github.com> swittk takov751 <40316768+takov751@users.noreply.github.com> tarcey +tc-mb <157115220+tc-mb@users.noreply.github.com> texmex76 <40733439+texmex76@users.noreply.github.com> thement <40525767+thement@users.noreply.github.com> +thewh1teagle <61390950+thewh1teagle@users.noreply.github.com> tjohnman +toyer <2042519524@qq.com> tslmy ubik2 uint256_t uint256_t unbounded +uvos valiray <133289098+valiray@users.noreply.github.com> +vb vik viric vodkaslime <646329483@qq.com> vvhg1 <94630311+vvhg1@users.noreply.github.com> vxiiduu <73044267+vxiiduu@users.noreply.github.com> +wangshuai09 <391746016@qq.com> wbpxre150 <100937007+wbpxre150@users.noreply.github.com> whoreson <139810751+whoreson@users.noreply.github.com> woachk <24752637+woachk@users.noreply.github.com> wonjun Jang woodx <124784234+woodx9@users.noreply.github.com> +wwoodsTM <104587230+wwoodsTM@users.noreply.github.com> wzy <32936898+Freed-Wu@users.noreply.github.com> xaedes xaedes +xctan xloem <0xloem@gmail.com> yangli2 yuiseki +yuri@FreeBSD zakkor zhangkaihuo +zhentaoyu zhouwg <6889919+zhouwg@users.noreply.github.com> zhouwg zrm Ștefan-Gabriel Muscalu +ęØęœ± Ā· Kiki 源文雨 <41315874+fumiama@users.noreply.github.com> +蕭澧邦 <45505768+shou692199@users.noreply.github.com> ŠŠøŃŠ· Š“Š°Ń€ŠøŃ„Š·ŃŠ½Š¾Š² <112617865+garrnizon@users.noreply.github.com> diff --git a/CMakeLists.txt b/CMakeLists.txt index 93c60ef431d66..a717a508ff5d9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -46,6 
+46,11 @@ if (WIN32) add_compile_definitions(_CRT_SECURE_NO_WARNINGS) endif() +if (MSVC) + add_compile_options("$<$<COMPILE_LANGUAGE:C>:/utf-8>") + add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/utf-8>") +endif() + # # option list # @@ -75,6 +80,7 @@ option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF) # Required for relocatable CMake package include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/common.cmake) # override ggml options set(GGML_SANITIZE_THREAD ${LLAMA_SANITIZE_THREAD}) @@ -88,10 +94,6 @@ if (NOT DEFINED GGML_LLAMAFILE) set(GGML_LLAMAFILE_DEFAULT ON) endif() -if (NOT DEFINED GGML_AMX) - set(GGML_AMX ON) -endif() - if (NOT DEFINED GGML_CUDA_GRAPHS) set(GGML_CUDA_GRAPHS_DEFAULT ON) endif() @@ -156,8 +158,11 @@ if (GGML_TARGET_DEFINES) list(APPEND GGML_TRANSIENT_DEFINES ${GGML_TARGET_DEFINES}) endif() get_target_property(GGML_LINK_LIBRARIES ggml LINK_LIBRARIES) - -set_target_properties(llama PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h) +# all public headers +set(LLAMA_PUBLIC_HEADERS + ${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h + ${CMAKE_CURRENT_SOURCE_DIR}/include/llama-cpp.h) +set_target_properties(llama PROPERTIES PUBLIC_HEADER "${LLAMA_PUBLIC_HEADERS}") install(TARGETS llama LIBRARY PUBLIC_HEADER) configure_package_config_file( diff --git a/CMakePresets.json b/CMakePresets.json index 436448967c9af..13bdd7907ab40 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -31,6 +31,13 @@ { "name": "sycl_f16", "hidden": true, "cacheVariables": { "GGML_SYCL_F16": "ON" } }, { "name": "vulkan", "hidden": true, "cacheVariables": { "GGML_VULKAN": "ON" } }, + { + "name": "x64-windows-llvm", "hidden": true, + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/x64-windows-llvm.cmake" + } + }, + { "name": "arm64-windows-msvc", "hidden": true, "architecture": { "value": "arm64", "strategy": "external" }, @@ -70,6 +77,11 @@ { "name": "arm64-windows-msvc-release", "inherits": [ "base", "arm64-windows-msvc", "reldbg" ] }, { "name": "arm64-windows-msvc+static-release", "inherits": [ "base", "arm64-windows-msvc", "reldbg", "static" ] }, + { "name": "x64-windows-llvm-debug", "inherits": [ "base", "x64-windows-llvm", "debug" ] }, + { "name": "x64-windows-llvm-release", "inherits": [ "base", "x64-windows-llvm", "release" ] }, + { "name": "x64-windows-llvm-reldbg", "inherits": [ "base", "x64-windows-llvm", "reldbg" ] }, + { "name": "x64-windows-llvm+static-release", "inherits": [ "base", "x64-windows-llvm", "reldbg", "static" ] }, + { "name": "x64-windows-msvc-debug", "inherits": [ "base", "debug" ] }, { "name": "x64-windows-msvc-release", "inherits": [ "base", "reldbg" ] }, { "name": "x64-windows-msvc+static-release", "inherits": [ "base", "reldbg", "static" ] }, diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000000000..adeba5395831a --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,5 @@ +# collaborators can optionally add themselves here to indicate their availability for reviewing related PRs + +/ci/ @ggerganov +/.devops/ @ngxson +/examples/server/ @ngxson diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4c882c254cac5..5a85ec5d2efb4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,9 +1,10 @@ # Pull requests (for contributors) - Test your changes: - - Using the commands in the [`tests`](tests) folder. 
For instance, running the `./tests/test-backend-ops` command tests different backend implementations of the `ggml` library - Execute [the full CI locally on your machine](ci/README.md) before publishing -- Optionally rate the complexity of your PR (i.e. `Review Complexity : Low`, `Review Complexity : Medium`, `Review Complexity : High`). This makes it easier for maintainers to triage the PRs + - Verify that the perplexity and the performance are not affected negatively by your changes (use `llama-perplexity` and `llama-bench`) + - If you modified the `ggml` source, run the `test-backend-ops` tool to check whether different backend implementations of the `ggml` operators produce consistent results (this requires access to at least two different `ggml` backends) + - If you modified a `ggml` operator or added a new one, add the corresponding test cases to `test-backend-ops` - Consider allowing write access to your branch for faster reviews, as reviewers can push commits directly - If your PR becomes stale, don't hesitate to ping the maintainers in the comments @@ -12,6 +13,7 @@ - Squash-merge PRs - Use the following format for the squashed commit title: ` : (#)`. For example: `utils : fix typo in utils.py (#1234)` - Optionally pick a `` from here: https://github.com/ggerganov/llama.cpp/wiki/Modules +- Consider adding yourself to [CODEOWNERS](CODEOWNERS) # Coding guidelines diff --git a/Makefile b/Makefile index 95110d4eb7d8b..19ae0d5f1c87b 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,7 @@ +ifndef LLAMA_MAKEFILE +$(error The Makefile build is deprecated. Use the CMake build instead. For more details, see https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) +endif + # Define the default target now so that it is always the first target BUILD_TARGETS = \ libllava.a \ @@ -18,6 +22,7 @@ BUILD_TARGETS = \ llama-infill \ llama-llava-cli \ llama-minicpmv-cli\ + llama-qwen2vl-cli\ llama-lookahead \ llama-lookup \ llama-lookup-create \ @@ -34,6 +39,7 @@ BUILD_TARGETS = \ llama-server \ llama-simple \ llama-simple-chat \ + llama-run \ llama-speculative \ llama-tokenize \ llama-vdot \ @@ -250,11 +256,11 @@ endif # Compile flags # -# keep standard at C11 and C++11 -MK_CPPFLAGS = -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon +# keep standard at C11 and C++17 +MK_CPPFLAGS = -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -DGGML_USE_CPU MK_CFLAGS = -std=c11 -fPIC -MK_CXXFLAGS = -std=c++11 -fPIC -MK_NVCCFLAGS = -std=c++11 +MK_CXXFLAGS = -std=c++17 -fPIC +MK_NVCCFLAGS = -std=c++17 ifdef LLAMA_NO_CCACHE GGML_NO_CCACHE := 1 @@ -290,6 +296,7 @@ endif # some memory allocation are available on Linux through GNU extensions in libc ifeq ($(UNAME_S),Linux) MK_CPPFLAGS += -D_GNU_SOURCE + MK_LDFLAGS += -ldl endif # RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1, @@ -439,6 +446,10 @@ ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64)) MK_CFLAGS += -march=native -mtune=native HOST_CXXFLAGS += -march=native -mtune=native + # Usage AMX build test + #MK_CFLAGS += -march=graniterapids -mtune=graniterapids + #HOST_CXXFLAGS += -march=graniterapids -mtune=graniterapids + # Usage AVX-only #MK_CFLAGS += -mfma -mf16c -mavx #MK_CXXFLAGS += -mfma -mf16c -mavx @@ -573,9 +584,12 @@ endif ifndef GGML_NO_AMX MK_CPPFLAGS += -DGGML_USE_AMX - OBJ_GGML_EXT += ggml/src/ggml-amx/ggml-amx.o ggml/src/ggml-amx/mmq.o + OBJ_GGML_EXT += ggml/src/ggml-cpu/amx/amx.o ggml/src/ggml-cpu/amx/mmq.o endif +# only necessary for the CPU backend files +MK_CPPFLAGS += -Iggml/src/ggml-cpu + ifdef GGML_RPC MK_CPPFLAGS += 
-DGGML_USE_RPC OBJ_GGML_EXT += ggml/src/ggml-rpc.o @@ -730,10 +744,10 @@ GLSLC_CMD = glslc _ggml_vk_genshaders_cmd = $(shell pwd)/vulkan-shaders-gen _ggml_vk_header = ggml/src/ggml-vulkan-shaders.hpp _ggml_vk_source = ggml/src/ggml-vulkan-shaders.cpp -_ggml_vk_input_dir = ggml/src/vulkan-shaders +_ggml_vk_input_dir = ggml/src/ggml-vulkan/vulkan-shaders _ggml_vk_shader_deps = $(echo $(_ggml_vk_input_dir)/*.comp) -ggml/src/ggml-vulkan.o: ggml/src/ggml-vulkan.cpp ggml/include/ggml-vulkan.h $(_ggml_vk_header) $(_ggml_vk_source) +ggml/src/ggml-vulkan.o: ggml/src/ggml-vulkan/ggml-vulkan.cpp ggml/include/ggml-vulkan.h $(_ggml_vk_header) $(_ggml_vk_source) $(CXX) $(CXXFLAGS) $(shell pkg-config --cflags vulkan) -c $< -o $@ $(_ggml_vk_header): $(_ggml_vk_source) @@ -745,12 +759,12 @@ $(_ggml_vk_source): $(_ggml_vk_shader_deps) vulkan-shaders-gen --target-hpp $(_ggml_vk_header) \ --target-cpp $(_ggml_vk_source) -vulkan-shaders-gen: ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp - $(CXX) $(CXXFLAGS) -o $@ $(LDFLAGS) ggml/src/vulkan-shaders/vulkan-shaders-gen.cpp +vulkan-shaders-gen: ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp + $(CXX) $(CXXFLAGS) -o $@ $(LDFLAGS) ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp endif # GGML_VULKAN -ifdef GGML_HIPBLAS +ifdef GGML_HIP ifeq ($(wildcard /opt/rocm),) ROCM_PATH ?= /usr AMDGPU_TARGETS ?= $(shell $(shell which amdgpu-arch)) @@ -805,7 +819,7 @@ ggml/src/ggml-cuda/%.o: \ ggml/src/ggml-common.h \ ggml/src/ggml-cuda/common.cuh $(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $< -endif # GGML_HIPBLAS +endif # GGML_HIP ifdef GGML_MUSA ifeq ($(wildcard /opt/musa),) @@ -813,7 +827,7 @@ ifdef GGML_MUSA else MUSA_PATH ?= /opt/musa endif - MTGPU_TARGETS ?= mp_21 mp_22 + MUSA_ARCHITECTURES ?= 21;22 MK_CPPFLAGS += -DGGML_USE_MUSA -DGGML_USE_CUDA MK_LDFLAGS += -L$(MUSA_PATH)/lib -Wl,-rpath=$(MUSA_PATH)/lib @@ -832,7 +846,8 @@ ifdef GGML_MUSA CXX := $(MUSA_PATH)/bin/clang++ MCC := $(CCACHE) $(MUSA_PATH)/bin/mcc - MUSAFLAGS += $(addprefix --cuda-gpu-arch=, $(MTGPU_TARGETS)) + MUSAFLAGS = -x musa -mtgpu + MUSAFLAGS += $(foreach arch,$(subst ;, ,$(MUSA_ARCHITECTURES)),--cuda-gpu-arch=mp_$(arch)) ifdef GGML_CUDA_FORCE_MMQ MUSAFLAGS += -DGGML_CUDA_FORCE_MMQ @@ -876,14 +891,14 @@ ggml/src/ggml-cuda/ggml-cuda.o: \ ggml/src/ggml-backend-impl.h \ ggml/src/ggml-common.h \ $(wildcard ggml/src/ggml-cuda/*.cuh) - $(MCC) $(CXXFLAGS) $(MUSAFLAGS) -x musa -mtgpu -c -o $@ $< + $(MCC) $(CXXFLAGS) $(MUSAFLAGS) -c -o $@ $< ggml/src/ggml-cuda/%.o: \ ggml/src/ggml-cuda/%.cu \ ggml/include/ggml.h \ ggml/src/ggml-common.h \ ggml/src/ggml-cuda/common.cuh - $(MCC) $(CXXFLAGS) $(MUSAFLAGS) -x musa -mtgpu -c -o $@ $< + $(MCC) $(CXXFLAGS) $(MUSAFLAGS) -c -o $@ $< endif # GGML_MUSA ifdef GGML_METAL @@ -938,7 +953,6 @@ DIR_COMMON = common OBJ_GGML = \ $(DIR_GGML)/src/ggml.o \ - $(DIR_GGML)/src/ggml-aarch64.o \ $(DIR_GGML)/src/ggml-alloc.o \ $(DIR_GGML)/src/ggml-backend.o \ $(DIR_GGML)/src/ggml-backend-reg.o \ @@ -946,9 +960,11 @@ OBJ_GGML = \ $(DIR_GGML)/src/ggml-quants.o \ $(DIR_GGML)/src/ggml-threading.o \ $(DIR_GGML)/src/ggml-cpu/ggml-cpu.o \ - $(DIR_GGML)/src/ggml-cpu/ggml-cpu-cpp.o \ + $(DIR_GGML)/src/ggml-cpu/ggml-cpu_cpp.o \ $(DIR_GGML)/src/ggml-cpu/ggml-cpu-aarch64.o \ + $(DIR_GGML)/src/ggml-cpu/ggml-cpu-hbm.o \ $(DIR_GGML)/src/ggml-cpu/ggml-cpu-quants.o \ + $(DIR_GGML)/src/ggml-cpu/ggml-cpu-traits.o \ $(OBJ_GGML_EXT) OBJ_LLAMA = \ @@ -966,6 +982,7 @@ OBJ_COMMON = \ $(DIR_COMMON)/console.o \ $(DIR_COMMON)/ngram-cache.o \ $(DIR_COMMON)/sampling.o \ + 
$(DIR_COMMON)/speculative.o \ $(DIR_COMMON)/build-info.o \ $(DIR_COMMON)/json-schema-to-grammar.o @@ -1087,17 +1104,10 @@ DEP_FILES = $(OBJ_GGML:.o=.d) $(OBJ_LLAMA:.o=.d) $(OBJ_COMMON:.o=.d) # Default target all: $(BUILD_TARGETS) +# force c++ build for source file that have same name as c file # Note: need this exception because `ggml-cpu.c` and `ggml-cpu.cpp` both produce the same obj/dep files -# g++ -M -I ./ggml/include/ -I ./ggml/src ggml/src/ggml-cpu/ggml-cpu.cpp | grep ggml -$(DIR_GGML)/src/ggml-cpu/ggml-cpu-cpp.o: \ - ggml/src/ggml-cpu/ggml-cpu.cpp \ - ggml/include/ggml-backend.h \ - ggml/include/ggml.h \ - ggml/include/ggml-alloc.h \ - ggml/src/ggml-backend-impl.h \ - ggml/include/ggml-cpu.h \ - ggml/src/ggml-impl.h - $(CXX) $(CXXFLAGS) -c $< -o $@ +$(DIR_GGML)/%_cpp.o: $(DIR_GGML)/%.cpp + $(CXX) $(CXXFLAGS) -MMD -c $< -o $@ # Rules for building object files $(DIR_GGML)/%.o: $(DIR_GGML)/%.c @@ -1134,8 +1144,15 @@ $(LIB_COMMON_S): $(OBJ_COMMON) # Include dependency files -include $(DEP_FILES) +# Clean generated server assets +clean-server-assets: + find examples/server -type f -name "*.js.hpp" -delete + find examples/server -type f -name "*.mjs.hpp" -delete + find examples/server -type f -name "*.css.hpp" -delete + find examples/server -type f -name "*.html.hpp" -delete + # Clean rule -clean: +clean: clean-server-assets rm -vrf $(BUILD_TARGETS) $(TEST_TARGETS) rm -rvf *.a *.dll *.so *.dot find ggml src common tests examples pocs -type f -name "*.o" -delete @@ -1165,6 +1182,11 @@ llama-infill: examples/infill/infill.cpp \ $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) +llama-run: examples/run/run.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + llama-simple: examples/simple/simple.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) @@ -1338,20 +1360,14 @@ llama-server: \ examples/server/utils.hpp \ examples/server/httplib.h \ examples/server/index.html.hpp \ - examples/server/completion.js.hpp \ examples/server/loading.html.hpp \ - examples/server/deps_daisyui.min.css.hpp \ - examples/server/deps_markdown-it.js.hpp \ - examples/server/deps_tailwindcss.js.hpp \ - examples/server/deps_vue.esm-browser.js.hpp \ common/json.hpp \ - common/stb_image.h \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h %.hpp $<,$^) -Iexamples/server $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) $(LWINSOCK2) # Portable equivalent of `cd examples/server/public && xxd -i $(notdir $<) ../$(notdir $<).hpp`: -examples/server/%.hpp: examples/server/public/% Makefile +examples/server/%.hpp: examples/server/public/% FORCE Makefile @( export NAME=$(subst .,_,$(subst -,_,$(notdir $<))) && \ echo "unsigned char $${NAME}[] = {" && \ cat $< | od -v -t x1 -An | sed -E 's/([0-9a-fA-F]+)/0x\1, /g' && \ @@ -1389,6 +1405,14 @@ llama-minicpmv-cli: examples/llava/minicpmv-cli.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual +llama-qwen2vl-cli: examples/llava/qwen2vl-cli.cpp \ + examples/llava/llava.cpp \ + examples/llava/llava.h \ + examples/llava/clip.cpp \ + examples/llava/clip.h \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) $< $(filter-out %.h $<,$^) -o $@ $(LDFLAGS) -Wno-cast-qual + ifeq ($(UNAME_S),Darwin) swift: examples/batched.swift (cd examples/batched.swift; make build) @@ -1526,7 +1550,7 @@ llama-q8dot: 
pocs/vdot/q8dot.cpp ggml/src/ggml.o \ # Deprecated binaries that we want to keep around long enough for people to migrate to the new filenames, then these can be removed. # # Mark legacy binary targets as .PHONY so that they are always checked. -.PHONY: main quantize perplexity embedding server +.PHONY: FORCE main quantize perplexity embedding server # Define the object file target examples/deprecation-warning/deprecation-warning.o: examples/deprecation-warning/deprecation-warning.cpp diff --git a/Package.swift b/Package.swift index 6b68aecdebec1..01c996d242037 100644 --- a/Package.swift +++ b/Package.swift @@ -2,56 +2,6 @@ import PackageDescription -var sources = [ - "src/llama.cpp", - "src/llama-vocab.cpp", - "src/llama-grammar.cpp", - "src/llama-sampling.cpp", - "src/unicode.cpp", - "src/unicode-data.cpp", - "ggml/src/ggml.c", - "ggml/src/ggml-aarch64.c", - "ggml/src/ggml-alloc.c", - "ggml/src/ggml-backend.cpp", - "ggml/src/ggml-backend-reg.cpp", - "ggml/src/ggml-cpu/ggml-cpu.c", - "ggml/src/ggml-cpu/ggml-cpu.cpp", - "ggml/src/ggml-cpu/ggml-cpu-aarch64.c", - "ggml/src/ggml-cpu/ggml-cpu-quants.c", - "ggml/src/ggml-threading.cpp", - "ggml/src/ggml-quants.c", -] - -var resources: [Resource] = [] -var linkerSettings: [LinkerSetting] = [] -var cSettings: [CSetting] = [ - .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]), - .unsafeFlags(["-fno-objc-arc"]), - .headerSearchPath("ggml/src"), - // NOTE: NEW_LAPACK will required iOS version 16.4+ - // We should consider add this in the future when we drop support for iOS 14 - // (ref: ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc) - // .define("ACCELERATE_NEW_LAPACK"), - // .define("ACCELERATE_LAPACK_ILP64") -] - -#if canImport(Darwin) -sources.append("ggml/src/ggml-common.h") -sources.append("ggml/src/ggml-metal/ggml-metal.m") -resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal")) -linkerSettings.append(.linkedFramework("Accelerate")) -cSettings.append( - contentsOf: [ - .define("GGML_USE_ACCELERATE"), - .define("GGML_USE_METAL") - ] -) -#endif - -#if os(Linux) - cSettings.append(.define("_GNU_SOURCE")) -#endif - let package = Package( name: "llama", platforms: [ @@ -64,26 +14,6 @@ let package = Package( .library(name: "llama", targets: ["llama"]), ], targets: [ - .target( - name: "llama", - path: ".", - exclude: [ - "build", - "cmake", - "examples", - "scripts", - "models", - "tests", - "CMakeLists.txt", - "Makefile", - "ggml/src/ggml-metal-embed.metal" - ], - sources: sources, - resources: resources, - publicHeadersPath: "spm-headers", - cSettings: cSettings, - linkerSettings: linkerSettings - ) - ], - cxxLanguageStandard: .cxx11 + .systemLibrary(name: "llama", pkgConfig: "llama"), + ] ) diff --git a/README.md b/README.md index aea0e7ab01393..789f9a3293cce 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,6 @@ During sync the commits, I will test and update the commits impact SYCL backend. 
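The Package.swift rewrite above stops compiling the C/C++ sources inside SwiftPM and instead declares `llama` as a system library resolved through pkg-config. A minimal consumption sketch, assuming a CMake install that places a `llama.pc` file under the chosen prefix (the prefix and paths here are illustrative, not prescribed by this patch):

```sh
# Build and install llama.cpp so the library, headers, and (assumed) llama.pc
# land under a local prefix:
cmake -B build -DCMAKE_BUILD_TYPE=Release
cmake --build build --config Release
cmake --install build --prefix "$HOME/.local"

# Let pkg-config locate llama.pc, then build a Swift package that depends on
# the `llama` system-library target declared in Package.swift:
export PKG_CONFIG_PATH="$HOME/.local/lib/pkgconfig:$PKG_CONFIG_PATH"
swift build
```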
[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Server](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml/badge.svg)](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml) -[![Conan Center](https://shields.io/conan/v/llama-cpp)](https://conan.io/center/llama-cpp) [Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml) @@ -60,7 +59,7 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) ## Description The main goal of `llama.cpp` is to enable LLM inference with minimal setup and state-of-the-art performance on a wide -variety of hardware - locally and in the cloud. +range of hardware - locally and in the cloud. - Plain C/C++ implementation without any dependencies - Apple silicon is a first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks @@ -70,14 +69,17 @@ variety of hardware - locally and in the cloud. - Vulkan and SYCL backend support - CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity -Since its [inception](https://github.com/ggerganov/llama.cpp/issues/33#issuecomment-1465108022), the project has -improved significantly thanks to many contributions. It is the main playground for developing new features for the -[ggml](https://github.com/ggerganov/ggml) library. +The `llama.cpp` project is the main playground for developing new features for the [ggml](https://github.com/ggerganov/ggml) library. -**Supported models:** +
+Models Typically finetunes of the base models below are supported as well. +Instructions for adding support for new models: [HOWTO-add-model.md](docs/development/HOWTO-add-model.md) + +#### Text-only + - [X] LLaMA šŸ¦™ - [x] LLaMA 2 šŸ¦™šŸ¦™ - [x] LLaMA 3 šŸ¦™šŸ¦™šŸ¦™ @@ -113,6 +115,7 @@ Typically finetunes of the base models below are supported as well. - [x] [SEA-LION](https://huggingface.co/models?search=sea-lion) - [x] [GritLM-7B](https://huggingface.co/GritLM/GritLM-7B) + [GritLM-8x7B](https://huggingface.co/GritLM/GritLM-8x7B) - [x] [OLMo](https://allenai.org/olmo) +- [x] [OLMo 2](https://allenai.org/olmo) - [x] [OLMoE](https://huggingface.co/allenai/OLMoE-1B-7B-0924) - [x] [Granite models](https://huggingface.co/collections/ibm-granite/granite-code-models-6624c5cec322e4c148c8b330) - [x] [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) + [Pythia](https://github.com/EleutherAI/pythia) @@ -129,10 +132,9 @@ Typically finetunes of the base models below are supported as well. - [x] [Jais](https://huggingface.co/inceptionai/jais-13b-chat) - [x] [Bielik-11B-v2.3](https://huggingface.co/collections/speakleash/bielik-11b-v23-66ee813238d9b526a072408a) - [x] [RWKV-6](https://github.com/BlinkDL/RWKV-LM) +- [x] [GigaChat-20B-A3B](https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct) -(instructions for supporting more models: [HOWTO-add-model.md](./docs/development/HOWTO-add-model.md)) - -**Multimodal models:** +#### Multimodal - [x] [LLaVA 1.5 models](https://huggingface.co/collections/liuhaotian/llava-15-653aac15d994e992e2677a7e), [LLaVA 1.6 models](https://huggingface.co/collections/liuhaotian/llava-16-65b9e40155f60fd046a5ccf2) - [x] [BakLLaVA](https://huggingface.co/models?search=SkunkworksAI/Bakllava) @@ -143,8 +145,12 @@ Typically finetunes of the base models below are supported as well. - [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM) - [x] [Moondream](https://huggingface.co/vikhyatk/moondream2) - [x] [Bunny](https://github.com/BAAI-DCAI/Bunny) +- [x] [Qwen2-VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d) -**Bindings:** +
+ +
+Bindings - Python: [abetlen/llama-cpp-python](https://github.com/abetlen/llama-cpp-python) - Go: [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp) @@ -171,316 +177,328 @@ Typically finetunes of the base models below are supported as well. - Swift [srgtuszy/llama-cpp-swift](https://github.com/srgtuszy/llama-cpp-swift) - Swift [ShenghaiWang/SwiftLlama](https://github.com/ShenghaiWang/SwiftLlama) -**UI:** +
-Unless otherwise noted these projects are open-source with permissive licensing: +
+UIs -- [MindWorkAI/AI-Studio](https://github.com/MindWorkAI/AI-Studio) (FSL-1.1-MIT) -- [iohub/collama](https://github.com/iohub/coLLaMA) +*(to have a project listed here, it should clearly state that it depends on `llama.cpp`)* + +- [AI Sublime Text plugin](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (MIT) +- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT) +- [Dot](https://github.com/alexpinel/Dot) (GPL) +- [eva](https://github.com/ylsdamxssjxxdd/eva) (MIT) +- [iohub/collama](https://github.com/iohub/coLLaMA) (Apache-2.0) - [janhq/jan](https://github.com/janhq/jan) (AGPL) -- [nat/openplayground](https://github.com/nat/openplayground) -- [Faraday](https://faraday.dev/) (proprietary) +- [KanTV](https://github.com/zhouwg/kantv?tab=readme-ov-file) (Apache-2.0) +- [KodiBot](https://github.com/firatkiral/kodibot) (GPL) +- [llama.vim](https://github.com/ggml-org/llama.vim) (MIT) +- [LARS](https://github.com/abgulati/LARS) (AGPL) +- [Llama Assistant](https://github.com/vietanhdev/llama-assistant) (GPL) +- [LLMFarm](https://github.com/guinmoon/LLMFarm?tab=readme-ov-file) (MIT) +- [LLMUnity](https://github.com/undreamai/LLMUnity) (MIT) - [LMStudio](https://lmstudio.ai/) (proprietary) -- [Layla](https://play.google.com/store/apps/details?id=com.laylalite) (proprietary) -- [ramalama](https://github.com/containers/ramalama) (MIT) - [LocalAI](https://github.com/mudler/LocalAI) (MIT) - [LostRuins/koboldcpp](https://github.com/LostRuins/koboldcpp) (AGPL) -- [Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile) -- [nomic-ai/gpt4all](https://github.com/nomic-ai/gpt4all) -- [ollama/ollama](https://github.com/ollama/ollama) +- [MindMac](https://mindmac.app) (proprietary) +- [MindWorkAI/AI-Studio](https://github.com/MindWorkAI/AI-Studio) (FSL-1.1-MIT) +- [Mobile-Artificial-Intelligence/maid](https://github.com/Mobile-Artificial-Intelligence/maid) (MIT) +- [Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile) (Apache-2.0) +- [nat/openplayground](https://github.com/nat/openplayground) (MIT) +- [nomic-ai/gpt4all](https://github.com/nomic-ai/gpt4all) (MIT) +- [ollama/ollama](https://github.com/ollama/ollama) (MIT) - [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui) (AGPL) -- [psugihara/FreeChat](https://github.com/psugihara/FreeChat) -- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT) -- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal) +- [PocketPal AI](https://github.com/a-ghorbani/pocketpal-ai) (MIT) +- [psugihara/FreeChat](https://github.com/psugihara/FreeChat) (MIT) +- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal) (MIT) - [pythops/tenere](https://github.com/pythops/tenere) (AGPL) -- [RAGNA Desktop](https://ragna.app/) (proprietary) -- [RecurseChat](https://recurse.chat/) (proprietary) -- [semperai/amica](https://github.com/semperai/amica) -- [withcatai/catai](https://github.com/withcatai/catai) -- [Mobile-Artificial-Intelligence/maid](https://github.com/Mobile-Artificial-Intelligence/maid) (MIT) -- [Msty](https://msty.app) (proprietary) -- [LLMFarm](https://github.com/guinmoon/LLMFarm?tab=readme-ov-file) (MIT) -- [KanTV](https://github.com/zhouwg/kantv?tab=readme-ov-file)(Apachev2.0 or later) -- [Dot](https://github.com/alexpinel/Dot) (GPL) -- [MindMac](https://mindmac.app) (proprietary) -- [KodiBot](https://github.com/firatkiral/kodibot) (GPL) -- [eva](https://github.com/ylsdamxssjxxdd/eva) (MIT) -- [AI Sublime Text plugin](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) 
(MIT) -- [AIKit](https://github.com/sozercan/aikit) (MIT) -- [LARS - The LLM & Advanced Referencing Solution](https://github.com/abgulati/LARS) (AGPL) -- [LLMUnity](https://github.com/undreamai/LLMUnity) (MIT) -- [Llama Assistant](https://github.com/vietanhdev/llama-assistant) (GPL) -- [PocketPal AI - An iOS and Android App](https://github.com/a-ghorbani/pocketpal-ai) (MIT) +- [ramalama](https://github.com/containers/ramalama) (MIT) +- [semperai/amica](https://github.com/semperai/amica) (MIT) +- [withcatai/catai](https://github.com/withcatai/catai) (MIT) -*(to have a project listed here, it should clearly state that it depends on `llama.cpp`)* +
-**Tools:** +
+Tools - [akx/ggify](https://github.com/akx/ggify) – download PyTorch models from HuggingFace Hub and convert them to GGML - [akx/ollama-dl](https://github.com/akx/ollama-dl) – download models from the Ollama library to be used directly with llama.cpp - [crashr/gppm](https://github.com/crashr/gppm) – launch llama.cpp instances utilizing NVIDIA Tesla P40 or P100 GPUs with reduced idle power consumption - [gpustack/gguf-parser](https://github.com/gpustack/gguf-parser-go/tree/main/cmd/gguf-parser) - review/check the GGUF file and estimate the memory usage -- [Styled Lines](https://marketplace.unity.com/packages/tools/generative-ai/styled-lines-llama-cpp-model-292902) (proprietary licensed, async wrapper of inference part for game development in Unity3d with prebuild Mobile and Web platform wrappers and a model example) +- [Styled Lines](https://marketplace.unity.com/packages/tools/generative-ai/styled-lines-llama-cpp-model-292902) (proprietary licensed, async wrapper of inference part for game development in Unity3d with pre-built Mobile and Web platform wrappers and a model example) -**Infrastructure:** +
+
+<details>
+<summary>Infrastructure</summary>
+
- [Paddler](https://github.com/distantmagic/paddler) - Stateful load balancer custom-tailored for llama.cpp
- [GPUStack](https://github.com/gpustack/gpustack) - Manage GPU clusters for running LLMs
- [llama_cpp_canister](https://github.com/onicai/llama_cpp_canister) - llama.cpp as a smart contract on the Internet Computer, using WebAssembly
-**Games:**
-- [Lucy's Labyrinth](https://github.com/MorganRO8/Lucys_Labyrinth) - A simple maze game where agents controlled by an AI model will try to trick you.
-
-## Demo
+</details>
-Typical run using LLaMA v2 13B on M2 Ultra - -``` -$ make -j && ./llama-cli -m models/llama-13b-v2/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -I llama.cpp build info: -I UNAME_S: Darwin -I UNAME_P: arm -I UNAME_M: arm64 -I CFLAGS: -I. -O3 -std=c11 -fPIC -DNDEBUG -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes -pthread -DGGML_USE_K_QUANTS -DGGML_USE_ACCELERATE -I CXXFLAGS: -I. -I./common -O3 -std=c++11 -fPIC -DNDEBUG -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar -pthread -DGGML_USE_K_QUANTS -I LDFLAGS: -framework Accelerate -I CC: Apple clang version 14.0.3 (clang-1403.0.22.14.1) -I CXX: Apple clang version 14.0.3 (clang-1403.0.22.14.1) - -make: Nothing to be done for `default'. -main: build = 1041 (cf658ad) -main: seed = 1692823051 -llama_model_loader: loaded meta data with 16 key-value pairs and 363 tensors from models/llama-13b-v2/ggml-model-q4_0.gguf (version GGUF V1 (latest)) -llama_model_loader: - type f32: 81 tensors -llama_model_loader: - type q4_0: 281 tensors -llama_model_loader: - type q6_K: 1 tensors -llm_load_print_meta: format = GGUF V1 (latest) -llm_load_print_meta: arch = llama -llm_load_print_meta: vocab type = SPM -llm_load_print_meta: n_vocab = 32000 -llm_load_print_meta: n_merges = 0 -llm_load_print_meta: n_ctx_train = 4096 -llm_load_print_meta: n_ctx = 512 -llm_load_print_meta: n_embd = 5120 -llm_load_print_meta: n_head = 40 -llm_load_print_meta: n_head_kv = 40 -llm_load_print_meta: n_layer = 40 -llm_load_print_meta: n_rot = 128 -llm_load_print_meta: n_gqa = 1 -llm_load_print_meta: f_norm_eps = 1.0e-05 -llm_load_print_meta: f_norm_rms_eps = 1.0e-05 -llm_load_print_meta: n_ff = 13824 -llm_load_print_meta: freq_base = 10000.0 -llm_load_print_meta: freq_scale = 1 -llm_load_print_meta: model type = 13B -llm_load_print_meta: model ftype = mostly Q4_0 -llm_load_print_meta: model size = 13.02 B -llm_load_print_meta: general.name = LLaMA v2 -llm_load_print_meta: BOS token = 1 '' -llm_load_print_meta: EOS token = 2 '' -llm_load_print_meta: UNK token = 0 '' -llm_load_print_meta: LF token = 13 '<0x0A>' -llm_load_tensors: ggml ctx size = 0.11 MB -llm_load_tensors: mem required = 7024.01 MB (+ 400.00 MB per state) -................................................................................................... -llama_new_context_with_model: kv self size = 400.00 MB -llama_new_context_with_model: compute buffer total size = 75.41 MB - -system_info: n_threads = 16 / 24 | AVX = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 0 | NEON = 1 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | VSX = 0 | -sampling: repeat_last_n = 64, repeat_penalty = 1.100000, presence_penalty = 0.000000, frequency_penalty = 0.000000, top_k = 40, tfs_z = 1.000000, top_p = 0.950000, typical_p = 1.000000, temp = 0.800000, mirostat = 0, mirostat_lr = 0.100000, mirostat_ent = 5.000000 -generate: n_ctx = 512, n_batch = 512, n_predict = 400, n_keep = 0 - - - Building a website can be done in 10 simple steps: -Step 1: Find the right website platform. -Step 2: Choose your domain name and hosting plan. -Step 3: Design your website layout. -Step 4: Write your website content and add images. 
-Step 5: Install security features to protect your site from hackers or spammers
-Step 6: Test your website on multiple browsers, mobile devices, operating systems etc…
-Step 7: Test it again with people who are not related to you personally – friends or family members will work just fine!
-Step 8: Start marketing and promoting the website via social media channels or paid ads
-Step 9: Analyze how many visitors have come to your site so far, what type of people visit more often than others (e.g., men vs women) etc…
-Step 10: Continue to improve upon all aspects mentioned above by following trends in web design and staying up-to-date on new technologies that can enhance user experience even further!
-How does a Website Work?
-A website works by having pages, which are made of HTML code. This code tells your computer how to display the content on each page you visit – whether it’s an image or text file (like PDFs). In order for someone else’s browser not only be able but also want those same results when accessing any given URL; some additional steps need taken by way of programming scripts that will add functionality such as making links clickable!
-The most common type is called static HTML pages because they remain unchanged over time unless modified manually (either through editing files directly or using an interface such as WordPress). They are usually served up via HTTP protocols – this means anyone can access them without having any special privileges like being part of a group who is allowed into restricted areas online; however, there may still exist some limitations depending upon where one lives geographically speaking.
-How to
-llama_print_timings: load time = 576.45 ms
-llama_print_timings: sample time = 283.10 ms / 400 runs ( 0.71 ms per token, 1412.91 tokens per second)
-llama_print_timings: prompt eval time = 599.83 ms / 19 tokens ( 31.57 ms per token, 31.68 tokens per second)
-llama_print_timings: eval time = 24513.59 ms / 399 runs ( 61.44 ms per token, 16.28 tokens per second)
-llama_print_timings: total time = 25431.49 ms
-```
+<details>
+<summary>Games</summary>
+
+- [Lucy's Labyrinth](https://github.com/MorganRO8/Lucys_Labyrinth) - A simple maze game where agents controlled by an AI model will try to trick you.
+
+</details>
-
-Demo of running both LLaMA-7B and whisper.cpp on a single M1 Pro MacBook +## Supported backends -And here is another demo of running both LLaMA-7B and [whisper.cpp](https://github.com/ggerganov/whisper.cpp) on a single M1 Pro MacBook: +| Backend | Target devices | +| --- | --- | +| [Metal](docs/build.md#metal-build) | Apple Silicon | +| [BLAS](docs/build.md#blas-build) | All | +| [BLIS](docs/backend/BLIS.md) | All | +| [SYCL](docs/backend/SYCL.md) | Intel and Nvidia GPU | +| [MUSA](docs/build.md#musa) | Moore Threads MTT GPU | +| [CUDA](docs/build.md#cuda) | Nvidia GPU | +| [HIP](docs/build.md#hip) | AMD GPU | +| [Vulkan](docs/build.md#vulkan) | GPU | +| [CANN](docs/build.md#cann) | Ascend NPU | -https://user-images.githubusercontent.com/1991296/224442907-7693d4be-acaa-4e01-8b4f-add84093ffff.mp4 +## Building the project -
+The main product of this project is the `llama` library. Its C-style interface can be found in [include/llama.h](include/llama.h). +The project also includes many example programs and tools using the `llama` library. The examples range from simple, minimal code snippets to sophisticated sub-projects such as an OpenAI-compatible HTTP server. Possible methods for obtaining the binaries: -## Usage +- Clone this repository and build locally, see [how to build](docs/build.md) +- On MacOS or Linux, install `llama.cpp` via [brew, flox or nix](docs/install.md) +- Use a Docker image, see [documentation for Docker](docs/docker.md) +- Download pre-built binaries from [releases](https://github.com/ggerganov/llama.cpp/releases) -Here are the end-to-end binary build and model conversion steps for most supported models. +## Obtaining and quantizing models -### Basic usage +The [Hugging Face](https://huggingface.co) platform hosts a [number of LLMs](https://huggingface.co/models?library=gguf&sort=trending) compatible with `llama.cpp`: -Firstly, you need to get the binary. There are different methods that you can follow: -- Method 1: Clone this repository and build locally, see [how to build](./docs/build.md) -- Method 2: If you are using MacOS or Linux, you can install llama.cpp via [brew, flox or nix](./docs/install.md) -- Method 3: Use a Docker image, see [documentation for Docker](./docs/docker.md) -- Method 4: Download pre-built binary from [releases](https://github.com/ggerganov/llama.cpp/releases) +- [Trending](https://huggingface.co/models?library=gguf&sort=trending) +- [LLaMA](https://huggingface.co/models?sort=trending&search=llama+gguf) -You can run a basic completion using this command: +After downloading a model, use the CLI tools to run it locally - see below. -```bash -llama-cli -m your_model.gguf -p "I believe the meaning of life is" -n 128 +`llama.cpp` requires the model to be stored in the [GGUF](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md) file format. Models in other data formats can be converted to GGUF using the `convert_*.py` Python scripts in this repo. -# Output: -# I believe the meaning of life is to find your own truth and to live in accordance with it. For me, this means being true to myself and following my passions, even if they don't align with societal expectations. I think that's what I love about yoga – it's not just a physical practice, but a spiritual one too. It's about connecting with yourself, listening to your inner voice, and honoring your own unique journey. -``` +The Hugging Face platform provides a variety of online tools for converting, quantizing and hosting models with `llama.cpp`: -See [this page](./examples/main/README.md) for a full list of parameters. 
+- Use the [GGUF-my-repo space](https://huggingface.co/spaces/ggml-org/gguf-my-repo) to convert to GGUF format and quantize model weights to smaller sizes +- Use the [GGUF-my-LoRA space](https://huggingface.co/spaces/ggml-org/gguf-my-lora) to convert LoRA adapters to GGUF format (more info: https://github.com/ggerganov/llama.cpp/discussions/10123) +- Use the [GGUF-editor space](https://huggingface.co/spaces/CISCai/gguf-editor) to edit GGUF meta data in the browser (more info: https://github.com/ggerganov/llama.cpp/discussions/9268) +- Use the [Inference Endpoints](https://ui.endpoints.huggingface.co/) to directly host `llama.cpp` in the cloud (more info: https://github.com/ggerganov/llama.cpp/discussions/9669) -### Conversation mode +To learn more about model quantization, [read this documentation](examples/quantize/README.md) -If you want a more ChatGPT-like experience, you can run in conversation mode by passing `-cnv` as a parameter: +## [`llama-cli`](examples/main) -```bash -llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv +#### A CLI tool for accessing and experimenting with most of `llama.cpp`'s functionality. -# Output: -# > hi, who are you? -# Hi there! I'm your helpful assistant! I'm an AI-powered chatbot designed to assist and provide information to users like you. I'm here to help answer your questions, provide guidance, and offer support on a wide range of topics. I'm a friendly and knowledgeable AI, and I'm always happy to help with anything you need. What's on your mind, and how can I assist you today? -# -# > what is 1+1? -# Easy peasy! The answer to 1+1 is... 2! -``` +-
+ Run simple text completion -By default, the chat template will be taken from the input model. If you want to use another chat template, pass `--chat-template NAME` as a parameter. See the list of [supported templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) + ```bash + llama-cli -m model.gguf -p "I believe the meaning of life is" -n 128 -```bash -./llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv --chat-template chatml -``` + # I believe the meaning of life is to find your own truth and to live in accordance with it. For me, this means being true to myself and following my passions, even if they don't align with societal expectations. I think that's what I love about yoga – it's not just a physical practice, but a spiritual one too. It's about connecting with yourself, listening to your inner voice, and honoring your own unique journey. + ``` -You can also use your own template via in-prefix, in-suffix and reverse-prompt parameters: +
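The snippets above and below assume that `model.gguf` is already on disk. A rough end-to-end sketch of producing one from a Hugging Face checkpoint, using the `convert_hf_to_gguf.py` script and the `llama-quantize` tool from this repo (file and model names are illustrative):

```bash
# install the Python dependencies needed by the conversion scripts
pip install -r requirements.txt

# convert a downloaded Hugging Face model to GGUF (F16 by default)
python convert_hf_to_gguf.py path/to/hf-model --outfile model-f16.gguf

# optionally quantize it to a smaller type
llama-quantize model-f16.gguf model-q4_k_m.gguf Q4_K_M
```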
-```bash -./llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv --in-prefix 'User: ' --reverse-prompt 'User:' -``` +-
+ Run in conversation mode -### Web server + ```bash + llama-cli -m model.gguf -p "You are a helpful assistant" -cnv -[llama.cpp web server](./examples/server/README.md) is a lightweight [OpenAI API](https://github.com/openai/openai-openapi) compatible HTTP server that can be used to serve local models and easily connect them to existing clients. + # > hi, who are you? + # Hi there! I'm your helpful assistant! I'm an AI-powered chatbot designed to assist and provide information to users like you. I'm here to help answer your questions, provide guidance, and offer support on a wide range of topics. I'm a friendly and knowledgeable AI, and I'm always happy to help with anything you need. What's on your mind, and how can I assist you today? + # + # > what is 1+1? + # Easy peasy! The answer to 1+1 is... 2! + ``` -Example usage: +
-```bash -./llama-server -m your_model.gguf --port 8080 +-
+ Run with custom chat template -# Basic web UI can be accessed via browser: http://localhost:8080 -# Chat completion endpoint: http://localhost:8080/v1/chat/completions -``` + ```bash + # use the "chatml" template + llama-cli -m model.gguf -p "You are a helpful assistant" -cnv --chat-template chatml -### Interactive mode + # use a custom template + llama-cli -m model.gguf -p "You are a helpful assistant" -cnv --in-prefix 'User: ' --reverse-prompt 'User:' + ``` -> [!NOTE] -> If you prefer basic usage, please consider using conversation mode instead of interactive mode + [Supported templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) -In this mode, you can always interrupt generation by pressing Ctrl+C and entering one or more lines of text, which will be converted into tokens and appended to the current context. You can also specify a *reverse prompt* with the parameter `-r "reverse prompt string"`. This will result in user input being prompted whenever the exact tokens of the reverse prompt string are encountered in the generation. A typical use is to use a prompt that makes LLaMA emulate a chat between multiple users, say Alice and Bob, and pass `-r "Alice:"`. +
-Here is an example of a few-shot interaction, invoked with the command +-
+ Constrain the output with a custom grammar -```bash -# default arguments using a 7B model -./examples/chat.sh + ```bash + llama-cli -m model.gguf -n 256 --grammar-file grammars/json.gbnf -p 'Request: schedule a call at 8pm; Command:' -# advanced chat with a 13B model -./examples/chat-13B.sh + # {"appointmentTime": "8pm", "appointmentDetails": "schedule a a call"} + ``` -# custom arguments using a 13B model -./llama-cli -m ./models/13B/ggml-model-q4_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt -``` + The [grammars/](grammars/) folder contains a handful of sample grammars. To write your own, check out the [GBNF Guide](grammars/README.md). -Note the use of `--color` to distinguish between user input and generated text. Other parameters are explained in more detail in the [README](examples/main/README.md) for the `llama-cli` example program. + For authoring more complex JSON grammars, check out https://grammar.intrinsiclabs.ai/ -![image](https://user-images.githubusercontent.com/1991296/224575029-2af3c7dc-5a65-4f64-a6bb-517a532aea38.png) +
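Writing a grammar from scratch is often only a few lines of GBNF. A minimal sketch (the file name is arbitrary) that restricts the model to a yes/no answer:

```bash
# create a tiny grammar that only accepts "yes" or "no"
cat > yesno.gbnf <<'EOF'
root ::= "yes" | "no"
EOF

llama-cli -m model.gguf --grammar-file yesno.gbnf -p "Is the sky blue? Answer:"
```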
-### Persistent Interaction -The prompt, user inputs, and model generations can be saved and resumed across calls to `./llama-cli` by leveraging `--prompt-cache` and `--prompt-cache-all`. The `./examples/chat-persistent.sh` script demonstrates this with support for long-running, resumable chat sessions. To use this example, you must provide a file to cache the initial chat prompt and a directory to save the chat session, and may optionally provide the same variables as `chat-13B.sh`. The same prompt cache can be reused for new chat sessions. Note that both prompt cache and chat directory are tied to the initial prompt (`PROMPT_TEMPLATE`) and the model file. +## [`llama-server`](examples/server) -```bash -# Start a new chat -PROMPT_CACHE_FILE=chat.prompt.bin CHAT_SAVE_DIR=./chat/default ./examples/chat-persistent.sh +#### A lightweight, [OpenAI API](https://github.com/openai/openai-openapi) compatible, HTTP server for serving LLMs. -# Resume that chat -PROMPT_CACHE_FILE=chat.prompt.bin CHAT_SAVE_DIR=./chat/default ./examples/chat-persistent.sh +-
+ Start a local HTTP server with default configuration on port 8080 -# Start a different chat with the same prompt/model -PROMPT_CACHE_FILE=chat.prompt.bin CHAT_SAVE_DIR=./chat/another ./examples/chat-persistent.sh + ```bash + llama-server -m model.gguf --port 8080 -# Different prompt cache for different prompt/model -PROMPT_TEMPLATE=./prompts/chat-with-bob.txt PROMPT_CACHE_FILE=bob.prompt.bin \ - CHAT_SAVE_DIR=./chat/bob ./examples/chat-persistent.sh -``` + # Basic web UI can be accessed via browser: http://localhost:8080 + # Chat completion endpoint: http://localhost:8080/v1/chat/completions + ``` -### Constrained output with grammars +
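Once the server is running, any OpenAI-compatible client can talk to it. For example, with plain `curl` (the request body follows the OpenAI chat completions API):

```bash
curl http://localhost:8080/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"messages": [{"role": "user", "content": "Hello!"}]}'
```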
-`llama.cpp` supports grammars to constrain model output. For example, you can force the model to output JSON only: +-
+ Support multiple-users and parallel decoding -```bash -./llama-cli -m ./models/13B/ggml-model-q4_0.gguf -n 256 --grammar-file grammars/json.gbnf -p 'Request: schedule a call at 8pm; Command:' -``` + ```bash + # up to 4 concurrent requests, each with 4096 max context + llama-server -m model.gguf -c 16384 -np 4 + ``` -The `grammars/` folder contains a handful of sample grammars. To write your own, check out the [GBNF Guide](./grammars/README.md). +
-For authoring more complex JSON grammars, you can also check out https://grammar.intrinsiclabs.ai/, a browser app that lets you write TypeScript interfaces which it compiles to GBNF grammars that you can save for local use. Note that the app is built and maintained by members of the community, please file any issues or FRs on [its repo](http://github.com/intrinsiclabsai/gbnfgen) and not this one. +-
+ Enable speculative decoding -## Build + ```bash + # the draft.gguf model should be a small variant of the target model.gguf + llama-server -m model.gguf -md draft.gguf + ``` -Please refer to [Build llama.cpp locally](./docs/build.md) +
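The draft and target models must use compatible vocabularies, since drafted tokens are verified by the target model. An illustrative pairing (the model file names are placeholders, not shipped artifacts):

```bash
# large target model with a small draft model from the same family
llama-server -m qwen2.5-32b-instruct-q4_k_m.gguf -md qwen2.5-0.5b-instruct-q8_0.gguf
```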
-## Supported backends +-
+ Serve an embedding model -| Backend | Target devices | -| --- | --- | -| [Metal](./docs/build.md#metal-build) | Apple Silicon | -| [BLAS](./docs/build.md#blas-build) | All | -| [BLIS](./docs/backend/BLIS.md) | All | -| [SYCL](./docs/backend/SYCL.md) | Intel and Nvidia GPU | -| [MUSA](./docs/build.md#musa) | Moore Threads MTT GPU | -| [CUDA](./docs/build.md#cuda) | Nvidia GPU | -| [hipBLAS](./docs/build.md#hipblas) | AMD GPU | -| [Vulkan](./docs/build.md#vulkan) | GPU | -| [CANN](./docs/build.md#cann) | Ascend NPU | + ```bash + # use the /embedding endpoint + llama-server -m model.gguf --embedding --pooling cls -ub 8192 + ``` + +
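Embeddings can then be requested over HTTP; a sketch with `curl`, assuming the server's `/embedding` endpoint accepts a JSON body with a `content` field:

```bash
curl http://localhost:8080/embedding \
    -H "Content-Type: application/json" \
    -d '{"content": "Hello world"}'
```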
+ +-
+ Serve a reranking model + + ```bash + # use the /reranking endpoint + llama-server -m model.gguf --reranking + ``` + +
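A reranking request scores a list of documents against a query. A sketch with `curl` (the field names are assumptions; double-check against the server docs in [examples/server](examples/server)):

```bash
curl http://localhost:8080/reranking \
    -H "Content-Type: application/json" \
    -d '{"query": "What is GGUF?", "documents": ["GGUF is a binary file format for ggml models.", "The sky is blue."]}'
```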
+ +-
+ Constrain all outputs with a grammar + + ```bash + # custom grammar + llama-server -m model.gguf --grammar-file grammar.gbnf + + # JSON + llama-server -m model.gguf --grammar-file grammars/json.gbnf + ``` + +
+ + +## [`llama-perplexity`](examples/perplexity) -## Tools +#### A tool for measuring the perplexity [^1][^2] (and other quality metrics) of a model over a given text. -### Prepare and Quantize +-
+ Measure the perplexity over a text file -> [!NOTE] -> You can use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to quantise your model weights without any setup too. It is synced from `llama.cpp` main every 6 hours. + ```bash + llama-perplexity -m model.gguf -f file.txt -To obtain the official LLaMA 2 weights please see the Obtaining and using the Facebook LLaMA 2 model section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face. + # [1]15.2701,[2]5.4007,[3]5.3073,[4]6.2965,[5]5.8940,[6]5.6096,[7]5.7942,[8]4.9297, ... + # Final estimate: PPL = 5.4007 +/- 0.67339 + ``` -Note: `convert.py` has been moved to `examples/convert_legacy_llama.py` and shouldn't be used for anything other than `Llama/Llama2/Mistral` models and their derivatives. -It does not support LLaMA 3, you can use `convert_hf_to_gguf.py` with LLaMA 3 downloaded from Hugging Face. +
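For reference, the reported perplexity is the exponentiated average negative log-likelihood over the $N$ evaluated tokens:

$$\mathrm{PPL} = \exp\left(-\frac{1}{N}\sum_{i=1}^{N} \log p(x_i \mid x_{<i})\right)$$

so lower values mean the model finds the text less surprising.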
-To learn more about quantizing model, [read this documentation](./examples/quantize/README.md) +-
+ Measure KL divergence

+ ```bash
+ # a sketch of the intended workflow (file names are illustrative):
+ # 1. save the logits of a high-precision (e.g. F16) base model
+ llama-perplexity -m model-f16.gguf -f file.txt --kl-divergence-base file.logits.bin
+ # 2. compare a quantized variant against the saved logits
+ llama-perplexity -m model-q4_0.gguf --kl-divergence-base file.logits.bin --kl-divergence
+ ```

-You can use the `perplexity` example to measure perplexity over a given prompt (lower perplexity is better).
-For more information, see [https://huggingface.co/docs/transformers/perplexity](https://huggingface.co/docs/transformers/perplexity).

+
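The KL divergence reported here compares, token by token, the output distribution $p$ of a base model with the distribution $q$ of (typically) a quantized version of it:

$$D_{\mathrm{KL}}(p \parallel q) = \sum_{i} p_i \log \frac{p_i}{q_i}$$

A value of 0 means the quantized model reproduces the base model's predictions exactly.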
+ +[^1]: [examples/perplexity/README.md](examples/perplexity/README.md) +[^2]: [https://huggingface.co/docs/transformers/perplexity](https://huggingface.co/docs/transformers/perplexity) + +## [`llama-bench`](examples/llama-bench) + +#### Benchmark the performance of the inference for various parameters. + +-
+ Run default benchmark + + ```bash + llama-bench -m model.gguf + + # Output: + # | model | size | params | backend | threads | test | t/s | + # | ------------------- | ---------: | ---------: | ---------- | ------: | ------------: | -------------------: | + # | qwen2 1.5B Q4_0 | 885.97 MiB | 1.54 B | Metal,BLAS | 16 | pp512 | 5765.41 ± 20.55 | + # | qwen2 1.5B Q4_0 | 885.97 MiB | 1.54 B | Metal,BLAS | 16 | tg128 | 197.71 ± 0.81 | + # + # build: 3e0ba0e60 (4229) + ``` + +
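Other useful knobs (see `llama-bench --help`): `-p` and `-n` set the prompt and generation lengths, `-ngl` offloads layers to the GPU, and comma-separated values sweep multiple configurations:

```bash
# benchmark 512- and 1024-token prompt processing plus 128-token generation
llama-bench -m model.gguf -p 512,1024 -n 128 -ngl 99
```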
+ +## [`llama-run`](examples/run) + +#### A comprehensive example for running `llama.cpp` models. Useful for inferencing. Used with RamaLama [^3]. + +-
+ Run a model with a specific prompt (by default it's pulled from Ollama registry) + + ```bash + llama-run granite-code + ``` + +
+ +[^3]: [RamaLama](https://github.com/containers/ramalama) + +## [`llama-simple`](examples/simple) + +#### A minimal example for implementing apps with `llama.cpp`. Useful for developers. + +-
+ Basic text completion + + ```bash + llama-simple -m model.gguf + + # Hello my name is Kaitlyn and I am a 16 year old girl. I am a junior in high school and I am currently taking a class called "The Art of + ``` + +
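The same binary also accepts a prompt and a token budget on the command line; a usage sketch (the exact flag set may vary by version):

```bash
# -n sets the number of tokens to predict; the trailing argument is the prompt
llama-simple -m model.gguf -n 64 "Hello my name is"
```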
-

## Contributing

@@ -495,20 +513,19 @@ To learn more how to measure perplexity using llama.cpp, [read this documentatio

## Other documentation

-- [main (cli)](./examples/main/README.md)
-- [server](./examples/server/README.md)
-- [jeopardy](./examples/jeopardy/README.md)
-- [GBNF grammars](./grammars/README.md)
+- [main (cli)](examples/main/README.md)
+- [server](examples/server/README.md)
+- [GBNF grammars](grammars/README.md)

-**Development documentation**
+#### Development documentation

-- [How to build](./docs/build.md)
-- [Running on Docker](./docs/docker.md)
-- [Build on Android](./docs/android.md)
-- [Performance troubleshooting](./docs/development/token_generation_performance_tips.md)
+- [How to build](docs/build.md)
+- [Running on Docker](docs/docker.md)
+- [Build on Android](docs/android.md)
+- [Performance troubleshooting](docs/development/token_generation_performance_tips.md)
- [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)

-**Seminal papers and background on the models**
+#### Seminal papers and background on the models

If your issue is with model generation quality, then please at least scan the following links and papers to understand the limitations of LLaMA models. This is especially important when choosing an appropriate model size and appreciating both the significant and subtle differences between LLaMA models and ChatGPT:
- LLaMA:
@@ -519,3 +536,6 @@ If your issue is with model generation quality, then please at least scan the fo
- GPT-3.5 / InstructGPT / ChatGPT:
- [Aligning language models to follow instructions](https://openai.com/research/instruction-following)
- [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155)
+
+#### References
+

diff --git a/Sources/llama/llama.h b/Sources/llama/llama.h
new file mode 100644
index 0000000000000..41725880ed8c0
--- /dev/null
+++ b/Sources/llama/llama.h
@@ -0,0 +1,4 @@
+#pragma once
+
+#include <llama.h>
+

diff --git a/Sources/llama/module.modulemap b/Sources/llama/module.modulemap
new file mode 100644
index 0000000000000..d010555b1cb65
--- /dev/null
+++ b/Sources/llama/module.modulemap
@@ -0,0 +1,5 @@
+module llama [system] {
+    header "llama.h"
+    link "llama"
+    export *
+}

diff --git a/ci/run.sh b/ci/run.sh
index 20610e56009be..abf08a4ff6411 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -815,7 +815,10 @@ if [ -z ${GG_BUILD_LOW_PERF} ]; then
    ln -sfn ${mnt_models} ${SRC}/models-mnt

    # Create a fresh python3 venv and enter it
-    python3 -m venv "$MNT/venv"
+    if ! python3 -m venv "$MNT/venv"; then
+        echo "Error: Failed to create Python virtual environment at $MNT/venv."
+        exit 1
+    fi

    source "$MNT/venv/bin/activate"

    pip install -r ${SRC}/requirements.txt --disable-pip-version-check

diff --git a/cmake/common.cmake b/cmake/common.cmake
new file mode 100644
index 0000000000000..0f54871e4143d
--- /dev/null
+++ b/cmake/common.cmake
@@ -0,0 +1,33 @@
+function(llama_add_compile_flags)
+    if (LLAMA_FATAL_WARNINGS)
+        if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+            list(APPEND C_FLAGS   -Werror)
+            list(APPEND CXX_FLAGS -Werror)
+        elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+            add_compile_options(/WX)
+        endif()
+    endif()
+
+    if (LLAMA_ALL_WARNINGS)
+        if (NOT MSVC)
+            list(APPEND C_FLAGS -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes
+                                -Werror=implicit-int -Werror=implicit-function-declaration)
+
+            list(APPEND CXX_FLAGS -Wmissing-declarations -Wmissing-noreturn)
+
+            list(APPEND WARNING_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function)
+
+            list(APPEND C_FLAGS   ${WARNING_FLAGS})
+            list(APPEND CXX_FLAGS ${WARNING_FLAGS})
+
+            ggml_get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION})
+
+            add_compile_options("$<$<COMPILE_LANGUAGE:C>:${C_FLAGS};${GF_C_FLAGS}>"
+                                "$<$<COMPILE_LANGUAGE:CXX>:${CXX_FLAGS};${GF_CXX_FLAGS}>")
+        else()
+            # todo : msvc
+            set(C_FLAGS   "" PARENT_SCOPE)
+            set(CXX_FLAGS "" PARENT_SCOPE)
+        endif()
+    endif()
+endfunction()

diff --git a/cmake/llama-config.cmake.in b/cmake/llama-config.cmake.in
index ef68417b412cf..5c55bc6b822a6 100644
--- a/cmake/llama-config.cmake.in
+++ b/cmake/llama-config.cmake.in
@@ -3,84 +3,172 @@ set(LLAMA_BUILD_COMMIT @LLAMA_BUILD_COMMIT@)
set(LLAMA_BUILD_NUMBER @LLAMA_BUILD_NUMBER@)
set(LLAMA_SHARED_LIB @BUILD_SHARED_LIBS@)

-set(GGML_BLAS @GGML_BLAS@)
-set(GGML_CUDA @GGML_CUDA@)
-set(GGML_METAL @GGML_METAL@)
-set(GGML_HIP @GGML_HIP@)
-set(GGML_ACCELERATE @GGML_ACCELERATE@)
-set(GGML_VULKAN @GGML_VULKAN@)
-set(GGML_VULKAN_CHECK_RESULTS @GGML_VULKAN_CHECK_RESULTS@)
-set(GGML_VULKAN_DEBUG @GGML_VULKAN_DEBUG@)
-set(GGML_VULKAN_MEMORY_DEBUG @GGML_VULKAN_MEMORY_DEBUG@)
-set(GGML_VULKAN_VALIDATE @GGML_VULKAN_VALIDATE@)
-set(GGML_SYCL @GGML_SYCL@)
-set(GGML_OPENMP @GGML_OPENMP@)
+set(GGML_STATIC @GGML_STATIC@)
+set(GGML_NATIVE @GGML_NATIVE@)
+set(GGML_LTO    @GGML_LTO@)
+set(GGML_CCACHE @GGML_CCACHE@)
+set(GGML_AVX @GGML_AVX@)
+set(GGML_AVX2 @GGML_AVX2@)
+set(GGML_AVX512 @GGML_AVX512@)
+set(GGML_AVX512_VBMI @GGML_AVX512_VBMI@)
+set(GGML_AVX512_VNNI @GGML_AVX512_VNNI@)
+set(GGML_AVX512_BF16 @GGML_AVX512_BF16@)
+set(GGML_AMX_TILE @GGML_AMX_TILE@)
+set(GGML_AMX_INT8 @GGML_AMX_INT8@)
+set(GGML_AMX_BF16 @GGML_AMX_BF16@)
+set(GGML_FMA @GGML_FMA@)
+set(GGML_LASX @GGML_LASX@)
+set(GGML_LSX @GGML_LSX@)
+set(GGML_RVV @GGML_RVV@)
+set(GGML_SVE @GGML_SVE@)

-@PACKAGE_INIT@
+set(GGML_ACCELERATE @GGML_ACCELERATE@)
+set(GGML_OPENMP @GGML_OPENMP@)
+set(GGML_CPU_HBM @GGML_CPU_HBM@)
+set(GGML_BLAS_VENDOR @GGML_BLAS_VENDOR@)

-set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@")
-set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@")
-set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@")
+set(GGML_CUDA_FORCE_MMQ @GGML_CUDA_FORCE_MMQ@)
+set(GGML_CUDA_FORCE_CUBLAS @GGML_CUDA_FORCE_CUBLAS@)
+set(GGML_CUDA_F16 @GGML_CUDA_F16@)
+set(GGML_CUDA_PEER_MAX_BATCH_SIZE @GGML_CUDA_PEER_MAX_BATCH_SIZE@)
+set(GGML_CUDA_NO_PEER_COPY @GGML_CUDA_NO_PEER_COPY@)
+set(GGML_CUDA_NO_VMM @GGML_CUDA_NO_VMM@)
+set(GGML_CUDA_FA_ALL_QUANTS @GGML_CUDA_FA_ALL_QUANTS@)
+set(GGML_CUDA_GRAPHS @GGML_CUDA_GRAPHS@)

-# Ensure transient dependencies satisfied
+set(GGML_HIP_UMA @GGML_HIP_UMA@)

-find_package(Threads REQUIRED)
+set(GGML_VULKAN_CHECK_RESULTS @GGML_VULKAN_CHECK_RESULTS@) +set(GGML_VULKAN_DEBUG @GGML_VULKAN_DEBUG@) +set(GGML_VULKAN_MEMORY_DEBUG @GGML_VULKAN_MEMORY_DEBUG@) +set(GGML_VULKAN_SHADER_DEBUG_INFO @GGML_VULKAN_SHADER_DEBUG_INFO@) +set(GGML_VULKAN_PERF @GGML_VULKAN_PERF@) +set(GGML_VULKAN_VALIDATE @GGML_VULKAN_VALIDATE@) +set(GGML_VULKAN_RUN_TESTS @GGML_VULKAN_RUN_TESTS@) -if (APPLE AND GGML_ACCELERATE) - find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED) -endif() +set(GGML_METAL_USE_BF16 @GGML_METAL_USE_BF16@) +set(GGML_METAL_NDEBUG @GGML_METAL_NDEBUG@) +set(GGML_METAL_SHADER_DEBUG @GGML_METAL_SHADER_DEBUG@) +set(GGML_METAL_EMBED_LIBRARY @GGML_METAL_EMBED_LIBRARY@) +set(GGML_METAL_MACOSX_VERSION_MIN @GGML_METAL_MACOSX_VERSION_MIN@) +set(GGML_METAL_STD @GGML_METAL_STD@) -if (GGML_BLAS) - find_package(BLAS REQUIRED) -endif() +set(GGML_SYCL_F16 @GGML_SYCL_F16@) +set(GGML_SYCL_TARGET @GGML_SYCL_TARGET@) +set(GGML_SYCL_DEVICE_ARCH @GGML_SYCL_DEVICE_ARCH@) -if (GGML_CUDA) - find_package(CUDAToolkit REQUIRED) -endif() -if (GGML_METAL) - find_library(FOUNDATION_LIBRARY Foundation REQUIRED) - find_library(METAL_FRAMEWORK Metal REQUIRED) - find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) -endif() - -if (GGML_VULKAN) - find_package(Vulkan REQUIRED) -endif() +@PACKAGE_INIT@ -if (GGML_HIPBLAS) - find_package(hip REQUIRED) - find_package(hipblas REQUIRED) - find_package(rocblas REQUIRED) -endif() +set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@") +set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@") +set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@") -if (GGML_SYCL) - find_package(IntelSYCL REQUIRED) - find_package(MKL REQUIRED) -endif() +find_package(Threads REQUIRED) -if (GGML_OPENMP) - find_package(OpenMP REQUIRED) +set(_llama_transient_defines "@GGML_TRANSIENT_DEFINES@") +set(_llama_link_deps "") +set(_llama_link_opts "") +foreach(_ggml_lib ggml ggml-base) + string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY") + find_library(${_ggml_lib_var} ${_ggml_lib} + REQUIRED + HINTS ${LLAMA_LIB_DIR} + NO_CMAKE_FIND_ROOT_PATH + ) + list(APPEND _llama_link_deps "${${_ggml_lib_var}}") + message(STATUS "Found ${${_ggml_lib_var}}") +endforeach() + +foreach(backend amx blas cann cpu cuda hip kompute metal musa rpc sycl vulkan) + string(TOUPPER "GGML_${backend}" backend_id) + set(_ggml_lib "ggml-${backend}") + string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY") + + find_library(${_ggml_lib_var} ${_ggml_lib} + HINTS ${LLAMA_LIB_DIR} + NO_CMAKE_FIND_ROOT_PATH + ) + if(${_ggml_lib_var}) + list(APPEND _llama_link_deps "${${_ggml_lib_var}}") + set(${backend_id} ON) + message(STATUS "Found backend ${${_ggml_lib_var}}") + else() + set(${backend_id} OFF) + endif() +endforeach() + +if (NOT LLAMA_SHARED_LIB) + if (APPLE AND GGML_ACCELERATE) + find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED) + list(APPEND _llama_link_deps ${ACCELERATE_FRAMEWORK}) + endif() + + if (GGML_OPENMP) + find_package(OpenMP REQUIRED) + list(APPEND _llama_link_deps OpenMP::OpenMP_C OpenMP::OpenMP_CXX) + endif() + + if (GGML_CPU_HBM) + find_library(memkind memkind REQUIRED) + list(APPEND _llama_link_deps memkind) + endif() + + if (GGML_BLAS) + find_package(BLAS REQUIRED) + list(APPEND _llama_link_deps ${BLAS_LIBRARIES}) + list(APPEND _llama_link_opts ${BLAS_LINKER_FLAGS}) + endif() + + if (GGML_CUDA) + find_package(CUDAToolkit REQUIRED) + endif() + + if (GGML_METAL) + find_library(FOUNDATION_LIBRARY Foundation REQUIRED) + find_library(METAL_FRAMEWORK Metal REQUIRED) + 
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) + list(APPEND _llama_link_deps ${FOUNDATION_LIBRARY} + ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK}) + endif() + + if (GGML_VULKAN) + find_package(Vulkan REQUIRED) + list(APPEND _llama_link_deps Vulkan::Vulkan) + endif() + + if (GGML_HIP) + find_package(hip REQUIRED) + find_package(hipblas REQUIRED) + find_package(rocblas REQUIRED) + list(APPEND _llama_link_deps hip::host roc::rocblas roc::hipblas) + endif() + + if (GGML_SYCL) + find_package(DNNL) + if (${DNNL_FOUND} AND GGML_SYCL_TARGET STREQUAL "INTEL") + list(APPEND _llama_link_deps DNNL::dnnl) + endif() + if (WIN32) + find_package(IntelSYCL REQUIRED) + find_package(MKL REQUIRED) + list(APPEND _llama_link_deps IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL) + endif() + endif() endif() - -find_library(ggml_LIBRARY ggml - REQUIRED - HINTS ${LLAMA_LIB_DIR}) - find_library(llama_LIBRARY llama REQUIRED - HINTS ${LLAMA_LIB_DIR}) - -set(_llama_link_deps "${ggml_LIBRARY}" "@GGML_LINK_LIBRARIES@") -set(_llama_transient_defines "@GGML_TRANSIENT_DEFINES@") + HINTS ${LLAMA_LIB_DIR} + NO_CMAKE_FIND_ROOT_PATH +) add_library(llama UNKNOWN IMPORTED) - set_target_properties(llama PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}" INTERFACE_LINK_LIBRARIES "${_llama_link_deps}" + INTERFACE_LINK_OPTIONS "${_llama_link_opts}" INTERFACE_COMPILE_DEFINITIONS "${_llama_transient_defines}" IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" IMPORTED_LOCATION "${llama_LIBRARY}" diff --git a/cmake/llama.pc.in b/cmake/llama.pc.in index 326acbb6108fd..0b2b6bcfabfd1 100644 --- a/cmake/llama.pc.in +++ b/cmake/llama.pc.in @@ -6,5 +6,5 @@ includedir=${prefix}/include Name: llama Description: Port of Facebook's LLaMA model in C/C++ Version: @PROJECT_VERSION@ -Libs: -L${libdir} -lllama +Libs: -L${libdir} -lggml -lggml-base -lllama Cflags: -I${includedir} diff --git a/cmake/x64-windows-llvm.cmake b/cmake/x64-windows-llvm.cmake new file mode 100644 index 0000000000000..0603d738fbef0 --- /dev/null +++ b/cmake/x64-windows-llvm.cmake @@ -0,0 +1,11 @@ +set( CMAKE_SYSTEM_NAME Windows ) +set( CMAKE_SYSTEM_PROCESSOR x86_64 ) + +set( CMAKE_C_COMPILER clang ) +set( CMAKE_CXX_COMPILER clang++ ) + +set( arch_c_flags "-march=native" ) + +set( CMAKE_C_FLAGS_INIT "${arch_c_flags}" ) +set( CMAKE_CXX_FLAGS_INIT "${arch_c_flags}" ) + diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 5ab1ffa1922aa..df1cdf9a59af3 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -2,6 +2,8 @@ find_package(Threads REQUIRED) +llama_add_compile_flags() + # Build info header # @@ -66,6 +68,8 @@ add_library(${TARGET} STATIC ngram-cache.h sampling.cpp sampling.h + speculative.cpp + speculative.h ) if (BUILD_SHARED_LIBS) @@ -77,12 +81,12 @@ set(LLAMA_COMMON_EXTRA_LIBS build_info) # Use curl to download model url if (LLAMA_CURL) find_package(CURL REQUIRED) - add_definitions(-DLLAMA_USE_CURL) + target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_CURL) include_directories(${CURL_INCLUDE_DIRS}) find_library(CURL_LIBRARY curl REQUIRED) set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} ${CURL_LIBRARY}) endif () target_include_directories(${TARGET} PUBLIC .) 
-target_compile_features (${TARGET} PUBLIC cxx_std_11)
+target_compile_features (${TARGET} PUBLIC cxx_std_17)
target_link_libraries (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads)

diff --git a/common/arg.cpp b/common/arg.cpp
index 4115b2f7511d3..c3d66efb5a701 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -119,26 +119,63 @@ std::string common_arg::to_string() {
// utils
//

-static void common_params_handle_model_default(common_params & params) {
-    if (!params.hf_repo.empty()) {
+static void common_params_handle_model_default(
+    std::string & model,
+    std::string & model_url,
+    std::string & hf_repo,
+    std::string & hf_file) {
+    if (!hf_repo.empty()) {
        // short-hand to avoid specifying --hf-file -> default it to --model
-        if (params.hf_file.empty()) {
-            if (params.model.empty()) {
+        if (hf_file.empty()) {
+            if (model.empty()) {
                throw std::invalid_argument("error: --hf-repo requires either --hf-file or --model\n");
            }
-            params.hf_file = params.model;
-        } else if (params.model.empty()) {
-            params.model = fs_get_cache_file(string_split(params.hf_file, '/').back());
-        }
-    } else if (!params.model_url.empty()) {
-        if (params.model.empty()) {
-            auto f = string_split(params.model_url, '#').front();
+            hf_file = model;
+        } else if (model.empty()) {
+            // this is to avoid different repos having the same file name, or the same file name in different subdirs
+            std::string filename = hf_repo + "_" + hf_file;
+            // to make sure we don't have any slashes in the filename
+            string_replace_all(filename, "/", "_");
+            model = fs_get_cache_file(filename);
+        }
+    } else if (!model_url.empty()) {
+        if (model.empty()) {
+            auto f = string_split(model_url, '#').front();
            f = string_split(f, '?').front();
-            params.model = fs_get_cache_file(string_split(f, '/').back());
+            model = fs_get_cache_file(string_split(f, '/').back());
        }
-    } else if (params.model.empty()) {
-        params.model = DEFAULT_MODEL_PATH;
+    } else if (model.empty()) {
+        model = DEFAULT_MODEL_PATH;
+    }
+}
+
+const std::vector<ggml_type> kv_cache_types = {
+    GGML_TYPE_F32,
+    GGML_TYPE_F16,
+    GGML_TYPE_BF16,
+    GGML_TYPE_Q8_0,
+    GGML_TYPE_Q4_0,
+    GGML_TYPE_Q4_1,
+    GGML_TYPE_IQ4_NL,
+    GGML_TYPE_Q5_0,
+    GGML_TYPE_Q5_1,
+};
+
+static ggml_type kv_cache_type_from_str(const std::string & s) {
+    for (const auto & type : kv_cache_types) {
+        if (ggml_type_name(type) == s) {
+            return type;
+        }
+    }
+    throw std::runtime_error("Unsupported cache type: " + s);
+}
+
+static std::string get_all_kv_cache_types() {
+    std::ostringstream msg;
+    for (const auto & type : kv_cache_types) {
+        msg << ggml_type_name(type) << (&type == &kv_cache_types.back() ? "" : ", ");
"" : ", "); } + return msg.str(); } // @@ -233,16 +270,19 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context } } - postprocess_cpu_params(params.cpuparams, nullptr); + postprocess_cpu_params(params.cpuparams, nullptr); postprocess_cpu_params(params.cpuparams_batch, ¶ms.cpuparams); - postprocess_cpu_params(params.draft_cpuparams, ¶ms.cpuparams); - postprocess_cpu_params(params.draft_cpuparams_batch, ¶ms.cpuparams_batch); + + postprocess_cpu_params(params.speculative.cpuparams, ¶ms.cpuparams); + postprocess_cpu_params(params.speculative.cpuparams_batch, ¶ms.cpuparams_batch); if (params.prompt_cache_all && (params.interactive || params.interactive_first)) { throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n"); } - common_params_handle_model_default(params); + // TODO: refactor model params in a common struct + common_params_handle_model_default(params.model, params.model_url, params.hf_repo, params.hf_file); + common_params_handle_model_default(params.vocoder.model, params.vocoder.model_url, params.vocoder.hf_repo, params.vocoder.hf_file); if (params.escape) { string_process_escapes(params.prompt); @@ -251,7 +291,7 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context for (auto & antiprompt : params.antiprompt) { string_process_escapes(antiprompt); } - for (auto & seq_breaker : params.sparams.dry_sequence_breakers) { + for (auto & seq_breaker : params.sampling.dry_sequence_breakers) { string_process_escapes(seq_breaker); } } @@ -297,6 +337,27 @@ static void common_params_print_usage(common_params_context & ctx_arg) { print_options(specific_options); } +static std::vector parse_device_list(const std::string & value) { + std::vector devices; + auto dev_names = string_split(value, ','); + if (dev_names.empty()) { + throw std::invalid_argument("no devices specified"); + } + if (dev_names.size() == 1 && dev_names[0] == "none") { + devices.push_back(nullptr); + } else { + for (const auto & device : dev_names) { + auto * dev = ggml_backend_dev_by_name(device.c_str()); + if (!dev || ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_GPU) { + throw std::invalid_argument(string_format("invalid device: %s", device.c_str())); + } + devices.push_back(dev); + } + devices.push_back(nullptr); + } + return devices; +} + bool common_params_parse(int argc, char ** argv, common_params & params, llama_example ex, void(*print_usage)(int, char **)) { auto ctx_arg = common_params_parser_init(params, ex, print_usage); const common_params params_org = ctx_arg.params; // the example can modify the default params @@ -322,14 +383,29 @@ bool common_params_parse(int argc, char ** argv, common_params & params, llama_e return true; } +static std::string list_builtin_chat_templates() { + std::vector supported_tmpl; + int32_t res = llama_chat_builtin_templates(nullptr, 0); + supported_tmpl.resize(res); + res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size()); + std::ostringstream msg; + for (auto & tmpl : supported_tmpl) { + msg << tmpl << (&tmpl == &supported_tmpl.back() ? 
"" : ", "); + } + return msg.str(); +} + common_params_context common_params_parser_init(common_params & params, llama_example ex, void(*print_usage)(int, char **)) { + // load dynamic backends + ggml_backend_load_all(); + common_params_context ctx_arg(params); ctx_arg.print_usage = print_usage; ctx_arg.ex = ex; std::string sampler_type_chars; std::string sampler_type_names; - for (const auto & sampler : params.sparams.samplers) { + for (const auto & sampler : params.sampling.samplers) { sampler_type_chars += common_sampler_type_to_chr(sampler); sampler_type_names += common_sampler_type_to_str(sampler) + ";"; } @@ -407,26 +483,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } } )); - add_opt(common_arg( - {"-td", "--threads-draft"}, "N", - "number of threads to use during generation (default: same as --threads)", - [](common_params & params, int value) { - params.draft_cpuparams.n_threads = value; - if (params.draft_cpuparams.n_threads <= 0) { - params.draft_cpuparams.n_threads = std::thread::hardware_concurrency(); - } - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"-tbd", "--threads-batch-draft"}, "N", - "number of threads to use during batch and prompt processing (default: same as --threads-draft)", - [](common_params & params, int value) { - params.draft_cpuparams_batch.n_threads = value; - if (params.draft_cpuparams_batch.n_threads <= 0) { - params.draft_cpuparams_batch.n_threads = std::thread::hardware_concurrency(); - } - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-C", "--cpu-mask"}, "M", "CPU affinity mask: arbitrarily long hex. Complements cpu-range (default: \"\")", @@ -515,108 +571,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.cpuparams_batch.poll = value; } )); - add_opt(common_arg( - {"-Cd", "--cpu-mask-draft"}, "M", - "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)", - [](common_params & params, const std::string & mask) { - params.draft_cpuparams.mask_valid = true; - if (!parse_cpu_mask(mask, params.draft_cpuparams.cpumask)) { - throw std::invalid_argument("invalid cpumask"); - } - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"-Crd", "--cpu-range-draft"}, "lo-hi", - "Ranges of CPUs for affinity. 
Complements --cpu-mask-draft", - [](common_params & params, const std::string & range) { - params.draft_cpuparams.mask_valid = true; - if (!parse_cpu_range(range, params.draft_cpuparams.cpumask)) { - throw std::invalid_argument("invalid range"); - } - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"--cpu-strict-draft"}, "<0|1>", - "Use strict CPU placement for draft model (default: same as --cpu-strict)", - [](common_params & params, int value) { - params.draft_cpuparams.strict_cpu = value; - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"--prio-draft"}, "N", - string_format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.draft_cpuparams.priority), - [](common_params & params, int prio) { - if (prio < 0 || prio > 3) { - throw std::invalid_argument("invalid value"); - } - params.draft_cpuparams.priority = (enum ggml_sched_priority) prio; - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"--poll-draft"}, "<0|1>", - "Use polling to wait for draft model work (default: same as --poll])", - [](common_params & params, int value) { - params.draft_cpuparams.poll = value; - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"-Cbd", "--cpu-mask-batch-draft"}, "M", - "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)", - [](common_params & params, const std::string & mask) { - params.draft_cpuparams_batch.mask_valid = true; - if (!parse_cpu_mask(mask, params.draft_cpuparams_batch.cpumask)) { - throw std::invalid_argument("invalid cpumask"); - } - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi", - "Ranges of CPUs for affinity. 
Complements --cpu-mask-draft-batch)", - [](common_params & params, const std::string & range) { - params.draft_cpuparams_batch.mask_valid = true; - if (!parse_cpu_range(range, params.draft_cpuparams_batch.cpumask)) { - throw std::invalid_argument("invalid cpumask"); - } - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"--cpu-strict-batch-draft"}, "<0|1>", - "Use strict CPU placement for draft model (default: --cpu-strict-draft)", - [](common_params & params, int value) { - params.draft_cpuparams_batch.strict_cpu = value; - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"--prio-batch-draft"}, "N", - string_format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.draft_cpuparams_batch.priority), - [](common_params & params, int prio) { - if (prio < 0 || prio > 3) { - throw std::invalid_argument("invalid value"); - } - params.draft_cpuparams_batch.priority = (enum ggml_sched_priority) prio; - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"--poll-batch-draft"}, "<0|1>", - "Use polling to wait for draft model work (default: --poll-draft)", - [](common_params & params, int value) { - params.draft_cpuparams_batch.poll = value; - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); - add_opt(common_arg( - {"--draft"}, "N", - string_format("number of tokens to draft for speculative decoding (default: %d)", params.n_draft), - [](common_params & params, int value) { - params.n_draft = value; - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP})); - add_opt(common_arg( - {"-ps", "--p-split"}, "N", - string_format("speculative decoding split probability (default: %.1f)", (double)params.p_split), - [](common_params & params, const std::string & value) { - params.p_split = std::stof(value); - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-lcs", "--lookup-cache-static"}, "FNAME", "path to static lookup cache to use for lookup decoding (not updated by generation)", @@ -672,7 +626,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.ctx_shift = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT")); + ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_IMATRIX}).set_env("LLAMA_ARG_NO_CONTEXT_SHIFT")); add_opt(common_arg( {"--chunks"}, "N", string_format("max number of chunks to process (default: %d, -1 = all)", params.n_chunks), @@ -701,7 +655,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? 
"true" : "false"), [](common_params & params) { params.no_perf = true; - params.sparams.no_perf = true; + params.sampling.no_perf = true; } ).set_env("LLAMA_ARG_NO_PERF")); add_opt(common_arg( @@ -867,7 +821,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.warmup = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN})); + ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER})); add_opt(common_arg( {"--spm-infill"}, string_format( @@ -883,155 +837,154 @@ common_params_context common_params_parser_init(common_params & params, llama_ex string_format("samplers that will be used for generation in the order, separated by \';\'\n(default: %s)", sampler_type_names.c_str()), [](common_params & params, const std::string & value) { const auto sampler_names = string_split(value, ';'); - params.sparams.samplers = common_sampler_types_from_names(sampler_names, true); + params.sampling.samplers = common_sampler_types_from_names(sampler_names, true); } ).set_sparam()); add_opt(common_arg( {"-s", "--seed"}, "SEED", - string_format("RNG seed (default: %d, use random seed for %d)", params.sparams.seed, LLAMA_DEFAULT_SEED), + string_format("RNG seed (default: %d, use random seed for %d)", params.sampling.seed, LLAMA_DEFAULT_SEED), [](common_params & params, const std::string & value) { - params.sparams.seed = std::stoul(value); + params.sampling.seed = std::stoul(value); } ).set_sparam()); add_opt(common_arg( - {"--sampling-seq"}, "SEQUENCE", + {"--sampling-seq", "--sampler-seq"}, "SEQUENCE", string_format("simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str()), [](common_params & params, const std::string & value) { - params.sparams.samplers = common_sampler_types_from_chars(value); + params.sampling.samplers = common_sampler_types_from_chars(value); } ).set_sparam()); add_opt(common_arg( {"--ignore-eos"}, "ignore end of stream token and continue generating (implies --logit-bias EOS-inf)", [](common_params & params) { - params.sparams.ignore_eos = true; - } - ).set_sparam()); - add_opt(common_arg( - {"--penalize-nl"}, - string_format("penalize newline tokens (default: %s)", params.sparams.penalize_nl ? 
"true" : "false"), - [](common_params & params) { - params.sparams.penalize_nl = true; + params.sampling.ignore_eos = true; } ).set_sparam()); add_opt(common_arg( {"--temp"}, "N", - string_format("temperature (default: %.1f)", (double)params.sparams.temp), + string_format("temperature (default: %.1f)", (double)params.sampling.temp), [](common_params & params, const std::string & value) { - params.sparams.temp = std::stof(value); - params.sparams.temp = std::max(params.sparams.temp, 0.0f); + params.sampling.temp = std::stof(value); + params.sampling.temp = std::max(params.sampling.temp, 0.0f); } ).set_sparam()); add_opt(common_arg( {"--top-k"}, "N", - string_format("top-k sampling (default: %d, 0 = disabled)", params.sparams.top_k), + string_format("top-k sampling (default: %d, 0 = disabled)", params.sampling.top_k), [](common_params & params, int value) { - params.sparams.top_k = value; + params.sampling.top_k = value; } ).set_sparam()); add_opt(common_arg( {"--top-p"}, "N", - string_format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sparams.top_p), + string_format("top-p sampling (default: %.1f, 1.0 = disabled)", (double)params.sampling.top_p), [](common_params & params, const std::string & value) { - params.sparams.top_p = std::stof(value); + params.sampling.top_p = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--min-p"}, "N", - string_format("min-p sampling (default: %.1f, 0.0 = disabled)", (double)params.sparams.min_p), + string_format("min-p sampling (default: %.1f, 0.0 = disabled)", (double)params.sampling.min_p), [](common_params & params, const std::string & value) { - params.sparams.min_p = std::stof(value); + params.sampling.min_p = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--xtc-probability"}, "N", - string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sparams.xtc_probability), + string_format("xtc probability (default: %.1f, 0.0 = disabled)", (double)params.sampling.xtc_probability), [](common_params & params, const std::string & value) { - params.sparams.xtc_probability = std::stof(value); + params.sampling.xtc_probability = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--xtc-threshold"}, "N", - string_format("xtc threshold (default: %.1f, 1.0 = disabled)", (double)params.sparams.xtc_threshold), + string_format("xtc threshold (default: %.1f, 1.0 = disabled)", (double)params.sampling.xtc_threshold), [](common_params & params, const std::string & value) { - params.sparams.xtc_threshold = std::stof(value); + params.sampling.xtc_threshold = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--typical"}, "N", - string_format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sparams.typ_p), + string_format("locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)params.sampling.typ_p), [](common_params & params, const std::string & value) { - params.sparams.typ_p = std::stof(value); + params.sampling.typ_p = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--repeat-last-n"}, "N", - string_format("last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)", params.sparams.penalty_last_n), + string_format("last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)", params.sampling.penalty_last_n), [](common_params & params, int value) { - params.sparams.penalty_last_n = value; - params.sparams.n_prev = std::max(params.sparams.n_prev, params.sparams.penalty_last_n); + if 
(value < -1) { + throw std::runtime_error(string_format("error: invalid repeat-last-n = %d\n", value)); + } + params.sampling.penalty_last_n = value; + params.sampling.n_prev = std::max(params.sampling.n_prev, params.sampling.penalty_last_n); } ).set_sparam()); add_opt(common_arg( {"--repeat-penalty"}, "N", - string_format("penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)params.sparams.penalty_repeat), + string_format("penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)params.sampling.penalty_repeat), [](common_params & params, const std::string & value) { - params.sparams.penalty_repeat = std::stof(value); + params.sampling.penalty_repeat = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--presence-penalty"}, "N", - string_format("repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)params.sparams.penalty_present), + string_format("repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_present), [](common_params & params, const std::string & value) { - params.sparams.penalty_present = std::stof(value); + params.sampling.penalty_present = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--frequency-penalty"}, "N", - string_format("repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)params.sparams.penalty_freq), + string_format("repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)params.sampling.penalty_freq), [](common_params & params, const std::string & value) { - params.sparams.penalty_freq = std::stof(value); + params.sampling.penalty_freq = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--dry-multiplier"}, "N", - string_format("set DRY sampling multiplier (default: %.1f, 0.0 = disabled)", (double)params.sparams.dry_multiplier), + string_format("set DRY sampling multiplier (default: %.1f, 0.0 = disabled)", (double)params.sampling.dry_multiplier), [](common_params & params, const std::string & value) { - params.sparams.dry_multiplier = std::stof(value); + params.sampling.dry_multiplier = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--dry-base"}, "N", - string_format("set DRY sampling base value (default: %.2f)", (double)params.sparams.dry_base), + string_format("set DRY sampling base value (default: %.2f)", (double)params.sampling.dry_base), [](common_params & params, const std::string & value) { float potential_base = std::stof(value); if (potential_base >= 1.0f) { - params.sparams.dry_base = potential_base; + params.sampling.dry_base = potential_base; } } ).set_sparam()); add_opt(common_arg( {"--dry-allowed-length"}, "N", - string_format("set allowed length for DRY sampling (default: %d)", params.sparams.dry_allowed_length), + string_format("set allowed length for DRY sampling (default: %d)", params.sampling.dry_allowed_length), [](common_params & params, int value) { - params.sparams.dry_allowed_length = value; + params.sampling.dry_allowed_length = value; } ).set_sparam()); add_opt(common_arg( {"--dry-penalty-last-n"}, "N", - string_format("set DRY penalty for the last n tokens (default: %d, 0 = disable, -1 = context size)", params.sparams.dry_penalty_last_n), + string_format("set DRY penalty for the last n tokens (default: %d, 0 = disable, -1 = context size)", params.sampling.dry_penalty_last_n), [](common_params & params, int value) { - params.sparams.dry_penalty_last_n = value; + if (value < -1) { + throw std::runtime_error(string_format("error: invalid dry-penalty-last-n = 
%d\n", value)); + } + params.sampling.dry_penalty_last_n = value; } ).set_sparam()); add_opt(common_arg( {"--dry-sequence-breaker"}, "STRING", string_format("add sequence breaker for DRY sampling, clearing out default breakers (%s) in the process; use \"none\" to not use any sequence breakers\n", - params.sparams.dry_sequence_breakers.empty() ? "none" : - std::accumulate(std::next(params.sparams.dry_sequence_breakers.begin()), - params.sparams.dry_sequence_breakers.end(), - std::string("'") + (params.sparams.dry_sequence_breakers[0] == "\n" ? "\\n" : params.sparams.dry_sequence_breakers[0]) + "'", + params.sampling.dry_sequence_breakers.empty() ? "none" : + std::accumulate(std::next(params.sampling.dry_sequence_breakers.begin()), + params.sampling.dry_sequence_breakers.end(), + std::string("'") + (params.sampling.dry_sequence_breakers[0] == "\n" ? "\\n" : params.sampling.dry_sequence_breakers[0]) + "'", [](const std::string& a, const std::string& b) { std::string formatted_b = (b == "\n") ? "\\n" : b; return a + ", '" + formatted_b + "'"; @@ -1040,51 +993,51 @@ common_params_context common_params_parser_init(common_params & params, llama_ex static bool defaults_cleared = false; if (!defaults_cleared) { - params.sparams.dry_sequence_breakers.clear(); + params.sampling.dry_sequence_breakers.clear(); defaults_cleared = true; } if (value == "none") { - params.sparams.dry_sequence_breakers.clear(); + params.sampling.dry_sequence_breakers.clear(); } else { - params.sparams.dry_sequence_breakers.emplace_back(value); + params.sampling.dry_sequence_breakers.emplace_back(value); } } ).set_sparam()); add_opt(common_arg( {"--dynatemp-range"}, "N", - string_format("dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)params.sparams.dynatemp_range), + string_format("dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)params.sampling.dynatemp_range), [](common_params & params, const std::string & value) { - params.sparams.dynatemp_range = std::stof(value); + params.sampling.dynatemp_range = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--dynatemp-exp"}, "N", - string_format("dynamic temperature exponent (default: %.1f)", (double)params.sparams.dynatemp_exponent), + string_format("dynamic temperature exponent (default: %.1f)", (double)params.sampling.dynatemp_exponent), [](common_params & params, const std::string & value) { - params.sparams.dynatemp_exponent = std::stof(value); + params.sampling.dynatemp_exponent = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--mirostat"}, "N", string_format("use Mirostat sampling.\nTop K, Nucleus and Locally Typical samplers are ignored if used.\n" - "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", params.sparams.mirostat), + "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", params.sampling.mirostat), [](common_params & params, int value) { - params.sparams.mirostat = value; + params.sampling.mirostat = value; } ).set_sparam()); add_opt(common_arg( {"--mirostat-lr"}, "N", - string_format("Mirostat learning rate, parameter eta (default: %.1f)", (double)params.sparams.mirostat_eta), + string_format("Mirostat learning rate, parameter eta (default: %.1f)", (double)params.sampling.mirostat_eta), [](common_params & params, const std::string & value) { - params.sparams.mirostat_eta = std::stof(value); + params.sampling.mirostat_eta = std::stof(value); } ).set_sparam()); add_opt(common_arg( {"--mirostat-ent"}, "N", - string_format("Mirostat target entropy, parameter tau (default: %.1f)", 
(double)params.sparams.mirostat_tau), + string_format("Mirostat target entropy, parameter tau (default: %.1f)", (double)params.sampling.mirostat_tau), [](common_params & params, const std::string & value) { - params.sparams.mirostat_tau = std::stof(value); + params.sampling.mirostat_tau = std::stof(value); } ).set_sparam()); add_opt(common_arg( @@ -1100,7 +1053,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex try { if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) { const float bias = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f); - params.sparams.logit_bias.push_back({key, bias}); + params.sampling.logit_bias.push_back({key, bias}); } else { throw std::invalid_argument("invalid input format"); } @@ -1111,9 +1064,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_sparam()); add_opt(common_arg( {"--grammar"}, "GRAMMAR", - string_format("BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", params.sparams.grammar.c_str()), + string_format("BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", params.sampling.grammar.c_str()), [](common_params & params, const std::string & value) { - params.sparams.grammar = value; + params.sampling.grammar = value; } ).set_sparam()); add_opt(common_arg( @@ -1127,7 +1080,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex std::copy( std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), - std::back_inserter(params.sparams.grammar) + std::back_inserter(params.sampling.grammar) ); } ).set_sparam()); @@ -1135,7 +1088,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex {"-j", "--json-schema"}, "SCHEMA", "JSON schema to constrain generations (https://json-schema.org/), e.g. 
`{}` for any JSON object\nFor schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead", [](common_params & params, const std::string & value) { - params.sparams.grammar = json_schema_to_grammar(json::parse(value)); + params.sampling.grammar = json_schema_to_grammar(json::parse(value)); } ).set_sparam()); add_opt(common_arg( @@ -1255,18 +1208,28 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_env("LLAMA_ARG_NO_KV_OFFLOAD")); add_opt(common_arg( {"-ctk", "--cache-type-k"}, "TYPE", - string_format("KV cache data type for K (default: %s)", params.cache_type_k.c_str()), + string_format( + "KV cache data type for K\n" + "allowed values: %s\n" + "(default: %s)", + get_all_kv_cache_types().c_str(), + ggml_type_name(params.cache_type_k) + ), [](common_params & params, const std::string & value) { - // TODO: get the type right here - params.cache_type_k = value; + params.cache_type_k = kv_cache_type_from_str(value); } ).set_env("LLAMA_ARG_CACHE_TYPE_K")); add_opt(common_arg( {"-ctv", "--cache-type-v"}, "TYPE", - string_format("KV cache data type for V (default: %s)", params.cache_type_v.c_str()), + string_format( + "KV cache data type for V\n" + "allowed values: %s\n" + "(default: %s)", + get_all_kv_cache_types().c_str(), + ggml_type_name(params.cache_type_v) + ), [](common_params & params, const std::string & value) { - // TODO: get the type right here - params.cache_type_v = value; + params.cache_type_v = kv_cache_type_from_str(value); } ).set_env("LLAMA_ARG_CACHE_TYPE_V")); add_opt(common_arg( @@ -1433,28 +1396,42 @@ common_params_context common_params_parser_init(common_params & params, llama_ex else { throw std::invalid_argument("invalid value"); } } ).set_env("LLAMA_ARG_NUMA")); + add_opt(common_arg( + {"-dev", "--device"}, "", + "comma-separated list of devices to use for offloading (none = don't offload)\n" + "use --list-devices to see a list of available devices", + [](common_params & params, const std::string & value) { + params.devices = parse_device_list(value); + } + ).set_env("LLAMA_ARG_DEVICE")); + add_opt(common_arg( + {"--list-devices"}, + "print list of available devices and exit", + [](common_params &) { + printf("Available devices:\n"); + for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { + auto * dev = ggml_backend_dev_get(i); + if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) { + size_t free, total; + ggml_backend_dev_memory(dev, &free, &total); + printf(" %s: %s (%zu MiB, %zu MiB free)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), total / 1024 / 1024, free / 1024 / 1024); + } + } + exit(0); + } + )); add_opt(common_arg( {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N", "number of layers to store in VRAM", [](common_params & params, int value) { params.n_gpu_layers = value; if (!llama_supports_gpu_offload()) { - fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers option will be ignored\n"); - fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); + fprintf(stderr, "warning: no usable GPU found, --gpu-layers option will be ignored\n"); + fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n"); + fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n"); } } ).set_env("LLAMA_ARG_N_GPU_LAYERS")); - add_opt(common_arg( - {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N", - "number of layers to store in VRAM for the draft model", - 
[](common_params & params, int value) { - params.n_gpu_layers_draft = value; - if (!llama_supports_gpu_offload()) { - fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers-draft option will be ignored\n"); - fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); - } - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-sm", "--split-mode"}, "{none,layer,row}", "how to split the model across multiple GPUs, one of:\n" @@ -1468,10 +1445,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } else if (arg_next == "layer") { params.split_mode = LLAMA_SPLIT_MODE_LAYER; } else if (arg_next == "row") { -#ifdef GGML_USE_SYCL - fprintf(stderr, "warning: The split mode value:[row] is not supported by llama.cpp with SYCL. It's developing.\nExit!\n"); - exit(1); -#endif // GGML_USE_SYCL params.split_mode = LLAMA_SPLIT_MODE_ROW; } else { throw std::invalid_argument("invalid value"); @@ -1593,13 +1566,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.model = value; } ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}).set_env("LLAMA_ARG_MODEL")); - add_opt(common_arg( - {"-md", "--model-draft"}, "FNAME", - "draft model for speculative decoding (default: unused)", - [](common_params & params, const std::string & value) { - params.model_draft = value; - } - ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); add_opt(common_arg( {"-mu", "--model-url"}, "MODEL_URL", "model download url (default: unused)", @@ -1621,6 +1587,20 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.hf_file = value; } ).set_env("LLAMA_ARG_HF_FILE")); + add_opt(common_arg( + {"-hfrv", "--hf-repo-v"}, "REPO", + "Hugging Face model repository for the vocoder model (default: unused)", + [](common_params & params, const std::string & value) { + params.vocoder.hf_repo = value; + } + ).set_env("LLAMA_ARG_HF_REPO_V")); + add_opt(common_arg( + {"-hffv", "--hf-file-v"}, "FILE", + "Hugging Face model file for the vocoder model (default: unused)", + [](common_params & params, const std::string & value) { + params.vocoder.hf_file = value; + } + ).set_env("LLAMA_ARG_HF_FILE_V")); add_opt(common_arg( {"-hft", "--hf-token"}, "TOKEN", "Hugging Face access token (default: value from HF_TOKEN environment variable)", @@ -1789,6 +1769,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex params.public_path = value; } ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_STATIC_PATH")); + add_opt(common_arg( + {"--no-webui"}, + string_format("Disable the Web UI (default: %s)", params.webui ? "enabled" : "disabled"), + [](common_params & params) { + params.webui = false; + } + ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_NO_WEBUI")); add_opt(common_arg( {"--embedding", "--embeddings"}, string_format("restrict to only support embedding use case; use only with dedicated embedding models (default: %s)", params.embedding ? 
"enabled" : "disabled"), @@ -1904,9 +1891,11 @@ common_params_context common_params_parser_init(common_params & params, llama_ex ).set_examples({LLAMA_EXAMPLE_SERVER})); add_opt(common_arg( {"--chat-template"}, "JINJA_TEMPLATE", - "set custom jinja chat template (default: template taken from model's metadata)\n" - "if suffix/prefix are specified, template will be disabled\n" - "only commonly used templates are accepted:\nhttps://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template", + string_format( + "set custom jinja chat template (default: template taken from model's metadata)\n" + "if suffix/prefix are specified, template will be disabled\n" + "list of built-in templates:\n%s", list_builtin_chat_templates().c_str() + ), [](common_params & params, const std::string & value) { if (!common_chat_verify_template(value)) { throw std::runtime_error(string_format( @@ -2037,5 +2026,197 @@ common_params_context common_params_parser_init(common_params & params, llama_ex } ).set_env("LLAMA_LOG_TIMESTAMPS")); + // speculative parameters + add_opt(common_arg( + {"-td", "--threads-draft"}, "N", + "number of threads to use during generation (default: same as --threads)", + [](common_params & params, int value) { + params.speculative.cpuparams.n_threads = value; + if (params.speculative.cpuparams.n_threads <= 0) { + params.speculative.cpuparams.n_threads = std::thread::hardware_concurrency(); + } + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"-tbd", "--threads-batch-draft"}, "N", + "number of threads to use during batch and prompt processing (default: same as --threads-draft)", + [](common_params & params, int value) { + params.speculative.cpuparams_batch.n_threads = value; + if (params.speculative.cpuparams_batch.n_threads <= 0) { + params.speculative.cpuparams_batch.n_threads = std::thread::hardware_concurrency(); + } + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"-Cd", "--cpu-mask-draft"}, "M", + "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)", + [](common_params & params, const std::string & mask) { + params.speculative.cpuparams.mask_valid = true; + if (!parse_cpu_mask(mask, params.speculative.cpuparams.cpumask)) { + throw std::invalid_argument("invalid cpumask"); + } + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"-Crd", "--cpu-range-draft"}, "lo-hi", + "Ranges of CPUs for affinity. 
Complements --cpu-mask-draft", + [](common_params & params, const std::string & range) { + params.speculative.cpuparams.mask_valid = true; + if (!parse_cpu_range(range, params.speculative.cpuparams.cpumask)) { + throw std::invalid_argument("invalid range"); + } + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"--cpu-strict-draft"}, "<0|1>", + "Use strict CPU placement for draft model (default: same as --cpu-strict)", + [](common_params & params, int value) { + params.speculative.cpuparams.strict_cpu = value; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"--prio-draft"}, "N", + string_format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams.priority), + [](common_params & params, int prio) { + if (prio < 0 || prio > 3) { + throw std::invalid_argument("invalid value"); + } + params.speculative.cpuparams.priority = (enum ggml_sched_priority) prio; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"--poll-draft"}, "<0|1>", + "Use polling to wait for draft model work (default: same as --poll])", + [](common_params & params, int value) { + params.speculative.cpuparams.poll = value; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"-Cbd", "--cpu-mask-batch-draft"}, "M", + "Draft model CPU affinity mask. Complements cpu-range-draft (default: same as --cpu-mask)", + [](common_params & params, const std::string & mask) { + params.speculative.cpuparams_batch.mask_valid = true; + if (!parse_cpu_mask(mask, params.speculative.cpuparams_batch.cpumask)) { + throw std::invalid_argument("invalid cpumask"); + } + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"-Crbd", "--cpu-range-batch-draft"}, "lo-hi", + "Ranges of CPUs for affinity. 
+ add_opt(common_arg( + {"--cpu-strict-batch-draft"}, "<0|1>", + "Use strict CPU placement for draft model (default: --cpu-strict-draft)", + [](common_params & params, int value) { + params.speculative.cpuparams_batch.strict_cpu = value; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"--prio-batch-draft"}, "N", + string_format("set draft process/thread priority : 0-normal, 1-medium, 2-high, 3-realtime (default: %d)\n", params.speculative.cpuparams_batch.priority), + [](common_params & params, int prio) { + if (prio < 0 || prio > 3) { + throw std::invalid_argument("invalid value"); + } + params.speculative.cpuparams_batch.priority = (enum ggml_sched_priority) prio; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"--poll-batch-draft"}, "<0|1>", + "Use polling to wait for draft model work (default: --poll-draft)", + [](common_params & params, int value) { + params.speculative.cpuparams_batch.poll = value; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE})); + add_opt(common_arg( + {"--draft-max", "--draft", "--draft-n"}, "N", + string_format("number of tokens to draft for speculative decoding (default: %d)", params.speculative.n_max), + [](common_params & params, int value) { + params.speculative.n_max = value; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MAX")); + add_opt(common_arg( + {"--draft-min", "--draft-n-min"}, "N", + string_format("minimum number of draft tokens to use for speculative decoding (default: %d)", params.speculative.n_min), + [](common_params & params, int value) { + params.speculative.n_min = value; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_MIN")); + add_opt(common_arg( + {"--draft-p-split"}, "P", + string_format("speculative decoding split probability (default: %.1f)", (double)params.speculative.p_split), + [](common_params & params, const std::string & value) { + params.speculative.p_split = std::stof(value); + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE}).set_env("LLAMA_ARG_DRAFT_P_SPLIT")); + add_opt(common_arg( + {"--draft-p-min"}, "P", + string_format("minimum speculative decoding probability (greedy) (default: %.1f)", (double)params.speculative.p_min), + [](common_params & params, const std::string & value) { + params.speculative.p_min = std::stof(value); + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_DRAFT_P_MIN")); + add_opt(common_arg( + {"-cd", "--ctx-size-draft"}, "N", + string_format("size of the prompt context for the draft model (default: %d, 0 = loaded from model)", params.speculative.n_ctx), + [](common_params & params, int value) { + params.speculative.n_ctx = value; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CTX_SIZE_DRAFT")); + add_opt(common_arg( + {"-devd", "--device-draft"}, "", + "comma-separated list of devices to use for offloading the draft model (none = don't offload)\n" + "use --list-devices to see a list of available devices", + [](common_params & params, const std::string & value) { + params.speculative.devices = 
parse_device_list(value); + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER})); + add_opt(common_arg( + {"-ngld", "--gpu-layers-draft", "--n-gpu-layers-draft"}, "N", + "number of layers to store in VRAM for the draft model", + [](common_params & params, int value) { + params.speculative.n_gpu_layers = value; + if (!llama_supports_gpu_offload()) { + fprintf(stderr, "warning: no usable GPU found, --gpu-layers-draft option will be ignored\n"); + fprintf(stderr, "warning: one possible reason is that llama.cpp was compiled without GPU support\n"); + fprintf(stderr, "warning: consult docs/build.md for compilation instructions\n"); + } + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_GPU_LAYERS_DRAFT")); + add_opt(common_arg( + {"-md", "--model-draft"}, "FNAME", + "draft model for speculative decoding (default: unused)", + [](common_params & params, const std::string & value) { + params.speculative.model = value; + } + ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT")); + + add_opt(common_arg( + {"-mv", "--model-vocoder"}, "FNAME", + "vocoder model for audio generation (default: unused)", + [](common_params & params, const std::string & value) { + params.vocoder.model = value; + } + ).set_examples({LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_SERVER})); + + // model-specific + add_opt(common_arg( + {"--tts-oute-default"}, + string_format("use default OuteTTS models (note: can download weights from the internet)"), + [](common_params & params) { + params.hf_repo = "OuteAI/OuteTTS-0.2-500M-GGUF"; + params.hf_file = "OuteTTS-0.2-500M-Q8_0.gguf"; + params.vocoder.hf_repo = "ggml-org/WavTokenizer"; + params.vocoder.hf_file = "WavTokenizer-Large-75-F16.gguf"; + } + ).set_examples({LLAMA_EXAMPLE_TTS})); + return ctx_arg; } diff --git a/common/common.cpp b/common/common.cpp index 93037462127d5..20be9291161ca 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -536,12 +536,12 @@ std::string string_from(const struct llama_context * ctx, const struct llama_bat [](const unsigned char c) { return !std::isprint(c); }), detokenized.end()); - buf << "\n" << std::to_string(i) - << ":token '" << detokenized << "'" - << ":pos " << std::to_string(batch.pos[i]) - << ":n_seq_id " << std::to_string(batch.n_seq_id[i]) - << ":seq_id " << std::to_string(batch.seq_id[i][0]) - << ":logits " << std::to_string(batch.logits[i]); + buf << "\n" << std::to_string(i) + << ", token '" << detokenized << "'" + << ", pos " << std::to_string(batch.pos[i]) + << ", n_seq_id " << std::to_string(batch.n_seq_id[i]) + << ", seq_id " << std::to_string(batch.seq_id[i][0]) + << ", logits " << std::to_string(batch.logits[i]); } buf << " ]"; @@ -652,7 +652,17 @@ bool fs_validate_filename(const std::string & filename) { std::u32string filename_utf32; try { +#if defined(__clang__) + // disable C++17 deprecation warning for std::codecvt_utf8 +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter; + +#if defined(__clang__) +# pragma clang diagnostic pop +#endif + filename_utf32 = converter.from_bytes(filename); // If the reverse conversion mismatches, it means overlong UTF-8 sequences were used, @@ -829,9 +839,9 @@ struct common_init_result common_init_from_params(common_params & params) { llama_model * model = nullptr; if (!params.hf_repo.empty() && !params.hf_file.empty()) { - model = common_load_model_from_hf(params.hf_repo.c_str(), 
params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams); + model = common_load_model_from_hf(params.hf_repo, params.hf_file, params.model, params.hf_token, mparams); } else if (!params.model_url.empty()) { - model = common_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams); + model = common_load_model_from_url(params.model_url, params.model, params.hf_token, mparams); } else { model = llama_load_model_from_file(params.model.c_str(), mparams); } @@ -875,6 +885,12 @@ struct common_init_result common_init_from_params(common_params & params) { return iparams; } + if (params.ctx_shift && !llama_kv_cache_can_shift(lctx)) { + LOG_ERR("%s: KV cache shifting is not supported for this model (--no-context-shift to disable)\n", __func__); + llama_free_model(model); + return iparams; + } + if (!params.control_vectors.empty()) { if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1; if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_n_layer(model); @@ -919,9 +935,28 @@ struct common_init_result common_init_from_params(common_params & params) { common_lora_adapters_apply(lctx, iparams.lora_adapters); } - if (params.sparams.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) { + if (params.sampling.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) { LOG_WRN("%s: warning: model does not have an EOS token, ignoring --ignore-eos\n", __func__); - params.sparams.ignore_eos = false; + params.sampling.ignore_eos = false; + } + + if (params.sampling.ignore_eos) { + for (llama_token i = 0; i < llama_n_vocab(model); i++) { + if (llama_token_is_eog(model, i)) { + LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY); + params.sampling.logit_bias.push_back({i, -INFINITY}); + } + } + } + + if (params.sampling.penalty_last_n == -1) { + LOG_INF("%s: setting penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx)); + params.sampling.penalty_last_n = llama_n_ctx(lctx); + } + + if (params.sampling.dry_penalty_last_n == -1) { + LOG_INF("%s: setting dry_penalty_last_n to ctx_size = %d\n", __func__, llama_n_ctx(lctx)); + params.sampling.dry_penalty_last_n = llama_n_ctx(lctx); } if (params.warmup) { @@ -973,9 +1008,12 @@ void common_lora_adapters_apply(struct llama_context * ctx, std::vector 0) { @@ -1119,7 +1119,6 @@ static bool curl_perform_with_retry(const std::string& url, CURL* curl, int max_ } static bool common_download_file(const std::string & url, const std::string & path, const std::string & hf_token) { - // Initialize libcurl std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> curl(curl_easy_init(), &curl_easy_cleanup); if (!curl) { @@ -1192,11 +1191,13 @@ static bool common_download_file(const std::string & url, const std::string & pa std::string etag; std::string last_modified; }; + common_load_model_from_url_headers headers; + { typedef size_t(*CURLOPT_HEADERFUNCTION_PTR)(char *, size_t, size_t, void *); auto header_callback = [](char * buffer, size_t /*size*/, size_t n_items, void * userdata) -> size_t { - common_load_model_from_url_headers *headers = (common_load_model_from_url_headers *) userdata; + common_load_model_from_url_headers * headers = (common_load_model_from_url_headers *) userdata; static std::regex header_regex("([^:]+): (.*)\r\n"); static std::regex etag_regex("ETag", std::regex_constants::icase); @@ -1333,17 +1334,17 @@ static bool common_download_file(const std::string & url, const std::string & pa }
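For illustration only (not part of the patch): with the loaders now taking std::string, a call site can pass common_params fields directly, mirroring the logic in common_init_from_params above. Here `params` and error handling are assumed:

llama_model_params mparams = common_model_params_to_llama(params);
llama_model * model = nullptr;
if (!params.hf_repo.empty() && !params.hf_file.empty()) {
    // repo + remote file are resolved to a huggingface.co URL internally
    model = common_load_model_from_hf(params.hf_repo, params.hf_file, params.model, params.hf_token, mparams);
} else if (!params.model_url.empty()) {
    model = common_load_model_from_url(params.model_url, params.model, params.hf_token, mparams);
}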
struct llama_model * common_load_model_from_url( - const char * model_url, - const char * path_model, - const char * hf_token, + const std::string & model_url, + const std::string & local_path, + const std::string & hf_token, const struct llama_model_params & params) { // Basic validation of the model_url - if (!model_url || strlen(model_url) == 0) { + if (model_url.empty()) { LOG_ERR("%s: invalid model_url\n", __func__); return NULL; } - if (!common_download_file(model_url, path_model, hf_token)) { + if (!common_download_file(model_url, local_path, hf_token)) { return NULL; } @@ -1354,9 +1355,9 @@ struct llama_model * common_load_model_from_url( /*.no_alloc = */ true, /*.ctx = */ NULL, }; - auto * ctx_gguf = gguf_init_from_file(path_model, gguf_params); + auto * ctx_gguf = gguf_init_from_file(local_path.c_str(), gguf_params); if (!ctx_gguf) { - LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, path_model); + LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, local_path.c_str()); return NULL; } @@ -1375,13 +1376,13 @@ struct llama_model * common_load_model_from_url( // Verify the first split file format // and extract split URL and PATH prefixes { - if (!llama_split_prefix(split_prefix, sizeof(split_prefix), path_model, 0, n_split)) { - LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, path_model, n_split); + if (!llama_split_prefix(split_prefix, sizeof(split_prefix), local_path.c_str(), 0, n_split)) { + LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, local_path.c_str(), n_split); return NULL; } - if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url, 0, n_split)) { - LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model_url, n_split); + if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url.c_str(), 0, n_split)) { + LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model_url.c_str(), n_split); return NULL; } } @@ -1408,14 +1409,14 @@ struct llama_model * common_load_model_from_url( } } - return llama_load_model_from_file(path_model, params); + return llama_load_model_from_file(local_path.c_str(), params); } struct llama_model * common_load_model_from_hf( - const char * repo, - const char * model, - const char * path_model, - const char * hf_token, + const std::string & repo, + const std::string & remote_path, + const std::string & local_path, + const std::string & hf_token, const struct llama_model_params & params) { // construct hugging face model url: // @@ -1429,27 +1430,27 @@ struct llama_model * common_load_model_from_hf( std::string model_url = "https://huggingface.co/"; model_url += repo; model_url += "/resolve/main/"; - model_url += model; + model_url += remote_path; - return common_load_model_from_url(model_url.c_str(), path_model, hf_token, params); + return common_load_model_from_url(model_url, local_path, hf_token, params); } #else struct llama_model * common_load_model_from_url( - const char * /*model_url*/, - const char * /*path_model*/, - const char * /*hf_token*/, + const std::string & /*model_url*/, + const std::string & /*local_path*/, + const std::string & /*hf_token*/, const struct llama_model_params & /*params*/) { LOG_WRN("%s: llama.cpp built without libcurl, downloading from a URL is not supported.\n", __func__); return nullptr; } struct llama_model * common_load_model_from_hf( - const char * /*repo*/, - const char * /*model*/, - const char * /*path_model*/, - const char * /*hf_token*/, + const std::string & /*repo*/, + const std::string & 
/*remote_path*/, + const std::string & /*local_path*/, + const std::string & /*hf_token*/, + const struct llama_model_params & /*params*/) { LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face is not supported.\n", __func__); return nullptr; @@ -1484,6 +1485,66 @@ void common_batch_add( batch.n_tokens++; } +// +// Token utils +// + +size_t common_lcp(const llama_tokens & a, const llama_tokens & b) { + size_t i; + for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {} + + return i; +} + +size_t common_lcs(const llama_tokens & a, const llama_tokens & b) { + // check for empty sequences + if (a.empty() || b.empty()) { + return 0; + } + + // get the lengths of the input sequences + size_t a_len = a.size(); + size_t b_len = b.size(); + + // initialize the maximum length of the longest common substring (LCS) + size_t max_length = 0; + + // use two rows instead of a 2D matrix to optimize space + std::vector<size_t> prev_row(b_len + 1, 0); + std::vector<size_t> curr_row(b_len + 1, 0); + + // iterate through the elements of a + for (size_t i = 1; i <= a_len; i++) { + // iterate through the elements of b + for (size_t j = 1; j <= b_len; j++) { + // if elements at the current positions match + if (a[i - 1] == b[j - 1]) { + // if it's the first element of either sequence, set LCS length to 1 + if (i == 1 || j == 1) { + curr_row[j] = 1; + } else { + // increment LCS length by 1 compared to the previous element + curr_row[j] = prev_row[j - 1] + 1; + } + + // update max_length if necessary + if (curr_row[j] > max_length) { + max_length = curr_row[j]; + } + } else { + // reset LCS length if elements don't match + curr_row[j] = 0; + } + } + + // update the previous row for the next iteration + prev_row = curr_row; + } + + // return the maximum length of the LCS + return max_length; +} + // // Vocab utils // @@ -1720,7 +1781,9 @@ void common_embd_normalize(const float * inp, float * out, int n, int embd_norm) break; case 0: // max absolute for (int i = 0; i < n; i++) { - if (sum < std::abs(inp[i])) sum = std::abs(inp[i]); + if (sum < std::abs(inp[i])) { + sum = std::abs(inp[i]); + } } sum /= 32760.0; // make an int16 range break; diff --git a/common/common.h b/common/common.h index 7977cc7a99a78..1d2bd932c211d 100644 --- a/common/common.h +++ b/common/common.h @@ -33,11 +33,13 @@ struct common_lora_adapter_container : common_lora_adapter_info { struct llama_lora_adapter * adapter; }; +using llama_tokens = std::vector<llama_token>; + // build info extern int LLAMA_BUILD_NUMBER; -extern char const * LLAMA_COMMIT; -extern char const * LLAMA_COMPILER; -extern char const * LLAMA_BUILD_TARGET; +extern const char * LLAMA_COMMIT; +extern const char * LLAMA_COMPILER; +extern const char * LLAMA_BUILD_TARGET; struct common_control_vector_load_info; @@ -78,6 +80,7 @@ enum llama_example { LLAMA_EXAMPLE_LLAVA, LLAMA_EXAMPLE_LOOKUP, LLAMA_EXAMPLE_PARALLEL, + LLAMA_EXAMPLE_TTS, LLAMA_EXAMPLE_COUNT, }; @@ -93,6 +96,7 @@ enum common_sampler_type { COMMON_SAMPLER_TYPE_TEMPERATURE = 7, COMMON_SAMPLER_TYPE_XTC = 8, COMMON_SAMPLER_TYPE_INFILL = 9, + COMMON_SAMPLER_TYPE_PENALTIES = 10, }; // dimensionality reduction methods, used by cvector-generator @@ -101,8 +105,8 @@ enum dimre_method { DIMRE_METHOD_MEAN, }; -// sampler parameters -struct common_sampler_params { +// sampling parameters +struct common_params_sampling { uint32_t seed = LLAMA_DEFAULT_SEED; // the seed used to initialize llama_sampler int32_t n_prev = 64; // number of previous tokens to remember @@ -128,14 +132,15 @@ struct common_sampler_params { int32_t 
mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0 float mirostat_tau = 5.00f; // target entropy float mirostat_eta = 0.10f; // learning rate - bool penalize_nl = false; // consider newlines as a repeatable token bool ignore_eos = false; bool no_perf = false; // disable performance metrics + bool timing_per_token = false; std::vector<std::string> dry_sequence_breakers = {"\n", ":", "\"", "*"}; // default sequence breakers for DRY std::vector<common_sampler_type> samplers = { + COMMON_SAMPLER_TYPE_PENALTIES, COMMON_SAMPLER_TYPE_DRY, COMMON_SAMPLER_TYPE_TOP_K, COMMON_SAMPLER_TYPE_TYPICAL_P, @@ -153,21 +158,39 @@ struct common_sampler_params { std::string print() const; }; +struct common_params_speculative { + std::vector<ggml_backend_dev_t> devices; // devices to use for offloading + + int32_t n_ctx = 0; // draft context size + int32_t n_max = 16; // maximum number of tokens to draft during speculative decoding + int32_t n_min = 5; // minimum number of draft tokens to use for speculative decoding + int32_t n_gpu_layers = -1; // number of layers to store in VRAM for the draft model (-1 - use default) + float p_split = 0.1f; // speculative decoding split probability + float p_min = 0.9f; // minimum speculative decoding probability (greedy) + + struct cpu_params cpuparams; + struct cpu_params cpuparams_batch; + + std::string model = ""; // draft model for speculative decoding // NOLINT +}; + +struct common_params_vocoder { + std::string hf_repo = ""; // HF repo // NOLINT + std::string hf_file = ""; // HF file // NOLINT + + std::string model = ""; // model path // NOLINT + std::string model_url = ""; // model url to download // NOLINT +}; + struct common_params { int32_t n_predict = -1; // new tokens to predict int32_t n_ctx = 4096; // context size int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS) int32_t n_ubatch = 512; // physical batch size for prompt processing (must be >=32 to use BLAS) int32_t n_keep = 0; // number of tokens to keep from initial prompt - int32_t n_draft = 5; // number of tokens to draft during speculative decoding int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited) int32_t n_parallel = 1; // number of parallel sequences to decode int32_t n_sequences = 1; // number of sequences to decode - float p_split = 0.1f; // speculative decoding split probability - int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) - int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default) - int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors - float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs int32_t grp_attn_n = 1; // group-attention factor int32_t grp_attn_w = 512; // group-attention width int32_t n_print = -1; // print token count every n tokens (-1 = disabled) @@ -180,26 +203,33 @@ struct common_params { int32_t yarn_orig_ctx = 0; // YaRN original context length float defrag_thold = 0.1f; // KV cache defragmentation threshold + // offload params + std::vector<ggml_backend_dev_t> devices; // devices to use for offloading + + int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) + int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors + float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs + + enum llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs + struct cpu_params cpuparams; struct cpu_params cpuparams_batch; - struct cpu_params draft_cpuparams; - struct cpu_params draft_cpuparams_batch;
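A hypothetical-values sketch (not part of the patch) of how the regrouped structs above are meant to be used; each knob that used to live directly on common_params now sits in a dedicated sub-struct:

common_params params;
params.sampling.temp     = 0.8f;                // formerly params.sparams.temp
params.speculative.n_max = 16;                  // formerly params.n_draft
params.speculative.model = "draft.gguf";        // formerly params.model_draft (path is made up)
params.vocoder.model     = "wavtokenizer.gguf"; // new TTS vocoder slot (path is made up)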
ggml_backend_sched_eval_callback cb_eval = nullptr; void * cb_eval_user_data = nullptr; ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED; - enum llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED; enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings enum llama_attention_type attention_type = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings - struct common_sampler_params sparams; + struct common_params_sampling sampling; + struct common_params_speculative speculative; + struct common_params_vocoder vocoder; std::string model = ""; // model path // NOLINT - std::string model_draft = ""; // draft model for speculative decoding // NOLINT - std::string model_alias = "unknown"; // model alias // NOLINT + std::string model_alias = ""; // model alias // NOLINT std::string model_url = ""; // model url to download // NOLINT std::string hf_token = ""; // HF token // NOLINT std::string hf_repo = ""; // HF repo // NOLINT @@ -270,8 +300,8 @@ struct common_params { bool warmup = true; // warmup run bool check_tensors = false; // validate tensor data - std::string cache_type_k = "f16"; // KV cache data type for the K - std::string cache_type_v = "f16"; // KV cache data type for the V + ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K + ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V // multimodal models (see examples/llava) std::string mmproj = ""; // path to multimodal projector // NOLINT @@ -421,6 +451,11 @@ std::vector<std::string> string_split(const std::string & input, ch return parts; } +static bool string_starts_with(const std::string & str, + const std::string & prefix) { // While we wait for C++20's std::string::starts_with... + return str.rfind(prefix, 0) == 0; +}
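A quick illustration (not part of the patch) of the string_starts_with helper just added; it is a plain prefix test built on rfind(prefix, 0):

bool is_draft_flag = string_starts_with("--draft-max", "--draft"); // true
bool not_a_prefix  = string_starts_with("max--draft", "--draft");  // false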
bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides); void string_process_escapes(std::string & input); @@ -451,17 +486,28 @@ struct common_init_result { struct common_init_result common_init_from_params(common_params & params); -struct llama_model_params common_model_params_to_llama (const common_params & params); +struct llama_model_params common_model_params_to_llama ( common_params & params); struct llama_context_params common_context_params_to_llama(const common_params & params); struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_params & params); -struct llama_model * common_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params); -struct llama_model * common_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct llama_model_params & params); +struct llama_model * common_load_model_from_url( + const std::string & model_url, + const std::string & local_path, + const std::string & hf_token, + const struct llama_model_params & params); +struct llama_model * common_load_model_from_hf( + const std::string & repo, + const std::string & remote_path, + const std::string & local_path, + const std::string & hf_token, + const struct llama_model_params & params); // clear LoRA adapters from context, then apply new list of adapters void common_lora_adapters_apply(struct llama_context * ctx, std::vector<common_lora_adapter_container> & lora_adapters); +// // Batch utils +// void common_batch_clear(struct llama_batch & batch); @@ -472,6 +518,16 @@ void common_batch_add( const std::vector<llama_seq_id> & seq_ids, bool logits); +// +// Token utils +// + +// longest common prefix +size_t common_lcp(const llama_tokens & a, const llama_tokens & b); + +// longest common substring +size_t common_lcs(const llama_tokens & a, const llama_tokens & b); + // // Vocab utils // @@ -551,7 +607,8 @@ void common_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_si // Embedding utils // -void common_embd_normalize(const float * inp, float * out, int n, int embd_norm = 2); +// TODO: replace embd_norm with an enum +void common_embd_normalize(const float * inp, float * out, int n, int embd_norm); float common_embd_similarity_cos(const float * embd1, const float * embd2, int n); diff --git a/common/sampling.cpp b/common/sampling.cpp index 7922fde47d369..e83a971c74d52 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -99,7 +99,7 @@ struct ring_buffer { }; struct common_sampler { - common_sampler_params params; + common_params_sampling params; struct llama_sampler * grmr; struct llama_sampler * chain; @@ -125,7 +125,7 @@ struct common_sampler { } }; -std::string common_sampler_params::print() const { +std::string common_params_sampling::print() const { char result[1024]; snprintf(result, sizeof(result), @@ -141,7 +141,7 @@ std::string common_sampler_params::print() const { return std::string(result); } -struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_sampler_params & params) { +struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_params_sampling & params) { llama_sampler_chain_params lparams = llama_sampler_chain_default_params(); lparams.no_perf = params.no_perf; @@ -161,32 +161,20 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co params.logit_bias.size(), params.logit_bias.data()));
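The common_lcp/common_lcs helpers declared in common.h above are easiest to see on tiny, made-up token sequences (illustrative values only):

llama_tokens a = { 10, 20, 30, 40 };
llama_tokens b = { 10, 20, 99, 30, 40 };
size_t n_prefix = common_lcp(a, b); // 2: the shared prefix { 10, 20 }
size_t n_common = common_lcs(a, b); // 2: the longest common contiguous run, e.g. { 30, 40 }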
- llama_sampler_chain_add(result->chain, - llama_sampler_init_penalties( - llama_n_vocab (model), - llama_token_eos(model), - llama_token_nl (model), - params.penalty_last_n, - params.penalty_repeat, - params.penalty_freq, - params.penalty_present, - params.penalize_nl, - params.ignore_eos)); - if (params.mirostat == 0) { for (const auto & cnstr : params.samplers) { switch (cnstr) { - case COMMON_SAMPLER_TYPE_DRY: + case COMMON_SAMPLER_TYPE_DRY: { - std::vector<const char*> c_breakers; + std::vector<const char *> c_breakers; c_breakers.reserve(params.dry_sequence_breakers.size()); - for (const auto& str : params.dry_sequence_breakers) { + for (const auto & str : params.dry_sequence_breakers) { c_breakers.push_back(str.c_str()); } llama_sampler_chain_add(result->chain, llama_sampler_init_dry (model, params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size())); } - break; + break; case COMMON_SAMPLER_TYPE_TOP_K: llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k)); break; @@ -208,6 +196,9 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co case COMMON_SAMPLER_TYPE_INFILL: llama_sampler_chain_add(result->chain, llama_sampler_init_infill (model)); break; + case COMMON_SAMPLER_TYPE_PENALTIES: + llama_sampler_chain_add(result->chain, llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present)); + break; default: GGML_ASSERT(false && "unknown sampler type"); } @@ -320,6 +311,45 @@ llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_co return cur_p.data[cur_p.selected].id; } +std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector<int> & idxs, const llama_tokens & draft, bool grammar_first) { + GGML_ASSERT(idxs.size() == draft.size() + 1 && "idxs.size() must be draft.size() + 1"); + + std::vector<llama_token> result; + result.reserve(idxs.size()); + + size_t i = 0; + for (; i < draft.size(); i++) { + const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first); + + common_sampler_accept(gsmpl, id, true); + + result.push_back(id); + + if (draft[i] != id) { + break; + } + } + + if (i == draft.size()) { + const llama_token id = common_sampler_sample(gsmpl, ctx, idxs[i], grammar_first); + + common_sampler_accept(gsmpl, id, true); + + result.push_back(id); + } + + return result; +} + +std::vector<llama_token> common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first) { + std::vector<int> idxs(draft.size() + 1); + for (size_t i = 0; i < idxs.size(); ++i) { + idxs[i] = i; + } + + return common_sampler_sample_and_accept_n(gsmpl, ctx, idxs, draft, grammar_first); +} + uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl) { return llama_sampler_get_seed(gsmpl->chain); } @@ -376,6 +406,7 @@ char common_sampler_type_to_chr(enum common_sampler_type cnstr) { case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't'; case COMMON_SAMPLER_TYPE_XTC: return 'x'; case COMMON_SAMPLER_TYPE_INFILL: return 'i'; + case COMMON_SAMPLER_TYPE_PENALTIES: return 'e'; default : return '?'; } } @@ -390,6 +421,7 @@ std::string common_sampler_type_to_str(enum common_sampler_type cnstr) { case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature"; case COMMON_SAMPLER_TYPE_XTC: return "xtc"; case COMMON_SAMPLER_TYPE_INFILL: return "infill"; + case COMMON_SAMPLER_TYPE_PENALTIES: return "penalties"; default : 
return ""; } } @@ -404,6 +436,7 @@ std::vector common_sampler_types_from_names(const std::vect { "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE }, { "xtc", COMMON_SAMPLER_TYPE_XTC }, { "infill", COMMON_SAMPLER_TYPE_INFILL }, + { "penalties", COMMON_SAMPLER_TYPE_PENALTIES }, }; // since samplers names are written multiple ways @@ -450,6 +483,7 @@ std::vector common_sampler_types_from_chars(const std::stri { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE }, { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC), COMMON_SAMPLER_TYPE_XTC }, { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_INFILL), COMMON_SAMPLER_TYPE_INFILL }, + { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_PENALTIES), COMMON_SAMPLER_TYPE_PENALTIES }, }; std::vector samplers; diff --git a/common/sampling.h b/common/sampling.h index d37f25ad37c4a..348911b18888b 100644 --- a/common/sampling.h +++ b/common/sampling.h @@ -36,7 +36,7 @@ struct common_sampler; // llama_sampler API overloads -struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_sampler_params & params); +struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_params_sampling & params); void common_sampler_free(struct common_sampler * gsmpl); @@ -60,6 +60,27 @@ void common_perf_print(const struct llama_context * ctx, const struct common_sam // llama_token common_sampler_sample(struct common_sampler * gsmpl, struct llama_context * ctx, int idx, bool grammar_first = false); +// generalized version of common_sampler_sample +// +// will cross-reference the sampled tokens with a batch of draft tokens and accept those that match +// if the sampler disagrees at some point, we stop and return the accepted tokens up to now +// +// common_sampler_sample_n(gsmpl, ctx, { idx }, {}); +// +// is equivalent to +// +// common_sampler_sample(gsmpl, ctx, idx); +// common_sampler_accept(gsmpl, token, true); +// +// requires: idxs.size() == draft.size() + 1 +// +// returns at least 1 token, up to idxs.size() +// +std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const std::vector & idxs, const llama_tokens & draft, bool grammar_first = false); + +// assume idxs == [ 0, 1, 2, ..., draft.size() ] +std::vector common_sampler_sample_and_accept_n(struct common_sampler * gsmpl, struct llama_context * ctx, const llama_tokens & draft, bool grammar_first = false); + uint32_t common_sampler_get_seed(const struct common_sampler * gsmpl); // helpers diff --git a/common/speculative.cpp b/common/speculative.cpp new file mode 100644 index 0000000000000..3fcbb0020b6af --- /dev/null +++ b/common/speculative.cpp @@ -0,0 +1,274 @@ +#include "speculative.h" + +#include "log.h" +#include "common.h" +#include "sampling.h" + +#include + +#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 128 +#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5 + +struct common_speculative { + struct llama_context * ctx; + struct common_sampler * smpl; + + llama_batch batch; + llama_tokens prompt; +}; + +struct common_speculative * common_speculative_init( + struct llama_context * ctx_dft) { + auto * result = new common_speculative { + /* .ctx = */ ctx_dft, + /* .smpl = */ nullptr, + /* .batch = */ llama_batch_init(llama_n_batch(ctx_dft), 0, 1), + /* .prompt = */ {}, + }; + + // TODO: optimize or pass from outside? 
+#if 0 + { + common_params_sampling params; + params.no_perf = false; + + params.top_k = 40; + params.top_p = 0.9; + + params.samplers = { + COMMON_SAMPLER_TYPE_TOP_K, + COMMON_SAMPLER_TYPE_TOP_P, + COMMON_SAMPLER_TYPE_INFILL, + }; + + result->smpl = common_sampler_init(llama_get_model(ctx_dft), params); + } +#else + { + common_params_sampling params; + params.no_perf = false; + + params.top_k = 10; + + params.samplers = { + COMMON_SAMPLER_TYPE_TOP_K, + }; + + result->smpl = common_sampler_init(llama_get_model(ctx_dft), params); + } +#endif + + return result; +} + +void common_speculative_free(struct common_speculative * spec) { + if (spec == nullptr) { + return; + } + + common_sampler_free(spec->smpl); + + llama_batch_free(spec->batch); + + delete spec; +} + +bool common_speculative_are_compatible( + const struct llama_context * ctx_tgt, + const struct llama_context * ctx_dft) { + const struct llama_model * model_tgt = llama_get_model(ctx_tgt); + const struct llama_model * model_dft = llama_get_model(ctx_dft); + + const enum llama_vocab_type vocab_type_tgt = llama_vocab_type(model_tgt); + LOG_DBG("%s: vocab_type tgt: %d\n", __func__, vocab_type_tgt); + + const enum llama_vocab_type vocab_type_dft = llama_vocab_type(model_dft); + LOG_DBG("%s: vocab_type dft: %d\n", __func__, vocab_type_dft); + + if (vocab_type_tgt != vocab_type_dft) { + LOG_ERR("%s: draft model vocab type must match target model to use speculation but " + "vocab_type_dft = %d while vocab_type_tgt = %d\n", __func__, vocab_type_dft, vocab_type_tgt); + return false; + } + + if (llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) || + llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) || + llama_token_bos(model_tgt) != llama_token_bos(model_dft) || + llama_token_eos(model_tgt) != llama_token_eos(model_dft)) { + LOG_ERR("%s: draft model special tokens must match target model to use speculation\n", __func__); + LOG_ERR("%s: tgt: bos = %d (%d), eos = %d (%d)\n", __func__, llama_token_bos(model_tgt), llama_add_bos_token(model_tgt), llama_token_eos(model_tgt), llama_add_eos_token(model_tgt)); + LOG_ERR("%s: dft: bos = %d (%d), eos = %d (%d)\n", __func__, llama_token_bos(model_dft), llama_add_bos_token(model_dft), llama_token_eos(model_dft), llama_add_eos_token(model_dft)); + return false; + } + + { + const int n_vocab_tgt = llama_n_vocab(model_tgt); + const int n_vocab_dft = llama_n_vocab(model_dft); + + const int vocab_diff = std::abs(n_vocab_tgt - n_vocab_dft); + + if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) { + LOG_ERR("%s: draft model vocab must closely match target model to use speculation but " + "target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n", + __func__, n_vocab_tgt, llama_n_vocab(model_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE); + return false; + } + + for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) { + const char * token_text_tgt = llama_token_get_text(model_tgt, i); + const char * token_text_dft = llama_token_get_text(model_dft, i); + if (std::strcmp(token_text_tgt, token_text_dft) != 0) { + LOG_ERR("%s: draft model vocab must match target model to use speculation but " + "token %d content differs - target '%s', draft '%s'\n", __func__, i, + common_token_to_piece(ctx_tgt, i).c_str(), + common_token_to_piece(ctx_dft, i).c_str()); + return false; + } + } + } + + return true; +} + +llama_tokens common_speculative_gen_draft( + struct common_speculative * spec, + struct common_speculative_params params, + const llama_tokens & 
prompt_tgt, + llama_token id_last) { + auto & batch = spec->batch; + auto & ctx = spec->ctx; + auto & smpl = spec->smpl; + auto & prompt = spec->prompt; + + int reuse_i = 0; + int reuse_n = 0; + + const int n_ctx = llama_n_ctx(ctx) - params.n_draft; + + const int i_start = std::max(0, (int) prompt_tgt.size() - n_ctx); + + // reuse as much as possible from the old draft context + // ideally, the draft context should be as big as the target context and we will always reuse the entire prompt + for (int i = 0; i < (int) prompt.size(); ++i) { + int cur = 0; + while (i_start + cur < (int) prompt_tgt.size() && + i + cur < (int) prompt.size() && + prompt_tgt[i_start + cur] == prompt[i + cur]) { + cur++; + } + + if ((cur >= params.n_reuse || n_ctx >= (int) prompt_tgt.size()) && cur > reuse_n) { + reuse_i = i; + reuse_n = cur; + } + } + + LOG_DBG("%s: reuse_i = %d, reuse_n = %d, prompt = %d\n", __func__, reuse_i, reuse_n, (int) prompt.size()); + + llama_tokens result; + result.reserve(params.n_draft); + + if (reuse_n == 0) { + llama_kv_cache_clear(ctx); + + prompt.clear(); + } else { + // this happens when a previous draft has been discarded (for example, due to being too small), but the + // target model agreed with it. in this case, we simply pass back the previous results to save compute + if (reuse_i + reuse_n < (int) prompt.size() && prompt[reuse_i + reuse_n] == id_last) { + for (int i = reuse_i + reuse_n + 1; i < (int) prompt.size(); ++i) { + result.push_back(prompt[i]); + + if (params.n_draft <= (int) result.size()) { + break; + } + } + + return result; + } + + if (reuse_i > 0) { + llama_kv_cache_seq_rm (ctx, 0, 0, reuse_i); + llama_kv_cache_seq_add(ctx, 0, reuse_i, -1, -reuse_i); + + prompt.erase(prompt.begin(), prompt.begin() + reuse_i); + } + + if (reuse_n < (int) prompt.size()) { + llama_kv_cache_seq_rm (ctx, 0, reuse_n, -1); + + prompt.erase(prompt.begin() + reuse_n, prompt.end()); + } + } + + // prepare a batch to evaluate any new tokens in the prompt + common_batch_clear(batch); + + for (size_t i = i_start + reuse_n; i < prompt_tgt.size(); ++i) { + //LOG_DBG("i = %d, i_start = %d, reuse_n = %d, i - i_start = %d, id = %6d\n", i, i_start, reuse_n, i - i_start, prompt_tgt[i]); + common_batch_add(batch, prompt_tgt[i], i - i_start, { 0 }, false); + + prompt.push_back(prompt_tgt[i]); + } + + // we should rarely end-up here during normal decoding + if (batch.n_tokens > 0) { + //LOG_DBG("%s: draft prompt batch: %s\n", __func__, string_from(ctx, batch).c_str()); + + llama_decode(ctx, batch); + } + + const llama_pos n_past = prompt.size(); + + LOG_DBG("%s: n_past = %d\n", __func__, n_past); + + common_batch_clear(batch); + common_batch_add (batch, id_last, n_past, { 0 }, true); + + prompt.push_back(id_last); + + //LOG_DBG("%s: draft prompt: %s\n", __func__, string_from(ctx, prompt).c_str()); + + llama_decode(ctx, batch); + + common_sampler_reset(smpl); + + // sample n_draft tokens from the draft model + for (int i = 0; i < params.n_draft; ++i) { + common_batch_clear(batch); + + common_sampler_sample(smpl, ctx, 0, true); + + const auto * cur_p = common_sampler_get_candidates(smpl); + + for (int k = 0; k < std::min(3, (int) cur_p->size); ++k) { + LOG_DBG(" - draft candidate %3d, pos %3d: %6d (%8.3f) '%s'\n", + k, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx, cur_p->data[k].id).c_str()); + } + + // add drafted token for each sequence + const llama_token id = cur_p->data[0].id; + + // only collect very high-confidence draft tokens + if (cur_p->data[0].p < params.p_min) { + 
break;
+        }
+
+        common_sampler_accept(smpl, id, true);
+
+        result.push_back(id);
+
+        if (params.n_draft <= (int) result.size()) {
+            break;
+        }
+
+        common_batch_add(batch, id, n_past + i + 1, { 0 }, true);
+
+        // evaluate the drafted tokens on the draft model
+        llama_decode(ctx, batch);
+
+        prompt.push_back(id);
+    }
+
+    return result;
+}
diff --git a/common/speculative.h b/common/speculative.h
new file mode 100644
index 0000000000000..50ec0344618aa
--- /dev/null
+++ b/common/speculative.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include "llama.h"
+#include "common.h"
+
+struct common_speculative;
+
+struct common_speculative_params {
+    int n_draft = 16;  // max drafted tokens
+    int n_reuse = 256;
+
+    float p_min = 0.9f; // min probability required to accept a token in the draft
+};
+
+struct common_speculative * common_speculative_init(struct llama_context * ctx_dft);
+
+void common_speculative_free(struct common_speculative * spec);
+
+bool common_speculative_are_compatible(
+        const struct llama_context * ctx_tgt,
+        const struct llama_context * ctx_dft);
+
+// sample up to n_draft tokens from the draft model, continuing prompt + id_last
+llama_tokens common_speculative_gen_draft(
+        struct common_speculative * spec,
+        struct common_speculative_params params,
+        const llama_tokens & prompt,
+        llama_token id_last);
diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 39afa5ef4f272..ecd69be6bbb07 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -221,17 +221,17 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_context_length(n_ctx)
         logger.info(f"gguf: context length = {n_ctx}")

-        n_embd = self.find_hparam(["hidden_size", "n_embd"])
-        self.gguf_writer.add_embedding_length(n_embd)
-        logger.info(f"gguf: embedding length = {n_embd}")
+        if (n_embd := self.find_hparam(["hidden_size", "n_embd"], optional=True)) is not None:
+            self.gguf_writer.add_embedding_length(n_embd)
+            logger.info(f"gguf: embedding length = {n_embd}")

         if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None:
             self.gguf_writer.add_feed_forward_length(n_ff)
             logger.info(f"gguf: feed forward length = {n_ff}")

-        n_head = self.find_hparam(["num_attention_heads", "n_head"])
-        self.gguf_writer.add_head_count(n_head)
-        logger.info(f"gguf: head count = {n_head}")
+        if (n_head := self.find_hparam(["num_attention_heads", "n_head"], optional=True)) is not None:
+            self.gguf_writer.add_head_count(n_head)
+            logger.info(f"gguf: head count = {n_head}")

         if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None:
             self.gguf_writer.add_head_count_kv(n_head_kv)
@@ -296,7 +296,9 @@ def prepare_tensors(self):
                     break

                 for new_name, data_torch in (self.modify_tensors(data_torch, name, bid)):
-                    data = data_torch.squeeze().numpy()
+                    # TODO: why do we squeeze here?
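Stepping back to the speculative API defined in `common/speculative.h` above: a minimal usage sketch of how a caller might drive it (hedged - the `draft_once` wrapper, the context names and the surrounding verification loop are illustrative assumptions, not part of this patch):

```cpp
// illustrative sketch only - not part of this patch
#include "speculative.h"

// propose a continuation of `prompt_tgt` after the last accepted token `id_last`
static llama_tokens draft_once(llama_context * ctx_tgt, llama_context * ctx_dft,
                               const llama_tokens & prompt_tgt, llama_token id_last) {
    // the draft model must share vocab type, special tokens and (mostly) vocab content
    if (!common_speculative_are_compatible(ctx_tgt, ctx_dft)) {
        return {};
    }

    common_speculative * spec = common_speculative_init(ctx_dft);

    common_speculative_params params;
    params.n_draft = 16;   // propose at most 16 tokens per call
    params.p_min   = 0.9f; // stop drafting at the first low-confidence token

    // the caller then evaluates the returned tokens with a single target-model
    // decode and accepts the longest prefix that matches the target's sampling
    llama_tokens draft = common_speculative_gen_draft(spec, params, prompt_tgt, id_last);

    common_speculative_free(spec);

    return draft;
}
```

In a real decode loop the `common_speculative` state would be created once and reused across calls, so that the prompt-reuse logic in `common_speculative_gen_draft()` can keep most of the draft KV cache warm between iterations.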
+ # data = data_torch.squeeze().numpy() + data = data_torch.numpy() # if data ends up empty, it means data_torch was a scalar tensor -> restore if len(data.shape) == 0: @@ -324,6 +326,8 @@ def prepare_tensors(self): gguf.MODEL_TENSOR.TIME_MIX_W2, gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1, gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2, + gguf.MODEL_TENSOR.POSNET_NORM1, + gguf.MODEL_TENSOR.POSNET_NORM2, ) ) or not new_name.endswith(".weight") @@ -658,6 +662,15 @@ def get_vocab_base_pre(self, tokenizer) -> str: if chkhsh == "60824e3c0d9401f89943cbb2fff727f0e2d4c545ba4df2d6e4f09a6db0f5b450": # ref: https://huggingface.co/facebook/chameleon-7b res = "chameleon" + if chkhsh == "1431a23e583c97432bc230bff598d103ddb5a1f89960c8f1d1051aaa944d0b35": + # ref: https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0 + res = "minerva-7b" + if chkhsh == "8b5a93ed704057481f240da0be7e7dca721d7f8f4755263b6807227a2cbeae65": + # ref: https://huggingface.co/sentence-transformers/stsb-roberta-base + res = "roberta-bpe" + if chkhsh == "ad851be1dba641f2e3711822f816db2c265f788b37c63b4e1aeacb9ee92de8eb": + # ref: https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct + res = "gigachat" if res is None: logger.warning("\n") @@ -680,6 +693,9 @@ def get_vocab_base_pre(self, tokenizer) -> str: return res # Marker: End get_vocab_base_pre + def _set_vocab_none(self) -> None: + self.gguf_writer.add_tokenizer_model("none") + def _set_vocab_gpt2(self) -> None: tokens, toktypes, tokpre = self.get_vocab_base() self.gguf_writer.add_tokenizer_model("gpt2") @@ -1831,29 +1847,40 @@ class MiniCPMModel(Model): model_arch = gguf.MODEL_ARCH.MINICPM def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) + super().set_gguf_parameters() + embedding_scale = float(self.hparams["scale_emb"]) + self.gguf_writer.add_embedding_scale(embedding_scale) + logger.info(f"gguf: (minicpm) embedding_scale = {embedding_scale}") + residual_scale = self.hparams["scale_depth"] / self.hparams["num_hidden_layers"] ** 0.5 + self.gguf_writer.add_residual_scale(residual_scale) + logger.info(f"gguf: (minicpm) residual_scale = {residual_scale}") + logit_scale = self.hparams["hidden_size"] / self.hparams["dim_model_base"] + self.gguf_writer.add_logit_scale(logit_scale) + logger.info(f"gguf: (minicpm) logit_scale = {logit_scale}") + if self.hparams.get("rope_scaling") is not None: + if self.hparams["rope_scaling"].get("type") == "longrope": + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LONGROPE) + logger.info(f"gguf: (minicpm) rope_scaling_type = {gguf.RopeScalingType.LONGROPE}") - def set_vocab(self): - self._set_vocab_llama_hf() + def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: + rope_dims = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] - def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: - if n_kv_head is not None 
and n_head != n_kv_head:
-            n_head //= n_kv_head
+        rope_scaling = self.find_hparam(['rope_scaling'], True)
+        if rope_scaling is not None:
+            long_factors = rope_scaling.get('long_factor', None)
+            short_factors = rope_scaling.get('short_factor', None)

-        return (
-            weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
-            .swapaxes(1, 2)
-            .reshape(weights.shape)
-        )
+            if long_factors is None or short_factors is None:
+                raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling.short_factor')
+
+            if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2:
+                raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}')
+
+            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_LONG), torch.tensor(long_factors, dtype=torch.float32))
+            yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32))
+
+    def set_vocab(self):
+        self._set_vocab_sentencepiece()

     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         del bid  # unused
@@ -1863,9 +1890,9 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter

         # HF models permute some of the tensors, so we need to undo that
         if name.endswith(("q_proj.weight")):
-            data_torch = self._reverse_hf_permute(data_torch, n_head, n_head)
+            data_torch = LlamaModel.permute(data_torch, n_head, n_head)
         if name.endswith(("k_proj.weight")):
-            data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head)
+            data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head)

         return [(self.map_tensor_name(name), data_torch)]

@@ -1975,6 +2002,75 @@ def set_vocab(self):
         except FileNotFoundError:
             self._set_vocab_gpt2()

+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "yarn":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
+
+
+@Model.register("Qwen2VLForConditionalGeneration")
+class Qwen2VLModel(Model):
+    model_arch = gguf.MODEL_ARCH.QWEN2VL
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+        mrope_section = self.hparams["rope_scaling"]["mrope_section"]
+        mrope_section += [0] * max(0, 4 - len(mrope_section))
+        self.gguf_writer.add_rope_dimension_sections(mrope_section)
+
+    def set_vocab(self):
+        try:
+            self._set_vocab_sentencepiece()
+        except FileNotFoundError:
+            self._set_vocab_gpt2()
+
+    def get_tensors(self) -> Iterator[tuple[str, Tensor]]:
+        for name, data in super().get_tensors():
+            if name.startswith("visual."):
+                continue
+            yield name, data
+
+
+@Model.register("WavTokenizerDec")
+class WavTokenizerDecModel(Model):
+    model_arch = gguf.MODEL_ARCH.WAVTOKENIZER_DEC
+
+    def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        del bid  # unused
+
+        if \
+                name.endswith("codebook.cluster_size") or \
+                name.endswith("codebook.embed_avg") or \
+                name.endswith("codebook.inited"):
+            logger.debug(f"Skipping {name!r}")
+            return []
+
+        logger.info(f"{self.map_tensor_name(name)} -> {data_torch.shape}")
+
+        return [(self.map_tensor_name(name), data_torch)]
+
+    def set_vocab(self):
+        self._set_vocab_none()
+
+    def
set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_vocab_size (self.hparams["vocab_size"]) + self.gguf_writer.add_features_length (self.hparams["n_embd_features"]) + self.gguf_writer.add_feed_forward_length(self.hparams["n_ff"]) + self.gguf_writer.add_group_norm_eps (self.hparams["group_norm_epsilon"]) + self.gguf_writer.add_group_norm_groups (self.hparams["group_norm_groups"]) + + self.gguf_writer.add_posnet_embedding_length(self.hparams["posnet"]["n_embd"]) + self.gguf_writer.add_posnet_block_count (self.hparams["posnet"]["n_layer"]) + + self.gguf_writer.add_convnext_embedding_length(self.hparams["convnext"]["n_embd"]) + self.gguf_writer.add_convnext_block_count (self.hparams["convnext"]["n_layer"]) + + self.gguf_writer.add_causal_attention(False) + @Model.register("Qwen2MoeForCausalLM") class Qwen2MoeModel(Model): @@ -2104,6 +2200,15 @@ class Phi3MiniModel(Model): model_arch = gguf.MODEL_ARCH.PHI3 def set_vocab(self): + # Phi-4 model uses GPT2Tokenizer + tokenizer_config_file = self.dir_model / 'tokenizer_config.json' + if tokenizer_config_file.is_file(): + with open(tokenizer_config_file, "r", encoding="utf-8") as f: + tokenizer_config_json = json.load(f) + tokenizer_class = tokenizer_config_json['tokenizer_class'] + if tokenizer_class == 'GPT2Tokenizer': + return self._set_vocab_gpt2() + from sentencepiece import SentencePieceProcessor tokenizer_path = self.dir_model / 'tokenizer.model' @@ -2220,7 +2325,11 @@ def set_gguf_parameters(self): self.gguf_writer.add_rope_dimension_count(rope_dims) self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"])) self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"])) + sliding_window = self.hparams.get("sliding_window") + # use zero value of sliding_window to distinguish Phi-4 from other PHI3 models + if sliding_window is None: + sliding_window = 0 + self.gguf_writer.add_sliding_window(sliding_window) def generate_extra_tensors(self) -> Iterable[tuple[str, Tensor]]: n_embd = self.find_hparam(["hidden_size", "n_embd"]) @@ -2560,7 +2669,8 @@ def set_vocab(self): # we need this to validate the size of the token_type embeddings # though currently we are passing all zeros to the token_type embeddings - self.gguf_writer.add_token_type_count(2) # "Sequence A" or "Sequence B" + # "Sequence A" or "Sequence B" + self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1)) # convert to phantom space vocab def phantom(tok): @@ -2591,6 +2701,51 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(self.map_tensor_name(name), data_torch)] +@Model.register("RobertaModel") +class RobertaModel(BertModel): + model_arch = gguf.MODEL_ARCH.BERT + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # we need the pad_token_id to know how to chop down position_embd matrix + if (pad_token_id := self.hparams.get("pad_token_id")) is not None: + self._position_offset = 1 + pad_token_id + if "max_position_embeddings" in self.hparams: + self.hparams["max_position_embeddings"] -= self._position_offset + else: + self._position_offset = None + + def set_vocab(self): + """Support BPE tokenizers for roberta models""" + bpe_tok_path = self.dir_model / "tokenizer.json" + if bpe_tok_path.exists(): + self._set_vocab_gpt2() + self.gguf_writer.add_add_bos_token(True) + self.gguf_writer.add_add_eos_token(True) + + # we need this to validate the size of the token_type embeddings + # though currently we 
are passing all zeros to the token_type embeddings + # "Sequence A" or "Sequence B" + self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1)) + + else: + return super().set_vocab() + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # if name starts with "roberta.", remove the prefix + # e.g. https://huggingface.co/BAAI/bge-reranker-v2-m3/tree/main + if name.startswith("roberta."): + name = name[8:] + + # position embeddings start at pad_token_id + 1, so just chop down the weight tensor + if name == "embeddings.position_embeddings.weight": + if self._position_offset is not None: + data_torch = data_torch[self._position_offset:,:] + + return super().modify_tensors(data_torch, name, bid) + + @Model.register("NomicBertModel") class NomicBertModel(BertModel): model_arch = gguf.MODEL_ARCH.NOMIC_BERT @@ -2707,7 +2862,7 @@ def set_vocab(self): self.gguf_writer.add_token_scores(scores) self.gguf_writer.add_token_types(toktypes) self.gguf_writer.add_add_space_prefix(add_prefix) - self.gguf_writer.add_token_type_count(1) + self.gguf_writer.add_token_type_count(self.hparams.get("type_vocab_size", 1)) self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces) if precompiled_charsmap: self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap) @@ -3040,6 +3195,11 @@ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iter return [(self.map_tensor_name(name), data_torch)] +@Model.register("Olmo2ForCausalLM") +class Olmo2Model(Model): + model_arch = gguf.MODEL_ARCH.OLMO2 + + @Model.register("OlmoeForCausalLM") class OlmoeModel(Model): model_arch = gguf.MODEL_ARCH.OLMOE @@ -3373,6 +3533,97 @@ def prepare_tensors(self): raise ValueError(f"Unprocessed experts: {experts}") +@Model.register("DeepseekForCausalLM") +class DeepseekModel(Model): + model_arch = gguf.MODEL_ARCH.DEEPSEEK + + def set_vocab(self): + try: + self._set_vocab_sentencepiece() + except FileNotFoundError: + self._set_vocab_gpt2() + + def set_gguf_parameters(self): + super().set_gguf_parameters() + hparams = self.hparams + if "head_dim" in hparams: + rope_dim = hparams["head_dim"] + else: + rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] + + self.gguf_writer.add_rope_dimension_count(rope_dim) + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) + self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"]) + self.gguf_writer.add_vocab_size(hparams["vocab_size"]) + self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"]) + self.gguf_writer.add_expert_weights_scale(1.0) + self.gguf_writer.add_expert_count(hparams["n_routed_experts"]) + self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"]) + + _experts: list[dict[str, Tensor]] | None = None + + @staticmethod + def permute(weights: Tensor, n_head: int, n_head_kv: int | None): + if n_head_kv is not None and n_head != n_head_kv: + n_head = n_head_kv + return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) + .swapaxes(1, 2) + .reshape(weights.shape)) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + n_head = self.hparams["num_attention_heads"] + n_kv_head = self.hparams.get("num_key_value_heads") + + if name.endswith(("q_proj.weight", "q_proj.bias")): + data_torch = DeepseekModel.permute(data_torch, n_head, n_head) + if name.endswith(("k_proj.weight", "k_proj.bias")): + data_torch = 
DeepseekModel.permute(data_torch, n_head, n_kv_head) + + # process the experts separately + if name.find("mlp.experts") != -1: + n_experts = self.hparams["n_routed_experts"] + assert bid is not None + + if self._experts is None: + self._experts = [{} for _ in range(self.block_count)] + + self._experts[bid][name] = data_torch + + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] + + # merge the experts into a single 3d tensor + for w_name in ["down_proj", "gate_proj", "up_proj"]: + datas: list[Tensor] = [] + + for xid in range(n_experts): + ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] + + data_torch = torch.stack(datas, dim=0) + + merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" + + new_name = self.map_tensor_name(merged_name) + + tensors.append((new_name, data_torch)) + return tensors + else: + return [] + + return [(self.map_tensor_name(name), data_torch)] + + def prepare_tensors(self): + super().prepare_tensors() + + if self._experts is not None: + # flatten `list[dict[str, Tensor]]` into `list[str]` + experts = [k for d in self._experts for k in d.keys()] + if len(experts) > 0: + raise ValueError(f"Unprocessed experts: {experts}") + + @Model.register("DeepseekV2ForCausalLM") class DeepseekV2Model(Model): model_arch = gguf.MODEL_ARCH.DEEPSEEK2 diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py index 28cd02e5a7f66..88058442f6dc4 100755 --- a/convert_hf_to_gguf_update.py +++ b/convert_hf_to_gguf_update.py @@ -17,7 +17,7 @@ # # python3 convert_hf_to_gguf_update.py # -# - Copy-paste the generated get_vocab_base_pre() function into convert_hf_to_gguf.py +# - The convert_hf_to_gguf.py script will have had its get_vocab_base_pre() function updated # - Update llama.cpp with the new pre-tokenizer if necessary # # TODO: generate tokenizer tests for llama.cpp @@ -102,6 +102,9 @@ class TOKENIZER_TYPE(IntEnum): {"name": "exaone", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct", }, {"name": "phi-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/microsoft/phi-2", }, {"name": "chameleon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/facebook/chameleon-7b", }, + {"name": "minerva-7b", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sapienzanlp/Minerva-7B-base-v1.0", }, + {"name": "roberta-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sentence-transformers/stsb-roberta-base"}, + {"name": "gigachat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct"}, ] diff --git a/docs/android.md b/docs/android.md index 320b62240382f..47530c6c1d478 100644 --- a/docs/android.md +++ b/docs/android.md @@ -23,10 +23,10 @@ $ curl -L {model-url} -o ~/{model}.gguf Then, if you are not already in the repo directory, `cd` into `llama.cpp` and: ``` -$ ./build/bin/llama-simple -m ~/{model}.gguf -c {context-size} -p "{your-prompt}" +$ ./build/bin/llama-cli -m ~/{model}.gguf -c {context-size} -p "{your-prompt}" ``` -Here, we show `llama-simple`, but any of the executables under `examples` should work, in theory. Be sure to set `context-size` to a reasonable number (say, 4096) to start with; otherwise, memory could spike and kill your terminal. +Here, we show `llama-cli`, but any of the executables under `examples` should work, in theory. 
Be sure to set `context-size` to a reasonable number (say, 4096) to start with; otherwise, memory could spike and kill your terminal. To see what it might look like visually, here's an old demo of an interactive session running on a Pixel 5 phone: diff --git a/docs/backend/BLIS.md b/docs/backend/BLIS.md index 35d06bd0f303d..9045485771ea6 100644 --- a/docs/backend/BLIS.md +++ b/docs/backend/BLIS.md @@ -27,13 +27,6 @@ We recommend using openmp since it's easier to modify the cores being used. ### llama.cpp compilation -Makefile: - -```bash -make GGML_BLIS=1 -j -# make GGML_BLIS=1 llama-benchmark-matmult -``` - CMake: ```bash diff --git a/docs/backend/CANN.md b/docs/backend/CANN.md index 6bdd9d2daab90..23f10175a6b2d 100644 --- a/docs/backend/CANN.md +++ b/docs/backend/CANN.md @@ -23,6 +23,8 @@ The llama.cpp CANN backend is designed to support Ascend NPU. It utilize the abi ## News +- 2024.11 + - Support F16 and F32 data type model for Ascend 310P NPU. - 2024.8 - Support `Q4_0` and `Q8_0` data type for Ascend NPU. - 2024.7 @@ -40,9 +42,11 @@ The llama.cpp CANN backend is designed to support Ascend NPU. It utilize the abi ### Ascend NPU **Verified devices** + | Ascend NPU | Status | |:-----------------------------:|:-------:| | Atlas 300T A2 | Support | +| Atlas 300I Duo | Support | *Notes:* diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md index 5a67d10a7a6ac..96c12d8ab8e84 100644 --- a/docs/backend/SYCL.md +++ b/docs/backend/SYCL.md @@ -34,10 +34,10 @@ The SYCL backend would be broken by some PRs due to no online CI. The following release is verified with good quality: -|Commit ID|Tag|Release|Verified Platform| -|-|-|-|-| -|c16f01b97558440cc9df1472f8093d088338bf6c| ||Arc770/Linux/oneAPI 2024.2| -|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1
MTL Arc GPU/Windows 11/oneAPI 2024.1| +|Commit ID|Tag|Release|Verified Platform| Update date| +|-|-|-|-|-| +|3bcd40b3c593d14261fb2abfabad3c0fb5b9e318|b4040 |[llama-b4040-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b4040/llama-b4040-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1
MTL Arc GPU/Windows 11/oneAPI 2024.1| 2024-11-19| +|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1
MTL Arc GPU/Windows 11/oneAPI 2024.1|| For CI and performance test summary, please refer to [llama.cpp CI for SYCL Backend](https://github.com/NeoZhangJianyu/ci_log/tree/main/llama.cpp). @@ -317,12 +317,14 @@ export CPLUS_INCLUDE_DIR=/path/to/oneMKL/buildWithCublas/include:$CPLUS_INCLUDE_ export CPLUS_INCLUDE_DIR=/path/to/oneMKL/include:$CPLUS_INCLUDE_DIR # Build LLAMA with Nvidia BLAS acceleration through SYCL +# Setting GGML_SYCL_DEVICE_ARCH is optional but can improve performance +GGML_SYCL_DEVICE_ARCH=sm_80 # Example architecture # Option 1: Use FP32 (recommended for better performance in most cases) -cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx +cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DGGML_SYCL_DEVICE_ARCH=${GGML_SYCL_DEVICE_ARCH} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx # Option 2: Use FP16 -cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON +cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DGGML_SYCL_DEVICE_ARCH=${GGML_SYCL_DEVICE_ARCH} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON # build all binary cmake --build build --config Release -j -v @@ -340,8 +342,9 @@ export CPLUS_INCLUDE_DIR=/path/to/oneMKL/buildWithrocBLAS/include:$CPLUS_INCLUDE ## AMD # Use FP32, FP16 is not supported -# Find your GGML_SYCL_HIP_TARGET with rocminfo, under the key 'Name:' -cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=AMD -DGGML_SYCL_HIP_TARGET=${GGML_SYCL_HIP_TARGET} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx +# Find your GGML_SYCL_DEVICE_ARCH with rocminfo, under the key 'Name:' +GGML_SYCL_DEVICE_ARCH=gfx90a # Example architecture +cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=AMD -DGGML_SYCL_DEVICE_ARCH=${GGML_SYCL_DEVICE_ARCH} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx # build all binary cmake --build build --config Release -j -v @@ -684,6 +687,7 @@ use 1 SYCL GPUs: [0] with Max compute units:512 |--------------------|---------------------------------------|---------------------------------------------| | GGML_SYCL | ON (mandatory) | Enable build with SYCL code path.
FP32 path - recommended for better performance than FP16 on quantized model|
| GGML_SYCL_TARGET | INTEL *(default)* \| NVIDIA \| AMD | Set the SYCL target device type. |
+| GGML_SYCL_DEVICE_ARCH | Optional (except for AMD) | Set the SYCL device architecture, optional except for AMD. Setting the device architecture can improve the performance. See the table [--offload-arch](https://github.com/intel/llvm/blob/sycl/sycl/doc/design/OffloadDesign.md#--offload-arch) for a list of valid architectures. |
| GGML_SYCL_F16 | OFF *(default)* \|ON *(optional)* | Enable FP16 build with SYCL code path. |
| CMAKE_C_COMPILER | `icx` *(Linux)*, `icx/cl` *(Windows)* | Set `icx` compiler for SYCL code path. |
| CMAKE_CXX_COMPILER | `icpx` *(Linux)*, `icx` *(Windows)* | Set `icpx/icx` compiler for SYCL code path. |
diff --git a/docs/build.md b/docs/build.md
index 359952b30ff93..84019b2046a89 100644
--- a/docs/build.md
+++ b/docs/build.md
@@ -7,124 +7,75 @@ git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
```

-In order to build llama.cpp you have four different options.
+The following sections describe how to build with different backends and options.

-- Using `make`:
-  - On Linux or MacOS:
+## CPU Build

-      ```bash
-      make
-      ```
-
-  - On Windows (x86/x64 only, arm64 requires cmake):
+Build llama.cpp using `CMake`:

-    1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
-    2. Extract `w64devkit` on your pc.
-    3. Run `w64devkit.exe`.
-    4. Use the `cd` command to reach the `llama.cpp` folder.
-    5. From here you can run:
-        ```bash
-        make
-        ```
+```bash
+cmake -B build
+cmake --build build --config Release
+```

-  - Notes:
-    - For `Q4_0_4_4` quantization type build, add the `GGML_NO_LLAMAFILE=1` flag. For example, use `make GGML_NO_LLAMAFILE=1`.
-    - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel.
-    - For faster repeated compilation, install [ccache](https://ccache.dev/).
-    - For debug builds, run `make LLAMA_DEBUG=1`
+**Notes**:

-- Using `CMake`:
+- For faster compilation, add the `-j` argument to run multiple jobs in parallel, or use a generator that does this automatically such as Ninja. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel.
+- For faster repeated compilation, install [ccache](https://ccache.dev/)
+- For debug builds, there are two cases:

-  ```bash
-  cmake -B build
-  cmake --build build --config Release
-  ```
+  1. Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag):

-  **Notes**:
+    ```bash
+    cmake -B build -DCMAKE_BUILD_TYPE=Debug
+    cmake --build build
+    ```

-    - For `Q4_0_4_4` quantization type build, add the `-DGGML_LLAMAFILE=OFF` cmake option. For example, use `cmake -B build -DGGML_LLAMAFILE=OFF`.
-    - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel.
-    - For faster repeated compilation, install [ccache](https://ccache.dev/).
-    - For debug builds, there are two cases:
+  2. Multi-config generators (`-G` param set to Visual Studio, XCode...):

-      1. Single-config generators (e.g.
default = `Unix Makefiles`; note that they just ignore the `--config` flag):
+    ```bash
+    cmake -B build -G "Xcode"
+    cmake --build build --config Debug
+    ```
-      ```bash
-      cmake -B build -DCMAKE_BUILD_TYPE=Debug
-      cmake --build build
-      ```
+    For more details and a list of supported generators, see the [CMake documentation](https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html).
+- For static builds, add `-DBUILD_SHARED_LIBS=OFF`:
+  ```
+  cmake -B build -DBUILD_SHARED_LIBS=OFF
+  cmake --build build --config Release
+  ```

-      2. Multi-config generators (`-G` param set to Visual Studio, XCode...):
+- Building for Windows (x86, x64 and arm64) with MSVC or clang as compilers:
+  - Install Visual Studio 2022, e.g. via the [Community Edition](https://visualstudio.microsoft.com/de/vs/community/). In the installer, select at least the following options (this also automatically installs the required additional tools like CMake,...):
+    - Tab Workload: Desktop-development with C++
+    - Tab Components (select quickly via search): C++-_CMake_ Tools for Windows, _Git_ for Windows, C++-_Clang_ Compiler for Windows, MS-Build Support for LLVM-Toolset (clang)
+  - Please remember to always use a Developer Command Prompt / PowerShell for VS2022 for git, build, test
+  - For Windows on ARM (arm64, WoA) build with:
+    ```bash
+    cmake --preset arm64-windows-llvm-release -D GGML_OPENMP=OFF
+    cmake --build build-arm64-windows-llvm-release
+    ```
+    Building for arm64 can also be done with the MSVC compiler with the build-arm64-windows-MSVC preset, or the standard CMake build instructions. However, note that the MSVC compiler does not support inline ARM assembly code, used e.g. for the accelerated Q4_0_N_M CPU kernels.
+    For building with the ninja generator and clang as the default compiler, first set the `LIB` environment variable, e.g.:
+    `set LIB=C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\um\x64;C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Tools\MSVC\14.41.34120\lib\x64\uwp;C:\Program Files (x86)\Windows Kits\10\Lib\10.0.22621.0\ucrt\x64`
    ```bash
-      cmake -B build -G "Xcode"
-      cmake --build build --config Debug
+    cmake --preset x64-windows-llvm-release
+    cmake --build build-x64-windows-llvm-release
    ```

-  - Building for Windows (x86, x64 and arm64) with MSVC or clang as compilers:
-    - Install Visual Studio 2022, e.g. via the [Community Edition](https://visualstudio.microsoft.com/de/vs/community/). In the installer, select at least the following options (this also automatically installs the required additional tools like CMake,...):
-      - Tab Workload: Desktop-development with C++
-      - Tab Components (select quickly via search): C++-_CMake_ Tools for Windows, _Git_ for Windows, C++-_Clang_ Compiler for Windows, MS-Build Support for LLVM-Toolset (clang)
-    - Please remember to always use a Developer Command Prompt / PowerShell for VS2022 for git, build, test
-    - For Windows on ARM (arm64, WoA) build with:
-      ```bash
-      cmake --preset arm64-windows-llvm-release -D GGML_OPENMP=OFF
-      cmake --build build-arm64-windows-llvm-release
-      ```
-      Note: Building for arm64 could also be done just with MSVC (with the build-arm64-windows-MSVC preset, or the standard CMake build instructions). But MSVC does not support inline ARM assembly-code, used e.g. for the accelerated Q4_0_4_8 CPU kernels.
-
-- Using `gmake` (FreeBSD):
-
-  1. Install and activate [DRM in FreeBSD](https://wiki.freebsd.org/Graphics)
-  2. Add your user to **video** group
-  3. Install compilation dependencies.
-
-    ```bash
-    sudo pkg install gmake automake autoconf pkgconf llvm15 openblas
-
-    gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4
-    ```
-
-## Metal Build
-
-On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
-To disable the Metal build at compile time use the `GGML_NO_METAL=1` flag or the `GGML_METAL=OFF` cmake option.
-
-When built with Metal support, you can explicitly disable GPU inference with the `--n-gpu-layers|-ngl 0` command-line
-argument.

 ## BLAS Build

-Building the program with BLAS support may lead to some performance improvements in prompt processing using batch sizes higher than 32 (the default is 512). Support with CPU-only BLAS implementations doesn't affect the normal generation performance. We may see generation performance improvements with GPU-involved BLAS implementations, e.g. cuBLAS, hipBLAS. There are currently several different BLAS implementations available for build and use:
+Building the program with BLAS support may lead to some performance improvements in prompt processing using batch sizes higher than 32 (the default is 512). Using BLAS doesn't affect the generation performance. There are currently several different BLAS implementations available for build and use:

-### Accelerate Framework:
+### Accelerate Framework

 This is only available on Mac PCs and it's enabled by default. You can just build using the normal instructions.

-### OpenBLAS:
+### OpenBLAS

 This provides BLAS acceleration using only the CPU. Make sure to have OpenBLAS installed on your machine.

-- Using `make`:
-  - On Linux:
-    ```bash
-    make GGML_OPENBLAS=1
-    ```
-
-  - On Windows:
-
-    1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
-    2. Download the latest version of [OpenBLAS for Windows](https://github.com/xianyi/OpenBLAS/releases).
-    3. Extract `w64devkit` on your pc.
-    4. From the OpenBLAS zip that you just downloaded copy `libopenblas.a`, located inside the `lib` folder, inside `w64devkit\x86_64-w64-mingw32\lib`.
-    5. From the same OpenBLAS zip copy the content of the `include` folder inside `w64devkit\x86_64-w64-mingw32\include`.
-    6. Run `w64devkit.exe`.
-    7. Use the `cd` command to reach the `llama.cpp` folder.
-    8. From here you can run:
-
-    ```bash
-    make GGML_OPENBLAS=1
-    ```
-
 - Using `CMake` on Linux:

     ```bash
@@ -136,14 +87,6 @@ This provides BLAS acceleration using only the CPU. Make sure to have OpenBLAS i

 Check [BLIS.md](./backend/BLIS.md) for more information.

-### SYCL
-
-SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators.
-
-llama.cpp based on SYCL is used to **support Intel GPU** (Data Center Max series, Flex series, Arc series, Built-in GPU and iGPU).
-
-For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md).
-
 ### Intel oneMKL

 Building through oneAPI compilers will make avx_vnni instruction set available for intel processors that do not support avx512 and avx512_vnni. Please note that this build config **does not support Intel GPU**. For Intel GPU support, please refer to [llama.cpp for SYCL](./backend/SYCL.md).
@@ -161,16 +104,29 @@ Building through oneAPI compilers will make avx_vnni instruction set available f

 Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information.
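As a concrete illustration of the oneAPI-compiler route described above, a minimal sketch (assumptions: oneAPI installed under the default `/opt/intel/oneapi` prefix, and `Intel10_64lp` as the FindBLAS vendor string for oneMKL - adjust both to your setup):

```bash
# hedged sketch - paths and vendor string depend on your oneAPI installation
source /opt/intel/oneapi/setvars.sh
cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Intel10_64lp \
      -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_NATIVE=ON
cmake --build build --config Release
```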
-### CUDA +### Other BLAS libraries -This provides GPU acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads). +Any other BLAS library can be used by setting the `GGML_BLAS_VENDOR` option. See the [CMake documentation](https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors) for a list of supported vendors. -For Jetson user, if you have Jetson Orin, you can try this: [Offical Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an old model(nano/TX2), need some additional operations before compiling. +## Metal Build + +On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU. +To disable the Metal build at compile time use the `-DGGML_METAL=OFF` cmake option. + +When built with Metal support, you can explicitly disable GPU inference with the `--n-gpu-layers 0` command-line argument. + +## SYCL + +SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators. + +llama.cpp based on SYCL is used to **support Intel GPU** (Data Center Max series, Flex series, Arc series, Built-in GPU and iGPU). + +For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md). + +## CUDA + +This provides GPU acceleration using an NVIDIA GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from the [NVIDIA developer site](https://developer.nvidia.com/cuda-downloads). -- Using `make`: - ```bash - make GGML_CUDA=1 - ``` - Using `CMake`: ```bash @@ -192,14 +148,10 @@ The following compilation options are also available to tweak performance: | GGML_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. | | GGML_CUDA_FA_ALL_QUANTS | Boolean | false | Compile support for all KV cache quantization type (combinations) for the FlashAttention CUDA kernels. More fine-grained control over KV cache size but compilation takes much longer. | -### MUSA +## MUSA This provides GPU acceleration using the MUSA cores of your Moore Threads MTT GPU. Make sure to have the MUSA SDK installed. You can download it from here: [MUSA SDK](https://developer.mthreads.com/sdk/download/musa). -- Using `make`: - ```bash - make GGML_MUSA=1 - ``` - Using `CMake`: ```bash @@ -213,16 +165,12 @@ The environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1` can be used to enab Most of the compilation options available for CUDA should also be available for MUSA, though they haven't been thoroughly tested yet. -### hipBLAS +## HIP -This provides BLAS acceleration on HIP-supported AMD GPUs. +This provides GPU acceleration on HIP-supported AMD GPUs. Make sure to have ROCm installed. You can download it from your Linux distro's package manager or from here: [ROCm Quick Start (Linux)](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html#rocm-install-quick). 
-- Using `make`: - ```bash - make GGML_HIPBLAS=1 - ``` - Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU): ```bash HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \ @@ -247,11 +195,6 @@ You can download it from your Linux distro's package manager or from here: [ROCm && cmake --build build -- -j 16 ``` -- Using `make` (example for target gfx1030, build with 16 CPU threads): - ```bash - make -j16 GGML_HIPBLAS=1 GGML_HIP_UMA=1 AMDGPU_TARGETS=gfx1030 - ``` - - Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU): ```bash set PATH=%HIP_PATH%\bin;%PATH% @@ -265,11 +208,11 @@ You can download it from your Linux distro's package manager or from here: [ROCm The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used. If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3. -### Vulkan +## Vulkan **Windows** -#### w64devkit +### w64devkit Download and extract [`w64devkit`](https://github.com/skeeto/w64devkit/releases). @@ -289,9 +232,14 @@ Libs: -lvulkan-1 EOF ``` -Switch into the `llama.cpp` directory and run `make GGML_VULKAN=1`. -#### Git Bash MINGW64 +Switch into the `llama.cpp` directory and build using CMake. +```sh +cmake -B build -DGGML_VULKAN=ON +cmake --build build --config Release +``` + +### Git Bash MINGW64 Download and install [`Git-SCM`](https://git-scm.com/downloads/win) with the default settings @@ -310,20 +258,21 @@ cmake --build build --config Release Now you can load the model in conversation mode using `Vulkan` -``` -build/bin/release/llama-cli -m "[PATH TO MODEL]" -ngl 100 -c 16384 -t 10 -n -2 -cnv +```sh +build/bin/Release/llama-cli -m "[PATH TO MODEL]" -ngl 100 -c 16384 -t 10 -n -2 -cnv ``` -#### MSYS2 +### MSYS2 Install [MSYS2](https://www.msys2.org/) and then run the following commands in a UCRT terminal to install dependencies. - ```sh - pacman -S git \ - mingw-w64-ucrt-x86_64-gcc \ - mingw-w64-ucrt-x86_64-cmake \ - mingw-w64-ucrt-x86_64-vulkan-devel \ - mingw-w64-ucrt-x86_64-shaderc - ``` -Switch into `llama.cpp` directory and build using CMake. +```sh +pacman -S git \ + mingw-w64-ucrt-x86_64-gcc \ + mingw-w64-ucrt-x86_64-cmake \ + mingw-w64-ucrt-x86_64-vulkan-devel \ + mingw-w64-ucrt-x86_64-shaderc +``` + +Switch into the `llama.cpp` directory and build using CMake. ```sh cmake -B build -DGGML_VULKAN=ON cmake --build build --config Release @@ -372,7 +321,7 @@ cmake --build build --config Release # ggml_vulkan: Using Intel(R) Graphics (ADL GT2) | uma: 1 | fp16: 1 | warp size: 32 ``` -### CANN +## CANN This provides NPU acceleration using the AI cores of your Ascend NPU. And [CANN](https://www.hiascend.com/en/software/cann) is a hierarchical APIs to help you to quickly build AI applications and service based on Ascend NPU. For more information about Ascend NPU in [Ascend Community](https://www.hiascend.com/en/). 
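For context before the test commands below, a typical CANN build might look as follows (a sketch, assuming the Ascend CANN toolkit environment script has been sourced; `GGML_CANN` is the backend's CMake switch):

```bash
# assumes something like: source /usr/local/Ascend/ascend-toolkit/set_env.sh
cmake -B build -DGGML_CANN=on -DCMAKE_BUILD_TYPE=release
cmake --build build --config release
```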
@@ -387,22 +336,26 @@ cmake --build build --config release

 You can test with:

-`./build/bin/llama-cli -m PATH_TO_MODEL -p "Building a website can be done in 10 steps:" -ngl 32`
+```bash
+./build/bin/llama-cli -m PATH_TO_MODEL -p "Building a website can be done in 10 steps:" -ngl 32
+```

-If the fllowing info is output on screen, you are using `llama.cpp by CANN backend`:
+If the following info is output on screen, you are using `llama.cpp` with the CANN backend:
 ```bash
-llm_load_tensors:       CANN buffer size = 13313.00 MiB
+llm_load_tensors:       CANN model buffer size = 13313.00 MiB
 llama_new_context_with_model: CANN compute buffer size =  1260.81 MiB
 ```

 For detailed info, such as model/device supports, CANN install, please refer to [llama.cpp for CANN](./backend/CANN.md).

-### Android
+## Android

 To read documentation for how to build on Android, [click here](./android.md)

-### Arm CPU optimized mulmat kernels
+## Notes about GPU-accelerated backends
+
+The GPU may still be used to accelerate some parts of the computation even when using the `-ngl 0` option. You can fully disable GPU acceleration by using `--device none`.

-Llama.cpp includes a set of optimized mulmat kernels for the Arm architecture, leveraging Arm® Neon™, int8mm and SVE instructions. These kernels are enabled at build time through the appropriate compiler cpu-type flags, such as `-DCMAKE_C_FLAGS=-march=armv8.2a+i8mm+sve`. Note that these optimized kernels require the model to be quantized into one of the formats: `Q4_0_4_4` (Arm Neon), `Q4_0_4_8` (int8mm) or `Q4_0_8_8` (SVE). The SVE mulmat kernel specifically requires a vector width of 256 bits. When running on devices with a different vector width, it is recommended to use the `Q4_0_4_8` (int8mm) or `Q4_0_4_4` (Arm Neon) formats for better performance. Refer to [examples/quantize/README.md](../examples/quantize/README.md) for more information on the quantization formats.
+In most cases, it is possible to build and use multiple backends at the same time. For example, you can build llama.cpp with both CUDA and Vulkan support by using the `-DGGML_CUDA=ON -DGGML_VULKAN=ON` options with CMake. At runtime, you can specify which backend devices to use with the `--device` option. To see a list of available devices, use the `--list-devices` option.

-To support `Q4_0_4_4`, you must build with `GGML_NO_LLAMAFILE=1` (`make`) or `-DGGML_LLAMAFILE=OFF` (`cmake`).
+Backends can be built as dynamic libraries that can be loaded dynamically at runtime. This allows you to use the same llama.cpp binary on different machines with different GPUs. To enable this feature, use the `GGML_BACKEND_DL` option when building.
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index d63a96c1c2547..66cfab2c3b796 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -6,20 +6,26 @@ find_package(Threads REQUIRED)

 # ...
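(An aside on the `GGML_BACKEND_DL` note in `docs/build.md` above - a hedged sketch of a dynamic-backend build; the chosen backends and the model path are illustrative:)

```bash
# build backends as runtime-loadable libraries instead of linking them in
cmake -B build -DGGML_BACKEND_DL=ON -DGGML_CUDA=ON -DGGML_VULKAN=ON
cmake --build build --config Release

# at runtime, inspect the available devices and pick one (or none)
./build/bin/llama-cli --list-devices
./build/bin/llama-cli -m model.gguf --device none   # force CPU-only
```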
+# flags + +llama_add_compile_flags() + # examples include_directories(${CMAKE_CURRENT_SOURCE_DIR}) if (EMSCRIPTEN) else() - add_subdirectory(cvector-generator) add_subdirectory(batched-bench) add_subdirectory(batched) - add_subdirectory(convert-llama2c-to-ggml) add_subdirectory(embedding) add_subdirectory(eval-callback) - add_subdirectory(export-lora) - add_subdirectory(gbnf-validator) + + if (NOT WIN32) + # disabled on Windows because it uses internal functions not exported with LLAMA_API + add_subdirectory(gbnf-validator) + endif() + add_subdirectory(gguf-hash) add_subdirectory(gguf-split) add_subdirectory(gguf) @@ -27,28 +33,41 @@ else() add_subdirectory(imatrix) add_subdirectory(infill) add_subdirectory(llama-bench) - add_subdirectory(llava) add_subdirectory(lookahead) add_subdirectory(lookup) add_subdirectory(main) add_subdirectory(parallel) add_subdirectory(passkey) add_subdirectory(perplexity) - add_subdirectory(quantize-stats) add_subdirectory(quantize) add_subdirectory(retrieval) - if (GGML_RPC) - add_subdirectory(rpc) - endif() if (LLAMA_BUILD_SERVER) - add_subdirectory(server) - endif() - if (GGML_SYCL) - add_subdirectory(sycl) + add_subdirectory(server) endif() add_subdirectory(save-load-state) + add_subdirectory(run) add_subdirectory(simple) add_subdirectory(simple-chat) add_subdirectory(speculative) + add_subdirectory(speculative-simple) add_subdirectory(tokenize) + add_subdirectory(tts) + add_subdirectory(gen-docs) + if (NOT GGML_BACKEND_DL) + # these examples use the backends directly and cannot be built with dynamic loading + add_subdirectory(convert-llama2c-to-ggml) + add_subdirectory(cvector-generator) + add_subdirectory(export-lora) + if (NOT WIN32) + # disabled on Windows because it uses internal functions not exported with LLAMA_API + add_subdirectory(quantize-stats) + endif() + add_subdirectory(llava) + if (GGML_RPC) + add_subdirectory(rpc) + endif() + if (GGML_SYCL) + add_subdirectory(sycl) + endif() + endif() endif() diff --git a/examples/base-translate.sh b/examples/base-translate.sh deleted file mode 100755 index 103a52f55e6db..0000000000000 --- a/examples/base-translate.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# -# Few-shot translation example. -# Requires a base model (i.e. no fine-tuned or instruct models). 
-#
-# Usage:
-#
-#  cd llama.cpp
-#  make -j
-#
-#  ./examples/base-translate.sh "" [extra-main-args]
-#
-
-if [ $# -lt 2 ]; then
-  echo "Usage: ./base-translate.sh \"\" [extra-main-args]"
-  exit 1
-fi
-
-eargs=""
-if [ $# -gt 2 ]; then
-  eargs="${@:3}"
-fi
-
-ftmp="__llama.cpp_example_tmp__.txt"
-trap "rm -f $ftmp" EXIT
-
-echo "Translate from English to French:
-
-===
-
-sea otter, peppermint, plush girafe:
-
-sea otter => loutre de mer
-peppermint => menthe poivrée
-plush girafe => girafe peluche
-
-===
-
-violin
-
-violin => violon
-
-===
-
-phone, computer, mouse, keyboard:
-
-phone => téléphone
-computer => ordinateur
-mouse => souris
-keyboard => clavier
-
-===
-" > $ftmp
-
-echo "$2
-" >> $ftmp
-
-model=$1
-
-# generate the most likely continuation until the string "===" is found
-./llama-cli -m $model -f $ftmp -n 64 --temp 0 --repeat-penalty 1.0 --no-penalize-nl -r "===" $eargs
diff --git a/examples/batched-bench/CMakeLists.txt b/examples/batched-bench/CMakeLists.txt
index 959acaeeebc38..68ad707f32c98 100644
--- a/examples/batched-bench/CMakeLists.txt
+++ b/examples/batched-bench/CMakeLists.txt
@@ -2,4 +2,4 @@ set(TARGET llama-batched-bench)
 add_executable(${TARGET} batched-bench.cpp)
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/examples/batched/CMakeLists.txt b/examples/batched/CMakeLists.txt
index 77e33343b6673..0d439f49842b5 100644
--- a/examples/batched/CMakeLists.txt
+++ b/examples/batched/CMakeLists.txt
@@ -2,4 +2,4 @@ set(TARGET llama-batched)
 add_executable(${TARGET} batched.cpp)
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 3b554033e7ee4..e2e01f2d598cd 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -65,13 +65,14 @@ int main(int argc, char ** argv) {
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);

     auto sparams = llama_sampler_chain_default_params();
+    sparams.no_perf = false;

     llama_sampler * smpl = llama_sampler_chain_init(sparams);

-    llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sparams.top_k));
-    llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sparams.top_p, params.sparams.min_keep));
-    llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sparams.temp));
-    llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sparams.seed));
+    llama_sampler_chain_add(smpl, llama_sampler_init_top_k(params.sampling.top_k));
+    llama_sampler_chain_add(smpl, llama_sampler_init_top_p(params.sampling.top_p, params.sampling.min_keep));
+    llama_sampler_chain_add(smpl, llama_sampler_init_temp (params.sampling.temp));
+    llama_sampler_chain_add(smpl, llama_sampler_init_dist (params.sampling.seed));

     if (ctx == NULL) {
         LOG_ERR("%s: error: failed to create the llama_context\n" , __func__);
diff --git a/examples/convert-llama2c-to-ggml/CMakeLists.txt b/examples/convert-llama2c-to-ggml/CMakeLists.txt
index a6790e617217e..44e5f722a9739 100644
--- a/examples/convert-llama2c-to-ggml/CMakeLists.txt
+++ b/examples/convert-llama2c-to-ggml/CMakeLists.txt
@@ -2,4 +2,4 @@ set(TARGET llama-convert-llama2c-to-ggml)
 add_executable(${TARGET} convert-llama2c-to-ggml.cpp)
install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/convert-llama2c-to-ggml/README.md b/examples/convert-llama2c-to-ggml/README.md index 5774ac83c32c8..46a42da691830 100644 --- a/examples/convert-llama2c-to-ggml/README.md +++ b/examples/convert-llama2c-to-ggml/README.md @@ -2,11 +2,8 @@ This example reads weights from project [llama2.c](https://github.com/karpathy/llama2.c) and saves them in ggml compatible format. The vocab that is available in `models/ggml-vocab.bin` is used by default. -To convert the model first download the models from the [llama2.c](https://github.com/karpathy/llama2.c) repository: +To convert the model first download the models from the [llama2.c](https://github.com/karpathy/llama2.c) repository. -`$ make -j` - -After successful compilation, following usage options are available: ``` usage: ./llama-convert-llama2c-to-ggml [options] diff --git a/examples/cvector-generator/CMakeLists.txt b/examples/cvector-generator/CMakeLists.txt index 0a559d60c2a6d..49ad9561c82ea 100644 --- a/examples/cvector-generator/CMakeLists.txt +++ b/examples/cvector-generator/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-cvector-generator) add_executable(${TARGET} cvector-generator.cpp pca.hpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/deprecation-warning/deprecation-warning.cpp b/examples/deprecation-warning/deprecation-warning.cpp index 11b35d2c22500..c2958ea12d92d 100644 --- a/examples/deprecation-warning/deprecation-warning.cpp +++ b/examples/deprecation-warning/deprecation-warning.cpp @@ -12,7 +12,7 @@ int main(int argc, char** argv) { } // Get only the program name from the full path - auto pos = filename.find_last_of('/'); + auto pos = filename.find_last_of("/\\"); if (pos != std::string::npos) { filename = filename.substr(pos+1); } diff --git a/examples/embedding/CMakeLists.txt b/examples/embedding/CMakeLists.txt index 8256e789ad33a..809040307d2c9 100644 --- a/examples/embedding/CMakeLists.txt +++ b/examples/embedding/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-embedding) add_executable(${TARGET} embedding.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/eval-callback/CMakeLists.txt b/examples/eval-callback/CMakeLists.txt index a48753d38e16e..95915ed91c099 100644 --- a/examples/eval-callback/CMakeLists.txt +++ b/examples/eval-callback/CMakeLists.txt @@ -2,8 +2,9 @@ set(TARGET llama-eval-callback) add_executable(${TARGET} eval-callback.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) set(TEST_TARGET test-eval-callback) -add_test(NAME ${TEST_TARGET} COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0) +add_test(NAME ${TEST_TARGET} + COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file 
tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0) set_property(TEST ${TEST_TARGET} PROPERTY LABELS eval-callback curl) diff --git a/examples/export-lora/CMakeLists.txt b/examples/export-lora/CMakeLists.txt index 1cef6e71694e2..310455787a7ef 100644 --- a/examples/export-lora/CMakeLists.txt +++ b/examples/export-lora/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-export-lora) add_executable(${TARGET} export-lora.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/gbnf-validator/CMakeLists.txt b/examples/gbnf-validator/CMakeLists.txt index 4edd6ec7394c5..d2cb524c0a7f7 100644 --- a/examples/gbnf-validator/CMakeLists.txt +++ b/examples/gbnf-validator/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-gbnf-validator) add_executable(${TARGET} gbnf-validator.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/gbnf-validator/gbnf-validator.cpp b/examples/gbnf-validator/gbnf-validator.cpp index 7493af9d3aec3..17a0e27c444e8 100644 --- a/examples/gbnf-validator/gbnf-validator.cpp +++ b/examples/gbnf-validator/gbnf-validator.cpp @@ -11,19 +11,15 @@ static bool llama_grammar_validate(struct llama_grammar * grammar, const std::string & input_str, size_t & error_pos, std::string & error_msg) { const auto cpts = unicode_cpts_from_utf8(input_str); - const llama_grammar_rules & rules = llama_grammar_get_rules (grammar); - llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar); + auto & stacks_cur = llama_grammar_get_stacks(grammar); size_t pos = 0; for (const auto & cpt : cpts) { - const llama_grammar_stacks stacks_prev = llama_grammar_get_stacks(grammar); // copy - - llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur); + llama_grammar_accept(grammar, cpt); if (stacks_cur.empty()) { error_pos = pos; error_msg = "Unexpected character '" + unicode_cpt_to_utf8(cpt) + "'"; - stacks_cur = stacks_prev; return false; } ++pos; @@ -82,7 +78,8 @@ int main(int argc, char** argv) { llama_grammar * grammar = llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root"); if (grammar == nullptr) { - throw std::runtime_error("Failed to initialize llama_grammar"); + fprintf(stdout, "Failed to initialize llama_grammar\n"); + return 1; } // Read the input file std::string input_str; diff --git a/examples/gen-docs/CMakeLists.txt b/examples/gen-docs/CMakeLists.txt index c94cda7764341..25de0af35df60 100644 --- a/examples/gen-docs/CMakeLists.txt +++ b/examples/gen-docs/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-gen-docs) add_executable(${TARGET} gen-docs.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/gguf-hash/CMakeLists.txt b/examples/gguf-hash/CMakeLists.txt index 633f4553594bb..15c5c68c6f402 100644 --- a/examples/gguf-hash/CMakeLists.txt +++ b/examples/gguf-hash/CMakeLists.txt @@ -4,12 +4,19 @@ install(TARGETS ${TARGET} RUNTIME) # clibs dependencies include_directories(deps/) + add_library(xxhash OBJECT deps/xxhash/xxhash.c deps/xxhash/xxhash.h) 
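+# note: OBJECT libraries compile their sources to object files without creating
+# an archive; linking them pulls those objects straight into the consuming target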
target_link_libraries(${TARGET} PRIVATE xxhash) + add_library(sha1 OBJECT deps/sha1/sha1.c deps/sha1/sha1.h) target_link_libraries(${TARGET} PRIVATE sha1) +if (NOT MSVC) + # disable warnings in 3rd party code + target_compile_options(sha1 PRIVATE -w) +endif() + add_library(sha256 OBJECT deps/sha256/sha256.c deps/sha256/sha256.h) target_link_libraries(${TARGET} PRIVATE sha256) target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/gguf-split/CMakeLists.txt b/examples/gguf-split/CMakeLists.txt index f63887da7dfca..c407e2f0af44a 100644 --- a/examples/gguf-split/CMakeLists.txt +++ b/examples/gguf-split/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-gguf-split) add_executable(${TARGET} gguf-split.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/gguf-split/gguf-split.cpp b/examples/gguf-split/gguf-split.cpp index 7e62657e118a4..75f63f93891f5 100644 --- a/examples/gguf-split/gguf-split.cpp +++ b/examples/gguf-split/gguf-split.cpp @@ -287,7 +287,7 @@ struct split_strategy { } void print_info() { - printf("n_split: %ld\n", ctx_outs.size()); + printf("n_split: %zu\n", ctx_outs.size()); int i_split = 0; for (auto & ctx_out : ctx_outs) { // re-calculate the real gguf size for each split (= metadata size + total size of all tensors) @@ -297,7 +297,7 @@ struct split_strategy { total_size += ggml_nbytes(t); } total_size = total_size / 1000 / 1000; // convert to megabytes - printf("split %05d: n_tensors = %d, total_size = %ldM\n", i_split + 1, gguf_get_n_tensors(ctx_out), total_size); + printf("split %05d: n_tensors = %d, total_size = %zuM\n", i_split + 1, gguf_get_n_tensors(ctx_out), total_size); i_split++; } } diff --git a/examples/gguf/CMakeLists.txt b/examples/gguf/CMakeLists.txt index a9569b411956b..fb04eb83f34ce 100644 --- a/examples/gguf/CMakeLists.txt +++ b/examples/gguf/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-gguf) add_executable(${TARGET} gguf.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/gritlm/CMakeLists.txt b/examples/gritlm/CMakeLists.txt index 86dfddca346fe..fa1b4dc70c2f6 100644 --- a/examples/gritlm/CMakeLists.txt +++ b/examples/gritlm/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-gritlm) add_executable(${TARGET} gritlm.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp index 6e42fa0734ecb..18a945b33905f 100644 --- a/examples/gritlm/gritlm.cpp +++ b/examples/gritlm/gritlm.cpp @@ -75,7 +75,7 @@ static std::vector> encode(llama_context * ctx, const std::ve } std::vector emb_norm(emb_unorm.size()); - common_embd_normalize(emb_unorm.data(), emb_norm.data(), n_embd); + common_embd_normalize(emb_unorm.data(), emb_norm.data(), n_embd, 2); result.push_back(emb_norm); #ifdef GRIT_DEBUG diff --git a/examples/imatrix/CMakeLists.txt b/examples/imatrix/CMakeLists.txt index d4c8265bdb9d2..412696c47c31c 100644 
--- a/examples/imatrix/CMakeLists.txt +++ b/examples/imatrix/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-imatrix) add_executable(${TARGET} imatrix.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/imatrix/README.md b/examples/imatrix/README.md index bb5faec94c20a..9c056986b3ed6 100644 --- a/examples/imatrix/README.md +++ b/examples/imatrix/README.md @@ -25,8 +25,6 @@ For faster computation, make sure to use GPU offloading via the `-ngl` argument ## Example ```bash -GGML_CUDA=1 make -j - # generate importance matrix (imatrix.dat) ./llama-imatrix -m ggml-model-f16.gguf -f train-data.txt -ngl 99 diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index 70ff47768c02b..45206f4a7dee1 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -637,10 +637,19 @@ int main(int argc, char ** argv) { LOG_INF("%s\n", common_params_get_system_info(params).c_str()); } - if (!compute_imatrix(ctx, params)) { - return 1; + if (params.prompt.empty()) { + if (params.in_files.empty()) { + LOG_ERR("Error: No prompt provided and no precomputed matrices (--in-file) to combine.\n"); + return 1; + } + LOG_INF("No prompt provided; combining precomputed matrices only.\n"); + } else { + if (!compute_imatrix(ctx, params)) { + return 1; + } } + g_collector.save_imatrix(); LOG("\n"); diff --git a/examples/infill/CMakeLists.txt b/examples/infill/CMakeLists.txt index 9b1aa3b63c920..fb26628d82bf9 100644 --- a/examples/infill/CMakeLists.txt +++ b/examples/infill/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-infill) add_executable(${TARGET} infill.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/infill/README.md b/examples/infill/README.md index 810a0c5e76697..df4d976f2bb4f 100644 --- a/examples/infill/README.md +++ b/examples/infill/README.md @@ -14,7 +14,7 @@ In this section, we cover the most commonly used options for running the `infill - `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`). - `-i, --interactive`: Run the program in interactive mode, allowing you to provide input directly and receive real-time responses. - `-n N, --n-predict N`: Set the number of tokens to predict when generating text. Adjusting this value can influence the length of the generated text. -- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. +- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 4096, but if a LLaMA model was built with a longer context, increasing this value will provide better results for longer input/inference. - `--spm-infill`: Use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. 
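To make the `--spm-infill` bullet concrete: the two modes differ only in how the prefix and suffix are arranged around the fill-in-the-middle markers. A hedged sketch in C++, with placeholder marker strings; real models supply their own special tokens through the tokenizer rather than as literal text, and `build_infill_prompt` is an illustrative name, not a function from the codebase:

```cpp
#include <string>

// Illustration of the two infill prompt layouts. <PRE>/<SUF>/<MID> are
// hypothetical placeholders for the model's fill-in-the-middle tokens.
std::string build_infill_prompt(const std::string & prefix, const std::string & suffix, bool spm) {
    const std::string pre = "<PRE>", suf = "<SUF>", mid = "<MID>";
    if (spm) {
        return suf + suffix + pre + prefix + mid;  // Suffix/Prefix/Middle (--spm-infill)
    }
    return pre + prefix + suf + suffix + mid;      // Prefix/Suffix/Middle (default)
}
```

In both layouts the model is asked to generate the middle span last, which is why some models fine-tuned on the SPM ordering prefer the flag.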
## Input Prompts diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index 15b358dc4e854..ef700895720ff 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -73,7 +73,7 @@ int main(int argc, char ** argv) { common_init(); - auto & sparams = params.sparams; + auto & sparams = params.sampling; console::init(params.simple_io, params.use_color); atexit([]() { console::cleanup(); }); diff --git a/examples/llama-bench/CMakeLists.txt b/examples/llama-bench/CMakeLists.txt index 5bdbea4e28187..17e3b9b87bae4 100644 --- a/examples/llama-bench/CMakeLists.txt +++ b/examples/llama-bench/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-bench) add_executable(${TARGET} llama-bench.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 8f4e0e206a27e..2338ad1067dde 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -6,28 +6,28 @@ #include #include #include +#include #include #include -#include #include #include #include #include #include #include -#include #include +#include +#include "common.h" #include "ggml.h" #include "llama.h" -#include "common.h" #ifdef _WIN32 -#define WIN32_LEAN_AND_MEAN -#ifndef NOMINMAX -# define NOMINMAX -#endif -#include +# define WIN32_LEAN_AND_MEAN +# ifndef NOMINMAX +# define NOMINMAX +# endif +# include #endif // utils @@ -36,8 +36,7 @@ static uint64_t get_time_ns() { return std::chrono::nanoseconds(clock::now().time_since_epoch()).count(); } -template -static std::string join(const std::vector & values, const std::string & delim) { +template static std::string join(const std::vector & values, const std::string & delim) { std::ostringstream str; for (size_t i = 0; i < values.size(); i++) { str << values[i]; @@ -48,38 +47,35 @@ static std::string join(const std::vector & values, const std::string & delim return str.str(); } -template -static std::vector transform_to_str(const std::vector & values, F f) { +template static std::vector transform_to_str(const std::vector & values, F f) { std::vector str_values; std::transform(values.begin(), values.end(), std::back_inserter(str_values), f); return str_values; } -template -static T avg(const std::vector & v) { +template static T avg(const std::vector & v) { if (v.empty()) { return 0; } T sum = std::accumulate(v.begin(), v.end(), T(0)); - return sum / (T)v.size(); + return sum / (T) v.size(); } -template -static T stdev(const std::vector & v) { +template static T stdev(const std::vector & v) { if (v.size() <= 1) { return 0; } - T mean = avg(v); + T mean = avg(v); T sq_sum = std::inner_product(v.begin(), v.end(), v.begin(), T(0)); - T stdev = std::sqrt(sq_sum / (T)(v.size() - 1) - mean * mean * (T)v.size() / (T)(v.size() - 1)); + T stdev = std::sqrt(sq_sum / (T) (v.size() - 1) - mean * mean * (T) v.size() / (T) (v.size() - 1)); return stdev; } static std::string get_cpu_info() { std::vector cpu_list; for (size_t i = 0; i < ggml_backend_dev_count(); i++) { - auto * dev = ggml_backend_dev_get(i); - auto dev_type = ggml_backend_dev_type(dev); + auto * dev = ggml_backend_dev_get(i); + auto dev_type = ggml_backend_dev_type(dev); if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU || dev_type == GGML_BACKEND_DEVICE_TYPE_ACCEL) { cpu_list.push_back(ggml_backend_dev_description(dev)); } @@ -90,8 +86,8 
@@ static std::string get_cpu_info() { static std::string get_gpu_info() { std::vector gpu_list; for (size_t i = 0; i < ggml_backend_dev_count(); i++) { - auto * dev = ggml_backend_dev_get(i); - auto dev_type = ggml_backend_dev_type(dev); + auto * dev = ggml_backend_dev_get(i); + auto dev_type = ggml_backend_dev_type(dev); if (dev_type == GGML_BACKEND_DEVICE_TYPE_GPU) { gpu_list.push_back(ggml_backend_dev_description(dev)); } @@ -100,17 +96,24 @@ static std::string get_gpu_info() { } // command line params -enum output_formats {NONE, CSV, JSON, JSONL, MARKDOWN, SQL}; +enum output_formats { NONE, CSV, JSON, JSONL, MARKDOWN, SQL }; static const char * output_format_str(output_formats format) { switch (format) { - case NONE: return "none"; - case CSV: return "csv"; - case JSON: return "json"; - case JSONL: return "jsonl"; - case MARKDOWN: return "md"; - case SQL: return "sql"; - default: GGML_ABORT("invalid output format"); + case NONE: + return "none"; + case CSV: + return "csv"; + case JSON: + return "json"; + case JSONL: + return "jsonl"; + case MARKDOWN: + return "md"; + case SQL: + return "sql"; + default: + GGML_ABORT("invalid output format"); } } @@ -135,10 +138,14 @@ static bool output_format_from_str(const std::string & s, output_formats & forma static const char * split_mode_str(llama_split_mode mode) { switch (mode) { - case LLAMA_SPLIT_MODE_NONE: return "none"; - case LLAMA_SPLIT_MODE_LAYER: return "layer"; - case LLAMA_SPLIT_MODE_ROW: return "row"; - default: GGML_ABORT("invalid split mode"); + case LLAMA_SPLIT_MODE_NONE: + return "none"; + case LLAMA_SPLIT_MODE_LAYER: + return "layer"; + case LLAMA_SPLIT_MODE_ROW: + return "row"; + default: + GGML_ABORT("invalid split mode"); } } @@ -149,59 +156,59 @@ static std::string pair_str(const std::pair & p) { } struct cmd_params { - std::vector model; - std::vector n_prompt; - std::vector n_gen; + std::vector model; + std::vector n_prompt; + std::vector n_gen; std::vector> n_pg; - std::vector n_batch; - std::vector n_ubatch; - std::vector type_k; - std::vector type_v; - std::vector n_threads; - std::vector cpu_mask; - std::vector cpu_strict; - std::vector poll; - std::vector n_gpu_layers; - std::vector rpc_servers; - std::vector split_mode; - std::vector main_gpu; - std::vector no_kv_offload; - std::vector flash_attn; - std::vector> tensor_split; - std::vector use_mmap; - std::vector embeddings; - ggml_numa_strategy numa; - int reps; - ggml_sched_priority prio; - int delay; - bool verbose; - bool progress; - output_formats output_format; - output_formats output_format_stderr; + std::vector n_batch; + std::vector n_ubatch; + std::vector type_k; + std::vector type_v; + std::vector n_threads; + std::vector cpu_mask; + std::vector cpu_strict; + std::vector poll; + std::vector n_gpu_layers; + std::vector rpc_servers; + std::vector split_mode; + std::vector main_gpu; + std::vector no_kv_offload; + std::vector flash_attn; + std::vector> tensor_split; + std::vector use_mmap; + std::vector embeddings; + ggml_numa_strategy numa; + int reps; + ggml_sched_priority prio; + int delay; + bool verbose; + bool progress; + output_formats output_format; + output_formats output_format_stderr; }; static const cmd_params cmd_params_defaults = { - /* model */ {"models/7B/ggml-model-q4_0.gguf"}, - /* n_prompt */ {512}, - /* n_gen */ {128}, + /* model */ { "models/7B/ggml-model-q4_0.gguf" }, + /* n_prompt */ { 512 }, + /* n_gen */ { 128 }, /* n_pg */ {}, - /* n_batch */ {2048}, - /* n_ubatch */ {512}, - /* type_k */ {GGML_TYPE_F16}, - /* type_v */ 
{GGML_TYPE_F16}, - /* n_threads */ {cpu_get_num_math()}, - /* cpu_mask */ {"0x0"}, - /* cpu_strict */ {false}, - /* poll */ {50}, - /* n_gpu_layers */ {99}, - /* rpc_servers */ {""}, - /* split_mode */ {LLAMA_SPLIT_MODE_LAYER}, - /* main_gpu */ {0}, - /* no_kv_offload */ {false}, - /* flash_attn */ {false}, - /* tensor_split */ {std::vector(llama_max_devices(), 0.0f)}, - /* use_mmap */ {true}, - /* embeddings */ {false}, + /* n_batch */ { 2048 }, + /* n_ubatch */ { 512 }, + /* type_k */ { GGML_TYPE_F16 }, + /* type_v */ { GGML_TYPE_F16 }, + /* n_threads */ { cpu_get_num_math() }, + /* cpu_mask */ { "0x0" }, + /* cpu_strict */ { false }, + /* poll */ { 50 }, + /* n_gpu_layers */ { 99 }, + /* rpc_servers */ { "" }, + /* split_mode */ { LLAMA_SPLIT_MODE_LAYER }, + /* main_gpu */ { 0 }, + /* no_kv_offload */ { false }, + /* flash_attn */ { false }, + /* tensor_split */ { std::vector(llama_max_devices(), 0.0f) }, + /* use_mmap */ { true }, + /* embeddings */ { false }, /* numa */ GGML_NUMA_STRATEGY_DISABLED, /* reps */ 5, /* prio */ GGML_SCHED_PRIO_NORMAL, @@ -218,38 +225,59 @@ static void print_usage(int /* argc */, char ** argv) { printf("options:\n"); printf(" -h, --help\n"); printf(" -m, --model (default: %s)\n", join(cmd_params_defaults.model, ",").c_str()); - printf(" -p, --n-prompt (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str()); + printf(" -p, --n-prompt (default: %s)\n", + join(cmd_params_defaults.n_prompt, ",").c_str()); printf(" -n, --n-gen (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str()); - printf(" -pg (default: %s)\n", join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str()); - printf(" -b, --batch-size (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str()); - printf(" -ub, --ubatch-size (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str()); - printf(" -ctk, --cache-type-k (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str()); - printf(" -ctv, --cache-type-v (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str()); - printf(" -t, --threads (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str()); - printf(" -C, --cpu-mask (default: %s)\n", join(cmd_params_defaults.cpu_mask, ",").c_str()); - printf(" --cpu-strict <0|1> (default: %s)\n", join(cmd_params_defaults.cpu_strict, ",").c_str()); + printf(" -pg (default: %s)\n", + join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str()); + printf(" -b, --batch-size (default: %s)\n", + join(cmd_params_defaults.n_batch, ",").c_str()); + printf(" -ub, --ubatch-size (default: %s)\n", + join(cmd_params_defaults.n_ubatch, ",").c_str()); + printf(" -ctk, --cache-type-k (default: %s)\n", + join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str()); + printf(" -ctv, --cache-type-v (default: %s)\n", + join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str()); + printf(" -t, --threads (default: %s)\n", + join(cmd_params_defaults.n_threads, ",").c_str()); + printf(" -C, --cpu-mask (default: %s)\n", + join(cmd_params_defaults.cpu_mask, ",").c_str()); + printf(" --cpu-strict <0|1> (default: %s)\n", + join(cmd_params_defaults.cpu_strict, ",").c_str()); printf(" --poll <0...100> (default: %s)\n", join(cmd_params_defaults.poll, ",").c_str()); - printf(" -ngl, --n-gpu-layers (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str()); + printf(" -ngl, --n-gpu-layers (default: %s)\n", + 
join(cmd_params_defaults.n_gpu_layers, ",").c_str()); if (llama_supports_rpc()) { - printf(" -rpc, --rpc (default: %s)\n", join(cmd_params_defaults.rpc_servers, ",").c_str()); + printf(" -rpc, --rpc (default: %s)\n", + join(cmd_params_defaults.rpc_servers, ",").c_str()); } - printf(" -sm, --split-mode (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str()); - printf(" -mg, --main-gpu (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str()); - printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str()); - printf(" -fa, --flash-attn <0|1> (default: %s)\n", join(cmd_params_defaults.flash_attn, ",").c_str()); - printf(" -mmp, --mmap <0|1> (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str()); + printf(" -sm, --split-mode (default: %s)\n", + join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str()); + printf(" -mg, --main-gpu (default: %s)\n", + join(cmd_params_defaults.main_gpu, ",").c_str()); + printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", + join(cmd_params_defaults.no_kv_offload, ",").c_str()); + printf(" -fa, --flash-attn <0|1> (default: %s)\n", + join(cmd_params_defaults.flash_attn, ",").c_str()); + printf(" -mmp, --mmap <0|1> (default: %s)\n", + join(cmd_params_defaults.use_mmap, ",").c_str()); printf(" --numa (default: disabled)\n"); - printf(" -embd, --embeddings <0|1> (default: %s)\n", join(cmd_params_defaults.embeddings, ",").c_str()); + printf(" -embd, --embeddings <0|1> (default: %s)\n", + join(cmd_params_defaults.embeddings, ",").c_str()); printf(" -ts, --tensor-split (default: 0)\n"); printf(" -r, --repetitions (default: %d)\n", cmd_params_defaults.reps); printf(" --prio <0|1|2|3> (default: %d)\n", cmd_params_defaults.prio); printf(" --delay <0...N> (seconds) (default: %d)\n", cmd_params_defaults.delay); - printf(" -o, --output (default: %s)\n", output_format_str(cmd_params_defaults.output_format)); - printf(" -oe, --output-err (default: %s)\n", output_format_str(cmd_params_defaults.output_format_stderr)); + printf(" -o, --output (default: %s)\n", + output_format_str(cmd_params_defaults.output_format)); + printf(" -oe, --output-err (default: %s)\n", + output_format_str(cmd_params_defaults.output_format_stderr)); printf(" -v, --verbose (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0"); printf(" --progress (default: %s)\n", cmd_params_defaults.progress ? 
"1" : "0"); printf("\n"); - printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n"); + printf( + "Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter " + "multiple times.\n"); } static ggml_type ggml_type_from_name(const std::string & s) { @@ -281,22 +309,21 @@ static ggml_type ggml_type_from_name(const std::string & s) { return GGML_TYPE_COUNT; } - static cmd_params parse_cmd_params(int argc, char ** argv) { - cmd_params params; - std::string arg; - bool invalid_param = false; - const std::string arg_prefix = "--"; - const char split_delim = ','; - - params.verbose = cmd_params_defaults.verbose; - params.output_format = cmd_params_defaults.output_format; + cmd_params params; + std::string arg; + bool invalid_param = false; + const std::string arg_prefix = "--"; + const char split_delim = ','; + + params.verbose = cmd_params_defaults.verbose; + params.output_format = cmd_params_defaults.output_format; params.output_format_stderr = cmd_params_defaults.output_format_stderr; - params.reps = cmd_params_defaults.reps; - params.numa = cmd_params_defaults.numa; - params.prio = cmd_params_defaults.prio; - params.delay = cmd_params_defaults.delay; - params.progress = cmd_params_defaults.progress; + params.reps = cmd_params_defaults.reps; + params.numa = cmd_params_defaults.numa; + params.prio = cmd_params_defaults.prio; + params.delay = cmd_params_defaults.delay; + params.progress = cmd_params_defaults.progress; for (int i = 1; i < argc; i++) { arg = argv[i]; @@ -338,7 +365,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { invalid_param = true; break; } - params.n_pg.push_back({std::stoi(p[0]), std::stoi(p[1])}); + params.n_pg.push_back({ std::stoi(p[0]), std::stoi(p[1]) }); } else if (arg == "-b" || arg == "--batch-size") { if (++i >= argc) { invalid_param = true; @@ -358,7 +385,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { invalid_param = true; break; } - auto p = string_split(argv[i], split_delim); + auto p = string_split(argv[i], split_delim); std::vector types; for (const auto & t : p) { ggml_type gt = ggml_type_from_name(t); @@ -377,7 +404,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { invalid_param = true; break; } - auto p = string_split(argv[i], split_delim); + auto p = string_split(argv[i], split_delim); std::vector types; for (const auto & t : p) { ggml_type gt = ggml_type_from_name(t); @@ -437,7 +464,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { invalid_param = true; break; } - auto p = string_split(argv[i], split_delim); + auto p = string_split(argv[i], split_delim); std::vector modes; for (const auto & m : p) { llama_split_mode mode; @@ -476,10 +503,16 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { break; } else { std::string value(argv[i]); - /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } - else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } - else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } - else { invalid_param = true; break; } + /**/ if (value == "distribute" || value == "") { + params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; + } else if (value == "isolate") { + params.numa = GGML_NUMA_STRATEGY_ISOLATE; + } else if (value == "numactl") { + params.numa = GGML_NUMA_STRATEGY_NUMACTL; + } else { + invalid_param = true; + break; + } } } else if (arg == "-fa" || arg == 
"--flash-attn") { if (++i >= argc) { @@ -509,9 +542,9 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { } for (auto ts : string_split(argv[i], split_delim)) { // split string by ; and / - const std::regex regex{R"([;/]+)"}; - std::sregex_token_iterator it{ts.begin(), ts.end(), regex, -1}; - std::vector split_arg{it, {}}; + const std::regex regex{ R"([;/]+)" }; + std::sregex_token_iterator it{ ts.begin(), ts.end(), regex, -1 }; + std::vector split_arg{ it, {} }; GGML_ASSERT(split_arg.size() <= llama_max_devices()); std::vector tensor_split(llama_max_devices()); @@ -570,52 +603,94 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { } // set defaults - if (params.model.empty()) { params.model = cmd_params_defaults.model; } - if (params.n_prompt.empty()) { params.n_prompt = cmd_params_defaults.n_prompt; } - if (params.n_gen.empty()) { params.n_gen = cmd_params_defaults.n_gen; } - if (params.n_pg.empty()) { params.n_pg = cmd_params_defaults.n_pg; } - if (params.n_batch.empty()) { params.n_batch = cmd_params_defaults.n_batch; } - if (params.n_ubatch.empty()) { params.n_ubatch = cmd_params_defaults.n_ubatch; } - if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; } - if (params.type_v.empty()) { params.type_v = cmd_params_defaults.type_v; } - if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; } - if (params.rpc_servers.empty()) { params.rpc_servers = cmd_params_defaults.rpc_servers; } - if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; } - if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; } - if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; } - if (params.flash_attn.empty()) { params.flash_attn = cmd_params_defaults.flash_attn; } - if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; } - if (params.use_mmap.empty()) { params.use_mmap = cmd_params_defaults.use_mmap; } - if (params.embeddings.empty()) { params.embeddings = cmd_params_defaults.embeddings; } - if (params.n_threads.empty()) { params.n_threads = cmd_params_defaults.n_threads; } - if (params.cpu_mask.empty()) { params.cpu_mask = cmd_params_defaults.cpu_mask; } - if (params.cpu_strict.empty()) { params.cpu_strict = cmd_params_defaults.cpu_strict; } - if (params.poll.empty()) { params.poll = cmd_params_defaults.poll; } + if (params.model.empty()) { + params.model = cmd_params_defaults.model; + } + if (params.n_prompt.empty()) { + params.n_prompt = cmd_params_defaults.n_prompt; + } + if (params.n_gen.empty()) { + params.n_gen = cmd_params_defaults.n_gen; + } + if (params.n_pg.empty()) { + params.n_pg = cmd_params_defaults.n_pg; + } + if (params.n_batch.empty()) { + params.n_batch = cmd_params_defaults.n_batch; + } + if (params.n_ubatch.empty()) { + params.n_ubatch = cmd_params_defaults.n_ubatch; + } + if (params.type_k.empty()) { + params.type_k = cmd_params_defaults.type_k; + } + if (params.type_v.empty()) { + params.type_v = cmd_params_defaults.type_v; + } + if (params.n_gpu_layers.empty()) { + params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; + } + if (params.rpc_servers.empty()) { + params.rpc_servers = cmd_params_defaults.rpc_servers; + } + if (params.split_mode.empty()) { + params.split_mode = cmd_params_defaults.split_mode; + } + if (params.main_gpu.empty()) { + params.main_gpu = cmd_params_defaults.main_gpu; + } + if (params.no_kv_offload.empty()) { + params.no_kv_offload = 
cmd_params_defaults.no_kv_offload; + } + if (params.flash_attn.empty()) { + params.flash_attn = cmd_params_defaults.flash_attn; + } + if (params.tensor_split.empty()) { + params.tensor_split = cmd_params_defaults.tensor_split; + } + if (params.use_mmap.empty()) { + params.use_mmap = cmd_params_defaults.use_mmap; + } + if (params.embeddings.empty()) { + params.embeddings = cmd_params_defaults.embeddings; + } + if (params.n_threads.empty()) { + params.n_threads = cmd_params_defaults.n_threads; + } + if (params.cpu_mask.empty()) { + params.cpu_mask = cmd_params_defaults.cpu_mask; + } + if (params.cpu_strict.empty()) { + params.cpu_strict = cmd_params_defaults.cpu_strict; + } + if (params.poll.empty()) { + params.poll = cmd_params_defaults.poll; + } return params; } struct cmd_params_instance { - std::string model; - int n_prompt; - int n_gen; - int n_batch; - int n_ubatch; - ggml_type type_k; - ggml_type type_v; - int n_threads; - std::string cpu_mask; - bool cpu_strict; - int poll; - int n_gpu_layers; - std::string rpc_servers; - llama_split_mode split_mode; - int main_gpu; - bool no_kv_offload; - bool flash_attn; + std::string model; + int n_prompt; + int n_gen; + int n_batch; + int n_ubatch; + ggml_type type_k; + ggml_type type_v; + int n_threads; + std::string cpu_mask; + bool cpu_strict; + int poll; + int n_gpu_layers; + std::string rpc_servers; + llama_split_mode split_mode; + int main_gpu; + bool no_kv_offload; + bool flash_attn; std::vector tensor_split; - bool use_mmap; - bool embeddings; + bool use_mmap; + bool embeddings; llama_model_params to_llama_mparams() const { llama_model_params mparams = llama_model_default_params(); @@ -624,35 +699,31 @@ struct cmd_params_instance { if (!rpc_servers.empty()) { mparams.rpc_servers = rpc_servers.c_str(); } - mparams.split_mode = split_mode; - mparams.main_gpu = main_gpu; + mparams.split_mode = split_mode; + mparams.main_gpu = main_gpu; mparams.tensor_split = tensor_split.data(); - mparams.use_mmap = use_mmap; + mparams.use_mmap = use_mmap; return mparams; } bool equal_mparams(const cmd_params_instance & other) const { - return model == other.model && - n_gpu_layers == other.n_gpu_layers && - rpc_servers == other.rpc_servers && - split_mode == other.split_mode && - main_gpu == other.main_gpu && - use_mmap == other.use_mmap && + return model == other.model && n_gpu_layers == other.n_gpu_layers && rpc_servers == other.rpc_servers && + split_mode == other.split_mode && main_gpu == other.main_gpu && use_mmap == other.use_mmap && tensor_split == other.tensor_split; } llama_context_params to_llama_cparams() const { llama_context_params cparams = llama_context_default_params(); - cparams.n_ctx = n_prompt + n_gen; - cparams.n_batch = n_batch; - cparams.n_ubatch = n_ubatch; - cparams.type_k = type_k; - cparams.type_v = type_v; + cparams.n_ctx = n_prompt + n_gen; + cparams.n_batch = n_batch; + cparams.n_ubatch = n_ubatch; + cparams.type_k = type_k; + cparams.type_v = type_v; cparams.offload_kqv = !no_kv_offload; - cparams.flash_attn = flash_attn; - cparams.embeddings = embeddings; + cparams.flash_attn = flash_attn; + cparams.embeddings = embeddings; return cparams; } @@ -662,6 +733,7 @@ static std::vector get_cmd_params_instances(const cmd_param std::vector instances; // this ordering minimizes the number of times that each model needs to be reloaded + // clang-format off for (const auto & m : params.model) for (const auto & nl : params.n_gpu_layers) for (const auto & rpc : params.rpc_servers) @@ -767,100 +839,94 @@ static std::vector 
get_cmd_params_instances(const cmd_param instances.push_back(instance); } } + // clang-format on return instances; } struct test { static const std::string build_commit; - static const int build_number; + static const int build_number; static const std::string cpu_info; static const std::string gpu_info; - std::string model_filename; - std::string model_type; - uint64_t model_size; - uint64_t model_n_params; - int n_batch; - int n_ubatch; - int n_threads; - std::string cpu_mask; - bool cpu_strict; - int poll; - ggml_type type_k; - ggml_type type_v; - int n_gpu_layers; - llama_split_mode split_mode; - int main_gpu; - bool no_kv_offload; - bool flash_attn; - std::vector tensor_split; - bool use_mmap; - bool embeddings; - int n_prompt; - int n_gen; - std::string test_time; - std::vector samples_ns; + std::string model_filename; + std::string model_type; + uint64_t model_size; + uint64_t model_n_params; + int n_batch; + int n_ubatch; + int n_threads; + std::string cpu_mask; + bool cpu_strict; + int poll; + ggml_type type_k; + ggml_type type_v; + int n_gpu_layers; + llama_split_mode split_mode; + int main_gpu; + bool no_kv_offload; + bool flash_attn; + std::vector tensor_split; + bool use_mmap; + bool embeddings; + int n_prompt; + int n_gen; + std::string test_time; + std::vector samples_ns; test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) { model_filename = inst.model; char buf[128]; llama_model_desc(lmodel, buf, sizeof(buf)); - model_type = buf; - model_size = llama_model_size(lmodel); + model_type = buf; + model_size = llama_model_size(lmodel); model_n_params = llama_model_n_params(lmodel); - n_batch = inst.n_batch; - n_ubatch = inst.n_ubatch; - n_threads = inst.n_threads; - cpu_mask = inst.cpu_mask; - cpu_strict = inst.cpu_strict; - poll = inst.poll; - type_k = inst.type_k; - type_v = inst.type_v; - n_gpu_layers = inst.n_gpu_layers; - split_mode = inst.split_mode; - main_gpu = inst.main_gpu; - no_kv_offload = inst.no_kv_offload; - flash_attn = inst.flash_attn; - tensor_split = inst.tensor_split; - use_mmap = inst.use_mmap; - embeddings = inst.embeddings; - n_prompt = inst.n_prompt; - n_gen = inst.n_gen; + n_batch = inst.n_batch; + n_ubatch = inst.n_ubatch; + n_threads = inst.n_threads; + cpu_mask = inst.cpu_mask; + cpu_strict = inst.cpu_strict; + poll = inst.poll; + type_k = inst.type_k; + type_v = inst.type_v; + n_gpu_layers = inst.n_gpu_layers; + split_mode = inst.split_mode; + main_gpu = inst.main_gpu; + no_kv_offload = inst.no_kv_offload; + flash_attn = inst.flash_attn; + tensor_split = inst.tensor_split; + use_mmap = inst.use_mmap; + embeddings = inst.embeddings; + n_prompt = inst.n_prompt; + n_gen = inst.n_gen; // RFC 3339 date-time format - time_t t = time(NULL); + time_t t = time(NULL); std::strftime(buf, sizeof(buf), "%FT%TZ", gmtime(&t)); test_time = buf; (void) ctx; } - uint64_t avg_ns() const { - return ::avg(samples_ns); - } + uint64_t avg_ns() const { return ::avg(samples_ns); } - uint64_t stdev_ns() const { - return ::stdev(samples_ns); - } + uint64_t stdev_ns() const { return ::stdev(samples_ns); } std::vector get_ts() const { - int n_tokens = n_prompt + n_gen; + int n_tokens = n_prompt + n_gen; std::vector ts; - std::transform(samples_ns.begin(), samples_ns.end(), std::back_inserter(ts), [n_tokens](uint64_t t) { return 1e9 * n_tokens / t; }); + std::transform(samples_ns.begin(), samples_ns.end(), std::back_inserter(ts), + [n_tokens](uint64_t t) { return 1e9 * n_tokens / t; }); return ts; } - double avg_ts() const { - return 
::avg(get_ts()); - } + double avg_ts() const { return ::avg(get_ts()); } - double stdev_ts() const { - return ::stdev(get_ts()); - } + double stdev_ts() const { return ::stdev(get_ts()); } static std::string get_backend() { std::vector backends; for (size_t i = 0; i < ggml_backend_reg_count(); i++) { - auto * reg = ggml_backend_reg_get(i); + auto * reg = ggml_backend_reg_get(i); std::string name = ggml_backend_reg_name(reg); if (name != "CPU") { backends.push_back(ggml_backend_reg_name(reg)); @@ -871,36 +937,27 @@ struct test { static const std::vector & get_fields() { static const std::vector fields = { - "build_commit", "build_number", - "cpu_info", "gpu_info", "backends", - "model_filename", "model_type", "model_size", "model_n_params", - "n_batch", "n_ubatch", - "n_threads", "cpu_mask", "cpu_strict", "poll", - "type_k", "type_v", - "n_gpu_layers", "split_mode", - "main_gpu", "no_kv_offload", "flash_attn", - "tensor_split", "use_mmap", "embeddings", - "n_prompt", "n_gen", "test_time", - "avg_ns", "stddev_ns", - "avg_ts", "stddev_ts", + "build_commit", "build_number", "cpu_info", "gpu_info", "backends", "model_filename", + "model_type", "model_size", "model_n_params", "n_batch", "n_ubatch", "n_threads", + "cpu_mask", "cpu_strict", "poll", "type_k", "type_v", "n_gpu_layers", + "split_mode", "main_gpu", "no_kv_offload", "flash_attn", "tensor_split", "use_mmap", + "embeddings", "n_prompt", "n_gen", "test_time", "avg_ns", "stddev_ns", + "avg_ts", "stddev_ts", }; return fields; } - enum field_type {STRING, BOOL, INT, FLOAT}; + enum field_type { STRING, BOOL, INT, FLOAT }; static field_type get_field_type(const std::string & field) { - if (field == "build_number" || field == "n_batch" || field == "n_ubatch" || - field == "n_threads" || field == "poll" || - field == "model_size" || field == "model_n_params" || - field == "n_gpu_layers" || field == "main_gpu" || - field == "n_prompt" || field == "n_gen" || - field == "avg_ns" || field == "stddev_ns") { + if (field == "build_number" || field == "n_batch" || field == "n_ubatch" || field == "n_threads" || + field == "poll" || field == "model_size" || field == "model_n_params" || field == "n_gpu_layers" || + field == "main_gpu" || field == "n_prompt" || field == "n_gen" || field == "avg_ns" || + field == "stddev_ns") { return INT; } - if (field == "f16_kv" || field == "no_kv_offload" || - field == "cpu_strict" || - field == "flash_attn" || field == "use_mmap" || field == "embeddings") { + if (field == "f16_kv" || field == "no_kv_offload" || field == "cpu_strict" || field == "flash_attn" || + field == "use_mmap" || field == "embeddings") { return BOOL; } if (field == "avg_ts" || field == "stddev_ts") { @@ -911,7 +968,7 @@ struct test { std::vector get_values() const { std::string tensor_split_str; - int max_nonzero = 0; + int max_nonzero = 0; for (size_t i = 0; i < llama_max_devices(); i++) { if (tensor_split[i] > 0) { max_nonzero = i; @@ -925,29 +982,47 @@ struct test { tensor_split_str += "/"; } } - std::vector values = { - build_commit, std::to_string(build_number), - cpu_info, gpu_info, get_backend(), - model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params), - std::to_string(n_batch), std::to_string(n_ubatch), - std::to_string(n_threads), cpu_mask, std::to_string(cpu_strict), std::to_string(poll), - ggml_type_name(type_k), ggml_type_name(type_v), - std::to_string(n_gpu_layers), split_mode_str(split_mode), - std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn), - tensor_split_str, 
std::to_string(use_mmap), std::to_string(embeddings), - std::to_string(n_prompt), std::to_string(n_gen), test_time, - std::to_string(avg_ns()), std::to_string(stdev_ns()), - std::to_string(avg_ts()), std::to_string(stdev_ts()) - }; + std::vector values = { build_commit, + std::to_string(build_number), + cpu_info, + gpu_info, + get_backend(), + model_filename, + model_type, + std::to_string(model_size), + std::to_string(model_n_params), + std::to_string(n_batch), + std::to_string(n_ubatch), + std::to_string(n_threads), + cpu_mask, + std::to_string(cpu_strict), + std::to_string(poll), + ggml_type_name(type_k), + ggml_type_name(type_v), + std::to_string(n_gpu_layers), + split_mode_str(split_mode), + std::to_string(main_gpu), + std::to_string(no_kv_offload), + std::to_string(flash_attn), + tensor_split_str, + std::to_string(use_mmap), + std::to_string(embeddings), + std::to_string(n_prompt), + std::to_string(n_gen), + test_time, + std::to_string(avg_ns()), + std::to_string(stdev_ns()), + std::to_string(avg_ts()), + std::to_string(stdev_ts()) }; return values; } std::map get_map() const { std::map map; - auto fields = get_fields(); - auto values = get_values(); - std::transform(fields.begin(), fields.end(), values.begin(), - std::inserter(map, map.end()), std::make_pair); + auto fields = get_fields(); + auto values = get_values(); + std::transform(fields.begin(), fields.end(), values.begin(), std::inserter(map, map.end()), + std::make_pair); return map; } }; @@ -961,9 +1036,12 @@ struct printer { virtual ~printer() {} FILE * fout; + virtual void print_header(const cmd_params & params) { (void) params; } + virtual void print_test(const test & t) = 0; - virtual void print_footer() { } + + virtual void print_footer() {} }; struct csv_printer : public printer { @@ -979,7 +1057,7 @@ struct csv_printer : public printer { return escaped; } - void print_header(const cmd_params & params) override { + void print_header(const cmd_params & params) override { std::vector fields = test::get_fields(); fprintf(fout, "%s\n", join(fields, ",").c_str()); (void) params; @@ -992,7 +1070,6 @@ struct csv_printer : public printer { } }; - static std::string escape_json(const std::string & value) { std::string escaped; for (auto c : value) { @@ -1000,7 +1077,7 @@ static std::string escape_json(const std::string & value) { escaped += "\\\""; } else if (c == '\\') { escaped += "\\\\"; - } else if (c <= 0x1f) { + } else if (c <= 0x1f) { char buf[8]; snprintf(buf, sizeof(buf), "\\u%04x", c); escaped += buf; @@ -1033,7 +1110,8 @@ struct json_printer : public printer { void print_fields(const std::vector & fields, const std::vector & values) { assert(fields.size() == values.size()); for (size_t i = 0; i < fields.size(); i++) { - fprintf(fout, " \"%s\": %s,\n", fields.at(i).c_str(), format_json_value(fields.at(i), values.at(i)).c_str()); + fprintf(fout, " \"%s\": %s,\n", fields.at(i).c_str(), + format_json_value(fields.at(i), values.at(i)).c_str()); } } @@ -1051,12 +1129,9 @@ struct json_printer : public printer { fflush(fout); } - void print_footer() override { - fprintf(fout, "\n]\n"); - } + void print_footer() override { fprintf(fout, "\n]\n"); } }; - struct jsonl_printer : public printer { void print_fields(const std::vector & fields, const std::vector & values) { assert(fields.size() == values.size()); @@ -1116,7 +1191,7 @@ struct markdown_printer : public printer { return 13; } - int width = std::max((int)field.length(), 10); + int width = std::max((int) field.length(), 10); if (test::get_field_type(field) == 
test::STRING) { return -width; @@ -1230,18 +1305,18 @@ struct markdown_printer : public printer { fprintf(fout, "|"); for (const auto & field : fields) { std::string value; - char buf[128]; + char buf[128]; if (field == "model") { value = t.model_type; } else if (field == "size") { - if (t.model_size < 1024*1024*1024) { + if (t.model_size < 1024 * 1024 * 1024) { snprintf(buf, sizeof(buf), "%.2f MiB", t.model_size / 1024.0 / 1024.0); } else { snprintf(buf, sizeof(buf), "%.2f GiB", t.model_size / 1024.0 / 1024.0 / 1024.0); } value = buf; } else if (field == "params") { - if (t.model_n_params < 1000*1000*1000) { + if (t.model_n_params < 1000 * 1000 * 1000) { snprintf(buf, sizeof(buf), "%.2f M", t.model_n_params / 1e6); } else { snprintf(buf, sizeof(buf), "%.2f B", t.model_n_params / 1e9); @@ -1303,7 +1378,8 @@ struct sql_printer : public printer { std::vector fields = test::get_fields(); fprintf(fout, "CREATE TABLE IF NOT EXISTS test (\n"); for (size_t i = 0; i < fields.size(); i++) { - fprintf(fout, " %s %s%s\n", fields.at(i).c_str(), get_sql_field_type(fields.at(i)).c_str(), i < fields.size() - 1 ? "," : ""); + fprintf(fout, " %s %s%s\n", fields.at(i).c_str(), get_sql_field_type(fields.at(i)).c_str(), + i < fields.size() - 1 ? "," : ""); } fprintf(fout, ");\n"); fprintf(fout, "\n"); @@ -1324,8 +1400,8 @@ struct sql_printer : public printer { static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_threads) { llama_set_n_threads(ctx, n_threads, n_threads); - const llama_model * model = llama_get_model(ctx); - const int32_t n_vocab = llama_n_vocab(model); + const llama_model * model = llama_get_model(ctx); + const int32_t n_vocab = llama_n_vocab(model); std::vector tokens(n_batch); @@ -1333,7 +1409,7 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_th while (n_processed < n_prompt) { int n_tokens = std::min(n_prompt - n_processed, n_batch); - tokens[0] = n_processed == 0 && llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab; + tokens[0] = n_processed == 0 && llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab; for (int i = 1; i < n_tokens; i++) { tokens[i] = std::rand() % n_vocab; } @@ -1347,8 +1423,8 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_th static void test_gen(llama_context * ctx, int n_gen, int n_threads) { llama_set_n_threads(ctx, n_threads, n_threads); - const llama_model * model = llama_get_model(ctx); - const int32_t n_vocab = llama_n_vocab(model); + const llama_model * model = llama_get_model(ctx); + const int32_t n_vocab = llama_n_vocab(model); llama_token token = llama_add_bos_token(model) ? 
llama_token_bos(model) : std::rand() % n_vocab; @@ -1401,6 +1477,17 @@ int main(int argc, char ** argv) { cmd_params params = parse_cmd_params(argc, argv); + // initialize backends + ggml_backend_load_all(); + auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + if (!cpu_dev) { + fprintf(stderr, "%s: error: CPU backend is not loaded\n", __func__); + return 1; + } + auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev); + auto * ggml_threadpool_new_fn = (decltype(ggml_threadpool_new) *) ggml_backend_reg_get_proc_address(cpu_reg, "ggml_threadpool_new"); + auto * ggml_threadpool_free_fn = (decltype(ggml_threadpool_free) *) ggml_backend_reg_get_proc_address(cpu_reg, "ggml_threadpool_free"); + // initialize llama.cpp if (!params.verbose) { llama_log_set(llama_null_log_callback, NULL); @@ -1411,7 +1498,7 @@ int main(int argc, char ** argv) { set_process_priority(params.prio); // initialize printer - std::unique_ptr p = create_printer(params.output_format); + std::unique_ptr p = create_printer(params.output_format); std::unique_ptr p_err = create_printer(params.output_format_stderr); if (p) { @@ -1426,15 +1513,15 @@ int main(int argc, char ** argv) { std::vector params_instances = get_cmd_params_instances(params); - llama_model * lmodel = nullptr; + llama_model * lmodel = nullptr; const cmd_params_instance * prev_inst = nullptr; - int params_idx = 0; + int params_idx = 0; auto params_count = params_instances.size(); for (const auto & inst : params_instances) { - params_idx ++; + params_idx++; if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: starting\n", params_idx, params_count); + fprintf(stderr, "llama-bench: benchmark %d/%zu: starting\n", params_idx, params_count); } // keep the same model between tests when possible if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) { @@ -1475,7 +1562,7 @@ int main(int argc, char ** argv) { tpp.poll = t.poll; tpp.prio = params.prio; - struct ggml_threadpool* threadpool = ggml_threadpool_new(&tpp); + struct ggml_threadpool * threadpool = ggml_threadpool_new_fn(&tpp); if (!threadpool) { fprintf(stderr, "%s: threadpool create failed : n_threads %d\n", __func__, tpp.n_threads); exit(1); @@ -1486,14 +1573,14 @@ int main(int argc, char ** argv) { // warmup run if (t.n_prompt > 0) { if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_count); + fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup prompt run\n", params_idx, params_count); } //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads); test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads); } if (t.n_gen > 0) { if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_count); + fprintf(stderr, "llama-bench: benchmark %d/%zu: warmup generation run\n", params_idx, params_count); } test_gen(ctx, 1, t.n_threads); } @@ -1505,13 +1592,15 @@ int main(int argc, char ** argv) { if (t.n_prompt > 0) { if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps); + fprintf(stderr, "llama-bench: benchmark %d/%zu: prompt run %d/%d\n", params_idx, params_count, + i + 1, params.reps); } test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads); } if (t.n_gen > 0) { if (params.progress) { - fprintf(stderr, "llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps); + fprintf(stderr, 
"llama-bench: benchmark %d/%zu: generation run %d/%d\n", params_idx, params_count, + i + 1, params.reps); } test_gen(ctx, t.n_gen, t.n_threads); } @@ -1534,7 +1623,7 @@ int main(int argc, char ** argv) { llama_free(ctx); - ggml_threadpool_free(threadpool); + ggml_threadpool_free_fn(threadpool); } llama_free_model(lmodel); diff --git a/examples/llama.android/llama/build.gradle.kts b/examples/llama.android/llama/build.gradle.kts index 2d1dfba2040da..28dbc1904888b 100644 --- a/examples/llama.android/llama/build.gradle.kts +++ b/examples/llama.android/llama/build.gradle.kts @@ -19,6 +19,7 @@ android { externalNativeBuild { cmake { arguments += "-DLLAMA_BUILD_COMMON=ON" + arguments += "-DGGML_LLAMAFILE=OFF" arguments += "-DCMAKE_BUILD_TYPE=Release" cppFlags += listOf() arguments += listOf() diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index 65cd4eb515c7f..998c673d5d31f 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -210,20 +210,20 @@ actor LlamaContext { llama_kv_cache_clear(context) - let t_pp_start = ggml_time_us() + let t_pp_start = DispatchTime.now().uptimeNanoseconds / 1000; if llama_decode(context, batch) != 0 { print("llama_decode() failed during prompt") } llama_synchronize(context) - let t_pp_end = ggml_time_us() + let t_pp_end = DispatchTime.now().uptimeNanoseconds / 1000; // bench text generation llama_kv_cache_clear(context) - let t_tg_start = ggml_time_us() + let t_tg_start = DispatchTime.now().uptimeNanoseconds / 1000; for i in 0..) if (BUILD_SHARED_LIBS) @@ -35,11 +35,18 @@ add_executable(${TARGET} llava-cli.cpp) set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-cli) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) set(TARGET llama-minicpmv-cli) add_executable(${TARGET} minicpmv-cli.cpp) set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-minicpmv-cli) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) + +set(TARGET llama-qwen2vl-cli) +add_executable(${TARGET} qwen2vl-cli.cpp) +set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-qwen2vl-cli) +install(TARGETS ${TARGET} RUNTIME) +target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index aae49c965e905..3cd0d2fa8590c 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -8,21 +8,25 @@ #include "ggml-alloc.h" #include "ggml-backend.h" -#ifdef GGML_USE_CUDA -#include "ggml-cuda.h" -#endif - -#ifdef GGML_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef GGML_USE_CANN -#include "ggml-cann.h" -#endif - -#ifdef GGML_USE_VULKAN -#include "ggml-vulkan.h" -#endif +//#ifdef GGML_USE_CUDA +//#include "ggml-cuda.h" +//#endif +// +//#ifdef GGML_USE_SYCL +//#include "ggml-sycl.h" +//#endif +// +//#ifdef GGML_USE_METAL +//#include "ggml-metal.h" +//#endif +// +//#ifdef GGML_USE_CANN +//#include "ggml-cann.h" +//#endif +// +//#ifdef GGML_USE_VULKAN +//#include "ggml-vulkan.h" +//#endif #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" @@ -40,10 
+44,17 @@ #include #include -#define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0) -#define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0) -#define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0) -#define LOG_DBG(...) do { fprintf(stderr, __VA_ARGS__); } while (0) +#if defined(LLAVA_LOG_OFF) +# define LOG_INF(...) +# define LOG_WRN(...) +# define LOG_ERR(...) +# define LOG_DBG(...) +#else // defined(LLAVA_LOG_OFF) +# define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0) +# define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0) +# define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0) +# define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0) +#endif // defined(LLAVA_LOG_OFF) //#define CLIP_DEBUG_FUNCTIONS @@ -91,7 +102,9 @@ static std::string format(const char * fmt, ...) { #define KEY_HAS_LLAVA_PROJ "clip.has_llava_projector" #define KEY_HAS_MINICPMV_PROJ "clip.has_minicpmv_projector" #define KEY_MINICPMV_VERSION "clip.minicpmv_version" +#define KEY_HAS_QWEN2VL_MERGER "clip.has_qwen2vl_merger" #define KEY_USE_GELU "clip.use_gelu" +#define KEY_USE_SILU "clip.use_silu" #define KEY_N_EMBD "clip.%s.embedding_length" #define KEY_N_FF "clip.%s.feed_forward_length" #define KEY_N_BLOCK "clip.%s.block_count" @@ -118,7 +131,8 @@ static std::string format(const char * fmt, ...) { #define TN_TOKEN_EMBD "%s.token_embd.weight" #define TN_POS_EMBD "%s.position_embd.weight" #define TN_CLASS_EMBD "v.class_embd" -#define TN_PATCH_EMBD "v.patch_embd.weight" +#define TN_PATCH_EMBD "v.patch_embd.weight" // the tensor is not renamed with a ".0" postfix, for backward compat +#define TN_PATCH_EMBD_1 "v.patch_embd.weight.1" #define TN_PATCH_BIAS "v.patch_embd.bias" #define TN_ATTN_K "%s.blk.%d.attn_k.%s" #define TN_ATTN_Q "%s.blk.%d.attn_q.%s" @@ -152,6 +166,7 @@ enum projector_type { PROJECTOR_TYPE_LDP, PROJECTOR_TYPE_LDPV2, PROJECTOR_TYPE_RESAMPLER, + PROJECTOR_TYPE_MERGER, PROJECTOR_TYPE_UNKNOWN, }; @@ -160,6 +175,7 @@ static std::map PROJECTOR_TYPE_NAMES = { { PROJECTOR_TYPE_LDP, "ldp" }, { PROJECTOR_TYPE_LDPV2, "ldpv2"}, { PROJECTOR_TYPE_RESAMPLER, "resampler"}, + { PROJECTOR_TYPE_MERGER, "qwen2vl_merger"}, }; @@ -452,7 +468,8 @@ struct clip_vision_model { // embeddings struct ggml_tensor * class_embedding; - struct ggml_tensor * patch_embeddings; + struct ggml_tensor * patch_embeddings_0; + struct ggml_tensor * patch_embeddings_1; // second Conv2D kernel when we decouple the Conv3D along the temporal dimension (Qwen2VL) struct ggml_tensor * patch_bias; struct ggml_tensor * position_embeddings; @@ -542,6 +559,7 @@ struct clip_ctx { bool has_vision_encoder = false; bool has_llava_projector = false; bool has_minicpmv_projector = false; + bool has_qwen2vl_merger = false; int minicpmv_version = 2; struct clip_vision_model vision_model; @@ -550,6 +568,7 @@ struct clip_ctx { float image_mean[3]; float image_std[3]; bool use_gelu = false; + bool use_silu = false; int32_t ftype = 1; bool has_class_embedding = true; @@ -595,14 +614,26 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 image_size_height = imgs->data->ny; } } + else if (ctx->has_qwen2vl_merger) { + // use the image's native resolution when the image is available + if (is_inf) { + // if (imgs->data->nx && imgs->data->ny) { + image_size_width = imgs->data->nx; + image_size_height = imgs->data->ny; + } + } const int patch_size = hparams.patch_size; const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size)); + const int patches_w = image_size_width / patch_size; + const int patches_h = image_size_height / patch_size; const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0); + const int num_position_ids = ctx->has_qwen2vl_merger ? num_positions * 4 : num_positions; const int hidden_size = hparams.hidden_size; const int n_head = hparams.n_head; const int d_head = hidden_size / n_head; int n_layer = hparams.n_layer; const float eps = hparams.eps; + int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4}; // four equal M-RoPE sections, consumed by ggml_rope_multi further down const int batch_size = imgs->size; @@ -623,10 +654,30 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 ggml_set_name(inp_raw, "inp_raw"); ggml_set_input(inp_raw); - struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1); + struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1); - inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size); - inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3)); + if (ctx->has_qwen2vl_merger) { + GGML_ASSERT(image_size_width % (patch_size * 2) == 0); + GGML_ASSERT(image_size_height % (patch_size * 2) == 0); + + auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1); + inp = ggml_add(ctx0, inp, inp_1); + inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 2, 0, 3)); // [w, h, c, b] -> [c, w, h, b] + inp = ggml_reshape_4d( + ctx0, inp, + hidden_size * 2, patches_w / 2, patches_h, batch_size); + inp = ggml_reshape_4d( + ctx0, inp, + hidden_size * 2, patches_w / 2, 2, batch_size * (patches_h / 2)); + inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 0, 2, 1, 3)); + inp = ggml_reshape_3d( + ctx0, inp, + hidden_size, patches_w * patches_h, batch_size); + } + else { + inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size); + inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3)); + } if (ctx->has_patch_bias) { // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp)); @@ -648,12 +699,14 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 } } - struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions); + struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids); ggml_set_name(positions, "positions"); ggml_set_input(positions); - embeddings = - ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions)); + if (!ctx->has_qwen2vl_merger) { // qwen2vl uses RoPE position embeddings instead + embeddings = + ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions)); + } if (ctx->has_minicpmv_projector) { int pos_w = image_size_width/patch_size; @@ -677,7 +730,8 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 } // loop over layers - if (ctx->has_minicpmv_projector) { + if (ctx->has_minicpmv_projector || ctx->has_qwen2vl_merger) { + // TODO: figure out why it is done this way
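+ // note (inferred from the loop bound, not stated in the patch): the encoder
+ // loop below runs for (n_layer - 1) blocks, which matches LLaVA-style
+ // projectors that read a hidden state from before the final block; the
+ // MiniCPM-V resampler and the Qwen2VL merger appear to consume the output
+ // of the last block as well, hence the bump that follows.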
n_layer += 1; } for (int il = 0; il < n_layer - 1; il++) { @@ -699,8 +753,13 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 struct ggml_tensor * Q = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b); - Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head)); Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size); + if (ctx->has_qwen2vl_merger) { + Q = ggml_rope_multi( + ctx0, Q, positions, nullptr, + d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1); + } + Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head)); Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3)); Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size); @@ -708,6 +767,11 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b); K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size); + if (ctx->has_qwen2vl_merger) { + K = ggml_rope_multi( + ctx0, K, positions, nullptr, + d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1); + } K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3)); K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size); @@ -747,6 +811,8 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 if (ctx->use_gelu) { cur = ggml_gelu_inplace(ctx0, cur); + } else if (ctx->use_silu) { + cur = ggml_silu_inplace(ctx0, cur); } else { cur = ggml_gelu_quick_inplace(ctx0, cur); } @@ -758,6 +824,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 cur = ggml_add(ctx0, embeddings, cur); embeddings = cur; + } // post-layernorm @@ -829,7 +896,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3)); mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]); // stride = 1, padding = 1, bias is nullptr - block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1); + block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1); // layer norm // // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1] @@ -877,7 +944,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 // block_2 { // stride = 2 - block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1); + block_1 = ggml_conv_2d_dw(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1); // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1] // layer norm @@ -938,7 +1005,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 // mlp_2 ne [24, 24, 2048, 1] mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0); // weight ne = [3, 3, 2048, 1] - struct ggml_tensor * peg_0 = ggml_conv_depthwise_2d(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1); + struct ggml_tensor * peg_0 = ggml_conv_2d_dw(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 1, 1, 1, 1, 1); peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3)); peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b); mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3)); @@ -1019,6 +1086,19 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 GGML_ASSERT(false); } } + else if (ctx->proj_type 
== PROJECTOR_TYPE_MERGER) { + embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size * 4, num_positions / 4, batch_size); + + embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_0_b); + + // GELU activation + embeddings = ggml_gelu(ctx0, embeddings); + + // Second linear layer + embeddings = ggml_mul_mat(ctx0, model.mm_1_w, embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_1_b); + } // build the graph ggml_build_forward_expand(gf, embeddings); @@ -1142,25 +1222,30 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } } -#ifdef GGML_USE_CUDA - new_clip->backend = ggml_backend_cuda_init(0); - LOG_INF("%s: CLIP using CUDA backend\n", __func__); -#endif - -#ifdef GGML_USE_METAL - new_clip->backend = ggml_backend_metal_init(); - LOG_INF("%s: CLIP using Metal backend\n", __func__); -#endif - -#ifdef GGML_USE_CANN - new_clip->backend = ggml_backend_cann_init(0); - LOG_INF("%s: CLIP using CANN backend\n", __func__); -#endif - -#ifdef GGML_USE_VULKAN - new_clip->backend = ggml_backend_vk_init(0); - LOG_INF("%s: CLIP using Vulkan backend\n", __func__); -#endif +//#ifdef GGML_USE_CUDA +// new_clip->backend = ggml_backend_cuda_init(0); +// LOG_INF("%s: CLIP using CUDA backend\n", __func__); +//#endif +// +//#ifdef GGML_USE_METAL +// new_clip->backend = ggml_backend_metal_init(); +// LOG_INF("%s: CLIP using Metal backend\n", __func__); +//#endif +// +//#ifdef GGML_USE_CANN +// new_clip->backend = ggml_backend_cann_init(0); +// LOG_INF("%s: CLIP using CANN backend\n", __func__); +//#endif +// +//#ifdef GGML_USE_VULKAN +// new_clip->backend = ggml_backend_vk_init(0); +// LOG_INF("%s: CLIP using Vulkan backend\n", __func__); +//#endif +// +//#ifdef GGML_USE_SYCL +// new_clip->backend = ggml_backend_sycl_init(0); +// LOG_INF("%s: CLIP using SYCL backend\n", __func__); +//#endif if (!new_clip->backend) { new_clip->backend = ggml_backend_cpu_init(); @@ -1190,6 +1275,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx); } + idx = gguf_find_key(ctx, KEY_HAS_QWEN2VL_MERGER); + if (idx != -1) { + new_clip->has_qwen2vl_merger = gguf_get_val_bool(ctx, idx); + } // GGML_ASSERT(new_clip->has_llava_projector); // see monatis/clip.cpp for image and/or text encoding for semantic search GGML_ASSERT(new_clip->has_vision_encoder); @@ -1198,6 +1287,13 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { idx = get_key_idx(ctx, KEY_USE_GELU); new_clip->use_gelu = gguf_get_val_bool(ctx, idx); + try { + idx = get_key_idx(ctx, KEY_USE_SILU); + new_clip->use_silu = gguf_get_val_bool(ctx, idx); + } catch (std::runtime_error & /*e*/) { + new_clip->use_silu = false; + } + if (verbosity >= 1) { LOG_INF("%s: text_encoder: %d\n", __func__, new_clip->has_text_encoder); LOG_INF("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder); @@ -1373,11 +1469,16 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } try { - vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD); + vision_model.patch_embeddings_0 = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD); vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v")); } catch(const std::exception& /*e*/) { LOG_ERR("%s: failed to load vision model tensors\n", __func__); } + try { + vision_model.patch_embeddings_1 = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD_1); + } 
catch(const std::exception& /*e*/) { + new_clip->has_qwen2vl_merger = false; + } // LLaVA projection if (new_clip->proj_type == PROJECTOR_TYPE_MLP || new_clip->proj_type == PROJECTOR_TYPE_MLP_NORM) { @@ -1465,6 +1566,12 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { vision_model.mm_model_ln_post_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "weight")); vision_model.mm_model_ln_post_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "bias")); } + else if (new_clip->proj_type == PROJECTOR_TYPE_MERGER) { + vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight")); + vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias")); + vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight")); + vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias")); + } else { std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type]; throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str())); @@ -1503,6 +1610,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { new_clip->compute_alloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(new_clip->backend)); clip_image_f32_batch batch; batch.size = 1; + batch.data = nullptr; ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch, nullptr, false); ggml_gallocr_reserve(new_clip->compute_alloc, gf); size_t compute_memory_buffer_size = ggml_gallocr_get_buffer_size(new_clip->compute_alloc, 0); @@ -1516,6 +1624,10 @@ void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size ctx_clip->load_image_size = load_image_size; } +struct clip_image_size * clip_get_load_image_size(struct clip_ctx * ctx_clip) { + return ctx_clip->load_image_size; +} + struct clip_image_size * clip_image_size_init() { struct clip_image_size * load_image_size = new struct clip_image_size(); load_image_size->width = 448; @@ -1968,6 +2080,23 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, cli } return true; } + else if (ctx->has_qwen2vl_merger) { + clip_image_u8 * resized = clip_image_u8_init(); + auto patch_size = clip_patch_size(ctx) * 2; + int nx = ceil((float)img->nx / patch_size) * patch_size; + int ny = ceil((float)img->ny / patch_size) * patch_size; + bicubic_resize(*img, *resized, nx, ny); + + res_imgs->data = new clip_image_f32[1]; + // clip_image_f32 * res = clip_image_f32_init(); + normalize_image_u8_to_f32(resized, res_imgs->data, ctx->image_mean, ctx->image_std); + // res_imgs->data[0] = *res; + res_imgs->size = 1; + + // clip_image_f32_free(res); + clip_image_u8_free(resized); + return true; + } bool pad_to_square = true; if (!ctx->has_vision_encoder) { @@ -2157,6 +2286,13 @@ size_t clip_embd_nbytes(const struct clip_ctx * ctx) { return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float); } +size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_h, int img_w) { + clip_image_f32 img; + img.nx = img_w; + img.ny = img_h; + return clip_n_patches_by_img(ctx, &img) * clip_n_mmproj_embd(ctx) * sizeof(float); +} + int32_t clip_image_size(const struct clip_ctx * ctx) { return ctx->vision_model.hparams.image_size; } @@ -2178,6 +2314,13 @@ const int32_t * clip_image_grid(const struct clip_ctx * ctx) { } int clip_n_patches(const struct clip_ctx * ctx) { + clip_image_f32 img; + img.nx = ctx->vision_model.hparams.image_size; + img.ny = 
ctx->vision_model.hparams.image_size; + return clip_n_patches_by_img(ctx, &img); +} + +int clip_n_patches_by_img(const struct clip_ctx * ctx, struct clip_image_f32 * img) { const auto & params = ctx->vision_model.hparams; int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size); @@ -2191,6 +2334,11 @@ int clip_n_patches(const struct clip_ctx * ctx) { else if (ctx->minicpmv_version == 3) { n_patches = 64; } + } else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) { + int patch_size = params.patch_size * 2; + int x_patch = img->nx / patch_size + (int)(img->nx % patch_size > 0); + int y_patch = img->ny / patch_size + (int)(img->ny % patch_size > 0); + n_patches = x_patch * y_patch; } return n_patches; @@ -2319,7 +2467,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima const int image_size = hparams.image_size; int image_size_width = image_size; int image_size_height = image_size; - if (ctx->has_minicpmv_projector) { + if (ctx->has_minicpmv_projector || ctx->has_qwen2vl_merger) { image_size_width = imgs->data[0].nx; image_size_height = imgs->data[0].ny; } @@ -2339,7 +2487,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima for (size_t i = 0; i < imgs->size; i++) { const int nx = imgs->data[i].nx; const int ny = imgs->data[i].ny; - if (!(ctx->has_minicpmv_projector || ctx->has_qwen2vl_merger)) { GGML_ASSERT(nx == image_size && ny == image_size); } @@ -2397,9 +2545,9 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h)); float * pos_embed_data = (float *)malloc(ggml_nbytes(pos_embed)); - for(int i=0;i<pos_w * pos_h;++i){ for(int j=0;j<embed_dim;++j){ pos_embed_data[i*embed_dim+j]=pos_embed_t[i][j]; } } + for(int i=0;i < pos_w * pos_h; ++i){ for(int j=0;j < embed_dim; ++j){ pos_embed_data[i * embed_dim + j] = pos_embed_t[i][j]; } } + if (ctx->has_qwen2vl_merger) { + struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions"); + + const int pw = image_size_width / patch_size; + const int ph = image_size_height / patch_size; + int* positions_data = (int*)malloc(ggml_nbytes(positions)); + + int ptr = 0; + for (int y = 0; y < ph; y+=2) + { + for (int x = 0; x < pw; x+=2) + { + for (int dy = 0; dy < 2; dy++) { + for (int dx = 0; dx < 2; dx++) { + positions_data[ptr] = y + dy; + positions_data[num_patches + ptr] = x + dx; + positions_data[num_patches * 2 + ptr] = y + dy; + positions_data[num_patches * 3 + ptr] = x + dx; + ptr++; + } + } + } + } + + ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions)); + free(positions_data); + } + else { struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions"); int* positions_data = (int*)malloc(ggml_nbytes(positions)); @@ -2428,16 +2603,16 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima } ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions)); free(positions_data); - } - { - struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches"); - int* patches_data = (int*)malloc(ggml_nbytes(patches)); - for (int i = 0; i < num_patches; i++) { - patches_data[i] = i + 1; + { + struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches"); + int* patches_data = (int*)malloc(ggml_nbytes(patches)); + for (int i = 0; i < num_patches; i++) { + patches_data[i] = i + 1; + } + ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches)); + free(patches_data); } - ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches)); - free(patches_data); } } @@ -2610,6 +2785,9 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) { return
3584; } } + if (ctx->proj_type == PROJECTOR_TYPE_MERGER) { + return ctx->vision_model.mm_1_b->ne[0]; + } std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type]; throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str())); @@ -2621,3 +2799,21 @@ int clip_is_minicpmv(const struct clip_ctx * ctx) { } return 0; } + +bool clip_is_qwen2vl(const struct clip_ctx * ctx) { + return ctx->has_qwen2vl_merger; +} + + +bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec) { + clip_image_f32 clip_img; + clip_img.buf.resize(h * w * 3); + for (int i = 0; i < h*w*3; i++) + { + clip_img.buf[i] = img[i]; + } + clip_img.nx = w; + clip_img.ny = h; + clip_image_encode(ctx, n_threads, &clip_img, vec); + return true; +} diff --git a/examples/llava/clip.h b/examples/llava/clip.h index 78588bdf1745c..1603edd265e6c 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -45,6 +45,7 @@ CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity CLIP_API void clip_free(struct clip_ctx * ctx); CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx); +CLIP_API size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_h, int img_w); CLIP_API int32_t clip_image_size (const struct clip_ctx * ctx); CLIP_API int32_t clip_patch_size (const struct clip_ctx * ctx); @@ -55,11 +56,13 @@ CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx); CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx); -CLIP_API int clip_n_patches (const struct clip_ctx * ctx); -CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx); +CLIP_API int clip_n_patches (const struct clip_ctx * ctx); +CLIP_API int clip_n_patches_by_img (const struct clip_ctx * ctx, struct clip_image_f32 * img); +CLIP_API int clip_n_mmproj_embd (const struct clip_ctx * ctx); CLIP_API int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip); CLIP_API void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size); +CLIP_API struct clip_image_size * clip_get_load_image_size(struct clip_ctx * ctx_clip); CLIP_API struct clip_image_size * clip_image_size_init(); CLIP_API struct clip_image_u8 * clip_image_u8_init (); @@ -86,6 +89,9 @@ CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, cons CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype); CLIP_API int clip_is_minicpmv(const struct clip_ctx * ctx); +CLIP_API bool clip_is_qwen2vl(const struct clip_ctx * ctx); + +CLIP_API bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec); #ifdef __cplusplus } diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 1610985858fc9..2691c6e6b2dd2 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -191,7 +191,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_ LOG("\n"); - struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sparams); + struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling); if (!smpl) { LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__); exit(1); diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index be69885408433..16f30c56ceac9 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -11,13 +11,17 @@ #include #include -#define die(msg) do { 
fputs("error: " msg "\n", stderr); exit(1); } while (0) -#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0) - -#define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0) -#define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0) -#define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0) -#define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0) +#if defined(LLAVA_LOG_OFF) +# define LOG_INF(...) +# define LOG_WRN(...) +# define LOG_ERR(...) +# define LOG_DBG(...) +#else // defined(LLAVA_LOG_OFF) +# define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0) +# define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0) +# define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0) +# define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0) +#endif // defined(LLAVA_LOG_OFF) // RGB uint8 image struct clip_image_u8 { @@ -255,25 +259,33 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli const char * mm_patch_merge_type = clip_patch_merge_type(ctx_clip); - if (clip_is_minicpmv(ctx_clip)) { + if (clip_is_minicpmv(ctx_clip) || clip_is_qwen2vl(ctx_clip)) { std::vector image_embd_v; image_embd_v.resize(img_res_v.size); struct clip_image_size * load_image_size = clip_image_size_init(); + for (size_t i = 0; i < img_res_v.size; i++) { const int64_t t_img_enc_step_start_us = ggml_time_us(); - image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); + image_embd_v[i] = (float *)malloc(clip_embd_nbytes_by_img(ctx_clip, img_res_v.data[i].nx, img_res_v.data[i].ny)); int patch_size=14; load_image_size->width = img_res_v.data[i].nx; load_image_size->height = img_res_v.data[i].ny; clip_add_load_image_size(ctx_clip, load_image_size); + bool encoded = false; - int has_minicpmv_projector = clip_is_minicpmv(ctx_clip); - if (has_minicpmv_projector == 2) { - encoded = clip_image_encode(ctx_clip, n_threads, only_v2_5_reshape_by_patch(&img_res_v.data[i], patch_size), image_embd_v[i]); - } - else if (has_minicpmv_projector == 3) { + if (clip_is_qwen2vl(ctx_clip)) { encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]); } + else { + int has_minicpmv_projector = clip_is_minicpmv(ctx_clip); + if (has_minicpmv_projector == 2) { + encoded = clip_image_encode(ctx_clip, n_threads, only_v2_5_reshape_by_patch(&img_res_v.data[i], patch_size), image_embd_v[i]); + } + else if (has_minicpmv_projector == 3) { + encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]); + } + } + if (!encoded) { LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) img_res_v.size); return false; @@ -286,8 +298,11 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli int n_img_pos_out = 0; for (size_t i = 0; i < image_embd_v.size(); i++) { - std::memcpy(image_embd + n_img_pos_out * clip_n_mmproj_embd(ctx_clip), image_embd_v[i], clip_embd_nbytes(ctx_clip)); - n_img_pos_out += clip_n_patches(ctx_clip); + std::memcpy( + image_embd + n_img_pos_out * clip_n_mmproj_embd(ctx_clip), + image_embd_v[i], + clip_embd_nbytes_by_img(ctx_clip, img_res_v.data[i].nx, img_res_v.data[i].ny)); + n_img_pos_out += clip_n_patches_by_img(ctx_clip, &img_res_v.data[i]); } *n_img_pos = n_img_pos_out; for (size_t i = 0; i < image_embd_v.size(); i++) { @@ -383,7 +398,13 @@ bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, co if (clip_is_minicpmv(ctx_clip)) { 
num_max_patches = 10; } - float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)*num_max_patches); // TODO: base on gridsize/llava model + float * image_embd; + if (clip_is_qwen2vl(ctx_clip)) { + // qwen2vl doesn't split the image into chunks, so `num_max_patches` is not needed. + image_embd = (float *)malloc(clip_embd_nbytes_by_img(ctx_clip, img->nx, img->ny)); + } else { + image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)*num_max_patches); // TODO: base on gridsize/llava model + } if (!image_embd) { LOG_ERR("Unable to allocate memory for image embeddings\n"); return false; } @@ -498,10 +519,16 @@ static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long errno = 0; size_t ret = fread(buffer, 1, fileSize, file); // Read the file into the buffer if (ferror(file)) { - die_fmt("read error: %s", strerror(errno)); + LOG_ERR("read error: %s", strerror(errno)); + free(buffer); + fclose(file); + return false; } if (ret != (size_t) fileSize) { - die("unexpectedly reached end of file"); + LOG_ERR("unexpectedly reached end of file"); + free(buffer); + fclose(file); + return false; } fclose(file); // Close the file diff --git a/examples/llava/minicpmv-cli.cpp b/examples/llava/minicpmv-cli.cpp index cbecec343c640..e9cbb51ed90ab 100644 --- a/examples/llava/minicpmv-cli.cpp +++ b/examples/llava/minicpmv-cli.cpp @@ -237,7 +237,7 @@ static struct common_sampler * llama_init(struct llava_context * ctx_llava, comm LOG_INF("\n"); - struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sparams); + struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling); return smpl; } diff --git a/examples/llava/qwen2_vl_surgery.py b/examples/llava/qwen2_vl_surgery.py new file mode 100644 index 0000000000000..c87606b4fdf4f --- /dev/null +++ b/examples/llava/qwen2_vl_surgery.py @@ -0,0 +1,165 @@ +import argparse +import os +from typing import Dict + +import torch +import numpy as np +from gguf import * +from transformers import ( + Qwen2VLForConditionalGeneration, + Qwen2VLProcessor, + AutoProcessor, + Qwen2VLConfig ) + + +VISION = "clip.vision" + + +def k(raw_key: str, arch: str) -> str: + return raw_key.format(arch=arch) + + +def to_gguf_name(name: str) -> str: + og = name + name = name.replace("text_model", "t").replace("vision_model", "v") + name = name.replace("blocks", "blk").replace("embeddings.", "") + name = name.replace("attn.", "attn_") + name = name.replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("proj.", "out.") + # name = name.replace("layrnorm", "ln").replace("layer_norm", "ln").replace("layernorm", "ln") + name = name.replace("norm1", "ln1").replace("norm2", "ln2") + name = name.replace("merger.mlp", 'mm') + print(f"[to_gguf_name] {og} --> {name}") + return name + + +def find_vision_tensors(qwen2vl, dtype) -> Dict[str, np.ndarray]: + vision_model = qwen2vl.visual + tensor_map = {} + for name, ten in vision_model.state_dict().items(): + ten = ten.numpy() + if 'qkv' in name: + if ten.ndim == 2: # weight + c3, _ = ten.shape + else: # bias + c3 = ten.shape[0] + assert c3 % 3 == 0 + c = c3 // 3 + wq = ten[:c] + wk = ten[c: c * 2] + wv = ten[c * 2:] + tensor_map[to_gguf_name(f"vision_model.{name}").replace("qkv", "q")] = wq + tensor_map[to_gguf_name(f"vision_model.{name}").replace("qkv", "k")] = wk + tensor_map[to_gguf_name(f"vision_model.{name}").replace("qkv", "v")] = wv + elif 'merger' in name: + if name.endswith("ln_q.weight"): + tensor_map['v.post_ln.weight'] = ten + elif name.endswith("ln_q.bias"): +
tensor_map['v.post_ln.bias'] = ten + else: + # "merger.mlp.%d.weight/bias" --> "mm.%d.weight/bias" + tensor_map[to_gguf_name(name)] = ten + elif 'patch_embed.proj.weight' in name: + # NOTE: split Conv3D into Conv2Ds + c1, c2, kt, kh, kw = ten.shape + assert kt == 2, "Current implementation only supports temporal_patch_size of 2" + tensor_map["v.patch_embd.weight"] = ten[:, :, 0, ...] + tensor_map["v.patch_embd.weight.1"] = ten[:, :, 1, ...] + else: + tensor_map[to_gguf_name(f"vision_model.{name}")] = ten + + for new_name, ten in tensor_map.items(): + if ten.ndim <= 1 or new_name.endswith("_norm.weight"): + tensor_map[new_name] = ten.astype(np.float32) + else: + tensor_map[new_name] = ten.astype(dtype) + tensor_map["v.position_embd.weight"] = np.zeros([10, 10], dtype=np.float32) # dummy tensor, just here as a placeholder + return tensor_map + + +def main(args): + if args.data_type == 'fp32': + dtype = torch.float32 + np_dtype = np.float32 + ftype = 0 + elif args.data_type == 'fp16': + dtype = torch.float32 + np_dtype = np.float16 + ftype = 1 + else: + raise ValueError() + + local_model = False + model_path = "" + model_name = args.model_name + print("model_name: ", model_name) + qwen2vl = Qwen2VLForConditionalGeneration.from_pretrained( + model_name, torch_dtype=dtype, device_map="cpu" + ) + cfg: Qwen2VLConfig = qwen2vl.config # type: ignore[reportAssignmentType] + vcfg = cfg.vision_config + + if os.path.isdir(model_name): + local_model = True + if model_name.endswith(os.sep): + model_name = model_name[:-1] + model_path = model_name + model_name = os.path.basename(model_name) + fname_out = f"{model_name.replace('/', '-').lower()}-vision.gguf" + + fout = GGUFWriter(path=fname_out, arch="clip") + fout.add_description("image encoder for Qwen2VL") + + fout.add_file_type(ftype) + fout.add_bool("clip.has_text_encoder", False) + fout.add_bool("clip.has_vision_encoder", True) + fout.add_bool("clip.has_qwen2vl_merger", True) + fout.add_string("clip.projector_type", "qwen2vl_merger") + + print(cfg.vision_config) + if 'silu' in cfg.vision_config.hidden_act.lower(): + fout.add_bool("clip.use_silu", True) + fout.add_bool("clip.use_gelu", False) + elif 'gelu' in cfg.vision_config.hidden_act.lower(): + fout.add_bool("clip.use_silu", False) + fout.add_bool("clip.use_gelu", 'quick' not in cfg.vision_config.hidden_act.lower()) + else: + raise ValueError() + + tensor_map = find_vision_tensors(qwen2vl, np_dtype) + for name, data in tensor_map.items(): + fout.add_tensor(name, data) + + fout.add_uint32("clip.vision.patch_size", vcfg.patch_size) + fout.add_uint32("clip.vision.image_size", 14 * 40) # some reasonable size that is divisible by (14*2) + fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), vcfg.embed_dim) + fout.add_uint32("clip.vision.projection_dim", vcfg.hidden_size) + fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), vcfg.num_heads) + fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), 1e-6) + fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), vcfg.depth) + fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), 0) # not sure what this does, put 0 here as a placeholder + fout.add_name(model_name) + """ + HACK: Since the vision rope related parameters aren't stored in the `Qwen2VLConfig`, + they are hardcoded in `clip_image_build_graph` in `clip.cpp`.
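For reference, the vision M-RoPE position ids that `clip.cpp` feeds to `ggml_rope_multi` can be sketched in Python as follows (illustrative only, not part of the conversion script; the function name is hypothetical and mirrors the positions_data loop in clip_image_batch_encode):

def qwen2vl_vision_positions(ph, pw):
    # four id planes of length ph*pw, filled by walking 2x2 patch blocks
    # in raster order so the ids line up with the merger's token grouping
    num = ph * pw
    pos = [0] * (num * 4)
    ptr = 0
    for y in range(0, ph, 2):
        for x in range(0, pw, 2):
            for dy in range(2):
                for dx in range(2):
                    pos[ptr] = y + dy            # plane 0: row id
                    pos[num + ptr] = x + dx      # plane 1: column id
                    pos[num * 2 + ptr] = y + dy  # plane 2: row id (repeated)
                    pos[num * 3 + ptr] = x + dx  # plane 3: column id (repeated)
                    ptr += 1
    return pos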
+ """ + + if local_model: + processor: Qwen2VLProcessor = AutoProcessor.from_pretrained(model_path) + else: + processor: Qwen2VLProcessor = AutoProcessor.from_pretrained(model_name) + fout.add_array("clip.vision.image_mean", processor.image_processor.image_mean) # type: ignore[reportAttributeAccessIssue] + fout.add_array("clip.vision.image_std", processor.image_processor.image_std) # type: ignore[reportAttributeAccessIssue] + + fout.write_header_to_file() + fout.write_kv_data_to_file() + fout.write_tensors_to_file() + fout.close() + print("save model as: ", fname_out) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("model_name", nargs='?', default="Qwen/Qwen2-VL-2B-Instruct") + parser.add_argument("--data_type", nargs='?', choices=['fp32', 'fp16'], default="fp32") + args = parser.parse_args() + main(args) diff --git a/examples/llava/qwen2vl-cli.cpp b/examples/llava/qwen2vl-cli.cpp new file mode 100644 index 0000000000000..e86a60280aed6 --- /dev/null +++ b/examples/llava/qwen2vl-cli.cpp @@ -0,0 +1,581 @@ +#include "arg.h" +#include "base64.hpp" +#include "log.h" +#include "common.h" +#include "sampling.h" +#include "clip.h" +#include "llava.h" +#include "llama.h" +#include "ggml.h" + +#ifdef GGML_USE_CUDA +#include "ggml-cuda.h" +#endif +#ifdef NDEBUG +#include "ggml-alloc.h" +#include "ggml-backend.h" +#endif + +#include +#include +#include +#include +#include +#include +#include + + +static bool qwen2vl_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, + int n_batch, int * n_past, int * st_pos_id, struct clip_image_size * image_size) { + int n_embd = llama_n_embd(llama_get_model(ctx_llama)); + const int patch_size = 14 * 2; + const int ph = image_size->height / patch_size + (image_size->height % patch_size > 0); + const int pw = image_size->width / patch_size + (image_size->width % patch_size > 0); + auto img_tokens = image_embed->n_image_pos; + // llama_pos mrope_pos[img_tokens * 4]; + std::vector mrope_pos; + mrope_pos.resize(img_tokens * 4); + + for (int y = 0; y < ph; y++) + { + for (int x = 0; x < pw; x++) + { + int i = y * pw + x; + mrope_pos[i] = *st_pos_id; + mrope_pos[i + img_tokens] = *st_pos_id + y; + mrope_pos[i + img_tokens * 2] = *st_pos_id + x; + mrope_pos[i + img_tokens * 3] = 0; + } + } + *st_pos_id += std::max(pw, ph); + + int processed = 0; + std::vector batch_mrope_pos; + batch_mrope_pos.resize(img_tokens * 4); + + for (int i = 0; i < img_tokens; i += n_batch) { + int n_eval = img_tokens - i; + if (n_eval > n_batch) { + n_eval = n_batch; + } + + // llama_pos batch_mrope_pos[n_eval * 4]; + std::fill(batch_mrope_pos.begin(), batch_mrope_pos.end(), 0); + memcpy(batch_mrope_pos.data(), &mrope_pos[processed], n_eval * sizeof(llama_pos)); + memcpy(&batch_mrope_pos[n_eval * 1], &mrope_pos[img_tokens * 1 + processed], n_eval * sizeof(llama_pos)); + memcpy(&batch_mrope_pos[n_eval * 2], &mrope_pos[img_tokens * 2 + processed], n_eval * sizeof(llama_pos)); + memcpy(&batch_mrope_pos[n_eval * 3], &mrope_pos[img_tokens * 3 + processed], n_eval * sizeof(llama_pos)); + + llama_batch batch = { + int32_t(n_eval), // n_tokens + nullptr, // token + (image_embed->embed+i*n_embd), // embed + batch_mrope_pos.data(), // pos + nullptr, // n_seq_id + nullptr, // seq_id + nullptr, // logits + }; + + if (llama_decode(ctx_llama, batch)) { + LOG_ERR("%s : failed to eval\n", __func__); + return false; + } + *n_past += n_eval; + processed += n_eval; + } + return true; +} + + +static bool eval_tokens(struct llama_context 
* ctx_llama, std::vector tokens, int n_batch, int * n_past, int * st_pos_id) { + int N = (int) tokens.size(); + std::vector pos; + for (int i = 0; i < N; i += n_batch) { + int n_eval = (int) tokens.size() - i; + if (n_eval > n_batch) { + n_eval = n_batch; + } + auto batch = llama_batch_get_one(&tokens[i], n_eval); + // TODO: add mrope pos ids somewhere else + pos.resize(batch.n_tokens * 4); + std::fill(pos.begin(), pos.end(), 0); + for (int j = 0; j < batch.n_tokens * 3; j ++) { + pos[j] = *st_pos_id + (j % batch.n_tokens); + } + batch.pos = pos.data(); + + if (llama_decode(ctx_llama, batch)) { + LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past); + return false; + } + *n_past += n_eval; + *st_pos_id += n_eval; + } + return true; +} + +static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past, int * st_pos_id) { + std::vector tokens; + tokens.push_back(id); + return eval_tokens(ctx_llama, tokens, 1, n_past, st_pos_id); +} + +static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, int * st_pos_id, bool add_bos){ + std::string str2 = str; + std::vector embd_inp = common_tokenize(ctx_llama, str2, add_bos, true); + eval_tokens(ctx_llama, embd_inp, n_batch, n_past, st_pos_id); + return true; +} + +static const char * sample(struct common_sampler * smpl, + struct llama_context * ctx_llama, + int * n_past, int * st_pos_id) { + const llama_token id = common_sampler_sample(smpl, ctx_llama, -1); + common_sampler_accept(smpl, id, true); + static std::string ret; + if (llama_token_is_eog(llama_get_model(ctx_llama), id)) { + ret = ""; + } else { + ret = common_token_to_piece(ctx_llama, id); + } + eval_id(ctx_llama, id, n_past, st_pos_id); + return ret.c_str(); +} + +static const char* IMG_BASE64_TAG_BEGIN = ""; + +static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) { + begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); + end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out); +} + +static bool prompt_contains_image(const std::string& prompt) { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + return (begin != std::string::npos); +} + +// replaces the base64 image tag in the prompt with `replacement` +static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) { + size_t img_base64_str_start, img_base64_str_end; + find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end); + if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) { + LOG_ERR("%s: invalid base64 image tag. 
must be %s%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END); + return NULL; + } + + auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN); + auto base64_bytes_count = img_base64_str_end - base64_bytes_start; + auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count ); + + auto required_bytes = base64::required_encode_size(base64_str.size()); + auto img_bytes = std::vector(required_bytes); + base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); + + auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size()); + if (!embed) { + LOG_ERR("%s: could not load image from base64 string.\n", __func__); + return NULL; + } + + return embed; +} + +static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { + size_t begin, end; + find_image_tag_in_prompt(prompt, begin, end); + if (begin == std::string::npos || end == std::string::npos) { + return prompt; + } + auto pre = prompt.substr(0, begin); + auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END)); + return pre + replacement + post; +} + +struct llava_context { + struct clip_ctx * ctx_clip = NULL; + struct llama_context * ctx_llama = NULL; + struct llama_model * model = NULL; +}; + +static void print_usage(int, char ** argv) { + LOG("\n example usage:\n"); + LOG("\n %s -m --mmproj --image --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); + LOG("\n note: a lower temperature value like 0.1 is recommended for better quality.\n"); +} + +static struct llava_image_embed * load_image(llava_context * ctx_llava, common_params * params, const std::string & fname) { + + // load and preprocess the image + llava_image_embed * embed = NULL; + auto prompt = params->prompt; + if (prompt_contains_image(prompt)) { + if (!params->image.empty()) { + LOG_INF("using base64 encoded image instead of command line image path\n"); + } + embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->cpuparams.n_threads, prompt); + if (!embed) { + LOG_ERR("%s: can't load image from prompt\n", __func__); + return NULL; + } + params->prompt = remove_image_from_prompt(prompt); + } else { + embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->cpuparams.n_threads, fname.c_str()); + if (!embed) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str()); + return NULL; + } + } + + return embed; +} + +static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, common_params * params, const std::string & prompt) { + int n_past = 0; + int cur_pos_id = 0; + + const int max_tgt_len = params->n_predict < 0 ? 
256 : params->n_predict; + + std::string system_prompt, user_prompt; + size_t image_pos = prompt.find("<|vision_start|>"); + if (image_pos != std::string::npos) { + // new templating mode: Provide the full prompt including system message and use as a placeholder for the image + system_prompt = prompt.substr(0, image_pos); + user_prompt = prompt.substr(image_pos + std::string("<|vision_pad|>").length()); + LOG_INF("system_prompt: %s\n", system_prompt.c_str()); + if (params->verbose_prompt) { + auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true); + for (int i = 0; i < (int) tmp.size(); i++) { + LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str()); + } + } + LOG_INF("user_prompt: %s\n", user_prompt.c_str()); + if (params->verbose_prompt) { + auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true); + for (int i = 0; i < (int) tmp.size(); i++) { + LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str()); + } + } + } else { + // llava-1.5 native mode + system_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|>"; + user_prompt = "<|vision_end|>" + prompt + "<|im_end|>\n<|im_start|>assistant\n"; + if (params->verbose_prompt) { + auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true); + for (int i = 0; i < (int) tmp.size(); i++) { + LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str()); + } + } + } + + eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, &cur_pos_id, true); + if (image_embed != nullptr) { + auto image_size = clip_get_load_image_size(ctx_llava->ctx_clip); + qwen2vl_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past, &cur_pos_id, image_size); + } + eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, &cur_pos_id, false); + + // generate the response + + LOG("\n"); + + struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling); + if (!smpl) { + LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__); + exit(1); + } + + std::string response = ""; + for (int i = 0; i < max_tgt_len; i++) { + const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past, &cur_pos_id); + response += tmp; + if (strcmp(tmp, "") == 0) break; + if (strstr(tmp, "###")) break; // Yi-VL behavior + LOG("%s", tmp); + if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works) + if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6 + if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6 + + fflush(stdout); + } + + common_sampler_free(smpl); + LOG("\n"); +} + +static struct llama_model * llava_init(common_params * params) { + llama_backend_init(); + llama_numa_init(params->numa); + + llama_model_params model_params = common_model_params_to_llama(*params); + + llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params); + if (model == NULL) { + LOG_ERR("%s: unable to load model\n" , __func__); + return NULL; + } + return model; +} + +static struct llava_context * llava_init_context(common_params * params, llama_model * model) { + const char * clip_path = params->mmproj.c_str(); + + auto prompt = params->prompt; + if (prompt.empty()) { + prompt = "describe the image in detail."; + } + + auto ctx_clip = clip_model_load(clip_path, 
/*verbosity=*/ 1); + + + llama_context_params ctx_params = common_context_params_to_llama(*params); + ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings + + llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); + + if (ctx_llama == NULL) { + LOG_ERR("%s: failed to create the llama_context\n" , __func__); + return NULL; + } + + auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context)); + + ctx_llava->ctx_llama = ctx_llama; + ctx_llava->ctx_clip = ctx_clip; + ctx_llava->model = model; + return ctx_llava; +} + +static void llava_free(struct llava_context * ctx_llava) { + if (ctx_llava->ctx_clip) { + clip_free(ctx_llava->ctx_clip); + ctx_llava->ctx_clip = NULL; + } + + llama_free(ctx_llava->ctx_llama); + llama_free_model(ctx_llava->model); + llama_backend_free(); +} + +#ifndef NDEBUG + +static void debug_test_mrope_2d() { + // 1. Initialize backend + ggml_backend_t backend = NULL; + std::string backend_name = ""; +#ifdef GGML_USE_CUDA + fprintf(stderr, "%s: using CUDA backend\n", __func__); + backend = ggml_backend_cuda_init(0); // init device 0 + backend_name = "cuda"; + if (!backend) { + fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__); + } +#endif + // if there aren't GPU Backends fallback to CPU backend + if (!backend) { + backend = ggml_backend_cpu_init(); + backend_name = "cpu"; + } + + // Calculate the size needed to allocate + size_t ctx_size = 0; + ctx_size += 2 * ggml_tensor_overhead(); // tensors + // no need to allocate anything else! + + // 2. Allocate `ggml_context` to store tensor data + struct ggml_init_params params = { + /*.mem_size =*/ ctx_size, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_backend_alloc_ctx_tensors() + }; + struct ggml_context * ctx = ggml_init(params); + + struct ggml_tensor * inp_raw = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 128, 12, 30); + ggml_set_name(inp_raw, "inp_raw"); + ggml_set_input(inp_raw); + + struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 30 * 4); + ggml_set_name(pos, "pos"); + ggml_set_input(pos); + + std::vector dummy_q; + dummy_q.resize(128 * 12 * 30); + std::fill(dummy_q.begin(), dummy_q.end(), 0.1); + // memcpy(inp_raw->data, dummy_q.data(), 128 * 12 * 30 * ggml_element_size(inp_raw)); + + std::vector pos_id; + pos_id.resize(30 * 4); + for (int i = 0; i < 30; i ++) { + pos_id[i] = i; + pos_id[i + 30] = i + 10; + pos_id[i + 60] = i + 20; + pos_id[i + 90] = i + 30; + } + int sections[4] = {32, 32, 0, 0}; + + // 4. Allocate a `ggml_backend_buffer` to store all tensors + ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx, backend); + + // 5. Copy tensor data from main memory (RAM) to backend buffer + ggml_backend_tensor_set(inp_raw, dummy_q.data(), 0, ggml_nbytes(inp_raw)); + ggml_backend_tensor_set(pos, pos_id.data(), 0, ggml_nbytes(pos)); + + // 6. 
Create a `ggml_cgraph` for the rope operation + struct ggml_cgraph * gf = NULL; + struct ggml_context * ctx_cgraph = NULL; + + // create a temporary context to build the graph + struct ggml_init_params params0 = { + /*.mem_size =*/ ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_gallocr_alloc_graph() + }; + ctx_cgraph = ggml_init(params0); + gf = ggml_new_graph(ctx_cgraph); + + struct ggml_tensor * result0 = ggml_rope_multi( + ctx_cgraph, inp_raw, pos, nullptr, + 128/2, sections, LLAMA_ROPE_TYPE_VISION, 32768, 1000000, 1, + 0, 1, 32, 1); + + // Add "result" tensor and all of its dependencies to the cgraph + ggml_build_forward_expand(gf, result0); + + // 7. Create a `ggml_gallocr` for cgraph computation + ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend)); + ggml_gallocr_alloc_graph(allocr, gf); + + // 9. Run the computation + int n_threads = 1; // Optional: number of threads to perform some operations with multi-threading + if (ggml_backend_is_cpu(backend)) { + ggml_backend_cpu_set_n_threads(backend, n_threads); + } + ggml_backend_graph_compute(backend, gf); + + // 10. Retrieve results (output tensors) + // in this example, output tensor is always the last tensor in the graph + struct ggml_tensor * result = result0; + // struct ggml_tensor * result = gf->nodes[gf->n_nodes - 1]; + float * result_data = (float *)malloc(ggml_nbytes(result)); + // because the tensor data is stored in device buffer, we need to copy it back to RAM + ggml_backend_tensor_get(result, result_data, 0, ggml_nbytes(result)); + const std::string bin_file = "mrope_2d_" + backend_name +".bin"; + std::ofstream outFile(bin_file, std::ios::binary); + + if (outFile.is_open()) { + outFile.write(reinterpret_cast(result_data), ggml_nbytes(result)); + outFile.close(); + std::cout << "Data successfully written to " + bin_file << std::endl; + } else { + std::cerr << "Error opening file!" << std::endl; + } + + free(result_data); + // 11. Free memory and exit + ggml_free(ctx_cgraph); + ggml_gallocr_free(allocr); + ggml_free(ctx); + ggml_backend_buffer_free(buffer); + ggml_backend_free(backend); +} + +static void debug_dump_img_embed(struct llava_context * ctx_llava) { + int n_embd = llama_n_embd(llama_get_model(ctx_llava->ctx_llama)); + int ne = n_embd * 4; + float vals[56 * 56 * 3]; + // float embd[ne]; + std::vector embd; + embd.resize(ne); + + for (int i = 0; i < 56*56; i++) + { + for (int c = 0; c < 3; c++) + vals[i * 3 + c] = (float)(i % (56 * 56)) / (56*56); + } + + clip_encode_float_image(ctx_llava->ctx_clip, 16, vals, 56, 56, embd.data()); + + std::ofstream outFile("img_embed.bin", std::ios::binary); + if (outFile.is_open()) { + outFile.write(reinterpret_cast(embd.data()), ne * sizeof(float)); + + outFile.close(); + std::cout << "Data successfully written to img_embed.bin" << std::endl; + } else { + std::cerr << "Error opening file!"
<< std::endl; + } +} + +#endif + + +int main(int argc, char ** argv) { + ggml_time_init(); + + common_params params; + + if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) { + return 1; + } + + common_init(); + + if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) { + print_usage(argc, argv); + return 1; + } + + auto * model = llava_init(¶ms); + if (model == NULL) { + fprintf(stderr, "%s: error: failed to init llava model\n", __func__); + return 1; + } + + if (prompt_contains_image(params.prompt)) { + auto * ctx_llava = llava_init_context(¶ms, model); + + auto * image_embed = load_image(ctx_llava, ¶ms, ""); + + // process the prompt + process_prompt(ctx_llava, image_embed, ¶ms, params.prompt); + + llama_perf_context_print(ctx_llava->ctx_llama); + llava_image_embed_free(image_embed); + ctx_llava->model = NULL; + llava_free(ctx_llava); +#ifndef NDEBUG + } else if (params.image[0].empty()) { + auto ctx_llava = llava_init_context(¶ms, model); + + debug_test_mrope_2d(); + debug_dump_img_embed(ctx_llava); + + llama_perf_context_print(ctx_llava->ctx_llama); + ctx_llava->model = NULL; + llava_free(ctx_llava); +#endif + } else { + for (auto & image : params.image) { + auto * ctx_llava = llava_init_context(¶ms, model); + + auto * image_embed = load_image(ctx_llava, ¶ms, image); + if (!image_embed) { + LOG_ERR("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str()); + return 1; + } + + // process the prompt + process_prompt(ctx_llava, image_embed, ¶ms, params.prompt); + + llama_perf_context_print(ctx_llava->ctx_llama); + llava_image_embed_free(image_embed); + ctx_llava->model = NULL; + llava_free(ctx_llava); + } + } + + llama_free_model(model); + + return 0; +} diff --git a/examples/lookahead/CMakeLists.txt b/examples/lookahead/CMakeLists.txt index f0ae5cd89244c..3468613142de0 100644 --- a/examples/lookahead/CMakeLists.txt +++ b/examples/lookahead/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-lookahead) add_executable(${TARGET} lookahead.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp index 3c0ccfea2ccd7..8d0ef8b3d75e6 100644 --- a/examples/lookahead/lookahead.cpp +++ b/examples/lookahead/lookahead.cpp @@ -115,7 +115,7 @@ int main(int argc, char ** argv) { llama_batch batch = llama_batch_init(params.n_ctx, 0, W + G + 1); // target model sampling context - struct common_sampler * smpl = common_sampler_init(model, params.sparams); + struct common_sampler * smpl = common_sampler_init(model, params.sampling); // verification n-grams std::vector ngrams_cur(G); diff --git a/examples/lookup/CMakeLists.txt b/examples/lookup/CMakeLists.txt index ef19fe25e31a3..fba78ceda6fd7 100644 --- a/examples/lookup/CMakeLists.txt +++ b/examples/lookup/CMakeLists.txt @@ -2,22 +2,22 @@ set(TARGET llama-lookup) add_executable(${TARGET} lookup.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) set(TARGET llama-lookup-create) add_executable(${TARGET} lookup-create.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) 
-target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) set(TARGET llama-lookup-merge) add_executable(${TARGET} lookup-merge.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) set(TARGET llama-lookup-stats) add_executable(${TARGET} lookup-stats.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/lookup/lookup-stats.cpp b/examples/lookup/lookup-stats.cpp index 7faebe7ba11fc..dff07c075c47f 100644 --- a/examples/lookup/lookup-stats.cpp +++ b/examples/lookup/lookup-stats.cpp @@ -21,7 +21,7 @@ int main(int argc, char ** argv){ common_init(); - const int n_draft = params.n_draft; + const int n_draft = params.speculative.n_max; // init llama.cpp llama_backend_init(); @@ -40,6 +40,7 @@ int main(int argc, char ** argv){ common_ngram_cache ngram_cache_context; common_ngram_cache ngram_cache_dynamic; common_ngram_cache ngram_cache_static; + int64_t t_draft_flat_us = 0; int64_t t_draft_us = 0; diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp index a04728b1834cc..4d92bb2385358 100644 --- a/examples/lookup/lookup.cpp +++ b/examples/lookup/lookup.cpp @@ -22,7 +22,7 @@ int main(int argc, char ** argv){ common_init(); // max. number of additional tokens to draft if match is found - const int n_draft = params.n_draft; + const int n_draft = params.speculative.n_max; const bool dump_kv_cache = params.dump_kv_cache; @@ -102,7 +102,7 @@ int main(int argc, char ** argv){ bool has_eos = false; - struct common_sampler * smpl = common_sampler_init(model, params.sparams); + struct common_sampler * smpl = common_sampler_init(model, params.sampling); std::vector draft; diff --git a/examples/main-cmake-pkg/CMakeLists.txt b/examples/main-cmake-pkg/CMakeLists.txt index 3b38db292320f..5563f4de0a504 100644 --- a/examples/main-cmake-pkg/CMakeLists.txt +++ b/examples/main-cmake-pkg/CMakeLists.txt @@ -29,4 +29,4 @@ add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../main/main.cpp) target_include_directories(${TARGET} PRIVATE ${_common_path}) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/main/CMakeLists.txt b/examples/main/CMakeLists.txt index 5f6efaa9aa94b..af3d9150f8640 100644 --- a/examples/main/CMakeLists.txt +++ b/examples/main/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-cli) add_executable(${TARGET} main.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/main/README.md b/examples/main/README.md index 145216938fdb7..17d80a622a8bb 100644 --- a/examples/main/README.md +++ b/examples/main/README.md @@ -66,7 +66,7 @@ In this section, we cover the most commonly used options for running the `llama- - `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (e.g 
[https://huggingface.co/ggml-org/gemma-1.1-7b-it-Q4_K_M-GGUF/resolve/main/gemma-1.1-7b-it.Q4_K_M.gguf?download=true](https://huggingface.co/ggml-org/gemma-1.1-7b-it-Q4_K_M-GGUF/resolve/main/gemma-1.1-7b-it.Q4_K_M.gguf?download=true)). - `-i, --interactive`: Run the program in interactive mode, allowing you to provide input directly and receive real-time responses. - `-n N, --n-predict N`: Set the number of tokens to predict when generating text. Adjusting this value can influence the length of the generated text. -- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. +- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 4096, but if a LLaMA model was built with a longer context, increasing this value will provide better results for longer input/inference. - `-mli, --multiline-input`: Allows you to write or paste multiple lines without ending each in '\' - `-t N, --threads N`: Set the number of threads to use during generation. For optimal performance, it is recommended to set this value to the number of physical CPU cores your system has. - `-ngl N, --n-gpu-layers N`: When compiled with GPU support, this option allows offloading some layers to the GPU for computation. Generally results in increased performance. @@ -131,7 +131,7 @@ During text generation, LLaMA models have a limited context size, which means th ### Context Size -- `-c N, --ctx-size N`: Set the size of the prompt context (default: 0, 0 = loaded from model). The LLaMA models were built with a context of 2048-8192, which will yield the best results on longer input/inference. +- `-c N, --ctx-size N`: Set the size of the prompt context (default: 4096, 0 = loaded from model). If a LLaMA model was built with a longer context, increasing this value will yield the best results on longer input/inference. ### Extended Context Size @@ -177,16 +177,11 @@ Example usage: `--temp 0` - `--repeat-penalty N`: Control the repetition of token sequences in the generated text (default: 1.0, 1.0 = disabled). - `--repeat-last-n N`: Last n tokens to consider for penalizing repetition (default: 64, 0 = disabled, -1 = ctx-size). -- `--no-penalize-nl`: Disable penalization for newline tokens when applying the repeat penalty. The `repeat-penalty` option helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. The default value is 1. The `repeat-last-n` option controls the number of tokens in the history to consider for penalizing repetition. A larger value will look further back in the generated text to prevent repetitions, while a smaller value will only consider recent tokens. A value of 0 disables the penalty, and a value of -1 sets the number of tokens considered equal to the context size (`ctx-size`). -Use the `--no-penalize-nl` option to disable newline penalization when applying the repeat penalty. This option is particularly useful for generating chat conversations, dialogues, code, poetry, or any text where newline tokens play a significant role in structure and formatting. Disabling newline penalization helps maintain the natural flow and intended formatting in these specific use cases.
- -Example usage: `--repeat-penalty 1.15 --repeat-last-n 128 --no-penalize-nl` - ### DRY Repetition Penalty DRY (Don't Repeat Yourself) sampling is an effective technique for reducing repetition in generated text even across long contexts by penalizing tokens based on their recent usage patterns (original [PR link](https://github.com/oobabooga/text-generation-webui/pull/5677)). @@ -348,6 +343,7 @@ These options provide extra functionality and customization when running the LLa - `-h, --help`: Display a help message showing all available options and their default values. This is particularly useful for checking the latest options and default values, as they can change frequently, and the information in this document may become outdated. - `--verbose-prompt`: Print the prompt before generating text. +- `--no-display-prompt`: Don't print prompt at generation. - `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used. - `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance. - `-hfr URL --hf-repo URL`: The url to the Hugging Face model repository. Used in conjunction with `--hf-file` or `-hff`. The model is downloaded and stored in the file provided by `-m` or `--model`. If `-m` is not provided, the model is auto-stored in the path specified by the `LLAMA_CACHE` environment variable or in an OS-specific local cache. 
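The paragraph removed above still gives the clearest description of how `--repeat-penalty` and `--repeat-last-n` interact, so here is a rough, self-contained sketch of that mechanism. This is an illustration only, not llama.cpp's actual sampler implementation; the helper name `apply_repeat_penalty` and its signature are invented for the sketch:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <unordered_set>
#include <vector>

using llama_token = int32_t;

// Scale the logit of every token id seen in the last `repeat_last_n` positions.
// `repeat_penalty` of 1.0 disables the penalty; `repeat_last_n` of 0 disables the
// window, and -1 (meaning "ctx-size") is approximated here as the whole history.
// `logits` is assumed to have one entry per vocabulary id.
static void apply_repeat_penalty(std::vector<float> & logits,
                                 const std::vector<llama_token> & last_tokens,
                                 int repeat_last_n, float repeat_penalty) {
    if (repeat_last_n == 0 || repeat_penalty == 1.0f) {
        return;
    }
    const size_t n = repeat_last_n < 0
        ? last_tokens.size()
        : std::min(last_tokens.size(), (size_t) repeat_last_n);
    const std::unordered_set<llama_token> recent(last_tokens.end() - n, last_tokens.end());
    for (const llama_token tok : recent) {
        float & logit = logits[tok];
        // A positive logit is divided so it shrinks; a negative one is multiplied
        // so it moves further down. The penalty therefore never helps a token.
        logit = logit > 0.0f ? logit / repeat_penalty : logit * repeat_penalty;
    }
}
```

With `repeat_penalty` above 1.0 (e.g. 1.5) every recently seen token becomes less likely, and values below 1.0 (e.g. 0.9) do the opposite, which matches the stronger/lenient behaviour described above.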
diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 7c4ce4be2abae..d0c28f317b8c5 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -100,7 +100,7 @@ int main(int argc, char ** argv) { common_init(); - auto & sparams = params.sparams; + auto & sparams = params.sampling; // save choice to use color for later // (note for later: this is a slightly awkward choice) @@ -165,6 +165,10 @@ int main(int argc, char ** argv) { LOG_INF("%s: llama threadpool init, n_threads = %d\n", __func__, (int) params.cpuparams.n_threads); + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU)); + auto * ggml_threadpool_new_fn = (decltype(ggml_threadpool_new) *) ggml_backend_reg_get_proc_address(reg, "ggml_threadpool_new"); + auto * ggml_threadpool_free_fn = (decltype(ggml_threadpool_free) *) ggml_backend_reg_get_proc_address(reg, "ggml_threadpool_free"); + struct ggml_threadpool_params tpp_batch = ggml_threadpool_params_from_cpu_params(params.cpuparams_batch); struct ggml_threadpool_params tpp = @@ -174,7 +178,7 @@ int main(int argc, char ** argv) { struct ggml_threadpool * threadpool_batch = NULL; if (!ggml_threadpool_params_match(&tpp, &tpp_batch)) { - threadpool_batch = ggml_threadpool_new(&tpp_batch); + threadpool_batch = ggml_threadpool_new_fn(&tpp_batch); if (!threadpool_batch) { LOG_ERR("%s: batch threadpool create failed : n_threads %d\n", __func__, tpp_batch.n_threads); return 1; @@ -184,7 +188,7 @@ int main(int argc, char ** argv) { tpp.paused = true; } - struct ggml_threadpool * threadpool = ggml_threadpool_new(&tpp); + struct ggml_threadpool * threadpool = ggml_threadpool_new_fn(&tpp); if (!threadpool) { LOG_ERR("%s: threadpool create failed : n_threads %d\n", __func__, tpp.n_threads); return 1; @@ -890,8 +894,8 @@ int main(int argc, char ** argv) { llama_backend_free(); - ggml_threadpool_free(threadpool); - ggml_threadpool_free(threadpool_batch); + ggml_threadpool_free_fn(threadpool); + ggml_threadpool_free_fn(threadpool_batch); return 0; } diff --git a/examples/parallel/CMakeLists.txt b/examples/parallel/CMakeLists.txt index c13557bac2bac..847e916de6ed8 100644 --- a/examples/parallel/CMakeLists.txt +++ b/examples/parallel/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-parallel) add_executable(${TARGET} parallel.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index 43c8f3ed56ba9..fd2b1c0112838 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -160,7 +160,7 @@ int main(int argc, char ** argv) { for (size_t i = 0; i < clients.size(); ++i) { auto & client = clients[i]; client.id = i; - client.smpl = common_sampler_init(model, params.sparams); + client.smpl = common_sampler_init(model, params.sampling); } std::vector tokens_system; diff --git a/examples/passkey/CMakeLists.txt b/examples/passkey/CMakeLists.txt index dc467a5d3e411..9bc5110c29309 100644 --- a/examples/passkey/CMakeLists.txt +++ b/examples/passkey/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-passkey) add_executable(${TARGET} passkey.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git 
a/examples/perplexity/CMakeLists.txt b/examples/perplexity/CMakeLists.txt index be0f2fd029e67..3e68640933afb 100644 --- a/examples/perplexity/CMakeLists.txt +++ b/examples/perplexity/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-perplexity) add_executable(${TARGET} perplexity.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/quantize-stats/CMakeLists.txt b/examples/quantize-stats/CMakeLists.txt index bb986a716883d..9a3a0d3cd2dee 100644 --- a/examples/quantize-stats/CMakeLists.txt +++ b/examples/quantize-stats/CMakeLists.txt @@ -3,4 +3,4 @@ add_executable(${TARGET} quantize-stats.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT}) target_include_directories(${TARGET} PRIVATE ../../common) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/quantize/CMakeLists.txt b/examples/quantize/CMakeLists.txt index 62680cda4455f..47e5cbe30cfe3 100644 --- a/examples/quantize/CMakeLists.txt +++ b/examples/quantize/CMakeLists.txt @@ -3,4 +3,4 @@ add_executable(${TARGET} quantize.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) target_include_directories(${TARGET} PRIVATE ../../common) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/quantize/README.md b/examples/quantize/README.md index 704f0d56bea72..f9cce7b213334 100644 --- a/examples/quantize/README.md +++ b/examples/quantize/README.md @@ -54,8 +54,6 @@ As the models are currently fully loaded into memory, you will need adequate dis Several quantization methods are supported. They differ in the resulting model disk size and inference speed. -The quantization formats `Q4_0_4_4`, `Q4_0_4_8` and `Q4_0_8_8` are block interleaved variants of the `Q4_0` format, providing a data layout that is better suited for specific implementations of optimized mulmat kernels. Since these formats differ only in data layout, they have the same quantized size as the `Q4_0` format. 
- *(outdated)* | Model | Measure | F16 | Q4_0 | Q4_1 | Q5_0 | Q5_1 | Q8_0 | @@ -83,7 +81,7 @@ The quantization formats `Q4_0_4_4`, `Q4_0_4_8` and `Q4_0_8_8` are block interle - [#4930 - imatrix for all k-quants](https://github.com/ggerganov/llama.cpp/pull/4930) - [#4951 - imatrix on the GPU](https://github.com/ggerganov/llama.cpp/pull/4957) - [#4969 - imatrix for legacy quants](https://github.com/ggerganov/llama.cpp/pull/4969) - - [#4996 - k-qunats tuning](https://github.com/ggerganov/llama.cpp/pull/4996) + - [#4996 - k-quants tuning](https://github.com/ggerganov/llama.cpp/pull/4996) - [#5060 - Q3_K_XS](https://github.com/ggerganov/llama.cpp/pull/5060) - [#5196 - 3-bit i-quants](https://github.com/ggerganov/llama.cpp/pull/5196) - [quantization tuning](https://github.com/ggerganov/llama.cpp/pull/5320), [another one](https://github.com/ggerganov/llama.cpp/pull/5334), and [another one](https://github.com/ggerganov/llama.cpp/pull/5361) diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index b989932107dba..8d47b17b6bce7 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -48,9 +48,6 @@ static const std::vector<quant_option> QUANT_OPTIONS = { { "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 5.33G, +0.0569 ppl @ Llama-3-8B", }, { "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 6.14G, +0.0217 ppl @ Llama-3-8B", }, { "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 7.96G, +0.0026 ppl @ Llama-3-8B", }, - { "Q4_0_4_4", LLAMA_FTYPE_MOSTLY_Q4_0_4_4, " 4.34G, +0.4685 ppl @ Llama-3-8B", }, - { "Q4_0_4_8", LLAMA_FTYPE_MOSTLY_Q4_0_4_8, " 4.34G, +0.4685 ppl @ Llama-3-8B", }, - { "Q4_0_8_8", LLAMA_FTYPE_MOSTLY_Q4_0_8_8, " 4.34G, +0.4685 ppl @ Llama-3-8B", }, { "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, +0.0020 ppl @ Mistral-7B", }, { "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", }, { "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", }, diff --git a/examples/retrieval/CMakeLists.txt b/examples/retrieval/CMakeLists.txt index 66610f3111405..512a602ec045c 100644 --- a/examples/retrieval/CMakeLists.txt +++ b/examples/retrieval/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-retrieval) add_executable(${TARGET} retrieval.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/retrieval/retrieval.cpp b/examples/retrieval/retrieval.cpp index 1768aae510067..a5c6fe7e58523 100644 --- a/examples/retrieval/retrieval.cpp +++ b/examples/retrieval/retrieval.cpp @@ -107,7 +107,7 @@ int main(int argc, char ** argv) { } float * out = output + batch.seq_id[i][0] * n_embd; - common_embd_normalize(embd, out, n_embd); + common_embd_normalize(embd, out, n_embd, 2); } } @@ -143,7 +143,7 @@ int main(int argc, char ** argv) { std::vector<chunk> file_chunk = chunk_file(context_file, params.chunk_size, params.chunk_separator); chunks.insert(chunks.end(), file_chunk.begin(), file_chunk.end()); } - LOG_INF("Number of chunks: %ld\n", chunks.size()); + LOG_INF("Number of chunks: %zu\n", chunks.size()); llama_backend_init(); llama_numa_init(params.numa); @@ -282,8 +282,8 @@ int main(int argc, char ** argv) { return a.second > b.second; }); - LOG("Top %d similar chunks:\n", params.sparams.top_k); - for (int i = 0; i < std::min(params.sparams.top_k, (int) chunks.size()); i++) { + LOG("Top %d similar chunks:\n", params.sampling.top_k); + for (int i = 0; i < std::min(params.sampling.top_k,
(int) chunks.size()); i++) { LOG("filename: %s\n", chunks[similarities[i].first].filename.c_str()); LOG("filepos: %lld\n", (long long int) chunks[similarities[i].first].filepos); LOG("similarity: %f\n", similarities[i].second); diff --git a/examples/run/CMakeLists.txt b/examples/run/CMakeLists.txt new file mode 100644 index 0000000000000..0686d6305701f --- /dev/null +++ b/examples/run/CMakeLists.txt @@ -0,0 +1,5 @@ +set(TARGET llama-run) +add_executable(${TARGET} run.cpp) +install(TARGETS ${TARGET} RUNTIME) +target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/run/README.md b/examples/run/README.md new file mode 100644 index 0000000000000..874293516f4b6 --- /dev/null +++ b/examples/run/README.md @@ -0,0 +1,49 @@ +# llama.cpp/example/run + +The purpose of this example is to demonstrate a minimal usage of llama.cpp for running models. + +```bash +llama-run granite-code +``` + +```bash +llama-run -h +Description: + Runs a llm + +Usage: + llama-run [options] model [prompt] + +Options: + -c, --context-size <value> + Context size (default: 2048) + -n, --ngl <value> + Number of GPU layers (default: 0) + -v, --verbose, --log-verbose + Set verbosity level to infinity (i.e. log all messages, useful for debugging) + -h, --help + Show help message + +Commands: + model + Model is a string with an optional prefix of + huggingface:// (hf://), ollama://, https:// or file://. + If no protocol is specified and a file exists in the specified + path, file:// is assumed, otherwise if a file does not exist in + the specified path, ollama:// is assumed. Models that are being + pulled are downloaded with .partial extension while being + downloaded and then renamed as the file without the .partial + extension when complete. + +Examples: + llama-run llama3 + llama-run ollama://granite-code + llama-run ollama://smollm:135m + llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf + llama-run huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf + llama-run https://example.com/some-file1.gguf + llama-run some-file2.gguf + llama-run file://some-file3.gguf + llama-run --ngl 999 some-file4.gguf + llama-run --ngl 999 some-file5.gguf Hello World +``` diff --git a/examples/run/run.cpp b/examples/run/run.cpp new file mode 100644 index 0000000000000..03da54ca3b2ef --- /dev/null +++ b/examples/run/run.cpp @@ -0,0 +1,911 @@ +#if defined(_WIN32) +# include <windows.h> +#else +# include <sys/file.h> +# include <sys/ioctl.h> +# include <unistd.h> +#endif + +#if defined(LLAMA_USE_CURL) +# include <curl/curl.h> +#endif + +#include <climits> +#include <cstdarg> +#include <cstdio> +#include <cstring> +#include <filesystem> +#include <iostream> +#include <sstream> +#include <string> +#include <vector> + +#include "common.h" +#include "json.hpp" +#include "llama-cpp.h" + +GGML_ATTRIBUTE_FORMAT(1, 2) +static std::string fmt(const char * fmt, ...) { + va_list ap; + va_list ap2; + va_start(ap, fmt); + va_copy(ap2, ap); + const int size = vsnprintf(NULL, 0, fmt, ap); + GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT + std::string buf; + buf.resize(size); + const int size2 = vsnprintf(const_cast<char *>(buf.data()), buf.size() + 1, fmt, ap2); + GGML_ASSERT(size2 == size); + va_end(ap2); + va_end(ap); + + return buf; +} + +GGML_ATTRIBUTE_FORMAT(1, 2) +static int printe(const char * fmt, ...)
{ + va_list args; + va_start(args, fmt); + const int ret = vfprintf(stderr, fmt, args); + va_end(args); + + return ret; +} + +class Opt { + public: + int init(int argc, const char ** argv) { + // Parse arguments + if (parse(argc, argv)) { + printe("Error: Failed to parse arguments.\n"); + help(); + return 1; + } + + // If help is requested, show help and exit + if (help_) { + help(); + return 2; + } + + return 0; // Success + } + + std::string model_; + std::string user_; + int context_size_ = -1, ngl_ = -1; + bool verbose_ = false; + + private: + bool help_ = false; + + bool parse_flag(const char ** argv, int i, const char * short_opt, const char * long_opt) { + return strcmp(argv[i], short_opt) == 0 || strcmp(argv[i], long_opt) == 0; + } + + int handle_option_with_value(int argc, const char ** argv, int & i, int & option_value) { + if (i + 1 >= argc) { + return 1; + } + + option_value = std::atoi(argv[++i]); + return 0; + } + + int parse(int argc, const char ** argv) { + bool options_parsing = true; + for (int i = 1, positional_args_i = 0; i < argc; ++i) { + if (options_parsing && (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0)) { + if (handle_option_with_value(argc, argv, i, context_size_) == 1) { + return 1; + } + } else if (options_parsing && (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0)) { + if (handle_option_with_value(argc, argv, i, ngl_) == 1) { + return 1; + } + } else if (options_parsing && + (parse_flag(argv, i, "-v", "--verbose") || parse_flag(argv, i, "-v", "--log-verbose"))) { + verbose_ = true; + } else if (options_parsing && parse_flag(argv, i, "-h", "--help")) { + help_ = true; + return 0; + } else if (options_parsing && strcmp(argv[i], "--") == 0) { + options_parsing = false; + } else if (positional_args_i == 0) { + if (!argv[i][0] || argv[i][0] == '-') { + return 1; + } + + ++positional_args_i; + model_ = argv[i]; + } else if (positional_args_i == 1) { + ++positional_args_i; + user_ = argv[i]; + } else { + user_ += " " + std::string(argv[i]); + } + } + + return 0; + } + + void help() const { + printf( + "Description:\n" + " Runs a llm\n" + "\n" + "Usage:\n" + " llama-run [options] model [prompt]\n" + "\n" + "Options:\n" + " -c, --context-size \n" + " Context size (default: %d)\n" + " -n, --ngl \n" + " Number of GPU layers (default: %d)\n" + " -v, --verbose, --log-verbose\n" + " Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n" + " -h, --help\n" + " Show help message\n" + "\n" + "Commands:\n" + " model\n" + " Model is a string with an optional prefix of \n" + " huggingface:// (hf://), ollama://, https:// or file://.\n" + " If no protocol is specified and a file exists in the specified\n" + " path, file:// is assumed, otherwise if a file does not exist in\n" + " the specified path, ollama:// is assumed. 
Models that are being\n" + " pulled are downloaded with .partial extension while being\n" + " downloaded and then renamed as the file without the .partial\n" + " extension when complete.\n" + "\n" + "Examples:\n" + " llama-run llama3\n" + " llama-run ollama://granite-code\n" + " llama-run ollama://smollm:135m\n" + " llama-run hf://QuantFactory/SmolLM-135M-GGUF/SmolLM-135M.Q2_K.gguf\n" + " llama-run " + "huggingface://bartowski/SmolLM-1.7B-Instruct-v0.2-GGUF/SmolLM-1.7B-Instruct-v0.2-IQ3_M.gguf\n" + " llama-run https://example.com/some-file1.gguf\n" + " llama-run some-file2.gguf\n" + " llama-run file://some-file3.gguf\n" + " llama-run --ngl 999 some-file4.gguf\n" + " llama-run --ngl 999 some-file5.gguf Hello World\n", + llama_context_default_params().n_batch, llama_model_default_params().n_gpu_layers); + } +}; + +struct progress_data { + size_t file_size = 0; + std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now(); + bool printed = false; +}; + +static int get_terminal_width() { +#if defined(_WIN32) + CONSOLE_SCREEN_BUFFER_INFO csbi; + GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi); + return csbi.srWindow.Right - csbi.srWindow.Left + 1; +#else + struct winsize w; + ioctl(STDOUT_FILENO, TIOCGWINSZ, &w); + return w.ws_col; +#endif +} + +#ifdef LLAMA_USE_CURL +class File { + public: + FILE * file = nullptr; + + FILE * open(const std::string & filename, const char * mode) { + file = fopen(filename.c_str(), mode); + + return file; + } + + int lock() { + if (file) { +# ifdef _WIN32 + fd = _fileno(file); + hFile = (HANDLE) _get_osfhandle(fd); + if (hFile == INVALID_HANDLE_VALUE) { + fd = -1; + + return 1; + } + + OVERLAPPED overlapped = { 0 }; + if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD, + &overlapped)) { + fd = -1; + + return 1; + } +# else + fd = fileno(file); + if (flock(fd, LOCK_EX | LOCK_NB) != 0) { + fd = -1; + + return 1; + } +# endif + } + + return 0; + } + + ~File() { + if (fd >= 0) { +# ifdef _WIN32 + if (hFile != INVALID_HANDLE_VALUE) { + OVERLAPPED overlapped = { 0 }; + UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped); + } +# else + flock(fd, LOCK_UN); +# endif + } + + if (file) { + fclose(file); + } + } + + private: + int fd = -1; +# ifdef _WIN32 + HANDLE hFile; +# endif +}; + +class HttpClient { + public: + int init(const std::string & url, const std::vector & headers, const std::string & output_file, + const bool progress, std::string * response_str = nullptr) { + std::string output_file_partial; + curl = curl_easy_init(); + if (!curl) { + return 1; + } + + progress_data data; + File out; + if (!output_file.empty()) { + output_file_partial = output_file + ".partial"; + if (!out.open(output_file_partial, "ab")) { + printe("Failed to open file\n"); + + return 1; + } + + if (out.lock()) { + printe("Failed to exclusively lock file\n"); + + return 1; + } + } + + set_write_options(response_str, out); + data.file_size = set_resume_point(output_file_partial); + set_progress_options(progress, data); + set_headers(headers); + perform(url); + if (!output_file.empty()) { + std::filesystem::rename(output_file_partial, output_file); + } + + return 0; + } + + ~HttpClient() { + if (chunk) { + curl_slist_free_all(chunk); + } + + if (curl) { + curl_easy_cleanup(curl); + } + } + + private: + CURL * curl = nullptr; + struct curl_slist * chunk = nullptr; + + void set_write_options(std::string * response_str, const File & out) { + if (response_str) { + curl_easy_setopt(curl, 
CURLOPT_WRITEFUNCTION, capture_data); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, response_str); + } else { + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, out.file); + } + } + + size_t set_resume_point(const std::string & output_file) { + size_t file_size = 0; + if (std::filesystem::exists(output_file)) { + file_size = std::filesystem::file_size(output_file); + curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, static_cast(file_size)); + } + + return file_size; + } + + void set_progress_options(bool progress, progress_data & data) { + if (progress) { + curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L); + curl_easy_setopt(curl, CURLOPT_XFERINFODATA, &data); + curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, update_progress); + } + } + + void set_headers(const std::vector & headers) { + if (!headers.empty()) { + if (chunk) { + curl_slist_free_all(chunk); + chunk = 0; + } + + for (const auto & header : headers) { + chunk = curl_slist_append(chunk, header.c_str()); + } + + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk); + } + } + + void perform(const std::string & url) { + CURLcode res; + curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); + curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); + curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https"); + curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L); + res = curl_easy_perform(curl); + if (res != CURLE_OK) { + printe("curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); + } + } + + static std::string human_readable_time(double seconds) { + int hrs = static_cast(seconds) / 3600; + int mins = (static_cast(seconds) % 3600) / 60; + int secs = static_cast(seconds) % 60; + + if (hrs > 0) { + return fmt("%dh %02dm %02ds", hrs, mins, secs); + } else if (mins > 0) { + return fmt("%dm %02ds", mins, secs); + } else { + return fmt("%ds", secs); + } + } + + static std::string human_readable_size(curl_off_t size) { + static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" }; + char length = sizeof(suffix) / sizeof(suffix[0]); + int i = 0; + double dbl_size = size; + if (size > 1024) { + for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) { + dbl_size = size / 1024.0; + } + } + + return fmt("%.2f %s", dbl_size, suffix[i]); + } + + static int update_progress(void * ptr, curl_off_t total_to_download, curl_off_t now_downloaded, curl_off_t, + curl_off_t) { + progress_data * data = static_cast(ptr); + if (total_to_download <= 0) { + return 0; + } + + total_to_download += data->file_size; + const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size; + const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download); + std::string progress_prefix = generate_progress_prefix(percentage); + + const double speed = calculate_speed(now_downloaded, data->start_time); + const double tim = (total_to_download - now_downloaded) / speed; + std::string progress_suffix = + generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, tim); + + int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix); + std::string progress_bar; + generate_progress_bar(progress_bar_width, percentage, progress_bar); + + print_progress(progress_prefix, progress_bar, progress_suffix); + data->printed = true; + + return 0; + } + + static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) { + return (now_downloaded_plus_file_size * 100) / total_to_download; 
+ } + + static std::string generate_progress_prefix(curl_off_t percentage) { return fmt("%3ld%% |", percentage); } + + static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) { + const auto now = std::chrono::steady_clock::now(); + const std::chrono::duration elapsed_seconds = now - start_time; + return now_downloaded / elapsed_seconds.count(); + } + + static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download, + double speed, double estimated_time) { + const int width = 10; + return fmt("%*s/%*s%*s/s%*s", width, human_readable_size(now_downloaded_plus_file_size).c_str(), width, + human_readable_size(total_to_download).c_str(), width, human_readable_size(speed).c_str(), width, + human_readable_time(estimated_time).c_str()); + } + + static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) { + int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 3; + if (progress_bar_width < 1) { + progress_bar_width = 1; + } + + return progress_bar_width; + } + + static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage, + std::string & progress_bar) { + const curl_off_t pos = (percentage * progress_bar_width) / 100; + for (int i = 0; i < progress_bar_width; ++i) { + progress_bar.append((i < pos) ? "ā–ˆ" : " "); + } + + return progress_bar; + } + + static void print_progress(const std::string & progress_prefix, const std::string & progress_bar, + const std::string & progress_suffix) { + printe("\r%*s\r%s%s| %s", get_terminal_width(), " ", progress_prefix.c_str(), progress_bar.c_str(), + progress_suffix.c_str()); + } + // Function to write data to a file + static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) { + FILE * out = static_cast(stream); + return fwrite(ptr, size, nmemb, out); + } + + // Function to capture data into a string + static size_t capture_data(void * ptr, size_t size, size_t nmemb, void * stream) { + std::string * str = static_cast(stream); + str->append(static_cast(ptr), size * nmemb); + return size * nmemb; + } +}; +#endif + +class LlamaData { + public: + llama_model_ptr model; + llama_sampler_ptr sampler; + llama_context_ptr context; + std::vector messages; + std::vector msg_strs; + std::vector fmtted; + + int init(Opt & opt) { + model = initialize_model(opt); + if (!model) { + return 1; + } + + context = initialize_context(model, opt.context_size_); + if (!context) { + return 1; + } + + sampler = initialize_sampler(); + return 0; + } + + private: +#ifdef LLAMA_USE_CURL + int download(const std::string & url, const std::vector & headers, const std::string & output_file, + const bool progress, std::string * response_str = nullptr) { + HttpClient http; + if (http.init(url, headers, output_file, progress, response_str)) { + return 1; + } + + return 0; + } +#else + int download(const std::string &, const std::vector &, const std::string &, const bool, + std::string * = nullptr) { + printe("%s: llama.cpp built without libcurl, downloading from an url not supported.\n", __func__); + return 1; + } +#endif + + int huggingface_dl(const std::string & model, const std::vector headers, const std::string & bn) { + // Find the second occurrence of '/' after protocol string + size_t pos = model.find('/'); + pos = model.find('/', pos + 1); + if (pos == std::string::npos) { + return 1; + } + + const std::string hfr = model.substr(0, 
pos); + const std::string hff = model.substr(pos + 1); + const std::string url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff; + return download(url, headers, bn, true); + } + + int ollama_dl(std::string & model, const std::vector headers, const std::string & bn) { + if (model.find('/') == std::string::npos) { + model = "library/" + model; + } + + std::string model_tag = "latest"; + size_t colon_pos = model.find(':'); + if (colon_pos != std::string::npos) { + model_tag = model.substr(colon_pos + 1); + model = model.substr(0, colon_pos); + } + + std::string manifest_url = "https://registry.ollama.ai/v2/" + model + "/manifests/" + model_tag; + std::string manifest_str; + const int ret = download(manifest_url, headers, "", false, &manifest_str); + if (ret) { + return ret; + } + + nlohmann::json manifest = nlohmann::json::parse(manifest_str); + std::string layer; + for (const auto & l : manifest["layers"]) { + if (l["mediaType"] == "application/vnd.ollama.image.model") { + layer = l["digest"]; + break; + } + } + + std::string blob_url = "https://registry.ollama.ai/v2/" + model + "/blobs/" + layer; + return download(blob_url, headers, bn, true); + } + + std::string basename(const std::string & path) { + const size_t pos = path.find_last_of("/\\"); + if (pos == std::string::npos) { + return path; + } + + return path.substr(pos + 1); + } + + int remove_proto(std::string & model_) { + const std::string::size_type pos = model_.find("://"); + if (pos == std::string::npos) { + return 1; + } + + model_ = model_.substr(pos + 3); // Skip past "://" + return 0; + } + + int resolve_model(std::string & model_) { + int ret = 0; + if (string_starts_with(model_, "file://") || std::filesystem::exists(model_)) { + remove_proto(model_); + + return ret; + } + + const std::string bn = basename(model_); + const std::vector headers = { "--header", + "Accept: application/vnd.docker.distribution.manifest.v2+json" }; + if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://")) { + remove_proto(model_); + ret = huggingface_dl(model_, headers, bn); + } else if (string_starts_with(model_, "ollama://")) { + remove_proto(model_); + ret = ollama_dl(model_, headers, bn); + } else if (string_starts_with(model_, "https://")) { + download(model_, headers, bn, true); + } else { + ret = ollama_dl(model_, headers, bn); + } + + model_ = bn; + + return ret; + } + + // Initializes the model and returns a unique pointer to it + llama_model_ptr initialize_model(Opt & opt) { + ggml_backend_load_all(); + llama_model_params model_params = llama_model_default_params(); + model_params.n_gpu_layers = opt.ngl_ >= 0 ? opt.ngl_ : model_params.n_gpu_layers; + resolve_model(opt.model_); + printe( + "\r%*s" + "\rLoading model", + get_terminal_width(), " "); + llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), model_params)); + if (!model) { + printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str()); + } + + printe("\r%*s\r", static_cast(sizeof("Loading model")), " "); + return model; + } + + // Initializes the context with the specified parameters + llama_context_ptr initialize_context(const llama_model_ptr & model, const int n_ctx) { + llama_context_params ctx_params = llama_context_default_params(); + ctx_params.n_ctx = ctx_params.n_batch = n_ctx >= 0 ? 
n_ctx : ctx_params.n_batch; + llama_context_ptr context(llama_new_context_with_model(model.get(), ctx_params)); + if (!context) { + printe("%s: error: failed to create the llama_context\n", __func__); + } + + return context; + } + + // Initializes and configures the sampler + llama_sampler_ptr initialize_sampler() { + llama_sampler_ptr sampler(llama_sampler_chain_init(llama_sampler_chain_default_params())); + llama_sampler_chain_add(sampler.get(), llama_sampler_init_min_p(0.05f, 1)); + llama_sampler_chain_add(sampler.get(), llama_sampler_init_temp(0.8f)); + llama_sampler_chain_add(sampler.get(), llama_sampler_init_dist(LLAMA_DEFAULT_SEED)); + + return sampler; + } +}; + +// Add a message to `messages` and store its content in `msg_strs` +static void add_message(const char * role, const std::string & text, LlamaData & llama_data) { + llama_data.msg_strs.push_back(std::move(text)); + llama_data.messages.push_back({ role, llama_data.msg_strs.back().c_str() }); +} + +// Function to apply the chat template and resize `formatted` if needed +static int apply_chat_template(LlamaData & llama_data, const bool append) { + int result = llama_chat_apply_template( + llama_data.model.get(), nullptr, llama_data.messages.data(), llama_data.messages.size(), append, + append ? llama_data.fmtted.data() : nullptr, append ? llama_data.fmtted.size() : 0); + if (append && result > static_cast(llama_data.fmtted.size())) { + llama_data.fmtted.resize(result); + result = llama_chat_apply_template(llama_data.model.get(), nullptr, llama_data.messages.data(), + llama_data.messages.size(), append, llama_data.fmtted.data(), + llama_data.fmtted.size()); + } + + return result; +} + +// Function to tokenize the prompt +static int tokenize_prompt(const llama_model_ptr & model, const std::string & prompt, + std::vector & prompt_tokens) { + const int n_prompt_tokens = -llama_tokenize(model.get(), prompt.c_str(), prompt.size(), NULL, 0, true, true); + prompt_tokens.resize(n_prompt_tokens); + if (llama_tokenize(model.get(), prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, + true) < 0) { + printe("failed to tokenize the prompt\n"); + return -1; + } + + return n_prompt_tokens; +} + +// Check if we have enough space in the context to evaluate this batch +static int check_context_size(const llama_context_ptr & ctx, const llama_batch & batch) { + const int n_ctx = llama_n_ctx(ctx.get()); + const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get()); + if (n_ctx_used + batch.n_tokens > n_ctx) { + printf("\033[0m\n"); + printe("context size exceeded\n"); + return 1; + } + + return 0; +} + +// convert the token to a string +static int convert_token_to_string(const llama_model_ptr & model, const llama_token token_id, std::string & piece) { + char buf[256]; + int n = llama_token_to_piece(model.get(), token_id, buf, sizeof(buf), 0, true); + if (n < 0) { + printe("failed to convert token to piece\n"); + return 1; + } + + piece = std::string(buf, n); + return 0; +} + +static void print_word_and_concatenate_to_response(const std::string & piece, std::string & response) { + printf("%s", piece.c_str()); + fflush(stdout); + response += piece; +} + +// helper function to evaluate a prompt and generate a response +static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) { + std::vector tokens; + if (tokenize_prompt(llama_data.model, prompt, tokens) < 0) { + return 1; + } + + // prepare a batch for the prompt + llama_batch batch = llama_batch_get_one(tokens.data(), 
tokens.size()); + llama_token new_token_id; + while (true) { + check_context_size(llama_data.context, batch); + if (llama_decode(llama_data.context.get(), batch)) { + printe("failed to decode\n"); + return 1; + } + + // sample the next token, check is it an end of generation? + new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1); + if (llama_token_is_eog(llama_data.model.get(), new_token_id)) { + break; + } + + std::string piece; + if (convert_token_to_string(llama_data.model, new_token_id, piece)) { + return 1; + } + + print_word_and_concatenate_to_response(piece, response); + + // prepare the next batch with the sampled token + batch = llama_batch_get_one(&new_token_id, 1); + } + + return 0; +} + +static int read_user_input(std::string & user) { + std::getline(std::cin, user); + return user.empty(); // Should have data in happy path +} + +// Function to generate a response based on the prompt +static int generate_response(LlamaData & llama_data, const std::string & prompt, std::string & response, + const bool stdout_a_terminal) { + // Set response color + if (stdout_a_terminal) { + printf("\033[33m"); + } + + if (generate(llama_data, prompt, response)) { + printe("failed to generate response\n"); + return 1; + } + + // End response with color reset and newline + printf("\n%s", stdout_a_terminal ? "\033[0m" : ""); + return 0; +} + +// Helper function to apply the chat template and handle errors +static int apply_chat_template_with_error_handling(LlamaData & llama_data, const bool append, int & output_length) { + const int new_len = apply_chat_template(llama_data, append); + if (new_len < 0) { + printe("failed to apply the chat template\n"); + return -1; + } + + output_length = new_len; + return 0; +} + +// Helper function to handle user input +static int handle_user_input(std::string & user_input, const std::string & user_) { + if (!user_.empty()) { + user_input = user_; + return 0; // No need for interactive input + } + + printf( + "\r%*s" + "\r\033[32m> \033[0m", + get_terminal_width(), " "); + return read_user_input(user_input); // Returns true if input ends the loop +} + +static bool is_stdin_a_terminal() { +#if defined(_WIN32) + HANDLE hStdin = GetStdHandle(STD_INPUT_HANDLE); + DWORD mode; + return GetConsoleMode(hStdin, &mode); +#else + return isatty(STDIN_FILENO); +#endif +} + +static bool is_stdout_a_terminal() { +#if defined(_WIN32) + HANDLE hStdout = GetStdHandle(STD_OUTPUT_HANDLE); + DWORD mode; + return GetConsoleMode(hStdout, &mode); +#else + return isatty(STDOUT_FILENO); +#endif +} + +// Function to tokenize the prompt +static int chat_loop(LlamaData & llama_data, const std::string & user_) { + int prev_len = 0; + llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get())); + static const bool stdout_a_terminal = is_stdout_a_terminal(); + while (true) { + // Get user input + std::string user_input; + while (handle_user_input(user_input, user_)) { + } + + add_message("user", user_.empty() ? 
user_input : user_, llama_data); + int new_len; + if (apply_chat_template_with_error_handling(llama_data, true, new_len) < 0) { + return 1; + } + + std::string prompt(llama_data.fmtted.begin() + prev_len, llama_data.fmtted.begin() + new_len); + std::string response; + if (generate_response(llama_data, prompt, response, stdout_a_terminal)) { + return 1; + } + + if (!user_.empty()) { + break; + } + + add_message("assistant", response, llama_data); + if (apply_chat_template_with_error_handling(llama_data, false, prev_len) < 0) { + return 1; + } + } + + return 0; +} + +static void log_callback(const enum ggml_log_level level, const char * text, void * p) { + const Opt * opt = static_cast(p); + if (opt->verbose_ || level == GGML_LOG_LEVEL_ERROR) { + printe("%s", text); + } +} + +static std::string read_pipe_data() { + std::ostringstream result; + result << std::cin.rdbuf(); // Read all data from std::cin + return result.str(); +} + +int main(int argc, const char ** argv) { + Opt opt; + const int ret = opt.init(argc, argv); + if (ret == 2) { + return 0; + } else if (ret) { + return 1; + } + + if (!is_stdin_a_terminal()) { + if (!opt.user_.empty()) { + opt.user_ += "\n\n"; + } + + opt.user_ += read_pipe_data(); + } + + llama_log_set(log_callback, &opt); + LlamaData llama_data; + if (llama_data.init(opt)) { + return 1; + } + + if (chat_loop(llama_data, opt.user_)) { + return 1; + } + + return 0; +} diff --git a/examples/save-load-state/CMakeLists.txt b/examples/save-load-state/CMakeLists.txt index 0fb5e359bc9ad..0f50e50deecd7 100644 --- a/examples/save-load-state/CMakeLists.txt +++ b/examples/save-load-state/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-save-load-state) add_executable(${TARGET} save-load-state.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index 8c49a52a66124..2f0cf9baa32b7 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -9,7 +9,7 @@ int main(int argc, char ** argv) { common_params params; params.prompt = "The quick brown fox"; - params.sparams.seed = 1234; + params.sampling.seed = 1234; if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) { return 1; @@ -42,7 +42,7 @@ int main(int argc, char ** argv) { llama_sampler * smpl = llama_sampler_chain_init(sparams); - llama_sampler_chain_add(smpl, llama_sampler_init_dist(params.sparams.seed)); + llama_sampler_chain_add(smpl, llama_sampler_init_dist(params.sampling.seed)); // tokenize prompt auto tokens = common_tokenize(ctx, params.prompt, true); @@ -106,7 +106,7 @@ int main(int argc, char ** argv) { llama_sampler * smpl2 = llama_sampler_chain_init(sparams); - llama_sampler_chain_add(smpl2, llama_sampler_init_dist(params.sparams.seed)); + llama_sampler_chain_add(smpl2, llama_sampler_init_dist(params.sampling.seed)); printf("\nsecond run: %s", params.prompt.c_str()); @@ -169,7 +169,7 @@ int main(int argc, char ** argv) { llama_sampler * smpl3 = llama_sampler_chain_init(sparams); - llama_sampler_chain_add(smpl3, llama_sampler_init_dist(params.sparams.seed)); + llama_sampler_chain_add(smpl3, llama_sampler_init_dist(params.sampling.seed)); printf("\nsingle seq run: %s", params.prompt.c_str()); diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt index 
93e876f5a5524..a27597cbc294a 100644 --- a/examples/server/CMakeLists.txt +++ b/examples/server/CMakeLists.txt @@ -15,13 +15,8 @@ set(TARGET_SRCS httplib.h ) set(PUBLIC_ASSETS - index.html - completion.js + index.html.gz loading.html - deps_daisyui.min.css - deps_markdown-it.js - deps_tailwindcss.js - deps_vue.esm-browser.js ) foreach(asset ${PUBLIC_ASSETS}) @@ -33,6 +28,7 @@ foreach(asset ${PUBLIC_ASSETS}) OUTPUT "${output}" COMMAND "${CMAKE_COMMAND}" "-DINPUT=${input}" "-DOUTPUT=${output}" -P "${PROJECT_SOURCE_DIR}/scripts/xxd.cmake" ) + set_source_files_properties(${output} PROPERTIES GENERATED TRUE) endforeach() add_executable(${TARGET} ${TARGET_SRCS}) @@ -50,4 +46,4 @@ if (WIN32) TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) endif() -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/server/README.md b/examples/server/README.md index 0936e0b7ba13a..6d64656926250 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -62,13 +62,15 @@ The project is under active development, and we are [looking for feedback and co | `--yarn-beta-fast N` | YaRN: low correction dim or beta (default: 32.0)
(env: LLAMA_ARG_YARN_BETA_FAST) | | `-dkvc, --dump-kv-cache` | verbose print of the KV cache | | `-nkvo, --no-kv-offload` | disable KV offload
(env: LLAMA_ARG_NO_KV_OFFLOAD) | -| `-ctk, --cache-type-k TYPE` | KV cache data type for K (default: f16)
(env: LLAMA_ARG_CACHE_TYPE_K) | -| `-ctv, --cache-type-v TYPE` | KV cache data type for V (default: f16)
(env: LLAMA_ARG_CACHE_TYPE_V) | +| `-ctk, --cache-type-k TYPE` | KV cache data type for K
allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1
(default: f16)
(env: LLAMA_ARG_CACHE_TYPE_K) | +| `-ctv, --cache-type-v TYPE` | KV cache data type for V
allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1
(default: f16)
(env: LLAMA_ARG_CACHE_TYPE_V) | | `-dt, --defrag-thold N` | KV cache defragmentation threshold (default: 0.1, < 0 - disabled)
(env: LLAMA_ARG_DEFRAG_THOLD) | | `-np, --parallel N` | number of parallel sequences to decode (default: 1)
(env: LLAMA_ARG_N_PARALLEL) | | `--mlock` | force system to keep model in RAM rather than swapping or compressing
(env: LLAMA_ARG_MLOCK) | | `--no-mmap` | do not memory-map model (slower load but may reduce pageouts if not using mlock)
(env: LLAMA_ARG_NO_MMAP) | | `--numa TYPE` | attempt optimizations that help on some NUMA systems
- distribute: spread execution evenly over all nodes
- isolate: only spawn threads on CPUs on the node that execution started on
- numactl: use the CPU map provided by numactl
if run without this previously, it is recommended to drop the system page cache before using this
see https://github.com/ggerganov/llama.cpp/issues/1437
(env: LLAMA_ARG_NUMA) | +| `-dev, --device ` | comma-separated list of devices to use for offloading (none = don't offload)
use --list-devices to see a list of available devices
(env: LLAMA_ARG_DEVICE) | +| `--list-devices` | print list of available devices and exit | | `-ngl, --gpu-layers, --n-gpu-layers N` | number of layers to store in VRAM
(env: LLAMA_ARG_N_GPU_LAYERS) | | `-sm, --split-mode {none,layer,row}` | how to split the model across multiple GPUs, one of:
- none: use one GPU only
- layer (default): split layers and KV across GPUs
- row: split rows across GPUs
(env: LLAMA_ARG_SPLIT_MODE) | | `-ts, --tensor-split N0,N1,N2,...` | fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1
(env: LLAMA_ARG_TENSOR_SPLIT) | @@ -102,7 +104,6 @@ The project is under active development, and we are [looking for feedback and co | `-s, --seed SEED` | RNG seed (default: -1, use random seed for -1) | | `--sampling-seq SEQUENCE` | simplified sequence for samplers that will be used (default: dkypmxt) | | `--ignore-eos` | ignore end of stream token and continue generating (implies --logit-bias EOS-inf) | -| `--penalize-nl` | penalize newline tokens (default: false) | | `--temp N` | temperature (default: 0.8) | | `--top-k N` | top-k sampling (default: 40, 0 = disabled) | | `--top-p N` | top-p sampling (default: 0.9, 1.0 = disabled) | @@ -136,6 +137,7 @@ The project is under active development, and we are [looking for feedback and co | -------- | ----------- | | `--no-context-shift` | disables context shift on infinite text generation (default: disabled)<br/>
(env: LLAMA_ARG_NO_CONTEXT_SHIFT) | | `-sp, --special` | special tokens output enabled (default: false) | +| `--no-warmup` | skip warming up the model with an empty run | | `--spm-infill` | use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this. (default: disabled) | | `--pooling {none,mean,cls,last,rank}` | pooling type for embeddings, use model default if unspecified
(env: LLAMA_ARG_POOLING) | | `-cb, --cont-batching` | enable continuous batching (a.k.a dynamic batching) (default: enabled)
(env: LLAMA_ARG_CONT_BATCHING) | @@ -144,6 +146,7 @@ The project is under active development, and we are [looking for feedback and co | `--host HOST` | ip address to listen (default: 127.0.0.1)
(env: LLAMA_ARG_HOST) | | `--port PORT` | port to listen (default: 8080)
(env: LLAMA_ARG_PORT) | | `--path PATH` | path to serve static files from (default: )
(env: LLAMA_ARG_STATIC_PATH) | +| `--no-webui` | Disable the Web UI (default: enabled)
(env: LLAMA_ARG_NO_WEBUI) | | `--embedding, --embeddings` | restrict to only support embedding use case; use only with dedicated embedding models (default: disabled)
(env: LLAMA_ARG_EMBEDDINGS) | | `--reranking, --rerank` | enable reranking endpoint on server (default: disabled)
(env: LLAMA_ARG_RERANKING) | | `--api-key KEY` | API key to use for authentication (default: none)
(env: LLAMA_API_KEY) | @@ -158,9 +161,16 @@ The project is under active development, and we are [looking for feedback and co | `--props` | enable changing global properties via POST /props (default: disabled)
(env: LLAMA_ARG_ENDPOINT_PROPS) | | `--no-slots` | disables slots monitoring endpoint
(env: LLAMA_ARG_NO_ENDPOINT_SLOTS) | | `--slot-save-path PATH` | path to save slot kv cache (default: disabled) | -| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)
if suffix/prefix are specified, template will be disabled
only commonly used templates are accepted:
https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
(env: LLAMA_ARG_CHAT_TEMPLATE) | +| `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)
if suffix/prefix are specified, template will be disabled
list of built-in templates:
chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, exaone3, gemma, granite, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, monarch, openchat, orion, phi3, rwkv-world, vicuna, vicuna-orca, zephyr
(env: LLAMA_ARG_CHAT_TEMPLATE) | | `-sps, --slot-prompt-similarity SIMILARITY` | how much the prompt of a request must match the prompt of a slot in order to use that slot (default: 0.50, 0.0 = disabled)
| | `--lora-init-without-apply` | load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: disabled) | +| `--draft-max, --draft, --draft-n N` | number of tokens to draft for speculative decoding (default: 16)
(env: LLAMA_ARG_DRAFT_MAX) | +| `--draft-min, --draft-n-min N` | minimum number of draft tokens to use for speculative decoding (default: 5)
(env: LLAMA_ARG_DRAFT_MIN) | +| `--draft-p-min P` | minimum speculative decoding probability (greedy) (default: 0.9)
(env: LLAMA_ARG_DRAFT_P_MIN) | +| `-cd, --ctx-size-draft N` | size of the prompt context for the draft model (default: 0, 0 = loaded from model)
(env: LLAMA_ARG_CTX_SIZE_DRAFT) | +| `-devd, --device-draft ` | comma-separated list of devices to use for offloading the draft model (none = don't offload)
use --list-devices to see a list of available devices | +| `-ngld, --gpu-layers-draft, --n-gpu-layers-draft N` | number of layers to store in VRAM for the draft model
(env: LLAMA_ARG_N_GPU_LAYERS_DRAFT) | +| `-md, --model-draft FNAME` | draft model for speculative decoding (default: unused)
(env: LLAMA_ARG_MODEL_DRAFT) | Note: If both command line argument and environment variable are both set for the same param, the argument will take precedence over env var. @@ -188,12 +198,6 @@ services: `llama-server` is built alongside everything else from the root of the project -- Using `make`: - - ```bash - make llama-server - ``` - - Using `CMake`: ```bash @@ -207,15 +211,6 @@ services: `llama-server` can also be built with SSL support using OpenSSL 3 -- Using `make`: - - ```bash - # NOTE: For non-system openssl, use the following: - # CXXFLAGS="-I /path/to/openssl/include" - # LDFLAGS="-L /path/to/openssl/lib" - make LLAMA_SERVER_SSL=true llama-server - ``` - - Using `CMake`: ```bash @@ -223,6 +218,37 @@ services: cmake --build build --config Release -t llama-server ``` +## Web UI + +The project includes a web-based user interface that enables interaction with the model through the `/chat/completions` endpoint. + +The web UI is developed using: +- `vue` framework for frontend development +- `tailwindcss` and `daisyui` for styling +- `vite` for build tooling + +A pre-built version is available as a single HTML file under `/public` directory. + +To build or to run the dev server (with hot reload): + +```sh +# make sure you have nodejs installed +cd examples/server/webui +npm i + +# to run the dev server +npm run dev + +# to build the public/index.html +npm run build +``` + +NOTE: if you are using the vite dev server, you can change the API base URL to llama.cpp. To do that, run this code snippet in browser's console: + +```js +localStorage.setItem('base', 'http://localhost:8080') +``` + ## Quick Start To get started right away, run the following command, making sure to use the correct path for the model you have: @@ -277,23 +303,23 @@ mkdir llama-client cd llama-client ``` -Create a index.js file and put this inside: +Create an index.js file and put this inside: ```javascript -const prompt = `Building a website can be done in 10 simple steps:`; +const prompt = "Building a website can be done in 10 simple steps:" -async function Test() { +async function test() { let response = await fetch("http://127.0.0.1:8080/completion", { - method: 'POST', + method: "POST", body: JSON.stringify({ prompt, - n_predict: 512, + n_predict: 64, }) }) console.log((await response.json()).content) } -Test() +test() ``` And run it: @@ -317,138 +343,178 @@ node index.js ### POST `/completion`: Given a `prompt`, it returns the predicted completion. - *Options:* +> [!IMPORTANT] +> +> This endpoint is **not** OAI-compatible - `prompt`: Provide the prompt for this completion as a string or as an array of strings or numbers representing tokens. Internally, if `cache_prompt` is `true`, the prompt is compared to the previous completion and only the "unseen" suffix is evaluated. A `BOS` token is inserted at the start, if all of the following conditions are true: +*Options:* - - The prompt is a string or an array with the first element given as a string - - The model's `tokenizer.ggml.add_bos_token` metadata is `true` +`prompt`: Provide the prompt for this completion as a string or as an array of strings or numbers representing tokens. Internally, if `cache_prompt` is `true`, the prompt is compared to the previous completion and only the "unseen" suffix is evaluated. 
A `BOS` token is inserted at the start, if all of the following conditions are true: - These input shapes and data type are allowed for `prompt`: + - The prompt is a string or an array with the first element given as a string + - The model's `tokenizer.ggml.add_bos_token` metadata is `true` - - Single string: `"string"` - - Single sequence of tokens: `[12, 34, 56]` - - Mixed tokens and strings: `[12, 34, "string", 56, 78]` +These input shapes and data type are allowed for `prompt`: - Multiple prompts are also supported. In this case, the completion result will be an array. + - Single string: `"string"` + - Single sequence of tokens: `[12, 34, 56]` + - Mixed tokens and strings: `[12, 34, "string", 56, 78]` - - Only strings: `["string1", "string2"]` - - Strings and sequences of tokens: `["string1", [12, 34, 56]]` - - Mixed types: `[[12, 34, "string", 56, 78], [12, 34, 56], "string"]` +Multiple prompts are also supported. In this case, the completion result will be an array. - `temperature`: Adjust the randomness of the generated text. Default: `0.8` + - Only strings: `["string1", "string2"]` + - Strings and sequences of tokens: `["string1", [12, 34, 56]]` + - Mixed types: `[[12, 34, "string", 56, 78], [12, 34, 56], "string"]` - `dynatemp_range`: Dynamic temperature range. The final temperature will be in the range of `[temperature - dynatemp_range; temperature + dynatemp_range]` Default: `0.0`, which is disabled. +`temperature`: Adjust the randomness of the generated text. Default: `0.8` - `dynatemp_exponent`: Dynamic temperature exponent. Default: `1.0` +`dynatemp_range`: Dynamic temperature range. The final temperature will be in the range of `[temperature - dynatemp_range; temperature + dynatemp_range]` Default: `0.0`, which is disabled. - `top_k`: Limit the next token selection to the K most probable tokens. Default: `40` +`dynatemp_exponent`: Dynamic temperature exponent. Default: `1.0` - `top_p`: Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P. Default: `0.95` +`top_k`: Limit the next token selection to the K most probable tokens. Default: `40` - `min_p`: The minimum probability for a token to be considered, relative to the probability of the most likely token. Default: `0.05` +`top_p`: Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P. Default: `0.95` - `n_predict`: Set the maximum number of tokens to predict when generating text. **Note:** May exceed the set limit slightly if the last token is a partial multibyte character. When 0, no tokens will be generated but the prompt is evaluated into the cache. Default: `-1`, where `-1` is infinity. +`min_p`: The minimum probability for a token to be considered, relative to the probability of the most likely token. Default: `0.05` - `n_indent`: Specify the minimum line indentation for the generated text in number of whitespace characters. Useful for code completion tasks. Default: `0` +`n_predict`: Set the maximum number of tokens to predict when generating text. **Note:** May exceed the set limit slightly if the last token is a partial multibyte character. When 0, no tokens will be generated but the prompt is evaluated into the cache. Default: `-1`, where `-1` is infinity. - `n_keep`: Specify the number of tokens from the prompt to retain when the context size is exceeded and tokens need to be discarded. The number excludes the BOS token. - By default, this value is set to `0`, meaning no tokens are kept. 
Use `-1` to retain all tokens from the prompt. +`n_indent`: Specify the minimum line indentation for the generated text in number of whitespace characters. Useful for code completion tasks. Default: `0` - `stream`: It allows receiving each predicted token in real-time instead of waiting for the completion to finish. To enable this, set to `true`. +`n_keep`: Specify the number of tokens from the prompt to retain when the context size is exceeded and tokens need to be discarded. The number excludes the BOS token. +By default, this value is set to `0`, meaning no tokens are kept. Use `-1` to retain all tokens from the prompt. - `stop`: Specify a JSON array of stopping strings. - These words will not be included in the completion, so make sure to add them to the prompt for the next iteration. Default: `[]` +`stream`: Allows receiving each predicted token in real-time instead of waiting for the completion to finish (uses a different response format). To enable this, set to `true`. - `typical_p`: Enable locally typical sampling with parameter p. Default: `1.0`, which is disabled. +`stop`: Specify a JSON array of stopping strings. +These words will not be included in the completion, so make sure to add them to the prompt for the next iteration. Default: `[]` - `repeat_penalty`: Control the repetition of token sequences in the generated text. Default: `1.1` +`typical_p`: Enable locally typical sampling with parameter p. Default: `1.0`, which is disabled. - `repeat_last_n`: Last n tokens to consider for penalizing repetition. Default: `64`, where `0` is disabled and `-1` is ctx-size. +`repeat_penalty`: Control the repetition of token sequences in the generated text. Default: `1.1` - `penalize_nl`: Penalize newline tokens when applying the repeat penalty. Default: `true` +`repeat_last_n`: Last n tokens to consider for penalizing repetition. Default: `64`, where `0` is disabled and `-1` is ctx-size. - `presence_penalty`: Repeat alpha presence penalty. Default: `0.0`, which is disabled. +`presence_penalty`: Repeat alpha presence penalty. Default: `0.0`, which is disabled. - `frequency_penalty`: Repeat alpha frequency penalty. Default: `0.0`, which is disabled. +`frequency_penalty`: Repeat alpha frequency penalty. Default: `0.0`, which is disabled. - `dry_multiplier`: Set the DRY (Don't Repeat Yourself) repetition penalty multiplier. Default: `0.0`, which is disabled. +`dry_multiplier`: Set the DRY (Don't Repeat Yourself) repetition penalty multiplier. Default: `0.0`, which is disabled. - `dry_base`: Set the DRY repetition penalty base value. Default: `1.75` +`dry_base`: Set the DRY repetition penalty base value. Default: `1.75` - `dry_allowed_length`: Tokens that extend repetition beyond this receive exponentially increasing penalty: multiplier * base ^ (length of repeating sequence before token - allowed length). Default: `2` +`dry_allowed_length`: Tokens that extend repetition beyond this receive exponentially increasing penalty: multiplier * base ^ (length of repeating sequence before token - allowed length). Default: `2` - `dry_penalty_last_n`: How many tokens to scan for repetitions. Default: `-1`, where `0` is disabled and `-1` is context size. +`dry_penalty_last_n`: How many tokens to scan for repetitions. Default: `-1`, where `0` is disabled and `-1` is context size. - `dry_sequence_breakers`: Specify an array of sequence breakers for DRY sampling. Only a JSON array of strings is accepted. 
Default: `['\n', ':', '"', '*']` +`dry_sequence_breakers`: Specify an array of sequence breakers for DRY sampling. Only a JSON array of strings is accepted. Default: `['\n', ':', '"', '*']` - `xtc_probability`: Set the chance for token removal via XTC sampler. Default: `0.0`, which is disabled. +`xtc_probability`: Set the chance for token removal via XTC sampler. Default: `0.0`, which is disabled. - `xtc_threshold`: Set a minimum probability threshold for tokens to be removed via XTC sampler. Default: `0.1` (> `0.5` disables XTC) +`xtc_threshold`: Set a minimum probability threshold for tokens to be removed via XTC sampler. Default: `0.1` (> `0.5` disables XTC) - `mirostat`: Enable Mirostat sampling, controlling perplexity during text generation. Default: `0`, where `0` is disabled, `1` is Mirostat, and `2` is Mirostat 2.0. +`mirostat`: Enable Mirostat sampling, controlling perplexity during text generation. Default: `0`, where `0` is disabled, `1` is Mirostat, and `2` is Mirostat 2.0. - `mirostat_tau`: Set the Mirostat target entropy, parameter tau. Default: `5.0` +`mirostat_tau`: Set the Mirostat target entropy, parameter tau. Default: `5.0` - `mirostat_eta`: Set the Mirostat learning rate, parameter eta. Default: `0.1` +`mirostat_eta`: Set the Mirostat learning rate, parameter eta. Default: `0.1` - `grammar`: Set grammar for grammar-based sampling. Default: no grammar +`grammar`: Set grammar for grammar-based sampling. Default: no grammar - `json_schema`: Set a JSON schema for grammar-based sampling (e.g. `{"items": {"type": "string"}, "minItems": 10, "maxItems": 100}` of a list of strings, or `{}` for any JSON). See [tests](../../tests/test-json-schema-to-grammar.cpp) for supported features. Default: no JSON schema. +`json_schema`: Set a JSON schema for grammar-based sampling (e.g. `{"items": {"type": "string"}, "minItems": 10, "maxItems": 100}` of a list of strings, or `{}` for any JSON). See [tests](../../tests/test-json-schema-to-grammar.cpp) for supported features. Default: no JSON schema. - `seed`: Set the random number generator (RNG) seed. Default: `-1`, which is a random seed. +`seed`: Set the random number generator (RNG) seed. Default: `-1`, which is a random seed. - `ignore_eos`: Ignore end of stream token and continue generating. Default: `false` +`ignore_eos`: Ignore end of stream token and continue generating. Default: `false` - `logit_bias`: Modify the likelihood of a token appearing in the generated text completion. For example, use `"logit_bias": [[15043,1.0]]` to increase the likelihood of the token 'Hello', or `"logit_bias": [[15043,-1.0]]` to decrease its likelihood. Setting the value to false, `"logit_bias": [[15043,false]]` ensures that the token `Hello` is never produced. The tokens can also be represented as strings, e.g. `[["Hello, World!",-0.5]]` will reduce the likelihood of all the individual tokens that represent the string `Hello, World!`, just like the `presence_penalty` does. Default: `[]` +`logit_bias`: Modify the likelihood of a token appearing in the generated text completion. For example, use `"logit_bias": [[15043,1.0]]` to increase the likelihood of the token 'Hello', or `"logit_bias": [[15043,-1.0]]` to decrease its likelihood. Setting the value to false, `"logit_bias": [[15043,false]]` ensures that the token `Hello` is never produced. The tokens can also be represented as strings, e.g. 
`[["Hello, World!",-0.5]]` will reduce the likelihood of all the individual tokens that represent the string `Hello, World!`, just like the `presence_penalty` does. Default: `[]` - `n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token given the sampling settings. Note that for temperature < 0 the tokens are sampled greedily but token probabilities are still being calculated via a simple softmax of the logits without considering any other sampler settings. Default: `0` +`n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token given the sampling settings. Note that for temperature < 0 the tokens are sampled greedily but token probabilities are still being calculated via a simple softmax of the logits without considering any other sampler settings. Default: `0` - `min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0` +`min_keep`: If greater than 0, force samplers to return N possible tokens at minimum. Default: `0` - `t_max_predict_ms`: Set a time limit in milliseconds for the prediction (a.k.a. text-generation) phase. The timeout will trigger if the generation takes more than the specified time (measured since the first token was generated) and if a new-line character has already been generated. Useful for FIM applications. Default: `0`, which is disabled. +`t_max_predict_ms`: Set a time limit in milliseconds for the prediction (a.k.a. text-generation) phase. The timeout will trigger if the generation takes more than the specified time (measured since the first token was generated) and if a new-line character has already been generated. Useful for FIM applications. Default: `0`, which is disabled. - `image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA. +`image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA. - `id_slot`: Assign the completion task to an specific slot. If is -1 the task will be assigned to a Idle slot. Default: `-1` +`id_slot`: Assign the completion task to an specific slot. If is -1 the task will be assigned to a Idle slot. Default: `-1` - `cache_prompt`: Re-use KV cache from a previous request if possible. This way the common prefix does not have to be re-processed, only the suffix that differs between the requests. Because (depending on the backend) the logits are **not** guaranteed to be bit-for-bit identical for different batch sizes (prompt processing vs. token generation) enabling this option can cause nondeterministic results. Default: `false` +`cache_prompt`: Re-use KV cache from a previous request if possible. 
This way the common prefix does not have to be re-processed, only the suffix that differs between the requests. Because (depending on the backend) the logits are **not** guaranteed to be bit-for-bit identical for different batch sizes (prompt processing vs. token generation), enabling this option can cause nondeterministic results. Default: `true` - `samplers`: The order the samplers should be applied in. An array of strings representing sampler type names. If a sampler is not set, it will not be used. If a sampler is specified more than once, it will be applied multiple times. Default: `["dry", "top_k", "typ_p", "top_p", "min_p", "xtc", "temperature"]` - these are all the available values. +`return_tokens`: Return the raw generated token ids in the `tokens` field. Otherwise `tokens` remains empty. Default: `false` -**Response format** +`samplers`: The order the samplers should be applied in. An array of strings representing sampler type names. If a sampler is not set, it will not be used. If a sampler is specified more than once, it will be applied multiple times. Default: `["dry", "top_k", "typ_p", "top_p", "min_p", "xtc", "temperature"]` - these are all the available values. -- Note: When using streaming mode (`stream`), only `content` and `stop` will be returned until end of completion. +`timings_per_token`: Include prompt processing and text generation speed information in each response. Default: `false` -- `completion_probabilities`: An array of token probabilities for each completion. The array's length is `n_predict`. Each item in the array has the following structure: +`post_sampling_probs`: Returns the probabilities of top `n_probs` tokens after applying the sampling chain. -```json -{ - "content": "", - "probs": [ - { - "prob": float, - "tok_str": "" - }, - { - "prob": float, - "tok_str": "" - }, - ... - ] -}, -``` +**Response format** -Notice that each `probs` is an array of length `n_probs`. +- Note: In streaming mode (`stream`), only `content`, `tokens` and `stop` will be returned until end of completion. Responses are sent using the [Server-sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html) standard. Note: the browser's `EventSource` interface cannot be used due to its lack of `POST` request support. + +- `completion_probabilities`: An array of token probabilities for each completion. The array's length is `n_predict`. Each item in the array has a nested array `top_logprobs`. It contains at **maximum** `n_probs` elements: + ```json + { + "content": "", + "tokens": [ generated token ids if requested ], + ... + "probs": [ + { + "id": , + "logprob": float, + "token": "", + "bytes": [int, int, ...], + "top_logprobs": [ + { + "id": , + "logprob": float, + "token": "", + "bytes": [int, int, ...], + }, + { + "id": , + "logprob": float, + "token": "", + "bytes": [int, int, ...], + }, + ... + ] + }, + { + "id": , + "logprob": float, + "token": "", + "bytes": [int, int, ...], + "top_logprobs": [ + ... + ] + }, + ... + ] + }, + ``` + Please note that if `post_sampling_probs` is set to `true`: + - `logprob` will be replaced with `prob`, with the value between 0.0 and 1.0 + - `top_logprobs` will be replaced with `top_probs`. Each element contains: + - `id`: token ID + - `token`: token in string + - `bytes`: token in bytes + - `prob`: token probability, with the value between 0.0 and 1.0 + - Number of elements in `top_probs` may be less than `n_probs`
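+For illustration, the probability fields above can be requested with a minimal streaming call such as the following (a sketch; the prompt, parameter values and the `localhost:8080` address are placeholders):
+
+```shell
+curl --no-buffer http://localhost:8080/completion \
+    -H "Content-Type: application/json" \
+    -d '{
+        "prompt": "Building a website can be done in 10 simple steps:",
+        "n_predict": 16,
+        "stream": true,
+        "n_probs": 2
+    }'
+```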
- `content`: Completion result as a string (excluding `stopping_word` if any). In case of streaming mode, it will contain the next token as a string. +- `tokens`: Same as `content` but represented as raw token ids. Only populated if `"return_tokens": true` or `"stream": true` in the request. - `stop`: Boolean for use with `stream` to check whether the generation has stopped (Note: This is not related to stopping words array `stop` from input options) - `generation_settings`: The provided options above excluding `prompt` but including `n_ctx`, `model`. These options may differ from the original ones in some way (e.g. bad values filtered out, strings converted to tokens, etc.). -- `model`: The path to the model loaded with `-m` -- `prompt`: The provided `prompt` -- `stopped_eos`: Indicating whether the completion has stopped because it encountered the EOS token -- `stopped_limit`: Indicating whether the completion stopped because `n_predict` tokens were generated before stop words or EOS was encountered -- `stopped_word`: Indicating whether the completion stopped due to encountering a stopping word from `stop` JSON array provided +- `model`: The model alias (for model path, please use `/props` endpoint) +- `prompt`: The processed `prompt` (special tokens may be added) +- `stop_type`: Indicating whether the completion has stopped. Possible values are: + - `none`: Generating (not stopped) + - `eos`: Stopped because it encountered the EOS token + - `limit`: Stopped because `n_predict` tokens were generated before stop words or EOS was encountered + - `word`: Stopped due to encountering a stopping word from `stop` JSON array provided - `stopping_word`: The stopping word encountered which stopped the generation (or "" if not stopped due to a stopping word) - `timings`: Hash of timing information about the completion such as the number of tokens `predicted_per_second` - `tokens_cached`: Number of tokens from the prompt which could be re-used from previous completion (`n_past`) @@ -457,13 +523,13 @@ Notice that each `probs` is an array of length `n_probs`. ### POST `/tokenize`: Tokenize a given text - *Options:* +*Options:* - `content`: (Required) The text to tokenize. +`content`: (Required) The text to tokenize. - `add_special`: (Optional) Boolean indicating if special tokens, i.e. `BOS`, should be inserted. Default: `false` +`add_special`: (Optional) Boolean indicating if special tokens, i.e. `BOS`, should be inserted. Default: `false` - `with_pieces`: (Optional) Boolean indicating whether to return token pieces along with IDs. Default: `false` +`with_pieces`: (Optional) Boolean indicating whether to return token pieces along with IDs. Default: `false` **Response:** @@ -500,52 +566,52 @@ With input 'á' (utf8 hex: C3 A1) on tinyllama/stories260k ### POST `/detokenize`: Convert tokens to text - *Options:* +*Options:* - `tokens`: Set the tokens to detokenize. +`tokens`: Set the tokens to detokenize.
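+As a quick illustration, `/tokenize` and `/detokenize` can be exercised together as follows (a sketch; the token ids shown are model-dependent placeholders, and the default `localhost:8080` address is assumed):
+
+```shell
+curl http://localhost:8080/tokenize \
+    -H "Content-Type: application/json" \
+    -d '{"content": "Hello world", "add_special": false}'
+
+curl http://localhost:8080/detokenize \
+    -H "Content-Type: application/json" \
+    -d '{"tokens": [15043, 3186]}'
+```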
### POST `/embedding`: Generate embedding of a given text The same as [the embedding example](../embedding) does. - *Options:* +*Options:* - `content`: Set the text to process. +`content`: Set the text to process. - `image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be reference in `content`. You can determine the place of the image in the content as in the following: `Image: [img-21].\nCaption: This is a picture of a house`. In this case, `[img-21]` will be replaced by the embeddings of the image with id `21` in the following `image_data` array: `{..., "image_data": [{"data": "", "id": 21}]}`. Use `image_data` only with multimodal models, e.g., LLaVA. +`image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be referenced in `content`. You can determine the place of the image in the content as in the following: `Image: [img-21].\nCaption: This is a picture of a house`. In this case, `[img-21]` will be replaced by the embeddings of the image with id `21` in the following `image_data` array: `{..., "image_data": [{"data": "", "id": 21}]}`. Use `image_data` only with multimodal models, e.g., LLaVA. ### POST `/reranking`: Rerank documents according to a given query Similar to https://jina.ai/reranker/ but might change in the future. Requires a reranker model (such as [bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3)) and the `--embedding --pooling rank` options. - *Options:* +*Options:* - `query`: The query against which the documents will be ranked. +`query`: The query against which the documents will be ranked. - `documents`: An array strings representing the documents to be ranked. +`documents`: An array of strings representing the documents to be ranked. - *Aliases:* - - `/rerank` - - `/v1/rerank` - - `/v1/reranking` +*Aliases:* + - `/rerank` + - `/v1/rerank` + - `/v1/reranking` - *Examples:* +*Examples:* - ```shell - curl http://127.0.0.1:8012/v1/rerank \ - -H "Content-Type: application/json" \ - -d '{ - "model": "some-model", - "query": "What is panda?", - "top_n": 3, - "documents": [ - "hi", - "it is a bear", - "The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." - ] - }' | jq - ``` +```shell +curl http://127.0.0.1:8012/v1/rerank \ + -H "Content-Type: application/json" \ + -d '{ + "model": "some-model", + "query": "What is panda?", + "top_n": 3, + "documents": [ + "hi", + "it is a bear", + "The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." + ] + }' | jq +``` ### POST `/infill`: For code infilling. @@ -589,14 +655,82 @@ This endpoint is public (no API key check). By default, it is read-only. To make ```json { - "default_generation_settings": { ... 
}, + "default_generation_settings": { + "id": 0, + "id_task": -1, + "n_ctx": 1024, + "speculative": false, + "is_processing": false, + "params": { + "n_predict": -1, + "seed": 4294967295, + "temperature": 0.800000011920929, + "dynatemp_range": 0.0, + "dynatemp_exponent": 1.0, + "top_k": 40, + "top_p": 0.949999988079071, + "min_p": 0.05000000074505806, + "xtc_probability": 0.0, + "xtc_threshold": 0.10000000149011612, + "typical_p": 1.0, + "repeat_last_n": 64, + "repeat_penalty": 1.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0, + "dry_multiplier": 0.0, + "dry_base": 1.75, + "dry_allowed_length": 2, + "dry_penalty_last_n": -1, + "dry_sequence_breakers": [ + "\n", + ":", + "\"", + "*" + ], + "mirostat": 0, + "mirostat_tau": 5.0, + "mirostat_eta": 0.10000000149011612, + "stop": [], + "max_tokens": -1, + "n_keep": 0, + "n_discard": 0, + "ignore_eos": false, + "stream": true, + "n_probs": 0, + "min_keep": 0, + "grammar": "", + "samplers": [ + "dry", + "top_k", + "typ_p", + "top_p", + "min_p", + "xtc", + "temperature" + ], + "speculative.n_max": 16, + "speculative.n_min": 5, + "speculative.p_min": 0.8999999761581421, + "timings_per_token": false + }, + "prompt": "", + "next_token": { + "has_next_token": true, + "has_new_line": false, + "n_remain": -1, + "n_decoded": 0, + "stopping_word": "" + } + }, "total_slots": 1, - "chat_template": "" + "model_path": "../models/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + "chat_template": "..." } ``` - `default_generation_settings` - the default generation settings for the `/completion` endpoint, which has the same fields as the `generation_settings` response object from the `/completion` endpoint. - `total_slots` - the total number of slots for processing requests (defined by `--parallel` option) +- `model_path` - the path to the model file (same as the `-m` argument) - `chat_template` - the model's original Jinja2 prompt template ### POST `/props`: Change server global properties. @@ -611,89 +745,131 @@ To use this endpoint with POST method, you need to start server with `--props` Given a ChatML-formatted JSON description in `messages`, it returns the predicted completion. Both synchronous and streaming modes are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec are being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used. - *Options:* +*Options:* - See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). While some OpenAI-specific features such as function calling aren't supported, llama.cpp `/completion`-specific features such as `mirostat` are supported. +See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). While some OpenAI-specific features such as function calling aren't supported, llama.cpp `/completion`-specific features such as `mirostat` are supported. - The `response_format` parameter supports both plain JSON output (e.g. `{"type": "json_object"}`) and schema-constrained JSON (e.g. 
`{"type": "json_object", "schema": {"type": "string", "minLength": 10, "maxLength": 100}}` or `{"type": "json_schema", "schema": {"properties": { "name": { "title": "Name", "type": "string" }, "date": { "title": "Date", "type": "string" }, "participants": { "items": {"type: "string" }, "title": "Participants", "type": "string" } } } }`), similar to other OpenAI-inspired API providers. +The `response_format` parameter supports both plain JSON output (e.g. `{"type": "json_object"}`) and schema-constrained JSON (e.g. `{"type": "json_object", "schema": {"type": "string", "minLength": 10, "maxLength": 100}}` or `{"type": "json_schema", "schema": {"properties": { "name": { "title": "Name", "type": "string" }, "date": { "title": "Date", "type": "string" }, "participants": { "items": {"type: "string" }, "title": "Participants", "type": "string" } } } }`), similar to other OpenAI-inspired API providers. - *Examples:* +*Examples:* - You can use either Python `openai` library with appropriate checkpoints: +You can use either Python `openai` library with appropriate checkpoints: - ```python - import openai +```python +import openai - client = openai.OpenAI( - base_url="http://localhost:8080/v1", # "http://:port" - api_key = "sk-no-key-required" - ) +client = openai.OpenAI( + base_url="http://localhost:8080/v1", # "http://:port" + api_key = "sk-no-key-required" +) - completion = client.chat.completions.create( - model="gpt-3.5-turbo", - messages=[ - {"role": "system", "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests."}, - {"role": "user", "content": "Write a limerick about python exceptions"} - ] - ) +completion = client.chat.completions.create( +model="gpt-3.5-turbo", +messages=[ + {"role": "system", "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests."}, + {"role": "user", "content": "Write a limerick about python exceptions"} +] +) - print(completion.choices[0].message) - ``` +print(completion.choices[0].message) +``` - ... or raw HTTP requests: +... or raw HTTP requests: - ```shell - curl http://localhost:8080/v1/chat/completions \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer no-key" \ - -d '{ - "model": "gpt-3.5-turbo", - "messages": [ - { - "role": "system", - "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests." - }, - { - "role": "user", - "content": "Write a limerick about python exceptions" - } - ] - }' - ``` +```shell +curl http://localhost:8080/v1/chat/completions \ +-H "Content-Type: application/json" \ +-H "Authorization: Bearer no-key" \ +-d '{ +"model": "gpt-3.5-turbo", +"messages": [ +{ + "role": "system", + "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests." +}, +{ + "role": "user", + "content": "Write a limerick about python exceptions" +} +] +}' +``` ### POST `/v1/embeddings`: OpenAI-compatible embeddings API - *Options:* +This endpoint requires that the model uses a pooling different than type `none`. The embeddings are normalized using the Eucledian norm. - See [OpenAI Embeddings API documentation](https://platform.openai.com/docs/api-reference/embeddings). +*Options:* - *Examples:* +See [OpenAI Embeddings API documentation](https://platform.openai.com/docs/api-reference/embeddings). 
- - input as string +*Examples:* - ```shell - curl http://localhost:8080/v1/embeddings \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer no-key" \ - -d '{ - "input": "hello", - "model":"GPT-4", - "encoding_format": "float" - }' - ``` +- input as string - - `input` as string array + ```shell + curl http://localhost:8080/v1/embeddings \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer no-key" \ + -d '{ + "input": "hello", + "model":"GPT-4", + "encoding_format": "float" + }' + ``` - ```shell - curl http://localhost:8080/v1/embeddings \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer no-key" \ - -d '{ - "input": ["hello", "world"], - "model":"GPT-4", - "encoding_format": "float" - }' - ``` +- `input` as string array + + ```shell + curl http://localhost:8080/v1/embeddings \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer no-key" \ + -d '{ + "input": ["hello", "world"], + "model":"GPT-4", + "encoding_format": "float" + }' + ``` + +### POST `/embeddings`: non-OpenAI-compatible embeddings API + +This endpoint supports all poolings, including `--pooling none`. When the pooling is `none`, the responses will contain the *unnormalized* embeddings for *all* input tokens. For all other pooling types, only the pooled embeddings are returned, normalized using the Euclidean norm. + +Note that the response format of this endpoint is different from `/v1/embeddings`. + +*Options:* + +Same as the `/v1/embeddings` endpoint. + +*Examples:* + +Same as the `/v1/embeddings` endpoint. + +**Response format** + +```json +[ + { + "index": 0, + "embedding": [ + [ ... embeddings for token 0 ... ], + [ ... embeddings for token 1 ... ], + [ ... ], + [ ... embeddings for token N-1 ... ], + ] + }, + ... + { + "index": P, + "embedding": [ + [ ... embeddings for token 0 ... ], + [ ... embeddings for token 1 ... ], + [ ... ], + [ ... embeddings for token N-1 ... 
], + ] + } +] +``` ### GET `/slots`: Returns the current slots processing state @@ -710,56 +886,73 @@ Example: ```json [ - { - "dynatemp_exponent": 1.0, - "dynatemp_range": 0.0, - "frequency_penalty": 0.0, - "grammar": "", - "id": 0, - "ignore_eos": false, - "is_processing": false, - "logit_bias": [], - "min_p": 0.05000000074505806, - "mirostat": 0, - "mirostat_eta": 0.10000000149011612, - "mirostat_tau": 5.0, - "model": "llama-2-7b-32k-instruct.Q2_K.gguf", - "n_ctx": 2048, - "n_keep": 0, - "n_predict": 100000, - "n_probs": 0, - "next_token": { - "has_next_token": true, - "n_remain": -1, - "n_decoded": 0, - "stopped_eos": false, - "stopped_limit": false, - "stopped_word": false, - "stopping_word": "" - }, - "penalize_nl": true, - "presence_penalty": 0.0, - "prompt": "Say hello to llama.cpp", - "repeat_last_n": 64, - "repeat_penalty": 1.100000023841858, - "samplers": [ - "top_k", - "typical_p", - "top_p", - "min_p", - "temperature" - ], - "seed": 42, - "stop": [ - "\n" - ], - "stream": false, - "task_id": 0, - "temperature": 0.0, - "top_k": 40, - "top_p": 0.949999988079071, - "typical_p": 1.0 + { + "id": 0, + "id_task": -1, + "n_ctx": 1024, + "speculative": false, + "is_processing": false, + "params": { + "n_predict": -1, + "seed": 4294967295, + "temperature": 0.800000011920929, + "dynatemp_range": 0.0, + "dynatemp_exponent": 1.0, + "top_k": 40, + "top_p": 0.949999988079071, + "min_p": 0.05000000074505806, + "xtc_probability": 0.0, + "xtc_threshold": 0.10000000149011612, + "typical_p": 1.0, + "repeat_last_n": 64, + "repeat_penalty": 1.0, + "presence_penalty": 0.0, + "frequency_penalty": 0.0, + "dry_multiplier": 0.0, + "dry_base": 1.75, + "dry_allowed_length": 2, + "dry_penalty_last_n": -1, + "dry_sequence_breakers": [ + "\n", + ":", + "\"", + "*" + ], + "mirostat": 0, + "mirostat_tau": 5.0, + "mirostat_eta": 0.10000000149011612, + "stop": [], + "max_tokens": -1, + "n_keep": 0, + "n_discard": 0, + "ignore_eos": false, + "stream": true, + "n_probs": 0, + "min_keep": 0, + "grammar": "", + "samplers": [ + "dry", + "top_k", + "typ_p", + "top_p", + "min_p", + "xtc", + "temperature" + ], + "speculative.n_max": 16, + "speculative.n_min": 5, + "speculative.p_min": 0.8999999761581421, + "timings_per_token": false + }, + "prompt": "", + "next_token": { + "has_next_token": true, + "has_new_line": false, + "n_remain": -1, + "n_decoded": 0, + "stopping_word": "" } + } ] ``` @@ -779,9 +972,9 @@ Available metrics: ### POST `/slots/{id_slot}?action=save`: Save the prompt cache of the specified slot to a file. - *Options:* +*Options:* - `filename`: Name of the file to save the slot's prompt cache. The file will be saved in the directory specified by the `--slot-save-path` server parameter. +`filename`: Name of the file to save the slot's prompt cache. The file will be saved in the directory specified by the `--slot-save-path` server parameter. **Response format** @@ -799,9 +992,9 @@ Available metrics: ### POST `/slots/{id_slot}?action=restore`: Restore the prompt cache of the specified slot from a file. - *Options:* +*Options:* - `filename`: Name of the file to restore the slot's prompt cache from. The file should be located in the directory specified by the `--slot-save-path` server parameter. +`filename`: Name of the file to restore the slot's prompt cache from. The file should be located in the directory specified by the `--slot-save-path` server parameter. 
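+For example (a sketch; the slot id, filename and `localhost:8080` address are placeholders, and the server must have been started with `--slot-save-path`):
+
+```shell
+# save the prompt cache of slot 0 to a file
+curl -X POST "http://localhost:8080/slots/0?action=save" \
+    -H "Content-Type: application/json" \
+    -d '{"filename": "slot0.bin"}'
+
+# later, restore it into the same slot
+curl -X POST "http://localhost:8080/slots/0?action=restore" \
+    -H "Content-Type: application/json" \
+    -d '{"filename": "slot0.bin"}'
+```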
**Response format** diff --git a/examples/server/deps.sh b/examples/server/deps.sh deleted file mode 100755 index 1ff80d0569987..0000000000000 --- a/examples/server/deps.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# Download and update deps for binary - -# get the directory of this script file -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -PUBLIC=$DIR/public - -echo "download js bundle files" - -# Note for contributors: Always pin to a specific version "maj.min.patch" to avoid breaking the CI - -curl -L https://cdn.tailwindcss.com/3.4.14 > $PUBLIC/deps_tailwindcss.js -echo >> $PUBLIC/deps_tailwindcss.js # add newline - -curl -L https://cdnjs.cloudflare.com/ajax/libs/daisyui/4.12.14/styled.min.css > $PUBLIC/deps_daisyui.min.css -curl -L https://cdnjs.cloudflare.com/ajax/libs/daisyui/4.12.14/themes.min.css >> $PUBLIC/deps_daisyui.min.css -echo >> $PUBLIC/deps_daisyui.min.css # add newline - -curl -L https://unpkg.com/vue@3.5.12/dist/vue.esm-browser.js > $PUBLIC/deps_vue.esm-browser.js -echo >> $PUBLIC/deps_vue.esm-browser.js # add newline - -curl -L https://cdnjs.cloudflare.com/ajax/libs/markdown-it/13.0.2/markdown-it.js > $PUBLIC/deps_markdown-it.js -echo >> $PUBLIC/deps_markdown-it.js # add newline - -ls -lah $PUBLIC diff --git a/examples/server/public/completion.js b/examples/server/public/completion.js deleted file mode 100644 index 54a0f22f58e6f..0000000000000 --- a/examples/server/public/completion.js +++ /dev/null @@ -1,225 +0,0 @@ -const paramDefaults = { - stream: true, - temperature: 0.2, -}; - -let generation_settings = null; - -export class CompletionError extends Error { - constructor(message, name, data) { - super(message); - this.name = name; - } -}; - -// Completes the prompt as a generator. Recommended for most use cases. -// -// Example: -// -// import { llama } from '/completion.js' -// -// const request = llama("Tell me a joke", {n_predict: 800}) -// for await (const chunk of request) { -// document.write(chunk.data.content) -// } -// -export async function* llama(prompt, params = {}, config = {}) { - let controller = config.controller; - const api_url = config.api_url?.replace(/\/+$/, '') || ""; - - if (!controller) { - controller = new AbortController(); - } - - const completionParams = { ...paramDefaults, ...params, prompt }; - - const response = await fetch(`${api_url}${config.endpoint || '/completion'}`, { - method: 'POST', - body: JSON.stringify(completionParams), - headers: { - 'Connection': 'keep-alive', - 'Content-Type': 'application/json', - 'Accept': 'text/event-stream', - ...(params.api_key ? 
{'Authorization': `Bearer ${params.api_key}`} : {}) - }, - signal: controller.signal, - }); - - const status = response.status; - if (status !== 200) { - try { - const body = await response.json(); - if (body && body.error && body.error.message) { - throw new CompletionError(body.error.message, 'ServerError'); - } - } catch (err) { - throw new CompletionError(err.message, 'ServerError'); - } - } - - const reader = response.body.getReader(); - const decoder = new TextDecoder(); - - let content = ""; - let leftover = ""; // Buffer for partially read lines - - try { - let cont = true; - - while (cont) { - const result = await reader.read(); - if (result.done) { - break; - } - - // Add any leftover data to the current chunk of data - const text = leftover + decoder.decode(result.value); - - // Check if the last character is a line break - const endsWithLineBreak = text.endsWith('\n'); - - // Split the text into lines - let lines = text.split('\n'); - - // If the text doesn't end with a line break, then the last line is incomplete - // Store it in leftover to be added to the next chunk of data - if (!endsWithLineBreak) { - leftover = lines.pop(); - } else { - leftover = ""; // Reset leftover if we have a line break at the end - } - - // Parse all sse events and add them to result - const regex = /^(\S+):\s(.*)$/gm; - for (const line of lines) { - const match = regex.exec(line); - if (match) { - result[match[1]] = match[2]; - if (result.data === '[DONE]') { - cont = false; - break; - } - - // since we know this is llama.cpp, let's just decode the json in data - if (result.data) { - result.data = JSON.parse(result.data); - content += result.data.content; - - // yield - yield result; - - // if we got a stop token from server, we will break here - if (result.data.stop) { - if (result.data.generation_settings) { - generation_settings = result.data.generation_settings; - } - cont = false; - break; - } - } - if (result.error) { - try { - result.error = JSON.parse(result.error); - if (result.error.message.includes('slot unavailable')) { - // Throw an error to be caught by upstream callers - throw new Error('slot unavailable'); - } else { - console.error(`llama.cpp error [${result.error.code} - ${result.error.type}]: ${result.error.message}`); - } - } catch(e) { - console.error(`llama.cpp error ${result.error}`) - } - } - } - } - } - } catch (e) { - if (e.name !== 'AbortError') { - console.error("llama error: ", e); - } - throw e; - } - finally { - controller.abort(); - } - - return content; -} - -// Call llama, return an event target that you can subscribe to -// -// Example: -// -// import { llamaEventTarget } from '/completion.js' -// -// const conn = llamaEventTarget(prompt) -// conn.addEventListener("message", (chunk) => { -// document.write(chunk.detail.content) -// }) -// -export const llamaEventTarget = (prompt, params = {}, config = {}) => { - const eventTarget = new EventTarget(); - (async () => { - let content = ""; - for await (const chunk of llama(prompt, params, config)) { - if (chunk.data) { - content += chunk.data.content; - eventTarget.dispatchEvent(new CustomEvent("message", { detail: chunk.data })); - } - if (chunk.data.generation_settings) { - eventTarget.dispatchEvent(new CustomEvent("generation_settings", { detail: chunk.data.generation_settings })); - } - if (chunk.data.timings) { - eventTarget.dispatchEvent(new CustomEvent("timings", { detail: chunk.data.timings })); - } - } - eventTarget.dispatchEvent(new CustomEvent("done", { detail: { content } })); - })(); - return 
eventTarget; -} - -// Call llama, return a promise that resolves to the completed text. This does not support streaming -// -// Example: -// -// llamaPromise(prompt).then((content) => { -// document.write(content) -// }) -// -// or -// -// const content = await llamaPromise(prompt) -// document.write(content) -// -export const llamaPromise = (prompt, params = {}, config = {}) => { - return new Promise(async (resolve, reject) => { - let content = ""; - try { - for await (const chunk of llama(prompt, params, config)) { - content += chunk.data.content; - } - resolve(content); - } catch (error) { - reject(error); - } - }); -}; - -/** - * (deprecated) - */ -export const llamaComplete = async (params, controller, callback) => { - for await (const chunk of llama(params.prompt, params, { controller })) { - callback(chunk); - } -} - -// Get the model info from the server. This is useful for getting the context window and so on. -export const llamaModelInfo = async (config = {}) => { - if (!generation_settings) { - const api_url = config.api_url?.replace(/\/+$/, '') || ""; - const props = await fetch(`${api_url}/props`).then(r => r.json()); - generation_settings = props.default_generation_settings; - } - return generation_settings; -} diff --git a/examples/server/public/deps_daisyui.min.css b/examples/server/public/deps_daisyui.min.css deleted file mode 100644 index bc85296519d9a..0000000000000 --- a/examples/server/public/deps_daisyui.min.css +++ /dev/null @@ -1,13 +0,0 @@ -.alert{display:grid;width:100%;grid-auto-flow:row;align-content:flex-start;align-items:center;justify-items:center;gap:1rem;text-align:center}@media (min-width:640px){.alert{grid-auto-flow:column;grid-template-columns:auto minmax(auto,1fr);justify-items:start;text-align:start}}.artboard{width:100%}.avatar{position:relative;display:inline-flex}.avatar>div{display:block;aspect-ratio:1/1;overflow:hidden}.avatar 
img{height:100%;width:100%;object-fit:cover}.avatar.placeholder>div{display:flex;align-items:center;justify-content:center}.badge{display:inline-flex;align-items:center;justify-content:center;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.2s;transition-timing-function:cubic-bezier(0,0,.2,1);height:1.25rem;font-size:.875rem;line-height:1.25rem;width:fit-content;padding-left:.563rem;padding-right:.563rem}.btm-nav{position:fixed;bottom:0;left:0;right:0;display:flex;width:100%;flex-direction:row;align-items:center;justify-content:space-around;padding-bottom:env(safe-area-inset-bottom)}.btm-nav>*{position:relative;display:flex;height:100%;flex-basis:100%;cursor:pointer;flex-direction:column;align-items:center;justify-content:center;gap:.25rem}.breadcrumbs{max-width:100%;overflow-x:auto}.breadcrumbs>ol,.breadcrumbs>ul{display:flex;align-items:center;white-space:nowrap;min-height:min-content}.breadcrumbs>ol>li,.breadcrumbs>ul>li{display:flex;align-items:center}.breadcrumbs>ol>li>a,.breadcrumbs>ul>li>a{display:flex;cursor:pointer;align-items:center}@media(hover:hover){.breadcrumbs>ol>li>a:hover,.breadcrumbs>ul>li>a:hover{text-decoration-line:underline}}.btn{display:inline-flex;height:3rem;min-height:3rem;flex-shrink:0;cursor:pointer;user-select:none;flex-wrap:wrap;align-items:center;justify-content:center;border-radius:var(--rounded-btn,.5rem);border-color:transparent;padding-left:1rem;padding-right:1rem;text-align:center;font-size:.875rem;line-height:1em}.btn-disabled,.btn:disabled,.btn[disabled]{pointer-events:none}.btn-square{height:3rem;width:3rem;padding:0}.btn-circle{height:3rem;width:3rem;border-radius:9999px;padding:0}:where(.btn:is(input[type=checkbox])),:where(.btn:is(input[type=radio])){width:auto;appearance:none}.btn:is(input[type=checkbox]):after,.btn:is(input[type=radio]):after{--tw-content:attr(aria-label);content:var(--tw-content)}.card{position:relative;display:flex;flex-direction:column}.card:focus{outline:2px solid transparent;outline-offset:2px}.card-body{display:flex;flex:1 1 auto;flex-direction:column}.card-body :where(p){flex-grow:1}.card-actions{display:flex;flex-wrap:wrap;align-items:flex-start;gap:.5rem}.card figure{display:flex;align-items:center;justify-content:center}.card.image-full{display:grid}.card.image-full:before{position:relative;content:""}.card.image-full:before,.card.image-full>*{grid-column-start:1;grid-row-start:1}.card.image-full>figure img{height:100%;object-fit:cover}.card.image-full>.card-body{position:relative}.carousel{display:inline-flex;overflow-x:scroll;scroll-snap-type:x mandatory;scroll-behavior:smooth}.carousel-vertical{flex-direction:column;overflow-y:scroll;scroll-snap-type:y mandatory}.carousel-item{box-sizing:content-box;display:flex;flex:none;scroll-snap-align:start}.carousel-start .carousel-item{scroll-snap-align:start}.carousel-center .carousel-item{scroll-snap-align:center}.carousel-end .carousel-item{scroll-snap-align:end}.chat{display:grid;grid-template-columns:repeat(2,minmax(0,1fr));column-gap:.75rem;padding-top:.25rem;padding-bottom:.25rem}.chat-image{grid-row:span 2/span 
2;align-self:flex-end}.chat-header{grid-row-start:1;font-size:.875rem;line-height:1.25rem}.chat-footer{grid-row-start:3;font-size:.875rem;line-height:1.25rem}.chat-bubble{position:relative;display:block;width:fit-content;padding-left:1rem;padding-right:1rem;padding-top:.5rem;padding-bottom:.5rem;max-width:90%}.chat-bubble:before{position:absolute;bottom:0;height:.75rem;width:.75rem;background-color:inherit;content:"";mask-size:contain;mask-repeat:no-repeat;mask-position:center}.chat-start{place-items:start;grid-template-columns:auto 1fr}.chat-start .chat-header{grid-column-start:2}.chat-start .chat-footer{grid-column-start:2}.chat-start .chat-image{grid-column-start:1}.chat-start .chat-bubble{grid-column-start:2}.chat-start .chat-bubble:before{mask-image:url("data:image/svg+xml,%3csvg width='3' height='3' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m 0 3 L 3 3 L 3 0 C 3 1 1 3 0 3'/%3e%3c/svg%3e")}[dir=rtl] .chat-start .chat-bubble:before{mask-image:url("data:image/svg+xml,%3csvg width='3' height='3' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m 0 3 L 1 3 L 3 3 C 2 3 0 1 0 0'/%3e%3c/svg%3e")}.chat-end{place-items:end;grid-template-columns:1fr auto}.chat-end .chat-header{grid-column-start:1}.chat-end .chat-footer{grid-column-start:1}.chat-end .chat-image{grid-column-start:2}.chat-end .chat-bubble{grid-column-start:1}.chat-end .chat-bubble:before{mask-image:url("data:image/svg+xml,%3csvg width='3' height='3' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m 0 3 L 1 3 L 3 3 C 2 3 0 1 0 0'/%3e%3c/svg%3e")}[dir=rtl] .chat-end .chat-bubble:before{mask-image:url("data:image/svg+xml,%3csvg width='3' height='3' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m 0 3 L 3 3 L 3 0 C 3 1 1 3 0 3'/%3e%3c/svg%3e")}.checkbox{flex-shrink:0}.collapse:not(td):not(tr):not(colgroup){visibility:visible}.collapse{position:relative;display:grid;overflow:hidden;grid-template-rows:auto 0fr;transition:grid-template-rows .2s}.collapse-content,.collapse-title,.collapse>input[type=checkbox],.collapse>input[type=radio]{grid-column-start:1;grid-row-start:1}.collapse>input[type=checkbox],.collapse>input[type=radio]{appearance:none;opacity:0}.collapse-content{visibility:hidden;grid-column-start:1;grid-row-start:2;min-height:0;transition:visibility .2s}.collapse-open,.collapse:focus:not(.collapse-close),.collapse[open]{grid-template-rows:auto 1fr}.collapse:not(.collapse-close):has(>input[type=checkbox]:checked),.collapse:not(.collapse-close):has(>input[type=radio]:checked){grid-template-rows:auto 1fr}.collapse-open>.collapse-content,.collapse:focus:not(.collapse-close)>.collapse-content,.collapse:not(.collapse-close)>input[type=checkbox]:checked~.collapse-content,.collapse:not(.collapse-close)>input[type=radio]:checked~.collapse-content,.collapse[open]>.collapse-content{visibility:visible;min-height:fit-content}:root .countdown{line-height:1em}.countdown{display:inline-flex}.countdown>*{height:1em;display:inline-block;overflow-y:hidden}.countdown>:before{position:relative;content:"00\A 01\A 02\A 03\A 04\A 05\A 06\A 07\A 08\A 09\A 10\A 11\A 12\A 13\A 14\A 15\A 16\A 17\A 18\A 19\A 20\A 21\A 22\A 23\A 24\A 25\A 26\A 27\A 28\A 29\A 30\A 31\A 32\A 33\A 34\A 35\A 36\A 37\A 38\A 39\A 40\A 41\A 42\A 43\A 44\A 45\A 46\A 47\A 48\A 49\A 50\A 51\A 52\A 53\A 54\A 55\A 56\A 57\A 58\A 59\A 60\A 61\A 62\A 63\A 64\A 65\A 66\A 67\A 68\A 69\A 70\A 71\A 72\A 73\A 74\A 75\A 76\A 77\A 78\A 79\A 80\A 81\A 82\A 83\A 84\A 85\A 86\A 87\A 88\A 89\A 90\A 91\A 92\A 93\A 94\A 95\A 96\A 97\A 98\A 
99\A";white-space:pre;top:calc(var(--value) * -1em)}.diff{position:relative;display:grid;width:100%;overflow:hidden;container-type:inline-size;grid-template-columns:auto 1fr}.diff-resizer{position:relative;top:50%;z-index:1;height:3rem;width:25rem;min-width:1rem;max-width:calc(100cqi - 1rem);resize:horizontal;overflow:hidden;opacity:0;transform-origin:100% 100%;scale:4;translate:1.5rem -1.5rem;clip-path:inset(calc(100% - .75rem) 0 0 calc(100% - .75rem))}.diff-item-1,.diff-item-2,.diff-resizer{position:relative;grid-column-start:1;grid-row-start:1}.diff-item-1:after{pointer-events:none;position:absolute;bottom:0;right:1px;top:50%;z-index:1;height:2rem;width:2rem;--tw-content:'';content:var(--tw-content);translate:50% -50%}.diff-item-2{overflow:hidden}.diff-item-1>*,.diff-item-2>*{pointer-events:none;position:absolute;bottom:0;left:0;top:0;height:100%;width:100cqi;max-width:none;object-fit:cover;object-position:center}.divider{display:flex;flex-direction:row;align-items:center;align-self:stretch}.divider:after,.divider:before{height:.125rem;width:100%;flex-grow:1;--tw-content:'';content:var(--tw-content)}.divider-start:before{display:none}.divider-end:after{display:none}.drawer{position:relative;display:grid;grid-auto-columns:max-content auto}.drawer-content{grid-column-start:2;grid-row-start:1;min-width:0}.drawer-side{pointer-events:none;position:fixed;inset-inline-start:0;top:0;grid-column-start:1;grid-row-start:1;display:grid;width:100%;grid-template-columns:repeat(1,minmax(0,1fr));grid-template-rows:repeat(1,minmax(0,1fr));align-items:flex-start;justify-items:start;overflow-x:hidden;overflow-y:hidden;overscroll-behavior:contain;height:100vh;height:100dvh}.drawer-side>.drawer-overlay{position:sticky;top:0;place-self:stretch}.drawer-side>*{grid-column-start:1;grid-row-start:1}.drawer-side>:not(.drawer-overlay){transition-property:transform;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.3s;transition-timing-function:cubic-bezier(0,0,.2,1);will-change:transform;transform:translateX(-100%)}[dir=rtl] .drawer-side>:not(.drawer-overlay){transform:translateX(100%)}.drawer-toggle{position:fixed;height:0;width:0;appearance:none;opacity:0}.drawer-toggle:checked~.drawer-side{pointer-events:auto;visibility:visible;overflow-y:auto}.drawer-toggle:checked~.drawer-side>:not(.drawer-overlay){transform:translateX(0)}.drawer-end{grid-auto-columns:auto max-content}.drawer-end>.drawer-toggle~.drawer-content{grid-column-start:1}.drawer-end>.drawer-toggle~.drawer-side{grid-column-start:2;justify-items:end}.drawer-end>.drawer-toggle~.drawer-side>:not(.drawer-overlay){transform:translateX(100%)}[dir=rtl] .drawer-end>.drawer-toggle~.drawer-side>:not(.drawer-overlay){transform:translateX(-100%)}.drawer-end>.drawer-toggle:checked~.drawer-side>:not(.drawer-overlay){transform:translateX(0)}.dropdown{position:relative;display:inline-block}.dropdown>:not(summary):focus{outline:2px solid transparent;outline-offset:2px}.dropdown .dropdown-content{position:absolute}.dropdown:is(:not(details)) .dropdown-content{visibility:hidden;opacity:0}.dropdown-end .dropdown-content{inset-inline-end:0}.dropdown-left .dropdown-content{bottom:auto;inset-inline-end:100%;top:0}.dropdown-right .dropdown-content{bottom:auto;inset-inline-start:100%;top:0}.dropdown-bottom .dropdown-content{bottom:auto;top:100%}.dropdown-top .dropdown-content{bottom:100%;top:auto}.dropdown-end.dropdown-right .dropdown-content{bottom:0;top:auto}.dropdown-end.dropdown-left .dropdown-content{bottom:0;top:auto}.dropdown.dropdown-open 
.dropdown-content,.dropdown:focus-within .dropdown-content,.dropdown:not(.dropdown-hover):focus .dropdown-content{visibility:visible;opacity:1}@media (hover:hover){.dropdown.dropdown-hover:hover .dropdown-content{visibility:visible;opacity:1}}.dropdown:is(details) summary::-webkit-details-marker{display:none}.file-input{height:3rem;flex-shrink:1;padding-inline-end:1rem;font-size:.875rem;line-height:1.25rem;line-height:2}.file-input::file-selector-button{margin-inline-end:1rem;display:inline-flex;height:100%;flex-shrink:0;cursor:pointer;user-select:none;flex-wrap:wrap;align-items:center;justify-content:center;padding-left:1rem;padding-right:1rem;text-align:center;font-size:.875rem;line-height:1.25rem;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.2s;transition-timing-function:cubic-bezier(0,0,.2,1);line-height:1em}.footer{display:grid;width:100%;grid-auto-flow:row;place-items:start}.footer>*{display:grid;place-items:start}.footer-center{place-items:center;text-align:center}.footer-center>*{place-items:center}@media (min-width:48rem){.footer{grid-auto-flow:column}.footer-center{grid-auto-flow:row dense}}.form-control{display:flex;flex-direction:column}.label{display:flex;user-select:none;align-items:center;justify-content:space-between}.hero{display:grid;width:100%;place-items:center;background-size:cover;background-position:center}.hero>*{grid-column-start:1;grid-row-start:1}.hero-overlay{grid-column-start:1;grid-row-start:1;height:100%;width:100%}.hero-content{z-index:0;display:flex;align-items:center;justify-content:center}.indicator{position:relative;display:inline-flex;width:max-content}.indicator :where(.indicator-item){z-index:1;position:absolute;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));white-space:nowrap}.input{flex-shrink:1;appearance:none;height:3rem;padding-left:1rem;padding-right:1rem;font-size:.875rem;line-height:1.25rem;line-height:2}.input-md[type=number]::-webkit-inner-spin-button,.input[type=number]::-webkit-inner-spin-button{margin-top:-1rem;margin-bottom:-1rem;margin-inline-end:-1rem}.input-xs[type=number]::-webkit-inner-spin-button{margin-top:-.25rem;margin-bottom:-.25rem;margin-inline-end:0}.input-sm[type=number]::-webkit-inner-spin-button{margin-top:0;margin-bottom:0;margin-inline-end:0}.input-lg[type=number]::-webkit-inner-spin-button{margin-top:-1.5rem;margin-bottom:-1.5rem;margin-inline-end:-1.5rem}.join{display:inline-flex;align-items:stretch}.join :where(.join-item){border-start-end-radius:0;border-end-end-radius:0;border-end-start-radius:0;border-start-start-radius:0}.join .join-item:not(:first-child):not(:last-child),.join :not(:first-child):not(:last-child) .join-item{border-start-end-radius:0;border-end-end-radius:0;border-end-start-radius:0;border-start-start-radius:0}.join .join-item:first-child:not(:last-child),.join :first-child:not(:last-child) .join-item{border-start-end-radius:0;border-end-end-radius:0}.join .dropdown .join-item:first-child:not(:last-child),.join :first-child:not(:last-child) .dropdown .join-item{border-start-end-radius:inherit;border-end-end-radius:inherit}.join :where(.join-item:first-child:not(:last-child)),.join 
:where(:first-child:not(:last-child).join-item){border-end-start-radius:inherit;border-start-start-radius:inherit}.join .join-item:last-child:not(:first-child),.join :last-child:not(:first-child) .join-item{border-end-start-radius:0;border-start-start-radius:0}.join :where(.join-item:last-child:not(:first-child)),.join :where(:last-child:not(:first-child).join-item){border-start-end-radius:inherit;border-end-end-radius:inherit}@supports not selector(:has(*)){:where(.join*){border-radius:inherit}}@supports selector(:has(*)){:where(.join:has(.join-item)){border-radius:inherit}}.kbd{display:inline-flex;align-items:center;justify-content:center}.link{cursor:pointer;text-decoration-line:underline}.link-hover{text-decoration-line:none}@media(hover:hover){.link-hover:hover{text-decoration-line:underline}}.mask{mask-size:contain;mask-repeat:no-repeat;mask-position:center}.mask-half-1{mask-size:200%;mask-position:left}.mask-half-1:where([dir=rtl],[dir=rtl]*){mask-position:right}.mask-half-2{mask-size:200%;mask-position:right}.mask-half-2:where([dir=rtl],[dir=rtl]*){mask-position:left}.menu{display:flex;flex-direction:column;flex-wrap:wrap;font-size:.875rem;line-height:1.25rem}.menu :where(liul){position:relative;white-space:nowrap}.menu :where(li:not(.menu-title)>:not(ul,details,.menu-title,.btn)),.menu :where(li:not(.menu-title)>details>summary:not(.menu-title)){display:grid;grid-auto-flow:column;align-content:flex-start;align-items:center;gap:.5rem;grid-auto-columns:minmax(auto,max-content) auto max-content;user-select:none}.menu li.disabled{cursor:not-allowed;user-select:none}.menu :where(li>.menu-dropdown:not(.menu-dropdown-show)){display:none}:where(.menuli){position:relative;display:flex;flex-shrink:0;flex-direction:column;flex-wrap:wrap;align-items:stretch}:where(.menuli) .badge{justify-self:end}.mockup-code{position:relative;overflow:hidden;overflow-x:auto}.mockup-code pre[data-prefix]:before{content:attr(data-prefix);display:inline-block;text-align:right}.mockup-window{position:relative;overflow:hidden;overflow-x:auto}.mockup-window pre[data-prefix]:before{content:attr(data-prefix);display:inline-block;text-align:right}.mockup-browser{position:relative;overflow:hidden;overflow-x:auto}.mockup-browser pre[data-prefix]:before{content:attr(data-prefix);display:inline-block;text-align:right}.modal{pointer-events:none;position:fixed;inset:0;margin:0;display:grid;height:100%;max-height:none;width:100%;max-width:none;justify-items:center;padding:0;opacity:0;overscroll-behavior:contain;z-index:999}.modal-scroll{overscroll-behavior:auto}:where(.modal){align-items:center}.modal-box{max-height:calc(100vh - 
5em)}.modal-open,.modal-toggle:checked+.modal,.modal:target,.modal[open]{pointer-events:auto;visibility:visible;opacity:1}.modal-action{display:flex}.modal-toggle{position:fixed;height:0;width:0;appearance:none;opacity:0}:root:has(:is(.modal-open,.modal:target,.modal-toggle:checked+.modal,.modal[open])){overflow:hidden;scrollbar-gutter:stable}.navbar{display:flex;align-items:center}:where(.navbar>:not(script,style)){display:inline-flex;align-items:center}.navbar-start{width:50%;justify-content:flex-start}.navbar-center{flex-shrink:0}.navbar-end{width:50%;justify-content:flex-end}.progress{position:relative;width:100%;appearance:none;overflow:hidden}.radial-progress{position:relative;display:inline-grid;height:var(--size);width:var(--size);place-content:center;border-radius:9999px;background-color:transparent;vertical-align:middle;box-sizing:content-box}.radial-progress::-moz-progress-bar{appearance:none;background-color:transparent}.radial-progress::-webkit-progress-value{appearance:none;background-color:transparent}.radial-progress::-webkit-progress-bar{appearance:none;background-color:transparent}.radial-progress:after,.radial-progress:before{position:absolute;border-radius:9999px;content:""}.radial-progress:before{inset:0;background:radial-gradient(farthest-side,currentColor 98%,#0000) top/var(--thickness) var(--thickness) no-repeat,conic-gradient(currentColor calc(var(--value) * 1%),#0000 0);-webkit-mask:radial-gradient(farthest-side,#0000 calc(99% - var(--thickness)),#000 calc(100% - var(--thickness)));mask:radial-gradient(farthest-side,#0000 calc(99% - var(--thickness)),#000 calc(100% - var(--thickness)))}.radial-progress:after{inset:calc(50% - var(--thickness)/ 2);transform:rotate(calc(var(--value) * 3.6deg - 90deg)) translate(calc(var(--size)/ 2 - 50%))}.radio{flex-shrink:0}.range{height:1.5rem;width:100%;cursor:pointer}.range:focus{outline:0}.rating{position:relative;display:inline-flex}.rating :where(input){cursor:pointer;border-radius:0}.select{display:inline-flex;cursor:pointer;user-select:none;appearance:none;height:3rem;min-height:3rem;padding-inline-start:1rem;padding-inline-end:2.5rem;font-size:.875rem;line-height:1.25rem;line-height:2}.select[multiple]{height:auto}.stack{display:inline-grid}.stack>*{grid-column-start:1;grid-row-start:1;transform:translateY(10%) scale(.9);z-index:1}.stack>:nth-child(2){transform:translateY(5%) scale(.95);z-index:2}.stack>:nth-child(1){transform:translateY(0) scale(1);z-index:3}.stats{display:inline-grid}:where(.stats){grid-auto-flow:column}.stat{display:inline-grid;width:100%;grid-template-columns:repeat(1,1fr)}.stat-figure{grid-column-start:2;grid-row:span 3/span 3;grid-row-start:1;place-self:center;justify-self:end}.stat-title{grid-column-start:1;white-space:nowrap}.stat-value{grid-column-start:1;white-space:nowrap}.stat-desc{grid-column-start:1;white-space:nowrap}.stat-actions{grid-column-start:1;white-space:nowrap}.steps{display:inline-grid;grid-auto-flow:column;overflow:hidden;overflow-x:auto;counter-reset:step;grid-auto-columns:1fr}.steps .step{display:grid;grid-template-columns:repeat(1,minmax(0,1fr));grid-template-rows:repeat(2,minmax(0,1fr));place-items:center;text-align:center}.swap{position:relative;display:inline-grid;user-select:none;place-content:center}.swap>*{grid-column-start:1;grid-row-start:1}.swap input{appearance:none}.swap .swap-indeterminate,.swap .swap-on,.swap input:indeterminate~.swap-on{opacity:0}.swap input:checked~.swap-off,.swap input:indeterminate~.swap-off,.swap-active .swap-off{opacity:0}.swap 
input:checked~.swap-on,.swap input:indeterminate~.swap-indeterminate,.swap-active .swap-on{opacity:1}.tabs{display:grid;align-items:flex-end}.tabs-lifted:has(.tab-content[class*=" rounded-"]) .tab:first-child:not(:is(.tab-active,[aria-selected=true])),.tabs-lifted:has(.tab-content[class^=rounded-]) .tab:first-child:not(:is(.tab-active,[aria-selected=true])){border-bottom-color:transparent}.tab{position:relative;grid-row-start:1;display:inline-flex;height:2rem;cursor:pointer;user-select:none;appearance:none;flex-wrap:wrap;align-items:center;justify-content:center;text-align:center;font-size:.875rem;line-height:1.25rem;line-height:2;--tab-padding:1rem}.tab:is(input[type=radio]){width:auto;border-bottom-right-radius:0;border-bottom-left-radius:0}.tab:is(input[type=radio]):after{--tw-content:attr(aria-label);content:var(--tw-content)}.tab:not(input):empty{cursor:default;grid-column-start:span 9999}.tab-content{grid-column-start:1;grid-column-end:span 9999;grid-row-start:2;margin-top:calc(var(--tab-border) * -1);display:none;border-color:transparent;border-width:var(--tab-border,0)}:checked+.tab-content:nth-child(2),:is(.tab-active,[aria-selected=true])+.tab-content:nth-child(2){border-start-start-radius:0}:is(.tab-active,[aria-selected=true])+.tab-content,input.tab:checked+.tab-content{display:block}.table{position:relative;width:100%}.table :where(.table-pin-rowstheadtr){position:sticky;top:0;z-index:1;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)))}.table :where(.table-pin-rowstfoottr){position:sticky;bottom:0;z-index:1;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)))}.table :where(.table-pin-colstrth){position:sticky;left:0;right:0;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)))}.table-zebra tbody tr:nth-child(even) :where(.table-pin-colstrth){--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)))}.textarea{min-height:3rem;flex-shrink:1;padding-left:1rem;padding-right:1rem;padding-top:.5rem;padding-bottom:.5rem;font-size:.875rem;line-height:1.25rem;line-height:2}.timeline{position:relative;display:flex}:where(.timeline>li){position:relative;display:grid;flex-shrink:0;align-items:center;grid-template-rows:var(--timeline-row-start,minmax(0,1fr)) auto var(--timeline-row-end,minmax(0,1fr));grid-template-columns:var(--timeline-col-start,minmax(0,1fr)) auto 
var(--timeline-col-end,minmax(0,1fr))}.timeline>li>hr{width:100%;border-width:0}:where(.timeline>li>hr):first-child{grid-column-start:1;grid-row-start:2}:where(.timeline>li>hr):last-child{grid-column-start:3;grid-column-end:none;grid-row-start:2;grid-row-end:auto}.timeline-start{grid-column-start:1;grid-column-end:4;grid-row-start:1;grid-row-end:2;margin:.25rem;align-self:flex-end;justify-self:center}.timeline-middle{grid-column-start:2;grid-row-start:2}.timeline-end{grid-column-start:1;grid-column-end:4;grid-row-start:3;grid-row-end:4;margin:.25rem;align-self:flex-start;justify-self:center}.toast{position:fixed;display:flex;min-width:fit-content;flex-direction:column;white-space:nowrap}.toggle{flex-shrink:0}.alert{border-radius:var(--rounded-box,1rem);border-width:1px;--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)));padding:1rem;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--alert-bg:var(--fallback-b2,oklch(var(--b2)/1));--alert-bg-mix:var(--fallback-b1,oklch(var(--b1)/1));background-color:var(--alert-bg)}.alert-info{border-color:var(--fallback-in,oklch(var(--in)/.2));--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)));--alert-bg:var(--fallback-in,oklch(var(--in)/1));--alert-bg-mix:var(--fallback-b1,oklch(var(--b1)/1))}.alert-success{border-color:var(--fallback-su,oklch(var(--su)/.2));--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)));--alert-bg:var(--fallback-su,oklch(var(--su)/1));--alert-bg-mix:var(--fallback-b1,oklch(var(--b1)/1))}.alert-warning{border-color:var(--fallback-wa,oklch(var(--wa)/.2));--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)));--alert-bg:var(--fallback-wa,oklch(var(--wa)/1));--alert-bg-mix:var(--fallback-b1,oklch(var(--b1)/1))}.alert-error{border-color:var(--fallback-er,oklch(var(--er)/.2));--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)));--alert-bg:var(--fallback-er,oklch(var(--er)/1));--alert-bg-mix:var(--fallback-b1,oklch(var(--b1)/1))}.avatar-group{display:flex;overflow:hidden}.avatar-group 
:where(.avatar){overflow:hidden;border-radius:9999px;border-width:4px;--tw-border-opacity:1;border-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-border-opacity)))}.badge{border-radius:var(--rounded-badge,1.9rem);border-width:1px;--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}.badge-neutral{--tw-border-opacity:1;border-color:var(--fallback-n,oklch(var(--n)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)))}.badge-primary{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}.badge-secondary{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}.badge-accent{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}.badge-info{border-color:transparent;--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}.badge-success{border-color:transparent;--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}.badge-warning{border-color:transparent;--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}.badge-error{border-color:transparent;--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}.badge-ghost{--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}.badge-outline{border-color:currentColor;--tw-border-opacity:0.5;background-color:transparent;color:currentColor}.badge-outline.badge-neutral{--tw-text-opacity:1;color:var(--fallback-n,oklch(var(--n)/var(--tw-text-opacity)))}.badge-outline.badge-primary{--tw-text-opacity:1;color:var(--fallback-p,oklch(var(--p)/var(--tw-text-opacity)))}.badge-outline.badge-secondary{--tw-text-opacity:1;color:var(--fallback-s,oklch(var(--s)/var(--tw-text-opacity)))}.badge-outline.badge-accent{--tw-text-opacity:1;color:var(--fallback-a,oklch(var(--a)/var(--tw-text-opacity)))}.badge-outline.badge-info{--tw-text-opacity:1;color:var(--fallback-in,oklch(var(--in)/var(--tw-text-opacity)))}
.badge-outline.badge-success{--tw-text-opacity:1;color:var(--fallback-su,oklch(var(--su)/var(--tw-text-opacity)))}.badge-outline.badge-warning{--tw-text-opacity:1;color:var(--fallback-wa,oklch(var(--wa)/var(--tw-text-opacity)))}.badge-outline.badge-error{--tw-text-opacity:1;color:var(--fallback-er,oklch(var(--er)/var(--tw-text-opacity)))}.btm-nav{height:4rem;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));color:currentColor}.btm-nav>*{border-color:currentColor}.btm-nav>:not(.active){padding-top:.125rem}.btm-nav>:where(.active){border-top-width:2px;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)))}.btm-nav>.disabled,.btm-nav>[disabled]{pointer-events:none;--tw-border-opacity:0;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-bg-opacity:0.1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--tw-text-opacity:0.2}@media (hover:hover){.btm-nav>.disabled:hover,.btm-nav>[disabled]:hover{pointer-events:none;--tw-border-opacity:0;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-bg-opacity:0.1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--tw-text-opacity:0.2}}.btm-nav>* .label{font-size:1rem;line-height:1.5rem}.breadcrumbs{padding-top:.5rem;padding-bottom:.5rem}.breadcrumbs>ol>li>a:focus,.breadcrumbs>ul>li>a:focus{outline:2px solid transparent;outline-offset:2px}.breadcrumbs>ol>li>a:focus-visible,.breadcrumbs>ul>li>a:focus-visible{outline:2px solid currentColor;outline-offset:2px}.breadcrumbs>ol>li+:before,.breadcrumbs>ul>li+:before{content:"";margin-left:.5rem;margin-right:.75rem;display:block;height:.375rem;width:.375rem;--tw-rotate:45deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));opacity:.4;border-top:1px solid;border-right:1px solid;background-color:transparent}[dir=rtl] .breadcrumbs>ol>li+:before,[dir=rtl] .breadcrumbs>ul>li+:before{--tw-rotate:-135deg}.btn{gap:.5rem;font-weight:600;text-decoration-line:none;transition-duration:.2s;transition-timing-function:cubic-bezier(0,0,.2,1);border-width:var(--border-btn,1px);transition-property:color,background-color,border-color,opacity,box-shadow,transform}@media (prefers-reduced-motion:no-preference){.btn{animation:button-pop var(--animation-btn,.25s) ease-out}}.btn:active:focus,.btn:active:hover{animation:button-pop 0s ease-out;transform:scale(var(--btn-focus-scale,.97))}.btn{--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));text-decoration-line:none;--tw-shadow:0 1px 2px 0 rgb(0 0 0 / 0.05);--tw-shadow-colored:0 1px 2px 0 var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);outline-color:var(--fallback-bc,oklch(var(--bc)/1));background-color:oklch(var(--btn-color,var(--b2)) / var(--tw-bg-opacity));--tw-bg-opacity:1;border-color:oklch(var(--btn-color,var(--b2)) / var(--tw-border-opacity));--tw-border-opacity:1}@supports not (color:oklch(0% 0 0)){.btn{background-color:var(--btn-color,var(--fallback-b2));border-color:var(--btn-color,var(--fallback-b2))}}@media (hover:hover){.btn:hover{--tw-border-opacity:1;border-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-bg-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn:hover{background-color:color-mix(in oklab,oklch(var(--btn-color,var(--b2)) /
var(--tw-bg-opacity,1)) 90%,#000);border-color:color-mix(in oklab,oklch(var(--btn-color,var(--b2)) / var(--tw-border-opacity,1)) 90%,#000)}}@supports not (color:oklch(0% 0 0)){.btn:hover{background-color:var(--btn-color,var(--fallback-b2));border-color:var(--btn-color,var(--fallback-b2))}}}@supports (color:color-mix(in oklab,black,black)){.btn-active{background-color:color-mix(in oklab,oklch(var(--btn-color,var(--b3)) / var(--tw-bg-opacity,1)) 90%,#000);border-color:color-mix(in oklab,oklch(var(--btn-color,var(--b3)) / var(--tw-border-opacity,1)) 90%,#000)}}.btn:focus-visible{outline-style:solid;outline-width:2px;outline-offset:2px}.btn-primary{--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)));outline-color:var(--fallback-p,oklch(var(--p)/1))}@supports (color:oklch(0% 0 0)){.btn-primary{--btn-color:var(--p)}}@supports not (color:oklch(0% 0 0)){.btn-primary{--btn-color:var(--fallback-p)}}.btn-secondary{--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)));outline-color:var(--fallback-s,oklch(var(--s)/1))}@supports (color:oklch(0% 0 0)){.btn-secondary{--btn-color:var(--s)}}@supports not (color:oklch(0% 0 0)){.btn-secondary{--btn-color:var(--fallback-s)}}.btn-accent{--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)));outline-color:var(--fallback-a,oklch(var(--a)/1))}@supports (color:oklch(0% 0 0)){.btn-accent{--btn-color:var(--a)}}@supports not (color:oklch(0% 0 0)){.btn-accent{--btn-color:var(--fallback-a)}}.btn-neutral{--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)));outline-color:var(--fallback-n,oklch(var(--n)/1))}@supports (color:oklch(0% 0 0)){.btn-neutral{--btn-color:var(--n)}}@supports not (color:oklch(0% 0 0)){.btn-neutral{--btn-color:var(--fallback-n)}}.btn-info{--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)));outline-color:var(--fallback-in,oklch(var(--in)/1))}@supports (color:oklch(0% 0 0)){.btn-info{--btn-color:var(--in)}}@supports not (color:oklch(0% 0 0)){.btn-info{--btn-color:var(--fallback-in)}}.btn-success{--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)));outline-color:var(--fallback-su,oklch(var(--su)/1))}@supports (color:oklch(0% 0 0)){.btn-success{--btn-color:var(--su)}}@supports not (color:oklch(0% 0 0)){.btn-success{--btn-color:var(--fallback-su)}}.btn-warning{--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)));outline-color:var(--fallback-wa,oklch(var(--wa)/1))}@supports (color:oklch(0% 0 0)){.btn-warning{--btn-color:var(--wa)}}@supports not (color:oklch(0% 0 0)){.btn-warning{--btn-color:var(--fallback-wa)}}.btn-error{--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)));outline-color:var(--fallback-er,oklch(var(--er)/1))}@supports (color:oklch(0% 0 0)){.btn-error{--btn-color:var(--er)}}@supports not (color:oklch(0% 0 0)){.btn-error{--btn-color:var(--fallback-er)}}.btn.glass{--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);outline-color:currentColor}@media (hover:hover){.btn.glass:hover{--glass-opacity:25%;--glass-border-opacity:15%}}.btn.glass.btn-active{--glass-opacity:25%;--glass-border-opacity:15%}.btn-ghost{border-width:1px;border-color:transparent;background-color:transparent;color:currentColor;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;box-shadow:var(--tw-ring-offset-shadow,0 0 
#0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);outline-color:currentColor}@media (hover:hover){.btn-ghost:hover{border-color:transparent}@supports (color:oklch(0% 0 0)){.btn-ghost:hover{background-color:var(--fallback-bc,oklch(var(--bc)/.2))}}}.btn-ghost.btn-active{border-color:transparent;background-color:var(--fallback-bc,oklch(var(--bc)/.2))}.btn-link{border-color:transparent;background-color:transparent;--tw-text-opacity:1;color:var(--fallback-p,oklch(var(--p)/var(--tw-text-opacity)));text-decoration-line:underline;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);outline-color:currentColor}@media (hover:hover){.btn-link:hover{border-color:transparent;background-color:transparent;text-decoration-line:underline}}.btn-link.btn-active{border-color:transparent;background-color:transparent;text-decoration-line:underline}.btn-outline{border-color:currentColor;background-color:transparent;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}@media (hover:hover){.btn-outline:hover{--tw-border-opacity:1;border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-b1,oklch(var(--b1)/var(--tw-text-opacity)))}}.btn-outline.btn-active{--tw-border-opacity:1;border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-b1,oklch(var(--b1)/var(--tw-text-opacity)))}.btn-outline.btn-primary{--tw-text-opacity:1;color:var(--fallback-p,oklch(var(--p)/var(--tw-text-opacity)))}@media (hover:hover){.btn-outline.btn-primary:hover{--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-primary:hover{background-color:color-mix(in oklab,var(--fallback-p,oklch(var(--p)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-p,oklch(var(--p)/1)) 90%,#000)}}}.btn-outline.btn-primary.btn-active{--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-primary.btn-active{background-color:color-mix(in oklab,var(--fallback-p,oklch(var(--p)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-p,oklch(var(--p)/1)) 90%,#000)}}.btn-outline.btn-secondary{--tw-text-opacity:1;color:var(--fallback-s,oklch(var(--s)/var(--tw-text-opacity)))}@media (hover:hover){.btn-outline.btn-secondary:hover{--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-secondary:hover{background-color:color-mix(in oklab,var(--fallback-s,oklch(var(--s)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-s,oklch(var(--s)/1)) 90%,#000)}}}.btn-outline.btn-secondary.btn-active{--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-secondary.btn-active{background-color:color-mix(in oklab,var(--fallback-s,oklch(var(--s)/1)) 90%,#000);border-color:color-mix(in 
oklab,var(--fallback-s,oklch(var(--s)/1)) 90%,#000)}}.btn-outline.btn-accent{--tw-text-opacity:1;color:var(--fallback-a,oklch(var(--a)/var(--tw-text-opacity)))}@media (hover:hover){.btn-outline.btn-accent:hover{--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-accent:hover{background-color:color-mix(in oklab,var(--fallback-a,oklch(var(--a)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-a,oklch(var(--a)/1)) 90%,#000)}}}.btn-outline.btn-accent.btn-active{--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-accent.btn-active{background-color:color-mix(in oklab,var(--fallback-a,oklch(var(--a)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-a,oklch(var(--a)/1)) 90%,#000)}}.btn-outline.btn-success{--tw-text-opacity:1;color:var(--fallback-su,oklch(var(--su)/var(--tw-text-opacity)))}@media (hover:hover){.btn-outline.btn-success:hover{--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-success:hover{background-color:color-mix(in oklab,var(--fallback-su,oklch(var(--su)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-su,oklch(var(--su)/1)) 90%,#000)}}}.btn-outline.btn-success.btn-active{--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-success.btn-active{background-color:color-mix(in oklab,var(--fallback-su,oklch(var(--su)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-su,oklch(var(--su)/1)) 90%,#000)}}.btn-outline.btn-info{--tw-text-opacity:1;color:var(--fallback-in,oklch(var(--in)/var(--tw-text-opacity)))}@media (hover:hover){.btn-outline.btn-info:hover{--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-info:hover{background-color:color-mix(in oklab,var(--fallback-in,oklch(var(--in)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-in,oklch(var(--in)/1)) 90%,#000)}}}.btn-outline.btn-info.btn-active{--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-info.btn-active{background-color:color-mix(in oklab,var(--fallback-in,oklch(var(--in)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-in,oklch(var(--in)/1)) 90%,#000)}}.btn-outline.btn-warning{--tw-text-opacity:1;color:var(--fallback-wa,oklch(var(--wa)/var(--tw-text-opacity)))}@media (hover:hover){.btn-outline.btn-warning:hover{--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-warning:hover{background-color:color-mix(in oklab,var(--fallback-wa,oklch(var(--wa)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-wa,oklch(var(--wa)/1)) 90%,#000)}}}.btn-outline.btn-warning.btn-active{--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-warning.btn-active{background-color:color-mix(in oklab,var(--fallback-wa,oklch(var(--wa)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-wa,oklch(var(--wa)/1)) 
90%,#000)}}.btn-outline.btn-error{--tw-text-opacity:1;color:var(--fallback-er,oklch(var(--er)/var(--tw-text-opacity)))}@media (hover:hover){.btn-outline.btn-error:hover{--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-error:hover{background-color:color-mix(in oklab,var(--fallback-er,oklch(var(--er)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-er,oklch(var(--er)/1)) 90%,#000)}}}.btn-outline.btn-error.btn-active{--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}@supports (color:color-mix(in oklab,black,black)){.btn-outline.btn-error.btn-active{background-color:color-mix(in oklab,var(--fallback-er,oklch(var(--er)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-er,oklch(var(--er)/1)) 90%,#000)}}.btn.btn-disabled,.btn:disabled,.btn[disabled]{--tw-border-opacity:0;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-bg-opacity:0.2;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--tw-text-opacity:0.2}@media (hover:hover){.btn-disabled:hover,.btn:disabled:hover,.btn[disabled]:hover{--tw-border-opacity:0;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-bg-opacity:0.2;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--tw-text-opacity:0.2}}.btn:is(input[type=checkbox]:checked),.btn:is(input[type=radio]:checked){--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}@media (hover:hover){@supports (color:color-mix(in oklab,black,black)){.btn:is(input[type=checkbox]:checked):hover,.btn:is(input[type=radio]:checked):hover{background-color:color-mix(in oklab,var(--fallback-p,oklch(var(--p)/1)) 90%,#000);border-color:color-mix(in oklab,var(--fallback-p,oklch(var(--p)/1)) 90%,#000)}}}.btn:is(input[type=checkbox]:checked):focus-visible,.btn:is(input[type=radio]:checked):focus-visible{outline-color:var(--fallback-p,oklch(var(--p)/1))}@keyframes button-pop{0%{transform:scale(var(--btn-focus-scale,.98))}40%{transform:scale(1.02)}100%{transform:scale(1)}}.card{border-radius:var(--rounded-box,1rem)}.card :where(figure:first-child){overflow:hidden;border-start-start-radius:inherit;border-start-end-radius:inherit;border-end-start-radius:unset;border-end-end-radius:unset}.card :where(figure:last-child){overflow:hidden;border-start-start-radius:unset;border-start-end-radius:unset;border-end-start-radius:inherit;border-end-end-radius:inherit}.card:focus-visible{outline:2px solid currentColor;outline-offset:2px}.card.bordered{border-width:1px;--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)))}.card-bordered{border-width:1px;--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)))}.card.compact 
.card-body{padding:1rem;font-size:.875rem;line-height:1.25rem}.card-body{padding:var(--padding-card,2rem);display:flex;flex-direction:column;gap:.5rem}.card-title{display:flex;align-items:center;gap:.5rem;font-size:1.25rem;line-height:1.75rem;font-weight:600}.card.image-full:before{z-index:10;border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));opacity:.75}.card.image-full>.card-body{z-index:20;--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)))}.card.image-full :where(figure){overflow:hidden;border-radius:inherit}.carousel{-ms-overflow-style:none;scrollbar-width:none}.carousel::-webkit-scrollbar{display:none}.chat-bubble{border-radius:var(--rounded-box,1rem);min-height:2.75rem;min-width:2.75rem}.chat-bubble{--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)))}.chat-bubble-primary{--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}.chat-bubble-secondary{--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}.chat-bubble-accent{--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}.chat-bubble-info{--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}.chat-bubble-success{--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}.chat-bubble-warning{--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}.chat-bubble-error{--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}.chat-start .chat-bubble{border-end-start-radius:0}.chat-start .chat-bubble:before{inset-inline-start:-.749rem}.chat-end .chat-bubble{border-end-end-radius:0}.chat-end .chat-bubble:before{inset-inline-start:99.9%}.checkbox{--chkbg:var(--fallback-bc,oklch(var(--bc)/1));--chkfg:var(--fallback-b1,oklch(var(--b1)/1));height:1.5rem;width:1.5rem;cursor:pointer;appearance:none;border-radius:var(--rounded-btn,.5rem);border-width:1px;border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-border-opacity:0.2}.checkbox:focus{box-shadow:none}.checkbox:focus-visible{outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/1))}.checkbox:disabled{border-width:0}.checkbox:checked,.checkbox[aria-checked=true]{background-repeat:no-repeat;animation:checkmark var(--animation-input,.2s) ease-out;background-color:var(--chkbg);background-image:linear-gradient(-45deg,transparent 65%,var(--chkbg) 65.99%),linear-gradient(45deg,transparent 75%,var(--chkbg) 75.99%),linear-gradient(-45deg,var(--chkbg) 40%,transparent 40.99%),linear-gradient(45deg,var(--chkbg) 30%,var(--chkfg) 30.99%,var(--chkfg) 
40%,transparent 40.99%),linear-gradient(-45deg,var(--chkfg) 50%,var(--chkbg) 50.99%)}.checkbox:indeterminate{--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)));background-repeat:no-repeat;animation:checkmark var(--animation-input,.2s) ease-out;background-image:linear-gradient(90deg,transparent 80%,var(--chkbg) 80%),linear-gradient(-90deg,transparent 80%,var(--chkbg) 80%),linear-gradient(0deg,var(--chkbg) 43%,var(--chkfg) 43%,var(--chkfg) 57%,var(--chkbg) 57%)}.checkbox-primary{--chkbg:var(--fallback-p,oklch(var(--p)/1));--chkfg:var(--fallback-pc,oklch(var(--pc)/1));--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)))}@media(hover:hover){.checkbox-primary:hover{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)))}}.checkbox-primary:focus-visible{outline-color:var(--fallback-p,oklch(var(--p)/1))}.checkbox-primary:checked,.checkbox-primary[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}.checkbox-secondary{--chkbg:var(--fallback-s,oklch(var(--s)/1));--chkfg:var(--fallback-sc,oklch(var(--sc)/1));--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)))}@media(hover:hover){.checkbox-secondary:hover{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)))}}.checkbox-secondary:focus-visible{outline-color:var(--fallback-s,oklch(var(--s)/1))}.checkbox-secondary:checked,.checkbox-secondary[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}.checkbox-accent{--chkbg:var(--fallback-a,oklch(var(--a)/1));--chkfg:var(--fallback-ac,oklch(var(--ac)/1));--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)))}@media(hover:hover){.checkbox-accent:hover{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)))}}.checkbox-accent:focus-visible{outline-color:var(--fallback-a,oklch(var(--a)/1))}.checkbox-accent:checked,.checkbox-accent[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}.checkbox-success{--chkbg:var(--fallback-su,oklch(var(--su)/1));--chkfg:var(--fallback-suc,oklch(var(--suc)/1));--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)))}@media(hover:hover){.checkbox-success:hover{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)))}}.checkbox-success:focus-visible{outline-color:var(--fallback-su,oklch(var(--su)/1))}.checkbox-success:checked,.checkbox-success[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}
.checkbox-warning{--chkbg:var(--fallback-wa,oklch(var(--wa)/1));--chkfg:var(--fallback-wac,oklch(var(--wac)/1));--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)))}@media(hover:hover){.checkbox-warning:hover{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)))}}.checkbox-warning:focus-visible{outline-color:var(--fallback-wa,oklch(var(--wa)/1))}.checkbox-warning:checked,.checkbox-warning[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}.checkbox-info{--chkbg:var(--fallback-in,oklch(var(--in)/1));--chkfg:var(--fallback-inc,oklch(var(--inc)/1));--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)))}@media(hover:hover){.checkbox-info:hover{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)))}}.checkbox-info:focus-visible{outline-color:var(--fallback-in,oklch(var(--in)/1))}.checkbox-info:checked,.checkbox-info[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}.checkbox-error{--chkbg:var(--fallback-er,oklch(var(--er)/1));--chkfg:var(--fallback-erc,oklch(var(--erc)/1));--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)))}@media(hover:hover){.checkbox-error:hover{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)))}}.checkbox-error:focus-visible{outline-color:var(--fallback-er,oklch(var(--er)/1))}.checkbox-error:checked,.checkbox-error[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}.checkbox:disabled{cursor:not-allowed;border-color:transparent;--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)));opacity:.2}@keyframes checkmark{0%{background-position-y:5px}50%{background-position-y:-2px}100%{background-position-y:0}}.checkbox-mark{display:none}.collapse{width:100%;border-radius:var(--rounded-box,1rem)}details.collapse{width:100%}details.collapse summary{position:relative;display:block}details.collapse summary::-webkit-details-marker{display:none}.collapse:focus-visible{outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/1))}details.collapse summary{outline:2px solid transparent;outline-offset:2px}.collapse:has(.collapse-title:focus-visible),.collapse:has(>input[type=checkbox]:focus-visible),.collapse:has(>input[type=radio]:focus-visible){outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/1))}.collapse-arrow>.collapse-title:after{position:absolute;display:block;height:.5rem;width:.5rem;--tw-translate-y:-100%;--tw-rotate:45deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x))
scaleY(var(--tw-scale-y));transition-property:all;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:150ms;transition-timing-function:cubic-bezier(0,0,.2,1);transition-duration:.2s;top:1.9rem;inset-inline-end:1.4rem;content:"";transform-origin:75% 75%;box-shadow:2px 2px;pointer-events:none}.collapse-plus>.collapse-title:after{position:absolute;display:block;height:.5rem;width:.5rem;transition-property:all;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.3s;transition-timing-function:cubic-bezier(0,0,.2,1);top:.9rem;inset-inline-end:1.4rem;content:"+";pointer-events:none}.collapse:not(.collapse-open):not(.collapse-close)>.collapse-title,.collapse:not(.collapse-open):not(.collapse-close)>input[type=checkbox],.collapse:not(.collapse-open):not(.collapse-close)>input[type=radio]:not(:checked){cursor:pointer}.collapse:focus:not(.collapse-open):not(.collapse-close):not(.collapse[open])>.collapse-title{cursor:unset}.collapse-title{position:relative}:where(.collapse>input[type=checkbox]),:where(.collapse>input[type=radio]){z-index:1}.collapse-title,:where(.collapse>input[type=checkbox]),:where(.collapse>input[type=radio]){width:100%;padding:1rem;padding-inline-end:3rem;min-height:3.75rem;transition:background-color .2s ease-out}.collapse-content{padding-left:1rem;padding-right:1rem;cursor:unset;transition:padding .2s ease-out,background-color .2s ease-out}.collapse-open>:where(.collapse-content),.collapse:focus:not(.collapse-close)>:where(.collapse-content),.collapse:not(.collapse-close)>:where(input[type=checkbox]:checked~.collapse-content),.collapse:not(.collapse-close)>:where(input[type=radio]:checked~.collapse-content),.collapse[open]>:where(.collapse-content){padding-bottom:1rem;transition:padding .2s ease-out,background-color .2s ease-out}.collapse-arrow:focus:not(.collapse-close)>.collapse-title:after,.collapse-arrow:not(.collapse-close)>input[type=checkbox]:checked~.collapse-title:after,.collapse-arrow:not(.collapse-close)>input[type=radio]:checked~.collapse-title:after,.collapse-open.collapse-arrow>.collapse-title:after,.collapse[open].collapse-arrow>.collapse-title:after{--tw-translate-y:-50%;--tw-rotate:225deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.collapse-open.collapse-plus>.collapse-title:after,.collapse-plus:focus:not(.collapse-close)>.collapse-title:after,.collapse-plus:not(.collapse-close)>input[type=checkbox]:checked~.collapse-title:after,.collapse-plus:not(.collapse-close)>input[type=radio]:checked~.collapse-title:after,.collapse[open].collapse-plus>.collapse-title:after{content:"āˆ’"}.countdown>:before{text-align:center;transition:all 1s cubic-bezier(1,0,0,1)}.diff-item-1:after{border-radius:9999px;border-width:2px;--tw-border-opacity:1;border-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-border-opacity)));background-color:var(--fallback-b1,oklch(var(--b1)/.5));--tw-shadow:0 1px 2px 0 rgb(0 0 0 / 0.05);--tw-shadow-colored:0 1px 2px 0 var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);outline-style:solid;outline-offset:-3px;outline-color:var(--fallback-bc,oklch(var(--bc)/.05));--tw-backdrop-blur:blur(8px);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity)
var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);translate:50% -50%}.diff-item-2{border-right-width:2px;--tw-border-opacity:1;border-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-border-opacity)))}.divider{margin-top:1rem;margin-bottom:1rem;height:1rem;white-space:nowrap}.divider:after,.divider:before{background-color:var(--fallback-bc,oklch(var(--bc)/.1))}.divider:not(:empty){gap:1rem}.divider-neutral:after,.divider-neutral:before{--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)))}.divider-primary:after,.divider-primary:before{--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)))}.divider-secondary:after,.divider-secondary:before{--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)))}.divider-accent:after,.divider-accent:before{--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)))}.divider-success:after,.divider-success:before{--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)))}.divider-warning:after,.divider-warning:before{--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)))}.divider-info:after,.divider-info:before{--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)))}.divider-error:after,.divider-error:before{--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)))}.drawer{width:100%}.drawer-side>.drawer-overlay{cursor:pointer;background-color:transparent;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.2s;transition-timing-function:cubic-bezier(0,0,.2,1)}.drawer-toggle:checked~.drawer-side>.drawer-overlay{background-color:#0006}.drawer-toggle:focus-visible~.drawer-content label.drawer-button{outline-style:solid;outline-width:2px;outline-offset:2px}.dropdown:is(:not(details)) .dropdown-content{transform-origin:top;--tw-scale-x:.95;--tw-scale-y:.95;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.2s;transition-timing-function:cubic-bezier(0,0,.2,1)}.dropdown-bottom .dropdown-content{transform-origin:top}.dropdown-top .dropdown-content{transform-origin:bottom}.dropdown-left .dropdown-content{transform-origin:right}.dropdown-right .dropdown-content{transform-origin:left}.dropdown.dropdown-open .dropdown-content,.dropdown:focus .dropdown-content,.dropdown:focus-within .dropdown-content{--tw-scale-x:1;--tw-scale-y:1;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}@media (hover:hover){.dropdown.dropdown-hover:hover .dropdown-content{--tw-scale-x:1;--tw-scale-y:1;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) 
scaleY(var(--tw-scale-y))}}.file-input{overflow:hidden;border-radius:var(--rounded-btn,.5rem);border-width:1px;border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-border-opacity:0;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));font-size:1rem;line-height:1.5rem}.file-input::file-selector-button{border-style:solid;--tw-border-opacity:1;border-color:var(--fallback-n,oklch(var(--n)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));font-weight:600;text-transform:uppercase;--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)));text-decoration-line:none;border-width:var(--border-btn,1px);animation:button-pop var(--animation-btn,.25s) ease-out}.file-input-bordered{--tw-border-opacity:0.2}.file-input:focus{outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/.2))}.file-input-ghost{--tw-bg-opacity:0.05}.file-input-ghost:focus{--tw-bg-opacity:1;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));box-shadow:none}.file-input-ghost::file-selector-button{border-width:1px;border-color:transparent;background-color:transparent;color:currentColor}.file-input-primary{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)))}.file-input-primary:focus{outline-color:var(--fallback-p,oklch(var(--p)/1))}.file-input-primary::file-selector-button{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}.file-input-secondary{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)))}.file-input-secondary:focus{outline-color:var(--fallback-s,oklch(var(--s)/1))}.file-input-secondary::file-selector-button{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}.file-input-accent{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)))}.file-input-accent:focus{outline-color:var(--fallback-a,oklch(var(--a)/1))}.file-input-accent::file-selector-button{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}.file-input-info{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)))}.file-input-info:focus{outline-color:var(--fallback-in,oklch(var(--in)/1))}.file-input-info::file-selector-button{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}.file-input-success{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)))}.file-input-success:focus{outline-color:var(--fallback-su,oklch(var(--su)/1))}
.file-input-success::file-selector-button{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}.file-input-warning{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)))}.file-input-warning:focus{outline-color:var(--fallback-wa,oklch(var(--wa)/1))}.file-input-warning::file-selector-button{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}.file-input-error{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)))}.file-input-error:focus{outline-color:var(--fallback-er,oklch(var(--er)/1))}.file-input-error::file-selector-button{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}.file-input-disabled,.file-input[disabled]{cursor:not-allowed;--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)));--tw-text-opacity:0.2}.file-input-disabled::placeholder,.file-input[disabled]::placeholder{color:var(--fallback-bc,oklch(var(--bc)/var(--tw-placeholder-opacity)));--tw-placeholder-opacity:0.2}.file-input-disabled::file-selector-button,.file-input[disabled]::file-selector-button{--tw-border-opacity:0;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-bg-opacity:0.2;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--tw-text-opacity:0.2}.footer{column-gap:1rem;row-gap:2.5rem;font-size:.875rem;line-height:1.25rem}.footer>*{gap:.5rem}.footer-title{margin-bottom:.5rem;font-weight:700;text-transform:uppercase;opacity:.6}.label{padding-left:.25rem;padding-right:.25rem;padding-top:.5rem;padding-bottom:.5rem}.label-text{font-size:.875rem;line-height:1.25rem;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}.label-text-alt{font-size:.75rem;line-height:1rem;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}@media(hover:hover){.label a:hover{--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}}.hero-overlay{background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-bg-opacity:0.5}.hero-content{max-width:80rem;gap:1rem;padding:1rem}.input{border-radius:var(--rounded-btn,.5rem);border-width:1px;border-color:transparent;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));font-size:1rem;line-height:1.5rem}.input input{--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));background-color:transparent}.input input:focus{outline:2px solid
transparent;outline-offset:2px}.input[list]::-webkit-calendar-picker-indicator{line-height:1em}.input-bordered{border-color:var(--fallback-bc,oklch(var(--bc)/.2))}.input:focus,.input:focus-within{box-shadow:none;border-color:var(--fallback-bc,oklch(var(--bc)/.2));outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/.2))}.input-ghost{--tw-bg-opacity:0.05}.input-ghost:focus,.input-ghost:focus-within{--tw-bg-opacity:1;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));box-shadow:none}.input-primary{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)))}.input-primary:focus,.input-primary:focus-within{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));outline-color:var(--fallback-p,oklch(var(--p)/1))}.input-secondary{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)))}.input-secondary:focus,.input-secondary:focus-within{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)));outline-color:var(--fallback-s,oklch(var(--s)/1))}.input-accent{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)))}.input-accent:focus,.input-accent:focus-within{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)));outline-color:var(--fallback-a,oklch(var(--a)/1))}.input-info{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)))}.input-info:focus,.input-info:focus-within{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)));outline-color:var(--fallback-in,oklch(var(--in)/1))}.input-success{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)))}.input-success:focus,.input-success:focus-within{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)));outline-color:var(--fallback-su,oklch(var(--su)/1))}.input-warning{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)))}.input-warning:focus,.input-warning:focus-within{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)));outline-color:var(--fallback-wa,oklch(var(--wa)/1))}.input-error{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)))}.input-error:focus,.input-error:focus-within{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)));outline-color:var(--fallback-er,oklch(var(--er)/1))}.input-disabled,.input:disabled,.input:has(>input[disabled]),.input[disabled]{cursor:not-allowed;--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)));color:var(--fallback-bc,oklch(var(--bc)/.4))}.input-disabled::placeholder,.input:disabled::placeholder,.input:has(>input[disabled])::placeholder,.input[disabled]::placeholder{color:var(--fallback-bc,oklch(var(--bc)/var(--tw-placeholder-opacity)));--tw-placeholder-opacity:0.2}.input:has(>input[disabled])>input[disabled]{cursor:not-allowed}.input::-webkit-date-and-time-value{text-align:inherit}.join{border-radius:var(--rounded-btn,.5rem)}.join>:where(:not(:first-child)){margin-top:0;margin-bottom:0;margin-inline-start:-1px}
.join>:where(:not(:first-child)):is(.btn){margin-inline-start:calc(var(--border-btn) * -1)}.join-item:focus{isolation:isolate}.kbd{border-radius:var(--rounded-btn,.5rem);border-width:1px;border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-border-opacity:0.2;--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)));padding-left:.5rem;padding-right:.5rem;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));border-bottom-width:2px;min-height:2.2em;min-width:2.2em}.link-primary{--tw-text-opacity:1;color:var(--fallback-p,oklch(var(--p)/var(--tw-text-opacity)))}@supports(color:color-mix(in oklab,black,black)){@media(hover:hover){.link-primary:hover{color:color-mix(in oklab,var(--fallback-p,oklch(var(--p)/1)) 80%,#000)}}}.link-secondary{--tw-text-opacity:1;color:var(--fallback-s,oklch(var(--s)/var(--tw-text-opacity)))}@supports(color:color-mix(in oklab,black,black)){@media(hover:hover){.link-secondary:hover{color:color-mix(in oklab,var(--fallback-s,oklch(var(--s)/1)) 80%,#000)}}}.link-accent{--tw-text-opacity:1;color:var(--fallback-a,oklch(var(--a)/var(--tw-text-opacity)))}@supports(color:color-mix(in oklab,black,black)){@media(hover:hover){.link-accent:hover{color:color-mix(in oklab,var(--fallback-a,oklch(var(--a)/1)) 80%,#000)}}}.link-neutral{--tw-text-opacity:1;color:var(--fallback-n,oklch(var(--n)/var(--tw-text-opacity)))}@supports(color:color-mix(in oklab,black,black)){@media(hover:hover){.link-neutral:hover{color:color-mix(in oklab,var(--fallback-n,oklch(var(--n)/1)) 80%,#000)}}}.link-success{--tw-text-opacity:1;color:var(--fallback-su,oklch(var(--su)/var(--tw-text-opacity)))}@supports(color:color-mix(in oklab,black,black)){@media(hover:hover){.link-success:hover{color:color-mix(in oklab,var(--fallback-su,oklch(var(--su)/1)) 80%,#000)}}}.link-info{--tw-text-opacity:1;color:var(--fallback-in,oklch(var(--in)/var(--tw-text-opacity)))}@supports(color:color-mix(in oklab,black,black)){@media(hover:hover){.link-info:hover{color:color-mix(in oklab,var(--fallback-in,oklch(var(--in)/1)) 80%,#000)}}}.link-warning{--tw-text-opacity:1;color:var(--fallback-wa,oklch(var(--wa)/var(--tw-text-opacity)))}@supports(color:color-mix(in oklab,black,black)){@media(hover:hover){.link-warning:hover{color:color-mix(in oklab,var(--fallback-wa,oklch(var(--wa)/1)) 80%,#000)}}}.link-error{--tw-text-opacity:1;color:var(--fallback-er,oklch(var(--er)/var(--tw-text-opacity)))}@supports(color:color-mix(in oklab,black,black)){@media(hover:hover){.link-error:hover{color:color-mix(in oklab,var(--fallback-er,oklch(var(--er)/1)) 80%,#000)}}}.link:focus{outline:2px solid transparent;outline-offset:2px}.link:focus-visible{outline:2px solid currentColor;outline-offset:2px}.loading{pointer-events:none;display:inline-block;aspect-ratio:1/1;width:1.5rem;background-color:currentColor;mask-size:100%;mask-repeat:no-repeat;mask-position:center;mask-image:url("data:image/svg+xml,%3Csvg width='24' height='24' stroke='%23000' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cstyle%3E.spinner_V8m1%7Btransform-origin:center;animation:spinner_zKoa 2s linear infinite%7D.spinner_V8m1 circle%7Bstroke-linecap:round;animation:spinner_YpZS 1.5s ease-out infinite%7D%40keyframes spinner_zKoa%7B100%25%7Btransform:rotate(360deg)%7D%7D%40keyframes spinner_YpZS%7B0%25%7Bstroke-dasharray:0 150;stroke-dashoffset:0%7D47.5%25%7Bstroke-dasharray:42 150;stroke-dashoffset:-16%7D95%25%2C100%25%7Bstroke-dasharray:42 150;stroke-dashoffset:-59%7D%7D%3C%2Fstyle%3E%3Cg class='spinner_V8m1'%3E%3Ccircle cx='12'
cy='12' r='9.5' fill='none' stroke-width='3'%3E%3C%2Fcircle%3E%3C%2Fg%3E%3C%2Fsvg%3E")}.loading-spinner{mask-image:url("data:image/svg+xml,%3Csvg width='24' height='24' stroke='%23000' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cstyle%3E.spinner_V8m1%7Btransform-origin:center;animation:spinner_zKoa 2s linear infinite%7D.spinner_V8m1 circle%7Bstroke-linecap:round;animation:spinner_YpZS 1.5s ease-out infinite%7D%40keyframes spinner_zKoa%7B100%25%7Btransform:rotate(360deg)%7D%7D%40keyframes spinner_YpZS%7B0%25%7Bstroke-dasharray:0 150;stroke-dashoffset:0%7D47.5%25%7Bstroke-dasharray:42 150;stroke-dashoffset:-16%7D95%25%2C100%25%7Bstroke-dasharray:42 150;stroke-dashoffset:-59%7D%7D%3C%2Fstyle%3E%3Cg class='spinner_V8m1'%3E%3Ccircle cx='12' cy='12' r='9.5' fill='none' stroke-width='3'%3E%3C%2Fcircle%3E%3C%2Fg%3E%3C%2Fsvg%3E")}.loading-dots{mask-image:url("data:image/svg+xml,%3Csvg width='24' height='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cstyle%3E.spinner_qM83%7Banimation:spinner_8HQG 1.05s infinite%7D.spinner_oXPr%7Banimation-delay:.1s%7D.spinner_ZTLf%7Banimation-delay:.2s%7D@keyframes spinner_8HQG%7B0%25,57.14%25%7Banimation-timing-function:cubic-bezier(0.33,.66,.66,1);transform:translate(0)%7D28.57%25%7Banimation-timing-function:cubic-bezier(0.33,0,.66,.33);transform:translateY(-6px)%7D100%25%7Btransform:translate(0)%7D%7D%3C/style%3E%3Ccircle class='spinner_qM83' cx='4' cy='12' r='3'/%3E%3Ccircle class='spinner_qM83 spinner_oXPr' cx='12' cy='12' r='3'/%3E%3Ccircle class='spinner_qM83 spinner_ZTLf' cx='20' cy='12' r='3'/%3E%3C/svg%3E")}.loading-ring{mask-image:url("data:image/svg+xml,%3Csvg width='44' height='44' viewBox='0 0 44 44' xmlns='http://www.w3.org/2000/svg' stroke='%23fff'%3E%3Cg fill='none' fill-rule='evenodd' stroke-width='2'%3E%3Ccircle cx='22' cy='22' r='1'%3E%3Canimate attributeName='r' begin='0s' dur='1.8s' values='1; 20' calcMode='spline' keyTimes='0; 1' keySplines='0.165, 0.84, 0.44, 1' repeatCount='indefinite' /%3E%3Canimate attributeName='stroke-opacity' begin='0s' dur='1.8s' values='1; 0' calcMode='spline' keyTimes='0; 1' keySplines='0.3, 0.61, 0.355, 1' repeatCount='indefinite' /%3E%3C/circle%3E%3Ccircle cx='22' cy='22' r='1'%3E%3Canimate attributeName='r' begin='-0.9s' dur='1.8s' values='1; 20' calcMode='spline' keyTimes='0; 1' keySplines='0.165, 0.84, 0.44, 1' repeatCount='indefinite' /%3E%3Canimate attributeName='stroke-opacity' begin='-0.9s' dur='1.8s' values='1; 0' calcMode='spline' keyTimes='0; 1' keySplines='0.3, 0.61, 0.355, 1' repeatCount='indefinite' /%3E%3C/circle%3E%3C/g%3E%3C/svg%3E")}.loading-ball{mask-image:url("data:image/svg+xml,%0A%3Csvg width='24' height='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cstyle%3E.spinner_rXNP%7Banimation:spinner_YeBj .8s infinite%7D@keyframes spinner_YeBj%7B0%25%7Banimation-timing-function:cubic-bezier(0.33,0,.66,.33);cy:5px%7D46.875%25%7Bcy:20px;rx:4px;ry:4px%7D50%25%7Banimation-timing-function:cubic-bezier(0.33,.66,.66,1);cy:20.5px;rx:4.8px;ry:3px%7D53.125%25%7Brx:4px;ry:4px%7D100%25%7Bcy:5px%7D%7D%3C/style%3E%3Cellipse class='spinner_rXNP' cx='12' cy='5' rx='4' ry='4'/%3E%3C/svg%3E")}.loading-bars{mask-image:url("data:image/svg+xml,%0A%3Csvg width='24' height='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'%3E%3Cstyle%3E.spinner_hzlK%7Banimation:spinner_vc4H .8s linear infinite;animation-delay:-.8s%7D.spinner_koGT%7Banimation-delay:-.65s%7D.spinner_YF1u%7Banimation-delay:-.5s%7D@keyframes 
spinner_vc4H%7B0%25%7By:1px;height:22px%7D93.75%25%7By:5px;height:14px;opacity:.2%7D%7D%3C/style%3E%3Crect class='spinner_hzlK' x='1' y='1' width='6' height='22'/%3E%3Crect class='spinner_hzlK spinner_koGT' x='9' y='1' width='6' height='22'/%3E%3Crect class='spinner_hzlK spinner_YF1u' x='17' y='1' width='6' height='22'/%3E%3C/svg%3E")}.loading-infinity{mask-image:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink' style='shape-rendering: auto;' width='200px' height='200px' viewBox='0 0 100 100' preserveAspectRatio='xMidYMid'%3E%3Cpath fill='none' stroke='%230a0a0a' stroke-width='10' stroke-dasharray='205.271142578125 51.317785644531256' d='M24.3 30C11.4 30 5 43.3 5 50s6.4 20 19.3 20c19.3 0 32.1-40 51.4-40 C88.6 30 95 43.3 95 50s-6.4 20-19.3 20C56.4 70 43.6 30 24.3 30z' stroke-linecap='round' style='transform:scale(0.8);transform-origin:50px 50px'%3E%3Canimate attributeName='stroke-dashoffset' repeatCount='indefinite' dur='2s' keyTimes='0;1' values='0;256.58892822265625'%3E%3C/animate%3E%3C/path%3E%3C/svg%3E")}.loading-xs{width:1rem}.loading-sm{width:1.25rem}.loading-md{width:1.5rem}.loading-lg{width:2.5rem}.mask-squircle{mask-image:url("data:image/svg+xml,%3csvg width='200' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M100 0C20 0 0 20 0 100s20 100 100 100 100-20 100-100S180 0 100 0Z'/%3e%3c/svg%3e")}.mask-decagon{mask-image:url("data:image/svg+xml,%3csvg width='192' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m96 0 58.779 19.098 36.327 50v61.804l-36.327 50L96 200l-58.779-19.098-36.327-50V69.098l36.327-50z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-diamond{mask-image:url("data:image/svg+xml,%3csvg width='200' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m100 0 100 100-100 100L0 100z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-heart{mask-image:url("data:image/svg+xml,%3csvg width='200' height='185' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M100 184.606a15.384 15.384 0 0 1-8.653-2.678C53.565 156.28 37.205 138.695 28.182 127.7 8.952 104.264-.254 80.202.005 54.146.308 24.287 24.264 0 53.406 0c21.192 0 35.869 11.937 44.416 21.879a2.884 2.884 0 0 0 4.356 0C110.725 11.927 125.402 0 146.594 0c29.142 0 53.098 24.287 53.4 54.151.26 26.061-8.956 50.122-28.176 73.554-9.023 10.994-25.383 28.58-63.165 54.228a15.384 15.384 0 0 1-8.653 2.673Z' fill='black' fill-rule='nonzero'/%3e%3c/svg%3e")}.mask-hexagon{mask-image:url("data:image/svg+xml,%3csvg width='182' height='201' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M.3 65.486c0-9.196 6.687-20.063 14.211-25.078l61.86-35.946c8.36-5.016 20.899-5.016 29.258 0l61.86 35.946c8.36 5.015 14.211 15.882 14.211 25.078v71.055c0 9.196-6.687 20.063-14.211 25.079l-61.86 35.945c-8.36 4.18-20.899 4.18-29.258 0L14.51 161.62C6.151 157.44.3 145.737.3 136.54V65.486Z' fill='black' fill-rule='nonzero'/%3e%3c/svg%3e")}.mask-hexagon-2{mask-image:url("data:image/svg+xml,%3csvg width='200' height='182' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M64.786 181.4c-9.196 0-20.063-6.687-25.079-14.21L3.762 105.33c-5.016-8.36-5.016-20.9 0-29.259l35.945-61.86C44.723 5.851 55.59 0 64.786 0h71.055c9.196 0 20.063 6.688 25.079 14.211l35.945 61.86c4.18 8.36 4.18 20.899 0 29.258l-35.945 61.86c-4.18 8.36-15.883 14.211-25.079 14.211H64.786Z' fill='black' fill-rule='nonzero'/%3e%3c/svg%3e")}.mask-circle{mask-image:url("data:image/svg+xml,%3csvg width='200' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle fill='black' cx='100' cy='100' r='100' 
fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-parallelogram{mask-image:url("data:image/svg+xml,%3csvg width='200' height='154' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='M46.154 0H200l-46.154 153.846H0z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-parallelogram-2{mask-image:url("data:image/svg+xml,%3csvg width='200' height='154' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='M153.846 0H0l46.154 153.846H200z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-parallelogram-3{mask-image:url("data:image/svg+xml,%3csvg width='154' height='201' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='M.077 47.077v153.846l153.846-46.154V.923z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-parallelogram-4{mask-image:url("data:image/svg+xml,%3csvg width='154' height='201' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='M153.923 47.077v153.846L.077 154.77V.923z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-pentagon{mask-image:url("data:image/svg+xml,%3csvg width='192' height='181' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m96 0 95.106 69.098-36.327 111.804H37.22L.894 69.098z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-square{mask-image:url("data:image/svg+xml,%3csvg width='200' height='200' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='M0 0h200v200H0z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-star{mask-image:url("data:image/svg+xml,%3csvg width='192' height='180' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m96 137.263-58.779 42.024 22.163-68.389L.894 68.481l72.476-.243L96 0l22.63 68.238 72.476.243-58.49 42.417 22.163 68.389z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-star-2{mask-image:url("data:image/svg+xml,%3csvg width='192' height='180' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m96 153.044-58.779 26.243 7.02-63.513L.894 68.481l63.117-13.01L96 0l31.989 55.472 63.117 13.01-43.347 47.292 7.02 63.513z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-triangle{mask-image:url("data:image/svg+xml,%3csvg width='174' height='149' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m87 148.476-86.603.185L43.86 74.423 87 0l43.14 74.423 43.463 74.238z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-triangle-2{mask-image:url("data:image/svg+xml,%3csvg width='174' height='150' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m87 .738 86.603-.184-43.463 74.238L87 149.214 43.86 74.792.397.554z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-triangle-3{mask-image:url("data:image/svg+xml,%3csvg width='150' height='174' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='m149.369 87.107.185 86.603-74.239-43.463L.893 87.107l74.422-43.14L149.554.505z' fill-rule='evenodd'/%3e%3c/svg%3e")}.mask-triangle-4{mask-image:url("data:image/svg+xml,%3csvg width='150' height='174' xmlns='http://www.w3.org/2000/svg'%3e%3cpath fill='black' d='M.631 87.107.446.505l74.239 43.462 74.422 43.14-74.422 43.14L.446 173.71z' fill-rule='evenodd'/%3e%3c/svg%3e")}.menu{padding:.5rem}:where(.menuli:empty){--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)));opacity:.1;margin:.5rem 1rem;height:1px}.menu :where(liul){margin-inline-start:1rem;padding-inline-start:.5rem}.menu :where(liul):before{position:absolute;bottom:.75rem;inset-inline-start:0;top:.75rem;width:1px;--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)));opacity:.1;content:""}.menu :where(li:not(.menu-title)>:not(ul,details,.menu-title,.btn)),.menu 
:where(li:not(.menu-title)>details>summary:not(.menu-title)){border-radius:var(--rounded-btn,.5rem);padding-left:1rem;padding-right:1rem;padding-top:.5rem;padding-bottom:.5rem;text-align:start;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.2s;transition-timing-function:cubic-bezier(0,0,.2,1);text-wrap:balance}:where(.menu li:not(.menu-title,.disabled)>:not(ul,details,.menu-title)):is(summary):not(.active,.btn):focus-visible,:where(.menu li:not(.menu-title,.disabled)>:not(ul,details,.menu-title)):not(summary,.active,.btn).focus,:where(.menu li:not(.menu-title,.disabled)>:not(ul,details,.menu-title)):not(summary,.active,.btn):focus,:where(.menu li:not(.menu-title,.disabled)>details>summary:not(.menu-title)):is(summary):not(.active,.btn):focus-visible,:where(.menu li:not(.menu-title,.disabled)>details>summary:not(.menu-title)):not(summary,.active,.btn).focus,:where(.menu li:not(.menu-title,.disabled)>details>summary:not(.menu-title)):not(summary,.active,.btn):focus{cursor:pointer;background-color:var(--fallback-bc,oklch(var(--bc)/.1));--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));outline:2px solid transparent;outline-offset:2px}@media (hover:hover){:where(.menu li:not(.menu-title,.disabled)>:not(ul,details,.menu-title)):not(.active,.btn):hover,:where(.menu li:not(.menu-title,.disabled)>details>summary:not(.menu-title)):not(.active,.btn):hover{cursor:pointer;outline:2px solid transparent;outline-offset:2px}@supports (color:oklch(0% 0 0)){:where(.menu li:not(.menu-title,.disabled)>:not(ul,details,.menu-title)):not(.active,.btn):hover,:where(.menu li:not(.menu-title,.disabled)>details>summary:not(.menu-title)):not(.active,.btn):hover{background-color:var(--fallback-bc,oklch(var(--bc)/.1))}}}.menu li>:not(ul,.menu-title,details,.btn).active,.menu li>:not(ul,.menu-title,details,.btn):active,.menu li>details>summary:active{--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)))}@media(hover:hover){.menu li>:not(ul,.menu-title,details,.btn).active,.menu li>:not(ul,.menu-title,details,.btn):active,.menu li>details>summary:active{--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)))}}.menu li.disabled{color:var(--fallback-bc,oklch(var(--bc)/.3))}.menu :where(li>details>summary)::-webkit-details-marker{display:none}.menu :where(li>.menu-dropdown-toggle):after,.menu :where(li>details>summary):after{justify-self:end;display:block;margin-top:-.5rem;height:.5rem;width:.5rem;transform:rotate(45deg);transition-property:transform,margin-top;transition-duration:.3s;transition-timing-function:cubic-bezier(.4,0,.2,1);content:"";transform-origin:75% 75%;box-shadow:2px 2px;pointer-events:none}.menu :where(li>.menu-dropdown-toggle.menu-dropdown-show):after,.menu 
:where(li>details[open]>summary):after{transform:rotate(225deg);margin-top:0}.menu-title{padding-left:1rem;padding-right:1rem;padding-top:.5rem;padding-bottom:.5rem;font-size:.875rem;line-height:1.25rem;font-weight:700;color:var(--fallback-bc,oklch(var(--bc)/.4))}.mockup-code{min-width:18rem;border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));padding-top:1.25rem;padding-bottom:1.25rem;--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)));direction:ltr}.mockup-code:before{content:"";margin-bottom:1rem;display:block;height:.75rem;width:.75rem;border-radius:9999px;opacity:.3;box-shadow:1.4em 0,2.8em 0,4.2em 0}.mockup-code pre{padding-right:1.25rem}.mockup-code pre:before{content:"";margin-right:2ch}.mockup-code pre[data-prefix]:before{content:attr(data-prefix);width:2rem;opacity:.5}.mockup-window{display:flex;flex-direction:column;border-radius:var(--rounded-box,1rem);padding-top:1.25rem}.mockup-window:before{content:"";margin-bottom:1rem;display:block;aspect-ratio:1/1;height:.75rem;flex-shrink:0;align-self:flex-start;border-radius:9999px;opacity:.3}.mockup-window:where([dir=rtl],[dir=rtl] *):before{align-self:flex-end}.mockup-window:before{box-shadow:1.4em 0,2.8em 0,4.2em 0}.mockup-phone{display:inline-block;border:4px solid #444;border-radius:50px;background-color:#000;padding:10px;margin:0 auto;overflow:hidden}.mockup-phone .camera{position:relative;top:0;left:0;background:#000;height:25px;width:150px;margin:0 auto;border-bottom-left-radius:17px;border-bottom-right-radius:17px;z-index:11}.mockup-phone .camera:before{content:"";position:absolute;top:35%;left:50%;width:50px;height:4px;border-radius:5px;background-color:#0c0b0e;transform:translate(-50%,-50%)}.mockup-phone .camera:after{content:"";position:absolute;top:20%;left:70%;width:8px;height:8px;border-radius:5px;background-color:#0f0b25}.mockup-phone .display{overflow:hidden;border-radius:40px;margin-top:-25px}.mockup-browser{border-radius:var(--rounded-box,1rem)}.mockup-browser .mockup-browser-toolbar{margin-top:.75rem;margin-bottom:.75rem;display:inline-flex;width:100%;align-items:center;padding-right:1.4em}.mockup-browser .mockup-browser-toolbar:where([dir=rtl],[dir=rtl] *){flex-direction:row-reverse}.mockup-browser .mockup-browser-toolbar:before{content:"";margin-right:4.8rem;display:inline-block;aspect-ratio:1/1;height:.75rem;border-radius:9999px;opacity:.3;box-shadow:1.4em 0,2.8em 0,4.2em 0}.mockup-browser .mockup-browser-toolbar .input{position:relative;margin-left:auto;margin-right:auto;display:block;height:1.75rem;width:24rem;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)));padding-left:2rem;direction:ltr}.mockup-browser .mockup-browser-toolbar .input:before{content:"";position:absolute;left:.5rem;top:50%;aspect-ratio:1/1;height:.75rem;--tw-translate-y:-50%;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));border-radius:9999px;border-width:2px;border-color:currentColor;opacity:.6}.mockup-browser .mockup-browser-toolbar .input:after{content:"";position:absolute;left:1.25rem;top:50%;height:.5rem;--tw-translate-y:25%;--tw-rotate:-45deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) 
scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));border-radius:9999px;border-width:1px;border-color:currentColor;opacity:.6}.modal{background-color:transparent;color:inherit;transition-duration:.2s;transition-timing-function:cubic-bezier(0,0,.2,1);transition-property:transform,opacity,visibility;overflow-y:hidden;overscroll-behavior:contain}.modal::backdrop,.modal:not(dialog:not(.modal-open)){background-color:#0006;animation:modal-pop .2s ease-out}.modal-backdrop{z-index:-1;grid-column-start:1;grid-row-start:1;display:grid;align-self:stretch;justify-self:stretch;color:transparent}.modal-box{grid-column-start:1;grid-row-start:1;width:91.666667%;max-width:32rem;--tw-scale-x:.9;--tw-scale-y:.9;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));border-bottom-right-radius:var(--rounded-box,1rem);border-bottom-left-radius:var(--rounded-box,1rem);border-top-left-radius:var(--rounded-box,1rem);border-top-right-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));padding:1.5rem;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.2s;transition-timing-function:cubic-bezier(0,0,.2,1);box-shadow:rgba(0,0,0,.25) 0 25px 50px -12px;overflow-y:auto;overscroll-behavior:contain}.modal-open .modal-box,.modal-toggle:checked+.modal .modal-box,.modal:target .modal-box,.modal[open] .modal-box{--tw-translate-y:0px;--tw-scale-x:1;--tw-scale-y:1;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.modal-action{margin-top:1.5rem;justify-content:flex-end}.modal-action>:not([hidden])~:not([hidden]){--tw-space-x-reverse:0;margin-right:calc(.5rem * var(--tw-space-x-reverse));margin-left:calc(.5rem * calc(1 - var(--tw-space-x-reverse)))}@keyframes 
modal-pop{0%{opacity:0}}.navbar{padding:var(--navbar-padding,.5rem);min-height:4rem;width:100%}.progress{height:.5rem;border-radius:var(--rounded-box,1rem);background-color:var(--fallback-bc,oklch(var(--bc)/.2))}.progress::-moz-progress-bar{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)))}.progress-primary::-moz-progress-bar{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)))}.progress-secondary::-moz-progress-bar{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)))}.progress-accent::-moz-progress-bar{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)))}.progress-info::-moz-progress-bar{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)))}.progress-success::-moz-progress-bar{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)))}.progress-warning::-moz-progress-bar{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)))}.progress-error::-moz-progress-bar{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)))}.progress:indeterminate{--progress-color:var(--fallback-bc,oklch(var(--bc)/1))}.progress-primary:indeterminate{--progress-color:var(--fallback-p,oklch(var(--p)/1))}.progress-secondary:indeterminate{--progress-color:var(--fallback-s,oklch(var(--s)/1))}.progress-accent:indeterminate{--progress-color:var(--fallback-a,oklch(var(--a)/1))}.progress-info:indeterminate{--progress-color:var(--fallback-in,oklch(var(--in)/1))}.progress-success:indeterminate{--progress-color:var(--fallback-su,oklch(var(--su)/1))}.progress-warning:indeterminate{--progress-color:var(--fallback-wa,oklch(var(--wa)/1))}.progress-error:indeterminate{--progress-color:var(--fallback-er,oklch(var(--er)/1))}.progress::-webkit-progress-bar{border-radius:var(--rounded-box,1rem);background-color:transparent}.progress::-webkit-progress-value{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)))}.progress-primary::-webkit-progress-value{--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)))}.progress-secondary::-webkit-progress-value{--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)))}.progress-accent::-webkit-progress-value{--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)))}.progress-info::-webkit-progress-value{--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)))}.progress-success::-webkit-progress-value{--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)))}.progress-warning::-webkit-progress-value{--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)))}.progress-error::-webkit-progress-value{--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)))}.progress:indeterminate{background-image:repeating-linear-gradient(90deg,var(--progress-color) -1%,var(--progress-color) 10%,transparent 10%,transparent 
90%);background-size:200%;background-position-x:15%;animation:progress-loading 5s ease-in-out infinite}.progress:indeterminate::-moz-progress-bar{background-color:transparent;background-image:repeating-linear-gradient(90deg,var(--progress-color) -1%,var(--progress-color) 10%,transparent 10%,transparent 90%);background-size:200%;background-position-x:15%;animation:progress-loading 5s ease-in-out infinite}@keyframes progress-loading{50%{background-position-x:-115%}}.radial-progress{--value:0;--size:5rem;--thickness:calc(var(--size) / 10)}.radial-progress:after{background-color:currentColor}.radio{--chkbg:var(--bc);height:1.5rem;width:1.5rem;cursor:pointer;appearance:none;border-radius:9999px;border-width:1px;border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-border-opacity:0.2}.radio:focus{box-shadow:none}.radio:focus-visible{outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/1))}.radio:checked,.radio[aria-checked=true]{--tw-bg-opacity:1;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)));background-image:none;animation:radiomark var(--animation-input,.2s) ease-out;box-shadow:0 0 0 4px var(--fallback-b1,oklch(var(--b1)/1)) inset,0 0 0 4px var(--fallback-b1,oklch(var(--b1)/1)) inset}.radio-primary{--chkbg:var(--p);--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)))}@media(hover:hover){.radio-primary:hover{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)))}}.radio-primary:focus-visible{outline-color:var(--fallback-p,oklch(var(--p)/1))}.radio-primary:checked,.radio-primary[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}.radio-secondary{--chkbg:var(--s);--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)))}@media(hover:hover){.radio-secondary:hover{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)))}}.radio-secondary:focus-visible{outline-color:var(--fallback-s,oklch(var(--s)/1))}.radio-secondary:checked,.radio-secondary[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}.radio-accent{--chkbg:var(--a);--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)))}@media(hover:hover){.radio-accent:hover{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)))}}.radio-accent:focus-visible{outline-color:var(--fallback-a,oklch(var(--a)/1))}.radio-accent:checked,.radio-accent[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}.radio-success{--chkbg:var(--su);--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)))}@media(hover:hover){.radio-success:hover{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--t
w-border-opacity)))}}.radio-success:focus-visible{outline-color:var(--fallback-su,oklch(var(--su)/1))}.radio-success:checked,.radio-success[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}.radio-warning{--chkbg:var(--wa);--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)))}@media(hover:hover){.radio-warning:hover{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)))}}.radio-warning:focus-visible{outline-color:var(--fallback-wa,oklch(var(--wa)/1))}.radio-warning:checked,.radio-warning[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}.radio-info{--chkbg:var(--in);--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)))}@media(hover:hover){.radio-info:hover{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)))}}.radio-info:focus-visible{outline-color:var(--fallback-in,oklch(var(--in)/1))}.radio-info:checked,.radio-info[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}.radio-error{--chkbg:var(--er);--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)))}@media(hover:hover){.radio-error:hover{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)))}}.radio-error:focus-visible{outline-color:var(--fallback-er,oklch(var(--er)/1))}.radio-error:checked,.radio-error[aria-checked=true]{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}.radio:disabled{cursor:not-allowed;opacity:.2}@keyframes radiomark{0%{box-shadow:0 0 0 12px var(--fallback-b1,oklch(var(--b1)/1)) inset,0 0 0 12px var(--fallback-b1,oklch(var(--b1)/1)) inset}50%{box-shadow:0 0 0 3px var(--fallback-b1,oklch(var(--b1)/1)) inset,0 0 0 3px var(--fallback-b1,oklch(var(--b1)/1)) inset}100%{box-shadow:0 0 0 4px var(--fallback-b1,oklch(var(--b1)/1)) inset,0 0 0 4px var(--fallback-b1,oklch(var(--b1)/1)) inset}}.radio-mark{display:none}.range{appearance:none;-webkit-appearance:none;--range-shdw:var(--fallback-bc,oklch(var(--bc)/1));overflow:hidden;border-radius:var(--rounded-box,1rem);background-color:transparent}.range:focus-visible::-webkit-slider-thumb{--focus-shadow:0 0 0 6px var(--fallback-b1,oklch(var(--b1)/1)) inset,0 0 0 2rem var(--range-shdw) inset}.range:focus-visible::-moz-range-thumb{--focus-shadow:0 0 0 6px var(--fallback-b1,oklch(var(--b1)/1)) inset,0 0 0 2rem var(--range-shdw) 
inset}.range::-webkit-slider-runnable-track{height:.5rem;width:100%;border-radius:var(--rounded-box,1rem);background-color:var(--fallback-bc,oklch(var(--bc)/.1))}.range::-moz-range-track{height:.5rem;width:100%;border-radius:var(--rounded-box,1rem);background-color:var(--fallback-bc,oklch(var(--bc)/.1))}.range::-webkit-slider-thumb{position:relative;height:1.5rem;width:1.5rem;border-radius:var(--rounded-box,1rem);border-style:none;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));appearance:none;-webkit-appearance:none;top:50%;color:var(--range-shdw);transform:translateY(-50%);--filler-size:100rem;--filler-offset:0.6rem;box-shadow:0 0 0 3px var(--range-shdw) inset,var(--focus-shadow,0 0),calc(var(--filler-size) * -1 - var(--filler-offset)) 0 0 var(--filler-size)}.range::-moz-range-thumb{position:relative;height:1.5rem;width:1.5rem;border-radius:var(--rounded-box,1rem);border-style:none;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));top:50%;color:var(--range-shdw);--filler-size:100rem;--filler-offset:0.5rem;box-shadow:0 0 0 3px var(--range-shdw) inset,var(--focus-shadow,0 0),calc(var(--filler-size) * -1 - var(--filler-offset)) 0 0 var(--filler-size)}.range-primary{--range-shdw:var(--fallback-p,oklch(var(--p)/1))}.range-secondary{--range-shdw:var(--fallback-s,oklch(var(--s)/1))}.range-accent{--range-shdw:var(--fallback-a,oklch(var(--a)/1))}.range-success{--range-shdw:var(--fallback-su,oklch(var(--su)/1))}.range-warning{--range-shdw:var(--fallback-wa,oklch(var(--wa)/1))}.range-info{--range-shdw:var(--fallback-in,oklch(var(--in)/1))}.range-error{--range-shdw:var(--fallback-er,oklch(var(--er)/1))}.rating input{appearance:none;-webkit-appearance:none}.rating :where(input){animation:rating-pop var(--animation-input,.25s) ease-out;height:1.5rem;width:1.5rem;background-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-bg-opacity)));--tw-bg-opacity:1}.rating .rating-hidden{width:.5rem;background-color:transparent}.rating input[type=radio]:checked{background-image:none}.rating input:checked~input,.rating input[aria-checked=true]~input{--tw-bg-opacity:0.2}.rating input:focus-visible{transition-property:transform;transition-timing-function:cubic-bezier(.4,0,.2,1);transition-duration:.3s;transition-timing-function:cubic-bezier(0,0,.2,1);transform:translateY(-.125em)}.rating input:active:focus{animation:none;transform:translateY(-.125em)}.rating-half :where(input:not(.rating-hidden)){width:.75rem}@keyframes rating-pop{0%{transform:translateY(-.125em)}40%{transform:translateY(-.125em)}100%{transform:translateY(0)}}.select{border-radius:var(--rounded-btn,.5rem);border-width:1px;border-color:transparent;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));padding-inline-end:2.5rem}.select-bordered{border-color:var(--fallback-bc,oklch(var(--bc)/.2))}.select{background-image:linear-gradient(45deg,transparent 50%,currentColor 50%),linear-gradient(135deg,currentColor 50%,transparent 50%);background-position:calc(100% - 20px) calc(1px + 50%),calc(100% - 16.1px) calc(1px + 50%);background-size:4px 4px,4px 
4px;background-repeat:no-repeat}.select:focus{box-shadow:none;border-color:var(--fallback-bc,oklch(var(--bc)/.2));outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/.2))}.select-ghost{--tw-bg-opacity:0.05}.select-ghost:focus{--tw-bg-opacity:1;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}.select-primary{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)))}.select-primary:focus{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));outline-color:var(--fallback-p,oklch(var(--p)/1))}.select-secondary{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)))}.select-secondary:focus{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)));outline-color:var(--fallback-s,oklch(var(--s)/1))}.select-accent{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)))}.select-accent:focus{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)));outline-color:var(--fallback-a,oklch(var(--a)/1))}.select-info{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)))}.select-info:focus{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)));outline-color:var(--fallback-in,oklch(var(--in)/1))}.select-success{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)))}.select-success:focus{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)));outline-color:var(--fallback-su,oklch(var(--su)/1))}.select-warning{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)))}.select-warning:focus{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)));outline-color:var(--fallback-wa,oklch(var(--wa)/1))}.select-error{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)))}.select-error:focus{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)));outline-color:var(--fallback-er,oklch(var(--er)/1))}.select-disabled,.select:disabled,.select[disabled]{cursor:not-allowed;--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)));color:var(--fallback-bc,oklch(var(--bc)/.4))}.select-disabled::placeholder,.select:disabled::placeholder,.select[disabled]::placeholder{color:var(--fallback-bc,oklch(var(--bc)/var(--tw-placeholder-opacity)));--tw-placeholder-opacity:0.2}.select-multiple,.select[multiple],.select[size].select:not([size="1"]){background-image:none;padding-right:1rem}[dir=rtl] .select{background-position:calc(0% + 12px) calc(1px + 50%),calc(0% + 16px) calc(1px + 50%)}.skeleton{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-bg-opacity)));will-change:background-position;animation:skeleton 1.8s ease-in-out infinite;background-image:linear-gradient(105deg,transparent 0,transparent 40%,var(--fallback-b1,oklch(var(--b1)/1)) 50%,transparent 60%,transparent 100%);background-size:200% auto;background-repeat:no-repeat;background-position-x:-50%}@media 
(prefers-reduced-motion){.skeleton{animation-duration:15s}}@keyframes skeleton{from{background-position:150%}to{background-position:-50%}}.stack{place-items:center;align-items:flex-end}.stack>*{width:100%;opacity:.6}.stack>:nth-child(2){opacity:.8}.stack>:nth-child(1){opacity:1}.stats{border-radius:var(--rounded-box,1rem);--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}:where(.stats)>:not([hidden])~:not([hidden]){--tw-divide-x-reverse:0;border-right-width:calc(1px * var(--tw-divide-x-reverse));border-left-width:calc(1px * calc(1 - var(--tw-divide-x-reverse)));--tw-divide-y-reverse:0;border-top-width:calc(0px * calc(1 - var(--tw-divide-y-reverse)));border-bottom-width:calc(0px * var(--tw-divide-y-reverse))}:where(.stats){overflow-x:auto}[dir=rtl] .stats>:not([hidden])~:not([hidden]){--tw-divide-x-reverse:1}.stat{column-gap:1rem;border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-border-opacity:0.1;padding-left:1.5rem;padding-right:1.5rem;padding-top:1rem;padding-bottom:1rem}.stat-title{color:var(--fallback-bc,oklch(var(--bc)/.6))}.stat-value{font-size:2.25rem;line-height:2.5rem;font-weight:800}.stat-desc{font-size:.75rem;line-height:1rem;color:var(--fallback-bc,oklch(var(--bc)/.6))}.stat-actions{margin-top:1rem}.steps .step{grid-template-rows:40px 1fr;grid-template-columns:auto;min-width:4rem}.steps .step:before{top:0;grid-column-start:1;grid-row-start:1;height:.5rem;width:100%;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));--tw-bg-opacity:1;background-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));content:"";margin-inline-start:-100%}.steps .step:after{content:counter(step);counter-increment:step;z-index:1;position:relative;grid-column-start:1;grid-row-start:1;display:grid;height:2rem;width:2rem;place-items:center;place-self:center;border-radius:9999px;--tw-bg-opacity:1;background-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}.steps .step:first-child:before{content:none}.steps .step[data-content]:after{content:attr(data-content)}.steps .step-neutral+.step-neutral:before,.steps .step-neutral:after{--tw-bg-opacity:1;background-color:var(--fallback-n,oklch(var(--n)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-nc,oklch(var(--nc)/var(--tw-text-opacity)))}.steps .step-primary+.step-primary:before,.steps .step-primary:after{--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}.steps .step-secondary+.step-secondary:before,.steps .step-secondary:after{--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}.steps .step-accent+.step-accent:before,.steps .step-accent:after{--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}.steps 
.step-info+.step-info:before{--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)))}.steps .step-info:after{--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}.steps .step-success+.step-success:before{--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)))}.steps .step-success:after{--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}.steps .step-warning+.step-warning:before{--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)))}.steps .step-warning:after{--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}.steps .step-error+.step-error:before{--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)))}.steps .step-error:after{--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}.swap{cursor:pointer}.swap>*{transition-duration:.3s;transition-timing-function:cubic-bezier(0,0,.2,1);transition-property:transform,opacity}.swap-rotate .swap-indeterminate,.swap-rotate .swap-on,.swap-rotate input:indeterminate~.swap-on{--tw-rotate:45deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.swap-active:where(.swap-rotate) .swap-off,.swap-rotate input:checked~.swap-off,.swap-rotate input:indeterminate~.swap-off{--tw-rotate:-45deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.swap-active:where(.swap-rotate) .swap-on,.swap-rotate input:checked~.swap-on,.swap-rotate input:indeterminate~.swap-indeterminate{--tw-rotate:0deg;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.swap-flip{transform-style:preserve-3d;perspective:16em}.swap-flip .swap-indeterminate,.swap-flip .swap-on,.swap-flip input:indeterminate~.swap-on{transform:rotateY(180deg);backface-visibility:hidden;opacity:1}.swap-active:where(.swap-flip) .swap-off,.swap-flip input:checked~.swap-off,.swap-flip input:indeterminate~.swap-off{transform:rotateY(-180deg);backface-visibility:hidden;opacity:1}.swap-active:where(.swap-flip) .swap-on,.swap-flip input:checked~.swap-on,.swap-flip 
input:indeterminate~.swap-indeterminate{transform:rotateY(0)}.tabs-lifted>.tab:focus-visible{border-end-end-radius:0;border-end-start-radius:0}.tab{--tw-text-opacity:0.5}@media(hover:hover){.tab:hover{--tw-text-opacity:1}}.tab{--tab-color:var(--fallback-bc,oklch(var(--bc)/1));--tab-bg:var(--fallback-b1,oklch(var(--b1)/1));--tab-border-color:var(--fallback-b3,oklch(var(--b3)/1));color:var(--tab-color);padding-inline-start:var(--tab-padding,1rem);padding-inline-end:var(--tab-padding,1rem)}.tab:is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]),.tab:is(input:checked){border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-border-opacity:1;--tw-text-opacity:1}.tab:focus{outline:2px solid transparent;outline-offset:2px}.tab:focus-visible{outline:2px solid currentColor;outline-offset:-5px}.tab-disabled,.tab[disabled]{cursor:not-allowed;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--tw-text-opacity:0.2}@media (hover:hover){.tab[disabled],.tab[disabled]:hover{cursor:not-allowed;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));--tw-text-opacity:0.2}}.tabs-bordered>.tab{border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));--tw-border-opacity:0.2;border-style:solid;border-bottom-width:calc(var(--tab-border,1px) + 1px)}.tabs-lifted>.tab{border:var(--tab-border,1px) solid transparent;border-width:0 0 var(--tab-border,1px) 0;border-start-start-radius:var(--tab-radius,.5rem);border-start-end-radius:var(--tab-radius,.5rem);border-bottom-color:var(--tab-border-color);padding-inline-start:var(--tab-padding,1rem);padding-inline-end:var(--tab-padding,1rem);padding-top:var(--tab-border,1px)}.tabs-lifted>.tab:is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]),.tabs-lifted>.tab:is(input:checked){background-color:var(--tab-bg);border-width:var(--tab-border,1px) var(--tab-border,1px) 0 var(--tab-border,1px);border-inline-start-color:var(--tab-border-color);border-inline-end-color:var(--tab-border-color);border-top-color:var(--tab-border-color);padding-inline-start:calc(var(--tab-padding,1rem) - var(--tab-border,1px));padding-inline-end:calc(var(--tab-padding,1rem) - var(--tab-border,1px));padding-bottom:var(--tab-border,1px);padding-top:0}.tabs-lifted>.tab:is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]):before,.tabs-lifted>.tab:is(input:checked):before{z-index:1;content:"";display:block;position:absolute;width:calc(100% + var(--tab-radius,.5rem) * 2);height:var(--tab-radius,.5rem);bottom:0;background-size:var(--tab-radius,.5rem);background-position:top left,top right;background-repeat:no-repeat;--tab-grad:calc(69% - var(--tab-border, 1px));--radius-start:radial-gradient(circle at top left, transparent var(--tab-grad), var(--tab-border-color) calc(var(--tab-grad) + 0.25px), var(--tab-border-color) calc(var(--tab-grad) + var(--tab-border, 1px)), var(--tab-bg) calc(var(--tab-grad) + var(--tab-border, 1px) + 0.25px));--radius-end:radial-gradient(circle at top right, transparent var(--tab-grad), var(--tab-border-color) calc(var(--tab-grad) + 0.25px), var(--tab-border-color) calc(var(--tab-grad) + var(--tab-border, 1px)), var(--tab-bg) calc(var(--tab-grad) + var(--tab-border, 1px) + 0.25px)
);background-image:var(--radius-start),var(--radius-end)}.tabs-lifted>.tab:is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]):first-child:before,.tabs-lifted>.tab:is(input:checked):first-child:before{background-image:var(--radius-end);background-position:top right}[dir=rtl] .tabs-lifted>.tab:is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]):first-child:before,[dir=rtl] .tabs-lifted>.tab:is(input:checked):first-child:before{background-image:var(--radius-start);background-position:top left}.tabs-lifted>.tab:is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]):last-child:before,.tabs-lifted>.tab:is(input:checked):last-child:before{background-image:var(--radius-start);background-position:top left}[dir=rtl] .tabs-lifted>.tab:is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]):last-child:before,[dir=rtl] .tabs-lifted>.tab:is(input:checked):last-child:before{background-image:var(--radius-end);background-position:top right}.tabs-lifted>.tab:is(input:checked)+.tabs-lifted .tab:is(input:checked):before,.tabs-lifted>:is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled])+.tabs-lifted :is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]):before{background-image:var(--radius-end);background-position:top right}.tabs-boxed{border-radius:var(--rounded-btn,.5rem);--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)));padding:.25rem}.tabs-boxed .tab{border-radius:var(--rounded-btn,.5rem)}.tabs-boxed :is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]),.tabs-boxed :is(input:checked){--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}@media(hover:hover){.tabs-boxed :is(.tab-active,[aria-selected=true]):not(.tab-disabled):not([disabled]):hover,.tabs-boxed :is(input:checked):hover{--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}}.table{border-radius:var(--rounded-box,1rem);text-align:left;font-size:.875rem;line-height:1.25rem}.table:where([dir=rtl],[dir=rtl] *){text-align:right}.table :where(th,td){padding-left:1rem;padding-right:1rem;padding-top:.75rem;padding-bottom:.75rem;vertical-align:middle}.table tr.active,.table tr.active:nth-child(even),.table-zebra tbody tr:nth-child(even){--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)))}@media(hover:hover){.table tr.hover:hover,.table tr.hover:nth-child(even):hover{--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)))}}.table-zebra tr.active,.table-zebra tr.active:nth-child(even),.table-zebra-zebra tbody tr:nth-child(even){--tw-bg-opacity:1;background-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-bg-opacity)))}@media(hover:hover){.table-zebra tr.hover:hover,.table-zebra tr.hover:nth-child(even):hover{--tw-bg-opacity:1;background-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-bg-opacity)))}}.table :where(thead tr,tbody tr:not(:last-child),tbody tr:first-child:last-child){border-bottom-width:1px;--tw-border-opacity:1;border-bottom-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)))}.table :where(thead,tfoot){white-space:nowrap;font-size:.75rem;line-height:1rem;font-weight:700;color:var(--fallback-bc,oklch(var(--bc)/.6))}.table 
:where(tfoot){border-top-width:1px;--tw-border-opacity:1;border-top-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)))}.textarea{border-radius:var(--rounded-btn,.5rem);border-width:1px;border-color:transparent;--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)))}.textarea-bordered{border-color:var(--fallback-bc,oklch(var(--bc)/.2))}.textarea:focus{box-shadow:none;border-color:var(--fallback-bc,oklch(var(--bc)/.2));outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/.2))}.textarea-ghost{--tw-bg-opacity:0.05}.textarea-ghost:focus{--tw-bg-opacity:1;--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));box-shadow:none}.textarea-primary{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)))}.textarea-primary:focus{--tw-border-opacity:1;border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));outline-color:var(--fallback-p,oklch(var(--p)/1))}.textarea-secondary{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)))}.textarea-secondary:focus{--tw-border-opacity:1;border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)));outline-color:var(--fallback-s,oklch(var(--s)/1))}.textarea-accent{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)))}.textarea-accent:focus{--tw-border-opacity:1;border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)));outline-color:var(--fallback-a,oklch(var(--a)/1))}.textarea-info{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)))}.textarea-info:focus{--tw-border-opacity:1;border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)));outline-color:var(--fallback-in,oklch(var(--in)/1))}.textarea-success{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)))}.textarea-success:focus{--tw-border-opacity:1;border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)));outline-color:var(--fallback-su,oklch(var(--su)/1))}.textarea-warning{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)))}.textarea-warning:focus{--tw-border-opacity:1;border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)));outline-color:var(--fallback-wa,oklch(var(--wa)/1))}.textarea-error{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)))}.textarea-error:focus{--tw-border-opacity:1;border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)));outline-color:var(--fallback-er,oklch(var(--er)/1))}.textarea-disabled,.textarea:disabled,.textarea[disabled]{cursor:not-allowed;--tw-border-opacity:1;border-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-b2,oklch(var(--b2)/var(--tw-bg-opacity)));color:var(--fallback-bc,oklch(var(--bc)/.4))}.textarea-disabled::placeholder,.textarea:disabled::placeholder,.textarea[disabled]::placeholder{color:var(--fallback-bc,oklch(var(--bc)/var(--tw-placeholder-opacity)));--tw-placeholder-opacity:0.2}.timeline 
hr{height:.25rem}:where(.timeline hr){--tw-bg-opacity:1;background-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-bg-opacity)))}:where(.timeline:has(.timeline-middle) hr):first-child{border-start-end-radius:var(--rounded-badge,1.9rem);border-end-end-radius:var(--rounded-badge,1.9rem);border-start-start-radius:0;border-end-start-radius:0}:where(.timeline:has(.timeline-middle) hr):last-child{border-start-start-radius:var(--rounded-badge,1.9rem);border-end-start-radius:var(--rounded-badge,1.9rem);border-start-end-radius:0;border-end-end-radius:0}:where(.timeline:not(:has(.timeline-middle)) :first-child hr:last-child){border-start-start-radius:var(--rounded-badge,1.9rem);border-end-start-radius:var(--rounded-badge,1.9rem);border-start-end-radius:0;border-end-end-radius:0}:where(.timeline:not(:has(.timeline-middle)) :last-child hr:first-child){border-start-end-radius:var(--rounded-badge,1.9rem);border-end-end-radius:var(--rounded-badge,1.9rem);border-start-start-radius:0;border-end-start-radius:0}.timeline-box{border-radius:var(--rounded-box,1rem);border-width:1px;--tw-border-opacity:1;border-color:var(--fallback-b3,oklch(var(--b3)/var(--tw-border-opacity)));--tw-bg-opacity:1;background-color:var(--fallback-b1,oklch(var(--b1)/var(--tw-bg-opacity)));padding-left:1rem;padding-right:1rem;padding-top:.5rem;padding-bottom:.5rem;--tw-shadow:0 1px 2px 0 rgb(0 0 0 / 0.05);--tw-shadow-colored:0 1px 2px 0 var(--tw-shadow-color);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.toast{gap:.5rem;padding:1rem}.toast>*{animation:toast-pop .25s ease-out}@keyframes toast-pop{0%{transform:scale(.9);opacity:0}100%{transform:scale(1);opacity:1}}.toggle{--tglbg:var(--fallback-b1,oklch(var(--b1)/1));--handleoffset:1.5rem;--handleoffsetcalculator:calc(var(--handleoffset) * -1);--togglehandleborder:0 0;height:1.5rem;width:3rem;cursor:pointer;appearance:none;border-radius:var(--rounded-badge,1.9rem);border-width:1px;border-color:currentColor;background-color:currentColor;color:var(--fallback-bc,oklch(var(--bc)/.5));transition:background,box-shadow var(--animation-input,.2s) ease-out;box-shadow:var(--handleoffsetcalculator) 0 0 2px var(--tglbg) inset,0 0 0 2px var(--tglbg) inset,var(--togglehandleborder)}[dir=rtl] .toggle{--handleoffsetcalculator:calc(var(--handleoffset) * 1)}.toggle:focus-visible{outline-style:solid;outline-width:2px;outline-offset:2px;outline-color:var(--fallback-bc,oklch(var(--bc)/.2))}.toggle:hover{background-color:currentColor}.toggle:checked,.toggle[aria-checked=true]{background-image:none;--handleoffsetcalculator:var(--handleoffset);--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)))}[dir=rtl] .toggle:checked,[dir=rtl] .toggle[aria-checked=true]{--handleoffsetcalculator:calc(var(--handleoffset) * -1)}.toggle:indeterminate{--tw-text-opacity:1;color:var(--fallback-bc,oklch(var(--bc)/var(--tw-text-opacity)));box-shadow:calc(var(--handleoffset)/ 2) 0 0 2px var(--tglbg) inset,calc(var(--handleoffset)/ -2) 0 0 2px var(--tglbg) inset,0 0 0 2px var(--tglbg) inset}[dir=rtl] .toggle:indeterminate{box-shadow:calc(var(--handleoffset)/ 2) 0 0 2px var(--tglbg) inset,calc(var(--handleoffset)/ -2) 0 0 2px var(--tglbg) inset,0 0 0 2px var(--tglbg) 
inset}.toggle-primary:focus-visible{outline-color:var(--fallback-p,oklch(var(--p)/1))}.toggle-primary:checked,.toggle-primary[aria-checked=true]{border-color:var(--fallback-p,oklch(var(--p)/var(--tw-border-opacity)));--tw-border-opacity:0.1;--tw-bg-opacity:1;background-color:var(--fallback-p,oklch(var(--p)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-pc,oklch(var(--pc)/var(--tw-text-opacity)))}.toggle-secondary:focus-visible{outline-color:var(--fallback-s,oklch(var(--s)/1))}.toggle-secondary:checked,.toggle-secondary[aria-checked=true]{border-color:var(--fallback-s,oklch(var(--s)/var(--tw-border-opacity)));--tw-border-opacity:0.1;--tw-bg-opacity:1;background-color:var(--fallback-s,oklch(var(--s)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-sc,oklch(var(--sc)/var(--tw-text-opacity)))}.toggle-accent:focus-visible{outline-color:var(--fallback-a,oklch(var(--a)/1))}.toggle-accent:checked,.toggle-accent[aria-checked=true]{border-color:var(--fallback-a,oklch(var(--a)/var(--tw-border-opacity)));--tw-border-opacity:0.1;--tw-bg-opacity:1;background-color:var(--fallback-a,oklch(var(--a)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-ac,oklch(var(--ac)/var(--tw-text-opacity)))}.toggle-success:focus-visible{outline-color:var(--fallback-su,oklch(var(--su)/1))}.toggle-success:checked,.toggle-success[aria-checked=true]{border-color:var(--fallback-su,oklch(var(--su)/var(--tw-border-opacity)));--tw-border-opacity:0.1;--tw-bg-opacity:1;background-color:var(--fallback-su,oklch(var(--su)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-suc,oklch(var(--suc)/var(--tw-text-opacity)))}.toggle-warning:focus-visible{outline-color:var(--fallback-wa,oklch(var(--wa)/1))}.toggle-warning:checked,.toggle-warning[aria-checked=true]{border-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-border-opacity)));--tw-border-opacity:0.1;--tw-bg-opacity:1;background-color:var(--fallback-wa,oklch(var(--wa)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-wac,oklch(var(--wac)/var(--tw-text-opacity)))}.toggle-info:focus-visible{outline-color:var(--fallback-in,oklch(var(--in)/1))}.toggle-info:checked,.toggle-info[aria-checked=true]{border-color:var(--fallback-in,oklch(var(--in)/var(--tw-border-opacity)));--tw-border-opacity:0.1;--tw-bg-opacity:1;background-color:var(--fallback-in,oklch(var(--in)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-inc,oklch(var(--inc)/var(--tw-text-opacity)))}.toggle-error:focus-visible{outline-color:var(--fallback-er,oklch(var(--er)/1))}.toggle-error:checked,.toggle-error[aria-checked=true]{border-color:var(--fallback-er,oklch(var(--er)/var(--tw-border-opacity)));--tw-border-opacity:0.1;--tw-bg-opacity:1;background-color:var(--fallback-er,oklch(var(--er)/var(--tw-bg-opacity)));--tw-text-opacity:1;color:var(--fallback-erc,oklch(var(--erc)/var(--tw-text-opacity)))}.toggle:disabled{cursor:not-allowed;--tw-border-opacity:1;border-color:var(--fallback-bc,oklch(var(--bc)/var(--tw-border-opacity)));background-color:transparent;opacity:.3;--togglehandleborder:0 0 0 3px var(--fallback-bc,oklch(var(--bc)/1)) inset,var(--handleoffsetcalculator) 0 0 3px var(--fallback-bc,oklch(var(--bc)/1)) inset}.toggle-mark{display:none}:root 
.prose{--tw-prose-body:var(--fallback-bc,oklch(var(--bc)/0.8));--tw-prose-headings:var(--fallback-bc,oklch(var(--bc)/1));--tw-prose-lead:var(--fallback-bc,oklch(var(--bc)/1));--tw-prose-links:var(--fallback-bc,oklch(var(--bc)/1));--tw-prose-bold:var(--fallback-bc,oklch(var(--bc)/1));--tw-prose-counters:var(--fallback-bc,oklch(var(--bc)/1));--tw-prose-bullets:var(--fallback-bc,oklch(var(--bc)/0.5));--tw-prose-hr:var(--fallback-bc,oklch(var(--bc)/0.2));--tw-prose-quotes:var(--fallback-bc,oklch(var(--bc)/1));--tw-prose-quote-borders:var(--fallback-bc,oklch(var(--bc)/0.2));--tw-prose-captions:var(--fallback-bc,oklch(var(--bc)/0.5));--tw-prose-code:var(--fallback-bc,oklch(var(--bc)/1));--tw-prose-pre-code:var(--fallback-nc,oklch(var(--nc)/1));--tw-prose-pre-bg:var(--fallback-n,oklch(var(--n)/1));--tw-prose-th-borders:var(--fallback-bc,oklch(var(--bc)/0.5));--tw-prose-td-borders:var(--fallback-bc,oklch(var(--bc)/0.2))}.prose :where(code):not(:where([class~=not-prose] *,pre *)){padding:1px 8px;border-radius:var(--rounded-badge);font-weight:initial;background-color:var(--fallback-bc,oklch(var(--bc)/.1))}@supports not (color:oklch(0% 0 0)){.prose :where(code):not(:where([class~=not-prose] *,pre *)){background-color:var(--fallback-b3,oklch(var(--b3)/1))}}.prose :where(code):not(:where([class~=not-prose],[class~=not-prose] *))::after,.prose :where(code):not(:where([class~=not-prose],[class~=not-prose] *))::before{display:none}.prose pre code{border-radius:0;padding:0}.prose :where(tbody tr,thead):not(:where([class~=not-prose] *)){border-bottom-color:var(--fallback-bc,oklch(var(--bc)/.2))}:root{color-scheme:light;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:89.824% 0.06192 275.75;--ac:15.352% 0.0368 183.61;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:49.12% 0.3096 275.75;--s:69.71% 0.329 342.55;--sc:98.71% 0.0106 342.55;--a:76.76% 0.184 183.61;--n:32.1785% 0.02476 255.701624;--nc:89.4994% 0.011585 252.096176;--b1:100% 0 0;--b2:96.1151% 0 0;--b3:92.4169% 0.00108 197.137559;--bc:27.8078% 0.029596 256.847952}@media (prefers-color-scheme:dark){:root{color-scheme:dark;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:13.138% 0.0392 275.75;--sc:14.96% 0.052 342.55;--ac:14.902% 0.0334 183.61;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:65.69% 0.196 275.75;--s:74.8% 0.26 342.55;--a:74.51% 0.167 183.61;--n:31.3815% 0.021108 254.139175;--nc:74.6477% 0.0216 264.435964;--b1:25.3267% 0.015896 252.417568;--b2:23.2607% 0.013807 253.100675;--b3:21.1484% 0.01165 254.087939;--bc:74.6477% 0.0216 264.435964}}[data-theme=light]{color-scheme:light;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:89.824% 0.06192 275.75;--ac:15.352% 0.0368 183.61;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:49.12% 0.3096 275.75;--s:69.71% 0.329 342.55;--sc:98.71% 0.0106 342.55;--a:76.76% 0.184 183.61;--n:32.1785% 0.02476 
255.701624;--nc:89.4994% 0.011585 252.096176;--b1:100% 0 0;--b2:96.1151% 0 0;--b3:92.4169% 0.00108 197.137559;--bc:27.8078% 0.029596 256.847952}:root:has(input.theme-controller[value=light]:checked){color-scheme:light;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:89.824% 0.06192 275.75;--ac:15.352% 0.0368 183.61;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:49.12% 0.3096 275.75;--s:69.71% 0.329 342.55;--sc:98.71% 0.0106 342.55;--a:76.76% 0.184 183.61;--n:32.1785% 0.02476 255.701624;--nc:89.4994% 0.011585 252.096176;--b1:100% 0 0;--b2:96.1151% 0 0;--b3:92.4169% 0.00108 197.137559;--bc:27.8078% 0.029596 256.847952}[data-theme=dark]{color-scheme:dark;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:13.138% 0.0392 275.75;--sc:14.96% 0.052 342.55;--ac:14.902% 0.0334 183.61;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:65.69% 0.196 275.75;--s:74.8% 0.26 342.55;--a:74.51% 0.167 183.61;--n:31.3815% 0.021108 254.139175;--nc:74.6477% 0.0216 264.435964;--b1:25.3267% 0.015896 252.417568;--b2:23.2607% 0.013807 253.100675;--b3:21.1484% 0.01165 254.087939;--bc:74.6477% 0.0216 264.435964}:root:has(input.theme-controller[value=dark]:checked){color-scheme:dark;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:13.138% 0.0392 275.75;--sc:14.96% 0.052 342.55;--ac:14.902% 0.0334 183.61;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:65.69% 0.196 275.75;--s:74.8% 0.26 342.55;--a:74.51% 0.167 183.61;--n:31.3815% 0.021108 254.139175;--nc:74.6477% 0.0216 264.435964;--b1:25.3267% 0.015896 252.417568;--b2:23.2607% 0.013807 253.100675;--b3:21.1484% 0.01165 254.087939;--bc:74.6477% 0.0216 264.435964}[data-theme=cupcake]{color-scheme:light;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:15.2344% 0.017892 200.026556;--sc:15.787% 0.020249 356.29965;--ac:15.8762% 0.029206 78.618794;--nc:84.7148% 0.013247 313.189598;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--p:76.172% 0.089459 200.026556;--s:78.9351% 0.101246 356.29965;--a:79.3811% 0.146032 78.618794;--n:23.5742% 0.066235 313.189598;--b1:97.7882% 0.00418 56.375637;--b2:93.9822% 0.007638 61.449292;--b3:91.5861% 0.006811 53.440502;--bc:23.5742% 0.066235 313.189598;--rounded-btn:1.9rem;--tab-border:2px;--tab-radius:0.7rem}:root:has(input.theme-controller[value=cupcake]:checked){color-scheme:light;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:15.2344% 0.017892 200.026556;--sc:15.787% 0.020249 356.29965;--ac:15.8762% 0.029206 78.618794;--nc:84.7148% 0.013247 313.189598;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 
0;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--p:76.172% 0.089459 200.026556;--s:78.9351% 0.101246 356.29965;--a:79.3811% 0.146032 78.618794;--n:23.5742% 0.066235 313.189598;--b1:97.7882% 0.00418 56.375637;--b2:93.9822% 0.007638 61.449292;--b3:91.5861% 0.006811 53.440502;--bc:23.5742% 0.066235 313.189598;--rounded-btn:1.9rem;--tab-border:2px;--tab-radius:0.7rem}[data-theme=bumblebee]{color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--bc:20% 0 0;--ac:16.254% 0.0314 56.52;--nc:82.55% 0.015 281.99;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:89.51% 0.2132 96.61;--pc:38.92% 0.046 96.61;--s:80.39% 0.194 70.76;--sc:39.38% 0.068 70.76;--a:81.27% 0.157 56.52;--n:12.75% 0.075 281.99;--b1:100% 0 0}:root:has(input.theme-controller[value=bumblebee]:checked){color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--bc:20% 0 0;--ac:16.254% 0.0314 56.52;--nc:82.55% 0.015 281.99;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:89.51% 0.2132 96.61;--pc:38.92% 0.046 96.61;--s:80.39% 0.194 70.76;--sc:39.38% 0.068 70.76;--a:81.27% 0.157 56.52;--n:12.75% 0.075 281.99;--b1:100% 0 0}[data-theme=emerald]{color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:76.6626% 0.135433 153.450024;--pc:33.3872% 0.040618 162.240129;--s:61.3028% 0.202368 261.294233;--sc:100% 0 0;--a:72.7725% 0.149783 33.200363;--ac:0% 0 0;--n:35.5192% 0.032071 262.988584;--nc:98.4625% 0.001706 247.838921;--b1:100% 0 0;--bc:35.5192% 0.032071 262.988584;--animation-btn:0;--animation-input:0;--btn-focus-scale:1}:root:has(input.theme-controller[value=emerald]:checked){color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:76.6626% 0.135433 153.450024;--pc:33.3872% 0.040618 162.240129;--s:61.3028% 0.202368 261.294233;--sc:100% 0 0;--a:72.7725% 0.149783 33.200363;--ac:0% 0 0;--n:35.5192% 0.032071 262.988584;--nc:98.4625% 0.001706 247.838921;--b1:100% 0 0;--bc:35.5192% 0.032071 262.988584;--animation-btn:0;--animation-input:0;--btn-focus-scale:1}[data-theme=corporate]{color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:12.078% 0.0456 269.1;--sc:13.0739% 0.010951 256.688055;--ac:15.3934% 0.022799 163.57888;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--border-btn:1px;--tab-border:1px;--p:60.39% 0.228 269.1;--s:65.3694% 0.054756 256.688055;--a:76.9669% 0.113994 163.57888;--n:22.3899% 0.031305 278.07229;--nc:95.8796% 0.008588 
247.915135;--b1:100% 0 0;--bc:22.3899% 0.031305 278.07229;--rounded-box:0.25rem;--rounded-btn:.125rem;--rounded-badge:.125rem;--tab-radius:0.25rem;--animation-btn:0;--animation-input:0;--btn-focus-scale:1}:root:has(input.theme-controller[value=corporate]:checked){color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:12.078% 0.0456 269.1;--sc:13.0739% 0.010951 256.688055;--ac:15.3934% 0.022799 163.57888;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--border-btn:1px;--tab-border:1px;--p:60.39% 0.228 269.1;--s:65.3694% 0.054756 256.688055;--a:76.9669% 0.113994 163.57888;--n:22.3899% 0.031305 278.07229;--nc:95.8796% 0.008588 247.915135;--b1:100% 0 0;--bc:22.3899% 0.031305 278.07229;--rounded-box:0.25rem;--rounded-btn:.125rem;--rounded-badge:.125rem;--tab-radius:0.25rem;--animation-btn:0;--animation-input:0;--btn-focus-scale:1}[data-theme=synthwave]{color-scheme:dark;--b2:20.2941% 0.076211 287.835609;--b3:18.7665% 0.070475 287.835609;--pc:14.4421% 0.031903 342.009383;--sc:15.6543% 0.02362 227.382405;--ac:17.608% 0.0412 93.72;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:72.2105% 0.159514 342.009383;--s:78.2714% 0.118101 227.382405;--a:88.04% 0.206 93.72;--n:25.5554% 0.103537 286.507967;--nc:97.9365% 0.00819 301.358346;--b1:21.8216% 0.081948 287.835609;--bc:97.9365% 0.00819 301.358346;--in:76.5197% 0.12273 231.831603;--inc:23.5017% 0.096418 290.329844;--su:86.0572% 0.115038 178.624677;--suc:23.5017% 0.096418 290.329844;--wa:85.531% 0.122117 93.722227;--wac:23.5017% 0.096418 290.329844;--er:73.7005% 0.121339 32.639257;--erc:23.5017% 0.096418 290.329844}:root:has(input.theme-controller[value=synthwave]:checked){color-scheme:dark;--b2:20.2941% 0.076211 287.835609;--b3:18.7665% 0.070475 287.835609;--pc:14.4421% 0.031903 342.009383;--sc:15.6543% 0.02362 227.382405;--ac:17.608% 0.0412 93.72;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:72.2105% 0.159514 342.009383;--s:78.2714% 0.118101 227.382405;--a:88.04% 0.206 93.72;--n:25.5554% 0.103537 286.507967;--nc:97.9365% 0.00819 301.358346;--b1:21.8216% 0.081948 287.835609;--bc:97.9365% 0.00819 301.358346;--in:76.5197% 0.12273 231.831603;--inc:23.5017% 0.096418 290.329844;--su:86.0572% 0.115038 178.624677;--suc:23.5017% 0.096418 290.329844;--wa:85.531% 0.122117 93.722227;--wac:23.5017% 0.096418 290.329844;--er:73.7005% 0.121339 32.639257;--erc:23.5017% 0.096418 290.329844}[data-theme=retro]{color-scheme:light;--inc:90.923% 0.043042 262.880917;--suc:12.541% 0.033982 149.213788;--wac:13.3168% 0.031484 58.31834;--erc:13.144% 0.0398 27.33;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:76.8664% 0.104092 22.664655;--pc:26.5104% 0.006243 0.522862;--s:80.7415% 0.052534 159.094608;--sc:26.5104% 0.006243 0.522862;--a:70.3919% 0.125455 52.953428;--ac:26.5104% 0.006243 0.522862;--n:28.4181% 0.009519 355.534017;--nc:92.5604% 0.025113 89.217311;--b1:91.6374% 0.034554 90.51575;--b2:88.2722% 0.049418 91.774344;--b3:84.133% 0.065952 90.856665;--bc:26.5104% 0.006243 0.522862;--in:54.615% 0.215208 262.880917;--su:62.7052% 0.169912 149.213788;--wa:66.584% 0.157422 58.31834;--er:65.72% 0.199 
27.33;--rounded-box:0.4rem;--rounded-btn:0.4rem;--rounded-badge:0.4rem;--tab-radius:0.4rem}:root:has(input.theme-controller[value=retro]:checked){color-scheme:light;--inc:90.923% 0.043042 262.880917;--suc:12.541% 0.033982 149.213788;--wac:13.3168% 0.031484 58.31834;--erc:13.144% 0.0398 27.33;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:76.8664% 0.104092 22.664655;--pc:26.5104% 0.006243 0.522862;--s:80.7415% 0.052534 159.094608;--sc:26.5104% 0.006243 0.522862;--a:70.3919% 0.125455 52.953428;--ac:26.5104% 0.006243 0.522862;--n:28.4181% 0.009519 355.534017;--nc:92.5604% 0.025113 89.217311;--b1:91.6374% 0.034554 90.51575;--b2:88.2722% 0.049418 91.774344;--b3:84.133% 0.065952 90.856665;--bc:26.5104% 0.006243 0.522862;--in:54.615% 0.215208 262.880917;--su:62.7052% 0.169912 149.213788;--wa:66.584% 0.157422 58.31834;--er:65.72% 0.199 27.33;--rounded-box:0.4rem;--rounded-btn:0.4rem;--rounded-badge:0.4rem;--tab-radius:0.4rem}[data-theme=cyberpunk]{color-scheme:light;--b2:87.8943% 0.16647 104.32;--b3:81.2786% 0.15394 104.32;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--bc:18.902% 0.0358 104.32;--pc:14.844% 0.0418 6.35;--sc:16.666% 0.0368 204.72;--ac:14.372% 0.04352 310.43;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;--p:74.22% 0.209 6.35;--s:83.33% 0.184 204.72;--a:71.86% 0.2176 310.43;--n:23.04% 0.065 269.31;--nc:94.51% 0.179 104.32;--b1:94.51% 0.179 104.32;--rounded-box:0;--rounded-btn:0;--rounded-badge:0;--tab-radius:0}:root:has(input.theme-controller[value=cyberpunk]:checked){color-scheme:light;--b2:87.8943% 0.16647 104.32;--b3:81.2786% 0.15394 104.32;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--bc:18.902% 0.0358 104.32;--pc:14.844% 0.0418 6.35;--sc:16.666% 0.0368 204.72;--ac:14.372% 0.04352 310.43;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;--p:74.22% 0.209 6.35;--s:83.33% 0.184 204.72;--a:71.86% 0.2176 310.43;--n:23.04% 0.065 269.31;--nc:94.51% 0.179 104.32;--b1:94.51% 0.179 104.32;--rounded-box:0;--rounded-btn:0;--rounded-badge:0;--tab-radius:0}[data-theme=valentine]{color-scheme:light;--b2:88.0567% 0.024834 337.06289;--b3:81.4288% 0.022964 337.06289;--pc:13.7239% 0.030755 15.066527;--sc:14.3942% 0.029258 293.189609;--ac:14.2537% 0.014961 197.828857;--inc:90.923% 0.043042 262.880917;--suc:12.541% 0.033982 149.213788;--wac:13.3168% 0.031484 58.31834;--erc:14.614% 0.0414 27.33;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:68.6197% 0.153774 15.066527;--s:71.971% 0.14629 293.189609;--a:71.2685% 0.074804 197.828857;--n:54.6053% 0.143342 358.004839;--nc:90.2701% 0.037202 336.955191;--b1:94.6846% 0.026703 337.06289;--bc:37.3085% 0.081131 4.606426;--in:54.615% 0.215208 262.880917;--su:62.7052% 0.169912 149.213788;--wa:66.584% 0.157422 58.31834;--er:73.07% 0.207 27.33;--rounded-btn:1.9rem;--tab-radius:0.7rem}:root:has(input.theme-controller[value=valentine]:checked){color-scheme:light;--b2:88.0567% 0.024834 337.06289;--b3:81.4288% 
0.022964 337.06289;--pc:13.7239% 0.030755 15.066527;--sc:14.3942% 0.029258 293.189609;--ac:14.2537% 0.014961 197.828857;--inc:90.923% 0.043042 262.880917;--suc:12.541% 0.033982 149.213788;--wac:13.3168% 0.031484 58.31834;--erc:14.614% 0.0414 27.33;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:68.6197% 0.153774 15.066527;--s:71.971% 0.14629 293.189609;--a:71.2685% 0.074804 197.828857;--n:54.6053% 0.143342 358.004839;--nc:90.2701% 0.037202 336.955191;--b1:94.6846% 0.026703 337.06289;--bc:37.3085% 0.081131 4.606426;--in:54.615% 0.215208 262.880917;--su:62.7052% 0.169912 149.213788;--wa:66.584% 0.157422 58.31834;--er:73.07% 0.207 27.33;--rounded-btn:1.9rem;--tab-radius:0.7rem}[data-theme=halloween]{color-scheme:dark;--b2:23.0416% 0 0;--b3:21.3072% 0 0;--bc:84.9552% 0 0;--sc:89.196% 0.0496 305.03;--nc:84.8742% 0.009322 65.681484;--inc:90.923% 0.043042 262.880917;--suc:12.541% 0.033982 149.213788;--wac:13.3168% 0.031484 58.31834;--erc:13.144% 0.0398 27.33;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:77.48% 0.204 60.62;--pc:19.6935% 0.004671 196.779412;--s:45.98% 0.248 305.03;--a:64.8% 0.223 136.073479;--ac:0% 0 0;--n:24.371% 0.046608 65.681484;--b1:24.7759% 0 0;--in:54.615% 0.215208 262.880917;--su:62.7052% 0.169912 149.213788;--wa:66.584% 0.157422 58.31834;--er:65.72% 0.199 27.33}:root:has(input.theme-controller[value=halloween]:checked){color-scheme:dark;--b2:23.0416% 0 0;--b3:21.3072% 0 0;--bc:84.9552% 0 0;--sc:89.196% 0.0496 305.03;--nc:84.8742% 0.009322 65.681484;--inc:90.923% 0.043042 262.880917;--suc:12.541% 0.033982 149.213788;--wac:13.3168% 0.031484 58.31834;--erc:13.144% 0.0398 27.33;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:77.48% 0.204 60.62;--pc:19.6935% 0.004671 196.779412;--s:45.98% 0.248 305.03;--a:64.8% 0.223 136.073479;--ac:0% 0 0;--n:24.371% 0.046608 65.681484;--b1:24.7759% 0 0;--in:54.615% 0.215208 262.880917;--su:62.7052% 0.169912 149.213788;--wa:66.584% 0.157422 58.31834;--er:65.72% 0.199 27.33}[data-theme=garden]{color-scheme:light;--b2:86.4453% 0.002011 17.197414;--b3:79.9386% 0.00186 17.197414;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--sc:89.699% 0.022197 355.095988;--ac:11.2547% 0.010859 154.390187;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:62.45% 0.278 3.83636;--pc:100% 0 0;--s:48.4952% 0.110985 355.095988;--a:56.2735% 0.054297 154.390187;--n:24.1559% 0.049362 89.070594;--nc:92.9519% 0.002163 17.197414;--b1:92.9519% 0.002163 17.197414;--bc:16.9617% 0.001664 17.32068}:root:has(input.theme-controller[value=garden]:checked){color-scheme:light;--b2:86.4453% 0.002011 17.197414;--b3:79.9386% 0.00186 17.197414;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--sc:89.699% 0.022197 355.095988;--ac:11.2547% 0.010859 154.390187;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 
0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:62.45% 0.278 3.83636;--pc:100% 0 0;--s:48.4952% 0.110985 355.095988;--a:56.2735% 0.054297 154.390187;--n:24.1559% 0.049362 89.070594;--nc:92.9519% 0.002163 17.197414;--b1:92.9519% 0.002163 17.197414;--bc:16.9617% 0.001664 17.32068}[data-theme=forest]{color-scheme:dark;--b2:17.522% 0.007709 17.911578;--b3:16.2032% 0.007129 17.911578;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--bc:83.7682% 0.001658 17.911578;--sc:13.9553% 0.027077 168.327128;--ac:14.1257% 0.02389 185.713193;--nc:86.1397% 0.007806 171.364646;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:68.6283% 0.185567 148.958922;--pc:0% 0 0;--s:69.7764% 0.135385 168.327128;--a:70.6285% 0.119451 185.713193;--n:30.6985% 0.039032 171.364646;--b1:18.8409% 0.00829 17.911578;--rounded-btn:1.9rem}:root:has(input.theme-controller[value=forest]:checked){color-scheme:dark;--b2:17.522% 0.007709 17.911578;--b3:16.2032% 0.007129 17.911578;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--bc:83.7682% 0.001658 17.911578;--sc:13.9553% 0.027077 168.327128;--ac:14.1257% 0.02389 185.713193;--nc:86.1397% 0.007806 171.364646;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:68.6283% 0.185567 148.958922;--pc:0% 0 0;--s:69.7764% 0.135385 168.327128;--a:70.6285% 0.119451 185.713193;--n:30.6985% 0.039032 171.364646;--b1:18.8409% 0.00829 17.911578;--rounded-btn:1.9rem}[data-theme=aqua]{color-scheme:dark;--b2:45.3464% 0.118611 261.181672;--b3:41.9333% 0.109683 261.181672;--bc:89.7519% 0.025508 261.181672;--sc:12.1365% 0.02175 309.782946;--ac:18.6854% 0.020445 94.555431;--nc:12.2124% 0.023402 243.760661;--inc:90.923% 0.043042 262.880917;--suc:12.541% 0.033982 149.213788;--wac:13.3168% 0.031484 58.31834;--erc:14.79% 0.038 27.33;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:85.6617% 0.14498 198.6458;--pc:40.1249% 0.068266 197.603872;--s:60.6827% 0.108752 309.782946;--a:93.4269% 0.102225 94.555431;--n:61.0622% 0.117009 243.760661;--b1:48.7596% 0.127539 261.181672;--in:54.615% 0.215208 262.880917;--su:62.7052% 0.169912 149.213788;--wa:66.584% 0.157422 58.31834;--er:73.95% 0.19 27.33}:root:has(input.theme-controller[value=aqua]:checked){color-scheme:dark;--b2:45.3464% 0.118611 261.181672;--b3:41.9333% 0.109683 261.181672;--bc:89.7519% 0.025508 261.181672;--sc:12.1365% 0.02175 309.782946;--ac:18.6854% 0.020445 94.555431;--nc:12.2124% 0.023402 243.760661;--inc:90.923% 0.043042 262.880917;--suc:12.541% 0.033982 149.213788;--wac:13.3168% 0.031484 58.31834;--erc:14.79% 0.038 27.33;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:85.6617% 0.14498 198.6458;--pc:40.1249% 0.068266 197.603872;--s:60.6827% 0.108752 309.782946;--a:93.4269% 0.102225 94.555431;--n:61.0622% 0.117009 
243.760661;--b1:48.7596% 0.127539 261.181672;--in:54.615% 0.215208 262.880917;--su:62.7052% 0.169912 149.213788;--wa:66.584% 0.157422 58.31834;--er:73.95% 0.19 27.33}[data-theme=lofi]{color-scheme:light;--inc:15.908% 0.0206 205.9;--suc:18.026% 0.0306 164.14;--wac:17.674% 0.027 79.94;--erc:15.732% 0.03 28.47;--border-btn:1px;--tab-border:1px;--p:15.9066% 0 0;--pc:100% 0 0;--s:21.455% 0.001566 17.278957;--sc:100% 0 0;--a:26.8618% 0 0;--ac:100% 0 0;--n:0% 0 0;--nc:100% 0 0;--b1:100% 0 0;--b2:96.1151% 0 0;--b3:92.268% 0.001082 17.17934;--bc:0% 0 0;--in:79.54% 0.103 205.9;--su:90.13% 0.153 164.14;--wa:88.37% 0.135 79.94;--er:78.66% 0.15 28.47;--rounded-box:0.25rem;--rounded-btn:0.125rem;--rounded-badge:0.125rem;--tab-radius:0.125rem;--animation-btn:0;--animation-input:0;--btn-focus-scale:1}:root:has(input.theme-controller[value=lofi]:checked){color-scheme:light;--inc:15.908% 0.0206 205.9;--suc:18.026% 0.0306 164.14;--wac:17.674% 0.027 79.94;--erc:15.732% 0.03 28.47;--border-btn:1px;--tab-border:1px;--p:15.9066% 0 0;--pc:100% 0 0;--s:21.455% 0.001566 17.278957;--sc:100% 0 0;--a:26.8618% 0 0;--ac:100% 0 0;--n:0% 0 0;--nc:100% 0 0;--b1:100% 0 0;--b2:96.1151% 0 0;--b3:92.268% 0.001082 17.17934;--bc:0% 0 0;--in:79.54% 0.103 205.9;--su:90.13% 0.153 164.14;--wa:88.37% 0.135 79.94;--er:78.66% 0.15 28.47;--rounded-box:0.25rem;--rounded-btn:0.125rem;--rounded-badge:0.125rem;--tab-radius:0.125rem;--animation-btn:0;--animation-input:0;--btn-focus-scale:1}[data-theme=pastel]{color-scheme:light;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--bc:20% 0 0;--pc:16.6166% 0.006979 316.8737;--sc:17.6153% 0.009839 8.688364;--ac:17.8419% 0.012056 170.923263;--nc:14.2681% 0.014702 228.183906;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:83.0828% 0.034896 316.8737;--s:88.0763% 0.049197 8.688364;--a:89.2096% 0.06028 170.923263;--n:71.3406% 0.07351 228.183906;--b1:100% 0 0;--b2:98.4625% 0.001706 247.838921;--b3:87.1681% 0.009339 258.338227;--rounded-btn:1.9rem;--tab-radius:0.7rem}:root:has(input.theme-controller[value=pastel]:checked){color-scheme:light;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--bc:20% 0 0;--pc:16.6166% 0.006979 316.8737;--sc:17.6153% 0.009839 8.688364;--ac:17.8419% 0.012056 170.923263;--nc:14.2681% 0.014702 228.183906;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:83.0828% 0.034896 316.8737;--s:88.0763% 0.049197 8.688364;--a:89.2096% 0.06028 170.923263;--n:71.3406% 0.07351 228.183906;--b1:100% 0 0;--b2:98.4625% 0.001706 247.838921;--b3:87.1681% 0.009339 258.338227;--rounded-btn:1.9rem;--tab-radius:0.7rem}[data-theme=fantasy]{color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:87.49% 0.0378 325.02;--sc:90.784% 0.0324 241.36;--ac:15.196% 0.0408 56.72;--nc:85.5616% 0.005919 256.847952;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:37.45% 0.189 325.02;--s:53.92% 0.162 241.36;--a:75.98% 0.204 56.72;--n:27.8078% 0.029596 
256.847952;--b1:100% 0 0;--bc:27.8078% 0.029596 256.847952}:root:has(input.theme-controller[value=fantasy]:checked){color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--in:72.06% 0.191 231.6;--su:64.8% 0.150 160;--wa:84.71% 0.199 83.87;--er:71.76% 0.221 22.18;--pc:87.49% 0.0378 325.02;--sc:90.784% 0.0324 241.36;--ac:15.196% 0.0408 56.72;--nc:85.5616% 0.005919 256.847952;--inc:0% 0 0;--suc:0% 0 0;--wac:0% 0 0;--erc:0% 0 0;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:37.45% 0.189 325.02;--s:53.92% 0.162 241.36;--a:75.98% 0.204 56.72;--n:27.8078% 0.029596 256.847952;--b1:100% 0 0;--bc:27.8078% 0.029596 256.847952}[data-theme=wireframe]{color-scheme:light;--bc:20% 0 0;--pc:15.6521% 0 0;--sc:15.6521% 0 0;--ac:15.6521% 0 0;--nc:18.8014% 0 0;--inc:89.0403% 0.062643 264.052021;--suc:90.395% 0.035372 142.495339;--wac:14.1626% 0.019994 108.702381;--erc:12.5591% 0.051537 29.233885;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;font-family:Chalkboard,comic sans ms,sans-serif;--p:78.2604% 0 0;--s:78.2604% 0 0;--a:78.2604% 0 0;--n:94.007% 0 0;--b1:100% 0 0;--b2:94.9119% 0 0;--b3:89.7547% 0 0;--in:45.2014% 0.313214 264.052021;--su:51.9752% 0.176858 142.495339;--wa:70.8131% 0.099969 108.702381;--er:62.7955% 0.257683 29.233885;--rounded-box:0.2rem;--rounded-btn:0.2rem;--rounded-badge:0.2rem;--tab-radius:0.2rem}:root:has(input.theme-controller[value=wireframe]:checked){color-scheme:light;--bc:20% 0 0;--pc:15.6521% 0 0;--sc:15.6521% 0 0;--ac:15.6521% 0 0;--nc:18.8014% 0 0;--inc:89.0403% 0.062643 264.052021;--suc:90.395% 0.035372 142.495339;--wac:14.1626% 0.019994 108.702381;--erc:12.5591% 0.051537 29.233885;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;font-family:Chalkboard,comic sans ms,sans-serif;--p:78.2604% 0 0;--s:78.2604% 0 0;--a:78.2604% 0 0;--n:94.007% 0 0;--b1:100% 0 0;--b2:94.9119% 0 0;--b3:89.7547% 0 0;--in:45.2014% 0.313214 264.052021;--su:51.9752% 0.176858 142.495339;--wa:70.8131% 0.099969 108.702381;--er:62.7955% 0.257683 29.233885;--rounded-box:0.2rem;--rounded-btn:0.2rem;--rounded-badge:0.2rem;--tab-radius:0.2rem}[data-theme=black]{color-scheme:dark;--pc:86.736% 0 0;--sc:86.736% 0 0;--ac:86.736% 0 0;--nc:86.736% 0 0;--inc:89.0403% 0.062643 264.052021;--suc:90.395% 0.035372 142.495339;--wac:19.3597% 0.042201 109.769232;--erc:12.5591% 0.051537 29.233885;--border-btn:1px;--tab-border:1px;--p:33.6799% 0 0;--s:33.6799% 0 0;--a:33.6799% 0 0;--b1:0% 0 0;--b2:19.1251% 0 0;--b3:26.8618% 0 0;--bc:87.6096% 0 0;--n:33.6799% 0 0;--in:45.2014% 0.313214 264.052021;--su:51.9752% 0.176858 142.495339;--wa:96.7983% 0.211006 109.769232;--er:62.7955% 0.257683 29.233885;--rounded-box:0;--rounded-btn:0;--rounded-badge:0;--animation-btn:0;--animation-input:0;--btn-focus-scale:1;--tab-radius:0}:root:has(input.theme-controller[value=black]:checked){color-scheme:dark;--pc:86.736% 0 0;--sc:86.736% 0 0;--ac:86.736% 0 0;--nc:86.736% 0 0;--inc:89.0403% 0.062643 264.052021;--suc:90.395% 0.035372 142.495339;--wac:19.3597% 0.042201 109.769232;--erc:12.5591% 0.051537 29.233885;--border-btn:1px;--tab-border:1px;--p:33.6799% 0 0;--s:33.6799% 0 0;--a:33.6799% 0 0;--b1:0% 0 0;--b2:19.1251% 0 0;--b3:26.8618% 0 0;--bc:87.6096% 0 0;--n:33.6799% 0 0;--in:45.2014% 0.313214 264.052021;--su:51.9752% 0.176858 142.495339;--wa:96.7983% 0.211006 109.769232;--er:62.7955% 0.257683 
29.233885;--rounded-box:0;--rounded-btn:0;--rounded-badge:0;--animation-btn:0;--animation-input:0;--btn-focus-scale:1;--tab-radius:0}[data-theme=luxury]{color-scheme:dark;--pc:20% 0 0;--sc:85.5163% 0.012821 261.069149;--ac:87.3349% 0.010348 338.82597;--inc:15.8122% 0.024356 237.133883;--suc:15.6239% 0.038579 132.154381;--wac:17.2255% 0.027305 102.89115;--erc:14.3506% 0.035271 22.568916;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:100% 0 0;--s:27.5815% 0.064106 261.069149;--a:36.6744% 0.051741 338.82597;--n:24.27% 0.057015 59.825019;--nc:93.2033% 0.089631 90.861683;--b1:14.0765% 0.004386 285.822869;--b2:20.2191% 0.004211 308.22937;--b3:29.8961% 0.003818 308.318612;--bc:75.6879% 0.123666 76.890484;--in:79.0612% 0.121778 237.133883;--su:78.1197% 0.192894 132.154381;--wa:86.1274% 0.136524 102.89115;--er:71.7531% 0.176357 22.568916}:root:has(input.theme-controller[value=luxury]:checked){color-scheme:dark;--pc:20% 0 0;--sc:85.5163% 0.012821 261.069149;--ac:87.3349% 0.010348 338.82597;--inc:15.8122% 0.024356 237.133883;--suc:15.6239% 0.038579 132.154381;--wac:17.2255% 0.027305 102.89115;--erc:14.3506% 0.035271 22.568916;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:100% 0 0;--s:27.5815% 0.064106 261.069149;--a:36.6744% 0.051741 338.82597;--n:24.27% 0.057015 59.825019;--nc:93.2033% 0.089631 90.861683;--b1:14.0765% 0.004386 285.822869;--b2:20.2191% 0.004211 308.22937;--b3:29.8961% 0.003818 308.318612;--bc:75.6879% 0.123666 76.890484;--in:79.0612% 0.121778 237.133883;--su:78.1197% 0.192894 132.154381;--wa:86.1274% 0.136524 102.89115;--er:71.7531% 0.176357 22.568916}[data-theme=dracula]{color-scheme:dark;--b2:26.8053% 0.020556 277.508664;--b3:24.7877% 0.019009 277.508664;--pc:15.0922% 0.036614 346.812432;--sc:14.8405% 0.029709 301.883095;--ac:16.6785% 0.024826 66.558491;--nc:87.8891% 0.006515 275.524078;--inc:17.6526% 0.018676 212.846491;--suc:17.4199% 0.043903 148.024881;--wac:19.1068% 0.026849 112.757109;--erc:13.6441% 0.041266 24.430965;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:75.4611% 0.18307 346.812432;--s:74.2023% 0.148546 301.883095;--a:83.3927% 0.124132 66.558491;--n:39.4456% 0.032576 275.524078;--b1:28.8229% 0.022103 277.508664;--bc:97.7477% 0.007913 106.545019;--in:88.263% 0.09338 212.846491;--su:87.0995% 0.219516 148.024881;--wa:95.5338% 0.134246 112.757109;--er:68.2204% 0.206328 24.430965}:root:has(input.theme-controller[value=dracula]:checked){color-scheme:dark;--b2:26.8053% 0.020556 277.508664;--b3:24.7877% 0.019009 277.508664;--pc:15.0922% 0.036614 346.812432;--sc:14.8405% 0.029709 301.883095;--ac:16.6785% 0.024826 66.558491;--nc:87.8891% 0.006515 275.524078;--inc:17.6526% 0.018676 212.846491;--suc:17.4199% 0.043903 148.024881;--wac:19.1068% 0.026849 112.757109;--erc:13.6441% 0.041266 24.430965;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:75.4611% 0.18307 346.812432;--s:74.2023% 0.148546 301.883095;--a:83.3927% 0.124132 66.558491;--n:39.4456% 0.032576 275.524078;--b1:28.8229% 0.022103 277.508664;--bc:97.7477% 0.007913 
106.545019;--in:88.263% 0.09338 212.846491;--su:87.0995% 0.219516 148.024881;--wa:95.5338% 0.134246 112.757109;--er:68.2204% 0.206328 24.430965}[data-theme=cmyk]{color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--bc:20% 0 0;--pc:14.3544% 0.02666 239.443325;--sc:12.8953% 0.040552 359.339283;--ac:18.8458% 0.037948 105.306968;--nc:84.3557% 0 0;--inc:13.6952% 0.0189 217.284104;--suc:89.3898% 0.032505 321.406278;--wac:14.2473% 0.031969 52.023412;--erc:12.4027% 0.041677 28.717543;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:71.7722% 0.133298 239.443325;--s:64.4766% 0.202758 359.339283;--a:94.2289% 0.189741 105.306968;--n:21.7787% 0 0;--b1:100% 0 0;--in:68.4759% 0.094499 217.284104;--su:46.949% 0.162524 321.406278;--wa:71.2364% 0.159843 52.023412;--er:62.0133% 0.208385 28.717543}:root:has(input.theme-controller[value=cmyk]:checked){color-scheme:light;--b2:93% 0 0;--b3:86% 0 0;--bc:20% 0 0;--pc:14.3544% 0.02666 239.443325;--sc:12.8953% 0.040552 359.339283;--ac:18.8458% 0.037948 105.306968;--nc:84.3557% 0 0;--inc:13.6952% 0.0189 217.284104;--suc:89.3898% 0.032505 321.406278;--wac:14.2473% 0.031969 52.023412;--erc:12.4027% 0.041677 28.717543;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:71.7722% 0.133298 239.443325;--s:64.4766% 0.202758 359.339283;--a:94.2289% 0.189741 105.306968;--n:21.7787% 0 0;--b1:100% 0 0;--in:68.4759% 0.094499 217.284104;--su:46.949% 0.162524 321.406278;--wa:71.2364% 0.159843 52.023412;--er:62.0133% 0.208385 28.717543}[data-theme=autumn]{color-scheme:light;--b2:89.1077% 0 0;--b3:82.4006% 0 0;--bc:19.1629% 0 0;--pc:88.1446% 0.032232 17.530175;--sc:12.3353% 0.033821 23.865865;--ac:14.6851% 0.018999 60.729616;--nc:90.8734% 0.007475 51.902819;--inc:13.8449% 0.019596 207.284192;--suc:12.199% 0.016032 174.616213;--wac:14.0163% 0.032982 56.844303;--erc:90.614% 0.0482 24.16;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:40.7232% 0.16116 17.530175;--s:61.6763% 0.169105 23.865865;--a:73.4253% 0.094994 60.729616;--n:54.3672% 0.037374 51.902819;--b1:95.8147% 0 0;--in:69.2245% 0.097979 207.284192;--su:60.9951% 0.080159 174.616213;--wa:70.0817% 0.164909 56.844303;--er:53.07% 0.241 24.16}:root:has(input.theme-controller[value=autumn]:checked){color-scheme:light;--b2:89.1077% 0 0;--b3:82.4006% 0 0;--bc:19.1629% 0 0;--pc:88.1446% 0.032232 17.530175;--sc:12.3353% 0.033821 23.865865;--ac:14.6851% 0.018999 60.729616;--nc:90.8734% 0.007475 51.902819;--inc:13.8449% 0.019596 207.284192;--suc:12.199% 0.016032 174.616213;--wac:14.0163% 0.032982 56.844303;--erc:90.614% 0.0482 24.16;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:40.7232% 0.16116 17.530175;--s:61.6763% 0.169105 23.865865;--a:73.4253% 0.094994 60.729616;--n:54.3672% 0.037374 51.902819;--b1:95.8147% 0 0;--in:69.2245% 0.097979 207.284192;--su:60.9951% 0.080159 174.616213;--wa:70.0817% 0.164909 56.844303;--er:53.07% 0.241 24.16}[data-theme=business]{color-scheme:dark;--b2:22.6487% 0 0;--b3:20.944% 0 0;--bc:84.8707% 0 0;--pc:88.3407% 0.019811 251.473931;--sc:12.8185% 0.005481 
229.389418;--ac:13.4542% 0.033545 35.791525;--nc:85.4882% 0.00265 253.041249;--inc:12.5233% 0.028702 240.033697;--suc:14.0454% 0.018919 156.59611;--wac:15.4965% 0.023141 81.519177;--erc:90.3221% 0.029356 29.674507;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:41.7036% 0.099057 251.473931;--s:64.0924% 0.027405 229.389418;--a:67.271% 0.167726 35.791525;--n:27.441% 0.01325 253.041249;--b1:24.3535% 0 0;--in:62.6163% 0.143511 240.033697;--su:70.2268% 0.094594 156.59611;--wa:77.4824% 0.115704 81.519177;--er:51.6105% 0.14678 29.674507;--rounded-box:0.25rem;--rounded-btn:.125rem;--rounded-badge:.125rem}:root:has(input.theme-controller[value=business]:checked){color-scheme:dark;--b2:22.6487% 0 0;--b3:20.944% 0 0;--bc:84.8707% 0 0;--pc:88.3407% 0.019811 251.473931;--sc:12.8185% 0.005481 229.389418;--ac:13.4542% 0.033545 35.791525;--nc:85.4882% 0.00265 253.041249;--inc:12.5233% 0.028702 240.033697;--suc:14.0454% 0.018919 156.59611;--wac:15.4965% 0.023141 81.519177;--erc:90.3221% 0.029356 29.674507;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:41.7036% 0.099057 251.473931;--s:64.0924% 0.027405 229.389418;--a:67.271% 0.167726 35.791525;--n:27.441% 0.01325 253.041249;--b1:24.3535% 0 0;--in:62.6163% 0.143511 240.033697;--su:70.2268% 0.094594 156.59611;--wa:77.4824% 0.115704 81.519177;--er:51.6105% 0.14678 29.674507;--rounded-box:0.25rem;--rounded-btn:.125rem;--rounded-badge:.125rem}[data-theme=acid]{color-scheme:light;--b2:91.6146% 0 0;--b3:84.7189% 0 0;--bc:19.7021% 0 0;--pc:14.38% 0.0714 330.759573;--sc:14.674% 0.0448 48.250878;--ac:18.556% 0.0528 122.962951;--nc:84.262% 0.0256 278.68;--inc:12.144% 0.0454 252.05;--suc:17.144% 0.0532 158.53;--wac:18.202% 0.0424 100.5;--erc:12.968% 0.0586 29.349188;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:71.9% 0.357 330.759573;--s:73.37% 0.224 48.250878;--a:92.78% 0.264 122.962951;--n:21.31% 0.128 278.68;--b1:98.5104% 0 0;--in:60.72% 0.227 252.05;--su:85.72% 0.266 158.53;--wa:91.01% 0.212 100.5;--er:64.84% 0.293 29.349188;--rounded-box:1.25rem;--rounded-btn:1rem;--rounded-badge:1rem;--tab-radius:0.7rem}:root:has(input.theme-controller[value=acid]:checked){color-scheme:light;--b2:91.6146% 0 0;--b3:84.7189% 0 0;--bc:19.7021% 0 0;--pc:14.38% 0.0714 330.759573;--sc:14.674% 0.0448 48.250878;--ac:18.556% 0.0528 122.962951;--nc:84.262% 0.0256 278.68;--inc:12.144% 0.0454 252.05;--suc:17.144% 0.0532 158.53;--wac:18.202% 0.0424 100.5;--erc:12.968% 0.0586 29.349188;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:71.9% 0.357 330.759573;--s:73.37% 0.224 48.250878;--a:92.78% 0.264 122.962951;--n:21.31% 0.128 278.68;--b1:98.5104% 0 0;--in:60.72% 0.227 252.05;--su:85.72% 0.266 158.53;--wa:91.01% 0.212 100.5;--er:64.84% 0.293 29.349188;--rounded-box:1.25rem;--rounded-btn:1rem;--rounded-badge:1rem;--tab-radius:0.7rem}[data-theme=lemonade]{color-scheme:light;--b2:91.8003% 0.0186 123.72;--b3:84.8906% 0.0172 123.72;--bc:19.742% 0.004 123.72;--pc:11.784% 0.0398 134.6;--sc:15.55% 0.0392 111.09;--ac:17.078% 0.0402 100.73;--nc:86.196% 0.015 108.6;--inc:17.238% 0.0094 224.14;--suc:17.238% 0.0094 157.85;--wac:17.238% 0.0094 102.15;--erc:17.238% 0.0094 
25.85;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:58.92% 0.199 134.6;--s:77.75% 0.196 111.09;--a:85.39% 0.201 100.73;--n:30.98% 0.075 108.6;--b1:98.71% 0.02 123.72;--in:86.19% 0.047 224.14;--su:86.19% 0.047 157.85;--wa:86.19% 0.047 102.15;--er:86.19% 0.047 25.85}:root:has(input.theme-controller[value=lemonade]:checked){color-scheme:light;--b2:91.8003% 0.0186 123.72;--b3:84.8906% 0.0172 123.72;--bc:19.742% 0.004 123.72;--pc:11.784% 0.0398 134.6;--sc:15.55% 0.0392 111.09;--ac:17.078% 0.0402 100.73;--nc:86.196% 0.015 108.6;--inc:17.238% 0.0094 224.14;--suc:17.238% 0.0094 157.85;--wac:17.238% 0.0094 102.15;--erc:17.238% 0.0094 25.85;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:58.92% 0.199 134.6;--s:77.75% 0.196 111.09;--a:85.39% 0.201 100.73;--n:30.98% 0.075 108.6;--b1:98.71% 0.02 123.72;--in:86.19% 0.047 224.14;--su:86.19% 0.047 157.85;--wa:86.19% 0.047 102.15;--er:86.19% 0.047 25.85}[data-theme=night]{color-scheme:dark;--b2:19.3144% 0.037037 265.754874;--b3:17.8606% 0.034249 265.754874;--bc:84.1536% 0.007965 265.754874;--pc:15.0703% 0.027798 232.66148;--sc:13.6023% 0.031661 276.934902;--ac:14.4721% 0.035244 350.048739;--nc:85.5899% 0.00737 260.030984;--suc:15.6904% 0.026506 181.911977;--wac:16.6486% 0.027912 82.95003;--erc:14.3572% 0.034051 13.11834;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:75.3513% 0.138989 232.66148;--s:68.0113% 0.158303 276.934902;--a:72.3603% 0.176218 350.048739;--n:27.9495% 0.036848 260.030984;--b1:20.7682% 0.039824 265.754874;--in:68.4553% 0.148062 237.25135;--inc:0% 0 0;--su:78.452% 0.132529 181.911977;--wa:83.2428% 0.139558 82.95003;--er:71.7858% 0.170255 13.11834}:root:has(input.theme-controller[value=night]:checked){color-scheme:dark;--b2:19.3144% 0.037037 265.754874;--b3:17.8606% 0.034249 265.754874;--bc:84.1536% 0.007965 265.754874;--pc:15.0703% 0.027798 232.66148;--sc:13.6023% 0.031661 276.934902;--ac:14.4721% 0.035244 350.048739;--nc:85.5899% 0.00737 260.030984;--suc:15.6904% 0.026506 181.911977;--wac:16.6486% 0.027912 82.95003;--erc:14.3572% 0.034051 13.11834;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:75.3513% 0.138989 232.66148;--s:68.0113% 0.158303 276.934902;--a:72.3603% 0.176218 350.048739;--n:27.9495% 0.036848 260.030984;--b1:20.7682% 0.039824 265.754874;--in:68.4553% 0.148062 237.25135;--inc:0% 0 0;--su:78.452% 0.132529 181.911977;--wa:83.2428% 0.139558 82.95003;--er:71.7858% 0.170255 13.11834}[data-theme=coffee]{color-scheme:dark;--b2:20.1585% 0.021457 329.708637;--b3:18.6412% 0.019842 329.708637;--pc:14.3993% 0.024765 62.756393;--sc:86.893% 0.00597 199.19444;--ac:88.5243% 0.014881 224.389184;--nc:83.3022% 0.003149 326.261446;--inc:15.898% 0.012774 184.558367;--suc:14.9445% 0.014491 131.116276;--wac:17.6301% 0.028162 87.722413;--erc:15.4637% 0.025644 31.871922;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:71.9967% 0.123825 62.756393;--s:34.465% 
0.029849 199.19444;--a:42.6213% 0.074405 224.389184;--n:16.5109% 0.015743 326.261446;--b1:21.6758% 0.023072 329.708637;--bc:72.3547% 0.092794 79.129387;--in:79.4902% 0.063869 184.558367;--su:74.7224% 0.072456 131.116276;--wa:88.1503% 0.140812 87.722413;--er:77.3187% 0.12822 31.871922}:root:has(input.theme-controller[value=coffee]:checked){color-scheme:dark;--b2:20.1585% 0.021457 329.708637;--b3:18.6412% 0.019842 329.708637;--pc:14.3993% 0.024765 62.756393;--sc:86.893% 0.00597 199.19444;--ac:88.5243% 0.014881 224.389184;--nc:83.3022% 0.003149 326.261446;--inc:15.898% 0.012774 184.558367;--suc:14.9445% 0.014491 131.116276;--wac:17.6301% 0.028162 87.722413;--erc:15.4637% 0.025644 31.871922;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:71.9967% 0.123825 62.756393;--s:34.465% 0.029849 199.19444;--a:42.6213% 0.074405 224.389184;--n:16.5109% 0.015743 326.261446;--b1:21.6758% 0.023072 329.708637;--bc:72.3547% 0.092794 79.129387;--in:79.4902% 0.063869 184.558367;--su:74.7224% 0.072456 131.116276;--wa:88.1503% 0.140812 87.722413;--er:77.3187% 0.12822 31.871922}[data-theme=winter]{color-scheme:light;--pc:91.372% 0.051 257.57;--sc:88.5103% 0.03222 282.339433;--ac:11.988% 0.038303 335.171434;--nc:83.9233% 0.012704 257.651965;--inc:17.6255% 0.017178 214.515264;--suc:16.0988% 0.015404 197.823719;--wac:17.8345% 0.009167 71.47031;--erc:14.6185% 0.022037 20.076293;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:56.86% 0.255 257.57;--s:42.5516% 0.161098 282.339433;--a:59.9398% 0.191515 335.171434;--n:19.6166% 0.063518 257.651965;--b1:100% 0 0;--b2:97.4663% 0.011947 259.822565;--b3:93.2686% 0.016223 262.751375;--bc:41.8869% 0.053885 255.824911;--in:88.1275% 0.085888 214.515264;--su:80.4941% 0.077019 197.823719;--wa:89.1725% 0.045833 71.47031;--er:73.0926% 0.110185 20.076293}:root:has(input.theme-controller[value=winter]:checked){color-scheme:light;--pc:91.372% 0.051 257.57;--sc:88.5103% 0.03222 282.339433;--ac:11.988% 0.038303 335.171434;--nc:83.9233% 0.012704 257.651965;--inc:17.6255% 0.017178 214.515264;--suc:16.0988% 0.015404 197.823719;--wac:17.8345% 0.009167 71.47031;--erc:14.6185% 0.022037 20.076293;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:56.86% 0.255 257.57;--s:42.5516% 0.161098 282.339433;--a:59.9398% 0.191515 335.171434;--n:19.6166% 0.063518 257.651965;--b1:100% 0 0;--b2:97.4663% 0.011947 259.822565;--b3:93.2686% 0.016223 262.751375;--bc:41.8869% 0.053885 255.824911;--in:88.1275% 0.085888 214.515264;--su:80.4941% 0.077019 197.823719;--wa:89.1725% 0.045833 71.47031;--er:73.0926% 0.110185 20.076293}[data-theme=dim]{color-scheme:dark;--pc:17.2267% 0.028331 139.549991;--sc:14.6752% 0.033181 35.353059;--ac:14.8459% 0.026728 311.37924;--inc:17.2157% 0.028409 206.182959;--suc:17.2343% 0.028437 166.534048;--wac:17.2327% 0.028447 94.818679;--erc:16.4838% 0.019914 33.756357;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:86.1335% 0.141656 139.549991;--s:73.3759% 0.165904 35.353059;--a:74.2296% 0.133641 311.37924;--n:24.7311% 0.020483 
264.094728;--nc:82.9011% 0.031335 222.959324;--b1:30.8577% 0.023243 264.149498;--b2:28.0368% 0.01983 264.182074;--b3:26.3469% 0.018403 262.177739;--bc:82.9011% 0.031335 222.959324;--in:86.0785% 0.142046 206.182959;--su:86.1717% 0.142187 166.534048;--wa:86.1634% 0.142236 94.818679;--er:82.4189% 0.09957 33.756357}:root:has(input.theme-controller[value=dim]:checked){color-scheme:dark;--pc:17.2267% 0.028331 139.549991;--sc:14.6752% 0.033181 35.353059;--ac:14.8459% 0.026728 311.37924;--inc:17.2157% 0.028409 206.182959;--suc:17.2343% 0.028437 166.534048;--wac:17.2327% 0.028447 94.818679;--erc:16.4838% 0.019914 33.756357;--rounded-box:1rem;--rounded-btn:0.5rem;--rounded-badge:1.9rem;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--tab-radius:0.5rem;--p:86.1335% 0.141656 139.549991;--s:73.3759% 0.165904 35.353059;--a:74.2296% 0.133641 311.37924;--n:24.7311% 0.020483 264.094728;--nc:82.9011% 0.031335 222.959324;--b1:30.8577% 0.023243 264.149498;--b2:28.0368% 0.01983 264.182074;--b3:26.3469% 0.018403 262.177739;--bc:82.9011% 0.031335 222.959324;--in:86.0785% 0.142046 206.182959;--su:86.1717% 0.142187 166.534048;--wa:86.1634% 0.142236 94.818679;--er:82.4189% 0.09957 33.756357}[data-theme=nord]{color-scheme:light;--pc:11.8872% 0.015449 254.027774;--sc:13.9303% 0.011822 248.687186;--ac:15.4929% 0.01245 217.469017;--inc:13.8414% 0.012499 332.664922;--suc:15.3654% 0.01498 131.063061;--wac:17.0972% 0.017847 84.093335;--erc:12.122% 0.024119 15.341883;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:59.4359% 0.077246 254.027774;--s:69.6516% 0.059108 248.687186;--a:77.4643% 0.062249 217.469017;--n:45.229% 0.035214 264.1312;--nc:89.9258% 0.016374 262.749256;--b1:95.1276% 0.007445 260.731539;--b2:93.2996% 0.010389 261.788485;--b3:89.9258% 0.016374 262.749256;--bc:32.4374% 0.022945 264.182036;--in:69.2072% 0.062496 332.664922;--su:76.827% 0.074899 131.063061;--wa:85.4862% 0.089234 84.093335;--er:60.61% 0.120594 15.341883;--rounded-box:0.4rem;--rounded-btn:0.2rem;--rounded-badge:0.4rem;--tab-radius:0.2rem}:root:has(input.theme-controller[value=nord]:checked){color-scheme:light;--pc:11.8872% 0.015449 254.027774;--sc:13.9303% 0.011822 248.687186;--ac:15.4929% 0.01245 217.469017;--inc:13.8414% 0.012499 332.664922;--suc:15.3654% 0.01498 131.063061;--wac:17.0972% 0.017847 84.093335;--erc:12.122% 0.024119 15.341883;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:59.4359% 0.077246 254.027774;--s:69.6516% 0.059108 248.687186;--a:77.4643% 0.062249 217.469017;--n:45.229% 0.035214 264.1312;--nc:89.9258% 0.016374 262.749256;--b1:95.1276% 0.007445 260.731539;--b2:93.2996% 0.010389 261.788485;--b3:89.9258% 0.016374 262.749256;--bc:32.4374% 0.022945 264.182036;--in:69.2072% 0.062496 332.664922;--su:76.827% 0.074899 131.063061;--wa:85.4862% 0.089234 84.093335;--er:60.61% 0.120594 15.341883;--rounded-box:0.4rem;--rounded-btn:0.2rem;--rounded-badge:0.4rem;--tab-radius:0.2rem}[data-theme=sunset]{color-scheme:dark;--pc:14.9408% 0.031656 39.94703;--sc:14.5075% 0.035531 2.72034;--ac:14.2589% 0.033336 299.844533;--inc:17.1119% 0.017054 206.015183;--suc:17.1122% 0.017172 144.77874;--wac:17.1139% 0.016961 74.427797;--erc:17.1023% 0.015778 16.886379;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:74.7039% 0.158278 39.94703;--s:72.5375% 0.177654 2.72034;--a:71.2947% 0.166678 299.844533;--n:26% 0.019 237.69;--nc:70% 
0.019 237.69;--b1:22% 0.019 237.69;--b2:20% 0.019 237.69;--b3:18% 0.019 237.69;--bc:77.3835% 0.043586 245.096534;--in:85.5596% 0.085271 206.015183;--su:85.5609% 0.08586 144.77874;--wa:85.5695% 0.084806 74.427797;--er:85.5116% 0.07889 16.886379;--rounded-box:1.2rem;--rounded-btn:0.8rem;--rounded-badge:0.4rem;--tab-radius:0.7rem}:root:has(input.theme-controller[value=sunset]:checked){color-scheme:dark;--pc:14.9408% 0.031656 39.94703;--sc:14.5075% 0.035531 2.72034;--ac:14.2589% 0.033336 299.844533;--inc:17.1119% 0.017054 206.015183;--suc:17.1122% 0.017172 144.77874;--wac:17.1139% 0.016961 74.427797;--erc:17.1023% 0.015778 16.886379;--animation-btn:0.25s;--animation-input:.2s;--btn-focus-scale:0.95;--border-btn:1px;--tab-border:1px;--p:74.7039% 0.158278 39.94703;--s:72.5375% 0.177654 2.72034;--a:71.2947% 0.166678 299.844533;--n:26% 0.019 237.69;--nc:70% 0.019 237.69;--b1:22% 0.019 237.69;--b2:20% 0.019 237.69;--b3:18% 0.019 237.69;--bc:77.3835% 0.043586 245.096534;--in:85.5596% 0.085271 206.015183;--su:85.5609% 0.08586 144.77874;--wa:85.5695% 0.084806 74.427797;--er:85.5116% 0.07889 16.886379;--rounded-box:1.2rem;--rounded-btn:0.8rem;--rounded-badge:0.4rem;--tab-radius:0.7rem} diff --git a/examples/server/public/deps_markdown-it.js b/examples/server/public/deps_markdown-it.js deleted file mode 100644 index 1be0cebe6a440..0000000000000 --- a/examples/server/public/deps_markdown-it.js +++ /dev/null @@ -1,8442 +0,0 @@ -/*! markdown-it 13.0.2 https://github.com/markdown-it/markdown-it @license MIT */ -(function(global, factory) { - typeof exports === "object" && typeof module !== "undefined" ? module.exports = factory() : typeof define === "function" && define.amd ? define(factory) : (global = typeof globalThis !== "undefined" ? globalThis : global || self, - global.markdownit = factory()); -})(this, (function() { - "use strict"; - function createCommonjsModule(fn, basedir, module) { - return module = { - path: basedir, - exports: {}, - require: function(path, base) { - return commonjsRequire(path, base === undefined || base === null ? module.path : base); - } - }, fn(module, module.exports), module.exports; - } - function getAugmentedNamespace(n) { - if (n.__esModule) return n; - var a = Object.defineProperty({}, "__esModule", { - value: true - }); - Object.keys(n).forEach((function(k) { - var d = Object.getOwnPropertyDescriptor(n, k); - Object.defineProperty(a, k, d.get ? 
d : { - enumerable: true, - get: function() { - return n[k]; - } - }); - })); - return a; - } - function commonjsRequire() { - throw new Error("Dynamic requires are not currently supported by @rollup/plugin-commonjs"); - } - var require$$0 = { - Aacute: "\xc1", - aacute: "\xe1", - Abreve: "\u0102", - abreve: "\u0103", - ac: "\u223e", - acd: "\u223f", - acE: "\u223e\u0333", - Acirc: "\xc2", - acirc: "\xe2", - acute: "\xb4", - Acy: "\u0410", - acy: "\u0430", - AElig: "\xc6", - aelig: "\xe6", - af: "\u2061", - Afr: "\ud835\udd04", - afr: "\ud835\udd1e", - Agrave: "\xc0", - agrave: "\xe0", - alefsym: "\u2135", - aleph: "\u2135", - Alpha: "\u0391", - alpha: "\u03b1", - Amacr: "\u0100", - amacr: "\u0101", - amalg: "\u2a3f", - amp: "&", - AMP: "&", - andand: "\u2a55", - And: "\u2a53", - and: "\u2227", - andd: "\u2a5c", - andslope: "\u2a58", - andv: "\u2a5a", - ang: "\u2220", - ange: "\u29a4", - angle: "\u2220", - angmsdaa: "\u29a8", - angmsdab: "\u29a9", - angmsdac: "\u29aa", - angmsdad: "\u29ab", - angmsdae: "\u29ac", - angmsdaf: "\u29ad", - angmsdag: "\u29ae", - angmsdah: "\u29af", - angmsd: "\u2221", - angrt: "\u221f", - angrtvb: "\u22be", - angrtvbd: "\u299d", - angsph: "\u2222", - angst: "\xc5", - angzarr: "\u237c", - Aogon: "\u0104", - aogon: "\u0105", - Aopf: "\ud835\udd38", - aopf: "\ud835\udd52", - apacir: "\u2a6f", - ap: "\u2248", - apE: "\u2a70", - ape: "\u224a", - apid: "\u224b", - apos: "'", - ApplyFunction: "\u2061", - approx: "\u2248", - approxeq: "\u224a", - Aring: "\xc5", - aring: "\xe5", - Ascr: "\ud835\udc9c", - ascr: "\ud835\udcb6", - Assign: "\u2254", - ast: "*", - asymp: "\u2248", - asympeq: "\u224d", - Atilde: "\xc3", - atilde: "\xe3", - Auml: "\xc4", - auml: "\xe4", - awconint: "\u2233", - awint: "\u2a11", - backcong: "\u224c", - backepsilon: "\u03f6", - backprime: "\u2035", - backsim: "\u223d", - backsimeq: "\u22cd", - Backslash: "\u2216", - Barv: "\u2ae7", - barvee: "\u22bd", - barwed: "\u2305", - Barwed: "\u2306", - barwedge: "\u2305", - bbrk: "\u23b5", - bbrktbrk: "\u23b6", - bcong: "\u224c", - Bcy: "\u0411", - bcy: "\u0431", - bdquo: "\u201e", - becaus: "\u2235", - because: "\u2235", - Because: "\u2235", - bemptyv: "\u29b0", - bepsi: "\u03f6", - bernou: "\u212c", - Bernoullis: "\u212c", - Beta: "\u0392", - beta: "\u03b2", - beth: "\u2136", - between: "\u226c", - Bfr: "\ud835\udd05", - bfr: "\ud835\udd1f", - bigcap: "\u22c2", - bigcirc: "\u25ef", - bigcup: "\u22c3", - bigodot: "\u2a00", - bigoplus: "\u2a01", - bigotimes: "\u2a02", - bigsqcup: "\u2a06", - bigstar: "\u2605", - bigtriangledown: "\u25bd", - bigtriangleup: "\u25b3", - biguplus: "\u2a04", - bigvee: "\u22c1", - bigwedge: "\u22c0", - bkarow: "\u290d", - blacklozenge: "\u29eb", - blacksquare: "\u25aa", - blacktriangle: "\u25b4", - blacktriangledown: "\u25be", - blacktriangleleft: "\u25c2", - blacktriangleright: "\u25b8", - blank: "\u2423", - blk12: "\u2592", - blk14: "\u2591", - blk34: "\u2593", - block: "\u2588", - bne: "=\u20e5", - bnequiv: "\u2261\u20e5", - bNot: "\u2aed", - bnot: "\u2310", - Bopf: "\ud835\udd39", - bopf: "\ud835\udd53", - bot: "\u22a5", - bottom: "\u22a5", - bowtie: "\u22c8", - boxbox: "\u29c9", - boxdl: "\u2510", - boxdL: "\u2555", - boxDl: "\u2556", - boxDL: "\u2557", - boxdr: "\u250c", - boxdR: "\u2552", - boxDr: "\u2553", - boxDR: "\u2554", - boxh: "\u2500", - boxH: "\u2550", - boxhd: "\u252c", - boxHd: "\u2564", - boxhD: "\u2565", - boxHD: "\u2566", - boxhu: "\u2534", - boxHu: "\u2567", - boxhU: "\u2568", - boxHU: "\u2569", - boxminus: "\u229f", - boxplus: "\u229e", - boxtimes: 
"\u22a0", - boxul: "\u2518", - boxuL: "\u255b", - boxUl: "\u255c", - boxUL: "\u255d", - boxur: "\u2514", - boxuR: "\u2558", - boxUr: "\u2559", - boxUR: "\u255a", - boxv: "\u2502", - boxV: "\u2551", - boxvh: "\u253c", - boxvH: "\u256a", - boxVh: "\u256b", - boxVH: "\u256c", - boxvl: "\u2524", - boxvL: "\u2561", - boxVl: "\u2562", - boxVL: "\u2563", - boxvr: "\u251c", - boxvR: "\u255e", - boxVr: "\u255f", - boxVR: "\u2560", - bprime: "\u2035", - breve: "\u02d8", - Breve: "\u02d8", - brvbar: "\xa6", - bscr: "\ud835\udcb7", - Bscr: "\u212c", - bsemi: "\u204f", - bsim: "\u223d", - bsime: "\u22cd", - bsolb: "\u29c5", - bsol: "\\", - bsolhsub: "\u27c8", - bull: "\u2022", - bullet: "\u2022", - bump: "\u224e", - bumpE: "\u2aae", - bumpe: "\u224f", - Bumpeq: "\u224e", - bumpeq: "\u224f", - Cacute: "\u0106", - cacute: "\u0107", - capand: "\u2a44", - capbrcup: "\u2a49", - capcap: "\u2a4b", - cap: "\u2229", - Cap: "\u22d2", - capcup: "\u2a47", - capdot: "\u2a40", - CapitalDifferentialD: "\u2145", - caps: "\u2229\ufe00", - caret: "\u2041", - caron: "\u02c7", - Cayleys: "\u212d", - ccaps: "\u2a4d", - Ccaron: "\u010c", - ccaron: "\u010d", - Ccedil: "\xc7", - ccedil: "\xe7", - Ccirc: "\u0108", - ccirc: "\u0109", - Cconint: "\u2230", - ccups: "\u2a4c", - ccupssm: "\u2a50", - Cdot: "\u010a", - cdot: "\u010b", - cedil: "\xb8", - Cedilla: "\xb8", - cemptyv: "\u29b2", - cent: "\xa2", - centerdot: "\xb7", - CenterDot: "\xb7", - cfr: "\ud835\udd20", - Cfr: "\u212d", - CHcy: "\u0427", - chcy: "\u0447", - check: "\u2713", - checkmark: "\u2713", - Chi: "\u03a7", - chi: "\u03c7", - circ: "\u02c6", - circeq: "\u2257", - circlearrowleft: "\u21ba", - circlearrowright: "\u21bb", - circledast: "\u229b", - circledcirc: "\u229a", - circleddash: "\u229d", - CircleDot: "\u2299", - circledR: "\xae", - circledS: "\u24c8", - CircleMinus: "\u2296", - CirclePlus: "\u2295", - CircleTimes: "\u2297", - cir: "\u25cb", - cirE: "\u29c3", - cire: "\u2257", - cirfnint: "\u2a10", - cirmid: "\u2aef", - cirscir: "\u29c2", - ClockwiseContourIntegral: "\u2232", - CloseCurlyDoubleQuote: "\u201d", - CloseCurlyQuote: "\u2019", - clubs: "\u2663", - clubsuit: "\u2663", - colon: ":", - Colon: "\u2237", - Colone: "\u2a74", - colone: "\u2254", - coloneq: "\u2254", - comma: ",", - commat: "@", - comp: "\u2201", - compfn: "\u2218", - complement: "\u2201", - complexes: "\u2102", - cong: "\u2245", - congdot: "\u2a6d", - Congruent: "\u2261", - conint: "\u222e", - Conint: "\u222f", - ContourIntegral: "\u222e", - copf: "\ud835\udd54", - Copf: "\u2102", - coprod: "\u2210", - Coproduct: "\u2210", - copy: "\xa9", - COPY: "\xa9", - copysr: "\u2117", - CounterClockwiseContourIntegral: "\u2233", - crarr: "\u21b5", - cross: "\u2717", - Cross: "\u2a2f", - Cscr: "\ud835\udc9e", - cscr: "\ud835\udcb8", - csub: "\u2acf", - csube: "\u2ad1", - csup: "\u2ad0", - csupe: "\u2ad2", - ctdot: "\u22ef", - cudarrl: "\u2938", - cudarrr: "\u2935", - cuepr: "\u22de", - cuesc: "\u22df", - cularr: "\u21b6", - cularrp: "\u293d", - cupbrcap: "\u2a48", - cupcap: "\u2a46", - CupCap: "\u224d", - cup: "\u222a", - Cup: "\u22d3", - cupcup: "\u2a4a", - cupdot: "\u228d", - cupor: "\u2a45", - cups: "\u222a\ufe00", - curarr: "\u21b7", - curarrm: "\u293c", - curlyeqprec: "\u22de", - curlyeqsucc: "\u22df", - curlyvee: "\u22ce", - curlywedge: "\u22cf", - curren: "\xa4", - curvearrowleft: "\u21b6", - curvearrowright: "\u21b7", - cuvee: "\u22ce", - cuwed: "\u22cf", - cwconint: "\u2232", - cwint: "\u2231", - cylcty: "\u232d", - dagger: "\u2020", - Dagger: "\u2021", - daleth: "\u2138", - darr: 
"\u2193", - Darr: "\u21a1", - dArr: "\u21d3", - dash: "\u2010", - Dashv: "\u2ae4", - dashv: "\u22a3", - dbkarow: "\u290f", - dblac: "\u02dd", - Dcaron: "\u010e", - dcaron: "\u010f", - Dcy: "\u0414", - dcy: "\u0434", - ddagger: "\u2021", - ddarr: "\u21ca", - DD: "\u2145", - dd: "\u2146", - DDotrahd: "\u2911", - ddotseq: "\u2a77", - deg: "\xb0", - Del: "\u2207", - Delta: "\u0394", - delta: "\u03b4", - demptyv: "\u29b1", - dfisht: "\u297f", - Dfr: "\ud835\udd07", - dfr: "\ud835\udd21", - dHar: "\u2965", - dharl: "\u21c3", - dharr: "\u21c2", - DiacriticalAcute: "\xb4", - DiacriticalDot: "\u02d9", - DiacriticalDoubleAcute: "\u02dd", - DiacriticalGrave: "`", - DiacriticalTilde: "\u02dc", - diam: "\u22c4", - diamond: "\u22c4", - Diamond: "\u22c4", - diamondsuit: "\u2666", - diams: "\u2666", - die: "\xa8", - DifferentialD: "\u2146", - digamma: "\u03dd", - disin: "\u22f2", - div: "\xf7", - divide: "\xf7", - divideontimes: "\u22c7", - divonx: "\u22c7", - DJcy: "\u0402", - djcy: "\u0452", - dlcorn: "\u231e", - dlcrop: "\u230d", - dollar: "$", - Dopf: "\ud835\udd3b", - dopf: "\ud835\udd55", - Dot: "\xa8", - dot: "\u02d9", - DotDot: "\u20dc", - doteq: "\u2250", - doteqdot: "\u2251", - DotEqual: "\u2250", - dotminus: "\u2238", - dotplus: "\u2214", - dotsquare: "\u22a1", - doublebarwedge: "\u2306", - DoubleContourIntegral: "\u222f", - DoubleDot: "\xa8", - DoubleDownArrow: "\u21d3", - DoubleLeftArrow: "\u21d0", - DoubleLeftRightArrow: "\u21d4", - DoubleLeftTee: "\u2ae4", - DoubleLongLeftArrow: "\u27f8", - DoubleLongLeftRightArrow: "\u27fa", - DoubleLongRightArrow: "\u27f9", - DoubleRightArrow: "\u21d2", - DoubleRightTee: "\u22a8", - DoubleUpArrow: "\u21d1", - DoubleUpDownArrow: "\u21d5", - DoubleVerticalBar: "\u2225", - DownArrowBar: "\u2913", - downarrow: "\u2193", - DownArrow: "\u2193", - Downarrow: "\u21d3", - DownArrowUpArrow: "\u21f5", - DownBreve: "\u0311", - downdownarrows: "\u21ca", - downharpoonleft: "\u21c3", - downharpoonright: "\u21c2", - DownLeftRightVector: "\u2950", - DownLeftTeeVector: "\u295e", - DownLeftVectorBar: "\u2956", - DownLeftVector: "\u21bd", - DownRightTeeVector: "\u295f", - DownRightVectorBar: "\u2957", - DownRightVector: "\u21c1", - DownTeeArrow: "\u21a7", - DownTee: "\u22a4", - drbkarow: "\u2910", - drcorn: "\u231f", - drcrop: "\u230c", - Dscr: "\ud835\udc9f", - dscr: "\ud835\udcb9", - DScy: "\u0405", - dscy: "\u0455", - dsol: "\u29f6", - Dstrok: "\u0110", - dstrok: "\u0111", - dtdot: "\u22f1", - dtri: "\u25bf", - dtrif: "\u25be", - duarr: "\u21f5", - duhar: "\u296f", - dwangle: "\u29a6", - DZcy: "\u040f", - dzcy: "\u045f", - dzigrarr: "\u27ff", - Eacute: "\xc9", - eacute: "\xe9", - easter: "\u2a6e", - Ecaron: "\u011a", - ecaron: "\u011b", - Ecirc: "\xca", - ecirc: "\xea", - ecir: "\u2256", - ecolon: "\u2255", - Ecy: "\u042d", - ecy: "\u044d", - eDDot: "\u2a77", - Edot: "\u0116", - edot: "\u0117", - eDot: "\u2251", - ee: "\u2147", - efDot: "\u2252", - Efr: "\ud835\udd08", - efr: "\ud835\udd22", - eg: "\u2a9a", - Egrave: "\xc8", - egrave: "\xe8", - egs: "\u2a96", - egsdot: "\u2a98", - el: "\u2a99", - Element: "\u2208", - elinters: "\u23e7", - ell: "\u2113", - els: "\u2a95", - elsdot: "\u2a97", - Emacr: "\u0112", - emacr: "\u0113", - empty: "\u2205", - emptyset: "\u2205", - EmptySmallSquare: "\u25fb", - emptyv: "\u2205", - EmptyVerySmallSquare: "\u25ab", - emsp13: "\u2004", - emsp14: "\u2005", - emsp: "\u2003", - ENG: "\u014a", - eng: "\u014b", - ensp: "\u2002", - Eogon: "\u0118", - eogon: "\u0119", - Eopf: "\ud835\udd3c", - eopf: "\ud835\udd56", - epar: "\u22d5", - eparsl: 
"\u29e3", - eplus: "\u2a71", - epsi: "\u03b5", - Epsilon: "\u0395", - epsilon: "\u03b5", - epsiv: "\u03f5", - eqcirc: "\u2256", - eqcolon: "\u2255", - eqsim: "\u2242", - eqslantgtr: "\u2a96", - eqslantless: "\u2a95", - Equal: "\u2a75", - equals: "=", - EqualTilde: "\u2242", - equest: "\u225f", - Equilibrium: "\u21cc", - equiv: "\u2261", - equivDD: "\u2a78", - eqvparsl: "\u29e5", - erarr: "\u2971", - erDot: "\u2253", - escr: "\u212f", - Escr: "\u2130", - esdot: "\u2250", - Esim: "\u2a73", - esim: "\u2242", - Eta: "\u0397", - eta: "\u03b7", - ETH: "\xd0", - eth: "\xf0", - Euml: "\xcb", - euml: "\xeb", - euro: "\u20ac", - excl: "!", - exist: "\u2203", - Exists: "\u2203", - expectation: "\u2130", - exponentiale: "\u2147", - ExponentialE: "\u2147", - fallingdotseq: "\u2252", - Fcy: "\u0424", - fcy: "\u0444", - female: "\u2640", - ffilig: "\ufb03", - fflig: "\ufb00", - ffllig: "\ufb04", - Ffr: "\ud835\udd09", - ffr: "\ud835\udd23", - filig: "\ufb01", - FilledSmallSquare: "\u25fc", - FilledVerySmallSquare: "\u25aa", - fjlig: "fj", - flat: "\u266d", - fllig: "\ufb02", - fltns: "\u25b1", - fnof: "\u0192", - Fopf: "\ud835\udd3d", - fopf: "\ud835\udd57", - forall: "\u2200", - ForAll: "\u2200", - fork: "\u22d4", - forkv: "\u2ad9", - Fouriertrf: "\u2131", - fpartint: "\u2a0d", - frac12: "\xbd", - frac13: "\u2153", - frac14: "\xbc", - frac15: "\u2155", - frac16: "\u2159", - frac18: "\u215b", - frac23: "\u2154", - frac25: "\u2156", - frac34: "\xbe", - frac35: "\u2157", - frac38: "\u215c", - frac45: "\u2158", - frac56: "\u215a", - frac58: "\u215d", - frac78: "\u215e", - frasl: "\u2044", - frown: "\u2322", - fscr: "\ud835\udcbb", - Fscr: "\u2131", - gacute: "\u01f5", - Gamma: "\u0393", - gamma: "\u03b3", - Gammad: "\u03dc", - gammad: "\u03dd", - gap: "\u2a86", - Gbreve: "\u011e", - gbreve: "\u011f", - Gcedil: "\u0122", - Gcirc: "\u011c", - gcirc: "\u011d", - Gcy: "\u0413", - gcy: "\u0433", - Gdot: "\u0120", - gdot: "\u0121", - ge: "\u2265", - gE: "\u2267", - gEl: "\u2a8c", - gel: "\u22db", - geq: "\u2265", - geqq: "\u2267", - geqslant: "\u2a7e", - gescc: "\u2aa9", - ges: "\u2a7e", - gesdot: "\u2a80", - gesdoto: "\u2a82", - gesdotol: "\u2a84", - gesl: "\u22db\ufe00", - gesles: "\u2a94", - Gfr: "\ud835\udd0a", - gfr: "\ud835\udd24", - gg: "\u226b", - Gg: "\u22d9", - ggg: "\u22d9", - gimel: "\u2137", - GJcy: "\u0403", - gjcy: "\u0453", - gla: "\u2aa5", - gl: "\u2277", - glE: "\u2a92", - glj: "\u2aa4", - gnap: "\u2a8a", - gnapprox: "\u2a8a", - gne: "\u2a88", - gnE: "\u2269", - gneq: "\u2a88", - gneqq: "\u2269", - gnsim: "\u22e7", - Gopf: "\ud835\udd3e", - gopf: "\ud835\udd58", - grave: "`", - GreaterEqual: "\u2265", - GreaterEqualLess: "\u22db", - GreaterFullEqual: "\u2267", - GreaterGreater: "\u2aa2", - GreaterLess: "\u2277", - GreaterSlantEqual: "\u2a7e", - GreaterTilde: "\u2273", - Gscr: "\ud835\udca2", - gscr: "\u210a", - gsim: "\u2273", - gsime: "\u2a8e", - gsiml: "\u2a90", - gtcc: "\u2aa7", - gtcir: "\u2a7a", - gt: ">", - GT: ">", - Gt: "\u226b", - gtdot: "\u22d7", - gtlPar: "\u2995", - gtquest: "\u2a7c", - gtrapprox: "\u2a86", - gtrarr: "\u2978", - gtrdot: "\u22d7", - gtreqless: "\u22db", - gtreqqless: "\u2a8c", - gtrless: "\u2277", - gtrsim: "\u2273", - gvertneqq: "\u2269\ufe00", - gvnE: "\u2269\ufe00", - Hacek: "\u02c7", - hairsp: "\u200a", - half: "\xbd", - hamilt: "\u210b", - HARDcy: "\u042a", - hardcy: "\u044a", - harrcir: "\u2948", - harr: "\u2194", - hArr: "\u21d4", - harrw: "\u21ad", - Hat: "^", - hbar: "\u210f", - Hcirc: "\u0124", - hcirc: "\u0125", - hearts: "\u2665", - heartsuit: "\u2665", - 
hellip: "\u2026", - hercon: "\u22b9", - hfr: "\ud835\udd25", - Hfr: "\u210c", - HilbertSpace: "\u210b", - hksearow: "\u2925", - hkswarow: "\u2926", - hoarr: "\u21ff", - homtht: "\u223b", - hookleftarrow: "\u21a9", - hookrightarrow: "\u21aa", - hopf: "\ud835\udd59", - Hopf: "\u210d", - horbar: "\u2015", - HorizontalLine: "\u2500", - hscr: "\ud835\udcbd", - Hscr: "\u210b", - hslash: "\u210f", - Hstrok: "\u0126", - hstrok: "\u0127", - HumpDownHump: "\u224e", - HumpEqual: "\u224f", - hybull: "\u2043", - hyphen: "\u2010", - Iacute: "\xcd", - iacute: "\xed", - ic: "\u2063", - Icirc: "\xce", - icirc: "\xee", - Icy: "\u0418", - icy: "\u0438", - Idot: "\u0130", - IEcy: "\u0415", - iecy: "\u0435", - iexcl: "\xa1", - iff: "\u21d4", - ifr: "\ud835\udd26", - Ifr: "\u2111", - Igrave: "\xcc", - igrave: "\xec", - ii: "\u2148", - iiiint: "\u2a0c", - iiint: "\u222d", - iinfin: "\u29dc", - iiota: "\u2129", - IJlig: "\u0132", - ijlig: "\u0133", - Imacr: "\u012a", - imacr: "\u012b", - image: "\u2111", - ImaginaryI: "\u2148", - imagline: "\u2110", - imagpart: "\u2111", - imath: "\u0131", - Im: "\u2111", - imof: "\u22b7", - imped: "\u01b5", - Implies: "\u21d2", - incare: "\u2105", - in: "\u2208", - infin: "\u221e", - infintie: "\u29dd", - inodot: "\u0131", - intcal: "\u22ba", - int: "\u222b", - Int: "\u222c", - integers: "\u2124", - Integral: "\u222b", - intercal: "\u22ba", - Intersection: "\u22c2", - intlarhk: "\u2a17", - intprod: "\u2a3c", - InvisibleComma: "\u2063", - InvisibleTimes: "\u2062", - IOcy: "\u0401", - iocy: "\u0451", - Iogon: "\u012e", - iogon: "\u012f", - Iopf: "\ud835\udd40", - iopf: "\ud835\udd5a", - Iota: "\u0399", - iota: "\u03b9", - iprod: "\u2a3c", - iquest: "\xbf", - iscr: "\ud835\udcbe", - Iscr: "\u2110", - isin: "\u2208", - isindot: "\u22f5", - isinE: "\u22f9", - isins: "\u22f4", - isinsv: "\u22f3", - isinv: "\u2208", - it: "\u2062", - Itilde: "\u0128", - itilde: "\u0129", - Iukcy: "\u0406", - iukcy: "\u0456", - Iuml: "\xcf", - iuml: "\xef", - Jcirc: "\u0134", - jcirc: "\u0135", - Jcy: "\u0419", - jcy: "\u0439", - Jfr: "\ud835\udd0d", - jfr: "\ud835\udd27", - jmath: "\u0237", - Jopf: "\ud835\udd41", - jopf: "\ud835\udd5b", - Jscr: "\ud835\udca5", - jscr: "\ud835\udcbf", - Jsercy: "\u0408", - jsercy: "\u0458", - Jukcy: "\u0404", - jukcy: "\u0454", - Kappa: "\u039a", - kappa: "\u03ba", - kappav: "\u03f0", - Kcedil: "\u0136", - kcedil: "\u0137", - Kcy: "\u041a", - kcy: "\u043a", - Kfr: "\ud835\udd0e", - kfr: "\ud835\udd28", - kgreen: "\u0138", - KHcy: "\u0425", - khcy: "\u0445", - KJcy: "\u040c", - kjcy: "\u045c", - Kopf: "\ud835\udd42", - kopf: "\ud835\udd5c", - Kscr: "\ud835\udca6", - kscr: "\ud835\udcc0", - lAarr: "\u21da", - Lacute: "\u0139", - lacute: "\u013a", - laemptyv: "\u29b4", - lagran: "\u2112", - Lambda: "\u039b", - lambda: "\u03bb", - lang: "\u27e8", - Lang: "\u27ea", - langd: "\u2991", - langle: "\u27e8", - lap: "\u2a85", - Laplacetrf: "\u2112", - laquo: "\xab", - larrb: "\u21e4", - larrbfs: "\u291f", - larr: "\u2190", - Larr: "\u219e", - lArr: "\u21d0", - larrfs: "\u291d", - larrhk: "\u21a9", - larrlp: "\u21ab", - larrpl: "\u2939", - larrsim: "\u2973", - larrtl: "\u21a2", - latail: "\u2919", - lAtail: "\u291b", - lat: "\u2aab", - late: "\u2aad", - lates: "\u2aad\ufe00", - lbarr: "\u290c", - lBarr: "\u290e", - lbbrk: "\u2772", - lbrace: "{", - lbrack: "[", - lbrke: "\u298b", - lbrksld: "\u298f", - lbrkslu: "\u298d", - Lcaron: "\u013d", - lcaron: "\u013e", - Lcedil: "\u013b", - lcedil: "\u013c", - lceil: "\u2308", - lcub: "{", - Lcy: "\u041b", - lcy: "\u043b", - ldca: 
"\u2936", - ldquo: "\u201c", - ldquor: "\u201e", - ldrdhar: "\u2967", - ldrushar: "\u294b", - ldsh: "\u21b2", - le: "\u2264", - lE: "\u2266", - LeftAngleBracket: "\u27e8", - LeftArrowBar: "\u21e4", - leftarrow: "\u2190", - LeftArrow: "\u2190", - Leftarrow: "\u21d0", - LeftArrowRightArrow: "\u21c6", - leftarrowtail: "\u21a2", - LeftCeiling: "\u2308", - LeftDoubleBracket: "\u27e6", - LeftDownTeeVector: "\u2961", - LeftDownVectorBar: "\u2959", - LeftDownVector: "\u21c3", - LeftFloor: "\u230a", - leftharpoondown: "\u21bd", - leftharpoonup: "\u21bc", - leftleftarrows: "\u21c7", - leftrightarrow: "\u2194", - LeftRightArrow: "\u2194", - Leftrightarrow: "\u21d4", - leftrightarrows: "\u21c6", - leftrightharpoons: "\u21cb", - leftrightsquigarrow: "\u21ad", - LeftRightVector: "\u294e", - LeftTeeArrow: "\u21a4", - LeftTee: "\u22a3", - LeftTeeVector: "\u295a", - leftthreetimes: "\u22cb", - LeftTriangleBar: "\u29cf", - LeftTriangle: "\u22b2", - LeftTriangleEqual: "\u22b4", - LeftUpDownVector: "\u2951", - LeftUpTeeVector: "\u2960", - LeftUpVectorBar: "\u2958", - LeftUpVector: "\u21bf", - LeftVectorBar: "\u2952", - LeftVector: "\u21bc", - lEg: "\u2a8b", - leg: "\u22da", - leq: "\u2264", - leqq: "\u2266", - leqslant: "\u2a7d", - lescc: "\u2aa8", - les: "\u2a7d", - lesdot: "\u2a7f", - lesdoto: "\u2a81", - lesdotor: "\u2a83", - lesg: "\u22da\ufe00", - lesges: "\u2a93", - lessapprox: "\u2a85", - lessdot: "\u22d6", - lesseqgtr: "\u22da", - lesseqqgtr: "\u2a8b", - LessEqualGreater: "\u22da", - LessFullEqual: "\u2266", - LessGreater: "\u2276", - lessgtr: "\u2276", - LessLess: "\u2aa1", - lesssim: "\u2272", - LessSlantEqual: "\u2a7d", - LessTilde: "\u2272", - lfisht: "\u297c", - lfloor: "\u230a", - Lfr: "\ud835\udd0f", - lfr: "\ud835\udd29", - lg: "\u2276", - lgE: "\u2a91", - lHar: "\u2962", - lhard: "\u21bd", - lharu: "\u21bc", - lharul: "\u296a", - lhblk: "\u2584", - LJcy: "\u0409", - ljcy: "\u0459", - llarr: "\u21c7", - ll: "\u226a", - Ll: "\u22d8", - llcorner: "\u231e", - Lleftarrow: "\u21da", - llhard: "\u296b", - lltri: "\u25fa", - Lmidot: "\u013f", - lmidot: "\u0140", - lmoustache: "\u23b0", - lmoust: "\u23b0", - lnap: "\u2a89", - lnapprox: "\u2a89", - lne: "\u2a87", - lnE: "\u2268", - lneq: "\u2a87", - lneqq: "\u2268", - lnsim: "\u22e6", - loang: "\u27ec", - loarr: "\u21fd", - lobrk: "\u27e6", - longleftarrow: "\u27f5", - LongLeftArrow: "\u27f5", - Longleftarrow: "\u27f8", - longleftrightarrow: "\u27f7", - LongLeftRightArrow: "\u27f7", - Longleftrightarrow: "\u27fa", - longmapsto: "\u27fc", - longrightarrow: "\u27f6", - LongRightArrow: "\u27f6", - Longrightarrow: "\u27f9", - looparrowleft: "\u21ab", - looparrowright: "\u21ac", - lopar: "\u2985", - Lopf: "\ud835\udd43", - lopf: "\ud835\udd5d", - loplus: "\u2a2d", - lotimes: "\u2a34", - lowast: "\u2217", - lowbar: "_", - LowerLeftArrow: "\u2199", - LowerRightArrow: "\u2198", - loz: "\u25ca", - lozenge: "\u25ca", - lozf: "\u29eb", - lpar: "(", - lparlt: "\u2993", - lrarr: "\u21c6", - lrcorner: "\u231f", - lrhar: "\u21cb", - lrhard: "\u296d", - lrm: "\u200e", - lrtri: "\u22bf", - lsaquo: "\u2039", - lscr: "\ud835\udcc1", - Lscr: "\u2112", - lsh: "\u21b0", - Lsh: "\u21b0", - lsim: "\u2272", - lsime: "\u2a8d", - lsimg: "\u2a8f", - lsqb: "[", - lsquo: "\u2018", - lsquor: "\u201a", - Lstrok: "\u0141", - lstrok: "\u0142", - ltcc: "\u2aa6", - ltcir: "\u2a79", - lt: "<", - LT: "<", - Lt: "\u226a", - ltdot: "\u22d6", - lthree: "\u22cb", - ltimes: "\u22c9", - ltlarr: "\u2976", - ltquest: "\u2a7b", - ltri: "\u25c3", - ltrie: "\u22b4", - ltrif: "\u25c2", - ltrPar: 
"\u2996", - lurdshar: "\u294a", - luruhar: "\u2966", - lvertneqq: "\u2268\ufe00", - lvnE: "\u2268\ufe00", - macr: "\xaf", - male: "\u2642", - malt: "\u2720", - maltese: "\u2720", - Map: "\u2905", - map: "\u21a6", - mapsto: "\u21a6", - mapstodown: "\u21a7", - mapstoleft: "\u21a4", - mapstoup: "\u21a5", - marker: "\u25ae", - mcomma: "\u2a29", - Mcy: "\u041c", - mcy: "\u043c", - mdash: "\u2014", - mDDot: "\u223a", - measuredangle: "\u2221", - MediumSpace: "\u205f", - Mellintrf: "\u2133", - Mfr: "\ud835\udd10", - mfr: "\ud835\udd2a", - mho: "\u2127", - micro: "\xb5", - midast: "*", - midcir: "\u2af0", - mid: "\u2223", - middot: "\xb7", - minusb: "\u229f", - minus: "\u2212", - minusd: "\u2238", - minusdu: "\u2a2a", - MinusPlus: "\u2213", - mlcp: "\u2adb", - mldr: "\u2026", - mnplus: "\u2213", - models: "\u22a7", - Mopf: "\ud835\udd44", - mopf: "\ud835\udd5e", - mp: "\u2213", - mscr: "\ud835\udcc2", - Mscr: "\u2133", - mstpos: "\u223e", - Mu: "\u039c", - mu: "\u03bc", - multimap: "\u22b8", - mumap: "\u22b8", - nabla: "\u2207", - Nacute: "\u0143", - nacute: "\u0144", - nang: "\u2220\u20d2", - nap: "\u2249", - napE: "\u2a70\u0338", - napid: "\u224b\u0338", - napos: "\u0149", - napprox: "\u2249", - natural: "\u266e", - naturals: "\u2115", - natur: "\u266e", - nbsp: "\xa0", - nbump: "\u224e\u0338", - nbumpe: "\u224f\u0338", - ncap: "\u2a43", - Ncaron: "\u0147", - ncaron: "\u0148", - Ncedil: "\u0145", - ncedil: "\u0146", - ncong: "\u2247", - ncongdot: "\u2a6d\u0338", - ncup: "\u2a42", - Ncy: "\u041d", - ncy: "\u043d", - ndash: "\u2013", - nearhk: "\u2924", - nearr: "\u2197", - neArr: "\u21d7", - nearrow: "\u2197", - ne: "\u2260", - nedot: "\u2250\u0338", - NegativeMediumSpace: "\u200b", - NegativeThickSpace: "\u200b", - NegativeThinSpace: "\u200b", - NegativeVeryThinSpace: "\u200b", - nequiv: "\u2262", - nesear: "\u2928", - nesim: "\u2242\u0338", - NestedGreaterGreater: "\u226b", - NestedLessLess: "\u226a", - NewLine: "\n", - nexist: "\u2204", - nexists: "\u2204", - Nfr: "\ud835\udd11", - nfr: "\ud835\udd2b", - ngE: "\u2267\u0338", - nge: "\u2271", - ngeq: "\u2271", - ngeqq: "\u2267\u0338", - ngeqslant: "\u2a7e\u0338", - nges: "\u2a7e\u0338", - nGg: "\u22d9\u0338", - ngsim: "\u2275", - nGt: "\u226b\u20d2", - ngt: "\u226f", - ngtr: "\u226f", - nGtv: "\u226b\u0338", - nharr: "\u21ae", - nhArr: "\u21ce", - nhpar: "\u2af2", - ni: "\u220b", - nis: "\u22fc", - nisd: "\u22fa", - niv: "\u220b", - NJcy: "\u040a", - njcy: "\u045a", - nlarr: "\u219a", - nlArr: "\u21cd", - nldr: "\u2025", - nlE: "\u2266\u0338", - nle: "\u2270", - nleftarrow: "\u219a", - nLeftarrow: "\u21cd", - nleftrightarrow: "\u21ae", - nLeftrightarrow: "\u21ce", - nleq: "\u2270", - nleqq: "\u2266\u0338", - nleqslant: "\u2a7d\u0338", - nles: "\u2a7d\u0338", - nless: "\u226e", - nLl: "\u22d8\u0338", - nlsim: "\u2274", - nLt: "\u226a\u20d2", - nlt: "\u226e", - nltri: "\u22ea", - nltrie: "\u22ec", - nLtv: "\u226a\u0338", - nmid: "\u2224", - NoBreak: "\u2060", - NonBreakingSpace: "\xa0", - nopf: "\ud835\udd5f", - Nopf: "\u2115", - Not: "\u2aec", - not: "\xac", - NotCongruent: "\u2262", - NotCupCap: "\u226d", - NotDoubleVerticalBar: "\u2226", - NotElement: "\u2209", - NotEqual: "\u2260", - NotEqualTilde: "\u2242\u0338", - NotExists: "\u2204", - NotGreater: "\u226f", - NotGreaterEqual: "\u2271", - NotGreaterFullEqual: "\u2267\u0338", - NotGreaterGreater: "\u226b\u0338", - NotGreaterLess: "\u2279", - NotGreaterSlantEqual: "\u2a7e\u0338", - NotGreaterTilde: "\u2275", - NotHumpDownHump: "\u224e\u0338", - NotHumpEqual: "\u224f\u0338", - notin: "\u2209", 
- notindot: "\u22f5\u0338", - notinE: "\u22f9\u0338", - notinva: "\u2209", - notinvb: "\u22f7", - notinvc: "\u22f6", - NotLeftTriangleBar: "\u29cf\u0338", - NotLeftTriangle: "\u22ea", - NotLeftTriangleEqual: "\u22ec", - NotLess: "\u226e", - NotLessEqual: "\u2270", - NotLessGreater: "\u2278", - NotLessLess: "\u226a\u0338", - NotLessSlantEqual: "\u2a7d\u0338", - NotLessTilde: "\u2274", - NotNestedGreaterGreater: "\u2aa2\u0338", - NotNestedLessLess: "\u2aa1\u0338", - notni: "\u220c", - notniva: "\u220c", - notnivb: "\u22fe", - notnivc: "\u22fd", - NotPrecedes: "\u2280", - NotPrecedesEqual: "\u2aaf\u0338", - NotPrecedesSlantEqual: "\u22e0", - NotReverseElement: "\u220c", - NotRightTriangleBar: "\u29d0\u0338", - NotRightTriangle: "\u22eb", - NotRightTriangleEqual: "\u22ed", - NotSquareSubset: "\u228f\u0338", - NotSquareSubsetEqual: "\u22e2", - NotSquareSuperset: "\u2290\u0338", - NotSquareSupersetEqual: "\u22e3", - NotSubset: "\u2282\u20d2", - NotSubsetEqual: "\u2288", - NotSucceeds: "\u2281", - NotSucceedsEqual: "\u2ab0\u0338", - NotSucceedsSlantEqual: "\u22e1", - NotSucceedsTilde: "\u227f\u0338", - NotSuperset: "\u2283\u20d2", - NotSupersetEqual: "\u2289", - NotTilde: "\u2241", - NotTildeEqual: "\u2244", - NotTildeFullEqual: "\u2247", - NotTildeTilde: "\u2249", - NotVerticalBar: "\u2224", - nparallel: "\u2226", - npar: "\u2226", - nparsl: "\u2afd\u20e5", - npart: "\u2202\u0338", - npolint: "\u2a14", - npr: "\u2280", - nprcue: "\u22e0", - nprec: "\u2280", - npreceq: "\u2aaf\u0338", - npre: "\u2aaf\u0338", - nrarrc: "\u2933\u0338", - nrarr: "\u219b", - nrArr: "\u21cf", - nrarrw: "\u219d\u0338", - nrightarrow: "\u219b", - nRightarrow: "\u21cf", - nrtri: "\u22eb", - nrtrie: "\u22ed", - nsc: "\u2281", - nsccue: "\u22e1", - nsce: "\u2ab0\u0338", - Nscr: "\ud835\udca9", - nscr: "\ud835\udcc3", - nshortmid: "\u2224", - nshortparallel: "\u2226", - nsim: "\u2241", - nsime: "\u2244", - nsimeq: "\u2244", - nsmid: "\u2224", - nspar: "\u2226", - nsqsube: "\u22e2", - nsqsupe: "\u22e3", - nsub: "\u2284", - nsubE: "\u2ac5\u0338", - nsube: "\u2288", - nsubset: "\u2282\u20d2", - nsubseteq: "\u2288", - nsubseteqq: "\u2ac5\u0338", - nsucc: "\u2281", - nsucceq: "\u2ab0\u0338", - nsup: "\u2285", - nsupE: "\u2ac6\u0338", - nsupe: "\u2289", - nsupset: "\u2283\u20d2", - nsupseteq: "\u2289", - nsupseteqq: "\u2ac6\u0338", - ntgl: "\u2279", - Ntilde: "\xd1", - ntilde: "\xf1", - ntlg: "\u2278", - ntriangleleft: "\u22ea", - ntrianglelefteq: "\u22ec", - ntriangleright: "\u22eb", - ntrianglerighteq: "\u22ed", - Nu: "\u039d", - nu: "\u03bd", - num: "#", - numero: "\u2116", - numsp: "\u2007", - nvap: "\u224d\u20d2", - nvdash: "\u22ac", - nvDash: "\u22ad", - nVdash: "\u22ae", - nVDash: "\u22af", - nvge: "\u2265\u20d2", - nvgt: ">\u20d2", - nvHarr: "\u2904", - nvinfin: "\u29de", - nvlArr: "\u2902", - nvle: "\u2264\u20d2", - nvlt: "<\u20d2", - nvltrie: "\u22b4\u20d2", - nvrArr: "\u2903", - nvrtrie: "\u22b5\u20d2", - nvsim: "\u223c\u20d2", - nwarhk: "\u2923", - nwarr: "\u2196", - nwArr: "\u21d6", - nwarrow: "\u2196", - nwnear: "\u2927", - Oacute: "\xd3", - oacute: "\xf3", - oast: "\u229b", - Ocirc: "\xd4", - ocirc: "\xf4", - ocir: "\u229a", - Ocy: "\u041e", - ocy: "\u043e", - odash: "\u229d", - Odblac: "\u0150", - odblac: "\u0151", - odiv: "\u2a38", - odot: "\u2299", - odsold: "\u29bc", - OElig: "\u0152", - oelig: "\u0153", - ofcir: "\u29bf", - Ofr: "\ud835\udd12", - ofr: "\ud835\udd2c", - ogon: "\u02db", - Ograve: "\xd2", - ograve: "\xf2", - ogt: "\u29c1", - ohbar: "\u29b5", - ohm: "\u03a9", - oint: "\u222e", - olarr: "\u21ba", - 
olcir: "\u29be", - olcross: "\u29bb", - oline: "\u203e", - olt: "\u29c0", - Omacr: "\u014c", - omacr: "\u014d", - Omega: "\u03a9", - omega: "\u03c9", - Omicron: "\u039f", - omicron: "\u03bf", - omid: "\u29b6", - ominus: "\u2296", - Oopf: "\ud835\udd46", - oopf: "\ud835\udd60", - opar: "\u29b7", - OpenCurlyDoubleQuote: "\u201c", - OpenCurlyQuote: "\u2018", - operp: "\u29b9", - oplus: "\u2295", - orarr: "\u21bb", - Or: "\u2a54", - or: "\u2228", - ord: "\u2a5d", - order: "\u2134", - orderof: "\u2134", - ordf: "\xaa", - ordm: "\xba", - origof: "\u22b6", - oror: "\u2a56", - orslope: "\u2a57", - orv: "\u2a5b", - oS: "\u24c8", - Oscr: "\ud835\udcaa", - oscr: "\u2134", - Oslash: "\xd8", - oslash: "\xf8", - osol: "\u2298", - Otilde: "\xd5", - otilde: "\xf5", - otimesas: "\u2a36", - Otimes: "\u2a37", - otimes: "\u2297", - Ouml: "\xd6", - ouml: "\xf6", - ovbar: "\u233d", - OverBar: "\u203e", - OverBrace: "\u23de", - OverBracket: "\u23b4", - OverParenthesis: "\u23dc", - para: "\xb6", - parallel: "\u2225", - par: "\u2225", - parsim: "\u2af3", - parsl: "\u2afd", - part: "\u2202", - PartialD: "\u2202", - Pcy: "\u041f", - pcy: "\u043f", - percnt: "%", - period: ".", - permil: "\u2030", - perp: "\u22a5", - pertenk: "\u2031", - Pfr: "\ud835\udd13", - pfr: "\ud835\udd2d", - Phi: "\u03a6", - phi: "\u03c6", - phiv: "\u03d5", - phmmat: "\u2133", - phone: "\u260e", - Pi: "\u03a0", - pi: "\u03c0", - pitchfork: "\u22d4", - piv: "\u03d6", - planck: "\u210f", - planckh: "\u210e", - plankv: "\u210f", - plusacir: "\u2a23", - plusb: "\u229e", - pluscir: "\u2a22", - plus: "+", - plusdo: "\u2214", - plusdu: "\u2a25", - pluse: "\u2a72", - PlusMinus: "\xb1", - plusmn: "\xb1", - plussim: "\u2a26", - plustwo: "\u2a27", - pm: "\xb1", - Poincareplane: "\u210c", - pointint: "\u2a15", - popf: "\ud835\udd61", - Popf: "\u2119", - pound: "\xa3", - prap: "\u2ab7", - Pr: "\u2abb", - pr: "\u227a", - prcue: "\u227c", - precapprox: "\u2ab7", - prec: "\u227a", - preccurlyeq: "\u227c", - Precedes: "\u227a", - PrecedesEqual: "\u2aaf", - PrecedesSlantEqual: "\u227c", - PrecedesTilde: "\u227e", - preceq: "\u2aaf", - precnapprox: "\u2ab9", - precneqq: "\u2ab5", - precnsim: "\u22e8", - pre: "\u2aaf", - prE: "\u2ab3", - precsim: "\u227e", - prime: "\u2032", - Prime: "\u2033", - primes: "\u2119", - prnap: "\u2ab9", - prnE: "\u2ab5", - prnsim: "\u22e8", - prod: "\u220f", - Product: "\u220f", - profalar: "\u232e", - profline: "\u2312", - profsurf: "\u2313", - prop: "\u221d", - Proportional: "\u221d", - Proportion: "\u2237", - propto: "\u221d", - prsim: "\u227e", - prurel: "\u22b0", - Pscr: "\ud835\udcab", - pscr: "\ud835\udcc5", - Psi: "\u03a8", - psi: "\u03c8", - puncsp: "\u2008", - Qfr: "\ud835\udd14", - qfr: "\ud835\udd2e", - qint: "\u2a0c", - qopf: "\ud835\udd62", - Qopf: "\u211a", - qprime: "\u2057", - Qscr: "\ud835\udcac", - qscr: "\ud835\udcc6", - quaternions: "\u210d", - quatint: "\u2a16", - quest: "?", - questeq: "\u225f", - quot: '"', - QUOT: '"', - rAarr: "\u21db", - race: "\u223d\u0331", - Racute: "\u0154", - racute: "\u0155", - radic: "\u221a", - raemptyv: "\u29b3", - rang: "\u27e9", - Rang: "\u27eb", - rangd: "\u2992", - range: "\u29a5", - rangle: "\u27e9", - raquo: "\xbb", - rarrap: "\u2975", - rarrb: "\u21e5", - rarrbfs: "\u2920", - rarrc: "\u2933", - rarr: "\u2192", - Rarr: "\u21a0", - rArr: "\u21d2", - rarrfs: "\u291e", - rarrhk: "\u21aa", - rarrlp: "\u21ac", - rarrpl: "\u2945", - rarrsim: "\u2974", - Rarrtl: "\u2916", - rarrtl: "\u21a3", - rarrw: "\u219d", - ratail: "\u291a", - rAtail: "\u291c", - ratio: "\u2236", - rationals: 
"\u211a", - rbarr: "\u290d", - rBarr: "\u290f", - RBarr: "\u2910", - rbbrk: "\u2773", - rbrace: "}", - rbrack: "]", - rbrke: "\u298c", - rbrksld: "\u298e", - rbrkslu: "\u2990", - Rcaron: "\u0158", - rcaron: "\u0159", - Rcedil: "\u0156", - rcedil: "\u0157", - rceil: "\u2309", - rcub: "}", - Rcy: "\u0420", - rcy: "\u0440", - rdca: "\u2937", - rdldhar: "\u2969", - rdquo: "\u201d", - rdquor: "\u201d", - rdsh: "\u21b3", - real: "\u211c", - realine: "\u211b", - realpart: "\u211c", - reals: "\u211d", - Re: "\u211c", - rect: "\u25ad", - reg: "\xae", - REG: "\xae", - ReverseElement: "\u220b", - ReverseEquilibrium: "\u21cb", - ReverseUpEquilibrium: "\u296f", - rfisht: "\u297d", - rfloor: "\u230b", - rfr: "\ud835\udd2f", - Rfr: "\u211c", - rHar: "\u2964", - rhard: "\u21c1", - rharu: "\u21c0", - rharul: "\u296c", - Rho: "\u03a1", - rho: "\u03c1", - rhov: "\u03f1", - RightAngleBracket: "\u27e9", - RightArrowBar: "\u21e5", - rightarrow: "\u2192", - RightArrow: "\u2192", - Rightarrow: "\u21d2", - RightArrowLeftArrow: "\u21c4", - rightarrowtail: "\u21a3", - RightCeiling: "\u2309", - RightDoubleBracket: "\u27e7", - RightDownTeeVector: "\u295d", - RightDownVectorBar: "\u2955", - RightDownVector: "\u21c2", - RightFloor: "\u230b", - rightharpoondown: "\u21c1", - rightharpoonup: "\u21c0", - rightleftarrows: "\u21c4", - rightleftharpoons: "\u21cc", - rightrightarrows: "\u21c9", - rightsquigarrow: "\u219d", - RightTeeArrow: "\u21a6", - RightTee: "\u22a2", - RightTeeVector: "\u295b", - rightthreetimes: "\u22cc", - RightTriangleBar: "\u29d0", - RightTriangle: "\u22b3", - RightTriangleEqual: "\u22b5", - RightUpDownVector: "\u294f", - RightUpTeeVector: "\u295c", - RightUpVectorBar: "\u2954", - RightUpVector: "\u21be", - RightVectorBar: "\u2953", - RightVector: "\u21c0", - ring: "\u02da", - risingdotseq: "\u2253", - rlarr: "\u21c4", - rlhar: "\u21cc", - rlm: "\u200f", - rmoustache: "\u23b1", - rmoust: "\u23b1", - rnmid: "\u2aee", - roang: "\u27ed", - roarr: "\u21fe", - robrk: "\u27e7", - ropar: "\u2986", - ropf: "\ud835\udd63", - Ropf: "\u211d", - roplus: "\u2a2e", - rotimes: "\u2a35", - RoundImplies: "\u2970", - rpar: ")", - rpargt: "\u2994", - rppolint: "\u2a12", - rrarr: "\u21c9", - Rrightarrow: "\u21db", - rsaquo: "\u203a", - rscr: "\ud835\udcc7", - Rscr: "\u211b", - rsh: "\u21b1", - Rsh: "\u21b1", - rsqb: "]", - rsquo: "\u2019", - rsquor: "\u2019", - rthree: "\u22cc", - rtimes: "\u22ca", - rtri: "\u25b9", - rtrie: "\u22b5", - rtrif: "\u25b8", - rtriltri: "\u29ce", - RuleDelayed: "\u29f4", - ruluhar: "\u2968", - rx: "\u211e", - Sacute: "\u015a", - sacute: "\u015b", - sbquo: "\u201a", - scap: "\u2ab8", - Scaron: "\u0160", - scaron: "\u0161", - Sc: "\u2abc", - sc: "\u227b", - sccue: "\u227d", - sce: "\u2ab0", - scE: "\u2ab4", - Scedil: "\u015e", - scedil: "\u015f", - Scirc: "\u015c", - scirc: "\u015d", - scnap: "\u2aba", - scnE: "\u2ab6", - scnsim: "\u22e9", - scpolint: "\u2a13", - scsim: "\u227f", - Scy: "\u0421", - scy: "\u0441", - sdotb: "\u22a1", - sdot: "\u22c5", - sdote: "\u2a66", - searhk: "\u2925", - searr: "\u2198", - seArr: "\u21d8", - searrow: "\u2198", - sect: "\xa7", - semi: ";", - seswar: "\u2929", - setminus: "\u2216", - setmn: "\u2216", - sext: "\u2736", - Sfr: "\ud835\udd16", - sfr: "\ud835\udd30", - sfrown: "\u2322", - sharp: "\u266f", - SHCHcy: "\u0429", - shchcy: "\u0449", - SHcy: "\u0428", - shcy: "\u0448", - ShortDownArrow: "\u2193", - ShortLeftArrow: "\u2190", - shortmid: "\u2223", - shortparallel: "\u2225", - ShortRightArrow: "\u2192", - ShortUpArrow: "\u2191", - shy: "\xad", - Sigma: 
"\u03a3", - sigma: "\u03c3", - sigmaf: "\u03c2", - sigmav: "\u03c2", - sim: "\u223c", - simdot: "\u2a6a", - sime: "\u2243", - simeq: "\u2243", - simg: "\u2a9e", - simgE: "\u2aa0", - siml: "\u2a9d", - simlE: "\u2a9f", - simne: "\u2246", - simplus: "\u2a24", - simrarr: "\u2972", - slarr: "\u2190", - SmallCircle: "\u2218", - smallsetminus: "\u2216", - smashp: "\u2a33", - smeparsl: "\u29e4", - smid: "\u2223", - smile: "\u2323", - smt: "\u2aaa", - smte: "\u2aac", - smtes: "\u2aac\ufe00", - SOFTcy: "\u042c", - softcy: "\u044c", - solbar: "\u233f", - solb: "\u29c4", - sol: "/", - Sopf: "\ud835\udd4a", - sopf: "\ud835\udd64", - spades: "\u2660", - spadesuit: "\u2660", - spar: "\u2225", - sqcap: "\u2293", - sqcaps: "\u2293\ufe00", - sqcup: "\u2294", - sqcups: "\u2294\ufe00", - Sqrt: "\u221a", - sqsub: "\u228f", - sqsube: "\u2291", - sqsubset: "\u228f", - sqsubseteq: "\u2291", - sqsup: "\u2290", - sqsupe: "\u2292", - sqsupset: "\u2290", - sqsupseteq: "\u2292", - square: "\u25a1", - Square: "\u25a1", - SquareIntersection: "\u2293", - SquareSubset: "\u228f", - SquareSubsetEqual: "\u2291", - SquareSuperset: "\u2290", - SquareSupersetEqual: "\u2292", - SquareUnion: "\u2294", - squarf: "\u25aa", - squ: "\u25a1", - squf: "\u25aa", - srarr: "\u2192", - Sscr: "\ud835\udcae", - sscr: "\ud835\udcc8", - ssetmn: "\u2216", - ssmile: "\u2323", - sstarf: "\u22c6", - Star: "\u22c6", - star: "\u2606", - starf: "\u2605", - straightepsilon: "\u03f5", - straightphi: "\u03d5", - strns: "\xaf", - sub: "\u2282", - Sub: "\u22d0", - subdot: "\u2abd", - subE: "\u2ac5", - sube: "\u2286", - subedot: "\u2ac3", - submult: "\u2ac1", - subnE: "\u2acb", - subne: "\u228a", - subplus: "\u2abf", - subrarr: "\u2979", - subset: "\u2282", - Subset: "\u22d0", - subseteq: "\u2286", - subseteqq: "\u2ac5", - SubsetEqual: "\u2286", - subsetneq: "\u228a", - subsetneqq: "\u2acb", - subsim: "\u2ac7", - subsub: "\u2ad5", - subsup: "\u2ad3", - succapprox: "\u2ab8", - succ: "\u227b", - succcurlyeq: "\u227d", - Succeeds: "\u227b", - SucceedsEqual: "\u2ab0", - SucceedsSlantEqual: "\u227d", - SucceedsTilde: "\u227f", - succeq: "\u2ab0", - succnapprox: "\u2aba", - succneqq: "\u2ab6", - succnsim: "\u22e9", - succsim: "\u227f", - SuchThat: "\u220b", - sum: "\u2211", - Sum: "\u2211", - sung: "\u266a", - sup1: "\xb9", - sup2: "\xb2", - sup3: "\xb3", - sup: "\u2283", - Sup: "\u22d1", - supdot: "\u2abe", - supdsub: "\u2ad8", - supE: "\u2ac6", - supe: "\u2287", - supedot: "\u2ac4", - Superset: "\u2283", - SupersetEqual: "\u2287", - suphsol: "\u27c9", - suphsub: "\u2ad7", - suplarr: "\u297b", - supmult: "\u2ac2", - supnE: "\u2acc", - supne: "\u228b", - supplus: "\u2ac0", - supset: "\u2283", - Supset: "\u22d1", - supseteq: "\u2287", - supseteqq: "\u2ac6", - supsetneq: "\u228b", - supsetneqq: "\u2acc", - supsim: "\u2ac8", - supsub: "\u2ad4", - supsup: "\u2ad6", - swarhk: "\u2926", - swarr: "\u2199", - swArr: "\u21d9", - swarrow: "\u2199", - swnwar: "\u292a", - szlig: "\xdf", - Tab: "\t", - target: "\u2316", - Tau: "\u03a4", - tau: "\u03c4", - tbrk: "\u23b4", - Tcaron: "\u0164", - tcaron: "\u0165", - Tcedil: "\u0162", - tcedil: "\u0163", - Tcy: "\u0422", - tcy: "\u0442", - tdot: "\u20db", - telrec: "\u2315", - Tfr: "\ud835\udd17", - tfr: "\ud835\udd31", - there4: "\u2234", - therefore: "\u2234", - Therefore: "\u2234", - Theta: "\u0398", - theta: "\u03b8", - thetasym: "\u03d1", - thetav: "\u03d1", - thickapprox: "\u2248", - thicksim: "\u223c", - ThickSpace: "\u205f\u200a", - ThinSpace: "\u2009", - thinsp: "\u2009", - thkap: "\u2248", - thksim: "\u223c", - THORN: 
"\xde", - thorn: "\xfe", - tilde: "\u02dc", - Tilde: "\u223c", - TildeEqual: "\u2243", - TildeFullEqual: "\u2245", - TildeTilde: "\u2248", - timesbar: "\u2a31", - timesb: "\u22a0", - times: "\xd7", - timesd: "\u2a30", - tint: "\u222d", - toea: "\u2928", - topbot: "\u2336", - topcir: "\u2af1", - top: "\u22a4", - Topf: "\ud835\udd4b", - topf: "\ud835\udd65", - topfork: "\u2ada", - tosa: "\u2929", - tprime: "\u2034", - trade: "\u2122", - TRADE: "\u2122", - triangle: "\u25b5", - triangledown: "\u25bf", - triangleleft: "\u25c3", - trianglelefteq: "\u22b4", - triangleq: "\u225c", - triangleright: "\u25b9", - trianglerighteq: "\u22b5", - tridot: "\u25ec", - trie: "\u225c", - triminus: "\u2a3a", - TripleDot: "\u20db", - triplus: "\u2a39", - trisb: "\u29cd", - tritime: "\u2a3b", - trpezium: "\u23e2", - Tscr: "\ud835\udcaf", - tscr: "\ud835\udcc9", - TScy: "\u0426", - tscy: "\u0446", - TSHcy: "\u040b", - tshcy: "\u045b", - Tstrok: "\u0166", - tstrok: "\u0167", - twixt: "\u226c", - twoheadleftarrow: "\u219e", - twoheadrightarrow: "\u21a0", - Uacute: "\xda", - uacute: "\xfa", - uarr: "\u2191", - Uarr: "\u219f", - uArr: "\u21d1", - Uarrocir: "\u2949", - Ubrcy: "\u040e", - ubrcy: "\u045e", - Ubreve: "\u016c", - ubreve: "\u016d", - Ucirc: "\xdb", - ucirc: "\xfb", - Ucy: "\u0423", - ucy: "\u0443", - udarr: "\u21c5", - Udblac: "\u0170", - udblac: "\u0171", - udhar: "\u296e", - ufisht: "\u297e", - Ufr: "\ud835\udd18", - ufr: "\ud835\udd32", - Ugrave: "\xd9", - ugrave: "\xf9", - uHar: "\u2963", - uharl: "\u21bf", - uharr: "\u21be", - uhblk: "\u2580", - ulcorn: "\u231c", - ulcorner: "\u231c", - ulcrop: "\u230f", - ultri: "\u25f8", - Umacr: "\u016a", - umacr: "\u016b", - uml: "\xa8", - UnderBar: "_", - UnderBrace: "\u23df", - UnderBracket: "\u23b5", - UnderParenthesis: "\u23dd", - Union: "\u22c3", - UnionPlus: "\u228e", - Uogon: "\u0172", - uogon: "\u0173", - Uopf: "\ud835\udd4c", - uopf: "\ud835\udd66", - UpArrowBar: "\u2912", - uparrow: "\u2191", - UpArrow: "\u2191", - Uparrow: "\u21d1", - UpArrowDownArrow: "\u21c5", - updownarrow: "\u2195", - UpDownArrow: "\u2195", - Updownarrow: "\u21d5", - UpEquilibrium: "\u296e", - upharpoonleft: "\u21bf", - upharpoonright: "\u21be", - uplus: "\u228e", - UpperLeftArrow: "\u2196", - UpperRightArrow: "\u2197", - upsi: "\u03c5", - Upsi: "\u03d2", - upsih: "\u03d2", - Upsilon: "\u03a5", - upsilon: "\u03c5", - UpTeeArrow: "\u21a5", - UpTee: "\u22a5", - upuparrows: "\u21c8", - urcorn: "\u231d", - urcorner: "\u231d", - urcrop: "\u230e", - Uring: "\u016e", - uring: "\u016f", - urtri: "\u25f9", - Uscr: "\ud835\udcb0", - uscr: "\ud835\udcca", - utdot: "\u22f0", - Utilde: "\u0168", - utilde: "\u0169", - utri: "\u25b5", - utrif: "\u25b4", - uuarr: "\u21c8", - Uuml: "\xdc", - uuml: "\xfc", - uwangle: "\u29a7", - vangrt: "\u299c", - varepsilon: "\u03f5", - varkappa: "\u03f0", - varnothing: "\u2205", - varphi: "\u03d5", - varpi: "\u03d6", - varpropto: "\u221d", - varr: "\u2195", - vArr: "\u21d5", - varrho: "\u03f1", - varsigma: "\u03c2", - varsubsetneq: "\u228a\ufe00", - varsubsetneqq: "\u2acb\ufe00", - varsupsetneq: "\u228b\ufe00", - varsupsetneqq: "\u2acc\ufe00", - vartheta: "\u03d1", - vartriangleleft: "\u22b2", - vartriangleright: "\u22b3", - vBar: "\u2ae8", - Vbar: "\u2aeb", - vBarv: "\u2ae9", - Vcy: "\u0412", - vcy: "\u0432", - vdash: "\u22a2", - vDash: "\u22a8", - Vdash: "\u22a9", - VDash: "\u22ab", - Vdashl: "\u2ae6", - veebar: "\u22bb", - vee: "\u2228", - Vee: "\u22c1", - veeeq: "\u225a", - vellip: "\u22ee", - verbar: "|", - Verbar: "\u2016", - vert: "|", - Vert: "\u2016", - 
VerticalBar: "\u2223", - VerticalLine: "|", - VerticalSeparator: "\u2758", - VerticalTilde: "\u2240", - VeryThinSpace: "\u200a", - Vfr: "\ud835\udd19", - vfr: "\ud835\udd33", - vltri: "\u22b2", - vnsub: "\u2282\u20d2", - vnsup: "\u2283\u20d2", - Vopf: "\ud835\udd4d", - vopf: "\ud835\udd67", - vprop: "\u221d", - vrtri: "\u22b3", - Vscr: "\ud835\udcb1", - vscr: "\ud835\udccb", - vsubnE: "\u2acb\ufe00", - vsubne: "\u228a\ufe00", - vsupnE: "\u2acc\ufe00", - vsupne: "\u228b\ufe00", - Vvdash: "\u22aa", - vzigzag: "\u299a", - Wcirc: "\u0174", - wcirc: "\u0175", - wedbar: "\u2a5f", - wedge: "\u2227", - Wedge: "\u22c0", - wedgeq: "\u2259", - weierp: "\u2118", - Wfr: "\ud835\udd1a", - wfr: "\ud835\udd34", - Wopf: "\ud835\udd4e", - wopf: "\ud835\udd68", - wp: "\u2118", - wr: "\u2240", - wreath: "\u2240", - Wscr: "\ud835\udcb2", - wscr: "\ud835\udccc", - xcap: "\u22c2", - xcirc: "\u25ef", - xcup: "\u22c3", - xdtri: "\u25bd", - Xfr: "\ud835\udd1b", - xfr: "\ud835\udd35", - xharr: "\u27f7", - xhArr: "\u27fa", - Xi: "\u039e", - xi: "\u03be", - xlarr: "\u27f5", - xlArr: "\u27f8", - xmap: "\u27fc", - xnis: "\u22fb", - xodot: "\u2a00", - Xopf: "\ud835\udd4f", - xopf: "\ud835\udd69", - xoplus: "\u2a01", - xotime: "\u2a02", - xrarr: "\u27f6", - xrArr: "\u27f9", - Xscr: "\ud835\udcb3", - xscr: "\ud835\udccd", - xsqcup: "\u2a06", - xuplus: "\u2a04", - xutri: "\u25b3", - xvee: "\u22c1", - xwedge: "\u22c0", - Yacute: "\xdd", - yacute: "\xfd", - YAcy: "\u042f", - yacy: "\u044f", - Ycirc: "\u0176", - ycirc: "\u0177", - Ycy: "\u042b", - ycy: "\u044b", - yen: "\xa5", - Yfr: "\ud835\udd1c", - yfr: "\ud835\udd36", - YIcy: "\u0407", - yicy: "\u0457", - Yopf: "\ud835\udd50", - yopf: "\ud835\udd6a", - Yscr: "\ud835\udcb4", - yscr: "\ud835\udcce", - YUcy: "\u042e", - yucy: "\u044e", - yuml: "\xff", - Yuml: "\u0178", - Zacute: "\u0179", - zacute: "\u017a", - Zcaron: "\u017d", - zcaron: "\u017e", - Zcy: "\u0417", - zcy: "\u0437", - Zdot: "\u017b", - zdot: "\u017c", - zeetrf: "\u2128", - ZeroWidthSpace: "\u200b", - Zeta: "\u0396", - zeta: "\u03b6", - zfr: "\ud835\udd37", - Zfr: "\u2128", - ZHcy: "\u0416", - zhcy: "\u0436", - zigrarr: "\u21dd", - zopf: "\ud835\udd6b", - Zopf: "\u2124", - Zscr: "\ud835\udcb5", - zscr: "\ud835\udccf", - zwj: "\u200d", - zwnj: "\u200c" - }; - /*eslint quotes:0*/ var entities = require$$0; - var regex$4 = 
/[!-#%-\*,-\/:;\?@\[-\]_\{\}\xA1\xA7\xAB\xB6\xB7\xBB\xBF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166D\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4E\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]|\uD800[\uDD00-\uDD02\uDF9F\uDFD0]|\uD801\uDD6F|\uD802[\uDC57\uDD1F\uDD3F\uDE50-\uDE58\uDE7F\uDEF0-\uDEF6\uDF39-\uDF3F\uDF99-\uDF9C]|\uD803[\uDF55-\uDF59]|\uD804[\uDC47-\uDC4D\uDCBB\uDCBC\uDCBE-\uDCC1\uDD40-\uDD43\uDD74\uDD75\uDDC5-\uDDC8\uDDCD\uDDDB\uDDDD-\uDDDF\uDE38-\uDE3D\uDEA9]|\uD805[\uDC4B-\uDC4F\uDC5B\uDC5D\uDCC6\uDDC1-\uDDD7\uDE41-\uDE43\uDE60-\uDE6C\uDF3C-\uDF3E]|\uD806[\uDC3B\uDE3F-\uDE46\uDE9A-\uDE9C\uDE9E-\uDEA2]|\uD807[\uDC41-\uDC45\uDC70\uDC71\uDEF7\uDEF8]|\uD809[\uDC70-\uDC74]|\uD81A[\uDE6E\uDE6F\uDEF5\uDF37-\uDF3B\uDF44]|\uD81B[\uDE97-\uDE9A]|\uD82F\uDC9F|\uD836[\uDE87-\uDE8B]|\uD83A[\uDD5E\uDD5F]/; - var encodeCache = {}; - // Create a lookup array where anything but characters in `chars` string - // and alphanumeric chars is percent-encoded. - - function getEncodeCache(exclude) { - var i, ch, cache = encodeCache[exclude]; - if (cache) { - return cache; - } - cache = encodeCache[exclude] = []; - for (i = 0; i < 128; i++) { - ch = String.fromCharCode(i); - if (/^[0-9a-z]$/i.test(ch)) { - // always allow unencoded alphanumeric characters - cache.push(ch); - } else { - cache.push("%" + ("0" + i.toString(16).toUpperCase()).slice(-2)); - } - } - for (i = 0; i < exclude.length; i++) { - cache[exclude.charCodeAt(i)] = exclude[i]; - } - return cache; - } - // Encode unsafe characters with percent-encoding, skipping already - // encoded sequences. 
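For reference, the removed getEncodeCache builds one 128-entry lookup table per distinct exclude string, so encoding becomes a table index instead of a branch per character. A minimal sketch of the resulting table, assuming the function as defined above (the "-_" argument is just an illustrative exclude list):

var cache = getEncodeCache("-_");
cache[0x61];  // "a"   - alphanumerics always pass through unencoded
cache[0x2d];  // "-"   - characters from the exclude list pass through
cache[0x20];  // "%20" - every other ASCII code maps to an uppercase percent-escape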
- - // - string - string to encode - // - exclude - list of characters to ignore (in addition to a-zA-Z0-9) - // - keepEscaped - don't encode '%' in a correct escape sequence (default: true) - - function encode$2(string, exclude, keepEscaped) { - var i, l, code, nextCode, cache, result = ""; - if (typeof exclude !== "string") { - // encode(string, keepEscaped) - keepEscaped = exclude; - exclude = encode$2.defaultChars; - } - if (typeof keepEscaped === "undefined") { - keepEscaped = true; - } - cache = getEncodeCache(exclude); - for (i = 0, l = string.length; i < l; i++) { - code = string.charCodeAt(i); - if (keepEscaped && code === 37 /* % */ && i + 2 < l) { - if (/^[0-9a-f]{2}$/i.test(string.slice(i + 1, i + 3))) { - result += string.slice(i, i + 3); - i += 2; - continue; - } - } - if (code < 128) { - result += cache[code]; - continue; - } - if (code >= 55296 && code <= 57343) { - if (code >= 55296 && code <= 56319 && i + 1 < l) { - nextCode = string.charCodeAt(i + 1); - if (nextCode >= 56320 && nextCode <= 57343) { - result += encodeURIComponent(string[i] + string[i + 1]); - i++; - continue; - } - } - result += "%EF%BF%BD"; - continue; - } - result += encodeURIComponent(string[i]); - } - return result; - } - encode$2.defaultChars = ";/?:@&=+$,-_.!~*'()#"; - encode$2.componentChars = "-_.!~*'()"; - var encode_1 = encode$2; - /* eslint-disable no-bitwise */ var decodeCache = {}; - function getDecodeCache(exclude) { - var i, ch, cache = decodeCache[exclude]; - if (cache) { - return cache; - } - cache = decodeCache[exclude] = []; - for (i = 0; i < 128; i++) { - ch = String.fromCharCode(i); - cache.push(ch); - } - for (i = 0; i < exclude.length; i++) { - ch = exclude.charCodeAt(i); - cache[ch] = "%" + ("0" + ch.toString(16).toUpperCase()).slice(-2); - } - return cache; - } - // Decode percent-encoded string. 
- - function decode$2(string, exclude) { - var cache; - if (typeof exclude !== "string") { - exclude = decode$2.defaultChars; - } - cache = getDecodeCache(exclude); - return string.replace(/(%[a-f0-9]{2})+/gi, (function(seq) { - var i, l, b1, b2, b3, b4, chr, result = ""; - for (i = 0, l = seq.length; i < l; i += 3) { - b1 = parseInt(seq.slice(i + 1, i + 3), 16); - if (b1 < 128) { - result += cache[b1]; - continue; - } - if ((b1 & 224) === 192 && i + 3 < l) { - // 110xxxxx 10xxxxxx - b2 = parseInt(seq.slice(i + 4, i + 6), 16); - if ((b2 & 192) === 128) { - chr = b1 << 6 & 1984 | b2 & 63; - if (chr < 128) { - result += "\ufffd\ufffd"; - } else { - result += String.fromCharCode(chr); - } - i += 3; - continue; - } - } - if ((b1 & 240) === 224 && i + 6 < l) { - // 1110xxxx 10xxxxxx 10xxxxxx - b2 = parseInt(seq.slice(i + 4, i + 6), 16); - b3 = parseInt(seq.slice(i + 7, i + 9), 16); - if ((b2 & 192) === 128 && (b3 & 192) === 128) { - chr = b1 << 12 & 61440 | b2 << 6 & 4032 | b3 & 63; - if (chr < 2048 || chr >= 55296 && chr <= 57343) { - result += "\ufffd\ufffd\ufffd"; - } else { - result += String.fromCharCode(chr); - } - i += 6; - continue; - } - } - if ((b1 & 248) === 240 && i + 9 < l) { - // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - b2 = parseInt(seq.slice(i + 4, i + 6), 16); - b3 = parseInt(seq.slice(i + 7, i + 9), 16); - b4 = parseInt(seq.slice(i + 10, i + 12), 16); - if ((b2 & 192) === 128 && (b3 & 192) === 128 && (b4 & 192) === 128) { - chr = b1 << 18 & 1835008 | b2 << 12 & 258048 | b3 << 6 & 4032 | b4 & 63; - if (chr < 65536 || chr > 1114111) { - result += "\ufffd\ufffd\ufffd\ufffd"; - } else { - chr -= 65536; - result += String.fromCharCode(55296 + (chr >> 10), 56320 + (chr & 1023)); - } - i += 9; - continue; - } - } - result += "\ufffd"; - } - return result; - })); - } - decode$2.defaultChars = ";/?:@&=+$,#"; - decode$2.componentChars = ""; - var decode_1 = decode$2; - var format$1 = function format(url) { - var result = ""; - result += url.protocol || ""; - result += url.slashes ? "//" : ""; - result += url.auth ? url.auth + "@" : ""; - if (url.hostname && url.hostname.indexOf(":") !== -1) { - // ipv6 address - result += "[" + url.hostname + "]"; - } else { - result += url.hostname || ""; - } - result += url.port ? ":" + url.port : ""; - result += url.pathname || ""; - result += url.search || ""; - result += url.hash || ""; - return result; - }; - // Copyright Joyent, Inc. and other Node contributors. - - // Changes from joyent/node: - - // 1. No leading slash in paths, - // e.g. in `url.parse('http://foo?bar')` pathname is ``, not `/` - - // 2. Backslashes are not replaced with slashes, - // so `http:\\example.org\` is treated like a relative path - - // 3. Trailing colon is treated like a part of the path, - // i.e. in `http://example.org:foo` pathname is `:foo` - - // 4. Nothing is URL-encoded in the resulting object, - // (in joyent/node some chars in auth and paths are encoded) - - // 5. `url.parse()` does not have `parseQueryString` argument - - // 6. Removed extraneous result properties: `host`, `path`, `query`, etc., - // which can be constructed using other parts of the url. - - function Url() { - this.protocol = null; - this.slashes = null; - this.auth = null; - this.port = null; - this.hostname = null; - this.hash = null; - this.search = null; - this.pathname = null; - } - // Reference: RFC 3986, RFC 1808, RFC 2396 - // define these here so at least they only have to be - // compiled once on the first module load.
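Taken together, the removed encode$2/decode$2 pair implements UTF-8-aware percent-coding: encode$2 keeps well-formed escapes when keepEscaped is set and substitutes %EF%BF%BD (U+FFFD) for lone surrogates, while decode$2 reassembles multi-byte sequences and rejects overlong or out-of-range forms. An illustrative sketch, assuming both functions as defined above; the sample strings are hypothetical:

encode$2("/path to/file");  // "/path%20to/file" - '/' is in encode$2.defaultChars
encode$2("100%25");         // "100%25" - a well-formed escape is kept, not double-encoded
decode$2("%E2%82%AC");      // "\u20ac" - a three-byte UTF-8 sequence is reassembled
decode$2("%C0%AF");         // "\ufffd\ufffd" - an overlong encoding yields replacement chars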
- var protocolPattern = /^([a-z0-9.+-]+:)/i, portPattern = /:[0-9]*$/, - // Special case for a simple path URL - simplePathPattern = /^(\/\/?(?!\/)[^\?\s]*)(\?[^\s]*)?$/, - // RFC 2396: characters reserved for delimiting URLs. - // We actually just auto-escape these. - delims = [ "<", ">", '"', "`", " ", "\r", "\n", "\t" ], - // RFC 2396: characters not allowed for various reasons. - unwise = [ "{", "}", "|", "\\", "^", "`" ].concat(delims), - // Allowed by RFCs, but cause of XSS attacks. Always escape these. - autoEscape = [ "'" ].concat(unwise), - // Characters that are never ever allowed in a hostname. - // Note that any invalid chars are also handled, but these - // are the ones that are *expected* to be seen, so we fast-path - // them. - nonHostChars = [ "%", "/", "?", ";", "#" ].concat(autoEscape), hostEndingChars = [ "/", "?", "#" ], hostnameMaxLen = 255, hostnamePartPattern = /^[+a-z0-9A-Z_-]{0,63}$/, hostnamePartStart = /^([+a-z0-9A-Z_-]{0,63})(.*)$/, - // protocols that can allow "unsafe" and "unwise" chars. - /* eslint-disable no-script-url */ - // protocols that never have a hostname. - hostlessProtocol = { - javascript: true, - "javascript:": true - }, - // protocols that always contain a // bit. - slashedProtocol = { - http: true, - https: true, - ftp: true, - gopher: true, - file: true, - "http:": true, - "https:": true, - "ftp:": true, - "gopher:": true, - "file:": true - }; - /* eslint-enable no-script-url */ function urlParse(url, slashesDenoteHost) { - if (url && url instanceof Url) { - return url; - } - var u = new Url; - u.parse(url, slashesDenoteHost); - return u; - } - Url.prototype.parse = function(url, slashesDenoteHost) { - var i, l, lowerProto, hec, slashes, rest = url; - // trim before proceeding. - // This is to support parse stuff like " http://foo.com \n" - rest = rest.trim(); - if (!slashesDenoteHost && url.split("#").length === 1) { - // Try fast path regexp - var simplePath = simplePathPattern.exec(rest); - if (simplePath) { - this.pathname = simplePath[1]; - if (simplePath[2]) { - this.search = simplePath[2]; - } - return this; - } - } - var proto = protocolPattern.exec(rest); - if (proto) { - proto = proto[0]; - lowerProto = proto.toLowerCase(); - this.protocol = proto; - rest = rest.substr(proto.length); - } - // figure out if it's got a host - // user@server is *always* interpreted as a hostname, and url - // resolution will treat //foo/bar as host=foo,path=bar because that's - // how the browser resolves relative URLs. - if (slashesDenoteHost || proto || rest.match(/^\/\/[^@\/]+@[^@\/]+/)) { - slashes = rest.substr(0, 2) === "//"; - if (slashes && !(proto && hostlessProtocol[proto])) { - rest = rest.substr(2); - this.slashes = true; - } - } - if (!hostlessProtocol[proto] && (slashes || proto && !slashedProtocol[proto])) { - // there's a hostname. - // the first instance of /, ?, ;, or # ends the host. - // If there is an @ in the hostname, then non-host chars *are* allowed - // to the left of the last @ sign, unless some host-ending character - // comes *before* the @-sign. - // URLs are obnoxious. - // ex: - // http://a@b@c/ => user:a@b host:c - // http://a@b?@c => user:a host:c path:/?@c - // v0.12 TODO(isaacs): This is not quite how Chrome does things. - // Review our test case against browsers more comprehensively. 
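The removed parser deviates from Node's url.parse in the six ways enumerated in the comments above. A hypothetical call showing which fields it fills in; the URL and the expected values are illustrative, assuming the implementation above:

var u = urlParse("http://user@example.org:8080/a/b?q=1#frag");
// u.protocol === "http:", u.auth === "user", u.hostname === "example.org",
// u.port === "8080", u.pathname === "/a/b", u.search === "?q=1", u.hash === "#frag"
urlParse("//foo/bar", true).hostname;  // "foo" - slashesDenoteHost treats //x as a host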
- // find the first instance of any hostEndingChars - var hostEnd = -1; - for (i = 0; i < hostEndingChars.length; i++) { - hec = rest.indexOf(hostEndingChars[i]); - if (hec !== -1 && (hostEnd === -1 || hec < hostEnd)) { - hostEnd = hec; - } - } - // at this point, either we have an explicit point where the - // auth portion cannot go past, or the last @ char is the decider. - var auth, atSign; - if (hostEnd === -1) { - // atSign can be anywhere. - atSign = rest.lastIndexOf("@"); - } else { - // atSign must be in auth portion. - // http://a@b/c@d => host:b auth:a path:/c@d - atSign = rest.lastIndexOf("@", hostEnd); - } - // Now we have a portion which is definitely the auth. - // Pull that off. - if (atSign !== -1) { - auth = rest.slice(0, atSign); - rest = rest.slice(atSign + 1); - this.auth = auth; - } - // the host is the remaining to the left of the first non-host char - hostEnd = -1; - for (i = 0; i < nonHostChars.length; i++) { - hec = rest.indexOf(nonHostChars[i]); - if (hec !== -1 && (hostEnd === -1 || hec < hostEnd)) { - hostEnd = hec; - } - } - // if we still have not hit it, then the entire thing is a host. - if (hostEnd === -1) { - hostEnd = rest.length; - } - if (rest[hostEnd - 1] === ":") { - hostEnd--; - } - var host = rest.slice(0, hostEnd); - rest = rest.slice(hostEnd); - // pull out port. - this.parseHost(host); - // we've indicated that there is a hostname, - // so even if it's empty, it has to be present. - this.hostname = this.hostname || ""; - // if hostname begins with [ and ends with ] - // assume that it's an IPv6 address. - var ipv6Hostname = this.hostname[0] === "[" && this.hostname[this.hostname.length - 1] === "]"; - // validate a little. - if (!ipv6Hostname) { - var hostparts = this.hostname.split(/\./); - for (i = 0, l = hostparts.length; i < l; i++) { - var part = hostparts[i]; - if (!part) { - continue; - } - if (!part.match(hostnamePartPattern)) { - var newpart = ""; - for (var j = 0, k = part.length; j < k; j++) { - if (part.charCodeAt(j) > 127) { - // we replace non-ASCII char with a temporary placeholder - // we need this to make sure size of hostname is not - // broken by replacing non-ASCII by nothing - newpart += "x"; - } else { - newpart += part[j]; - } - } - // we test again with ASCII char only - if (!newpart.match(hostnamePartPattern)) { - var validParts = hostparts.slice(0, i); - var notHost = hostparts.slice(i + 1); - var bit = part.match(hostnamePartStart); - if (bit) { - validParts.push(bit[1]); - notHost.unshift(bit[2]); - } - if (notHost.length) { - rest = notHost.join(".") + rest; - } - this.hostname = validParts.join("."); - break; - } - } - } - } - if (this.hostname.length > hostnameMaxLen) { - this.hostname = ""; - } - // strip [ and ] from the hostname - // the host field still retains them, though - if (ipv6Hostname) { - this.hostname = this.hostname.substr(1, this.hostname.length - 2); - } - } - // chop off from the tail first. - var hash = rest.indexOf("#"); - if (hash !== -1) { - // got a fragment string. 
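The bracket handling above is easiest to see with an IPv6 literal: the brackets are stripped from hostname during parsing, and format$1 re-adds them whenever the hostname contains a ':'. A sketch assuming the removed code; the address is illustrative:

var v = urlParse("http://[::1]:8080/");
// v.hostname === "::1", v.port === "8080", v.pathname === "/"
format$1(v);  // "http://[::1]:8080/" - the ':' in the hostname triggers re-bracketing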
- this.hash = rest.substr(hash); - rest = rest.slice(0, hash); - } - var qm = rest.indexOf("?"); - if (qm !== -1) { - this.search = rest.substr(qm); - rest = rest.slice(0, qm); - } - if (rest) { - this.pathname = rest; - } - if (slashedProtocol[lowerProto] && this.hostname && !this.pathname) { - this.pathname = ""; - } - return this; - }; - Url.prototype.parseHost = function(host) { - var port = portPattern.exec(host); - if (port) { - port = port[0]; - if (port !== ":") { - this.port = port.substr(1); - } - host = host.substr(0, host.length - port.length); - } - if (host) { - this.hostname = host; - } - }; - var parse$1 = urlParse; - var encode$1 = encode_1; - var decode$1 = decode_1; - var format = format$1; - var parse = parse$1; - var mdurl = { - encode: encode$1, - decode: decode$1, - format: format, - parse: parse - }; - var regex$3 = /[\0-\uD7FF\uE000-\uFFFF]|[\uD800-\uDBFF][\uDC00-\uDFFF]|[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?:[^\uD800-\uDBFF]|^)[\uDC00-\uDFFF]/; - var regex$2 = /[\0-\x1F\x7F-\x9F]/; - var regex$1 = /[\xAD\u0600-\u0605\u061C\u06DD\u070F\u08E2\u180E\u200B-\u200F\u202A-\u202E\u2060-\u2064\u2066-\u206F\uFEFF\uFFF9-\uFFFB]|\uD804[\uDCBD\uDCCD]|\uD82F[\uDCA0-\uDCA3]|\uD834[\uDD73-\uDD7A]|\uDB40[\uDC01\uDC20-\uDC7F]/; - var regex = /[ \xA0\u1680\u2000-\u200A\u2028\u2029\u202F\u205F\u3000]/; - var Any = regex$3; - var Cc = regex$2; - var Cf = regex$1; - var P = regex$4; - var Z = regex; - var uc_micro = { - Any: Any, - Cc: Cc, - Cf: Cf, - P: P, - Z: Z - }; - var utils = createCommonjsModule((function(module, exports) { - function _class(obj) { - return Object.prototype.toString.call(obj); - } - function isString(obj) { - return _class(obj) === "[object String]"; - } - var _hasOwnProperty = Object.prototype.hasOwnProperty; - function has(object, key) { - return _hasOwnProperty.call(object, key); - } - // Merge objects - - function assign(obj /*from1, from2, from3, ...*/) { - var sources = Array.prototype.slice.call(arguments, 1); - sources.forEach((function(source) { - if (!source) { - return; - } - if (typeof source !== "object") { - throw new TypeError(source + "must be object"); - } - Object.keys(source).forEach((function(key) { - obj[key] = source[key]; - })); - })); - return obj; - } - // Remove element from array and put another array at those position. 
- // Useful for some operations with tokens - function arrayReplaceAt(src, pos, newElements) { - return [].concat(src.slice(0, pos), newElements, src.slice(pos + 1)); - } - //////////////////////////////////////////////////////////////////////////////// - function isValidEntityCode(c) { - /*eslint no-bitwise:0*/ - // broken sequence - if (c >= 55296 && c <= 57343) { - return false; - } - // never used - if (c >= 64976 && c <= 65007) { - return false; - } - if ((c & 65535) === 65535 || (c & 65535) === 65534) { - return false; - } - // control codes - if (c >= 0 && c <= 8) { - return false; - } - if (c === 11) { - return false; - } - if (c >= 14 && c <= 31) { - return false; - } - if (c >= 127 && c <= 159) { - return false; - } - // out of range - if (c > 1114111) { - return false; - } - return true; - } - function fromCodePoint(c) { - /*eslint no-bitwise:0*/ - if (c > 65535) { - c -= 65536; - var surrogate1 = 55296 + (c >> 10), surrogate2 = 56320 + (c & 1023); - return String.fromCharCode(surrogate1, surrogate2); - } - return String.fromCharCode(c); - } - var UNESCAPE_MD_RE = /\\([!"#$%&'()*+,\-.\/:;<=>?@[\\\]^_`{|}~])/g; - var ENTITY_RE = /&([a-z#][a-z0-9]{1,31});/gi; - var UNESCAPE_ALL_RE = new RegExp(UNESCAPE_MD_RE.source + "|" + ENTITY_RE.source, "gi"); - var DIGITAL_ENTITY_TEST_RE = /^#((?:x[a-f0-9]{1,8}|[0-9]{1,8}))$/i; - function replaceEntityPattern(match, name) { - var code; - if (has(entities, name)) { - return entities[name]; - } - if (name.charCodeAt(0) === 35 /* # */ && DIGITAL_ENTITY_TEST_RE.test(name)) { - code = name[1].toLowerCase() === "x" ? parseInt(name.slice(2), 16) : parseInt(name.slice(1), 10); - if (isValidEntityCode(code)) { - return fromCodePoint(code); - } - } - return match; - } - /*function replaceEntities(str) { - if (str.indexOf('&') < 0) { return str; } - - return str.replace(ENTITY_RE, replaceEntityPattern); - }*/ function unescapeMd(str) { - if (str.indexOf("\\") < 0) { - return str; - } - return str.replace(UNESCAPE_MD_RE, "$1"); - } - function unescapeAll(str) { - if (str.indexOf("\\") < 0 && str.indexOf("&") < 0) { - return str; - } - return str.replace(UNESCAPE_ALL_RE, (function(match, escaped, entity) { - if (escaped) { - return escaped; - } - return replaceEntityPattern(match, entity); - })); - } - //////////////////////////////////////////////////////////////////////////////// - var HTML_ESCAPE_TEST_RE = /[&<>"]/; - var HTML_ESCAPE_REPLACE_RE = /[&<>"]/g; - var HTML_REPLACEMENTS = { - "&": "&amp;", - "<": "&lt;", - ">": "&gt;", - '"': "&quot;" - }; - function replaceUnsafeChar(ch) { - return HTML_REPLACEMENTS[ch]; - } - function escapeHtml(str) { - if (HTML_ESCAPE_TEST_RE.test(str)) { - return str.replace(HTML_ESCAPE_REPLACE_RE, replaceUnsafeChar); - } - return str; - } - //////////////////////////////////////////////////////////////////////////////// - var REGEXP_ESCAPE_RE = /[.?*+^$[\]\\(){}|-]/g; - function escapeRE(str) { - return str.replace(REGEXP_ESCAPE_RE, "\\$&"); - } - //////////////////////////////////////////////////////////////////////////////// - function isSpace(code) { - switch (code) { - case 9: - case 32: - return true; - } - return false; - } - // Zs (unicode class) || [\t\f\v\r\n] - function isWhiteSpace(code) { - if (code >= 8192 && code <= 8202) { - return true; - } - switch (code) { - case 9: - // \t - case 10: - // \n - case 11: - // \v - case 12: - // \f - case 13: - // \r - case 32: - case 160: - case 5760: - case 8239: - case 8287: - case 12288: - return true; - } - return false; - } -
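The unescape helpers above combine backslash-escape removal with HTML entity decoding, falling back to the raw text whenever a numeric reference fails isValidEntityCode. Illustrative behaviour, assuming the removed utils module; the inputs are hypothetical:

unescapeMd("\\*not emphasized\\*");     // "*not emphasized*"
unescapeAll("AT&amp;T &#x2665; &#0;");  // "AT&T \u2665 &#0;" - U+0000 is invalid, so kept verbatim
fromCodePoint(0x1f600);                 // "\ud83d\ude00" - astral code points become surrogate pairs
escapeHtml('a < b & "c"');              // "a &lt; b &amp; &quot;c&quot;"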
//////////////////////////////////////////////////////////////////////////////// - /*eslint-disable max-len*/ - // Currently without astral characters support. - function isPunctChar(ch) { - return regex$4.test(ch); - } - // Markdown ASCII punctuation characters. - - // !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~ - // http://spec.commonmark.org/0.15/#ascii-punctuation-character - - // Don't confuse with unicode punctuation !!! It lacks some chars in ascii range. - - function isMdAsciiPunct(ch) { - switch (ch) { - case 33 /* ! */ : - case 34 /* " */ : - case 35 /* # */ : - case 36 /* $ */ : - case 37 /* % */ : - case 38 /* & */ : - case 39 /* ' */ : - case 40 /* ( */ : - case 41 /* ) */ : - case 42 /* * */ : - case 43 /* + */ : - case 44 /* , */ : - case 45 /* - */ : - case 46 /* . */ : - case 47 /* / */ : - case 58 /* : */ : - case 59 /* ; */ : - case 60 /* < */ : - case 61 /* = */ : - case 62 /* > */ : - case 63 /* ? */ : - case 64 /* @ */ : - case 91 /* [ */ : - case 92 /* \ */ : - case 93 /* ] */ : - case 94 /* ^ */ : - case 95 /* _ */ : - case 96 /* ` */ : - case 123 /* { */ : - case 124 /* | */ : - case 125 /* } */ : - case 126 /* ~ */ : - return true; - - default: - return false; - } - } - // Helper to unify [reference labels]. - - function normalizeReference(str) { - // Trim and collapse whitespace - str = str.trim().replace(/\s+/g, " "); - // In node v10 'ẞ'.toLowerCase() === 'Ṿ', which is presumed to be a bug - // fixed in v12 (couldn't find any details). - - // So treat this one as a special case - // (remove this when node v10 is no longer supported). - - if ("\u1e9e".toLowerCase() === "\u1e7e") { - str = str.replace(/\u1e9e/g, "\xdf"); - } - // .toLowerCase().toUpperCase() should get rid of all differences - // between letter variants. - - // Simple .toLowerCase() doesn't normalize 125 code points correctly, - // and .toUpperCase doesn't normalize 6 of them (list of exceptions: - // İ, ϴ, ẞ, Ω, K, Å - those are already uppercased, but have differently - // uppercased versions). - - // Here's an example showing how it happens. Let's take the greek letter theta: - // uppercase U+0398 (Θ), U+03f4 (ϴ) and lowercase U+03b8 (θ), U+03d1 (ϑ) - - // Unicode entries: - // 0398;GREEK CAPITAL LETTER THETA;Lu;0;L;;;;;N;;;;03B8; - // 03B8;GREEK SMALL LETTER THETA;Ll;0;L;;;;;N;;;0398;;0398 - // 03D1;GREEK THETA SYMBOL;Ll;0;L; 03B8;;;;N;GREEK SMALL LETTER SCRIPT THETA;;0398;;0398 - // 03F4;GREEK CAPITAL THETA SYMBOL;Lu;0;L; 0398;;;;N;;;;03B8; - - // Case-insensitive comparison should treat all of them as equivalent. - - // But .toLowerCase() doesn't change ϑ (it's already lowercase), - // and .toUpperCase() doesn't change ϴ (already uppercase). - - // Applying first lower then upper case normalizes any character: - // '\u0398\u03f4\u03b8\u03d1'.toLowerCase().toUpperCase() === '\u0398\u0398\u0398\u0398' - - // Note: this is equivalent to unicode case folding; unicode normalization - // is a different step that is not required here. - - // Final result should be uppercased, because it's later stored in an object - // (this avoids a conflict with Object.prototype members, - // most notably, `__proto__`) - - return str.toLowerCase().toUpperCase(); - } - //////////////////////////////////////////////////////////////////////////////// - // Re-export libraries commonly used in both markdown-it and its plugins, - // so plugins won't have to depend on them explicitly, which reduces their - // bundled size (e.g. a browser build).
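- // (For instance, normalizeReference above maps both "  Foo   BAR " and "foo bar" - // to "FOO BAR", so differently-written reference labels compare equal.) -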
- - exports.lib = {}; - exports.lib.mdurl = mdurl; - exports.lib.ucmicro = uc_micro; - exports.assign = assign; - exports.isString = isString; - exports.has = has; - exports.unescapeMd = unescapeMd; - exports.unescapeAll = unescapeAll; - exports.isValidEntityCode = isValidEntityCode; - exports.fromCodePoint = fromCodePoint; - // exports.replaceEntities = replaceEntities; - exports.escapeHtml = escapeHtml; - exports.arrayReplaceAt = arrayReplaceAt; - exports.isSpace = isSpace; - exports.isWhiteSpace = isWhiteSpace; - exports.isMdAsciiPunct = isMdAsciiPunct; - exports.isPunctChar = isPunctChar; - exports.escapeRE = escapeRE; - exports.normalizeReference = normalizeReference; - })); - // Parse link label - var parse_link_label = function parseLinkLabel(state, start, disableNested) { - var level, found, marker, prevPos, labelEnd = -1, max = state.posMax, oldPos = state.pos; - state.pos = start + 1; - level = 1; - while (state.pos < max) { - marker = state.src.charCodeAt(state.pos); - if (marker === 93 /* ] */) { - level--; - if (level === 0) { - found = true; - break; - } - } - prevPos = state.pos; - state.md.inline.skipToken(state); - if (marker === 91 /* [ */) { - if (prevPos === state.pos - 1) { - // increase level if we find text `[`, which is not a part of any token - level++; - } else if (disableNested) { - state.pos = oldPos; - return -1; - } - } - } - if (found) { - labelEnd = state.pos; - } - // restore old state - state.pos = oldPos; - return labelEnd; - }; - var unescapeAll$2 = utils.unescapeAll; - var parse_link_destination = function parseLinkDestination(str, start, max) { - var code, level, pos = start, result = { - ok: false, - pos: 0, - lines: 0, - str: "" - }; - if (str.charCodeAt(pos) === 60 /* < */) { - pos++; - while (pos < max) { - code = str.charCodeAt(pos); - if (code === 10 /* \n */) { - return result; - } - if (code === 60 /* < */) { - return result; - } - if (code === 62 /* > */) { - result.pos = pos + 1; - result.str = unescapeAll$2(str.slice(start + 1, pos)); - result.ok = true; - return result; - } - if (code === 92 /* \ */ && pos + 1 < max) { - pos += 2; - continue; - } - pos++; - } - // no closing '>' - return result; - } - // this should be ... } else { ... 
branch - level = 0; - while (pos < max) { - code = str.charCodeAt(pos); - if (code === 32) { - break; - } - // ascii control characters - if (code < 32 || code === 127) { - break; - } - if (code === 92 /* \ */ && pos + 1 < max) { - if (str.charCodeAt(pos + 1) === 32) { - break; - } - pos += 2; - continue; - } - if (code === 40 /* ( */) { - level++; - if (level > 32) { - return result; - } - } - if (code === 41 /* ) */) { - if (level === 0) { - break; - } - level--; - } - pos++; - } - if (start === pos) { - return result; - } - if (level !== 0) { - return result; - } - result.str = unescapeAll$2(str.slice(start, pos)); - result.pos = pos; - result.ok = true; - return result; - }; - var unescapeAll$1 = utils.unescapeAll; - var parse_link_title = function parseLinkTitle(str, start, max) { - var code, marker, lines = 0, pos = start, result = { - ok: false, - pos: 0, - lines: 0, - str: "" - }; - if (pos >= max) { - return result; - } - marker = str.charCodeAt(pos); - if (marker !== 34 /* " */ && marker !== 39 /* ' */ && marker !== 40 /* ( */) { - return result; - } - pos++; - // if opening marker is "(", switch it to closing marker ")" - if (marker === 40) { - marker = 41; - } - while (pos < max) { - code = str.charCodeAt(pos); - if (code === marker) { - result.pos = pos + 1; - result.lines = lines; - result.str = unescapeAll$1(str.slice(start + 1, pos)); - result.ok = true; - return result; - } else if (code === 40 /* ( */ && marker === 41 /* ) */) { - return result; - } else if (code === 10) { - lines++; - } else if (code === 92 /* \ */ && pos + 1 < max) { - pos++; - if (str.charCodeAt(pos) === 10) { - lines++; - } - } - pos++; - } - return result; - }; - var parseLinkLabel = parse_link_label; - var parseLinkDestination = parse_link_destination; - var parseLinkTitle = parse_link_title; - var helpers = { - parseLinkLabel: parseLinkLabel, - parseLinkDestination: parseLinkDestination, - parseLinkTitle: parseLinkTitle - }; - var assign$1 = utils.assign; - var unescapeAll = utils.unescapeAll; - var escapeHtml = utils.escapeHtml; - //////////////////////////////////////////////////////////////////////////////// - var default_rules = {}; - default_rules.code_inline = function(tokens, idx, options, env, slf) { - var token = tokens[idx]; - return "<code" + slf.renderAttrs(token) + ">" + escapeHtml(token.content) + "</code>"; - }; - default_rules.code_block = function(tokens, idx, options, env, slf) { - var token = tokens[idx]; - return "<pre" + slf.renderAttrs(token) + "><code>" + escapeHtml(tokens[idx].content) + "</code></pre>\n"; - }; - default_rules.fence = function(tokens, idx, options, env, slf) { - var token = tokens[idx], info = token.info ? unescapeAll(token.info).trim() : "", langName = "", langAttrs = "", highlighted, i, arr, tmpAttrs, tmpToken; - if (info) { - arr = info.split(/(\s+)/g); - langName = arr[0]; - langAttrs = arr.slice(2).join(""); - } - if (options.highlight) { - highlighted = options.highlight(token.content, langName, langAttrs) || escapeHtml(token.content); - } else { - highlighted = escapeHtml(token.content); - } - if (highlighted.indexOf("<pre") === 0) { - return highlighted + "\n"; - } - // If language exists, inject class gently, without modifying original token. - // May be, one day we will add .deepClone() for token and simplify this part, but - // now we prefer to keep things rough for performance reasons. - if (info) { - i = token.attrIndex("class"); - tmpAttrs = token.attrs ? token.attrs.slice() : []; - if (i < 0) { - tmpAttrs.push([ "class", options.langPrefix + langName ]); - } else { - tmpAttrs[i] = tmpAttrs[i].slice(); - tmpAttrs[i][1] += " " + options.langPrefix + langName; - } - // Fake token just to render attributes - tmpToken = { - attrs: tmpAttrs - }; - return "<pre><code" + slf.renderAttrs(tmpToken) + ">" + highlighted + "</code></pre>\n"; - } - return "<pre><code" + slf.renderAttrs(token) + ">" + highlighted + "</code></pre>\n"; - }; - default_rules.image = function(tokens, idx, options, env, slf) { - var token = tokens[idx]; - // "alt" attr MUST be set, even if empty. Because it's mandatory and - // should be placed on proper position for tests. - - // Replace content with actual value - token.attrs[token.attrIndex("alt")][1] = slf.renderInlineAsText(token.children, options, env); - return slf.renderToken(tokens, idx, options); - }; - default_rules.hardbreak = function(tokens, idx, options /*, env */) { - return options.xhtmlOut ? "<br />\n" : "<br>\n"; - }; - default_rules.softbreak = function(tokens, idx, options /*, env */) { - return options.breaks ? options.xhtmlOut ? "<br />\n" : "<br>\n" : "\n"; - }; - default_rules.text = function(tokens, idx /*, options, env */) { - return escapeHtml(tokens[idx].content); - }; - default_rules.html_block = function(tokens, idx /*, options, env */) { - return tokens[idx].content; - }; - default_rules.html_inline = function(tokens, idx /*, options, env */) { - return tokens[idx].content; - }; - /** - * new Renderer() - * - * Creates new [[Renderer]] instance and fill [[Renderer#rules]] with defaults. - **/ function Renderer() { - /** - * Renderer#rules -> Object - * - * Contains render rules for tokens. Can be updated and extended. - * - * ##### Example - * - * ```javascript - * var md = require('markdown-it')(); - * - * md.renderer.rules.strong_open = function () { return '<b>'; }; - * md.renderer.rules.strong_close = function () { return '</b>'; }; - * - * var result = md.renderInline(...); - * ``` - * - * Each rule is called as independent static function with fixed signature: - * - * ```javascript - * function my_token_render(tokens, idx, options, env, renderer) { - * // ... - * return renderedHTML; - * } - * ``` - * - * See [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js) - * for more details and examples. - **/ - this.rules = assign$1({}, default_rules); - } - /** - * Renderer.renderAttrs(token) -> String - * - * Render token attributes to string. - **/ Renderer.prototype.renderAttrs = function renderAttrs(token) { - var i, l, result; - if (!token.attrs) { - return ""; - } - result = ""; - for (i = 0, l = token.attrs.length; i < l; i++) { - result += " " + escapeHtml(token.attrs[i][0]) + '="' + escapeHtml(token.attrs[i][1]) + '"'; - } - return result; - }; - /** - * Renderer.renderToken(tokens, idx, options) -> String - * - tokens (Array): list of tokens - * - idx (Number): token index to render - * - options (Object): params of parser instance - * - * Default token renderer. Can be overridden by custom function - * in [[Renderer#rules]]. - **/ Renderer.prototype.renderToken = function renderToken(tokens, idx, options) { - var nextToken, result = "", needLf = false, token = tokens[idx]; - // Tight list paragraphs - if (token.hidden) { - return ""; - } - // Insert a newline between hidden paragraph and subsequent opening - // block-level tag. - - // For example, here we should insert a newline before blockquote: - // - a - // > - - if (token.block && token.nesting !== -1 && idx && tokens[idx - 1].hidden) { - result += "\n"; - } - // Add token name, e.g. `<img`. - result += (token.nesting === -1 ? "</" : "<") + token.tag; - // Encode attributes, e.g. `<img src=" alt="`. - result += this.renderAttrs(token); - // Add a slash for self-closing tags, e.g. `<img src="foo" /`. - if (token.nesting === 0 && options.xhtmlOut) { - result += " /"; - } - // Check if we need to add a newline after this tag - if (token.block) { - needLf = true; - if (token.nesting === 1) { - if (idx + 1 < tokens.length) { - nextToken = tokens[idx + 1]; - if (nextToken.type === "inline" || nextToken.hidden) { - // Block-level tag containing an inline tag. - needLf = false; - } else if (nextToken.nesting === -1 && nextToken.tag === token.tag) { - // Opening tag + closing tag of the same type. E.g. `<li></li>`. - needLf = false; - } - } - } - } - result += needLf ? ">\n" : ">"; - return result; - }; - /** - * Renderer.renderInline(tokens, options, env) -> String - * - tokens (Array): list of block tokens to render - * - options (Object): params of parser instance - * - env (Object): additional data from parsed input (references, for example) - * - * The same as [[Renderer.render]], but for single token of `inline` type.
- **/ Renderer.prototype.renderInline = function(tokens, options, env) { - var type, result = "", rules = this.rules; - for (var i = 0, len = tokens.length; i < len; i++) { - type = tokens[i].type; - if (typeof rules[type] !== "undefined") { - result += rules[type](tokens, i, options, env, this); - } else { - result += this.renderToken(tokens, i, options); - } - } - return result; - }; - /** internal - * Renderer.renderInlineAsText(tokens, options, env) -> String - * - tokens (Array): list of block tokens to render - * - options (Object): params of parser instance - * - env (Object): additional data from parsed input (references, for example) - * - * Special kludge for image `alt` attributes to conform to the CommonMark spec. - * Don't try to use it! The spec requires showing `alt` content with stripped markup, - * instead of simple escaping. - **/ Renderer.prototype.renderInlineAsText = function(tokens, options, env) { - var result = ""; - for (var i = 0, len = tokens.length; i < len; i++) { - if (tokens[i].type === "text") { - result += tokens[i].content; - } else if (tokens[i].type === "image") { - result += this.renderInlineAsText(tokens[i].children, options, env); - } else if (tokens[i].type === "softbreak") { - result += "\n"; - } - } - return result; - }; - /** - * Renderer.render(tokens, options, env) -> String - * - tokens (Array): list of block tokens to render - * - options (Object): params of parser instance - * - env (Object): additional data from parsed input (references, for example) - * - * Takes token stream and generates HTML. Probably, you will never need to call - * this method directly. - **/ Renderer.prototype.render = function(tokens, options, env) { - var i, len, type, result = "", rules = this.rules; - for (i = 0, len = tokens.length; i < len; i++) { - type = tokens[i].type; - if (type === "inline") { - result += this.renderInline(tokens[i].children, options, env); - } else if (typeof rules[type] !== "undefined") { - result += rules[type](tokens, i, options, env, this); - } else { - result += this.renderToken(tokens, i, options, env); - } - } - return result; - }; - var renderer = Renderer; - /** - * class Ruler - * - * Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and - * [[MarkdownIt#inline]] to manage sequences of functions (rules): - * - * - keep rules in defined order - * - assign the name to each rule - * - enable/disable rules - * - add/replace rules - * - allow assigning rules to additional named chains (within the same ruler) - * - caching lists of active rules - * - * You will not need to use this class directly until you write plugins. For simple - * rules control use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and - * [[MarkdownIt.use]]. - **/ - /** - * new Ruler() - **/ function Ruler() { - // List of added rules. Each element is: - // { - // name: XXX, - // enabled: Boolean, - // fn: Function(), - // alt: [ name2, name3 ] - // } - this.__rules__ = []; - // Cached rule chains. - - // First level - chain name, '' for default. - // Second level - digital anchor for fast filtering by charcodes.
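- // For example, after __compile__() the cache might look like - //   { "": [ fnNormalize, fnBlock, fnInline ], "paragraph": [ fnBlock ] } - // (names illustrative; the extra chain names come from each rule's `alt` array).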
- - this.__cache__ = null; - } - //////////////////////////////////////////////////////////////////////////////// - // Helper methods, should not be used directly - // Find rule index by name - - Ruler.prototype.__find__ = function(name) { - for (var i = 0; i < this.__rules__.length; i++) { - if (this.__rules__[i].name === name) { - return i; - } - } - return -1; - }; - // Build rules lookup cache - - Ruler.prototype.__compile__ = function() { - var self = this; - var chains = [ "" ]; - // collect unique names - self.__rules__.forEach((function(rule) { - if (!rule.enabled) { - return; - } - rule.alt.forEach((function(altName) { - if (chains.indexOf(altName) < 0) { - chains.push(altName); - } - })); - })); - self.__cache__ = {}; - chains.forEach((function(chain) { - self.__cache__[chain] = []; - self.__rules__.forEach((function(rule) { - if (!rule.enabled) { - return; - } - if (chain && rule.alt.indexOf(chain) < 0) { - return; - } - self.__cache__[chain].push(rule.fn); - })); - })); - }; - /** - * Ruler.at(name, fn [, options]) - * - name (String): rule name to replace. - * - fn (Function): new rule function. - * - options (Object): new rule options (not mandatory). - * - * Replace rule by name with new function & options. Throws error if name not - * found. - * - * ##### Options: - * - * - __alt__ - array with names of "alternate" chains. - * - * ##### Example - * - * Replace existing typographer replacement rule with new one: - * - * ```javascript - * var md = require('markdown-it')(); - * - * md.core.ruler.at('replacements', function replace(state) { - * //... - * }); - * ``` - **/ Ruler.prototype.at = function(name, fn, options) { - var index = this.__find__(name); - var opt = options || {}; - if (index === -1) { - throw new Error("Parser rule not found: " + name); - } - this.__rules__[index].fn = fn; - this.__rules__[index].alt = opt.alt || []; - this.__cache__ = null; - }; - /** - * Ruler.before(beforeName, ruleName, fn [, options]) - * - beforeName (String): new rule will be added before this one. - * - ruleName (String): name of added rule. - * - fn (Function): rule function. - * - options (Object): rule options (not mandatory). - * - * Add new rule to chain before one with given name. See also - * [[Ruler.after]], [[Ruler.push]]. - * - * ##### Options: - * - * - __alt__ - array with names of "alternate" chains. - * - * ##### Example - * - * ```javascript - * var md = require('markdown-it')(); - * - * md.block.ruler.before('paragraph', 'my_rule', function replace(state) { - * //... - * }); - * ``` - **/ Ruler.prototype.before = function(beforeName, ruleName, fn, options) { - var index = this.__find__(beforeName); - var opt = options || {}; - if (index === -1) { - throw new Error("Parser rule not found: " + beforeName); - } - this.__rules__.splice(index, 0, { - name: ruleName, - enabled: true, - fn: fn, - alt: opt.alt || [] - }); - this.__cache__ = null; - }; - /** - * Ruler.after(afterName, ruleName, fn [, options]) - * - afterName (String): new rule will be added after this one. - * - ruleName (String): name of added rule. - * - fn (Function): rule function. - * - options (Object): rule options (not mandatory). - * - * Add new rule to chain after one with given name. See also - * [[Ruler.before]], [[Ruler.push]]. - * - * ##### Options: - * - * - __alt__ - array with names of "alternate" chains. - * - * ##### Example - * - * ```javascript - * var md = require('markdown-it')(); - * - * md.inline.ruler.after('text', 'my_rule', function replace(state) { - * //... 
- * }); - * ``` - **/ Ruler.prototype.after = function(afterName, ruleName, fn, options) { - var index = this.__find__(afterName); - var opt = options || {}; - if (index === -1) { - throw new Error("Parser rule not found: " + afterName); - } - this.__rules__.splice(index + 1, 0, { - name: ruleName, - enabled: true, - fn: fn, - alt: opt.alt || [] - }); - this.__cache__ = null; - }; - /** - * Ruler.push(ruleName, fn [, options]) - * - ruleName (String): name of added rule. - * - fn (Function): rule function. - * - options (Object): rule options (not mandatory). - * - * Push new rule to the end of chain. See also - * [[Ruler.before]], [[Ruler.after]]. - * - * ##### Options: - * - * - __alt__ - array with names of "alternate" chains. - * - * ##### Example - * - * ```javascript - * var md = require('markdown-it')(); - * - * md.core.ruler.push('my_rule', function replace(state) { - * //... - * }); - * ``` - **/ Ruler.prototype.push = function(ruleName, fn, options) { - var opt = options || {}; - this.__rules__.push({ - name: ruleName, - enabled: true, - fn: fn, - alt: opt.alt || [] - }); - this.__cache__ = null; - }; - /** - * Ruler.enable(list [, ignoreInvalid]) -> Array - * - list (String|Array): list of rule names to enable. - * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found. - * - * Enable rules with given names. If any rule name not found - throw Error. - * Errors can be disabled by second param. - * - * Returns list of found rule names (if no exception happened). - * - * See also [[Ruler.disable]], [[Ruler.enableOnly]]. - **/ Ruler.prototype.enable = function(list, ignoreInvalid) { - if (!Array.isArray(list)) { - list = [ list ]; - } - var result = []; - // Search by name and enable - list.forEach((function(name) { - var idx = this.__find__(name); - if (idx < 0) { - if (ignoreInvalid) { - return; - } - throw new Error("Rules manager: invalid rule name " + name); - } - this.__rules__[idx].enabled = true; - result.push(name); - }), this); - this.__cache__ = null; - return result; - }; - /** - * Ruler.enableOnly(list [, ignoreInvalid]) - * - list (String|Array): list of rule names to enable (whitelist). - * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found. - * - * Enable rules with given names, and disable everything else. If any rule name - * not found - throw Error. Errors can be disabled by second param. - * - * See also [[Ruler.disable]], [[Ruler.enable]]. - **/ Ruler.prototype.enableOnly = function(list, ignoreInvalid) { - if (!Array.isArray(list)) { - list = [ list ]; - } - this.__rules__.forEach((function(rule) { - rule.enabled = false; - })); - this.enable(list, ignoreInvalid); - }; - /** - * Ruler.disable(list [, ignoreInvalid]) -> Array - * - list (String|Array): list of rule names to disable. - * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found. - * - * Disable rules with given names. If any rule name not found - throw Error. - * Errors can be disabled by second param. - * - * Returns list of found rule names (if no exception happened). - * - * See also [[Ruler.enable]], [[Ruler.enableOnly]]. 
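- * - * ##### Example - * - * ```javascript - * var md = require('markdown-it')(); - * - * // rule names here are illustrative; pass any rules registered in your setup - * md.core.ruler.disable([ 'replacements', 'smartquotes' ]); - * md.block.ruler.disable('table', true); - * ```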
- **/ Ruler.prototype.disable = function(list, ignoreInvalid) { - if (!Array.isArray(list)) { - list = [ list ]; - } - var result = []; - // Search by name and disable - list.forEach((function(name) { - var idx = this.__find__(name); - if (idx < 0) { - if (ignoreInvalid) { - return; - } - throw new Error("Rules manager: invalid rule name " + name); - } - this.__rules__[idx].enabled = false; - result.push(name); - }), this); - this.__cache__ = null; - return result; - }; - /** - * Ruler.getRules(chainName) -> Array - * - * Return array of active functions (rules) for given chain name. It analyzes - * rules configuration, compiles caches if not exists and returns result. - * - * Default chain name is `''` (empty string). It can't be skipped. That's - * done intentionally, to keep signature monomorphic for high speed. - **/ Ruler.prototype.getRules = function(chainName) { - if (this.__cache__ === null) { - this.__compile__(); - } - // Chain can be empty, if rules disabled. But we still have to return Array. - return this.__cache__[chainName] || []; - }; - var ruler = Ruler; - // Normalize input string - // https://spec.commonmark.org/0.29/#line-ending - var NEWLINES_RE = /\r\n?|\n/g; - var NULL_RE = /\0/g; - var normalize = function normalize(state) { - var str; - // Normalize newlines - str = state.src.replace(NEWLINES_RE, "\n"); - // Replace NULL characters - str = str.replace(NULL_RE, "\ufffd"); - state.src = str; - }; - var block = function block(state) { - var token; - if (state.inlineMode) { - token = new state.Token("inline", "", 0); - token.content = state.src; - token.map = [ 0, 1 ]; - token.children = []; - state.tokens.push(token); - } else { - state.md.block.parse(state.src, state.md, state.env, state.tokens); - } - }; - var inline = function inline(state) { - var tokens = state.tokens, tok, i, l; - // Parse inlines - for (i = 0, l = tokens.length; i < l; i++) { - tok = tokens[i]; - if (tok.type === "inline") { - state.md.inline.parse(tok.content, state.md, state.env, tok.children); - } - } - }; - var arrayReplaceAt = utils.arrayReplaceAt; - function isLinkOpen$1(str) { - return /^<a[>\s]/i.test(str); - } - function isLinkClose$1(str) { - return /^<\/a\s*>/i.test(str); - } - var linkify$1 = function linkify(state) { - var i, j, l, tokens, token, currentToken, nodes, ln, text, pos, lastPos, level, htmlLinkLevel, url, fullUrl, urlText, blockTokens = state.tokens, links; - if (!state.md.options.linkify) { - return; - } - for (j = 0, l = blockTokens.length; j < l; j++) { - if (blockTokens[j].type !== "inline" || !state.md.linkify.pretest(blockTokens[j].content)) { - continue; - } - tokens = blockTokens[j].children; - htmlLinkLevel = 0; - // We scan from the end, to keep position when new tags added.
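- // (splicing replacement nodes into tokens[i] shifts every index after i, - // so iterating backwards keeps the indices of not-yet-visited tokens stable)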
- // Use reversed logic in links start/end match - for (i = tokens.length - 1; i >= 0; i--) { - currentToken = tokens[i]; - // Skip content of markdown links - if (currentToken.type === "link_close") { - i--; - while (tokens[i].level !== currentToken.level && tokens[i].type !== "link_open") { - i--; - } - continue; - } - // Skip content of html tag links - if (currentToken.type === "html_inline") { - if (isLinkOpen$1(currentToken.content) && htmlLinkLevel > 0) { - htmlLinkLevel--; - } - if (isLinkClose$1(currentToken.content)) { - htmlLinkLevel++; - } - } - if (htmlLinkLevel > 0) { - continue; - } - if (currentToken.type === "text" && state.md.linkify.test(currentToken.content)) { - text = currentToken.content; - links = state.md.linkify.match(text); - // Now split string to nodes - nodes = []; - level = currentToken.level; - lastPos = 0; - // forbid escape sequence at the start of the string, - // this avoids http\://example.com/ from being linkified as - // http://example.com/ - if (links.length > 0 && links[0].index === 0 && i > 0 && tokens[i - 1].type === "text_special") { - links = links.slice(1); - } - for (ln = 0; ln < links.length; ln++) { - url = links[ln].url; - fullUrl = state.md.normalizeLink(url); - if (!state.md.validateLink(fullUrl)) { - continue; - } - urlText = links[ln].text; - // Linkifier might send raw hostnames like "example.com", where url - // starts with domain name. So we prepend http:// in those cases, - // and remove it afterwards. - - if (!links[ln].schema) { - urlText = state.md.normalizeLinkText("http://" + urlText).replace(/^http:\/\//, ""); - } else if (links[ln].schema === "mailto:" && !/^mailto:/i.test(urlText)) { - urlText = state.md.normalizeLinkText("mailto:" + urlText).replace(/^mailto:/, ""); - } else { - urlText = state.md.normalizeLinkText(urlText); - } - pos = links[ln].index; - if (pos > lastPos) { - token = new state.Token("text", "", 0); - token.content = text.slice(lastPos, pos); - token.level = level; - nodes.push(token); - } - token = new state.Token("link_open", "a", 1); - token.attrs = [ [ "href", fullUrl ] ]; - token.level = level++; - token.markup = "linkify"; - token.info = "auto"; - nodes.push(token); - token = new state.Token("text", "", 0); - token.content = urlText; - token.level = level; - nodes.push(token); - token = new state.Token("link_close", "a", -1); - token.level = --level; - token.markup = "linkify"; - token.info = "auto"; - nodes.push(token); - lastPos = links[ln].lastIndex; - } - if (lastPos < text.length) { - token = new state.Token("text", "", 0); - token.content = text.slice(lastPos); - token.level = level; - nodes.push(token); - } - // replace current node - blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes); - } - } - } - }; - // Simple typographic replacements - // TODO: - // - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾ - // - multiplications 2 x 4 -> 2 Ɨ 4 - var RARE_RE = /\+-|\.\.|\?\?\?\?|!!!!|,,|--/; - // Workaround for phantomjs - need regex without /g flag, - // or root check will fail every second time - var SCOPED_ABBR_TEST_RE = /\((c|tm|r)\)/i; - var SCOPED_ABBR_RE = /\((c|tm|r)\)/gi; - var SCOPED_ABBR = { - c: "\xa9", - r: "\xae", - tm: "\u2122" - }; - function replaceFn(match, name) { - return SCOPED_ABBR[name.toLowerCase()]; - } - function replace_scoped(inlineTokens) { - var i, token, inside_autolink = 0; - for (i = inlineTokens.length - 1; i >= 0; i--) { - token = inlineTokens[i]; - if (token.type === "text" && !inside_autolink) { - token.content = token.content.replace(SCOPED_ABBR_RE, 
replaceFn); - } - if (token.type === "link_open" && token.info === "auto") { - inside_autolink--; - } - if (token.type === "link_close" && token.info === "auto") { - inside_autolink++; - } - } - } - function replace_rare(inlineTokens) { - var i, token, inside_autolink = 0; - for (i = inlineTokens.length - 1; i >= 0; i--) { - token = inlineTokens[i]; - if (token.type === "text" && !inside_autolink) { - if (RARE_RE.test(token.content)) { - token.content = token.content.replace(/\+-/g, "\xb1").replace(/\.{2,}/g, "\u2026").replace(/([?!])\u2026/g, "$1..").replace(/([?!]){4,}/g, "$1$1$1").replace(/,{2,}/g, ",").replace(/(^|[^-])---(?=[^-]|$)/gm, "$1\u2014").replace(/(^|\s)--(?=\s|$)/gm, "$1\u2013").replace(/(^|[^-\s])--(?=[^-\s]|$)/gm, "$1\u2013"); - } - } - if (token.type === "link_open" && token.info === "auto") { - inside_autolink--; - } - if (token.type === "link_close" && token.info === "auto") { - inside_autolink++; - } - } - } - var replacements = function replace(state) { - var blkIdx; - if (!state.md.options.typographer) { - return; - } - for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) { - if (state.tokens[blkIdx].type !== "inline") { - continue; - } - if (SCOPED_ABBR_TEST_RE.test(state.tokens[blkIdx].content)) { - replace_scoped(state.tokens[blkIdx].children); - } - if (RARE_RE.test(state.tokens[blkIdx].content)) { - replace_rare(state.tokens[blkIdx].children); - } - } - }; - var isWhiteSpace$1 = utils.isWhiteSpace; - var isPunctChar$1 = utils.isPunctChar; - var isMdAsciiPunct$1 = utils.isMdAsciiPunct; - var QUOTE_TEST_RE = /['"]/; - var QUOTE_RE = /['"]/g; - var APOSTROPHE = "\u2019"; - /* ’ */ function replaceAt(str, index, ch) { - return str.slice(0, index) + ch + str.slice(index + 1); - } - function process_inlines(tokens, state) { - var i, token, text, t, pos, max, thisLevel, item, lastChar, nextChar, isLastPunctChar, isNextPunctChar, isLastWhiteSpace, isNextWhiteSpace, canOpen, canClose, j, isSingle, stack, openQuote, closeQuote; - stack = []; - for (i = 0; i < tokens.length; i++) { - token = tokens[i]; - thisLevel = tokens[i].level; - for (j = stack.length - 1; j >= 0; j--) { - if (stack[j].level <= thisLevel) { - break; - } - } - stack.length = j + 1; - if (token.type !== "text") { - continue; - } - text = token.content; - pos = 0; - max = text.length; - /*eslint no-labels:0,block-scoped-var:0*/ OUTER: while (pos < max) { - QUOTE_RE.lastIndex = pos; - t = QUOTE_RE.exec(text); - if (!t) { - break; - } - canOpen = canClose = true; - pos = t.index + 1; - isSingle = t[0] === "'"; - // Find previous character, - // default to space if it's the beginning of the line - - lastChar = 32; - if (t.index - 1 >= 0) { - lastChar = text.charCodeAt(t.index - 1); - } else { - for (j = i - 1; j >= 0; j--) { - if (tokens[j].type === "softbreak" || tokens[j].type === "hardbreak") break; - // lastChar defaults to 0x20 - if (!tokens[j].content) continue; - // should skip all tokens except 'text', 'html_inline' or 'code_inline' - lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1); - break; - } - } - // Find next character, - // default to space if it's the end of the line - - nextChar = 32; - if (pos < max) { - nextChar = text.charCodeAt(pos); - } else { - for (j = i + 1; j < tokens.length; j++) { - if (tokens[j].type === "softbreak" || tokens[j].type === "hardbreak") break; - // nextChar defaults to 0x20 - if (!tokens[j].content) continue; - // should skip all tokens except 'text', 'html_inline' or 'code_inline' - nextChar = tokens[j].content.charCodeAt(0); - break; 
- } - } - isLastPunctChar = isMdAsciiPunct$1(lastChar) || isPunctChar$1(String.fromCharCode(lastChar)); - isNextPunctChar = isMdAsciiPunct$1(nextChar) || isPunctChar$1(String.fromCharCode(nextChar)); - isLastWhiteSpace = isWhiteSpace$1(lastChar); - isNextWhiteSpace = isWhiteSpace$1(nextChar); - if (isNextWhiteSpace) { - canOpen = false; - } else if (isNextPunctChar) { - if (!(isLastWhiteSpace || isLastPunctChar)) { - canOpen = false; - } - } - if (isLastWhiteSpace) { - canClose = false; - } else if (isLastPunctChar) { - if (!(isNextWhiteSpace || isNextPunctChar)) { - canClose = false; - } - } - if (nextChar === 34 /* " */ && t[0] === '"') { - if (lastChar >= 48 /* 0 */ && lastChar <= 57 /* 9 */) { - // special case: 1"" - count first quote as an inch - canClose = canOpen = false; - } - } - if (canOpen && canClose) { - // Replace quotes in the middle of punctuation sequence, but not - // in the middle of the words, i.e.: - // 1. foo " bar " baz - not replaced - // 2. foo-"-bar-"-baz - replaced - // 3. foo"bar"baz - not replaced - canOpen = isLastPunctChar; - canClose = isNextPunctChar; - } - if (!canOpen && !canClose) { - // middle of word - if (isSingle) { - token.content = replaceAt(token.content, t.index, APOSTROPHE); - } - continue; - } - if (canClose) { - // this could be a closing quote, rewind the stack to get a match - for (j = stack.length - 1; j >= 0; j--) { - item = stack[j]; - if (stack[j].level < thisLevel) { - break; - } - if (item.single === isSingle && stack[j].level === thisLevel) { - item = stack[j]; - if (isSingle) { - openQuote = state.md.options.quotes[2]; - closeQuote = state.md.options.quotes[3]; - } else { - openQuote = state.md.options.quotes[0]; - closeQuote = state.md.options.quotes[1]; - } - // replace token.content *before* tokens[item.token].content, - // because, if they are pointing at the same token, replaceAt - // could mess up indices when quote length != 1 - token.content = replaceAt(token.content, t.index, closeQuote); - tokens[item.token].content = replaceAt(tokens[item.token].content, item.pos, openQuote); - pos += closeQuote.length - 1; - if (item.token === i) { - pos += openQuote.length - 1; - } - text = token.content; - max = text.length; - stack.length = j; - continue OUTER; - } - } - } - if (canOpen) { - stack.push({ - token: i, - pos: t.index, - single: isSingle, - level: thisLevel - }); - } else if (canClose && isSingle) { - token.content = replaceAt(token.content, t.index, APOSTROPHE); - } - } - } - } - var smartquotes = function smartquotes(state) { - /*eslint max-depth:0*/ - var blkIdx; - if (!state.md.options.typographer) { - return; - } - for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) { - if (state.tokens[blkIdx].type !== "inline" || !QUOTE_TEST_RE.test(state.tokens[blkIdx].content)) { - continue; - } - process_inlines(state.tokens[blkIdx].children, state); - } - }; - // Join raw text tokens with the rest of the text - var text_join = function text_join(state) { - var j, l, tokens, curr, max, last, blockTokens = state.tokens; - for (j = 0, l = blockTokens.length; j < l; j++) { - if (blockTokens[j].type !== "inline") continue; - tokens = blockTokens[j].children; - max = tokens.length; - for (curr = 0; curr < max; curr++) { - if (tokens[curr].type === "text_special") { - tokens[curr].type = "text"; - } - } - for (curr = last = 0; curr < max; curr++) { - if (tokens[curr].type === "text" && curr + 1 < max && tokens[curr + 1].type === "text") { - // collapse two adjacent text nodes - tokens[curr + 1].content = tokens[curr].content 
+ tokens[curr + 1].content; - } else { - if (curr !== last) { - tokens[last] = tokens[curr]; - } - last++; - } - } - if (curr !== last) { - tokens.length = last; - } - } - }; - // Token class - /** - * class Token - **/ - /** - * new Token(type, tag, nesting) - * - * Create new token and fill passed properties. - **/ function Token(type, tag, nesting) { - /** - * Token#type -> String - * - * Type of the token (string, e.g. "paragraph_open") - **/ - this.type = type; - /** - * Token#tag -> String - * - * html tag name, e.g. "p" - **/ this.tag = tag; - /** - * Token#attrs -> Array - * - * Html attributes. Format: `[ [ name1, value1 ], [ name2, value2 ] ]` - **/ this.attrs = null; - /** - * Token#map -> Array - * - * Source map info. Format: `[ line_begin, line_end ]` - **/ this.map = null; - /** - * Token#nesting -> Number - * - * Level change (number in {-1, 0, 1} set), where: - * - * - `1` means the tag is opening - * - `0` means the tag is self-closing - * - `-1` means the tag is closing - **/ this.nesting = nesting; - /** - * Token#level -> Number - * - * nesting level, the same as `state.level` - **/ this.level = 0; - /** - * Token#children -> Array - * - * An array of child nodes (inline and img tokens) - **/ this.children = null; - /** - * Token#content -> String - * - * In a case of self-closing tag (code, html, fence, etc.), - * it has contents of this tag. - **/ this.content = ""; - /** - * Token#markup -> String - * - * '*' or '_' for emphasis, fence string for fence, etc. - **/ this.markup = ""; - /** - * Token#info -> String - * - * Additional information: - * - * - Info string for "fence" tokens - * - The value "auto" for autolink "link_open" and "link_close" tokens - * - The string value of the item marker for ordered-list "list_item_open" tokens - **/ this.info = ""; - /** - * Token#meta -> Object - * - * A place for plugins to store an arbitrary data - **/ this.meta = null; - /** - * Token#block -> Boolean - * - * True for block-level tokens, false for inline tokens. - * Used in renderer to calculate line breaks - **/ this.block = false; - /** - * Token#hidden -> Boolean - * - * If it's true, ignore this element when rendering. Used for tight lists - * to hide paragraphs. - **/ this.hidden = false; - } - /** - * Token.attrIndex(name) -> Number - * - * Search attribute index by name. - **/ Token.prototype.attrIndex = function attrIndex(name) { - var attrs, i, len; - if (!this.attrs) { - return -1; - } - attrs = this.attrs; - for (i = 0, len = attrs.length; i < len; i++) { - if (attrs[i][0] === name) { - return i; - } - } - return -1; - }; - /** - * Token.attrPush(attrData) - * - * Add `[ name, value ]` attribute to list. Init attrs if necessary - **/ Token.prototype.attrPush = function attrPush(attrData) { - if (this.attrs) { - this.attrs.push(attrData); - } else { - this.attrs = [ attrData ]; - } - }; - /** - * Token.attrSet(name, value) - * - * Set `name` attribute to `value`. Override old value if exists. - **/ Token.prototype.attrSet = function attrSet(name, value) { - var idx = this.attrIndex(name), attrData = [ name, value ]; - if (idx < 0) { - this.attrPush(attrData); - } else { - this.attrs[idx] = attrData; - } - }; - /** - * Token.attrGet(name) - * - * Get the value of attribute `name`, or null if it does not exist. 
- **/ Token.prototype.attrGet = function attrGet(name) { - var idx = this.attrIndex(name), value = null; - if (idx >= 0) { - value = this.attrs[idx][1]; - } - return value; - }; - /** - * Token.attrJoin(name, value) - * - * Join value to existing attribute via space. Or create new attribute if not - * exists. Useful to operate with token classes. - **/ Token.prototype.attrJoin = function attrJoin(name, value) { - var idx = this.attrIndex(name); - if (idx < 0) { - this.attrPush([ name, value ]); - } else { - this.attrs[idx][1] = this.attrs[idx][1] + " " + value; - } - }; - var token = Token; - function StateCore(src, md, env) { - this.src = src; - this.env = env; - this.tokens = []; - this.inlineMode = false; - this.md = md; - // link to parser instance - } - // re-export Token class to use in core rules - StateCore.prototype.Token = token; - var state_core = StateCore; - var _rules$2 = [ [ "normalize", normalize ], [ "block", block ], [ "inline", inline ], [ "linkify", linkify$1 ], [ "replacements", replacements ], [ "smartquotes", smartquotes ], - // `text_join` finds `text_special` tokens (for escape sequences) - // and joins them with the rest of the text - [ "text_join", text_join ] ]; - /** - * new Core() - **/ function Core() { - /** - * Core#ruler -> Ruler - * - * [[Ruler]] instance. Keep configuration of core rules. - **/ - this.ruler = new ruler; - for (var i = 0; i < _rules$2.length; i++) { - this.ruler.push(_rules$2[i][0], _rules$2[i][1]); - } - } - /** - * Core.process(state) - * - * Executes core chain rules. - **/ Core.prototype.process = function(state) { - var i, l, rules; - rules = this.ruler.getRules(""); - for (i = 0, l = rules.length; i < l; i++) { - rules[i](state); - } - }; - Core.prototype.State = state_core; - var parser_core = Core; - var isSpace$a = utils.isSpace; - function getLine(state, line) { - var pos = state.bMarks[line] + state.tShift[line], max = state.eMarks[line]; - return state.src.slice(pos, max); - } - function escapedSplit(str) { - var result = [], pos = 0, max = str.length, ch, isEscaped = false, lastPos = 0, current = ""; - ch = str.charCodeAt(pos); - while (pos < max) { - if (ch === 124 /* | */) { - if (!isEscaped) { - // pipe separating cells, '|' - result.push(current + str.substring(lastPos, pos)); - current = ""; - lastPos = pos + 1; - } else { - // escaped pipe, '\|' - current += str.substring(lastPos, pos - 1); - lastPos = pos; - } - } - isEscaped = ch === 92 /* \ */; - pos++; - ch = str.charCodeAt(pos); - } - result.push(current + str.substring(lastPos)); - return result; - } - var table = function table(state, startLine, endLine, silent) { - var ch, lineText, pos, i, l, nextLine, columns, columnCount, token, aligns, t, tableLines, tbodyLines, oldParentType, terminate, terminatorRules, firstCh, secondCh; - // should have at least two lines - if (startLine + 2 > endLine) { - return false; - } - nextLine = startLine + 1; - if (state.sCount[nextLine] < state.blkIndent) { - return false; - } - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[nextLine] - state.blkIndent >= 4) { - return false; - } - // first character of the second line should be '|', '-', ':', - // and no other characters are allowed but spaces; - // basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp - pos = state.bMarks[nextLine] + state.tShift[nextLine]; - if (pos >= state.eMarks[nextLine]) { - return false; - } - firstCh = state.src.charCodeAt(pos++); - if (firstCh !== 124 /* | */ && firstCh !== 45 /* - */ && firstCh !== 58 /* 
: */) { - return false; - } - if (pos >= state.eMarks[nextLine]) { - return false; - } - secondCh = state.src.charCodeAt(pos++); - if (secondCh !== 124 /* | */ && secondCh !== 45 /* - */ && secondCh !== 58 /* : */ && !isSpace$a(secondCh)) { - return false; - } - // if first character is '-', then second character must not be a space - // (due to parsing ambiguity with list) - if (firstCh === 45 /* - */ && isSpace$a(secondCh)) { - return false; - } - while (pos < state.eMarks[nextLine]) { - ch = state.src.charCodeAt(pos); - if (ch !== 124 /* | */ && ch !== 45 /* - */ && ch !== 58 /* : */ && !isSpace$a(ch)) { - return false; - } - pos++; - } - lineText = getLine(state, startLine + 1); - columns = lineText.split("|"); - aligns = []; - for (i = 0; i < columns.length; i++) { - t = columns[i].trim(); - if (!t) { - // allow empty columns before and after table, but not in between columns; - // e.g. allow ` |---| `, disallow ` ---||--- ` - if (i === 0 || i === columns.length - 1) { - continue; - } else { - return false; - } - } - if (!/^:?-+:?$/.test(t)) { - return false; - } - if (t.charCodeAt(t.length - 1) === 58 /* : */) { - aligns.push(t.charCodeAt(0) === 58 /* : */ ? "center" : "right"); - } else if (t.charCodeAt(0) === 58 /* : */) { - aligns.push("left"); - } else { - aligns.push(""); - } - } - lineText = getLine(state, startLine).trim(); - if (lineText.indexOf("|") === -1) { - return false; - } - if (state.sCount[startLine] - state.blkIndent >= 4) { - return false; - } - columns = escapedSplit(lineText); - if (columns.length && columns[0] === "") columns.shift(); - if (columns.length && columns[columns.length - 1] === "") columns.pop(); - // header row will define an amount of columns in the entire table, - // and align row should be exactly the same (the rest of the rows can differ) - columnCount = columns.length; - if (columnCount === 0 || columnCount !== aligns.length) { - return false; - } - if (silent) { - return true; - } - oldParentType = state.parentType; - state.parentType = "table"; - // use 'blockquote' lists for termination because it's - // the most similar to tables - terminatorRules = state.md.block.ruler.getRules("blockquote"); - token = state.push("table_open", "table", 1); - token.map = tableLines = [ startLine, 0 ]; - token = state.push("thead_open", "thead", 1); - token.map = [ startLine, startLine + 1 ]; - token = state.push("tr_open", "tr", 1); - token.map = [ startLine, startLine + 1 ]; - for (i = 0; i < columns.length; i++) { - token = state.push("th_open", "th", 1); - if (aligns[i]) { - token.attrs = [ [ "style", "text-align:" + aligns[i] ] ]; - } - token = state.push("inline", "", 0); - token.content = columns[i].trim(); - token.children = []; - token = state.push("th_close", "th", -1); - } - token = state.push("tr_close", "tr", -1); - token = state.push("thead_close", "thead", -1); - for (nextLine = startLine + 2; nextLine < endLine; nextLine++) { - if (state.sCount[nextLine] < state.blkIndent) { - break; - } - terminate = false; - for (i = 0, l = terminatorRules.length; i < l; i++) { - if (terminatorRules[i](state, nextLine, endLine, true)) { - terminate = true; - break; - } - } - if (terminate) { - break; - } - lineText = getLine(state, nextLine).trim(); - if (!lineText) { - break; - } - if (state.sCount[nextLine] - state.blkIndent >= 4) { - break; - } - columns = escapedSplit(lineText); - if (columns.length && columns[0] === "") columns.shift(); - if (columns.length && columns[columns.length - 1] === "") columns.pop(); - if (nextLine === startLine + 2) { - 
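- // first data row reached: open tbody lazily, so a header-only table emits no tbody at all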
token = state.push("tbody_open", "tbody", 1); - token.map = tbodyLines = [ startLine + 2, 0 ]; - } - token = state.push("tr_open", "tr", 1); - token.map = [ nextLine, nextLine + 1 ]; - for (i = 0; i < columnCount; i++) { - token = state.push("td_open", "td", 1); - if (aligns[i]) { - token.attrs = [ [ "style", "text-align:" + aligns[i] ] ]; - } - token = state.push("inline", "", 0); - token.content = columns[i] ? columns[i].trim() : ""; - token.children = []; - token = state.push("td_close", "td", -1); - } - token = state.push("tr_close", "tr", -1); - } - if (tbodyLines) { - token = state.push("tbody_close", "tbody", -1); - tbodyLines[1] = nextLine; - } - token = state.push("table_close", "table", -1); - tableLines[1] = nextLine; - state.parentType = oldParentType; - state.line = nextLine; - return true; - }; - // Code block (4 spaces padded) - var code = function code(state, startLine, endLine /*, silent*/) { - var nextLine, last, token; - if (state.sCount[startLine] - state.blkIndent < 4) { - return false; - } - last = nextLine = startLine + 1; - while (nextLine < endLine) { - if (state.isEmpty(nextLine)) { - nextLine++; - continue; - } - if (state.sCount[nextLine] - state.blkIndent >= 4) { - nextLine++; - last = nextLine; - continue; - } - break; - } - state.line = last; - token = state.push("code_block", "code", 0); - token.content = state.getLines(startLine, last, 4 + state.blkIndent, false) + "\n"; - token.map = [ startLine, state.line ]; - return true; - }; - // fences (``` lang, ~~~ lang) - var fence = function fence(state, startLine, endLine, silent) { - var marker, len, params, nextLine, mem, token, markup, haveEndMarker = false, pos = state.bMarks[startLine] + state.tShift[startLine], max = state.eMarks[startLine]; - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[startLine] - state.blkIndent >= 4) { - return false; - } - if (pos + 3 > max) { - return false; - } - marker = state.src.charCodeAt(pos); - if (marker !== 126 /* ~ */ && marker !== 96 /* ` */) { - return false; - } - // scan marker length - mem = pos; - pos = state.skipChars(pos, marker); - len = pos - mem; - if (len < 3) { - return false; - } - markup = state.src.slice(mem, pos); - params = state.src.slice(pos, max); - if (marker === 96 /* ` */) { - if (params.indexOf(String.fromCharCode(marker)) >= 0) { - return false; - } - } - // Since start is found, we can report success here in validation mode - if (silent) { - return true; - } - // search end of block - nextLine = startLine; - for (;;) { - nextLine++; - if (nextLine >= endLine) { - // unclosed block should be autoclosed by end of document. - // also block seems to be autoclosed by end of parent - break; - } - pos = mem = state.bMarks[nextLine] + state.tShift[nextLine]; - max = state.eMarks[nextLine]; - if (pos < max && state.sCount[nextLine] < state.blkIndent) { - // non-empty line with negative indent should stop the list: - // - ``` - // test - break; - } - if (state.src.charCodeAt(pos) !== marker) { - continue; - } - if (state.sCount[nextLine] - state.blkIndent >= 4) { - // closing fence should be indented less than 4 spaces - continue; - } - pos = state.skipChars(pos, marker); - // closing code fence must be at least as long as the opening one - if (pos - mem < len) { - continue; - } - // make sure tail has spaces only - pos = state.skipSpaces(pos); - if (pos < max) { - continue; - } - haveEndMarker = true; - // found! 
- break; - } - // If a fence has heading spaces, they should be removed from its inner block - len = state.sCount[startLine]; - state.line = nextLine + (haveEndMarker ? 1 : 0); - token = state.push("fence", "code", 0); - token.info = params; - token.content = state.getLines(startLine + 1, nextLine, len, true); - token.markup = markup; - token.map = [ startLine, state.line ]; - return true; - }; - var isSpace$9 = utils.isSpace; - var blockquote = function blockquote(state, startLine, endLine, silent) { - var adjustTab, ch, i, initial, l, lastLineEmpty, lines, nextLine, offset, oldBMarks, oldBSCount, oldIndent, oldParentType, oldSCount, oldTShift, spaceAfterMarker, terminate, terminatorRules, token, isOutdented, oldLineMax = state.lineMax, pos = state.bMarks[startLine] + state.tShift[startLine], max = state.eMarks[startLine]; - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[startLine] - state.blkIndent >= 4) { - return false; - } - // check the block quote marker - if (state.src.charCodeAt(pos) !== 62 /* > */) { - return false; - } - // we know that it's going to be a valid blockquote, - // so no point trying to find the end of it in silent mode - if (silent) { - return true; - } - oldBMarks = []; - oldBSCount = []; - oldSCount = []; - oldTShift = []; - terminatorRules = state.md.block.ruler.getRules("blockquote"); - oldParentType = state.parentType; - state.parentType = "blockquote"; - // Search the end of the block - - // Block ends with either: - // 1. an empty line outside: - // ``` - // > test - - // ``` - // 2. an empty line inside: - // ``` - // > - // test - // ``` - // 3. another tag: - // ``` - // > test - // - - - - // ``` - for (nextLine = startLine; nextLine < endLine; nextLine++) { - // check if it's outdented, i.e. it's inside list item and indented - // less than said list item: - // ``` - // 1. anything - // > current blockquote - // 2. checking this line - // ``` - isOutdented = state.sCount[nextLine] < state.blkIndent; - pos = state.bMarks[nextLine] + state.tShift[nextLine]; - max = state.eMarks[nextLine]; - if (pos >= max) { - // Case 1: line is not inside the blockquote, and this line is empty. - break; - } - if (state.src.charCodeAt(pos++) === 62 /* > */ && !isOutdented) { - // This line is inside the blockquote. - // set offset past spaces and ">" - initial = state.sCount[nextLine] + 1; - // skip one optional space after '>' - if (state.src.charCodeAt(pos) === 32 /* space */) { - // ' > test ' - // ^ -- position start of line here: - pos++; - initial++; - adjustTab = false; - spaceAfterMarker = true; - } else if (state.src.charCodeAt(pos) === 9 /* tab */) { - spaceAfterMarker = true; - if ((state.bsCount[nextLine] + initial) % 4 === 3) { - // ' >\t test ' - // ^ -- position start of line here (tab has width===1) - pos++; - initial++; - adjustTab = false; - } else { - // ' >\t test ' - // ^ -- position start of line here + shift bsCount slightly - // to make extra space appear - adjustTab = true; - } - } else { - spaceAfterMarker = false; - } - offset = initial; - oldBMarks.push(state.bMarks[nextLine]); - state.bMarks[nextLine] = pos; - while (pos < max) { - ch = state.src.charCodeAt(pos); - if (isSpace$9(ch)) { - if (ch === 9) { - offset += 4 - (offset + state.bsCount[nextLine] + (adjustTab ? 1 : 0)) % 4; - } else { - offset++; - } - } else { - break; - } - pos++; - } - lastLineEmpty = pos >= max; - oldBSCount.push(state.bsCount[nextLine]); - state.bsCount[nextLine] = state.sCount[nextLine] + 1 + (spaceAfterMarker ? 
1 : 0); - oldSCount.push(state.sCount[nextLine]); - state.sCount[nextLine] = offset - initial; - oldTShift.push(state.tShift[nextLine]); - state.tShift[nextLine] = pos - state.bMarks[nextLine]; - continue; - } - // Case 2: line is not inside the blockquote, and the last line was empty. - if (lastLineEmpty) { - break; - } - // Case 3: another tag found. - terminate = false; - for (i = 0, l = terminatorRules.length; i < l; i++) { - if (terminatorRules[i](state, nextLine, endLine, true)) { - terminate = true; - break; - } - } - if (terminate) { - // Quirk to enforce "hard termination mode" for paragraphs; - // normally if you call `tokenize(state, startLine, nextLine)`, - // paragraphs will look below nextLine for paragraph continuation, - // but if blockquote is terminated by another tag, they shouldn't - state.lineMax = nextLine; - if (state.blkIndent !== 0) { - // state.blkIndent was non-zero, we now set it to zero, - // so we need to re-calculate all offsets to appear as - // if indent wasn't changed - oldBMarks.push(state.bMarks[nextLine]); - oldBSCount.push(state.bsCount[nextLine]); - oldTShift.push(state.tShift[nextLine]); - oldSCount.push(state.sCount[nextLine]); - state.sCount[nextLine] -= state.blkIndent; - } - break; - } - oldBMarks.push(state.bMarks[nextLine]); - oldBSCount.push(state.bsCount[nextLine]); - oldTShift.push(state.tShift[nextLine]); - oldSCount.push(state.sCount[nextLine]); - // A negative indentation means that this is a paragraph continuation - - state.sCount[nextLine] = -1; - } - oldIndent = state.blkIndent; - state.blkIndent = 0; - token = state.push("blockquote_open", "blockquote", 1); - token.markup = ">"; - token.map = lines = [ startLine, 0 ]; - state.md.block.tokenize(state, startLine, nextLine); - token = state.push("blockquote_close", "blockquote", -1); - token.markup = ">"; - state.lineMax = oldLineMax; - state.parentType = oldParentType; - lines[1] = state.line; - // Restore original tShift; this might not be necessary since the parser - // has already been here, but just to make sure we can do that. - for (i = 0; i < oldTShift.length; i++) { - state.bMarks[i + startLine] = oldBMarks[i]; - state.tShift[i + startLine] = oldTShift[i]; - state.sCount[i + startLine] = oldSCount[i]; - state.bsCount[i + startLine] = oldBSCount[i]; - } - state.blkIndent = oldIndent; - return true; - }; - var isSpace$8 = utils.isSpace; - var hr = function hr(state, startLine, endLine, silent) { - var marker, cnt, ch, token, pos = state.bMarks[startLine] + state.tShift[startLine], max = state.eMarks[startLine]; - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[startLine] - state.blkIndent >= 4) { - return false; - } - marker = state.src.charCodeAt(pos++); - // Check hr marker - if (marker !== 42 /* * */ && marker !== 45 /* - */ && marker !== 95 /* _ */) { - return false; - } - // markers can be mixed with spaces, but there should be at least 3 of them - cnt = 1; - while (pos < max) { - ch = state.src.charCodeAt(pos++); - if (ch !== marker && !isSpace$8(ch)) { - return false; - } - if (ch === marker) { - cnt++; - } - } - if (cnt < 3) { - return false; - } - if (silent) { - return true; - } - state.line = startLine + 1; - token = state.push("hr", "hr", 0); - token.map = [ startLine, state.line ]; - token.markup = Array(cnt + 1).join(String.fromCharCode(marker)); - return true; - }; - var isSpace$7 = utils.isSpace; - // Search `[-+*][\n ]`, returns next pos after marker on success - // or -1 on fail. 
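- // e.g. for "- item" it returns the offset just past "-"; for "-item" (no space) it returns -1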
- function skipBulletListMarker(state, startLine) { - var marker, pos, max, ch; - pos = state.bMarks[startLine] + state.tShift[startLine]; - max = state.eMarks[startLine]; - marker = state.src.charCodeAt(pos++); - // Check bullet - if (marker !== 42 /* * */ && marker !== 45 /* - */ && marker !== 43 /* + */) { - return -1; - } - if (pos < max) { - ch = state.src.charCodeAt(pos); - if (!isSpace$7(ch)) { - // " -test " - is not a list item - return -1; - } - } - return pos; - } - // Search `\d+[.)][\n ]`, returns next pos after marker on success - // or -1 on fail. - function skipOrderedListMarker(state, startLine) { - var ch, start = state.bMarks[startLine] + state.tShift[startLine], pos = start, max = state.eMarks[startLine]; - // List marker should have at least 2 chars (digit + dot) - if (pos + 1 >= max) { - return -1; - } - ch = state.src.charCodeAt(pos++); - if (ch < 48 /* 0 */ || ch > 57 /* 9 */) { - return -1; - } - for (;;) { - // EOL -> fail - if (pos >= max) { - return -1; - } - ch = state.src.charCodeAt(pos++); - if (ch >= 48 /* 0 */ && ch <= 57 /* 9 */) { - // List marker should have no more than 9 digits - // (prevents integer overflow in browsers) - if (pos - start >= 10) { - return -1; - } - continue; - } - // found valid marker - if (ch === 41 /* ) */ || ch === 46 /* . */) { - break; - } - return -1; - } - if (pos < max) { - ch = state.src.charCodeAt(pos); - if (!isSpace$7(ch)) { - // " 1.test " - is not a list item - return -1; - } - } - return pos; - } - function markTightParagraphs(state, idx) { - var i, l, level = state.level + 2; - for (i = idx + 2, l = state.tokens.length - 2; i < l; i++) { - if (state.tokens[i].level === level && state.tokens[i].type === "paragraph_open") { - state.tokens[i + 2].hidden = true; - state.tokens[i].hidden = true; - i += 2; - } - } - } - var list = function list(state, startLine, endLine, silent) { - var ch, contentStart, i, indent, indentAfterMarker, initial, isOrdered, itemLines, l, listLines, listTokIdx, markerCharCode, markerValue, max, offset, oldListIndent, oldParentType, oldSCount, oldTShift, oldTight, pos, posAfterMarker, prevEmptyEnd, start, terminate, terminatorRules, token, nextLine = startLine, isTerminatingParagraph = false, tight = true; - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[nextLine] - state.blkIndent >= 4) { - return false; - } - // Special case: - // - item 1 - // - item 2 - // - item 3 - // - item 4 - // - this one is a paragraph continuation - if (state.listIndent >= 0 && state.sCount[nextLine] - state.listIndent >= 4 && state.sCount[nextLine] < state.blkIndent) { - return false; - } - // limit conditions when list can interrupt - // a paragraph (validation mode only) - if (silent && state.parentType === "paragraph") { - // Next list item should still terminate previous list item; - // This code can fail if plugins use blkIndent as well as lists, - // but I hope the spec gets fixed long before that happens. - if (state.sCount[nextLine] >= state.blkIndent) { - isTerminatingParagraph = true; - } - } - // Detect list type and position after marker - if ((posAfterMarker = skipOrderedListMarker(state, nextLine)) >= 0) { - isOrdered = true; - start = state.bMarks[nextLine] + state.tShift[nextLine]; - markerValue = Number(state.src.slice(start, posAfterMarker - 1)); - // If we're starting a new ordered list right after - // a paragraph, it should start with 1. 
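- // (per CommonMark, only an ordered list starting at 1 may interrupt a paragraph: - // "para\n2. item" stays a paragraph, while "para\n1. item" starts a list)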
- if (isTerminatingParagraph && markerValue !== 1) return false; - } else if ((posAfterMarker = skipBulletListMarker(state, nextLine)) >= 0) { - isOrdered = false; - } else { - return false; - } - // If we're starting a new unordered list right after - // a paragraph, first line should not be empty. - if (isTerminatingParagraph) { - if (state.skipSpaces(posAfterMarker) >= state.eMarks[nextLine]) return false; - } - // For validation mode we can terminate immediately - if (silent) { - return true; - } - // We should terminate list on style change. Remember first one to compare. - markerCharCode = state.src.charCodeAt(posAfterMarker - 1); - // Start list - listTokIdx = state.tokens.length; - if (isOrdered) { - token = state.push("ordered_list_open", "ol", 1); - if (markerValue !== 1) { - token.attrs = [ [ "start", markerValue ] ]; - } - } else { - token = state.push("bullet_list_open", "ul", 1); - } - token.map = listLines = [ nextLine, 0 ]; - token.markup = String.fromCharCode(markerCharCode); - - // Iterate list items - - prevEmptyEnd = false; - terminatorRules = state.md.block.ruler.getRules("list"); - oldParentType = state.parentType; - state.parentType = "list"; - while (nextLine < endLine) { - pos = posAfterMarker; - max = state.eMarks[nextLine]; - initial = offset = state.sCount[nextLine] + posAfterMarker - (state.bMarks[nextLine] + state.tShift[nextLine]); - while (pos < max) { - ch = state.src.charCodeAt(pos); - if (ch === 9) { - offset += 4 - (offset + state.bsCount[nextLine]) % 4; - } else if (ch === 32) { - offset++; - } else { - break; - } - pos++; - } - contentStart = pos; - if (contentStart >= max) { - // trimming space in "- \n 3" case, indent is 1 here - indentAfterMarker = 1; - } else { - indentAfterMarker = offset - initial; - } - // If we have more than 4 spaces, the indent is 1 - // (the rest is just indented code block) - if (indentAfterMarker > 4) { - indentAfterMarker = 1; - } - // " - test" - // ^^^^^ - calculating total length of this thing - indent = initial + indentAfterMarker; - // Run subparser & write tokens - token = state.push("list_item_open", "li", 1); - token.markup = String.fromCharCode(markerCharCode); - token.map = itemLines = [ nextLine, 0 ]; - if (isOrdered) { - token.info = state.src.slice(start, posAfterMarker - 1); - } - // change current state, then restore it after parser subcall - oldTight = state.tight; - oldTShift = state.tShift[nextLine]; - oldSCount = state.sCount[nextLine]; - // - example list - // ^ listIndent position will be here - // ^ blkIndent position will be here - - oldListIndent = state.listIndent; - state.listIndent = state.blkIndent; - state.blkIndent = indent; - state.tight = true; - state.tShift[nextLine] = contentStart - state.bMarks[nextLine]; - state.sCount[nextLine] = offset; - if (contentStart >= max && state.isEmpty(nextLine + 1)) { - // workaround for this case - // (list item is empty, list terminates before "foo"): - // ~~~~~~~~ - // - - // foo - // ~~~~~~~~ - state.line = Math.min(state.line + 2, endLine); - } else { - state.md.block.tokenize(state, nextLine, endLine, true); - } - // If any of list item is tight, mark list as tight - if (!state.tight || prevEmptyEnd) { - tight = false; - } - // Item become loose if finish with empty line, - // but we should filter last element, because it means list finish - prevEmptyEnd = state.line - nextLine > 1 && state.isEmpty(state.line - 1); - state.blkIndent = state.listIndent; - state.listIndent = oldListIndent; - state.tShift[nextLine] = oldTShift; - state.sCount[nextLine] 
= oldSCount; - state.tight = oldTight; - token = state.push("list_item_close", "li", -1); - token.markup = String.fromCharCode(markerCharCode); - nextLine = state.line; - itemLines[1] = nextLine; - if (nextLine >= endLine) { - break; - } - - // Try to check if list is terminated or continued. - - if (state.sCount[nextLine] < state.blkIndent) { - break; - } - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[nextLine] - state.blkIndent >= 4) { - break; - } - // fail if terminating block found - terminate = false; - for (i = 0, l = terminatorRules.length; i < l; i++) { - if (terminatorRules[i](state, nextLine, endLine, true)) { - terminate = true; - break; - } - } - if (terminate) { - break; - } - // fail if list has another type - if (isOrdered) { - posAfterMarker = skipOrderedListMarker(state, nextLine); - if (posAfterMarker < 0) { - break; - } - start = state.bMarks[nextLine] + state.tShift[nextLine]; - } else { - posAfterMarker = skipBulletListMarker(state, nextLine); - if (posAfterMarker < 0) { - break; - } - } - if (markerCharCode !== state.src.charCodeAt(posAfterMarker - 1)) { - break; - } - } - // Finalize list - if (isOrdered) { - token = state.push("ordered_list_close", "ol", -1); - } else { - token = state.push("bullet_list_close", "ul", -1); - } - token.markup = String.fromCharCode(markerCharCode); - listLines[1] = nextLine; - state.line = nextLine; - state.parentType = oldParentType; - // mark paragraphs tight if needed - if (tight) { - markTightParagraphs(state, listTokIdx); - } - return true; - }; - var normalizeReference$2 = utils.normalizeReference; - var isSpace$6 = utils.isSpace; - var reference = function reference(state, startLine, _endLine, silent) { - var ch, destEndPos, destEndLineNo, endLine, href, i, l, label, labelEnd, oldParentType, res, start, str, terminate, terminatorRules, title, lines = 0, pos = state.bMarks[startLine] + state.tShift[startLine], max = state.eMarks[startLine], nextLine = startLine + 1; - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[startLine] - state.blkIndent >= 4) { - return false; - } - if (state.src.charCodeAt(pos) !== 91 /* [ */) { - return false; - } - // Simple check to quickly interrupt scan on [link](url) at the start of line. - // Can be useful on practice: https://github.com/markdown-it/markdown-it/issues/54 - while (++pos < max) { - if (state.src.charCodeAt(pos) === 93 /* ] */ && state.src.charCodeAt(pos - 1) !== 92 /* \ */) { - if (pos + 1 === max) { - return false; - } - if (state.src.charCodeAt(pos + 1) !== 58 /* : */) { - return false; - } - break; - } - } - endLine = state.lineMax; - // jump line-by-line until empty one or EOF - terminatorRules = state.md.block.ruler.getRules("reference"); - oldParentType = state.parentType; - state.parentType = "reference"; - for (;nextLine < endLine && !state.isEmpty(nextLine); nextLine++) { - // this would be a code block normally, but after paragraph - // it's considered a lazy continuation regardless of what's there - if (state.sCount[nextLine] - state.blkIndent > 3) { - continue; - } - // quirk for blockquotes, this line should already be checked by that rule - if (state.sCount[nextLine] < 0) { - continue; - } - // Some tags can terminate paragraph without empty line. 
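The list rule above records the first ordered marker's value as a `start` attribute and, for tight lists, hides the inner paragraph tokens. Both behaviors are visible from the public API (a sketch, assuming markdown-it is installed):

```js
const MarkdownIt = require("markdown-it");
const md = new MarkdownIt();

console.log(md.render("3. three\n4. four"));
// <ol start="3"> ... </ol> — markerValue !== 1 becomes the start attr

console.log(md.render("- a\n- b"));   // tight:  <li>a</li>
console.log(md.render("- a\n\n- b")); // loose:  <li><p>a</p></li>
```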
- terminate = false; - for (i = 0, l = terminatorRules.length; i < l; i++) { - if (terminatorRules[i](state, nextLine, endLine, true)) { - terminate = true; - break; - } - } - if (terminate) { - break; - } - } - str = state.getLines(startLine, nextLine, state.blkIndent, false).trim(); - max = str.length; - for (pos = 1; pos < max; pos++) { - ch = str.charCodeAt(pos); - if (ch === 91 /* [ */) { - return false; - } else if (ch === 93 /* ] */) { - labelEnd = pos; - break; - } else if (ch === 10 /* \n */) { - lines++; - } else if (ch === 92 /* \ */) { - pos++; - if (pos < max && str.charCodeAt(pos) === 10) { - lines++; - } - } - } - if (labelEnd < 0 || str.charCodeAt(labelEnd + 1) !== 58 /* : */) { - return false; - } - // [label]: destination 'title' - // ^^^ skip optional whitespace here - for (pos = labelEnd + 2; pos < max; pos++) { - ch = str.charCodeAt(pos); - if (ch === 10) { - lines++; - } else if (isSpace$6(ch)) ; else { - break; - } - } - // [label]: destination 'title' - // ^^^^^^^^^^^ parse this - res = state.md.helpers.parseLinkDestination(str, pos, max); - if (!res.ok) { - return false; - } - href = state.md.normalizeLink(res.str); - if (!state.md.validateLink(href)) { - return false; - } - pos = res.pos; - lines += res.lines; - // save cursor state, we could require to rollback later - destEndPos = pos; - destEndLineNo = lines; - // [label]: destination 'title' - // ^^^ skipping those spaces - start = pos; - for (;pos < max; pos++) { - ch = str.charCodeAt(pos); - if (ch === 10) { - lines++; - } else if (isSpace$6(ch)) ; else { - break; - } - } - // [label]: destination 'title' - // ^^^^^^^ parse this - res = state.md.helpers.parseLinkTitle(str, pos, max); - if (pos < max && start !== pos && res.ok) { - title = res.str; - pos = res.pos; - lines += res.lines; - } else { - title = ""; - pos = destEndPos; - lines = destEndLineNo; - } - // skip trailing spaces until the rest of the line - while (pos < max) { - ch = str.charCodeAt(pos); - if (!isSpace$6(ch)) { - break; - } - pos++; - } - if (pos < max && str.charCodeAt(pos) !== 10) { - if (title) { - // garbage at the end of the line after title, - // but it could still be a valid reference if we roll back - title = ""; - pos = destEndPos; - lines = destEndLineNo; - while (pos < max) { - ch = str.charCodeAt(pos); - if (!isSpace$6(ch)) { - break; - } - pos++; - } - } - } - if (pos < max && str.charCodeAt(pos) !== 10) { - // garbage at the end of the line - return false; - } - label = normalizeReference$2(str.slice(1, labelEnd)); - if (!label) { - // CommonMark 0.20 disallows empty labels - return false; - } - // Reference can not terminate anything. This check is for safety only. 
- /*istanbul ignore if*/ if (silent) { - return true; - } - if (typeof state.env.references === "undefined") { - state.env.references = {}; - } - if (typeof state.env.references[label] === "undefined") { - state.env.references[label] = { - title: title, - href: href - }; - } - state.parentType = oldParentType; - state.line = startLine + lines + 1; - return true; - }; - // List of valid html block names, according to the CommonMark spec - var html_blocks = [ "address", "article", "aside", "base", "basefont", "blockquote", "body", "caption", "center", "col", "colgroup", "dd", "details", "dialog", "dir", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "frame", "frameset", "h1", "h2", "h3", "h4", "h5", "h6", "head", "header", "hr", "html", "iframe", "legend", "li", "link", "main", "menu", "menuitem", "nav", "noframes", "ol", "optgroup", "option", "p", "param", "section", "source", "summary", "table", "tbody", "td", "tfoot", "th", "thead", "title", "tr", "track", "ul" ]; - // Regexps to match html elements - var attr_name = "[a-zA-Z_:][a-zA-Z0-9:._-]*"; - var unquoted = "[^\"'=<>`\\x00-\\x20]+"; - var single_quoted = "'[^']*'"; - var double_quoted = '"[^"]*"'; - var attr_value = "(?:" + unquoted + "|" + single_quoted + "|" + double_quoted + ")"; - var attribute = "(?:\\s+" + attr_name + "(?:\\s*=\\s*" + attr_value + ")?)"; - var open_tag = "<[A-Za-z][A-Za-z0-9\\-]*" + attribute + "*\\s*\\/?>"; - var close_tag = "<\\/[A-Za-z][A-Za-z0-9\\-]*\\s*>"; - var comment = "\x3c!----\x3e|\x3c!--(?:-?[^>-])(?:-?[^-])*--\x3e"; - var processing = "<[?][\\s\\S]*?[?]>"; - var declaration = "<![A-Z]+\\s[^>]*>"; - var cdata = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"; - var HTML_TAG_RE$1 = new RegExp("^(?:" + open_tag + "|" + close_tag + "|" + comment + "|" + processing + "|" + declaration + "|" + cdata + ")"); - var HTML_OPEN_CLOSE_TAG_RE$1 = new RegExp("^(?:" + open_tag + "|" + close_tag + ")"); - var HTML_TAG_RE_1 = HTML_TAG_RE$1; - var HTML_OPEN_CLOSE_TAG_RE_1 = HTML_OPEN_CLOSE_TAG_RE$1; - var html_re = { - HTML_TAG_RE: HTML_TAG_RE_1, - HTML_OPEN_CLOSE_TAG_RE: HTML_OPEN_CLOSE_TAG_RE_1 - }; - var HTML_OPEN_CLOSE_TAG_RE = html_re.HTML_OPEN_CLOSE_TAG_RE; - // An array of opening and corresponding closing sequences for html tags, - // last argument defines whether it can terminate a paragraph or not - - var HTML_SEQUENCES = [ [ /^<(script|pre|style|textarea)(?=(\s|>|$))/i, /<\/(script|pre|style|textarea)>/i, true ], [ /^<!--/, /-->/, true ], [ /^<\?/, /\?>/, true ], [ /^<![A-Z]/, />/, true ], [ /^<!\[CDATA\[/, /\]\]>/, true ], [ new RegExp("^</?(" + html_blocks.join("|") + ")(?=(\\s|/?>|$))", "i"), /^$/, true ], [ new RegExp(HTML_OPEN_CLOSE_TAG_RE.source + "\\s*$"), /^$/, false ] ]; - var html_block = function html_block(state, startLine, endLine, silent) { - var i, nextLine, token, lineText, pos = state.bMarks[startLine] + state.tShift[startLine], max = state.eMarks[startLine]; - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[startLine] - state.blkIndent >= 4) { - return false; - } - if (!state.md.options.html) { - return false; - } - if (state.src.charCodeAt(pos) !== 60 /* < */) { - return false; - } - lineText = state.src.slice(pos, max); - for (i = 0; i < HTML_SEQUENCES.length; i++) { - if (HTML_SEQUENCES[i][0].test(lineText)) { - break; - } - } - if (i === HTML_SEQUENCES.length) { - return false; - } - if (silent) { - // true if this sequence can be a terminator, false otherwise - return HTML_SEQUENCES[i][2]; - } - nextLine = startLine + 1; - // If we are here - we detected HTML block. - // Let's roll down till block end.
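Note that html_block is gated on the `html` option; when enabled, the lines between an opening and closing sequence pass through without inline processing. A sketch (markdown-it assumed installed):

```js
const MarkdownIt = require("markdown-it");

const strict = new MarkdownIt();              // html: false (default)
const loose = new MarkdownIt({ html: true });

const src = "<div>\n*stars*\n</div>";
console.log(strict.render(src)); // tags escaped, treated as paragraph text
console.log(loose.render(src));  // "<div>\n*stars*\n</div>\n" kept verbatim
```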
- if (!HTML_SEQUENCES[i][1].test(lineText)) { - for (;nextLine < endLine; nextLine++) { - if (state.sCount[nextLine] < state.blkIndent) { - break; - } - pos = state.bMarks[nextLine] + state.tShift[nextLine]; - max = state.eMarks[nextLine]; - lineText = state.src.slice(pos, max); - if (HTML_SEQUENCES[i][1].test(lineText)) { - if (lineText.length !== 0) { - nextLine++; - } - break; - } - } - } - state.line = nextLine; - token = state.push("html_block", "", 0); - token.map = [ startLine, nextLine ]; - token.content = state.getLines(startLine, nextLine, state.blkIndent, true); - return true; - }; - var isSpace$5 = utils.isSpace; - var heading = function heading(state, startLine, endLine, silent) { - var ch, level, tmp, token, pos = state.bMarks[startLine] + state.tShift[startLine], max = state.eMarks[startLine]; - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[startLine] - state.blkIndent >= 4) { - return false; - } - ch = state.src.charCodeAt(pos); - if (ch !== 35 /* # */ || pos >= max) { - return false; - } - // count heading level - level = 1; - ch = state.src.charCodeAt(++pos); - while (ch === 35 /* # */ && pos < max && level <= 6) { - level++; - ch = state.src.charCodeAt(++pos); - } - if (level > 6 || pos < max && !isSpace$5(ch)) { - return false; - } - if (silent) { - return true; - } - // Let's cut tails like ' ### ' from the end of string - max = state.skipSpacesBack(max, pos); - tmp = state.skipCharsBack(max, 35, pos); - // # - if (tmp > pos && isSpace$5(state.src.charCodeAt(tmp - 1))) { - max = tmp; - } - state.line = startLine + 1; - token = state.push("heading_open", "h" + String(level), 1); - token.markup = "########".slice(0, level); - token.map = [ startLine, state.line ]; - token = state.push("inline", "", 0); - token.content = state.src.slice(pos, max).trim(); - token.map = [ startLine, state.line ]; - token.children = []; - token = state.push("heading_close", "h" + String(level), -1); - token.markup = "########".slice(0, level); - return true; - }; - // lheading (---, ===) - var lheading = function lheading(state, startLine, endLine /*, silent*/) { - var content, terminate, i, l, token, pos, max, level, marker, nextLine = startLine + 1, oldParentType, terminatorRules = state.md.block.ruler.getRules("paragraph"); - // if it's indented more than 3 spaces, it should be a code block - if (state.sCount[startLine] - state.blkIndent >= 4) { - return false; - } - oldParentType = state.parentType; - state.parentType = "paragraph"; - // use paragraph to match terminatorRules - // jump line-by-line until empty one or EOF - for (;nextLine < endLine && !state.isEmpty(nextLine); nextLine++) { - // this would be a code block normally, but after paragraph - // it's considered a lazy continuation regardless of what's there - if (state.sCount[nextLine] - state.blkIndent > 3) { - continue; - } - - // Check for underline in setext header - - if (state.sCount[nextLine] >= state.blkIndent) { - pos = state.bMarks[nextLine] + state.tShift[nextLine]; - max = state.eMarks[nextLine]; - if (pos < max) { - marker = state.src.charCodeAt(pos); - if (marker === 45 /* - */ || marker === 61 /* = */) { - pos = state.skipChars(pos, marker); - pos = state.skipSpaces(pos); - if (pos >= max) { - level = marker === 61 /* = */ ? 1 : 2; - break; - } - } - } - } - // quirk for blockquotes, this line should already be checked by that rule - if (state.sCount[nextLine] < 0) { - continue; - } - // Some tags can terminate paragraph without empty line. 
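The heading rule caps ATX levels at six and trims a trailing `#` run; the setext form handled by the lheading rule below yields the same token shape. A sketch:

```js
const MarkdownIt = require("markdown-it");
const md = new MarkdownIt();

console.log(md.render("## Title ##")); // "<h2>Title</h2>\n" (tail trimmed)
console.log(md.render("####### x"));   // "<p>####### x</p>\n" (level > 6 fails)
console.log(md.render("Title\n====")); // "<h1>Title</h1>\n" via lheading
```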
- terminate = false; - for (i = 0, l = terminatorRules.length; i < l; i++) { - if (terminatorRules[i](state, nextLine, endLine, true)) { - terminate = true; - break; - } - } - if (terminate) { - break; - } - } - if (!level) { - // Didn't find valid underline - return false; - } - content = state.getLines(startLine, nextLine, state.blkIndent, false).trim(); - state.line = nextLine + 1; - token = state.push("heading_open", "h" + String(level), 1); - token.markup = String.fromCharCode(marker); - token.map = [ startLine, state.line ]; - token = state.push("inline", "", 0); - token.content = content; - token.map = [ startLine, state.line - 1 ]; - token.children = []; - token = state.push("heading_close", "h" + String(level), -1); - token.markup = String.fromCharCode(marker); - state.parentType = oldParentType; - return true; - }; - // Paragraph - var paragraph = function paragraph(state, startLine, endLine) { - var content, terminate, i, l, token, oldParentType, nextLine = startLine + 1, terminatorRules = state.md.block.ruler.getRules("paragraph"); - oldParentType = state.parentType; - state.parentType = "paragraph"; - // jump line-by-line until empty one or EOF - for (;nextLine < endLine && !state.isEmpty(nextLine); nextLine++) { - // this would be a code block normally, but after paragraph - // it's considered a lazy continuation regardless of what's there - if (state.sCount[nextLine] - state.blkIndent > 3) { - continue; - } - // quirk for blockquotes, this line should already be checked by that rule - if (state.sCount[nextLine] < 0) { - continue; - } - // Some tags can terminate paragraph without empty line. - terminate = false; - for (i = 0, l = terminatorRules.length; i < l; i++) { - if (terminatorRules[i](state, nextLine, endLine, true)) { - terminate = true; - break; - } - } - if (terminate) { - break; - } - } - content = state.getLines(startLine, nextLine, state.blkIndent, false).trim(); - state.line = nextLine; - token = state.push("paragraph_open", "p", 1); - token.map = [ startLine, state.line ]; - token = state.push("inline", "", 0); - token.content = content; - token.map = [ startLine, state.line ]; - token.children = []; - token = state.push("paragraph_close", "p", -1); - state.parentType = oldParentType; - return true; - }; - var isSpace$4 = utils.isSpace; - function StateBlock(src, md, env, tokens) { - var ch, s, start, pos, len, indent, offset, indent_found; - this.src = src; - // link to parser instance - this.md = md; - this.env = env; - - // Internal state variables - - this.tokens = tokens; - this.bMarks = []; - // line begin offsets for fast jumps - this.eMarks = []; - // line end offsets for fast jumps - this.tShift = []; - // offsets of the first non-space characters (tabs not expanded) - this.sCount = []; - // indents for each line (tabs expanded) - // An amount of virtual spaces (tabs expanded) between beginning - // of each line (bMarks) and real beginning of that line. - - // It exists only as a hack because blockquotes override bMarks - // losing information in the process. - - // It's used only when expanding tabs, you can think about it as - // an initial tab length, e.g. bsCount=21 applied to string `\t123` - // means first tab should be expanded to 4-21%4 === 3 spaces.
- - this.bsCount = []; - // block parser variables - this.blkIndent = 0; - // required block content indent (for example, if we are - // inside a list, it would be positioned after list marker) - this.line = 0; - // line index in src - this.lineMax = 0; - // lines count - this.tight = false; - // loose/tight mode for lists - this.ddIndent = -1; - // indent of the current dd block (-1 if there isn't any) - this.listIndent = -1; - // indent of the current list block (-1 if there isn't any) - // can be 'blockquote', 'list', 'root', 'paragraph' or 'reference' - // used in lists to determine if they interrupt a paragraph - this.parentType = "root"; - this.level = 0; - // renderer - this.result = ""; - // Create caches - // Generate markers. - s = this.src; - indent_found = false; - for (start = pos = indent = offset = 0, len = s.length; pos < len; pos++) { - ch = s.charCodeAt(pos); - if (!indent_found) { - if (isSpace$4(ch)) { - indent++; - if (ch === 9) { - offset += 4 - offset % 4; - } else { - offset++; - } - continue; - } else { - indent_found = true; - } - } - if (ch === 10 || pos === len - 1) { - if (ch !== 10) { - pos++; - } - this.bMarks.push(start); - this.eMarks.push(pos); - this.tShift.push(indent); - this.sCount.push(offset); - this.bsCount.push(0); - indent_found = false; - indent = 0; - offset = 0; - start = pos + 1; - } - } - // Push fake entry to simplify cache bounds checks - this.bMarks.push(s.length); - this.eMarks.push(s.length); - this.tShift.push(0); - this.sCount.push(0); - this.bsCount.push(0); - this.lineMax = this.bMarks.length - 1; - // don't count last fake line - } - // Push new token to "stream". - - StateBlock.prototype.push = function(type, tag, nesting) { - var token$1 = new token(type, tag, nesting); - token$1.block = true; - if (nesting < 0) this.level--; - // closing tag - token$1.level = this.level; - if (nesting > 0) this.level++; - // opening tag - this.tokens.push(token$1); - return token$1; - }; - StateBlock.prototype.isEmpty = function isEmpty(line) { - return this.bMarks[line] + this.tShift[line] >= this.eMarks[line]; - }; - StateBlock.prototype.skipEmptyLines = function skipEmptyLines(from) { - for (var max = this.lineMax; from < max; from++) { - if (this.bMarks[from] + this.tShift[from] < this.eMarks[from]) { - break; - } - } - return from; - }; - // Skip spaces from given position. - StateBlock.prototype.skipSpaces = function skipSpaces(pos) { - var ch; - for (var max = this.src.length; pos < max; pos++) { - ch = this.src.charCodeAt(pos); - if (!isSpace$4(ch)) { - break; - } - } - return pos; - }; - // Skip spaces from given position in reverse. - StateBlock.prototype.skipSpacesBack = function skipSpacesBack(pos, min) { - if (pos <= min) { - return pos; - } - while (pos > min) { - if (!isSpace$4(this.src.charCodeAt(--pos))) { - return pos + 1; - } - } - return pos; - }; - // Skip char codes from given position - StateBlock.prototype.skipChars = function skipChars(pos, code) { - for (var max = this.src.length; pos < max; pos++) { - if (this.src.charCodeAt(pos) !== code) { - break; - } - } - return pos; - }; - // Skip char codes reverse from given position - 1 - StateBlock.prototype.skipCharsBack = function skipCharsBack(pos, code, min) { - if (pos <= min) { - return pos; - } - while (pos > min) { - if (code !== this.src.charCodeAt(--pos)) { - return pos + 1; - } - } - return pos; - }; - // cut lines range from source. 
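The constructor above builds all per-line caches in a single pass over `src`. A standalone sketch of that scan (hypothetical `scanLines` helper, not the library's export): `bMarks`/`eMarks` are line start/end offsets, `tShift` counts raw leading whitespace characters, and `sCount` expands tabs to the next multiple of 4.

```js
function scanLines(src) {
    const bMarks = [], eMarks = [], tShift = [], sCount = [];
    let start = 0, indent = 0, offset = 0, indentFound = false;
    for (let pos = 0, len = src.length; pos < len; pos++) {
        const ch = src.charCodeAt(pos);
        if (!indentFound) {
            if (ch === 0x20 || ch === 0x09) {
                indent++;
                offset += ch === 0x09 ? 4 - (offset % 4) : 1; // expand tab
                continue;
            }
            indentFound = true;
        }
        if (ch === 0x0a || pos === len - 1) {
            bMarks.push(start);
            eMarks.push(ch === 0x0a ? pos : pos + 1);
            tShift.push(indent);
            sCount.push(offset);
            indentFound = false;
            indent = offset = 0;
            start = pos + 1;
        }
    }
    return { bMarks, eMarks, tShift, sCount };
}

console.log(scanLines("ab\n  cd\n\tef"));
// { bMarks: [0,3,8], eMarks: [2,7,11], tShift: [0,2,1], sCount: [0,2,4] }
```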
- StateBlock.prototype.getLines = function getLines(begin, end, indent, keepLastLF) { - var i, lineIndent, ch, first, last, queue, lineStart, line = begin; - if (begin >= end) { - return ""; - } - queue = new Array(end - begin); - for (i = 0; line < end; line++, i++) { - lineIndent = 0; - lineStart = first = this.bMarks[line]; - if (line + 1 < end || keepLastLF) { - // No need for bounds check because we have fake entry on tail. - last = this.eMarks[line] + 1; - } else { - last = this.eMarks[line]; - } - while (first < last && lineIndent < indent) { - ch = this.src.charCodeAt(first); - if (isSpace$4(ch)) { - if (ch === 9) { - lineIndent += 4 - (lineIndent + this.bsCount[line]) % 4; - } else { - lineIndent++; - } - } else if (first - lineStart < this.tShift[line]) { - // patched tShift masked characters to look like spaces (blockquotes, list markers) - lineIndent++; - } else { - break; - } - first++; - } - if (lineIndent > indent) { - // partially expanding tabs in code blocks, e.g '\t\tfoobar' - // with indent=2 becomes ' \tfoobar' - queue[i] = new Array(lineIndent - indent + 1).join(" ") + this.src.slice(first, last); - } else { - queue[i] = this.src.slice(first, last); - } - } - return queue.join(""); - }; - // re-export Token class to use in block rules - StateBlock.prototype.Token = token; - var state_block = StateBlock; - var _rules$1 = [ - // First 2 params - rule name & source. Secondary array - list of rules, - // which can be terminated by this one. - [ "table", table, [ "paragraph", "reference" ] ], [ "code", code ], [ "fence", fence, [ "paragraph", "reference", "blockquote", "list" ] ], [ "blockquote", blockquote, [ "paragraph", "reference", "blockquote", "list" ] ], [ "hr", hr, [ "paragraph", "reference", "blockquote", "list" ] ], [ "list", list, [ "paragraph", "reference", "blockquote" ] ], [ "reference", reference ], [ "html_block", html_block, [ "paragraph", "reference", "blockquote" ] ], [ "heading", heading, [ "paragraph", "reference", "blockquote" ] ], [ "lheading", lheading ], [ "paragraph", paragraph ] ]; - /** - * new ParserBlock() - **/ function ParserBlock() { - /** - * ParserBlock#ruler -> Ruler - * - * [[Ruler]] instance. Keep configuration of block rules. - **/ - this.ruler = new ruler; - for (var i = 0; i < _rules$1.length; i++) { - this.ruler.push(_rules$1[i][0], _rules$1[i][1], { - alt: (_rules$1[i][2] || []).slice() - }); - } - } - // Generate tokens for input range - - ParserBlock.prototype.tokenize = function(state, startLine, endLine) { - var ok, i, prevLine, rules = this.ruler.getRules(""), len = rules.length, line = startLine, hasEmptyLines = false, maxNesting = state.md.options.maxNesting; - while (line < endLine) { - state.line = line = state.skipEmptyLines(line); - if (line >= endLine) { - break; - } - // Termination condition for nested calls. - // Nested calls currently used for blockquotes & lists - if (state.sCount[line] < state.blkIndent) { - break; - } - // If nesting level exceeded - skip tail to the end. That's not ordinary - // situation and we should not care about content. - if (state.level >= maxNesting) { - state.line = endLine; - break; - } - // Try all possible rules. 
- // On success, rule should: - - // - update `state.line` - // - update `state.tokens` - // - return true - prevLine = state.line; - for (i = 0; i < len; i++) { - ok = rules[i](state, line, endLine, false); - if (ok) { - if (prevLine >= state.line) { - throw new Error("block rule didn't increment state.line"); - } - break; - } - } - // this can only happen if user disables paragraph rule - if (!ok) throw new Error("none of the block rules matched"); - // set state.tight if we had an empty line before current tag - // i.e. latest empty line should not count - state.tight = !hasEmptyLines; - // paragraph might "eat" one newline after it in nested lists - if (state.isEmpty(state.line - 1)) { - hasEmptyLines = true; - } - line = state.line; - if (line < endLine && state.isEmpty(line)) { - hasEmptyLines = true; - line++; - state.line = line; - } - } - }; - /** - * ParserBlock.parse(str, md, env, outTokens) - * - * Process input string and push block tokens into `outTokens` - **/ ParserBlock.prototype.parse = function(src, md, env, outTokens) { - var state; - if (!src) { - return; - } - state = new this.State(src, md, env, outTokens); - this.tokenize(state, state.line, state.lineMax); - }; - ParserBlock.prototype.State = state_block; - var parser_block = ParserBlock; - // Skip text characters for text token, place those to pending buffer - // Rule to skip pure text - // '{}$%@~+=:' reserved for extensions - // !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~ - // !!!! Don't confuse with "Markdown ASCII Punctuation" chars - // http://spec.commonmark.org/0.15/#ascii-punctuation-character - function isTerminatorChar(ch) { - switch (ch) { - case 10 /* \n */ : - case 33 /* ! */ : - case 35 /* # */ : - case 36 /* $ */ : - case 37 /* % */ : - case 38 /* & */ : - case 42 /* * */ : - case 43 /* + */ : - case 45 /* - */ : - case 58 /* : */ : - case 60 /* < */ : - case 61 /* = */ : - case 62 /* > */ : - case 64 /* @ */ : - case 91 /* [ */ : - case 92 /* \ */ : - case 93 /* ] */ : - case 94 /* ^ */ : - case 95 /* _ */ : - case 96 /* ` */ : - case 123 /* { */ : - case 125 /* } */ : - case 126 /* ~ */ : - return true; - - default: - return false; - } - } - var text = function text(state, silent) { - var pos = state.pos; - while (pos < state.posMax && !isTerminatorChar(state.src.charCodeAt(pos))) { - pos++; - } - if (pos === state.pos) { - return false; - } - if (!silent) { - state.pending += state.src.slice(state.pos, pos); - } - state.pos = pos; - return true; - }; - // Process links like https://example.org/ - // RFC3986: scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "."
) - var SCHEME_RE = /(?:^|[^a-z0-9.+-])([a-z][a-z0-9.+-]*)$/i; - var linkify = function linkify(state, silent) { - var pos, max, match, proto, link, url, fullUrl, token; - if (!state.md.options.linkify) return false; - if (state.linkLevel > 0) return false; - pos = state.pos; - max = state.posMax; - if (pos + 3 > max) return false; - if (state.src.charCodeAt(pos) !== 58 /* : */) return false; - if (state.src.charCodeAt(pos + 1) !== 47 /* / */) return false; - if (state.src.charCodeAt(pos + 2) !== 47 /* / */) return false; - match = state.pending.match(SCHEME_RE); - if (!match) return false; - proto = match[1]; - link = state.md.linkify.matchAtStart(state.src.slice(pos - proto.length)); - if (!link) return false; - url = link.url; - // invalid link, but still detected by linkify somehow; - // need to check to prevent infinite loop below - if (url.length <= proto.length) return false; - // disallow '*' at the end of the link (conflicts with emphasis) - url = url.replace(/\*+$/, ""); - fullUrl = state.md.normalizeLink(url); - if (!state.md.validateLink(fullUrl)) return false; - if (!silent) { - state.pending = state.pending.slice(0, -proto.length); - token = state.push("link_open", "a", 1); - token.attrs = [ [ "href", fullUrl ] ]; - token.markup = "linkify"; - token.info = "auto"; - token = state.push("text", "", 0); - token.content = state.md.normalizeLinkText(url); - token = state.push("link_close", "a", -1); - token.markup = "linkify"; - token.info = "auto"; - } - state.pos += url.length - proto.length; - return true; - }; - var isSpace$3 = utils.isSpace; - var newline = function newline(state, silent) { - var pmax, max, ws, pos = state.pos; - if (state.src.charCodeAt(pos) !== 10 /* \n */) { - return false; - } - pmax = state.pending.length - 1; - max = state.posMax; - // ' \n' -> hardbreak - // Lookup in pending chars is bad practice! Don't copy to other rules! - // Pending string is stored in concat mode, indexed lookups will cause - // conversion to flat mode. - if (!silent) { - if (pmax >= 0 && state.pending.charCodeAt(pmax) === 32) { - if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 32) { - // Find whitespaces tail of pending chars.
- ws = pmax - 1; - while (ws >= 1 && state.pending.charCodeAt(ws - 1) === 32) ws--; - state.pending = state.pending.slice(0, ws); - state.push("hardbreak", "br", 0); - } else { - state.pending = state.pending.slice(0, -1); - state.push("softbreak", "br", 0); - } - } else { - state.push("softbreak", "br", 0); - } - } - pos++; - // skip leading spaces for next line - while (pos < max && isSpace$3(state.src.charCodeAt(pos))) { - pos++; - } - state.pos = pos; - return true; - }; - var isSpace$2 = utils.isSpace; - var ESCAPED = []; - for (var i = 0; i < 256; i++) { - ESCAPED.push(0); - } - "\\!\"#$%&'()*+,./:;<=>?@[]^_`{|}~-".split("").forEach((function(ch) { - ESCAPED[ch.charCodeAt(0)] = 1; - })); - var _escape = function escape(state, silent) { - var ch1, ch2, origStr, escapedStr, token, pos = state.pos, max = state.posMax; - if (state.src.charCodeAt(pos) !== 92 /* \ */) return false; - pos++; - // '\' at the end of the inline block - if (pos >= max) return false; - ch1 = state.src.charCodeAt(pos); - if (ch1 === 10) { - if (!silent) { - state.push("hardbreak", "br", 0); - } - pos++; - // skip leading whitespaces from next line - while (pos < max) { - ch1 = state.src.charCodeAt(pos); - if (!isSpace$2(ch1)) break; - pos++; - } - state.pos = pos; - return true; - } - escapedStr = state.src[pos]; - if (ch1 >= 55296 && ch1 <= 56319 && pos + 1 < max) { - ch2 = state.src.charCodeAt(pos + 1); - if (ch2 >= 56320 && ch2 <= 57343) { - escapedStr += state.src[pos + 1]; - pos++; - } - } - origStr = "\\" + escapedStr; - if (!silent) { - token = state.push("text_special", "", 0); - if (ch1 < 256 && ESCAPED[ch1] !== 0) { - token.content = escapedStr; - } else { - token.content = origStr; - } - token.markup = origStr; - token.info = "escape"; - } - state.pos = pos + 1; - return true; - }; - // Parse backticks - var backticks = function backtick(state, silent) { - var start, max, marker, token, matchStart, matchEnd, openerLength, closerLength, pos = state.pos, ch = state.src.charCodeAt(pos); - if (ch !== 96 /* ` */) { - return false; - } - start = pos; - pos++; - max = state.posMax; - // scan marker length - while (pos < max && state.src.charCodeAt(pos) === 96 /* ` */) { - pos++; - } - marker = state.src.slice(start, pos); - openerLength = marker.length; - if (state.backticksScanned && (state.backticks[openerLength] || 0) <= start) { - if (!silent) state.pending += marker; - state.pos += openerLength; - return true; - } - matchEnd = pos; - // Nothing found in the cache, scan until the end of the line (or until marker is found) - while ((matchStart = state.src.indexOf("`", matchEnd)) !== -1) { - matchEnd = matchStart + 1; - // scan marker length - while (matchEnd < max && state.src.charCodeAt(matchEnd) === 96 /* ` */) { - matchEnd++; - } - closerLength = matchEnd - matchStart; - if (closerLength === openerLength) { - // Found matching closer length.
- if (!silent) { - token = state.push("code_inline", "code", 0); - token.markup = marker; - token.content = state.src.slice(pos, matchStart).replace(/\n/g, " ").replace(/^ (.+) $/, "$1"); - } - state.pos = matchEnd; - return true; - } - // Some different length found, put it in cache as upper limit of where closer can be found - state.backticks[closerLength] = matchStart; - } - // Scanned through the end, didn't find anything - state.backticksScanned = true; - if (!silent) state.pending += marker; - state.pos += openerLength; - return true; - }; - // ~~strike through~~ - // Insert each marker as a separate text token, and add it to delimiter list - - var tokenize$1 = function strikethrough(state, silent) { - var i, scanned, token, len, ch, start = state.pos, marker = state.src.charCodeAt(start); - if (silent) { - return false; - } - if (marker !== 126 /* ~ */) { - return false; - } - scanned = state.scanDelims(state.pos, true); - len = scanned.length; - ch = String.fromCharCode(marker); - if (len < 2) { - return false; - } - if (len % 2) { - token = state.push("text", "", 0); - token.content = ch; - len--; - } - for (i = 0; i < len; i += 2) { - token = state.push("text", "", 0); - token.content = ch + ch; - state.delimiters.push({ - marker: marker, - length: 0, - // disable "rule of 3" length checks meant for emphasis - token: state.tokens.length - 1, - end: -1, - open: scanned.can_open, - close: scanned.can_close - }); - } - state.pos += scanned.length; - return true; - }; - function postProcess$1(state, delimiters) { - var i, j, startDelim, endDelim, token, loneMarkers = [], max = delimiters.length; - for (i = 0; i < max; i++) { - startDelim = delimiters[i]; - if (startDelim.marker !== 126 /* ~ */) { - continue; - } - if (startDelim.end === -1) { - continue; - } - endDelim = delimiters[startDelim.end]; - token = state.tokens[startDelim.token]; - token.type = "s_open"; - token.tag = "s"; - token.nesting = 1; - token.markup = "~~"; - token.content = ""; - token = state.tokens[endDelim.token]; - token.type = "s_close"; - token.tag = "s"; - token.nesting = -1; - token.markup = "~~"; - token.content = ""; - if (state.tokens[endDelim.token - 1].type === "text" && state.tokens[endDelim.token - 1].content === "~") { - loneMarkers.push(endDelim.token - 1); - } - } - // If a marker sequence has an odd number of characters, it's split - // like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the - // start of the sequence. - - // So, we have to move all those markers after subsequent s_close tags.
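The backtick rule pairs only equal-length marker runs (with the one-space trim shown above), and `~~` pairs become `<s>` tags in the post-processing pass that continues below. A sketch via the public API, assuming markdown-it is installed:

```js
const MarkdownIt = require("markdown-it");
const md = new MarkdownIt();

console.log(md.renderInline("`` ` ``")); // <code>`</code> (pad space trimmed)
console.log(md.renderInline("` a `"));   // <code>a</code> via /^ (.+) $/ trim
console.log(md.render("~~gone~~"));      // <p><s>gone</s></p>
```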
- - while (loneMarkers.length) { - i = loneMarkers.pop(); - j = i + 1; - while (j < state.tokens.length && state.tokens[j].type === "s_close") { - j++; - } - j--; - if (i !== j) { - token = state.tokens[j]; - state.tokens[j] = state.tokens[i]; - state.tokens[i] = token; - } - } - } - // Walk through delimiter list and replace text tokens with tags - - var postProcess_1$1 = function strikethrough(state) { - var curr, tokens_meta = state.tokens_meta, max = state.tokens_meta.length; - postProcess$1(state, state.delimiters); - for (curr = 0; curr < max; curr++) { - if (tokens_meta[curr] && tokens_meta[curr].delimiters) { - postProcess$1(state, tokens_meta[curr].delimiters); - } - } - }; - var strikethrough = { - tokenize: tokenize$1, - postProcess: postProcess_1$1 - }; - // Process *this* and _that_ - // Insert each marker as a separate text token, and add it to delimiter list - - var tokenize = function emphasis(state, silent) { - var i, scanned, token, start = state.pos, marker = state.src.charCodeAt(start); - if (silent) { - return false; - } - if (marker !== 95 /* _ */ && marker !== 42 /* * */) { - return false; - } - scanned = state.scanDelims(state.pos, marker === 42); - for (i = 0; i < scanned.length; i++) { - token = state.push("text", "", 0); - token.content = String.fromCharCode(marker); - state.delimiters.push({ - // Char code of the starting marker (number). - marker: marker, - // Total length of these series of delimiters. - length: scanned.length, - // A position of the token this delimiter corresponds to. - token: state.tokens.length - 1, - // If this delimiter is matched as a valid opener, `end` will be - // equal to its position, otherwise it's `-1`. - end: -1, - // Boolean flags that determine if this delimiter could open or close - // an emphasis. - open: scanned.can_open, - close: scanned.can_close - }); - } - state.pos += scanned.length; - return true; - }; - function postProcess(state, delimiters) { - var i, startDelim, endDelim, token, ch, isStrong, max = delimiters.length; - for (i = max - 1; i >= 0; i--) { - startDelim = delimiters[i]; - if (startDelim.marker !== 95 /* _ */ && startDelim.marker !== 42 /* * */) { - continue; - } - // Process only opening markers - if (startDelim.end === -1) { - continue; - } - endDelim = delimiters[startDelim.end]; - // If the previous delimiter has the same marker and is adjacent to this one, - // merge those into one strong delimiter. - - // `<em><em>whatever</em></em>` -> `<strong>whatever</strong>` - - isStrong = i > 0 && delimiters[i - 1].end === startDelim.end + 1 && - // check that first two markers match and adjacent - delimiters[i - 1].marker === startDelim.marker && delimiters[i - 1].token === startDelim.token - 1 && - // check that last two markers are adjacent (we can safely assume they match) - delimiters[startDelim.end + 1].token === endDelim.token + 1; - ch = String.fromCharCode(startDelim.marker); - token = state.tokens[startDelim.token]; - token.type = isStrong ? "strong_open" : "em_open"; - token.tag = isStrong ? "strong" : "em"; - token.nesting = 1; - token.markup = isStrong ? ch + ch : ch; - token.content = ""; - token = state.tokens[endDelim.token]; - token.type = isStrong ? "strong_close" : "em_close"; - token.tag = isStrong ? "strong" : "em"; - token.nesting = -1; - token.markup = isStrong ?
ch + ch : ch; - token.content = ""; - if (isStrong) { - state.tokens[delimiters[i - 1].token].content = ""; - state.tokens[delimiters[startDelim.end + 1].token].content = ""; - i--; - } - } - } - // Walk through delimiter list and replace text tokens with tags - - var postProcess_1 = function emphasis(state) { - var curr, tokens_meta = state.tokens_meta, max = state.tokens_meta.length; - postProcess(state, state.delimiters); - for (curr = 0; curr < max; curr++) { - if (tokens_meta[curr] && tokens_meta[curr].delimiters) { - postProcess(state, tokens_meta[curr].delimiters); - } - } - }; - var emphasis = { - tokenize: tokenize, - postProcess: postProcess_1 - }; - var normalizeReference$1 = utils.normalizeReference; - var isSpace$1 = utils.isSpace; - var link = function link(state, silent) { - var attrs, code, label, labelEnd, labelStart, pos, res, ref, token, href = "", title = "", oldPos = state.pos, max = state.posMax, start = state.pos, parseReference = true; - if (state.src.charCodeAt(state.pos) !== 91 /* [ */) { - return false; - } - labelStart = state.pos + 1; - labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, true); - // parser failed to find ']', so it's not a valid link - if (labelEnd < 0) { - return false; - } - pos = labelEnd + 1; - if (pos < max && state.src.charCodeAt(pos) === 40 /* ( */) { - // Inline link - // might have found a valid shortcut link, disable reference parsing - parseReference = false; - // [link]( <href> "title" ) - // ^^ skipping these spaces - pos++; - for (;pos < max; pos++) { - code = state.src.charCodeAt(pos); - if (!isSpace$1(code) && code !== 10) { - break; - } - } - if (pos >= max) { - return false; - } - // [link]( <href> "title" ) - // ^^^^^^ parsing link destination - start = pos; - res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax); - if (res.ok) { - href = state.md.normalizeLink(res.str); - if (state.md.validateLink(href)) { - pos = res.pos; - } else { - href = ""; - } - // [link]( <href> "title" ) - // ^^ skipping these spaces - start = pos; - for (;pos < max; pos++) { - code = state.src.charCodeAt(pos); - if (!isSpace$1(code) && code !== 10) { - break; - } - } - // [link]( <href> "title" ) - // ^^^^^^^ parsing link title - res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax); - if (pos < max && start !== pos && res.ok) { - title = res.str; - pos = res.pos; - // [link]( <href> "title" ) - // ^^ skipping these spaces - for (;pos < max; pos++) { - code = state.src.charCodeAt(pos); - if (!isSpace$1(code) && code !== 10) { - break; - } - } - } - } - if (pos >= max || state.src.charCodeAt(pos) !== 41 /* ) */) { - // parsing a valid shortcut link failed, fallback to reference - parseReference = true; - } - pos++; - } - if (parseReference) { - // Link reference - if (typeof state.env.references === "undefined") { - return false; - } - if (pos < max && state.src.charCodeAt(pos) === 91 /* [ */) { - start = pos + 1; - pos = state.md.helpers.parseLinkLabel(state, pos); - if (pos >= 0) { - label = state.src.slice(start, pos++); - } else { - pos = labelEnd + 1; - } - } else { - pos = labelEnd + 1; - } - // covers label === '' and label === undefined - // (collapsed reference link and shortcut reference link respectively) - if (!label) { - label = state.src.slice(labelStart, labelEnd); - } - ref = state.env.references[normalizeReference$1(label)]; - if (!ref) { - state.pos = oldPos; - return false; - } - href = ref.href; - title = ref.title; - } - - // We found the end of the link, and know for a fact it's a valid link; - // so all that's left to
do is to call tokenizer. - - if (!silent) { - state.pos = labelStart; - state.posMax = labelEnd; - token = state.push("link_open", "a", 1); - token.attrs = attrs = [ [ "href", href ] ]; - if (title) { - attrs.push([ "title", title ]); - } - state.linkLevel++; - state.md.inline.tokenize(state); - state.linkLevel--; - token = state.push("link_close", "a", -1); - } - state.pos = pos; - state.posMax = max; - return true; - }; - var normalizeReference = utils.normalizeReference; - var isSpace = utils.isSpace; - var image = function image(state, silent) { - var attrs, code, content, label, labelEnd, labelStart, pos, ref, res, title, token, tokens, start, href = "", oldPos = state.pos, max = state.posMax; - if (state.src.charCodeAt(state.pos) !== 33 /* ! */) { - return false; - } - if (state.src.charCodeAt(state.pos + 1) !== 91 /* [ */) { - return false; - } - labelStart = state.pos + 2; - labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, false); - // parser failed to find ']', so it's not a valid link - if (labelEnd < 0) { - return false; - } - pos = labelEnd + 1; - if (pos < max && state.src.charCodeAt(pos) === 40 /* ( */) { - // Inline link - // [link]( <href> "title" ) - // ^^ skipping these spaces - pos++; - for (;pos < max; pos++) { - code = state.src.charCodeAt(pos); - if (!isSpace(code) && code !== 10) { - break; - } - } - if (pos >= max) { - return false; - } - // [link]( <href> "title" ) - // ^^^^^^ parsing link destination - start = pos; - res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax); - if (res.ok) { - href = state.md.normalizeLink(res.str); - if (state.md.validateLink(href)) { - pos = res.pos; - } else { - href = ""; - } - } - // [link]( <href> "title" ) - // ^^ skipping these spaces - start = pos; - for (;pos < max; pos++) { - code = state.src.charCodeAt(pos); - if (!isSpace(code) && code !== 10) { - break; - } - } - // [link]( <href> "title" ) - // ^^^^^^^ parsing link title - res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax); - if (pos < max && start !== pos && res.ok) { - title = res.str; - pos = res.pos; - // [link]( <href> "title" ) - // ^^ skipping these spaces - for (;pos < max; pos++) { - code = state.src.charCodeAt(pos); - if (!isSpace(code) && code !== 10) { - break; - } - } - } else { - title = ""; - } - if (pos >= max || state.src.charCodeAt(pos) !== 41 /* ) */) { - state.pos = oldPos; - return false; - } - pos++; - } else { - // Link reference - if (typeof state.env.references === "undefined") { - return false; - } - if (pos < max && state.src.charCodeAt(pos) === 91 /* [ */) { - start = pos + 1; - pos = state.md.helpers.parseLinkLabel(state, pos); - if (pos >= 0) { - label = state.src.slice(start, pos++); - } else { - pos = labelEnd + 1; - } - } else { - pos = labelEnd + 1; - } - // covers label === '' and label === undefined - // (collapsed reference link and shortcut reference link respectively) - if (!label) { - label = state.src.slice(labelStart, labelEnd); - } - ref = state.env.references[normalizeReference(label)]; - if (!ref) { - state.pos = oldPos; - return false; - } - href = ref.href; - title = ref.title; - } - - // We found the end of the link, and know for a fact it's a valid link; - // so all that's left to do is to call tokenizer.
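Inline and reference link forms share the destination/title grammar above, with reference forms resolving through `state.env.references`; the image rule reuses the same grammar. A sketch, assuming markdown-it is installed:

```js
const MarkdownIt = require("markdown-it");
const md = new MarkdownIt();

console.log(md.renderInline('[x](/url "t")'));  // <a href="/url" title="t">x</a>
console.log(md.renderInline("![alt](/i.png)")); // <img src="/i.png" alt="alt">
console.log(md.render("[x]\n\n[x]: /url"));     // resolves via env.references
```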
- - if (!silent) { - content = state.src.slice(labelStart, labelEnd); - state.md.inline.parse(content, state.md, state.env, tokens = []); - token = state.push("image", "img", 0); - token.attrs = attrs = [ [ "src", href ], [ "alt", "" ] ]; - token.children = tokens; - token.content = content; - if (title) { - attrs.push([ "title", title ]); - } - } - state.pos = pos; - state.posMax = max; - return true; - }; - // Process autolinks '<protocol:...>' - /*eslint max-len:0*/ var EMAIL_RE = /^([a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)$/; - var AUTOLINK_RE = /^([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)$/; - var autolink = function autolink(state, silent) { - var url, fullUrl, token, ch, start, max, pos = state.pos; - if (state.src.charCodeAt(pos) !== 60 /* < */) { - return false; - } - start = state.pos; - max = state.posMax; - for (;;) { - if (++pos >= max) return false; - ch = state.src.charCodeAt(pos); - if (ch === 60 /* < */) return false; - if (ch === 62 /* > */) break; - } - url = state.src.slice(start + 1, pos); - if (AUTOLINK_RE.test(url)) { - fullUrl = state.md.normalizeLink(url); - if (!state.md.validateLink(fullUrl)) { - return false; - } - if (!silent) { - token = state.push("link_open", "a", 1); - token.attrs = [ [ "href", fullUrl ] ]; - token.markup = "autolink"; - token.info = "auto"; - token = state.push("text", "", 0); - token.content = state.md.normalizeLinkText(url); - token = state.push("link_close", "a", -1); - token.markup = "autolink"; - token.info = "auto"; - } - state.pos += url.length + 2; - return true; - } - if (EMAIL_RE.test(url)) { - fullUrl = state.md.normalizeLink("mailto:" + url); - if (!state.md.validateLink(fullUrl)) { - return false; - } - if (!silent) { - token = state.push("link_open", "a", 1); - token.attrs = [ [ "href", fullUrl ] ]; - token.markup = "autolink"; - token.info = "auto"; - token = state.push("text", "", 0); - token.content = state.md.normalizeLinkText(url); - token = state.push("link_close", "a", -1); - token.markup = "autolink"; - token.info = "auto"; - } - state.pos += url.length + 2; - return true; - } - return false; - }; - var HTML_TAG_RE = html_re.HTML_TAG_RE; - function isLinkOpen(str) { - return /^<a[>\s]/i.test(str); - } - function isLinkClose(str) { - return /^<\/a\s*>/i.test(str); - } - function isLetter(ch) { - /*eslint no-bitwise:0*/ - var lc = ch | 32; - // to lower case - return lc >= 97 /* a */ && lc <= 122 /* z */; - } - var html_inline = function html_inline(state, silent) { - var ch, match, max, token, pos = state.pos; - if (!state.md.options.html) { - return false; - } - // Check start - max = state.posMax; - if (state.src.charCodeAt(pos) !== 60 /* < */ || pos + 2 >= max) { - return false; - } - // Quick fail on second char - ch = state.src.charCodeAt(pos + 1); - if (ch !== 33 /* ! */ && ch !== 63 /* ?
*/ && ch !== 47 /* / */ && !isLetter(ch)) { - return false; - } - match = state.src.slice(pos).match(HTML_TAG_RE); - if (!match) { - return false; - } - if (!silent) { - token = state.push("html_inline", "", 0); - token.content = match[0]; - if (isLinkOpen(token.content)) state.linkLevel++; - if (isLinkClose(token.content)) state.linkLevel--; - } - state.pos += match[0].length; - return true; - }; - var has = utils.has; - var isValidEntityCode = utils.isValidEntityCode; - var fromCodePoint = utils.fromCodePoint; - var DIGITAL_RE = /^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));/i; - var NAMED_RE = /^&([a-z][a-z0-9]{1,31});/i; - var entity = function entity(state, silent) { - var ch, code, match, token, pos = state.pos, max = state.posMax; - if (state.src.charCodeAt(pos) !== 38 /* & */) return false; - if (pos + 1 >= max) return false; - ch = state.src.charCodeAt(pos + 1); - if (ch === 35 /* # */) { - match = state.src.slice(pos).match(DIGITAL_RE); - if (match) { - if (!silent) { - code = match[1][0].toLowerCase() === "x" ? parseInt(match[1].slice(1), 16) : parseInt(match[1], 10); - token = state.push("text_special", "", 0); - token.content = isValidEntityCode(code) ? fromCodePoint(code) : fromCodePoint(65533); - token.markup = match[0]; - token.info = "entity"; - } - state.pos += match[0].length; - return true; - } - } else { - match = state.src.slice(pos).match(NAMED_RE); - if (match) { - if (has(entities, match[1])) { - if (!silent) { - token = state.push("text_special", "", 0); - token.content = entities[match[1]]; - token.markup = match[0]; - token.info = "entity"; - } - state.pos += match[0].length; - return true; - } - } - } - return false; - }; - // For each opening emphasis-like marker find a matching closing one - function processDelimiters(delimiters) { - var closerIdx, openerIdx, closer, opener, minOpenerIdx, newMinOpenerIdx, isOddMatch, lastJump, openersBottom = {}, max = delimiters.length; - if (!max) return; - // headerIdx is the first delimiter of the current (where closer is) delimiter run - var headerIdx = 0; - var lastTokenIdx = -2; - // needs any value lower than -1 - var jumps = []; - for (closerIdx = 0; closerIdx < max; closerIdx++) { - closer = delimiters[closerIdx]; - jumps.push(0); - // markers belong to same delimiter run if: - // - they have adjacent tokens - // - AND markers are the same - - if (delimiters[headerIdx].marker !== closer.marker || lastTokenIdx !== closer.token - 1) { - headerIdx = closerIdx; - } - lastTokenIdx = closer.token; - // Length is only used for emphasis-specific "rule of 3", - // if it's not defined (in strikethrough or 3rd party plugins), - // we can default it to 0 to disable those checks. - - closer.length = closer.length || 0; - if (!closer.close) continue; - // Previously calculated lower bounds (previous fails) - // for each marker, each delimiter length modulo 3, - // and for whether this closer can be an opener; - // https://github.com/commonmark/cmark/commit/34250e12ccebdc6372b8b49c44fab57c72443460 - if (!openersBottom.hasOwnProperty(closer.marker)) { - openersBottom[closer.marker] = [ -1, -1, -1, -1, -1, -1 ]; - } - minOpenerIdx = openersBottom[closer.marker][(closer.open ? 
3 : 0) + closer.length % 3]; - openerIdx = headerIdx - jumps[headerIdx] - 1; - newMinOpenerIdx = openerIdx; - for (;openerIdx > minOpenerIdx; openerIdx -= jumps[openerIdx] + 1) { - opener = delimiters[openerIdx]; - if (opener.marker !== closer.marker) continue; - if (opener.open && opener.end < 0) { - isOddMatch = false; - // from spec: - - // If one of the delimiters can both open and close emphasis, then the - // sum of the lengths of the delimiter runs containing the opening and - // closing delimiters must not be a multiple of 3 unless both lengths - // are multiples of 3. - - if (opener.close || closer.open) { - if ((opener.length + closer.length) % 3 === 0) { - if (opener.length % 3 !== 0 || closer.length % 3 !== 0) { - isOddMatch = true; - } - } - } - if (!isOddMatch) { - // If previous delimiter cannot be an opener, we can safely skip - // the entire sequence in future checks. This is required to make - // sure algorithm has linear complexity (see *_*_*_*_*_... case). - lastJump = openerIdx > 0 && !delimiters[openerIdx - 1].open ? jumps[openerIdx - 1] + 1 : 0; - jumps[closerIdx] = closerIdx - openerIdx + lastJump; - jumps[openerIdx] = lastJump; - closer.open = false; - opener.end = closerIdx; - opener.close = false; - newMinOpenerIdx = -1; - // treat next token as start of run, - // it optimizes skips in **<...>**a**<...>** pathological case - lastTokenIdx = -2; - break; - } - } - } - if (newMinOpenerIdx !== -1) { - // If match for this delimiter run failed, we want to set lower bound for - // future lookups. This is required to make sure algorithm has linear - // complexity. - // See details here: - // https://github.com/commonmark/cmark/issues/178#issuecomment-270417442 - openersBottom[closer.marker][(closer.open ? 3 : 0) + (closer.length || 0) % 3] = newMinOpenerIdx; - } - } - } - var balance_pairs = function link_pairs(state) { - var curr, tokens_meta = state.tokens_meta, max = state.tokens_meta.length; - processDelimiters(state.delimiters); - for (curr = 0; curr < max; curr++) { - if (tokens_meta[curr] && tokens_meta[curr].delimiters) { - processDelimiters(tokens_meta[curr].delimiters); - } - } - }; - // Clean up tokens after emphasis and strikethrough postprocessing: - var fragments_join = function fragments_join(state) { - var curr, last, level = 0, tokens = state.tokens, max = state.tokens.length; - for (curr = last = 0; curr < max; curr++) { - // re-calculate levels after emphasis/strikethrough turns some text nodes - // into opening/closing tags - if (tokens[curr].nesting < 0) level--; - // closing tag - tokens[curr].level = level; - if (tokens[curr].nesting > 0) level++; - // opening tag - if (tokens[curr].type === "text" && curr + 1 < max && tokens[curr + 1].type === "text") { - // collapse two adjacent text nodes - tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content; - } else { - if (curr !== last) { - tokens[last] = tokens[curr]; - } - last++; - } - } - if (curr !== last) { - tokens.length = last; - } - }; - var isWhiteSpace = utils.isWhiteSpace; - var isPunctChar = utils.isPunctChar; - var isMdAsciiPunct = utils.isMdAsciiPunct; - function StateInline(src, md, env, outTokens) { - this.src = src; - this.env = env; - this.md = md; - this.tokens = outTokens; - this.tokens_meta = Array(outTokens.length); - this.pos = 0; - this.posMax = this.src.length; - this.level = 0; - this.pending = ""; - this.pendingLevel = 0; - // Stores { start: end } pairs. Useful for backtrack - // optimization of pairs parse (emphasis, strikes). 
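The `length % 3` buckets above implement CommonMark's "multiple of 3" restriction for runs that can both open and close; the effect is observable in the output. A sketch, assuming markdown-it is installed:

```js
// A 1-char closer cannot pair with a 2-char opener (1 + 2 is divisible
// by 3 while the individual lengths are not), so '**' stays literal.
const MarkdownIt = require("markdown-it");
const md = new MarkdownIt();

console.log(md.renderInline("*foo**bar*"));      // <em>foo**bar</em>
console.log(md.renderInline("*foo**bar**baz*")); // <em>foo<strong>bar</strong>baz</em>
```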
- this.cache = {}; - // List of emphasis-like delimiters for current tag - this.delimiters = []; - // Stack of delimiter lists for upper level tags - this._prev_delimiters = []; - // backtick length => last seen position - this.backticks = {}; - this.backticksScanned = false; - // Counter used to disable inline linkify-it execution - // inside <a> and markdown links - this.linkLevel = 0; - } - // Flush pending text - - StateInline.prototype.pushPending = function() { - var token$1 = new token("text", "", 0); - token$1.content = this.pending; - token$1.level = this.pendingLevel; - this.tokens.push(token$1); - this.pending = ""; - return token$1; - }; - // Push new token to "stream". - // If pending text exists - flush it as text token - - StateInline.prototype.push = function(type, tag, nesting) { - if (this.pending) { - this.pushPending(); - } - var token$1 = new token(type, tag, nesting); - var token_meta = null; - if (nesting < 0) { - // closing tag - this.level--; - this.delimiters = this._prev_delimiters.pop(); - } - token$1.level = this.level; - if (nesting > 0) { - // opening tag - this.level++; - this._prev_delimiters.push(this.delimiters); - this.delimiters = []; - token_meta = { - delimiters: this.delimiters - }; - } - this.pendingLevel = this.level; - this.tokens.push(token$1); - this.tokens_meta.push(token_meta); - return token$1; - }; - // Scan a sequence of emphasis-like markers, and determine whether - // it can start an emphasis sequence or end an emphasis sequence. - - // - start - position to scan from (it should point at a valid marker); - // - canSplitWord - determine if these markers can be found inside a word - - StateInline.prototype.scanDelims = function(start, canSplitWord) { - var pos = start, lastChar, nextChar, count, can_open, can_close, isLastWhiteSpace, isLastPunctChar, isNextWhiteSpace, isNextPunctChar, left_flanking = true, right_flanking = true, max = this.posMax, marker = this.src.charCodeAt(start); - // treat beginning of the line as a whitespace - lastChar = start > 0 ? this.src.charCodeAt(start - 1) : 32; - while (pos < max && this.src.charCodeAt(pos) === marker) { - pos++; - } - count = pos - start; - // treat end of the line as a whitespace - nextChar = pos < max ?
this.src.charCodeAt(pos) : 32; - isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar)); - isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar)); - isLastWhiteSpace = isWhiteSpace(lastChar); - isNextWhiteSpace = isWhiteSpace(nextChar); - if (isNextWhiteSpace) { - left_flanking = false; - } else if (isNextPunctChar) { - if (!(isLastWhiteSpace || isLastPunctChar)) { - left_flanking = false; - } - } - if (isLastWhiteSpace) { - right_flanking = false; - } else if (isLastPunctChar) { - if (!(isNextWhiteSpace || isNextPunctChar)) { - right_flanking = false; - } - } - if (!canSplitWord) { - can_open = left_flanking && (!right_flanking || isLastPunctChar); - can_close = right_flanking && (!left_flanking || isNextPunctChar); - } else { - can_open = left_flanking; - can_close = right_flanking; - } - return { - can_open: can_open, - can_close: can_close, - length: count - }; - }; - // re-export Token class to use in block rules - StateInline.prototype.Token = token; - var state_inline = StateInline; - //////////////////////////////////////////////////////////////////////////////// - // Parser rules - var _rules = [ [ "text", text ], [ "linkify", linkify ], [ "newline", newline ], [ "escape", _escape ], [ "backticks", backticks ], [ "strikethrough", strikethrough.tokenize ], [ "emphasis", emphasis.tokenize ], [ "link", link ], [ "image", image ], [ "autolink", autolink ], [ "html_inline", html_inline ], [ "entity", entity ] ]; - // `rule2` ruleset was created specifically for emphasis/strikethrough - // post-processing and may be changed in the future. - - // Don't use this for anything except pairs (plugins working with `balance_pairs`). - - var _rules2 = [ [ "balance_pairs", balance_pairs ], [ "strikethrough", strikethrough.postProcess ], [ "emphasis", emphasis.postProcess ], - // rules for pairs separate '**' into its own text tokens, which may be left unused, - // rule below merges unused segments back with the rest of the text - [ "fragments_join", fragments_join ] ]; - /** - * new ParserInline() - **/ function ParserInline() { - var i; - /** - * ParserInline#ruler -> Ruler - * - * [[Ruler]] instance. Keep configuration of inline rules. - **/ this.ruler = new ruler; - for (i = 0; i < _rules.length; i++) { - this.ruler.push(_rules[i][0], _rules[i][1]); - } - /** - * ParserInline#ruler2 -> Ruler - * - * [[Ruler]] instance. Second ruler used for post-processing - * (e.g. in emphasis-like rules). - **/ this.ruler2 = new ruler; - for (i = 0; i < _rules2.length; i++) { - this.ruler2.push(_rules2[i][0], _rules2[i][1]); - } - } - // Skip single token by running all rules in validation mode; - // returns `true` if any rule reported success - - ParserInline.prototype.skipToken = function(state) { - var ok, i, pos = state.pos, rules = this.ruler.getRules(""), len = rules.length, maxNesting = state.md.options.maxNesting, cache = state.cache; - if (typeof cache[pos] !== "undefined") { - state.pos = cache[pos]; - return; - } - if (state.level < maxNesting) { - for (i = 0; i < len; i++) { - // Increment state.level and decrement it later to limit recursion. - // It's harmless to do here, because no tokens are created. But ideally, - // we'd need a separate private state variable for this purpose. 
- state.level++; - ok = rules[i](state, true); - state.level--; - if (ok) { - if (pos >= state.pos) { - throw new Error("inline rule didn't increment state.pos"); - } - break; - } - } - } else { - // Too much nesting, just skip until the end of the paragraph. - // NOTE: this will cause links to behave incorrectly in the following case, - // when an amount of `[` is exactly equal to `maxNesting + 1`: - // [[[[[[[[[[[[[[[[[[[[[foo]() - // TODO: remove this workaround when CM standard will allow nested links - // (we can replace it by preventing links from being parsed in - // validation mode) - state.pos = state.posMax; - } - if (!ok) { - state.pos++; - } - cache[pos] = state.pos; - }; - // Generate tokens for input range - - ParserInline.prototype.tokenize = function(state) { - var ok, i, prevPos, rules = this.ruler.getRules(""), len = rules.length, end = state.posMax, maxNesting = state.md.options.maxNesting; - while (state.pos < end) { - // Try all possible rules. - // On success, rule should: - // - update `state.pos` - // - update `state.tokens` - // - return true - prevPos = state.pos; - if (state.level < maxNesting) { - for (i = 0; i < len; i++) { - ok = rules[i](state, false); - if (ok) { - if (prevPos >= state.pos) { - throw new Error("inline rule didn't increment state.pos"); - } - break; - } - } - } - if (ok) { - if (state.pos >= end) { - break; - } - continue; - } - state.pending += state.src[state.pos++]; - } - if (state.pending) { - state.pushPending(); - } - }; - /** - * ParserInline.parse(str, md, env, outTokens) - * - * Process input string and push inline tokens into `outTokens` - **/ ParserInline.prototype.parse = function(str, md, env, outTokens) { - var i, rules, len; - var state = new this.State(str, md, env, outTokens); - this.tokenize(state); - rules = this.ruler2.getRules(""); - len = rules.length; - for (i = 0; i < len; i++) { - rules[i](state); - } - }; - ParserInline.prototype.State = state_inline; - var parser_inline = ParserInline; - var re = function(opts) { - var re = {}; - opts = opts || {}; - // Use direct extract instead of `regenerate` to reduse browserified size - re.src_Any = regex$3.source; - re.src_Cc = regex$2.source; - re.src_Z = regex.source; - re.src_P = regex$4.source; - // \p{\Z\P\Cc\CF} (white spaces + control + format + punctuation) - re.src_ZPCc = [ re.src_Z, re.src_P, re.src_Cc ].join("|"); - // \p{\Z\Cc} (white spaces + control) - re.src_ZCc = [ re.src_Z, re.src_Cc ].join("|"); - // Experimental. List of chars, completely prohibited in links - // because can separate it from other part of text - var text_separators = "[><\uff5c]"; - // All possible word characters (everything without punctuation, spaces & controls) - // Defined via punctuation & spaces to save space - // Should be something like \p{\L\N\S\M} (\w but without `_`) - re.src_pseudo_letter = "(?:(?!" + text_separators + "|" + re.src_ZPCc + ")" + re.src_Any + ")"; - // The same as abothe but without [0-9] - // var src_pseudo_letter_non_d = '(?:(?![0-9]|' + src_ZPCc + ')' + src_Any + ')'; - //////////////////////////////////////////////////////////////////////////////// - re.src_ip4 = "(?:(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"; - // Prohibit any of "@/[]()" in user/pass to avoid wrong domain fetch. - re.src_auth = "(?:(?:(?!" 
+ re.src_ZCc + "|[@/\\[\\]()]).)+@)?"; - re.src_port = "(?::(?:6(?:[0-4]\\d{3}|5(?:[0-4]\\d{2}|5(?:[0-2]\\d|3[0-5])))|[1-5]?\\d{1,4}))?"; - re.src_host_terminator = "(?=$|" + text_separators + "|" + re.src_ZPCc + ")" + "(?!" + (opts["---"] ? "-(?!--)|" : "-|") + "_|:\\d|\\.-|\\.(?!$|" + re.src_ZPCc + "))"; - re.src_path = "(?:" + "[/?#]" + "(?:" + "(?!" + re.src_ZCc + "|" + text_separators + "|[()[\\]{}.,\"'?!\\-;]).|" + "\\[(?:(?!" + re.src_ZCc + "|\\]).)*\\]|" + "\\((?:(?!" + re.src_ZCc + "|[)]).)*\\)|" + "\\{(?:(?!" + re.src_ZCc + "|[}]).)*\\}|" + '\\"(?:(?!' + re.src_ZCc + '|["]).)+\\"|' + "\\'(?:(?!" + re.src_ZCc + "|[']).)+\\'|" + "\\'(?=" + re.src_pseudo_letter + "|[-])|" + // allow `I'm_king` if no pair found - "\\.{2,}[a-zA-Z0-9%/&]|" + // google has many dots in "google search" links (#66, #81). - // github has ... in commit range links, - // Restrict to - // - english - // - percent-encoded - // - parts of file path - // - params separator - // until more examples found. - "\\.(?!" + re.src_ZCc + "|[.]|$)|" + (opts["---"] ? "\\-(?!--(?:[^-]|$))(?:-*)|" : "\\-+|") + ",(?!" + re.src_ZCc + "|$)|" + // allow `,,,` in paths - ";(?!" + re.src_ZCc + "|$)|" + // allow `;` if not followed by space-like char - "\\!+(?!" + re.src_ZCc + "|[!]|$)|" + // allow `!!!` in paths, but not at the end - "\\?(?!" + re.src_ZCc + "|[?]|$)" + ")+" + "|\\/" + ")?"; - // Allow anything in markdown spec, forbid quote (") at the first position - // because emails enclosed in quotes are far more common - re.src_email_name = '[\\-;:&=\\+\\$,\\.a-zA-Z0-9_][\\-;:&=\\+\\$,\\"\\.a-zA-Z0-9_]*'; - re.src_xn = "xn--[a-z0-9\\-]{1,59}"; - // More to read about domain names - // http://serverfault.com/questions/638260/ - re.src_domain_root = - // Allow letters & digits (http://test1) - "(?:" + re.src_xn + "|" + re.src_pseudo_letter + "{1,63}" + ")"; - re.src_domain = "(?:" + re.src_xn + "|" + "(?:" + re.src_pseudo_letter + ")" + "|" + "(?:" + re.src_pseudo_letter + "(?:-|" + re.src_pseudo_letter + "){0,61}" + re.src_pseudo_letter + ")" + ")"; - re.src_host = "(?:" + - // Don't need IP check, because digits are already allowed in normal domain names - // src_ip4 + - // '|' + - "(?:(?:(?:" + re.src_domain + ")\\.)*" + re.src_domain /*_root*/ + ")" + ")"; - re.tpl_host_fuzzy = "(?:" + re.src_ip4 + "|" + "(?:(?:(?:" + re.src_domain + ")\\.)+(?:%TLDS%))" + ")"; - re.tpl_host_no_ip_fuzzy = "(?:(?:(?:" + re.src_domain + ")\\.)+(?:%TLDS%))"; - re.src_host_strict = re.src_host + re.src_host_terminator; - re.tpl_host_fuzzy_strict = re.tpl_host_fuzzy + re.src_host_terminator; - re.src_host_port_strict = re.src_host + re.src_port + re.src_host_terminator; - re.tpl_host_port_fuzzy_strict = re.tpl_host_fuzzy + re.src_port + re.src_host_terminator; - re.tpl_host_port_no_ip_fuzzy_strict = re.tpl_host_no_ip_fuzzy + re.src_port + re.src_host_terminator; - //////////////////////////////////////////////////////////////////////////////// - // Main rules - // Rude test fuzzy links by host, for quick deny - re.tpl_host_fuzzy_test = "localhost|www\\.|\\.\\d{1,3}\\.|(?:\\.(?:%TLDS%)(?:" + re.src_ZPCc + "|>|$))"; - re.tpl_email_fuzzy = "(^|" + text_separators + '|"|\\(|' + re.src_ZCc + ")" + "(" + re.src_email_name + "@" + re.tpl_host_fuzzy_strict + ")"; - re.tpl_link_fuzzy = - // Fuzzy link can't be prepended with .:/\- and non punctuation. 
- // but can start with > (markdown blockquote) - "(^|(?![.:/\\-_@])(?:[$+<=>^`|\uff5c]|" + re.src_ZPCc + "))" + "((?![$+<=>^`|\uff5c])" + re.tpl_host_port_fuzzy_strict + re.src_path + ")"; - re.tpl_link_no_ip_fuzzy = - // Fuzzy link can't be prepended with .:/\- and non punctuation. - // but can start with > (markdown blockquote) - "(^|(?![.:/\\-_@])(?:[$+<=>^`|\uff5c]|" + re.src_ZPCc + "))" + "((?![$+<=>^`|\uff5c])" + re.tpl_host_port_no_ip_fuzzy_strict + re.src_path + ")"; - return re; - }; - //////////////////////////////////////////////////////////////////////////////// - // Helpers - // Merge objects - - function assign(obj /*from1, from2, from3, ...*/) { - var sources = Array.prototype.slice.call(arguments, 1); - sources.forEach((function(source) { - if (!source) { - return; - } - Object.keys(source).forEach((function(key) { - obj[key] = source[key]; - })); - })); - return obj; - } - function _class(obj) { - return Object.prototype.toString.call(obj); - } - function isString(obj) { - return _class(obj) === "[object String]"; - } - function isObject(obj) { - return _class(obj) === "[object Object]"; - } - function isRegExp(obj) { - return _class(obj) === "[object RegExp]"; - } - function isFunction(obj) { - return _class(obj) === "[object Function]"; - } - function escapeRE(str) { - return str.replace(/[.?*+^$[\]\\(){}|-]/g, "\\$&"); - } - //////////////////////////////////////////////////////////////////////////////// - var defaultOptions = { - fuzzyLink: true, - fuzzyEmail: true, - fuzzyIP: false - }; - function isOptionsObj(obj) { - return Object.keys(obj || {}).reduce((function(acc, k) { - return acc || defaultOptions.hasOwnProperty(k); - }), false); - } - var defaultSchemas = { - "http:": { - validate: function(text, pos, self) { - var tail = text.slice(pos); - if (!self.re.http) { - // compile lazily, because "host"-containing variables can change on tlds update. - self.re.http = new RegExp("^\\/\\/" + self.re.src_auth + self.re.src_host_port_strict + self.re.src_path, "i"); - } - if (self.re.http.test(tail)) { - return tail.match(self.re.http)[0].length; - } - return 0; - } - }, - "https:": "http:", - "ftp:": "http:", - "//": { - validate: function(text, pos, self) { - var tail = text.slice(pos); - if (!self.re.no_http) { - // compile lazily, because "host"-containing variables can change on tlds update. 
- self.re.no_http = new RegExp("^" + self.re.src_auth + - // Don't allow single-level domains, because of false positives like '//test' - // with code comments - "(?:localhost|(?:(?:" + self.re.src_domain + ")\\.)+" + self.re.src_domain_root + ")" + self.re.src_port + self.re.src_host_terminator + self.re.src_path, "i"); - } - if (self.re.no_http.test(tail)) { - // should not be `://` & `///`, that protects from errors in protocol name - if (pos >= 3 && text[pos - 3] === ":") { - return 0; - } - if (pos >= 3 && text[pos - 3] === "/") { - return 0; - } - return tail.match(self.re.no_http)[0].length; - } - return 0; - } - }, - "mailto:": { - validate: function(text, pos, self) { - var tail = text.slice(pos); - if (!self.re.mailto) { - self.re.mailto = new RegExp("^" + self.re.src_email_name + "@" + self.re.src_host_strict, "i"); - } - if (self.re.mailto.test(tail)) { - return tail.match(self.re.mailto)[0].length; - } - return 0; - } - } - }; - /*eslint-disable max-len*/ - // RE pattern for 2-character tlds (autogenerated by ./support/tlds_2char_gen.js) - var tlds_2ch_src_re = "a[cdefgilmnoqrstuwxz]|b[abdefghijmnorstvwyz]|c[acdfghiklmnoruvwxyz]|d[ejkmoz]|e[cegrstu]|f[ijkmor]|g[abdefghilmnpqrstuwy]|h[kmnrtu]|i[delmnoqrst]|j[emop]|k[eghimnprwyz]|l[abcikrstuvy]|m[acdeghklmnopqrstuvwxyz]|n[acefgilopruz]|om|p[aefghklmnrstwy]|qa|r[eosuw]|s[abcdeghijklmnortuvxyz]|t[cdfghjklmnortvwz]|u[agksyz]|v[aceginu]|w[fs]|y[et]|z[amw]"; - // DON'T try to make PRs with changes. Extend TLDs with LinkifyIt.tlds() instead - var tlds_default = "biz|com|edu|gov|net|org|pro|web|xxx|aero|asia|coop|info|museum|name|shop|\u0440\u0444".split("|"); - /*eslint-enable max-len*/ - //////////////////////////////////////////////////////////////////////////////// - function resetScanCache(self) { - self.__index__ = -1; - self.__text_cache__ = ""; - } - function createValidator(re) { - return function(text, pos) { - var tail = text.slice(pos); - if (re.test(tail)) { - return tail.match(re)[0].length; - } - return 0; - }; - } - function createNormalizer() { - return function(match, self) { - self.normalize(match); - }; - } - // Schemas compiler. Build regexps. - - function compile(self) { - // Load & clone RE patterns. 
- var re$1 = self.re = re(self.__opts__); - // Define dynamic patterns - var tlds = self.__tlds__.slice(); - self.onCompile(); - if (!self.__tlds_replaced__) { - tlds.push(tlds_2ch_src_re); - } - tlds.push(re$1.src_xn); - re$1.src_tlds = tlds.join("|"); - function untpl(tpl) { - return tpl.replace("%TLDS%", re$1.src_tlds); - } - re$1.email_fuzzy = RegExp(untpl(re$1.tpl_email_fuzzy), "i"); - re$1.link_fuzzy = RegExp(untpl(re$1.tpl_link_fuzzy), "i"); - re$1.link_no_ip_fuzzy = RegExp(untpl(re$1.tpl_link_no_ip_fuzzy), "i"); - re$1.host_fuzzy_test = RegExp(untpl(re$1.tpl_host_fuzzy_test), "i"); - - // Compile each schema - - var aliases = []; - self.__compiled__ = {}; - // Reset compiled data - function schemaError(name, val) { - throw new Error('(LinkifyIt) Invalid schema "' + name + '": ' + val); - } - Object.keys(self.__schemas__).forEach((function(name) { - var val = self.__schemas__[name]; - // skip disabled methods - if (val === null) { - return; - } - var compiled = { - validate: null, - link: null - }; - self.__compiled__[name] = compiled; - if (isObject(val)) { - if (isRegExp(val.validate)) { - compiled.validate = createValidator(val.validate); - } else if (isFunction(val.validate)) { - compiled.validate = val.validate; - } else { - schemaError(name, val); - } - if (isFunction(val.normalize)) { - compiled.normalize = val.normalize; - } else if (!val.normalize) { - compiled.normalize = createNormalizer(); - } else { - schemaError(name, val); - } - return; - } - if (isString(val)) { - aliases.push(name); - return; - } - schemaError(name, val); - })); - - // Compile postponed aliases - - aliases.forEach((function(alias) { - if (!self.__compiled__[self.__schemas__[alias]]) { - // Silently fail on missed schemas to avoid errons on disable. - // schemaError(alias, self.__schemas__[alias]); - return; - } - self.__compiled__[alias].validate = self.__compiled__[self.__schemas__[alias]].validate; - self.__compiled__[alias].normalize = self.__compiled__[self.__schemas__[alias]].normalize; - })); - - // Fake record for guessed links - - self.__compiled__[""] = { - validate: null, - normalize: createNormalizer() - }; - - // Build schema condition - - var slist = Object.keys(self.__compiled__).filter((function(name) { - // Filter disabled & fake schemas - return name.length > 0 && self.__compiled__[name]; - })).map(escapeRE).join("|"); - // (?!_) cause 1.5x slowdown - self.re.schema_test = RegExp("(^|(?!_)(?:[><\uff5c]|" + re$1.src_ZPCc + "))(" + slist + ")", "i"); - self.re.schema_search = RegExp("(^|(?!_)(?:[><\uff5c]|" + re$1.src_ZPCc + "))(" + slist + ")", "ig"); - self.re.schema_at_start = RegExp("^" + self.re.schema_search.source, "i"); - self.re.pretest = RegExp("(" + self.re.schema_test.source + ")|(" + self.re.host_fuzzy_test.source + ")|@", "i"); - - // Cleanup - - resetScanCache(self); - } - /** - * class Match - * - * Match result. Single element of array, returned by [[LinkifyIt#match]] - **/ function Match(self, shift) { - var start = self.__index__, end = self.__last_index__, text = self.__text_cache__.slice(start, end); - /** - * Match#schema -> String - * - * Prefix (protocol) for matched string. - **/ this.schema = self.__schema__.toLowerCase(); - /** - * Match#index -> Number - * - * First position of matched string. - **/ this.index = start + shift; - /** - * Match#lastIndex -> Number - * - * Next position after matched string. - **/ this.lastIndex = end + shift; - /** - * Match#raw -> String - * - * Matched string. 
- **/ this.raw = text; - /** - * Match#text -> String - * - * Notmalized text of matched string. - **/ this.text = text; - /** - * Match#url -> String - * - * Normalized url of matched string. - **/ this.url = text; - } - function createMatch(self, shift) { - var match = new Match(self, shift); - self.__compiled__[match.schema].normalize(match, self); - return match; - } - /** - * class LinkifyIt - **/ - /** - * new LinkifyIt(schemas, options) - * - schemas (Object): Optional. Additional schemas to validate (prefix/validator) - * - options (Object): { fuzzyLink|fuzzyEmail|fuzzyIP: true|false } - * - * Creates new linkifier instance with optional additional schemas. - * Can be called without `new` keyword for convenience. - * - * By default understands: - * - * - `http(s)://...` , `ftp://...`, `mailto:...` & `//...` links - * - "fuzzy" links and emails (example.com, foo@bar.com). - * - * `schemas` is an object, where each key/value describes protocol/rule: - * - * - __key__ - link prefix (usually, protocol name with `:` at the end, `skype:` - * for example). `linkify-it` makes shure that prefix is not preceeded with - * alphanumeric char and symbols. Only whitespaces and punctuation allowed. - * - __value__ - rule to check tail after link prefix - * - _String_ - just alias to existing rule - * - _Object_ - * - _validate_ - validator function (should return matched length on success), - * or `RegExp`. - * - _normalize_ - optional function to normalize text & url of matched result - * (for example, for @twitter mentions). - * - * `options`: - * - * - __fuzzyLink__ - recognige URL-s without `http(s):` prefix. Default `true`. - * - __fuzzyIP__ - allow IPs in fuzzy links above. Can conflict with some texts - * like version numbers. Default `false`. - * - __fuzzyEmail__ - recognize emails without `mailto:` prefix. - * - **/ function LinkifyIt(schemas, options) { - if (!(this instanceof LinkifyIt)) { - return new LinkifyIt(schemas, options); - } - if (!options) { - if (isOptionsObj(schemas)) { - options = schemas; - schemas = {}; - } - } - this.__opts__ = assign({}, defaultOptions, options); - // Cache last tested result. Used to skip repeating steps on next `match` call. - this.__index__ = -1; - this.__last_index__ = -1; - // Next scan position - this.__schema__ = ""; - this.__text_cache__ = ""; - this.__schemas__ = assign({}, defaultSchemas, schemas); - this.__compiled__ = {}; - this.__tlds__ = tlds_default; - this.__tlds_replaced__ = false; - this.re = {}; - compile(this); - } - /** chainable - * LinkifyIt#add(schema, definition) - * - schema (String): rule name (fixed pattern prefix) - * - definition (String|RegExp|Object): schema definition - * - * Add new rule definition. See constructor description for details. - **/ LinkifyIt.prototype.add = function add(schema, definition) { - this.__schemas__[schema] = definition; - compile(this); - return this; - }; - /** chainable - * LinkifyIt#set(options) - * - options (Object): { fuzzyLink|fuzzyEmail|fuzzyIP: true|false } - * - * Set recognition options for links without schema. - **/ LinkifyIt.prototype.set = function set(options) { - this.__opts__ = assign(this.__opts__, options); - return this; - }; - /** - * LinkifyIt#test(text) -> Boolean - * - * Searches linkifiable pattern and returns `true` on success or `false` on fail. 
- **/ LinkifyIt.prototype.test = function test(text) { - // Reset scan cache - this.__text_cache__ = text; - this.__index__ = -1; - if (!text.length) { - return false; - } - var m, ml, me, len, shift, next, re, tld_pos, at_pos; - // try to scan for link with schema - that's the most simple rule - if (this.re.schema_test.test(text)) { - re = this.re.schema_search; - re.lastIndex = 0; - while ((m = re.exec(text)) !== null) { - len = this.testSchemaAt(text, m[2], re.lastIndex); - if (len) { - this.__schema__ = m[2]; - this.__index__ = m.index + m[1].length; - this.__last_index__ = m.index + m[0].length + len; - break; - } - } - } - if (this.__opts__.fuzzyLink && this.__compiled__["http:"]) { - // guess schemaless links - tld_pos = text.search(this.re.host_fuzzy_test); - if (tld_pos >= 0) { - // if tld is located after found link - no need to check fuzzy pattern - if (this.__index__ < 0 || tld_pos < this.__index__) { - if ((ml = text.match(this.__opts__.fuzzyIP ? this.re.link_fuzzy : this.re.link_no_ip_fuzzy)) !== null) { - shift = ml.index + ml[1].length; - if (this.__index__ < 0 || shift < this.__index__) { - this.__schema__ = ""; - this.__index__ = shift; - this.__last_index__ = ml.index + ml[0].length; - } - } - } - } - } - if (this.__opts__.fuzzyEmail && this.__compiled__["mailto:"]) { - // guess schemaless emails - at_pos = text.indexOf("@"); - if (at_pos >= 0) { - // We can't skip this check, because this cases are possible: - // 192.168.1.1@gmail.com, my.in@example.com - if ((me = text.match(this.re.email_fuzzy)) !== null) { - shift = me.index + me[1].length; - next = me.index + me[0].length; - if (this.__index__ < 0 || shift < this.__index__ || shift === this.__index__ && next > this.__last_index__) { - this.__schema__ = "mailto:"; - this.__index__ = shift; - this.__last_index__ = next; - } - } - } - } - return this.__index__ >= 0; - }; - /** - * LinkifyIt#pretest(text) -> Boolean - * - * Very quick check, that can give false positives. Returns true if link MAY BE - * can exists. Can be used for speed optimization, when you need to check that - * link NOT exists. - **/ LinkifyIt.prototype.pretest = function pretest(text) { - return this.re.pretest.test(text); - }; - /** - * LinkifyIt#testSchemaAt(text, name, position) -> Number - * - text (String): text to scan - * - name (String): rule (schema) name - * - position (Number): text offset to check from - * - * Similar to [[LinkifyIt#test]] but checks only specific protocol tail exactly - * at given position. Returns length of found pattern (0 on fail). - **/ LinkifyIt.prototype.testSchemaAt = function testSchemaAt(text, schema, pos) { - // If not supported schema check requested - terminate - if (!this.__compiled__[schema.toLowerCase()]) { - return 0; - } - return this.__compiled__[schema.toLowerCase()].validate(text, pos, this); - }; - /** - * LinkifyIt#match(text) -> Array|null - * - * Returns array of found link descriptions or `null` on fail. We strongly - * recommend to use [[LinkifyIt#test]] first, for best speed. - * - * ##### Result match description - * - * - __schema__ - link schema, can be empty for fuzzy links, or `//` for - * protocol-neutral links. 
- * - __index__ - offset of matched text - * - __lastIndex__ - index of next char after mathch end - * - __raw__ - matched text - * - __text__ - normalized text - * - __url__ - link, generated from matched text - **/ LinkifyIt.prototype.match = function match(text) { - var shift = 0, result = []; - // Try to take previous element from cache, if .test() called before - if (this.__index__ >= 0 && this.__text_cache__ === text) { - result.push(createMatch(this, shift)); - shift = this.__last_index__; - } - // Cut head if cache was used - var tail = shift ? text.slice(shift) : text; - // Scan string until end reached - while (this.test(tail)) { - result.push(createMatch(this, shift)); - tail = tail.slice(this.__last_index__); - shift += this.__last_index__; - } - if (result.length) { - return result; - } - return null; - }; - /** - * LinkifyIt#matchAtStart(text) -> Match|null - * - * Returns fully-formed (not fuzzy) link if it starts at the beginning - * of the string, and null otherwise. - **/ LinkifyIt.prototype.matchAtStart = function matchAtStart(text) { - // Reset scan cache - this.__text_cache__ = text; - this.__index__ = -1; - if (!text.length) return null; - var m = this.re.schema_at_start.exec(text); - if (!m) return null; - var len = this.testSchemaAt(text, m[2], m[0].length); - if (!len) return null; - this.__schema__ = m[2]; - this.__index__ = m.index + m[1].length; - this.__last_index__ = m.index + m[0].length + len; - return createMatch(this, 0); - }; - /** chainable - * LinkifyIt#tlds(list [, keepOld]) -> this - * - list (Array): list of tlds - * - keepOld (Boolean): merge with current list if `true` (`false` by default) - * - * Load (or merge) new tlds list. Those are user for fuzzy links (without prefix) - * to avoid false positives. By default this algorythm used: - * - * - hostname with any 2-letter root zones are ok. - * - biz|com|edu|gov|net|org|pro|web|xxx|aero|asia|coop|info|museum|name|shop|рф - * are ok. - * - encoded (`xn--...`) root zones are ok. - * - * If list is replaced, then exact match for 2-chars root zones will be checked. - **/ LinkifyIt.prototype.tlds = function tlds(list, keepOld) { - list = Array.isArray(list) ? list : [ list ]; - if (!keepOld) { - this.__tlds__ = list.slice(); - this.__tlds_replaced__ = true; - compile(this); - return this; - } - this.__tlds__ = this.__tlds__.concat(list).sort().filter((function(el, idx, arr) { - return el !== arr[idx - 1]; - })).reverse(); - compile(this); - return this; - }; - /** - * LinkifyIt#normalize(match) - * - * Default normalizer (if schema does not define it's own). - **/ LinkifyIt.prototype.normalize = function normalize(match) { - // Do minimal possible changes by default. Need to collect feedback prior - // to move forward https://github.com/markdown-it/linkify-it/issues/1 - if (!match.schema) { - match.url = "http://" + match.url; - } - if (match.schema === "mailto:" && !/^mailto:/i.test(match.url)) { - match.url = "mailto:" + match.url; - } - }; - /** - * LinkifyIt#onCompile() - * - * Override to modify basic RegExp-s. - **/ LinkifyIt.prototype.onCompile = function onCompile() {}; - var linkifyIt = LinkifyIt; - /*! https://mths.be/punycode v1.4.1 by @mathias */ - /** Highest positive signed 32-bit float value */ var maxInt = 2147483647; - // aka. 
0x7FFFFFFF or 2^31-1 - /** Bootstring parameters */ var base = 36; - var tMin = 1; - var tMax = 26; - var skew = 38; - var damp = 700; - var initialBias = 72; - var initialN = 128; - // 0x80 - var delimiter = "-"; - // '\x2D' - /** Regular expressions */ var regexPunycode = /^xn--/; - var regexNonASCII = /[^\x20-\x7E]/; - // unprintable ASCII chars + non-ASCII chars - var regexSeparators = /[\x2E\u3002\uFF0E\uFF61]/g; - // RFC 3490 separators - /** Error messages */ var errors = { - overflow: "Overflow: input needs wider integers to process", - "not-basic": "Illegal input >= 0x80 (not a basic code point)", - "invalid-input": "Invalid input" - }; - /** Convenience shortcuts */ var baseMinusTMin = base - tMin; - var floor = Math.floor; - var stringFromCharCode = String.fromCharCode; - /*--------------------------------------------------------------------------*/ - /** - * A generic error utility function. - * @private - * @param {String} type The error type. - * @returns {Error} Throws a `RangeError` with the applicable error message. - */ function error(type) { - throw new RangeError(errors[type]); - } - /** - * A generic `Array#map` utility function. - * @private - * @param {Array} array The array to iterate over. - * @param {Function} callback The function that gets called for every array - * item. - * @returns {Array} A new array of values returned by the callback function. - */ function map(array, fn) { - var length = array.length; - var result = []; - while (length--) { - result[length] = fn(array[length]); - } - return result; - } - /** - * A simple `Array#map`-like wrapper to work with domain name strings or email - * addresses. - * @private - * @param {String} domain The domain name or email address. - * @param {Function} callback The function that gets called for every - * character. - * @returns {Array} A new string of characters returned by the callback - * function. - */ function mapDomain(string, fn) { - var parts = string.split("@"); - var result = ""; - if (parts.length > 1) { - // In email addresses, only the domain name should be punycoded. Leave - // the local part (i.e. everything up to `@`) intact. - result = parts[0] + "@"; - string = parts[1]; - } - // Avoid `split(regex)` for IE8 compatibility. See #17. - string = string.replace(regexSeparators, "."); - var labels = string.split("."); - var encoded = map(labels, fn).join("."); - return result + encoded; - } - /** - * Creates an array containing the numeric code points of each Unicode - * character in the string. While JavaScript uses UCS-2 internally, - * this function will convert a pair of surrogate halves (each of which - * UCS-2 exposes as separate characters) into a single code point, - * matching UTF-16. - * @see `punycode.ucs2.encode` - * @see - * @memberOf punycode.ucs2 - * @name decode - * @param {String} string The Unicode input string (UCS-2). - * @returns {Array} The new array of code points. 
- */ function ucs2decode(string) { - var output = [], counter = 0, length = string.length, value, extra; - while (counter < length) { - value = string.charCodeAt(counter++); - if (value >= 55296 && value <= 56319 && counter < length) { - // high surrogate, and there is a next character - extra = string.charCodeAt(counter++); - if ((extra & 64512) == 56320) { - // low surrogate - output.push(((value & 1023) << 10) + (extra & 1023) + 65536); - } else { - // unmatched surrogate; only append this code unit, in case the next - // code unit is the high surrogate of a surrogate pair - output.push(value); - counter--; - } - } else { - output.push(value); - } - } - return output; - } - /** - * Creates a string based on an array of numeric code points. - * @see `punycode.ucs2.decode` - * @memberOf punycode.ucs2 - * @name encode - * @param {Array} codePoints The array of numeric code points. - * @returns {String} The new Unicode string (UCS-2). - */ function ucs2encode(array) { - return map(array, (function(value) { - var output = ""; - if (value > 65535) { - value -= 65536; - output += stringFromCharCode(value >>> 10 & 1023 | 55296); - value = 56320 | value & 1023; - } - output += stringFromCharCode(value); - return output; - })).join(""); - } - /** - * Converts a basic code point into a digit/integer. - * @see `digitToBasic()` - * @private - * @param {Number} codePoint The basic numeric code point value. - * @returns {Number} The numeric value of a basic code point (for use in - * representing integers) in the range `0` to `base - 1`, or `base` if - * the code point does not represent a value. - */ function basicToDigit(codePoint) { - if (codePoint - 48 < 10) { - return codePoint - 22; - } - if (codePoint - 65 < 26) { - return codePoint - 65; - } - if (codePoint - 97 < 26) { - return codePoint - 97; - } - return base; - } - /** - * Converts a digit/integer into a basic code point. - * @see `basicToDigit()` - * @private - * @param {Number} digit The numeric value of a basic code point. - * @returns {Number} The basic code point whose value (when used for - * representing integers) is `digit`, which needs to be in the range - * `0` to `base - 1`. If `flag` is non-zero, the uppercase form is - * used; else, the lowercase form is used. The behavior is undefined - * if `flag` is non-zero and `digit` has no uppercase form. - */ function digitToBasic(digit, flag) { - // 0..25 map to ASCII a..z or A..Z - // 26..35 map to ASCII 0..9 - return digit + 22 + 75 * (digit < 26) - ((flag != 0) << 5); - } - /** - * Bias adaptation function as per section 3.4 of RFC 3492. - * https://tools.ietf.org/html/rfc3492#section-3.4 - * @private - */ function adapt(delta, numPoints, firstTime) { - var k = 0; - delta = firstTime ? floor(delta / damp) : delta >> 1; - delta += floor(delta / numPoints); - for (;delta > baseMinusTMin * tMax >> 1; k += base) { - delta = floor(delta / baseMinusTMin); - } - return floor(k + (baseMinusTMin + 1) * delta / (delta + skew)); - } - /** - * Converts a Punycode string of ASCII-only symbols to a string of Unicode - * symbols. - * @memberOf punycode - * @param {String} input The Punycode string of ASCII-only symbols. - * @returns {String} The resulting string of Unicode symbols. 
- */ function decode(input) { - // Don't use UCS-2 - var output = [], inputLength = input.length, out, i = 0, n = initialN, bias = initialBias, basic, j, index, oldi, w, k, digit, t, - /** Cached calculation results */ - baseMinusT; - // Handle the basic code points: let `basic` be the number of input code - // points before the last delimiter, or `0` if there is none, then copy - // the first basic code points to the output. - basic = input.lastIndexOf(delimiter); - if (basic < 0) { - basic = 0; - } - for (j = 0; j < basic; ++j) { - // if it's not a basic code point - if (input.charCodeAt(j) >= 128) { - error("not-basic"); - } - output.push(input.charCodeAt(j)); - } - // Main decoding loop: start just after the last delimiter if any basic code - // points were copied; start at the beginning otherwise. - for (index = basic > 0 ? basic + 1 : 0; index < inputLength; ) { - // `index` is the index of the next character to be consumed. - // Decode a generalized variable-length integer into `delta`, - // which gets added to `i`. The overflow checking is easier - // if we increase `i` as we go, then subtract off its starting - // value at the end to obtain `delta`. - for (oldi = i, w = 1, k = base; ;k += base) { - if (index >= inputLength) { - error("invalid-input"); - } - digit = basicToDigit(input.charCodeAt(index++)); - if (digit >= base || digit > floor((maxInt - i) / w)) { - error("overflow"); - } - i += digit * w; - t = k <= bias ? tMin : k >= bias + tMax ? tMax : k - bias; - if (digit < t) { - break; - } - baseMinusT = base - t; - if (w > floor(maxInt / baseMinusT)) { - error("overflow"); - } - w *= baseMinusT; - } - out = output.length + 1; - bias = adapt(i - oldi, out, oldi == 0); - // `i` was supposed to wrap around from `out` to `0`, - // incrementing `n` each time, so we'll fix that now: - if (floor(i / out) > maxInt - n) { - error("overflow"); - } - n += floor(i / out); - i %= out; - // Insert `n` at position `i` of the output - output.splice(i++, 0, n); - } - return ucs2encode(output); - } - /** - * Converts a string of Unicode symbols (e.g. a domain name label) to a - * Punycode string of ASCII-only symbols. - * @memberOf punycode - * @param {String} input The string of Unicode symbols. - * @returns {String} The resulting Punycode string of ASCII-only symbols. - */ function encode(input) { - var n, delta, handledCPCount, basicLength, bias, j, m, q, k, t, currentValue, output = [], - /** `inputLength` will hold the number of code points in `input`. */ - inputLength, - /** Cached calculation results */ - handledCPCountPlusOne, baseMinusT, qMinusT; - // Convert the input in UCS-2 to Unicode - input = ucs2decode(input); - // Cache the length - inputLength = input.length; - // Initialize the state - n = initialN; - delta = 0; - bias = initialBias; - // Handle the basic code points - for (j = 0; j < inputLength; ++j) { - currentValue = input[j]; - if (currentValue < 128) { - output.push(stringFromCharCode(currentValue)); - } - } - handledCPCount = basicLength = output.length; - // `handledCPCount` is the number of code points that have been handled; - // `basicLength` is the number of basic code points. - // Finish the basic string - if it is not empty - with a delimiter - if (basicLength) { - output.push(delimiter); - } - // Main encoding loop: - while (handledCPCount < inputLength) { - // All non-basic code points < n have been handled already. 
Find the next - // larger one: - for (m = maxInt, j = 0; j < inputLength; ++j) { - currentValue = input[j]; - if (currentValue >= n && currentValue < m) { - m = currentValue; - } - } - // Increase `delta` enough to advance the decoder's state to , - // but guard against overflow - handledCPCountPlusOne = handledCPCount + 1; - if (m - n > floor((maxInt - delta) / handledCPCountPlusOne)) { - error("overflow"); - } - delta += (m - n) * handledCPCountPlusOne; - n = m; - for (j = 0; j < inputLength; ++j) { - currentValue = input[j]; - if (currentValue < n && ++delta > maxInt) { - error("overflow"); - } - if (currentValue == n) { - // Represent delta as a generalized variable-length integer - for (q = delta, k = base; ;k += base) { - t = k <= bias ? tMin : k >= bias + tMax ? tMax : k - bias; - if (q < t) { - break; - } - qMinusT = q - t; - baseMinusT = base - t; - output.push(stringFromCharCode(digitToBasic(t + qMinusT % baseMinusT, 0))); - q = floor(qMinusT / baseMinusT); - } - output.push(stringFromCharCode(digitToBasic(q, 0))); - bias = adapt(delta, handledCPCountPlusOne, handledCPCount == basicLength); - delta = 0; - ++handledCPCount; - } - } - ++delta; - ++n; - } - return output.join(""); - } - /** - * Converts a Punycode string representing a domain name or an email address - * to Unicode. Only the Punycoded parts of the input will be converted, i.e. - * it doesn't matter if you call it on a string that has already been - * converted to Unicode. - * @memberOf punycode - * @param {String} input The Punycoded domain name or email address to - * convert to Unicode. - * @returns {String} The Unicode representation of the given Punycode - * string. - */ function toUnicode(input) { - return mapDomain(input, (function(string) { - return regexPunycode.test(string) ? decode(string.slice(4).toLowerCase()) : string; - })); - } - /** - * Converts a Unicode string representing a domain name or an email address to - * Punycode. Only the non-ASCII parts of the domain name will be converted, - * i.e. it doesn't matter if you call it with a domain that's already in - * ASCII. - * @memberOf punycode - * @param {String} input The domain name or email address to convert, as a - * Unicode string. - * @returns {String} The Punycode representation of the given domain name or - * email address. - */ function toASCII(input) { - return mapDomain(input, (function(string) { - return regexNonASCII.test(string) ? "xn--" + encode(string) : string; - })); - } - var version = "1.4.1"; - /** - * An object of methods to convert from JavaScript's internal character - * representation (UCS-2) to Unicode code points, and back. - * @see - * @memberOf punycode - * @type Object - */ var ucs2 = { - decode: ucs2decode, - encode: ucs2encode - }; - var punycode$1 = { - version: version, - ucs2: ucs2, - toASCII: toASCII, - toUnicode: toUnicode, - encode: encode, - decode: decode - }; - var punycode$2 = Object.freeze({ - __proto__: null, - decode: decode, - encode: encode, - toUnicode: toUnicode, - toASCII: toASCII, - version: version, - ucs2: ucs2, - default: punycode$1 - }); - // markdown-it default options - var _default = { - options: { - html: false, - // Enable HTML tags in source - xhtmlOut: false, - // Use '/' to close single tags (
<br />) - breaks: false, - // Convert '\n' in paragraphs into <br>
- langPrefix: "language-", - // CSS language prefix for fenced blocks - linkify: false, - // autoconvert URL-like texts to links - // Enable some language-neutral replacements + quotes beautification - typographer: false, - // Double + single quotes replacement pairs, when typographer enabled, - // and smartquotes on. Could be either a String or an Array. - // For example, you can use '«»„“' for Russian, '„“‚‘' for German, - // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). - quotes: "\u201c\u201d\u2018\u2019", - /* “”‘’ */ - // Highlighter function. Should return escaped HTML, - // or '' if the source string is not changed and should be escaped externaly. - // If result starts with ) - breaks: false, - // Convert '\n' in paragraphs into <br>
- langPrefix: "language-", - // CSS language prefix for fenced blocks - linkify: false, - // autoconvert URL-like texts to links - // Enable some language-neutral replacements + quotes beautification - typographer: false, - // Double + single quotes replacement pairs, when typographer enabled, - // and smartquotes on. Could be either a String or an Array. - // For example, you can use '«»„“' for Russian, '„“‚‘' for German, - // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). - quotes: "\u201c\u201d\u2018\u2019", - /* “”‘’ */ - // Highlighter function. Should return escaped HTML, - // or '' if the source string is not changed and should be escaped externaly. - // If result starts with ) - breaks: false, - // Convert '\n' in paragraphs into <br>
- langPrefix: "language-", - // CSS language prefix for fenced blocks - linkify: false, - // autoconvert URL-like texts to links - // Enable some language-neutral replacements + quotes beautification - typographer: false, - // Double + single quotes replacement pairs, when typographer enabled, - // and smartquotes on. Could be either a String or an Array. - // For example, you can use '«»„“' for Russian, '„“‚‘' for German, - // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp). - quotes: "\u201c\u201d\u2018\u2019", - /* “”‘’ */ - // Highlighter function. Should return escaped HTML, - // or '' if the source string is not changed and should be escaped externaly. - // If result starts with = 0) { - try { - parsed.hostname = punycode.toASCII(parsed.hostname); - } catch (er) {} - } - } - return mdurl.encode(mdurl.format(parsed)); - } - function normalizeLinkText(url) { - var parsed = mdurl.parse(url, true); - if (parsed.hostname) { - // Encode hostnames in urls like: - // `http://host/`, `https://host/`, `mailto:user@host`, `//host/` - // We don't encode unknown schemas, because it's likely that we encode - // something we shouldn't (e.g. `skype:name` treated as `skype:host`) - if (!parsed.protocol || RECODE_HOSTNAME_FOR.indexOf(parsed.protocol) >= 0) { - try { - parsed.hostname = punycode.toUnicode(parsed.hostname); - } catch (er) {} - } - } - // add '%' to exclude list because of https://github.com/markdown-it/markdown-it/issues/720 - return mdurl.decode(mdurl.format(parsed), mdurl.decode.defaultChars + "%"); - } - /** - * class MarkdownIt - * - * Main parser/renderer class. - * - * ##### Usage - * - * ```javascript - * // node.js, "classic" way: - * var MarkdownIt = require('markdown-it'), - * md = new MarkdownIt(); - * var result = md.render('# markdown-it rulezz!'); - * - * // node.js, the same, but with sugar: - * var md = require('markdown-it')(); - * var result = md.render('# markdown-it rulezz!'); - * - * // browser without AMD, added to "window" on script load - * // Note, there are no dash. - * var md = window.markdownit(); - * var result = md.render('# markdown-it rulezz!'); - * ``` - * - * Single line rendering, without paragraph wrap: - * - * ```javascript - * var md = require('markdown-it')(); - * var result = md.renderInline('__markdown-it__ rulezz!'); - * ``` - **/ - /** - * new MarkdownIt([presetName, options]) - * - presetName (String): optional, `commonmark` / `zero` - * - options (Object) - * - * Creates parser instanse with given config. Can be called without `new`. - * - * ##### presetName - * - * MarkdownIt provides named presets as a convenience to quickly - * enable/disable active syntax rules and options for common use cases. - * - * - ["commonmark"](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/commonmark.js) - - * configures parser to strict [CommonMark](http://commonmark.org/) mode. - * - [default](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/default.js) - - * similar to GFM, used when no preset name given. Enables all available rules, - * but still without html, typographer & autolinker. - * - ["zero"](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/zero.js) - - * all rules disabled. Useful to quickly setup your config via `.enable()`. - * For example, when you need only `bold` and `italic` markup and nothing else. - * - * ##### options: - * - * - __html__ - `false`. Set `true` to enable HTML tags in source. Be careful! - * That's not safe!
You may need external sanitizer to protect output from XSS. - * It's better to extend features via plugins, instead of enabling HTML. - * - __xhtmlOut__ - `false`. Set `true` to add '/' when closing single tags - * (`
<br />`). This is needed only for full CommonMark compatibility. In real - * world you will need HTML output. - * - __breaks__ - `false`. Set `true` to convert `\n` in paragraphs into `<br>
`. - __langPrefix__ - `language-`. CSS language class prefix for fenced blocks. - * Can be useful for external highlighters. - * - __linkify__ - `false`. Set `true` to autoconvert URL-like text to links. - * - __typographer__ - `false`. Set `true` to enable [some language-neutral - * replacement](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/replacements.js) + - * quotes beautification (smartquotes). - * - __quotes__ - `“”‘’`, String or Array. Double + single quotes replacement - * pairs, when typographer enabled and smartquotes on. For example, you can - * use `'«»„“'` for Russian, `'„“‚‘'` for German, and - * `['«\xA0', '\xA0»', '‹\xA0', '\xA0›']` for French (including nbsp). - * - __highlight__ - `null`. Highlighter function for fenced code blocks. - * Highlighter `function (str, lang)` should return escaped HTML. It can also - * return empty string if the source was not changed and should be escaped - * externaly. If result starts with `): - * - * ```javascript - * var hljs = require('highlight.js') // https://highlightjs.org/ - * - * // Actual default values - * var md = require('markdown-it')({ - * highlight: function (str, lang) { - * if (lang && hljs.getLanguage(lang)) { - * try { - * return '
<pre><code class="hljs">' +
-	 *                hljs.highlight(str, { language: lang, ignoreIllegals: true }).value +
-	 *                '
</code></pre>'; - * } catch (__) {} - * } - * - * return '<pre><code class="hljs">
' + md.utils.escapeHtml(str) + '</code></pre>
'; - * } - * }); - * ``` - * - **/ function MarkdownIt(presetName, options) { - if (!(this instanceof MarkdownIt)) { - return new MarkdownIt(presetName, options); - } - if (!options) { - if (!utils.isString(presetName)) { - options = presetName || {}; - presetName = "default"; - } - } - /** - * MarkdownIt#inline -> ParserInline - * - * Instance of [[ParserInline]]. You may need it to add new rules when - * writing plugins. For simple rules control use [[MarkdownIt.disable]] and - * [[MarkdownIt.enable]]. - **/ this.inline = new parser_inline; - /** - * MarkdownIt#block -> ParserBlock - * - * Instance of [[ParserBlock]]. You may need it to add new rules when - * writing plugins. For simple rules control use [[MarkdownIt.disable]] and - * [[MarkdownIt.enable]]. - **/ this.block = new parser_block; - /** - * MarkdownIt#core -> Core - * - * Instance of [[Core]] chain executor. You may need it to add new rules when - * writing plugins. For simple rules control use [[MarkdownIt.disable]] and - * [[MarkdownIt.enable]]. - **/ this.core = new parser_core; - /** - * MarkdownIt#renderer -> Renderer - * - * Instance of [[Renderer]]. Use it to modify output look. Or to add rendering - * rules for new token types, generated by plugins. - * - * ##### Example - * - * ```javascript - * var md = require('markdown-it')(); - * - * function myToken(tokens, idx, options, env, self) { - * //... - * return result; - * }; - * - * md.renderer.rules['my_token'] = myToken - * ``` - * - * See [[Renderer]] docs and [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js). - **/ this.renderer = new renderer; - /** - * MarkdownIt#linkify -> LinkifyIt - * - * [linkify-it](https://github.com/markdown-it/linkify-it) instance. - * Used by [linkify](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/linkify.js) - * rule. - **/ this.linkify = new linkifyIt; - /** - * MarkdownIt#validateLink(url) -> Boolean - * - * Link validation function. CommonMark allows too much in links. By default - * we disable `javascript:`, `vbscript:`, `file:` schemas, and almost all `data:...` schemas - * except some embedded image types. - * - * You can change this behaviour: - * - * ```javascript - * var md = require('markdown-it')(); - * // enable everything - * md.validateLink = function () { return true; } - * ``` - **/ this.validateLink = validateLink; - /** - * MarkdownIt#normalizeLink(url) -> String - * - * Function used to encode link url to a machine-readable format, - * which includes url-encoding, punycode, etc. - **/ this.normalizeLink = normalizeLink; - /** - * MarkdownIt#normalizeLinkText(url) -> String - * - * Function used to decode link url to a human-readable format` - **/ this.normalizeLinkText = normalizeLinkText; - // Expose utils & helpers for easy acces from plugins - /** - * MarkdownIt#utils -> utils - * - * Assorted utility functions, useful to write plugins. See details - * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/common/utils.js). - **/ this.utils = utils; - /** - * MarkdownIt#helpers -> helpers - * - * Link components parser functions, useful to write plugins. See details - * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/helpers). - **/ this.helpers = utils.assign({}, helpers); - this.options = {}; - this.configure(presetName); - if (options) { - this.set(options); - } - } - /** chainable - * MarkdownIt.set(options) - * - * Set parser options (in the same format as in constructor). 
Probably, you
-     * will never need it, but you can change options after constructor call.
-     *
-     * ##### Example
-     *
-     * ```javascript
-     * var md = require('markdown-it')()
-     *     .set({ html: true, breaks: true })
-     *     .set({ typographer: true });
-     * ```
-     *
-     * __Note:__ To achieve the best possible performance, don't modify a
-     * `markdown-it` instance's options on the fly. If you need multiple configurations
-     * it's best to create multiple instances and initialize each with a separate
-     * config.
-     **/ MarkdownIt.prototype.set = function(options) {
-        utils.assign(this.options, options);
-        return this;
-    };
-    /** chainable, internal
-     * MarkdownIt.configure(presets)
-     *
-     * Batch load of all options and component settings. This is an internal method,
-     * and you probably will not need it. But if you do, see the available presets
-     * and data structure [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets)
-     *
-     * We strongly recommend using presets instead of direct config loads. That
-     * will give better compatibility with future versions.
-     **/ MarkdownIt.prototype.configure = function(presets) {
-        var self = this, presetName;
-        if (utils.isString(presets)) {
-            presetName = presets;
-            presets = config[presetName];
-            if (!presets) {
-                throw new Error('Wrong `markdown-it` preset "' + presetName + '", check name');
-            }
-        }
-        if (!presets) {
-            throw new Error("Wrong `markdown-it` preset, can't be empty");
-        }
-        if (presets.options) {
-            self.set(presets.options);
-        }
-        if (presets.components) {
-            Object.keys(presets.components).forEach((function(name) {
-                if (presets.components[name].rules) {
-                    self[name].ruler.enableOnly(presets.components[name].rules);
-                }
-                if (presets.components[name].rules2) {
-                    self[name].ruler2.enableOnly(presets.components[name].rules2);
-                }
-            }));
-        }
-        return this;
-    };
-    /** chainable
-     * MarkdownIt.enable(list, ignoreInvalid)
-     * - list (String|Array): rule name or list of rule names to enable
-     * - ignoreInvalid (Boolean): set `true` to ignore errors when a rule is not found.
-     *
-     * Enable a list of rules. It will automatically find the appropriate components
-     * containing rules with the given names. If a rule is not found and `ignoreInvalid`
-     * is not set, an exception is thrown.
-     *
-     * ##### Example
-     *
-     * ```javascript
-     * var md = require('markdown-it')()
-     *     .enable(['sub', 'sup'])
-     *     .disable('smartquotes');
-     * ```
-     **/ MarkdownIt.prototype.enable = function(list, ignoreInvalid) {
-        var result = [];
-        if (!Array.isArray(list)) {
-            list = [ list ];
-        }
-        [ "core", "block", "inline" ].forEach((function(chain) {
-            result = result.concat(this[chain].ruler.enable(list, true));
-        }), this);
-        result = result.concat(this.inline.ruler2.enable(list, true));
-        var missed = list.filter((function(name) {
-            return result.indexOf(name) < 0;
-        }));
-        if (missed.length && !ignoreInvalid) {
-            throw new Error("MarkdownIt. Failed to enable unknown rule(s): " + missed);
-        }
-        return this;
-    };
-    /** chainable
-     * MarkdownIt.disable(list, ignoreInvalid)
-     * - list (String|Array): rule name or list of rule names to disable.
-     * - ignoreInvalid (Boolean): set `true` to ignore errors when a rule is not found.
-     *
-     * The same as [[MarkdownIt.enable]], but turns the specified rules off.
-     **/ MarkdownIt.prototype.disable = function(list, ignoreInvalid) {
-        var result = [];
-        if (!Array.isArray(list)) {
-            list = [ list ];
-        }
-        [ "core", "block", "inline" ].forEach((function(chain) {
-            result = result.concat(this[chain].ruler.disable(list, true));
-        }), this);
-        result = result.concat(this.inline.ruler2.disable(list, true));
-        var missed = list.filter((function(name) {
-            return result.indexOf(name) < 0;
-        }));
-        if (missed.length && !ignoreInvalid) {
-            throw new Error("MarkdownIt. Failed to disable unknown rule(s): " + missed);
-        }
-        return this;
-    };
-    /** chainable
-     * MarkdownIt.use(plugin, params)
-     *
-     * Load the specified plugin with the given params into the current parser instance.
-     * It's just sugar for calling `plugin(md, params)` with currying.
-     *
-     * ##### Example
-     *
-     * ```javascript
-     * var iterator = require('markdown-it-for-inline');
-     * var md = require('markdown-it')()
-     *     .use(iterator, 'foo_replace', 'text', function (tokens, idx) {
-     *         tokens[idx].content = tokens[idx].content.replace(/foo/g, 'bar');
-     *     });
-     * ```
-     **/ MarkdownIt.prototype.use = function(plugin /*, params, ... */) {
-        var args = [ this ].concat(Array.prototype.slice.call(arguments, 1));
-        plugin.apply(plugin, args);
-        return this;
-    };
-    /** internal
-     * MarkdownIt.parse(src, env) -> Array
-     * - src (String): source string
-     * - env (Object): environment sandbox
-     *
-     * Parse the input string and return a list of block tokens (the special token
-     * type "inline" will contain a list of inline tokens). You should not call this
-     * method directly, unless you write a custom renderer (for example, to produce
-     * an AST).
-     *
-     * `env` is used to pass data between "distributed" rules and return additional
-     * metadata like reference info, needed for the renderer. It also can be used to
-     * inject data in specific cases. Usually it is OK to pass `{}`,
-     * and then pass the updated object to the renderer.
-     **/ MarkdownIt.prototype.parse = function(src, env) {
-        if (typeof src !== "string") {
-            throw new Error("Input data should be a String");
-        }
-        var state = new this.core.State(src, this, env);
-        this.core.process(state);
-        return state.tokens;
-    };
-    /**
-     * MarkdownIt.render(src [, env]) -> String
-     * - src (String): source string
-     * - env (Object): environment sandbox
-     *
-     * Render a markdown string into HTML. It does all the magic for you :).
-     *
-     * `env` can be used to inject additional metadata (`{}` by default),
-     * but you will most likely not need it. See also the comment
-     * in [[MarkdownIt.parse]].
-     **/ MarkdownIt.prototype.render = function(src, env) {
-        env = env || {};
-        return this.renderer.render(this.parse(src, env), this.options, env);
-    };
-    /** internal
-     * MarkdownIt.parseInline(src, env) -> Array
-     * - src (String): source string
-     * - env (Object): environment sandbox
-     *
-     * The same as [[MarkdownIt.parse]] but skips all block rules. It returns the
-     * block tokens list with a single `inline` element, containing the parsed inline
-     * tokens in its `children` property. It also updates the `env` object.
-     **/ MarkdownIt.prototype.parseInline = function(src, env) {
-        var state = new this.core.State(src, this, env);
-        state.inlineMode = true;
-        this.core.process(state);
-        return state.tokens;
-    };
-    /**
-     * MarkdownIt.renderInline(src [, env]) -> String
-     * - src (String): source string
-     * - env (Object): environment sandbox
-     *
-     * Similar to [[MarkdownIt.render]] but for single-paragraph content. The result
-     * will NOT be wrapped into `<p>
` tags. - **/ MarkdownIt.prototype.renderInline = function(src, env) { - env = env || {}; - return this.renderer.render(this.parseInline(src, env), this.options, env); - }; - var lib = MarkdownIt; - var markdownIt = lib; - return markdownIt; -})); - diff --git a/examples/server/public/deps_tailwindcss.js b/examples/server/public/deps_tailwindcss.js deleted file mode 100644 index 6736cb8ca7d16..0000000000000 --- a/examples/server/public/deps_tailwindcss.js +++ /dev/null @@ -1,82 +0,0 @@ -(()=>{var Iv=Object.create;var Ui=Object.defineProperty;var Dv=Object.getOwnPropertyDescriptor;var qv=Object.getOwnPropertyNames;var $v=Object.getPrototypeOf,Lv=Object.prototype.hasOwnProperty;var cf=r=>Ui(r,"__esModule",{value:!0});var pf=r=>{if(typeof require!="undefined")return require(r);throw new Error('Dynamic require of "'+r+'" is not supported')};var R=(r,e)=>()=>(r&&(e=r(r=0)),e);var x=(r,e)=>()=>(e||r((e={exports:{}}).exports,e),e.exports),Ge=(r,e)=>{cf(r);for(var t in e)Ui(r,t,{get:e[t],enumerable:!0})},Mv=(r,e,t)=>{if(e&&typeof e=="object"||typeof e=="function")for(let i of qv(e))!Lv.call(r,i)&&i!=="default"&&Ui(r,i,{get:()=>e[i],enumerable:!(t=Dv(e,i))||t.enumerable});return r},pe=r=>Mv(cf(Ui(r!=null?Iv($v(r)):{},"default",r&&r.__esModule&&"default"in r?{get:()=>r.default,enumerable:!0}:{value:r,enumerable:!0})),r);var m,u=R(()=>{m={platform:"",env:{},versions:{node:"14.17.6"}}});var Nv,be,ft=R(()=>{u();Nv=0,be={readFileSync:r=>self[r]||"",statSync:()=>({mtimeMs:Nv++}),promises:{readFile:r=>Promise.resolve(self[r]||"")}}});var Ns=x((sP,hf)=>{u();"use strict";var df=class{constructor(e={}){if(!(e.maxSize&&e.maxSize>0))throw new TypeError("`maxSize` must be a number greater than 0");if(typeof e.maxAge=="number"&&e.maxAge===0)throw new TypeError("`maxAge` must be a number greater than 0");this.maxSize=e.maxSize,this.maxAge=e.maxAge||1/0,this.onEviction=e.onEviction,this.cache=new Map,this.oldCache=new Map,this._size=0}_emitEvictions(e){if(typeof this.onEviction=="function")for(let[t,i]of e)this.onEviction(t,i.value)}_deleteIfExpired(e,t){return typeof t.expiry=="number"&&t.expiry<=Date.now()?(typeof this.onEviction=="function"&&this.onEviction(e,t.value),this.delete(e)):!1}_getOrDeleteIfExpired(e,t){if(this._deleteIfExpired(e,t)===!1)return t.value}_getItemValue(e,t){return t.expiry?this._getOrDeleteIfExpired(e,t):t.value}_peek(e,t){let i=t.get(e);return this._getItemValue(e,i)}_set(e,t){this.cache.set(e,t),this._size++,this._size>=this.maxSize&&(this._size=0,this._emitEvictions(this.oldCache),this.oldCache=this.cache,this.cache=new Map)}_moveToRecent(e,t){this.oldCache.delete(e),this._set(e,t)}*_entriesAscending(){for(let e of this.oldCache){let[t,i]=e;this.cache.has(t)||this._deleteIfExpired(t,i)===!1&&(yield e)}for(let e of this.cache){let[t,i]=e;this._deleteIfExpired(t,i)===!1&&(yield e)}}get(e){if(this.cache.has(e)){let t=this.cache.get(e);return this._getItemValue(e,t)}if(this.oldCache.has(e)){let t=this.oldCache.get(e);if(this._deleteIfExpired(e,t)===!1)return this._moveToRecent(e,t),t.value}}set(e,t,{maxAge:i=this.maxAge===1/0?void 0:Date.now()+this.maxAge}={}){this.cache.has(e)?this.cache.set(e,{value:t,maxAge:i}):this._set(e,{value:t,expiry:i})}has(e){return this.cache.has(e)?!this._deleteIfExpired(e,this.cache.get(e)):this.oldCache.has(e)?!this._deleteIfExpired(e,this.oldCache.get(e)):!1}peek(e){if(this.cache.has(e))return this._peek(e,this.cache);if(this.oldCache.has(e))return this._peek(e,this.oldCache)}delete(e){let t=this.cache.delete(e);return 
t&&this._size--,this.oldCache.delete(e)||t}clear(){this.cache.clear(),this.oldCache.clear(),this._size=0}resize(e){if(!(e&&e>0))throw new TypeError("`maxSize` must be a number greater than 0");let t=[...this._entriesAscending()],i=t.length-e;i<0?(this.cache=new Map(t),this.oldCache=new Map,this._size=t.length):(i>0&&this._emitEvictions(t.slice(0,i)),this.oldCache=new Map(t.slice(i)),this.cache=new Map,this._size=0),this.maxSize=e}*keys(){for(let[e]of this)yield e}*values(){for(let[,e]of this)yield e}*[Symbol.iterator](){for(let e of this.cache){let[t,i]=e;this._deleteIfExpired(t,i)===!1&&(yield[t,i.value])}for(let e of this.oldCache){let[t,i]=e;this.cache.has(t)||this._deleteIfExpired(t,i)===!1&&(yield[t,i.value])}}*entriesDescending(){let e=[...this.cache];for(let t=e.length-1;t>=0;--t){let i=e[t],[n,a]=i;this._deleteIfExpired(n,a)===!1&&(yield[n,a.value])}e=[...this.oldCache];for(let t=e.length-1;t>=0;--t){let i=e[t],[n,a]=i;this.cache.has(n)||this._deleteIfExpired(n,a)===!1&&(yield[n,a.value])}}*entriesAscending(){for(let[e,t]of this._entriesAscending())yield[e,t.value]}get size(){if(!this._size)return this.oldCache.size;let e=0;for(let t of this.oldCache.keys())this.cache.has(t)||e++;return Math.min(this._size+e,this.maxSize)}};hf.exports=df});var mf,gf=R(()=>{u();mf=r=>r&&r._hash});function Vi(r){return mf(r,{ignoreUnknown:!0})}var yf=R(()=>{u();gf()});function xt(r){if(r=`${r}`,r==="0")return"0";if(/^[+-]?(\d+|\d*\.\d+)(e[+-]?\d+)?(%|\w+)?$/.test(r))return r.replace(/^[+-]?/,t=>t==="-"?"":"-");let e=["var","calc","min","max","clamp"];for(let t of e)if(r.includes(`${t}(`))return`calc(${r} * -1)`}var Hi=R(()=>{u()});var bf,wf=R(()=>{u();bf=["preflight","container","accessibility","pointerEvents","visibility","position","inset","isolation","zIndex","order","gridColumn","gridColumnStart","gridColumnEnd","gridRow","gridRowStart","gridRowEnd","float","clear","margin","boxSizing","lineClamp","display","aspectRatio","size","height","maxHeight","minHeight","width","minWidth","maxWidth","flex","flexShrink","flexGrow","flexBasis","tableLayout","captionSide","borderCollapse","borderSpacing","transformOrigin","translate","rotate","skew","scale","transform","animation","cursor","touchAction","userSelect","resize","scrollSnapType","scrollSnapAlign","scrollSnapStop","scrollMargin","scrollPadding","listStylePosition","listStyleType","listStyleImage","appearance","columns","breakBefore","breakInside","breakAfter","gridAutoColumns","gridAutoFlow","gridAutoRows","gridTemplateColumns","gridTemplateRows","flexDirection","flexWrap","placeContent","placeItems","alignContent","alignItems","justifyContent","justifyItems","gap","space","divideWidth","divideStyle","divideColor","divideOpacity","placeSelf","alignSelf","justifySelf","overflow","overscrollBehavior","scrollBehavior","textOverflow","hyphens","whitespace","textWrap","wordBreak","borderRadius","borderWidth","borderStyle","borderColor","borderOpacity","backgroundColor","backgroundOpacity","backgroundImage","gradientColorStops","boxDecorationBreak","backgroundSize","backgroundAttachment","backgroundClip","backgroundPosition","backgroundRepeat","backgroundOrigin","fill","stroke","strokeWidth","objectFit","objectPosition","padding","textAlign","textIndent","verticalAlign","fontFamily","fontSize","fontWeight","textTransform","fontStyle","fontVariantNumeric","lineHeight","letterSpacing","textColor","textOpacity","textDecoration","textDecorationColor","textDecorationStyle","textDecorationThickness","textUnderlineOffset","fontSmoothing","placeholderColor","pla
ceholderOpacity","caretColor","accentColor","opacity","backgroundBlendMode","mixBlendMode","boxShadow","boxShadowColor","outlineStyle","outlineWidth","outlineOffset","outlineColor","ringWidth","ringColor","ringOpacity","ringOffsetWidth","ringOffsetColor","blur","brightness","contrast","dropShadow","grayscale","hueRotate","invert","saturate","sepia","filter","backdropBlur","backdropBrightness","backdropContrast","backdropGrayscale","backdropHueRotate","backdropInvert","backdropOpacity","backdropSaturate","backdropSepia","backdropFilter","transitionProperty","transitionDelay","transitionDuration","transitionTimingFunction","willChange","contain","content","forcedColorAdjust"]});function vf(r,e){return r===void 0?e:Array.isArray(r)?r:[...new Set(e.filter(i=>r!==!1&&r[i]!==!1).concat(Object.keys(r).filter(i=>r[i]!==!1)))]}var xf=R(()=>{u()});var kf={};Ge(kf,{default:()=>Qe});var Qe,Wi=R(()=>{u();Qe=new Proxy({},{get:()=>String})});function Bs(r,e,t){typeof m!="undefined"&&m.env.JEST_WORKER_ID||t&&Sf.has(t)||(t&&Sf.add(t),console.warn(""),e.forEach(i=>console.warn(r,"-",i)))}function Fs(r){return Qe.dim(r)}var Sf,G,Be=R(()=>{u();Wi();Sf=new Set;G={info(r,e){Bs(Qe.bold(Qe.cyan("info")),...Array.isArray(r)?[r]:[e,r])},warn(r,e){["content-problems"].includes(r)||Bs(Qe.bold(Qe.yellow("warn")),...Array.isArray(r)?[r]:[e,r])},risk(r,e){Bs(Qe.bold(Qe.magenta("risk")),...Array.isArray(r)?[r]:[e,r])}}});var Af={};Ge(Af,{default:()=>js});function qr({version:r,from:e,to:t}){G.warn(`${e}-color-renamed`,[`As of Tailwind CSS ${r}, \`${e}\` has been renamed to \`${t}\`.`,"Update your configuration file to silence this warning."])}var js,zs=R(()=>{u();Be();js={inherit:"inherit",current:"currentColor",transparent:"transparent",black:"#000",white:"#fff",slate:{50:"#f8fafc",100:"#f1f5f9",200:"#e2e8f0",300:"#cbd5e1",400:"#94a3b8",500:"#64748b",600:"#475569",700:"#334155",800:"#1e293b",900:"#0f172a",950:"#020617"},gray:{50:"#f9fafb",100:"#f3f4f6",200:"#e5e7eb",300:"#d1d5db",400:"#9ca3af",500:"#6b7280",600:"#4b5563",700:"#374151",800:"#1f2937",900:"#111827",950:"#030712"},zinc:{50:"#fafafa",100:"#f4f4f5",200:"#e4e4e7",300:"#d4d4d8",400:"#a1a1aa",500:"#71717a",600:"#52525b",700:"#3f3f46",800:"#27272a",900:"#18181b",950:"#09090b"},neutral:{50:"#fafafa",100:"#f5f5f5",200:"#e5e5e5",300:"#d4d4d4",400:"#a3a3a3",500:"#737373",600:"#525252",700:"#404040",800:"#262626",900:"#171717",950:"#0a0a0a"},stone:{50:"#fafaf9",100:"#f5f5f4",200:"#e7e5e4",300:"#d6d3d1",400:"#a8a29e",500:"#78716c",600:"#57534e",700:"#44403c",800:"#292524",900:"#1c1917",950:"#0c0a09"},red:{50:"#fef2f2",100:"#fee2e2",200:"#fecaca",300:"#fca5a5",400:"#f87171",500:"#ef4444",600:"#dc2626",700:"#b91c1c",800:"#991b1b",900:"#7f1d1d",950:"#450a0a"},orange:{50:"#fff7ed",100:"#ffedd5",200:"#fed7aa",300:"#fdba74",400:"#fb923c",500:"#f97316",600:"#ea580c",700:"#c2410c",800:"#9a3412",900:"#7c2d12",950:"#431407"},amber:{50:"#fffbeb",100:"#fef3c7",200:"#fde68a",300:"#fcd34d",400:"#fbbf24",500:"#f59e0b",600:"#d97706",700:"#b45309",800:"#92400e",900:"#78350f",950:"#451a03"},yellow:{50:"#fefce8",100:"#fef9c3",200:"#fef08a",300:"#fde047",400:"#facc15",500:"#eab308",600:"#ca8a04",700:"#a16207",800:"#854d0e",900:"#713f12",950:"#422006"},lime:{50:"#f7fee7",100:"#ecfccb",200:"#d9f99d",300:"#bef264",400:"#a3e635",500:"#84cc16",600:"#65a30d",700:"#4d7c0f",800:"#3f6212",900:"#365314",950:"#1a2e05"},green:{50:"#f0fdf4",100:"#dcfce7",200:"#bbf7d0",300:"#86efac",400:"#4ade80",500:"#22c55e",600:"#16a34a",700:"#15803d",800:"#166534",900:"#14532d",950:"#052e16"},emerald:{50:"#ecfdf5",1
00:"#d1fae5",200:"#a7f3d0",300:"#6ee7b7",400:"#34d399",500:"#10b981",600:"#059669",700:"#047857",800:"#065f46",900:"#064e3b",950:"#022c22"},teal:{50:"#f0fdfa",100:"#ccfbf1",200:"#99f6e4",300:"#5eead4",400:"#2dd4bf",500:"#14b8a6",600:"#0d9488",700:"#0f766e",800:"#115e59",900:"#134e4a",950:"#042f2e"},cyan:{50:"#ecfeff",100:"#cffafe",200:"#a5f3fc",300:"#67e8f9",400:"#22d3ee",500:"#06b6d4",600:"#0891b2",700:"#0e7490",800:"#155e75",900:"#164e63",950:"#083344"},sky:{50:"#f0f9ff",100:"#e0f2fe",200:"#bae6fd",300:"#7dd3fc",400:"#38bdf8",500:"#0ea5e9",600:"#0284c7",700:"#0369a1",800:"#075985",900:"#0c4a6e",950:"#082f49"},blue:{50:"#eff6ff",100:"#dbeafe",200:"#bfdbfe",300:"#93c5fd",400:"#60a5fa",500:"#3b82f6",600:"#2563eb",700:"#1d4ed8",800:"#1e40af",900:"#1e3a8a",950:"#172554"},indigo:{50:"#eef2ff",100:"#e0e7ff",200:"#c7d2fe",300:"#a5b4fc",400:"#818cf8",500:"#6366f1",600:"#4f46e5",700:"#4338ca",800:"#3730a3",900:"#312e81",950:"#1e1b4b"},violet:{50:"#f5f3ff",100:"#ede9fe",200:"#ddd6fe",300:"#c4b5fd",400:"#a78bfa",500:"#8b5cf6",600:"#7c3aed",700:"#6d28d9",800:"#5b21b6",900:"#4c1d95",950:"#2e1065"},purple:{50:"#faf5ff",100:"#f3e8ff",200:"#e9d5ff",300:"#d8b4fe",400:"#c084fc",500:"#a855f7",600:"#9333ea",700:"#7e22ce",800:"#6b21a8",900:"#581c87",950:"#3b0764"},fuchsia:{50:"#fdf4ff",100:"#fae8ff",200:"#f5d0fe",300:"#f0abfc",400:"#e879f9",500:"#d946ef",600:"#c026d3",700:"#a21caf",800:"#86198f",900:"#701a75",950:"#4a044e"},pink:{50:"#fdf2f8",100:"#fce7f3",200:"#fbcfe8",300:"#f9a8d4",400:"#f472b6",500:"#ec4899",600:"#db2777",700:"#be185d",800:"#9d174d",900:"#831843",950:"#500724"},rose:{50:"#fff1f2",100:"#ffe4e6",200:"#fecdd3",300:"#fda4af",400:"#fb7185",500:"#f43f5e",600:"#e11d48",700:"#be123c",800:"#9f1239",900:"#881337",950:"#4c0519"},get lightBlue(){return qr({version:"v2.2",from:"lightBlue",to:"sky"}),this.sky},get warmGray(){return qr({version:"v3.0",from:"warmGray",to:"stone"}),this.stone},get trueGray(){return qr({version:"v3.0",from:"trueGray",to:"neutral"}),this.neutral},get coolGray(){return qr({version:"v3.0",from:"coolGray",to:"gray"}),this.gray},get blueGray(){return qr({version:"v3.0",from:"blueGray",to:"slate"}),this.slate}}});function Us(r,...e){for(let t of e){for(let i in t)r?.hasOwnProperty?.(i)||(r[i]=t[i]);for(let i of Object.getOwnPropertySymbols(t))r?.hasOwnProperty?.(i)||(r[i]=t[i])}return r}var Cf=R(()=>{u()});function kt(r){if(Array.isArray(r))return r;let e=r.split("[").length-1,t=r.split("]").length-1;if(e!==t)throw new Error(`Path is invalid. 
Has unbalanced brackets: ${r}`);return r.split(/\.(?![^\[]*\])|[\[\]]/g).filter(Boolean)}var Gi=R(()=>{u()});function we(r,e){return Qi.future.includes(e)?r.future==="all"||(r?.future?.[e]??_f[e]??!1):Qi.experimental.includes(e)?r.experimental==="all"||(r?.experimental?.[e]??_f[e]??!1):!1}function Ef(r){return r.experimental==="all"?Qi.experimental:Object.keys(r?.experimental??{}).filter(e=>Qi.experimental.includes(e)&&r.experimental[e])}function Of(r){if(m.env.JEST_WORKER_ID===void 0&&Ef(r).length>0){let e=Ef(r).map(t=>Qe.yellow(t)).join(", ");G.warn("experimental-flags-enabled",[`You have enabled experimental features: ${e}`,"Experimental features in Tailwind CSS are not covered by semver, may introduce breaking changes, and can change at any time."])}}var _f,Qi,ct=R(()=>{u();Wi();Be();_f={optimizeUniversalDefaults:!1,generalizedModifiers:!0,disableColorOpacityUtilitiesByDefault:!1,relativeContentPathsByDefault:!1},Qi={future:["hoverOnlyWhenSupported","respectDefaultRingColorOpacity","disableColorOpacityUtilitiesByDefault","relativeContentPathsByDefault"],experimental:["optimizeUniversalDefaults","generalizedModifiers"]}});function Tf(r){(()=>{if(r.purge||!r.content||!Array.isArray(r.content)&&!(typeof r.content=="object"&&r.content!==null))return!1;if(Array.isArray(r.content))return r.content.every(t=>typeof t=="string"?!0:!(typeof t?.raw!="string"||t?.extension&&typeof t?.extension!="string"));if(typeof r.content=="object"&&r.content!==null){if(Object.keys(r.content).some(t=>!["files","relative","extract","transform"].includes(t)))return!1;if(Array.isArray(r.content.files)){if(!r.content.files.every(t=>typeof t=="string"?!0:!(typeof t?.raw!="string"||t?.extension&&typeof t?.extension!="string")))return!1;if(typeof r.content.extract=="object"){for(let t of Object.values(r.content.extract))if(typeof t!="function")return!1}else if(!(r.content.extract===void 0||typeof r.content.extract=="function"))return!1;if(typeof r.content.transform=="object"){for(let t of Object.values(r.content.transform))if(typeof t!="function")return!1}else if(!(r.content.transform===void 0||typeof r.content.transform=="function"))return!1;if(typeof r.content.relative!="boolean"&&typeof r.content.relative!="undefined")return!1}return!0}return!1})()||G.warn("purge-deprecation",["The `purge`/`content` options have changed in Tailwind CSS v3.0.","Update your configuration file to eliminate this warning.","https://tailwindcss.com/docs/upgrade-guide#configure-content-sources"]),r.safelist=(()=>{let{content:t,purge:i,safelist:n}=r;return Array.isArray(n)?n:Array.isArray(t?.safelist)?t.safelist:Array.isArray(i?.safelist)?i.safelist:Array.isArray(i?.options?.safelist)?i.options.safelist:[]})(),r.blocklist=(()=>{let{blocklist:t}=r;if(Array.isArray(t)){if(t.every(i=>typeof i=="string"))return t;G.warn("blocklist-invalid",["The `blocklist` option must be an array of strings.","https://tailwindcss.com/docs/content-configuration#discarding-classes"])}return[]})(),typeof r.prefix=="function"?(G.warn("prefix-function",["As of Tailwind CSS v3.0, `prefix` cannot be a function.","Update `prefix` in your configuration to be a string to eliminate this warning.","https://tailwindcss.com/docs/upgrade-guide#prefix-cannot-be-a-function"]),r.prefix=""):r.prefix=r.prefix??"",r.content={relative:(()=>{let{content:t}=r;return t?.relative?t.relative:we(r,"relativeContentPathsByDefault")})(),files:(()=>{let{content:t,purge:i}=r;return 
Array.isArray(i)?i:Array.isArray(i?.content)?i.content:Array.isArray(t)?t:Array.isArray(t?.content)?t.content:Array.isArray(t?.files)?t.files:[]})(),extract:(()=>{let t=(()=>r.purge?.extract?r.purge.extract:r.content?.extract?r.content.extract:r.purge?.extract?.DEFAULT?r.purge.extract.DEFAULT:r.content?.extract?.DEFAULT?r.content.extract.DEFAULT:r.purge?.options?.extractors?r.purge.options.extractors:r.content?.options?.extractors?r.content.options.extractors:{})(),i={},n=(()=>{if(r.purge?.options?.defaultExtractor)return r.purge.options.defaultExtractor;if(r.content?.options?.defaultExtractor)return r.content.options.defaultExtractor})();if(n!==void 0&&(i.DEFAULT=n),typeof t=="function")i.DEFAULT=t;else if(Array.isArray(t))for(let{extensions:a,extractor:s}of t??[])for(let o of a)i[o]=s;else typeof t=="object"&&t!==null&&Object.assign(i,t);return i})(),transform:(()=>{let t=(()=>r.purge?.transform?r.purge.transform:r.content?.transform?r.content.transform:r.purge?.transform?.DEFAULT?r.purge.transform.DEFAULT:r.content?.transform?.DEFAULT?r.content.transform.DEFAULT:{})(),i={};return typeof t=="function"?i.DEFAULT=t:typeof t=="object"&&t!==null&&Object.assign(i,t),i})()};for(let t of r.content.files)if(typeof t=="string"&&/{([^,]*?)}/g.test(t)){G.warn("invalid-glob-braces",[`The glob pattern ${Fs(t)} in your Tailwind CSS configuration is invalid.`,`Update it to ${Fs(t.replace(/{([^,]*?)}/g,"$1"))} to silence this warning.`]);break}return r}var Rf=R(()=>{u();ct();Be()});function ke(r){if(Object.prototype.toString.call(r)!=="[object Object]")return!1;let e=Object.getPrototypeOf(r);return e===null||Object.getPrototypeOf(e)===null}var Kt=R(()=>{u()});function St(r){return Array.isArray(r)?r.map(e=>St(e)):typeof r=="object"&&r!==null?Object.fromEntries(Object.entries(r).map(([e,t])=>[e,St(t)])):r}var Yi=R(()=>{u()});function jt(r){return r.replace(/\\,/g,"\\2c ")}var Ki=R(()=>{u()});var 
Vs,Pf=R(()=>{u();Vs={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]}});function $r(r,{loose:e=!1}={}){if(typeof r!="string")return 
null;if(r=r.trim(),r==="transparent")return{mode:"rgb",color:["0","0","0"],alpha:"0"};if(r in Vs)return{mode:"rgb",color:Vs[r].map(a=>a.toString())};let t=r.replace(Fv,(a,s,o,l,c)=>["#",s,s,o,o,l,l,c?c+c:""].join("")).match(Bv);if(t!==null)return{mode:"rgb",color:[parseInt(t[1],16),parseInt(t[2],16),parseInt(t[3],16)].map(a=>a.toString()),alpha:t[4]?(parseInt(t[4],16)/255).toString():void 0};let i=r.match(jv)??r.match(zv);if(i===null)return null;let n=[i[2],i[3],i[4]].filter(Boolean).map(a=>a.toString());return n.length===2&&n[0].startsWith("var(")?{mode:i[1],color:[n[0]],alpha:n[1]}:!e&&n.length!==3||n.length<3&&!n.some(a=>/^var\(.*?\)$/.test(a))?null:{mode:i[1],color:n,alpha:i[5]?.toString?.()}}function Hs({mode:r,color:e,alpha:t}){let i=t!==void 0;return r==="rgba"||r==="hsla"?`${r}(${e.join(", ")}${i?`, ${t}`:""})`:`${r}(${e.join(" ")}${i?` / ${t}`:""})`}var Bv,Fv,At,Xi,If,Ct,jv,zv,Ws=R(()=>{u();Pf();Bv=/^#([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})?$/i,Fv=/^#([a-f\d])([a-f\d])([a-f\d])([a-f\d])?$/i,At=/(?:\d+|\d*\.\d+)%?/,Xi=/(?:\s*,\s*|\s+)/,If=/\s*[,/]\s*/,Ct=/var\(--(?:[^ )]*?)(?:,(?:[^ )]*?|var\(--[^ )]*?\)))?\)/,jv=new RegExp(`^(rgba?)\\(\\s*(${At.source}|${Ct.source})(?:${Xi.source}(${At.source}|${Ct.source}))?(?:${Xi.source}(${At.source}|${Ct.source}))?(?:${If.source}(${At.source}|${Ct.source}))?\\s*\\)$`),zv=new RegExp(`^(hsla?)\\(\\s*((?:${At.source})(?:deg|rad|grad|turn)?|${Ct.source})(?:${Xi.source}(${At.source}|${Ct.source}))?(?:${Xi.source}(${At.source}|${Ct.source}))?(?:${If.source}(${At.source}|${Ct.source}))?\\s*\\)$`)});function Ze(r,e,t){if(typeof r=="function")return r({opacityValue:e});let i=$r(r,{loose:!0});return i===null?t:Hs({...i,alpha:e})}function Ae({color:r,property:e,variable:t}){let i=[].concat(e);if(typeof r=="function")return{[t]:"1",...Object.fromEntries(i.map(a=>[a,r({opacityVariable:t,opacityValue:`var(${t})`})]))};let n=$r(r);return n===null?Object.fromEntries(i.map(a=>[a,r])):n.alpha!==void 0?Object.fromEntries(i.map(a=>[a,r])):{[t]:"1",...Object.fromEntries(i.map(a=>[a,Hs({...n,alpha:`var(${t})`})]))}}var Lr=R(()=>{u();Ws()});function ve(r,e){let t=[],i=[],n=0,a=!1;for(let s=0;s{u()});function Ji(r){return ve(r,",").map(t=>{let i=t.trim(),n={raw:i},a=i.split(Vv),s=new Set;for(let o of a)Df.lastIndex=0,!s.has("KEYWORD")&&Uv.has(o)?(n.keyword=o,s.add("KEYWORD")):Df.test(o)?s.has("X")?s.has("Y")?s.has("BLUR")?s.has("SPREAD")||(n.spread=o,s.add("SPREAD")):(n.blur=o,s.add("BLUR")):(n.y=o,s.add("Y")):(n.x=o,s.add("X")):n.color?(n.unknown||(n.unknown=[]),n.unknown.push(o)):n.color=o;return n.valid=n.x!==void 0&&n.y!==void 0,n})}function qf(r){return r.map(e=>e.valid?[e.keyword,e.x,e.y,e.blur,e.spread,e.color].filter(Boolean).join(" "):e.raw).join(", ")}var Uv,Vv,Df,Gs=R(()=>{u();zt();Uv=new Set(["inset","inherit","initial","revert","unset"]),Vv=/\ +(?![^(]*\))/g,Df=/^-?(\d+|\.\d+)(.*?)$/g});function Qs(r){return Hv.some(e=>new RegExp(`^${e}\\(.*\\)`).test(r))}function K(r,e=null,t=!0){let i=e&&Wv.has(e.property);return r.startsWith("--")&&!i?`var(${r})`:r.includes("url(")?r.split(/(url\(.*?\))/g).filter(Boolean).map(n=>/^url\(.*?\)$/.test(n)?n:K(n,e,!1)).join(""):(r=r.replace(/([^\\])_+/g,(n,a)=>a+" ".repeat(n.length-1)).replace(/^_/g," ").replace(/\\_/g,"_"),t&&(r=r.trim()),r=Gv(r),r)}function Ye(r){return r.includes("=")&&(r=r.replace(/(=.*)/g,(e,t)=>{if(t[1]==="'"||t[1]==='"')return t;if(t.length>2){let i=t[t.length-1];if(t[t.length-2]===" "&&(i==="i"||i==="I"||i==="s"||i==="S"))return`="${t.slice(1,-2)}" 
${t[t.length-1]}`}return`="${t.slice(1)}"`})),r}function Gv(r){let e=["theme"],t=["min-content","max-content","fit-content","safe-area-inset-top","safe-area-inset-right","safe-area-inset-bottom","safe-area-inset-left","titlebar-area-x","titlebar-area-y","titlebar-area-width","titlebar-area-height","keyboard-inset-top","keyboard-inset-right","keyboard-inset-bottom","keyboard-inset-left","keyboard-inset-width","keyboard-inset-height","radial-gradient","linear-gradient","conic-gradient","repeating-radial-gradient","repeating-linear-gradient","repeating-conic-gradient","anchor-size"];return r.replace(/(calc|min|max|clamp)\(.+\)/g,i=>{let n="";function a(){let s=n.trimEnd();return s[s.length-1]}for(let s=0;si[s+p]===d)},l=function(f){let d=1/0;for(let h of f){let b=i.indexOf(h,s);b!==-1&&bo(f))){let f=t.find(d=>o(d));n+=f,s+=f.length-1}else e.some(f=>o(f))?n+=l([")"]):o("[")?n+=l(["]"]):["+","-","*","/"].includes(c)&&!["(","+","-","*","/",","].includes(a())?n+=` ${c} `:n+=c}return n.replace(/\s+/g," ")})}function Ys(r){return r.startsWith("url(")}function Ks(r){return!isNaN(Number(r))||Qs(r)}function Mr(r){return r.endsWith("%")&&Ks(r.slice(0,-1))||Qs(r)}function Nr(r){return r==="0"||new RegExp(`^[+-]?[0-9]*.?[0-9]+(?:[eE][+-]?[0-9]+)?${Yv}$`).test(r)||Qs(r)}function $f(r){return Kv.has(r)}function Lf(r){let e=Ji(K(r));for(let t of e)if(!t.valid)return!1;return!0}function Mf(r){let e=0;return ve(r,"_").every(i=>(i=K(i),i.startsWith("var(")?!0:$r(i,{loose:!0})!==null?(e++,!0):!1))?e>0:!1}function Nf(r){let e=0;return ve(r,",").every(i=>(i=K(i),i.startsWith("var(")?!0:Ys(i)||Jv(i)||["element(","image(","cross-fade(","image-set("].some(n=>i.startsWith(n))?(e++,!0):!1))?e>0:!1}function Jv(r){r=K(r);for(let e of Xv)if(r.startsWith(`${e}(`))return!0;return!1}function Bf(r){let e=0;return ve(r,"_").every(i=>(i=K(i),i.startsWith("var(")?!0:Zv.has(i)||Nr(i)||Mr(i)?(e++,!0):!1))?e>0:!1}function Ff(r){let e=0;return ve(r,",").every(i=>(i=K(i),i.startsWith("var(")?!0:i.includes(" ")&&!/(['"])([^"']+)\1/g.test(i)||/^\d/g.test(i)?!1:(e++,!0)))?e>0:!1}function jf(r){return ex.has(r)}function zf(r){return tx.has(r)}function Uf(r){return rx.has(r)}var Hv,Wv,Qv,Yv,Kv,Xv,Zv,ex,tx,rx,Br=R(()=>{u();Ws();Gs();zt();Hv=["min","max","clamp","calc"];Wv=new Set(["scroll-timeline-name","timeline-scope","view-timeline-name","font-palette","anchor-name","anchor-scope","position-anchor","position-try-options","scroll-timeline","animation-timeline","view-timeline","position-try"]);Qv=["cm","mm","Q","in","pc","pt","px","em","ex","ch","rem","lh","rlh","vw","vh","vmin","vmax","vb","vi","svw","svh","lvw","lvh","dvw","dvh","cqw","cqh","cqi","cqb","cqmin","cqmax"],Yv=`(?:${Qv.join("|")})`;Kv=new Set(["thin","medium","thick"]);Xv=new Set(["conic-gradient","linear-gradient","radial-gradient","repeating-conic-gradient","repeating-linear-gradient","repeating-radial-gradient"]);Zv=new Set(["center","top","right","bottom","left"]);ex=new Set(["serif","sans-serif","monospace","cursive","fantasy","system-ui","ui-serif","ui-sans-serif","ui-monospace","ui-rounded","math","emoji","fangsong"]);tx=new Set(["xx-small","x-small","small","medium","large","x-large","xx-large","xxx-large"]);rx=new Set(["larger","smaller"])});function Vf(r){let e=["cover","contain"];return ve(r,",").every(t=>{let i=ve(t,"_").filter(Boolean);return i.length===1&&e.includes(i[0])?!0:i.length!==1&&i.length!==2?!1:i.every(n=>Nr(n)||Mr(n)||n==="auto")})}var Hf=R(()=>{u();Br();zt()});function 
Wf(r,e){r.walkClasses(t=>{t.value=e(t.value),t.raws&&t.raws.value&&(t.raws.value=jt(t.raws.value))})}function Gf(r,e){if(!_t(r))return;let t=r.slice(1,-1);if(!!e(t))return K(t)}function ix(r,e={},t){let i=e[r];if(i!==void 0)return xt(i);if(_t(r)){let n=Gf(r,t);return n===void 0?void 0:xt(n)}}function Zi(r,e={},{validate:t=()=>!0}={}){let i=e.values?.[r];return i!==void 0?i:e.supportsNegativeValues&&r.startsWith("-")?ix(r.slice(1),e.values,t):Gf(r,t)}function _t(r){return r.startsWith("[")&&r.endsWith("]")}function Qf(r){let e=r.lastIndexOf("/"),t=r.lastIndexOf("[",e),i=r.indexOf("]",e);return r[e-1]==="]"||r[e+1]==="["||t!==-1&&i!==-1&&t")){let e=r;return({opacityValue:t=1})=>e.replace(//g,t)}return r}function Yf(r){return K(r.slice(1,-1))}function nx(r,e={},{tailwindConfig:t={}}={}){if(e.values?.[r]!==void 0)return Xt(e.values?.[r]);let[i,n]=Qf(r);if(n!==void 0){let a=e.values?.[i]??(_t(i)?i.slice(1,-1):void 0);return a===void 0?void 0:(a=Xt(a),_t(n)?Ze(a,Yf(n)):t.theme?.opacity?.[n]===void 0?void 0:Ze(a,t.theme.opacity[n]))}return Zi(r,e,{validate:Mf})}function sx(r,e={}){return e.values?.[r]}function qe(r){return(e,t)=>Zi(e,t,{validate:r})}function ax(r,e){let t=r.indexOf(e);return t===-1?[void 0,r]:[r.slice(0,t),r.slice(t+1)]}function Js(r,e,t,i){if(t.values&&e in t.values)for(let{type:a}of r??[]){let s=Xs[a](e,t,{tailwindConfig:i});if(s!==void 0)return[s,a,null]}if(_t(e)){let a=e.slice(1,-1),[s,o]=ax(a,":");if(!/^[\w-_]+$/g.test(s))o=a;else if(s!==void 0&&!Kf.includes(s))return[];if(o.length>0&&Kf.includes(s))return[Zi(`[${o}]`,t),s,null]}let n=Zs(r,e,t,i);for(let a of n)return a;return[]}function*Zs(r,e,t,i){let n=we(i,"generalizedModifiers"),[a,s]=Qf(e);if(n&&t.modifiers!=null&&(t.modifiers==="any"||typeof t.modifiers=="object"&&(s&&_t(s)||s in t.modifiers))||(a=e,s=void 0),s!==void 0&&a===""&&(a="DEFAULT"),s!==void 0&&typeof t.modifiers=="object"){let l=t.modifiers?.[s]??null;l!==null?s=l:_t(s)&&(s=Yf(s))}for(let{type:l}of r??[]){let c=Xs[l](a,t,{tailwindConfig:i});c!==void 0&&(yield[c,l,s??null])}}var Xs,Kf,Fr=R(()=>{u();Ki();Lr();Br();Hi();Hf();ct();Xs={any:Zi,color:nx,url:qe(Ys),image:qe(Nf),length:qe(Nr),percentage:qe(Mr),position:qe(Bf),lookup:sx,"generic-name":qe(jf),"family-name":qe(Ff),number:qe(Ks),"line-width":qe($f),"absolute-size":qe(zf),"relative-size":qe(Uf),shadow:qe(Lf),size:qe(Vf)},Kf=Object.keys(Xs)});function X(r){return typeof r=="function"?r({}):r}var ea=R(()=>{u()});function Jt(r){return typeof r=="function"}function jr(r,...e){let t=e.pop();for(let i of e)for(let n in i){let a=t(r[n],i[n]);a===void 0?ke(r[n])&&ke(i[n])?r[n]=jr({},r[n],i[n],t):r[n]=i[n]:r[n]=a}return r}function ox(r,...e){return Jt(r)?r(...e):r}function lx(r){return r.reduce((e,{extend:t})=>jr(e,t,(i,n)=>i===void 0?[n]:Array.isArray(i)?[n,...i]:[n,i]),{})}function ux(r){return{...r.reduce((e,t)=>Us(e,t),{}),extend:lx(r)}}function Xf(r,e){if(Array.isArray(r)&&ke(r[0]))return r.concat(e);if(Array.isArray(e)&&ke(e[0])&&ke(r))return[r,...e];if(Array.isArray(e))return e}function fx({extend:r,...e}){return jr(e,r,(t,i)=>!Jt(t)&&!i.some(Jt)?jr({},t,...i,Xf):(n,a)=>jr({},...[t,...i].map(s=>ox(s,n,a)),Xf))}function*cx(r){let e=kt(r);if(e.length===0||(yield e,Array.isArray(r)))return;let t=/^(.*?)\s*\/\s*([^/]+)$/,i=r.match(t);if(i!==null){let[,n,a]=i,s=kt(n);s.alpha=a,yield s}}function px(r){let e=(t,i)=>{for(let n of cx(t)){let a=0,s=r;for(;s!=null&&a(t[i]=Jt(r[i])?r[i](e,ta):r[i],t),{})}function Jf(r){let e=[];return r.forEach(t=>{e=[...e,t];let 
i=t?.plugins??[];i.length!==0&&i.forEach(n=>{n.__isOptionsFunction&&(n=n()),e=[...e,...Jf([n?.config??{}])]})}),e}function dx(r){return[...r].reduceRight((t,i)=>Jt(i)?i({corePlugins:t}):vf(i,t),bf)}function hx(r){return[...r].reduceRight((t,i)=>[...t,...i],[])}function ra(r){let e=[...Jf(r),{prefix:"",important:!1,separator:":"}];return Tf(Us({theme:px(fx(ux(e.map(t=>t?.theme??{})))),corePlugins:dx(e.map(t=>t.corePlugins)),plugins:hx(r.map(t=>t?.plugins??[]))},...e))}var ta,Zf=R(()=>{u();Hi();wf();xf();zs();Cf();Gi();Rf();Kt();Yi();Fr();Lr();ea();ta={colors:js,negative(r){return Object.keys(r).filter(e=>r[e]!=="0").reduce((e,t)=>{let i=xt(r[t]);return i!==void 0&&(e[`-${t}`]=i),e},{})},breakpoints(r){return Object.keys(r).filter(e=>typeof r[e]=="string").reduce((e,t)=>({...e,[`screen-${t}`]:r[t]}),{})}}});var en=x((l3,ec)=>{u();ec.exports={content:[],presets:[],darkMode:"media",theme:{accentColor:({theme:r})=>({...r("colors"),auto:"auto"}),animation:{none:"none",spin:"spin 1s linear infinite",ping:"ping 1s cubic-bezier(0, 0, 0.2, 1) infinite",pulse:"pulse 2s cubic-bezier(0.4, 0, 0.6, 1) infinite",bounce:"bounce 1s infinite"},aria:{busy:'busy="true"',checked:'checked="true"',disabled:'disabled="true"',expanded:'expanded="true"',hidden:'hidden="true"',pressed:'pressed="true"',readonly:'readonly="true"',required:'required="true"',selected:'selected="true"'},aspectRatio:{auto:"auto",square:"1 / 1",video:"16 / 9"},backdropBlur:({theme:r})=>r("blur"),backdropBrightness:({theme:r})=>r("brightness"),backdropContrast:({theme:r})=>r("contrast"),backdropGrayscale:({theme:r})=>r("grayscale"),backdropHueRotate:({theme:r})=>r("hueRotate"),backdropInvert:({theme:r})=>r("invert"),backdropOpacity:({theme:r})=>r("opacity"),backdropSaturate:({theme:r})=>r("saturate"),backdropSepia:({theme:r})=>r("sepia"),backgroundColor:({theme:r})=>r("colors"),backgroundImage:{none:"none","gradient-to-t":"linear-gradient(to top, var(--tw-gradient-stops))","gradient-to-tr":"linear-gradient(to top right, var(--tw-gradient-stops))","gradient-to-r":"linear-gradient(to right, var(--tw-gradient-stops))","gradient-to-br":"linear-gradient(to bottom right, var(--tw-gradient-stops))","gradient-to-b":"linear-gradient(to bottom, var(--tw-gradient-stops))","gradient-to-bl":"linear-gradient(to bottom left, var(--tw-gradient-stops))","gradient-to-l":"linear-gradient(to left, var(--tw-gradient-stops))","gradient-to-tl":"linear-gradient(to top left, var(--tw-gradient-stops))"},backgroundOpacity:({theme:r})=>r("opacity"),backgroundPosition:{bottom:"bottom",center:"center",left:"left","left-bottom":"left bottom","left-top":"left top",right:"right","right-bottom":"right bottom","right-top":"right top",top:"top"},backgroundSize:{auto:"auto",cover:"cover",contain:"contain"},blur:{0:"0",none:"",sm:"4px",DEFAULT:"8px",md:"12px",lg:"16px",xl:"24px","2xl":"40px","3xl":"64px"},borderColor:({theme:r})=>({...r("colors"),DEFAULT:r("colors.gray.200","currentColor")}),borderOpacity:({theme:r})=>r("opacity"),borderRadius:{none:"0px",sm:"0.125rem",DEFAULT:"0.25rem",md:"0.375rem",lg:"0.5rem",xl:"0.75rem","2xl":"1rem","3xl":"1.5rem",full:"9999px"},borderSpacing:({theme:r})=>({...r("spacing")}),borderWidth:{DEFAULT:"1px",0:"0px",2:"2px",4:"4px",8:"8px"},boxShadow:{sm:"0 1px 2px 0 rgb(0 0 0 / 0.05)",DEFAULT:"0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)",md:"0 4px 6px -1px rgb(0 0 0 / 0.1), 0 2px 4px -2px rgb(0 0 0 / 0.1)",lg:"0 10px 15px -3px rgb(0 0 0 / 0.1), 0 4px 6px -4px rgb(0 0 0 / 0.1)",xl:"0 20px 25px -5px rgb(0 0 0 / 0.1), 0 8px 10px 
-6px rgb(0 0 0 / 0.1)","2xl":"0 25px 50px -12px rgb(0 0 0 / 0.25)",inner:"inset 0 2px 4px 0 rgb(0 0 0 / 0.05)",none:"none"},boxShadowColor:({theme:r})=>r("colors"),brightness:{0:"0",50:".5",75:".75",90:".9",95:".95",100:"1",105:"1.05",110:"1.1",125:"1.25",150:"1.5",200:"2"},caretColor:({theme:r})=>r("colors"),colors:({colors:r})=>({inherit:r.inherit,current:r.current,transparent:r.transparent,black:r.black,white:r.white,slate:r.slate,gray:r.gray,zinc:r.zinc,neutral:r.neutral,stone:r.stone,red:r.red,orange:r.orange,amber:r.amber,yellow:r.yellow,lime:r.lime,green:r.green,emerald:r.emerald,teal:r.teal,cyan:r.cyan,sky:r.sky,blue:r.blue,indigo:r.indigo,violet:r.violet,purple:r.purple,fuchsia:r.fuchsia,pink:r.pink,rose:r.rose}),columns:{auto:"auto",1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",10:"10",11:"11",12:"12","3xs":"16rem","2xs":"18rem",xs:"20rem",sm:"24rem",md:"28rem",lg:"32rem",xl:"36rem","2xl":"42rem","3xl":"48rem","4xl":"56rem","5xl":"64rem","6xl":"72rem","7xl":"80rem"},container:{},content:{none:"none"},contrast:{0:"0",50:".5",75:".75",100:"1",125:"1.25",150:"1.5",200:"2"},cursor:{auto:"auto",default:"default",pointer:"pointer",wait:"wait",text:"text",move:"move",help:"help","not-allowed":"not-allowed",none:"none","context-menu":"context-menu",progress:"progress",cell:"cell",crosshair:"crosshair","vertical-text":"vertical-text",alias:"alias",copy:"copy","no-drop":"no-drop",grab:"grab",grabbing:"grabbing","all-scroll":"all-scroll","col-resize":"col-resize","row-resize":"row-resize","n-resize":"n-resize","e-resize":"e-resize","s-resize":"s-resize","w-resize":"w-resize","ne-resize":"ne-resize","nw-resize":"nw-resize","se-resize":"se-resize","sw-resize":"sw-resize","ew-resize":"ew-resize","ns-resize":"ns-resize","nesw-resize":"nesw-resize","nwse-resize":"nwse-resize","zoom-in":"zoom-in","zoom-out":"zoom-out"},divideColor:({theme:r})=>r("borderColor"),divideOpacity:({theme:r})=>r("borderOpacity"),divideWidth:({theme:r})=>r("borderWidth"),dropShadow:{sm:"0 1px 1px rgb(0 0 0 / 0.05)",DEFAULT:["0 1px 2px rgb(0 0 0 / 0.1)","0 1px 1px rgb(0 0 0 / 0.06)"],md:["0 4px 3px rgb(0 0 0 / 0.07)","0 2px 2px rgb(0 0 0 / 0.06)"],lg:["0 10px 8px rgb(0 0 0 / 0.04)","0 4px 3px rgb(0 0 0 / 0.1)"],xl:["0 20px 13px rgb(0 0 0 / 0.03)","0 8px 5px rgb(0 0 0 / 0.08)"],"2xl":"0 25px 25px rgb(0 0 0 / 0.15)",none:"0 0 #0000"},fill:({theme:r})=>({none:"none",...r("colors")}),flex:{1:"1 1 0%",auto:"1 1 auto",initial:"0 1 auto",none:"none"},flexBasis:({theme:r})=>({auto:"auto",...r("spacing"),"1/2":"50%","1/3":"33.333333%","2/3":"66.666667%","1/4":"25%","2/4":"50%","3/4":"75%","1/5":"20%","2/5":"40%","3/5":"60%","4/5":"80%","1/6":"16.666667%","2/6":"33.333333%","3/6":"50%","4/6":"66.666667%","5/6":"83.333333%","1/12":"8.333333%","2/12":"16.666667%","3/12":"25%","4/12":"33.333333%","5/12":"41.666667%","6/12":"50%","7/12":"58.333333%","8/12":"66.666667%","9/12":"75%","10/12":"83.333333%","11/12":"91.666667%",full:"100%"}),flexGrow:{0:"0",DEFAULT:"1"},flexShrink:{0:"0",DEFAULT:"1"},fontFamily:{sans:["ui-sans-serif","system-ui","sans-serif",'"Apple Color Emoji"','"Segoe UI Emoji"','"Segoe UI Symbol"','"Noto Color Emoji"'],serif:["ui-serif","Georgia","Cambria",'"Times New Roman"',"Times","serif"],mono:["ui-monospace","SFMono-Regular","Menlo","Monaco","Consolas",'"Liberation Mono"','"Courier 
New"',"monospace"]},fontSize:{xs:["0.75rem",{lineHeight:"1rem"}],sm:["0.875rem",{lineHeight:"1.25rem"}],base:["1rem",{lineHeight:"1.5rem"}],lg:["1.125rem",{lineHeight:"1.75rem"}],xl:["1.25rem",{lineHeight:"1.75rem"}],"2xl":["1.5rem",{lineHeight:"2rem"}],"3xl":["1.875rem",{lineHeight:"2.25rem"}],"4xl":["2.25rem",{lineHeight:"2.5rem"}],"5xl":["3rem",{lineHeight:"1"}],"6xl":["3.75rem",{lineHeight:"1"}],"7xl":["4.5rem",{lineHeight:"1"}],"8xl":["6rem",{lineHeight:"1"}],"9xl":["8rem",{lineHeight:"1"}]},fontWeight:{thin:"100",extralight:"200",light:"300",normal:"400",medium:"500",semibold:"600",bold:"700",extrabold:"800",black:"900"},gap:({theme:r})=>r("spacing"),gradientColorStops:({theme:r})=>r("colors"),gradientColorStopPositions:{"0%":"0%","5%":"5%","10%":"10%","15%":"15%","20%":"20%","25%":"25%","30%":"30%","35%":"35%","40%":"40%","45%":"45%","50%":"50%","55%":"55%","60%":"60%","65%":"65%","70%":"70%","75%":"75%","80%":"80%","85%":"85%","90%":"90%","95%":"95%","100%":"100%"},grayscale:{0:"0",DEFAULT:"100%"},gridAutoColumns:{auto:"auto",min:"min-content",max:"max-content",fr:"minmax(0, 1fr)"},gridAutoRows:{auto:"auto",min:"min-content",max:"max-content",fr:"minmax(0, 1fr)"},gridColumn:{auto:"auto","span-1":"span 1 / span 1","span-2":"span 2 / span 2","span-3":"span 3 / span 3","span-4":"span 4 / span 4","span-5":"span 5 / span 5","span-6":"span 6 / span 6","span-7":"span 7 / span 7","span-8":"span 8 / span 8","span-9":"span 9 / span 9","span-10":"span 10 / span 10","span-11":"span 11 / span 11","span-12":"span 12 / span 12","span-full":"1 / -1"},gridColumnEnd:{auto:"auto",1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",10:"10",11:"11",12:"12",13:"13"},gridColumnStart:{auto:"auto",1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",10:"10",11:"11",12:"12",13:"13"},gridRow:{auto:"auto","span-1":"span 1 / span 1","span-2":"span 2 / span 2","span-3":"span 3 / span 3","span-4":"span 4 / span 4","span-5":"span 5 / span 5","span-6":"span 6 / span 6","span-7":"span 7 / span 7","span-8":"span 8 / span 8","span-9":"span 9 / span 9","span-10":"span 10 / span 10","span-11":"span 11 / span 11","span-12":"span 12 / span 12","span-full":"1 / -1"},gridRowEnd:{auto:"auto",1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",10:"10",11:"11",12:"12",13:"13"},gridRowStart:{auto:"auto",1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",10:"10",11:"11",12:"12",13:"13"},gridTemplateColumns:{none:"none",subgrid:"subgrid",1:"repeat(1, minmax(0, 1fr))",2:"repeat(2, minmax(0, 1fr))",3:"repeat(3, minmax(0, 1fr))",4:"repeat(4, minmax(0, 1fr))",5:"repeat(5, minmax(0, 1fr))",6:"repeat(6, minmax(0, 1fr))",7:"repeat(7, minmax(0, 1fr))",8:"repeat(8, minmax(0, 1fr))",9:"repeat(9, minmax(0, 1fr))",10:"repeat(10, minmax(0, 1fr))",11:"repeat(11, minmax(0, 1fr))",12:"repeat(12, minmax(0, 1fr))"},gridTemplateRows:{none:"none",subgrid:"subgrid",1:"repeat(1, minmax(0, 1fr))",2:"repeat(2, minmax(0, 1fr))",3:"repeat(3, minmax(0, 1fr))",4:"repeat(4, minmax(0, 1fr))",5:"repeat(5, minmax(0, 1fr))",6:"repeat(6, minmax(0, 1fr))",7:"repeat(7, minmax(0, 1fr))",8:"repeat(8, minmax(0, 1fr))",9:"repeat(9, minmax(0, 1fr))",10:"repeat(10, minmax(0, 1fr))",11:"repeat(11, minmax(0, 1fr))",12:"repeat(12, minmax(0, 
1fr))"},height:({theme:r})=>({auto:"auto",...r("spacing"),"1/2":"50%","1/3":"33.333333%","2/3":"66.666667%","1/4":"25%","2/4":"50%","3/4":"75%","1/5":"20%","2/5":"40%","3/5":"60%","4/5":"80%","1/6":"16.666667%","2/6":"33.333333%","3/6":"50%","4/6":"66.666667%","5/6":"83.333333%",full:"100%",screen:"100vh",svh:"100svh",lvh:"100lvh",dvh:"100dvh",min:"min-content",max:"max-content",fit:"fit-content"}),hueRotate:{0:"0deg",15:"15deg",30:"30deg",60:"60deg",90:"90deg",180:"180deg"},inset:({theme:r})=>({auto:"auto",...r("spacing"),"1/2":"50%","1/3":"33.333333%","2/3":"66.666667%","1/4":"25%","2/4":"50%","3/4":"75%",full:"100%"}),invert:{0:"0",DEFAULT:"100%"},keyframes:{spin:{to:{transform:"rotate(360deg)"}},ping:{"75%, 100%":{transform:"scale(2)",opacity:"0"}},pulse:{"50%":{opacity:".5"}},bounce:{"0%, 100%":{transform:"translateY(-25%)",animationTimingFunction:"cubic-bezier(0.8,0,1,1)"},"50%":{transform:"none",animationTimingFunction:"cubic-bezier(0,0,0.2,1)"}}},letterSpacing:{tighter:"-0.05em",tight:"-0.025em",normal:"0em",wide:"0.025em",wider:"0.05em",widest:"0.1em"},lineHeight:{none:"1",tight:"1.25",snug:"1.375",normal:"1.5",relaxed:"1.625",loose:"2",3:".75rem",4:"1rem",5:"1.25rem",6:"1.5rem",7:"1.75rem",8:"2rem",9:"2.25rem",10:"2.5rem"},listStyleType:{none:"none",disc:"disc",decimal:"decimal"},listStyleImage:{none:"none"},margin:({theme:r})=>({auto:"auto",...r("spacing")}),lineClamp:{1:"1",2:"2",3:"3",4:"4",5:"5",6:"6"},maxHeight:({theme:r})=>({...r("spacing"),none:"none",full:"100%",screen:"100vh",svh:"100svh",lvh:"100lvh",dvh:"100dvh",min:"min-content",max:"max-content",fit:"fit-content"}),maxWidth:({theme:r,breakpoints:e})=>({...r("spacing"),none:"none",xs:"20rem",sm:"24rem",md:"28rem",lg:"32rem",xl:"36rem","2xl":"42rem","3xl":"48rem","4xl":"56rem","5xl":"64rem","6xl":"72rem","7xl":"80rem",full:"100%",min:"min-content",max:"max-content",fit:"fit-content",prose:"65ch",...e(r("screens"))}),minHeight:({theme:r})=>({...r("spacing"),full:"100%",screen:"100vh",svh:"100svh",lvh:"100lvh",dvh:"100dvh",min:"min-content",max:"max-content",fit:"fit-content"}),minWidth:({theme:r})=>({...r("spacing"),full:"100%",min:"min-content",max:"max-content",fit:"fit-content"}),objectPosition:{bottom:"bottom",center:"center",left:"left","left-bottom":"left bottom","left-top":"left top",right:"right","right-bottom":"right bottom","right-top":"right 
top",top:"top"},opacity:{0:"0",5:"0.05",10:"0.1",15:"0.15",20:"0.2",25:"0.25",30:"0.3",35:"0.35",40:"0.4",45:"0.45",50:"0.5",55:"0.55",60:"0.6",65:"0.65",70:"0.7",75:"0.75",80:"0.8",85:"0.85",90:"0.9",95:"0.95",100:"1"},order:{first:"-9999",last:"9999",none:"0",1:"1",2:"2",3:"3",4:"4",5:"5",6:"6",7:"7",8:"8",9:"9",10:"10",11:"11",12:"12"},outlineColor:({theme:r})=>r("colors"),outlineOffset:{0:"0px",1:"1px",2:"2px",4:"4px",8:"8px"},outlineWidth:{0:"0px",1:"1px",2:"2px",4:"4px",8:"8px"},padding:({theme:r})=>r("spacing"),placeholderColor:({theme:r})=>r("colors"),placeholderOpacity:({theme:r})=>r("opacity"),ringColor:({theme:r})=>({DEFAULT:r("colors.blue.500","#3b82f6"),...r("colors")}),ringOffsetColor:({theme:r})=>r("colors"),ringOffsetWidth:{0:"0px",1:"1px",2:"2px",4:"4px",8:"8px"},ringOpacity:({theme:r})=>({DEFAULT:"0.5",...r("opacity")}),ringWidth:{DEFAULT:"3px",0:"0px",1:"1px",2:"2px",4:"4px",8:"8px"},rotate:{0:"0deg",1:"1deg",2:"2deg",3:"3deg",6:"6deg",12:"12deg",45:"45deg",90:"90deg",180:"180deg"},saturate:{0:"0",50:".5",100:"1",150:"1.5",200:"2"},scale:{0:"0",50:".5",75:".75",90:".9",95:".95",100:"1",105:"1.05",110:"1.1",125:"1.25",150:"1.5"},screens:{sm:"640px",md:"768px",lg:"1024px",xl:"1280px","2xl":"1536px"},scrollMargin:({theme:r})=>({...r("spacing")}),scrollPadding:({theme:r})=>r("spacing"),sepia:{0:"0",DEFAULT:"100%"},skew:{0:"0deg",1:"1deg",2:"2deg",3:"3deg",6:"6deg",12:"12deg"},space:({theme:r})=>({...r("spacing")}),spacing:{px:"1px",0:"0px",.5:"0.125rem",1:"0.25rem",1.5:"0.375rem",2:"0.5rem",2.5:"0.625rem",3:"0.75rem",3.5:"0.875rem",4:"1rem",5:"1.25rem",6:"1.5rem",7:"1.75rem",8:"2rem",9:"2.25rem",10:"2.5rem",11:"2.75rem",12:"3rem",14:"3.5rem",16:"4rem",20:"5rem",24:"6rem",28:"7rem",32:"8rem",36:"9rem",40:"10rem",44:"11rem",48:"12rem",52:"13rem",56:"14rem",60:"15rem",64:"16rem",72:"18rem",80:"20rem",96:"24rem"},stroke:({theme:r})=>({none:"none",...r("colors")}),strokeWidth:{0:"0",1:"1",2:"2"},supports:{},data:{},textColor:({theme:r})=>r("colors"),textDecorationColor:({theme:r})=>r("colors"),textDecorationThickness:{auto:"auto","from-font":"from-font",0:"0px",1:"1px",2:"2px",4:"4px",8:"8px"},textIndent:({theme:r})=>({...r("spacing")}),textOpacity:({theme:r})=>r("opacity"),textUnderlineOffset:{auto:"auto",0:"0px",1:"1px",2:"2px",4:"4px",8:"8px"},transformOrigin:{center:"center",top:"top","top-right":"top right",right:"right","bottom-right":"bottom right",bottom:"bottom","bottom-left":"bottom left",left:"left","top-left":"top left"},transitionDelay:{0:"0s",75:"75ms",100:"100ms",150:"150ms",200:"200ms",300:"300ms",500:"500ms",700:"700ms",1e3:"1000ms"},transitionDuration:{DEFAULT:"150ms",0:"0s",75:"75ms",100:"100ms",150:"150ms",200:"200ms",300:"300ms",500:"500ms",700:"700ms",1e3:"1000ms"},transitionProperty:{none:"none",all:"all",DEFAULT:"color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, backdrop-filter",colors:"color, background-color, border-color, text-decoration-color, fill, stroke",opacity:"opacity",shadow:"box-shadow",transform:"transform"},transitionTimingFunction:{DEFAULT:"cubic-bezier(0.4, 0, 0.2, 1)",linear:"linear",in:"cubic-bezier(0.4, 0, 1, 1)",out:"cubic-bezier(0, 0, 0.2, 1)","in-out":"cubic-bezier(0.4, 0, 0.2, 
1)"},translate:({theme:r})=>({...r("spacing"),"1/2":"50%","1/3":"33.333333%","2/3":"66.666667%","1/4":"25%","2/4":"50%","3/4":"75%",full:"100%"}),size:({theme:r})=>({auto:"auto",...r("spacing"),"1/2":"50%","1/3":"33.333333%","2/3":"66.666667%","1/4":"25%","2/4":"50%","3/4":"75%","1/5":"20%","2/5":"40%","3/5":"60%","4/5":"80%","1/6":"16.666667%","2/6":"33.333333%","3/6":"50%","4/6":"66.666667%","5/6":"83.333333%","1/12":"8.333333%","2/12":"16.666667%","3/12":"25%","4/12":"33.333333%","5/12":"41.666667%","6/12":"50%","7/12":"58.333333%","8/12":"66.666667%","9/12":"75%","10/12":"83.333333%","11/12":"91.666667%",full:"100%",min:"min-content",max:"max-content",fit:"fit-content"}),width:({theme:r})=>({auto:"auto",...r("spacing"),"1/2":"50%","1/3":"33.333333%","2/3":"66.666667%","1/4":"25%","2/4":"50%","3/4":"75%","1/5":"20%","2/5":"40%","3/5":"60%","4/5":"80%","1/6":"16.666667%","2/6":"33.333333%","3/6":"50%","4/6":"66.666667%","5/6":"83.333333%","1/12":"8.333333%","2/12":"16.666667%","3/12":"25%","4/12":"33.333333%","5/12":"41.666667%","6/12":"50%","7/12":"58.333333%","8/12":"66.666667%","9/12":"75%","10/12":"83.333333%","11/12":"91.666667%",full:"100%",screen:"100vw",svw:"100svw",lvw:"100lvw",dvw:"100dvw",min:"min-content",max:"max-content",fit:"fit-content"}),willChange:{auto:"auto",scroll:"scroll-position",contents:"contents",transform:"transform"},zIndex:{auto:"auto",0:"0",10:"10",20:"20",30:"30",40:"40",50:"50"}},plugins:[]}});function tn(r){let e=(r?.presets??[tc.default]).slice().reverse().flatMap(n=>tn(n instanceof Function?n():n)),t={respectDefaultRingColorOpacity:{theme:{ringColor:({theme:n})=>({DEFAULT:"#3b82f67f",...n("colors")})}},disableColorOpacityUtilitiesByDefault:{corePlugins:{backgroundOpacity:!1,borderOpacity:!1,divideOpacity:!1,placeholderOpacity:!1,ringOpacity:!1,textOpacity:!1}}},i=Object.keys(t).filter(n=>we(r,n)).map(n=>t[n]);return[r,...i,...e]}var tc,rc=R(()=>{u();tc=pe(en());ct()});var ic={};Ge(ic,{default:()=>zr});function zr(...r){let[,...e]=tn(r[0]);return ra([...r,...e])}var ia=R(()=>{u();Zf();rc()});var Ur={};Ge(Ur,{default:()=>me});var me,et=R(()=>{u();me={resolve:r=>r,extname:r=>"."+r.split(".").pop()}});function rn(r){return typeof r=="object"&&r!==null}function gx(r){return Object.keys(r).length===0}function nc(r){return typeof r=="string"||r instanceof String}function na(r){return rn(r)&&r.config===void 0&&!gx(r)?null:rn(r)&&r.config!==void 0&&nc(r.config)?me.resolve(r.config):rn(r)&&r.config!==void 0&&rn(r.config)?null:nc(r)?me.resolve(r):yx()}function yx(){for(let r of mx)try{let e=me.resolve(r);return be.accessSync(e),e}catch(e){}return null}var mx,sc=R(()=>{u();ft();et();mx=["./tailwind.config.js","./tailwind.config.cjs","./tailwind.config.mjs","./tailwind.config.ts","./tailwind.config.cts","./tailwind.config.mts"]});var ac={};Ge(ac,{default:()=>sa});var sa,aa=R(()=>{u();sa={parse:r=>({href:r})}});var oa=x(()=>{u()});var nn=x((b3,uc)=>{u();"use strict";var oc=(Wi(),kf),lc=oa(),Zt=class extends Error{constructor(e,t,i,n,a,s){super(e);this.name="CssSyntaxError",this.reason=e,a&&(this.file=a),n&&(this.source=n),s&&(this.plugin=s),typeof t!="undefined"&&typeof i!="undefined"&&(typeof t=="number"?(this.line=t,this.column=i):(this.line=t.line,this.column=t.column,this.endLine=i.line,this.endColumn=i.column)),this.setMessage(),Error.captureStackTrace&&Error.captureStackTrace(this,Zt)}setMessage(){this.message=this.plugin?this.plugin+": ":"",this.message+=this.file?this.file:"",typeof 
this.line!="undefined"&&(this.message+=":"+this.line+":"+this.column),this.message+=": "+this.reason}showSourceCode(e){if(!this.source)return"";let t=this.source;e==null&&(e=oc.isColorSupported),lc&&e&&(t=lc(t));let i=t.split(/\r?\n/),n=Math.max(this.line-3,0),a=Math.min(this.line+2,i.length),s=String(a).length,o,l;if(e){let{bold:c,red:f,gray:d}=oc.createColors(!0);o=p=>c(f(p)),l=p=>d(p)}else o=l=c=>c;return i.slice(n,a).map((c,f)=>{let d=n+1+f,p=" "+(" "+d).slice(-s)+" | ";if(d===this.line){let h=l(p.replace(/\d/g," "))+c.slice(0,this.column-1).replace(/[^\t]/g," ");return o(">")+l(p)+c+` - `+h+o("^")}return" "+l(p)+c}).join(` -`)}toString(){let e=this.showSourceCode();return e&&(e=` - -`+e+` -`),this.name+": "+this.message+e}};uc.exports=Zt;Zt.default=Zt});var sn=x((w3,la)=>{u();"use strict";la.exports.isClean=Symbol("isClean");la.exports.my=Symbol("my")});var ua=x((v3,cc)=>{u();"use strict";var fc={colon:": ",indent:" ",beforeDecl:` -`,beforeRule:` -`,beforeOpen:" ",beforeClose:` -`,beforeComment:` -`,after:` -`,emptyBody:"",commentLeft:" ",commentRight:" ",semicolon:!1};function bx(r){return r[0].toUpperCase()+r.slice(1)}var an=class{constructor(e){this.builder=e}stringify(e,t){if(!this[e.type])throw new Error("Unknown AST node type "+e.type+". Maybe you need to change PostCSS stringifier.");this[e.type](e,t)}document(e){this.body(e)}root(e){this.body(e),e.raws.after&&this.builder(e.raws.after)}comment(e){let t=this.raw(e,"left","commentLeft"),i=this.raw(e,"right","commentRight");this.builder("/*"+t+e.text+i+"*/",e)}decl(e,t){let i=this.raw(e,"between","colon"),n=e.prop+i+this.rawValue(e,"value");e.important&&(n+=e.raws.important||" !important"),t&&(n+=";"),this.builder(n,e)}rule(e){this.block(e,this.rawValue(e,"selector")),e.raws.ownSemicolon&&this.builder(e.raws.ownSemicolon,e,"end")}atrule(e,t){let i="@"+e.name,n=e.params?this.rawValue(e,"params"):"";if(typeof e.raws.afterName!="undefined"?i+=e.raws.afterName:n&&(i+=" "),e.nodes)this.block(e,i+n);else{let a=(e.raws.between||"")+(t?";":"");this.builder(i+n+a,e)}}body(e){let t=e.nodes.length-1;for(;t>0&&e.nodes[t].type==="comment";)t-=1;let i=this.raw(e,"semicolon");for(let n=0;n{if(n=l.raws[t],typeof n!="undefined")return!1})}return typeof n=="undefined"&&(n=fc[i]),s.rawCache[i]=n,n}rawSemicolon(e){let t;return e.walk(i=>{if(i.nodes&&i.nodes.length&&i.last.type==="decl"&&(t=i.raws.semicolon,typeof t!="undefined"))return!1}),t}rawEmptyBody(e){let t;return e.walk(i=>{if(i.nodes&&i.nodes.length===0&&(t=i.raws.after,typeof t!="undefined"))return!1}),t}rawIndent(e){if(e.raws.indent)return e.raws.indent;let t;return e.walk(i=>{let n=i.parent;if(n&&n!==e&&n.parent&&n.parent===e&&typeof i.raws.before!="undefined"){let a=i.raws.before.split(` -`);return t=a[a.length-1],t=t.replace(/\S/g,""),!1}}),t}rawBeforeComment(e,t){let i;return e.walkComments(n=>{if(typeof n.raws.before!="undefined")return i=n.raws.before,i.includes(` -`)&&(i=i.replace(/[^\n]+$/,"")),!1}),typeof i=="undefined"?i=this.raw(t,null,"beforeDecl"):i&&(i=i.replace(/\S/g,"")),i}rawBeforeDecl(e,t){let i;return e.walkDecls(n=>{if(typeof n.raws.before!="undefined")return i=n.raws.before,i.includes(` -`)&&(i=i.replace(/[^\n]+$/,"")),!1}),typeof i=="undefined"?i=this.raw(t,null,"beforeRule"):i&&(i=i.replace(/\S/g,"")),i}rawBeforeRule(e){let t;return e.walk(i=>{if(i.nodes&&(i.parent!==e||e.first!==i)&&typeof i.raws.before!="undefined")return t=i.raws.before,t.includes(` -`)&&(t=t.replace(/[^\n]+$/,"")),!1}),t&&(t=t.replace(/\S/g,"")),t}rawBeforeClose(e){let t;return 
e.walk(i=>{if(i.nodes&&i.nodes.length>0&&typeof i.raws.after!="undefined")return t=i.raws.after,t.includes(` -`)&&(t=t.replace(/[^\n]+$/,"")),!1}),t&&(t=t.replace(/\S/g,"")),t}rawBeforeOpen(e){let t;return e.walk(i=>{if(i.type!=="decl"&&(t=i.raws.between,typeof t!="undefined"))return!1}),t}rawColon(e){let t;return e.walkDecls(i=>{if(typeof i.raws.between!="undefined")return t=i.raws.between.replace(/[^\s:]/g,""),!1}),t}beforeAfter(e,t){let i;e.type==="decl"?i=this.raw(e,null,"beforeDecl"):e.type==="comment"?i=this.raw(e,null,"beforeComment"):t==="before"?i=this.raw(e,null,"beforeRule"):i=this.raw(e,null,"beforeClose");let n=e.parent,a=0;for(;n&&n.type!=="root";)a+=1,n=n.parent;if(i.includes(` -`)){let s=this.raw(e,null,"indent");if(s.length)for(let o=0;o{u();"use strict";var wx=ua();function fa(r,e){new wx(e).stringify(r)}pc.exports=fa;fa.default=fa});var Hr=x((k3,dc)=>{u();"use strict";var{isClean:on,my:vx}=sn(),xx=nn(),kx=ua(),Sx=Vr();function ca(r,e){let t=new r.constructor;for(let i in r){if(!Object.prototype.hasOwnProperty.call(r,i)||i==="proxyCache")continue;let n=r[i],a=typeof n;i==="parent"&&a==="object"?e&&(t[i]=e):i==="source"?t[i]=n:Array.isArray(n)?t[i]=n.map(s=>ca(s,t)):(a==="object"&&n!==null&&(n=ca(n)),t[i]=n)}return t}var ln=class{constructor(e={}){this.raws={},this[on]=!1,this[vx]=!0;for(let t in e)if(t==="nodes"){this.nodes=[];for(let i of e[t])typeof i.clone=="function"?this.append(i.clone()):this.append(i)}else this[t]=e[t]}error(e,t={}){if(this.source){let{start:i,end:n}=this.rangeBy(t);return this.source.input.error(e,{line:i.line,column:i.column},{line:n.line,column:n.column},t)}return new xx(e)}warn(e,t,i){let n={node:this};for(let a in i)n[a]=i[a];return e.warn(t,n)}remove(){return this.parent&&this.parent.removeChild(this),this.parent=void 0,this}toString(e=Sx){e.stringify&&(e=e.stringify);let t="";return e(this,i=>{t+=i}),t}assign(e={}){for(let t in e)this[t]=e[t];return this}clone(e={}){let t=ca(this);for(let i in e)t[i]=e[i];return t}cloneBefore(e={}){let t=this.clone(e);return this.parent.insertBefore(this,t),t}cloneAfter(e={}){let t=this.clone(e);return this.parent.insertAfter(this,t),t}replaceWith(...e){if(this.parent){let t=this,i=!1;for(let n of e)n===this?i=!0:i?(this.parent.insertAfter(t,n),t=n):this.parent.insertBefore(t,n);i||this.remove()}return this}next(){if(!this.parent)return;let e=this.parent.index(this);return this.parent.nodes[e+1]}prev(){if(!this.parent)return;let e=this.parent.index(this);return this.parent.nodes[e-1]}before(e){return this.parent.insertBefore(this,e),this}after(e){return this.parent.insertAfter(this,e),this}root(){let e=this;for(;e.parent&&e.parent.type!=="document";)e=e.parent;return e}raw(e,t){return new kx().raw(this,e,t)}cleanRaws(e){delete this.raws.before,delete this.raws.after,e||delete this.raws.between}toJSON(e,t){let i={},n=t==null;t=t||new Map;let a=0;for(let s in this){if(!Object.prototype.hasOwnProperty.call(this,s)||s==="parent"||s==="proxyCache")continue;let o=this[s];if(Array.isArray(o))i[s]=o.map(l=>typeof l=="object"&&l.toJSON?l.toJSON(null,t):l);else if(typeof o=="object"&&o.toJSON)i[s]=o.toJSON(null,t);else if(s==="source"){let l=t.get(o.input);l==null&&(l=a,t.set(o.input,a),a++),i[s]={inputId:l,start:o.start,end:o.end}}else i[s]=o}return n&&(i.inputs=[...t.keys()].map(s=>s.toJSON())),i}positionInside(e){let t=this.toString(),i=this.source.start.column,n=this.source.start.line;for(let a=0;ae.root().toProxy():e[t]}}}toProxy(){return this.proxyCache||(this.proxyCache=new 
Proxy(this,this.getProxyProcessor())),this.proxyCache}addToError(e){if(e.postcssNode=this,e.stack&&this.source&&/\n\s{4}at /.test(e.stack)){let t=this.source;e.stack=e.stack.replace(/\n\s{4}at /,`$&${t.input.from}:${t.start.line}:${t.start.column}$&`)}return e}markDirty(){if(this[on]){this[on]=!1;let e=this;for(;e=e.parent;)e[on]=!1}}get proxyOf(){return this}};dc.exports=ln;ln.default=ln});var Wr=x((S3,hc)=>{u();"use strict";var Ax=Hr(),un=class extends Ax{constructor(e){e&&typeof e.value!="undefined"&&typeof e.value!="string"&&(e={...e,value:String(e.value)});super(e);this.type="decl"}get variable(){return this.prop.startsWith("--")||this.prop[0]==="$"}};hc.exports=un;un.default=un});var pa=x((A3,mc)=>{u();mc.exports=function(r,e){return{generate:()=>{let t="";return r(e,i=>{t+=i}),[t]}}}});var Gr=x((C3,gc)=>{u();"use strict";var Cx=Hr(),fn=class extends Cx{constructor(e){super(e);this.type="comment"}};gc.exports=fn;fn.default=fn});var Et=x((_3,Cc)=>{u();"use strict";var{isClean:yc,my:bc}=sn(),wc=Wr(),vc=Gr(),_x=Hr(),xc,da,ha,kc;function Sc(r){return r.map(e=>(e.nodes&&(e.nodes=Sc(e.nodes)),delete e.source,e))}function Ac(r){if(r[yc]=!1,r.proxyOf.nodes)for(let e of r.proxyOf.nodes)Ac(e)}var Fe=class extends _x{push(e){return e.parent=this,this.proxyOf.nodes.push(e),this}each(e){if(!this.proxyOf.nodes)return;let t=this.getIterator(),i,n;for(;this.indexes[t]{let n;try{n=e(t,i)}catch(a){throw t.addToError(a)}return n!==!1&&t.walk&&(n=t.walk(e)),n})}walkDecls(e,t){return t?e instanceof RegExp?this.walk((i,n)=>{if(i.type==="decl"&&e.test(i.prop))return t(i,n)}):this.walk((i,n)=>{if(i.type==="decl"&&i.prop===e)return t(i,n)}):(t=e,this.walk((i,n)=>{if(i.type==="decl")return t(i,n)}))}walkRules(e,t){return t?e instanceof RegExp?this.walk((i,n)=>{if(i.type==="rule"&&e.test(i.selector))return t(i,n)}):this.walk((i,n)=>{if(i.type==="rule"&&i.selector===e)return t(i,n)}):(t=e,this.walk((i,n)=>{if(i.type==="rule")return t(i,n)}))}walkAtRules(e,t){return t?e instanceof RegExp?this.walk((i,n)=>{if(i.type==="atrule"&&e.test(i.name))return t(i,n)}):this.walk((i,n)=>{if(i.type==="atrule"&&i.name===e)return t(i,n)}):(t=e,this.walk((i,n)=>{if(i.type==="atrule")return t(i,n)}))}walkComments(e){return this.walk((t,i)=>{if(t.type==="comment")return e(t,i)})}append(...e){for(let t of e){let i=this.normalize(t,this.last);for(let n of i)this.proxyOf.nodes.push(n)}return this.markDirty(),this}prepend(...e){e=e.reverse();for(let t of e){let i=this.normalize(t,this.first,"prepend").reverse();for(let n of i)this.proxyOf.nodes.unshift(n);for(let n in this.indexes)this.indexes[n]=this.indexes[n]+i.length}return this.markDirty(),this}cleanRaws(e){if(super.cleanRaws(e),this.nodes)for(let t of this.nodes)t.cleanRaws(e)}insertBefore(e,t){let i=this.index(e),n=i===0?"prepend":!1,a=this.normalize(t,this.proxyOf.nodes[i],n).reverse();i=this.index(e);for(let o of a)this.proxyOf.nodes.splice(i,0,o);let s;for(let o in this.indexes)s=this.indexes[o],i<=s&&(this.indexes[o]=s+a.length);return this.markDirty(),this}insertAfter(e,t){let i=this.index(e),n=this.normalize(t,this.proxyOf.nodes[i]).reverse();i=this.index(e);for(let s of n)this.proxyOf.nodes.splice(i+1,0,s);let a;for(let s in this.indexes)a=this.indexes[s],i=e&&(this.indexes[i]=t-1);return this.markDirty(),this}removeAll(){for(let e of this.proxyOf.nodes)e.parent=void 0;return this.proxyOf.nodes=[],this.markDirty(),this}replaceValues(e,t,i){return 
i||(i=t,t={}),this.walkDecls(n=>{t.props&&!t.props.includes(n.prop)||t.fast&&!n.value.includes(t.fast)||(n.value=n.value.replace(e,i))}),this.markDirty(),this}every(e){return this.nodes.every(e)}some(e){return this.nodes.some(e)}index(e){return typeof e=="number"?e:(e.proxyOf&&(e=e.proxyOf),this.proxyOf.nodes.indexOf(e))}get first(){if(!!this.proxyOf.nodes)return this.proxyOf.nodes[0]}get last(){if(!!this.proxyOf.nodes)return this.proxyOf.nodes[this.proxyOf.nodes.length-1]}normalize(e,t){if(typeof e=="string")e=Sc(xc(e).nodes);else if(Array.isArray(e)){e=e.slice(0);for(let n of e)n.parent&&n.parent.removeChild(n,"ignore")}else if(e.type==="root"&&this.type!=="document"){e=e.nodes.slice(0);for(let n of e)n.parent&&n.parent.removeChild(n,"ignore")}else if(e.type)e=[e];else if(e.prop){if(typeof e.value=="undefined")throw new Error("Value field is missed in node creation");typeof e.value!="string"&&(e.value=String(e.value)),e=[new wc(e)]}else if(e.selector)e=[new da(e)];else if(e.name)e=[new ha(e)];else if(e.text)e=[new vc(e)];else throw new Error("Unknown node type in node creation");return e.map(n=>(n[bc]||Fe.rebuild(n),n=n.proxyOf,n.parent&&n.parent.removeChild(n),n[yc]&&Ac(n),typeof n.raws.before=="undefined"&&t&&typeof t.raws.before!="undefined"&&(n.raws.before=t.raws.before.replace(/\S/g,"")),n.parent=this.proxyOf,n))}getProxyProcessor(){return{set(e,t,i){return e[t]===i||(e[t]=i,(t==="name"||t==="params"||t==="selector")&&e.markDirty()),!0},get(e,t){return t==="proxyOf"?e:e[t]?t==="each"||typeof t=="string"&&t.startsWith("walk")?(...i)=>e[t](...i.map(n=>typeof n=="function"?(a,s)=>n(a.toProxy(),s):n)):t==="every"||t==="some"?i=>e[t]((n,...a)=>i(n.toProxy(),...a)):t==="root"?()=>e.root().toProxy():t==="nodes"?e.nodes.map(i=>i.toProxy()):t==="first"||t==="last"?e[t].toProxy():e[t]:e[t]}}}getIterator(){this.lastEach||(this.lastEach=0),this.indexes||(this.indexes={}),this.lastEach+=1;let e=this.lastEach;return this.indexes[e]=0,e}};Fe.registerParse=r=>{xc=r};Fe.registerRule=r=>{da=r};Fe.registerAtRule=r=>{ha=r};Fe.registerRoot=r=>{kc=r};Cc.exports=Fe;Fe.default=Fe;Fe.rebuild=r=>{r.type==="atrule"?Object.setPrototypeOf(r,ha.prototype):r.type==="rule"?Object.setPrototypeOf(r,da.prototype):r.type==="decl"?Object.setPrototypeOf(r,wc.prototype):r.type==="comment"?Object.setPrototypeOf(r,vc.prototype):r.type==="root"&&Object.setPrototypeOf(r,kc.prototype),r[bc]=!0,r.nodes&&r.nodes.forEach(e=>{Fe.rebuild(e)})}});var cn=x((E3,Oc)=>{u();"use strict";var Ex=Et(),_c,Ec,er=class extends Ex{constructor(e){super({type:"document",...e});this.nodes||(this.nodes=[])}toResult(e={}){return new _c(new Ec,this,e).stringify()}};er.registerLazyResult=r=>{_c=r};er.registerProcessor=r=>{Ec=r};Oc.exports=er;er.default=er});var ma=x((O3,Rc)=>{u();"use strict";var Tc={};Rc.exports=function(e){Tc[e]||(Tc[e]=!0,typeof console!="undefined"&&console.warn&&console.warn(e))}});var ga=x((T3,Pc)=>{u();"use strict";var pn=class{constructor(e,t={}){if(this.type="warning",this.text=e,t.node&&t.node.source){let i=t.node.rangeBy(t);this.line=i.start.line,this.column=i.start.column,this.endLine=i.end.line,this.endColumn=i.end.column}for(let i in t)this[i]=t[i]}toString(){return this.node?this.node.error(this.text,{plugin:this.plugin,index:this.index,word:this.word}).message:this.plugin?this.plugin+": "+this.text:this.text}};Pc.exports=pn;pn.default=pn});var hn=x((R3,Ic)=>{u();"use strict";var Ox=ga(),dn=class{constructor(e,t,i){this.processor=e,this.messages=[],this.root=t,this.opts=i,this.css=void 0,this.map=void 
0}toString(){return this.css}warn(e,t={}){t.plugin||this.lastPlugin&&this.lastPlugin.postcssPlugin&&(t.plugin=this.lastPlugin.postcssPlugin);let i=new Ox(e,t);return this.messages.push(i),i}warnings(){return this.messages.filter(e=>e.type==="warning")}get content(){return this.css}};Ic.exports=dn;dn.default=dn});var Mc=x((P3,Lc)=>{u();"use strict";var ya="'".charCodeAt(0),Dc='"'.charCodeAt(0),mn="\\".charCodeAt(0),qc="/".charCodeAt(0),gn=` -`.charCodeAt(0),Qr=" ".charCodeAt(0),yn="\f".charCodeAt(0),bn=" ".charCodeAt(0),wn="\r".charCodeAt(0),Tx="[".charCodeAt(0),Rx="]".charCodeAt(0),Px="(".charCodeAt(0),Ix=")".charCodeAt(0),Dx="{".charCodeAt(0),qx="}".charCodeAt(0),$x=";".charCodeAt(0),Lx="*".charCodeAt(0),Mx=":".charCodeAt(0),Nx="@".charCodeAt(0),vn=/[\t\n\f\r "#'()/;[\\\]{}]/g,xn=/[\t\n\f\r !"#'():;@[\\\]{}]|\/(?=\*)/g,Bx=/.[\n"'(/\\]/,$c=/[\da-f]/i;Lc.exports=function(e,t={}){let i=e.css.valueOf(),n=t.ignoreErrors,a,s,o,l,c,f,d,p,h,b,v=i.length,y=0,w=[],k=[];function S(){return y}function E(T){throw e.error("Unclosed "+T,y)}function O(){return k.length===0&&y>=v}function B(T){if(k.length)return k.pop();if(y>=v)return;let F=T?T.ignoreUnclosed:!1;switch(a=i.charCodeAt(y),a){case gn:case Qr:case bn:case wn:case yn:{s=y;do s+=1,a=i.charCodeAt(s);while(a===Qr||a===gn||a===bn||a===wn||a===yn);b=["space",i.slice(y,s)],y=s-1;break}case Tx:case Rx:case Dx:case qx:case Mx:case $x:case Ix:{let Y=String.fromCharCode(a);b=[Y,Y,y];break}case Px:{if(p=w.length?w.pop()[1]:"",h=i.charCodeAt(y+1),p==="url"&&h!==ya&&h!==Dc&&h!==Qr&&h!==gn&&h!==bn&&h!==yn&&h!==wn){s=y;do{if(f=!1,s=i.indexOf(")",s+1),s===-1)if(n||F){s=y;break}else E("bracket");for(d=s;i.charCodeAt(d-1)===mn;)d-=1,f=!f}while(f);b=["brackets",i.slice(y,s+1),y,s],y=s}else s=i.indexOf(")",y+1),l=i.slice(y,s+1),s===-1||Bx.test(l)?b=["(","(",y]:(b=["brackets",l,y,s],y=s);break}case ya:case Dc:{o=a===ya?"'":'"',s=y;do{if(f=!1,s=i.indexOf(o,s+1),s===-1)if(n||F){s=y+1;break}else E("string");for(d=s;i.charCodeAt(d-1)===mn;)d-=1,f=!f}while(f);b=["string",i.slice(y,s+1),y,s],y=s;break}case Nx:{vn.lastIndex=y+1,vn.test(i),vn.lastIndex===0?s=i.length-1:s=vn.lastIndex-2,b=["at-word",i.slice(y,s+1),y,s],y=s;break}case mn:{for(s=y,c=!0;i.charCodeAt(s+1)===mn;)s+=1,c=!c;if(a=i.charCodeAt(s+1),c&&a!==qc&&a!==Qr&&a!==gn&&a!==bn&&a!==wn&&a!==yn&&(s+=1,$c.test(i.charAt(s)))){for(;$c.test(i.charAt(s+1));)s+=1;i.charCodeAt(s+1)===Qr&&(s+=1)}b=["word",i.slice(y,s+1),y,s],y=s;break}default:{a===qc&&i.charCodeAt(y+1)===Lx?(s=i.indexOf("*/",y+2)+1,s===0&&(n||F?s=i.length:E("comment")),b=["comment",i.slice(y,s+1),y,s],y=s):(xn.lastIndex=y+1,xn.test(i),xn.lastIndex===0?s=i.length-1:s=xn.lastIndex-2,b=["word",i.slice(y,s+1),y,s],w.push(b),y=s);break}}return y++,b}function N(T){k.push(T)}return{back:N,nextToken:B,endOfFile:O,position:S}}});var kn=x((I3,Bc)=>{u();"use strict";var Nc=Et(),Yr=class extends Nc{constructor(e){super(e);this.type="atrule"}append(...e){return this.proxyOf.nodes||(this.nodes=[]),super.append(...e)}prepend(...e){return this.proxyOf.nodes||(this.nodes=[]),super.prepend(...e)}};Bc.exports=Yr;Yr.default=Yr;Nc.registerAtRule(Yr)});var tr=x((D3,Uc)=>{u();"use strict";var Fc=Et(),jc,zc,Ut=class extends Fc{constructor(e){super(e);this.type="root",this.nodes||(this.nodes=[])}removeChild(e,t){let i=this.index(e);return!t&&i===0&&this.nodes.length>1&&(this.nodes[1].raws.before=this.nodes[i].raws.before),super.removeChild(e)}normalize(e,t,i){let n=super.normalize(e);if(t){if(i==="prepend")this.nodes.length>1?t.raws.before=this.nodes[1].raws.before:delete 
t.raws.before;else if(this.first!==t)for(let a of n)a.raws.before=t.raws.before}return n}toResult(e={}){return new jc(new zc,this,e).stringify()}};Ut.registerLazyResult=r=>{jc=r};Ut.registerProcessor=r=>{zc=r};Uc.exports=Ut;Ut.default=Ut;Fc.registerRoot(Ut)});var ba=x((q3,Vc)=>{u();"use strict";var Kr={split(r,e,t){let i=[],n="",a=!1,s=0,o=!1,l="",c=!1;for(let f of r)c?c=!1:f==="\\"?c=!0:o?f===l&&(o=!1):f==='"'||f==="'"?(o=!0,l=f):f==="("?s+=1:f===")"?s>0&&(s-=1):s===0&&e.includes(f)&&(a=!0),a?(n!==""&&i.push(n.trim()),n="",a=!1):n+=f;return(t||n!=="")&&i.push(n.trim()),i},space(r){let e=[" ",` -`," "];return Kr.split(r,e)},comma(r){return Kr.split(r,[","],!0)}};Vc.exports=Kr;Kr.default=Kr});var Sn=x(($3,Wc)=>{u();"use strict";var Hc=Et(),Fx=ba(),Xr=class extends Hc{constructor(e){super(e);this.type="rule",this.nodes||(this.nodes=[])}get selectors(){return Fx.comma(this.selector)}set selectors(e){let t=this.selector?this.selector.match(/,\s*/):null,i=t?t[0]:","+this.raw("between","beforeOpen");this.selector=e.join(i)}};Wc.exports=Xr;Xr.default=Xr;Hc.registerRule(Xr)});var Xc=x((L3,Kc)=>{u();"use strict";var jx=Wr(),zx=Mc(),Ux=Gr(),Vx=kn(),Hx=tr(),Gc=Sn(),Qc={empty:!0,space:!0};function Wx(r){for(let e=r.length-1;e>=0;e--){let t=r[e],i=t[3]||t[2];if(i)return i}}var Yc=class{constructor(e){this.input=e,this.root=new Hx,this.current=this.root,this.spaces="",this.semicolon=!1,this.customProperty=!1,this.createTokenizer(),this.root.source={input:e,start:{offset:0,line:1,column:1}}}createTokenizer(){this.tokenizer=zx(this.input)}parse(){let e;for(;!this.tokenizer.endOfFile();)switch(e=this.tokenizer.nextToken(),e[0]){case"space":this.spaces+=e[1];break;case";":this.freeSemicolon(e);break;case"}":this.end(e);break;case"comment":this.comment(e);break;case"at-word":this.atrule(e);break;case"{":this.emptyRule(e);break;default:this.other(e);break}this.endFile()}comment(e){let t=new Ux;this.init(t,e[2]),t.source.end=this.getPosition(e[3]||e[2]);let i=e[1].slice(2,-2);if(/^\s*$/.test(i))t.text="",t.raws.left=i,t.raws.right="";else{let n=i.match(/^(\s*)([^]*\S)(\s*)$/);t.text=n[2],t.raws.left=n[1],t.raws.right=n[3]}}emptyRule(e){let t=new Gc;this.init(t,e[2]),t.selector="",t.raws.between="",this.current=t}other(e){let t=!1,i=null,n=!1,a=null,s=[],o=e[1].startsWith("--"),l=[],c=e;for(;c;){if(i=c[0],l.push(c),i==="("||i==="[")a||(a=c),s.push(i==="("?")":"]");else if(o&&n&&i==="{")a||(a=c),s.push("}");else if(s.length===0)if(i===";")if(n){this.decl(l,o);return}else break;else if(i==="{"){this.rule(l);return}else if(i==="}"){this.tokenizer.back(l.pop()),t=!0;break}else i===":"&&(n=!0);else i===s[s.length-1]&&(s.pop(),s.length===0&&(a=null));c=this.tokenizer.nextToken()}if(this.tokenizer.endOfFile()&&(t=!0),s.length>0&&this.unclosedBracket(a),t&&n){if(!o)for(;l.length&&(c=l[l.length-1][0],!(c!=="space"&&c!=="comment"));)this.tokenizer.back(l.pop());this.decl(l,o)}else this.unknownWord(l)}rule(e){e.pop();let t=new Gc;this.init(t,e[0][2]),t.raws.between=this.spacesAndCommentsFromEnd(e),this.raw(t,"selector",e),this.current=t}decl(e,t){let i=new jx;this.init(i,e[0][2]);let n=e[e.length-1];for(n[0]===";"&&(this.semicolon=!0,e.pop()),i.source.end=this.getPosition(n[3]||n[2]||Wx(e));e[0][0]!=="word";)e.length===1&&this.unknownWord(e),i.raws.before+=e.shift()[1];for(i.source.start=this.getPosition(e[0][2]),i.prop="";e.length;){let c=e[0][0];if(c===":"||c==="space"||c==="comment")break;i.prop+=e.shift()[1]}i.raws.between="";let a;for(;e.length;)if(a=e.shift(),a[0]===":"){i.raws.between+=a[1];break}else 
a[0]==="word"&&/\w/.test(a[1])&&this.unknownWord([a]),i.raws.between+=a[1];(i.prop[0]==="_"||i.prop[0]==="*")&&(i.raws.before+=i.prop[0],i.prop=i.prop.slice(1));let s=[],o;for(;e.length&&(o=e[0][0],!(o!=="space"&&o!=="comment"));)s.push(e.shift());this.precheckMissedSemicolon(e);for(let c=e.length-1;c>=0;c--){if(a=e[c],a[1].toLowerCase()==="!important"){i.important=!0;let f=this.stringFrom(e,c);f=this.spacesFromEnd(e)+f,f!==" !important"&&(i.raws.important=f);break}else if(a[1].toLowerCase()==="important"){let f=e.slice(0),d="";for(let p=c;p>0;p--){let h=f[p][0];if(d.trim().indexOf("!")===0&&h!=="space")break;d=f.pop()[1]+d}d.trim().indexOf("!")===0&&(i.important=!0,i.raws.important=d,e=f)}if(a[0]!=="space"&&a[0]!=="comment")break}e.some(c=>c[0]!=="space"&&c[0]!=="comment")&&(i.raws.between+=s.map(c=>c[1]).join(""),s=[]),this.raw(i,"value",s.concat(e),t),i.value.includes(":")&&!t&&this.checkMissedSemicolon(e)}atrule(e){let t=new Vx;t.name=e[1].slice(1),t.name===""&&this.unnamedAtrule(t,e),this.init(t,e[2]);let i,n,a,s=!1,o=!1,l=[],c=[];for(;!this.tokenizer.endOfFile();){if(e=this.tokenizer.nextToken(),i=e[0],i==="("||i==="["?c.push(i==="("?")":"]"):i==="{"&&c.length>0?c.push("}"):i===c[c.length-1]&&c.pop(),c.length===0)if(i===";"){t.source.end=this.getPosition(e[2]),this.semicolon=!0;break}else if(i==="{"){o=!0;break}else if(i==="}"){if(l.length>0){for(a=l.length-1,n=l[a];n&&n[0]==="space";)n=l[--a];n&&(t.source.end=this.getPosition(n[3]||n[2]))}this.end(e);break}else l.push(e);else l.push(e);if(this.tokenizer.endOfFile()){s=!0;break}}t.raws.between=this.spacesAndCommentsFromEnd(l),l.length?(t.raws.afterName=this.spacesAndCommentsFromStart(l),this.raw(t,"params",l),s&&(e=l[l.length-1],t.source.end=this.getPosition(e[3]||e[2]),this.spaces=t.raws.between,t.raws.between="")):(t.raws.afterName="",t.params=""),o&&(t.nodes=[],this.current=t)}end(e){this.current.nodes&&this.current.nodes.length&&(this.current.raws.semicolon=this.semicolon),this.semicolon=!1,this.current.raws.after=(this.current.raws.after||"")+this.spaces,this.spaces="",this.current.parent?(this.current.source.end=this.getPosition(e[2]),this.current=this.current.parent):this.unexpectedClose(e)}endFile(){this.current.parent&&this.unclosedBlock(),this.current.nodes&&this.current.nodes.length&&(this.current.raws.semicolon=this.semicolon),this.current.raws.after=(this.current.raws.after||"")+this.spaces}freeSemicolon(e){if(this.spaces+=e[1],this.current.nodes){let t=this.current.nodes[this.current.nodes.length-1];t&&t.type==="rule"&&!t.raws.ownSemicolon&&(t.raws.ownSemicolon=this.spaces,this.spaces="")}}getPosition(e){let t=this.input.fromOffset(e);return{offset:e,line:t.line,column:t.col}}init(e,t){this.current.push(e),e.source={start:this.getPosition(t),input:this.input},e.raws.before=this.spaces,this.spaces="",e.type!=="comment"&&(this.semicolon=!1)}raw(e,t,i,n){let a,s,o=i.length,l="",c=!0,f,d;for(let p=0;ph+b[1],"");e.raws[t]={value:l,raw:p}}e[t]=l}spacesAndCommentsFromEnd(e){let t,i="";for(;e.length&&(t=e[e.length-1][0],!(t!=="space"&&t!=="comment"));)i=e.pop()[1]+i;return i}spacesAndCommentsFromStart(e){let t,i="";for(;e.length&&(t=e[0][0],!(t!=="space"&&t!=="comment"));)i+=e.shift()[1];return i}spacesFromEnd(e){let t,i="";for(;e.length&&(t=e[e.length-1][0],t==="space");)i=e.pop()[1]+i;return i}stringFrom(e,t){let i="";for(let n=t;n=0&&(n=e[a],!(n[0]!=="space"&&(i+=1,i===2)));a--);throw this.input.error("Missed semicolon",n[0]==="word"?n[3]+1:n[2])}};Kc.exports=Yc});var Jc=x(()=>{u()});var ep=x((B3,Zc)=>{u();var 
Gx="useandom-26T198340PX75pxJACKVERYMINDBUSHWOLF_GQZbfghjklqvwyzrict",Qx=(r,e=21)=>(t=e)=>{let i="",n=t;for(;n--;)i+=r[Math.random()*r.length|0];return i},Yx=(r=21)=>{let e="",t=r;for(;t--;)e+=Gx[Math.random()*64|0];return e};Zc.exports={nanoid:Yx,customAlphabet:Qx}});var wa=x((F3,tp)=>{u();tp.exports={}});var Cn=x((j3,sp)=>{u();"use strict";var{SourceMapConsumer:Kx,SourceMapGenerator:Xx}=Jc(),{fileURLToPath:rp,pathToFileURL:An}=(aa(),ac),{resolve:va,isAbsolute:xa}=(et(),Ur),{nanoid:Jx}=ep(),ka=oa(),ip=nn(),Zx=wa(),Sa=Symbol("fromOffsetCache"),e1=Boolean(Kx&&Xx),np=Boolean(va&&xa),Jr=class{constructor(e,t={}){if(e===null||typeof e=="undefined"||typeof e=="object"&&!e.toString)throw new Error(`PostCSS received ${e} instead of CSS string`);if(this.css=e.toString(),this.css[0]==="\uFEFF"||this.css[0]==="\uFFFE"?(this.hasBOM=!0,this.css=this.css.slice(1)):this.hasBOM=!1,t.from&&(!np||/^\w+:\/\//.test(t.from)||xa(t.from)?this.file=t.from:this.file=va(t.from)),np&&e1){let i=new Zx(this.css,t);if(i.text){this.map=i;let n=i.consumer().file;!this.file&&n&&(this.file=this.mapResolve(n))}}this.file||(this.id=""),this.map&&(this.map.file=this.from)}fromOffset(e){let t,i;if(this[Sa])i=this[Sa];else{let a=this.css.split(` -`);i=new Array(a.length);let s=0;for(let o=0,l=a.length;o=t)n=i.length-1;else{let a=i.length-2,s;for(;n>1),e=i[s+1])n=s+1;else{n=s;break}}return{line:n+1,col:e-i[n]+1}}error(e,t,i,n={}){let a,s,o;if(t&&typeof t=="object"){let c=t,f=i;if(typeof c.offset=="number"){let d=this.fromOffset(c.offset);t=d.line,i=d.col}else t=c.line,i=c.column;if(typeof f.offset=="number"){let d=this.fromOffset(f.offset);s=d.line,o=d.col}else s=f.line,o=f.column}else if(!i){let c=this.fromOffset(t);t=c.line,i=c.col}let l=this.origin(t,i,s,o);return l?a=new ip(e,l.endLine===void 0?l.line:{line:l.line,column:l.column},l.endLine===void 0?l.column:{line:l.endLine,column:l.endColumn},l.source,l.file,n.plugin):a=new ip(e,s===void 0?t:{line:t,column:i},s===void 0?i:{line:s,column:o},this.css,this.file,n.plugin),a.input={line:t,column:i,endLine:s,endColumn:o,source:this.css},this.file&&(An&&(a.input.url=An(this.file).toString()),a.input.file=this.file),a}origin(e,t,i,n){if(!this.map)return!1;let a=this.map.consumer(),s=a.originalPositionFor({line:e,column:t});if(!s.source)return!1;let o;typeof i=="number"&&(o=a.originalPositionFor({line:i,column:n}));let l;xa(s.source)?l=An(s.source):l=new URL(s.source,this.map.consumer().sourceRoot||An(this.map.mapFile));let c={url:l.toString(),line:s.line,column:s.column,endLine:o&&o.line,endColumn:o&&o.column};if(l.protocol==="file:")if(rp)c.file=rp(l);else throw new Error("file: protocol is not available in this PostCSS build");let f=a.sourceContentFor(s.source);return f&&(c.source=f),c}mapResolve(e){return/^\w+:\/\//.test(e)?e:va(this.map.consumer().sourceRoot||this.map.root||".",e)}get from(){return this.file||this.id}toJSON(){let e={};for(let t of["hasBOM","css","file","id"])this[t]!=null&&(e[t]=this[t]);return this.map&&(e.map={...this.map},e.map.consumerCache&&(e.map.consumerCache=void 0)),e}};sp.exports=Jr;Jr.default=Jr;ka&&ka.registerInput&&ka.registerInput(Jr)});var En=x((z3,ap)=>{u();"use strict";var t1=Et(),r1=Xc(),i1=Cn();function _n(r,e){let t=new i1(r,e),i=new r1(t);try{i.parse()}catch(n){throw n}return i.root}ap.exports=_n;_n.default=_n;t1.registerParse(_n)});var _a=x((V3,fp)=>{u();"use 
strict";var{isClean:tt,my:n1}=sn(),s1=pa(),a1=Vr(),o1=Et(),l1=cn(),U3=ma(),op=hn(),u1=En(),f1=tr(),c1={document:"Document",root:"Root",atrule:"AtRule",rule:"Rule",decl:"Declaration",comment:"Comment"},p1={postcssPlugin:!0,prepare:!0,Once:!0,Document:!0,Root:!0,Declaration:!0,Rule:!0,AtRule:!0,Comment:!0,DeclarationExit:!0,RuleExit:!0,AtRuleExit:!0,CommentExit:!0,RootExit:!0,DocumentExit:!0,OnceExit:!0},d1={postcssPlugin:!0,prepare:!0,Once:!0},rr=0;function Zr(r){return typeof r=="object"&&typeof r.then=="function"}function lp(r){let e=!1,t=c1[r.type];return r.type==="decl"?e=r.prop.toLowerCase():r.type==="atrule"&&(e=r.name.toLowerCase()),e&&r.append?[t,t+"-"+e,rr,t+"Exit",t+"Exit-"+e]:e?[t,t+"-"+e,t+"Exit",t+"Exit-"+e]:r.append?[t,rr,t+"Exit"]:[t,t+"Exit"]}function up(r){let e;return r.type==="document"?e=["Document",rr,"DocumentExit"]:r.type==="root"?e=["Root",rr,"RootExit"]:e=lp(r),{node:r,events:e,eventIndex:0,visitors:[],visitorIndex:0,iterator:0}}function Aa(r){return r[tt]=!1,r.nodes&&r.nodes.forEach(e=>Aa(e)),r}var Ca={},pt=class{constructor(e,t,i){this.stringified=!1,this.processed=!1;let n;if(typeof t=="object"&&t!==null&&(t.type==="root"||t.type==="document"))n=Aa(t);else if(t instanceof pt||t instanceof op)n=Aa(t.root),t.map&&(typeof i.map=="undefined"&&(i.map={}),i.map.inline||(i.map.inline=!1),i.map.prev=t.map);else{let a=u1;i.syntax&&(a=i.syntax.parse),i.parser&&(a=i.parser),a.parse&&(a=a.parse);try{n=a(t,i)}catch(s){this.processed=!0,this.error=s}n&&!n[n1]&&o1.rebuild(n)}this.result=new op(e,n,i),this.helpers={...Ca,result:this.result,postcss:Ca},this.plugins=this.processor.plugins.map(a=>typeof a=="object"&&a.prepare?{...a,...a.prepare(this.result)}:a)}get[Symbol.toStringTag](){return"LazyResult"}get processor(){return this.result.processor}get opts(){return this.result.opts}get css(){return this.stringify().css}get content(){return this.stringify().content}get map(){return this.stringify().map}get root(){return this.sync().root}get messages(){return this.sync().messages}warnings(){return this.sync().warnings()}toString(){return this.css}then(e,t){return this.async().then(e,t)}catch(e){return this.async().catch(e)}finally(e){return this.async().then(e,e)}async(){return this.error?Promise.reject(this.error):this.processed?Promise.resolve(this.result):(this.processing||(this.processing=this.runAsync()),this.processing)}sync(){if(this.error)throw this.error;if(this.processed)return this.result;if(this.processed=!0,this.processing)throw this.getAsyncError();for(let e of this.plugins){let t=this.runOnRoot(e);if(Zr(t))throw this.getAsyncError()}if(this.prepareVisitors(),this.hasListener){let e=this.result.root;for(;!e[tt];)e[tt]=!0,this.walkSync(e);if(this.listeners.OnceExit)if(e.type==="document")for(let t of e.nodes)this.visitSync(this.listeners.OnceExit,t);else this.visitSync(this.listeners.OnceExit,e)}return this.result}stringify(){if(this.error)throw this.error;if(this.stringified)return this.result;this.stringified=!0,this.sync();let e=this.result.opts,t=a1;e.syntax&&(t=e.syntax.stringify),e.stringifier&&(t=e.stringifier),t.stringify&&(t=t.stringify);let n=new s1(t,this.result.root,this.result.opts).generate();return this.result.css=n[0],this.result.map=n[1],this.result}walkSync(e){e[tt]=!0;let t=lp(e);for(let i of t)if(i===rr)e.nodes&&e.each(n=>{n[tt]||this.walkSync(n)});else{let n=this.listeners[i];if(n&&this.visitSync(n,e.toProxy()))return}}visitSync(e,t){for(let[i,n]of e){this.result.lastPlugin=i;let a;try{a=n(t,this.helpers)}catch(s){throw 
this.handleError(s,t.proxyOf)}if(t.type!=="root"&&t.type!=="document"&&!t.parent)return!0;if(Zr(a))throw this.getAsyncError()}}runOnRoot(e){this.result.lastPlugin=e;try{if(typeof e=="object"&&e.Once){if(this.result.root.type==="document"){let t=this.result.root.nodes.map(i=>e.Once(i,this.helpers));return Zr(t[0])?Promise.all(t):t}return e.Once(this.result.root,this.helpers)}else if(typeof e=="function")return e(this.result.root,this.result)}catch(t){throw this.handleError(t)}}getAsyncError(){throw new Error("Use process(css).then(cb) to work with async plugins")}handleError(e,t){let i=this.result.lastPlugin;try{t&&t.addToError(e),this.error=e,e.name==="CssSyntaxError"&&!e.plugin?(e.plugin=i.postcssPlugin,e.setMessage()):i.postcssVersion}catch(n){console&&console.error&&console.error(n)}return e}async runAsync(){this.plugin=0;for(let e=0;e0;){let i=this.visitTick(t);if(Zr(i))try{await i}catch(n){let a=t[t.length-1].node;throw this.handleError(n,a)}}}if(this.listeners.OnceExit)for(let[t,i]of this.listeners.OnceExit){this.result.lastPlugin=t;try{if(e.type==="document"){let n=e.nodes.map(a=>i(a,this.helpers));await Promise.all(n)}else await i(e,this.helpers)}catch(n){throw this.handleError(n)}}}return this.processed=!0,this.stringify()}prepareVisitors(){this.listeners={};let e=(t,i,n)=>{this.listeners[i]||(this.listeners[i]=[]),this.listeners[i].push([t,n])};for(let t of this.plugins)if(typeof t=="object")for(let i in t){if(!p1[i]&&/^[A-Z]/.test(i))throw new Error(`Unknown event ${i} in ${t.postcssPlugin}. Try to update PostCSS (${this.processor.version} now).`);if(!d1[i])if(typeof t[i]=="object")for(let n in t[i])n==="*"?e(t,i,t[i][n]):e(t,i+"-"+n.toLowerCase(),t[i][n]);else typeof t[i]=="function"&&e(t,i,t[i])}this.hasListener=Object.keys(this.listeners).length>0}visitTick(e){let t=e[e.length-1],{node:i,visitors:n}=t;if(i.type!=="root"&&i.type!=="document"&&!i.parent){e.pop();return}if(n.length>0&&t.visitorIndex{Ca=r};fp.exports=pt;pt.default=pt;f1.registerLazyResult(pt);l1.registerLazyResult(pt)});var pp=x((W3,cp)=>{u();"use strict";var h1=pa(),m1=Vr(),H3=ma(),g1=En(),y1=hn(),On=class{constructor(e,t,i){t=t.toString(),this.stringified=!1,this._processor=e,this._css=t,this._opts=i,this._map=void 0;let n,a=m1;this.result=new y1(this._processor,n,this._opts),this.result.css=t;let s=this;Object.defineProperty(this.result,"root",{get(){return s.root}});let o=new h1(a,n,this._opts,t);if(o.isMap()){let[l,c]=o.generate();l&&(this.result.css=l),c&&(this.result.map=c)}}get[Symbol.toStringTag](){return"NoWorkResult"}get processor(){return this.result.processor}get opts(){return this.result.opts}get css(){return this.result.css}get content(){return this.result.css}get map(){return this.result.map}get root(){if(this._root)return this._root;let e,t=g1;try{e=t(this._css,this._opts)}catch(i){this.error=i}if(this.error)throw this.error;return this._root=e,e}get messages(){return[]}warnings(){return[]}toString(){return this._css}then(e,t){return this.async().then(e,t)}catch(e){return this.async().catch(e)}finally(e){return this.async().then(e,e)}async(){return this.error?Promise.reject(this.error):Promise.resolve(this.result)}sync(){if(this.error)throw this.error;return this.result}};cp.exports=On;On.default=On});var hp=x((G3,dp)=>{u();"use strict";var b1=pp(),w1=_a(),v1=cn(),x1=tr(),ir=class{constructor(e=[]){this.version="8.4.24",this.plugins=this.normalize(e)}use(e){return this.plugins=this.plugins.concat(this.normalize([e])),this}process(e,t={}){return this.plugins.length===0&&typeof 
t.parser=="undefined"&&typeof t.stringifier=="undefined"&&typeof t.syntax=="undefined"?new b1(this,e,t):new w1(this,e,t)}normalize(e){let t=[];for(let i of e)if(i.postcss===!0?i=i():i.postcss&&(i=i.postcss),typeof i=="object"&&Array.isArray(i.plugins))t=t.concat(i.plugins);else if(typeof i=="object"&&i.postcssPlugin)t.push(i);else if(typeof i=="function")t.push(i);else if(!(typeof i=="object"&&(i.parse||i.stringify)))throw new Error(i+" is not a PostCSS plugin");return t}};dp.exports=ir;ir.default=ir;x1.registerProcessor(ir);v1.registerProcessor(ir)});var gp=x((Q3,mp)=>{u();"use strict";var k1=Wr(),S1=wa(),A1=Gr(),C1=kn(),_1=Cn(),E1=tr(),O1=Sn();function ei(r,e){if(Array.isArray(r))return r.map(n=>ei(n));let{inputs:t,...i}=r;if(t){e=[];for(let n of t){let a={...n,__proto__:_1.prototype};a.map&&(a.map={...a.map,__proto__:S1.prototype}),e.push(a)}}if(i.nodes&&(i.nodes=r.nodes.map(n=>ei(n,e))),i.source){let{inputId:n,...a}=i.source;i.source=a,n!=null&&(i.source.input=e[n])}if(i.type==="root")return new E1(i);if(i.type==="decl")return new k1(i);if(i.type==="rule")return new O1(i);if(i.type==="comment")return new A1(i);if(i.type==="atrule")return new C1(i);throw new Error("Unknown node type: "+r.type)}mp.exports=ei;ei.default=ei});var $e=x((Y3,Sp)=>{u();"use strict";var T1=nn(),yp=Wr(),R1=_a(),P1=Et(),Ea=hp(),I1=Vr(),D1=gp(),bp=cn(),q1=ga(),wp=Gr(),vp=kn(),$1=hn(),L1=Cn(),M1=En(),N1=ba(),xp=Sn(),kp=tr(),B1=Hr();function Z(...r){return r.length===1&&Array.isArray(r[0])&&(r=r[0]),new Ea(r)}Z.plugin=function(e,t){let i=!1;function n(...s){console&&console.warn&&!i&&(i=!0,console.warn(e+`: postcss.plugin was deprecated. Migration guide: -https://evilmartians.com/chronicles/postcss-8-plugin-migration`),m.env.LANG&&m.env.LANG.startsWith("cn")&&console.warn(e+`: \u91CC\u9762 postcss.plugin \u88AB\u5F03\u7528. 
\u8FC1\u79FB\u6307\u5357: -https://www.w3ctech.com/topic/2226`));let o=t(...s);return o.postcssPlugin=e,o.postcssVersion=new Ea().version,o}let a;return Object.defineProperty(n,"postcss",{get(){return a||(a=n()),a}}),n.process=function(s,o,l){return Z([n(l)]).process(s,o)},n};Z.stringify=I1;Z.parse=M1;Z.fromJSON=D1;Z.list=N1;Z.comment=r=>new wp(r);Z.atRule=r=>new vp(r);Z.decl=r=>new yp(r);Z.rule=r=>new xp(r);Z.root=r=>new kp(r);Z.document=r=>new bp(r);Z.CssSyntaxError=T1;Z.Declaration=yp;Z.Container=P1;Z.Processor=Ea;Z.Document=bp;Z.Comment=wp;Z.Warning=q1;Z.AtRule=vp;Z.Result=$1;Z.Input=L1;Z.Rule=xp;Z.Root=kp;Z.Node=B1;R1.registerPostcss(Z);Sp.exports=Z;Z.default=Z});var re,ee,K3,X3,J3,Z3,eI,tI,rI,iI,nI,sI,aI,oI,lI,uI,fI,cI,pI,dI,hI,mI,gI,yI,bI,wI,Ot=R(()=>{u();re=pe($e()),ee=re.default,K3=re.default.stringify,X3=re.default.fromJSON,J3=re.default.plugin,Z3=re.default.parse,eI=re.default.list,tI=re.default.document,rI=re.default.comment,iI=re.default.atRule,nI=re.default.rule,sI=re.default.decl,aI=re.default.root,oI=re.default.CssSyntaxError,lI=re.default.Declaration,uI=re.default.Container,fI=re.default.Processor,cI=re.default.Document,pI=re.default.Comment,dI=re.default.Warning,hI=re.default.AtRule,mI=re.default.Result,gI=re.default.Input,yI=re.default.Rule,bI=re.default.Root,wI=re.default.Node});var Oa=x((xI,Ap)=>{u();Ap.exports=function(r,e,t,i,n){for(e=e.split?e.split("."):e,i=0;i{u();"use strict";Tn.__esModule=!0;Tn.default=z1;function F1(r){for(var e=r.toLowerCase(),t="",i=!1,n=0;n<6&&e[n]!==void 0;n++){var a=e.charCodeAt(n),s=a>=97&&a<=102||a>=48&&a<=57;if(i=a===32,!s)break;t+=e[n]}if(t.length!==0){var o=parseInt(t,16),l=o>=55296&&o<=57343;return l||o===0||o>1114111?["\uFFFD",t.length+(i?1:0)]:[String.fromCodePoint(o),t.length+(i?1:0)]}}var j1=/\\/;function z1(r){var e=j1.test(r);if(!e)return r;for(var t="",i=0;i{u();"use strict";Pn.__esModule=!0;Pn.default=U1;function U1(r){for(var e=arguments.length,t=new Array(e>1?e-1:0),i=1;i0;){var n=t.shift();if(!r[n])return;r=r[n]}return r}_p.exports=Pn.default});var Tp=x((In,Op)=>{u();"use strict";In.__esModule=!0;In.default=V1;function V1(r){for(var e=arguments.length,t=new Array(e>1?e-1:0),i=1;i0;){var n=t.shift();r[n]||(r[n]={}),r=r[n]}}Op.exports=In.default});var Pp=x((Dn,Rp)=>{u();"use strict";Dn.__esModule=!0;Dn.default=H1;function H1(r){for(var e="",t=r.indexOf("/*"),i=0;t>=0;){e=e+r.slice(i,t);var n=r.indexOf("*/",t+2);if(n<0)return e;i=n+2,t=r.indexOf("/*",i)}return e=e+r.slice(i),e}Rp.exports=Dn.default});var ti=x(rt=>{u();"use strict";rt.__esModule=!0;rt.unesc=rt.stripComments=rt.getProp=rt.ensureObject=void 0;var W1=qn(Rn());rt.unesc=W1.default;var G1=qn(Ep());rt.getProp=G1.default;var Q1=qn(Tp());rt.ensureObject=Q1.default;var Y1=qn(Pp());rt.stripComments=Y1.default;function qn(r){return r&&r.__esModule?r:{default:r}}});var dt=x((ri,qp)=>{u();"use strict";ri.__esModule=!0;ri.default=void 0;var Ip=ti();function Dp(r,e){for(var t=0;ti||this.source.end.linen||this.source.end.line===i&&this.source.end.column{u();"use strict";ie.__esModule=!0;ie.UNIVERSAL=ie.TAG=ie.STRING=ie.SELECTOR=ie.ROOT=ie.PSEUDO=ie.NESTING=ie.ID=ie.COMMENT=ie.COMBINATOR=ie.CLASS=ie.ATTRIBUTE=void 0;var Z1="tag";ie.TAG=Z1;var ek="string";ie.STRING=ek;var tk="selector";ie.SELECTOR=tk;var rk="root";ie.ROOT=rk;var ik="pseudo";ie.PSEUDO=ik;var nk="nesting";ie.NESTING=nk;var sk="id";ie.ID=sk;var ak="comment";ie.COMMENT=ak;var ok="combinator";ie.COMBINATOR=ok;var lk="class";ie.CLASS=lk;var uk="attribute";ie.ATTRIBUTE=uk;var fk="universal";ie.UNIVERSAL=fk});var 
$n=x((ii,Np)=>{u();"use strict";ii.__esModule=!0;ii.default=void 0;var ck=dk(dt()),ht=pk(Se());function $p(r){if(typeof WeakMap!="function")return null;var e=new WeakMap,t=new WeakMap;return($p=function(n){return n?t:e})(r)}function pk(r,e){if(!e&&r&&r.__esModule)return r;if(r===null||typeof r!="object"&&typeof r!="function")return{default:r};var t=$p(e);if(t&&t.has(r))return t.get(r);var i={},n=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in r)if(a!=="default"&&Object.prototype.hasOwnProperty.call(r,a)){var s=n?Object.getOwnPropertyDescriptor(r,a):null;s&&(s.get||s.set)?Object.defineProperty(i,a,s):i[a]=r[a]}return i.default=r,t&&t.set(r,i),i}function dk(r){return r&&r.__esModule?r:{default:r}}function hk(r,e){var t=typeof Symbol!="undefined"&&r[Symbol.iterator]||r["@@iterator"];if(t)return(t=t.call(r)).next.bind(t);if(Array.isArray(r)||(t=mk(r))||e&&r&&typeof r.length=="number"){t&&(r=t);var i=0;return function(){return i>=r.length?{done:!0}:{done:!1,value:r[i++]}}}throw new TypeError(`Invalid attempt to iterate non-iterable instance. -In order to be iterable, non-array objects must have a [Symbol.iterator]() method.`)}function mk(r,e){if(!!r){if(typeof r=="string")return Lp(r,e);var t=Object.prototype.toString.call(r).slice(8,-1);if(t==="Object"&&r.constructor&&(t=r.constructor.name),t==="Map"||t==="Set")return Array.from(r);if(t==="Arguments"||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(t))return Lp(r,e)}}function Lp(r,e){(e==null||e>r.length)&&(e=r.length);for(var t=0,i=new Array(e);t=n&&(this.indexes[s]=a-1);return this},t.removeAll=function(){for(var n=hk(this.nodes),a;!(a=n()).done;){var s=a.value;s.parent=void 0}return this.nodes=[],this},t.empty=function(){return this.removeAll()},t.insertAfter=function(n,a){a.parent=this;var s=this.index(n);this.nodes.splice(s+1,0,a),a.parent=this;var o;for(var l in this.indexes)o=this.indexes[l],s<=o&&(this.indexes[l]=o+1);return this},t.insertBefore=function(n,a){a.parent=this;var s=this.index(n);this.nodes.splice(s,0,a),a.parent=this;var o;for(var l in this.indexes)o=this.indexes[l],o<=s&&(this.indexes[l]=o+1);return this},t._findChildAtPosition=function(n,a){var s=void 0;return this.each(function(o){if(o.atPosition){var l=o.atPosition(n,a);if(l)return s=l,!1}else if(o.isAtPosition(n,a))return s=o,!1}),s},t.atPosition=function(n,a){if(this.isAtPosition(n,a))return this._findChildAtPosition(n,a)||this},t._inferEndPosition=function(){this.last&&this.last.source&&this.last.source.end&&(this.source=this.source||{},this.source.end=this.source.end||{},Object.assign(this.source.end,this.last.source.end))},t.each=function(n){this.lastEach||(this.lastEach=0),this.indexes||(this.indexes={}),this.lastEach++;var a=this.lastEach;if(this.indexes[a]=0,!!this.length){for(var s,o;this.indexes[a]{u();"use strict";ni.__esModule=!0;ni.default=void 0;var wk=xk($n()),vk=Se();function xk(r){return r&&r.__esModule?r:{default:r}}function Bp(r,e){for(var t=0;t{u();"use strict";si.__esModule=!0;si.default=void 0;var Ck=Ek($n()),_k=Se();function Ek(r){return r&&r.__esModule?r:{default:r}}function Ok(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,Ia(r,e)}function Ia(r,e){return Ia=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},Ia(r,e)}var Tk=function(r){Ok(e,r);function e(t){var i;return i=r.call(this,t)||this,i.type=_k.SELECTOR,i}return e}(Ck.default);si.default=Tk;jp.exports=si.default});var Ln=x((AI,zp)=>{u();"use strict";var 
Rk={},Pk=Rk.hasOwnProperty,Ik=function(e,t){if(!e)return t;var i={};for(var n in t)i[n]=Pk.call(e,n)?e[n]:t[n];return i},Dk=/[ -,\.\/:-@\[-\^`\{-~]/,qk=/[ -,\.\/:-@\[\]\^`\{-~]/,$k=/(^|\\+)?(\\[A-F0-9]{1,6})\x20(?![a-fA-F0-9\x20])/g,qa=function r(e,t){t=Ik(t,r.options),t.quotes!="single"&&t.quotes!="double"&&(t.quotes="single");for(var i=t.quotes=="double"?'"':"'",n=t.isIdentifier,a=e.charAt(0),s="",o=0,l=e.length;o126){if(f>=55296&&f<=56319&&o{u();"use strict";ai.__esModule=!0;ai.default=void 0;var Lk=Up(Ln()),Mk=ti(),Nk=Up(dt()),Bk=Se();function Up(r){return r&&r.__esModule?r:{default:r}}function Vp(r,e){for(var t=0;t{u();"use strict";oi.__esModule=!0;oi.default=void 0;var Uk=Hk(dt()),Vk=Se();function Hk(r){return r&&r.__esModule?r:{default:r}}function Wk(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,Ma(r,e)}function Ma(r,e){return Ma=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},Ma(r,e)}var Gk=function(r){Wk(e,r);function e(t){var i;return i=r.call(this,t)||this,i.type=Vk.COMMENT,i}return e}(Uk.default);oi.default=Gk;Wp.exports=oi.default});var Fa=x((li,Gp)=>{u();"use strict";li.__esModule=!0;li.default=void 0;var Qk=Kk(dt()),Yk=Se();function Kk(r){return r&&r.__esModule?r:{default:r}}function Xk(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,Ba(r,e)}function Ba(r,e){return Ba=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},Ba(r,e)}var Jk=function(r){Xk(e,r);function e(i){var n;return n=r.call(this,i)||this,n.type=Yk.ID,n}var t=e.prototype;return t.valueToString=function(){return"#"+r.prototype.valueToString.call(this)},e}(Qk.default);li.default=Jk;Gp.exports=li.default});var Mn=x((ui,Kp)=>{u();"use strict";ui.__esModule=!0;ui.default=void 0;var Zk=Qp(Ln()),eS=ti(),tS=Qp(dt());function Qp(r){return r&&r.__esModule?r:{default:r}}function Yp(r,e){for(var t=0;t{u();"use strict";fi.__esModule=!0;fi.default=void 0;var sS=oS(Mn()),aS=Se();function oS(r){return r&&r.__esModule?r:{default:r}}function lS(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,za(r,e)}function za(r,e){return za=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},za(r,e)}var uS=function(r){lS(e,r);function e(t){var i;return i=r.call(this,t)||this,i.type=aS.TAG,i}return e}(sS.default);fi.default=uS;Xp.exports=fi.default});var Ha=x((ci,Jp)=>{u();"use strict";ci.__esModule=!0;ci.default=void 0;var fS=pS(dt()),cS=Se();function pS(r){return r&&r.__esModule?r:{default:r}}function dS(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,Va(r,e)}function Va(r,e){return Va=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},Va(r,e)}var hS=function(r){dS(e,r);function e(t){var i;return i=r.call(this,t)||this,i.type=cS.STRING,i}return e}(fS.default);ci.default=hS;Jp.exports=ci.default});var Ga=x((pi,Zp)=>{u();"use strict";pi.__esModule=!0;pi.default=void 0;var mS=yS($n()),gS=Se();function yS(r){return r&&r.__esModule?r:{default:r}}function bS(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,Wa(r,e)}function Wa(r,e){return Wa=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},Wa(r,e)}var wS=function(r){bS(e,r);function e(i){var n;return n=r.call(this,i)||this,n.type=gS.PSEUDO,n}var t=e.prototype;return t.toString=function(){var 
n=this.length?"("+this.map(String).join(",")+")":"";return[this.rawSpaceBefore,this.stringifyProperty("value"),n,this.rawSpaceAfter].join("")},e}(mS.default);pi.default=wS;Zp.exports=pi.default});var Nn={};Ge(Nn,{deprecate:()=>vS});function vS(r){return r}var Bn=R(()=>{u()});var td=x((CI,ed)=>{u();ed.exports=(Bn(),Nn).deprecate});var Za=x(mi=>{u();"use strict";mi.__esModule=!0;mi.default=void 0;mi.unescapeValue=Xa;var di=Ya(Ln()),xS=Ya(Rn()),kS=Ya(Mn()),SS=Se(),Qa;function Ya(r){return r&&r.__esModule?r:{default:r}}function rd(r,e){for(var t=0;t0&&!n.quoted&&o.before.length===0&&!(n.spaces.value&&n.spaces.value.after)&&(o.before=" "),id(s,o)}))),a.push("]"),a.push(this.rawSpaceAfter),a.join("")},AS(e,[{key:"quoted",get:function(){var n=this.quoteMark;return n==="'"||n==='"'},set:function(n){OS()}},{key:"quoteMark",get:function(){return this._quoteMark},set:function(n){if(!this._constructed){this._quoteMark=n;return}this._quoteMark!==n&&(this._quoteMark=n,this._syncRawValue())}},{key:"qualifiedAttribute",get:function(){return this.qualifiedName(this.raws.attribute||this.attribute)}},{key:"insensitiveFlag",get:function(){return this.insensitive?"i":""}},{key:"value",get:function(){return this._value},set:function(n){if(this._constructed){var a=Xa(n),s=a.deprecatedUsage,o=a.unescaped,l=a.quoteMark;if(s&&ES(),o===this._value&&l===this._quoteMark)return;this._value=o,this._quoteMark=l,this._syncRawValue()}else this._value=n}},{key:"insensitive",get:function(){return this._insensitive},set:function(n){n||(this._insensitive=!1,this.raws&&(this.raws.insensitiveFlag==="I"||this.raws.insensitiveFlag==="i")&&(this.raws.insensitiveFlag=void 0)),this._insensitive=n}},{key:"attribute",get:function(){return this._attribute},set:function(n){this._handleEscapes("attribute",n),this._attribute=n}}]),e}(kS.default);mi.default=Fn;Fn.NO_QUOTE=null;Fn.SINGLE_QUOTE="'";Fn.DOUBLE_QUOTE='"';var Ja=(Qa={"'":{quotes:"single",wrap:!0},'"':{quotes:"double",wrap:!0}},Qa[null]={isIdentifier:!0},Qa);function id(r,e){return""+e.before+r+e.after}});var to=x((gi,nd)=>{u();"use strict";gi.__esModule=!0;gi.default=void 0;var PS=DS(Mn()),IS=Se();function DS(r){return r&&r.__esModule?r:{default:r}}function qS(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,eo(r,e)}function eo(r,e){return eo=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},eo(r,e)}var $S=function(r){qS(e,r);function e(t){var i;return i=r.call(this,t)||this,i.type=IS.UNIVERSAL,i.value="*",i}return e}(PS.default);gi.default=$S;nd.exports=gi.default});var io=x((yi,sd)=>{u();"use strict";yi.__esModule=!0;yi.default=void 0;var LS=NS(dt()),MS=Se();function NS(r){return r&&r.__esModule?r:{default:r}}function BS(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,ro(r,e)}function ro(r,e){return ro=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},ro(r,e)}var FS=function(r){BS(e,r);function e(t){var i;return i=r.call(this,t)||this,i.type=MS.COMBINATOR,i}return e}(LS.default);yi.default=FS;sd.exports=yi.default});var so=x((bi,ad)=>{u();"use strict";bi.__esModule=!0;bi.default=void 0;var jS=US(dt()),zS=Se();function US(r){return r&&r.__esModule?r:{default:r}}function VS(r,e){r.prototype=Object.create(e.prototype),r.prototype.constructor=r,no(r,e)}function no(r,e){return no=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(i,n){return i.__proto__=n,i},no(r,e)}var HS=function(r){VS(e,r);function e(t){var i;return 
i=r.call(this,t)||this,i.type=zS.NESTING,i.value="&",i}return e}(jS.default);bi.default=HS;ad.exports=bi.default});var ld=x((jn,od)=>{u();"use strict";jn.__esModule=!0;jn.default=WS;function WS(r){return r.sort(function(e,t){return e-t})}od.exports=jn.default});var ao=x(M=>{u();"use strict";M.__esModule=!0;M.word=M.tilde=M.tab=M.str=M.space=M.slash=M.singleQuote=M.semicolon=M.plus=M.pipe=M.openSquare=M.openParenthesis=M.newline=M.greaterThan=M.feed=M.equals=M.doubleQuote=M.dollar=M.cr=M.comment=M.comma=M.combinator=M.colon=M.closeSquare=M.closeParenthesis=M.caret=M.bang=M.backslash=M.at=M.asterisk=M.ampersand=void 0;var GS=38;M.ampersand=GS;var QS=42;M.asterisk=QS;var YS=64;M.at=YS;var KS=44;M.comma=KS;var XS=58;M.colon=XS;var JS=59;M.semicolon=JS;var ZS=40;M.openParenthesis=ZS;var eA=41;M.closeParenthesis=eA;var tA=91;M.openSquare=tA;var rA=93;M.closeSquare=rA;var iA=36;M.dollar=iA;var nA=126;M.tilde=nA;var sA=94;M.caret=sA;var aA=43;M.plus=aA;var oA=61;M.equals=oA;var lA=124;M.pipe=lA;var uA=62;M.greaterThan=uA;var fA=32;M.space=fA;var ud=39;M.singleQuote=ud;var cA=34;M.doubleQuote=cA;var pA=47;M.slash=pA;var dA=33;M.bang=dA;var hA=92;M.backslash=hA;var mA=13;M.cr=mA;var gA=12;M.feed=gA;var yA=10;M.newline=yA;var bA=9;M.tab=bA;var wA=ud;M.str=wA;var vA=-1;M.comment=vA;var xA=-2;M.word=xA;var kA=-3;M.combinator=kA});var pd=x(wi=>{u();"use strict";wi.__esModule=!0;wi.FIELDS=void 0;wi.default=TA;var D=SA(ao()),nr,te;function fd(r){if(typeof WeakMap!="function")return null;var e=new WeakMap,t=new WeakMap;return(fd=function(n){return n?t:e})(r)}function SA(r,e){if(!e&&r&&r.__esModule)return r;if(r===null||typeof r!="object"&&typeof r!="function")return{default:r};var t=fd(e);if(t&&t.has(r))return t.get(r);var i={},n=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in r)if(a!=="default"&&Object.prototype.hasOwnProperty.call(r,a)){var s=n?Object.getOwnPropertyDescriptor(r,a):null;s&&(s.get||s.set)?Object.defineProperty(i,a,s):i[a]=r[a]}return i.default=r,t&&t.set(r,i),i}var AA=(nr={},nr[D.tab]=!0,nr[D.newline]=!0,nr[D.cr]=!0,nr[D.feed]=!0,nr),CA=(te={},te[D.space]=!0,te[D.tab]=!0,te[D.newline]=!0,te[D.cr]=!0,te[D.feed]=!0,te[D.ampersand]=!0,te[D.asterisk]=!0,te[D.bang]=!0,te[D.comma]=!0,te[D.colon]=!0,te[D.semicolon]=!0,te[D.openParenthesis]=!0,te[D.closeParenthesis]=!0,te[D.openSquare]=!0,te[D.closeSquare]=!0,te[D.singleQuote]=!0,te[D.doubleQuote]=!0,te[D.plus]=!0,te[D.pipe]=!0,te[D.tilde]=!0,te[D.greaterThan]=!0,te[D.equals]=!0,te[D.dollar]=!0,te[D.caret]=!0,te[D.slash]=!0,te),oo={},cd="0123456789abcdefABCDEF";for(zn=0;zn0?(k=s+v,S=w-y[v].length):(k=s,S=a),O=D.comment,s=k,p=k,d=w-S):c===D.slash?(w=o,O=c,p=s,d=o-a,l=w+1):(w=_A(t,o),O=D.word,p=s,d=w-a),l=w+1;break}e.push([O,s,o-a,p,d,o,l]),S&&(a=S,S=null),o=l}return e}});var vd=x((vi,wd)=>{u();"use strict";vi.__esModule=!0;vi.default=void 0;var RA=je(Pa()),lo=je(Da()),PA=je(La()),dd=je(Na()),IA=je(Fa()),DA=je(Ua()),uo=je(Ha()),qA=je(Ga()),hd=Un(Za()),$A=je(to()),fo=je(io()),LA=je(so()),MA=je(ld()),P=Un(pd()),$=Un(ao()),NA=Un(Se()),le=ti(),Vt,co;function md(r){if(typeof WeakMap!="function")return null;var e=new WeakMap,t=new WeakMap;return(md=function(n){return n?t:e})(r)}function Un(r,e){if(!e&&r&&r.__esModule)return r;if(r===null||typeof r!="object"&&typeof r!="function")return{default:r};var t=md(e);if(t&&t.has(r))return t.get(r);var i={},n=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in r)if(a!=="default"&&Object.prototype.hasOwnProperty.call(r,a)){var 
s=n?Object.getOwnPropertyDescriptor(r,a):null;s&&(s.get||s.set)?Object.defineProperty(i,a,s):i[a]=r[a]}return i.default=r,t&&t.set(r,i),i}function je(r){return r&&r.__esModule?r:{default:r}}function gd(r,e){for(var t=0;t0){var s=this.current.last;if(s){var o=this.convertWhitespaceNodesToSpace(a),l=o.space,c=o.rawSpace;c!==void 0&&(s.rawSpaceAfter+=c),s.spaces.after+=l}else a.forEach(function(O){return i.newNode(O)})}return}var f=this.currToken,d=void 0;n>this.position&&(d=this.parseWhitespaceEquivalentTokens(n));var p;if(this.isNamedCombinator()?p=this.namedCombinator():this.currToken[P.FIELDS.TYPE]===$.combinator?(p=new fo.default({value:this.content(),source:sr(this.currToken),sourceIndex:this.currToken[P.FIELDS.START_POS]}),this.position++):po[this.currToken[P.FIELDS.TYPE]]||d||this.unexpected(),p){if(d){var h=this.convertWhitespaceNodesToSpace(d),b=h.space,v=h.rawSpace;p.spaces.before=b,p.rawSpaceBefore=v}}else{var y=this.convertWhitespaceNodesToSpace(d,!0),w=y.space,k=y.rawSpace;k||(k=w);var S={},E={spaces:{}};w.endsWith(" ")&&k.endsWith(" ")?(S.before=w.slice(0,w.length-1),E.spaces.before=k.slice(0,k.length-1)):w.startsWith(" ")&&k.startsWith(" ")?(S.after=w.slice(1),E.spaces.after=k.slice(1)):E.value=k,p=new fo.default({value:" ",source:ho(f,this.tokens[this.position-1]),sourceIndex:f[P.FIELDS.START_POS],spaces:S,raws:E})}return this.currToken&&this.currToken[P.FIELDS.TYPE]===$.space&&(p.spaces.after=this.optionalSpace(this.content()),this.position++),this.newNode(p)},e.comma=function(){if(this.position===this.tokens.length-1){this.root.trailingComma=!0,this.position++;return}this.current._inferEndPosition();var i=new lo.default({source:{start:yd(this.tokens[this.position+1])}});this.current.parent.append(i),this.current=i,this.position++},e.comment=function(){var i=this.currToken;this.newNode(new dd.default({value:this.content(),source:sr(i),sourceIndex:i[P.FIELDS.START_POS]})),this.position++},e.error=function(i,n){throw this.root.error(i,n)},e.missingBackslash=function(){return this.error("Expected a backslash preceding the semicolon.",{index:this.currToken[P.FIELDS.START_POS]})},e.missingParenthesis=function(){return this.expected("opening parenthesis",this.currToken[P.FIELDS.START_POS])},e.missingSquareBracket=function(){return this.expected("opening square bracket",this.currToken[P.FIELDS.START_POS])},e.unexpected=function(){return this.error("Unexpected '"+this.content()+"'. 
Escaping special characters with \\ may help.",this.currToken[P.FIELDS.START_POS])},e.unexpectedPipe=function(){return this.error("Unexpected '|'.",this.currToken[P.FIELDS.START_POS])},e.namespace=function(){var i=this.prevToken&&this.content(this.prevToken)||!0;if(this.nextToken[P.FIELDS.TYPE]===$.word)return this.position++,this.word(i);if(this.nextToken[P.FIELDS.TYPE]===$.asterisk)return this.position++,this.universal(i);this.unexpectedPipe()},e.nesting=function(){if(this.nextToken){var i=this.content(this.nextToken);if(i==="|"){this.position++;return}}var n=this.currToken;this.newNode(new LA.default({value:this.content(),source:sr(n),sourceIndex:n[P.FIELDS.START_POS]})),this.position++},e.parentheses=function(){var i=this.current.last,n=1;if(this.position++,i&&i.type===NA.PSEUDO){var a=new lo.default({source:{start:yd(this.tokens[this.position-1])}}),s=this.current;for(i.append(a),this.current=a;this.position1&&i.nextToken&&i.nextToken[P.FIELDS.TYPE]===$.openParenthesis&&i.error("Misplaced parenthesis.",{index:i.nextToken[P.FIELDS.START_POS]})});else return this.expected(["pseudo-class","pseudo-element"],this.currToken[P.FIELDS.START_POS])},e.space=function(){var i=this.content();this.position===0||this.prevToken[P.FIELDS.TYPE]===$.comma||this.prevToken[P.FIELDS.TYPE]===$.openParenthesis||this.current.nodes.every(function(n){return n.type==="comment"})?(this.spaces=this.optionalSpace(i),this.position++):this.position===this.tokens.length-1||this.nextToken[P.FIELDS.TYPE]===$.comma||this.nextToken[P.FIELDS.TYPE]===$.closeParenthesis?(this.current.last.spaces.after=this.optionalSpace(i),this.position++):this.combinator()},e.string=function(){var i=this.currToken;this.newNode(new uo.default({value:this.content(),source:sr(i),sourceIndex:i[P.FIELDS.START_POS]})),this.position++},e.universal=function(i){var n=this.nextToken;if(n&&this.content(n)==="|")return this.position++,this.namespace();var a=this.currToken;this.newNode(new $A.default({value:this.content(),source:sr(a),sourceIndex:a[P.FIELDS.START_POS]}),i),this.position++},e.splitWord=function(i,n){for(var a=this,s=this.nextToken,o=this.content();s&&~[$.dollar,$.caret,$.equals,$.word].indexOf(s[P.FIELDS.TYPE]);){this.position++;var l=this.content();if(o+=l,l.lastIndexOf("\\")===l.length-1){var c=this.nextToken;c&&c[P.FIELDS.TYPE]===$.space&&(o+=this.requiredSpace(this.content(c)),this.position++)}s=this.nextToken}var f=mo(o,".").filter(function(b){var v=o[b-1]==="\\",y=/^\d+\.\d+%$/.test(o);return!v&&!y}),d=mo(o,"#").filter(function(b){return o[b-1]!=="\\"}),p=mo(o,"#{");p.length&&(d=d.filter(function(b){return!~p.indexOf(b)}));var h=(0,MA.default)(jA([0].concat(f,d)));h.forEach(function(b,v){var y=h[v+1]||o.length,w=o.slice(b,y);if(v===0&&n)return n.call(a,w,h.length);var k,S=a.currToken,E=S[P.FIELDS.START_POS]+h[v],O=Ht(S[1],S[2]+b,S[3],S[2]+(y-1));if(~f.indexOf(b)){var B={value:w.slice(1),source:O,sourceIndex:E};k=new PA.default(ar(B,"value"))}else if(~d.indexOf(b)){var N={value:w.slice(1),source:O,sourceIndex:E};k=new IA.default(ar(N,"value"))}else{var T={value:w,source:O,sourceIndex:E};ar(T,"value"),k=new DA.default(T)}a.newNode(k,i),i=null}),this.position++},e.word=function(i){var n=this.nextToken;return n&&this.content(n)==="|"?(this.position++,this.namespace()):this.splitWord(i)},e.loop=function(){for(;this.position{u();"use strict";xi.__esModule=!0;xi.default=void 0;var UA=VA(vd());function VA(r){return r&&r.__esModule?r:{default:r}}var HA=function(){function r(t,i){this.func=t||function(){},this.funcRes=null,this.options=i}var 
e=r.prototype;return e._shouldUpdateSelector=function(i,n){n===void 0&&(n={});var a=Object.assign({},this.options,n);return a.updateSelector===!1?!1:typeof i!="string"},e._isLossy=function(i){i===void 0&&(i={});var n=Object.assign({},this.options,i);return n.lossless===!1},e._root=function(i,n){n===void 0&&(n={});var a=new UA.default(i,this._parseOptions(n));return a.root},e._parseOptions=function(i){return{lossy:this._isLossy(i)}},e._run=function(i,n){var a=this;return n===void 0&&(n={}),new Promise(function(s,o){try{var l=a._root(i,n);Promise.resolve(a.func(l)).then(function(c){var f=void 0;return a._shouldUpdateSelector(i,n)&&(f=l.toString(),i.selector=f),{transform:c,root:l,string:f}}).then(s,o)}catch(c){o(c);return}})},e._runSync=function(i,n){n===void 0&&(n={});var a=this._root(i,n),s=this.func(a);if(s&&typeof s.then=="function")throw new Error("Selector processor returned a promise to a synchronous call.");var o=void 0;return n.updateSelector&&typeof i!="string"&&(o=a.toString(),i.selector=o),{transform:s,root:a,string:o}},e.ast=function(i,n){return this._run(i,n).then(function(a){return a.root})},e.astSync=function(i,n){return this._runSync(i,n).root},e.transform=function(i,n){return this._run(i,n).then(function(a){return a.transform})},e.transformSync=function(i,n){return this._runSync(i,n).transform},e.process=function(i,n){return this._run(i,n).then(function(a){return a.string||a.root.toString()})},e.processSync=function(i,n){var a=this._runSync(i,n);return a.string||a.root.toString()},r}();xi.default=HA;xd.exports=xi.default});var Sd=x(ne=>{u();"use strict";ne.__esModule=!0;ne.universal=ne.tag=ne.string=ne.selector=ne.root=ne.pseudo=ne.nesting=ne.id=ne.comment=ne.combinator=ne.className=ne.attribute=void 0;var WA=ze(Za()),GA=ze(La()),QA=ze(io()),YA=ze(Na()),KA=ze(Fa()),XA=ze(so()),JA=ze(Ga()),ZA=ze(Pa()),eC=ze(Da()),tC=ze(Ha()),rC=ze(Ua()),iC=ze(to());function ze(r){return r&&r.__esModule?r:{default:r}}var nC=function(e){return new WA.default(e)};ne.attribute=nC;var sC=function(e){return new GA.default(e)};ne.className=sC;var aC=function(e){return new QA.default(e)};ne.combinator=aC;var oC=function(e){return new YA.default(e)};ne.comment=oC;var lC=function(e){return new KA.default(e)};ne.id=lC;var uC=function(e){return new XA.default(e)};ne.nesting=uC;var fC=function(e){return new JA.default(e)};ne.pseudo=fC;var cC=function(e){return new ZA.default(e)};ne.root=cC;var pC=function(e){return new eC.default(e)};ne.selector=pC;var dC=function(e){return new tC.default(e)};ne.string=dC;var hC=function(e){return new rC.default(e)};ne.tag=hC;var mC=function(e){return new iC.default(e)};ne.universal=mC});var Ed=x(J=>{u();"use strict";J.__esModule=!0;J.isComment=J.isCombinator=J.isClassName=J.isAttribute=void 0;J.isContainer=EC;J.isIdentifier=void 0;J.isNamespace=OC;J.isNesting=void 0;J.isNode=go;J.isPseudo=void 0;J.isPseudoClass=_C;J.isPseudoElement=_d;J.isUniversal=J.isTag=J.isString=J.isSelector=J.isRoot=void 0;var ue=Se(),Oe,gC=(Oe={},Oe[ue.ATTRIBUTE]=!0,Oe[ue.CLASS]=!0,Oe[ue.COMBINATOR]=!0,Oe[ue.COMMENT]=!0,Oe[ue.ID]=!0,Oe[ue.NESTING]=!0,Oe[ue.PSEUDO]=!0,Oe[ue.ROOT]=!0,Oe[ue.SELECTOR]=!0,Oe[ue.STRING]=!0,Oe[ue.TAG]=!0,Oe[ue.UNIVERSAL]=!0,Oe);function go(r){return typeof r=="object"&&gC[r.type]}function Ue(r,e){return go(e)&&e.type===r}var Ad=Ue.bind(null,ue.ATTRIBUTE);J.isAttribute=Ad;var yC=Ue.bind(null,ue.CLASS);J.isClassName=yC;var bC=Ue.bind(null,ue.COMBINATOR);J.isCombinator=bC;var wC=Ue.bind(null,ue.COMMENT);J.isComment=wC;var vC=Ue.bind(null,ue.ID);J.isIdentifier=vC;var 
xC=Ue.bind(null,ue.NESTING);J.isNesting=xC;var yo=Ue.bind(null,ue.PSEUDO);J.isPseudo=yo;var kC=Ue.bind(null,ue.ROOT);J.isRoot=kC;var SC=Ue.bind(null,ue.SELECTOR);J.isSelector=SC;var AC=Ue.bind(null,ue.STRING);J.isString=AC;var Cd=Ue.bind(null,ue.TAG);J.isTag=Cd;var CC=Ue.bind(null,ue.UNIVERSAL);J.isUniversal=CC;function _d(r){return yo(r)&&r.value&&(r.value.startsWith("::")||r.value.toLowerCase()===":before"||r.value.toLowerCase()===":after"||r.value.toLowerCase()===":first-letter"||r.value.toLowerCase()===":first-line")}function _C(r){return yo(r)&&!_d(r)}function EC(r){return!!(go(r)&&r.walk)}function OC(r){return Ad(r)||Cd(r)}});var Od=x(Ke=>{u();"use strict";Ke.__esModule=!0;var bo=Se();Object.keys(bo).forEach(function(r){r==="default"||r==="__esModule"||r in Ke&&Ke[r]===bo[r]||(Ke[r]=bo[r])});var wo=Sd();Object.keys(wo).forEach(function(r){r==="default"||r==="__esModule"||r in Ke&&Ke[r]===wo[r]||(Ke[r]=wo[r])});var vo=Ed();Object.keys(vo).forEach(function(r){r==="default"||r==="__esModule"||r in Ke&&Ke[r]===vo[r]||(Ke[r]=vo[r])})});var it=x((ki,Rd)=>{u();"use strict";ki.__esModule=!0;ki.default=void 0;var TC=IC(kd()),RC=PC(Od());function Td(r){if(typeof WeakMap!="function")return null;var e=new WeakMap,t=new WeakMap;return(Td=function(n){return n?t:e})(r)}function PC(r,e){if(!e&&r&&r.__esModule)return r;if(r===null||typeof r!="object"&&typeof r!="function")return{default:r};var t=Td(e);if(t&&t.has(r))return t.get(r);var i={},n=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var a in r)if(a!=="default"&&Object.prototype.hasOwnProperty.call(r,a)){var s=n?Object.getOwnPropertyDescriptor(r,a):null;s&&(s.get||s.set)?Object.defineProperty(i,a,s):i[a]=r[a]}return i.default=r,t&&t.set(r,i),i}function IC(r){return r&&r.__esModule?r:{default:r}}var xo=function(e){return new TC.default(e)};Object.assign(xo,RC);delete xo.__esModule;var DC=xo;ki.default=DC;Rd.exports=ki.default});function mt(r){return["fontSize","outline"].includes(r)?e=>(typeof e=="function"&&(e=e({})),Array.isArray(e)&&(e=e[0]),e):r==="fontFamily"?e=>{typeof e=="function"&&(e=e({}));let t=Array.isArray(e)&&ke(e[1])?e[0]:e;return Array.isArray(t)?t.join(", "):t}:["boxShadow","transitionProperty","transitionDuration","transitionDelay","transitionTimingFunction","backgroundImage","backgroundSize","backgroundColor","cursor","animation"].includes(r)?e=>(typeof e=="function"&&(e=e({})),Array.isArray(e)&&(e=e.join(", ")),e):["gridTemplateColumns","gridTemplateRows","objectPosition"].includes(r)?e=>(typeof e=="function"&&(e=e({})),typeof e=="string"&&(e=ee.list.comma(e).join(" ")),e):(e,t={})=>(typeof e=="function"&&(e=e(t)),e)}var Si=R(()=>{u();Ot();Kt()});var Md=x(($I,_o)=>{u();var{Rule:Pd,AtRule:qC}=$e(),Id=it();function ko(r,e){let t;try{Id(i=>{t=i}).processSync(r)}catch(i){throw r.includes(":")?e?e.error("Missed semicolon"):i:e?e.error(i.message):i}return t.at(0)}function Dd(r,e){let t=!1;return r.each(i=>{if(i.type==="nesting"){let n=e.clone({});i.value!=="&"?i.replaceWith(ko(i.value.replace("&",n.toString()))):i.replaceWith(n),t=!0}else"nodes"in i&&i.nodes&&Dd(i,e)&&(t=!0)}),t}function qd(r,e){let t=[];return r.selectors.forEach(i=>{let n=ko(i,r);e.selectors.forEach(a=>{if(!a)return;let s=ko(a,e);Dd(s,n)||(s.prepend(Id.combinator({value:" "})),s.prepend(n.clone({}))),t.push(s.toString())})}),t}function Vn(r,e){let t=r.prev();for(e.after(r);t&&t.type==="comment";){let i=t.prev();e.after(t),t=i}return r}function $C(r){return function e(t,i,n,a=n){let 
s=[];if(i.each(o=>{o.type==="rule"&&n?a&&(o.selectors=qd(t,o)):o.type==="atrule"&&o.nodes?r[o.name]?e(t,o,a):i[Ao]!==!1&&s.push(o):s.push(o)}),n&&s.length){let o=t.clone({nodes:[]});for(let l of s)o.append(l);i.prepend(o)}}}function So(r,e,t){let i=new Pd({selector:r,nodes:[]});return i.append(e),t.after(i),i}function $d(r,e){let t={};for(let i of r)t[i]=!0;if(e)for(let i of e)t[i.replace(/^@/,"")]=!0;return t}function LC(r){r=r.trim();let e=r.match(/^\((.*)\)$/);if(!e)return{type:"basic",selector:r};let t=e[1].match(/^(with(?:out)?):(.+)$/);if(t){let i=t[1]==="with",n=Object.fromEntries(t[2].trim().split(/\s+/).map(s=>[s,!0]));if(i&&n.all)return{type:"noop"};let a=s=>!!n[s];return n.all?a=()=>!0:i&&(a=s=>s==="all"?!1:!n[s]),{type:"withrules",escapes:a}}return{type:"unknown"}}function MC(r){let e=[],t=r.parent;for(;t&&t instanceof qC;)e.push(t),t=t.parent;return e}function NC(r){let e=r[Ld];if(!e)r.after(r.nodes);else{let t=r.nodes,i,n=-1,a,s,o,l=MC(r);if(l.forEach((c,f)=>{if(e(c.name))i=c,n=f,s=o;else{let d=o;o=c.clone({nodes:[]}),d&&o.append(d),a=a||o}}),i?s?(a.append(t),i.after(s)):i.after(t):r.after(t),r.next()&&i){let c;l.slice(0,n+1).forEach((f,d,p)=>{let h=c;c=f.clone({nodes:[]}),h&&c.append(h);let b=[],y=(p[d-1]||r).next();for(;y;)b.push(y),y=y.next();c.append(b)}),c&&(s||t[t.length-1]).after(c)}}r.remove()}var Ao=Symbol("rootRuleMergeSel"),Ld=Symbol("rootRuleEscapes");function BC(r){let{params:e}=r,{type:t,selector:i,escapes:n}=LC(e);if(t==="unknown")throw r.error(`Unknown @${r.name} parameter ${JSON.stringify(e)}`);if(t==="basic"&&i){let a=new Pd({selector:i,nodes:r.nodes});r.removeAll(),r.append(a)}r[Ld]=n,r[Ao]=n?!n("all"):t==="noop"}var Co=Symbol("hasRootRule");_o.exports=(r={})=>{let e=$d(["media","supports","layer","container"],r.bubble),t=$C(e),i=$d(["document","font-face","keyframes","-webkit-keyframes","-moz-keyframes"],r.unwrap),n=(r.rootRuleName||"at-root").replace(/^@/,""),a=r.preserveEmpty;return{postcssPlugin:"postcss-nested",Once(s){s.walkAtRules(n,o=>{BC(o),s[Co]=!0})},Rule(s){let o=!1,l=s,c=!1,f=[];s.each(d=>{d.type==="rule"?(f.length&&(l=So(s.selector,f,l),f=[]),c=!0,o=!0,d.selectors=qd(s,d),l=Vn(d,l)):d.type==="atrule"?(f.length&&(l=So(s.selector,f,l),f=[]),d.name===n?(o=!0,t(s,d,!0,d[Ao]),l=Vn(d,l)):e[d.name]?(c=!0,o=!0,t(s,d,!0),l=Vn(d,l)):i[d.name]?(c=!0,o=!0,t(s,d,!1),l=Vn(d,l)):c&&f.push(d)):d.type==="decl"&&c&&f.push(d)}),f.length&&(l=So(s.selector,f,l)),o&&a!==!0&&(s.raws.semicolon=!0,s.nodes.length===0&&s.remove())},RootExit(s){s[Co]&&(s.walkAtRules(n,NC),s[Co]=!1)}}};_o.exports.postcss=!0});var jd=x((LI,Fd)=>{u();"use strict";var Nd=/-(\w|$)/g,Bd=(r,e)=>e.toUpperCase(),FC=r=>(r=r.toLowerCase(),r==="float"?"cssFloat":r.startsWith("-ms-")?r.substr(1).replace(Nd,Bd):r.replace(Nd,Bd));Fd.exports=FC});var To=x((MI,zd)=>{u();var jC=jd(),zC={boxFlex:!0,boxFlexGroup:!0,columnCount:!0,flex:!0,flexGrow:!0,flexPositive:!0,flexShrink:!0,flexNegative:!0,fontWeight:!0,lineClamp:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,tabSize:!0,widows:!0,zIndex:!0,zoom:!0,fillOpacity:!0,strokeDashoffset:!0,strokeOpacity:!0,strokeWidth:!0};function Eo(r){return typeof r.nodes=="undefined"?!0:Oo(r)}function Oo(r){let e,t={};return r.each(i=>{if(i.type==="atrule")e="@"+i.name,i.params&&(e+=" "+i.params),typeof t[e]=="undefined"?t[e]=Eo(i):Array.isArray(t[e])?t[e].push(Eo(i)):t[e]=[t[e],Eo(i)];else if(i.type==="rule"){let n=Oo(i);if(t[i.selector])for(let a in n)t[i.selector][a]=n[a];else t[i.selector]=n}else 
if(i.type==="decl"){i.prop[0]==="-"&&i.prop[1]==="-"||i.parent&&i.parent.selector===":export"?e=i.prop:e=jC(i.prop);let n=i.value;!isNaN(i.value)&&zC[e]&&(n=parseFloat(i.value)),i.important&&(n+=" !important"),typeof t[e]=="undefined"?t[e]=n:Array.isArray(t[e])?t[e].push(n):t[e]=[t[e],n]}}),t}zd.exports=Oo});var Hn=x((NI,Wd)=>{u();var Ai=$e(),Ud=/\s*!important\s*$/i,UC={"box-flex":!0,"box-flex-group":!0,"column-count":!0,flex:!0,"flex-grow":!0,"flex-positive":!0,"flex-shrink":!0,"flex-negative":!0,"font-weight":!0,"line-clamp":!0,"line-height":!0,opacity:!0,order:!0,orphans:!0,"tab-size":!0,widows:!0,"z-index":!0,zoom:!0,"fill-opacity":!0,"stroke-dashoffset":!0,"stroke-opacity":!0,"stroke-width":!0};function VC(r){return r.replace(/([A-Z])/g,"-$1").replace(/^ms-/,"-ms-").toLowerCase()}function Vd(r,e,t){t===!1||t===null||(e.startsWith("--")||(e=VC(e)),typeof t=="number"&&(t===0||UC[e]?t=t.toString():t+="px"),e==="css-float"&&(e="float"),Ud.test(t)?(t=t.replace(Ud,""),r.push(Ai.decl({prop:e,value:t,important:!0}))):r.push(Ai.decl({prop:e,value:t})))}function Hd(r,e,t){let i=Ai.atRule({name:e[1],params:e[3]||""});typeof t=="object"&&(i.nodes=[],Ro(t,i)),r.push(i)}function Ro(r,e){let t,i,n;for(t in r)if(i=r[t],!(i===null||typeof i=="undefined"))if(t[0]==="@"){let a=t.match(/@(\S+)(\s+([\W\w]*)\s*)?/);if(Array.isArray(i))for(let s of i)Hd(e,a,s);else Hd(e,a,i)}else if(Array.isArray(i))for(let a of i)Vd(e,t,a);else typeof i=="object"?(n=Ai.rule({selector:t}),Ro(i,n),e.push(n)):Vd(e,t,i)}Wd.exports=function(r){let e=Ai.root();return Ro(r,e),e}});var Po=x((BI,Gd)=>{u();var HC=To();Gd.exports=function(e){return console&&console.warn&&e.warnings().forEach(t=>{let i=t.plugin||"PostCSS";console.warn(i+": "+t.text)}),HC(e.root)}});var Yd=x((FI,Qd)=>{u();var WC=$e(),GC=Po(),QC=Hn();Qd.exports=function(e){let t=WC(e);return async i=>{let n=await t.process(i,{parser:QC,from:void 0});return GC(n)}}});var Xd=x((jI,Kd)=>{u();var YC=$e(),KC=Po(),XC=Hn();Kd.exports=function(r){let e=YC(r);return t=>{let i=e.process(t,{parser:XC,from:void 0});return KC(i)}}});var Zd=x((zI,Jd)=>{u();var JC=To(),ZC=Hn(),e_=Yd(),t_=Xd();Jd.exports={objectify:JC,parse:ZC,async:e_,sync:t_}});var or,eh,UI,VI,HI,WI,th=R(()=>{u();or=pe(Zd()),eh=or.default,UI=or.default.objectify,VI=or.default.parse,HI=or.default.async,WI=or.default.sync});function lr(r){return Array.isArray(r)?r.flatMap(e=>ee([(0,rh.default)({bubble:["screen"]})]).process(e,{parser:eh}).root.nodes):lr([r])}var rh,Io=R(()=>{u();Ot();rh=pe(Md());th()});function ur(r,e,t=!1){if(r==="")return e;let i=typeof e=="string"?(0,ih.default)().astSync(e):e;return i.walkClasses(n=>{let a=n.value,s=t&&a.startsWith("-");n.value=s?`-${r}${a.slice(1)}`:`${r}${a}`}),typeof e=="string"?i.toString():i}var ih,Wn=R(()=>{u();ih=pe(it())});function Te(r){let e=nh.default.className();return e.value=r,jt(e?.raws?.value??e.value)}var nh,fr=R(()=>{u();nh=pe(it());Ki()});function Do(r){return jt(`.${Te(r)}`)}function Gn(r,e){return Do(Ci(r,e))}function Ci(r,e){return e==="DEFAULT"?r:e==="-"||e==="-DEFAULT"?`-${r}`:e.startsWith("-")?`-${r}${e}`:e.startsWith("/")?`${r}${e}`:`${r}-${e}`}var qo=R(()=>{u();fr();Ki()});function L(r,e=[[r,[r]]],{filterDefault:t=!1,...i}={}){let n=mt(r);return function({matchUtilities:a,theme:s}){for(let o of e){let 
l=Array.isArray(o[0])?o:[o];a(l.reduce((c,[f,d])=>Object.assign(c,{[f]:p=>d.reduce((h,b)=>Array.isArray(b)?Object.assign(h,{[b[0]]:b[1]}):Object.assign(h,{[b]:n(p)}),{})}),{}),{...i,values:t?Object.fromEntries(Object.entries(s(r)??{}).filter(([c])=>c!=="DEFAULT")):s(r)})}}}var sh=R(()=>{u();Si()});function Tt(r){return r=Array.isArray(r)?r:[r],r.map(e=>{let t=e.values.map(i=>i.raw!==void 0?i.raw:[i.min&&`(min-width: ${i.min})`,i.max&&`(max-width: ${i.max})`].filter(Boolean).join(" and "));return e.not?`not all and ${t}`:t}).join(", ")}var Qn=R(()=>{u()});function $o(r){return r.split(l_).map(t=>{let i=t.trim(),n={value:i},a=i.split(u_),s=new Set;for(let o of a)!s.has("DIRECTIONS")&&r_.has(o)?(n.direction=o,s.add("DIRECTIONS")):!s.has("PLAY_STATES")&&i_.has(o)?(n.playState=o,s.add("PLAY_STATES")):!s.has("FILL_MODES")&&n_.has(o)?(n.fillMode=o,s.add("FILL_MODES")):!s.has("ITERATION_COUNTS")&&(s_.has(o)||f_.test(o))?(n.iterationCount=o,s.add("ITERATION_COUNTS")):!s.has("TIMING_FUNCTION")&&a_.has(o)||!s.has("TIMING_FUNCTION")&&o_.some(l=>o.startsWith(`${l}(`))?(n.timingFunction=o,s.add("TIMING_FUNCTION")):!s.has("DURATION")&&ah.test(o)?(n.duration=o,s.add("DURATION")):!s.has("DELAY")&&ah.test(o)?(n.delay=o,s.add("DELAY")):s.has("NAME")?(n.unknown||(n.unknown=[]),n.unknown.push(o)):(n.name=o,s.add("NAME"));return n})}var r_,i_,n_,s_,a_,o_,l_,u_,ah,f_,oh=R(()=>{u();r_=new Set(["normal","reverse","alternate","alternate-reverse"]),i_=new Set(["running","paused"]),n_=new Set(["none","forwards","backwards","both"]),s_=new Set(["infinite"]),a_=new Set(["linear","ease","ease-in","ease-out","ease-in-out","step-start","step-end"]),o_=["cubic-bezier","steps"],l_=/\,(?![^(]*\))/g,u_=/\ +(?![^(]*\))/g,ah=/^(-?[\d.]+m?s)$/,f_=/^(\d+)$/});var lh,xe,uh=R(()=>{u();lh=r=>Object.assign({},...Object.entries(r??{}).flatMap(([e,t])=>typeof t=="object"?Object.entries(lh(t)).map(([i,n])=>({[e+(i==="DEFAULT"?"":`-${i}`)]:n})):[{[`${e}`]:t}])),xe=lh});var ch,fh=R(()=>{ch="3.4.14"});function Rt(r,e=!0){return Array.isArray(r)?r.map(t=>{if(e&&Array.isArray(t))throw new Error("The tuple syntax is not supported for `screens`.");if(typeof t=="string")return{name:t.toString(),not:!1,values:[{min:t,max:void 0}]};let[i,n]=t;return i=i.toString(),typeof n=="string"?{name:i,not:!1,values:[{min:n,max:void 0}]}:Array.isArray(n)?{name:i,not:!1,values:n.map(a=>dh(a))}:{name:i,not:!1,values:[dh(n)]}}):Rt(Object.entries(r??{}),!1)}function Yn(r){return r.values.length!==1?{result:!1,reason:"multiple-values"}:r.values[0].raw!==void 0?{result:!1,reason:"raw-values"}:r.values[0].min!==void 0&&r.values[0].max!==void 0?{result:!1,reason:"min-and-max"}:{result:!0,reason:null}}function ph(r,e,t){let i=Kn(e,r),n=Kn(t,r),a=Yn(i),s=Yn(n);if(a.reason==="multiple-values"||s.reason==="multiple-values")throw new Error("Attempted to sort a screen with multiple values. This should never happen. Please open a bug report.");if(a.reason==="raw-values"||s.reason==="raw-values")throw new Error("Attempted to sort a screen with raw values. This should never happen. Please open a bug report.");if(a.reason==="min-and-max"||s.reason==="min-and-max")throw new Error("Attempted to sort a screen with both min and max values. This should never happen. 
Please open a bug report.");let{min:o,max:l}=i.values[0],{min:c,max:f}=n.values[0];e.not&&([o,l]=[l,o]),t.not&&([c,f]=[f,c]),o=o===void 0?o:parseFloat(o),l=l===void 0?l:parseFloat(l),c=c===void 0?c:parseFloat(c),f=f===void 0?f:parseFloat(f);let[d,p]=r==="min"?[o,c]:[f,l];return d-p}function Kn(r,e){return typeof r=="object"?r:{name:"arbitrary-screen",values:[{[e]:r}]}}function dh({"min-width":r,min:e=r,max:t,raw:i}={}){return{min:e,max:t,raw:i}}var Xn=R(()=>{u()});function Jn(r,e){r.walkDecls(t=>{if(e.includes(t.prop)){t.remove();return}for(let i of e)t.value.includes(`/ var(${i})`)&&(t.value=t.value.replace(`/ var(${i})`,""))})}var hh=R(()=>{u()});var se,Xe,nt,ge,mh,gh=R(()=>{u();ft();et();Ot();sh();Qn();fr();oh();uh();Lr();ea();Kt();Si();fh();Be();Xn();Gs();hh();ct();Br();_i();se={childVariant:({addVariant:r})=>{r("*","& > *")},pseudoElementVariants:({addVariant:r})=>{r("first-letter","&::first-letter"),r("first-line","&::first-line"),r("marker",[({container:e})=>(Jn(e,["--tw-text-opacity"]),"& *::marker"),({container:e})=>(Jn(e,["--tw-text-opacity"]),"&::marker")]),r("selection",["& *::selection","&::selection"]),r("file","&::file-selector-button"),r("placeholder","&::placeholder"),r("backdrop","&::backdrop"),r("before",({container:e})=>(e.walkRules(t=>{let i=!1;t.walkDecls("content",()=>{i=!0}),i||t.prepend(ee.decl({prop:"content",value:"var(--tw-content)"}))}),"&::before")),r("after",({container:e})=>(e.walkRules(t=>{let i=!1;t.walkDecls("content",()=>{i=!0}),i||t.prepend(ee.decl({prop:"content",value:"var(--tw-content)"}))}),"&::after"))},pseudoClassVariants:({addVariant:r,matchVariant:e,config:t,prefix:i})=>{let n=[["first","&:first-child"],["last","&:last-child"],["only","&:only-child"],["odd","&:nth-child(odd)"],["even","&:nth-child(even)"],"first-of-type","last-of-type","only-of-type",["visited",({container:s})=>(Jn(s,["--tw-text-opacity","--tw-border-opacity","--tw-bg-opacity"]),"&:visited")],"target",["open","&[open]"],"default","checked","indeterminate","placeholder-shown","autofill","optional","required","valid","invalid","in-range","out-of-range","read-only","empty","focus-within",["hover",we(t(),"hoverOnlyWhenSupported")?"@media (hover: hover) and (pointer: fine) { &:hover }":"&:hover"],"focus","focus-visible","active","enabled","disabled"].map(s=>Array.isArray(s)?s:[s,`&:${s}`]);for(let[s,o]of n)r(s,l=>typeof o=="function"?o(l):o);let a={group:(s,{modifier:o})=>o?[`:merge(${i(".group")}\\/${Te(o)})`," &"]:[`:merge(${i(".group")})`," &"],peer:(s,{modifier:o})=>o?[`:merge(${i(".peer")}\\/${Te(o)})`," ~ &"]:[`:merge(${i(".peer")})`," ~ &"]};for(let[s,o]of Object.entries(a))e(s,(l="",c)=>{let f=K(typeof l=="function"?l(c):l);f.includes("&")||(f="&"+f);let[d,p]=o("",c),h=null,b=null,v=0;for(let y=0;y{r("ltr",'&:where([dir="ltr"], [dir="ltr"] *)'),r("rtl",'&:where([dir="rtl"], [dir="rtl"] *)')},reducedMotionVariants:({addVariant:r})=>{r("motion-safe","@media (prefers-reduced-motion: no-preference)"),r("motion-reduce","@media (prefers-reduced-motion: reduce)")},darkVariants:({config:r,addVariant:e})=>{let[t,i=".dark"]=[].concat(r("darkMode","media"));if(t===!1&&(t="media",G.warn("darkmode-false",["The `darkMode` option in your Tailwind CSS configuration is set to `false`, which now behaves the same as `media`.","Change `darkMode` to `media` or remove it entirely.","https://tailwindcss.com/docs/upgrade-guide#remove-dark-mode-configuration"])),t==="variant"){let n;if(Array.isArray(i)||typeof i=="function"?n=i:typeof i=="string"&&(n=[i]),Array.isArray(n))for(let a of 
n)a===".dark"?(t=!1,G.warn("darkmode-variant-without-selector",["When using `variant` for `darkMode`, you must provide a selector.",'Example: `darkMode: ["variant", ".your-selector &"]`'])):a.includes("&")||(t=!1,G.warn("darkmode-variant-without-ampersand",["When using `variant` for `darkMode`, your selector must contain `&`.",'Example `darkMode: ["variant", ".your-selector &"]`']));i=n}t==="selector"?e("dark",`&:where(${i}, ${i} *)`):t==="media"?e("dark","@media (prefers-color-scheme: dark)"):t==="variant"?e("dark",i):t==="class"&&e("dark",`&:is(${i} *)`)},printVariant:({addVariant:r})=>{r("print","@media print")},screenVariants:({theme:r,addVariant:e,matchVariant:t})=>{let i=r("screens")??{},n=Object.values(i).every(w=>typeof w=="string"),a=Rt(r("screens")),s=new Set([]);function o(w){return w.match(/(\D+)$/)?.[1]??"(none)"}function l(w){w!==void 0&&s.add(o(w))}function c(w){return l(w),s.size===1}for(let w of a)for(let k of w.values)l(k.min),l(k.max);let f=s.size<=1;function d(w){return Object.fromEntries(a.filter(k=>Yn(k).result).map(k=>{let{min:S,max:E}=k.values[0];if(w==="min"&&S!==void 0)return k;if(w==="min"&&E!==void 0)return{...k,not:!k.not};if(w==="max"&&E!==void 0)return k;if(w==="max"&&S!==void 0)return{...k,not:!k.not}}).map(k=>[k.name,k]))}function p(w){return(k,S)=>ph(w,k.value,S.value)}let h=p("max"),b=p("min");function v(w){return k=>{if(n)if(f){if(typeof k=="string"&&!c(k))return G.warn("minmax-have-mixed-units",["The `min-*` and `max-*` variants are not supported with a `screens` configuration containing mixed units."]),[]}else return G.warn("mixed-screen-units",["The `min-*` and `max-*` variants are not supported with a `screens` configuration containing mixed units."]),[];else return G.warn("complex-screen-config",["The `min-*` and `max-*` variants are not supported with a `screens` configuration containing objects."]),[];return[`@media ${Tt(Kn(k,w))}`]}}t("max",v("max"),{sort:h,values:n?d("max"):{}});let y="min-screens";for(let w of a)e(w.name,`@media ${Tt(w)}`,{id:y,sort:n&&f?b:void 0,value:w});t("min",v("min"),{id:y,sort:b})},supportsVariants:({matchVariant:r,theme:e})=>{r("supports",(t="")=>{let i=K(t),n=/^\w*\s*\(/.test(i);return i=n?i.replace(/\b(and|or|not)\b/g," $1 "):i,n?`@supports ${i}`:(i.includes(":")||(i=`${i}: var(--tw)`),i.startsWith("(")&&i.endsWith(")")||(i=`(${i})`),`@supports ${i}`)},{values:e("supports")??{}})},hasVariants:({matchVariant:r,prefix:e})=>{r("has",t=>`&:has(${K(t)})`,{values:{},[Pt]:{respectPrefix:!1}}),r("group-has",(t,{modifier:i})=>i?`:merge(${e(".group")}\\/${i}):has(${K(t)}) &`:`:merge(${e(".group")}):has(${K(t)}) &`,{values:{},[Pt]:{respectPrefix:!1}}),r("peer-has",(t,{modifier:i})=>i?`:merge(${e(".peer")}\\/${i}):has(${K(t)}) ~ &`:`:merge(${e(".peer")}):has(${K(t)}) ~ &`,{values:{},[Pt]:{respectPrefix:!1}})},ariaVariants:({matchVariant:r,theme:e})=>{r("aria",t=>`&[aria-${Ye(K(t))}]`,{values:e("aria")??{}}),r("group-aria",(t,{modifier:i})=>i?`:merge(.group\\/${i})[aria-${Ye(K(t))}] &`:`:merge(.group)[aria-${Ye(K(t))}] &`,{values:e("aria")??{}}),r("peer-aria",(t,{modifier:i})=>i?`:merge(.peer\\/${i})[aria-${Ye(K(t))}] ~ &`:`:merge(.peer)[aria-${Ye(K(t))}] ~ &`,{values:e("aria")??{}})},dataVariants:({matchVariant:r,theme:e})=>{r("data",t=>`&[data-${Ye(K(t))}]`,{values:e("data")??{}}),r("group-data",(t,{modifier:i})=>i?`:merge(.group\\/${i})[data-${Ye(K(t))}] &`:`:merge(.group)[data-${Ye(K(t))}] &`,{values:e("data")??{}}),r("peer-data",(t,{modifier:i})=>i?`:merge(.peer\\/${i})[data-${Ye(K(t))}] ~ &`:`:merge(.peer)[data-${Ye(K(t))}] 
~ &`,{values:e("data")??{}})},orientationVariants:({addVariant:r})=>{r("portrait","@media (orientation: portrait)"),r("landscape","@media (orientation: landscape)")},prefersContrastVariants:({addVariant:r})=>{r("contrast-more","@media (prefers-contrast: more)"),r("contrast-less","@media (prefers-contrast: less)")},forcedColorsVariants:({addVariant:r})=>{r("forced-colors","@media (forced-colors: active)")}},Xe=["translate(var(--tw-translate-x), var(--tw-translate-y))","rotate(var(--tw-rotate))","skewX(var(--tw-skew-x))","skewY(var(--tw-skew-y))","scaleX(var(--tw-scale-x))","scaleY(var(--tw-scale-y))"].join(" "),nt=["var(--tw-blur)","var(--tw-brightness)","var(--tw-contrast)","var(--tw-grayscale)","var(--tw-hue-rotate)","var(--tw-invert)","var(--tw-saturate)","var(--tw-sepia)","var(--tw-drop-shadow)"].join(" "),ge=["var(--tw-backdrop-blur)","var(--tw-backdrop-brightness)","var(--tw-backdrop-contrast)","var(--tw-backdrop-grayscale)","var(--tw-backdrop-hue-rotate)","var(--tw-backdrop-invert)","var(--tw-backdrop-opacity)","var(--tw-backdrop-saturate)","var(--tw-backdrop-sepia)"].join(" "),mh={preflight:({addBase:r})=>{let e=ee.parse(`*,::after,::before{box-sizing:border-box;border-width:0;border-style:solid;border-color:theme('borderColor.DEFAULT', currentColor)}::after,::before{--tw-content:''}:host,html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;tab-size:4;font-family:theme('fontFamily.sans', ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji");font-feature-settings:theme('fontFamily.sans[1].fontFeatureSettings', normal);font-variation-settings:theme('fontFamily.sans[1].fontVariationSettings', normal);-webkit-tap-highlight-color:transparent}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,pre,samp{font-family:theme('fontFamily.mono', ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace);font-feature-settings:theme('fontFamily.mono[1].fontFeatureSettings', normal);font-variation-settings:theme('fontFamily.mono[1].fontVariationSettings', 
normal);font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-feature-settings:inherit;font-variation-settings:inherit;font-size:100%;font-weight:inherit;line-height:inherit;letter-spacing:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,input:where([type=button]),input:where([type=reset]),input:where([type=submit]){-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dd,dl,figure,h1,h2,h3,h4,h5,h6,hr,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}menu,ol,ul{list-style:none;margin:0;padding:0}dialog{padding:0}textarea{resize:vertical}input::placeholder,textarea::placeholder{opacity:1;color:theme('colors.gray.400', #9ca3af)}[role=button],button{cursor:pointer}:disabled{cursor:default}audio,canvas,embed,iframe,img,object,svg,video{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]:where(:not([hidden=until-found])){display:none}`);r([ee.comment({text:`! tailwindcss v${ch} | MIT License | https://tailwindcss.com`}),...e.nodes])},container:(()=>{function r(t=[]){return t.flatMap(i=>i.values.map(n=>n.min)).filter(i=>i!==void 0)}function e(t,i,n){if(typeof n=="undefined")return[];if(!(typeof n=="object"&&n!==null))return[{screen:"DEFAULT",minWidth:0,padding:n}];let a=[];n.DEFAULT&&a.push({screen:"DEFAULT",minWidth:0,padding:n.DEFAULT});for(let s of t)for(let o of i)for(let{min:l}of o.values)l===s&&a.push({minWidth:s,padding:n[o.name]});return a}return function({addComponents:t,theme:i}){let n=Rt(i("container.screens",i("screens"))),a=r(n),s=e(a,n,i("container.padding")),o=c=>{let f=s.find(d=>d.minWidth===c);return f?{paddingRight:f.padding,paddingLeft:f.padding}:{}},l=Array.from(new Set(a.slice().sort((c,f)=>parseInt(c)-parseInt(f)))).map(c=>({[`@media (min-width: ${c})`]:{".container":{"max-width":c,...o(c)}}}));t([{".container":Object.assign({width:"100%"},i("container.center",!1)?{marginRight:"auto",marginLeft:"auto"}:{},o(0))},...l])}})(),accessibility:({addUtilities:r})=>{r({".sr-only":{position:"absolute",width:"1px",height:"1px",padding:"0",margin:"-1px",overflow:"hidden",clip:"rect(0, 0, 0, 
0)",whiteSpace:"nowrap",borderWidth:"0"},".not-sr-only":{position:"static",width:"auto",height:"auto",padding:"0",margin:"0",overflow:"visible",clip:"auto",whiteSpace:"normal"}})},pointerEvents:({addUtilities:r})=>{r({".pointer-events-none":{"pointer-events":"none"},".pointer-events-auto":{"pointer-events":"auto"}})},visibility:({addUtilities:r})=>{r({".visible":{visibility:"visible"},".invisible":{visibility:"hidden"},".collapse":{visibility:"collapse"}})},position:({addUtilities:r})=>{r({".static":{position:"static"},".fixed":{position:"fixed"},".absolute":{position:"absolute"},".relative":{position:"relative"},".sticky":{position:"sticky"}})},inset:L("inset",[["inset",["inset"]],[["inset-x",["left","right"]],["inset-y",["top","bottom"]]],[["start",["inset-inline-start"]],["end",["inset-inline-end"]],["top",["top"]],["right",["right"]],["bottom",["bottom"]],["left",["left"]]]],{supportsNegativeValues:!0}),isolation:({addUtilities:r})=>{r({".isolate":{isolation:"isolate"},".isolation-auto":{isolation:"auto"}})},zIndex:L("zIndex",[["z",["zIndex"]]],{supportsNegativeValues:!0}),order:L("order",void 0,{supportsNegativeValues:!0}),gridColumn:L("gridColumn",[["col",["gridColumn"]]]),gridColumnStart:L("gridColumnStart",[["col-start",["gridColumnStart"]]],{supportsNegativeValues:!0}),gridColumnEnd:L("gridColumnEnd",[["col-end",["gridColumnEnd"]]],{supportsNegativeValues:!0}),gridRow:L("gridRow",[["row",["gridRow"]]]),gridRowStart:L("gridRowStart",[["row-start",["gridRowStart"]]],{supportsNegativeValues:!0}),gridRowEnd:L("gridRowEnd",[["row-end",["gridRowEnd"]]],{supportsNegativeValues:!0}),float:({addUtilities:r})=>{r({".float-start":{float:"inline-start"},".float-end":{float:"inline-end"},".float-right":{float:"right"},".float-left":{float:"left"},".float-none":{float:"none"}})},clear:({addUtilities:r})=>{r({".clear-start":{clear:"inline-start"},".clear-end":{clear:"inline-end"},".clear-left":{clear:"left"},".clear-right":{clear:"right"},".clear-both":{clear:"both"},".clear-none":{clear:"none"}})},margin:L("margin",[["m",["margin"]],[["mx",["margin-left","margin-right"]],["my",["margin-top","margin-bottom"]]],[["ms",["margin-inline-start"]],["me",["margin-inline-end"]],["mt",["margin-top"]],["mr",["margin-right"]],["mb",["margin-bottom"]],["ml",["margin-left"]]]],{supportsNegativeValues:!0}),boxSizing:({addUtilities:r})=>{r({".box-border":{"box-sizing":"border-box"},".box-content":{"box-sizing":"content-box"}})},lineClamp:({matchUtilities:r,addUtilities:e,theme:t})=>{r({"line-clamp":i=>({overflow:"hidden",display:"-webkit-box","-webkit-box-orient":"vertical","-webkit-line-clamp":`${i}`})},{values:t("lineClamp")}),e({".line-clamp-none":{overflow:"visible",display:"block","-webkit-box-orient":"horizontal","-webkit-line-clamp":"none"}})},display:({addUtilities:r})=>{r({".block":{display:"block"},".inline-block":{display:"inline-block"},".inline":{display:"inline"},".flex":{display:"flex"},".inline-flex":{display:"inline-flex"},".table":{display:"table"},".inline-table":{display:"inline-table"},".table-caption":{display:"table-caption"},".table-cell":{display:"table-cell"},".table-column":{display:"table-column"},".table-column-group":{display:"table-column-group"},".table-footer-group":{display:"table-footer-group"},".table-header-group":{display:"table-header-group"},".table-row-group":{display:"table-row-group"},".table-row":{display:"table-row"},".flow-root":{display:"flow-root"},".grid":{display:"grid"},".inline-grid":{display:"inline-grid"},".contents":{display:"contents"},".list-item":{displa
y:"list-item"},".hidden":{display:"none"}})},aspectRatio:L("aspectRatio",[["aspect",["aspect-ratio"]]]),size:L("size",[["size",["width","height"]]]),height:L("height",[["h",["height"]]]),maxHeight:L("maxHeight",[["max-h",["maxHeight"]]]),minHeight:L("minHeight",[["min-h",["minHeight"]]]),width:L("width",[["w",["width"]]]),minWidth:L("minWidth",[["min-w",["minWidth"]]]),maxWidth:L("maxWidth",[["max-w",["maxWidth"]]]),flex:L("flex"),flexShrink:L("flexShrink",[["flex-shrink",["flex-shrink"]],["shrink",["flex-shrink"]]]),flexGrow:L("flexGrow",[["flex-grow",["flex-grow"]],["grow",["flex-grow"]]]),flexBasis:L("flexBasis",[["basis",["flex-basis"]]]),tableLayout:({addUtilities:r})=>{r({".table-auto":{"table-layout":"auto"},".table-fixed":{"table-layout":"fixed"}})},captionSide:({addUtilities:r})=>{r({".caption-top":{"caption-side":"top"},".caption-bottom":{"caption-side":"bottom"}})},borderCollapse:({addUtilities:r})=>{r({".border-collapse":{"border-collapse":"collapse"},".border-separate":{"border-collapse":"separate"}})},borderSpacing:({addDefaults:r,matchUtilities:e,theme:t})=>{r("border-spacing",{"--tw-border-spacing-x":0,"--tw-border-spacing-y":0}),e({"border-spacing":i=>({"--tw-border-spacing-x":i,"--tw-border-spacing-y":i,"@defaults border-spacing":{},"border-spacing":"var(--tw-border-spacing-x) var(--tw-border-spacing-y)"}),"border-spacing-x":i=>({"--tw-border-spacing-x":i,"@defaults border-spacing":{},"border-spacing":"var(--tw-border-spacing-x) var(--tw-border-spacing-y)"}),"border-spacing-y":i=>({"--tw-border-spacing-y":i,"@defaults border-spacing":{},"border-spacing":"var(--tw-border-spacing-x) var(--tw-border-spacing-y)"})},{values:t("borderSpacing")})},transformOrigin:L("transformOrigin",[["origin",["transformOrigin"]]]),translate:L("translate",[[["translate-x",[["@defaults transform",{}],"--tw-translate-x",["transform",Xe]]],["translate-y",[["@defaults transform",{}],"--tw-translate-y",["transform",Xe]]]]],{supportsNegativeValues:!0}),rotate:L("rotate",[["rotate",[["@defaults transform",{}],"--tw-rotate",["transform",Xe]]]],{supportsNegativeValues:!0}),skew:L("skew",[[["skew-x",[["@defaults transform",{}],"--tw-skew-x",["transform",Xe]]],["skew-y",[["@defaults transform",{}],"--tw-skew-y",["transform",Xe]]]]],{supportsNegativeValues:!0}),scale:L("scale",[["scale",[["@defaults transform",{}],"--tw-scale-x","--tw-scale-y",["transform",Xe]]],[["scale-x",[["@defaults transform",{}],"--tw-scale-x",["transform",Xe]]],["scale-y",[["@defaults transform",{}],"--tw-scale-y",["transform",Xe]]]]],{supportsNegativeValues:!0}),transform:({addDefaults:r,addUtilities:e})=>{r("transform",{"--tw-translate-x":"0","--tw-translate-y":"0","--tw-rotate":"0","--tw-skew-x":"0","--tw-skew-y":"0","--tw-scale-x":"1","--tw-scale-y":"1"}),e({".transform":{"@defaults transform":{},transform:Xe},".transform-cpu":{transform:Xe},".transform-gpu":{transform:Xe.replace("translate(var(--tw-translate-x), var(--tw-translate-y))","translate3d(var(--tw-translate-x), var(--tw-translate-y), 0)")},".transform-none":{transform:"none"}})},animation:({matchUtilities:r,theme:e,config:t})=>{let i=a=>Te(t("prefix")+a),n=Object.fromEntries(Object.entries(e("keyframes")??{}).map(([a,s])=>[a,{[`@keyframes ${i(a)}`]:s}]));r({animate:a=>{let s=$o(a);return[...s.flatMap(o=>n[o.name]),{animation:s.map(({name:o,value:l})=>o===void 0||n[o]===void 0?l:l.replace(o,i(o))).join(", ")}]}},{values:e("animation")})},cursor:L("cursor"),touchAction:({addDefaults:r,addUtilities:e})=>{r("touch-action",{"--tw-pan-x":" ","--tw-pan-y":" 
","--tw-pinch-zoom":" "});let t="var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom)";e({".touch-auto":{"touch-action":"auto"},".touch-none":{"touch-action":"none"},".touch-pan-x":{"@defaults touch-action":{},"--tw-pan-x":"pan-x","touch-action":t},".touch-pan-left":{"@defaults touch-action":{},"--tw-pan-x":"pan-left","touch-action":t},".touch-pan-right":{"@defaults touch-action":{},"--tw-pan-x":"pan-right","touch-action":t},".touch-pan-y":{"@defaults touch-action":{},"--tw-pan-y":"pan-y","touch-action":t},".touch-pan-up":{"@defaults touch-action":{},"--tw-pan-y":"pan-up","touch-action":t},".touch-pan-down":{"@defaults touch-action":{},"--tw-pan-y":"pan-down","touch-action":t},".touch-pinch-zoom":{"@defaults touch-action":{},"--tw-pinch-zoom":"pinch-zoom","touch-action":t},".touch-manipulation":{"touch-action":"manipulation"}})},userSelect:({addUtilities:r})=>{r({".select-none":{"user-select":"none"},".select-text":{"user-select":"text"},".select-all":{"user-select":"all"},".select-auto":{"user-select":"auto"}})},resize:({addUtilities:r})=>{r({".resize-none":{resize:"none"},".resize-y":{resize:"vertical"},".resize-x":{resize:"horizontal"},".resize":{resize:"both"}})},scrollSnapType:({addDefaults:r,addUtilities:e})=>{r("scroll-snap-type",{"--tw-scroll-snap-strictness":"proximity"}),e({".snap-none":{"scroll-snap-type":"none"},".snap-x":{"@defaults scroll-snap-type":{},"scroll-snap-type":"x var(--tw-scroll-snap-strictness)"},".snap-y":{"@defaults scroll-snap-type":{},"scroll-snap-type":"y var(--tw-scroll-snap-strictness)"},".snap-both":{"@defaults scroll-snap-type":{},"scroll-snap-type":"both var(--tw-scroll-snap-strictness)"},".snap-mandatory":{"--tw-scroll-snap-strictness":"mandatory"},".snap-proximity":{"--tw-scroll-snap-strictness":"proximity"}})},scrollSnapAlign:({addUtilities:r})=>{r({".snap-start":{"scroll-snap-align":"start"},".snap-end":{"scroll-snap-align":"end"},".snap-center":{"scroll-snap-align":"center"},".snap-align-none":{"scroll-snap-align":"none"}})},scrollSnapStop:({addUtilities:r})=>{r({".snap-normal":{"scroll-snap-stop":"normal"},".snap-always":{"scroll-snap-stop":"always"}})},scrollMargin:L("scrollMargin",[["scroll-m",["scroll-margin"]],[["scroll-mx",["scroll-margin-left","scroll-margin-right"]],["scroll-my",["scroll-margin-top","scroll-margin-bottom"]]],[["scroll-ms",["scroll-margin-inline-start"]],["scroll-me",["scroll-margin-inline-end"]],["scroll-mt",["scroll-margin-top"]],["scroll-mr",["scroll-margin-right"]],["scroll-mb",["scroll-margin-bottom"]],["scroll-ml",["scroll-margin-left"]]]],{supportsNegativeValues:!0}),scrollPadding:L("scrollPadding",[["scroll-p",["scroll-padding"]],[["scroll-px",["scroll-padding-left","scroll-padding-right"]],["scroll-py",["scroll-padding-top","scroll-padding-bottom"]]],[["scroll-ps",["scroll-padding-inline-start"]],["scroll-pe",["scroll-padding-inline-end"]],["scroll-pt",["scroll-padding-top"]],["scroll-pr",["scroll-padding-right"]],["scroll-pb",["scroll-padding-bottom"]],["scroll-pl",["scroll-padding-left"]]]]),listStylePosition:({addUtilities:r})=>{r({".list-inside":{"list-style-position":"inside"},".list-outside":{"list-style-position":"outside"}})},listStyleType:L("listStyleType",[["list",["listStyleType"]]]),listStyleImage:L("listStyleImage",[["list-image",["listStyleImage"]]]),appearance:({addUtilities:r})=>{r({".appearance-none":{appearance:"none"},".appearance-auto":{appearance:"auto"}})},columns:L("columns",[["columns",["columns"]]]),breakBefore:({addUtilities:r})=>{r({".break-before-auto":{"break-before":"auto"},".break-before
-avoid":{"break-before":"avoid"},".break-before-all":{"break-before":"all"},".break-before-avoid-page":{"break-before":"avoid-page"},".break-before-page":{"break-before":"page"},".break-before-left":{"break-before":"left"},".break-before-right":{"break-before":"right"},".break-before-column":{"break-before":"column"}})},breakInside:({addUtilities:r})=>{r({".break-inside-auto":{"break-inside":"auto"},".break-inside-avoid":{"break-inside":"avoid"},".break-inside-avoid-page":{"break-inside":"avoid-page"},".break-inside-avoid-column":{"break-inside":"avoid-column"}})},breakAfter:({addUtilities:r})=>{r({".break-after-auto":{"break-after":"auto"},".break-after-avoid":{"break-after":"avoid"},".break-after-all":{"break-after":"all"},".break-after-avoid-page":{"break-after":"avoid-page"},".break-after-page":{"break-after":"page"},".break-after-left":{"break-after":"left"},".break-after-right":{"break-after":"right"},".break-after-column":{"break-after":"column"}})},gridAutoColumns:L("gridAutoColumns",[["auto-cols",["gridAutoColumns"]]]),gridAutoFlow:({addUtilities:r})=>{r({".grid-flow-row":{gridAutoFlow:"row"},".grid-flow-col":{gridAutoFlow:"column"},".grid-flow-dense":{gridAutoFlow:"dense"},".grid-flow-row-dense":{gridAutoFlow:"row dense"},".grid-flow-col-dense":{gridAutoFlow:"column dense"}})},gridAutoRows:L("gridAutoRows",[["auto-rows",["gridAutoRows"]]]),gridTemplateColumns:L("gridTemplateColumns",[["grid-cols",["gridTemplateColumns"]]]),gridTemplateRows:L("gridTemplateRows",[["grid-rows",["gridTemplateRows"]]]),flexDirection:({addUtilities:r})=>{r({".flex-row":{"flex-direction":"row"},".flex-row-reverse":{"flex-direction":"row-reverse"},".flex-col":{"flex-direction":"column"},".flex-col-reverse":{"flex-direction":"column-reverse"}})},flexWrap:({addUtilities:r})=>{r({".flex-wrap":{"flex-wrap":"wrap"},".flex-wrap-reverse":{"flex-wrap":"wrap-reverse"},".flex-nowrap":{"flex-wrap":"nowrap"}})},placeContent:({addUtilities:r})=>{r({".place-content-center":{"place-content":"center"},".place-content-start":{"place-content":"start"},".place-content-end":{"place-content":"end"},".place-content-between":{"place-content":"space-between"},".place-content-around":{"place-content":"space-around"},".place-content-evenly":{"place-content":"space-evenly"},".place-content-baseline":{"place-content":"baseline"},".place-content-stretch":{"place-content":"stretch"}})},placeItems:({addUtilities:r})=>{r({".place-items-start":{"place-items":"start"},".place-items-end":{"place-items":"end"},".place-items-center":{"place-items":"center"},".place-items-baseline":{"place-items":"baseline"},".place-items-stretch":{"place-items":"stretch"}})},alignContent:({addUtilities:r})=>{r({".content-normal":{"align-content":"normal"},".content-center":{"align-content":"center"},".content-start":{"align-content":"flex-start"},".content-end":{"align-content":"flex-end"},".content-between":{"align-content":"space-between"},".content-around":{"align-content":"space-around"},".content-evenly":{"align-content":"space-evenly"},".content-baseline":{"align-content":"baseline"},".content-stretch":{"align-content":"stretch"}})},alignItems:({addUtilities:r})=>{r({".items-start":{"align-items":"flex-start"},".items-end":{"align-items":"flex-end"},".items-center":{"align-items":"center"},".items-baseline":{"align-items":"baseline"},".items-stretch":{"align-items":"stretch"}})},justifyContent:({addUtilities:r})=>{r({".justify-normal":{"justify-content":"normal"},".justify-start":{"justify-content":"flex-start"},".justify-end":{"justify-content":"flex-
end"},".justify-center":{"justify-content":"center"},".justify-between":{"justify-content":"space-between"},".justify-around":{"justify-content":"space-around"},".justify-evenly":{"justify-content":"space-evenly"},".justify-stretch":{"justify-content":"stretch"}})},justifyItems:({addUtilities:r})=>{r({".justify-items-start":{"justify-items":"start"},".justify-items-end":{"justify-items":"end"},".justify-items-center":{"justify-items":"center"},".justify-items-stretch":{"justify-items":"stretch"}})},gap:L("gap",[["gap",["gap"]],[["gap-x",["columnGap"]],["gap-y",["rowGap"]]]]),space:({matchUtilities:r,addUtilities:e,theme:t})=>{r({"space-x":i=>(i=i==="0"?"0px":i,{"& > :not([hidden]) ~ :not([hidden])":{"--tw-space-x-reverse":"0","margin-right":`calc(${i} * var(--tw-space-x-reverse))`,"margin-left":`calc(${i} * calc(1 - var(--tw-space-x-reverse)))`}}),"space-y":i=>(i=i==="0"?"0px":i,{"& > :not([hidden]) ~ :not([hidden])":{"--tw-space-y-reverse":"0","margin-top":`calc(${i} * calc(1 - var(--tw-space-y-reverse)))`,"margin-bottom":`calc(${i} * var(--tw-space-y-reverse))`}})},{values:t("space"),supportsNegativeValues:!0}),e({".space-y-reverse > :not([hidden]) ~ :not([hidden])":{"--tw-space-y-reverse":"1"},".space-x-reverse > :not([hidden]) ~ :not([hidden])":{"--tw-space-x-reverse":"1"}})},divideWidth:({matchUtilities:r,addUtilities:e,theme:t})=>{r({"divide-x":i=>(i=i==="0"?"0px":i,{"& > :not([hidden]) ~ :not([hidden])":{"@defaults border-width":{},"--tw-divide-x-reverse":"0","border-right-width":`calc(${i} * var(--tw-divide-x-reverse))`,"border-left-width":`calc(${i} * calc(1 - var(--tw-divide-x-reverse)))`}}),"divide-y":i=>(i=i==="0"?"0px":i,{"& > :not([hidden]) ~ :not([hidden])":{"@defaults border-width":{},"--tw-divide-y-reverse":"0","border-top-width":`calc(${i} * calc(1 - var(--tw-divide-y-reverse)))`,"border-bottom-width":`calc(${i} * var(--tw-divide-y-reverse))`}})},{values:t("divideWidth"),type:["line-width","length","any"]}),e({".divide-y-reverse > :not([hidden]) ~ :not([hidden])":{"@defaults border-width":{},"--tw-divide-y-reverse":"1"},".divide-x-reverse > :not([hidden]) ~ :not([hidden])":{"@defaults border-width":{},"--tw-divide-x-reverse":"1"}})},divideStyle:({addUtilities:r})=>{r({".divide-solid > :not([hidden]) ~ :not([hidden])":{"border-style":"solid"},".divide-dashed > :not([hidden]) ~ :not([hidden])":{"border-style":"dashed"},".divide-dotted > :not([hidden]) ~ :not([hidden])":{"border-style":"dotted"},".divide-double > :not([hidden]) ~ :not([hidden])":{"border-style":"double"},".divide-none > :not([hidden]) ~ :not([hidden])":{"border-style":"none"}})},divideColor:({matchUtilities:r,theme:e,corePlugins:t})=>{r({divide:i=>t("divideOpacity")?{["& > :not([hidden]) ~ :not([hidden])"]:Ae({color:i,property:"border-color",variable:"--tw-divide-opacity"})}:{["& > :not([hidden]) ~ :not([hidden])"]:{"border-color":X(i)}}},{values:(({DEFAULT:i,...n})=>n)(xe(e("divideColor"))),type:["color","any"]})},divideOpacity:({matchUtilities:r,theme:e})=>{r({"divide-opacity":t=>({["& > :not([hidden]) ~ 
:not([hidden])"]:{"--tw-divide-opacity":t}})},{values:e("divideOpacity")})},placeSelf:({addUtilities:r})=>{r({".place-self-auto":{"place-self":"auto"},".place-self-start":{"place-self":"start"},".place-self-end":{"place-self":"end"},".place-self-center":{"place-self":"center"},".place-self-stretch":{"place-self":"stretch"}})},alignSelf:({addUtilities:r})=>{r({".self-auto":{"align-self":"auto"},".self-start":{"align-self":"flex-start"},".self-end":{"align-self":"flex-end"},".self-center":{"align-self":"center"},".self-stretch":{"align-self":"stretch"},".self-baseline":{"align-self":"baseline"}})},justifySelf:({addUtilities:r})=>{r({".justify-self-auto":{"justify-self":"auto"},".justify-self-start":{"justify-self":"start"},".justify-self-end":{"justify-self":"end"},".justify-self-center":{"justify-self":"center"},".justify-self-stretch":{"justify-self":"stretch"}})},overflow:({addUtilities:r})=>{r({".overflow-auto":{overflow:"auto"},".overflow-hidden":{overflow:"hidden"},".overflow-clip":{overflow:"clip"},".overflow-visible":{overflow:"visible"},".overflow-scroll":{overflow:"scroll"},".overflow-x-auto":{"overflow-x":"auto"},".overflow-y-auto":{"overflow-y":"auto"},".overflow-x-hidden":{"overflow-x":"hidden"},".overflow-y-hidden":{"overflow-y":"hidden"},".overflow-x-clip":{"overflow-x":"clip"},".overflow-y-clip":{"overflow-y":"clip"},".overflow-x-visible":{"overflow-x":"visible"},".overflow-y-visible":{"overflow-y":"visible"},".overflow-x-scroll":{"overflow-x":"scroll"},".overflow-y-scroll":{"overflow-y":"scroll"}})},overscrollBehavior:({addUtilities:r})=>{r({".overscroll-auto":{"overscroll-behavior":"auto"},".overscroll-contain":{"overscroll-behavior":"contain"},".overscroll-none":{"overscroll-behavior":"none"},".overscroll-y-auto":{"overscroll-behavior-y":"auto"},".overscroll-y-contain":{"overscroll-behavior-y":"contain"},".overscroll-y-none":{"overscroll-behavior-y":"none"},".overscroll-x-auto":{"overscroll-behavior-x":"auto"},".overscroll-x-contain":{"overscroll-behavior-x":"contain"},".overscroll-x-none":{"overscroll-behavior-x":"none"}})},scrollBehavior:({addUtilities:r})=>{r({".scroll-auto":{"scroll-behavior":"auto"},".scroll-smooth":{"scroll-behavior":"smooth"}})},textOverflow:({addUtilities:r})=>{r({".truncate":{overflow:"hidden","text-overflow":"ellipsis","white-space":"nowrap"},".overflow-ellipsis":{"text-overflow":"ellipsis"},".text-ellipsis":{"text-overflow":"ellipsis"},".text-clip":{"text-overflow":"clip"}})},hyphens:({addUtilities:r})=>{r({".hyphens-none":{hyphens:"none"},".hyphens-manual":{hyphens:"manual"},".hyphens-auto":{hyphens:"auto"}})},whitespace:({addUtilities:r})=>{r({".whitespace-normal":{"white-space":"normal"},".whitespace-nowrap":{"white-space":"nowrap"},".whitespace-pre":{"white-space":"pre"},".whitespace-pre-line":{"white-space":"pre-line"},".whitespace-pre-wrap":{"white-space":"pre-wrap"},".whitespace-break-spaces":{"white-space":"break-spaces"}})},textWrap:({addUtilities:r})=>{r({".text-wrap":{"text-wrap":"wrap"},".text-nowrap":{"text-wrap":"nowrap"},".text-balance":{"text-wrap":"balance"},".text-pretty":{"text-wrap":"pretty"}})},wordBreak:({addUtilities:r})=>{r({".break-normal":{"overflow-wrap":"normal","word-break":"normal"},".break-words":{"overflow-wrap":"break-word"},".break-all":{"word-break":"break-all"},".break-keep":{"word-break":"keep-all"}})},borderRadius:L("borderRadius",[["rounded",["border-radius"]],[["rounded-s",["border-start-start-radius","border-end-start-radius"]],["rounded-e",["border-start-end-radius","border-end-end-radius"]],["rounded-t",
["border-top-left-radius","border-top-right-radius"]],["rounded-r",["border-top-right-radius","border-bottom-right-radius"]],["rounded-b",["border-bottom-right-radius","border-bottom-left-radius"]],["rounded-l",["border-top-left-radius","border-bottom-left-radius"]]],[["rounded-ss",["border-start-start-radius"]],["rounded-se",["border-start-end-radius"]],["rounded-ee",["border-end-end-radius"]],["rounded-es",["border-end-start-radius"]],["rounded-tl",["border-top-left-radius"]],["rounded-tr",["border-top-right-radius"]],["rounded-br",["border-bottom-right-radius"]],["rounded-bl",["border-bottom-left-radius"]]]]),borderWidth:L("borderWidth",[["border",[["@defaults border-width",{}],"border-width"]],[["border-x",[["@defaults border-width",{}],"border-left-width","border-right-width"]],["border-y",[["@defaults border-width",{}],"border-top-width","border-bottom-width"]]],[["border-s",[["@defaults border-width",{}],"border-inline-start-width"]],["border-e",[["@defaults border-width",{}],"border-inline-end-width"]],["border-t",[["@defaults border-width",{}],"border-top-width"]],["border-r",[["@defaults border-width",{}],"border-right-width"]],["border-b",[["@defaults border-width",{}],"border-bottom-width"]],["border-l",[["@defaults border-width",{}],"border-left-width"]]]],{type:["line-width","length"]}),borderStyle:({addUtilities:r})=>{r({".border-solid":{"border-style":"solid"},".border-dashed":{"border-style":"dashed"},".border-dotted":{"border-style":"dotted"},".border-double":{"border-style":"double"},".border-hidden":{"border-style":"hidden"},".border-none":{"border-style":"none"}})},borderColor:({matchUtilities:r,theme:e,corePlugins:t})=>{r({border:i=>t("borderOpacity")?Ae({color:i,property:"border-color",variable:"--tw-border-opacity"}):{"border-color":X(i)}},{values:(({DEFAULT:i,...n})=>n)(xe(e("borderColor"))),type:["color","any"]}),r({"border-x":i=>t("borderOpacity")?Ae({color:i,property:["border-left-color","border-right-color"],variable:"--tw-border-opacity"}):{"border-left-color":X(i),"border-right-color":X(i)},"border-y":i=>t("borderOpacity")?Ae({color:i,property:["border-top-color","border-bottom-color"],variable:"--tw-border-opacity"}):{"border-top-color":X(i),"border-bottom-color":X(i)}},{values:(({DEFAULT:i,...n})=>n)(xe(e("borderColor"))),type:["color","any"]}),r({"border-s":i=>t("borderOpacity")?Ae({color:i,property:"border-inline-start-color",variable:"--tw-border-opacity"}):{"border-inline-start-color":X(i)},"border-e":i=>t("borderOpacity")?Ae({color:i,property:"border-inline-end-color",variable:"--tw-border-opacity"}):{"border-inline-end-color":X(i)},"border-t":i=>t("borderOpacity")?Ae({color:i,property:"border-top-color",variable:"--tw-border-opacity"}):{"border-top-color":X(i)},"border-r":i=>t("borderOpacity")?Ae({color:i,property:"border-right-color",variable:"--tw-border-opacity"}):{"border-right-color":X(i)},"border-b":i=>t("borderOpacity")?Ae({color:i,property:"border-bottom-color",variable:"--tw-border-opacity"}):{"border-bottom-color":X(i)},"border-l":i=>t("borderOpacity")?Ae({color:i,property:"border-left-color",variable:"--tw-border-opacity"}):{"border-left-color":X(i)}},{values:(({DEFAULT:i,...n})=>n)(xe(e("borderColor"))),type:["color","any"]})},borderOpacity:L("borderOpacity",[["border-opacity",["--tw-border-opacity"]]]),backgroundColor:({matchUtilities:r,theme:e,corePlugins:t})=>{r({bg:i=>t("backgroundOpacity")?Ae({color:i,property:"background-color",variable:"--tw-bg-opacity"}):{"background-color":X(i)}},{values:xe(e("backgroundColor")),type:["color","any"
]})},backgroundOpacity:L("backgroundOpacity",[["bg-opacity",["--tw-bg-opacity"]]]),backgroundImage:L("backgroundImage",[["bg",["background-image"]]],{type:["lookup","image","url"]}),gradientColorStops:(()=>{function r(e){return Ze(e,0,"rgb(255 255 255 / 0)")}return function({matchUtilities:e,theme:t,addDefaults:i}){i("gradient-color-stops",{"--tw-gradient-from-position":" ","--tw-gradient-via-position":" ","--tw-gradient-to-position":" "});let n={values:xe(t("gradientColorStops")),type:["color","any"]},a={values:t("gradientColorStopPositions"),type:["length","percentage"]};e({from:s=>{let o=r(s);return{"@defaults gradient-color-stops":{},"--tw-gradient-from":`${X(s)} var(--tw-gradient-from-position)`,"--tw-gradient-to":`${o} var(--tw-gradient-to-position)`,"--tw-gradient-stops":"var(--tw-gradient-from), var(--tw-gradient-to)"}}},n),e({from:s=>({"--tw-gradient-from-position":s})},a),e({via:s=>{let o=r(s);return{"@defaults gradient-color-stops":{},"--tw-gradient-to":`${o} var(--tw-gradient-to-position)`,"--tw-gradient-stops":`var(--tw-gradient-from), ${X(s)} var(--tw-gradient-via-position), var(--tw-gradient-to)`}}},n),e({via:s=>({"--tw-gradient-via-position":s})},a),e({to:s=>({"@defaults gradient-color-stops":{},"--tw-gradient-to":`${X(s)} var(--tw-gradient-to-position)`})},n),e({to:s=>({"--tw-gradient-to-position":s})},a)}})(),boxDecorationBreak:({addUtilities:r})=>{r({".decoration-slice":{"box-decoration-break":"slice"},".decoration-clone":{"box-decoration-break":"clone"},".box-decoration-slice":{"box-decoration-break":"slice"},".box-decoration-clone":{"box-decoration-break":"clone"}})},backgroundSize:L("backgroundSize",[["bg",["background-size"]]],{type:["lookup","length","percentage","size"]}),backgroundAttachment:({addUtilities:r})=>{r({".bg-fixed":{"background-attachment":"fixed"},".bg-local":{"background-attachment":"local"},".bg-scroll":{"background-attachment":"scroll"}})},backgroundClip:({addUtilities:r})=>{r({".bg-clip-border":{"background-clip":"border-box"},".bg-clip-padding":{"background-clip":"padding-box"},".bg-clip-content":{"background-clip":"content-box"},".bg-clip-text":{"background-clip":"text"}})},backgroundPosition:L("backgroundPosition",[["bg",["background-position"]]],{type:["lookup",["position",{preferOnConflict:!0}]]}),backgroundRepeat:({addUtilities:r})=>{r({".bg-repeat":{"background-repeat":"repeat"},".bg-no-repeat":{"background-repeat":"no-repeat"},".bg-repeat-x":{"background-repeat":"repeat-x"},".bg-repeat-y":{"background-repeat":"repeat-y"},".bg-repeat-round":{"background-repeat":"round"},".bg-repeat-space":{"background-repeat":"space"}})},backgroundOrigin:({addUtilities:r})=>{r({".bg-origin-border":{"background-origin":"border-box"},".bg-origin-padding":{"background-origin":"padding-box"},".bg-origin-content":{"background-origin":"content-box"}})},fill:({matchUtilities:r,theme:e})=>{r({fill:t=>({fill:X(t)})},{values:xe(e("fill")),type:["color","any"]})},stroke:({matchUtilities:r,theme:e})=>{r({stroke:t=>({stroke:X(t)})},{values:xe(e("stroke")),type:["color","url","any"]})},strokeWidth:L("strokeWidth",[["stroke",["stroke-width"]]],{type:["length","number","percentage"]}),objectFit:({addUtilities:r})=>{r({".object-contain":{"object-fit":"contain"},".object-cover":{"object-fit":"cover"},".object-fill":{"object-fit":"fill"},".object-none":{"object-fit":"none"},".object-scale-down":{"object-fit":"scale-down"}})},objectPosition:L("objectPosition",[["object",["object-position"]]]),padding:L("padding",[["p",["padding"]],[["px",["padding-left","padding-right"]],["py",
["padding-top","padding-bottom"]]],[["ps",["padding-inline-start"]],["pe",["padding-inline-end"]],["pt",["padding-top"]],["pr",["padding-right"]],["pb",["padding-bottom"]],["pl",["padding-left"]]]]),textAlign:({addUtilities:r})=>{r({".text-left":{"text-align":"left"},".text-center":{"text-align":"center"},".text-right":{"text-align":"right"},".text-justify":{"text-align":"justify"},".text-start":{"text-align":"start"},".text-end":{"text-align":"end"}})},textIndent:L("textIndent",[["indent",["text-indent"]]],{supportsNegativeValues:!0}),verticalAlign:({addUtilities:r,matchUtilities:e})=>{r({".align-baseline":{"vertical-align":"baseline"},".align-top":{"vertical-align":"top"},".align-middle":{"vertical-align":"middle"},".align-bottom":{"vertical-align":"bottom"},".align-text-top":{"vertical-align":"text-top"},".align-text-bottom":{"vertical-align":"text-bottom"},".align-sub":{"vertical-align":"sub"},".align-super":{"vertical-align":"super"}}),e({align:t=>({"vertical-align":t})})},fontFamily:({matchUtilities:r,theme:e})=>{r({font:t=>{let[i,n={}]=Array.isArray(t)&&ke(t[1])?t:[t],{fontFeatureSettings:a,fontVariationSettings:s}=n;return{"font-family":Array.isArray(i)?i.join(", "):i,...a===void 0?{}:{"font-feature-settings":a},...s===void 0?{}:{"font-variation-settings":s}}}},{values:e("fontFamily"),type:["lookup","generic-name","family-name"]})},fontSize:({matchUtilities:r,theme:e})=>{r({text:(t,{modifier:i})=>{let[n,a]=Array.isArray(t)?t:[t];if(i)return{"font-size":n,"line-height":i};let{lineHeight:s,letterSpacing:o,fontWeight:l}=ke(a)?a:{lineHeight:a};return{"font-size":n,...s===void 0?{}:{"line-height":s},...o===void 0?{}:{"letter-spacing":o},...l===void 0?{}:{"font-weight":l}}}},{values:e("fontSize"),modifiers:e("lineHeight"),type:["absolute-size","relative-size","length","percentage"]})},fontWeight:L("fontWeight",[["font",["fontWeight"]]],{type:["lookup","number","any"]}),textTransform:({addUtilities:r})=>{r({".uppercase":{"text-transform":"uppercase"},".lowercase":{"text-transform":"lowercase"},".capitalize":{"text-transform":"capitalize"},".normal-case":{"text-transform":"none"}})},fontStyle:({addUtilities:r})=>{r({".italic":{"font-style":"italic"},".not-italic":{"font-style":"normal"}})},fontVariantNumeric:({addDefaults:r,addUtilities:e})=>{let t="var(--tw-ordinal) var(--tw-slashed-zero) var(--tw-numeric-figure) var(--tw-numeric-spacing) var(--tw-numeric-fraction)";r("font-variant-numeric",{"--tw-ordinal":" ","--tw-slashed-zero":" ","--tw-numeric-figure":" ","--tw-numeric-spacing":" ","--tw-numeric-fraction":" "}),e({".normal-nums":{"font-variant-numeric":"normal"},".ordinal":{"@defaults font-variant-numeric":{},"--tw-ordinal":"ordinal","font-variant-numeric":t},".slashed-zero":{"@defaults font-variant-numeric":{},"--tw-slashed-zero":"slashed-zero","font-variant-numeric":t},".lining-nums":{"@defaults font-variant-numeric":{},"--tw-numeric-figure":"lining-nums","font-variant-numeric":t},".oldstyle-nums":{"@defaults font-variant-numeric":{},"--tw-numeric-figure":"oldstyle-nums","font-variant-numeric":t},".proportional-nums":{"@defaults font-variant-numeric":{},"--tw-numeric-spacing":"proportional-nums","font-variant-numeric":t},".tabular-nums":{"@defaults font-variant-numeric":{},"--tw-numeric-spacing":"tabular-nums","font-variant-numeric":t},".diagonal-fractions":{"@defaults font-variant-numeric":{},"--tw-numeric-fraction":"diagonal-fractions","font-variant-numeric":t},".stacked-fractions":{"@defaults 
font-variant-numeric":{},"--tw-numeric-fraction":"stacked-fractions","font-variant-numeric":t}})},lineHeight:L("lineHeight",[["leading",["lineHeight"]]]),letterSpacing:L("letterSpacing",[["tracking",["letterSpacing"]]],{supportsNegativeValues:!0}),textColor:({matchUtilities:r,theme:e,corePlugins:t})=>{r({text:i=>t("textOpacity")?Ae({color:i,property:"color",variable:"--tw-text-opacity"}):{color:X(i)}},{values:xe(e("textColor")),type:["color","any"]})},textOpacity:L("textOpacity",[["text-opacity",["--tw-text-opacity"]]]),textDecoration:({addUtilities:r})=>{r({".underline":{"text-decoration-line":"underline"},".overline":{"text-decoration-line":"overline"},".line-through":{"text-decoration-line":"line-through"},".no-underline":{"text-decoration-line":"none"}})},textDecorationColor:({matchUtilities:r,theme:e})=>{r({decoration:t=>({"text-decoration-color":X(t)})},{values:xe(e("textDecorationColor")),type:["color","any"]})},textDecorationStyle:({addUtilities:r})=>{r({".decoration-solid":{"text-decoration-style":"solid"},".decoration-double":{"text-decoration-style":"double"},".decoration-dotted":{"text-decoration-style":"dotted"},".decoration-dashed":{"text-decoration-style":"dashed"},".decoration-wavy":{"text-decoration-style":"wavy"}})},textDecorationThickness:L("textDecorationThickness",[["decoration",["text-decoration-thickness"]]],{type:["length","percentage"]}),textUnderlineOffset:L("textUnderlineOffset",[["underline-offset",["text-underline-offset"]]],{type:["length","percentage","any"]}),fontSmoothing:({addUtilities:r})=>{r({".antialiased":{"-webkit-font-smoothing":"antialiased","-moz-osx-font-smoothing":"grayscale"},".subpixel-antialiased":{"-webkit-font-smoothing":"auto","-moz-osx-font-smoothing":"auto"}})},placeholderColor:({matchUtilities:r,theme:e,corePlugins:t})=>{r({placeholder:i=>t("placeholderOpacity")?{"&::placeholder":Ae({color:i,property:"color",variable:"--tw-placeholder-opacity"})}:{"&::placeholder":{color:X(i)}}},{values:xe(e("placeholderColor")),type:["color","any"]})},placeholderOpacity:({matchUtilities:r,theme:e})=>{r({"placeholder-opacity":t=>({["&::placeholder"]:{"--tw-placeholder-opacity":t}})},{values:e("placeholderOpacity")})},caretColor:({matchUtilities:r,theme:e})=>{r({caret:t=>({"caret-color":X(t)})},{values:xe(e("caretColor")),type:["color","any"]})},accentColor:({matchUtilities:r,theme:e})=>{r({accent:t=>({"accent-color":X(t)})},{values:xe(e("accentColor")),type:["color","any"]})},opacity:L("opacity",[["opacity",["opacity"]]]),backgroundBlendMode:({addUtilities:r})=>{r({".bg-blend-normal":{"background-blend-mode":"normal"},".bg-blend-multiply":{"background-blend-mode":"multiply"},".bg-blend-screen":{"background-blend-mode":"screen"},".bg-blend-overlay":{"background-blend-mode":"overlay"},".bg-blend-darken":{"background-blend-mode":"darken"},".bg-blend-lighten":{"background-blend-mode":"lighten"},".bg-blend-color-dodge":{"background-blend-mode":"color-dodge"},".bg-blend-color-burn":{"background-blend-mode":"color-burn"},".bg-blend-hard-light":{"background-blend-mode":"hard-light"},".bg-blend-soft-light":{"background-blend-mode":"soft-light"},".bg-blend-difference":{"background-blend-mode":"difference"},".bg-blend-exclusion":{"background-blend-mode":"exclusion"},".bg-blend-hue":{"background-blend-mode":"hue"},".bg-blend-saturation":{"background-blend-mode":"saturation"},".bg-blend-color":{"background-blend-mode":"color"},".bg-blend-luminosity":{"background-blend-mode":"luminosity"}})},mixBlendMode:({addUtilities:r})=>{r({".mix-blend-normal":{"mix-blend-mode":"no
rmal"},".mix-blend-multiply":{"mix-blend-mode":"multiply"},".mix-blend-screen":{"mix-blend-mode":"screen"},".mix-blend-overlay":{"mix-blend-mode":"overlay"},".mix-blend-darken":{"mix-blend-mode":"darken"},".mix-blend-lighten":{"mix-blend-mode":"lighten"},".mix-blend-color-dodge":{"mix-blend-mode":"color-dodge"},".mix-blend-color-burn":{"mix-blend-mode":"color-burn"},".mix-blend-hard-light":{"mix-blend-mode":"hard-light"},".mix-blend-soft-light":{"mix-blend-mode":"soft-light"},".mix-blend-difference":{"mix-blend-mode":"difference"},".mix-blend-exclusion":{"mix-blend-mode":"exclusion"},".mix-blend-hue":{"mix-blend-mode":"hue"},".mix-blend-saturation":{"mix-blend-mode":"saturation"},".mix-blend-color":{"mix-blend-mode":"color"},".mix-blend-luminosity":{"mix-blend-mode":"luminosity"},".mix-blend-plus-darker":{"mix-blend-mode":"plus-darker"},".mix-blend-plus-lighter":{"mix-blend-mode":"plus-lighter"}})},boxShadow:(()=>{let r=mt("boxShadow"),e=["var(--tw-ring-offset-shadow, 0 0 #0000)","var(--tw-ring-shadow, 0 0 #0000)","var(--tw-shadow)"].join(", ");return function({matchUtilities:t,addDefaults:i,theme:n}){i("box-shadow",{"--tw-ring-offset-shadow":"0 0 #0000","--tw-ring-shadow":"0 0 #0000","--tw-shadow":"0 0 #0000","--tw-shadow-colored":"0 0 #0000"}),t({shadow:a=>{a=r(a);let s=Ji(a);for(let o of s)!o.valid||(o.color="var(--tw-shadow-color)");return{"@defaults box-shadow":{},"--tw-shadow":a==="none"?"0 0 #0000":a,"--tw-shadow-colored":a==="none"?"0 0 #0000":qf(s),"box-shadow":e}}},{values:n("boxShadow"),type:["shadow"]})}})(),boxShadowColor:({matchUtilities:r,theme:e})=>{r({shadow:t=>({"--tw-shadow-color":X(t),"--tw-shadow":"var(--tw-shadow-colored)"})},{values:xe(e("boxShadowColor")),type:["color","any"]})},outlineStyle:({addUtilities:r})=>{r({".outline-none":{outline:"2px solid transparent","outline-offset":"2px"},".outline":{"outline-style":"solid"},".outline-dashed":{"outline-style":"dashed"},".outline-dotted":{"outline-style":"dotted"},".outline-double":{"outline-style":"double"}})},outlineWidth:L("outlineWidth",[["outline",["outline-width"]]],{type:["length","number","percentage"]}),outlineOffset:L("outlineOffset",[["outline-offset",["outline-offset"]]],{type:["length","number","percentage","any"],supportsNegativeValues:!0}),outlineColor:({matchUtilities:r,theme:e})=>{r({outline:t=>({"outline-color":X(t)})},{values:xe(e("outlineColor")),type:["color","any"]})},ringWidth:({matchUtilities:r,addDefaults:e,addUtilities:t,theme:i,config:n})=>{let a=(()=>{if(we(n(),"respectDefaultRingColorOpacity"))return i("ringColor.DEFAULT");let s=i("ringOpacity.DEFAULT","0.5");return i("ringColor")?.DEFAULT?Ze(i("ringColor")?.DEFAULT,s,`rgb(147 197 253 / ${s})`):`rgb(147 197 253 / ${s})`})();e("ring-width",{"--tw-ring-inset":" ","--tw-ring-offset-width":i("ringOffsetWidth.DEFAULT","0px"),"--tw-ring-offset-color":i("ringOffsetColor.DEFAULT","#fff"),"--tw-ring-color":a,"--tw-ring-offset-shadow":"0 0 #0000","--tw-ring-shadow":"0 0 #0000","--tw-shadow":"0 0 #0000","--tw-shadow-colored":"0 0 #0000"}),r({ring:s=>({"@defaults ring-width":{},"--tw-ring-offset-shadow":"var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color)","--tw-ring-shadow":`var(--tw-ring-inset) 0 0 0 calc(${s} + var(--tw-ring-offset-width)) var(--tw-ring-color)`,"box-shadow":["var(--tw-ring-offset-shadow)","var(--tw-ring-shadow)","var(--tw-shadow, 0 0 #0000)"].join(", ")})},{values:i("ringWidth"),type:"length"}),t({".ring-inset":{"@defaults 
ring-width":{},"--tw-ring-inset":"inset"}})},ringColor:({matchUtilities:r,theme:e,corePlugins:t})=>{r({ring:i=>t("ringOpacity")?Ae({color:i,property:"--tw-ring-color",variable:"--tw-ring-opacity"}):{"--tw-ring-color":X(i)}},{values:Object.fromEntries(Object.entries(xe(e("ringColor"))).filter(([i])=>i!=="DEFAULT")),type:["color","any"]})},ringOpacity:r=>{let{config:e}=r;return L("ringOpacity",[["ring-opacity",["--tw-ring-opacity"]]],{filterDefault:!we(e(),"respectDefaultRingColorOpacity")})(r)},ringOffsetWidth:L("ringOffsetWidth",[["ring-offset",["--tw-ring-offset-width"]]],{type:"length"}),ringOffsetColor:({matchUtilities:r,theme:e})=>{r({"ring-offset":t=>({"--tw-ring-offset-color":X(t)})},{values:xe(e("ringOffsetColor")),type:["color","any"]})},blur:({matchUtilities:r,theme:e})=>{r({blur:t=>({"--tw-blur":t.trim()===""?" ":`blur(${t})`,"@defaults filter":{},filter:nt})},{values:e("blur")})},brightness:({matchUtilities:r,theme:e})=>{r({brightness:t=>({"--tw-brightness":`brightness(${t})`,"@defaults filter":{},filter:nt})},{values:e("brightness")})},contrast:({matchUtilities:r,theme:e})=>{r({contrast:t=>({"--tw-contrast":`contrast(${t})`,"@defaults filter":{},filter:nt})},{values:e("contrast")})},dropShadow:({matchUtilities:r,theme:e})=>{r({"drop-shadow":t=>({"--tw-drop-shadow":Array.isArray(t)?t.map(i=>`drop-shadow(${i})`).join(" "):`drop-shadow(${t})`,"@defaults filter":{},filter:nt})},{values:e("dropShadow")})},grayscale:({matchUtilities:r,theme:e})=>{r({grayscale:t=>({"--tw-grayscale":`grayscale(${t})`,"@defaults filter":{},filter:nt})},{values:e("grayscale")})},hueRotate:({matchUtilities:r,theme:e})=>{r({"hue-rotate":t=>({"--tw-hue-rotate":`hue-rotate(${t})`,"@defaults filter":{},filter:nt})},{values:e("hueRotate"),supportsNegativeValues:!0})},invert:({matchUtilities:r,theme:e})=>{r({invert:t=>({"--tw-invert":`invert(${t})`,"@defaults filter":{},filter:nt})},{values:e("invert")})},saturate:({matchUtilities:r,theme:e})=>{r({saturate:t=>({"--tw-saturate":`saturate(${t})`,"@defaults filter":{},filter:nt})},{values:e("saturate")})},sepia:({matchUtilities:r,theme:e})=>{r({sepia:t=>({"--tw-sepia":`sepia(${t})`,"@defaults filter":{},filter:nt})},{values:e("sepia")})},filter:({addDefaults:r,addUtilities:e})=>{r("filter",{"--tw-blur":" ","--tw-brightness":" ","--tw-contrast":" ","--tw-grayscale":" ","--tw-hue-rotate":" ","--tw-invert":" ","--tw-saturate":" ","--tw-sepia":" ","--tw-drop-shadow":" "}),e({".filter":{"@defaults filter":{},filter:nt},".filter-none":{filter:"none"}})},backdropBlur:({matchUtilities:r,theme:e})=>{r({"backdrop-blur":t=>({"--tw-backdrop-blur":t.trim()===""?" 
":`blur(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropBlur")})},backdropBrightness:({matchUtilities:r,theme:e})=>{r({"backdrop-brightness":t=>({"--tw-backdrop-brightness":`brightness(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropBrightness")})},backdropContrast:({matchUtilities:r,theme:e})=>{r({"backdrop-contrast":t=>({"--tw-backdrop-contrast":`contrast(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropContrast")})},backdropGrayscale:({matchUtilities:r,theme:e})=>{r({"backdrop-grayscale":t=>({"--tw-backdrop-grayscale":`grayscale(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropGrayscale")})},backdropHueRotate:({matchUtilities:r,theme:e})=>{r({"backdrop-hue-rotate":t=>({"--tw-backdrop-hue-rotate":`hue-rotate(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropHueRotate"),supportsNegativeValues:!0})},backdropInvert:({matchUtilities:r,theme:e})=>{r({"backdrop-invert":t=>({"--tw-backdrop-invert":`invert(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropInvert")})},backdropOpacity:({matchUtilities:r,theme:e})=>{r({"backdrop-opacity":t=>({"--tw-backdrop-opacity":`opacity(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropOpacity")})},backdropSaturate:({matchUtilities:r,theme:e})=>{r({"backdrop-saturate":t=>({"--tw-backdrop-saturate":`saturate(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropSaturate")})},backdropSepia:({matchUtilities:r,theme:e})=>{r({"backdrop-sepia":t=>({"--tw-backdrop-sepia":`sepia(${t})`,"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge})},{values:e("backdropSepia")})},backdropFilter:({addDefaults:r,addUtilities:e})=>{r("backdrop-filter",{"--tw-backdrop-blur":" ","--tw-backdrop-brightness":" ","--tw-backdrop-contrast":" ","--tw-backdrop-grayscale":" ","--tw-backdrop-hue-rotate":" ","--tw-backdrop-invert":" ","--tw-backdrop-opacity":" ","--tw-backdrop-saturate":" ","--tw-backdrop-sepia":" "}),e({".backdrop-filter":{"@defaults backdrop-filter":{},"-webkit-backdrop-filter":ge,"backdrop-filter":ge},".backdrop-filter-none":{"-webkit-backdrop-filter":"none","backdrop-filter":"none"}})},transitionProperty:({matchUtilities:r,theme:e})=>{let t=e("transitionTimingFunction.DEFAULT"),i=e("transitionDuration.DEFAULT");r({transition:n=>({"transition-property":n,...n==="none"?{}:{"transition-timing-function":t,"transition-duration":i}})},{values:e("transitionProperty")})},transitionDelay:L("transitionDelay",[["delay",["transitionDelay"]]]),transitionDuration:L("transitionDuration",[["duration",["transitionDuration"]]],{filterDefault:!0}),transitionTimingFunction:L("transitionTimingFunction",[["ease",["transitionTimingFunction"]]],{filterDefault:!0}),willChange:L("willChange",[["will-change",["will-change"]]]),contain:({addDefaults:r,addUtilities:e})=>{let t="var(--tw-contain-size) var(--tw-contain-layout) var(--tw-contain-paint) var(--tw-contain-style)";r("contain",{"--tw-contain-size":" ","--tw-contain-layout":" ","--tw-contain-paint":" ","--tw-contain-style":" 
"}),e({".contain-none":{contain:"none"},".contain-content":{contain:"content"},".contain-strict":{contain:"strict"},".contain-size":{"@defaults contain":{},"--tw-contain-size":"size",contain:t},".contain-inline-size":{"@defaults contain":{},"--tw-contain-size":"inline-size",contain:t},".contain-layout":{"@defaults contain":{},"--tw-contain-layout":"layout",contain:t},".contain-paint":{"@defaults contain":{},"--tw-contain-paint":"paint",contain:t},".contain-style":{"@defaults contain":{},"--tw-contain-style":"style",contain:t}})},content:L("content",[["content",["--tw-content",["content","var(--tw-content)"]]]]),forcedColorAdjust:({addUtilities:r})=>{r({".forced-color-adjust-auto":{"forced-color-adjust":"auto"},".forced-color-adjust-none":{"forced-color-adjust":"none"}})}}});function p_(r){if(r===void 0)return!1;if(r==="true"||r==="1")return!0;if(r==="false"||r==="0")return!1;if(r==="*")return!0;let e=r.split(",").map(t=>t.split(":")[0]);return e.includes("-tailwindcss")?!1:!!e.includes("tailwindcss")}var Je,yh,bh,Zn,Lo,gt,Ei,It=R(()=>{u();Je=typeof m!="undefined"?{NODE_ENV:"production",DEBUG:p_(m.env.DEBUG)}:{NODE_ENV:"production",DEBUG:!1},yh=new Map,bh=new Map,Zn=new Map,Lo=new Map,gt=new String("*"),Ei=Symbol("__NONE__")});function cr(r){let e=[],t=!1;for(let i=0;i0)}var wh,vh,d_,Mo=R(()=>{u();wh=new Map([["{","}"],["[","]"],["(",")"]]),vh=new Map(Array.from(wh.entries()).map(([r,e])=>[e,r])),d_=new Set(['"',"'","`"])});function pr(r){let[e]=xh(r);return e.forEach(([t,i])=>t.removeChild(i)),r.nodes.push(...e.map(([,t])=>t)),r}function xh(r){let e=[],t=null;for(let i of r.nodes)if(i.type==="combinator")e=e.filter(([,n])=>Bo(n).includes("jumpable")),t=null;else if(i.type==="pseudo"){h_(i)?(t=i,e.push([r,i,null])):t&&m_(i,t)?e.push([r,i,t]):t=null;for(let n of i.nodes??[]){let[a,s]=xh(n);t=s||t,e.push(...a)}}return[e,t]}function kh(r){return r.value.startsWith("::")||No[r.value]!==void 0}function h_(r){return kh(r)&&Bo(r).includes("terminal")}function m_(r,e){return r.type!=="pseudo"||kh(r)?!1:Bo(e).includes("actionable")}function Bo(r){return No[r.value]??No.__default__}var No,es=R(()=>{u();No={"::after":["terminal","jumpable"],"::backdrop":["terminal","jumpable"],"::before":["terminal","jumpable"],"::cue":["terminal"],"::cue-region":["terminal"],"::first-letter":["terminal","jumpable"],"::first-line":["terminal","jumpable"],"::grammar-error":["terminal"],"::marker":["terminal","jumpable"],"::part":["terminal","actionable"],"::placeholder":["terminal","jumpable"],"::selection":["terminal","jumpable"],"::slotted":["terminal"],"::spelling-error":["terminal"],"::target-text":["terminal"],"::file-selector-button":["terminal","actionable"],"::deep":["actionable"],"::v-deep":["actionable"],"::ng-deep":["actionable"],":after":["terminal","jumpable"],":before":["terminal","jumpable"],":first-letter":["terminal","jumpable"],":first-line":["terminal","jumpable"],":where":[],":is":[],":has":[],__default__:["terminal","actionable"]}});function dr(r,{context:e,candidate:t}){let i=e?.tailwindConfig.prefix??"",n=r.map(s=>{let o=(0,st.default)().astSync(s.format);return{...s,ast:s.respectPrefix?ur(i,o):o}}),a=st.default.root({nodes:[st.default.selector({nodes:[st.default.className({value:Te(t)})]})]});for(let{ast:s}of n)[a,s]=y_(a,s),s.walkNesting(o=>o.replaceWith(...a.nodes[0].nodes)),a=s;return a}function Ah(r){let e=[];for(;r.prev()&&r.prev().type!=="combinator";)r=r.prev();for(;r&&r.type!=="combinator";)e.push(r),r=r.next();return e}function g_(r){return 
r.sort((e,t)=>e.type==="tag"&&t.type==="class"?-1:e.type==="class"&&t.type==="tag"?1:e.type==="class"&&t.type==="pseudo"&&t.value.startsWith("::")?-1:e.type==="pseudo"&&e.value.startsWith("::")&&t.type==="class"?1:r.index(e)-r.index(t)),r}function jo(r,e){let t=!1;r.walk(i=>{if(i.type==="class"&&i.value===e)return t=!0,!1}),t||r.remove()}function ts(r,e,{context:t,candidate:i,base:n}){let a=t?.tailwindConfig?.separator??":";n=n??ve(i,a).pop();let s=(0,st.default)().astSync(r);if(s.walkClasses(f=>{f.raws&&f.value.includes(n)&&(f.raws.value=Te((0,Sh.default)(f.raws.value)))}),s.each(f=>jo(f,n)),s.length===0)return null;let o=Array.isArray(e)?dr(e,{context:t,candidate:i}):e;if(o===null)return s.toString();let l=st.default.comment({value:"/*__simple__*/"}),c=st.default.comment({value:"/*__simple__*/"});return s.walkClasses(f=>{if(f.value!==n)return;let d=f.parent,p=o.nodes[0].nodes;if(d.nodes.length===1){f.replaceWith(...p);return}let h=Ah(f);d.insertBefore(h[0],l),d.insertAfter(h[h.length-1],c);for(let v of p)d.insertBefore(h[0],v.clone());f.remove(),h=Ah(l);let b=d.index(l);d.nodes.splice(b,h.length,...g_(st.default.selector({nodes:h})).nodes),l.remove(),c.remove()}),s.walkPseudos(f=>{f.value===Fo&&f.replaceWith(f.nodes)}),s.each(f=>pr(f)),s.toString()}function y_(r,e){let t=[];return r.walkPseudos(i=>{i.value===Fo&&t.push({pseudo:i,value:i.nodes[0].toString()})}),e.walkPseudos(i=>{if(i.value!==Fo)return;let n=i.nodes[0].toString(),a=t.find(c=>c.value===n);if(!a)return;let s=[],o=i.next();for(;o&&o.type!=="combinator";)s.push(o),o=o.next();let l=o;a.pseudo.parent.insertAfter(a.pseudo,st.default.selector({nodes:s.map(c=>c.clone())})),i.remove(),s.forEach(c=>c.remove()),l&&l.type==="combinator"&&l.remove()}),[r,e]}var st,Sh,Fo,zo=R(()=>{u();st=pe(it()),Sh=pe(Rn());fr();Wn();es();zt();Fo=":merge"});function rs(r,e){let t=(0,Uo.default)().astSync(r);return t.each(i=>{i.nodes.some(a=>a.type==="combinator")&&(i.nodes=[Uo.default.pseudo({value:":is",nodes:[i.clone()]})]),pr(i)}),`${e} ${t.toString()}`}var Uo,Vo=R(()=>{u();Uo=pe(it());es()});function Ho(r){return b_.transformSync(r)}function*w_(r){let e=1/0;for(;e>=0;){let t,i=!1;if(e===1/0&&r.endsWith("]")){let s=r.indexOf("[");r[s-1]==="-"?t=s-1:r[s-1]==="/"?(t=s-1,i=!0):t=-1}else e===1/0&&r.includes("/")?(t=r.lastIndexOf("/"),i=!0):t=r.lastIndexOf("-",e);if(t<0)break;let n=r.slice(0,t),a=r.slice(i?t:t+1);e=t-1,!(n===""||a==="/")&&(yield[n,a])}}function v_(r,e){if(r.length===0||e.tailwindConfig.prefix==="")return r;for(let t of r){let[i]=t;if(i.options.respectPrefix){let n=ee.root({nodes:[t[1].clone()]}),a=t[1].raws.tailwind.classCandidate;n.walkRules(s=>{let o=a.startsWith("-");s.selector=ur(e.tailwindConfig.prefix,s.selector,o)}),t[1]=n.nodes[0]}}return r}function x_(r,e){if(r.length===0)return r;let t=[];function i(n){return n.parent&&n.parent.type==="atrule"&&n.parent.name==="keyframes"}for(let[n,a]of r){let s=ee.root({nodes:[a.clone()]});s.walkRules(o=>{if(i(o))return;let l=(0,is.default)().astSync(o.selector);l.each(c=>jo(c,e)),Wf(l,c=>c===e?`!${c}`:c),o.selector=l.toString(),o.walkDecls(c=>c.important=!0)}),t.push([{...n,important:!0},s.nodes[0]])}return t}function k_(r,e,t){if(e.length===0)return e;let i={modifier:null,value:Ei};{let[n,...a]=ve(r,"/");if(a.length>1&&(n=n+"/"+a.slice(0,-1).join("/"),a=a.slice(-1)),a.length&&!t.variantMap.has(r)&&(r=n,i.modifier=a[0],!we(t.tailwindConfig,"generalizedModifiers")))return[]}if(r.endsWith("]")&&!r.startsWith("[")){let 
n=/(.)(-?)\[(.*)\]/g.exec(r);if(n){let[,a,s,o]=n;if(a==="@"&&s==="-")return[];if(a!=="@"&&s==="")return[];r=r.replace(`${s}[${o}]`,""),i.value=o}}if(Qo(r)&&!t.variantMap.has(r)){let n=t.offsets.recordVariant(r),a=K(r.slice(1,-1)),s=ve(a,",");if(s.length>1)return[];if(!s.every(os))return[];let o=s.map((l,c)=>[t.offsets.applyParallelOffset(n,c),Oi(l.trim())]);t.variantMap.set(r,o)}if(t.variantMap.has(r)){let n=Qo(r),a=t.variantOptions.get(r)?.[Pt]??{},s=t.variantMap.get(r).slice(),o=[],l=(()=>!(n||a.respectPrefix===!1))();for(let[c,f]of e){if(c.layer==="user")continue;let d=ee.root({nodes:[f.clone()]});for(let[p,h,b]of s){let w=function(){v.raws.neededBackup||(v.raws.neededBackup=!0,v.walkRules(O=>O.raws.originalSelector=O.selector))},k=function(O){return w(),v.each(B=>{B.type==="rule"&&(B.selectors=B.selectors.map(N=>O({get className(){return Ho(N)},selector:N})))}),v},v=(b??d).clone(),y=[],S=h({get container(){return w(),v},separator:t.tailwindConfig.separator,modifySelectors:k,wrap(O){let B=v.nodes;v.removeAll(),O.append(B),v.append(O)},format(O){y.push({format:O,respectPrefix:l})},args:i});if(Array.isArray(S)){for(let[O,B]of S.entries())s.push([t.offsets.applyParallelOffset(p,O),B,v.clone()]);continue}if(typeof S=="string"&&y.push({format:S,respectPrefix:l}),S===null)continue;v.raws.neededBackup&&(delete v.raws.neededBackup,v.walkRules(O=>{let B=O.raws.originalSelector;if(!B||(delete O.raws.originalSelector,B===O.selector))return;let N=O.selector,T=(0,is.default)(F=>{F.walkClasses(Y=>{Y.value=`${r}${t.tailwindConfig.separator}${Y.value}`})}).processSync(B);y.push({format:N.replace(T,"&"),respectPrefix:l}),O.selector=B})),v.nodes[0].raws.tailwind={...v.nodes[0].raws.tailwind,parentLayer:c.layer};let E=[{...c,sort:t.offsets.applyVariantOffset(c.sort,p,Object.assign(i,t.variantOptions.get(r))),collectedFormats:(c.collectedFormats??[]).concat(y)},v.nodes[0]];o.push(E)}}return o}return[]}function Wo(r,e,t={}){return!ke(r)&&!Array.isArray(r)?[[r],t]:Array.isArray(r)?Wo(r[0],e,r[1]):(e.has(r)||e.set(r,lr(r)),[e.get(r),t])}function A_(r){return S_.test(r)}function C_(r){if(!r.includes("://"))return!1;try{let e=new URL(r);return e.scheme!==""&&e.host!==""}catch(e){return!1}}function Ch(r){let e=!0;return r.walkDecls(t=>{if(!_h(t.prop,t.value))return e=!1,!1}),e}function _h(r,e){if(C_(`${r}:${e}`))return!1;try{return ee.parse(`a{${r}:${e}}`).toResult(),!0}catch(t){return!1}}function __(r,e){let[,t,i]=r.match(/^\[([a-zA-Z0-9-_]+):(\S+)\]$/)??[];if(i===void 0||!A_(t)||!cr(i))return null;let n=K(i,{property:t});return _h(t,n)?[[{sort:e.offsets.arbitraryProperty(r),layer:"utilities",options:{respectImportant:!0}},()=>({[Do(r)]:{[t]:n}})]]:null}function*E_(r,e){e.candidateRuleMap.has(r)&&(yield[e.candidateRuleMap.get(r),"DEFAULT"]),yield*function*(o){o!==null&&(yield[o,"DEFAULT"])}(__(r,e));let t=r,i=!1,n=e.tailwindConfig.prefix,a=n.length,s=t.startsWith(n)||t.startsWith(`-${n}`);t[a]==="-"&&s&&(i=!0,t=n+t.slice(a+1)),i&&e.candidateRuleMap.has(t)&&(yield[e.candidateRuleMap.get(t),"-DEFAULT"]);for(let[o,l]of w_(t))e.candidateRuleMap.has(o)&&(yield[e.candidateRuleMap.get(o),i?`-${l}`:l])}function O_(r,e){return r===gt?[gt]:ve(r,e)}function*T_(r,e){for(let t of r)t[1].raws.tailwind={...t[1].raws.tailwind,classCandidate:e,preserveSource:t[0].options?.preserveSource??!1},yield t}function*Go(r,e){let t=e.tailwindConfig.separator,[i,...n]=O_(r,t).reverse(),a=!1;i.startsWith("!")&&(a=!0,i=i.slice(1));for(let s of E_(i,e)){let o=[],l=new Map,[c,f]=s,d=c.length===1;for(let[p,h]of c){let b=[];if(typeof 
h=="function")for(let v of[].concat(h(f,{isOnlyPlugin:d}))){let[y,w]=Wo(v,e.postCssNodeCache);for(let k of y)b.push([{...p,options:{...p.options,...w}},k])}else if(f==="DEFAULT"||f==="-DEFAULT"){let v=h,[y,w]=Wo(v,e.postCssNodeCache);for(let k of y)b.push([{...p,options:{...p.options,...w}},k])}if(b.length>0){let v=Array.from(Zs(p.options?.types??[],f,p.options??{},e.tailwindConfig)).map(([y,w])=>w);v.length>0&&l.set(b,v),o.push(b)}}if(Qo(f)){if(o.length>1){let b=function(y){return y.length===1?y[0]:y.find(w=>{let k=l.get(w);return w.some(([{options:S},E])=>Ch(E)?S.types.some(({type:O,preferOnConflict:B})=>k.includes(O)&&B):!1)})},[p,h]=o.reduce((y,w)=>(w.some(([{options:S}])=>S.types.some(({type:E})=>E==="any"))?y[0].push(w):y[1].push(w),y),[[],[]]),v=b(h)??b(p);if(v)o=[v];else{let y=o.map(k=>new Set([...l.get(k)??[]]));for(let k of y)for(let S of k){let E=!1;for(let O of y)k!==O&&O.has(S)&&(O.delete(S),E=!0);E&&k.delete(S)}let w=[];for(let[k,S]of y.entries())for(let E of S){let O=o[k].map(([,B])=>B).flat().map(B=>B.toString().split(` -`).slice(1,-1).map(N=>N.trim()).map(N=>` ${N}`).join(` -`)).join(` - -`);w.push(` Use \`${r.replace("[",`[${E}:`)}\` for \`${O.trim()}\``);break}G.warn([`The class \`${r}\` is ambiguous and matches multiple utilities.`,...w,`If this is content and not a class, replace it with \`${r.replace("[","[").replace("]","]")}\` to silence this warning.`]);continue}}o=o.map(p=>p.filter(h=>Ch(h[1])))}o=o.flat(),o=Array.from(T_(o,i)),o=v_(o,e),a&&(o=x_(o,i));for(let p of n)o=k_(p,o,e);for(let p of o)p[1].raws.tailwind={...p[1].raws.tailwind,candidate:r},p=R_(p,{context:e,candidate:r}),p!==null&&(yield p)}}function R_(r,{context:e,candidate:t}){if(!r[0].collectedFormats)return r;let i=!0,n;try{n=dr(r[0].collectedFormats,{context:e,candidate:t})}catch{return null}let a=ee.root({nodes:[r[1].clone()]});return a.walkRules(s=>{if(!ns(s))try{let o=ts(s.selector,n,{candidate:t,context:e});if(o===null){s.remove();return}s.selector=o}catch{return i=!1,!1}}),!i||a.nodes.length===0?null:(r[1]=a.nodes[0],r)}function ns(r){return r.parent&&r.parent.type==="atrule"&&r.parent.name==="keyframes"}function P_(r){if(r===!0)return e=>{ns(e)||e.walkDecls(t=>{t.parent.type==="rule"&&!ns(t.parent)&&(t.important=!0)})};if(typeof r=="string")return e=>{ns(e)||(e.selectors=e.selectors.map(t=>rs(t,r)))}}function ss(r,e,t=!1){let i=[],n=P_(e.tailwindConfig.important);for(let a of r){if(e.notClassCache.has(a))continue;if(e.candidateRuleCache.has(a)){i=i.concat(Array.from(e.candidateRuleCache.get(a)));continue}let s=Array.from(Go(a,e));if(s.length===0){e.notClassCache.add(a);continue}e.classCache.set(a,s);let o=e.candidateRuleCache.get(a)??new Set;e.candidateRuleCache.set(a,o);for(let l of s){let[{sort:c,options:f},d]=l;if(f.respectImportant&&n){let h=ee.root({nodes:[d.clone()]});h.walkRules(n),d=h.nodes[0]}let p=[c,t?d.clone():d];o.add(p),e.ruleCache.add(p),i.push(p)}}return i}function Qo(r){return r.startsWith("[")&&r.endsWith("]")}var is,b_,S_,as=R(()=>{u();Ot();is=pe(it());Io();Kt();Wn();Fr();Be();It();zo();qo();Br();_i();Mo();zt();ct();Vo();b_=(0,is.default)(r=>r.first.filter(({type:e})=>e==="class").pop().value);S_=/^[a-z_-]/});var Eh,Oh=R(()=>{u();Eh={}});function I_(r){try{return Eh.createHash("md5").update(r,"utf-8").digest("binary")}catch(e){return""}}function Th(r,e){let t=e.toString();if(!t.includes("@tailwind"))return!1;let i=Lo.get(r),n=I_(t),a=i!==n;return Lo.set(r,n),a}var Rh=R(()=>{u();Oh();It()});function ls(r){return(r>0n)-(r<0n)}var Ph=R(()=>{u()});function Ih(r,e){let 
t=0n,i=0n;for(let[n,a]of e)r&n&&(t=t|n,i=i|a);return r&~t|i}var Dh=R(()=>{u()});function qh(r){let e=null;for(let t of r)e=e??t,e=e>t?e:t;return e}function D_(r,e){let t=r.length,i=e.length,n=t{u();Ph();Dh();Yo=class{constructor(){this.offsets={defaults:0n,base:0n,components:0n,utilities:0n,variants:0n,user:0n},this.layerPositions={defaults:0n,base:1n,components:2n,utilities:3n,user:4n,variants:5n},this.reservedVariantBits=0n,this.variantOffsets=new Map}create(e){return{layer:e,parentLayer:e,arbitrary:0n,variants:0n,parallelIndex:0n,index:this.offsets[e]++,propertyOffset:0n,property:"",options:[]}}arbitraryProperty(e){return{...this.create("utilities"),arbitrary:1n,property:e}}forVariant(e,t=0){let i=this.variantOffsets.get(e);if(i===void 0)throw new Error(`Cannot find offset for unknown variant ${e}`);return{...this.create("variants"),variants:i<n.startsWith("[")).sort(([n],[a])=>D_(n,a)),t=e.map(([,n])=>n).sort((n,a)=>ls(n-a));return e.map(([,n],a)=>[n,t[a]]).filter(([n,a])=>n!==a)}remapArbitraryVariantOffsets(e){let t=this.recalculateVariantOffsets();return t.length===0?e:e.map(i=>{let[n,a]=i;return n={...n,variants:Ih(n.variants,t)},[n,a]})}sortArbitraryProperties(e){let t=new Set;for(let[s]of e)s.arbitrary===1n&&t.add(s.property);if(t.size===0)return e;let i=Array.from(t).sort(),n=new Map,a=1n;for(let s of i)n.set(s,a++);return e.map(s=>{let[o,l]=s;return o={...o,propertyOffset:n.get(o.property)??0n},[o,l]})}sort(e){return e=this.remapArbitraryVariantOffsets(e),e=this.sortArbitraryProperties(e),e.sort(([t],[i])=>ls(this.compare(t,i)))}}});function Zo(r,e){let t=r.tailwindConfig.prefix;return typeof t=="function"?t(e):t+e}function Mh({type:r="any",...e}){let t=[].concat(r);return{...e,types:t.map(i=>Array.isArray(i)?{type:i[0],...i[1]}:{type:i,preferOnConflict:!1})}}function q_(r){let e=[],t="",i=0;for(let n=0;n0&&e.push(t.trim()),e=e.filter(n=>n!==""),e}function $_(r,e,{before:t=[]}={}){if(t=[].concat(t),t.length<=0){r.push(e);return}let i=r.length-1;for(let n of t){let a=r.indexOf(n);a!==-1&&(i=Math.min(i,a))}r.splice(i,0,e)}function Nh(r){return Array.isArray(r)?r.flatMap(e=>!Array.isArray(e)&&!ke(e)?e:lr(e)):Nh([r])}function L_(r,e){return(0,Ko.default)(i=>{let n=[];return e&&e(i),i.walkClasses(a=>{n.push(a.value)}),n}).transformSync(r)}function M_(r){r.walkPseudos(e=>{e.value===":not"&&e.remove()})}function N_(r,e={containsNonOnDemandable:!1},t=0){let i=[],n=[];r.type==="rule"?n.push(...r.selectors):r.type==="atrule"&&r.walkRules(a=>n.push(...a.selectors));for(let a of n){let s=L_(a,M_);s.length===0&&(e.containsNonOnDemandable=!0);for(let o of s)i.push(o)}return t===0?[e.containsNonOnDemandable||i.length===0,i]:i}function us(r){return Nh(r).flatMap(e=>{let t=new Map,[i,n]=N_(e);return i&&n.unshift(gt),n.map(a=>(t.has(e)||t.set(e,e),[a,t.get(e)]))})}function os(r){return r.startsWith("@")||r.includes("&")}function Oi(r){r=r.replace(/\n+/g,"").replace(/\s{1,}/g," ").trim();let e=q_(r).map(t=>{if(!t.startsWith("@"))return({format:a})=>a(t);let[,i,n]=/@(\S*)( .+|[({].*)?/g.exec(t);return({wrap:a})=>a(ee.atRule({name:i,params:n?.trim()??""}))}).reverse();return t=>{for(let i of e)i(t)}}function B_(r,e,{variantList:t,variantMap:i,offsets:n,classList:a}){function s(p,h){return p?(0,Lh.default)(r,p,h):r}function o(p){return ur(r.prefix,p)}function l(p,h){return p===gt?gt:h.respectPrefix?e.tailwindConfig.prefix+p:p}function c(p,h,b={}){let v=kt(p),y=s(["theme",...v],h);return mt(v[0])(y,b)}let 
f=0,d={postcss:ee,prefix:o,e:Te,config:s,theme:c,corePlugins:p=>Array.isArray(r.corePlugins)?r.corePlugins.includes(p):s(["corePlugins",p],!0),variants:()=>[],addBase(p){for(let[h,b]of us(p)){let v=l(h,{}),y=n.create("base");e.candidateRuleMap.has(v)||e.candidateRuleMap.set(v,[]),e.candidateRuleMap.get(v).push([{sort:y,layer:"base"},b])}},addDefaults(p,h){let b={[`@defaults ${p}`]:h};for(let[v,y]of us(b)){let w=l(v,{});e.candidateRuleMap.has(w)||e.candidateRuleMap.set(w,[]),e.candidateRuleMap.get(w).push([{sort:n.create("defaults"),layer:"defaults"},y])}},addComponents(p,h){h=Object.assign({},{preserveSource:!1,respectPrefix:!0,respectImportant:!1},Array.isArray(h)?{}:h);for(let[v,y]of us(p)){let w=l(v,h);a.add(w),e.candidateRuleMap.has(w)||e.candidateRuleMap.set(w,[]),e.candidateRuleMap.get(w).push([{sort:n.create("components"),layer:"components",options:h},y])}},addUtilities(p,h){h=Object.assign({},{preserveSource:!1,respectPrefix:!0,respectImportant:!0},Array.isArray(h)?{}:h);for(let[v,y]of us(p)){let w=l(v,h);a.add(w),e.candidateRuleMap.has(w)||e.candidateRuleMap.set(w,[]),e.candidateRuleMap.get(w).push([{sort:n.create("utilities"),layer:"utilities",options:h},y])}},matchUtilities:function(p,h){h=Mh({...{respectPrefix:!0,respectImportant:!0,modifiers:!1},...h});let v=n.create("utilities");for(let y in p){let S=function(O,{isOnlyPlugin:B}){let[N,T,F]=Js(h.types,O,h,r);if(N===void 0)return[];if(!h.types.some(({type:U})=>U===T))if(B)G.warn([`Unnecessary typehint \`${T}\` in \`${y}-${O}\`.`,`You can safely update it to \`${y}-${O.replace(T+":","")}\`.`]);else return[];if(!cr(N))return[];let Y={get modifier(){return h.modifiers||G.warn(`modifier-used-without-options-for-${y}`,["Your plugin must set `modifiers: true` in its options to support modifiers."]),F}},_=we(r,"generalizedModifiers");return[].concat(_?k(N,Y):k(N)).filter(Boolean).map(U=>({[Gn(y,O)]:U}))},w=l(y,h),k=p[y];a.add([w,h]);let E=[{sort:v,layer:"utilities",options:h},S];e.candidateRuleMap.has(w)||e.candidateRuleMap.set(w,[]),e.candidateRuleMap.get(w).push(E)}},matchComponents:function(p,h){h=Mh({...{respectPrefix:!0,respectImportant:!1,modifiers:!1},...h});let v=n.create("components");for(let y in p){let S=function(O,{isOnlyPlugin:B}){let[N,T,F]=Js(h.types,O,h,r);if(N===void 0)return[];if(!h.types.some(({type:U})=>U===T))if(B)G.warn([`Unnecessary typehint \`${T}\` in \`${y}-${O}\`.`,`You can safely update it to \`${y}-${O.replace(T+":","")}\`.`]);else return[];if(!cr(N))return[];let Y={get modifier(){return h.modifiers||G.warn(`modifier-used-without-options-for-${y}`,["Your plugin must set `modifiers: true` in its options to support modifiers."]),F}},_=we(r,"generalizedModifiers");return[].concat(_?k(N,Y):k(N)).filter(Boolean).map(U=>({[Gn(y,O)]:U}))},w=l(y,h),k=p[y];a.add([w,h]);let E=[{sort:v,layer:"components",options:h},S];e.candidateRuleMap.has(w)||e.candidateRuleMap.set(w,[]),e.candidateRuleMap.get(w).push(E)}},addVariant(p,h,b={}){h=[].concat(h).map(v=>{if(typeof v!="string")return(y={})=>{let{args:w,modifySelectors:k,container:S,separator:E,wrap:O,format:B}=y,N=v(Object.assign({modifySelectors:k,container:S,separator:E},b.type===Xo.MatchVariant&&{args:w,wrap:O,format:B}));if(typeof N=="string"&&!os(N))throw new Error(`Your custom variant \`${p}\` has an invalid format string. 
Make sure it's an at-rule or contains a \`&\` placeholder.`);return Array.isArray(N)?N.filter(T=>typeof T=="string").map(T=>Oi(T)):N&&typeof N=="string"&&Oi(N)(y)};if(!os(v))throw new Error(`Your custom variant \`${p}\` has an invalid format string. Make sure it's an at-rule or contains a \`&\` placeholder.`);return Oi(v)}),$_(t,p,b),i.set(p,h),e.variantOptions.set(p,b)},matchVariant(p,h,b){let v=b?.id??++f,y=p==="@",w=we(r,"generalizedModifiers");for(let[S,E]of Object.entries(b?.values??{}))S!=="DEFAULT"&&d.addVariant(y?`${p}${S}`:`${p}-${S}`,({args:O,container:B})=>h(E,w?{modifier:O?.modifier,container:B}:{container:B}),{...b,value:E,id:v,type:Xo.MatchVariant,variantInfo:Jo.Base});let k="DEFAULT"in(b?.values??{});d.addVariant(p,({args:S,container:E})=>S?.value===Ei&&!k?null:h(S?.value===Ei?b.values.DEFAULT:S?.value??(typeof S=="string"?S:""),w?{modifier:S?.modifier,container:E}:{container:E}),{...b,id:v,type:Xo.MatchVariant,variantInfo:Jo.Dynamic})}};return d}function fs(r){return el.has(r)||el.set(r,new Map),el.get(r)}function Bh(r,e){let t=!1,i=new Map;for(let n of r){if(!n)continue;let a=sa.parse(n),s=a.hash?a.href.replace(a.hash,""):a.href;s=a.search?s.replace(a.search,""):s;let o=be.statSync(decodeURIComponent(s),{throwIfNoEntry:!1})?.mtimeMs;!o||((!e.has(n)||o>e.get(n))&&(t=!0),i.set(n,o))}return[t,i]}function Fh(r){r.walkAtRules(e=>{["responsive","variants"].includes(e.name)&&(Fh(e),e.before(e.nodes),e.remove())})}function F_(r){let e=[];return r.each(t=>{t.type==="atrule"&&["responsive","variants"].includes(t.name)&&(t.name="layer",t.params="utilities")}),r.walkAtRules("layer",t=>{if(Fh(t),t.params==="base"){for(let i of t.nodes)e.push(function({addBase:n}){n(i,{respectPrefix:!1})});t.remove()}else if(t.params==="components"){for(let i of t.nodes)e.push(function({addComponents:n}){n(i,{respectPrefix:!1,preserveSource:!0})});t.remove()}else if(t.params==="utilities"){for(let i of t.nodes)e.push(function({addUtilities:n}){n(i,{respectPrefix:!1,preserveSource:!0})});t.remove()}}),e}function j_(r,e){let t=Object.entries({...se,...mh}).map(([l,c])=>r.tailwindConfig.corePlugins.includes(l)?c:null).filter(Boolean),i=r.tailwindConfig.plugins.map(l=>(l.__isOptionsFunction&&(l=l()),typeof l=="function"?l:l.handler)),n=F_(e),a=[se.childVariant,se.pseudoElementVariants,se.pseudoClassVariants,se.hasVariants,se.ariaVariants,se.dataVariants],s=[se.supportsVariants,se.reducedMotionVariants,se.prefersContrastVariants,se.screenVariants,se.orientationVariants,se.directionVariants,se.darkVariants,se.forcedColorsVariants,se.printVariant];return(r.tailwindConfig.darkMode==="class"||Array.isArray(r.tailwindConfig.darkMode)&&r.tailwindConfig.darkMode[0]==="class")&&(s=[se.supportsVariants,se.reducedMotionVariants,se.prefersContrastVariants,se.darkVariants,se.screenVariants,se.orientationVariants,se.directionVariants,se.forcedColorsVariants,se.printVariant]),[...t,...a,...i,...s,...n]}function z_(r,e){let t=[],i=new Map;e.variantMap=i;let n=new Yo;e.offsets=n;let a=new Set,s=B_(e.tailwindConfig,e,{variantList:t,variantMap:i,offsets:n,classList:a});for(let f of r)if(Array.isArray(f))for(let d of f)d(s);else f?.(s);n.recordVariants(t,f=>i.get(f).length);for(let[f,d]of i.entries())e.variantMap.set(f,d.map((p,h)=>[n.forVariant(f,h),p]));let o=(e.tailwindConfig.safelist??[]).filter(Boolean);if(o.length>0){let f=[];for(let d of o){if(typeof d=="string"){e.changedContent.push({content:d,extension:"html"});continue}if(d instanceof RegExp){G.warn("root-regex",["Regular expressions in `safelist` work differently 
in Tailwind CSS v3.0.","Update your `safelist` configuration to eliminate this warning.","https://tailwindcss.com/docs/content-configuration#safelisting-classes"]);continue}f.push(d)}if(f.length>0){let d=new Map,p=e.tailwindConfig.prefix.length,h=f.some(b=>b.pattern.source.includes("!"));for(let b of a){let v=Array.isArray(b)?(()=>{let[y,w]=b,S=Object.keys(w?.values??{}).map(E=>Ci(y,E));return w?.supportsNegativeValues&&(S=[...S,...S.map(E=>"-"+E)],S=[...S,...S.map(E=>E.slice(0,p)+"-"+E.slice(p))]),w.types.some(({type:E})=>E==="color")&&(S=[...S,...S.flatMap(E=>Object.keys(e.tailwindConfig.theme.opacity).map(O=>`${E}/${O}`))]),h&&w?.respectImportant&&(S=[...S,...S.map(E=>"!"+E)]),S})():[b];for(let y of v)for(let{pattern:w,variants:k=[]}of f)if(w.lastIndex=0,d.has(w)||d.set(w,0),!!w.test(y)){d.set(w,d.get(w)+1),e.changedContent.push({content:y,extension:"html"});for(let S of k)e.changedContent.push({content:S+e.tailwindConfig.separator+y,extension:"html"})}}for(let[b,v]of d.entries())v===0&&G.warn([`The safelist pattern \`${b}\` doesn't match any Tailwind CSS classes.`,"Fix this pattern or remove it from your `safelist` configuration.","https://tailwindcss.com/docs/content-configuration#safelisting-classes"])}}let l=[].concat(e.tailwindConfig.darkMode??"media")[1]??"dark",c=[Zo(e,l),Zo(e,"group"),Zo(e,"peer")];e.getClassOrder=function(d){let p=[...d].sort((y,w)=>y===w?0:y[y,null])),b=ss(new Set(p),e,!0);b=e.offsets.sort(b);let v=BigInt(c.length);for(let[,y]of b){let w=y.raws.tailwind.candidate;h.set(w,h.get(w)??v++)}return d.map(y=>{let w=h.get(y)??null,k=c.indexOf(y);return w===null&&k!==-1&&(w=BigInt(k)),[y,w]})},e.getClassList=function(d={}){let p=[];for(let h of a)if(Array.isArray(h)){let[b,v]=h,y=[],w=Object.keys(v?.modifiers??{});v?.types?.some(({type:E})=>E==="color")&&w.push(...Object.keys(e.tailwindConfig.theme.opacity??{}));let k={modifiers:w},S=d.includeMetadata&&w.length>0;for(let[E,O]of Object.entries(v?.values??{})){if(O==null)continue;let B=Ci(b,E);if(p.push(S?[B,k]:B),v?.supportsNegativeValues&&xt(O)){let N=Ci(b,`-${E}`);y.push(S?[N,k]:N)}}p.push(...y)}else p.push(h);return p},e.getVariants=function(){let d=Math.random().toString(36).substring(7).toUpperCase(),p=[];for(let[h,b]of e.variantOptions.entries())b.variantInfo!==Jo.Base&&p.push({name:h,isArbitrary:b.type===Symbol.for("MATCH_VARIANT"),values:Object.keys(b.values??{}),hasDash:h!=="@",selectors({modifier:v,value:y}={}){let w=`TAILWINDPLACEHOLDER${d}`,k=ee.rule({selector:`.${w}`}),S=ee.root({nodes:[k.clone()]}),E=S.toString(),O=(e.variantMap.get(h)??[]).flatMap(([oe,A])=>A),B=[];for(let oe of O){let A=[],C={args:{modifier:v,value:b.values?.[y]??y},separator:e.tailwindConfig.separator,modifySelectors(V){return S.each(Ee=>{Ee.type==="rule"&&(Ee.selectors=Ee.selectors.map(Ie=>V({get className(){return Ho(Ie)},selector:Ie})))}),S},format(V){A.push(V)},wrap(V){A.push(`@${V.name} ${V.params} { & }`)},container:S},he=oe(C);if(A.length>0&&B.push(A),Array.isArray(he))for(let V of he)A=[],V(C),B.push(A)}let N=[],T=S.toString();E!==T&&(S.walkRules(oe=>{let A=oe.selector,C=(0,Ko.default)(he=>{he.walkClasses(V=>{V.value=`${h}${e.tailwindConfig.separator}${V.value}`})}).processSync(A);N.push(A.replace(C,"&").replace(w,"&"))}),S.walkAtRules(oe=>{N.push(`@${oe.name} (${oe.params}) { & }`)}));let F=!(y in(b.values??{})),Y=b[Pt]??{},_=(()=>!(F||Y.respectPrefix===!1))();B=B.map(oe=>oe.map(A=>({format:A,respectPrefix:_}))),N=N.map(oe=>({format:oe,respectPrefix:_}));let 
Q={candidate:w,context:e},U=B.map(oe=>ts(`.${w}`,dr(oe,Q),Q).replace(`.${w}`,"&").replace("{ & }","").trim());return N.length>0&&U.push(dr(N,Q).toString().replace(`.${w}`,"&")),U}});return p}}function jh(r,e){!r.classCache.has(e)||(r.notClassCache.add(e),r.classCache.delete(e),r.applyClassCache.delete(e),r.candidateRuleMap.delete(e),r.candidateRuleCache.delete(e),r.stylesheetCache=null)}function U_(r,e){let t=e.raws.tailwind.candidate;if(!!t){for(let i of r.ruleCache)i[1].raws.tailwind.candidate===t&&r.ruleCache.delete(i);jh(r,t)}}function tl(r,e=[],t=ee.root()){let i={disposables:[],ruleCache:new Set,candidateRuleCache:new Map,classCache:new Map,applyClassCache:new Map,notClassCache:new Set(r.blocklist??[]),postCssNodeCache:new Map,candidateRuleMap:new Map,tailwindConfig:r,changedContent:e,variantMap:new Map,stylesheetCache:null,variantOptions:new Map,markInvalidUtilityCandidate:a=>jh(i,a),markInvalidUtilityNode:a=>U_(i,a)},n=j_(i,t);return z_(n,i),i}function zh(r,e,t,i,n,a){let s=e.opts.from,o=i!==null;Je.DEBUG&&console.log("Source path:",s);let l;if(o&&hr.has(s))l=hr.get(s);else if(Ti.has(n)){let p=Ti.get(n);Dt.get(p).add(s),hr.set(s,p),l=p}let c=Th(s,r);if(l){let[p,h]=Bh([...a],fs(l));if(!p&&!c)return[l,!1,h]}if(hr.has(s)){let p=hr.get(s);if(Dt.has(p)&&(Dt.get(p).delete(s),Dt.get(p).size===0)){Dt.delete(p);for(let[h,b]of Ti)b===p&&Ti.delete(h);for(let h of p.disposables.splice(0))h(p)}}Je.DEBUG&&console.log("Setting up new context...");let f=tl(t,[],r);Object.assign(f,{userConfigPath:i});let[,d]=Bh([...a],fs(f));return Ti.set(n,f),hr.set(s,f),Dt.has(f)||Dt.set(f,new Set),Dt.get(f).add(s),[f,!0,d]}var Lh,Ko,Pt,Xo,Jo,el,hr,Ti,Dt,_i=R(()=>{u();ft();aa();Ot();Lh=pe(Oa()),Ko=pe(it());Si();Io();Wn();Kt();fr();qo();Fr();gh();It();It();Gi();Be();Hi();Mo();as();Rh();$h();ct();zo();Pt=Symbol(),Xo={AddVariant:Symbol.for("ADD_VARIANT"),MatchVariant:Symbol.for("MATCH_VARIANT")},Jo={Base:1<<0,Dynamic:1<<1};el=new WeakMap;hr=yh,Ti=bh,Dt=Zn});function rl(r){return r.ignore?[]:r.glob?m.env.ROLLUP_WATCH==="true"?[{type:"dependency",file:r.base}]:[{type:"dir-dependency",dir:r.base,glob:r.glob}]:[{type:"dependency",file:r.base}]}var Uh=R(()=>{u()});function Vh(r,e){return{handler:r,config:e}}var Hh,Wh=R(()=>{u();Vh.withOptions=function(r,e=()=>({})){let t=function(i){return{__options:i,handler:r(i),config:e(i)}};return t.__isOptionsFunction=!0,t.__pluginFunction=r,t.__configFunction=e,t};Hh=Vh});var il={};Ge(il,{default:()=>V_});var V_,nl=R(()=>{u();Wh();V_=Hh});var Qh=x((F4,Gh)=>{u();var H_=(nl(),il).default,W_={overflow:"hidden",display:"-webkit-box","-webkit-box-orient":"vertical"},G_=H_(function({matchUtilities:r,addUtilities:e,theme:t,variants:i}){let n=t("lineClamp");r({"line-clamp":a=>({...W_,"-webkit-line-clamp":`${a}`})},{values:n}),e([{".line-clamp-none":{"-webkit-line-clamp":"unset"}}],i("lineClamp"))},{theme:{lineClamp:{1:"1",2:"2",3:"3",4:"4",5:"5",6:"6"}},variants:{lineClamp:["responsive"]}});Gh.exports=G_});function sl(r){r.content.files.length===0&&G.warn("content-problems",["The `content` option in your Tailwind CSS configuration is missing or empty.","Configure your content sources or your generated CSS will be missing styles.","https://tailwindcss.com/docs/content-configuration"]);try{let e=Qh();r.plugins.includes(e)&&(G.warn("line-clamp-in-core",["As of Tailwind CSS v3.3, the `@tailwindcss/line-clamp` plugin is now included by default.","Remove it from the `plugins` array in your configuration to eliminate this warning."]),r.plugins=r.plugins.filter(t=>t!==e))}catch{}return r}var 
Yh=R(()=>{u();Be()});var Kh,Xh=R(()=>{u();Kh=()=>!1});var cs,Jh=R(()=>{u();cs={sync:r=>[].concat(r),generateTasks:r=>[{dynamic:!1,base:".",negative:[],positive:[].concat(r),patterns:[].concat(r)}],escapePath:r=>r}});var al,Zh=R(()=>{u();al=r=>r});var em,tm=R(()=>{u();em=()=>""});function rm(r){let e=r,t=em(r);return t!=="."&&(e=r.substr(t.length),e.charAt(0)==="/"&&(e=e.substr(1))),e.substr(0,2)==="./"?e=e.substr(2):e.charAt(0)==="/"&&(e=e.substr(1)),{base:t,glob:e}}var im=R(()=>{u();tm()});var ps=x(Ve=>{u();"use strict";Ve.isInteger=r=>typeof r=="number"?Number.isInteger(r):typeof r=="string"&&r.trim()!==""?Number.isInteger(Number(r)):!1;Ve.find=(r,e)=>r.nodes.find(t=>t.type===e);Ve.exceedsLimit=(r,e,t=1,i)=>i===!1||!Ve.isInteger(r)||!Ve.isInteger(e)?!1:(Number(e)-Number(r))/Number(t)>=i;Ve.escapeNode=(r,e=0,t)=>{let i=r.nodes[e];!i||(t&&i.type===t||i.type==="open"||i.type==="close")&&i.escaped!==!0&&(i.value="\\"+i.value,i.escaped=!0)};Ve.encloseBrace=r=>r.type!=="brace"?!1:r.commas>>0+r.ranges>>0==0?(r.invalid=!0,!0):!1;Ve.isInvalidBrace=r=>r.type!=="brace"?!1:r.invalid===!0||r.dollar?!0:r.commas>>0+r.ranges>>0==0||r.open!==!0||r.close!==!0?(r.invalid=!0,!0):!1;Ve.isOpenOrClose=r=>r.type==="open"||r.type==="close"?!0:r.open===!0||r.close===!0;Ve.reduce=r=>r.reduce((e,t)=>(t.type==="text"&&e.push(t.value),t.type==="range"&&(t.type="text"),e),[]);Ve.flatten=(...r)=>{let e=[],t=i=>{for(let n=0;n{u();"use strict";var nm=ps();sm.exports=(r,e={})=>{let t=(i,n={})=>{let a=e.escapeInvalid&&nm.isInvalidBrace(n),s=i.invalid===!0&&e.escapeInvalid===!0,o="";if(i.value)return(a||s)&&nm.isOpenOrClose(i)?"\\"+i.value:i.value;if(i.value)return i.value;if(i.nodes)for(let l of i.nodes)o+=t(l);return o};return t(r)}});var om=x((X4,am)=>{u();"use strict";am.exports=function(r){return typeof r=="number"?r-r==0:typeof r=="string"&&r.trim()!==""?Number.isFinite?Number.isFinite(+r):isFinite(+r):!1}});var gm=x((J4,mm)=>{u();"use strict";var lm=om(),Wt=(r,e,t)=>{if(lm(r)===!1)throw new TypeError("toRegexRange: expected the first argument to be a number");if(e===void 0||r===e)return String(r);if(lm(e)===!1)throw new TypeError("toRegexRange: expected the second argument to be a number.");let i={relaxZeros:!0,...t};typeof i.strictZeros=="boolean"&&(i.relaxZeros=i.strictZeros===!1);let n=String(i.relaxZeros),a=String(i.shorthand),s=String(i.capture),o=String(i.wrap),l=r+":"+e+"="+n+a+s+o;if(Wt.cache.hasOwnProperty(l))return Wt.cache[l].result;let c=Math.min(r,e),f=Math.max(r,e);if(Math.abs(c-f)===1){let v=r+"|"+e;return i.capture?`(${v})`:i.wrap===!1?v:`(?:${v})`}let d=hm(r)||hm(e),p={min:r,max:e,a:c,b:f},h=[],b=[];if(d&&(p.isPadded=d,p.maxLen=String(p.max).length),c<0){let v=f<0?Math.abs(f):1;b=um(v,Math.abs(c),p,i),c=p.a=0}return f>=0&&(h=um(c,f,p,i)),p.negatives=b,p.positives=h,p.result=Q_(b,h,i),i.capture===!0?p.result=`(${p.result})`:i.wrap!==!1&&h.length+b.length>1&&(p.result=`(?:${p.result})`),Wt.cache[l]=p,p.result};function Q_(r,e,t){let i=ol(r,e,"-",!1,t)||[],n=ol(e,r,"",!1,t)||[],a=ol(r,e,"-?",!0,t)||[];return i.concat(a).concat(n).join("|")}function Y_(r,e){let t=1,i=1,n=cm(r,t),a=new Set([e]);for(;r<=n&&n<=e;)a.add(n),t+=1,n=cm(r,t);for(n=pm(e+1,i)-1;r1&&o.count.pop(),o.count.push(f.count[0]),o.string=o.pattern+dm(o.count),s=c+1;continue}t.isPadded&&(d=eE(c,t,i)),f.string=d+f.pattern+dm(f.count),a.push(f),s=c+1,o=f}return a}function ol(r,e,t,i,n){let a=[];for(let s of r){let{string:o}=s;!i&&!fm(e,"string",o)&&a.push(t+o),i&&fm(e,"string",o)&&a.push(t+o)}return a}function X_(r,e){let t=[];for(let 
i=0;ie?1:e>r?-1:0}function fm(r,e,t){return r.some(i=>i[e]===t)}function cm(r,e){return Number(String(r).slice(0,-e)+"9".repeat(e))}function pm(r,e){return r-r%Math.pow(10,e)}function dm(r){let[e=0,t=""]=r;return t||e>1?`{${e+(t?","+t:"")}}`:""}function Z_(r,e,t){return`[${r}${e-r==1?"":"-"}${e}]`}function hm(r){return/^-?(0+)\d/.test(r)}function eE(r,e,t){if(!e.isPadded)return r;let i=Math.abs(e.maxLen-String(r).length),n=t.relaxZeros!==!1;switch(i){case 0:return"";case 1:return n?"0?":"0";case 2:return n?"0{0,2}":"00";default:return n?`0{0,${i}}`:`0{${i}}`}}Wt.cache={};Wt.clearCache=()=>Wt.cache={};mm.exports=Wt});var fl=x((Z4,Am)=>{u();"use strict";var tE=(Bn(),Nn),ym=gm(),bm=r=>r!==null&&typeof r=="object"&&!Array.isArray(r),rE=r=>e=>r===!0?Number(e):String(e),ll=r=>typeof r=="number"||typeof r=="string"&&r!=="",Ri=r=>Number.isInteger(+r),ul=r=>{let e=`${r}`,t=-1;if(e[0]==="-"&&(e=e.slice(1)),e==="0")return!1;for(;e[++t]==="0";);return t>0},iE=(r,e,t)=>typeof r=="string"||typeof e=="string"?!0:t.stringify===!0,nE=(r,e,t)=>{if(e>0){let i=r[0]==="-"?"-":"";i&&(r=r.slice(1)),r=i+r.padStart(i?e-1:e,"0")}return t===!1?String(r):r},wm=(r,e)=>{let t=r[0]==="-"?"-":"";for(t&&(r=r.slice(1),e--);r.length{r.negatives.sort((s,o)=>so?1:0),r.positives.sort((s,o)=>so?1:0);let t=e.capture?"":"?:",i="",n="",a;return r.positives.length&&(i=r.positives.join("|")),r.negatives.length&&(n=`-(${t}${r.negatives.join("|")})`),i&&n?a=`${i}|${n}`:a=i||n,e.wrap?`(${t}${a})`:a},vm=(r,e,t,i)=>{if(t)return ym(r,e,{wrap:!1,...i});let n=String.fromCharCode(r);if(r===e)return n;let a=String.fromCharCode(e);return`[${n}-${a}]`},xm=(r,e,t)=>{if(Array.isArray(r)){let i=t.wrap===!0,n=t.capture?"":"?:";return i?`(${n}${r.join("|")})`:r.join("|")}return ym(r,e,t)},km=(...r)=>new RangeError("Invalid range arguments: "+tE.inspect(...r)),Sm=(r,e,t)=>{if(t.strictRanges===!0)throw km([r,e]);return[]},aE=(r,e)=>{if(e.strictRanges===!0)throw new TypeError(`Expected step "${r}" to be a number`);return[]},oE=(r,e,t=1,i={})=>{let n=Number(r),a=Number(e);if(!Number.isInteger(n)||!Number.isInteger(a)){if(i.strictRanges===!0)throw km([r,e]);return[]}n===0&&(n=0),a===0&&(a=0);let s=n>a,o=String(r),l=String(e),c=String(t);t=Math.max(Math.abs(t),1);let f=ul(o)||ul(l)||ul(c),d=f?Math.max(o.length,l.length,c.length):0,p=f===!1&&iE(r,e,i)===!1,h=i.transform||rE(p);if(i.toRegex&&t===1)return vm(wm(r,d),wm(e,d),!0,i);let b={negatives:[],positives:[]},v=k=>b[k<0?"negatives":"positives"].push(Math.abs(k)),y=[],w=0;for(;s?n>=a:n<=a;)i.toRegex===!0&&t>1?v(n):y.push(nE(h(n,w),d,p)),n=s?n-t:n+t,w++;return i.toRegex===!0?t>1?sE(b,i):xm(y,null,{wrap:!1,...i}):y},lE=(r,e,t=1,i={})=>{if(!Ri(r)&&r.length>1||!Ri(e)&&e.length>1)return Sm(r,e,i);let n=i.transform||(p=>String.fromCharCode(p)),a=`${r}`.charCodeAt(0),s=`${e}`.charCodeAt(0),o=a>s,l=Math.min(a,s),c=Math.max(a,s);if(i.toRegex&&t===1)return vm(l,c,!1,i);let f=[],d=0;for(;o?a>=s:a<=s;)f.push(n(a,d)),a=o?a-t:a+t,d++;return i.toRegex===!0?xm(f,null,{wrap:!1,options:i}):f},hs=(r,e,t,i={})=>{if(e==null&&ll(r))return[r];if(!ll(r)||!ll(e))return Sm(r,e,i);if(typeof t=="function")return hs(r,e,1,{transform:t});if(bm(t))return hs(r,e,0,t);let n={...i};return n.capture===!0&&(n.wrap=!0),t=t||n.step||1,Ri(t)?Ri(r)&&Ri(e)?oE(r,e,t,n):lE(r,e,Math.max(Math.abs(t),1),n):t!=null&&!bm(t)?aE(t,n):hs(r,e,1,t)};Am.exports=hs});var Em=x((e6,_m)=>{u();"use strict";var uE=fl(),Cm=ps(),fE=(r,e={})=>{let t=(i,n={})=>{let 
[minified vendored JavaScript bundle omitted: a bundled Tailwind CSS JIT build and its dependencies, including braces (brace expansion and range compilation), picomatch/micromatch (glob scanning, parsing, and matching), content-file glob resolution and changed-file detection, class-candidate extraction, @tailwind/@apply/@layer/@config directive handling, theme() and screen() function evaluation, universal-defaults optimization, adjacent-rule and duplicate-declaration collapsing, the PostCSS plugin export, caniuse-lite browser-support data, and autoprefixer utility helpers]
s=this.findProp(a),o=tb.prefix(s);!i.includes(s)&&(o===t||o==="")&&n.push(a)}return n}disabled(e,t){let i=["order","justify-content","align-self","align-content"];if(e.includes("flex")||i.includes(e)){if(this.prefixes.options.flexbox===!1)return!0;if(this.prefixes.options.flexbox==="no-2009")return t.includes("2009")}}ruleVendorPrefixes(e){let{parent:t}=e;if(t.type!=="rule")return!1;if(!t.selector.includes(":-"))return!1;let i=SO.prefixes().filter(n=>t.selector.includes(":"+n));return i.length>0?i:!1}};ib.exports=rb});var xr=x((l$,ab)=>{u();var AO=_e(),sb=class{constructor(e,t,i,n){this.unprefixed=e,this.prefixed=t,this.string=i||t,this.regexp=n||AO.regexp(t)}check(e){return e.includes(this.string)?!!e.match(this.regexp):!1}};ab.exports=sb});var He=x((u$,lb)=>{u();var CO=wr(),_O=xr(),EO=Li(),OO=_e(),ob=class extends CO{static save(e,t){let i=t.prop,n=[];for(let a in t._autoprefixerValues){let s=t._autoprefixerValues[a];if(s===t.value)continue;let o,l=EO.prefix(i);if(l==="-pie-")continue;if(l===a){o=t.value=s,n.push(o);continue}let c=e.prefixed(i,a),f=t.parent;if(!f.every(b=>b.prop!==c)){n.push(o);continue}let d=s.replace(/\s+/," ");if(f.some(b=>b.prop===t.prop&&b.value.replace(/\s+/," ")===d)){n.push(o);continue}let h=this.clone(t,{value:s});o=t.parent.insertBefore(t,h),n.push(o)}return n}check(e){let t=e.value;return t.includes(this.name)?!!t.match(this.regexp()):!1}regexp(){return this.regexpCache||(this.regexpCache=OO.regexp(this.name))}replace(e,t){return e.replace(this.regexp(),`$1${t}$2`)}value(e){return e.raws.value&&e.raws.value.value===e.value?e.raws.value.raw:e.value}add(e,t){e._autoprefixerValues||(e._autoprefixerValues={});let i=e._autoprefixerValues[t]||this.value(e),n;do if(n=i,i=this.replace(i,t),i===!1)return;while(i!==n);e._autoprefixerValues[t]=i}old(e){return new _O(this.name,e+this.name)}};lb.exports=ob});var Bt=x((f$,ub)=>{u();ub.exports={}});var su=x((c$,pb)=>{u();var fb=$s(),TO=He(),RO=Bt().insertAreas,PO=/(^|[^-])linear-gradient\(\s*(top|left|right|bottom)/i,IO=/(^|[^-])radial-gradient\(\s*\d+(\w*|%)\s+\d+(\w*|%)\s*,/i,DO=/(!\s*)?autoprefixer:\s*ignore\s+next/i,qO=/(!\s*)?autoprefixer\s*grid:\s*(on|off|(no-)?autoplace)/i,$O=["width","height","min-width","max-width","min-height","max-height","inline-size","min-inline-size","max-inline-size","block-size","min-block-size","max-block-size"];function nu(r){return r.parent.some(e=>e.prop==="grid-template"||e.prop==="grid-template-areas")}function LO(r){let e=r.parent.some(i=>i.prop==="grid-template-rows"),t=r.parent.some(i=>i.prop==="grid-template-columns");return e&&t}var cb=class{constructor(e){this.prefixes=e}add(e,t){let i=this.prefixes.add["@resolution"],n=this.prefixes.add["@keyframes"],a=this.prefixes.add["@viewport"],s=this.prefixes.add["@supports"];e.walkAtRules(f=>{if(f.name==="keyframes"){if(!this.disabled(f,t))return n&&n.process(f)}else if(f.name==="viewport"){if(!this.disabled(f,t))return a&&a.process(f)}else if(f.name==="supports"){if(this.prefixes.options.supports!==!1&&!this.disabled(f,t))return s.process(f)}else if(f.name==="media"&&f.params.includes("-resolution")&&!this.disabled(f,t))return i&&i.process(f)}),e.walkRules(f=>{if(!this.disabled(f,t))return this.prefixes.add.selectors.map(d=>d.process(f,t))});function o(f){return f.parent.nodes.some(d=>{if(d.type!=="decl")return!1;let p=d.prop==="display"&&/(inline-)?grid/.test(d.value),h=d.prop.startsWith("grid-template"),b=/^grid-([A-z]+-)?gap/.test(d.prop);return p||h||b})}function l(f){return 
f.parent.some(d=>d.prop==="display"&&/(inline-)?flex/.test(d.value))}let c=this.gridStatus(e,t)&&this.prefixes.add["grid-area"]&&this.prefixes.add["grid-area"].prefixes;return e.walkDecls(f=>{if(this.disabledDecl(f,t))return;let d=f.parent,p=f.prop,h=f.value;if(p==="grid-row-span"){t.warn("grid-row-span is not part of final Grid Layout. Use grid-row.",{node:f});return}else if(p==="grid-column-span"){t.warn("grid-column-span is not part of final Grid Layout. Use grid-column.",{node:f});return}else if(p==="display"&&h==="box"){t.warn("You should write display: flex by final spec instead of display: box",{node:f});return}else if(p==="text-emphasis-position")(h==="under"||h==="over")&&t.warn("You should use 2 values for text-emphasis-position For example, `under left` instead of just `under`.",{node:f});else if(/^(align|justify|place)-(items|content)$/.test(p)&&l(f))(h==="start"||h==="end")&&t.warn(`${h} value has mixed support, consider using flex-${h} instead`,{node:f});else if(p==="text-decoration-skip"&&h==="ink")t.warn("Replace text-decoration-skip: ink to text-decoration-skip-ink: auto, because spec had been changed",{node:f});else{if(c&&this.gridStatus(f,t))if(f.value==="subgrid"&&t.warn("IE does not support subgrid",{node:f}),/^(align|justify|place)-items$/.test(p)&&o(f)){let v=p.replace("-items","-self");t.warn(`IE does not support ${p} on grid containers. Try using ${v} on child elements instead: ${f.parent.selector} > * { ${v}: ${f.value} }`,{node:f})}else if(/^(align|justify|place)-content$/.test(p)&&o(f))t.warn(`IE does not support ${f.prop} on grid containers`,{node:f});else if(p==="display"&&f.value==="contents"){t.warn("Please do not use display: contents; if you have grid setting enabled",{node:f});return}else if(f.prop==="grid-gap"){let v=this.gridStatus(f,t);v==="autoplace"&&!LO(f)&&!nu(f)?t.warn("grid-gap only works if grid-template(-areas) is being used or both rows and columns have been declared and cells have not been manually placed inside the explicit grid",{node:f}):(v===!0||v==="no-autoplace")&&!nu(f)&&t.warn("grid-gap only works if grid-template(-areas) is being used",{node:f})}else if(p==="grid-auto-columns"){t.warn("grid-auto-columns is not supported by IE",{node:f});return}else if(p==="grid-auto-rows"){t.warn("grid-auto-rows is not supported by IE",{node:f});return}else if(p==="grid-auto-flow"){let v=d.some(w=>w.prop==="grid-template-rows"),y=d.some(w=>w.prop==="grid-template-columns");nu(f)?t.warn("grid-auto-flow is not supported by IE",{node:f}):h.includes("dense")?t.warn("grid-auto-flow: dense is not supported by IE",{node:f}):!v&&!y&&t.warn("grid-auto-flow works only if grid-template-rows and grid-template-columns are present in the same rule",{node:f});return}else if(h.includes("auto-fit")){t.warn("auto-fit value is not supported by IE",{node:f,word:"auto-fit"});return}else if(h.includes("auto-fill")){t.warn("auto-fill value is not supported by IE",{node:f,word:"auto-fill"});return}else p.startsWith("grid-template")&&h.includes("[")&&t.warn("Autoprefixer currently does not support line names. Try using grid-template-areas instead.",{node:f,word:"["});if(h.includes("radial-gradient"))if(IO.test(f.value))t.warn("Gradient has outdated direction syntax. New syntax is like `closest-side at 0 0` instead of `0 0, closest-side`.",{node:f});else{let v=fb(h);for(let y of v.nodes)if(y.type==="function"&&y.value==="radial-gradient")for(let w of y.nodes)w.type==="word"&&(w.value==="cover"?t.warn("Gradient has outdated direction syntax. 
Replace `cover` to `farthest-corner`.",{node:f}):w.value==="contain"&&t.warn("Gradient has outdated direction syntax. Replace `contain` to `closest-side`.",{node:f}))}h.includes("linear-gradient")&&PO.test(h)&&t.warn("Gradient has outdated direction syntax. New syntax is like `to left` instead of `right`.",{node:f})}$O.includes(f.prop)&&(f.value.includes("-fill-available")||(f.value.includes("fill-available")?t.warn("Replace fill-available to stretch, because spec had been changed",{node:f}):f.value.includes("fill")&&fb(h).nodes.some(y=>y.type==="word"&&y.value==="fill")&&t.warn("Replace fill to stretch, because spec had been changed",{node:f})));let b;if(f.prop==="transition"||f.prop==="transition-property")return this.prefixes.transition.add(f,t);if(f.prop==="align-self"){if(this.displayType(f)!=="grid"&&this.prefixes.options.flexbox!==!1&&(b=this.prefixes.add["align-self"],b&&b.prefixes&&b.process(f)),this.gridStatus(f,t)!==!1&&(b=this.prefixes.add["grid-row-align"],b&&b.prefixes))return b.process(f,t)}else if(f.prop==="justify-self"){if(this.gridStatus(f,t)!==!1&&(b=this.prefixes.add["grid-column-align"],b&&b.prefixes))return b.process(f,t)}else if(f.prop==="place-self"){if(b=this.prefixes.add["place-self"],b&&b.prefixes&&this.gridStatus(f,t)!==!1)return b.process(f,t)}else if(b=this.prefixes.add[f.prop],b&&b.prefixes)return b.process(f,t)}),this.gridStatus(e,t)&&RO(e,this.disabled),e.walkDecls(f=>{if(this.disabledValue(f,t))return;let d=this.prefixes.unprefixed(f.prop),p=this.prefixes.values("add",d);if(Array.isArray(p))for(let h of p)h.process&&h.process(f,t);TO.save(this.prefixes,f)})}remove(e,t){let i=this.prefixes.remove["@resolution"];e.walkAtRules((n,a)=>{this.prefixes.remove[`@${n.name}`]?this.disabled(n,t)||n.parent.removeChild(a):n.name==="media"&&n.params.includes("-resolution")&&i&&i.clean(n)});for(let n of this.prefixes.remove.selectors)e.walkRules((a,s)=>{n.check(a)&&(this.disabled(a,t)||a.parent.removeChild(s))});return e.walkDecls((n,a)=>{if(this.disabled(n,t))return;let s=n.parent,o=this.prefixes.unprefixed(n.prop);if((n.prop==="transition"||n.prop==="transition-property")&&this.prefixes.transition.remove(n),this.prefixes.remove[n.prop]&&this.prefixes.remove[n.prop].remove){let l=this.prefixes.group(n).down(c=>this.prefixes.normalize(c.prop)===o);if(o==="flex-flow"&&(l=!0),n.prop==="-webkit-box-orient"){let c={"flex-direction":!0,"flex-flow":!0};if(!n.parent.some(f=>c[f.prop]))return}if(l&&!this.withHackValue(n)){n.raw("before").includes(` -`)&&this.reduceSpaces(n),s.removeChild(a);return}}for(let l of this.prefixes.values("remove",o)){if(!l.check||!l.check(n.value))continue;if(o=l.unprefixed,this.prefixes.group(n).down(f=>f.value.includes(o))){s.removeChild(a);return}}})}withHackValue(e){return e.prop==="-webkit-background-clip"&&e.value==="text"}disabledValue(e,t){return this.gridStatus(e,t)===!1&&e.type==="decl"&&e.prop==="display"&&e.value.includes("grid")||this.prefixes.options.flexbox===!1&&e.type==="decl"&&e.prop==="display"&&e.value.includes("flex")||e.type==="decl"&&e.prop==="content"?!0:this.disabled(e,t)}disabledDecl(e,t){if(this.gridStatus(e,t)===!1&&e.type==="decl"&&(e.prop.includes("grid")||e.prop==="justify-items"))return!0;if(this.prefixes.options.flexbox===!1&&e.type==="decl"){let i=["order","justify-content","align-items","align-content"];if(e.prop.includes("flex")||i.includes(e.prop))return!0}return this.disabled(e,t)}disabled(e,t){if(!e)return!1;if(e._autoprefixerDisabled!==void 0)return e._autoprefixerDisabled;if(e.parent){let 
n=e.prev();if(n&&n.type==="comment"&&DO.test(n.text))return e._autoprefixerDisabled=!0,e._autoprefixerSelfDisabled=!0,!0}let i=null;if(e.nodes){let n;e.each(a=>{a.type==="comment"&&/(!\s*)?autoprefixer:\s*(off|on)/i.test(a.text)&&(typeof n!="undefined"?t.warn("Second Autoprefixer control comment was ignored. Autoprefixer applies control comment to whole block, not to next rules.",{node:a}):n=/on/i.test(a.text))}),n!==void 0&&(i=!n)}if(!e.nodes||i===null)if(e.parent){let n=this.disabled(e.parent,t);e.parent._autoprefixerSelfDisabled===!0?i=!1:i=n}else i=!1;return e._autoprefixerDisabled=i,i}reduceSpaces(e){let t=!1;if(this.prefixes.group(e).up(()=>(t=!0,!0)),t)return;let i=e.raw("before").split(` -`),n=i[i.length-1].length,a=!1;this.prefixes.group(e).down(s=>{i=s.raw("before").split(` -`);let o=i.length-1;i[o].length>n&&(a===!1&&(a=i[o].length-n),i[o]=i[o].slice(0,-a),s.raws.before=i.join(` -`))})}displayType(e){for(let t of e.parent.nodes)if(t.prop==="display"){if(t.value.includes("flex"))return"flex";if(t.value.includes("grid"))return"grid"}return!1}gridStatus(e,t){if(!e)return!1;if(e._autoprefixerGridStatus!==void 0)return e._autoprefixerGridStatus;let i=null;if(e.nodes){let n;e.each(a=>{if(a.type==="comment"&&qO.test(a.text)){let s=/:\s*autoplace/i.test(a.text),o=/no-autoplace/i.test(a.text);typeof n!="undefined"?t.warn("Second Autoprefixer grid control comment was ignored. Autoprefixer applies control comments to the whole block, not to the next rules.",{node:a}):s?n="autoplace":o?n=!0:n=/on/i.test(a.text)}}),n!==void 0&&(i=n)}if(e.type==="atrule"&&e.name==="supports"){let n=e.params;n.includes("grid")&&n.includes("auto")&&(i=!1)}if(!e.nodes||i===null)if(e.parent){let n=this.gridStatus(e.parent,t);e.parent._autoprefixerSelfDisabled===!0?i=!1:i=n}else typeof this.prefixes.options.grid!="undefined"?i=this.prefixes.options.grid:typeof m.env.AUTOPREFIXER_GRID!="undefined"?m.env.AUTOPREFIXER_GRID==="autoplace"?i="autoplace":i=!0:i=!1;return e._autoprefixerGridStatus=i,i}};pb.exports=cb});var hb=x((p$,db)=>{u();db.exports={A:{A:{"2":"K E F G A B JC"},B:{"1":"C L M H N D O P Q R S T U V W X Y Z a b c d e f g h i j n o p q r s t u v w x y z I"},C:{"1":"2 3 4 5 6 7 8 9 AB BB CB DB EB FB GB HB IB JB KB LB MB NB OB PB QB RB SB TB UB VB WB XB YB ZB aB bB cB 0B dB 1B eB fB gB hB iB jB kB lB mB nB oB m pB qB rB sB tB P Q R 2B S T U V W X Y Z a b c d e f g h i j n o p q r s t u v w x y z I uB 3B 4B","2":"0 1 KC zB J K E F G A B C L M H N D O k l LC MC"},D:{"1":"8 9 AB BB CB DB EB FB GB HB IB JB KB LB MB NB OB PB QB RB SB TB UB VB WB XB YB ZB aB bB cB 0B dB 1B eB fB gB hB iB jB kB lB mB nB oB m pB qB rB sB tB P Q R S T U V W X Y Z a b c d e f g h i j n o p q r s t u v w x y z I uB 3B 4B","2":"0 1 2 3 4 5 6 7 J K E F G A B C L M H N D O k l"},E:{"1":"G A B C L M H D RC 6B vB wB 7B SC TC 8B 9B xB AC yB BC CC DC EC FC GC UC","2":"0 J K E F NC 5B OC PC QC"},F:{"1":"1 2 3 4 5 6 7 8 9 H N D O k l AB BB CB DB EB FB GB HB IB JB KB LB MB NB OB PB QB RB SB TB UB VB WB XB YB ZB aB bB cB dB eB fB gB hB iB jB kB lB mB nB oB m pB qB rB sB tB P Q R 2B S T U V W X Y Z a b c d e f g h i j wB","2":"G B C VC WC XC YC vB HC ZC"},G:{"1":"D fC gC hC iC jC kC lC mC nC oC pC qC rC sC tC 8B 9B xB AC yB BC CC DC EC FC GC","2":"F 5B aC IC bC cC dC eC"},H:{"1":"uC"},I:{"1":"I zC 0C","2":"zB J vC wC xC yC IC"},J:{"2":"E A"},K:{"1":"m","2":"A B C vB HC wB"},L:{"1":"I"},M:{"1":"uB"},N:{"2":"A B"},O:{"1":"xB"},P:{"1":"J k l 1C 2C 3C 4C 5C 6B 6C 7C 8C 9C AD yB BD CD DD"},Q:{"1":"7B"},R:{"1":"ED"},S:{"1":"FD GD"}},B:4,C:"CSS Feature 
Queries"}});var bb=x((d$,yb)=>{u();function mb(r){return r[r.length-1]}var gb={parse(r){let e=[""],t=[e];for(let i of r){if(i==="("){e=[""],mb(t).push(e),t.push(e);continue}if(i===")"){t.pop(),e=mb(t),e.push("");continue}e[e.length-1]+=i}return t[0]},stringify(r){let e="";for(let t of r){if(typeof t=="object"){e+=`(${gb.stringify(t)})`;continue}e+=t}return e}};yb.exports=gb});var Sb=x((h$,kb)=>{u();var MO=hb(),{feature:NO}=(Ts(),Os),{parse:BO}=$e(),FO=Mt(),au=bb(),jO=He(),zO=_e(),wb=NO(MO),vb=[];for(let r in wb.stats){let e=wb.stats[r];for(let t in e){let i=e[t];/y/.test(i)&&vb.push(r+" "+t)}}var xb=class{constructor(e,t){this.Prefixes=e,this.all=t}prefixer(){if(this.prefixerCache)return this.prefixerCache;let e=this.all.browsers.selected.filter(i=>vb.includes(i)),t=new FO(this.all.browsers.data,e,this.all.options);return this.prefixerCache=new this.Prefixes(this.all.data,t,this.all.options),this.prefixerCache}parse(e){let t=e.split(":"),i=t[0],n=t[1];return n||(n=""),[i.trim(),n.trim()]}virtual(e){let[t,i]=this.parse(e),n=BO("a{}").first;return n.append({prop:t,value:i,raws:{before:""}}),n}prefixed(e){let t=this.virtual(e);if(this.disabled(t.first))return t.nodes;let i={warn:()=>null},n=this.prefixer().add[t.first.prop];n&&n.process&&n.process(t.first,i);for(let a of t.nodes){for(let s of this.prefixer().values("add",t.first.prop))s.process(a);jO.save(this.all,a)}return t.nodes}isNot(e){return typeof e=="string"&&/not\s*/i.test(e)}isOr(e){return typeof e=="string"&&/\s*or\s*/i.test(e)}isProp(e){return typeof e=="object"&&e.length===1&&typeof e[0]=="string"}isHack(e,t){return!new RegExp(`(\\(|\\s)${zO.escapeRegexp(t)}:`).test(e)}toRemove(e,t){let[i,n]=this.parse(e),a=this.all.unprefixed(i),s=this.all.cleaner();if(s.remove[i]&&s.remove[i].remove&&!this.isHack(t,a))return!0;for(let o of s.values("remove",a))if(o.check(n))return!0;return!1}remove(e,t){let i=0;for(;itypeof t!="object"?t:t.length===1&&typeof t[0]=="object"?this.cleanBrackets(t[0]):this.cleanBrackets(t))}convert(e){let t=[""];for(let i of e)t.push([`${i.prop}: ${i.value}`]),t.push(" or ");return t[t.length-1]="",t}normalize(e){if(typeof e!="object")return e;if(e=e.filter(t=>t!==""),typeof e[0]=="string"){let t=e[0].trim();if(t.includes(":")||t==="selector"||t==="not selector")return[au.stringify(e)]}return e.map(t=>this.normalize(t))}add(e,t){return e.map(i=>{if(this.isProp(i)){let n=this.prefixed(i[0]);return n.length>1?this.convert(n):i}return typeof i=="object"?this.add(i,t):i})}process(e){let t=au.parse(e.params);t=this.normalize(t),t=this.remove(t,e.params),t=this.add(t,e.params),t=this.cleanBrackets(t),e.params=au.stringify(t)}disabled(e){if(!this.all.options.grid&&(e.prop==="display"&&e.value.includes("grid")||e.prop.includes("grid")||e.prop==="justify-items"))return!0;if(this.all.options.flexbox===!1){if(e.prop==="display"&&e.value.includes("flex"))return!0;let t=["order","justify-content","align-items","align-content"];if(e.prop.includes("flex")||t.includes(e.prop))return!0}return!1}};kb.exports=xb});var _b=x((m$,Cb)=>{u();var Ab=class{constructor(e,t){this.prefix=t,this.prefixed=e.prefixed(this.prefix),this.regexp=e.regexp(this.prefix),this.prefixeds=e.possible().map(i=>[e.prefixed(i),e.regexp(i)]),this.unprefixed=e.name,this.nameRegexp=e.regexp()}isHack(e){let t=e.parent.index(e)+1,i=e.parent.nodes;for(;t{u();var{list:UO}=$e(),VO=_b(),HO=wr(),WO=Mt(),GO=_e(),Eb=class extends HO{constructor(e,t,i){super(e,t,i);this.regexpCache=new Map}check(e){return 
e.selector.includes(this.name)?!!e.selector.match(this.regexp()):!1}prefixed(e){return this.name.replace(/^(\W*)/,`$1${e}`)}regexp(e){if(!this.regexpCache.has(e)){let t=e?this.prefixed(e):this.name;this.regexpCache.set(e,new RegExp(`(^|[^:"'=])${GO.escapeRegexp(t)}`,"gi"))}return this.regexpCache.get(e)}possible(){return WO.prefixes()}prefixeds(e){if(e._autoprefixerPrefixeds){if(e._autoprefixerPrefixeds[this.name])return e._autoprefixerPrefixeds}else e._autoprefixerPrefixeds={};let t={};if(e.selector.includes(",")){let n=UO.comma(e.selector).filter(a=>a.includes(this.name));for(let a of this.possible())t[a]=n.map(s=>this.replace(s,a)).join(", ")}else for(let i of this.possible())t[i]=this.replace(e.selector,i);return e._autoprefixerPrefixeds[this.name]=t,e._autoprefixerPrefixeds}already(e,t,i){let n=e.parent.index(e)-1;for(;n>=0;){let a=e.parent.nodes[n];if(a.type!=="rule")return!1;let s=!1;for(let o in t[this.name]){let l=t[this.name][o];if(a.selector===l){if(i===o)return!0;s=!0;break}}if(!s)return!1;n-=1}return!1}replace(e,t){return e.replace(this.regexp(),`$1${this.prefixed(t)}`)}add(e,t){let i=this.prefixeds(e);if(this.already(e,i,t))return;let n=this.clone(e,{selector:i[this.name][t]});e.parent.insertBefore(e,n)}old(e){return new VO(this,e)}};Ob.exports=Eb});var Pb=x((y$,Rb)=>{u();var QO=wr(),Tb=class extends QO{add(e,t){let i=t+e.name;if(e.parent.some(s=>s.name===i&&s.params===e.params))return;let a=this.clone(e,{name:i});return e.parent.insertBefore(e,a)}process(e){let t=this.parentPrefix(e);for(let i of this.prefixes)(!t||t===i)&&this.add(e,i)}};Rb.exports=Tb});var Db=x((b$,Ib)=>{u();var YO=kr(),ou=class extends YO{prefixed(e){return e==="-webkit-"?":-webkit-full-screen":e==="-moz-"?":-moz-full-screen":`:${e}fullscreen`}};ou.names=[":fullscreen"];Ib.exports=ou});var $b=x((w$,qb)=>{u();var KO=kr(),lu=class extends KO{possible(){return super.possible().concat(["-moz- old","-ms- old"])}prefixed(e){return e==="-webkit-"?"::-webkit-input-placeholder":e==="-ms-"?"::-ms-input-placeholder":e==="-ms- old"?":-ms-input-placeholder":e==="-moz- old"?":-moz-placeholder":`::${e}placeholder`}};lu.names=["::placeholder"];qb.exports=lu});var Mb=x((v$,Lb)=>{u();var XO=kr(),uu=class extends XO{prefixed(e){return e==="-ms-"?":-ms-input-placeholder":`:${e}placeholder-shown`}};uu.names=[":placeholder-shown"];Lb.exports=uu});var Bb=x((x$,Nb)=>{u();var JO=kr(),ZO=_e(),fu=class extends JO{constructor(e,t,i){super(e,t,i);this.prefixes&&(this.prefixes=ZO.uniq(this.prefixes.map(n=>"-webkit-")))}prefixed(e){return e==="-webkit-"?"::-webkit-file-upload-button":`::${e}file-selector-button`}};fu.names=["::file-selector-button"];Nb.exports=fu});var Pe=x((k$,Fb)=>{u();Fb.exports=function(r){let e;return r==="-webkit- 2009"||r==="-moz-"?e=2009:r==="-ms-"?e=2012:r==="-webkit-"&&(e="final"),r==="-webkit- 2009"&&(r="-webkit-"),[e,r]}});var Vb=x((S$,Ub)=>{u();var jb=$e().list,zb=Pe(),eT=j(),Sr=class extends eT{prefixed(e,t){let i;return[i,t]=zb(t),i===2009?t+"box-flex":super.prefixed(e,t)}normalize(){return"flex"}set(e,t){let i=zb(t)[0];if(i===2009)return e.value=jb.space(e.value)[0],e.value=Sr.oldValues[e.value]||e.value,super.set(e,t);if(i===2012){let n=jb.space(e.value);n.length===3&&n[2]==="0"&&(e.value=n.slice(0,2).concat("0px").join(" "))}return super.set(e,t)}};Sr.names=["flex","box-flex"];Sr.oldValues={auto:"1",none:"0"};Ub.exports=Sr});var Gb=x((A$,Wb)=>{u();var Hb=Pe(),tT=j(),cu=class extends tT{prefixed(e,t){let 
i;return[i,t]=Hb(t),i===2009?t+"box-ordinal-group":i===2012?t+"flex-order":super.prefixed(e,t)}normalize(){return"order"}set(e,t){return Hb(t)[0]===2009&&/\d/.test(e.value)?(e.value=(parseInt(e.value)+1).toString(),super.set(e,t)):super.set(e,t)}};cu.names=["order","flex-order","box-ordinal-group"];Wb.exports=cu});var Yb=x((C$,Qb)=>{u();var rT=j(),pu=class extends rT{check(e){let t=e.value;return!t.toLowerCase().includes("alpha(")&&!t.includes("DXImageTransform.Microsoft")&&!t.includes("data:image/svg+xml")}};pu.names=["filter"];Qb.exports=pu});var Xb=x((_$,Kb)=>{u();var iT=j(),du=class extends iT{insert(e,t,i,n){if(t!=="-ms-")return super.insert(e,t,i);let a=this.clone(e),s=e.prop.replace(/end$/,"start"),o=t+e.prop.replace(/end$/,"span");if(!e.parent.some(l=>l.prop===o)){if(a.prop=o,e.value.includes("span"))a.value=e.value.replace(/span\s/i,"");else{let l;if(e.parent.walkDecls(s,c=>{l=c}),l){let c=Number(e.value)-Number(l.value)+"";a.value=c}else e.warn(n,`Can not prefix ${e.prop} (${s} is not found)`)}e.cloneBefore(a)}}};du.names=["grid-row-end","grid-column-end"];Kb.exports=du});var Zb=x((E$,Jb)=>{u();var nT=j(),hu=class extends nT{check(e){return!e.value.split(/\s+/).some(t=>{let i=t.toLowerCase();return i==="reverse"||i==="alternate-reverse"})}};hu.names=["animation","animation-direction"];Jb.exports=hu});var tw=x((O$,ew)=>{u();var sT=Pe(),aT=j(),mu=class extends aT{insert(e,t,i){let n;if([n,t]=sT(t),n!==2009)return super.insert(e,t,i);let a=e.value.split(/\s+/).filter(d=>d!=="wrap"&&d!=="nowrap"&&"wrap-reverse");if(a.length===0||e.parent.some(d=>d.prop===t+"box-orient"||d.prop===t+"box-direction"))return;let o=a[0],l=o.includes("row")?"horizontal":"vertical",c=o.includes("reverse")?"reverse":"normal",f=this.clone(e);return f.prop=t+"box-orient",f.value=l,this.needCascade(e)&&(f.raws.before=this.calcBefore(i,e,t)),e.parent.insertBefore(e,f),f=this.clone(e),f.prop=t+"box-direction",f.value=c,this.needCascade(e)&&(f.raws.before=this.calcBefore(i,e,t)),e.parent.insertBefore(e,f)}};mu.names=["flex-flow","box-direction","box-orient"];ew.exports=mu});var iw=x((T$,rw)=>{u();var oT=Pe(),lT=j(),gu=class extends lT{normalize(){return"flex"}prefixed(e,t){let i;return[i,t]=oT(t),i===2009?t+"box-flex":i===2012?t+"flex-positive":super.prefixed(e,t)}};gu.names=["flex-grow","flex-positive"];rw.exports=gu});var sw=x((R$,nw)=>{u();var uT=Pe(),fT=j(),yu=class extends fT{set(e,t){if(uT(t)[0]!==2009)return super.set(e,t)}};yu.names=["flex-wrap"];nw.exports=yu});var ow=x((P$,aw)=>{u();var cT=j(),Ar=Bt(),bu=class extends cT{insert(e,t,i,n){if(t!=="-ms-")return super.insert(e,t,i);let a=Ar.parse(e),[s,o]=Ar.translate(a,0,2),[l,c]=Ar.translate(a,1,3);[["grid-row",s],["grid-row-span",o],["grid-column",l],["grid-column-span",c]].forEach(([f,d])=>{Ar.insertDecl(e,f,d)}),Ar.warnTemplateSelectorNotFound(e,n),Ar.warnIfGridRowColumnExists(e,n)}};bu.names=["grid-area"];aw.exports=bu});var uw=x((I$,lw)=>{u();var pT=j(),Mi=Bt(),wu=class extends pT{insert(e,t,i){if(t!=="-ms-")return super.insert(e,t,i);if(e.parent.some(s=>s.prop==="-ms-grid-row-align"))return;let[[n,a]]=Mi.parse(e);a?(Mi.insertDecl(e,"grid-row-align",n),Mi.insertDecl(e,"grid-column-align",a)):(Mi.insertDecl(e,"grid-row-align",n),Mi.insertDecl(e,"grid-column-align",n))}};wu.names=["place-self"];lw.exports=wu});var cw=x((D$,fw)=>{u();var dT=j(),vu=class extends dT{check(e){let t=e.value;return!t.includes("/")||t.includes("span")}normalize(e){return e.replace("-start","")}prefixed(e,t){let i=super.prefixed(e,t);return 
t==="-ms-"&&(i=i.replace("-start","")),i}};vu.names=["grid-row-start","grid-column-start"];fw.exports=vu});var hw=x((q$,dw)=>{u();var pw=Pe(),hT=j(),Cr=class extends hT{check(e){return e.parent&&!e.parent.some(t=>t.prop&&t.prop.startsWith("grid-"))}prefixed(e,t){let i;return[i,t]=pw(t),i===2012?t+"flex-item-align":super.prefixed(e,t)}normalize(){return"align-self"}set(e,t){let i=pw(t)[0];if(i===2012)return e.value=Cr.oldValues[e.value]||e.value,super.set(e,t);if(i==="final")return super.set(e,t)}};Cr.names=["align-self","flex-item-align"];Cr.oldValues={"flex-end":"end","flex-start":"start"};dw.exports=Cr});var gw=x(($$,mw)=>{u();var mT=j(),gT=_e(),xu=class extends mT{constructor(e,t,i){super(e,t,i);this.prefixes&&(this.prefixes=gT.uniq(this.prefixes.map(n=>n==="-ms-"?"-webkit-":n)))}};xu.names=["appearance"];mw.exports=xu});var ww=x((L$,bw)=>{u();var yw=Pe(),yT=j(),ku=class extends yT{normalize(){return"flex-basis"}prefixed(e,t){let i;return[i,t]=yw(t),i===2012?t+"flex-preferred-size":super.prefixed(e,t)}set(e,t){let i;if([i,t]=yw(t),i===2012||i==="final")return super.set(e,t)}};ku.names=["flex-basis","flex-preferred-size"];bw.exports=ku});var xw=x((M$,vw)=>{u();var bT=j(),Su=class extends bT{normalize(){return this.name.replace("box-image","border")}prefixed(e,t){let i=super.prefixed(e,t);return t==="-webkit-"&&(i=i.replace("border","box-image")),i}};Su.names=["mask-border","mask-border-source","mask-border-slice","mask-border-width","mask-border-outset","mask-border-repeat","mask-box-image","mask-box-image-source","mask-box-image-slice","mask-box-image-width","mask-box-image-outset","mask-box-image-repeat"];vw.exports=Su});var Sw=x((N$,kw)=>{u();var wT=j(),lt=class extends wT{insert(e,t,i){let n=e.prop==="mask-composite",a;n?a=e.value.split(","):a=e.value.match(lt.regexp)||[],a=a.map(c=>c.trim()).filter(c=>c);let s=a.length,o;if(s&&(o=this.clone(e),o.value=a.map(c=>lt.oldValues[c]||c).join(", "),a.includes("intersect")&&(o.value+=", xor"),o.prop=t+"mask-composite"),n)return s?(this.needCascade(e)&&(o.raws.before=this.calcBefore(i,e,t)),e.parent.insertBefore(e,o)):void 0;let l=this.clone(e);return l.prop=t+l.prop,s&&(l.value=l.value.replace(lt.regexp,"")),this.needCascade(e)&&(l.raws.before=this.calcBefore(i,e,t)),e.parent.insertBefore(e,l),s?(this.needCascade(e)&&(o.raws.before=this.calcBefore(i,e,t)),e.parent.insertBefore(e,o)):e}};lt.names=["mask","mask-composite"];lt.oldValues={add:"source-over",subtract:"source-out",intersect:"source-in",exclude:"xor"};lt.regexp=new RegExp(`\\s+(${Object.keys(lt.oldValues).join("|")})\\b(?!\\))\\s*(?=[,])`,"ig");kw.exports=lt});var _w=x((B$,Cw)=>{u();var Aw=Pe(),vT=j(),_r=class extends vT{prefixed(e,t){let i;return[i,t]=Aw(t),i===2009?t+"box-align":i===2012?t+"flex-align":super.prefixed(e,t)}normalize(){return"align-items"}set(e,t){let i=Aw(t)[0];return(i===2009||i===2012)&&(e.value=_r.oldValues[e.value]||e.value),super.set(e,t)}};_r.names=["align-items","flex-align","box-align"];_r.oldValues={"flex-end":"end","flex-start":"start"};Cw.exports=_r});var Ow=x((F$,Ew)=>{u();var xT=j(),Au=class extends xT{set(e,t){return t==="-ms-"&&e.value==="contain"&&(e.value="element"),super.set(e,t)}insert(e,t,i){if(!(e.value==="all"&&t==="-ms-"))return super.insert(e,t,i)}};Au.names=["user-select"];Ew.exports=Au});var Pw=x((j$,Rw)=>{u();var Tw=Pe(),kT=j(),Cu=class extends kT{normalize(){return"flex-shrink"}prefixed(e,t){let i;return[i,t]=Tw(t),i===2012?t+"flex-negative":super.prefixed(e,t)}set(e,t){let i;if([i,t]=Tw(t),i===2012||i==="final")return 
super.set(e,t)}};Cu.names=["flex-shrink","flex-negative"];Rw.exports=Cu});var Dw=x((z$,Iw)=>{u();var ST=j(),_u=class extends ST{prefixed(e,t){return`${t}column-${e}`}normalize(e){return e.includes("inside")?"break-inside":e.includes("before")?"break-before":"break-after"}set(e,t){return(e.prop==="break-inside"&&e.value==="avoid-column"||e.value==="avoid-page")&&(e.value="avoid"),super.set(e,t)}insert(e,t,i){if(e.prop!=="break-inside")return super.insert(e,t,i);if(!(/region/i.test(e.value)||/page/i.test(e.value)))return super.insert(e,t,i)}};_u.names=["break-inside","page-break-inside","column-break-inside","break-before","page-break-before","column-break-before","break-after","page-break-after","column-break-after"];Iw.exports=_u});var $w=x((U$,qw)=>{u();var AT=j(),Eu=class extends AT{prefixed(e,t){return t+"print-color-adjust"}normalize(){return"color-adjust"}};Eu.names=["color-adjust","print-color-adjust"];qw.exports=Eu});var Mw=x((V$,Lw)=>{u();var CT=j(),Er=class extends CT{insert(e,t,i){if(t==="-ms-"){let n=this.set(this.clone(e),t);this.needCascade(e)&&(n.raws.before=this.calcBefore(i,e,t));let a="ltr";return e.parent.nodes.forEach(s=>{s.prop==="direction"&&(s.value==="rtl"||s.value==="ltr")&&(a=s.value)}),n.value=Er.msValues[a][e.value]||e.value,e.parent.insertBefore(e,n)}return super.insert(e,t,i)}};Er.names=["writing-mode"];Er.msValues={ltr:{"horizontal-tb":"lr-tb","vertical-rl":"tb-rl","vertical-lr":"tb-lr"},rtl:{"horizontal-tb":"rl-tb","vertical-rl":"bt-rl","vertical-lr":"bt-lr"}};Lw.exports=Er});var Bw=x((H$,Nw)=>{u();var _T=j(),Ou=class extends _T{set(e,t){return e.value=e.value.replace(/\s+fill(\s)/,"$1"),super.set(e,t)}};Ou.names=["border-image"];Nw.exports=Ou});var zw=x((W$,jw)=>{u();var Fw=Pe(),ET=j(),Or=class extends ET{prefixed(e,t){let i;return[i,t]=Fw(t),i===2012?t+"flex-line-pack":super.prefixed(e,t)}normalize(){return"align-content"}set(e,t){let i=Fw(t)[0];if(i===2012)return e.value=Or.oldValues[e.value]||e.value,super.set(e,t);if(i==="final")return super.set(e,t)}};Or.names=["align-content","flex-line-pack"];Or.oldValues={"flex-end":"end","flex-start":"start","space-between":"justify","space-around":"distribute"};jw.exports=Or});var Vw=x((G$,Uw)=>{u();var OT=j(),We=class extends OT{prefixed(e,t){return t==="-moz-"?t+(We.toMozilla[e]||e):super.prefixed(e,t)}normalize(e){return We.toNormal[e]||e}};We.names=["border-radius"];We.toMozilla={};We.toNormal={};for(let r of["top","bottom"])for(let e of["left","right"]){let t=`border-${r}-${e}-radius`,i=`border-radius-${r}${e}`;We.names.push(t),We.names.push(i),We.toMozilla[t]=i,We.toNormal[i]=t}Uw.exports=We});var Ww=x((Q$,Hw)=>{u();var TT=j(),Tu=class extends TT{prefixed(e,t){return e.includes("-start")?t+e.replace("-block-start","-before"):t+e.replace("-block-end","-after")}normalize(e){return e.includes("-before")?e.replace("-before","-block-start"):e.replace("-after","-block-end")}};Tu.names=["border-block-start","border-block-end","margin-block-start","margin-block-end","padding-block-start","padding-block-end","border-before","border-after","margin-before","margin-after","padding-before","padding-after"];Hw.exports=Tu});var Qw=x((Y$,Gw)=>{u();var RT=j(),{parseTemplate:PT,warnMissedAreas:IT,getGridGap:DT,warnGridGap:qT,inheritGridGap:$T}=Bt(),Ru=class extends RT{insert(e,t,i,n){if(t!=="-ms-")return super.insert(e,t,i);if(e.parent.some(h=>h.prop==="-ms-grid-rows"))return;let a=DT(e),s=$T(e,a),{rows:o,columns:l,areas:c}=PT({decl:e,gap:s||a}),f=Object.keys(c).length>0,d=Boolean(o),p=Boolean(l);return 
qT({gap:a,hasColumns:p,decl:e,result:n}),IT(c,e,n),(d&&p||f)&&e.cloneBefore({prop:"-ms-grid-rows",value:o,raws:{}}),p&&e.cloneBefore({prop:"-ms-grid-columns",value:l,raws:{}}),e}};Ru.names=["grid-template"];Gw.exports=Ru});var Kw=x((K$,Yw)=>{u();var LT=j(),Pu=class extends LT{prefixed(e,t){return t+e.replace("-inline","")}normalize(e){return e.replace(/(margin|padding|border)-(start|end)/,"$1-inline-$2")}};Pu.names=["border-inline-start","border-inline-end","margin-inline-start","margin-inline-end","padding-inline-start","padding-inline-end","border-start","border-end","margin-start","margin-end","padding-start","padding-end"];Yw.exports=Pu});var Jw=x((X$,Xw)=>{u();var MT=j(),Iu=class extends MT{check(e){return!e.value.includes("flex-")&&e.value!=="baseline"}prefixed(e,t){return t+"grid-row-align"}normalize(){return"align-self"}};Iu.names=["grid-row-align"];Xw.exports=Iu});var e0=x((J$,Zw)=>{u();var NT=j(),Tr=class extends NT{keyframeParents(e){let{parent:t}=e;for(;t;){if(t.type==="atrule"&&t.name==="keyframes")return!0;({parent:t}=t)}return!1}contain3d(e){if(e.prop==="transform-origin")return!1;for(let t of Tr.functions3d)if(e.value.includes(`${t}(`))return!0;return!1}set(e,t){return e=super.set(e,t),t==="-ms-"&&(e.value=e.value.replace(/rotatez/gi,"rotate")),e}insert(e,t,i){if(t==="-ms-"){if(!this.contain3d(e)&&!this.keyframeParents(e))return super.insert(e,t,i)}else if(t==="-o-"){if(!this.contain3d(e))return super.insert(e,t,i)}else return super.insert(e,t,i)}};Tr.names=["transform","transform-origin"];Tr.functions3d=["matrix3d","translate3d","translateZ","scale3d","scaleZ","rotate3d","rotateX","rotateY","perspective"];Zw.exports=Tr});var i0=x((Z$,r0)=>{u();var t0=Pe(),BT=j(),Du=class extends BT{normalize(){return"flex-direction"}insert(e,t,i){let n;if([n,t]=t0(t),n!==2009)return super.insert(e,t,i);if(e.parent.some(f=>f.prop===t+"box-orient"||f.prop===t+"box-direction"))return;let s=e.value,o,l;s==="inherit"||s==="initial"||s==="unset"?(o=s,l=s):(o=s.includes("row")?"horizontal":"vertical",l=s.includes("reverse")?"reverse":"normal");let c=this.clone(e);return c.prop=t+"box-orient",c.value=o,this.needCascade(e)&&(c.raws.before=this.calcBefore(i,e,t)),e.parent.insertBefore(e,c),c=this.clone(e),c.prop=t+"box-direction",c.value=l,this.needCascade(e)&&(c.raws.before=this.calcBefore(i,e,t)),e.parent.insertBefore(e,c)}old(e,t){let i;return[i,t]=t0(t),i===2009?[t+"box-orient",t+"box-direction"]:super.old(e,t)}};Du.names=["flex-direction","box-direction","box-orient"];r0.exports=Du});var s0=x((eL,n0)=>{u();var FT=j(),qu=class extends FT{check(e){return e.value==="pixelated"}prefixed(e,t){return t==="-ms-"?"-ms-interpolation-mode":super.prefixed(e,t)}set(e,t){return t!=="-ms-"?super.set(e,t):(e.prop="-ms-interpolation-mode",e.value="nearest-neighbor",e)}normalize(){return"image-rendering"}process(e,t){return super.process(e,t)}};qu.names=["image-rendering","interpolation-mode"];n0.exports=qu});var o0=x((tL,a0)=>{u();var jT=j(),zT=_e(),$u=class extends jT{constructor(e,t,i){super(e,t,i);this.prefixes&&(this.prefixes=zT.uniq(this.prefixes.map(n=>n==="-ms-"?"-webkit-":n)))}};$u.names=["backdrop-filter"];a0.exports=$u});var u0=x((rL,l0)=>{u();var UT=j(),VT=_e(),Lu=class extends UT{constructor(e,t,i){super(e,t,i);this.prefixes&&(this.prefixes=VT.uniq(this.prefixes.map(n=>n==="-ms-"?"-webkit-":n)))}check(e){return e.value.toLowerCase()==="text"}};Lu.names=["background-clip"];l0.exports=Lu});var c0=x((iL,f0)=>{u();var 
HT=j(),WT=["none","underline","overline","line-through","blink","inherit","initial","unset"],Mu=class extends HT{check(e){return e.value.split(/\s+/).some(t=>!WT.includes(t))}};Mu.names=["text-decoration"];f0.exports=Mu});var h0=x((nL,d0)=>{u();var p0=Pe(),GT=j(),Rr=class extends GT{prefixed(e,t){let i;return[i,t]=p0(t),i===2009?t+"box-pack":i===2012?t+"flex-pack":super.prefixed(e,t)}normalize(){return"justify-content"}set(e,t){let i=p0(t)[0];if(i===2009||i===2012){let n=Rr.oldValues[e.value]||e.value;if(e.value=n,i!==2009||n!=="distribute")return super.set(e,t)}else if(i==="final")return super.set(e,t)}};Rr.names=["justify-content","flex-pack","box-pack"];Rr.oldValues={"flex-end":"end","flex-start":"start","space-between":"justify","space-around":"distribute"};d0.exports=Rr});var g0=x((sL,m0)=>{u();var QT=j(),Nu=class extends QT{set(e,t){let i=e.value.toLowerCase();return t==="-webkit-"&&!i.includes(" ")&&i!=="contain"&&i!=="cover"&&(e.value=e.value+" "+e.value),super.set(e,t)}};Nu.names=["background-size"];m0.exports=Nu});var b0=x((aL,y0)=>{u();var YT=j(),Bu=Bt(),Fu=class extends YT{insert(e,t,i){if(t!=="-ms-")return super.insert(e,t,i);let n=Bu.parse(e),[a,s]=Bu.translate(n,0,1);n[0]&&n[0].includes("span")&&(s=n[0].join("").replace(/\D/g,"")),[[e.prop,a],[`${e.prop}-span`,s]].forEach(([l,c])=>{Bu.insertDecl(e,l,c)})}};Fu.names=["grid-row","grid-column"];y0.exports=Fu});var x0=x((oL,v0)=>{u();var KT=j(),{prefixTrackProp:w0,prefixTrackValue:XT,autoplaceGridItems:JT,getGridGap:ZT,inheritGridGap:eR}=Bt(),tR=su(),ju=class extends KT{prefixed(e,t){return t==="-ms-"?w0({prop:e,prefix:t}):super.prefixed(e,t)}normalize(e){return e.replace(/^grid-(rows|columns)/,"grid-template-$1")}insert(e,t,i,n){if(t!=="-ms-")return super.insert(e,t,i);let{parent:a,prop:s,value:o}=e,l=s.includes("rows"),c=s.includes("columns"),f=a.some(k=>k.prop==="grid-template"||k.prop==="grid-template-areas");if(f&&l)return!1;let d=new tR({options:{}}),p=d.gridStatus(a,n),h=ZT(e);h=eR(e,h)||h;let b=l?h.row:h.column;(p==="no-autoplace"||p===!0)&&!f&&(b=null);let v=XT({value:o,gap:b});e.cloneBefore({prop:w0({prop:s,prefix:t}),value:v});let y=a.nodes.find(k=>k.prop==="grid-auto-flow"),w="row";if(y&&!d.disabled(y,n)&&(w=y.value.trim()),p==="autoplace"){let k=a.nodes.find(E=>E.prop==="grid-template-rows");if(!k&&f)return;if(!k&&!f){e.warn(n,"Autoplacement does not work without grid-template-rows property");return}!a.nodes.find(E=>E.prop==="grid-template-columns")&&!f&&e.warn(n,"Autoplacement does not work without grid-template-columns property"),c&&!f&&JT(e,n,h,w)}}};ju.names=["grid-template-rows","grid-template-columns","grid-rows","grid-columns"];v0.exports=ju});var S0=x((lL,k0)=>{u();var rR=j(),zu=class extends rR{check(e){return!e.value.includes("flex-")&&e.value!=="baseline"}prefixed(e,t){return t+"grid-column-align"}normalize(){return"justify-self"}};zu.names=["grid-column-align"];k0.exports=zu});var C0=x((uL,A0)=>{u();var iR=j(),Uu=class extends iR{prefixed(e,t){return t+"scroll-chaining"}normalize(){return"overscroll-behavior"}set(e,t){return e.value==="auto"?e.value="chained":(e.value==="none"||e.value==="contain")&&(e.value="none"),super.set(e,t)}};Uu.names=["overscroll-behavior","scroll-chaining"];A0.exports=Uu});var O0=x((fL,E0)=>{u();var nR=j(),{parseGridAreas:sR,warnMissedAreas:aR,prefixTrackProp:oR,prefixTrackValue:_0,getGridGap:lR,warnGridGap:uR,inheritGridGap:fR}=Bt();function cR(r){return r.trim().slice(1,-1).split(/["']\s*["']?/g)}var Vu=class extends nR{insert(e,t,i,n){if(t!=="-ms-")return 
super.insert(e,t,i);let a=!1,s=!1,o=e.parent,l=lR(e);l=fR(e,l)||l,o.walkDecls(/-ms-grid-rows/,d=>d.remove()),o.walkDecls(/grid-template-(rows|columns)/,d=>{if(d.prop==="grid-template-rows"){s=!0;let{prop:p,value:h}=d;d.cloneBefore({prop:oR({prop:p,prefix:t}),value:_0({value:h,gap:l.row})})}else a=!0});let c=cR(e.value);a&&!s&&l.row&&c.length>1&&e.cloneBefore({prop:"-ms-grid-rows",value:_0({value:`repeat(${c.length}, auto)`,gap:l.row}),raws:{}}),uR({gap:l,hasColumns:a,decl:e,result:n});let f=sR({rows:c,gap:l});return aR(f,e,n),e}};Vu.names=["grid-template-areas"];E0.exports=Vu});var R0=x((cL,T0)=>{u();var pR=j(),Hu=class extends pR{set(e,t){return t==="-webkit-"&&(e.value=e.value.replace(/\s*(right|left)\s*/i,"")),super.set(e,t)}};Hu.names=["text-emphasis-position"];T0.exports=Hu});var I0=x((pL,P0)=>{u();var dR=j(),Wu=class extends dR{set(e,t){return e.prop==="text-decoration-skip-ink"&&e.value==="auto"?(e.prop=t+"text-decoration-skip",e.value="ink",e):super.set(e,t)}};Wu.names=["text-decoration-skip-ink","text-decoration-skip"];P0.exports=Wu});var N0=x((dL,M0)=>{u();"use strict";M0.exports={wrap:D0,limit:q0,validate:$0,test:Gu,curry:hR,name:L0};function D0(r,e,t){var i=e-r;return((t-r)%i+i)%i+r}function q0(r,e,t){return Math.max(r,Math.min(e,t))}function $0(r,e,t,i,n){if(!Gu(r,e,t,i,n))throw new Error(t+" is outside of range ["+r+","+e+")");return t}function Gu(r,e,t,i,n){return!(te||n&&t===e||i&&t===r)}function L0(r,e,t,i){return(t?"(":"[")+r+","+e+(i?")":"]")}function hR(r,e,t,i){var n=L0.bind(null,r,e,t,i);return{wrap:D0.bind(null,r,e),limit:q0.bind(null,r,e),validate:function(a){return $0(r,e,a,t,i)},test:function(a){return Gu(r,e,a,t,i)},toString:n,name:n}}});var j0=x((hL,F0)=>{u();var Qu=$s(),mR=N0(),gR=xr(),yR=He(),bR=_e(),B0=/top|left|right|bottom/gi,wt=class extends yR{replace(e,t){let i=Qu(e);for(let n of i.nodes)if(n.type==="function"&&n.value===this.name)if(n.nodes=this.newDirection(n.nodes),n.nodes=this.normalize(n.nodes),t==="-webkit- old"){if(!this.oldWebkit(n))return!1}else n.nodes=this.convertDirection(n.nodes),n.value=t+n.value;return i.toString()}replaceFirst(e,...t){return t.map(n=>n===" "?{type:"space",value:n}:{type:"word",value:n}).concat(e.slice(1))}normalizeUnit(e,t){return`${parseFloat(e)/t*360}deg`}normalize(e){if(!e[0])return e;if(/-?\d+(.\d+)?grad/.test(e[0].value))e[0].value=this.normalizeUnit(e[0].value,400);else if(/-?\d+(.\d+)?rad/.test(e[0].value))e[0].value=this.normalizeUnit(e[0].value,2*Math.PI);else if(/-?\d+(.\d+)?turn/.test(e[0].value))e[0].value=this.normalizeUnit(e[0].value,1);else if(e[0].value.includes("deg")){let t=parseFloat(e[0].value);t=mR.wrap(0,360,t),e[0].value=`${t}deg`}return e[0].value==="0deg"?e=this.replaceFirst(e,"to"," ","top"):e[0].value==="90deg"?e=this.replaceFirst(e,"to"," ","right"):e[0].value==="180deg"?e=this.replaceFirst(e,"to"," ","bottom"):e[0].value==="270deg"&&(e=this.replaceFirst(e,"to"," ","left")),e}newDirection(e){if(e[0].value==="to"||(B0.lastIndex=0,!B0.test(e[0].value)))return e;e.unshift({type:"word",value:"to"},{type:"space",value:" "});for(let t=2;t0&&(e[0].value==="to"?this.fixDirection(e):e[0].value.includes("deg")?this.fixAngle(e):this.isRadial(e)&&this.fixRadial(e)),e}fixDirection(e){e.splice(0,2);for(let t of e){if(t.type==="div")break;t.type==="word"&&(t.value=this.revertDirection(t.value))}}fixAngle(e){let t=e[0].value;t=parseFloat(t),t=Math.abs(450-t)%360,t=this.roundFloat(t,3),e[0].value=`${t}deg`}fixRadial(e){let t=[],i=[],n,a,s,o,l;for(o=0;o{u();var wR=xr(),vR=He();function z0(r){return new 
RegExp(`(^|[\\s,(])(${r}($|[\\s),]))`,"gi")}var Yu=class extends vR{regexp(){return this.regexpCache||(this.regexpCache=z0(this.name)),this.regexpCache}isStretch(){return this.name==="stretch"||this.name==="fill"||this.name==="fill-available"}replace(e,t){return t==="-moz-"&&this.isStretch()?e.replace(this.regexp(),"$1-moz-available$3"):t==="-webkit-"&&this.isStretch()?e.replace(this.regexp(),"$1-webkit-fill-available$3"):super.replace(e,t)}old(e){let t=e+this.name;return this.isStretch()&&(e==="-moz-"?t="-moz-available":e==="-webkit-"&&(t="-webkit-fill-available")),new wR(this.name,t,t,z0(t))}add(e,t){if(!(e.prop.includes("grid")&&t!=="-webkit-"))return super.add(e,t)}};Yu.names=["max-content","min-content","fit-content","fill","fill-available","stretch"];U0.exports=Yu});var G0=x((gL,W0)=>{u();var H0=xr(),xR=He(),Ku=class extends xR{replace(e,t){return t==="-webkit-"?e.replace(this.regexp(),"$1-webkit-optimize-contrast"):t==="-moz-"?e.replace(this.regexp(),"$1-moz-crisp-edges"):super.replace(e,t)}old(e){return e==="-webkit-"?new H0(this.name,"-webkit-optimize-contrast"):e==="-moz-"?new H0(this.name,"-moz-crisp-edges"):super.old(e)}};Ku.names=["pixelated"];W0.exports=Ku});var Y0=x((yL,Q0)=>{u();var kR=He(),Xu=class extends kR{replace(e,t){let i=super.replace(e,t);return t==="-webkit-"&&(i=i.replace(/("[^"]+"|'[^']+')(\s+\d+\w)/gi,"url($1)$2")),i}};Xu.names=["image-set"];Q0.exports=Xu});var X0=x((bL,K0)=>{u();var SR=$e().list,AR=He(),Ju=class extends AR{replace(e,t){return SR.space(e).map(i=>{if(i.slice(0,+this.name.length+1)!==this.name+"(")return i;let n=i.lastIndexOf(")"),a=i.slice(n+1),s=i.slice(this.name.length+1,n);if(t==="-webkit-"){let o=s.match(/\d*.?\d+%?/);o?(s=s.slice(o[0].length).trim(),s+=`, ${o[0]}`):s+=", 0.5"}return t+this.name+"("+s+")"+a}).join(" ")}};Ju.names=["cross-fade"];K0.exports=Ju});var Z0=x((wL,J0)=>{u();var CR=Pe(),_R=xr(),ER=He(),Zu=class extends ER{constructor(e,t){super(e,t);e==="display-flex"&&(this.name="flex")}check(e){return e.prop==="display"&&e.value===this.name}prefixed(e){let t,i;return[t,e]=CR(e),t===2009?this.name==="flex"?i="box":i="inline-box":t===2012?this.name==="flex"?i="flexbox":i="inline-flexbox":t==="final"&&(i=this.name),e+i}replace(e,t){return this.prefixed(t)}old(e){let t=this.prefixed(e);if(!!t)return new _R(this.name,t)}};Zu.names=["display-flex","inline-flex"];J0.exports=Zu});var tv=x((vL,ev)=>{u();var OR=He(),ef=class extends OR{constructor(e,t){super(e,t);e==="display-grid"&&(this.name="grid")}check(e){return e.prop==="display"&&e.value===this.name}};ef.names=["display-grid","inline-grid"];ev.exports=ef});var iv=x((xL,rv)=>{u();var TR=He(),tf=class extends TR{constructor(e,t){super(e,t);e==="filter-function"&&(this.name="filter")}};tf.names=["filter","filter-function"];rv.exports=tf});var ov=x((kL,av)=>{u();var 
nv=Li(),z=j(),sv=Fy(),RR=nb(),PR=su(),IR=Sb(),rf=Mt(),Pr=kr(),DR=Pb(),ut=He(),Ir=_e(),qR=Db(),$R=$b(),LR=Mb(),MR=Bb(),NR=Vb(),BR=Gb(),FR=Yb(),jR=Xb(),zR=Zb(),UR=tw(),VR=iw(),HR=sw(),WR=ow(),GR=uw(),QR=cw(),YR=hw(),KR=gw(),XR=ww(),JR=xw(),ZR=Sw(),e5=_w(),t5=Ow(),r5=Pw(),i5=Dw(),n5=$w(),s5=Mw(),a5=Bw(),o5=zw(),l5=Vw(),u5=Ww(),f5=Qw(),c5=Kw(),p5=Jw(),d5=e0(),h5=i0(),m5=s0(),g5=o0(),y5=u0(),b5=c0(),w5=h0(),v5=g0(),x5=b0(),k5=x0(),S5=S0(),A5=C0(),C5=O0(),_5=R0(),E5=I0(),O5=j0(),T5=V0(),R5=G0(),P5=Y0(),I5=X0(),D5=Z0(),q5=tv(),$5=iv();Pr.hack(qR);Pr.hack($R);Pr.hack(LR);Pr.hack(MR);z.hack(NR);z.hack(BR);z.hack(FR);z.hack(jR);z.hack(zR);z.hack(UR);z.hack(VR);z.hack(HR);z.hack(WR);z.hack(GR);z.hack(QR);z.hack(YR);z.hack(KR);z.hack(XR);z.hack(JR);z.hack(ZR);z.hack(e5);z.hack(t5);z.hack(r5);z.hack(i5);z.hack(n5);z.hack(s5);z.hack(a5);z.hack(o5);z.hack(l5);z.hack(u5);z.hack(f5);z.hack(c5);z.hack(p5);z.hack(d5);z.hack(h5);z.hack(m5);z.hack(g5);z.hack(y5);z.hack(b5);z.hack(w5);z.hack(v5);z.hack(x5);z.hack(k5);z.hack(S5);z.hack(A5);z.hack(C5);z.hack(_5);z.hack(E5);ut.hack(O5);ut.hack(T5);ut.hack(R5);ut.hack(P5);ut.hack(I5);ut.hack(D5);ut.hack(q5);ut.hack($5);var nf=new Map,Ni=class{constructor(e,t,i={}){this.data=e,this.browsers=t,this.options=i,[this.add,this.remove]=this.preprocess(this.select(this.data)),this.transition=new RR(this),this.processor=new PR(this)}cleaner(){if(this.cleanerCache)return this.cleanerCache;if(this.browsers.selected.length){let e=new rf(this.browsers.data,[]);this.cleanerCache=new Ni(this.data,e,this.options)}else return this;return this.cleanerCache}select(e){let t={add:{},remove:{}};for(let i in e){let n=e[i],a=n.browsers.map(l=>{let c=l.split(" ");return{browser:`${c[0]} ${c[1]}`,note:c[2]}}),s=a.filter(l=>l.note).map(l=>`${this.browsers.prefix(l.browser)} ${l.note}`);s=Ir.uniq(s),a=a.filter(l=>this.browsers.isSelected(l.browser)).map(l=>{let c=this.browsers.prefix(l.browser);return l.note?`${c} ${l.note}`:c}),a=this.sort(Ir.uniq(a)),this.options.flexbox==="no-2009"&&(a=a.filter(l=>!l.includes("2009")));let o=n.browsers.map(l=>this.browsers.prefix(l));n.mistakes&&(o=o.concat(n.mistakes)),o=o.concat(s),o=Ir.uniq(o),a.length?(t.add[i]=a,a.length!a.includes(l)))):t.remove[i]=o}return t}sort(e){return e.sort((t,i)=>{let n=Ir.removeNote(t).length,a=Ir.removeNote(i).length;return n===a?i.length-t.length:a-n})}preprocess(e){let t={selectors:[],"@supports":new IR(Ni,this)};for(let n in e.add){let a=e.add[n];if(n==="@keyframes"||n==="@viewport")t[n]=new DR(n,a,this);else if(n==="@resolution")t[n]=new sv(n,a,this);else if(this.data[n].selector)t.selectors.push(Pr.load(n,a,this));else{let s=this.data[n].props;if(s){let o=ut.load(n,a,this);for(let l of s)t[l]||(t[l]={values:[]}),t[l].values.push(o)}else{let o=t[n]&&t[n].values||[];t[n]=z.load(n,a,this),t[n].values=o}}}let i={selectors:[]};for(let n in e.remove){let a=e.remove[n];if(this.data[n].selector){let s=Pr.load(n,a);for(let o of a)i.selectors.push(s.old(o))}else if(n==="@keyframes"||n==="@viewport")for(let s of a){let o=`@${s}${n.slice(1)}`;i[o]={remove:!0}}else if(n==="@resolution")i[n]=new sv(n,a,this);else{let s=this.data[n].props;if(s){let o=ut.load(n,[],this);for(let l of a){let c=o.old(l);if(c)for(let f of s)i[f]||(i[f]={}),i[f].values||(i[f].values=[]),i[f].values.push(c)}}else for(let o of a){let l=this.decl(n).old(n,o);if(n==="align-self"){let c=t[n]&&t[n].prefixes;if(c){if(o==="-webkit- 2009"&&c.includes("-webkit-"))continue;if(o==="-webkit-"&&c.includes("-webkit- 2009"))continue}}for(let c of 
l)i[c]||(i[c]={}),i[c].remove=!0}}}return[t,i]}decl(e){return nf.has(e)||nf.set(e,z.load(e)),nf.get(e)}unprefixed(e){let t=this.normalize(nv.unprefixed(e));return t==="flex-direction"&&(t="flex-flow"),t}normalize(e){return this.decl(e).normalize(e)}prefixed(e,t){return e=nv.unprefixed(e),this.decl(e).prefixed(e,t)}values(e,t){let i=this[e],n=i["*"]&&i["*"].values,a=i[t]&&i[t].values;return n&&a?Ir.uniq(n.concat(a)):n||a||[]}group(e){let t=e.parent,i=t.index(e),{length:n}=t.nodes,a=this.unprefixed(e.prop),s=(o,l)=>{for(i+=o;i>=0&&i{u();lv.exports={"backdrop-filter":{feature:"css-backdrop-filter",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5","safari 16.5"]},element:{props:["background","background-image","border-image","mask","list-style","list-style-image","content","mask-image"],feature:"css-element-function",browsers:["firefox 114"]},"user-select":{mistakes:["-khtml-"],feature:"user-select-none",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5","safari 16.5"]},"background-clip":{feature:"background-clip-text",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},hyphens:{feature:"css-hyphens",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5","safari 16.5"]},fill:{props:["width","min-width","max-width","height","min-height","max-height","inline-size","min-inline-size","max-inline-size","block-size","min-block-size","max-block-size","grid","grid-template","grid-template-rows","grid-template-columns","grid-auto-columns","grid-auto-rows"],feature:"intrinsic-width",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"fill-available":{props:["width","min-width","max-width","height","min-height","max-height","inline-size","min-inline-size","max-inline-size","block-size","min-block-size","max-block-size","grid","grid-template","grid-template-rows","grid-template-columns","grid-auto-columns","grid-auto-rows"],feature:"intrinsic-width",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},stretch:{props:["width","min-width","max-width","height","min-height","max-height","inline-size","min-inline-size","max-inline-size","block-size","min-block-size","max-block-size","grid","grid-template","grid-template-rows","grid-template-columns","grid-auto-columns","grid-auto-rows"],feature:"intrinsic-width",browsers:["firefox 114"]},"fit-content":{props:["width","min-width","max-width","height","min-height","max-height","inline-size","min-inline-size","max-inline-size","block-size","min-block-size","max-block-size","grid","grid-template","grid-template-rows","grid-template-columns","grid-auto-columns","grid-auto-rows"],feature:"intrinsic-width",browsers:["firefox 114"]},"text-decoration-style":{feature:"text-decoration",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5"]},"text-decoration-color":{feature:"text-decoration",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5"]},"text-decoration-line":{feature:"text-decoration",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5"]},"text-decoration":{feature:"text-decoration",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5"]},"text-decoration-skip":{feature:"text-decoration",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5"]},"text-decoration-skip-ink":{feature:"text-decoration",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 
16.4","ios_saf 16.5"]},"text-size-adjust":{feature:"text-size-adjust",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5"]},"mask-clip":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-composite":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-image":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-origin":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-repeat":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-border-repeat":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-border-source":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},mask:{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-position":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-size":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-border":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-border-outset":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-border-width":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"mask-border-slice":{feature:"css-masks",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},"clip-path":{feature:"css-clip-path",browsers:["samsung 21"]},"box-decoration-break":{feature:"css-boxdecorationbreak",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5","opera 99","safari 16.5","samsung 21"]},appearance:{feature:"css-appearance",browsers:["samsung 21"]},"image-set":{props:["background","background-image","border-image","cursor","mask","mask-image","list-style","list-style-image","content"],feature:"css-image-set",browsers:["and_uc 15.5","chrome 109","samsung 21"]},"cross-fade":{props:["background","background-image","border-image","mask","list-style","list-style-image","content","mask-image"],feature:"css-cross-fade",browsers:["and_chr 114","and_uc 15.5","chrome 109","chrome 113","chrome 114","edge 114","opera 99","samsung 21"]},isolate:{props:["unicode-bidi"],feature:"css-unicode-bidi",browsers:["ios_saf 16.1","ios_saf 16.3","ios_saf 16.4","ios_saf 16.5","safari 16.5"]},"color-adjust":{feature:"css-color-adjust",browsers:["chrome 109","chrome 113","chrome 114","edge 114","opera 99"]}}});var cv=x((AL,fv)=>{u();fv.exports={}});var mv=x((CL,hv)=>{u();var L5=Gl(),{agents:M5}=(Ts(),Os),sf=_y(),N5=Mt(),B5=ov(),F5=uv(),j5=cv(),pv={browsers:M5,prefixes:F5},dv=` - Replace 
Autoprefixer \`browsers\` option to Browserslist config. - Use \`browserslist\` key in \`package.json\` or \`.browserslistrc\` file. - - Using \`browsers\` option can cause errors. Browserslist config can - be used for Babel, Autoprefixer, postcss-normalize and other tools. - - If you really need to use option, rename it to \`overrideBrowserslist\`. - - Learn more at: - https://github.com/browserslist/browserslist#readme - https://twitter.com/browserslist - -`;function z5(r){return Object.prototype.toString.apply(r)==="[object Object]"}var af=new Map;function U5(r,e){e.browsers.selected.length!==0&&(e.add.selectors.length>0||Object.keys(e.add).length>2||r.warn(`Autoprefixer target browsers do not need any prefixes.You do not need Autoprefixer anymore. -Check your Browserslist config to be sure that your targets are set up correctly. - - Learn more at: - https://github.com/postcss/autoprefixer#readme - https://github.com/browserslist/browserslist#readme - -`))}hv.exports=Dr;function Dr(...r){let e;if(r.length===1&&z5(r[0])?(e=r[0],r=void 0):r.length===0||r.length===1&&!r[0]?r=void 0:r.length<=2&&(Array.isArray(r[0])||!r[0])?(e=r[1],r=r[0]):typeof r[r.length-1]=="object"&&(e=r.pop()),e||(e={}),e.browser)throw new Error("Change `browser` option to `overrideBrowserslist` in Autoprefixer");if(e.browserslist)throw new Error("Change `browserslist` option to `overrideBrowserslist` in Autoprefixer");e.overrideBrowserslist?r=e.overrideBrowserslist:e.browsers&&(typeof console!="undefined"&&console.warn&&(sf.red?console.warn(sf.red(dv.replace(/`[^`]+`/g,n=>sf.yellow(n.slice(1,-1))))):console.warn(dv)),r=e.browsers);let t={ignoreUnknownVersions:e.ignoreUnknownVersions,stats:e.stats,env:e.env};function i(n){let a=pv,s=new N5(a.browsers,r,n,t),o=s.selected.join(", ")+JSON.stringify(e);return af.has(o)||af.set(o,new B5(a.prefixes,s,e)),af.get(o)}return{postcssPlugin:"autoprefixer",prepare(n){let a=i({from:n.opts.from,env:e.env});return{OnceExit(s){U5(n,a),e.remove!==!1&&a.processor.remove(s,n),e.add!==!1&&a.processor.add(s,n)}}},info(n){return n=n||{},n.from=n.from||m.cwd(),j5(i(n))},options:e,browsers:r}}Dr.postcss=!0;Dr.data=pv;Dr.defaults=L5.defaults;Dr.info=()=>Dr().info()});var gv={};Ge(gv,{default:()=>V5});var V5,yv=R(()=>{u();V5=[]});var wv={};Ge(wv,{default:()=>H5});var bv,H5,vv=R(()=>{u();Yi();bv=pe(en()),H5=St(bv.default.theme)});var kv={};Ge(kv,{default:()=>W5});var xv,W5,Sv=R(()=>{u();Yi();xv=pe(en()),W5=St(xv.default)});u();"use strict";var G5=vt(Ay()),Q5=vt($e()),Y5=vt(mv()),K5=vt((yv(),gv)),X5=vt((vv(),wv)),J5=vt((Sv(),kv)),Z5=vt((zs(),Af)),eP=vt((nl(),il)),tP=vt((ia(),ic));function vt(r){return r&&r.__esModule?r:{default:r}}console.warn("cdn.tailwindcss.com should not be used in production. 
To use Tailwind CSS in production, install it as a PostCSS plugin or use the Tailwind CLI: https://tailwindcss.com/docs/installation");var Ls="tailwind",of="text/tailwindcss",Av="/template.html",Yt,Cv=!0,_v=0,lf=new Set,uf,Ev="",Ov=(r=!1)=>({get(e,t){return(!r||t==="config")&&typeof e[t]=="object"&&e[t]!==null?new Proxy(e[t],Ov()):e[t]},set(e,t,i){return e[t]=i,(!r||t==="config")&&ff(!0),!0}});window[Ls]=new Proxy({config:{},defaultTheme:X5.default,defaultConfig:J5.default,colors:Z5.default,plugin:eP.default,resolveConfig:tP.default},Ov(!0));function Tv(r){uf.observe(r,{attributes:!0,attributeFilter:["type"],characterData:!0,subtree:!0,childList:!0})}new MutationObserver(async r=>{let e=!1;if(!uf){uf=new MutationObserver(async()=>await ff(!0));for(let t of document.querySelectorAll(`style[type="${of}"]`))Tv(t)}for(let t of r)for(let i of t.addedNodes)i.nodeType===1&&i.tagName==="STYLE"&&i.getAttribute("type")===of&&(Tv(i),e=!0);await ff(e)}).observe(document.documentElement,{attributes:!0,attributeFilter:["class"],childList:!0,subtree:!0});async function ff(r=!1){r&&(_v++,lf.clear());let e="";for(let i of document.querySelectorAll(`style[type="${of}"]`))e+=i.textContent;let t=new Set;for(let i of document.querySelectorAll("[class]"))for(let n of i.classList)lf.has(n)||t.add(n);if(document.body&&(Cv||t.size>0||e!==Ev||!Yt||!Yt.isConnected)){for(let n of t)lf.add(n);Cv=!1,Ev=e,self[Av]=Array.from(t).join(" ");let{css:i}=await(0,Q5.default)([(0,G5.default)({...window[Ls].config,_hash:_v,content:{files:[Av],extract:{html:n=>n.split(" ")}},plugins:[...K5.default,...Array.isArray(window[Ls].config.plugins)?window[Ls].config.plugins:[]]}),(0,Y5.default)({remove:!1})]).process(`@tailwind base;@tailwind components;@tailwind utilities;${e}`);(!Yt||!Yt.isConnected)&&(Yt=document.createElement("style"),document.head.append(Yt)),Yt.textContent=i}}})(); -/*! - * fill-range - * - * Copyright (c) 2014-present, Jon Schlinkert. - * Licensed under the MIT License. - */ -/*! - * is-number - * - * Copyright (c) 2014-present, Jon Schlinkert. - * Released under the MIT License. - */ -/*! - * to-regex-range - * - * Copyright (c) 2015-present, Jon Schlinkert. - * Released under the MIT License. - */ -/*! https://mths.be/cssesc v3.0.0 by @mathias */ - diff --git a/examples/server/public/deps_vue.esm-browser.js b/examples/server/public/deps_vue.esm-browser.js deleted file mode 100644 index 4679d9614a765..0000000000000 --- a/examples/server/public/deps_vue.esm-browser.js +++ /dev/null @@ -1,18160 +0,0 @@ -/** -* vue v3.5.12 -* (c) 2018-present Yuxi (Evan) You and Vue contributors -* @license MIT -**/ -/*! 
#__NO_SIDE_EFFECTS__ */ -// @__NO_SIDE_EFFECTS__ -function makeMap(str) { - const map = /* @__PURE__ */ Object.create(null); - for (const key of str.split(",")) map[key] = 1; - return (val) => val in map; -} - -const EMPTY_OBJ = Object.freeze({}) ; -const EMPTY_ARR = Object.freeze([]) ; -const NOOP = () => { -}; -const NO = () => false; -const isOn = (key) => key.charCodeAt(0) === 111 && key.charCodeAt(1) === 110 && // uppercase letter -(key.charCodeAt(2) > 122 || key.charCodeAt(2) < 97); -const isModelListener = (key) => key.startsWith("onUpdate:"); -const extend = Object.assign; -const remove = (arr, el) => { - const i = arr.indexOf(el); - if (i > -1) { - arr.splice(i, 1); - } -}; -const hasOwnProperty$1 = Object.prototype.hasOwnProperty; -const hasOwn = (val, key) => hasOwnProperty$1.call(val, key); -const isArray = Array.isArray; -const isMap = (val) => toTypeString(val) === "[object Map]"; -const isSet = (val) => toTypeString(val) === "[object Set]"; -const isDate = (val) => toTypeString(val) === "[object Date]"; -const isRegExp = (val) => toTypeString(val) === "[object RegExp]"; -const isFunction = (val) => typeof val === "function"; -const isString = (val) => typeof val === "string"; -const isSymbol = (val) => typeof val === "symbol"; -const isObject = (val) => val !== null && typeof val === "object"; -const isPromise = (val) => { - return (isObject(val) || isFunction(val)) && isFunction(val.then) && isFunction(val.catch); -}; -const objectToString = Object.prototype.toString; -const toTypeString = (value) => objectToString.call(value); -const toRawType = (value) => { - return toTypeString(value).slice(8, -1); -}; -const isPlainObject = (val) => toTypeString(val) === "[object Object]"; -const isIntegerKey = (key) => isString(key) && key !== "NaN" && key[0] !== "-" && "" + parseInt(key, 10) === key; -const isReservedProp = /* @__PURE__ */ makeMap( - // the leading comma is intentional so empty string "" is also included - ",key,ref,ref_for,ref_key,onVnodeBeforeMount,onVnodeMounted,onVnodeBeforeUpdate,onVnodeUpdated,onVnodeBeforeUnmount,onVnodeUnmounted" -); -const isBuiltInDirective = /* @__PURE__ */ makeMap( - "bind,cloak,else-if,else,for,html,if,model,on,once,pre,show,slot,text,memo" -); -const cacheStringFunction = (fn) => { - const cache = /* @__PURE__ */ Object.create(null); - return (str) => { - const hit = cache[str]; - return hit || (cache[str] = fn(str)); - }; -}; -const camelizeRE = /-(\w)/g; -const camelize = cacheStringFunction( - (str) => { - return str.replace(camelizeRE, (_, c) => c ? c.toUpperCase() : ""); - } -); -const hyphenateRE = /\B([A-Z])/g; -const hyphenate = cacheStringFunction( - (str) => str.replace(hyphenateRE, "-$1").toLowerCase() -); -const capitalize = cacheStringFunction((str) => { - return str.charAt(0).toUpperCase() + str.slice(1); -}); -const toHandlerKey = cacheStringFunction( - (str) => { - const s = str ? `on${capitalize(str)}` : ``; - return s; - } -); -const hasChanged = (value, oldValue) => !Object.is(value, oldValue); -const invokeArrayFns = (fns, ...arg) => { - for (let i = 0; i < fns.length; i++) { - fns[i](...arg); - } -}; -const def = (obj, key, value, writable = false) => { - Object.defineProperty(obj, key, { - configurable: true, - enumerable: false, - writable, - value - }); -}; -const looseToNumber = (val) => { - const n = parseFloat(val); - return isNaN(n) ? val : n; -}; -const toNumber = (val) => { - const n = isString(val) ? Number(val) : NaN; - return isNaN(n) ? 
val : n; -}; -let _globalThis; -const getGlobalThis = () => { - return _globalThis || (_globalThis = typeof globalThis !== "undefined" ? globalThis : typeof self !== "undefined" ? self : typeof window !== "undefined" ? window : typeof global !== "undefined" ? global : {}); -}; -function genCacheKey(source, options) { - return source + JSON.stringify( - options, - (_, val) => typeof val === "function" ? val.toString() : val - ); -} - -const PatchFlagNames = { - [1]: `TEXT`, - [2]: `CLASS`, - [4]: `STYLE`, - [8]: `PROPS`, - [16]: `FULL_PROPS`, - [32]: `NEED_HYDRATION`, - [64]: `STABLE_FRAGMENT`, - [128]: `KEYED_FRAGMENT`, - [256]: `UNKEYED_FRAGMENT`, - [512]: `NEED_PATCH`, - [1024]: `DYNAMIC_SLOTS`, - [2048]: `DEV_ROOT_FRAGMENT`, - [-1]: `HOISTED`, - [-2]: `BAIL` -}; - -const slotFlagsText = { - [1]: "STABLE", - [2]: "DYNAMIC", - [3]: "FORWARDED" -}; - -const GLOBALS_ALLOWED = "Infinity,undefined,NaN,isFinite,isNaN,parseFloat,parseInt,decodeURI,decodeURIComponent,encodeURI,encodeURIComponent,Math,Number,Date,Array,Object,Boolean,String,RegExp,Map,Set,JSON,Intl,BigInt,console,Error,Symbol"; -const isGloballyAllowed = /* @__PURE__ */ makeMap(GLOBALS_ALLOWED); - -const range = 2; -function generateCodeFrame(source, start = 0, end = source.length) { - start = Math.max(0, Math.min(start, source.length)); - end = Math.max(0, Math.min(end, source.length)); - if (start > end) return ""; - let lines = source.split(/(\r?\n)/); - const newlineSequences = lines.filter((_, idx) => idx % 2 === 1); - lines = lines.filter((_, idx) => idx % 2 === 0); - let count = 0; - const res = []; - for (let i = 0; i < lines.length; i++) { - count += lines[i].length + (newlineSequences[i] && newlineSequences[i].length || 0); - if (count >= start) { - for (let j = i - range; j <= i + range || end > count; j++) { - if (j < 0 || j >= lines.length) continue; - const line = j + 1; - res.push( - `${line}${" ".repeat(Math.max(3 - String(line).length, 0))}| ${lines[j]}` - ); - const lineLength = lines[j].length; - const newLineSeqLength = newlineSequences[j] && newlineSequences[j].length || 0; - if (j === i) { - const pad = start - (count - (lineLength + newLineSeqLength)); - const length = Math.max( - 1, - end > count ? lineLength - pad : end - start - ); - res.push(` | ` + " ".repeat(pad) + "^".repeat(length)); - } else if (j > i) { - if (end > count) { - const length = Math.max(Math.min(end - count, lineLength), 1); - res.push(` | ` + "^".repeat(length)); - } - count += lineLength + newLineSeqLength; - } - } - break; - } - } - return res.join("\n"); -} - -function normalizeStyle(value) { - if (isArray(value)) { - const res = {}; - for (let i = 0; i < value.length; i++) { - const item = value[i]; - const normalized = isString(item) ? 
parseStringStyle(item) : normalizeStyle(item); - if (normalized) { - for (const key in normalized) { - res[key] = normalized[key]; - } - } - } - return res; - } else if (isString(value) || isObject(value)) { - return value; - } -} -const listDelimiterRE = /;(?![^(]*\))/g; -const propertyDelimiterRE = /:([^]+)/; -const styleCommentRE = /\/\*[^]*?\*\//g; -function parseStringStyle(cssText) { - const ret = {}; - cssText.replace(styleCommentRE, "").split(listDelimiterRE).forEach((item) => { - if (item) { - const tmp = item.split(propertyDelimiterRE); - tmp.length > 1 && (ret[tmp[0].trim()] = tmp[1].trim()); - } - }); - return ret; -} -function stringifyStyle(styles) { - let ret = ""; - if (!styles || isString(styles)) { - return ret; - } - for (const key in styles) { - const value = styles[key]; - if (isString(value) || typeof value === "number") { - const normalizedKey = key.startsWith(`--`) ? key : hyphenate(key); - ret += `${normalizedKey}:${value};`; - } - } - return ret; -} -function normalizeClass(value) { - let res = ""; - if (isString(value)) { - res = value; - } else if (isArray(value)) { - for (let i = 0; i < value.length; i++) { - const normalized = normalizeClass(value[i]); - if (normalized) { - res += normalized + " "; - } - } - } else if (isObject(value)) { - for (const name in value) { - if (value[name]) { - res += name + " "; - } - } - } - return res.trim(); -} -function normalizeProps(props) { - if (!props) return null; - let { class: klass, style } = props; - if (klass && !isString(klass)) { - props.class = normalizeClass(klass); - } - if (style) { - props.style = normalizeStyle(style); - } - return props; -} - -const HTML_TAGS = "html,body,base,head,link,meta,style,title,address,article,aside,footer,header,hgroup,h1,h2,h3,h4,h5,h6,nav,section,div,dd,dl,dt,figcaption,figure,picture,hr,img,li,main,ol,p,pre,ul,a,b,abbr,bdi,bdo,br,cite,code,data,dfn,em,i,kbd,mark,q,rp,rt,ruby,s,samp,small,span,strong,sub,sup,time,u,var,wbr,area,audio,map,track,video,embed,object,param,source,canvas,script,noscript,del,ins,caption,col,colgroup,table,thead,tbody,td,th,tr,button,datalist,fieldset,form,input,label,legend,meter,optgroup,option,output,progress,select,textarea,details,dialog,menu,summary,template,blockquote,iframe,tfoot"; -const SVG_TAGS = "svg,animate,animateMotion,animateTransform,circle,clipPath,color-profile,defs,desc,discard,ellipse,feBlend,feColorMatrix,feComponentTransfer,feComposite,feConvolveMatrix,feDiffuseLighting,feDisplacementMap,feDistantLight,feDropShadow,feFlood,feFuncA,feFuncB,feFuncG,feFuncR,feGaussianBlur,feImage,feMerge,feMergeNode,feMorphology,feOffset,fePointLight,feSpecularLighting,feSpotLight,feTile,feTurbulence,filter,foreignObject,g,hatch,hatchpath,image,line,linearGradient,marker,mask,mesh,meshgradient,meshpatch,meshrow,metadata,mpath,path,pattern,polygon,polyline,radialGradient,rect,set,solidcolor,stop,switch,symbol,text,textPath,title,tspan,unknown,use,view"; -const MATH_TAGS = "annotation,annotation-xml,maction,maligngroup,malignmark,math,menclose,merror,mfenced,mfrac,mfraction,mglyph,mi,mlabeledtr,mlongdiv,mmultiscripts,mn,mo,mover,mpadded,mphantom,mprescripts,mroot,mrow,ms,mscarries,mscarry,msgroup,msline,mspace,msqrt,msrow,mstack,mstyle,msub,msubsup,msup,mtable,mtd,mtext,mtr,munder,munderover,none,semantics"; -const VOID_TAGS = "area,base,br,col,embed,hr,img,input,link,meta,param,source,track,wbr"; -const isHTMLTag = /* @__PURE__ */ makeMap(HTML_TAGS); -const isSVGTag = /* @__PURE__ */ makeMap(SVG_TAGS); -const isMathMLTag = /* @__PURE__ */ 
makeMap(MATH_TAGS); -const isVoidTag = /* @__PURE__ */ makeMap(VOID_TAGS); - -const specialBooleanAttrs = `itemscope,allowfullscreen,formnovalidate,ismap,nomodule,novalidate,readonly`; -const isSpecialBooleanAttr = /* @__PURE__ */ makeMap(specialBooleanAttrs); -const isBooleanAttr = /* @__PURE__ */ makeMap( - specialBooleanAttrs + `,async,autofocus,autoplay,controls,default,defer,disabled,hidden,inert,loop,open,required,reversed,scoped,seamless,checked,muted,multiple,selected` -); -function includeBooleanAttr(value) { - return !!value || value === ""; -} -const isKnownHtmlAttr = /* @__PURE__ */ makeMap( - `accept,accept-charset,accesskey,action,align,allow,alt,async,autocapitalize,autocomplete,autofocus,autoplay,background,bgcolor,border,buffered,capture,challenge,charset,checked,cite,class,code,codebase,color,cols,colspan,content,contenteditable,contextmenu,controls,coords,crossorigin,csp,data,datetime,decoding,default,defer,dir,dirname,disabled,download,draggable,dropzone,enctype,enterkeyhint,for,form,formaction,formenctype,formmethod,formnovalidate,formtarget,headers,height,hidden,high,href,hreflang,http-equiv,icon,id,importance,inert,integrity,ismap,itemprop,keytype,kind,label,lang,language,loading,list,loop,low,manifest,max,maxlength,minlength,media,min,multiple,muted,name,novalidate,open,optimum,pattern,ping,placeholder,poster,preload,radiogroup,readonly,referrerpolicy,rel,required,reversed,rows,rowspan,sandbox,scope,scoped,selected,shape,size,sizes,slot,span,spellcheck,src,srcdoc,srclang,srcset,start,step,style,summary,tabindex,target,title,translate,type,usemap,value,width,wrap` -); -const isKnownSvgAttr = /* @__PURE__ */ makeMap( - `xmlns,accent-height,accumulate,additive,alignment-baseline,alphabetic,amplitude,arabic-form,ascent,attributeName,attributeType,azimuth,baseFrequency,baseline-shift,baseProfile,bbox,begin,bias,by,calcMode,cap-height,class,clip,clipPathUnits,clip-path,clip-rule,color,color-interpolation,color-interpolation-filters,color-profile,color-rendering,contentScriptType,contentStyleType,crossorigin,cursor,cx,cy,d,decelerate,descent,diffuseConstant,direction,display,divisor,dominant-baseline,dur,dx,dy,edgeMode,elevation,enable-background,end,exponent,fill,fill-opacity,fill-rule,filter,filterRes,filterUnits,flood-color,flood-opacity,font-family,font-size,font-size-adjust,font-stretch,font-style,font-variant,font-weight,format,from,fr,fx,fy,g1,g2,glyph-name,glyph-orientation-horizontal,glyph-orientation-vertical,glyphRef,gradientTransform,gradientUnits,hanging,height,href,hreflang,horiz-adv-x,horiz-origin-x,id,ideographic,image-rendering,in,in2,intercept,k,k1,k2,k3,k4,kernelMatrix,kernelUnitLength,kerning,keyPoints,keySplines,keyTimes,lang,lengthAdjust,letter-spacing,lighting-color,limitingConeAngle,local,marker-end,marker-mid,marker-start,markerHeight,markerUnits,markerWidth,mask,maskContentUnits,maskUnits,mathematical,max,media,method,min,mode,name,numOctaves,offset,opacity,operator,order,orient,orientation,origin,overflow,overline-position,overline-thickness,panose-1,paint-order,path,pathLength,patternContentUnits,patternTransform,patternUnits,ping,pointer-events,points,pointsAtX,pointsAtY,pointsAtZ,preserveAlpha,preserveAspectRatio,primitiveUnits,r,radius,referrerPolicy,refX,refY,rel,rendering-intent,repeatCount,repeatDur,requiredExtensions,requiredFeatures,restart,result,rotate,rx,ry,scale,seed,shape-rendering,slope,spacing,specularConstant,specularExponent,speed,spreadMethod,startOffset,stdDeviation,stemh,stemv,stitchTiles,stop-color,stop-opacity,strikethrough-
position,strikethrough-thickness,string,stroke,stroke-dasharray,stroke-dashoffset,stroke-linecap,stroke-linejoin,stroke-miterlimit,stroke-opacity,stroke-width,style,surfaceScale,systemLanguage,tabindex,tableValues,target,targetX,targetY,text-anchor,text-decoration,text-rendering,textLength,to,transform,transform-origin,type,u1,u2,underline-position,underline-thickness,unicode,unicode-bidi,unicode-range,units-per-em,v-alphabetic,v-hanging,v-ideographic,v-mathematical,values,vector-effect,version,vert-adv-y,vert-origin-x,vert-origin-y,viewBox,viewTarget,visibility,width,widths,word-spacing,writing-mode,x,x-height,x1,x2,xChannelSelector,xlink:actuate,xlink:arcrole,xlink:href,xlink:role,xlink:show,xlink:title,xlink:type,xmlns:xlink,xml:base,xml:lang,xml:space,y,y1,y2,yChannelSelector,z,zoomAndPan` -); -function isRenderableAttrValue(value) { - if (value == null) { - return false; - } - const type = typeof value; - return type === "string" || type === "number" || type === "boolean"; -} - -const cssVarNameEscapeSymbolsRE = /[ !"#$%&'()*+,./:;<=>?@[\\\]^`{|}~]/g; -function getEscapedCssVarName(key, doubleEscape) { - return key.replace( - cssVarNameEscapeSymbolsRE, - (s) => `\\${s}` - ); -} - -function looseCompareArrays(a, b) { - if (a.length !== b.length) return false; - let equal = true; - for (let i = 0; equal && i < a.length; i++) { - equal = looseEqual(a[i], b[i]); - } - return equal; -} -function looseEqual(a, b) { - if (a === b) return true; - let aValidType = isDate(a); - let bValidType = isDate(b); - if (aValidType || bValidType) { - return aValidType && bValidType ? a.getTime() === b.getTime() : false; - } - aValidType = isSymbol(a); - bValidType = isSymbol(b); - if (aValidType || bValidType) { - return a === b; - } - aValidType = isArray(a); - bValidType = isArray(b); - if (aValidType || bValidType) { - return aValidType && bValidType ? looseCompareArrays(a, b) : false; - } - aValidType = isObject(a); - bValidType = isObject(b); - if (aValidType || bValidType) { - if (!aValidType || !bValidType) { - return false; - } - const aKeysCount = Object.keys(a).length; - const bKeysCount = Object.keys(b).length; - if (aKeysCount !== bKeysCount) { - return false; - } - for (const key in a) { - const aHasKey = a.hasOwnProperty(key); - const bHasKey = b.hasOwnProperty(key); - if (aHasKey && !bHasKey || !aHasKey && bHasKey || !looseEqual(a[key], b[key])) { - return false; - } - } - } - return String(a) === String(b); -} -function looseIndexOf(arr, val) { - return arr.findIndex((item) => looseEqual(item, val)); -} - -const isRef$1 = (val) => { - return !!(val && val["__v_isRef"] === true); -}; -const toDisplayString = (val) => { - return isString(val) ? val : val == null ? "" : isArray(val) || isObject(val) && (val.toString === objectToString || !isFunction(val.toString)) ? isRef$1(val) ? 
toDisplayString(val.value) : JSON.stringify(val, replacer, 2) : String(val); -}; -const replacer = (_key, val) => { - if (isRef$1(val)) { - return replacer(_key, val.value); - } else if (isMap(val)) { - return { - [`Map(${val.size})`]: [...val.entries()].reduce( - (entries, [key, val2], i) => { - entries[stringifySymbol(key, i) + " =>"] = val2; - return entries; - }, - {} - ) - }; - } else if (isSet(val)) { - return { - [`Set(${val.size})`]: [...val.values()].map((v) => stringifySymbol(v)) - }; - } else if (isSymbol(val)) { - return stringifySymbol(val); - } else if (isObject(val) && !isArray(val) && !isPlainObject(val)) { - return String(val); - } - return val; -}; -const stringifySymbol = (v, i = "") => { - var _a; - return ( - // Symbol.description in es2019+ so we need to cast here to pass - // the lib: es2016 check - isSymbol(v) ? `Symbol(${(_a = v.description) != null ? _a : i})` : v - ); -}; - -function warn$2(msg, ...args) { - console.warn(`[Vue warn] ${msg}`, ...args); -} - -let activeEffectScope; -class EffectScope { - constructor(detached = false) { - this.detached = detached; - /** - * @internal - */ - this._active = true; - /** - * @internal - */ - this.effects = []; - /** - * @internal - */ - this.cleanups = []; - this._isPaused = false; - this.parent = activeEffectScope; - if (!detached && activeEffectScope) { - this.index = (activeEffectScope.scopes || (activeEffectScope.scopes = [])).push( - this - ) - 1; - } - } - get active() { - return this._active; - } - pause() { - if (this._active) { - this._isPaused = true; - let i, l; - if (this.scopes) { - for (i = 0, l = this.scopes.length; i < l; i++) { - this.scopes[i].pause(); - } - } - for (i = 0, l = this.effects.length; i < l; i++) { - this.effects[i].pause(); - } - } - } - /** - * Resumes the effect scope, including all child scopes and effects. 
- */ - resume() { - if (this._active) { - if (this._isPaused) { - this._isPaused = false; - let i, l; - if (this.scopes) { - for (i = 0, l = this.scopes.length; i < l; i++) { - this.scopes[i].resume(); - } - } - for (i = 0, l = this.effects.length; i < l; i++) { - this.effects[i].resume(); - } - } - } - } - run(fn) { - if (this._active) { - const currentEffectScope = activeEffectScope; - try { - activeEffectScope = this; - return fn(); - } finally { - activeEffectScope = currentEffectScope; - } - } else { - warn$2(`cannot run an inactive effect scope.`); - } - } - /** - * This should only be called on non-detached scopes - * @internal - */ - on() { - activeEffectScope = this; - } - /** - * This should only be called on non-detached scopes - * @internal - */ - off() { - activeEffectScope = this.parent; - } - stop(fromParent) { - if (this._active) { - let i, l; - for (i = 0, l = this.effects.length; i < l; i++) { - this.effects[i].stop(); - } - for (i = 0, l = this.cleanups.length; i < l; i++) { - this.cleanups[i](); - } - if (this.scopes) { - for (i = 0, l = this.scopes.length; i < l; i++) { - this.scopes[i].stop(true); - } - } - if (!this.detached && this.parent && !fromParent) { - const last = this.parent.scopes.pop(); - if (last && last !== this) { - this.parent.scopes[this.index] = last; - last.index = this.index; - } - } - this.parent = void 0; - this._active = false; - } - } -} -function effectScope(detached) { - return new EffectScope(detached); -} -function getCurrentScope() { - return activeEffectScope; -} -function onScopeDispose(fn, failSilently = false) { - if (activeEffectScope) { - activeEffectScope.cleanups.push(fn); - } else if (!failSilently) { - warn$2( - `onScopeDispose() is called when there is no active effect scope to be associated with.` - ); - } -} - -let activeSub; -const pausedQueueEffects = /* @__PURE__ */ new WeakSet(); -class ReactiveEffect { - constructor(fn) { - this.fn = fn; - /** - * @internal - */ - this.deps = void 0; - /** - * @internal - */ - this.depsTail = void 0; - /** - * @internal - */ - this.flags = 1 | 4; - /** - * @internal - */ - this.next = void 0; - /** - * @internal - */ - this.cleanup = void 0; - this.scheduler = void 0; - if (activeEffectScope && activeEffectScope.active) { - activeEffectScope.effects.push(this); - } - } - pause() { - this.flags |= 64; - } - resume() { - if (this.flags & 64) { - this.flags &= ~64; - if (pausedQueueEffects.has(this)) { - pausedQueueEffects.delete(this); - this.trigger(); - } - } - } - /** - * @internal - */ - notify() { - if (this.flags & 2 && !(this.flags & 32)) { - return; - } - if (!(this.flags & 8)) { - batch(this); - } - } - run() { - if (!(this.flags & 1)) { - return this.fn(); - } - this.flags |= 2; - cleanupEffect(this); - prepareDeps(this); - const prevEffect = activeSub; - const prevShouldTrack = shouldTrack; - activeSub = this; - shouldTrack = true; - try { - return this.fn(); - } finally { - if (activeSub !== this) { - warn$2( - "Active effect was not restored correctly - this is likely a Vue internal bug." 
- ); - } - cleanupDeps(this); - activeSub = prevEffect; - shouldTrack = prevShouldTrack; - this.flags &= ~2; - } - } - stop() { - if (this.flags & 1) { - for (let link = this.deps; link; link = link.nextDep) { - removeSub(link); - } - this.deps = this.depsTail = void 0; - cleanupEffect(this); - this.onStop && this.onStop(); - this.flags &= ~1; - } - } - trigger() { - if (this.flags & 64) { - pausedQueueEffects.add(this); - } else if (this.scheduler) { - this.scheduler(); - } else { - this.runIfDirty(); - } - } - /** - * @internal - */ - runIfDirty() { - if (isDirty(this)) { - this.run(); - } - } - get dirty() { - return isDirty(this); - } -} -let batchDepth = 0; -let batchedSub; -let batchedComputed; -function batch(sub, isComputed = false) { - sub.flags |= 8; - if (isComputed) { - sub.next = batchedComputed; - batchedComputed = sub; - return; - } - sub.next = batchedSub; - batchedSub = sub; -} -function startBatch() { - batchDepth++; -} -function endBatch() { - if (--batchDepth > 0) { - return; - } - if (batchedComputed) { - let e = batchedComputed; - batchedComputed = void 0; - while (e) { - const next = e.next; - e.next = void 0; - e.flags &= ~8; - e = next; - } - } - let error; - while (batchedSub) { - let e = batchedSub; - batchedSub = void 0; - while (e) { - const next = e.next; - e.next = void 0; - e.flags &= ~8; - if (e.flags & 1) { - try { - ; - e.trigger(); - } catch (err) { - if (!error) error = err; - } - } - e = next; - } - } - if (error) throw error; -} -function prepareDeps(sub) { - for (let link = sub.deps; link; link = link.nextDep) { - link.version = -1; - link.prevActiveLink = link.dep.activeLink; - link.dep.activeLink = link; - } -} -function cleanupDeps(sub) { - let head; - let tail = sub.depsTail; - let link = tail; - while (link) { - const prev = link.prevDep; - if (link.version === -1) { - if (link === tail) tail = prev; - removeSub(link); - removeDep(link); - } else { - head = link; - } - link.dep.activeLink = link.prevActiveLink; - link.prevActiveLink = void 0; - link = prev; - } - sub.deps = head; - sub.depsTail = tail; -} -function isDirty(sub) { - for (let link = sub.deps; link; link = link.nextDep) { - if (link.dep.version !== link.version || link.dep.computed && (refreshComputed(link.dep.computed) || link.dep.version !== link.version)) { - return true; - } - } - if (sub._dirty) { - return true; - } - return false; -} -function refreshComputed(computed) { - if (computed.flags & 4 && !(computed.flags & 16)) { - return; - } - computed.flags &= ~16; - if (computed.globalVersion === globalVersion) { - return; - } - computed.globalVersion = globalVersion; - const dep = computed.dep; - computed.flags |= 2; - if (dep.version > 0 && !computed.isSSR && computed.deps && !isDirty(computed)) { - computed.flags &= ~2; - return; - } - const prevSub = activeSub; - const prevShouldTrack = shouldTrack; - activeSub = computed; - shouldTrack = true; - try { - prepareDeps(computed); - const value = computed.fn(computed._value); - if (dep.version === 0 || hasChanged(value, computed._value)) { - computed._value = value; - dep.version++; - } - } catch (err) { - dep.version++; - throw err; - } finally { - activeSub = prevSub; - shouldTrack = prevShouldTrack; - cleanupDeps(computed); - computed.flags &= ~2; - } -} -function removeSub(link, soft = false) { - const { dep, prevSub, nextSub } = link; - if (prevSub) { - prevSub.nextSub = nextSub; - link.prevSub = void 0; - } - if (nextSub) { - nextSub.prevSub = prevSub; - link.nextSub = void 0; - } - if (dep.subsHead === link) { - 
dep.subsHead = nextSub; - } - if (dep.subs === link) { - dep.subs = prevSub; - if (!prevSub && dep.computed) { - dep.computed.flags &= ~4; - for (let l = dep.computed.deps; l; l = l.nextDep) { - removeSub(l, true); - } - } - } - if (!soft && !--dep.sc && dep.map) { - dep.map.delete(dep.key); - } -} -function removeDep(link) { - const { prevDep, nextDep } = link; - if (prevDep) { - prevDep.nextDep = nextDep; - link.prevDep = void 0; - } - if (nextDep) { - nextDep.prevDep = prevDep; - link.nextDep = void 0; - } -} -function effect(fn, options) { - if (fn.effect instanceof ReactiveEffect) { - fn = fn.effect.fn; - } - const e = new ReactiveEffect(fn); - if (options) { - extend(e, options); - } - try { - e.run(); - } catch (err) { - e.stop(); - throw err; - } - const runner = e.run.bind(e); - runner.effect = e; - return runner; -} -function stop(runner) { - runner.effect.stop(); -} -let shouldTrack = true; -const trackStack = []; -function pauseTracking() { - trackStack.push(shouldTrack); - shouldTrack = false; -} -function resetTracking() { - const last = trackStack.pop(); - shouldTrack = last === void 0 ? true : last; -} -function cleanupEffect(e) { - const { cleanup } = e; - e.cleanup = void 0; - if (cleanup) { - const prevSub = activeSub; - activeSub = void 0; - try { - cleanup(); - } finally { - activeSub = prevSub; - } - } -} - -let globalVersion = 0; -class Link { - constructor(sub, dep) { - this.sub = sub; - this.dep = dep; - this.version = dep.version; - this.nextDep = this.prevDep = this.nextSub = this.prevSub = this.prevActiveLink = void 0; - } -} -class Dep { - constructor(computed) { - this.computed = computed; - this.version = 0; - /** - * Link between this dep and the current active effect - */ - this.activeLink = void 0; - /** - * Doubly linked list representing the subscribing effects (tail) - */ - this.subs = void 0; - /** - * For object property deps cleanup - */ - this.map = void 0; - this.key = void 0; - /** - * Subscriber counter - */ - this.sc = 0; - { - this.subsHead = void 0; - } - } - track(debugInfo) { - if (!activeSub || !shouldTrack || activeSub === this.computed) { - return; - } - let link = this.activeLink; - if (link === void 0 || link.sub !== activeSub) { - link = this.activeLink = new Link(activeSub, this); - if (!activeSub.deps) { - activeSub.deps = activeSub.depsTail = link; - } else { - link.prevDep = activeSub.depsTail; - activeSub.depsTail.nextDep = link; - activeSub.depsTail = link; - } - addSub(link); - } else if (link.version === -1) { - link.version = this.version; - if (link.nextDep) { - const next = link.nextDep; - next.prevDep = link.prevDep; - if (link.prevDep) { - link.prevDep.nextDep = next; - } - link.prevDep = activeSub.depsTail; - link.nextDep = void 0; - activeSub.depsTail.nextDep = link; - activeSub.depsTail = link; - if (activeSub.deps === link) { - activeSub.deps = next; - } - } - } - if (activeSub.onTrack) { - activeSub.onTrack( - extend( - { - effect: activeSub - }, - debugInfo - ) - ); - } - return link; - } - trigger(debugInfo) { - this.version++; - globalVersion++; - this.notify(debugInfo); - } - notify(debugInfo) { - startBatch(); - try { - if (true) { - for (let head = this.subsHead; head; head = head.nextSub) { - if (head.sub.onTrigger && !(head.sub.flags & 8)) { - head.sub.onTrigger( - extend( - { - effect: head.sub - }, - debugInfo - ) - ); - } - } - } - for (let link = this.subs; link; link = link.prevSub) { - if (link.sub.notify()) { - ; - link.sub.dep.notify(); - } - } - } finally { - endBatch(); - } - } -} -function 
addSub(link) { - link.dep.sc++; - if (link.sub.flags & 4) { - const computed = link.dep.computed; - if (computed && !link.dep.subs) { - computed.flags |= 4 | 16; - for (let l = computed.deps; l; l = l.nextDep) { - addSub(l); - } - } - const currentTail = link.dep.subs; - if (currentTail !== link) { - link.prevSub = currentTail; - if (currentTail) currentTail.nextSub = link; - } - if (link.dep.subsHead === void 0) { - link.dep.subsHead = link; - } - link.dep.subs = link; - } -} -const targetMap = /* @__PURE__ */ new WeakMap(); -const ITERATE_KEY = Symbol( - "Object iterate" -); -const MAP_KEY_ITERATE_KEY = Symbol( - "Map keys iterate" -); -const ARRAY_ITERATE_KEY = Symbol( - "Array iterate" -); -function track(target, type, key) { - if (shouldTrack && activeSub) { - let depsMap = targetMap.get(target); - if (!depsMap) { - targetMap.set(target, depsMap = /* @__PURE__ */ new Map()); - } - let dep = depsMap.get(key); - if (!dep) { - depsMap.set(key, dep = new Dep()); - dep.map = depsMap; - dep.key = key; - } - { - dep.track({ - target, - type, - key - }); - } - } -} -function trigger(target, type, key, newValue, oldValue, oldTarget) { - const depsMap = targetMap.get(target); - if (!depsMap) { - globalVersion++; - return; - } - const run = (dep) => { - if (dep) { - { - dep.trigger({ - target, - type, - key, - newValue, - oldValue, - oldTarget - }); - } - } - }; - startBatch(); - if (type === "clear") { - depsMap.forEach(run); - } else { - const targetIsArray = isArray(target); - const isArrayIndex = targetIsArray && isIntegerKey(key); - if (targetIsArray && key === "length") { - const newLength = Number(newValue); - depsMap.forEach((dep, key2) => { - if (key2 === "length" || key2 === ARRAY_ITERATE_KEY || !isSymbol(key2) && key2 >= newLength) { - run(dep); - } - }); - } else { - if (key !== void 0 || depsMap.has(void 0)) { - run(depsMap.get(key)); - } - if (isArrayIndex) { - run(depsMap.get(ARRAY_ITERATE_KEY)); - } - switch (type) { - case "add": - if (!targetIsArray) { - run(depsMap.get(ITERATE_KEY)); - if (isMap(target)) { - run(depsMap.get(MAP_KEY_ITERATE_KEY)); - } - } else if (isArrayIndex) { - run(depsMap.get("length")); - } - break; - case "delete": - if (!targetIsArray) { - run(depsMap.get(ITERATE_KEY)); - if (isMap(target)) { - run(depsMap.get(MAP_KEY_ITERATE_KEY)); - } - } - break; - case "set": - if (isMap(target)) { - run(depsMap.get(ITERATE_KEY)); - } - break; - } - } - } - endBatch(); -} -function getDepFromReactive(object, key) { - const depMap = targetMap.get(object); - return depMap && depMap.get(key); -} - -function reactiveReadArray(array) { - const raw = toRaw(array); - if (raw === array) return raw; - track(raw, "iterate", ARRAY_ITERATE_KEY); - return isShallow(array) ? raw : raw.map(toReactive); -} -function shallowReadArray(arr) { - track(arr = toRaw(arr), "iterate", ARRAY_ITERATE_KEY); - return arr; -} -const arrayInstrumentations = { - __proto__: null, - [Symbol.iterator]() { - return iterator(this, Symbol.iterator, toReactive); - }, - concat(...args) { - return reactiveReadArray(this).concat( - ...args.map((x) => isArray(x) ? 
reactiveReadArray(x) : x) - ); - }, - entries() { - return iterator(this, "entries", (value) => { - value[1] = toReactive(value[1]); - return value; - }); - }, - every(fn, thisArg) { - return apply(this, "every", fn, thisArg, void 0, arguments); - }, - filter(fn, thisArg) { - return apply(this, "filter", fn, thisArg, (v) => v.map(toReactive), arguments); - }, - find(fn, thisArg) { - return apply(this, "find", fn, thisArg, toReactive, arguments); - }, - findIndex(fn, thisArg) { - return apply(this, "findIndex", fn, thisArg, void 0, arguments); - }, - findLast(fn, thisArg) { - return apply(this, "findLast", fn, thisArg, toReactive, arguments); - }, - findLastIndex(fn, thisArg) { - return apply(this, "findLastIndex", fn, thisArg, void 0, arguments); - }, - // flat, flatMap could benefit from ARRAY_ITERATE but are not straight-forward to implement - forEach(fn, thisArg) { - return apply(this, "forEach", fn, thisArg, void 0, arguments); - }, - includes(...args) { - return searchProxy(this, "includes", args); - }, - indexOf(...args) { - return searchProxy(this, "indexOf", args); - }, - join(separator) { - return reactiveReadArray(this).join(separator); - }, - // keys() iterator only reads `length`, no optimisation required - lastIndexOf(...args) { - return searchProxy(this, "lastIndexOf", args); - }, - map(fn, thisArg) { - return apply(this, "map", fn, thisArg, void 0, arguments); - }, - pop() { - return noTracking(this, "pop"); - }, - push(...args) { - return noTracking(this, "push", args); - }, - reduce(fn, ...args) { - return reduce(this, "reduce", fn, args); - }, - reduceRight(fn, ...args) { - return reduce(this, "reduceRight", fn, args); - }, - shift() { - return noTracking(this, "shift"); - }, - // slice could use ARRAY_ITERATE but also seems to beg for range tracking - some(fn, thisArg) { - return apply(this, "some", fn, thisArg, void 0, arguments); - }, - splice(...args) { - return noTracking(this, "splice", args); - }, - toReversed() { - return reactiveReadArray(this).toReversed(); - }, - toSorted(comparer) { - return reactiveReadArray(this).toSorted(comparer); - }, - toSpliced(...args) { - return reactiveReadArray(this).toSpliced(...args); - }, - unshift(...args) { - return noTracking(this, "unshift", args); - }, - values() { - return iterator(this, "values", toReactive); - } -}; -function iterator(self, method, wrapValue) { - const arr = shallowReadArray(self); - const iter = arr[method](); - if (arr !== self && !isShallow(self)) { - iter._next = iter.next; - iter.next = () => { - const result = iter._next(); - if (result.value) { - result.value = wrapValue(result.value); - } - return result; - }; - } - return iter; -} -const arrayProto = Array.prototype; -function apply(self, method, fn, thisArg, wrappedRetFn, args) { - const arr = shallowReadArray(self); - const needsWrap = arr !== self && !isShallow(self); - const methodFn = arr[method]; - if (methodFn !== arrayProto[method]) { - const result2 = methodFn.apply(self, args); - return needsWrap ? toReactive(result2) : result2; - } - let wrappedFn = fn; - if (arr !== self) { - if (needsWrap) { - wrappedFn = function(item, index) { - return fn.call(this, toReactive(item), index, self); - }; - } else if (fn.length > 2) { - wrappedFn = function(item, index) { - return fn.call(this, item, index, self); - }; - } - } - const result = methodFn.call(arr, wrappedFn, thisArg); - return needsWrap && wrappedRetFn ? 
wrappedRetFn(result) : result; -} -function reduce(self, method, fn, args) { - const arr = shallowReadArray(self); - let wrappedFn = fn; - if (arr !== self) { - if (!isShallow(self)) { - wrappedFn = function(acc, item, index) { - return fn.call(this, acc, toReactive(item), index, self); - }; - } else if (fn.length > 3) { - wrappedFn = function(acc, item, index) { - return fn.call(this, acc, item, index, self); - }; - } - } - return arr[method](wrappedFn, ...args); -} -function searchProxy(self, method, args) { - const arr = toRaw(self); - track(arr, "iterate", ARRAY_ITERATE_KEY); - const res = arr[method](...args); - if ((res === -1 || res === false) && isProxy(args[0])) { - args[0] = toRaw(args[0]); - return arr[method](...args); - } - return res; -} -function noTracking(self, method, args = []) { - pauseTracking(); - startBatch(); - const res = toRaw(self)[method].apply(self, args); - endBatch(); - resetTracking(); - return res; -} - -const isNonTrackableKeys = /* @__PURE__ */ makeMap(`__proto__,__v_isRef,__isVue`); -const builtInSymbols = new Set( - /* @__PURE__ */ Object.getOwnPropertyNames(Symbol).filter((key) => key !== "arguments" && key !== "caller").map((key) => Symbol[key]).filter(isSymbol) -); -function hasOwnProperty(key) { - if (!isSymbol(key)) key = String(key); - const obj = toRaw(this); - track(obj, "has", key); - return obj.hasOwnProperty(key); -} -class BaseReactiveHandler { - constructor(_isReadonly = false, _isShallow = false) { - this._isReadonly = _isReadonly; - this._isShallow = _isShallow; - } - get(target, key, receiver) { - const isReadonly2 = this._isReadonly, isShallow2 = this._isShallow; - if (key === "__v_isReactive") { - return !isReadonly2; - } else if (key === "__v_isReadonly") { - return isReadonly2; - } else if (key === "__v_isShallow") { - return isShallow2; - } else if (key === "__v_raw") { - if (receiver === (isReadonly2 ? isShallow2 ? shallowReadonlyMap : readonlyMap : isShallow2 ? shallowReactiveMap : reactiveMap).get(target) || // receiver is not the reactive proxy, but has the same prototype - // this means the receiver is a user proxy of the reactive proxy - Object.getPrototypeOf(target) === Object.getPrototypeOf(receiver)) { - return target; - } - return; - } - const targetIsArray = isArray(target); - if (!isReadonly2) { - let fn; - if (targetIsArray && (fn = arrayInstrumentations[key])) { - return fn; - } - if (key === "hasOwnProperty") { - return hasOwnProperty; - } - } - const res = Reflect.get( - target, - key, - // if this is a proxy wrapping a ref, return methods using the raw ref - // as receiver so that we don't have to call `toRaw` on the ref in all - // its class methods - isRef(target) ? target : receiver - ); - if (isSymbol(key) ? builtInSymbols.has(key) : isNonTrackableKeys(key)) { - return res; - } - if (!isReadonly2) { - track(target, "get", key); - } - if (isShallow2) { - return res; - } - if (isRef(res)) { - return targetIsArray && isIntegerKey(key) ? res : res.value; - } - if (isObject(res)) { - return isReadonly2 ? 
readonly(res) : reactive(res); - } - return res; - } -} -class MutableReactiveHandler extends BaseReactiveHandler { - constructor(isShallow2 = false) { - super(false, isShallow2); - } - set(target, key, value, receiver) { - let oldValue = target[key]; - if (!this._isShallow) { - const isOldValueReadonly = isReadonly(oldValue); - if (!isShallow(value) && !isReadonly(value)) { - oldValue = toRaw(oldValue); - value = toRaw(value); - } - if (!isArray(target) && isRef(oldValue) && !isRef(value)) { - if (isOldValueReadonly) { - return false; - } else { - oldValue.value = value; - return true; - } - } - } - const hadKey = isArray(target) && isIntegerKey(key) ? Number(key) < target.length : hasOwn(target, key); - const result = Reflect.set( - target, - key, - value, - isRef(target) ? target : receiver - ); - if (target === toRaw(receiver)) { - if (!hadKey) { - trigger(target, "add", key, value); - } else if (hasChanged(value, oldValue)) { - trigger(target, "set", key, value, oldValue); - } - } - return result; - } - deleteProperty(target, key) { - const hadKey = hasOwn(target, key); - const oldValue = target[key]; - const result = Reflect.deleteProperty(target, key); - if (result && hadKey) { - trigger(target, "delete", key, void 0, oldValue); - } - return result; - } - has(target, key) { - const result = Reflect.has(target, key); - if (!isSymbol(key) || !builtInSymbols.has(key)) { - track(target, "has", key); - } - return result; - } - ownKeys(target) { - track( - target, - "iterate", - isArray(target) ? "length" : ITERATE_KEY - ); - return Reflect.ownKeys(target); - } -} -class ReadonlyReactiveHandler extends BaseReactiveHandler { - constructor(isShallow2 = false) { - super(true, isShallow2); - } - set(target, key) { - { - warn$2( - `Set operation on key "${String(key)}" failed: target is readonly.`, - target - ); - } - return true; - } - deleteProperty(target, key) { - { - warn$2( - `Delete operation on key "${String(key)}" failed: target is readonly.`, - target - ); - } - return true; - } -} -const mutableHandlers = /* @__PURE__ */ new MutableReactiveHandler(); -const readonlyHandlers = /* @__PURE__ */ new ReadonlyReactiveHandler(); -const shallowReactiveHandlers = /* @__PURE__ */ new MutableReactiveHandler(true); -const shallowReadonlyHandlers = /* @__PURE__ */ new ReadonlyReactiveHandler(true); - -const toShallow = (value) => value; -const getProto = (v) => Reflect.getPrototypeOf(v); -function createIterableMethod(method, isReadonly2, isShallow2) { - return function(...args) { - const target = this["__v_raw"]; - const rawTarget = toRaw(target); - const targetIsMap = isMap(rawTarget); - const isPair = method === "entries" || method === Symbol.iterator && targetIsMap; - const isKeyOnly = method === "keys" && targetIsMap; - const innerIterator = target[method](...args); - const wrap = isShallow2 ? toShallow : isReadonly2 ? toReadonly : toReactive; - !isReadonly2 && track( - rawTarget, - "iterate", - isKeyOnly ? MAP_KEY_ITERATE_KEY : ITERATE_KEY - ); - return { - // iterator protocol - next() { - const { value, done } = innerIterator.next(); - return done ? { value, done } : { - value: isPair ? [wrap(value[0]), wrap(value[1])] : wrap(value), - done - }; - }, - // iterable protocol - [Symbol.iterator]() { - return this; - } - }; - }; -} -function createReadonlyMethod(type) { - return function(...args) { - { - const key = args[0] ? `on key "${args[0]}" ` : ``; - warn$2( - `${capitalize(type)} operation ${key}failed: target is readonly.`, - toRaw(this) - ); - } - return type === "delete" ? 
false : type === "clear" ? void 0 : this; - }; -} -function createInstrumentations(readonly, shallow) { - const instrumentations = { - get(key) { - const target = this["__v_raw"]; - const rawTarget = toRaw(target); - const rawKey = toRaw(key); - if (!readonly) { - if (hasChanged(key, rawKey)) { - track(rawTarget, "get", key); - } - track(rawTarget, "get", rawKey); - } - const { has } = getProto(rawTarget); - const wrap = shallow ? toShallow : readonly ? toReadonly : toReactive; - if (has.call(rawTarget, key)) { - return wrap(target.get(key)); - } else if (has.call(rawTarget, rawKey)) { - return wrap(target.get(rawKey)); - } else if (target !== rawTarget) { - target.get(key); - } - }, - get size() { - const target = this["__v_raw"]; - !readonly && track(toRaw(target), "iterate", ITERATE_KEY); - return Reflect.get(target, "size", target); - }, - has(key) { - const target = this["__v_raw"]; - const rawTarget = toRaw(target); - const rawKey = toRaw(key); - if (!readonly) { - if (hasChanged(key, rawKey)) { - track(rawTarget, "has", key); - } - track(rawTarget, "has", rawKey); - } - return key === rawKey ? target.has(key) : target.has(key) || target.has(rawKey); - }, - forEach(callback, thisArg) { - const observed = this; - const target = observed["__v_raw"]; - const rawTarget = toRaw(target); - const wrap = shallow ? toShallow : readonly ? toReadonly : toReactive; - !readonly && track(rawTarget, "iterate", ITERATE_KEY); - return target.forEach((value, key) => { - return callback.call(thisArg, wrap(value), wrap(key), observed); - }); - } - }; - extend( - instrumentations, - readonly ? { - add: createReadonlyMethod("add"), - set: createReadonlyMethod("set"), - delete: createReadonlyMethod("delete"), - clear: createReadonlyMethod("clear") - } : { - add(value) { - if (!shallow && !isShallow(value) && !isReadonly(value)) { - value = toRaw(value); - } - const target = toRaw(this); - const proto = getProto(target); - const hadKey = proto.has.call(target, value); - if (!hadKey) { - target.add(value); - trigger(target, "add", value, value); - } - return this; - }, - set(key, value) { - if (!shallow && !isShallow(value) && !isReadonly(value)) { - value = toRaw(value); - } - const target = toRaw(this); - const { has, get } = getProto(target); - let hadKey = has.call(target, key); - if (!hadKey) { - key = toRaw(key); - hadKey = has.call(target, key); - } else { - checkIdentityKeys(target, has, key); - } - const oldValue = get.call(target, key); - target.set(key, value); - if (!hadKey) { - trigger(target, "add", key, value); - } else if (hasChanged(value, oldValue)) { - trigger(target, "set", key, value, oldValue); - } - return this; - }, - delete(key) { - const target = toRaw(this); - const { has, get } = getProto(target); - let hadKey = has.call(target, key); - if (!hadKey) { - key = toRaw(key); - hadKey = has.call(target, key); - } else { - checkIdentityKeys(target, has, key); - } - const oldValue = get ? get.call(target, key) : void 0; - const result = target.delete(key); - if (hadKey) { - trigger(target, "delete", key, void 0, oldValue); - } - return result; - }, - clear() { - const target = toRaw(this); - const hadItems = target.size !== 0; - const oldTarget = isMap(target) ? 
new Map(target) : new Set(target) ; - const result = target.clear(); - if (hadItems) { - trigger( - target, - "clear", - void 0, - void 0, - oldTarget - ); - } - return result; - } - } - ); - const iteratorMethods = [ - "keys", - "values", - "entries", - Symbol.iterator - ]; - iteratorMethods.forEach((method) => { - instrumentations[method] = createIterableMethod(method, readonly, shallow); - }); - return instrumentations; -} -function createInstrumentationGetter(isReadonly2, shallow) { - const instrumentations = createInstrumentations(isReadonly2, shallow); - return (target, key, receiver) => { - if (key === "__v_isReactive") { - return !isReadonly2; - } else if (key === "__v_isReadonly") { - return isReadonly2; - } else if (key === "__v_raw") { - return target; - } - return Reflect.get( - hasOwn(instrumentations, key) && key in target ? instrumentations : target, - key, - receiver - ); - }; -} -const mutableCollectionHandlers = { - get: /* @__PURE__ */ createInstrumentationGetter(false, false) -}; -const shallowCollectionHandlers = { - get: /* @__PURE__ */ createInstrumentationGetter(false, true) -}; -const readonlyCollectionHandlers = { - get: /* @__PURE__ */ createInstrumentationGetter(true, false) -}; -const shallowReadonlyCollectionHandlers = { - get: /* @__PURE__ */ createInstrumentationGetter(true, true) -}; -function checkIdentityKeys(target, has, key) { - const rawKey = toRaw(key); - if (rawKey !== key && has.call(target, rawKey)) { - const type = toRawType(target); - warn$2( - `Reactive ${type} contains both the raw and reactive versions of the same object${type === `Map` ? ` as keys` : ``}, which can lead to inconsistencies. Avoid differentiating between the raw and reactive versions of an object and only use the reactive version if possible.` - ); - } -} - -const reactiveMap = /* @__PURE__ */ new WeakMap(); -const shallowReactiveMap = /* @__PURE__ */ new WeakMap(); -const readonlyMap = /* @__PURE__ */ new WeakMap(); -const shallowReadonlyMap = /* @__PURE__ */ new WeakMap(); -function targetTypeMap(rawType) { - switch (rawType) { - case "Object": - case "Array": - return 1 /* COMMON */; - case "Map": - case "Set": - case "WeakMap": - case "WeakSet": - return 2 /* COLLECTION */; - default: - return 0 /* INVALID */; - } -} -function getTargetType(value) { - return value["__v_skip"] || !Object.isExtensible(value) ? 0 /* INVALID */ : targetTypeMap(toRawType(value)); -} -function reactive(target) { - if (isReadonly(target)) { - return target; - } - return createReactiveObject( - target, - false, - mutableHandlers, - mutableCollectionHandlers, - reactiveMap - ); -} -function shallowReactive(target) { - return createReactiveObject( - target, - false, - shallowReactiveHandlers, - shallowCollectionHandlers, - shallowReactiveMap - ); -} -function readonly(target) { - return createReactiveObject( - target, - true, - readonlyHandlers, - readonlyCollectionHandlers, - readonlyMap - ); -} -function shallowReadonly(target) { - return createReactiveObject( - target, - true, - shallowReadonlyHandlers, - shallowReadonlyCollectionHandlers, - shallowReadonlyMap - ); -} -function createReactiveObject(target, isReadonly2, baseHandlers, collectionHandlers, proxyMap) { - if (!isObject(target)) { - { - warn$2( - `value cannot be made ${isReadonly2 ? 
"readonly" : "reactive"}: ${String( - target - )}` - ); - } - return target; - } - if (target["__v_raw"] && !(isReadonly2 && target["__v_isReactive"])) { - return target; - } - const existingProxy = proxyMap.get(target); - if (existingProxy) { - return existingProxy; - } - const targetType = getTargetType(target); - if (targetType === 0 /* INVALID */) { - return target; - } - const proxy = new Proxy( - target, - targetType === 2 /* COLLECTION */ ? collectionHandlers : baseHandlers - ); - proxyMap.set(target, proxy); - return proxy; -} -function isReactive(value) { - if (isReadonly(value)) { - return isReactive(value["__v_raw"]); - } - return !!(value && value["__v_isReactive"]); -} -function isReadonly(value) { - return !!(value && value["__v_isReadonly"]); -} -function isShallow(value) { - return !!(value && value["__v_isShallow"]); -} -function isProxy(value) { - return value ? !!value["__v_raw"] : false; -} -function toRaw(observed) { - const raw = observed && observed["__v_raw"]; - return raw ? toRaw(raw) : observed; -} -function markRaw(value) { - if (!hasOwn(value, "__v_skip") && Object.isExtensible(value)) { - def(value, "__v_skip", true); - } - return value; -} -const toReactive = (value) => isObject(value) ? reactive(value) : value; -const toReadonly = (value) => isObject(value) ? readonly(value) : value; - -function isRef(r) { - return r ? r["__v_isRef"] === true : false; -} -function ref(value) { - return createRef(value, false); -} -function shallowRef(value) { - return createRef(value, true); -} -function createRef(rawValue, shallow) { - if (isRef(rawValue)) { - return rawValue; - } - return new RefImpl(rawValue, shallow); -} -class RefImpl { - constructor(value, isShallow2) { - this.dep = new Dep(); - this["__v_isRef"] = true; - this["__v_isShallow"] = false; - this._rawValue = isShallow2 ? value : toRaw(value); - this._value = isShallow2 ? value : toReactive(value); - this["__v_isShallow"] = isShallow2; - } - get value() { - { - this.dep.track({ - target: this, - type: "get", - key: "value" - }); - } - return this._value; - } - set value(newValue) { - const oldValue = this._rawValue; - const useDirectValue = this["__v_isShallow"] || isShallow(newValue) || isReadonly(newValue); - newValue = useDirectValue ? newValue : toRaw(newValue); - if (hasChanged(newValue, oldValue)) { - this._rawValue = newValue; - this._value = useDirectValue ? newValue : toReactive(newValue); - { - this.dep.trigger({ - target: this, - type: "set", - key: "value", - newValue, - oldValue - }); - } - } - } -} -function triggerRef(ref2) { - if (ref2.dep) { - { - ref2.dep.trigger({ - target: ref2, - type: "set", - key: "value", - newValue: ref2._value - }); - } - } -} -function unref(ref2) { - return isRef(ref2) ? ref2.value : ref2; -} -function toValue(source) { - return isFunction(source) ? source() : unref(source); -} -const shallowUnwrapHandlers = { - get: (target, key, receiver) => key === "__v_raw" ? target : unref(Reflect.get(target, key, receiver)), - set: (target, key, value, receiver) => { - const oldValue = target[key]; - if (isRef(oldValue) && !isRef(value)) { - oldValue.value = value; - return true; - } else { - return Reflect.set(target, key, value, receiver); - } - } -}; -function proxyRefs(objectWithRefs) { - return isReactive(objectWithRefs) ? 
objectWithRefs : new Proxy(objectWithRefs, shallowUnwrapHandlers); -} -class CustomRefImpl { - constructor(factory) { - this["__v_isRef"] = true; - this._value = void 0; - const dep = this.dep = new Dep(); - const { get, set } = factory(dep.track.bind(dep), dep.trigger.bind(dep)); - this._get = get; - this._set = set; - } - get value() { - return this._value = this._get(); - } - set value(newVal) { - this._set(newVal); - } -} -function customRef(factory) { - return new CustomRefImpl(factory); -} -function toRefs(object) { - if (!isProxy(object)) { - warn$2(`toRefs() expects a reactive object but received a plain one.`); - } - const ret = isArray(object) ? new Array(object.length) : {}; - for (const key in object) { - ret[key] = propertyToRef(object, key); - } - return ret; -} -class ObjectRefImpl { - constructor(_object, _key, _defaultValue) { - this._object = _object; - this._key = _key; - this._defaultValue = _defaultValue; - this["__v_isRef"] = true; - this._value = void 0; - } - get value() { - const val = this._object[this._key]; - return this._value = val === void 0 ? this._defaultValue : val; - } - set value(newVal) { - this._object[this._key] = newVal; - } - get dep() { - return getDepFromReactive(toRaw(this._object), this._key); - } -} -class GetterRefImpl { - constructor(_getter) { - this._getter = _getter; - this["__v_isRef"] = true; - this["__v_isReadonly"] = true; - this._value = void 0; - } - get value() { - return this._value = this._getter(); - } -} -function toRef(source, key, defaultValue) { - if (isRef(source)) { - return source; - } else if (isFunction(source)) { - return new GetterRefImpl(source); - } else if (isObject(source) && arguments.length > 1) { - return propertyToRef(source, key, defaultValue); - } else { - return ref(source); - } -} -function propertyToRef(source, key, defaultValue) { - const val = source[key]; - return isRef(val) ? 
val : new ObjectRefImpl(source, key, defaultValue); -} - -class ComputedRefImpl { - constructor(fn, setter, isSSR) { - this.fn = fn; - this.setter = setter; - /** - * @internal - */ - this._value = void 0; - /** - * @internal - */ - this.dep = new Dep(this); - /** - * @internal - */ - this.__v_isRef = true; - // TODO isolatedDeclarations "__v_isReadonly" - // A computed is also a subscriber that tracks other deps - /** - * @internal - */ - this.deps = void 0; - /** - * @internal - */ - this.depsTail = void 0; - /** - * @internal - */ - this.flags = 16; - /** - * @internal - */ - this.globalVersion = globalVersion - 1; - /** - * @internal - */ - this.next = void 0; - // for backwards compat - this.effect = this; - this["__v_isReadonly"] = !setter; - this.isSSR = isSSR; - } - /** - * @internal - */ - notify() { - this.flags |= 16; - if (!(this.flags & 8) && // avoid infinite self recursion - activeSub !== this) { - batch(this, true); - return true; - } - } - get value() { - const link = this.dep.track({ - target: this, - type: "get", - key: "value" - }) ; - refreshComputed(this); - if (link) { - link.version = this.dep.version; - } - return this._value; - } - set value(newValue) { - if (this.setter) { - this.setter(newValue); - } else { - warn$2("Write operation failed: computed value is readonly"); - } - } -} -function computed$1(getterOrOptions, debugOptions, isSSR = false) { - let getter; - let setter; - if (isFunction(getterOrOptions)) { - getter = getterOrOptions; - } else { - getter = getterOrOptions.get; - setter = getterOrOptions.set; - } - const cRef = new ComputedRefImpl(getter, setter, isSSR); - if (debugOptions && !isSSR) { - cRef.onTrack = debugOptions.onTrack; - cRef.onTrigger = debugOptions.onTrigger; - } - return cRef; -} - -const TrackOpTypes = { - "GET": "get", - "HAS": "has", - "ITERATE": "iterate" -}; -const TriggerOpTypes = { - "SET": "set", - "ADD": "add", - "DELETE": "delete", - "CLEAR": "clear" -}; - -const INITIAL_WATCHER_VALUE = {}; -const cleanupMap = /* @__PURE__ */ new WeakMap(); -let activeWatcher = void 0; -function getCurrentWatcher() { - return activeWatcher; -} -function onWatcherCleanup(cleanupFn, failSilently = false, owner = activeWatcher) { - if (owner) { - let cleanups = cleanupMap.get(owner); - if (!cleanups) cleanupMap.set(owner, cleanups = []); - cleanups.push(cleanupFn); - } else if (!failSilently) { - warn$2( - `onWatcherCleanup() was called when there was no active watcher to associate with.` - ); - } -} -function watch$1(source, cb, options = EMPTY_OBJ) { - const { immediate, deep, once, scheduler, augmentJob, call } = options; - const warnInvalidSource = (s) => { - (options.onWarn || warn$2)( - `Invalid watch source: `, - s, - `A watch source can only be a getter/effect function, a ref, a reactive object, or an array of these types.` - ); - }; - const reactiveGetter = (source2) => { - if (deep) return source2; - if (isShallow(source2) || deep === false || deep === 0) - return traverse(source2, 1); - return traverse(source2); - }; - let effect; - let getter; - let cleanup; - let boundCleanup; - let forceTrigger = false; - let isMultiSource = false; - if (isRef(source)) { - getter = () => source.value; - forceTrigger = isShallow(source); - } else if (isReactive(source)) { - getter = () => reactiveGetter(source); - forceTrigger = true; - } else if (isArray(source)) { - isMultiSource = true; - forceTrigger = source.some((s) => isReactive(s) || isShallow(s)); - getter = () => source.map((s) => { - if (isRef(s)) { - return s.value; - } else if 
(isReactive(s)) { - return reactiveGetter(s); - } else if (isFunction(s)) { - return call ? call(s, 2) : s(); - } else { - warnInvalidSource(s); - } - }); - } else if (isFunction(source)) { - if (cb) { - getter = call ? () => call(source, 2) : source; - } else { - getter = () => { - if (cleanup) { - pauseTracking(); - try { - cleanup(); - } finally { - resetTracking(); - } - } - const currentEffect = activeWatcher; - activeWatcher = effect; - try { - return call ? call(source, 3, [boundCleanup]) : source(boundCleanup); - } finally { - activeWatcher = currentEffect; - } - }; - } - } else { - getter = NOOP; - warnInvalidSource(source); - } - if (cb && deep) { - const baseGetter = getter; - const depth = deep === true ? Infinity : deep; - getter = () => traverse(baseGetter(), depth); - } - const scope = getCurrentScope(); - const watchHandle = () => { - effect.stop(); - if (scope) { - remove(scope.effects, effect); - } - }; - if (once && cb) { - const _cb = cb; - cb = (...args) => { - _cb(...args); - watchHandle(); - }; - } - let oldValue = isMultiSource ? new Array(source.length).fill(INITIAL_WATCHER_VALUE) : INITIAL_WATCHER_VALUE; - const job = (immediateFirstRun) => { - if (!(effect.flags & 1) || !effect.dirty && !immediateFirstRun) { - return; - } - if (cb) { - const newValue = effect.run(); - if (deep || forceTrigger || (isMultiSource ? newValue.some((v, i) => hasChanged(v, oldValue[i])) : hasChanged(newValue, oldValue))) { - if (cleanup) { - cleanup(); - } - const currentWatcher = activeWatcher; - activeWatcher = effect; - try { - const args = [ - newValue, - // pass undefined as the old value when it's changed for the first time - oldValue === INITIAL_WATCHER_VALUE ? void 0 : isMultiSource && oldValue[0] === INITIAL_WATCHER_VALUE ? [] : oldValue, - boundCleanup - ]; - call ? call(cb, 3, args) : ( - // @ts-expect-error - cb(...args) - ); - oldValue = newValue; - } finally { - activeWatcher = currentWatcher; - } - } - } else { - effect.run(); - } - }; - if (augmentJob) { - augmentJob(job); - } - effect = new ReactiveEffect(getter); - effect.scheduler = scheduler ? 
() => scheduler(job, false) : job; - boundCleanup = (fn) => onWatcherCleanup(fn, false, effect); - cleanup = effect.onStop = () => { - const cleanups = cleanupMap.get(effect); - if (cleanups) { - if (call) { - call(cleanups, 4); - } else { - for (const cleanup2 of cleanups) cleanup2(); - } - cleanupMap.delete(effect); - } - }; - { - effect.onTrack = options.onTrack; - effect.onTrigger = options.onTrigger; - } - if (cb) { - if (immediate) { - job(true); - } else { - oldValue = effect.run(); - } - } else if (scheduler) { - scheduler(job.bind(null, true), true); - } else { - effect.run(); - } - watchHandle.pause = effect.pause.bind(effect); - watchHandle.resume = effect.resume.bind(effect); - watchHandle.stop = watchHandle; - return watchHandle; -} -function traverse(value, depth = Infinity, seen) { - if (depth <= 0 || !isObject(value) || value["__v_skip"]) { - return value; - } - seen = seen || /* @__PURE__ */ new Set(); - if (seen.has(value)) { - return value; - } - seen.add(value); - depth--; - if (isRef(value)) { - traverse(value.value, depth, seen); - } else if (isArray(value)) { - for (let i = 0; i < value.length; i++) { - traverse(value[i], depth, seen); - } - } else if (isSet(value) || isMap(value)) { - value.forEach((v) => { - traverse(v, depth, seen); - }); - } else if (isPlainObject(value)) { - for (const key in value) { - traverse(value[key], depth, seen); - } - for (const key of Object.getOwnPropertySymbols(value)) { - if (Object.prototype.propertyIsEnumerable.call(value, key)) { - traverse(value[key], depth, seen); - } - } - } - return value; -} - -const stack$1 = []; -function pushWarningContext(vnode) { - stack$1.push(vnode); -} -function popWarningContext() { - stack$1.pop(); -} -let isWarning = false; -function warn$1(msg, ...args) { - if (isWarning) return; - isWarning = true; - pauseTracking(); - const instance = stack$1.length ? stack$1[stack$1.length - 1].component : null; - const appWarnHandler = instance && instance.appContext.config.warnHandler; - const trace = getComponentTrace(); - if (appWarnHandler) { - callWithErrorHandling( - appWarnHandler, - instance, - 11, - [ - // eslint-disable-next-line no-restricted-syntax - msg + args.map((a) => { - var _a, _b; - return (_b = (_a = a.toString) == null ? void 0 : _a.call(a)) != null ? _b : JSON.stringify(a); - }).join(""), - instance && instance.proxy, - trace.map( - ({ vnode }) => `at <${formatComponentName(instance, vnode.type)}>` - ).join("\n"), - trace - ] - ); - } else { - const warnArgs = [`[Vue warn]: ${msg}`, ...args]; - if (trace.length && // avoid spamming console during tests - true) { - warnArgs.push(` -`, ...formatTrace(trace)); - } - console.warn(...warnArgs); - } - resetTracking(); - isWarning = false; -} -function getComponentTrace() { - let currentVNode = stack$1[stack$1.length - 1]; - if (!currentVNode) { - return []; - } - const normalizedStack = []; - while (currentVNode) { - const last = normalizedStack[0]; - if (last && last.vnode === currentVNode) { - last.recurseCount++; - } else { - normalizedStack.push({ - vnode: currentVNode, - recurseCount: 0 - }); - } - const parentInstance = currentVNode.component && currentVNode.component.parent; - currentVNode = parentInstance && parentInstance.vnode; - } - return normalizedStack; -} -function formatTrace(trace) { - const logs = []; - trace.forEach((entry, i) => { - logs.push(...i === 0 ? [] : [` -`], ...formatTraceEntry(entry)); - }); - return logs; -} -function formatTraceEntry({ vnode, recurseCount }) { - const postfix = recurseCount > 0 ? `... 
(${recurseCount} recursive calls)` : ``; - const isRoot = vnode.component ? vnode.component.parent == null : false; - const open = ` at <${formatComponentName( - vnode.component, - vnode.type, - isRoot - )}`; - const close = `>` + postfix; - return vnode.props ? [open, ...formatProps(vnode.props), close] : [open + close]; -} -function formatProps(props) { - const res = []; - const keys = Object.keys(props); - keys.slice(0, 3).forEach((key) => { - res.push(...formatProp(key, props[key])); - }); - if (keys.length > 3) { - res.push(` ...`); - } - return res; -} -function formatProp(key, value, raw) { - if (isString(value)) { - value = JSON.stringify(value); - return raw ? value : [`${key}=${value}`]; - } else if (typeof value === "number" || typeof value === "boolean" || value == null) { - return raw ? value : [`${key}=${value}`]; - } else if (isRef(value)) { - value = formatProp(key, toRaw(value.value), true); - return raw ? value : [`${key}=Ref<`, value, `>`]; - } else if (isFunction(value)) { - return [`${key}=fn${value.name ? `<${value.name}>` : ``}`]; - } else { - value = toRaw(value); - return raw ? value : [`${key}=`, value]; - } -} -function assertNumber(val, type) { - if (val === void 0) { - return; - } else if (typeof val !== "number") { - warn$1(`${type} is not a valid number - got ${JSON.stringify(val)}.`); - } else if (isNaN(val)) { - warn$1(`${type} is NaN - the duration expression might be incorrect.`); - } -} - -const ErrorCodes = { - "SETUP_FUNCTION": 0, - "0": "SETUP_FUNCTION", - "RENDER_FUNCTION": 1, - "1": "RENDER_FUNCTION", - "NATIVE_EVENT_HANDLER": 5, - "5": "NATIVE_EVENT_HANDLER", - "COMPONENT_EVENT_HANDLER": 6, - "6": "COMPONENT_EVENT_HANDLER", - "VNODE_HOOK": 7, - "7": "VNODE_HOOK", - "DIRECTIVE_HOOK": 8, - "8": "DIRECTIVE_HOOK", - "TRANSITION_HOOK": 9, - "9": "TRANSITION_HOOK", - "APP_ERROR_HANDLER": 10, - "10": "APP_ERROR_HANDLER", - "APP_WARN_HANDLER": 11, - "11": "APP_WARN_HANDLER", - "FUNCTION_REF": 12, - "12": "FUNCTION_REF", - "ASYNC_COMPONENT_LOADER": 13, - "13": "ASYNC_COMPONENT_LOADER", - "SCHEDULER": 14, - "14": "SCHEDULER", - "COMPONENT_UPDATE": 15, - "15": "COMPONENT_UPDATE", - "APP_UNMOUNT_CLEANUP": 16, - "16": "APP_UNMOUNT_CLEANUP" -}; -const ErrorTypeStrings$1 = { - ["sp"]: "serverPrefetch hook", - ["bc"]: "beforeCreate hook", - ["c"]: "created hook", - ["bm"]: "beforeMount hook", - ["m"]: "mounted hook", - ["bu"]: "beforeUpdate hook", - ["u"]: "updated", - ["bum"]: "beforeUnmount hook", - ["um"]: "unmounted hook", - ["a"]: "activated hook", - ["da"]: "deactivated hook", - ["ec"]: "errorCaptured hook", - ["rtc"]: "renderTracked hook", - ["rtg"]: "renderTriggered hook", - [0]: "setup function", - [1]: "render function", - [2]: "watcher getter", - [3]: "watcher callback", - [4]: "watcher cleanup function", - [5]: "native event handler", - [6]: "component event handler", - [7]: "vnode hook", - [8]: "directive hook", - [9]: "transition hook", - [10]: "app errorHandler", - [11]: "app warnHandler", - [12]: "ref function", - [13]: "async component loader", - [14]: "scheduler flush", - [15]: "component update", - [16]: "app unmount cleanup function" -}; -function callWithErrorHandling(fn, instance, type, args) { - try { - return args ? 
fn(...args) : fn(); - } catch (err) { - handleError(err, instance, type); - } -} -function callWithAsyncErrorHandling(fn, instance, type, args) { - if (isFunction(fn)) { - const res = callWithErrorHandling(fn, instance, type, args); - if (res && isPromise(res)) { - res.catch((err) => { - handleError(err, instance, type); - }); - } - return res; - } - if (isArray(fn)) { - const values = []; - for (let i = 0; i < fn.length; i++) { - values.push(callWithAsyncErrorHandling(fn[i], instance, type, args)); - } - return values; - } else { - warn$1( - `Invalid value type passed to callWithAsyncErrorHandling(): ${typeof fn}` - ); - } -} -function handleError(err, instance, type, throwInDev = true) { - const contextVNode = instance ? instance.vnode : null; - const { errorHandler, throwUnhandledErrorInProduction } = instance && instance.appContext.config || EMPTY_OBJ; - if (instance) { - let cur = instance.parent; - const exposedInstance = instance.proxy; - const errorInfo = ErrorTypeStrings$1[type] ; - while (cur) { - const errorCapturedHooks = cur.ec; - if (errorCapturedHooks) { - for (let i = 0; i < errorCapturedHooks.length; i++) { - if (errorCapturedHooks[i](err, exposedInstance, errorInfo) === false) { - return; - } - } - } - cur = cur.parent; - } - if (errorHandler) { - pauseTracking(); - callWithErrorHandling(errorHandler, null, 10, [ - err, - exposedInstance, - errorInfo - ]); - resetTracking(); - return; - } - } - logError(err, type, contextVNode, throwInDev, throwUnhandledErrorInProduction); -} -function logError(err, type, contextVNode, throwInDev = true, throwInProd = false) { - { - const info = ErrorTypeStrings$1[type]; - if (contextVNode) { - pushWarningContext(contextVNode); - } - warn$1(`Unhandled error${info ? ` during execution of ${info}` : ``}`); - if (contextVNode) { - popWarningContext(); - } - if (throwInDev) { - throw err; - } else { - console.error(err); - } - } -} - -const queue = []; -let flushIndex = -1; -const pendingPostFlushCbs = []; -let activePostFlushCbs = null; -let postFlushIndex = 0; -const resolvedPromise = /* @__PURE__ */ Promise.resolve(); -let currentFlushPromise = null; -const RECURSION_LIMIT = 100; -function nextTick(fn) { - const p = currentFlushPromise || resolvedPromise; - return fn ? p.then(this ? 
fn.bind(this) : fn) : p; -} -function findInsertionIndex(id) { - let start = flushIndex + 1; - let end = queue.length; - while (start < end) { - const middle = start + end >>> 1; - const middleJob = queue[middle]; - const middleJobId = getId(middleJob); - if (middleJobId < id || middleJobId === id && middleJob.flags & 2) { - start = middle + 1; - } else { - end = middle; - } - } - return start; -} -function queueJob(job) { - if (!(job.flags & 1)) { - const jobId = getId(job); - const lastJob = queue[queue.length - 1]; - if (!lastJob || // fast path when the job id is larger than the tail - !(job.flags & 2) && jobId >= getId(lastJob)) { - queue.push(job); - } else { - queue.splice(findInsertionIndex(jobId), 0, job); - } - job.flags |= 1; - queueFlush(); - } -} -function queueFlush() { - if (!currentFlushPromise) { - currentFlushPromise = resolvedPromise.then(flushJobs); - } -} -function queuePostFlushCb(cb) { - if (!isArray(cb)) { - if (activePostFlushCbs && cb.id === -1) { - activePostFlushCbs.splice(postFlushIndex + 1, 0, cb); - } else if (!(cb.flags & 1)) { - pendingPostFlushCbs.push(cb); - cb.flags |= 1; - } - } else { - pendingPostFlushCbs.push(...cb); - } - queueFlush(); -} -function flushPreFlushCbs(instance, seen, i = flushIndex + 1) { - { - seen = seen || /* @__PURE__ */ new Map(); - } - for (; i < queue.length; i++) { - const cb = queue[i]; - if (cb && cb.flags & 2) { - if (instance && cb.id !== instance.uid) { - continue; - } - if (checkRecursiveUpdates(seen, cb)) { - continue; - } - queue.splice(i, 1); - i--; - if (cb.flags & 4) { - cb.flags &= ~1; - } - cb(); - if (!(cb.flags & 4)) { - cb.flags &= ~1; - } - } - } -} -function flushPostFlushCbs(seen) { - if (pendingPostFlushCbs.length) { - const deduped = [...new Set(pendingPostFlushCbs)].sort( - (a, b) => getId(a) - getId(b) - ); - pendingPostFlushCbs.length = 0; - if (activePostFlushCbs) { - activePostFlushCbs.push(...deduped); - return; - } - activePostFlushCbs = deduped; - { - seen = seen || /* @__PURE__ */ new Map(); - } - for (postFlushIndex = 0; postFlushIndex < activePostFlushCbs.length; postFlushIndex++) { - const cb = activePostFlushCbs[postFlushIndex]; - if (checkRecursiveUpdates(seen, cb)) { - continue; - } - if (cb.flags & 4) { - cb.flags &= ~1; - } - if (!(cb.flags & 8)) cb(); - cb.flags &= ~1; - } - activePostFlushCbs = null; - postFlushIndex = 0; - } -} -const getId = (job) => job.id == null ? job.flags & 2 ? -1 : Infinity : job.id; -function flushJobs(seen) { - { - seen = seen || /* @__PURE__ */ new Map(); - } - const check = (job) => checkRecursiveUpdates(seen, job) ; - try { - for (flushIndex = 0; flushIndex < queue.length; flushIndex++) { - const job = queue[flushIndex]; - if (job && !(job.flags & 8)) { - if (check(job)) { - continue; - } - if (job.flags & 4) { - job.flags &= ~1; - } - callWithErrorHandling( - job, - job.i, - job.i ? 15 : 14 - ); - if (!(job.flags & 4)) { - job.flags &= ~1; - } - } - } - } finally { - for (; flushIndex < queue.length; flushIndex++) { - const job = queue[flushIndex]; - if (job) { - job.flags &= ~1; - } - } - flushIndex = -1; - queue.length = 0; - flushPostFlushCbs(seen); - currentFlushPromise = null; - if (queue.length || pendingPostFlushCbs.length) { - flushJobs(seen); - } - } -} -function checkRecursiveUpdates(seen, fn) { - const count = seen.get(fn) || 0; - if (count > RECURSION_LIMIT) { - const instance = fn.i; - const componentName = instance && getComponentName(instance.type); - handleError( - `Maximum recursive updates exceeded${componentName ? 
` in component <${componentName}>` : ``}. This means you have a reactive effect that is mutating its own dependencies and thus recursively triggering itself. Possible sources include component template, render function, updated hook or watcher source function.`, - null, - 10 - ); - return true; - } - seen.set(fn, count + 1); - return false; -} - -let isHmrUpdating = false; -const hmrDirtyComponents = /* @__PURE__ */ new Map(); -{ - getGlobalThis().__VUE_HMR_RUNTIME__ = { - createRecord: tryWrap(createRecord), - rerender: tryWrap(rerender), - reload: tryWrap(reload) - }; -} -const map = /* @__PURE__ */ new Map(); -function registerHMR(instance) { - const id = instance.type.__hmrId; - let record = map.get(id); - if (!record) { - createRecord(id, instance.type); - record = map.get(id); - } - record.instances.add(instance); -} -function unregisterHMR(instance) { - map.get(instance.type.__hmrId).instances.delete(instance); -} -function createRecord(id, initialDef) { - if (map.has(id)) { - return false; - } - map.set(id, { - initialDef: normalizeClassComponent(initialDef), - instances: /* @__PURE__ */ new Set() - }); - return true; -} -function normalizeClassComponent(component) { - return isClassComponent(component) ? component.__vccOpts : component; -} -function rerender(id, newRender) { - const record = map.get(id); - if (!record) { - return; - } - record.initialDef.render = newRender; - [...record.instances].forEach((instance) => { - if (newRender) { - instance.render = newRender; - normalizeClassComponent(instance.type).render = newRender; - } - instance.renderCache = []; - isHmrUpdating = true; - instance.update(); - isHmrUpdating = false; - }); -} -function reload(id, newComp) { - const record = map.get(id); - if (!record) return; - newComp = normalizeClassComponent(newComp); - updateComponentDef(record.initialDef, newComp); - const instances = [...record.instances]; - for (let i = 0; i < instances.length; i++) { - const instance = instances[i]; - const oldComp = normalizeClassComponent(instance.type); - let dirtyInstances = hmrDirtyComponents.get(oldComp); - if (!dirtyInstances) { - if (oldComp !== record.initialDef) { - updateComponentDef(oldComp, newComp); - } - hmrDirtyComponents.set(oldComp, dirtyInstances = /* @__PURE__ */ new Set()); - } - dirtyInstances.add(instance); - instance.appContext.propsCache.delete(instance.type); - instance.appContext.emitsCache.delete(instance.type); - instance.appContext.optionsCache.delete(instance.type); - if (instance.ceReload) { - dirtyInstances.add(instance); - instance.ceReload(newComp.styles); - dirtyInstances.delete(instance); - } else if (instance.parent) { - queueJob(() => { - isHmrUpdating = true; - instance.parent.update(); - isHmrUpdating = false; - dirtyInstances.delete(instance); - }); - } else if (instance.appContext.reload) { - instance.appContext.reload(); - } else if (typeof window !== "undefined") { - window.location.reload(); - } else { - console.warn( - "[HMR] Root or manually mounted instance modified. Full reload required." 
- ); - } - if (instance.root.ce && instance !== instance.root) { - instance.root.ce._removeChildStyle(oldComp); - } - } - queuePostFlushCb(() => { - hmrDirtyComponents.clear(); - }); -} -function updateComponentDef(oldComp, newComp) { - extend(oldComp, newComp); - for (const key in oldComp) { - if (key !== "__file" && !(key in newComp)) { - delete oldComp[key]; - } - } -} -function tryWrap(fn) { - return (id, arg) => { - try { - return fn(id, arg); - } catch (e) { - console.error(e); - console.warn( - `[HMR] Something went wrong during Vue component hot-reload. Full reload required.` - ); - } - }; -} - -let devtools$1; -let buffer = []; -let devtoolsNotInstalled = false; -function emit$1(event, ...args) { - if (devtools$1) { - devtools$1.emit(event, ...args); - } else if (!devtoolsNotInstalled) { - buffer.push({ event, args }); - } -} -function setDevtoolsHook$1(hook, target) { - var _a, _b; - devtools$1 = hook; - if (devtools$1) { - devtools$1.enabled = true; - buffer.forEach(({ event, args }) => devtools$1.emit(event, ...args)); - buffer = []; - } else if ( - // handle late devtools injection - only do this if we are in an actual - // browser environment to avoid the timer handle stalling test runner exit - // (#4815) - typeof window !== "undefined" && // some envs mock window but not fully - window.HTMLElement && // also exclude jsdom - // eslint-disable-next-line no-restricted-syntax - !((_b = (_a = window.navigator) == null ? void 0 : _a.userAgent) == null ? void 0 : _b.includes("jsdom")) - ) { - const replay = target.__VUE_DEVTOOLS_HOOK_REPLAY__ = target.__VUE_DEVTOOLS_HOOK_REPLAY__ || []; - replay.push((newHook) => { - setDevtoolsHook$1(newHook, target); - }); - setTimeout(() => { - if (!devtools$1) { - target.__VUE_DEVTOOLS_HOOK_REPLAY__ = null; - devtoolsNotInstalled = true; - buffer = []; - } - }, 3e3); - } else { - devtoolsNotInstalled = true; - buffer = []; - } -} -function devtoolsInitApp(app, version) { - emit$1("app:init" /* APP_INIT */, app, version, { - Fragment, - Text, - Comment, - Static - }); -} -function devtoolsUnmountApp(app) { - emit$1("app:unmount" /* APP_UNMOUNT */, app); -} -const devtoolsComponentAdded = /* @__PURE__ */ createDevtoolsComponentHook("component:added" /* COMPONENT_ADDED */); -const devtoolsComponentUpdated = /* @__PURE__ */ createDevtoolsComponentHook("component:updated" /* COMPONENT_UPDATED */); -const _devtoolsComponentRemoved = /* @__PURE__ */ createDevtoolsComponentHook( - "component:removed" /* COMPONENT_REMOVED */ -); -const devtoolsComponentRemoved = (component) => { - if (devtools$1 && typeof devtools$1.cleanupBuffer === "function" && // remove the component if it wasn't buffered - !devtools$1.cleanupBuffer(component)) { - _devtoolsComponentRemoved(component); - } -}; -/*! #__NO_SIDE_EFFECTS__ */ -// @__NO_SIDE_EFFECTS__ -function createDevtoolsComponentHook(hook) { - return (component) => { - emit$1( - hook, - component.appContext.app, - component.uid, - component.parent ? 
component.parent.uid : void 0, - component - ); - }; -} -const devtoolsPerfStart = /* @__PURE__ */ createDevtoolsPerformanceHook("perf:start" /* PERFORMANCE_START */); -const devtoolsPerfEnd = /* @__PURE__ */ createDevtoolsPerformanceHook("perf:end" /* PERFORMANCE_END */); -function createDevtoolsPerformanceHook(hook) { - return (component, type, time) => { - emit$1(hook, component.appContext.app, component.uid, component, type, time); - }; -} -function devtoolsComponentEmit(component, event, params) { - emit$1( - "component:emit" /* COMPONENT_EMIT */, - component.appContext.app, - component, - event, - params - ); -} - -let currentRenderingInstance = null; -let currentScopeId = null; -function setCurrentRenderingInstance(instance) { - const prev = currentRenderingInstance; - currentRenderingInstance = instance; - currentScopeId = instance && instance.type.__scopeId || null; - return prev; -} -function pushScopeId(id) { - currentScopeId = id; -} -function popScopeId() { - currentScopeId = null; -} -const withScopeId = (_id) => withCtx; -function withCtx(fn, ctx = currentRenderingInstance, isNonScopedSlot) { - if (!ctx) return fn; - if (fn._n) { - return fn; - } - const renderFnWithContext = (...args) => { - if (renderFnWithContext._d) { - setBlockTracking(-1); - } - const prevInstance = setCurrentRenderingInstance(ctx); - let res; - try { - res = fn(...args); - } finally { - setCurrentRenderingInstance(prevInstance); - if (renderFnWithContext._d) { - setBlockTracking(1); - } - } - { - devtoolsComponentUpdated(ctx); - } - return res; - }; - renderFnWithContext._n = true; - renderFnWithContext._c = true; - renderFnWithContext._d = true; - return renderFnWithContext; -} - -function validateDirectiveName(name) { - if (isBuiltInDirective(name)) { - warn$1("Do not use built-in directive ids as custom directive id: " + name); - } -} -function withDirectives(vnode, directives) { - if (currentRenderingInstance === null) { - warn$1(`withDirectives can only be used inside render functions.`); - return vnode; - } - const instance = getComponentPublicInstance(currentRenderingInstance); - const bindings = vnode.dirs || (vnode.dirs = []); - for (let i = 0; i < directives.length; i++) { - let [dir, value, arg, modifiers = EMPTY_OBJ] = directives[i]; - if (dir) { - if (isFunction(dir)) { - dir = { - mounted: dir, - updated: dir - }; - } - if (dir.deep) { - traverse(value); - } - bindings.push({ - dir, - instance, - value, - oldValue: void 0, - arg, - modifiers - }); - } - } - return vnode; -} -function invokeDirectiveHook(vnode, prevVNode, instance, name) { - const bindings = vnode.dirs; - const oldBindings = prevVNode && prevVNode.dirs; - for (let i = 0; i < bindings.length; i++) { - const binding = bindings[i]; - if (oldBindings) { - binding.oldValue = oldBindings[i].value; - } - let hook = binding.dir[name]; - if (hook) { - pauseTracking(); - callWithAsyncErrorHandling(hook, instance, 8, [ - vnode.el, - binding, - vnode, - prevVNode - ]); - resetTracking(); - } - } -} - -const TeleportEndKey = Symbol("_vte"); -const isTeleport = (type) => type.__isTeleport; -const isTeleportDisabled = (props) => props && (props.disabled || props.disabled === ""); -const isTeleportDeferred = (props) => props && (props.defer || props.defer === ""); -const isTargetSVG = (target) => typeof SVGElement !== "undefined" && target instanceof SVGElement; -const isTargetMathML = (target) => typeof MathMLElement === "function" && target instanceof MathMLElement; -const resolveTarget = (props, select) => { - const targetSelector = 
props && props.to; - if (isString(targetSelector)) { - if (!select) { - warn$1( - `Current renderer does not support string target for Teleports. (missing querySelector renderer option)` - ); - return null; - } else { - const target = select(targetSelector); - if (!target && !isTeleportDisabled(props)) { - warn$1( - `Failed to locate Teleport target with selector "${targetSelector}". Note the target element must exist before the component is mounted - i.e. the target cannot be rendered by the component itself, and ideally should be outside of the entire Vue component tree.` - ); - } - return target; - } - } else { - if (!targetSelector && !isTeleportDisabled(props)) { - warn$1(`Invalid Teleport target: ${targetSelector}`); - } - return targetSelector; - } -}; -const TeleportImpl = { - name: "Teleport", - __isTeleport: true, - process(n1, n2, container, anchor, parentComponent, parentSuspense, namespace, slotScopeIds, optimized, internals) { - const { - mc: mountChildren, - pc: patchChildren, - pbc: patchBlockChildren, - o: { insert, querySelector, createText, createComment } - } = internals; - const disabled = isTeleportDisabled(n2.props); - let { shapeFlag, children, dynamicChildren } = n2; - if (isHmrUpdating) { - optimized = false; - dynamicChildren = null; - } - if (n1 == null) { - const placeholder = n2.el = createComment("teleport start") ; - const mainAnchor = n2.anchor = createComment("teleport end") ; - insert(placeholder, container, anchor); - insert(mainAnchor, container, anchor); - const mount = (container2, anchor2) => { - if (shapeFlag & 16) { - if (parentComponent && parentComponent.isCE) { - parentComponent.ce._teleportTarget = container2; - } - mountChildren( - children, - container2, - anchor2, - parentComponent, - parentSuspense, - namespace, - slotScopeIds, - optimized - ); - } - }; - const mountToTarget = () => { - const target = n2.target = resolveTarget(n2.props, querySelector); - const targetAnchor = prepareAnchor(target, n2, createText, insert); - if (target) { - if (namespace !== "svg" && isTargetSVG(target)) { - namespace = "svg"; - } else if (namespace !== "mathml" && isTargetMathML(target)) { - namespace = "mathml"; - } - if (!disabled) { - mount(target, targetAnchor); - updateCssVars(n2, false); - } - } else if (!disabled) { - warn$1( - "Invalid Teleport target on mount:", - target, - `(${typeof target})` - ); - } - }; - if (disabled) { - mount(container, mainAnchor); - updateCssVars(n2, true); - } - if (isTeleportDeferred(n2.props)) { - queuePostRenderEffect(mountToTarget, parentSuspense); - } else { - mountToTarget(); - } - } else { - n2.el = n1.el; - n2.targetStart = n1.targetStart; - const mainAnchor = n2.anchor = n1.anchor; - const target = n2.target = n1.target; - const targetAnchor = n2.targetAnchor = n1.targetAnchor; - const wasDisabled = isTeleportDisabled(n1.props); - const currentContainer = wasDisabled ? container : target; - const currentAnchor = wasDisabled ? 
mainAnchor : targetAnchor; - if (namespace === "svg" || isTargetSVG(target)) { - namespace = "svg"; - } else if (namespace === "mathml" || isTargetMathML(target)) { - namespace = "mathml"; - } - if (dynamicChildren) { - patchBlockChildren( - n1.dynamicChildren, - dynamicChildren, - currentContainer, - parentComponent, - parentSuspense, - namespace, - slotScopeIds - ); - traverseStaticChildren(n1, n2, true); - } else if (!optimized) { - patchChildren( - n1, - n2, - currentContainer, - currentAnchor, - parentComponent, - parentSuspense, - namespace, - slotScopeIds, - false - ); - } - if (disabled) { - if (!wasDisabled) { - moveTeleport( - n2, - container, - mainAnchor, - internals, - 1 - ); - } else { - if (n2.props && n1.props && n2.props.to !== n1.props.to) { - n2.props.to = n1.props.to; - } - } - } else { - if ((n2.props && n2.props.to) !== (n1.props && n1.props.to)) { - const nextTarget = n2.target = resolveTarget( - n2.props, - querySelector - ); - if (nextTarget) { - moveTeleport( - n2, - nextTarget, - null, - internals, - 0 - ); - } else { - warn$1( - "Invalid Teleport target on update:", - target, - `(${typeof target})` - ); - } - } else if (wasDisabled) { - moveTeleport( - n2, - target, - targetAnchor, - internals, - 1 - ); - } - } - updateCssVars(n2, disabled); - } - }, - remove(vnode, parentComponent, parentSuspense, { um: unmount, o: { remove: hostRemove } }, doRemove) { - const { - shapeFlag, - children, - anchor, - targetStart, - targetAnchor, - target, - props - } = vnode; - if (target) { - hostRemove(targetStart); - hostRemove(targetAnchor); - } - doRemove && hostRemove(anchor); - if (shapeFlag & 16) { - const shouldRemove = doRemove || !isTeleportDisabled(props); - for (let i = 0; i < children.length; i++) { - const child = children[i]; - unmount( - child, - parentComponent, - parentSuspense, - shouldRemove, - !!child.dynamicChildren - ); - } - } - }, - move: moveTeleport, - hydrate: hydrateTeleport -}; -function moveTeleport(vnode, container, parentAnchor, { o: { insert }, m: move }, moveType = 2) { - if (moveType === 0) { - insert(vnode.targetAnchor, container, parentAnchor); - } - const { el, anchor, shapeFlag, children, props } = vnode; - const isReorder = moveType === 2; - if (isReorder) { - insert(el, container, parentAnchor); - } - if (!isReorder || isTeleportDisabled(props)) { - if (shapeFlag & 16) { - for (let i = 0; i < children.length; i++) { - move( - children[i], - container, - parentAnchor, - 2 - ); - } - } - } - if (isReorder) { - insert(anchor, container, parentAnchor); - } -} -function hydrateTeleport(node, vnode, parentComponent, parentSuspense, slotScopeIds, optimized, { - o: { nextSibling, parentNode, querySelector, insert, createText } -}, hydrateChildren) { - const target = vnode.target = resolveTarget( - vnode.props, - querySelector - ); - if (target) { - const disabled = isTeleportDisabled(vnode.props); - const targetNode = target._lpa || target.firstChild; - if (vnode.shapeFlag & 16) { - if (disabled) { - vnode.anchor = hydrateChildren( - nextSibling(node), - vnode, - parentNode(node), - parentComponent, - parentSuspense, - slotScopeIds, - optimized - ); - vnode.targetStart = targetNode; - vnode.targetAnchor = targetNode && nextSibling(targetNode); - } else { - vnode.anchor = nextSibling(node); - let targetAnchor = targetNode; - while (targetAnchor) { - if (targetAnchor && targetAnchor.nodeType === 8) { - if (targetAnchor.data === "teleport start anchor") { - vnode.targetStart = targetAnchor; - } else if (targetAnchor.data === "teleport anchor") { - 
vnode.targetAnchor = targetAnchor; - target._lpa = vnode.targetAnchor && nextSibling(vnode.targetAnchor); - break; - } - } - targetAnchor = nextSibling(targetAnchor); - } - if (!vnode.targetAnchor) { - prepareAnchor(target, vnode, createText, insert); - } - hydrateChildren( - targetNode && nextSibling(targetNode), - vnode, - target, - parentComponent, - parentSuspense, - slotScopeIds, - optimized - ); - } - } - updateCssVars(vnode, disabled); - } - return vnode.anchor && nextSibling(vnode.anchor); -} -const Teleport = TeleportImpl; -function updateCssVars(vnode, isDisabled) { - const ctx = vnode.ctx; - if (ctx && ctx.ut) { - let node, anchor; - if (isDisabled) { - node = vnode.el; - anchor = vnode.anchor; - } else { - node = vnode.targetStart; - anchor = vnode.targetAnchor; - } - while (node && node !== anchor) { - if (node.nodeType === 1) node.setAttribute("data-v-owner", ctx.uid); - node = node.nextSibling; - } - ctx.ut(); - } -} -function prepareAnchor(target, vnode, createText, insert) { - const targetStart = vnode.targetStart = createText(""); - const targetAnchor = vnode.targetAnchor = createText(""); - targetStart[TeleportEndKey] = targetAnchor; - if (target) { - insert(targetStart, target); - insert(targetAnchor, target); - } - return targetAnchor; -} - -const leaveCbKey = Symbol("_leaveCb"); -const enterCbKey$1 = Symbol("_enterCb"); -function useTransitionState() { - const state = { - isMounted: false, - isLeaving: false, - isUnmounting: false, - leavingVNodes: /* @__PURE__ */ new Map() - }; - onMounted(() => { - state.isMounted = true; - }); - onBeforeUnmount(() => { - state.isUnmounting = true; - }); - return state; -} -const TransitionHookValidator = [Function, Array]; -const BaseTransitionPropsValidators = { - mode: String, - appear: Boolean, - persisted: Boolean, - // enter - onBeforeEnter: TransitionHookValidator, - onEnter: TransitionHookValidator, - onAfterEnter: TransitionHookValidator, - onEnterCancelled: TransitionHookValidator, - // leave - onBeforeLeave: TransitionHookValidator, - onLeave: TransitionHookValidator, - onAfterLeave: TransitionHookValidator, - onLeaveCancelled: TransitionHookValidator, - // appear - onBeforeAppear: TransitionHookValidator, - onAppear: TransitionHookValidator, - onAfterAppear: TransitionHookValidator, - onAppearCancelled: TransitionHookValidator -}; -const recursiveGetSubtree = (instance) => { - const subTree = instance.subTree; - return subTree.component ? 
recursiveGetSubtree(subTree.component) : subTree; -}; -const BaseTransitionImpl = { - name: `BaseTransition`, - props: BaseTransitionPropsValidators, - setup(props, { slots }) { - const instance = getCurrentInstance(); - const state = useTransitionState(); - return () => { - const children = slots.default && getTransitionRawChildren(slots.default(), true); - if (!children || !children.length) { - return; - } - const child = findNonCommentChild(children); - const rawProps = toRaw(props); - const { mode } = rawProps; - if (mode && mode !== "in-out" && mode !== "out-in" && mode !== "default") { - warn$1(`invalid mode: ${mode}`); - } - if (state.isLeaving) { - return emptyPlaceholder(child); - } - const innerChild = getInnerChild$1(child); - if (!innerChild) { - return emptyPlaceholder(child); - } - let enterHooks = resolveTransitionHooks( - innerChild, - rawProps, - state, - instance, - // #11061, ensure enterHooks is fresh after clone - (hooks) => enterHooks = hooks - ); - if (innerChild.type !== Comment) { - setTransitionHooks(innerChild, enterHooks); - } - const oldChild = instance.subTree; - const oldInnerChild = oldChild && getInnerChild$1(oldChild); - if (oldInnerChild && oldInnerChild.type !== Comment && !isSameVNodeType(innerChild, oldInnerChild) && recursiveGetSubtree(instance).type !== Comment) { - const leavingHooks = resolveTransitionHooks( - oldInnerChild, - rawProps, - state, - instance - ); - setTransitionHooks(oldInnerChild, leavingHooks); - if (mode === "out-in" && innerChild.type !== Comment) { - state.isLeaving = true; - leavingHooks.afterLeave = () => { - state.isLeaving = false; - if (!(instance.job.flags & 8)) { - instance.update(); - } - delete leavingHooks.afterLeave; - }; - return emptyPlaceholder(child); - } else if (mode === "in-out" && innerChild.type !== Comment) { - leavingHooks.delayLeave = (el, earlyRemove, delayedLeave) => { - const leavingVNodesCache = getLeavingNodesForType( - state, - oldInnerChild - ); - leavingVNodesCache[String(oldInnerChild.key)] = oldInnerChild; - el[leaveCbKey] = () => { - earlyRemove(); - el[leaveCbKey] = void 0; - delete enterHooks.delayedLeave; - }; - enterHooks.delayedLeave = delayedLeave; - }; - } - } - return child; - }; - } -}; -function findNonCommentChild(children) { - let child = children[0]; - if (children.length > 1) { - let hasFound = false; - for (const c of children) { - if (c.type !== Comment) { - if (hasFound) { - warn$1( - "<transition> can only be used on a single element or component. Use <transition-group> for lists."
- ); - break; - } - child = c; - hasFound = true; - } - } - } - return child; -} -const BaseTransition = BaseTransitionImpl; -function getLeavingNodesForType(state, vnode) { - const { leavingVNodes } = state; - let leavingVNodesCache = leavingVNodes.get(vnode.type); - if (!leavingVNodesCache) { - leavingVNodesCache = /* @__PURE__ */ Object.create(null); - leavingVNodes.set(vnode.type, leavingVNodesCache); - } - return leavingVNodesCache; -} -function resolveTransitionHooks(vnode, props, state, instance, postClone) { - const { - appear, - mode, - persisted = false, - onBeforeEnter, - onEnter, - onAfterEnter, - onEnterCancelled, - onBeforeLeave, - onLeave, - onAfterLeave, - onLeaveCancelled, - onBeforeAppear, - onAppear, - onAfterAppear, - onAppearCancelled - } = props; - const key = String(vnode.key); - const leavingVNodesCache = getLeavingNodesForType(state, vnode); - const callHook = (hook, args) => { - hook && callWithAsyncErrorHandling( - hook, - instance, - 9, - args - ); - }; - const callAsyncHook = (hook, args) => { - const done = args[1]; - callHook(hook, args); - if (isArray(hook)) { - if (hook.every((hook2) => hook2.length <= 1)) done(); - } else if (hook.length <= 1) { - done(); - } - }; - const hooks = { - mode, - persisted, - beforeEnter(el) { - let hook = onBeforeEnter; - if (!state.isMounted) { - if (appear) { - hook = onBeforeAppear || onBeforeEnter; - } else { - return; - } - } - if (el[leaveCbKey]) { - el[leaveCbKey]( - true - /* cancelled */ - ); - } - const leavingVNode = leavingVNodesCache[key]; - if (leavingVNode && isSameVNodeType(vnode, leavingVNode) && leavingVNode.el[leaveCbKey]) { - leavingVNode.el[leaveCbKey](); - } - callHook(hook, [el]); - }, - enter(el) { - let hook = onEnter; - let afterHook = onAfterEnter; - let cancelHook = onEnterCancelled; - if (!state.isMounted) { - if (appear) { - hook = onAppear || onEnter; - afterHook = onAfterAppear || onAfterEnter; - cancelHook = onAppearCancelled || onEnterCancelled; - } else { - return; - } - } - let called = false; - const done = el[enterCbKey$1] = (cancelled) => { - if (called) return; - called = true; - if (cancelled) { - callHook(cancelHook, [el]); - } else { - callHook(afterHook, [el]); - } - if (hooks.delayedLeave) { - hooks.delayedLeave(); - } - el[enterCbKey$1] = void 0; - }; - if (hook) { - callAsyncHook(hook, [el, done]); - } else { - done(); - } - }, - leave(el, remove) { - const key2 = String(vnode.key); - if (el[enterCbKey$1]) { - el[enterCbKey$1]( - true - /* cancelled */ - ); - } - if (state.isUnmounting) { - return remove(); - } - callHook(onBeforeLeave, [el]); - let called = false; - const done = el[leaveCbKey] = (cancelled) => { - if (called) return; - called = true; - remove(); - if (cancelled) { - callHook(onLeaveCancelled, [el]); - } else { - callHook(onAfterLeave, [el]); - } - el[leaveCbKey] = void 0; - if (leavingVNodesCache[key2] === vnode) { - delete leavingVNodesCache[key2]; - } - }; - leavingVNodesCache[key2] = vnode; - if (onLeave) { - callAsyncHook(onLeave, [el, done]); - } else { - done(); - } - }, - clone(vnode2) { - const hooks2 = resolveTransitionHooks( - vnode2, - props, - state, - instance, - postClone - ); - if (postClone) postClone(hooks2); - return hooks2; - } - }; - return hooks; -} -function emptyPlaceholder(vnode) { - if (isKeepAlive(vnode)) { - vnode = cloneVNode(vnode); - vnode.children = null; - return vnode; - } -} -function getInnerChild$1(vnode) { - if (!isKeepAlive(vnode)) { - if (isTeleport(vnode.type) && vnode.children) { - return 
findNonCommentChild(vnode.children); - } - return vnode; - } - if (vnode.component) { - return vnode.component.subTree; - } - const { shapeFlag, children } = vnode; - if (children) { - if (shapeFlag & 16) { - return children[0]; - } - if (shapeFlag & 32 && isFunction(children.default)) { - return children.default(); - } - } -} -function setTransitionHooks(vnode, hooks) { - if (vnode.shapeFlag & 6 && vnode.component) { - vnode.transition = hooks; - setTransitionHooks(vnode.component.subTree, hooks); - } else if (vnode.shapeFlag & 128) { - vnode.ssContent.transition = hooks.clone(vnode.ssContent); - vnode.ssFallback.transition = hooks.clone(vnode.ssFallback); - } else { - vnode.transition = hooks; - } -} -function getTransitionRawChildren(children, keepComment = false, parentKey) { - let ret = []; - let keyedFragmentCount = 0; - for (let i = 0; i < children.length; i++) { - let child = children[i]; - const key = parentKey == null ? child.key : String(parentKey) + String(child.key != null ? child.key : i); - if (child.type === Fragment) { - if (child.patchFlag & 128) keyedFragmentCount++; - ret = ret.concat( - getTransitionRawChildren(child.children, keepComment, key) - ); - } else if (keepComment || child.type !== Comment) { - ret.push(key != null ? cloneVNode(child, { key }) : child); - } - } - if (keyedFragmentCount > 1) { - for (let i = 0; i < ret.length; i++) { - ret[i].patchFlag = -2; - } - } - return ret; -} - -/*! #__NO_SIDE_EFFECTS__ */ -// @__NO_SIDE_EFFECTS__ -function defineComponent(options, extraOptions) { - return isFunction(options) ? ( - // #8236: extend call and options.name access are considered side-effects - // by Rollup, so we have to wrap it in a pure-annotated IIFE. - /* @__PURE__ */ (() => extend({ name: options.name }, extraOptions, { setup: options }))() - ) : options; -} - -function useId() { - const i = getCurrentInstance(); - if (i) { - return (i.appContext.config.idPrefix || "v") + "-" + i.ids[0] + i.ids[1]++; - } else { - warn$1( - `useId() is called when there is no active component instance to be associated with.` - ); - } - return ""; -} -function markAsyncBoundary(instance) { - instance.ids = [instance.ids[0] + instance.ids[2]++ + "-", 0, 0]; -} - -const knownTemplateRefs = /* @__PURE__ */ new WeakSet(); -function useTemplateRef(key) { - const i = getCurrentInstance(); - const r = shallowRef(null); - if (i) { - const refs = i.refs === EMPTY_OBJ ? i.refs = {} : i.refs; - let desc; - if ((desc = Object.getOwnPropertyDescriptor(refs, key)) && !desc.configurable) { - warn$1(`useTemplateRef('${key}') already exists.`); - } else { - Object.defineProperty(refs, key, { - enumerable: true, - get: () => r.value, - set: (val) => r.value = val - }); - } - } else { - warn$1( - `useTemplateRef() is called when there is no active component instance to be associated with.` - ); - } - const ret = readonly(r) ; - { - knownTemplateRefs.add(ret); - } - return ret; -} - -function setRef(rawRef, oldRawRef, parentSuspense, vnode, isUnmount = false) { - if (isArray(rawRef)) { - rawRef.forEach( - (r, i) => setRef( - r, - oldRawRef && (isArray(oldRawRef) ? oldRawRef[i] : oldRawRef), - parentSuspense, - vnode, - isUnmount - ) - ); - return; - } - if (isAsyncWrapper(vnode) && !isUnmount) { - return; - } - const refValue = vnode.shapeFlag & 4 ? getComponentPublicInstance(vnode.component) : vnode.el; - const value = isUnmount ? null : refValue; - const { i: owner, r: ref } = rawRef; - if (!owner) { - warn$1( - `Missing ref owner context. ref cannot be used on hoisted vnodes. 
A vnode with ref must be created inside the render function.` - ); - return; - } - const oldRef = oldRawRef && oldRawRef.r; - const refs = owner.refs === EMPTY_OBJ ? owner.refs = {} : owner.refs; - const setupState = owner.setupState; - const rawSetupState = toRaw(setupState); - const canSetSetupRef = setupState === EMPTY_OBJ ? () => false : (key) => { - { - if (hasOwn(rawSetupState, key) && !isRef(rawSetupState[key])) { - warn$1( - `Template ref "${key}" used on a non-ref value. It will not work in the production build.` - ); - } - if (knownTemplateRefs.has(rawSetupState[key])) { - return false; - } - } - return hasOwn(rawSetupState, key); - }; - if (oldRef != null && oldRef !== ref) { - if (isString(oldRef)) { - refs[oldRef] = null; - if (canSetSetupRef(oldRef)) { - setupState[oldRef] = null; - } - } else if (isRef(oldRef)) { - oldRef.value = null; - } - } - if (isFunction(ref)) { - callWithErrorHandling(ref, owner, 12, [value, refs]); - } else { - const _isString = isString(ref); - const _isRef = isRef(ref); - if (_isString || _isRef) { - const doSet = () => { - if (rawRef.f) { - const existing = _isString ? canSetSetupRef(ref) ? setupState[ref] : refs[ref] : ref.value; - if (isUnmount) { - isArray(existing) && remove(existing, refValue); - } else { - if (!isArray(existing)) { - if (_isString) { - refs[ref] = [refValue]; - if (canSetSetupRef(ref)) { - setupState[ref] = refs[ref]; - } - } else { - ref.value = [refValue]; - if (rawRef.k) refs[rawRef.k] = ref.value; - } - } else if (!existing.includes(refValue)) { - existing.push(refValue); - } - } - } else if (_isString) { - refs[ref] = value; - if (canSetSetupRef(ref)) { - setupState[ref] = value; - } - } else if (_isRef) { - ref.value = value; - if (rawRef.k) refs[rawRef.k] = value; - } else { - warn$1("Invalid template ref type:", ref, `(${typeof ref})`); - } - }; - if (value) { - doSet.id = -1; - queuePostRenderEffect(doSet, parentSuspense); - } else { - doSet(); - } - } else { - warn$1("Invalid template ref type:", ref, `(${typeof ref})`); - } - } -} - -let hasLoggedMismatchError = false; -const logMismatchError = () => { - if (hasLoggedMismatchError) { - return; - } - console.error("Hydration completed but contains mismatches."); - hasLoggedMismatchError = true; -}; -const isSVGContainer = (container) => container.namespaceURI.includes("svg") && container.tagName !== "foreignObject"; -const isMathMLContainer = (container) => container.namespaceURI.includes("MathML"); -const getContainerType = (container) => { - if (container.nodeType !== 1) return void 0; - if (isSVGContainer(container)) return "svg"; - if (isMathMLContainer(container)) return "mathml"; - return void 0; -}; -const isComment = (node) => node.nodeType === 8; -function createHydrationFunctions(rendererInternals) { - const { - mt: mountComponent, - p: patch, - o: { - patchProp, - createText, - nextSibling, - parentNode, - remove, - insert, - createComment - } - } = rendererInternals; - const hydrate = (vnode, container) => { - if (!container.hasChildNodes()) { - warn$1( - `Attempting to hydrate existing markup but container is empty. 
Performing full mount instead.` - ); - patch(null, vnode, container); - flushPostFlushCbs(); - container._vnode = vnode; - return; - } - hydrateNode(container.firstChild, vnode, null, null, null); - flushPostFlushCbs(); - container._vnode = vnode; - }; - const hydrateNode = (node, vnode, parentComponent, parentSuspense, slotScopeIds, optimized = false) => { - optimized = optimized || !!vnode.dynamicChildren; - const isFragmentStart = isComment(node) && node.data === "["; - const onMismatch = () => handleMismatch( - node, - vnode, - parentComponent, - parentSuspense, - slotScopeIds, - isFragmentStart - ); - const { type, ref, shapeFlag, patchFlag } = vnode; - let domType = node.nodeType; - vnode.el = node; - { - def(node, "__vnode", vnode, true); - def(node, "__vueParentComponent", parentComponent, true); - } - if (patchFlag === -2) { - optimized = false; - vnode.dynamicChildren = null; - } - let nextNode = null; - switch (type) { - case Text: - if (domType !== 3) { - if (vnode.children === "") { - insert(vnode.el = createText(""), parentNode(node), node); - nextNode = node; - } else { - nextNode = onMismatch(); - } - } else { - if (node.data !== vnode.children) { - warn$1( - `Hydration text mismatch in`, - node.parentNode, - ` - - rendered on server: ${JSON.stringify( - node.data - )} - - expected on client: ${JSON.stringify(vnode.children)}` - ); - logMismatchError(); - node.data = vnode.children; - } - nextNode = nextSibling(node); - } - break; - case Comment: - if (isTemplateNode(node)) { - nextNode = nextSibling(node); - replaceNode( - vnode.el = node.content.firstChild, - node, - parentComponent - ); - } else if (domType !== 8 || isFragmentStart) { - nextNode = onMismatch(); - } else { - nextNode = nextSibling(node); - } - break; - case Static: - if (isFragmentStart) { - node = nextSibling(node); - domType = node.nodeType; - } - if (domType === 1 || domType === 3) { - nextNode = node; - const needToAdoptContent = !vnode.children.length; - for (let i = 0; i < vnode.staticCount; i++) { - if (needToAdoptContent) - vnode.children += nextNode.nodeType === 1 ? nextNode.outerHTML : nextNode.data; - if (i === vnode.staticCount - 1) { - vnode.anchor = nextNode; - } - nextNode = nextSibling(nextNode); - } - return isFragmentStart ? nextSibling(nextNode) : nextNode; - } else { - onMismatch(); - } - break; - case Fragment: - if (!isFragmentStart) { - nextNode = onMismatch(); - } else { - nextNode = hydrateFragment( - node, - vnode, - parentComponent, - parentSuspense, - slotScopeIds, - optimized - ); - } - break; - default: - if (shapeFlag & 1) { - if ((domType !== 1 || vnode.type.toLowerCase() !== node.tagName.toLowerCase()) && !isTemplateNode(node)) { - nextNode = onMismatch(); - } else { - nextNode = hydrateElement( - node, - vnode, - parentComponent, - parentSuspense, - slotScopeIds, - optimized - ); - } - } else if (shapeFlag & 6) { - vnode.slotScopeIds = slotScopeIds; - const container = parentNode(node); - if (isFragmentStart) { - nextNode = locateClosingAnchor(node); - } else if (isComment(node) && node.data === "teleport start") { - nextNode = locateClosingAnchor(node, node.data, "teleport end"); - } else { - nextNode = nextSibling(node); - } - mountComponent( - vnode, - container, - null, - parentComponent, - parentSuspense, - getContainerType(container), - optimized - ); - if (isAsyncWrapper(vnode)) { - let subTree; - if (isFragmentStart) { - subTree = createVNode(Fragment); - subTree.anchor = nextNode ? 
nextNode.previousSibling : container.lastChild; - } else { - subTree = node.nodeType === 3 ? createTextVNode("") : createVNode("div"); - } - subTree.el = node; - vnode.component.subTree = subTree; - } - } else if (shapeFlag & 64) { - if (domType !== 8) { - nextNode = onMismatch(); - } else { - nextNode = vnode.type.hydrate( - node, - vnode, - parentComponent, - parentSuspense, - slotScopeIds, - optimized, - rendererInternals, - hydrateChildren - ); - } - } else if (shapeFlag & 128) { - nextNode = vnode.type.hydrate( - node, - vnode, - parentComponent, - parentSuspense, - getContainerType(parentNode(node)), - slotScopeIds, - optimized, - rendererInternals, - hydrateNode - ); - } else { - warn$1("Invalid HostVNode type:", type, `(${typeof type})`); - } - } - if (ref != null) { - setRef(ref, null, parentSuspense, vnode); - } - return nextNode; - }; - const hydrateElement = (el, vnode, parentComponent, parentSuspense, slotScopeIds, optimized) => { - optimized = optimized || !!vnode.dynamicChildren; - const { type, props, patchFlag, shapeFlag, dirs, transition } = vnode; - const forcePatch = type === "input" || type === "option"; - { - if (dirs) { - invokeDirectiveHook(vnode, null, parentComponent, "created"); - } - let needCallTransitionHooks = false; - if (isTemplateNode(el)) { - needCallTransitionHooks = needTransition( - null, - // no need check parentSuspense in hydration - transition - ) && parentComponent && parentComponent.vnode.props && parentComponent.vnode.props.appear; - const content = el.content.firstChild; - if (needCallTransitionHooks) { - transition.beforeEnter(content); - } - replaceNode(content, el, parentComponent); - vnode.el = el = content; - } - if (shapeFlag & 16 && // skip if element has innerHTML / textContent - !(props && (props.innerHTML || props.textContent))) { - let next = hydrateChildren( - el.firstChild, - vnode, - el, - parentComponent, - parentSuspense, - slotScopeIds, - optimized - ); - let hasWarned = false; - while (next) { - if (!isMismatchAllowed(el, 1 /* CHILDREN */)) { - if (!hasWarned) { - warn$1( - `Hydration children mismatch on`, - el, - ` -Server rendered element contains more child nodes than client vdom.` - ); - hasWarned = true; - } - logMismatchError(); - } - const cur = next; - next = next.nextSibling; - remove(cur); - } - } else if (shapeFlag & 8) { - let clientText = vnode.children; - if (clientText[0] === "\n" && (el.tagName === "PRE" || el.tagName === "TEXTAREA")) { - clientText = clientText.slice(1); - } - if (el.textContent !== clientText) { - if (!isMismatchAllowed(el, 0 /* TEXT */)) { - warn$1( - `Hydration text content mismatch on`, - el, - ` - - rendered on server: ${el.textContent} - - expected on client: ${vnode.children}` - ); - logMismatchError(); - } - el.textContent = vnode.children; - } - } - if (props) { - { - const isCustomElement = el.tagName.includes("-"); - for (const key in props) { - if (// #11189 skip if this node has directives that have created hooks - // as it could have mutated the DOM in any possible way - !(dirs && dirs.some((d) => d.dir.created)) && propHasMismatch(el, key, props[key], vnode, parentComponent)) { - logMismatchError(); - } - if (forcePatch && (key.endsWith("value") || key === "indeterminate") || isOn(key) && !isReservedProp(key) || // force hydrate v-bind with .prop modifiers - key[0] === "." 
|| isCustomElement) { - patchProp(el, key, null, props[key], void 0, parentComponent); - } - } - } - } - let vnodeHooks; - if (vnodeHooks = props && props.onVnodeBeforeMount) { - invokeVNodeHook(vnodeHooks, parentComponent, vnode); - } - if (dirs) { - invokeDirectiveHook(vnode, null, parentComponent, "beforeMount"); - } - if ((vnodeHooks = props && props.onVnodeMounted) || dirs || needCallTransitionHooks) { - queueEffectWithSuspense(() => { - vnodeHooks && invokeVNodeHook(vnodeHooks, parentComponent, vnode); - needCallTransitionHooks && transition.enter(el); - dirs && invokeDirectiveHook(vnode, null, parentComponent, "mounted"); - }, parentSuspense); - } - } - return el.nextSibling; - }; - const hydrateChildren = (node, parentVNode, container, parentComponent, parentSuspense, slotScopeIds, optimized) => { - optimized = optimized || !!parentVNode.dynamicChildren; - const children = parentVNode.children; - const l = children.length; - let hasWarned = false; - for (let i = 0; i < l; i++) { - const vnode = optimized ? children[i] : children[i] = normalizeVNode(children[i]); - const isText = vnode.type === Text; - if (node) { - if (isText && !optimized) { - if (i + 1 < l && normalizeVNode(children[i + 1]).type === Text) { - insert( - createText( - node.data.slice(vnode.children.length) - ), - container, - nextSibling(node) - ); - node.data = vnode.children; - } - } - node = hydrateNode( - node, - vnode, - parentComponent, - parentSuspense, - slotScopeIds, - optimized - ); - } else if (isText && !vnode.children) { - insert(vnode.el = createText(""), container); - } else { - if (!isMismatchAllowed(container, 1 /* CHILDREN */)) { - if (!hasWarned) { - warn$1( - `Hydration children mismatch on`, - container, - ` -Server rendered element contains fewer child nodes than client vdom.` - ); - hasWarned = true; - } - logMismatchError(); - } - patch( - null, - vnode, - container, - null, - parentComponent, - parentSuspense, - getContainerType(container), - slotScopeIds - ); - } - } - return node; - }; - const hydrateFragment = (node, vnode, parentComponent, parentSuspense, slotScopeIds, optimized) => { - const { slotScopeIds: fragmentSlotScopeIds } = vnode; - if (fragmentSlotScopeIds) { - slotScopeIds = slotScopeIds ? slotScopeIds.concat(fragmentSlotScopeIds) : fragmentSlotScopeIds; - } - const container = parentNode(node); - const next = hydrateChildren( - nextSibling(node), - vnode, - container, - parentComponent, - parentSuspense, - slotScopeIds, - optimized - ); - if (next && isComment(next) && next.data === "]") { - return nextSibling(vnode.anchor = next); - } else { - logMismatchError(); - insert(vnode.anchor = createComment(`]`), container, next); - return next; - } - }; - const handleMismatch = (node, vnode, parentComponent, parentSuspense, slotScopeIds, isFragment) => { - if (!isMismatchAllowed(node.parentElement, 1 /* CHILDREN */)) { - warn$1( - `Hydration node mismatch: -- rendered on server:`, - node, - node.nodeType === 3 ? `(text)` : isComment(node) && node.data === "[" ? 
`(start of fragment)` : ``, - ` -- expected on client:`, - vnode.type - ); - logMismatchError(); - } - vnode.el = null; - if (isFragment) { - const end = locateClosingAnchor(node); - while (true) { - const next2 = nextSibling(node); - if (next2 && next2 !== end) { - remove(next2); - } else { - break; - } - } - } - const next = nextSibling(node); - const container = parentNode(node); - remove(node); - patch( - null, - vnode, - container, - next, - parentComponent, - parentSuspense, - getContainerType(container), - slotScopeIds - ); - return next; - }; - const locateClosingAnchor = (node, open = "[", close = "]") => { - let match = 0; - while (node) { - node = nextSibling(node); - if (node && isComment(node)) { - if (node.data === open) match++; - if (node.data === close) { - if (match === 0) { - return nextSibling(node); - } else { - match--; - } - } - } - } - return node; - }; - const replaceNode = (newNode, oldNode, parentComponent) => { - const parentNode2 = oldNode.parentNode; - if (parentNode2) { - parentNode2.replaceChild(newNode, oldNode); - } - let parent = parentComponent; - while (parent) { - if (parent.vnode.el === oldNode) { - parent.vnode.el = parent.subTree.el = newNode; - } - parent = parent.parent; - } - }; - const isTemplateNode = (node) => { - return node.nodeType === 1 && node.tagName === "TEMPLATE"; - }; - return [hydrate, hydrateNode]; -} -function propHasMismatch(el, key, clientValue, vnode, instance) { - let mismatchType; - let mismatchKey; - let actual; - let expected; - if (key === "class") { - actual = el.getAttribute("class"); - expected = normalizeClass(clientValue); - if (!isSetEqual(toClassSet(actual || ""), toClassSet(expected))) { - mismatchType = 2 /* CLASS */; - mismatchKey = `class`; - } - } else if (key === "style") { - actual = el.getAttribute("style") || ""; - expected = isString(clientValue) ? clientValue : stringifyStyle(normalizeStyle(clientValue)); - const actualMap = toStyleMap(actual); - const expectedMap = toStyleMap(expected); - if (vnode.dirs) { - for (const { dir, value } of vnode.dirs) { - if (dir.name === "show" && !value) { - expectedMap.set("display", "none"); - } - } - } - if (instance) { - resolveCssVars(instance, vnode, expectedMap); - } - if (!isMapEqual(actualMap, expectedMap)) { - mismatchType = 3 /* STYLE */; - mismatchKey = "style"; - } - } else if (el instanceof SVGElement && isKnownSvgAttr(key) || el instanceof HTMLElement && (isBooleanAttr(key) || isKnownHtmlAttr(key))) { - if (isBooleanAttr(key)) { - actual = el.hasAttribute(key); - expected = includeBooleanAttr(clientValue); - } else if (clientValue == null) { - actual = el.hasAttribute(key); - expected = false; - } else { - if (el.hasAttribute(key)) { - actual = el.getAttribute(key); - } else if (key === "value" && el.tagName === "TEXTAREA") { - actual = el.value; - } else { - actual = false; - } - expected = isRenderableAttrValue(clientValue) ? String(clientValue) : false; - } - if (actual !== expected) { - mismatchType = 4 /* ATTRIBUTE */; - mismatchKey = key; - } - } - if (mismatchType != null && !isMismatchAllowed(el, mismatchType)) { - const format = (v) => v === false ? `(not rendered)` : `${mismatchKey}="${v}"`; - const preSegment = `Hydration ${MismatchTypeString[mismatchType]} mismatch on`; - const postSegment = ` - - rendered on server: ${format(actual)} - - expected on client: ${format(expected)} - Note: this mismatch is check-only. The DOM will not be rectified in production due to performance overhead. 
- You should fix the source of the mismatch.`; - { - warn$1(preSegment, el, postSegment); - } - return true; - } - return false; -} -function toClassSet(str) { - return new Set(str.trim().split(/\s+/)); -} -function isSetEqual(a, b) { - if (a.size !== b.size) { - return false; - } - for (const s of a) { - if (!b.has(s)) { - return false; - } - } - return true; -} -function toStyleMap(str) { - const styleMap = /* @__PURE__ */ new Map(); - for (const item of str.split(";")) { - let [key, value] = item.split(":"); - key = key.trim(); - value = value && value.trim(); - if (key && value) { - styleMap.set(key, value); - } - } - return styleMap; -} -function isMapEqual(a, b) { - if (a.size !== b.size) { - return false; - } - for (const [key, value] of a) { - if (value !== b.get(key)) { - return false; - } - } - return true; -} -function resolveCssVars(instance, vnode, expectedMap) { - const root = instance.subTree; - if (instance.getCssVars && (vnode === root || root && root.type === Fragment && root.children.includes(vnode))) { - const cssVars = instance.getCssVars(); - for (const key in cssVars) { - expectedMap.set( - `--${getEscapedCssVarName(key)}`, - String(cssVars[key]) - ); - } - } - if (vnode === root && instance.parent) { - resolveCssVars(instance.parent, instance.vnode, expectedMap); - } -} -const allowMismatchAttr = "data-allow-mismatch"; -const MismatchTypeString = { - [0 /* TEXT */]: "text", - [1 /* CHILDREN */]: "children", - [2 /* CLASS */]: "class", - [3 /* STYLE */]: "style", - [4 /* ATTRIBUTE */]: "attribute" -}; -function isMismatchAllowed(el, allowedType) { - if (allowedType === 0 /* TEXT */ || allowedType === 1 /* CHILDREN */) { - while (el && !el.hasAttribute(allowMismatchAttr)) { - el = el.parentElement; - } - } - const allowedAttr = el && el.getAttribute(allowMismatchAttr); - if (allowedAttr == null) { - return false; - } else if (allowedAttr === "") { - return true; - } else { - const list = allowedAttr.split(","); - if (allowedType === 0 /* TEXT */ && list.includes("children")) { - return true; - } - return allowedAttr.split(",").includes(MismatchTypeString[allowedType]); - } -} - -const requestIdleCallback = getGlobalThis().requestIdleCallback || ((cb) => setTimeout(cb, 1)); -const cancelIdleCallback = getGlobalThis().cancelIdleCallback || ((id) => clearTimeout(id)); -const hydrateOnIdle = (timeout = 1e4) => (hydrate) => { - const id = requestIdleCallback(hydrate, { timeout }); - return () => cancelIdleCallback(id); -}; -function elementIsVisibleInViewport(el) { - const { top, left, bottom, right } = el.getBoundingClientRect(); - const { innerHeight, innerWidth } = window; - return (top > 0 && top < innerHeight || bottom > 0 && bottom < innerHeight) && (left > 0 && left < innerWidth || right > 0 && right < innerWidth); -} -const hydrateOnVisible = (opts) => (hydrate, forEach) => { - const ob = new IntersectionObserver((entries) => { - for (const e of entries) { - if (!e.isIntersecting) continue; - ob.disconnect(); - hydrate(); - break; - } - }, opts); - forEach((el) => { - if (!(el instanceof Element)) return; - if (elementIsVisibleInViewport(el)) { - hydrate(); - ob.disconnect(); - return false; - } - ob.observe(el); - }); - return () => ob.disconnect(); -}; -const hydrateOnMediaQuery = (query) => (hydrate) => { - if (query) { - const mql = matchMedia(query); - if (mql.matches) { - hydrate(); - } else { - mql.addEventListener("change", hydrate, { once: true }); - return () => mql.removeEventListener("change", hydrate); - } - } -}; -const hydrateOnInteraction = 
(interactions = []) => (hydrate, forEach) => { - if (isString(interactions)) interactions = [interactions]; - let hasHydrated = false; - const doHydrate = (e) => { - if (!hasHydrated) { - hasHydrated = true; - teardown(); - hydrate(); - e.target.dispatchEvent(new e.constructor(e.type, e)); - } - }; - const teardown = () => { - forEach((el) => { - for (const i of interactions) { - el.removeEventListener(i, doHydrate); - } - }); - }; - forEach((el) => { - for (const i of interactions) { - el.addEventListener(i, doHydrate, { once: true }); - } - }); - return teardown; -}; -function forEachElement(node, cb) { - if (isComment(node) && node.data === "[") { - let depth = 1; - let next = node.nextSibling; - while (next) { - if (next.nodeType === 1) { - const result = cb(next); - if (result === false) { - break; - } - } else if (isComment(next)) { - if (next.data === "]") { - if (--depth === 0) break; - } else if (next.data === "[") { - depth++; - } - } - next = next.nextSibling; - } - } else { - cb(node); - } -} - -const isAsyncWrapper = (i) => !!i.type.__asyncLoader; -/*! #__NO_SIDE_EFFECTS__ */ -// @__NO_SIDE_EFFECTS__ -function defineAsyncComponent(source) { - if (isFunction(source)) { - source = { loader: source }; - } - const { - loader, - loadingComponent, - errorComponent, - delay = 200, - hydrate: hydrateStrategy, - timeout, - // undefined = never times out - suspensible = true, - onError: userOnError - } = source; - let pendingRequest = null; - let resolvedComp; - let retries = 0; - const retry = () => { - retries++; - pendingRequest = null; - return load(); - }; - const load = () => { - let thisRequest; - return pendingRequest || (thisRequest = pendingRequest = loader().catch((err) => { - err = err instanceof Error ? err : new Error(String(err)); - if (userOnError) { - return new Promise((resolve, reject) => { - const userRetry = () => resolve(retry()); - const userFail = () => reject(err); - userOnError(err, userRetry, userFail, retries + 1); - }); - } else { - throw err; - } - }).then((comp) => { - if (thisRequest !== pendingRequest && pendingRequest) { - return pendingRequest; - } - if (!comp) { - warn$1( - `Async component loader resolved to undefined. If you are using retry(), make sure to return its return value.` - ); - } - if (comp && (comp.__esModule || comp[Symbol.toStringTag] === "Module")) { - comp = comp.default; - } - if (comp && !isObject(comp) && !isFunction(comp)) { - throw new Error(`Invalid async component load result: ${comp}`); - } - resolvedComp = comp; - return comp; - })); - }; - return defineComponent({ - name: "AsyncComponentWrapper", - __asyncLoader: load, - __asyncHydrate(el, instance, hydrate) { - const doHydrate = hydrateStrategy ? 
() => { - const teardown = hydrateStrategy( - hydrate, - (cb) => forEachElement(el, cb) - ); - if (teardown) { - (instance.bum || (instance.bum = [])).push(teardown); - } - } : hydrate; - if (resolvedComp) { - doHydrate(); - } else { - load().then(() => !instance.isUnmounted && doHydrate()); - } - }, - get __asyncResolved() { - return resolvedComp; - }, - setup() { - const instance = currentInstance; - markAsyncBoundary(instance); - if (resolvedComp) { - return () => createInnerComp(resolvedComp, instance); - } - const onError = (err) => { - pendingRequest = null; - handleError( - err, - instance, - 13, - !errorComponent - ); - }; - if (suspensible && instance.suspense || isInSSRComponentSetup) { - return load().then((comp) => { - return () => createInnerComp(comp, instance); - }).catch((err) => { - onError(err); - return () => errorComponent ? createVNode(errorComponent, { - error: err - }) : null; - }); - } - const loaded = ref(false); - const error = ref(); - const delayed = ref(!!delay); - if (delay) { - setTimeout(() => { - delayed.value = false; - }, delay); - } - if (timeout != null) { - setTimeout(() => { - if (!loaded.value && !error.value) { - const err = new Error( - `Async component timed out after ${timeout}ms.` - ); - onError(err); - error.value = err; - } - }, timeout); - } - load().then(() => { - loaded.value = true; - if (instance.parent && isKeepAlive(instance.parent.vnode)) { - instance.parent.update(); - } - }).catch((err) => { - onError(err); - error.value = err; - }); - return () => { - if (loaded.value && resolvedComp) { - return createInnerComp(resolvedComp, instance); - } else if (error.value && errorComponent) { - return createVNode(errorComponent, { - error: error.value - }); - } else if (loadingComponent && !delayed.value) { - return createVNode(loadingComponent); - } - }; - } - }); -} -function createInnerComp(comp, parent) { - const { ref: ref2, props, children, ce } = parent.vnode; - const vnode = createVNode(comp, props, children); - vnode.ref = ref2; - vnode.ce = ce; - delete parent.vnode.ce; - return vnode; -} - -const isKeepAlive = (vnode) => vnode.type.__isKeepAlive; -const KeepAliveImpl = { - name: `KeepAlive`, - // Marker for special handling inside the renderer. We are not using a === - // check directly on KeepAlive in the renderer, because importing it directly - // would prevent it from being tree-shaken. - __isKeepAlive: true, - props: { - include: [String, RegExp, Array], - exclude: [String, RegExp, Array], - max: [String, Number] - }, - setup(props, { slots }) { - const instance = getCurrentInstance(); - const sharedContext = instance.ctx; - if (!sharedContext.renderer) { - return () => { - const children = slots.default && slots.default(); - return children && children.length === 1 ? 
children[0] : children; - }; - } - const cache = /* @__PURE__ */ new Map(); - const keys = /* @__PURE__ */ new Set(); - let current = null; - { - instance.__v_cache = cache; - } - const parentSuspense = instance.suspense; - const { - renderer: { - p: patch, - m: move, - um: _unmount, - o: { createElement } - } - } = sharedContext; - const storageContainer = createElement("div"); - sharedContext.activate = (vnode, container, anchor, namespace, optimized) => { - const instance2 = vnode.component; - move(vnode, container, anchor, 0, parentSuspense); - patch( - instance2.vnode, - vnode, - container, - anchor, - instance2, - parentSuspense, - namespace, - vnode.slotScopeIds, - optimized - ); - queuePostRenderEffect(() => { - instance2.isDeactivated = false; - if (instance2.a) { - invokeArrayFns(instance2.a); - } - const vnodeHook = vnode.props && vnode.props.onVnodeMounted; - if (vnodeHook) { - invokeVNodeHook(vnodeHook, instance2.parent, vnode); - } - }, parentSuspense); - { - devtoolsComponentAdded(instance2); - } - }; - sharedContext.deactivate = (vnode) => { - const instance2 = vnode.component; - invalidateMount(instance2.m); - invalidateMount(instance2.a); - move(vnode, storageContainer, null, 1, parentSuspense); - queuePostRenderEffect(() => { - if (instance2.da) { - invokeArrayFns(instance2.da); - } - const vnodeHook = vnode.props && vnode.props.onVnodeUnmounted; - if (vnodeHook) { - invokeVNodeHook(vnodeHook, instance2.parent, vnode); - } - instance2.isDeactivated = true; - }, parentSuspense); - { - devtoolsComponentAdded(instance2); - } - }; - function unmount(vnode) { - resetShapeFlag(vnode); - _unmount(vnode, instance, parentSuspense, true); - } - function pruneCache(filter) { - cache.forEach((vnode, key) => { - const name = getComponentName(vnode.type); - if (name && !filter(name)) { - pruneCacheEntry(key); - } - }); - } - function pruneCacheEntry(key) { - const cached = cache.get(key); - if (cached && (!current || !isSameVNodeType(cached, current))) { - unmount(cached); - } else if (current) { - resetShapeFlag(current); - } - cache.delete(key); - keys.delete(key); - } - watch( - () => [props.include, props.exclude], - ([include, exclude]) => { - include && pruneCache((name) => matches(include, name)); - exclude && pruneCache((name) => !matches(exclude, name)); - }, - // prune post-render after `current` has been updated - { flush: "post", deep: true } - ); - let pendingCacheKey = null; - const cacheSubtree = () => { - if (pendingCacheKey != null) { - if (isSuspense(instance.subTree.type)) { - queuePostRenderEffect(() => { - cache.set(pendingCacheKey, getInnerChild(instance.subTree)); - }, instance.subTree.suspense); - } else { - cache.set(pendingCacheKey, getInnerChild(instance.subTree)); - } - } - }; - onMounted(cacheSubtree); - onUpdated(cacheSubtree); - onBeforeUnmount(() => { - cache.forEach((cached) => { - const { subTree, suspense } = instance; - const vnode = getInnerChild(subTree); - if (cached.type === vnode.type && cached.key === vnode.key) { - resetShapeFlag(vnode); - const da = vnode.component.da; - da && queuePostRenderEffect(da, suspense); - return; - } - unmount(cached); - }); - }); - return () => { - pendingCacheKey = null; - if (!slots.default) { - return current = null; - } - const children = slots.default(); - const rawVNode = children[0]; - if (children.length > 1) { - { - warn$1(`KeepAlive should contain exactly one component child.`); - } - current = null; - return children; - } else if (!isVNode(rawVNode) || !(rawVNode.shapeFlag & 4) && !(rawVNode.shapeFlag & 
128)) { - current = null; - return rawVNode; - } - let vnode = getInnerChild(rawVNode); - if (vnode.type === Comment) { - current = null; - return vnode; - } - const comp = vnode.type; - const name = getComponentName( - isAsyncWrapper(vnode) ? vnode.type.__asyncResolved || {} : comp - ); - const { include, exclude, max } = props; - if (include && (!name || !matches(include, name)) || exclude && name && matches(exclude, name)) { - vnode.shapeFlag &= ~256; - current = vnode; - return rawVNode; - } - const key = vnode.key == null ? comp : vnode.key; - const cachedVNode = cache.get(key); - if (vnode.el) { - vnode = cloneVNode(vnode); - if (rawVNode.shapeFlag & 128) { - rawVNode.ssContent = vnode; - } - } - pendingCacheKey = key; - if (cachedVNode) { - vnode.el = cachedVNode.el; - vnode.component = cachedVNode.component; - if (vnode.transition) { - setTransitionHooks(vnode, vnode.transition); - } - vnode.shapeFlag |= 512; - keys.delete(key); - keys.add(key); - } else { - keys.add(key); - if (max && keys.size > parseInt(max, 10)) { - pruneCacheEntry(keys.values().next().value); - } - } - vnode.shapeFlag |= 256; - current = vnode; - return isSuspense(rawVNode.type) ? rawVNode : vnode; - }; - } -}; -const KeepAlive = KeepAliveImpl; -function matches(pattern, name) { - if (isArray(pattern)) { - return pattern.some((p) => matches(p, name)); - } else if (isString(pattern)) { - return pattern.split(",").includes(name); - } else if (isRegExp(pattern)) { - pattern.lastIndex = 0; - return pattern.test(name); - } - return false; -} -function onActivated(hook, target) { - registerKeepAliveHook(hook, "a", target); -} -function onDeactivated(hook, target) { - registerKeepAliveHook(hook, "da", target); -} -function registerKeepAliveHook(hook, type, target = currentInstance) { - const wrappedHook = hook.__wdc || (hook.__wdc = () => { - let current = target; - while (current) { - if (current.isDeactivated) { - return; - } - current = current.parent; - } - return hook(); - }); - injectHook(type, wrappedHook, target); - if (target) { - let current = target.parent; - while (current && current.parent) { - if (isKeepAlive(current.parent.vnode)) { - injectToKeepAliveRoot(wrappedHook, type, target, current); - } - current = current.parent; - } - } -} -function injectToKeepAliveRoot(hook, type, target, keepAliveRoot) { - const injected = injectHook( - type, - hook, - keepAliveRoot, - true - /* prepend */ - ); - onUnmounted(() => { - remove(keepAliveRoot[type], injected); - }, target); -} -function resetShapeFlag(vnode) { - vnode.shapeFlag &= ~256; - vnode.shapeFlag &= ~512; -} -function getInnerChild(vnode) { - return vnode.shapeFlag & 128 ? vnode.ssContent : vnode; -} - -function injectHook(type, hook, target = currentInstance, prepend = false) { - if (target) { - const hooks = target[type] || (target[type] = []); - const wrappedHook = hook.__weh || (hook.__weh = (...args) => { - pauseTracking(); - const reset = setCurrentInstance(target); - const res = callWithAsyncErrorHandling(hook, target, type, args); - reset(); - resetTracking(); - return res; - }); - if (prepend) { - hooks.unshift(wrappedHook); - } else { - hooks.push(wrappedHook); - } - return wrappedHook; - } else { - const apiName = toHandlerKey(ErrorTypeStrings$1[type].replace(/ hook$/, "")); - warn$1( - `${apiName} is called when there is no active component instance to be associated with. 
Lifecycle injection APIs can only be used during execution of setup().` + (` If you are using async setup(), make sure to register lifecycle hooks before the first await statement.` ) - ); - } -} -const createHook = (lifecycle) => (hook, target = currentInstance) => { - if (!isInSSRComponentSetup || lifecycle === "sp") { - injectHook(lifecycle, (...args) => hook(...args), target); - } -}; -const onBeforeMount = createHook("bm"); -const onMounted = createHook("m"); -const onBeforeUpdate = createHook( - "bu" -); -const onUpdated = createHook("u"); -const onBeforeUnmount = createHook( - "bum" -); -const onUnmounted = createHook("um"); -const onServerPrefetch = createHook( - "sp" -); -const onRenderTriggered = createHook("rtg"); -const onRenderTracked = createHook("rtc"); -function onErrorCaptured(hook, target = currentInstance) { - injectHook("ec", hook, target); -} - -const COMPONENTS = "components"; -const DIRECTIVES = "directives"; -function resolveComponent(name, maybeSelfReference) { - return resolveAsset(COMPONENTS, name, true, maybeSelfReference) || name; -} -const NULL_DYNAMIC_COMPONENT = Symbol.for("v-ndc"); -function resolveDynamicComponent(component) { - if (isString(component)) { - return resolveAsset(COMPONENTS, component, false) || component; - } else { - return component || NULL_DYNAMIC_COMPONENT; - } -} -function resolveDirective(name) { - return resolveAsset(DIRECTIVES, name); -} -function resolveAsset(type, name, warnMissing = true, maybeSelfReference = false) { - const instance = currentRenderingInstance || currentInstance; - if (instance) { - const Component = instance.type; - if (type === COMPONENTS) { - const selfName = getComponentName( - Component, - false - ); - if (selfName && (selfName === name || selfName === camelize(name) || selfName === capitalize(camelize(name)))) { - return Component; - } - } - const res = ( - // local registration - // check instance[type] first which is resolved for options API - resolve(instance[type] || Component[type], name) || // global registration - resolve(instance.appContext[type], name) - ); - if (!res && maybeSelfReference) { - return Component; - } - if (warnMissing && !res) { - const extra = type === COMPONENTS ? ` -If this is a native custom element, make sure to exclude it from component resolution via compilerOptions.isCustomElement.` : ``; - warn$1(`Failed to resolve ${type.slice(0, -1)}: ${name}${extra}`); - } - return res; - } else { - warn$1( - `resolve${capitalize(type.slice(0, -1))} can only be used in render() or setup().` - ); - } -} -function resolve(registry, name) { - return registry && (registry[name] || registry[camelize(name)] || registry[capitalize(camelize(name))]); -} - -function renderList(source, renderItem, cache, index) { - let ret; - const cached = cache && cache[index]; - const sourceIsArray = isArray(source); - if (sourceIsArray || isString(source)) { - const sourceIsReactiveArray = sourceIsArray && isReactive(source); - let needsWrap = false; - if (sourceIsReactiveArray) { - needsWrap = !isShallow(source); - source = shallowReadArray(source); - } - ret = new Array(source.length); - for (let i = 0, l = source.length; i < l; i++) { - ret[i] = renderItem( - needsWrap ? 
toReactive(source[i]) : source[i], - i, - void 0, - cached && cached[i] - ); - } - } else if (typeof source === "number") { - if (!Number.isInteger(source)) { - warn$1(`The v-for range expect an integer value but got ${source}.`); - } - ret = new Array(source); - for (let i = 0; i < source; i++) { - ret[i] = renderItem(i + 1, i, void 0, cached && cached[i]); - } - } else if (isObject(source)) { - if (source[Symbol.iterator]) { - ret = Array.from( - source, - (item, i) => renderItem(item, i, void 0, cached && cached[i]) - ); - } else { - const keys = Object.keys(source); - ret = new Array(keys.length); - for (let i = 0, l = keys.length; i < l; i++) { - const key = keys[i]; - ret[i] = renderItem(source[key], key, i, cached && cached[i]); - } - } - } else { - ret = []; - } - if (cache) { - cache[index] = ret; - } - return ret; -} - -function createSlots(slots, dynamicSlots) { - for (let i = 0; i < dynamicSlots.length; i++) { - const slot = dynamicSlots[i]; - if (isArray(slot)) { - for (let j = 0; j < slot.length; j++) { - slots[slot[j].name] = slot[j].fn; - } - } else if (slot) { - slots[slot.name] = slot.key ? (...args) => { - const res = slot.fn(...args); - if (res) res.key = slot.key; - return res; - } : slot.fn; - } - } - return slots; -} - -function renderSlot(slots, name, props = {}, fallback, noSlotted) { - if (currentRenderingInstance.ce || currentRenderingInstance.parent && isAsyncWrapper(currentRenderingInstance.parent) && currentRenderingInstance.parent.ce) { - if (name !== "default") props.name = name; - return openBlock(), createBlock( - Fragment, - null, - [createVNode("slot", props, fallback && fallback())], - 64 - ); - } - let slot = slots[name]; - if (slot && slot.length > 1) { - warn$1( - `SSR-optimized slot function detected in a non-SSR-optimized render function. You need to mark this component with $dynamic-slots in the parent template.` - ); - slot = () => []; - } - if (slot && slot._c) { - slot._d = false; - } - openBlock(); - const validSlotContent = slot && ensureValidVNode(slot(props)); - const slotKey = props.key || // slot content array of a dynamic conditional slot may have a branch - // key attached in the `createSlots` helper, respect that - validSlotContent && validSlotContent.key; - const rendered = createBlock( - Fragment, - { - key: (slotKey && !isSymbol(slotKey) ? slotKey : `_${name}`) + // #7256 force differentiate fallback content from actual content - (!validSlotContent && fallback ? "_fb" : "") - }, - validSlotContent || (fallback ? fallback() : []), - validSlotContent && slots._ === 1 ? 64 : -2 - ); - if (!noSlotted && rendered.scopeId) { - rendered.slotScopeIds = [rendered.scopeId + "-s"]; - } - if (slot && slot._c) { - slot._d = true; - } - return rendered; -} -function ensureValidVNode(vnodes) { - return vnodes.some((child) => { - if (!isVNode(child)) return true; - if (child.type === Comment) return false; - if (child.type === Fragment && !ensureValidVNode(child.children)) - return false; - return true; - }) ? vnodes : null; -} - -function toHandlers(obj, preserveCaseIfNecessary) { - const ret = {}; - if (!isObject(obj)) { - warn$1(`v-on with no argument expects an object value.`); - return ret; - } - for (const key in obj) { - ret[preserveCaseIfNecessary && /[A-Z]/.test(key) ? 
`on:${key}` : toHandlerKey(key)] = obj[key]; - } - return ret; -} - -const getPublicInstance = (i) => { - if (!i) return null; - if (isStatefulComponent(i)) return getComponentPublicInstance(i); - return getPublicInstance(i.parent); -}; -const publicPropertiesMap = ( - // Move PURE marker to new line to workaround compiler discarding it - // due to type annotation - /* @__PURE__ */ extend(/* @__PURE__ */ Object.create(null), { - $: (i) => i, - $el: (i) => i.vnode.el, - $data: (i) => i.data, - $props: (i) => shallowReadonly(i.props) , - $attrs: (i) => shallowReadonly(i.attrs) , - $slots: (i) => shallowReadonly(i.slots) , - $refs: (i) => shallowReadonly(i.refs) , - $parent: (i) => getPublicInstance(i.parent), - $root: (i) => getPublicInstance(i.root), - $host: (i) => i.ce, - $emit: (i) => i.emit, - $options: (i) => resolveMergedOptions(i) , - $forceUpdate: (i) => i.f || (i.f = () => { - queueJob(i.update); - }), - $nextTick: (i) => i.n || (i.n = nextTick.bind(i.proxy)), - $watch: (i) => instanceWatch.bind(i) - }) -); -const isReservedPrefix = (key) => key === "_" || key === "$"; -const hasSetupBinding = (state, key) => state !== EMPTY_OBJ && !state.__isScriptSetup && hasOwn(state, key); -const PublicInstanceProxyHandlers = { - get({ _: instance }, key) { - if (key === "__v_skip") { - return true; - } - const { ctx, setupState, data, props, accessCache, type, appContext } = instance; - if (key === "__isVue") { - return true; - } - let normalizedProps; - if (key[0] !== "$") { - const n = accessCache[key]; - if (n !== void 0) { - switch (n) { - case 1 /* SETUP */: - return setupState[key]; - case 2 /* DATA */: - return data[key]; - case 4 /* CONTEXT */: - return ctx[key]; - case 3 /* PROPS */: - return props[key]; - } - } else if (hasSetupBinding(setupState, key)) { - accessCache[key] = 1 /* SETUP */; - return setupState[key]; - } else if (data !== EMPTY_OBJ && hasOwn(data, key)) { - accessCache[key] = 2 /* DATA */; - return data[key]; - } else if ( - // only cache other properties when instance has declared (thus stable) - // props - (normalizedProps = instance.propsOptions[0]) && hasOwn(normalizedProps, key) - ) { - accessCache[key] = 3 /* PROPS */; - return props[key]; - } else if (ctx !== EMPTY_OBJ && hasOwn(ctx, key)) { - accessCache[key] = 4 /* CONTEXT */; - return ctx[key]; - } else if (shouldCacheAccess) { - accessCache[key] = 0 /* OTHER */; - } - } - const publicGetter = publicPropertiesMap[key]; - let cssModule, globalProperties; - if (publicGetter) { - if (key === "$attrs") { - track(instance.attrs, "get", ""); - markAttrsAccessed(); - } else if (key === "$slots") { - track(instance, "get", key); - } - return publicGetter(instance); - } else if ( - // css module (injected by vue-loader) - (cssModule = type.__cssModules) && (cssModule = cssModule[key]) - ) { - return cssModule; - } else if (ctx !== EMPTY_OBJ && hasOwn(ctx, key)) { - accessCache[key] = 4 /* CONTEXT */; - return ctx[key]; - } else if ( - // global properties - globalProperties = appContext.config.globalProperties, hasOwn(globalProperties, key) - ) { - { - return globalProperties[key]; - } - } else if (currentRenderingInstance && (!isString(key) || // #1091 avoid internal isRef/isVNode checks on component instance leading - // to infinite warning loop - key.indexOf("__v") !== 0)) { - if (data !== EMPTY_OBJ && isReservedPrefix(key[0]) && hasOwn(data, key)) { - warn$1( - `Property ${JSON.stringify( - key - )} must be accessed via $data because it starts with a reserved character ("$" or "_") and is not proxied on the 
render context.` - ); - } else if (instance === currentRenderingInstance) { - warn$1( - `Property ${JSON.stringify(key)} was accessed during render but is not defined on instance.` - ); - } - } - }, - set({ _: instance }, key, value) { - const { data, setupState, ctx } = instance; - if (hasSetupBinding(setupState, key)) { - setupState[key] = value; - return true; - } else if (setupState.__isScriptSetup && hasOwn(setupState, key)) { - warn$1(`Cannot mutate - - - - -

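Aside: the deleted asset above inlines Vue's lazy-hydration strategies (`hydrateOnIdle`, `hydrateOnVisible`, `hydrateOnMediaQuery`, `hydrateOnInteraction`). As a minimal sketch of how such a strategy is typically wired up in application code (assuming Vue 3.5+ exports and a hypothetical `Chart.vue` component; this snippet is not part of the diff):

```js
import { defineAsyncComponent, hydrateOnVisible } from 'vue';

// Hydrate the server-rendered markup only once it scrolls near the viewport;
// hydrateOnVisible forwards its options to the underlying IntersectionObserver,
// matching the deleted implementation above.
const LazyChart = defineAsyncComponent({
    loader: () => import('./Chart.vue'), // hypothetical component path
    hydrate: hydrateOnVisible({ rootMargin: '100px' }),
});
```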
-
-
-
-
-
-
-
-
-
diff --git a/examples/server/public/index.html.gz b/examples/server/public/index.html.gz
new file mode 100644
index 0000000000000..6a64915579c39
Binary files /dev/null and b/examples/server/public/index.html.gz differ
diff --git a/examples/server/public_legacy/index-new.html b/examples/server/public_legacy/index-new.html
index 8bfa380e57388..cbfbbdf2806fa 100644
--- a/examples/server/public_legacy/index-new.html
+++ b/examples/server/public_legacy/index-new.html
@@ -39,7 +39,6 @@
         temperature: 0.8, // adapt all following parameters to optimized min-p requierements. If for non-english, set to 0.6 or lower
         repeat_last_n: 0, // 0 = disable penalty, -1 = context size
         repeat_penalty: 1.0, // 1.0 = disabled
-        penalize_nl: false, // true only useful for infinite completion
         dry_multiplier: 0.0, // 0.0 = disabled, 0.8 works well
         dry_base: 1.75, // 0.0 = disabled
         dry_allowed_length: 2, // tokens extending repetitions beyond this receive penalty, 2 works well
diff --git a/examples/server/public_legacy/index.html b/examples/server/public_legacy/index.html
index a95f5c6df8775..75f39330a789d 100644
--- a/examples/server/public_legacy/index.html
+++ b/examples/server/public_legacy/index.html
@@ -303,7 +303,6 @@
         temperature: 0.7,
         repeat_last_n: 256, // 0 = disable penalty, -1 = context size
         repeat_penalty: 1.18, // 1.0 = disabled
-        penalize_nl: false,
         dry_multiplier: 0.0, // 0.0 = disabled, 0.8 works well
         dry_base: 1.75, // 0.0 = disabled
         dry_allowed_length: 2, // tokens extending repetitions beyond this receive penalty, 2 works well
@@ -1006,7 +1005,6 @@
             ${FloatField({ label: "Temperature", max: 2.0, min: 0.0, name: "temperature", step: 0.01, value: params.value.temperature })}
             ${FloatField({ label: "Penalize repeat sequence", max: 2.0, min: 0.0, name: "repeat_penalty", step: 0.01, value: params.value.repeat_penalty })}
             ${IntField({ label: "Consider N tokens for penalize", max: 2048, min: 0, name: "repeat_last_n", value: params.value.repeat_last_n })}
-            ${BoolField({ label: "Penalize repetition of newlines", name: "penalize_nl", value: params.value.penalize_nl })}
             ${IntField({ label: "Top-K sampling", max: 100, min: -1, name: "top_k", value: params.value.top_k })}
             ${FloatField({ label: "Top-P sampling", max: 1.0, min: 0.0, name: "top_p", step: 0.01, value: params.value.top_p })}
             ${FloatField({ label: "Min-P sampling", max: 1.0, min: 0.0, name: "min_p", step: 0.01, value: params.value.min_p })}
diff --git a/examples/server/public_simplechat/simplechat.js b/examples/server/public_simplechat/simplechat.js
index 8e0df3b61df2b..2fcd24a860bd4 100644
--- a/examples/server/public_simplechat/simplechat.js
+++ b/examples/server/public_simplechat/simplechat.js
@@ -407,6 +407,9 @@ class SimpleChat {
                 if (curLine.startsWith("data:")) {
                     curLine = curLine.substring(5);
                 }
+                if (curLine.trim() === "[DONE]") {
+                    break;
+                }
                 let curJson = JSON.parse(curLine);
                 console.debug("DBUG:SC:PART:Json:", curJson);
                 this.append_response(this.response_extract_stream(curJson, apiEP));
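The `[DONE]` guard added to simplechat.js above matters because OAI-compatible servers terminate a `stream: true` response with the literal SSE line `data: [DONE]`, which is not valid JSON; without the early break, the `JSON.parse(curLine)` that follows would throw. A minimal sketch of the same loop (the `handleChunk` consumer is hypothetical):

```js
for (let curLine of lines) {
    if (curLine.startsWith("data:")) {
        curLine = curLine.substring(5);
    }
    if (curLine.trim() === "[DONE]") {
        break; // end-of-stream marker, not JSON: stop before JSON.parse
    }
    handleChunk(JSON.parse(curLine)); // hypothetical consumer
}
```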
charset=utf-8" // auto generated files (update with ./deps.sh) -#include "index.html.hpp" -#include "completion.js.hpp" +#include "index.html.gz.hpp" #include "loading.html.hpp" -#include "deps_daisyui.min.css.hpp" -#include "deps_markdown-it.js.hpp" -#include "deps_tailwindcss.js.hpp" -#include "deps_vue.esm-browser.js.hpp" #include #include @@ -37,8 +33,10 @@ using json = nlohmann::ordered_json; enum stop_type { - STOP_TYPE_FULL, - STOP_TYPE_PARTIAL, + STOP_TYPE_NONE, + STOP_TYPE_EOS, + STOP_TYPE_WORD, + STOP_TYPE_LIMIT, }; // state diagram: https://github.com/ggerganov/llama.cpp/pull/9283 @@ -56,7 +54,10 @@ enum server_state { }; enum server_task_type { - SERVER_TASK_TYPE_INFERENCE, + SERVER_TASK_TYPE_COMPLETION, + SERVER_TASK_TYPE_EMBEDDING, + SERVER_TASK_TYPE_RERANK, + SERVER_TASK_TYPE_INFILL, SERVER_TASK_TYPE_CANCEL, SERVER_TASK_TYPE_NEXT_RESPONSE, SERVER_TASK_TYPE_METRICS, @@ -66,22 +67,309 @@ enum server_task_type { SERVER_TASK_TYPE_SET_LORA, }; -enum server_task_inf_type { - SERVER_TASK_INF_TYPE_COMPLETION, - SERVER_TASK_INF_TYPE_EMBEDDING, - SERVER_TASK_INF_TYPE_RERANK, - SERVER_TASK_INF_TYPE_INFILL, +// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11 +enum error_type { + ERROR_TYPE_INVALID_REQUEST, + ERROR_TYPE_AUTHENTICATION, + ERROR_TYPE_SERVER, + ERROR_TYPE_NOT_FOUND, + ERROR_TYPE_PERMISSION, + ERROR_TYPE_UNAVAILABLE, // custom error + ERROR_TYPE_NOT_SUPPORTED, // custom error +}; + +struct slot_params { + bool stream = true; + bool cache_prompt = true; // remember the prompt to avoid reprocessing all prompt + bool return_tokens = false; + + int32_t n_keep = 0; // number of tokens to keep from initial prompt + int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half + int32_t n_predict = -1; // new tokens to predict + int32_t n_indent = 0; // mininum line indentation for the generated text in number of whitespace characters + + int64_t t_max_prompt_ms = -1; // TODO: implement + int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit + + std::vector antiprompt; + bool timings_per_token = false; + bool post_sampling_probs = false; + bool ignore_eos = false; + + struct common_params_sampling sampling; + struct common_params_speculative speculative; + + // OAI-compat fields + bool verbose = false; + bool oaicompat = false; + bool oaicompat_chat = true; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; + + json to_json() const { + std::vector samplers; + samplers.reserve(sampling.samplers.size()); + for (const auto & sampler : sampling.samplers) { + samplers.emplace_back(common_sampler_type_to_str(sampler)); + } + + return json { + {"n_predict", n_predict}, // Server configured n_predict + {"seed", sampling.seed}, + {"temperature", sampling.temp}, + {"dynatemp_range", sampling.dynatemp_range}, + {"dynatemp_exponent", sampling.dynatemp_exponent}, + {"top_k", sampling.top_k}, + {"top_p", sampling.top_p}, + {"min_p", sampling.min_p}, + {"xtc_probability", sampling.xtc_probability}, + {"xtc_threshold", sampling.xtc_threshold}, + {"typical_p", sampling.typ_p}, + {"repeat_last_n", sampling.penalty_last_n}, + {"repeat_penalty", sampling.penalty_repeat}, + {"presence_penalty", sampling.penalty_present}, + {"frequency_penalty", sampling.penalty_freq}, + {"dry_multiplier", sampling.dry_multiplier}, + {"dry_base", sampling.dry_base}, + {"dry_allowed_length", sampling.dry_allowed_length}, + {"dry_penalty_last_n", sampling.dry_penalty_last_n}, + 
{"dry_sequence_breakers", sampling.dry_sequence_breakers}, + {"mirostat", sampling.mirostat}, + {"mirostat_tau", sampling.mirostat_tau}, + {"mirostat_eta", sampling.mirostat_eta}, + {"stop", antiprompt}, + {"max_tokens", n_predict}, // User configured n_predict + {"n_keep", n_keep}, + {"n_discard", n_discard}, + {"ignore_eos", sampling.ignore_eos}, + {"stream", stream}, + {"logit_bias", format_logit_bias(sampling.logit_bias)}, + {"n_probs", sampling.n_probs}, + {"min_keep", sampling.min_keep}, + {"grammar", sampling.grammar}, + {"samplers", samplers}, + {"speculative.n_max", speculative.n_max}, + {"speculative.n_min", speculative.n_min}, + {"speculative.p_min", speculative.p_min}, + {"timings_per_token", timings_per_token}, + {"post_sampling_probs", post_sampling_probs}, + }; + } }; struct server_task { - int id = -1; // to be filled by server_queue - int id_target = -1; // used by SERVER_TASK_TYPE_CANCEL + int id = -1; // to be filled by server_queue + int index = -1; // used when there are multiple prompts (batch request) - llama_tokens prompt_tokens; server_task_type type; - json data; - server_task_inf_type inf_type = SERVER_TASK_INF_TYPE_COMPLETION; + // used by SERVER_TASK_TYPE_CANCEL + int id_target = -1; + + // used by SERVER_TASK_TYPE_INFERENCE + slot_params params; + llama_tokens prompt_tokens; + int id_selected_slot = -1; + + // used by SERVER_TASK_TYPE_SLOT_SAVE, SERVER_TASK_TYPE_SLOT_RESTORE, SERVER_TASK_TYPE_SLOT_ERASE + struct slot_action { + int slot_id; + std::string filename; + std::string filepath; + }; + slot_action slot_action; + + // used by SERVER_TASK_TYPE_METRICS + bool metrics_reset_bucket = false; + + server_task(server_task_type type) : type(type) {} + + static slot_params params_from_json_cmpl( + const llama_model * model, + const llama_context * ctx, + const common_params & params_base, + const json & data) { + slot_params params; + + // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them) + slot_params defaults; + defaults.sampling = params_base.sampling; + defaults.speculative = params_base.speculative; + + // enabling this will output extra debug information in the HTTP responses from the server + params.verbose = params_base.verbosity > 9; + params.timings_per_token = json_value(data, "timings_per_token", false); + + params.stream = json_value(data, "stream", false); + params.cache_prompt = json_value(data, "cache_prompt", true); + params.return_tokens = json_value(data, "return_tokens", false); + params.n_predict = json_value(data, "n_predict", json_value(data, "max_tokens", defaults.n_predict)); + params.n_indent = json_value(data, "n_indent", defaults.n_indent); + params.n_keep = json_value(data, "n_keep", defaults.n_keep); + params.n_discard = json_value(data, "n_discard", defaults.n_discard); + //params.t_max_prompt_ms = json_value(data, "t_max_prompt_ms", defaults.t_max_prompt_ms); // TODO: implement + params.t_max_predict_ms = json_value(data, "t_max_predict_ms", defaults.t_max_predict_ms); + + params.sampling.top_k = json_value(data, "top_k", defaults.sampling.top_k); + params.sampling.top_p = json_value(data, "top_p", defaults.sampling.top_p); + params.sampling.min_p = json_value(data, "min_p", defaults.sampling.min_p); + params.sampling.xtc_probability = json_value(data, "xtc_probability", defaults.sampling.xtc_probability); + params.sampling.xtc_threshold = json_value(data, "xtc_threshold", defaults.sampling.xtc_threshold); + params.sampling.typ_p = json_value(data, "typical_p", 
+        params.sampling.typ_p = json_value(data, "typical_p", defaults.sampling.typ_p);
+        params.sampling.temp = json_value(data, "temperature", defaults.sampling.temp);
+        params.sampling.dynatemp_range = json_value(data, "dynatemp_range", defaults.sampling.dynatemp_range);
+        params.sampling.dynatemp_exponent = json_value(data, "dynatemp_exponent", defaults.sampling.dynatemp_exponent);
+        params.sampling.penalty_last_n = json_value(data, "repeat_last_n", defaults.sampling.penalty_last_n);
+        params.sampling.penalty_repeat = json_value(data, "repeat_penalty", defaults.sampling.penalty_repeat);
+        params.sampling.penalty_freq = json_value(data, "frequency_penalty", defaults.sampling.penalty_freq);
+        params.sampling.penalty_present = json_value(data, "presence_penalty", defaults.sampling.penalty_present);
+        params.sampling.dry_multiplier = json_value(data, "dry_multiplier", defaults.sampling.dry_multiplier);
+        params.sampling.dry_base = json_value(data, "dry_base", defaults.sampling.dry_base);
+        params.sampling.dry_allowed_length = json_value(data, "dry_allowed_length", defaults.sampling.dry_allowed_length);
+        params.sampling.dry_penalty_last_n = json_value(data, "dry_penalty_last_n", defaults.sampling.dry_penalty_last_n);
+        params.sampling.mirostat = json_value(data, "mirostat", defaults.sampling.mirostat);
+        params.sampling.mirostat_tau = json_value(data, "mirostat_tau", defaults.sampling.mirostat_tau);
+        params.sampling.mirostat_eta = json_value(data, "mirostat_eta", defaults.sampling.mirostat_eta);
+        params.sampling.seed = json_value(data, "seed", defaults.sampling.seed);
+        params.sampling.n_probs = json_value(data, "n_probs", defaults.sampling.n_probs);
+        params.sampling.min_keep = json_value(data, "min_keep", defaults.sampling.min_keep);
+        params.post_sampling_probs = json_value(data, "post_sampling_probs", defaults.post_sampling_probs);
+
+        params.speculative.n_min = json_value(data, "speculative.n_min", defaults.speculative.n_min);
+        params.speculative.n_max = json_value(data, "speculative.n_max", defaults.speculative.n_max);
+        params.speculative.p_min = json_value(data, "speculative.p_min", defaults.speculative.p_min);
+
+        params.speculative.n_min = std::min(params.speculative.n_max, params.speculative.n_min);
+        params.speculative.n_min = std::max(params.speculative.n_min, 2);
+        params.speculative.n_max = std::max(params.speculative.n_max, 0);
+
+        // TODO: add more sanity checks for the input parameters
+
+        if (params.sampling.penalty_last_n < -1) {
+            throw std::runtime_error("Error: repeat_last_n must be >= -1");
+        }
+
+        if (params.sampling.dry_penalty_last_n < -1) {
+            throw std::runtime_error("Error: dry_penalty_last_n must be >= -1");
+        }
+
+        if (params.sampling.penalty_last_n == -1) {
+            // note: should be the slot's context and not the full context, but it's ok
+            params.sampling.penalty_last_n = llama_n_ctx(ctx);
+        }
+
+        if (params.sampling.dry_penalty_last_n == -1) {
+            params.sampling.dry_penalty_last_n = llama_n_ctx(ctx);
+        }
+
+        if (params.sampling.dry_base < 1.0f) {
+            params.sampling.dry_base = defaults.sampling.dry_base;
+        }
+
+        // sequence breakers for DRY
+        {
+            // Currently, this is not compatible with TextGen WebUI, Koboldcpp and SillyTavern format
+            // Ref: https://github.com/oobabooga/text-generation-webui/blob/d1af7a41ade7bd3c3a463bfa640725edb818ebaf/extensions/openai/typing.py#L39
+
+            if (data.contains("dry_sequence_breakers")) {
+                params.sampling.dry_sequence_breakers = json_value(data, "dry_sequence_breakers", std::vector());
+                if (params.sampling.dry_sequence_breakers.empty()) {
+                    throw std::runtime_error("Error: dry_sequence_breakers must be a non-empty array of strings");
+                }
+            }
+        }
+
+        // process "json_schema" and "grammar"
+        if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) {
+            throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both");
+        }
+        if (data.contains("json_schema") && !data.contains("grammar")) {
+            try {
+                auto schema = json_value(data, "json_schema", json::object());
+                params.sampling.grammar = json_schema_to_grammar(schema);
+            } catch (const std::exception & e) {
+                throw std::runtime_error(std::string("\"json_schema\": ") + e.what());
+            }
+        } else {
+            params.sampling.grammar = json_value(data, "grammar", defaults.sampling.grammar);
+        }
+
+        {
+            params.sampling.logit_bias.clear();
+            params.ignore_eos = json_value(data, "ignore_eos", false);
+
+            const auto & logit_bias = data.find("logit_bias");
+            if (logit_bias != data.end() && logit_bias->is_array()) {
+                const int n_vocab = llama_n_vocab(model);
+                for (const auto & el : *logit_bias) {
+                    // TODO: we may want to throw errors here, in case "el" is incorrect
+                    if (el.is_array() && el.size() == 2) {
+                        float bias;
+                        if (el[1].is_number()) {
+                            bias = el[1].get();
+                        } else if (el[1].is_boolean() && !el[1].get()) {
+                            bias = -INFINITY;
+                        } else {
+                            continue;
+                        }
+
+                        if (el[0].is_number_integer()) {
+                            llama_token tok = el[0].get();
+                            if (tok >= 0 && tok < n_vocab) {
+                                params.sampling.logit_bias.push_back({tok, bias});
+                            }
+                        } else if (el[0].is_string()) {
+                            auto toks = common_tokenize(model, el[0].get(), false);
+                            for (auto tok : toks) {
+                                params.sampling.logit_bias.push_back({tok, bias});
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        {
+            params.antiprompt.clear();
+
+            const auto & stop = data.find("stop");
+            if (stop != data.end() && stop->is_array()) {
+                for (const auto & word : *stop) {
+                    if (!word.empty()) {
+                        params.antiprompt.push_back(word);
+                    }
+                }
+            }
+        }
+
+        {
+            const auto & samplers = data.find("samplers");
+            if (samplers != data.end()) {
+                if (samplers->is_array()) {
+                    std::vector sampler_names;
+                    for (const auto & name : *samplers) {
+                        if (name.is_string()) {
+                            sampler_names.emplace_back(name);
+                        }
+                    }
+                    params.sampling.samplers = common_sampler_types_from_names(sampler_names, false);
+                } else if (samplers->is_string()) {
+                    std::string sampler_string;
+                    for (const auto & name : *samplers) {
+                        sampler_string += name;
+                    }
+                    params.sampling.samplers = common_sampler_types_from_chars(sampler_string);
+                }
+            } else {
+                params.sampling.samplers = defaults.sampling.samplers;
+            }
+        }
+
+        std::string model_name = params_base.model_alias.empty() ? DEFAULT_OAICOMPAT_MODEL : params_base.model_alias;
+        params.oaicompat_model = json_value(data, "model", model_name);
+
+        return params;
+    }

     // utility function
     static std::unordered_set get_list_id(const std::vector & tasks) {
@@ -93,40 +381,628 @@ struct server_task {
     }
 };
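To make the parsing above concrete, here is a hedged example of a completion request body that exercises `params_from_json_cmpl()`; the field handling follows this diff, and all values are illustrative:

```js
const request = {
    prompt: "Write a haiku about RAII.",
    max_tokens: 64,       // accepted as an alias for n_predict
    repeat_last_n: -1,    // -1 is replaced server-side with the context size
    samplers: ["top_k", "top_p", "min_p", "temperature"], // array of names, or a single string of sampler chars
    stop: ["\n\n"],       // empty strings are skipped
    logit_bias: [
        [15043, -0.5],    // [token id, bias]
        ["llama", false], // false maps to -INFINITY, banning every token of the string
    ],
    json_schema: { type: "object", properties: { haiku: { type: "string" } } },
    // NOTE: "grammar" must be omitted here; json_schema and grammar are mutually exclusive
};
```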
+struct result_timings {
+    int32_t prompt_n = -1;
+    double prompt_ms;
+    double prompt_per_token_ms;
+    double prompt_per_second;
+
+    int32_t predicted_n = -1;
+    double predicted_ms;
+    double predicted_per_token_ms;
+    double predicted_per_second;
+
+    json to_json() const {
+        return {
+            {"prompt_n", prompt_n},
+            {"prompt_ms", prompt_ms},
+            {"prompt_per_token_ms", prompt_per_token_ms},
+            {"prompt_per_second", prompt_per_second},
+
+            {"predicted_n", predicted_n},
+            {"predicted_ms", predicted_ms},
+            {"predicted_per_token_ms", predicted_per_token_ms},
+            {"predicted_per_second", predicted_per_second},
+        };
+    }
+};
+
 struct server_task_result {
-    int id = -1;
+    int id = -1;
+    int id_slot = -1;
+    virtual bool is_error() {
+        // only used by server_task_result_error
+        return false;
+    }
+    virtual bool is_stop() {
+        // only used by server_task_result_cmpl_*
+        return false;
+    }
+    virtual int get_index() {
+        return -1;
+    }
+    virtual json to_json() = 0;
+    virtual ~server_task_result() = default;
+};

-    json data;
+// using shared_ptr for polymorphism of server_task_result
+using server_task_result_ptr = std::unique_ptr;

-    bool stop;
-    bool error;
+inline std::string stop_type_to_str(stop_type type) {
+    switch (type) {
+        case STOP_TYPE_EOS: return "eos";
+        case STOP_TYPE_WORD: return "word";
+        case STOP_TYPE_LIMIT: return "limit";
+        default: return "none";
+    }
+}
+
+struct completion_token_output {
+    llama_token tok;
+    float prob;
+    std::string text_to_send;
+    struct prob_info {
+        llama_token tok;
+        std::string txt;
+        float prob;
+    };
+    std::vector probs;
+
+    json to_json(bool post_sampling_probs) const {
+        json probs_for_token = json::array();
+        for (const auto & p : probs) {
+            std::string txt(p.txt);
+            txt.resize(validate_utf8(txt));
+            probs_for_token.push_back(json {
+                {"id", p.tok},
+                {"token", txt},
+                {"bytes", str_to_bytes(p.txt)},
+                {
+                    post_sampling_probs ? "prob" : "logprob",
+                    post_sampling_probs ? p.prob : logarithm(p.prob)
+                },
+            });
+        }
+        return probs_for_token;
+    }
+
+    static json probs_vector_to_json(const std::vector & probs, bool post_sampling_probs) {
+        json out = json::array();
+        for (const auto & p : probs) {
+            std::string txt(p.text_to_send);
+            txt.resize(validate_utf8(txt));
+            out.push_back(json {
+                {"id", p.tok},
+                {"token", txt},
+                {"bytes", str_to_bytes(p.text_to_send)},
+                {
+                    post_sampling_probs ? "prob" : "logprob",
+                    post_sampling_probs ? p.prob : logarithm(p.prob)
+                },
+                {
+                    post_sampling_probs ? "top_probs" : "top_logprobs",
+                    p.to_json(post_sampling_probs)
+                },
+            });
+        }
+        return out;
+    }
+
+    static float logarithm(float x) {
+        // nlohmann::json converts -inf to null, so we need to prevent that
+        return x == 0.0f ? std::numeric_limits::lowest() : std::log(x);
+    }
+
+    static std::vector str_to_bytes(const std::string & str) {
+        std::vector bytes;
+        for (unsigned char c : str) {
+            bytes.push_back(c);
+        }
+        return bytes;
+    }
+};

-struct server_static_file {
-    const unsigned char * data;
-    unsigned int size;
-    const char * mime_type;
+struct server_task_result_cmpl_final : server_task_result {
+    int index = 0;
+
+    std::string content;
+    llama_tokens tokens;
+
+    bool stream;
+    result_timings timings;
+    std::string prompt;
+
+    bool truncated;
+    int32_t n_decoded;
+    int32_t n_prompt_tokens;
+    int32_t n_tokens_cached;
+    bool has_new_line;
+    std::string stopping_word;
+    stop_type stop = STOP_TYPE_NONE;
+
+    bool post_sampling_probs;
+    std::vector probs_output;
+
+    slot_params generation_params;
+
+    // OAI-compat fields
+    bool verbose = false;
+    bool oaicompat = false;
+    bool oaicompat_chat = true; // TODO: support oaicompat for non-chat
+    std::string oaicompat_model;
+    std::string oaicompat_cmpl_id;
+
+    virtual int get_index() override {
+        return index;
+    }
+
+    virtual bool is_stop() override {
+        return true; // in stream mode, final responses are considered stop
+    }
+
+    virtual json to_json() override {
+        return oaicompat
+            ? (stream ? to_json_oaicompat_chat_stream() : to_json_oaicompat_chat())
+            : to_json_non_oaicompat();
+    }
+
+    json to_json_non_oaicompat() {
+        json res = json {
+            {"index", index},
+            {"content", stream ? "" : content}, // in stream mode, content is already in last partial chunk
+            {"tokens", stream ? llama_tokens {} : tokens},
+            {"id_slot", id_slot},
+            {"stop", true},
+            {"model", oaicompat_model},
+            {"tokens_predicted", n_decoded},
+            {"tokens_evaluated", n_prompt_tokens},
+            {"generation_settings", generation_params.to_json()},
+            {"prompt", prompt},
+            {"has_new_line", has_new_line},
+            {"truncated", truncated},
+            {"stop_type", stop_type_to_str(stop)},
+            {"stopping_word", stopping_word},
+            {"tokens_cached", n_tokens_cached},
+            {"timings", timings.to_json()},
+        };
+        if (!stream && !probs_output.empty()) {
+            res["completion_probabilities"] = completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs);
+        }
+        return res;
+    }
+
+    json to_json_oaicompat_chat() {
+        std::string finish_reason = "length";
+        if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) {
+            finish_reason = "stop";
+        }
+
+        json choice = json{
+            {"finish_reason", finish_reason},
+            {"index", 0},
+            {"message", json {
+                {"content", content},
+                {"role", "assistant"}
+            }
+        }};
+
+        if (!stream && probs_output.size() > 0) {
+            choice["logprobs"] = json{
+                {"content", completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs)},
+            };
+        }
+
+        std::time_t t = std::time(0);
+
+        json res = json {
+            {"choices", json::array({choice})},
+            {"created", t},
+            {"model", oaicompat_model},
+            {"object", "chat.completion"},
+            {"usage", json {
+                {"completion_tokens", n_decoded},
+                {"prompt_tokens", n_prompt_tokens},
+                {"total_tokens", n_decoded + n_prompt_tokens}
+            }},
+            {"id", oaicompat_cmpl_id}
+        };
+
+        // extra fields for debugging purposes
+        if (verbose) {
+            res["__verbose"] = to_json_non_oaicompat();
+        }
+        if (timings.prompt_n >= 0) {
+            res.push_back({"timings", timings.to_json()});
+        }
+
+        return res;
+    }
+
+    json to_json_oaicompat_chat_stream() {
+        std::time_t t = std::time(0);
+        std::string finish_reason = "length";
+        if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) {
+            finish_reason = "stop";
+        }
+
+        json choice = json{
+            {"finish_reason", finish_reason},
+            {"index", 0},
+            {"delta", json::object()}
+        };
+
+        json ret = json {
+            {"choices", json::array({choice})},
+            {"created", t},
+            {"id", oaicompat_cmpl_id},
+            {"model", oaicompat_model},
+            {"object", "chat.completion.chunk"},
+            {"usage", json {
+                {"completion_tokens", n_decoded},
+                {"prompt_tokens", n_prompt_tokens},
+                {"total_tokens", n_decoded + n_prompt_tokens},
+            }},
+        };
+
+        if (timings.prompt_n >= 0) {
+            ret.push_back({"timings", timings.to_json()});
+        }
+
+        return ret;
+    }
+};
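For reference, a sketch of the non-streaming payload that `to_json_oaicompat_chat()` above produces; all values are illustrative, and `timings` is appended only when `timings.prompt_n >= 0`:

```js
const chatCompletion = {
    choices: [{
        finish_reason: "stop", // "stop" for STOP_TYPE_WORD / STOP_TYPE_EOS, otherwise "length"
        index: 0,
        message: { content: "...generated text...", role: "assistant" },
        // "logprobs" appears here only when probabilities were requested
    }],
    created: 1733000000,       // std::time(0) at response time
    model: "default",          // oaicompat_model
    object: "chat.completion",
    usage: { completion_tokens: 64, prompt_tokens: 12, total_tokens: 76 },
    id: "chatcmpl-123",        // oaicompat_cmpl_id (illustrative)
};
```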

-struct slot_params {
-    bool stream = true;
-    bool cache_prompt = false; // remember the prompt to avoid reprocessing all prompt
+struct server_task_result_cmpl_partial : server_task_result {
+    int index = 0;

-    int32_t n_keep = 0; // number of tokens to keep from initial prompt
-    int32_t n_discard = 0; // number of tokens after n_keep that may be discarded when shifting context, 0 defaults to half
-    int32_t n_predict = -1; // new tokens to predict
-    int32_t n_indent = 0; // mininum line indentation for the generated text in number of whitespace characters
+    std::string content;
+    llama_tokens tokens;

-    int64_t t_max_prompt_ms = -1; // TODO: implement
-    int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit
+    int32_t n_decoded;
+    int32_t n_prompt_tokens;

-    std::vector antiprompt;
+    bool post_sampling_probs;
+    completion_token_output prob_output;
+    result_timings timings;
+
+    // OAI-compat fields
+    bool verbose = false;
+    bool oaicompat = false;
+    bool oaicompat_chat = true; // TODO: support oaicompat for non-chat
+    std::string oaicompat_model;
+    std::string oaicompat_cmpl_id;
+
+    virtual int get_index() override {
+        return index;
+    }
+
+    virtual bool is_stop() override {
+        return false; // in stream mode, partial responses are not considered stop
+    }
+
+    virtual json to_json() override {
to_json_oaicompat() : to_json_non_oaicompat(); + } + + json to_json_non_oaicompat() { + // non-OAI-compat JSON + json res = json { + {"index", index}, + {"content", content}, + {"tokens", tokens}, + {"stop", false}, + {"id_slot", id_slot}, + {"tokens_predicted", n_decoded}, + {"tokens_evaluated", n_prompt_tokens}, + }; + // populate the timings object when needed (usually for the last response or with timings_per_token enabled) + if (timings.prompt_n > 0) { + res.push_back({"timings", timings.to_json()}); + } + if (!prob_output.probs.empty()) { + res["completion_probabilities"] = completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs); + } + return res; + } + + json to_json_oaicompat() { + bool first = n_decoded == 0; + std::time_t t = std::time(0); + json choices; + + if (first) { + if (content.empty()) { + choices = json::array({json{{"finish_reason", nullptr}, + {"index", 0}, + {"delta", json{{"role", "assistant"}}}}}); + } else { + // We have to send this as two updates to conform to openai behavior + json initial_ret = json{{"choices", json::array({json{ + {"finish_reason", nullptr}, + {"index", 0}, + {"delta", json{ + {"role", "assistant"} + }}}})}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"object", "chat.completion.chunk"}}; + + json second_ret = json{ + {"choices", json::array({json{{"finish_reason", nullptr}, + {"index", 0}, + {"delta", json { + {"content", content}}} + }})}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"object", "chat.completion.chunk"}}; + + return std::vector({initial_ret, second_ret}); + } + } else { + choices = json::array({json{ + {"finish_reason", nullptr}, + {"index", 0}, + {"delta", + json { + {"content", content}, + }}, + }}); + } + + GGML_ASSERT(choices.size() >= 1); + + if (prob_output.probs.size() > 0) { + choices[0]["logprobs"] = json{ + {"content", completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs)}, + }; + } + + json ret = json { + {"choices", choices}, + {"created", t}, + {"id", oaicompat_cmpl_id}, + {"model", oaicompat_model}, + {"object", "chat.completion.chunk"} + }; + + if (timings.prompt_n >= 0) { + ret.push_back({"timings", timings.to_json()}); + } + + return std::vector({ret}); + } +}; + +struct server_task_result_embd : server_task_result { + int index = 0; + std::vector> embedding; + + int32_t n_tokens; + + // OAI-compat fields + bool oaicompat = false; + + virtual int get_index() override { + return index; + } + + virtual json to_json() override { + return oaicompat ? 
to_json_oaicompat() : to_json_non_oaicompat(); + } + + json to_json_non_oaicompat() { + return json { + {"index", index}, + {"embedding", embedding}, + }; + } + + json to_json_oaicompat() { + return json { + {"index", index}, + {"embedding", embedding[0]}, + {"tokens_evaluated", n_tokens}, + }; + } +}; + +struct server_task_result_rerank : server_task_result { + int index = 0; + float score = -1e6; + + int32_t n_tokens; + + virtual int get_index() override { + return index; + } + + virtual json to_json() override { + return json { + {"index", index}, + {"score", score}, + {"tokens_evaluated", n_tokens}, + }; + } +}; + +// this function maybe used outside of server_task_result_error +static json format_error_response(const std::string & message, const enum error_type type) { + std::string type_str; + int code = 500; + switch (type) { + case ERROR_TYPE_INVALID_REQUEST: + type_str = "invalid_request_error"; + code = 400; + break; + case ERROR_TYPE_AUTHENTICATION: + type_str = "authentication_error"; + code = 401; + break; + case ERROR_TYPE_NOT_FOUND: + type_str = "not_found_error"; + code = 404; + break; + case ERROR_TYPE_SERVER: + type_str = "server_error"; + code = 500; + break; + case ERROR_TYPE_PERMISSION: + type_str = "permission_error"; + code = 403; + break; + case ERROR_TYPE_NOT_SUPPORTED: + type_str = "not_supported_error"; + code = 501; + break; + case ERROR_TYPE_UNAVAILABLE: + type_str = "unavailable_error"; + code = 503; + break; + } + return json { + {"code", code}, + {"message", message}, + {"type", type_str}, + }; +} + +struct server_task_result_error : server_task_result { + int index = 0; + error_type err_type = ERROR_TYPE_SERVER; + std::string err_msg; + + virtual bool is_error() override { + return true; + } + + virtual json to_json() override { + return format_error_response(err_msg, err_type); + } +}; + +struct server_task_result_metrics : server_task_result { + int n_idle_slots; + int n_processing_slots; + int n_tasks_deferred; + int64_t t_start; + + int32_t kv_cache_tokens_count; + int32_t kv_cache_used_cells; + + // TODO: somehow reuse server_metrics in the future, instead of duplicating the fields + uint64_t n_prompt_tokens_processed_total = 0; + uint64_t t_prompt_processing_total = 0; + uint64_t n_tokens_predicted_total = 0; + uint64_t t_tokens_generation_total = 0; + + uint64_t n_prompt_tokens_processed = 0; + uint64_t t_prompt_processing = 0; + + uint64_t n_tokens_predicted = 0; + uint64_t t_tokens_generation = 0; + + uint64_t n_decode_total = 0; + uint64_t n_busy_slots_total = 0; + + // while we can also use std::vector this requires copying the slot object which can be quite messy + // therefore, we use json to temporarily store the slot.to_json() result + json slots_data = json::array(); + + virtual json to_json() override { + return json { + { "idle", n_idle_slots }, + { "processing", n_processing_slots }, + { "deferred", n_tasks_deferred }, + { "t_start", t_start }, + + { "n_prompt_tokens_processed_total", n_prompt_tokens_processed_total }, + { "t_tokens_generation_total", t_tokens_generation_total }, + { "n_tokens_predicted_total", n_tokens_predicted_total }, + { "t_prompt_processing_total", t_prompt_processing_total }, + + { "n_prompt_tokens_processed", n_prompt_tokens_processed }, + { "t_prompt_processing", t_prompt_processing }, + { "n_tokens_predicted", n_tokens_predicted }, + { "t_tokens_generation", t_tokens_generation }, + + { "n_decode_total", n_decode_total }, + { "n_busy_slots_total", n_busy_slots_total }, + + { "kv_cache_tokens_count", 
kv_cache_tokens_count }, + { "kv_cache_used_cells", kv_cache_used_cells }, + + { "slots", slots_data }, + }; + } +}; + +struct server_task_result_slot_save_load : server_task_result { + std::string filename; + bool is_save; // true = save, false = load + + size_t n_tokens; + size_t n_bytes; + double t_ms; + + virtual json to_json() override { + if (is_save) { + return json { + { "id_slot", id_slot }, + { "filename", filename }, + { "n_saved", n_tokens }, + { "n_written", n_bytes }, + { "timings", { + { "save_ms", t_ms } + }}, + }; + } else { + return json { + { "id_slot", id_slot }, + { "filename", filename }, + { "n_restored", n_tokens }, + { "n_read", n_bytes }, + { "timings", { + { "restore_ms", t_ms } + }}, + }; + } + } +}; + +struct server_task_result_slot_erase : server_task_result { + size_t n_erased; + + virtual json to_json() override { + return json { + { "id_slot", id_slot }, + { "n_erased", n_erased }, + }; + } +}; + +struct server_task_result_apply_lora : server_task_result { + virtual json to_json() override { + return json {{ "success", true }}; + } }; struct server_slot { int id; int id_task = -1; + // only used for completion/embedding/infill/rerank + server_task_type task_type = SERVER_TASK_TYPE_COMPLETION; + + llama_batch batch_spec = {}; + + llama_context * ctx = nullptr; + llama_context * ctx_dft = nullptr; + + common_speculative * spec = nullptr; + // the index relative to completion multi-task request size_t index = 0; @@ -154,35 +1030,29 @@ struct server_slot { size_t last_nl_pos = 0; - std::string generated_text; + std::string generated_text; + llama_tokens generated_tokens; + llama_tokens cache_tokens; - std::vector generated_token_probs; - server_task_inf_type inf_type = SERVER_TASK_INF_TYPE_COMPLETION; + std::vector generated_token_probs; bool has_next_token = true; bool has_new_line = false; bool truncated = false; - bool stopped_eos = false; - bool stopped_word = false; - bool stopped_limit = false; + stop_type stop; - bool oaicompat = false; - - std::string oaicompat_model; std::string stopping_word; // sampling json json_schema; - struct common_sampler_params sparams; struct common_sampler * smpl = nullptr; llama_token sampled; // stats size_t n_sent_text = 0; // number of sent text character - size_t n_sent_token_probs = 0; int64_t t_start_process_prompt; int64_t t_start_generation; @@ -200,19 +1070,21 @@ struct server_slot { generated_text = ""; has_new_line = false; truncated = false; - stopped_eos = false; - stopped_word = false; - stopped_limit = false; + stop = STOP_TYPE_NONE; stopping_word = ""; n_past = 0; n_sent_text = 0; - n_sent_token_probs = 0; - inf_type = SERVER_TASK_INF_TYPE_COMPLETION; + task_type = SERVER_TASK_TYPE_COMPLETION; + generated_tokens.clear(); generated_token_probs.clear(); } - bool has_budget(common_params &global_params) { + bool is_non_causal() const { + return task_type == SERVER_TASK_TYPE_EMBEDDING || task_type == SERVER_TASK_TYPE_RERANK; + } + + bool has_budget(const common_params & global_params) { if (params.n_predict == -1 && global_params.n_predict == -1) { return true; // limitless } @@ -232,6 +1104,10 @@ struct server_slot { return state != SLOT_STATE_IDLE; } + bool can_speculate() const { + return ctx_dft && params.speculative.n_max > 0 && params.cache_prompt; + } + void add_token(const completion_token_output & token) { if (!is_processing()) { SLT_WRN(*this, "%s", "slot is not processing\n"); @@ -251,38 +1127,40 @@ struct server_slot { } } - json get_formated_timings() const { - return json { - {"prompt_n", 
n_prompt_tokens_processed}, - {"prompt_ms", t_prompt_processing}, - {"prompt_per_token_ms", t_prompt_processing / n_prompt_tokens_processed}, - {"prompt_per_second", 1e3 / t_prompt_processing * n_prompt_tokens_processed}, - - {"predicted_n", n_decoded}, - {"predicted_ms", t_token_generation}, - {"predicted_per_token_ms", t_token_generation / n_decoded}, - {"predicted_per_second", 1e3 / t_token_generation * n_decoded}, - }; + result_timings get_timings() const { + result_timings timings; + timings.prompt_n = n_prompt_tokens_processed; + timings.prompt_ms = t_prompt_processing; + timings.prompt_per_token_ms = t_prompt_processing / n_prompt_tokens_processed; + timings.prompt_per_second = 1e3 / t_prompt_processing * n_prompt_tokens_processed; + + timings.predicted_n = n_decoded; + timings.predicted_ms = t_token_generation; + timings.predicted_per_token_ms = t_token_generation / n_decoded; + timings.predicted_per_second = 1e3 / t_token_generation * n_decoded; + + return timings; } - size_t find_stopping_strings(const std::string & text, const size_t last_token_size, const stop_type type) { + size_t find_stopping_strings(const std::string & text, const size_t last_token_size, bool is_full_stop) { size_t stop_pos = std::string::npos; for (const std::string & word : params.antiprompt) { size_t pos; - if (type == STOP_TYPE_FULL) { + if (is_full_stop) { const size_t tmp = word.size() + last_token_size; const size_t from_pos = text.size() > tmp ? text.size() - tmp : 0; pos = text.find(word, from_pos); } else { + // otherwise, partial stop pos = find_partial_stop_string(word, text); } if (pos != std::string::npos && (stop_pos == std::string::npos || pos < stop_pos)) { - if (type == STOP_TYPE_FULL) { - stopped_word = true; + if (is_full_stop) { + stop = STOP_TYPE_WORD; stopping_word = word; has_next_token = false; } @@ -302,13 +1180,35 @@ struct server_slot { SLT_INF(*this, "\n" - "\rprompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n" - "\r eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n" - "\r total time = %10.2f ms / %5d tokens\n", + "prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n" + " eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n" + " total time = %10.2f ms / %5d tokens\n", t_prompt_processing, n_prompt_tokens_processed, t_prompt, n_prompt_second, t_token_generation, n_decoded, t_gen, n_gen_second, t_prompt_processing + t_token_generation, n_prompt_tokens_processed + n_decoded); } + + json to_json() const { + return json { + {"id", id}, + {"id_task", id_task}, + {"n_ctx", n_ctx}, + {"speculative", can_speculate()}, + {"is_processing", is_processing()}, + {"non_causal", is_non_causal()}, + {"params", params.to_json()}, + {"prompt", common_detokenize(ctx, prompt_tokens)}, + {"next_token", + { + {"has_next_token", has_next_token}, + {"has_new_line", has_new_line}, + {"n_remain", n_remaining}, + {"n_decoded", n_decoded}, + {"stopping_word", stopping_word}, + } + }, + }; + } }; struct server_metrics { @@ -381,9 +1281,7 @@ struct server_queue { // Add a new task to the end of the queue int post(server_task task, bool front = false) { std::unique_lock lock(mutex_tasks); - if (task.id == -1) { - task.id = id++; - } + GGML_ASSERT(task.id != -1); QUE_DBG("new task, id = %d, front = %d\n", task.id, front); if (front) { queue_tasks.push_front(std::move(task)); @@ -507,8 +1405,8 @@ struct server_response { // for keeping track of all tasks waiting for the 
result std::unordered_set waiting_task_ids; - // the main result queue - std::vector queue_results; + // the main result queue (using ptr for polymorphism) + std::vector queue_results; std::mutex mutex_results; std::condition_variable condition_results; @@ -548,7 +1446,7 @@ struct server_response { } // This function blocks the thread until there is a response for one of the id_tasks - server_task_result recv(const std::unordered_set & id_tasks) { + server_task_result_ptr recv(const std::unordered_set & id_tasks) { while (true) { std::unique_lock lock(mutex_results); condition_results.wait(lock, [&]{ @@ -556,8 +1454,8 @@ struct server_response { }); for (int i = 0; i < (int) queue_results.size(); i++) { - if (id_tasks.find(queue_results[i].id) != id_tasks.end()) { - server_task_result res = queue_results[i]; + if (id_tasks.find(queue_results[i]->id) != id_tasks.end()) { + server_task_result_ptr res = std::move(queue_results[i]); queue_results.erase(queue_results.begin() + i); return res; } @@ -568,21 +1466,21 @@ struct server_response { } // single-task version of recv() - server_task_result recv(int id_task) { + server_task_result_ptr recv(int id_task) { std::unordered_set id_tasks = {id_task}; return recv(id_tasks); } // Send a new result to a waiting id_task - void send(server_task_result & result) { - SRV_DBG("sending result for task id = %d\n", result.id); + void send(server_task_result_ptr && result) { + SRV_DBG("sending result for task id = %d\n", result->id); std::unique_lock lock(mutex_results); for (const auto & id_task : waiting_task_ids) { - if (result.id == id_task) { - SRV_DBG("task id = %d moved to result queue\n", result.id); + if (result->id == id_task) { + SRV_DBG("task id = %d pushed to result queue\n", result->id); - queue_results.push_back(std::move(result)); + queue_results.emplace_back(std::move(result)); condition_results.notify_all(); return; } @@ -591,11 +1489,14 @@ struct server_response { }; struct server_context { + common_params params_base; + llama_model * model = nullptr; llama_context * ctx = nullptr; std::vector loras; - common_params params; + llama_model * model_dft = nullptr; + llama_context_params cparams_dft; llama_batch batch = {}; @@ -628,34 +1529,90 @@ struct server_context { model = nullptr; } + if (model_dft) { + llama_free_model(model_dft); + model_dft = nullptr; + } + // Clear any sampling context for (server_slot & slot : slots) { - if (slot.smpl != nullptr) { - common_sampler_free(slot.smpl); - } + common_sampler_free(slot.smpl); + slot.smpl = nullptr; + + llama_free(slot.ctx_dft); + slot.ctx_dft = nullptr; + + common_speculative_free(slot.spec); + slot.spec = nullptr; + + llama_batch_free(slot.batch_spec); } llama_batch_free(batch); } - bool load_model(const common_params & params_) { - params = params_; + bool load_model(const common_params & params) { + SRV_INF("loading model '%s'\n", params.model.c_str()); - common_init_result llama_init = common_init_from_params(params); + params_base = params; + + common_init_result llama_init = common_init_from_params(params_base); model = llama_init.model; ctx = llama_init.context; loras = llama_init.lora_adapters; if (model == nullptr) { - SRV_ERR("failed to load model, '%s'\n", params.model.c_str()); + SRV_ERR("failed to load model, '%s'\n", params_base.model.c_str()); return false; } n_ctx = llama_n_ctx(ctx); add_bos_token = llama_add_bos_token(model); - has_eos_token = !llama_add_eos_token(model); + has_eos_token = llama_token_eos(model) != LLAMA_TOKEN_NULL; + + if 
(!params_base.speculative.model.empty()) { + SRV_INF("loading draft model '%s'\n", params_base.speculative.model.c_str()); + + auto params_dft = params_base; + + params_dft.devices = params_base.speculative.devices; + params_dft.model = params_base.speculative.model; + params_dft.n_ctx = params_base.speculative.n_ctx == 0 ? params_base.n_ctx / params_base.n_parallel : params_base.speculative.n_ctx; + params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers; + params_dft.n_parallel = 1; + + common_init_result llama_init_dft = common_init_from_params(params_dft); + + model_dft = llama_init_dft.model; + + if (model_dft == nullptr) { + SRV_ERR("failed to load draft model, '%s'\n", params_base.speculative.model.c_str()); + return false; + } + + if (!common_speculative_are_compatible(ctx, llama_init_dft.context)) { + SRV_ERR("the draft model '%s' is not compatible with the target model '%s'\n", params_base.speculative.model.c_str(), params_base.model.c_str()); + + llama_free (llama_init_dft.context); + llama_free_model(llama_init_dft.model); + + return false; + } + + const int n_ctx_dft = llama_n_ctx(llama_init_dft.context); + + cparams_dft = common_context_params_to_llama(params_dft); + cparams_dft.n_batch = n_ctx_dft; + + // force F16 KV cache for the draft model for extra performance + cparams_dft.type_k = GGML_TYPE_F16; + cparams_dft.type_v = GGML_TYPE_F16; + + // the context is not needed - we will create one for each slot + llama_free(llama_init_dft.context); + } return true; } @@ -674,20 +1631,37 @@ struct server_context { } void init() { - const int32_t n_ctx_slot = n_ctx / params.n_parallel; + const int32_t n_ctx_slot = n_ctx / params_base.n_parallel; - SRV_INF("initializing slots, n_slots = %d\n", params.n_parallel); + SRV_INF("initializing slots, n_slots = %d\n", params_base.n_parallel); - for (int i = 0; i < params.n_parallel; i++) { + for (int i = 0; i < params_base.n_parallel; i++) { server_slot slot; slot.id = i; + slot.ctx = ctx; slot.n_ctx = n_ctx_slot; - slot.n_predict = params.n_predict; + slot.n_predict = params_base.n_predict; + + if (model_dft) { + slot.batch_spec = llama_batch_init(params_base.speculative.n_max + 1, 0, 1); + + slot.ctx_dft = llama_new_context_with_model(model_dft, cparams_dft); + if (slot.ctx_dft == nullptr) { + SRV_ERR("%s", "failed to create draft context\n"); + return; + } + + slot.spec = common_speculative_init(slot.ctx_dft); + if (slot.spec == nullptr) { + SRV_ERR("%s", "failed to create speculator\n"); + return; + } + } SLT_INF(slot, "new slot n_ctx_slot = %d\n", slot.n_ctx); - slot.sparams = params.sparams; + slot.params.sampling = params_base.sampling; slot.callback_on_release = [this](int) { queue_tasks.pop_deferred_task(); @@ -698,8 +1672,7 @@ struct server_context { slots.push_back(slot); } - default_generation_settings_for_props = get_formated_generation(slots.front()); - default_generation_settings_for_props["seed"] = -1; + default_generation_settings_for_props = slots[0].to_json(); // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens // note that n_batch can be > n_ctx (e.g. 
for non-causal attention models such as BERT where the KV cache is not used) @@ -707,7 +1680,7 @@ struct server_context { const int32_t n_batch = llama_n_batch(ctx); // only a single seq_id per token is needed - batch = llama_batch_init(std::max(n_batch, params.n_parallel), 0, 1); + batch = llama_batch_init(std::max(n_batch, params_base.n_parallel), 0, 1); } metrics.init(); @@ -743,7 +1716,7 @@ struct server_context { } // length of the Longest Common Subsequence between the current slot's prompt and the input prompt - int cur_lcs_len = longest_common_subsequence(slot.cache_tokens, task.prompt_tokens); + int cur_lcs_len = common_lcs(slot.cache_tokens, task.prompt_tokens); // fraction of the common subsequence length compared to the current slot's prompt length float cur_similarity = static_cast(cur_lcs_len) / static_cast(slot.cache_tokens.size()); @@ -786,87 +1759,14 @@ struct server_context { } bool launch_slot_with_task(server_slot & slot, const server_task & task) { - slot_params default_params; - // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them) - auto default_sparams = params.sparams; - const auto & data = task.data; - - if (data.count("__oaicompat") != 0) { - slot.oaicompat = true; - slot.oaicompat_model = json_value(data, "model", std::string(DEFAULT_OAICOMPAT_MODEL)); - } else { - slot.oaicompat = false; - slot.oaicompat_model = ""; - } - - slot.params.stream = json_value(data, "stream", false); - slot.params.cache_prompt = json_value(data, "cache_prompt", false); - slot.params.n_predict = json_value(data, "n_predict", json_value(data, "max_tokens", default_params.n_predict)); - slot.params.n_indent = json_value(data, "n_indent", default_params.n_indent); - slot.sparams.top_k = json_value(data, "top_k", default_sparams.top_k); - slot.sparams.top_p = json_value(data, "top_p", default_sparams.top_p); - slot.sparams.min_p = json_value(data, "min_p", default_sparams.min_p); - slot.sparams.xtc_probability = json_value(data, "xtc_probability", default_sparams.xtc_probability); - slot.sparams.xtc_threshold = json_value(data, "xtc_threshold", default_sparams.xtc_threshold); - slot.sparams.typ_p = json_value(data, "typical_p", default_sparams.typ_p); - slot.sparams.temp = json_value(data, "temperature", default_sparams.temp); - slot.sparams.dynatemp_range = json_value(data, "dynatemp_range", default_sparams.dynatemp_range); - slot.sparams.dynatemp_exponent = json_value(data, "dynatemp_exponent", default_sparams.dynatemp_exponent); - slot.sparams.penalty_last_n = json_value(data, "repeat_last_n", default_sparams.penalty_last_n); - slot.sparams.penalty_repeat = json_value(data, "repeat_penalty", default_sparams.penalty_repeat); - slot.sparams.penalty_freq = json_value(data, "frequency_penalty", default_sparams.penalty_freq); - slot.sparams.penalty_present = json_value(data, "presence_penalty", default_sparams.penalty_present); - slot.sparams.dry_multiplier = json_value(data, "dry_multiplier", default_sparams.dry_multiplier); - slot.sparams.dry_base = json_value(data, "dry_base", default_sparams.dry_base); - slot.sparams.dry_allowed_length = json_value(data, "dry_allowed_length", default_sparams.dry_allowed_length); - slot.sparams.dry_penalty_last_n = json_value(data, "dry_penalty_last_n", default_sparams.dry_penalty_last_n); - slot.sparams.mirostat = json_value(data, "mirostat", default_sparams.mirostat); - slot.sparams.mirostat_tau = json_value(data, "mirostat_tau", default_sparams.mirostat_tau); - 
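The slot-selection heuristic shown above scores each candidate slot with common_lcs, the length of the longest common subsequence between the slot's cached tokens and the incoming prompt, divided by the cache size. A self-contained sketch of that score using the textbook O(n*m) dynamic program; lcs_len is a hypothetical stand-in for common_lcs, which actually lives under common/:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using llama_token  = int32_t;
    using llama_tokens = std::vector<llama_token>;

    // classic LCS-length dynamic program over two token sequences
    static size_t lcs_len(const llama_tokens & a, const llama_tokens & b) {
        std::vector<std::vector<size_t>> dp(a.size() + 1, std::vector<size_t>(b.size() + 1, 0));
        for (size_t i = 1; i <= a.size(); i++) {
            for (size_t j = 1; j <= b.size(); j++) {
                dp[i][j] = a[i - 1] == b[j - 1] ? dp[i - 1][j - 1] + 1
                                                : std::max(dp[i - 1][j], dp[i][j - 1]);
            }
        }
        return dp[a.size()][b.size()];
    }

    int main() {
        const llama_tokens cached = { 1, 2, 3, 4, 5, 6 };
        const llama_tokens prompt = { 1, 2, 9, 4, 5, 7 };
        // fraction of the cached prompt that overlaps the new request, as in get_available_slot()
        const float similarity = (float) lcs_len(cached, prompt) / (float) cached.size();
        std::printf("similarity = %.2f\n", similarity); // 4 shared tokens / 6 cached -> 0.67
    }

Using a subsequence rather than a strict prefix lets a slot score well even when a few tokens in the middle of its cache differ from the new prompt.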
slot.sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta); - slot.sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl); - slot.params.n_keep = json_value(data, "n_keep", default_params.n_keep); - slot.params.n_discard = json_value(data, "n_discard", default_params.n_discard); - slot.sparams.seed = json_value(data, "seed", default_sparams.seed); - slot.sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs); - slot.sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep); - //slot.params.t_max_prompt_ms = json_value(data, "t_max_prompt_ms", default_params.t_max_prompt_ms); // TODO: implement - slot.params.t_max_predict_ms = json_value(data, "t_max_predict_ms", default_params.t_max_predict_ms); - - if (slot.sparams.dry_base < 1.0f) - { - slot.sparams.dry_base = default_sparams.dry_base; - } - - // sequence breakers for DRY - { - // Currently, this is not compatible with TextGen WebUI, Koboldcpp and SillyTavern format - // Ref: https://github.com/oobabooga/text-generation-webui/blob/d1af7a41ade7bd3c3a463bfa640725edb818ebaf/extensions/openai/typing.py#L39 - - if (data.contains("dry_sequence_breakers")) { - slot.sparams.dry_sequence_breakers = json_value(data, "dry_sequence_breakers", std::vector()); - if (slot.sparams.dry_sequence_breakers.empty()) { - send_error(task, "Error: dry_sequence_breakers must be a non-empty array of strings", ERROR_TYPE_INVALID_REQUEST); - return false; - } - } - } + slot.reset(); + slot.id_task = task.id; + slot.index = task.index; + slot.task_type = task.type; + slot.params = std::move(task.params); + slot.prompt_tokens = std::move(task.prompt_tokens); - // process "json_schema" and "grammar" - if (data.contains("json_schema") && !data.at("json_schema").is_null() && data.contains("grammar") && !data.at("grammar").is_null()) { - send_error(task, "Either \"json_schema\" or \"grammar\" can be specified, but not both", ERROR_TYPE_INVALID_REQUEST); - return false; - } - if (data.contains("json_schema") && !data.contains("grammar")) { - try { - auto schema = json_value(data, "json_schema", json::object()); - slot.sparams.grammar = json_schema_to_grammar(schema); - } catch (const std::exception & e) { - send_error(task, std::string("\"json_schema\": ") + e.what(), ERROR_TYPE_INVALID_REQUEST); - return false; - } - } else { - slot.sparams.grammar = json_value(data, "grammar", default_sparams.grammar); - } + SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str()); if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) { // Might be better to reject the request with a 400 ? 
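Nearly every line deleted in the hunk above followed one pattern: json_value(data, key, default) reads a typed field from the request body and falls back to the server default when the field is missing or null. Roughly, and with the logging trimmed (the real helper lives in examples/server/utils.hpp; this sketch only assumes nlohmann/json):

    #include <string>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    // fetch body[key] as T, or default_value when absent, null, or the wrong type
    template <typename T>
    static T json_value(const json & body, const std::string & key, const T & default_value) {
        if (body.contains(key) && !body.at(key).is_null()) {
            try {
                return body.at(key).get<T>();
            } catch (const json::exception &) {
                return default_value; // type mismatch: fall back to the default
            }
        }
        return default_value;
    }

    int main() {
        const json data = json::parse(R"({"top_k": 40, "temperature": null})");
        const int   top_k = json_value(data, "top_k", 50);         // -> 40
        const float temp  = json_value(data, "temperature", 0.8f); // null -> 0.8
        (void) top_k; (void) temp;
    }

Moving this parsing out of launch_slot_with_task() means a task now arrives with a fully populated slot_params, so launching a slot reduces to the handful of moves shown above.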
@@ -874,86 +1774,16 @@ struct server_context { SLT_WRN(slot, "n_predict = %d exceeds server configuration, setting to %d", slot.n_predict, slot.n_predict); } - { - slot.sparams.logit_bias.clear(); - - if (json_value(data, "ignore_eos", false) && has_eos_token) { - slot.sparams.logit_bias.push_back({llama_token_eos(model), -INFINITY}); - } - - const auto & logit_bias = data.find("logit_bias"); - if (logit_bias != data.end() && logit_bias->is_array()) { - const int n_vocab = llama_n_vocab(model); - for (const auto & el : *logit_bias) { - // TODO: we may want to throw errors here, in case "el" is incorrect - if (el.is_array() && el.size() == 2) { - float bias; - if (el[1].is_number()) { - bias = el[1].get(); - } else if (el[1].is_boolean() && !el[1].get()) { - bias = -INFINITY; - } else { - continue; - } - - if (el[0].is_number_integer()) { - llama_token tok = el[0].get(); - if (tok >= 0 && tok < n_vocab) { - slot.sparams.logit_bias.push_back({tok, bias}); - } - } else if (el[0].is_string()) { - auto toks = common_tokenize(model, el[0].get(), false); - for (auto tok : toks) { - slot.sparams.logit_bias.push_back({tok, bias}); - } - } - } - } - } - } - - { - slot.params.antiprompt.clear(); - - const auto & stop = data.find("stop"); - if (stop != data.end() && stop->is_array()) { - for (const auto & word : *stop) { - if (!word.empty()) { - slot.params.antiprompt.push_back(word); - } - } - } - } - - { - const auto & samplers = data.find("samplers"); - if (samplers != data.end()) { - if (samplers->is_array()) { - std::vector sampler_names; - for (const auto & name : *samplers) { - if (name.is_string()) { - sampler_names.emplace_back(name); - } - } - slot.sparams.samplers = common_sampler_types_from_names(sampler_names, false); - } else if (samplers->is_string()){ - std::string sampler_string; - for (const auto & name : *samplers) { - sampler_string += name; - } - slot.sparams.samplers = common_sampler_types_from_chars(sampler_string); - } - } else { - slot.sparams.samplers = default_sparams.samplers; - } - } - + if (slot.params.ignore_eos && has_eos_token) { + slot.params.sampling.logit_bias.push_back({llama_token_eos(model), -INFINITY}); + } + { if (slot.smpl != nullptr) { common_sampler_free(slot.smpl); } - slot.smpl = common_sampler_init(model, slot.sparams); + slot.smpl = common_sampler_init(model, slot.params.sampling); if (slot.smpl == nullptr) { // for now, the only error that may happen here is invalid grammar send_error(task, "Failed to parse grammar", ERROR_TYPE_INVALID_REQUEST); @@ -961,6 +1791,12 @@ struct server_context { } } + if (slot.ctx_dft) { + llama_batch_free(slot.batch_spec); + + slot.batch_spec = llama_batch_init(slot.params.speculative.n_max + 1, 0, 1); + } + slot.state = SLOT_STATE_STARTED; SLT_INF(slot, "%s", "processing task\n"); @@ -978,49 +1814,33 @@ struct server_context { bool process_token(completion_token_output & result, server_slot & slot) { // remember which tokens were sampled - used for repetition penalties during sampling - const std::string token_str = common_token_to_piece(ctx, result.tok, params.special); + const std::string token_str = result.text_to_send; slot.sampled = result.tok; - // search stop word and delete it slot.generated_text += token_str; + if (slot.params.return_tokens) { + slot.generated_tokens.push_back(result.tok); + } slot.has_next_token = true; // check if there is incomplete UTF-8 character at the end - bool incomplete = false; - for (unsigned i = 1; i < 5 && i <= slot.generated_text.size(); ++i) { - unsigned char c = 
slot.generated_text[slot.generated_text.size() - i]; - if ((c & 0xC0) == 0x80) { - // continuation byte: 10xxxxxx - continue; - } - if ((c & 0xE0) == 0xC0) { - // 2-byte character: 110xxxxx ... - incomplete = i < 2; - } else if ((c & 0xF0) == 0xE0) { - // 3-byte character: 1110xxxx ... - incomplete = i < 3; - } else if ((c & 0xF8) == 0xF0) { - // 4-byte character: 11110xxx ... - incomplete = i < 4; - } - // else 1-byte character or invalid byte - break; - } + bool incomplete = validate_utf8(slot.generated_text) < slot.generated_text.size(); + // search stop word and delete it if (!incomplete) { size_t pos = std::min(slot.n_sent_text, slot.generated_text.size()); const std::string str_test = slot.generated_text.substr(pos); bool send_text = true; - size_t stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_FULL); + size_t stop_pos = slot.find_stopping_strings(str_test, token_str.size(), true); if (stop_pos != std::string::npos) { slot.generated_text.erase( slot.generated_text.begin() + pos + stop_pos, slot.generated_text.end()); pos = std::min(slot.n_sent_text, slot.generated_text.size()); } else if (slot.has_next_token) { - stop_pos = slot.find_stopping_strings(str_test, token_str.size(), STOP_TYPE_PARTIAL); + stop_pos = slot.find_stopping_strings(str_test, token_str.size(), false); send_text = stop_pos == std::string::npos; } @@ -1043,8 +1863,8 @@ struct server_context { } // check the limits - if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params)) { - slot.stopped_limit = true; + if (slot.n_decoded > 0 && slot.has_next_token && !slot.has_budget(params_base)) { + slot.stop = STOP_TYPE_LIMIT; slot.has_next_token = false; SLT_DBG(slot, "stopped by limit, n_decoded = %d, n_predict = %d\n", slot.n_decoded, slot.params.n_predict); @@ -1053,7 +1873,7 @@ struct server_context { if (slot.has_new_line) { // if we have already seen a new line, we stop after a certain time limit if (slot.params.t_max_predict_ms > 0 && (ggml_time_us() - slot.t_start_generation > 1000.0f*slot.params.t_max_predict_ms)) { - slot.stopped_limit = true; + slot.stop = STOP_TYPE_LIMIT; slot.has_next_token = false; SLT_DBG(slot, "stopped by time limit, n_decoded = %d, t_max_predict_ms = %d ms\n", slot.n_decoded, (int) slot.params.t_max_predict_ms); @@ -1073,7 +1893,7 @@ struct server_context { } if (pos < slot.generated_text.size() && n_indent < slot.params.n_indent) { - slot.stopped_limit = true; + slot.stop = STOP_TYPE_LIMIT; slot.has_next_token = false; // cut the last line @@ -1102,7 +1922,7 @@ struct server_context { // if context shift is disabled, we stop when it reaches the context limit if (slot.n_past >= slot.n_ctx) { slot.truncated = true; - slot.stopped_limit = true; + slot.stop = STOP_TYPE_LIMIT; slot.has_next_token = false; SLT_DBG(slot, "stopped due to running out of context capacity, n_past = %d, n_prompt_tokens = %d, n_decoded = %d, n_ctx = %d\n", @@ -1110,7 +1930,7 @@ struct server_context { } if (llama_token_is_eog(model, result.tok)) { - slot.stopped_eos = true; + slot.stop = STOP_TYPE_EOS; slot.has_next_token = false; SLT_DBG(slot, "%s", "stopped by EOS\n"); @@ -1120,7 +1940,7 @@ struct server_context { if (slot.params.n_predict < 1 && slot.n_predict < 1 && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) { slot.truncated = true; - slot.stopped_limit = true; + slot.stop = STOP_TYPE_LIMIT; slot.has_next_token = false; // stop prediction SLT_WRN(slot, @@ -1134,53 +1954,53 @@ struct server_context { return slot.has_next_token; // continue } - json 
get_formated_generation(const server_slot & slot) const { - std::vector samplers; - samplers.reserve(slot.sparams.samplers.size()); - for (const auto & sampler : slot.sparams.samplers) { - samplers.emplace_back(common_sampler_type_to_str(sampler)); - } + void populate_token_probs(const server_slot & slot, completion_token_output & result, bool post_sampling, bool special, int idx) { + size_t n_probs = slot.params.sampling.n_probs; + size_t n_vocab = llama_n_vocab(llama_get_model(ctx)); + if (post_sampling) { + const auto * cur_p = common_sampler_get_candidates(slot.smpl); + const size_t max_probs = cur_p->size; + + // set probability for sampled token + for (size_t i = 0; i < max_probs; i++) { + if (cur_p->data[i].id == result.tok) { + result.prob = cur_p->data[i].p; + break; + } + } - return json { - {"n_ctx", slot.n_ctx}, - {"n_predict", slot.n_predict}, // Server configured n_predict - {"model", params.model_alias}, - {"seed", slot.sparams.seed}, - {"seed_cur", slot.smpl ? common_sampler_get_seed(slot.smpl) : 0}, - {"temperature", slot.sparams.temp}, - {"dynatemp_range", slot.sparams.dynatemp_range}, - {"dynatemp_exponent", slot.sparams.dynatemp_exponent}, - {"top_k", slot.sparams.top_k}, - {"top_p", slot.sparams.top_p}, - {"min_p", slot.sparams.min_p}, - {"xtc_probability", slot.sparams.xtc_probability}, - {"xtc_threshold", slot.sparams.xtc_threshold}, - {"typical_p", slot.sparams.typ_p}, - {"repeat_last_n", slot.sparams.penalty_last_n}, - {"repeat_penalty", slot.sparams.penalty_repeat}, - {"presence_penalty", slot.sparams.penalty_present}, - {"frequency_penalty", slot.sparams.penalty_freq}, - {"dry_multiplier", slot.sparams.dry_multiplier}, - {"dry_base", slot.sparams.dry_base}, - {"dry_allowed_length", slot.sparams.dry_allowed_length}, - {"dry_penalty_last_n", slot.sparams.dry_penalty_last_n}, - {"dry_sequence_breakers", slot.sparams.dry_sequence_breakers}, - {"mirostat", slot.sparams.mirostat}, - {"mirostat_tau", slot.sparams.mirostat_tau}, - {"mirostat_eta", slot.sparams.mirostat_eta}, - {"penalize_nl", slot.sparams.penalize_nl}, - {"stop", slot.params.antiprompt}, - {"max_tokens", slot.params.n_predict}, // User configured n_predict - {"n_keep", slot.params.n_keep}, - {"n_discard", slot.params.n_discard}, - {"ignore_eos", slot.sparams.ignore_eos}, - {"stream", slot.params.stream}, - //{"logit_bias", slot.sparams.logit_bias}, - {"n_probs", slot.sparams.n_probs}, - {"min_keep", slot.sparams.min_keep}, - {"grammar", slot.sparams.grammar}, - {"samplers", samplers}, - }; + // set probability for top n_probs tokens + result.probs.reserve(max_probs); + for (size_t i = 0; i < std::min(max_probs, n_probs); i++) { + result.probs.push_back({ + cur_p->data[i].id, + common_detokenize(ctx, {cur_p->data[i].id}, special), + cur_p->data[i].p + }); + } + } else { + // TODO: optimize this with min-p optimization + std::vector cur = get_token_probabilities(ctx, idx); + + // set probability for sampled token + for (size_t i = 0; i < n_vocab; i++) { + // set probability for sampled token + if (cur[i].id == result.tok) { + result.prob = cur[i].p; + break; + } + } + + // set probability for top n_probs tokens + result.probs.reserve(n_probs); + for (size_t i = 0; i < std::min(n_vocab, n_probs); i++) { + result.probs.push_back({ + cur[i].id, + common_detokenize(ctx, {cur[i].id}, special), + cur[i].p + }); + } + } } void send_error(const server_task & task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) { @@ -1194,108 +2014,99 @@ struct server_context { void send_error(const int 
id_task, const std::string & error, const enum error_type type = ERROR_TYPE_SERVER) { SRV_ERR("task id = %d, error: %s\n", id_task, error.c_str()); - server_task_result res; - res.id = id_task; - res.stop = false; - res.error = true; - res.data = format_error_response(error, type); - - queue_results.send(res); - } - - void send_partial_response(server_slot & slot, completion_token_output tkn) { - server_task_result res; - res.id = slot.id_task; - res.error = false; - res.stop = false; - res.data = json { - {"content", tkn.text_to_send}, - {"stop", false}, - {"id_slot", slot.id}, - {"multimodal", false}, - {"index", slot.index}, - }; + auto res = std::make_unique(); + res->id = id_task; + res->err_type = type; + res->err_msg = error; - if (slot.sparams.n_probs > 0) { - const llama_tokens to_send_toks = common_tokenize(ctx, tkn.text_to_send, false); - const size_t probs_pos = std::min(slot.n_sent_token_probs, slot.generated_token_probs.size()); - const size_t probs_stop_pos = std::min(slot.n_sent_token_probs + to_send_toks.size(), slot.generated_token_probs.size()); + queue_results.send(std::move(res)); + } - std::vector probs_output; - if (probs_pos < probs_stop_pos) { - probs_output = std::vector( - slot.generated_token_probs.begin() + probs_pos, - slot.generated_token_probs.begin() + probs_stop_pos); - } - slot.n_sent_token_probs = probs_stop_pos; + void send_partial_response(server_slot & slot, const completion_token_output & tkn) { + auto res = std::make_unique(); + + res->id = slot.id_task; + res->index = slot.index; + res->content = tkn.text_to_send; + res->tokens = { tkn.tok }; + + res->n_decoded = slot.n_decoded; + res->n_prompt_tokens = slot.n_prompt_tokens; + res->post_sampling_probs = slot.params.post_sampling_probs; - res.data["completion_probabilities"] = probs_vector_to_json(ctx, probs_output); + res->verbose = slot.params.verbose; + res->oaicompat = slot.params.oaicompat; + res->oaicompat_chat = slot.params.oaicompat_chat; + res->oaicompat_model = slot.params.oaicompat_model; + res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id; + + // populate res.probs_output + if (slot.params.sampling.n_probs > 0) { + res->prob_output = tkn; // copy the token probs } - if (slot.oaicompat) { - res.data["oaicompat_token_ctr"] = slot.n_decoded; - res.data["model"] = slot.oaicompat_model; + // populate timings if this is final response or timings_per_token is enabled + if (slot.stop != STOP_TYPE_NONE || slot.params.timings_per_token) { + res->timings = slot.get_timings(); } - queue_results.send(res); + queue_results.send(std::move(res)); } - void send_final_response(const server_slot & slot) { - server_task_result res; - res.id = slot.id_task; - res.error = false; - res.stop = true; - res.data = json { - {"content", !slot.params.stream ? 
slot.generated_text : ""}, - {"id_slot", slot.id}, - {"stop", true}, - {"model", params.model_alias}, - {"tokens_predicted", slot.n_decoded}, - {"tokens_evaluated", slot.n_prompt_tokens}, - {"generation_settings", get_formated_generation(slot)}, - {"prompt", common_detokenize(ctx, slot.prompt_tokens)}, - {"has_new_line", slot.has_new_line}, - {"truncated", slot.truncated}, - {"stopped_eos", slot.stopped_eos}, - {"stopped_word", slot.stopped_word}, - {"stopped_limit", slot.stopped_limit}, - {"stopping_word", slot.stopping_word}, - {"tokens_cached", slot.n_past}, - {"timings", slot.get_formated_timings()}, - {"index", slot.index}, - }; - - if (slot.sparams.n_probs > 0) { - std::vector probs; - if (!slot.params.stream && slot.stopped_word) { + void send_final_response(server_slot & slot) { + auto res = std::make_unique(); + res->id = slot.id_task; + res->id_slot = slot.id; + + res->index = slot.index; + res->content = slot.generated_text; + res->tokens = slot.generated_tokens; + res->timings = slot.get_timings(); + res->prompt = common_detokenize(ctx, slot.prompt_tokens, true); + + res->truncated = slot.truncated; + res->n_decoded = slot.n_decoded; + res->n_prompt_tokens = slot.n_prompt_tokens; + res->n_tokens_cached = slot.n_past; + res->has_new_line = slot.has_new_line; + res->stopping_word = slot.stopping_word; + res->stop = slot.stop; + res->post_sampling_probs = slot.params.post_sampling_probs; + + res->verbose = slot.params.verbose; + res->stream = slot.params.stream; + res->oaicompat = slot.params.oaicompat; + res->oaicompat_chat = slot.params.oaicompat_chat; + res->oaicompat_model = slot.params.oaicompat_model; + res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id; + + // populate res.probs_output + if (slot.params.sampling.n_probs > 0) { + if (!slot.params.stream && slot.stop == STOP_TYPE_WORD) { const llama_tokens stop_word_toks = common_tokenize(ctx, slot.stopping_word, false); size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size()); - probs = std::vector( + res->probs_output = std::vector( slot.generated_token_probs.begin(), slot.generated_token_probs.end() - safe_offset); } else { - probs = std::vector( + res->probs_output = std::vector( slot.generated_token_probs.begin(), slot.generated_token_probs.end()); } - - res.data["completion_probabilities"] = probs_vector_to_json(ctx, probs); } - if (slot.oaicompat) { - res.data["oaicompat_token_ctr"] = slot.n_decoded; - res.data["model"] = slot.oaicompat_model; - } + res->generation_params = slot.params; // copy the parameters - queue_results.send(res); + queue_results.send(std::move(res)); } void send_embedding(const server_slot & slot, const llama_batch & batch) { - server_task_result res; - res.id = slot.id_task; - res.error = false; - res.stop = true; + auto res = std::make_unique(); + res->id = slot.id_task; + res->index = slot.index; + res->n_tokens = slot.n_prompt_tokens; + res->oaicompat = slot.params.oaicompat; const int n_embd = llama_n_embd(model); @@ -1314,32 +2125,30 @@ struct server_context { if (embd == NULL) { SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]); - res.data = json { - {"embedding", std::vector(n_embd, 0.0f)}, - {"index", slot.index}, - }; - + res->embedding.push_back(std::vector(n_embd, 0.0f)); continue; } - common_embd_normalize(embd, embd_res.data(), n_embd); - - res.data = json { - {"embedding", embd_res}, - {"index", slot.index}, - }; + // normalize only when there is pooling + // TODO: configurable + if 
(llama_pooling_type(slot.ctx) != LLAMA_POOLING_TYPE_NONE) { + common_embd_normalize(embd, embd_res.data(), n_embd, 2); + res->embedding.push_back(embd_res); + } else { + res->embedding.push_back({ embd, embd + n_embd }); + } } SLT_DBG(slot, "%s", "sending embeddings\n"); - queue_results.send(res); + queue_results.send(std::move(res)); } void send_rerank(const server_slot & slot, const llama_batch & batch) { - server_task_result res; - res.id = slot.id_task; - res.error = false; - res.stop = true; + auto res = std::make_unique(); + res->id = slot.id_task; + res->index = slot.index; + res->n_tokens = slot.n_prompt_tokens; for (int i = 0; i < batch.n_tokens; ++i) { if (!batch.logits[i] || batch.seq_id[i][0] != slot.id) { @@ -1354,104 +2163,29 @@ struct server_context { if (embd == NULL) { SLT_ERR(slot, "failed to get embeddings, token = %d, seq_id = %d\n", batch.token[i], batch.seq_id[i][0]); - res.data = json { - {"index", slot.index}, - {"score", -1e6}, - }; - + res->score = -1e6; continue; } - res.data = json { - {"index", slot.index}, - {"score", embd[0]}, - }; + res->score = embd[0]; } - SLT_DBG(slot, "sending rerank result, res = '%s'\n", res.data.dump().c_str()); + SLT_DBG(slot, "sending rerank result, res.score = %f\n", res->score); - queue_results.send(res); + queue_results.send(std::move(res)); } // // Functions to create new task(s) and receive result(s) // - // break the input "prompt" into multiple tasks if needed, then format and tokenize the input prompt(s) - std::vector create_tasks_inference(json data, server_task_inf_type inf_type) { - std::vector tasks; - auto create_task = [&](json & task_data, llama_tokens & prompt_tokens) { - SRV_DBG("create task, n_tokens = %d\n", (int) prompt_tokens.size()); - server_task task; - task.id = queue_tasks.get_new_id(); - task.inf_type = inf_type; - task.type = SERVER_TASK_TYPE_INFERENCE; - task.data = task_data; - task.prompt_tokens = std::move(prompt_tokens); - tasks.push_back(std::move(task)); - }; - - static constexpr const char * error_msg = "\"prompt\" must be a string, an array of token ids or an array of prompts"; - if (!data.contains("prompt")) { - throw std::runtime_error(error_msg); - } - - // because llama_tokenize api is thread-safe, we can tokenize the prompt from HTTP thread - bool add_special = inf_type != SERVER_TASK_INF_TYPE_RERANK && inf_type != SERVER_TASK_INF_TYPE_INFILL; - std::vector tokenized_prompts = tokenize_input_prompts(ctx, data.at("prompt"), add_special, true); - switch (inf_type) { - case SERVER_TASK_INF_TYPE_RERANK: - { - // prompts[0] is the question - // the rest are the answers/documents - GGML_ASSERT(tokenized_prompts.size() > 1); - SRV_DBG("creating rerank tasks, n_prompts = %d\n", (int) tokenized_prompts.size() - 1); - for (size_t i = 1; i < tokenized_prompts.size(); i++) { - data["index"] = i - 1; - auto tokens = format_rerank(model, tokenized_prompts[0], tokenized_prompts[i]); - create_task(data, tokens); - } - } break; - case SERVER_TASK_INF_TYPE_INFILL: - { - SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size()); - for (size_t i = 0; i < tokenized_prompts.size(); i++) { - data["index"] = i; - auto tokens = format_infill( - ctx, - data.at("input_prefix"), - data.at("input_suffix"), - data.at("input_extra"), - params.n_batch, - params.n_predict, - slots[0].n_ctx, // TODO: there should be a better way - params.spm_infill, - tokenized_prompts[i] - ); - create_task(data, tokens); - } - } break; - default: - { - SRV_DBG("creating multi-prompt tasks, n_prompts = %d\n", (int) 
tokenized_prompts.size()); - for (size_t i = 0; i < tokenized_prompts.size(); i++) { - data["index"] = i; - create_task(data, tokenized_prompts[i]); - } - } - } - - return tasks; - } - void cancel_tasks(const std::unordered_set & id_tasks) { std::vector cancel_tasks; cancel_tasks.reserve(id_tasks.size()); for (const auto & id_task : id_tasks) { SRV_WRN("cancel task, id_task = %d\n", id_task); - server_task task; - task.type = SERVER_TASK_TYPE_CANCEL; + server_task task(SERVER_TASK_TYPE_CANCEL); task.id_target = id_task; cancel_tasks.push_back(task); queue_results.remove_waiting_task_id(id_task); @@ -1460,50 +2194,58 @@ struct server_context { queue_tasks.post(cancel_tasks, true); } - // receive the results from task(s) created by create_tasks_inference - void receive_cmpl_results( + // receive the results from task(s) + void receive_multi_results( const std::unordered_set & id_tasks, - const std::function&)> & result_handler, + const std::function&)> & result_handler, const std::function & error_handler) { - // TODO: currently, there is no way to detect the client has cancelled the request - std::vector results(id_tasks.size()); + std::vector results(id_tasks.size()); for (size_t i = 0; i < id_tasks.size(); i++) { - server_task_result result = queue_results.recv(id_tasks); + server_task_result_ptr result = queue_results.recv(id_tasks); - if (result.error) { - error_handler(result.data); + if (result->is_error()) { + error_handler(result->to_json()); cancel_tasks(id_tasks); return; } - const size_t idx = result.data["index"]; + GGML_ASSERT( + dynamic_cast(result.get()) != nullptr + || dynamic_cast(result.get()) != nullptr + || dynamic_cast(result.get()) != nullptr + ); + const size_t idx = result->get_index(); GGML_ASSERT(idx < results.size() && "index out of range"); - - results[idx] = result; + results[idx] = std::move(result); } result_handler(results); } - // receive the results from task(s) created by create_tasks_inference, in stream mode + // receive the results from task(s), in stream mode void receive_cmpl_results_stream( - const std::unordered_set & id_tasks, const - std::function & result_handler, const - std::function & error_handler) { + const std::unordered_set & id_tasks, + const std::function & result_handler, + const std::function & error_handler) { size_t n_finished = 0; while (true) { - server_task_result result = queue_results.recv(id_tasks); - if (!result_handler(result)) { + server_task_result_ptr result = queue_results.recv(id_tasks); + + if (result->is_error()) { + error_handler(result->to_json()); cancel_tasks(id_tasks); - break; + return; } - if (result.error) { - error_handler(result.data); + GGML_ASSERT( + dynamic_cast(result.get()) != nullptr + || dynamic_cast(result.get()) != nullptr + ); + if (!result_handler(result)) { cancel_tasks(id_tasks); break; } - if (result.stop) { + if (result->is_stop()) { if (++n_finished == id_tasks.size()) { break; } @@ -1517,9 +2259,12 @@ struct server_context { void process_single_task(server_task task) { switch (task.type) { - case SERVER_TASK_TYPE_INFERENCE: + case SERVER_TASK_TYPE_COMPLETION: + case SERVER_TASK_TYPE_INFILL: + case SERVER_TASK_TYPE_EMBEDDING: + case SERVER_TASK_TYPE_RERANK: { - const int id_slot = json_value(task.data, "id_slot", -1); + const int id_slot = task.id_selected_slot; server_slot * slot = id_slot != -1 ? 
get_slot_by_id(id_slot) : get_available_slot(task); @@ -1536,13 +2281,6 @@ struct server_context { break; } - slot->reset(); - - slot->id_task = task.id; - slot->inf_type = task.inf_type; - slot->index = json_value(task.data, "index", 0); - slot->prompt_tokens = std::move(task.prompt_tokens); - if (!launch_slot_with_task(*slot, task)) { SRV_ERR("failed to launch slot with task, id_task = %d\n", task.id); break; @@ -1570,21 +2308,7 @@ struct server_context { int n_processing_slots = 0; for (server_slot & slot : slots) { - json slot_data = get_formated_generation(slot); - slot_data["id"] = slot.id; - slot_data["id_task"] = slot.id_task; - slot_data["is_processing"] = slot.is_processing(); - slot_data["prompt"] = common_detokenize(ctx, slot.prompt_tokens); - slot_data["next_token"] = { - {"has_next_token", slot.has_next_token}, - {"has_new_line", slot.has_new_line}, - {"n_remain", slot.n_remaining}, - {"n_decoded", slot.n_decoded}, - {"stopped_eos", slot.stopped_eos}, - {"stopped_word", slot.stopped_word}, - {"stopped_limit", slot.stopped_limit}, - {"stopping_word", slot.stopping_word}, - }; + json slot_data = slot.to_json(); if (slot.is_processing()) { n_processing_slots++; @@ -1596,43 +2320,38 @@ struct server_context { } SRV_DBG("n_idle_slots = %d, n_processing_slots = %d\n", n_idle_slots, n_processing_slots); - server_task_result res; - res.id = task.id; - res.stop = true; - res.error = false; - res.data = { - { "idle", n_idle_slots }, - { "processing", n_processing_slots }, - { "deferred", queue_tasks.queue_tasks_deferred.size() }, - { "t_start", metrics.t_start}, - - { "n_prompt_tokens_processed_total", metrics.n_prompt_tokens_processed_total}, - { "t_tokens_generation_total", metrics.t_tokens_generation_total}, - { "n_tokens_predicted_total", metrics.n_tokens_predicted_total}, - { "t_prompt_processing_total", metrics.t_prompt_processing_total}, + auto res = std::make_unique(); + res->id = task.id; + res->slots_data = std::move(slots_data); + res->n_idle_slots = n_idle_slots; + res->n_processing_slots = n_processing_slots; + res->n_tasks_deferred = queue_tasks.queue_tasks_deferred.size(); + res->t_start = metrics.t_start; - { "n_prompt_tokens_processed", metrics.n_prompt_tokens_processed}, - { "t_prompt_processing", metrics.t_prompt_processing}, - { "n_tokens_predicted", metrics.n_tokens_predicted}, - { "t_tokens_generation", metrics.t_tokens_generation}, + res->kv_cache_tokens_count = llama_get_kv_cache_token_count(ctx); + res->kv_cache_used_cells = llama_get_kv_cache_used_cells(ctx); - { "n_decode_total", metrics.n_decode_total}, - { "n_busy_slots_total", metrics.n_busy_slots_total}, + res->n_prompt_tokens_processed_total = metrics.n_prompt_tokens_processed_total; + res->t_prompt_processing_total = metrics.t_prompt_processing_total; + res->n_tokens_predicted_total = metrics.n_tokens_predicted_total; + res->t_tokens_generation_total = metrics.t_tokens_generation_total; - { "kv_cache_tokens_count", llama_get_kv_cache_token_count(ctx)}, - { "kv_cache_used_cells", llama_get_kv_cache_used_cells(ctx)}, + res->n_prompt_tokens_processed = metrics.n_prompt_tokens_processed; + res->t_prompt_processing = metrics.t_prompt_processing; + res->n_tokens_predicted = metrics.n_tokens_predicted; + res->t_tokens_generation = metrics.t_tokens_generation; - { "slots", slots_data }, - }; + res->n_decode_total = metrics.n_decode_total; + res->n_busy_slots_total = metrics.n_busy_slots_total; - if (json_value(task.data, "reset_bucket", false)) { + if (task.metrics_reset_bucket) { metrics.reset_bucket(); } - 
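The counters copied into server_task_result_metrics here are cumulative, and the t_* fields are kept in milliseconds, so throughput has to be derived by the consumer (the /metrics endpoint does the equivalent). A minimal sketch of that arithmetic; metrics_snapshot and tokens_per_second are illustrative names, with the struct trimmed to the four fields involved:

    #include <cstdint>
    #include <cstdio>

    struct metrics_snapshot {
        uint64_t n_prompt_tokens_processed = 0;
        uint64_t t_prompt_processing       = 0; // ms
        uint64_t n_tokens_predicted        = 0;
        uint64_t t_tokens_generation       = 0; // ms
    };

    // tokens per second from a token count and a duration in milliseconds
    static double tokens_per_second(uint64_t n_tokens, uint64_t t_ms) {
        return t_ms > 0 ? 1e3 * (double) n_tokens / (double) t_ms : 0.0;
    }

    int main() {
        const metrics_snapshot m = { 4096, 1800, 256, 6400 };
        std::printf("prompt eval: %.2f tok/s\n", tokens_per_second(m.n_prompt_tokens_processed, m.t_prompt_processing));
        std::printf("generation : %.2f tok/s\n", tokens_per_second(m.n_tokens_predicted, m.t_tokens_generation));
    }

Judging by the total/windowed split of the fields above, task.metrics_reset_bucket clears only the windowed counters while the *_total ones keep growing for the lifetime of the server, which is what makes rate computation over successive scrapes possible.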
queue_results.send(res); + queue_results.send(std::move(res)); } break; case SERVER_TASK_TYPE_SLOT_SAVE: { - int id_slot = task.data.at("id_slot"); + int id_slot = task.slot_action.slot_id; server_slot * slot = get_slot_by_id(id_slot); if (slot == nullptr) { send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST); @@ -1648,32 +2367,27 @@ struct server_context { const size_t token_count = slot->cache_tokens.size(); const int64_t t_start = ggml_time_us(); - std::string filename = task.data.at("filename"); - std::string filepath = task.data.at("filepath"); + std::string filename = task.slot_action.filename; + std::string filepath = task.slot_action.filepath; const size_t nwrite = llama_state_seq_save_file(ctx, filepath.c_str(), slot->id, slot->cache_tokens.data(), token_count); const int64_t t_end = ggml_time_us(); const double t_save_ms = (t_end - t_start) / 1000.0; - server_task_result result; - result.id = task.id; - result.stop = true; - result.error = false; - result.data = json { - { "id_slot", id_slot }, - { "filename", filename }, - { "n_saved", token_count }, // tokens saved - { "n_written", nwrite }, // bytes written - { "timings", { - { "save_ms", t_save_ms } - } } - }; - queue_results.send(result); + auto res = std::make_unique(); + res->id = task.id; + res->id_slot = id_slot; + res->filename = filename; + res->is_save = true; + res->n_tokens = token_count; + res->n_bytes = nwrite; + res->t_ms = t_save_ms; + queue_results.send(std::move(res)); } break; case SERVER_TASK_TYPE_SLOT_RESTORE: { - int id_slot = task.data.at("id_slot"); + int id_slot = task.slot_action.slot_id; server_slot * slot = get_slot_by_id(id_slot); if (slot == nullptr) { send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST); @@ -1688,8 +2402,8 @@ struct server_context { const int64_t t_start = ggml_time_us(); - std::string filename = task.data.at("filename"); - std::string filepath = task.data.at("filepath"); + std::string filename = task.slot_action.filename; + std::string filepath = task.slot_action.filepath; slot->cache_tokens.resize(slot->n_ctx); size_t token_count = 0; @@ -1704,24 +2418,19 @@ struct server_context { const int64_t t_end = ggml_time_us(); const double t_restore_ms = (t_end - t_start) / 1000.0; - server_task_result result; - result.id = task.id; - result.stop = true; - result.error = false; - result.data = json { - { "id_slot", id_slot }, - { "filename", filename }, - { "n_restored", token_count }, // tokens restored - { "n_read", nread }, // bytes read - { "timings", { - { "restore_ms", t_restore_ms } - } } - }; - queue_results.send(result); + auto res = std::make_unique(); + res->id = task.id; + res->id_slot = id_slot; + res->filename = filename; + res->is_save = false; + res->n_tokens = token_count; + res->n_bytes = nread; + res->t_ms = t_restore_ms; + queue_results.send(std::move(res)); } break; case SERVER_TASK_TYPE_SLOT_ERASE: { - int id_slot = task.data.at("id_slot"); + int id_slot = task.slot_action.slot_id; server_slot * slot = get_slot_by_id(id_slot); if (slot == nullptr) { send_error(task, "Invalid slot ID", ERROR_TYPE_INVALID_REQUEST); @@ -1739,25 +2448,18 @@ struct server_context { llama_kv_cache_seq_rm(ctx, slot->id, -1, -1); slot->cache_tokens.clear(); - server_task_result result; - result.id = task.id; - result.stop = true; - result.error = false; - result.data = json { - { "id_slot", id_slot }, - { "n_erased", n_erased } - }; - queue_results.send(result); + auto res = std::make_unique(); + res->id = task.id; + res->id_slot = id_slot; + res->n_erased = 
             case SERVER_TASK_TYPE_SET_LORA:
                 {
                     common_lora_adapters_apply(ctx, loras);
-                    server_task_result result;
-                    result.id = task.id;
-                    result.stop = true;
-                    result.error = false;
-                    result.data = json{{ "success", true }};
-                    queue_results.send(result);
+                    auto res = std::make_unique<server_task_result_apply_lora>();
+                    res->id = task.id;
+                    queue_results.send(std::move(res));
                 } break;
         }
     }
@@ -1787,10 +2489,8 @@ struct server_context {
         {
             SRV_DBG("%s", "posting NEXT_RESPONSE\n");

-            server_task task;
-            task.type      = SERVER_TASK_TYPE_NEXT_RESPONSE;
-            task.id_target = -1;
-
+            server_task task(SERVER_TASK_TYPE_NEXT_RESPONSE);
+            task.id = queue_tasks.get_new_id();
             queue_tasks.post(task);
         }

@@ -1798,7 +2498,7 @@ struct server_context {
         // TODO: simplify and improve
         for (server_slot & slot : slots) {
             if (slot.is_processing() && slot.n_past + 1 >= slot.n_ctx) {
-                if (!params.ctx_shift) {
+                if (!params_base.ctx_shift) {
                     // this check is redundant (for good)
                     // we should never get here, because generation should already stopped in process_token()
                     slot.release();
@@ -1864,7 +2564,7 @@ struct server_context {
         int32_t batch_type = batch.n_tokens > 0 ? 0 : -1;

         // next, batch any pending prompts without exceeding n_batch
-        if (params.cont_batching || batch.n_tokens == 0) {
+        if (params_base.cont_batching || batch.n_tokens == 0) {
             for (auto & slot : slots) {
                 // this slot still has a prompt to be processed
                 if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_STARTED) {
@@ -1904,7 +2604,7 @@ struct server_context {
                         continue;
                     }

-                    if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING || slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
+                    if (slot.is_non_causal()) {
                         if (slot.n_prompt_tokens > n_ubatch) {
                             slot.release();
                             send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER);
@@ -1917,7 +2617,7 @@ struct server_context {
                             continue;
                         }
                     } else {
-                        if (!params.ctx_shift) {
+                        if (!params_base.ctx_shift) {
                             // if context shift is disabled, we make sure prompt size is smaller than KV size
                             // TODO: there should be a separate parameter that control prompt truncation
                             // context shift should be applied only during the generation phase
@@ -1960,14 +2660,14 @@ struct server_context {
                         if (slot.params.cache_prompt) {
                             // reuse any previously computed tokens that are common with the new prompt
-                            slot.n_past = longest_common_prefix(slot.cache_tokens, prompt_tokens);
+                            slot.n_past = common_lcp(slot.cache_tokens, prompt_tokens);

                             // reuse chunks from the cached prompt by shifting their KV cache in the new position
-                            if (params.n_cache_reuse > 0) {
+                            if (params_base.n_cache_reuse > 0) {
                                 size_t head_c = slot.n_past; // cache
                                 size_t head_p = slot.n_past; // current prompt

-                                SLT_DBG(slot, "trying to reuse chunks with size > %d, slot.n_past = %d\n", params.n_cache_reuse, slot.n_past);
+                                SLT_DBG(slot, "trying to reuse chunks with size > %d, slot.n_past = %d\n", params_base.n_cache_reuse, slot.n_past);

                                 while (head_c < slot.cache_tokens.size() &&
                                        head_p < prompt_tokens.size()) {
@@ -1980,7 +2680,7 @@ struct server_context {
                                         n_match++;
                                     }

-                                    if (n_match >= (size_t) params.n_cache_reuse) {
+                                    if (n_match >= (size_t) params_base.n_cache_reuse) {
                                         SLT_INF(slot, "reusing chunk with size %zu, shifting KV cache [%zu, %zu) -> [%zu, %zu)\n", n_match, head_c, head_c + n_match, head_p, head_p + n_match);
                                         //for (size_t i = head_p; i < head_p + n_match; i++) {
                                         //    SLT_DBG(slot, "cache token %3zu: %6d '%s'\n", i, prompt_tokens[i], common_token_to_piece(ctx, prompt_tokens[i]).c_str());
@@ -2019,7 +2719,7 @@ struct server_context {
                     }

                     // non-causal tasks require to fit the entire prompt in the physical batch
-                    if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING || slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
+                    if (slot.is_non_causal()) {
                         // cannot fit the prompt in the current batch - will try next iter
                         if (batch.n_tokens + slot.n_prompt_tokens > n_batch) {
                             continue;
@@ -2027,10 +2727,7 @@ struct server_context {
                     }

                     // check that we are in the right batch_type, if not defer the slot
-                    const bool slot_type =
-                        slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING ||
-                        slot.inf_type == SERVER_TASK_INF_TYPE_RERANK ? 1 : 0;
-
+                    int slot_type = slot.is_non_causal();
                     if (batch_type == -1) {
                         batch_type = slot_type;
                     } else if (batch_type != slot_type) {
                         continue;
                     }
@@ -2053,7 +2750,10 @@ struct server_context {

                     // add prompt tokens for processing in the current batch
                     while (slot.n_past < slot.n_prompt_tokens && batch.n_tokens < n_batch) {
-                        common_batch_add(batch, prompt_tokens[slot.n_past], slot.n_past, { slot.id }, false);
+                        // without pooling, we want to output the embeddings for all the tokens in the batch
+                        const bool need_embd = slot.task_type == SERVER_TASK_TYPE_EMBEDDING && llama_pooling_type(slot.ctx) == LLAMA_POOLING_TYPE_NONE;
+
+                        common_batch_add(batch, prompt_tokens[slot.n_past], slot.n_past, { slot.id }, need_embd);

                         if (slot.params.cache_prompt) {
                             slot.cache_tokens.push_back(prompt_tokens[slot.n_past]);
@@ -2147,7 +2847,7 @@ struct server_context {
                 }

                 if (slot.state == SLOT_STATE_DONE_PROMPT) {
-                    if (slot.inf_type == SERVER_TASK_INF_TYPE_EMBEDDING) {
+                    if (slot.task_type == SERVER_TASK_TYPE_EMBEDDING) {
                         // prompt evaluated for embedding
                         send_embedding(slot, batch_view);
                         slot.release();
@@ -2155,7 +2855,7 @@ struct server_context {
                         continue; // continue loop of slots
                     }

-                    if (slot.inf_type == SERVER_TASK_INF_TYPE_RERANK) {
+                    if (slot.task_type == SERVER_TASK_TYPE_RERANK) {
                         send_rerank(slot, batch_view);
                         slot.release();
                         slot.i_batch = -1;
@@ -2168,27 +2868,33 @@ struct server_context {
                     continue; // continue loop of slots
                 }

-                completion_token_output result;
-                const llama_token id = common_sampler_sample(slot.smpl, ctx, slot.i_batch - i);
+                const int tok_idx = slot.i_batch - i;
+
+                llama_token id = common_sampler_sample(slot.smpl, ctx, tok_idx);
+
+                slot.i_batch = -1;

                 common_sampler_accept(slot.smpl, id, true);

                 slot.n_decoded += 1;
+
+                const int64_t t_current = ggml_time_us();
+
                 if (slot.n_decoded == 1) {
-                    slot.t_start_generation = ggml_time_us();
+                    slot.t_start_generation = t_current;
                     slot.t_prompt_processing = (slot.t_start_generation - slot.t_start_process_prompt) / 1e3;
                     metrics.on_prompt_eval(slot);
                 }

-                result.tok = id;
+                slot.t_token_generation = (t_current - slot.t_start_generation) / 1e3;

-                const auto * cur_p = common_sampler_get_candidates(slot.smpl);
+                completion_token_output result;
+                result.tok          = id;
+                result.text_to_send = common_token_to_piece(ctx, result.tok, params_base.special);
+                result.prob         = 1.0f; // TODO: set it here instead of doing inside populate_token_probs

-                for (size_t i = 0; i < (size_t) slot.sparams.n_probs; ++i) {
-                    result.probs.push_back({
-                        cur_p->data[i].id,
-                        i >= cur_p->size ? 0.0f : cur_p->data[i].p,
-                    });
+                if (slot.params.sampling.n_probs > 0) {
+                    populate_token_probs(slot, result, slot.params.post_sampling_probs, params_base.special, tok_idx);
                 }

                 if (!process_token(result, slot)) {
@@ -2197,9 +2903,98 @@ struct server_context {
                     slot.print_timings();
                     send_final_response(slot);
                     metrics.on_prediction(slot);
+                    continue;
                 }
+            }

-                slot.i_batch = -1;
+            // do speculative decoding
+            for (auto & slot : slots) {
+                if (!slot.is_processing() || !slot.can_speculate()) {
+                    continue;
+                }
+
+                if (slot.state != SLOT_STATE_GENERATING) {
+                    continue;
+                }
+
+                // determine the max draft that fits the current slot state
+                int n_draft_max = slot.params.speculative.n_max;
+
+                // note: n_past is not yet increased for the `id` token sampled above
+                //       also, need to leave space for 1 extra token to allow context shifts
+                n_draft_max = std::min(n_draft_max, slot.n_ctx - slot.n_past - 2);
+
+                if (slot.n_remaining > 0) {
+                    n_draft_max = std::min(n_draft_max, slot.n_remaining - 1);
+                }
+
+                SLT_DBG(slot, "max possible draft: %d\n", n_draft_max);
+
+                if (n_draft_max < slot.params.speculative.n_min) {
+                    SLT_DBG(slot, "the max possible draft is too small: %d < %d - skipping speculative decoding\n", n_draft_max, slot.params.speculative.n_min);
+
+                    continue;
+                }
+
+                llama_token id = slot.sampled;
+
+                struct common_speculative_params params_spec;
+                params_spec.n_draft = n_draft_max;
+                params_spec.n_reuse = llama_n_ctx(slot.ctx_dft) - slot.params.speculative.n_max;
+                params_spec.p_min   = slot.params.speculative.p_min;
+
+                llama_tokens draft = common_speculative_gen_draft(slot.spec, params_spec, slot.cache_tokens, id);
+
+                // ignore small drafts
+                if (slot.params.speculative.n_min > (int) draft.size()) {
+                    SLT_DBG(slot, "ignoring small draft: %d < %d\n", (int) draft.size(), slot.params.speculative.n_min);
+
+                    continue;
+                }
+
+                // construct the speculation batch
+                common_batch_clear(slot.batch_spec);
+                common_batch_add (slot.batch_spec, id, slot.n_past, { slot.id }, true);
+
+                for (size_t i = 0; i < draft.size(); ++i) {
+                    common_batch_add(slot.batch_spec, draft[i], slot.n_past + 1 + i, { slot.id }, true);
+                }
+
+                SLT_DBG(slot, "decoding speculative batch, size = %d\n", slot.batch_spec.n_tokens);
+
+                llama_decode(ctx, slot.batch_spec);
+
+                // the accepted tokens from the speculation
+                const auto ids = common_sampler_sample_and_accept_n(slot.smpl, ctx, draft);
+
+                slot.n_past    += ids.size();
+                slot.n_decoded += ids.size();
+
+                slot.cache_tokens.push_back(id);
+                slot.cache_tokens.insert(slot.cache_tokens.end(), ids.begin(), ids.end() - 1);
+
+                llama_kv_cache_seq_rm(ctx, slot.id, slot.n_past, -1);
+
+                for (size_t i = 0; i < ids.size(); ++i) {
+                    completion_token_output result;
+
+                    result.tok          = ids[i];
+                    result.text_to_send = common_token_to_piece(ctx, result.tok, params_base.special);
+                    result.prob         = 1.0f; // set later
+
+                    // TODO: set result.probs
+
+                    if (!process_token(result, slot)) {
+                        // release slot because of stop condition
+                        slot.release();
+                        slot.print_timings();
+                        send_final_response(slot);
+                        metrics.on_prediction(slot);
+                        break;
+                    }
+                }
+
+                SLT_DBG(slot, "accepted %d/%d draft tokens, new n_past = %d\n", (int) ids.size() - 1, (int) draft.size(), slot.n_past);
+            }
         }
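(The speculative path above only runs when a slot has a draft context, i.e. `slot.can_speculate()`. A hedged launch sketch — the draft-model flag names are assumed from the common argument parser and should be verified against `llama-server --help`:)

```shell
# main model plus a smaller draft model for speculative decoding
# (-md / --draft-max / --draft-min are assumed flag names)
./llama-server -m model-7b.gguf \
    -md model-1b-draft.gguf \
    --draft-max 16 --draft-min 5
```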
@@ -2254,17 +3049,9 @@ int main(int argc, char ** argv) {

     common_init();

-    // enabling this will output extra debug information in the HTTP responses from the server
-    // see format_final_response_oaicompat()
-    const bool verbose = params.verbosity > 9;
-
     // struct that contains llama context and inference
     server_context ctx_server;

-    if (params.model_alias == "unknown") {
-        params.model_alias = params.model;
-    }
-
     llama_backend_init();
     llama_numa_init(params.numa);

@@ -2273,16 +3060,6 @@ int main(int argc, char ** argv) {
     LOG_INF("%s\n", common_params_get_system_info(params).c_str());
     LOG_INF("\n");

-    // static files
-    std::map<std::string, server_static_file> static_files = {
-        { "/",                        { index_html,              index_html_len,              "text/html; charset=utf-8" }},
-        { "/completion.js",           { completion_js,           completion_js_len,           "text/javascript; charset=utf-8" }},
-        { "/deps_daisyui.min.css",    { deps_daisyui_min_css,    deps_daisyui_min_css_len,    "text/css; charset=utf-8" }},
-        { "/deps_markdown-it.js",     { deps_markdown_it_js,     deps_markdown_it_js_len,     "text/javascript; charset=utf-8" }},
-        { "/deps_tailwindcss.js",     { deps_tailwindcss_js,     deps_tailwindcss_js_len,     "text/javascript; charset=utf-8" }},
-        { "/deps_vue.esm-browser.js", { deps_vue_esm_browser_js, deps_vue_esm_browser_js_len, "text/javascript; charset=utf-8" }},
-    };
-
     std::unique_ptr<httplib::Server> svr;
 #ifdef CPPHTTPLIB_OPENSSL_SUPPORT
     if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
@@ -2309,20 +3086,20 @@ int main(int argc, char ** argv) {

     auto res_error = [](httplib::Response & res, const json & error_data) {
         json final_response {{"error", error_data}};
-        res.set_content(final_response.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
+        res.set_content(safe_json_to_str(final_response), MIMETYPE_JSON);
         res.status = json_value(error_data, "code", 500);
     };

     auto res_ok = [](httplib::Response & res, const json & data) {
-        res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace), MIMETYPE_JSON);
+        res.set_content(safe_json_to_str(data), MIMETYPE_JSON);
         res.status = 200;
     };

-    svr->set_exception_handler([&res_error](const httplib::Request &, httplib::Response & res, std::exception_ptr ep) {
+    svr->set_exception_handler([&res_error](const httplib::Request &, httplib::Response & res, const std::exception_ptr & ep) {
         std::string message;
         try {
             std::rethrow_exception(ep);
-        } catch (std::exception & e) {
+        } catch (const std::exception & e) {
             message = e.what();
         } catch (...) {
             message = "Unknown Exception";
@@ -2363,7 +3140,7 @@ int main(int argc, char ** argv) {
     // Middlewares
     //

-    auto middleware_validate_api_key = [&params, &res_error, &static_files](const httplib::Request & req, httplib::Response & res) {
+    auto middleware_validate_api_key = [&params, &res_error](const httplib::Request & req, httplib::Response & res) {
         static const std::unordered_set<std::string> public_endpoints = {
             "/health",
             "/models",
@@ -2376,7 +3153,7 @@ int main(int argc, char ** argv) {
         }

         // If path is public or is static file, skip validation
-        if (public_endpoints.find(req.path) != public_endpoints.end() || static_files.find(req.path) != static_files.end()) {
+        if (public_endpoints.find(req.path) != public_endpoints.end() || req.path == "/") {
             return true;
         }

@@ -2451,27 +3228,33 @@ int main(int argc, char ** argv) {
         }

         // request slots data using task queue
-        server_task task;
+        server_task task(SERVER_TASK_TYPE_METRICS);
         task.id = ctx_server.queue_tasks.get_new_id();
-        task.type = SERVER_TASK_TYPE_METRICS;
-
         ctx_server.queue_results.add_waiting_task_id(task.id);
         ctx_server.queue_tasks.post(task, true); // high-priority task

         // get the result
-        server_task_result result = ctx_server.queue_results.recv(task.id);
+        server_task_result_ptr result = ctx_server.queue_results.recv(task.id);
         ctx_server.queue_results.remove_waiting_task_id(task.id);

+        if (result->is_error()) {
+            res_error(res, result->to_json());
+            return;
+        }
+
+        // TODO: get rid of this dynamic_cast
+        auto res_metrics = dynamic_cast<server_task_result_metrics *>(result.get());
+        GGML_ASSERT(res_metrics != nullptr);
+
         // optionally return "fail_on_no_slot" error
-        const int n_idle_slots = result.data.at("idle");
         if (req.has_param("fail_on_no_slot")) {
-            if (n_idle_slots == 0) {
+            if (res_metrics->n_idle_slots == 0) {
                 res_error(res, format_error_response("no slot available", ERROR_TYPE_UNAVAILABLE));
                 return;
             }
         }

-        res_ok(res, result.data.at("slots"));
+        res_ok(res, res_metrics->slots_data);
     };

     const auto handle_metrics = [&](const httplib::Request &, httplib::Response & res) {
@@ -2481,83 +3264,77 @@ int main(int argc, char ** argv) {
         }

         // request slots data using task queue
-        server_task task;
+        server_task task(SERVER_TASK_TYPE_METRICS);
         task.id = ctx_server.queue_tasks.get_new_id();
-        task.id_target = -1;
-        task.type = SERVER_TASK_TYPE_METRICS;
-        task.data.push_back({{"reset_bucket", true}});
+        task.metrics_reset_bucket = true;

         ctx_server.queue_results.add_waiting_task_id(task.id);
         ctx_server.queue_tasks.post(task, true); // high-priority task

         // get the result
-        server_task_result result = ctx_server.queue_results.recv(task.id);
+        server_task_result_ptr result = ctx_server.queue_results.recv(task.id);
         ctx_server.queue_results.remove_waiting_task_id(task.id);

-        json data = result.data;
-
-        const uint64_t n_prompt_tokens_processed = data.at("n_prompt_tokens_processed");
-        const uint64_t t_prompt_processing       = data.at("t_prompt_processing");
-
-        const uint64_t n_tokens_predicted  = data.at("n_tokens_predicted");
-        const uint64_t t_tokens_generation = data.at("t_tokens_generation");
-
-        const uint64_t n_decode_total     = data.at("n_decode_total");
-        const uint64_t n_busy_slots_total = data.at("n_busy_slots_total");
+        if (result->is_error()) {
+            res_error(res, result->to_json());
+            return;
+        }

-        const int32_t kv_cache_used_cells = data.at("kv_cache_used_cells");
+        // TODO: get rid of this dynamic_cast
+        auto res_metrics = dynamic_cast<server_task_result_metrics *>(result.get());
+        GGML_ASSERT(res_metrics != nullptr);

         // metrics definition: https://prometheus.io/docs/practices/naming/#metric-names
         json all_metrics_def = json {
             {"counter", {{
                     {"name",  "prompt_tokens_total"},
                     {"help",  "Number of prompt tokens processed."},
-                    {"value",  (uint64_t) data.at("n_prompt_tokens_processed_total")}
+                    {"value",  (uint64_t) res_metrics->n_prompt_tokens_processed_total}
             }, {
                     {"name",  "prompt_seconds_total"},
                     {"help",  "Prompt process time"},
-                    {"value",  (uint64_t) data.at("t_prompt_processing_total") / 1.e3}
+                    {"value",  (uint64_t) res_metrics->t_prompt_processing_total / 1.e3}
             }, {
                     {"name",  "tokens_predicted_total"},
                     {"help",  "Number of generation tokens processed."},
-                    {"value",  (uint64_t) data.at("n_tokens_predicted_total")}
+                    {"value",  (uint64_t) res_metrics->n_tokens_predicted_total}
             }, {
                     {"name",  "tokens_predicted_seconds_total"},
                     {"help",  "Predict process time"},
-                    {"value",  (uint64_t) data.at("t_tokens_generation_total") / 1.e3}
+                    {"value",  (uint64_t) res_metrics->t_tokens_generation_total / 1.e3}
             }, {
                     {"name",  "n_decode_total"},
                     {"help",  "Total number of llama_decode() calls"},
-                    {"value",  n_decode_total}
+                    {"value",  res_metrics->n_decode_total}
             }, {
                     {"name",  "n_busy_slots_per_decode"},
                     {"help",  "Average number of busy slots per llama_decode() call"},
-                    {"value",  (float) n_busy_slots_total / (float) n_decode_total}
+                    {"value",  (float) res_metrics->n_busy_slots_total / (float) res_metrics->n_decode_total}
             }}},
             {"gauge", {{
                     {"name",  "prompt_tokens_seconds"},
                     {"help",  "Average prompt throughput in tokens/s."},
-                    {"value",  n_prompt_tokens_processed ? 1.e3 / t_prompt_processing * n_prompt_tokens_processed : 0.}
+                    {"value",  res_metrics->n_prompt_tokens_processed ? 1.e3 / res_metrics->t_prompt_processing * res_metrics->n_prompt_tokens_processed : 0.}
             },{
                     {"name",  "predicted_tokens_seconds"},
                     {"help",  "Average generation throughput in tokens/s."},
-                    {"value",  n_tokens_predicted ? 1.e3 / t_tokens_generation * n_tokens_predicted : 0.}
+                    {"value",  res_metrics->n_tokens_predicted ? 1.e3 / res_metrics->t_tokens_generation * res_metrics->n_tokens_predicted : 0.}
             },{
                     {"name",  "kv_cache_usage_ratio"},
                     {"help",  "KV-cache usage. 1 means 100 percent usage."},
-                    {"value",  1. * kv_cache_used_cells / params.n_ctx}
+                    {"value",  1. * res_metrics->kv_cache_used_cells / params.n_ctx}
             },{
                     {"name",  "kv_cache_tokens"},
                     {"help",  "KV-cache tokens."},
-                    {"value",  (uint64_t) data.at("kv_cache_tokens_count")}
+                    {"value",  (uint64_t) res_metrics->kv_cache_tokens_count}
             },{
                     {"name",  "requests_processing"},
                     {"help",  "Number of request processing."},
-                    {"value",  (uint64_t) data.at("processing")}
+                    {"value",  (uint64_t) res_metrics->n_processing_slots}
             },{
                     {"name",  "requests_deferred"},
                     {"help",  "Number of request deferred."},
-                    {"value",  (uint64_t) data.at("deferred")}
+                    {"value",  (uint64_t) res_metrics->n_tasks_deferred}
             }}}
         };
@@ -2578,8 +3355,7 @@ int main(int argc, char ** argv) {
             }
         }

-        const int64_t t_start = data.at("t_start");
-        res.set_header("Process-Start-Time-Unix", std::to_string(t_start));
+        res.set_header("Process-Start-Time-Unix", std::to_string(res_metrics->t_start));

         res.set_content(prometheus.str(), "text/plain; version=0.0.4");
         res.status = 200; // HTTP OK
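(The counters and gauges defined above are rendered in the Prometheus text exposition format. A hedged scrape example — the endpoint requires starting the server with `--metrics`, and the exported names may carry a server-specific prefix:)

```shell
# scrape the Prometheus endpoint; names follow the definitions above,
# e.g. prompt_tokens_total, kv_cache_usage_ratio, requests_processing
curl -s http://localhost:8080/metrics
```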
@@ -2594,25 +3370,24 @@ int main(int argc, char ** argv) {
         }
         std::string filepath = params.slot_save_path + filename;

-        server_task task;
-        task.type = SERVER_TASK_TYPE_SLOT_SAVE;
-        task.data = {
-            { "id_slot", id_slot },
-            { "filename", filename },
-            { "filepath", filepath },
-        };
+        server_task task(SERVER_TASK_TYPE_SLOT_SAVE);
+        task.id = ctx_server.queue_tasks.get_new_id();
+        task.slot_action.slot_id  = id_slot;
+        task.slot_action.filename = filename;
+        task.slot_action.filepath = filepath;

-        const int id_task = ctx_server.queue_tasks.post(task);
-        ctx_server.queue_results.add_waiting_task_id(id_task);
+        ctx_server.queue_results.add_waiting_task_id(task.id);
+        ctx_server.queue_tasks.post(task);

-        server_task_result result = ctx_server.queue_results.recv(id_task);
-        ctx_server.queue_results.remove_waiting_task_id(id_task);
+        server_task_result_ptr result = ctx_server.queue_results.recv(task.id);
+        ctx_server.queue_results.remove_waiting_task_id(task.id);

-        if (result.error) {
-            res_error(res, result.data);
-        } else {
-            res_ok(res, result.data);
+        if (result->is_error()) {
+            res_error(res, result->to_json());
+            return;
         }
+
+        res_ok(res, result->to_json());
     };

     const auto handle_slots_restore = [&ctx_server, &res_error, &res_ok, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
@@ -2624,45 +3399,45 @@ int main(int argc, char ** argv) {
         }
         std::string filepath = params.slot_save_path + filename;

-        server_task task;
-        task.type = SERVER_TASK_TYPE_SLOT_RESTORE;
-        task.data = {
-            { "id_slot", id_slot },
-            { "filename", filename },
-            { "filepath", filepath },
-        };
+        server_task task(SERVER_TASK_TYPE_SLOT_RESTORE);
+        task.id = ctx_server.queue_tasks.get_new_id();
+        task.slot_action.slot_id  = id_slot;
+        task.slot_action.filename = filename;
+        task.slot_action.filepath = filepath;

-        const int id_task = ctx_server.queue_tasks.post(task);
-        ctx_server.queue_results.add_waiting_task_id(id_task);
+        ctx_server.queue_results.add_waiting_task_id(task.id);
+        ctx_server.queue_tasks.post(task);

-        server_task_result result = ctx_server.queue_results.recv(id_task);
-        ctx_server.queue_results.remove_waiting_task_id(id_task);
+        server_task_result_ptr result = ctx_server.queue_results.recv(task.id);
+        ctx_server.queue_results.remove_waiting_task_id(task.id);

-        if (result.error) {
-            res_error(res, result.data);
-        } else {
-            res_ok(res, result.data);
+        if (result->is_error()) {
+            res_error(res, result->to_json());
+            return;
         }
+
+        GGML_ASSERT(dynamic_cast<server_task_result_slot_save_load *>(result.get()) != nullptr);
+        res_ok(res, result->to_json());
     };

     const auto handle_slots_erase = [&ctx_server, &res_error, &res_ok](const httplib::Request & /* req */, httplib::Response & res, int id_slot) {
-        server_task task;
-        task.type = SERVER_TASK_TYPE_SLOT_ERASE;
-        task.data = {
-            { "id_slot", id_slot },
-        };
+        server_task task(SERVER_TASK_TYPE_SLOT_ERASE);
+        task.id = ctx_server.queue_tasks.get_new_id();
+        task.slot_action.slot_id = id_slot;

-        const int id_task = ctx_server.queue_tasks.post(task);
-        ctx_server.queue_results.add_waiting_task_id(id_task);
+        ctx_server.queue_results.add_waiting_task_id(task.id);
+        ctx_server.queue_tasks.post(task);

-        server_task_result result = ctx_server.queue_results.recv(id_task);
-        ctx_server.queue_results.remove_waiting_task_id(id_task);
+        server_task_result_ptr result = ctx_server.queue_results.recv(task.id);
+        ctx_server.queue_results.remove_waiting_task_id(task.id);

-        if (result.error) {
-            res_error(res, result.data);
-        } else {
-            res_ok(res, result.data);
+        if (result->is_error()) {
+            res_error(res, result->to_json());
+            return;
         }
+
+        GGML_ASSERT(dynamic_cast<server_task_result_slot_erase *>(result.get()) != nullptr);
+        res_ok(res, result->to_json());
     };

     const auto handle_slots_action = [&params, &res_error, &handle_slots_save, &handle_slots_restore, &handle_slots_erase](const httplib::Request & req, httplib::Response & res) {
@@ -2695,9 +3470,11 @@ int main(int argc, char ** argv) {
     };

     const auto handle_props = [&ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) {
+        // this endpoint is publicly available, please only return what is safe to be exposed
         json data = {
             { "default_generation_settings", ctx_server.default_generation_settings_for_props },
-            { "total_slots",                 ctx_server.params.n_parallel },
+            { "total_slots",                 ctx_server.params_base.n_parallel },
+            { "model_path",                  ctx_server.params_base.model },
             { "chat_template",               llama_get_chat_template(ctx_server.model) },
         };
@@ -2705,7 +3482,7 @@ int main(int argc, char ** argv) {
     };

     const auto handle_props_change = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
-        if (!ctx_server.params.endpoint_props) {
+        if (!ctx_server.params_base.endpoint_props) {
             res_error(res, format_error_response("This server does not support changing global properties. Start it with `--props`", ERROR_TYPE_NOT_SUPPORTED));
             return;
         }
@@ -2717,13 +3494,50 @@ int main(int argc, char ** argv) {
         res_ok(res, {{ "success", true }});
     };

-    const auto handle_completions_generic = [&ctx_server, &res_error, &res_ok](server_task_inf_type inf_type, json & data, httplib::Response & res) {
-        if (ctx_server.params.embedding) {
+    // handle completion-like requests (completion, chat, infill)
+    // we can optionally provide a custom format for partial results and final results
+    const auto handle_completions_generic = [&ctx_server, &res_error, &res_ok](
+            server_task_type type,
+            json & data,
+            httplib::Response & res,
+            bool oaicompat = false,
+            bool oaicompat_chat = false) {
+        GGML_ASSERT(type == SERVER_TASK_TYPE_COMPLETION || type == SERVER_TASK_TYPE_INFILL);
+
+        if (ctx_server.params_base.embedding) {
             res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED));
             return;
         }
-        std::vector<server_task> tasks = ctx_server.create_tasks_inference(data, inf_type);
+        auto completion_id = gen_chatcmplid();
+        std::vector<server_task> tasks;
+
+        try {
+            std::vector<llama_tokens> tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, data.at("prompt"), true, true);
+            tasks.reserve(tokenized_prompts.size());
+            for (size_t i = 0; i < tokenized_prompts.size(); i++) {
+                server_task task = server_task(type);
+
+                task.id    = ctx_server.queue_tasks.get_new_id();
+                task.index = i;
+
+                task.prompt_tokens    = std::move(tokenized_prompts[i]);
+                task.params           = server_task::params_from_json_cmpl(ctx_server.model, ctx_server.ctx, ctx_server.params_base, data);
+                task.id_selected_slot = json_value(data, "id_slot", -1);
+
+                // OAI-compat
+                task.params.oaicompat         = oaicompat;
+                task.params.oaicompat_chat    = oaicompat_chat;
+                task.params.oaicompat_cmpl_id = completion_id;
+                // oaicompat_model is already populated by params_from_json_cmpl
+
+                tasks.push_back(task);
+            }
+        } catch (const std::exception & e) {
+            res_error(res, format_error_response(e.what(), ERROR_TYPE_INVALID_REQUEST));
+            return;
+        }
+
         ctx_server.queue_results.add_waiting_tasks(tasks);
         ctx_server.queue_tasks.post(tasks);
@@ -2731,15 +3545,15 @@ int main(int argc, char ** argv) {
         const auto task_ids = server_task::get_list_id(tasks);

         if (!stream) {
-            ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
+            ctx_server.receive_multi_results(task_ids, [&](std::vector<server_task_result_ptr> & results) {
                 if (results.size() == 1) {
                     // single result
-                    res_ok(res, results[0].data);
+                    res_ok(res, results[0]->to_json());
                 } else {
                     // multiple results (multitask)
                     json arr = json::array();
-                    for (const auto & res : results) {
-                        arr.push_back(res.data);
+                    for (auto & res : results) {
+                        arr.push_back(res->to_json());
                     }
                     res_ok(res, arr);
                 }
@@ -2749,12 +3563,26 @@ int main(int argc, char ** argv) {

             ctx_server.queue_results.remove_waiting_task_ids(task_ids);
         } else {
-            const auto chunked_content_provider = [task_ids, &ctx_server](size_t, httplib::DataSink & sink) {
-                ctx_server.receive_cmpl_results_stream(task_ids, [&](const server_task_result & result) -> bool {
-                    return server_sent_event(sink, "data", result.data);
+            const auto chunked_content_provider = [task_ids, &ctx_server, oaicompat](size_t, httplib::DataSink & sink) {
+                ctx_server.receive_cmpl_results_stream(task_ids, [&](server_task_result_ptr & result) -> bool {
+                    json res_json = result->to_json();
+                    if (res_json.is_array()) {
+                        for (const auto & res : res_json) {
+                            if (!server_sent_event(sink, "data", res)) {
+                                return false;
+                            }
+                        }
+                        return true;
+                    } else {
+                        return server_sent_event(sink, "data", res_json);
+                    }
                 }, [&](const json & error_data) {
                     server_sent_event(sink, "error", error_data);
                 });
+                if (oaicompat) {
+                    static const std::string ev_done = "data: [DONE]\n\n";
+                    sink.write(ev_done.data(), ev_done.size());
+                }
                 sink.done();
                 return false;
             };
@@ -2769,7 +3597,12 @@ int main(int argc, char ** argv) {

     const auto handle_completions = [&handle_completions_generic](const httplib::Request & req, httplib::Response & res) {
         json data = json::parse(req.body);
-        return handle_completions_generic(SERVER_TASK_INF_TYPE_COMPLETION, data, res);
+        return handle_completions_generic(
+            SERVER_TASK_TYPE_COMPLETION,
+            data,
+            res,
+            /* oaicompat */ false,
+            /* oaicompat_chat */ false);
     };
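(With the unified handler, OAI-compatible streams are terminated with a `data: [DONE]` sentinel, as added above. A hedged client-side check against a locally running server:)

```shell
# stream an OAI-style chat completion; the final SSE line should be "data: [DONE]"
curl -N http://localhost:8080/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"messages": [{"role": "user", "content": "Hello"}], "stream": true}'
```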
     const auto handle_infill = [&ctx_server, &res_error, &handle_completions_generic](const httplib::Request & req, httplib::Response & res) {
@@ -2792,6 +3625,11 @@ int main(int argc, char ** argv) {
         json data = json::parse(req.body);

         // validate input
+        if (data.contains("prompt") && !data.at("prompt").is_string()) {
+            // prompt is optional
+            res_error(res, format_error_response("\"prompt\" must be a string", ERROR_TYPE_INVALID_REQUEST));
+        }
+
         if (!data.contains("input_prefix")) {
             res_error(res, format_error_response("\"input_prefix\" is required", ERROR_TYPE_INVALID_REQUEST));
         }
@@ -2801,9 +3639,11 @@ int main(int argc, char ** argv) {
         }

         if (data.contains("input_extra") && !data.at("input_extra").is_array()) {
+            // input_extra is optional
             res_error(res, format_error_response("\"input_extra\" must be an array of {\"filename\": string, \"text\": string}", ERROR_TYPE_INVALID_REQUEST));
             return;
         }
+
         json input_extra = json_value(data, "input_extra", json::array());
         for (const auto & chunk : input_extra) {
             // { "text": string, "filename": string }
@@ -2819,67 +3659,40 @@ int main(int argc, char ** argv) {
         }
         data["input_extra"] = input_extra; // default to empty array if it's not exist

-        return handle_completions_generic(SERVER_TASK_INF_TYPE_INFILL, data, res);
+        std::string prompt = json_value(data, "prompt", std::string());
+        std::vector<llama_tokens> tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, true, true);
+        SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size());
+        data["prompt"] = format_infill(
+            ctx_server.ctx,
+            data.at("input_prefix"),
+            data.at("input_suffix"),
+            data.at("input_extra"),
+            ctx_server.params_base.n_batch,
+            ctx_server.params_base.n_predict,
+            ctx_server.slots[0].n_ctx, // TODO: there should be a better way
+            ctx_server.params_base.spm_infill,
+            tokenized_prompts[0]
+        );
+
+        return handle_completions_generic(SERVER_TASK_TYPE_INFILL, data, res);
     };
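(The reworked infill handler now builds the FIM prompt server-side via `format_infill()`. A hedged request sketch — this requires a model with FIM/infill tokens; field names follow the validation above:)

```shell
curl http://localhost:8080/infill \
    -H "Content-Type: application/json" \
    -d '{"input_prefix": "def add(a, b):\n    ", "input_suffix": "\n    return c\n", "prompt": ""}'
```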
-    // TODO: maybe merge this function with "handle_completions_generic"
-    const auto handle_chat_completions = [&ctx_server, &params, &res_error, &res_ok, verbose](const httplib::Request & req, httplib::Response & res) {
-        if (ctx_server.params.embedding) {
+    const auto handle_chat_completions = [&ctx_server, &params, &res_error, &handle_completions_generic](const httplib::Request & req, httplib::Response & res) {
+        if (ctx_server.params_base.embedding) {
             res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED));
             return;
         }

         json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template);
-
-        std::vector<server_task> tasks = ctx_server.create_tasks_inference(data, SERVER_TASK_INF_TYPE_COMPLETION);
-        ctx_server.queue_results.add_waiting_tasks(tasks);
-        ctx_server.queue_tasks.post(tasks);
-
-        bool stream = json_value(data, "stream", false);
-        const auto task_ids = server_task::get_list_id(tasks);
-        const auto completion_id = gen_chatcmplid();
-
-        if (!stream) {
-            ctx_server.receive_cmpl_results(task_ids, [&](const std::vector<server_task_result> & results) {
-                // multitask is never support in chat completion, there is only one result
-                json result_oai = format_final_response_oaicompat(data, results[0].data, completion_id, /*.streaming =*/ false, verbose);
-                res_ok(res, result_oai);
-            }, [&](const json & error_data) {
-                res_error(res, error_data);
-            });
-
-            ctx_server.queue_results.remove_waiting_task_ids(task_ids);
-        } else {
-            const auto chunked_content_provider = [task_ids, &ctx_server, completion_id](size_t, httplib::DataSink & sink) {
-                ctx_server.receive_cmpl_results_stream(task_ids, [&](const server_task_result & result) -> bool {
-                    std::vector<json> result_array = format_partial_response_oaicompat(result.data, completion_id);
-                    for (auto & event_data : result_array) {
-                        if (event_data.empty()) {
-                            continue; // skip the stop token
-                        }
-                        if (!server_sent_event(sink, "data", event_data)) {
-                            return false; // connection is closed
-                        }
-                    }
-                    return true; // ok
-                }, [&](const json & error_data) {
-                    server_sent_event(sink, "error", error_data);
-                });
-                static const std::string ev_done = "data: [DONE]\n\n";
-                sink.write(ev_done.data(), ev_done.size());
-                sink.done();
-                return true;
-            };
-
-            auto on_complete = [task_ids, &ctx_server] (bool) {
-                ctx_server.queue_results.remove_waiting_task_ids(task_ids);
-            };
-
-            res.set_chunked_content_provider("text/event-stream", chunked_content_provider, on_complete);
-        }
+        return handle_completions_generic(
+            SERVER_TASK_TYPE_COMPLETION,
+            data,
+            res,
+            /* oaicompat */ true,
+            /* oaicompat_chat */ true);
     };

-    const auto handle_models = [&params, &ctx_server](const httplib::Request &, httplib::Response & res) {
+    const auto handle_models = [&params, &ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) {
         json models = {
             {"object", "list"},
             {"data", {
@@ -2893,7 +3706,7 @@ int main(int argc, char ** argv) {
              }}
         };

-        res.set_content(models.dump(), MIMETYPE_JSON);
+        res_ok(res, models);
     };

     const auto handle_tokenize = [&ctx_server, &res_ok](const httplib::Request & req, httplib::Response & res) {
@@ -2949,37 +3762,63 @@ int main(int argc, char ** argv) {
         res_ok(res, data);
     };

-    const auto handle_embeddings = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
+    const auto handle_embeddings_impl = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res, bool oaicompat) {
         const json body = json::parse(req.body);
-        bool is_openai = false;

-        // an input prompt can be a string or a list of tokens (integer)
+        if (oaicompat && llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) {
+            res_error(res, format_error_response("Pooling type 'none' is not OAI compatible. Please use a different pooling type", ERROR_TYPE_INVALID_REQUEST));
+            return;
+        }
+
+        // for the shape of input/content, see tokenize_input_prompts()
         json prompt;
         if (body.count("input") != 0) {
-            is_openai = true;
             prompt = body.at("input");
-        } else if (body.count("content") != 0) {
-            // with "content", we only support single prompt
-            prompt = std::vector<json>{body.at("content")};
+        } else if (body.contains("content")) {
+            oaicompat = false;
+            prompt = body.at("content");
         } else {
             res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST));
             return;
         }

+        std::vector<llama_tokens> tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, true, true);
+        for (const auto & tokens : tokenized_prompts) {
+            // this check is necessary for models that do not add BOS token to the input
+            if (tokens.empty()) {
+                res_error(res, format_error_response("Input content cannot be empty", ERROR_TYPE_INVALID_REQUEST));
+                return;
+            }
+        }
+
         // create and queue the task
         json responses = json::array();
         bool error = false;
         {
-            std::vector<server_task> tasks = ctx_server.create_tasks_inference({{"prompt", prompt}}, SERVER_TASK_INF_TYPE_EMBEDDING);
+            std::vector<server_task> tasks;
+            for (size_t i = 0; i < tokenized_prompts.size(); i++) {
+                server_task task = server_task(SERVER_TASK_TYPE_EMBEDDING);
+
+                task.id            = ctx_server.queue_tasks.get_new_id();
+                task.index         = i;
+                task.prompt_tokens = std::move(tokenized_prompts[i]);
+
+                // OAI-compat
+                task.params.oaicompat = oaicompat;
+
+                tasks.push_back(task);
+            }
+
             ctx_server.queue_results.add_waiting_tasks(tasks);
             ctx_server.queue_tasks.post(tasks);

             // get the result
             std::unordered_set<int> task_ids = server_task::get_list_id(tasks);

-            ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
-                for (const auto & res : results) {
-                    responses.push_back(res.data);
+            ctx_server.receive_multi_results(task_ids, [&](std::vector<server_task_result_ptr> & results) {
+                for (auto & res : results) {
+                    GGML_ASSERT(dynamic_cast<server_task_result_embd *>(res.get()) != nullptr);
+                    responses.push_back(res->to_json());
                 }
             }, [&](const json & error_data) {
                 res_error(res, error_data);
@@ -2994,14 +3833,20 @@ int main(int argc, char ** argv) {
         }

         // write JSON response
-        json root = is_openai
-            ? format_embeddings_response_oaicompat(body, responses)
-            : responses[0];
+        json root = oaicompat ? format_embeddings_response_oaicompat(body, responses) : json(responses);
         res_ok(res, root);
     };

+    const auto handle_embeddings = [&handle_embeddings_impl](const httplib::Request & req, httplib::Response & res) {
+        handle_embeddings_impl(req, res, false);
+    };
+
+    const auto handle_embeddings_oai = [&handle_embeddings_impl](const httplib::Request & req, httplib::Response & res) {
+        handle_embeddings_impl(req, res, true);
+    };
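(With the split into `handle_embeddings` and `handle_embeddings_oai`, only `/v1/embeddings` applies the OAI response shape and rejects pooling type `none`. A hedged example — requires starting the server with `--embeddings` and an embedding model:)

```shell
# OAI-compatible: accepts "input" (string or array), returns the OAI envelope
curl http://localhost:8080/v1/embeddings \
    -H "Content-Type: application/json" \
    -d '{"input": ["What is the capital of Bulgaria?"]}'

# legacy endpoint: "content" is also accepted and disables the OAI formatting
curl http://localhost:8080/embeddings \
    -H "Content-Type: application/json" \
    -d '{"content": "What is the capital of Bulgaria?"}'
```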
     const auto handle_rerank = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) {
-        if (!ctx_server.params.reranking || ctx_server.params.embedding) {
+        if (!ctx_server.params_base.reranking || ctx_server.params_base.embedding) {
             res_error(res, format_error_response("This server does not support reranking. Start it with `--reranking` and without `--embedding`", ERROR_TYPE_NOT_SUPPORTED));
             return;
         }
@@ -3035,29 +3880,33 @@ int main(int argc, char ** argv) {
             return;
         }

-        // construct prompt object: array of ["query", "doc0", "doc1", ...]
-        json prompt;
-        prompt.push_back(query);
-        for (const auto & doc : documents) {
-            prompt.push_back(doc);
-        }
-
-        LOG_DBG("rerank prompt: %s\n", prompt.dump().c_str());
+        llama_tokens tokenized_query = tokenize_input_prompts(ctx_server.ctx, query, /* add_special */ false, true)[0];

         // create and queue the task
         json responses = json::array();
         bool error = false;
         {
-            std::vector<server_task> tasks = ctx_server.create_tasks_inference({{"prompt", prompt}}, SERVER_TASK_INF_TYPE_RERANK);
+            std::vector<server_task> tasks;
+            std::vector<llama_tokens> tokenized_docs = tokenize_input_prompts(ctx_server.ctx, documents, /* add_special */ false, true);
+            tasks.reserve(tokenized_docs.size());
+            for (size_t i = 0; i < tokenized_docs.size(); i++) {
+                server_task task = server_task(SERVER_TASK_TYPE_RERANK);
+                task.id            = ctx_server.queue_tasks.get_new_id();
+                task.index         = i;
+                task.prompt_tokens = format_rerank(ctx_server.model, tokenized_query, tokenized_docs[i]);
+                tasks.push_back(task);
+            }
+
             ctx_server.queue_results.add_waiting_tasks(tasks);
             ctx_server.queue_tasks.post(tasks);

             // get the result
             std::unordered_set<int> task_ids = server_task::get_list_id(tasks);

-            ctx_server.receive_cmpl_results(task_ids, [&](std::vector<server_task_result> & results) {
-                for (const auto & res : results) {
-                    responses.push_back(res.data);
+            ctx_server.receive_multi_results(task_ids, [&](std::vector<server_task_result_ptr> & results) {
+                for (auto & res : results) {
+                    GGML_ASSERT(dynamic_cast<server_task_result_rerank *>(res.get()) != nullptr);
+                    responses.push_back(res->to_json());
                 }
             }, [&](const json & error_data) {
                 res_error(res, error_data);
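(Reranking now tokenizes the query and documents separately and assembles each query/document pair with `format_rerank()`. A hedged request — requires a reranker model and the `--reranking` flag checked above:)

```shell
curl http://localhost:8080/v1/rerank \
    -H "Content-Type: application/json" \
    -d '{"query": "What is panda?", "documents": ["hi", "The giant panda is a bear species endemic to China."]}'
```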
@@ -3108,36 +3957,47 @@ int main(int argc, char ** argv) {
             }
         }

-        server_task task;
-        task.type = SERVER_TASK_TYPE_SET_LORA;
-        const int id_task = ctx_server.queue_tasks.post(task);
-        ctx_server.queue_results.add_waiting_task_id(id_task);
+        server_task task(SERVER_TASK_TYPE_SET_LORA);
+        task.id = ctx_server.queue_tasks.get_new_id();
+        ctx_server.queue_results.add_waiting_task_id(task.id);
+        ctx_server.queue_tasks.post(task);
+
+        server_task_result_ptr result = ctx_server.queue_results.recv(task.id);
+        ctx_server.queue_results.remove_waiting_task_id(task.id);

-        server_task_result result = ctx_server.queue_results.recv(id_task);
-        ctx_server.queue_results.remove_waiting_task_id(id_task);
+        if (result->is_error()) {
+            res_error(res, result->to_json());
+            return;
+        }

-        res_ok(res, result.data);
-        res.status = 200; // HTTP OK
+        GGML_ASSERT(dynamic_cast<server_task_result_apply_lora *>(result.get()) != nullptr);
+        res_ok(res, result->to_json());
     };

     //
     // Router
     //

-    // register static assets routes
-    if (!params.public_path.empty()) {
-        // Set the base directory for serving static files
-        bool is_found = svr->set_mount_point("/", params.public_path);
-        if (!is_found) {
-            LOG_ERR("%s: static assets path not found: %s\n", __func__, params.public_path.c_str());
-            return 1;
-        }
+    if (!params.webui) {
+        LOG_INF("Web UI is disabled\n");
     } else {
-        // using embedded static files
-        for (const auto & it : static_files) {
-            const server_static_file & static_file = it.second;
-            svr->Get(it.first.c_str(), [&static_file](const httplib::Request &, httplib::Response & res) {
-                res.set_content(reinterpret_cast<const char *>(static_file.data), static_file.size, static_file.mime_type);
+        // register static assets routes
+        if (!params.public_path.empty()) {
+            // Set the base directory for serving static files
+            bool is_found = svr->set_mount_point("/", params.public_path);
+            if (!is_found) {
+                LOG_ERR("%s: static assets path not found: %s\n", __func__, params.public_path.c_str());
+                return 1;
+            }
+        } else {
+            // using embedded static index.html
+            svr->Get("/", [](const httplib::Request & req, httplib::Response & res) {
+                if (req.get_header_value("Accept-Encoding").find("gzip") == std::string::npos) {
+                    res.set_content("Error: gzip is not supported by this browser", "text/plain");
+                } else {
+                    res.set_header("Content-Encoding", "gzip");
+                    res.set_content(reinterpret_cast<const char *>(index_html_gz), index_html_gz_len, "text/html; charset=utf-8");
+                }
                 return false;
             });
         }
@@ -3158,7 +4018,7 @@ int main(int argc, char ** argv) {
     svr->Post("/infill",              handle_infill);
     svr->Post("/embedding",           handle_embeddings); // legacy
     svr->Post("/embeddings",          handle_embeddings);
-    svr->Post("/v1/embeddings",       handle_embeddings);
+    svr->Post("/v1/embeddings",       handle_embeddings_oai);
     svr->Post("/rerank",              handle_rerank);
     svr->Post("/reranking",           handle_rerank);
     svr->Post("/v1/rerank",           handle_rerank);
@@ -3188,8 +4048,18 @@ int main(int argc, char ** argv) {
         llama_backend_free();
     };

-    // bind HTTP listen port, run the HTTP server in a thread
-    if (!svr->bind_to_port(params.hostname, params.port)) {
+    // bind HTTP listen port
+    bool was_bound = false;
+    if (params.port == 0) {
+        int bound_port = svr->bind_to_any_port(params.hostname);
+        if ((was_bound = (bound_port >= 0))) {
+            params.port = bound_port;
+        }
+    } else {
+        was_bound = svr->bind_to_port(params.hostname, params.port);
+    }
+
+    if (!was_bound) {
         //LOG_ERROR("couldn't bind HTTP server socket", {
         //    {"hostname", params.hostname},
         //    {"port", params.port},
@@ -3198,6 +4068,8 @@ int main(int argc, char ** argv) {
         clean_up();
         return 1;
     }
+
+    // run the HTTP server in a thread
     std::thread t([&]() { svr->listen_after_bind(); });
     svr->wait_until_ready();
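(Two behavioral notes fall out of the router and binding changes above: the embedded web UI is now served as a single gzip-compressed `index.html`, and `--port 0` binds an ephemeral port that is written back into `params.port`. A hedged sketch:)

```shell
# let curl negotiate and decompress the gzip-encoded web UI
curl --compressed http://localhost:8080/

# bind to a random free port; the effective port is reported by the server at startup
./llama-server -m model.gguf --port 0
```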
diff --git a/examples/server/tests/.gitignore b/examples/server/tests/.gitignore
index 1d17dae13b53a..90ee7fe6d971a 100644
--- a/examples/server/tests/.gitignore
+++ b/examples/server/tests/.gitignore
@@ -1 +1,2 @@
 .venv
+tmp
diff --git a/examples/server/tests/README.md b/examples/server/tests/README.md
index 10f22c4471ea7..fa3d0a2f5ff66 100644
--- a/examples/server/tests/README.md
+++ b/examples/server/tests/README.md
@@ -1,19 +1,9 @@
 # Server tests

-Python based server tests scenario using [BDD](https://en.wikipedia.org/wiki/Behavior-driven_development)
-and [behave](https://behave.readthedocs.io/en/latest/):
-
-* [issues.feature](./features/issues.feature) Pending issues scenario
-* [parallel.feature](./features/parallel.feature) Scenario involving multi slots and concurrent requests
-* [security.feature](./features/security.feature) Security, CORS and API Key
-* [server.feature](./features/server.feature) Server base scenario: completion, embedding, tokenization, etc...
+Python based server tests scenario using [pytest](https://docs.pytest.org/en/stable/).

 Tests target GitHub workflows job runners with 4 vCPU.

-Requests are
-using [aiohttp](https://docs.aiohttp.org/en/stable/client_reference.html), [asyncio](https://docs.python.org/fr/3/library/asyncio.html)
-based http client.
-
 Note: If the host architecture inference speed is faster than GitHub runners one, parallel scenario may randomly fail.
 To mitigate it, you can increase values in `n_predict`, `kv_size`.
@@ -39,26 +29,25 @@ It's possible to override some scenario steps values with environment variables:
 |--------------------------|------------------------------------------------------------------------------------------------|
 | `PORT`                   | `context.server_port` to set the listening port of the server during scenario, default: `8080`  |
 | `LLAMA_SERVER_BIN_PATH`  | to change the server binary path, default: `../../../build/bin/llama-server`                    |
-| `DEBUG`                  | "ON" to enable steps and server verbose mode `--verbose`                                        |
+| `DEBUG`                  | to enable steps and server verbose mode `--verbose`                                             |
 | `N_GPU_LAYERS`           | number of model layers to offload to VRAM `-ngl --n-gpu-layers`                                 |

-### Run @bug, @wip or @wrong_usage annotated scenario
-
-Feature or Scenario must be annotated with `@llama.cpp` to be included in the default scope.
+To run slow tests:

-- `@bug` annotation aims to link a scenario with a GitHub issue.
-- `@wrong_usage` are meant to show user issue that are actually an expected behavior
-- `@wip` to focus on a scenario working in progress
-- `@slow` heavy test, disabled by default
+```shell
+SLOW_TESTS=1 ./tests.sh
+```

-To run a scenario annotated with `@bug`, start:
+To run with stdout/stderr display in real time (verbose output, but useful for debugging):

 ```shell
-DEBUG=ON ./tests.sh --no-skipped --tags bug --stop
+DEBUG=1 ./tests.sh -s -v -x
 ```

-After changing logic in `steps.py`, ensure that `@bug` and `@wrong_usage` scenario are updated.
+Hint: You can compile and run tests in a single command, useful for local development:

 ```shell
-./tests.sh --no-skipped --tags bug,wrong_usage || echo "should failed but compile"
+cmake --build build -j --target llama-server && ./examples/server/tests/tests.sh
 ```
+
+To see all available arguments, please refer to [pytest documentation](https://docs.pytest.org/en/stable/how-to/usage.html)
diff --git a/examples/server/tests/conftest.py b/examples/server/tests/conftest.py
new file mode 100644
index 0000000000000..017d1bb841efd
--- /dev/null
+++ b/examples/server/tests/conftest.py
@@ -0,0 +1,15 @@
+import pytest
+from utils import *
+
+
+# ref: https://stackoverflow.com/questions/22627659/run-code-before-and-after-each-test-in-py-test
+@pytest.fixture(autouse=True)
+def stop_server_after_each_test():
+    # do nothing before each test
+    yield
+    # stop all servers after each test
+    instances = set(
+        server_instances
+    )  # copy the set to prevent 'Set changed size during iteration'
+    for server in instances:
+        server.stop()
diff --git a/examples/server/tests/features/ctx_shift.feature b/examples/server/tests/features/ctx_shift.feature
deleted file mode 100644
index ae6c6b01b0221..0000000000000
--- a/examples/server/tests/features/ctx_shift.feature
+++ /dev/null
@@ -1,66 +0,0 @@
-@llama.cpp
-@ctx_shift
-Feature: llama.cpp server
-
-  Background: Server startup
-    Given a server listening on localhost:8080
-    And   a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
-    And   a model file test-model.gguf
-    And   a model alias tinyllama-2
-    And   BOS token is 1
-    And   42 as server seed
-    And   256 KV cache size
-    And   32 as batch size
-    And   2 slots
-
-  # the prompt is 301 tokens
-  # the slot context is 256/2 = 128 tokens
-  # the prompt is truncated to keep the last 109 tokens
-  # 64 tokens are generated thanks to shifting the context when it gets full
-  Scenario: Inference with context shift
-    And   64 server max tokens to predict
-    Then  the server is starting
-    Then  the server is healthy
-    Given a prompt:
-    """
-    Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. - Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. - Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. - """ - And a completion request with no api error - Then 64 tokens are predicted matching fun|Annaks|popcorns|pictry|bowl - And the completion is truncated - And 109 prompt tokens are processed - - Scenario Outline: Inference without context shift - And server max tokens to predict - And disable context shifting - Then the server is starting - Then the server is healthy - Given a prompt: - """ - Hi how are you - """ - And a completion request with no api error - Then tokens are predicted matching twind|Anna - And the completion is truncated - And 8 prompt tokens are processed - Examples: - | n_predict | n_token_output | truncated | - | 64 | 64 | not | - | -1 | 120 | | - - Scenario: Inference without context shift (expected error: prompt too long) - And disable context shifting - Then the server is starting - Then the server is healthy - Given a prompt: - """ - Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. - Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. - Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. - """ - And a completion request with 400 api error - diff --git a/examples/server/tests/features/embeddings.feature b/examples/server/tests/features/embeddings.feature deleted file mode 100644 index f4fe2ee4335ff..0000000000000 --- a/examples/server/tests/features/embeddings.feature +++ /dev/null @@ -1,113 +0,0 @@ -@llama.cpp -@embeddings -Feature: llama.cpp server - - Background: Server startup - Given a server listening on localhost:8080 - And a model url https://huggingface.co/ggml-org/models/resolve/main/bert-bge-small/ggml-model-f16.gguf - And a model file bert-bge-small.gguf - And a model alias bert-bge-small - And 42 as server seed - And 2 slots - # the bert-bge-small model has context size of 512 - # since the generated prompts are as big as the batch size, we need to set the batch size to <= 512 - # ref: https://huggingface.co/BAAI/bge-small-en-v1.5/blob/5c38ec7c405ec4b44b94cc5a9bb96e735b38267a/config.json#L20 - And 128 as batch size - And 128 as ubatch size - And 512 KV cache size - And enable embeddings endpoint - Then the server is starting - Then the server is healthy - - Scenario: Embedding - When embeddings are computed for: - """ - What is the capital of Bulgaria ? - """ - Then embeddings are generated - - Scenario: Embedding (error: prompt too long) - When embeddings are computed for: - """ - Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. - Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. - Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. 
- Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. - Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. - Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. - Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. - """ - And embeddings request with 500 api error - - Scenario: OAI Embeddings compatibility - Given a model bert-bge-small - When an OAI compatible embeddings computation request for: - """ - What is the capital of Spain ? - """ - Then embeddings are generated - - Scenario: OAI Embeddings compatibility with multiple inputs - Given a model bert-bge-small - Given a prompt: - """ - In which country Paris is located ? - """ - And a prompt: - """ - Is Madrid the capital of Spain ? - """ - When an OAI compatible embeddings computation request for multiple inputs - Then embeddings are generated - - Scenario: Multi users embeddings - Given a prompt: - """ - Write a very long story about AI. - """ - And a prompt: - """ - Write another very long music lyrics. - """ - And a prompt: - """ - Write a very long poem. - """ - And a prompt: - """ - Write a very long joke. - """ - Given concurrent embedding requests - Then the server is busy - Then the server is idle - Then all embeddings are generated - - Scenario: Multi users OAI compatibility embeddings - Given a prompt: - """ - In which country Paris is located ? - """ - And a prompt: - """ - Is Madrid the capital of Spain ? - """ - And a prompt: - """ - What is the biggest US city ? - """ - And a prompt: - """ - What is the capital of Bulgaria ? - """ - And a model bert-bge-small - Given concurrent OAI embedding requests - Then the server is busy - Then the server is idle - Then all embeddings are generated - - Scenario: All embeddings should be the same - Given 10 fixed prompts - And a model bert-bge-small - Given concurrent OAI embedding requests - Then all embeddings are the same diff --git a/examples/server/tests/features/environment.py b/examples/server/tests/features/environment.py deleted file mode 100644 index e7845dc2f51fc..0000000000000 --- a/examples/server/tests/features/environment.py +++ /dev/null @@ -1,71 +0,0 @@ -import os -import signal -import socket -import sys -import time -import traceback -from contextlib import closing -from subprocess import TimeoutExpired - - -def before_scenario(context, scenario): - context.debug = 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON' - if context.debug: - print("DEBUG=ON") - print(f"\x1b[33;42mStarting new scenario: {scenario.name}!\x1b[0m") - port = 8080 - if 'PORT' in os.environ: - port = int(os.environ['PORT']) - if is_server_listening("localhost", port): - assert False, "Server already started" - - -def after_scenario(context, scenario): - try: - if 'server_process' not in context or context.server_process is None: - return - if scenario.status == "failed": - if 'GITHUB_ACTIONS' in os.environ: - print(f"\x1b[33;101mSCENARIO FAILED: {scenario.name} server logs:\x1b[0m\n") - if os.path.isfile('llama.log'): - with closing(open('llama.log', 'r')) as f: - for line in f: - print(line) - if not is_server_listening(context.server_fqdn, context.server_port): - print("\x1b[33;101mERROR: Server stopped listening\x1b[0m") - - if context.server_process.poll() is not None: - assert False, f"Server not running pid={context.server_process.pid} ..." 
- - server_graceful_shutdown(context) # SIGINT - - try: - context.server_process.wait(0.5) - except TimeoutExpired: - print(f"server still alive after 500ms, force-killing pid={context.server_process.pid} ...") - context.server_process.kill() # SIGKILL - context.server_process.wait() - - while is_server_listening(context.server_fqdn, context.server_port): - time.sleep(0.1) - except Exception: - print("ignoring error in after_scenario:") - traceback.print_exc(file=sys.stdout) - - -def server_graceful_shutdown(context): - print(f"shutting down server pid={context.server_process.pid} ...") - if os.name == 'nt': - interrupt = signal.CTRL_C_EVENT - else: - interrupt = signal.SIGINT - context.server_process.send_signal(interrupt) - - -def is_server_listening(server_fqdn, server_port): - with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: - result = sock.connect_ex((server_fqdn, server_port)) - _is_server_listening = result == 0 - if _is_server_listening: - print(f"server is listening on {server_fqdn}:{server_port}...") - return _is_server_listening diff --git a/examples/server/tests/features/infill.feature b/examples/server/tests/features/infill.feature deleted file mode 100644 index a0bbfef77707b..0000000000000 --- a/examples/server/tests/features/infill.feature +++ /dev/null @@ -1,36 +0,0 @@ -@llama.cpp -@infill -Feature: llama.cpp server - - # The current model is made by adding FIM tokens to the existing stories260K - # We may want to use a better model in the future, maybe something like SmolLM 360M - - Background: Server startup - Given a server listening on localhost:8080 - And a model file tinyllamas/stories260K-infill.gguf from HF repo ggml-org/models - And a model file test-model-infill.gguf - And a model alias tinyllama-infill - And 42 as server seed - And 1024 as batch size - And 1024 as ubatch size - And 2048 KV cache size - And 64 max tokens to predict - And 0.0 temperature - Then the server is starting - Then the server is healthy - - Scenario: Infill without input_extra - Given a prompt "Complete this" - And an infill input extra none none - And an infill input prefix "#include \n#include \"llama.h\"\n\nint main() {\n int n_threads = llama_" - And an infill input suffix "}\n" - And an infill request with no api error - Then 64 tokens are predicted matching One|day|she|saw|big|scary|bird - - Scenario: Infill with input_extra - Given a prompt "Complete this" - And an infill input extra "llama.h" "LLAMA_API int32_t llama_n_threads();\n" - And an infill input prefix "#include \n#include \"llama.h\"\n\nint main() {\n int n_threads = llama_" - And an infill input suffix "}\n" - And an infill request with no api error - Then 64 tokens are predicted matching cuts|Jimmy|mom|came|into|the|room" diff --git a/examples/server/tests/features/issues.feature b/examples/server/tests/features/issues.feature deleted file mode 100644 index 7b13e44cad395..0000000000000 --- a/examples/server/tests/features/issues.feature +++ /dev/null @@ -1,5 +0,0 @@ -# List of ongoing issues -# run with: DEBUG=ON ./tests.sh --no-skipped --tags bug -@bug -Feature: Issues - # No confirmed issue at the moment diff --git a/examples/server/tests/features/lora.feature b/examples/server/tests/features/lora.feature deleted file mode 100644 index 7b85988ac6e87..0000000000000 --- a/examples/server/tests/features/lora.feature +++ /dev/null @@ -1,36 +0,0 @@ -@llama.cpp -@lora -Feature: llama.cpp server - - Background: Server startup - Given a server listening on localhost:8080 - And a model url 
diff --git a/examples/server/tests/features/lora.feature b/examples/server/tests/features/lora.feature
deleted file mode 100644
index 7b85988ac6e87..0000000000000
--- a/examples/server/tests/features/lora.feature
+++ /dev/null
@@ -1,36 +0,0 @@
-@llama.cpp
-@lora
-Feature: llama.cpp server
-
-  Background: Server startup
-    Given a server listening on localhost:8080
-    And   a model url https://huggingface.co/ggml-org/stories15M_MOE/resolve/main/stories15M_MOE-F16.gguf
-    And   a model file stories15M_MOE-F16.gguf
-    And   a model alias stories15M_MOE
-    And   a lora adapter file from https://huggingface.co/ggml-org/stories15M_MOE/resolve/main/moe_shakespeare15M.gguf
-    And   42 as server seed
-    And   1024 as batch size
-    And   1024 as ubatch size
-    And   2048 KV cache size
-    And   64 max tokens to predict
-    And   0.0 temperature
-    Then  the server is starting
-    Then  the server is healthy
-
-  Scenario: Completion LoRA disabled
-    Given switch off lora adapter 0
-    Given a prompt:
-      """
-      Look in thy glass
-      """
-    And   a completion request with no api error
-    Then  64 tokens are predicted matching little|girl|three|years|old
-
-  Scenario: Completion LoRA enabled
-    Given switch on lora adapter 0
-    Given a prompt:
-      """
-      Look in thy glass
-      """
-    And   a completion request with no api error
-    Then  64 tokens are predicted matching eye|love|glass|sun
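The LoRA scenarios above toggle adapters through the server's /lora-adapters endpoint (see toggle_lora_adapter in the steps.py hunk further down). A minimal sketch of that call with plain requests, assuming a server on localhost:8080; set_lora_scale is an illustrative name:

import requests

def set_lora_scale(base_url: str, lora_id: int, scale: float) -> None:
    # scale 0 disables the adapter, 1 applies it at full strength
    r = requests.post(f"{base_url}/lora-adapters",
                      json=[{"id": lora_id, "scale": scale}])
    r.raise_for_status()

# "switch off lora adapter 0" is roughly:
# set_lora_scale("http://localhost:8080", 0, 0.0)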
- """ - And max tokens to predict - And streaming is - Given concurrent OAI completions requests - Then the server is busy - Then the server is idle - Then all prompts are predicted with tokens - Examples: - | streaming | n_predict | - | disabled | 128 | - | enabled | 64 | - - Scenario: Multi users with total number of tokens to predict exceeds the KV Cache size #3969 - Given a prompt: - """ - Write a very long story about AI. - """ - And a prompt: - """ - Write another very long music lyrics. - """ - And a prompt: - """ - Write a very long poem. - """ - And a prompt: - """ - Write a very long joke. - """ - And 128 max tokens to predict - Given concurrent completion requests - Then the server is busy - Then the server is idle - Then all prompts are predicted diff --git a/examples/server/tests/features/passkey.feature b/examples/server/tests/features/passkey.feature deleted file mode 100644 index ff0a82cc46581..0000000000000 --- a/examples/server/tests/features/passkey.feature +++ /dev/null @@ -1,56 +0,0 @@ -# run with: ./tests.sh --no-skipped --tags passkey -@passkey -@slow -Feature: Passkey / Self-extend with context shift - - Background: Server startup - Given a server listening on localhost:8080 - - # Generates a long text of junk and inserts a secret passkey number inside it. - # Then we query the LLM for the secret passkey. - # see #3856 and #4810 - Scenario Outline: Passkey - Given a model file from HF repo - And as batch size - And as number of junk - And server max tokens to predict - And 42 as seed - And 0.0 temperature - And KV cache size - And 1 slots - And group attention factor to extend context size through self-extend - And group attention width to extend context size through self-extend - # Can be override with N_GPU_LAYERS - And GPU offloaded layers - Then the server is starting - # Higher timeout because the model may need to be downloaded from the internet - Then the server is healthy with timeout 120 seconds - Given available models - Then model 0 is trained on tokens context - Given a prefix prompt: - """ - here is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there. - """ - And a passkey prompt template: - """ - The pass key is Remember it. is the pass key. - """ - And a junk suffix prompt: - """ - The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again. - """ - And a suffix prompt: - """ - What is the pass key? 
diff --git a/examples/server/tests/features/passkey.feature b/examples/server/tests/features/passkey.feature
deleted file mode 100644
index ff0a82cc46581..0000000000000
--- a/examples/server/tests/features/passkey.feature
+++ /dev/null
@@ -1,56 +0,0 @@
-# run with: ./tests.sh --no-skipped --tags passkey
-@passkey
-@slow
-Feature: Passkey / Self-extend with context shift
-
-  Background: Server startup
-    Given a server listening on localhost:8080
-
-  # Generates a long text of junk and inserts a secret passkey number inside it.
-  # Then we query the LLM for the secret passkey.
-  # see #3856 and #4810
-  Scenario Outline: Passkey
-    Given a model file <hf_file> from HF repo <hf_repo>
-    And   <n_batch> as batch size
-    And   <n_junk> as number of junk
-    And   <n_predicted> server max tokens to predict
-    And   42 as seed
-    And   0.0 temperature
-    And   <n_ctx> KV cache size
-    And   1 slots
-    And   <n_ga> group attention factor to extend context size through self-extend
-    And   <n_ga_w> group attention width to extend context size through self-extend
-    # Can be overridden with N_GPU_LAYERS
-    And   <ngl> GPU offloaded layers
-    Then  the server is starting
-    # Higher timeout because the model may need to be downloaded from the internet
-    Then  the server is healthy with timeout 120 seconds
-    Given available models
-    Then  model 0 is trained on <n_ctx_train> tokens context
-    Given a prefix prompt:
-    """
-    here is an important info hidden inside a lot of irrelevant text. Find it and memorize them. I will quiz you about the important information there.
-    """
-    And a passkey prompt template:
-    """
-    The pass key is <passkey> Remember it. <passkey> is the pass key.
-    """
-    And a junk suffix prompt:
-    """
-    The grass is green. The sky is blue. The sun is yellow. Here we go. There and back again.
-    """
-    And a suffix prompt:
-    """
-    What is the pass key? The pass key is
-    """
-    Given a "<passkey>" passkey challenge prompt with the passkey inserted every <i_pos> junk
-    And  a completion request with no api error
-    Then <n_predicted> tokens are predicted matching <re_content>
-
-    Examples:
-      | hf_repo             | hf_file           | n_ctx_train | ngl | n_ctx | n_batch | n_ga | n_ga_w | n_junk | i_pos | passkey | n_predicted | re_content      |
-      | TheBloke/phi-2-GGUF | phi-2.Q4_K_M.gguf | 2048        | 5   | 8192  | 512     | 4    | 512    | 250    | 50    | 42      | 1           | 42              |
-      | TheBloke/phi-2-GGUF | phi-2.Q4_K_M.gguf | 2048        | 5   | 8192  | 512     | 2    | 512    | 250    | 50    | 42      | 1           | \b((?!42)\w)+\b |
-      #| TheBloke/Llama-2-7B-GGUF | llama-2-7b.Q2_K.gguf | 4096 | 3 | 16384 | 512 | 4 | 512 | 500 | 300 | 1234 | 5 | 1234 |
-      #| TheBloke/Mixtral-8x7B-v0.1-GGUF | mixtral-8x7b-v0.1.Q2_K.gguf | 32768 | 2 | 16384 | 512 | 4 | 512 | 500 | 100 | 0987 | 5 | 0
-      # 987 |
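The scenario above asks the server to find a passkey hidden in a haystack: a prefix, n_junk junk paragraphs with the passkey sentence spliced in at one position, and a question suffix (see step_prompt_passkey in the steps.py hunk further down). A standalone sketch of that assembly; build_passkey_prompt is an illustrative name:

def build_passkey_prompt(prefix: str, junk: str, passkey_sentence: str,
                         suffix: str, n_junk: int, i_pos: int) -> str:
    parts = [prefix]
    for i in range(n_junk):
        if i == i_pos:
            parts.append(passkey_sentence)  # sentence already contains the passkey
        parts.append(junk)
    parts.append(suffix)
    return "".join(parts)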
- """ - When reranking request - Then reranking results are returned - Then reranking highest score is index 2 and lowest score is index 3 diff --git a/examples/server/tests/features/results.feature b/examples/server/tests/features/results.feature deleted file mode 100644 index e8e1b54147b05..0000000000000 --- a/examples/server/tests/features/results.feature +++ /dev/null @@ -1,118 +0,0 @@ -@llama.cpp -@results -Feature: Results - - Background: Server startup - Given a server listening on localhost:8080 - And a model file tinyllamas/split/stories15M-00001-of-00003.gguf from HF repo ggml-org/models - And a model file test-model-00001-of-00003.gguf - And 128 as batch size - And 1024 KV cache size - And 128 max tokens to predict - And continuous batching - - Scenario Outline: consistent results with same seed - Given slots - And 1.0 temperature - Then the server is starting - Then the server is healthy - - Given 4 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 42 - - Given concurrent completion requests - Then the server is busy - Then the server is idle - And all slots are idle - Then all predictions are equal - Examples: - | n_slots | - | 1 | - # FIXME: unified KV cache nondeterminism - # | 2 | - - Scenario Outline: different results with different seed - Given slots - And 1.0 temperature - Then the server is starting - Then the server is healthy - - Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 42 - Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 43 - Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 44 - Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 45 - - Given concurrent completion requests - Then the server is busy - Then the server is idle - And all slots are idle - Then all predictions are different - Examples: - | n_slots | - | 1 | - | 2 | - - Scenario Outline: consistent results with same seed and varying batch size - Given 4 slots - And temperature - # And 0 as draft - Then the server is starting - Then the server is healthy - - Given 1 prompts "Write a very long story about AI." with seed 42 - And concurrent completion requests - # Then the server is busy # Not all slots will be utilized. - Then the server is idle - And all slots are idle - - Given prompts "Write a very long story about AI." with seed 42 - And concurrent completion requests - # Then the server is busy # Not all slots will be utilized. - Then the server is idle - And all slots are idle - - Then all predictions are equal - Examples: - | n_parallel | temp | - | 1 | 0.0 | - | 1 | 1.0 | - # FIXME: unified KV cache nondeterminism - # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227 - # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574 - # and https://github.com/ggerganov/llama.cpp/pull/7347 . - # | 2 | 0.0 | - # | 4 | 0.0 | - # | 2 | 1.0 | - # | 4 | 1.0 | - - Scenario Outline: consistent token probs with same seed and prompt - Given slots - And KV cache size - And 1.0 temperature - And max tokens to predict - Then the server is starting - Then the server is healthy - - Given 1 prompts "The meaning of life is" with seed 42 - And concurrent completion requests - # Then the server is busy # Not all slots will be utilized. 
diff --git a/examples/server/tests/features/results.feature b/examples/server/tests/features/results.feature
deleted file mode 100644
index e8e1b54147b05..0000000000000
--- a/examples/server/tests/features/results.feature
+++ /dev/null
@@ -1,118 +0,0 @@
-@llama.cpp
-@results
-Feature: Results
-
-  Background: Server startup
-    Given a server listening on localhost:8080
-    And   a model file tinyllamas/split/stories15M-00001-of-00003.gguf from HF repo ggml-org/models
-    And   a model file test-model-00001-of-00003.gguf
-    And   128 as batch size
-    And   1024 KV cache size
-    And   128 max tokens to predict
-    And   continuous batching
-
-  Scenario Outline: consistent results with same seed
-    Given <n_slots> slots
-    And   1.0 temperature
-    Then  the server is starting
-    Then  the server is healthy
-
-    Given 4 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 42
-
-    Given concurrent completion requests
-    Then the server is busy
-    Then the server is idle
-    And  all slots are idle
-    Then all predictions are equal
-    Examples:
-      | n_slots |
-      | 1       |
-      # FIXME: unified KV cache nondeterminism
-      # | 2       |
-
-  Scenario Outline: different results with different seed
-    Given <n_slots> slots
-    And   1.0 temperature
-    Then  the server is starting
-    Then  the server is healthy
-
-    Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 42
-    Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 43
-    Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 44
-    Given 1 prompts "Title: Little Red Riding Hood But In Space\n\nSummary:" with seed 45
-
-    Given concurrent completion requests
-    Then the server is busy
-    Then the server is idle
-    And  all slots are idle
-    Then all predictions are different
-    Examples:
-      | n_slots |
-      | 1       |
-      | 2       |
-
-  Scenario Outline: consistent results with same seed and varying batch size
-    Given 4 slots
-    And   <temp> temperature
-    # And 0 as draft
-    Then  the server is starting
-    Then  the server is healthy
-
-    Given 1 prompts "Write a very long story about AI." with seed 42
-    And   concurrent completion requests
-    # Then the server is busy # Not all slots will be utilized.
-    Then  the server is idle
-    And   all slots are idle
-
-    Given <n_parallel> prompts "Write a very long story about AI." with seed 42
-    And   concurrent completion requests
-    # Then the server is busy # Not all slots will be utilized.
-    Then  the server is idle
-    And   all slots are idle
-
-    Then all predictions are equal
-    Examples:
-      | n_parallel | temp |
-      | 1          | 0.0  |
-      | 1          | 1.0  |
-      # FIXME: unified KV cache nondeterminism
-      # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227
-      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574
-      # and https://github.com/ggerganov/llama.cpp/pull/7347 .
-      # | 2          | 0.0  |
-      # | 4          | 0.0  |
-      # | 2          | 1.0  |
-      # | 4          | 1.0  |
-
-  Scenario Outline: consistent token probs with same seed and prompt
-    Given <n_slots> slots
-    And   <n_kv> KV cache size
-    And   1.0 temperature
-    And   <n_predict> max tokens to predict
-    Then  the server is starting
-    Then  the server is healthy
-
-    Given 1 prompts "The meaning of life is" with seed 42
-    And   concurrent completion requests
-    # Then the server is busy # Not all slots will be utilized.
-    Then  the server is idle
-    And   all slots are idle
-
-    Given <n_parallel> prompts "The meaning of life is" with seed 42
-    And   concurrent completion requests
-    # Then the server is busy # Not all slots will be utilized.
-    Then  the server is idle
-    And   all slots are idle
-
-    Then all token probabilities are equal
-    Examples:
-      | n_slots | n_kv | n_predict | n_parallel |
-      | 4       | 1024 | 1         | 1          |
-      # FIXME: unified KV cache nondeterminism
-      # See https://github.com/ggerganov/whisper.cpp/issues/1941#issuecomment-1986923227
-      # and https://github.com/ggerganov/llama.cpp/pull/6122#discussion_r1531405574
-      # and https://github.com/ggerganov/llama.cpp/pull/7347 .
-      # | 4       | 1024 | 1         | 4          |
-      # | 4       | 1024 | 100       | 1          |
-      # This test still fails even with the above patches; the first token probabilities are already different.
-      # | 4       | 1024 | 100       | 4          |
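The scenarios above reduce to one invariant: on an otherwise idle server, the same prompt with the same seed must reproduce the same completion, while different seeds should diverge. A hedged sketch of the first check outside behave; the endpoint and fields mirror the deleted request_completion helper:

import requests

def completion(base_url: str, prompt: str, seed: int) -> str:
    r = requests.post(f"{base_url}/completion",
                      json={"prompt": prompt, "seed": seed, "temperature": 1.0})
    r.raise_for_status()
    return r.json()["content"]

def assert_deterministic(base_url: str, prompt: str, seed: int = 42) -> None:
    first = completion(base_url, prompt, seed)
    second = completion(base_url, prompt, seed)
    assert first == second, "same seed should reproduce the same completion"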
diff --git a/examples/server/tests/features/security.feature b/examples/server/tests/features/security.feature
deleted file mode 100644
index ef30007c3eddb..0000000000000
--- a/examples/server/tests/features/security.feature
+++ /dev/null
@@ -1,68 +0,0 @@
-@llama.cpp
-@security
-Feature: Security
-
-  Background: Server startup with an api key defined
-    Given a server listening on localhost:8080
-    And   a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
-    And   a server api key THIS_IS_THE_KEY
-    Then  the server is starting
-    Then  the server is healthy
-
-  Scenario Outline: Completion with some user api key
-    Given a prompt test
-    And   a user api key <api_key>
-    And   4 max tokens to predict
-    And   a completion request with <api_error> api error
-
-    Examples: Prompts
-      | api_key         | api_error |
-      | THIS_IS_THE_KEY | no        |
-      | THIS_IS_THE_KEY | no        |
-      | hackeme         | raised    |
-      |                 | raised    |
-
-  Scenario Outline: OAI Compatibility
-    Given a system prompt test
-    And   a user prompt test
-    And   a model test
-    And   2 max tokens to predict
-    And   streaming is disabled
-    And   a user api key <api_key>
-    Given an OAI compatible chat completions request with <api_error> api error
-
-    Examples: Prompts
-      | api_key         | api_error |
-      | THIS_IS_THE_KEY | no        |
-      | THIS_IS_THE_KEY | no        |
-      | hackme          | raised    |
-
-  Scenario Outline: OAI Compatibility (invalid response formats)
-    Given a system prompt test
-    And   a user prompt test
-    And   a response format <response_format>
-    And   a model test
-    And   2 max tokens to predict
-    And   streaming is disabled
-    Given an OAI compatible chat completions request with raised api error
-
-    Examples: Prompts
-      | response_format                                        |
-      | {"type": "sound"}                                      |
-      | {"type": "json_object", "schema": 123}                 |
-      | {"type": "json_object", "schema": {"type": 123}}       |
-      | {"type": "json_object", "schema": {"type": "hiccup"}}  |
-
-
-  Scenario Outline: CORS Options
-    Given a user api key THIS_IS_THE_KEY
-    When  an OPTIONS request is sent from <origin>
-    Then  CORS header <cors_header> is set to <cors_header_value>
-
-    Examples: Headers
-      | origin          | cors_header                      | cors_header_value |
-      | localhost       | Access-Control-Allow-Origin      | localhost         |
-      | web.mydomain.fr | Access-Control-Allow-Origin      | web.mydomain.fr   |
-      | origin          | Access-Control-Allow-Credentials | true              |
-      | web.mydomain.fr | Access-Control-Allow-Methods     | GET, POST         |
-      | web.mydomain.fr | Access-Control-Allow-Headers     | *                 |
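These scenarios boil down to two raw HTTP checks: bearer-key auth on completion requests and CORS headers on a preflight OPTIONS request. A sketch with plain requests, assuming a server started with an api key of THIS_IS_THE_KEY:

import requests

base_url = "http://localhost:8080"

# A wrong key must be rejected; the feature expects a 401 status.
r = requests.post(f"{base_url}/completion",
                  json={"prompt": "test", "n_predict": 4},
                  headers={"Authorization": "Bearer hackme"})
assert r.status_code == 401

# Preflight: the allowed origin must be echoed back to the caller.
r = requests.options(f"{base_url}/v1/chat/completions",
                     headers={"Origin": "web.mydomain.fr",
                              "Authorization": "Bearer THIS_IS_THE_KEY"})
assert r.headers["Access-Control-Allow-Origin"] == "web.mydomain.fr"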
- """ - Then tokens begin with BOS - Given first token is removed - Then tokens can be detokenized - - Scenario: Tokenize with pieces - When tokenizing with pieces: - """ - What is the capital of Germany? - åŖ½ - """ - Then tokens are given with pieces - - Scenario: Models available - Given available models - Then 1 models are supported - Then model 0 is identified by tinyllama-2 - Then model 0 is trained on 128 tokens context diff --git a/examples/server/tests/features/slotsave.feature b/examples/server/tests/features/slotsave.feature deleted file mode 100644 index 1c281c0741afe..0000000000000 --- a/examples/server/tests/features/slotsave.feature +++ /dev/null @@ -1,58 +0,0 @@ -@llama.cpp -@slotsave -Feature: llama.cpp server slot management - - Background: Server startup - Given a server listening on localhost:8080 - And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models - And prompt caching is enabled - And 2 slots - And . as slot save path - And 2048 KV cache size - And 42 as server seed - And 24 max tokens to predict - Then the server is starting - Then the server is healthy - - Scenario: Save and Restore Slot - # First prompt in slot 1 should be fully processed - Given a user prompt "What is the capital of France?" - And using slot id 1 - And a completion request with no api error - Then 24 tokens are predicted matching (Lily|cake) - And 22 prompt tokens are processed - When the slot 1 is saved with filename "slot1.bin" - Then the server responds with status code 200 - # Since we have cache, this should only process the last tokens - Given a user prompt "What is the capital of Germany?" - And a completion request with no api error - Then 24 tokens are predicted matching (Thank|special) - And 7 prompt tokens are processed - # Loading the original cache into slot 0, - # we should only be processing 1 prompt token and get the same output - When the slot 0 is restored with filename "slot1.bin" - Then the server responds with status code 200 - Given a user prompt "What is the capital of France?" - And using slot id 0 - And a completion request with no api error - Then 24 tokens are predicted matching (Lily|cake) - And 1 prompt tokens are processed - # For verification that slot 1 was not corrupted during slot 0 load, same thing - Given a user prompt "What is the capital of Germany?" - And using slot id 1 - And a completion request with no api error - Then 24 tokens are predicted matching (Thank|special) - And 1 prompt tokens are processed - - Scenario: Erase Slot - Given a user prompt "What is the capital of France?" - And using slot id 1 - And a completion request with no api error - Then 24 tokens are predicted matching (Lily|cake) - And 22 prompt tokens are processed - When the slot 1 is erased - Then the server responds with status code 200 - Given a user prompt "What is the capital of France?" 
diff --git a/examples/server/tests/features/steps/steps.py b/examples/server/tests/features/steps/steps.py
deleted file mode 100644
index 687b163f487b6..0000000000000
--- a/examples/server/tests/features/steps/steps.py
+++ /dev/null
@@ -1,1518 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
-
-import asyncio
-import json
-import os
-import re
-import socket
-import subprocess
-import sys
-import threading
-import time
-import requests
-from collections.abc import Sequence
-from contextlib import closing
-from re import RegexFlag
-from typing import Any, Literal, cast
-
-import aiohttp
-import numpy as np
-import openai
-from openai.types.chat import ChatCompletionChunk
-from behave import step  # pyright: ignore[reportAttributeAccessIssue]
-from behave.api.async_step import async_run_until_complete
-from prometheus_client import parser
-
-# pyright: reportRedeclaration=false
-
-DEFAULT_TIMEOUT_SECONDS = aiohttp.ClientTimeout(total=600)
-
-@step("a server listening on {server_fqdn}:{server_port}")
-def step_server_config(context, server_fqdn: str, server_port: str):
-    context.server_fqdn = server_fqdn
-    context.server_port = int(server_port)
-    context.n_threads = None
-    context.n_gpu_layer = None
-    if 'PORT' in os.environ:
-        context.server_port = int(os.environ['PORT'])
-        print(f"$PORT set, overriding server port to {context.server_port}")
-    if 'FQDN' in os.environ:
-        context.server_fqdn = os.environ['FQDN']
-        print(f"$FQDN set, overriding server fqdn to {context.server_fqdn}")
-    if 'N_GPU_LAYERS' in os.environ:
-        context.n_gpu_layer = int(os.environ['N_GPU_LAYERS'])
-        print(f"$N_GPU_LAYERS set, overriding n_gpu_layer to {context.n_gpu_layer}")
-
-    context.base_url = f'http://{context.server_fqdn}:{context.server_port}'
-
-    context.model_alias = None
-    context.model_file = None
-    context.model_hf_repo = None
-    context.model_hf_file = None
-    context.model_url = None
-    context.n_batch = None
-    context.n_ubatch = None
-    context.n_ctx = None
-    context.n_ga = None
-    context.n_ga_w = None
-    context.n_predict = None
-    context.n_prompts = 0
-    context.n_server_predict = None
-    context.slot_save_path = None
-    context.id_slot = None
-    context.cache_prompt = None
-    context.n_slots = None
-    context.prompt_prefix = None
-    context.prompt_suffix = None
-    context.server_api_key = None
-    context.server_continuous_batching = False
-    context.server_embeddings = False
-    context.server_reranking = False
-    context.server_metrics = False
-    context.server_process = None
-    context.seed = None
-    context.draft = None
-    context.server_seed = None
-    context.user_api_key = None
-    context.response_format = None
-    context.temperature = None
-    context.lora_file = None
-    context.disable_ctx_shift = False
-
-    # infill
-    context.infill_input_extra = None
-    context.infill_input_suffix = ''
-    context.infill_input_prefix = ''
-
-    context.tasks_result = []
-    context.concurrent_tasks = []
-    context.prompts = []
-
-    context.reranking_query = None
-    context.reranking_documents = []
-    context.reranking_results = None
-
-
-@step('a model file {hf_file} from HF repo {hf_repo}')
-def step_download_hf_model(context, hf_file: str, hf_repo: str):
-    context.model_hf_repo = hf_repo
-    context.model_hf_file = hf_file
-    context.model_file = os.path.basename(hf_file)
-
-@step('a lora adapter file from {lora_file_url}')
-def step_download_lora_file(context, lora_file_url: str):
-    file_name =
lora_file_url.split('/').pop() - context.lora_file = f'../../../{file_name}' - with open(context.lora_file, 'wb') as f: - f.write(requests.get(lora_file_url).content) - -@step('a model file {model_file}') -def step_model_file(context, model_file: str): - context.model_file = model_file - - -@step('a model url {model_url}') -def step_model_url(context, model_url: str): - context.model_url = model_url - - -@step('a model alias {model_alias}') -def step_model_alias(context, model_alias: str): - context.model_alias = model_alias - - -@step('{seed:d} as server seed') -def step_seed(context, seed: int): - context.server_seed = seed - - -@step('{ngl:d} GPU offloaded layers') -def step_n_gpu_layer(context, ngl: int): - if 'N_GPU_LAYERS' in os.environ: - new_ngl = int(os.environ['N_GPU_LAYERS']) - if context.debug: - print(f"-ngl upgraded from {ngl} to {new_ngl}") - ngl = new_ngl - context.n_gpu_layer = ngl - - -@step('{n_threads:d} threads') -def step_n_threads(context, n_threads: int): - context.n_thread = n_threads - - -@step('{draft:d} as draft') -def step_draft(context, draft: int): - context.draft = draft - - -@step('{n_ctx:d} KV cache size') -def step_n_ctx(context, n_ctx: int): - context.n_ctx = n_ctx - - -@step('{n_slots:d} slots') -def step_n_slots(context, n_slots: int): - context.n_slots = n_slots - - -@step('{n_predict:d} server max tokens to predict') -def step_server_n_predict(context, n_predict: int): - context.n_server_predict = n_predict if n_predict > 0 else None - - -@step('{slot_save_path} as slot save path') -def step_slot_save_path(context, slot_save_path: str): - context.slot_save_path = slot_save_path - - -@step('using slot id {id_slot:d}') -def step_id_slot(context, id_slot: int): - context.id_slot = id_slot - - -@step('prompt caching is enabled') -def step_enable_prompt_cache(context): - context.cache_prompt = True - - -@step('continuous batching') -def step_server_continuous_batching(context): - context.server_continuous_batching = True - - -@step('enable embeddings endpoint') -def step_server_embeddings(context): - context.server_embeddings = True - -@step('enable reranking endpoint') -def step_server_reranking(context): - context.server_reranking = True - -@step('prometheus compatible metrics exposed') -def step_server_metrics(context): - context.server_metrics = True - -@step('disable context shifting') -def step_server_disable_ctx_shift(context): - context.disable_ctx_shift = True - -@step("the server is starting") -def step_start_server(context): - start_server_background(context) - attempts = 0 - max_attempts = 20 - if 'GITHUB_ACTIONS' in os.environ: - max_attempts *= 2 - - addrs = socket.getaddrinfo(context.server_fqdn, context.server_port, type=socket.SOCK_STREAM) - family, typ, proto, _, sockaddr = addrs[0] - - while True: - with closing(socket.socket(family, typ, proto)) as sock: - result = sock.connect_ex(sockaddr) - if result == 0: - print("\x1b[33;46mserver started!\x1b[0m") - return - attempts += 1 - if attempts > max_attempts: - assert False, "server not started" - print(f"waiting for server to start, connect error code = {result}...") - time.sleep(0.1) - - -async def wait_for_server_status_with_timeout(context, expecting_status: Literal['healthy', 'ready', 'idle', 'busy'] | str, timeout: int): - match expecting_status: - case 'healthy': - await wait_for_slots_status(context, context.base_url, 200, - timeout=timeout) - - case 'ready' | 'idle': - await wait_for_slots_status(context, context.base_url, 200, - timeout=timeout, - params={'fail_on_no_slot': 1}, 
- slots_idle=context.n_slots, - slots_processing=0) - case 'busy': - await wait_for_slots_status(context, context.base_url, 503, - params={'fail_on_no_slot': 1}, - slots_idle=0, - slots_processing=context.n_slots) - case _: - assert False, "unknown status" - - -@step("the server is {expecting_status} with timeout {timeout:d} seconds") -@async_run_until_complete -async def step_wait_for_server_status_with_timeout(context, expecting_status: Literal['healthy', 'ready', 'idle', 'busy'] | str, timeout: int): - await wait_for_server_status_with_timeout(context, expecting_status, timeout) - - -@step("the server is {expecting_status}") -@async_run_until_complete -async def step_wait_for_server_status(context, expecting_status: Literal['healthy', 'ready', 'idle', 'busy'] | str): - await wait_for_server_status_with_timeout(context, expecting_status, 30) - - -@step('all slots are {expected_slot_status_string}') -@async_run_until_complete -async def step_all_slots_status(context, expected_slot_status_string: Literal['idle', 'busy'] | str): - match expected_slot_status_string: - case 'idle': - expected_slot_status = False - case 'busy': - expected_slot_status = True - case _: - assert False, "unknown status" - - expected_slots = [{'id': slot_id, 'is_processing': expected_slot_status} - for slot_id in range(context.n_slots)] - await request_slots_status(context, expected_slots) - - -@step('a completion request with {api_error} api error') -@async_run_until_complete -async def step_request_completion(context, api_error: Literal['raised'] | str): - expect_api_error = api_error == 'raised' or api_error != 'no' - seeds = await completions_seed(context, num_seeds=1) - completion = await request_completion(context.prompts.pop(), - seeds[0] if seeds is not None else seeds, - context.base_url, - debug=context.debug, - n_predict=context.n_predict, - cache_prompt=context.cache_prompt, - id_slot=context.id_slot, - expect_api_error=expect_api_error, - user_api_key=context.user_api_key, - temperature=context.temperature) - context.tasks_result.append(completion) - if context.debug: - print(f"Completion response: {completion}") - if api_error == 'raised': - assert completion == 401, f"completion must be an 401 status code: {completion}" - elif api_error.isdigit(): - api_error_code = int(api_error) - assert completion == api_error_code, f"completion must be an {api_error_code} status code: {completion}" - - -@step('an infill request with {api_error} api error') -@async_run_until_complete -async def step_request_completion(context, api_error: Literal['raised'] | str): - if api_error != 'no': - raise ValueError(f'api_error={api_error} is not yet implemented') - payload = { - "prompt": context.prompts[0], - "input_suffix": context.infill_input_suffix, - "input_prefix": context.infill_input_prefix, - "n_predict": context.n_predict, - "seed": context.seed, - "temperature": context.temperature, - } - if context.infill_input_extra is not None: - payload['input_extra'] = context.infill_input_extra - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{context.base_url}/infill', - json=payload) as response: - assert response.status == 200 - context.tasks_result = [await response.json()] - - -@step('{predicted_n:d} tokens are predicted matching {re_content}') -def step_n_tokens_predicted_with_content(context, predicted_n, re_content): - context.completion = context.tasks_result.pop() - assert_n_tokens_predicted(context.completion, predicted_n, re_content) - - 
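# The '{predicted_n} tokens are predicted ...' steps here delegate to
# assert_n_tokens_predicted, defined near the end of this file. A condensed,
# hedged sketch of that check (no ANSI highlighting; check_prediction is an
# illustrative name, not part of the original suite):
import re

def check_prediction(completion: dict, expected_n: int, re_content: str | None = None) -> None:
    content = completion['content']
    assert completion['timings']['predicted_n'] == expected_n
    if re_content is not None:
        flags = re.IGNORECASE | re.MULTILINE | re.DOTALL
        assert re.search(re_content, content, flags), f"no match for {re_content!r} in {content!r}"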
-@step('{predicted_n:d} tokens are predicted') -def step_n_tokens_predicted(context, predicted_n): - context.completion = context.tasks_result.pop() - assert_n_tokens_predicted(context.completion, predicted_n) - - -@step('all predictions are equal') -@async_run_until_complete -async def step_predictions_equal(context): - n_completions = await gather_tasks_results(context) - assert n_completions >= 2, "need at least 2 completions" - assert_all_predictions_equal(context.tasks_result) - context.tasks_result = [] - - -@step('all predictions are different') -@async_run_until_complete -async def step_predictions_different(context): - n_completions = await gather_tasks_results(context) - assert n_completions >= 2, "need at least 2 completions" - assert_all_predictions_different(context.tasks_result) - context.tasks_result = [] - - -@step('all token probabilities are equal') -@async_run_until_complete -async def step_token_probabilities_equal(context): - n_completions = await gather_tasks_results(context) - assert n_completions >= 2, "need at least 2 completions" - assert_all_token_probabilities_equal(context.tasks_result) - context.tasks_result = [] - - -@step('the completion is truncated') -def step_assert_completion_truncated(context): - step_assert_completion_truncated(context, '') - - -@step('the completion is {truncated} truncated') -def step_assert_completion_truncated(context, truncated): - truncated = truncated != "not" - assert context.completion['truncated'] == truncated, f'{context.completion}' - - -@step('{n_prompt:d} prompt tokens are processed') -def step_impl(context, n_prompt): - assert n_prompt < 0 or n_prompt == context.completion['timings']['prompt_n'], f"n_prompt={context.completion['timings']['prompt_n']}" - - -@step('a user prompt {user_prompt}') -def step_user_prompt(context, user_prompt): - context.prompts.append(user_prompt) - context.n_prompts = len(context.prompts) - - -@step('a system prompt {system_prompt}') -def step_system_prompt(context, system_prompt): - context.system_prompt = system_prompt - - -@step('a model {model}') -def step_model(context, model): - context.model = model - - -@step('{max_tokens:d} max tokens to predict') -def step_max_tokens(context, max_tokens): - context.n_predict = max_tokens - - -@step('a response format {response_format}') -def step_response_format(context, response_format): - context.response_format = json.loads(response_format) - - -@step('{temperature:f} temperature') -def step_temperature(context, temperature): - context.temperature = temperature - - -@step('streaming is {enable_streaming}') -def step_streaming(context, enable_streaming): - context.enable_streaming = enable_streaming == 'enabled' - - -@step('a user api key {user_api_key}') -def step_user_api_key(context, user_api_key): - context.user_api_key = user_api_key - - -@step('no user api key') -def step_no_user_api_key(context): - context.user_api_key = None - - -@step('a user api key ') -def step_no_user_api_key_space(context): - context.user_api_key = None - - -@step('a server api key {server_api_key}') -def step_server_api_key(context, server_api_key): - context.server_api_key = server_api_key - - -@step('{n_junk:d} as number of junk') -def step_n_junk(context, n_junk): - context.n_junk = n_junk - - -@step('{n_batch:d} as batch size') -def step_n_batch(context, n_batch): - context.n_batch = n_batch - - -@step('{n_ubatch:d} as ubatch size') -def step_n_ubatch(context, n_ubatch): - context.n_ubatch = n_ubatch - - -@step('{seed:d} as seed') -def step_seed(context, seed): - 
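    # Unlike '{seed:d} as server seed' above, this step accumulates one seed per
    # use into context.seed (first use creates the list, later uses append to
    # it); the completions_seed() helper further down consumes that list.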
if context.seed is None:
-        context.seed = [seed]
-    else:
-        context.seed.append(seed)
-
-
-@step('BOS token is {bos:d}')
-def step_bos_token(context, bos: int):
-    context.bos = bos
-
-
-@step('a prefix prompt')
-def step_prompt_prefix(context):
-    context.prompt_prefix = context_text(context)
-
-
-@step('a junk suffix prompt')
-def step_prompt_junk_suffix(context):
-    context.prompt_junk_suffix = context_text(context)
-
-
-@step('a suffix prompt')
-def step_prompt_suffix(context):
-    context.prompt_suffix = context_text(context)
-
-
-@step('{n_ga:d} group attention factor'
-      ' to extend context size through self-extend')
-def step_impl(context, n_ga):
-    context.n_ga = n_ga
-
-
-@step('{n_ga_w:d} group attention width to extend context size through self-extend')
-def step_impl(context, n_ga_w):
-    context.n_ga_w = n_ga_w
-
-
-@step('a passkey prompt template')
-def step_prompt_passkey(context):
-    context.prompt_passkey = context_text(context)
-
-@step('a rerank query')
-def step_set_rerank_query(context):
-    context.reranking_query = context_text(context)
-    context.reranking_documents = []
-
-@step('a rerank document')
-def step_set_rerank_document(context):
-    context.reranking_documents.append(context_text(context))
-
-@step('{n_prompts:d} fixed prompts')
-def step_fixed_prompts(context, n_prompts):
-    context.prompts.extend([str(0)*(context.n_batch if context.n_batch is not None else 512) for i in range(n_prompts)])
-    context.n_prompts = n_prompts
-
-
-@step('a "{passkey}" passkey challenge prompt with the passkey inserted every {i_pos:d} junk')
-def step_prompt_passkey(context, passkey, i_pos):
-    prompt = ""
-    for i in range(context.n_junk):
-        if i % context.n_junk == i_pos:
-            prompt += context.prompt_passkey  # the passkey is already substituted
-        prompt += context.prompt_junk_suffix
-    if context.debug:
-        passkey_highlight = "\x1b[33m" + passkey + "\x1b[0m"
-        print(f"Passkey challenge:\n```{prompt.replace(passkey, passkey_highlight)}```")
-    context.prompts.append(context.prompt_prefix + prompt + context.prompt_suffix)
-    context.n_prompts = len(context.prompts)
-
-
-@step('an OAI compatible chat completions request with {api_error} api error')
-@async_run_until_complete
-async def step_oai_chat_completions(context, api_error):
-    if context.debug:
-        print(f"Submitting OAI compatible completions request...")
-    expect_api_error = api_error == 'raised'
-    seeds = await completions_seed(context, num_seeds=1)
-    completion = await oai_chat_completions(context.prompts.pop(),
-                                            seeds[0] if seeds is not None else seeds,
-                                            context.system_prompt,
-                                            context.base_url,
-                                            '/v1/chat',
-                                            False,
-                                            model=context.model if hasattr(context, 'model') else None,
-
-                                            n_predict=context.n_predict
-                                            if hasattr(context, 'n_predict') else None,
-
-                                            enable_streaming=context.enable_streaming
-                                            if hasattr(context, 'enable_streaming') else None,
-
-                                            response_format=context.response_format
-                                            if hasattr(context, 'response_format') else None,
-
-                                            user_api_key=context.user_api_key
-                                            if hasattr(context, 'user_api_key') else None,
-
-                                            expect_api_error=expect_api_error)
-    context.tasks_result.append(completion)
-    if context.debug:
-        print(f"Completion response: {completion}")
-    if expect_api_error:
-        assert completion == 401, f"completion must be an 401 status code: {completion}"
-
-    if context.debug:
-        print(f"Completion response: {completion}")
-
-
-@step('a prompt')
-def step_a_prompt(context):
-    context.prompts.append(context_text(context))
-    context.n_prompts = len(context.prompts)
-
-
-@step('a prompt {prompt}')
-def
step_a_prompt_prompt(context, prompt): - context.prompts.append(prompt) - context.n_prompts = len(context.prompts) - - -# TODO: allow this to be repeated -@step('an infill input extra {filename} {text}') -def step_infill_input_extra(context, filename, text): - if filename == 'none': - context.infill_input_extra = None - else: - context.infill_input_extra = [{'filename': filename, 'text': text}] - - -@step('an infill input suffix {text}') -def step_infill_input_suffix(context, text): - context.infill_input_suffix = text - - -@step('an infill input prefix {text}') -def step_infill_input_prefix(context, text): - context.infill_input_prefix = text - - -@step('{num_prompts:d} prompts {prompt} with seed {seed:d}') -def step_many_prompts(context, num_prompts, prompt, seed): - if context.seed is None: - context.seed = [] - for _ in range(num_prompts): - context.seed.append(seed) - context.prompts.append(prompt) - context.n_prompts = len(context.prompts) - - -@step('concurrent completion requests') -@async_run_until_complete() -async def step_concurrent_completion_requests(context): - await concurrent_requests( - context, - request_completion, - # prompt is inserted automatically - context.base_url, - debug=context.debug, - prompt_prefix=context.prompt_prefix, - prompt_suffix=context.prompt_suffix, - n_predict=context.n_predict if hasattr(context, 'n_predict') else None, - user_api_key=context.user_api_key if hasattr(context, 'user_api_key') else None, - temperature=context.temperature, - ) - - -@step('concurrent OAI completions requests') -@async_run_until_complete -async def step_oai_chat_completions(context): - await concurrent_requests(context, oai_chat_completions, - # user_prompt is inserted automatically - context.system_prompt, - context.base_url, - '/v1/chat/completions', - True, # async_client - model=context.model - if hasattr(context, 'model') else None, - n_predict=context.n_predict - if hasattr(context, 'n_predict') else None, - enable_streaming=context.enable_streaming - if hasattr(context, 'enable_streaming') else None, - response_format=context.response_format - if hasattr(context, 'response_format') else None, - user_api_key=context.user_api_key - if hasattr(context, 'user_api_key') else None) - - -@step('concurrent OAI completions requests no v1') -@async_run_until_complete -async def step_oai_chat_completions(context): - await concurrent_requests(context, oai_chat_completions, - # user_prompt is inserted automatically - context.system_prompt, - context.base_url, - '/chat/completions', - True, # async_client - model=context.model - if hasattr(context, 'model') else None, - n_predict=context.n_predict - if hasattr(context, 'n_predict') else None, - enable_streaming=context.enable_streaming - if hasattr(context, 'enable_streaming') else None, - response_format=context.response_format - if hasattr(context, 'response_format') else None, - user_api_key=context.user_api_key - if hasattr(context, 'user_api_key') else None) - - -@step('all prompts are predicted') -@async_run_until_complete -async def step_all_prompts_are_predicted(context): - await all_prompts_are_predicted(context) - - -@step('all prompts are predicted with {n_expected_predicted:d} tokens') -@async_run_until_complete -async def step_all_prompts_are_predicted_with_n_tokens(context, n_expected_predicted): - await all_prompts_are_predicted(context, n_expected_predicted) - - -async def all_prompts_are_predicted(context, expected_predicted_n=None): - n_completions = await gather_tasks_results(context) - assert n_completions 
> 0 - for i in range(n_completions): - assert_n_tokens_predicted(context.tasks_result.pop(), expected_predicted_n=expected_predicted_n) - assert len(context.concurrent_tasks) == 0, f"{len(context.concurrent_tasks)} pending requests" - - -@step('embeddings are computed for') -@async_run_until_complete -async def step_compute_embedding(context): - context.n_prompts = 1 - context.embeddings = await request_embedding(context_text(context), None, base_url=context.base_url) - - -@step('reranking request') -@async_run_until_complete -async def step_compute_reranking(context): - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{context.base_url}/reranking', - json={ - "query": context.reranking_query, - "documents": context.reranking_documents, - }) as response: - if response.status == 200: - response_json = await response.json() - context.reranking_results = response_json['results'] - else: - context.reranking_results = response.status - - -@step('all embeddings are the same') -@async_run_until_complete -async def step_all_embeddings_are_the_same(context): - n_embedding_requests = await gather_tasks_results(context) - assert n_embedding_requests > 0 - embeddings = [] - for i in range(n_embedding_requests): - embedding = context.tasks_result.pop().pop() - embeddings.append(embedding) - assert_embeddings(embedding) - n = len(embeddings) - for i in range(n-1): - for j in range(i+1, n): - embedding1 = np.array(embeddings[i]) - embedding2 = np.array(embeddings[j]) - if context.debug: - print(f"embedding1: {embedding1[-8:]}") - print(f"embedding2: {embedding2[-8:]}") - similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2)) - msg = f"Similarity between {i} and {j}: {similarity:.10f}" - if context.debug: - print(f"{msg}") - assert np.isclose(similarity, 1.0, rtol=1e-05, atol=1e-08, equal_nan=False), msg - - -@step('embeddings are generated') -def step_assert_embeddings(context): - assert context.n_prompts == len(context.embeddings), (f"unexpected response:\n" - f"context.n_prompts={context.n_prompts}\n" - f"context.embeddings={context.embeddings}") - for embedding in context.embeddings: - assert_embeddings(embedding) - -@step('embeddings request with {api_error_code:d} api error') -def step_assert_embeddings(context, api_error_code: int): - assert context.embeddings == api_error_code, f"embeddings request must return code {api_error_code}, but got {context.embeddings}" - -@step('an OAI compatible embeddings computation request for') -@async_run_until_complete -async def step_oai_compute_embeddings(context): - context.n_prompts = 1 - context.embeddings = await request_oai_embeddings(context_text(context), None, - base_url=context.base_url, - user_api_key=context.user_api_key, - model=context.model) - - -@step('an OAI compatible embeddings computation request for multiple inputs') -@async_run_until_complete -async def step_oai_compute_embeddings_multiple_inputs(context): - context.embeddings = await request_oai_embeddings(context.prompts, None, - base_url=context.base_url, - user_api_key=context.user_api_key, - model=context.model) - context.prompts.clear() - - -@step('concurrent embedding requests') -@async_run_until_complete() -async def step_concurrent_embedding_requests(context): - await concurrent_requests(context, - request_embedding, - # prompt is inserted automatically - base_url=context.base_url) - - -@step('concurrent OAI embedding requests') -@async_run_until_complete() -async def 
step_concurrent_oai_embedding_requests(context): - await concurrent_requests(context, - request_oai_embeddings, - # prompt is inserted automatically - base_url=context.base_url, - async_client=True, - model=context.model) - - -@step('all embeddings are generated') -@async_run_until_complete() -async def all_embeddings_are_generated(context): - n_embedding_requests = await gather_tasks_results(context) - assert n_embedding_requests == context.n_prompts - for i in range(n_embedding_requests): - assert_embeddings(context.tasks_result.pop().pop()) - -@step('reranking results are returned') -def reranking_results_are_returned(context): - assert len(context.reranking_results) == len(context.reranking_documents) - -@step('reranking highest score is index {idx_high:d} and lowest score is index {idx_low:d}') -def reranking_results_are_returned(context, idx_high: int, idx_low: int): - max_score, max_idx = 0, 0 - min_score, min_idx = 0, 0 - for res in context.reranking_results: - if max_score < res['relevance_score']: - max_score = res['relevance_score'] - max_idx = res['index'] - if min_score > res['relevance_score']: - min_score = res['relevance_score'] - min_idx = res['index'] - print(context.reranking_results) - assert max_idx == idx_high - assert min_idx == idx_low - -@step('adding special tokens') -def step_tokenize_set_add_special(context): - context.tokenize_add_special = True - - -@step("tokenizing with pieces") -@async_run_until_complete -async def step_tokenize_with_pieces(context): - context.tokenized_text = context_text(context) - async with aiohttp.ClientSession() as session: - tokenize_args = {"content": context.tokenized_text, "with_pieces": True} - if getattr(context, "tokenize_add_special", None) is not None: - tokenize_args["add_special"] = context.tokenize_add_special - - async with session.post( - f"{context.base_url}/tokenize", json=tokenize_args - ) as response: - assert response.status == 200 - tokenize_json = await response.json() - context.tokens_with_pieces = tokenize_json["tokens"] - - -@step("tokens are given with pieces") -@async_run_until_complete -async def step_tokenize_with_pieces(context): - # Verify that the response contains both token IDs and pieces - assert all( - "id" in token and "piece" in token for token in context.tokens_with_pieces - ) - - -@step('tokenizing') -@async_run_until_complete -async def step_tokenize(context): - context.tokenized_text = context_text(context) - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - tokenize_args = { - "content": context.tokenized_text, - } - if getattr(context, 'tokenize_add_special', None) is not None: - tokenize_args['add_special'] = context.tokenize_add_special - async with session.post(f'{context.base_url}/tokenize', - json=tokenize_args) as response: - assert response.status == 200 - tokenize_json = await response.json() - context.tokens = tokenize_json['tokens'] - - -@step('tokens can be detokenized') -@async_run_until_complete -async def step_detokenize(context): - assert len(context.tokens) > 0 - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{context.base_url}/detokenize', - json={ - "tokens": context.tokens, - }) as response: - assert response.status == 200 - detokenize_json = await response.json() - # SPM tokenizer adds a whitespace prefix: https://github.com/google/sentencepiece/issues/15 - assert context.tokenized_text == detokenize_json['content'].strip() - - -@step('tokens begin with BOS') -def 
step_strings_for_tokenization(context):
-    assert context.tokens[0] == context.bos
-
-
-@step('tokens do not begin with BOS')
-def step_strings_for_tokenization(context):
-    assert context.tokens[0] != context.bos
-
-
-@step('first token is removed')
-def step_strings_for_tokenization(context):
-    context.tokens = context.tokens[1:]
-
-
-@step('an OPTIONS request is sent from {origin}')
-@async_run_until_complete
-async def step_options_request(context, origin):
-    async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session:
-        headers = {'Authorization': f'Bearer {context.user_api_key}', 'Origin': origin}
-        async with session.options(f'{context.base_url}/v1/chat/completions',
-                                   headers=headers) as response:
-            assert response.status == 200
-            context.options_response = response
-
-
-@step('CORS header {cors_header} is set to {cors_header_value}')
-def step_check_options_header_value(context, cors_header, cors_header_value):
-    assert context.options_response.headers[cors_header] == cors_header_value
-
-
-@step('prometheus metrics are exposed')
-@async_run_until_complete
-async def step_prometheus_metrics_exported(context):
-    async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session:
-        async with await session.get(f'{context.base_url}/metrics') as metrics_response:
-            assert metrics_response.status == 200
-            assert metrics_response.headers['Content-Type'] == "text/plain; version=0.0.4"
-            metrics_raw = await metrics_response.text()
-            metric_exported = False
-            if context.debug:
-                print(f"/metrics answer:\n{metrics_raw}")
-            context.metrics = {}
-            for metric in parser.text_string_to_metric_families(metrics_raw):
-                match metric.name:
-                    case "llamacpp:kv_cache_usage_ratio":
-                        assert len(metric.samples) > 0
-                        metric_exported = True
-                context.metrics[metric.name] = metric
-            assert int(metrics_response.headers["Process-Start-Time-Unix"]) > 0, "no header process start time"
-            assert metric_exported, "No metrics exported"
-
-
-@step('metric {metric_name} is {metric_value:d}')
-def step_assert_metric_value(context, metric_name, metric_value):
-    if metric_name not in context.metrics:
-        assert False, f"no metric {metric_name} in {context.metrics.keys()}"
-    assert context.metrics[metric_name].samples[0].value == metric_value, f"metric: {context.metrics[metric_name]}"
-
-
-@step('available models')
-def step_available_models(context):
-    # openai client always expects an api_key
-    openai.api_key = context.user_api_key if context.user_api_key is not None else 'nope'
-    openai.base_url = f'{context.base_url}/v1/'
-    context.models = openai.models.list().data
-
-
-@step('{n_model:d} models are supported')
-def step_supported_models(context, n_model):
-    if context.debug:
-        print("server models available:", context.models)
-    assert len(context.models) == n_model
-
-
-@step('model {i_model:d} is {param} {preposition} {param_value}')
-def step_supported_models(context, i_model: int, param: Literal['identified', 'trained'] | str, preposition: str, param_value: str):
-    assert i_model < len(context.models)
-    model = context.models[i_model]
-
-    param_value = param_value.split(' ', 1)[0]
-    match param:
-        case 'identified':
-            value = model.id
-        case 'trained':
-            value = str(model.meta["n_ctx_train"])
-        case _:
-            assert False, f"param {param} not supported"
-    assert param_value == value, f"model param {param} {value} != {param_value}"
-
-
-async def concurrent_requests(context, f_completion, *args, **kwargs):
-    context.n_prompts = len(context.prompts)
-    if context.debug:
-        print(f"starting
{context.n_prompts} concurrent completion requests...") - assert context.n_prompts > 0 - seeds = await completions_seed(context) - assert seeds is not None - for prompt_no in range(context.n_prompts): - shifted_args = [context.prompts.pop(), seeds[prompt_no], *args] - context.concurrent_tasks.append(asyncio.create_task(f_completion(*shifted_args, **kwargs))) - await asyncio.sleep(0.01) - - -@step('the slot {slot_id:d} is saved with filename "{filename}"') -@async_run_until_complete -async def step_save_slot(context, slot_id, filename): - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{context.base_url}/slots/{slot_id}?action=save', - json={"filename": filename}, - headers={"Content-Type": "application/json"}) as response: - context.response = response - - -@step('the slot {slot_id:d} is restored with filename "{filename}"') -@async_run_until_complete -async def step_restore_slot(context, slot_id, filename): - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{context.base_url}/slots/{slot_id}?action=restore', - json={"filename": filename}, - headers={"Content-Type": "application/json"}) as response: - context.response = response - - -@step('the slot {slot_id:d} is erased') -@async_run_until_complete -async def step_erase_slot(context, slot_id): - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{context.base_url}/slots/{slot_id}?action=erase', - headers={"Content-Type": "application/json"}) as response: - context.response = response - - -@step('switch {on_or_off} lora adapter {lora_id:d}') -@async_run_until_complete -async def toggle_lora_adapter(context, on_or_off: str, lora_id: int): - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{context.base_url}/lora-adapters', - json=[{'id': lora_id, 'scale': 1 if on_or_off == 'on' else 0}], - headers={"Content-Type": "application/json"}) as response: - context.response = response - print([{'id': lora_id, 'scale': 1 if on_or_off == 'on' else 0}]) - - -@step('the server responds with status code {status_code:d}') -def step_server_responds_with_status_code(context, status_code): - assert context.response.status == status_code - - -async def request_completion(prompt, - seed, - base_url, - debug=False, - prompt_prefix=None, - prompt_suffix=None, - n_predict=None, - cache_prompt=False, - id_slot=None, - expect_api_error=None, - user_api_key=None, - temperature=None) -> int | dict[str, Any]: - if debug: - print(f"Sending completion request: {prompt}") - origin = "my.super.domain" - headers = { - 'Origin': origin - } - if user_api_key is not None: - if debug: - print(f"Set user_api_key: {user_api_key}") - headers['Authorization'] = f'Bearer {user_api_key}' - - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{base_url}/completion', - json={ - "input_prefix": prompt_prefix, - "prompt": prompt, - "input_suffix": prompt_suffix, - "n_predict": n_predict if n_predict is not None else -1, - "cache_prompt": cache_prompt, - "id_slot": id_slot, - "seed": seed if seed is not None else 42, - "temperature": temperature if temperature is not None else 0.8, - "n_probs": 2, - }, - headers=headers) as response: - if expect_api_error is None or not expect_api_error: - assert response.status == 200 - assert response.headers['Access-Control-Allow-Origin'] == origin - return await 
response.json() - else: - return response.status - - -async def oai_chat_completions(user_prompt, - seed, - system_prompt, - base_url: str, - base_path: str, - async_client, - debug=False, - temperature=None, - model=None, - n_predict=None, - enable_streaming=None, - response_format=None, - user_api_key=None, - expect_api_error=None) -> int | dict[str, Any]: - if debug: - print(f"Sending OAI Chat completions request: {user_prompt}") - # openai client always expects an api key - user_api_key = user_api_key if user_api_key is not None else 'nope' - seed = seed if seed is not None else 42 - enable_streaming = enable_streaming if enable_streaming is not None else False - payload = { - "messages": [ - { - "role": "system", - "content": system_prompt, - }, - { - "role": "user", - "content": user_prompt, - } - ], - "model": model, - "max_tokens": n_predict, - "stream": enable_streaming, - "temperature": temperature if temperature is not None else 0.0, - "seed": seed, - } - if response_format is not None: - payload['response_format'] = response_format - completion_response = { - 'content': '', - 'timings': { - 'predicted_n': 0, - 'prompt_n': 0 - } - } - if async_client: - origin = 'llama.cpp' - headers = {'Authorization': f'Bearer {user_api_key}', 'Origin': origin} - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{base_url}{base_path}', - json=payload, - headers=headers) as response: - if enable_streaming: - assert response.status == 200 - assert response.headers['Access-Control-Allow-Origin'] == origin - assert response.headers['Content-Type'] == "text/event-stream" - event_received = True - while event_received: - event_received = False - async for line_in_bytes in response.content: - line = line_in_bytes.decode('utf-8') - line = line.rstrip('\n').rstrip('\r') - if line == '': - continue - event_data = line.split(': ', 1) - assert event_data[0] == 'data', f'Bad event code received: ```{event_data}```' - chunk_raw = event_data[1] - if chunk_raw == '[DONE]': - break - - chunk = json.loads(chunk_raw) - assert len(chunk['choices']) == 1, f"no choices provided, line ```{line}```" - delta = chunk['choices'][0]['delta'] - if 'content' in delta: - completion_response['content'] += delta['content'] - completion_response['timings']['predicted_n'] += 1 - else: - if expect_api_error is None or not expect_api_error: - assert response.status == 200 - assert response.headers['Access-Control-Allow-Origin'] == origin - assert response.headers['Content-Type'] == "application/json; charset=utf-8" - chat_completion_raw = await response.json() - completion_response = { - 'content': chat_completion_raw['choices'][0]['message'], - 'timings': { - 'predicted_n': chat_completion_raw['usage']['completion_tokens'], - 'prompt_n': chat_completion_raw['usage']['prompt_tokens'] - } - } - else: - return response.status - else: - try: - openai.api_key = user_api_key - openai.base_url = f'{base_url}{base_path.removesuffix("chat")}' - assert model is not None - chat_completion = openai.chat.completions.create( - messages=payload['messages'], - model=model, - max_tokens=n_predict, - stream=enable_streaming, - response_format=payload.get('response_format') or openai.NOT_GIVEN, - seed=seed, - temperature=payload['temperature'] - ) - except openai.AuthenticationError as e: - if expect_api_error is not None and expect_api_error: - return 401 - else: - assert False, f'error raised: {e}' - - if enable_streaming: - chat_completion = cast(openai.Stream[ChatCompletionChunk], 
chat_completion) - for chunk in chat_completion: - assert len(chunk.choices) == 1 - delta = chunk.choices[0].delta - if delta.content is not None: - completion_response['content'] += delta.content - completion_response['timings']['predicted_n'] += 1 - completion_response['truncated'] = chunk.choices[0].finish_reason != 'stop' - else: - assert len(chat_completion.choices) == 1 - assert chat_completion.usage is not None - completion_response = { - 'content': chat_completion.choices[0].message.content, - 'timings': { - 'predicted_n': chat_completion.usage.completion_tokens, - 'prompt_n': chat_completion.usage.prompt_tokens - }, - 'truncated': chat_completion.choices[0].finish_reason != 'stop' - } - if debug: - print("OAI response formatted to llama.cpp:", completion_response) - return completion_response - - -async def request_embedding(content, seed, base_url=None) -> list[list[float]] | int: - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{base_url}/embedding', - json={ - "content": content, - }) as response: - if response.status == 200: - response_json = await response.json() - return [response_json['embedding']] - else: - return response.status - - -async def request_oai_embeddings(input, seed, - base_url=None, user_api_key=None, - model=None, async_client=False) -> list[list[float]]: - # openai client always expects an api_key - user_api_key = user_api_key if user_api_key is not None else 'nope' - if async_client: - origin = 'llama.cpp' - headers=[] - if user_api_key is not None: - headers = {'Authorization': f'Bearer {user_api_key}', 'Origin': origin} - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with session.post(f'{base_url}/v1/embeddings', - json={ - "input": input, - "model": model, - }, - headers=headers) as response: - assert response.status == 200, f"received status code not expected: {response.status}" - assert response.headers['Access-Control-Allow-Origin'] == origin - assert response.headers['Content-Type'] == "application/json; charset=utf-8" - response_json = await response.json() - assert response_json['model'] == model, f"invalid model received: {response_json['model']}" - assert response_json['object'] == 'list' - if isinstance(input, Sequence): - embeddings = [] - for an_oai_embeddings in response_json['data']: - embeddings.append(an_oai_embeddings['embedding']) - else: - embeddings = [response_json['data']['embedding']] - return embeddings - else: - openai.api_key = user_api_key - openai.base_url = f'{base_url}/v1/' - assert model is not None - oai_embeddings = openai.embeddings.create( - model=model, - input=input, - ) - - return [e.embedding for e in oai_embeddings.data] - - -def assert_n_tokens_predicted(completion_response, expected_predicted_n=None, re_content=None): - content = completion_response['content'] - n_predicted = completion_response['timings']['predicted_n'] - assert len(content) > 0, "no token predicted" - if re_content is not None: - p = re.compile(re_content, flags=RegexFlag.IGNORECASE | RegexFlag.MULTILINE | RegexFlag.DOTALL) - matches = p.finditer(content) - last_match = 0 - highlighted = '' - for match in matches: - start, end = match.span() - highlighted += content[last_match: start] - highlighted += '\x1b[33m' - highlighted += content[start: end] - highlighted += '\x1b[0m' - last_match = end - highlighted += content[last_match:] - if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON': - print(f"Checking completion response: {highlighted}") - 
assert last_match > 0, f'/{re_content}/ must match ```{highlighted}```' - if expected_predicted_n and expected_predicted_n > 0: - assert n_predicted == expected_predicted_n, (f'invalid number of tokens predicted:' - f' {n_predicted} <> {expected_predicted_n}') - -def assert_all_predictions_equal(completion_responses): - if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON': - for i, response_i in enumerate(completion_responses): - content_i = response_i['content'] - print(f"content {i}: {content_i}") - for i, response_i in enumerate(completion_responses): - content_i = response_i['content'] - for j, response_j in enumerate(completion_responses): - if i == j: - continue - content_j = response_j['content'] - assert content_i == content_j, "contents not equal" - - -def assert_all_predictions_different(completion_responses): - if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON': - for i, response_i in enumerate(completion_responses): - content_i = response_i['content'] - print(f"content {i}: {content_i}") - for i, response_i in enumerate(completion_responses): - content_i = response_i['content'] - for j, response_j in enumerate(completion_responses): - if i == j: - continue - content_j = response_j['content'] - assert content_i != content_j, "contents not different" - - -def assert_all_token_probabilities_equal(completion_responses): - n_predict = len(completion_responses[0]['completion_probabilities']) - if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'ON': - for pos in range(n_predict): - for i, response_i in enumerate(completion_responses): - probs_i = response_i['completion_probabilities'][pos]['probs'] - print(f"pos {pos}, probs {i}: {probs_i}") - for pos in range(n_predict): - for i, response_i in enumerate(completion_responses): - probs_i = response_i['completion_probabilities'][pos]['probs'] - for j, response_j in enumerate(completion_responses): - if i == j: - continue - probs_j = response_j['completion_probabilities'][pos]['probs'] - assert probs_i == probs_j, "contents not equal" - - -async def gather_tasks_results(context): - n_tasks = len(context.concurrent_tasks) - if context.debug: - print(f"Waiting for all {n_tasks} tasks results...") - for task_no in range(n_tasks): - context.tasks_result.append(await context.concurrent_tasks.pop()) - n_completions = len(context.tasks_result) - return n_completions - - -async def wait_for_slots_status(context, - base_url, - expected_http_status_code, - timeout=3, - params=None, - slots_idle=None, - slots_processing=None): - if context.debug: - print(f"Starting checking for health for expected_http_status_code={expected_http_status_code}") - interval = 0.5 - counter = 0 - if 'GITHUB_ACTIONS' in os.environ: - timeout *= 2 - - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - while True: - headers = {'Authorization': f'Bearer {context.server_api_key}'} - async with await session.get(f'{base_url}/slots', params=params, headers=headers) as slots_response: - status_code = slots_response.status - slots = await slots_response.json() - if context.debug: - print(f"slots responses {slots}\n") - if status_code == 503 and status_code == expected_http_status_code: - return - if status_code == 200 and status_code == expected_http_status_code: - n_slots_idle = sum(1 if not slot["is_processing"] else 0 for slot in slots) - n_slots_processing = sum(1 if slot["is_processing"] else 0 for slot in slots) - if ((slots_idle is None or slots_idle == n_slots_idle) - and (slots_processing is None or slots_processing == 
n_slots_processing)): - return - await asyncio.sleep(interval) - - counter += interval - if counter >= timeout: - # Sometimes health requests are triggered after completions are predicted - if expected_http_status_code == 503: - if len(context.tasks_result) == 0: - print("\x1b[5;37;43mWARNING: forcing concurrent tasks," - " busy health check missed, probably too fast inference\x1b[0m\n") - n_completions = await gather_tasks_results(context) - if n_completions > 0: - return - - assert False, f'slots check timeout exceeded {counter}s>={timeout}' - - -def assert_embeddings(embeddings): - assert len(embeddings) > 0 - embeddings_computed = False - for emb in embeddings: - if not isinstance(emb, float): - assert False, f"Bad embeddings: {embeddings}" - if emb != 0: - embeddings_computed = True - assert embeddings_computed, f"Embeddings: {embeddings}" - - -async def request_slots_status(context, expected_slots): - async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session: - async with await session.get(f'{context.base_url}/slots') as slots_response: - assert slots_response.status == 200 - slots = await slots_response.json() - assert_slots_status(slots, expected_slots) - - -def assert_slots_status(slots, expected_slots): - assert len(slots) == len(expected_slots) - for slot_id, (expected, slot) in enumerate(zip(expected_slots, slots)): - for key in expected: - assert expected[key] == slot[key], (f"invalid slot {slot_id}" - f" expected[{key}] != slot[{key}]" - f" = {expected[key]} != {slot[key]}") - - -async def completions_seed(context, num_seeds=None): - if hasattr(context, "seed") and context.seed is not None: - assert len(context.seed) == context.n_prompts - if num_seeds is None: - num_seeds = context.n_prompts - assert num_seeds <= context.n_prompts - seeds = context.seed[:num_seeds] - context.seed = context.seed[num_seeds:] if num_seeds < context.n_prompts else None - return seeds - - if hasattr(context, "server_seed") and context.server_seed is not None: - if num_seeds is None: - return [context.server_seed] * context.n_prompts - else: - return [context.server_seed] * num_seeds - return None - - -def context_text(context): - return context.text.replace('\r', '') - - -def start_server_background(context): - if os.name == 'nt': - context.server_path = '../../../build/bin/Release/llama-server.exe' - else: - context.server_path = '../../../build/bin/llama-server' - if 'LLAMA_SERVER_BIN_PATH' in os.environ: - context.server_path = os.environ['LLAMA_SERVER_BIN_PATH'] - server_listen_addr = context.server_fqdn - server_args = [ - '--slots', # requires to get slot status via /slots endpoint - '--host', server_listen_addr, - '--port', context.server_port, - ] - if context.model_file: - server_args.extend(['--model', context.model_file]) - if context.model_url: - server_args.extend(['--model-url', context.model_url]) - if context.model_hf_repo: - server_args.extend(['--hf-repo', context.model_hf_repo]) - if context.model_hf_file: - server_args.extend(['--hf-file', context.model_hf_file]) - if context.n_batch: - server_args.extend(['--batch-size', context.n_batch]) - if context.n_ubatch: - server_args.extend(['--ubatch-size', context.n_ubatch]) - if context.n_threads: - server_args.extend(['--threads', context.threads]) - if context.n_gpu_layer: - server_args.extend(['--n-gpu-layers', context.n_gpu_layer]) - if context.draft is not None: - server_args.extend(['--draft', context.draft]) - if context.server_continuous_batching: - server_args.append('--cont-batching') - if 
context.server_embeddings: - server_args.append('--embedding') - if context.server_reranking: - server_args.append('--reranking') - if context.server_metrics: - server_args.append('--metrics') - if context.model_alias: - server_args.extend(['--alias', context.model_alias]) - if context.n_ctx: - server_args.extend(['--ctx-size', context.n_ctx]) - if context.n_slots: - server_args.extend(['--parallel', context.n_slots]) - if context.n_server_predict: - server_args.extend(['--n-predict', context.n_server_predict]) - if context.slot_save_path: - server_args.extend(['--slot-save-path', context.slot_save_path]) - if context.server_api_key: - server_args.extend(['--api-key', context.server_api_key]) - if context.n_ga: - server_args.extend(['--grp-attn-n', context.n_ga]) - if context.n_ga_w: - server_args.extend(['--grp-attn-w', context.n_ga_w]) - if context.debug: - server_args.append('--verbose') - if context.lora_file: - server_args.extend(['--lora', context.lora_file]) - if context.disable_ctx_shift: - server_args.extend(['--no-context-shift']) - - args = [str(arg) for arg in [context.server_path, *server_args]] - print(f"bench: starting server with: {' '.join(args)}") - - flags = 0 - if 'nt' == os.name: - flags |= subprocess.DETACHED_PROCESS - flags |= subprocess.CREATE_NEW_PROCESS_GROUP - flags |= subprocess.CREATE_NO_WINDOW - - pkwargs = { - 'creationflags': flags, - 'stdout': subprocess.PIPE, - 'stderr': subprocess.PIPE - } - context.server_process = subprocess.Popen( - [str(arg) for arg in [context.server_path, *server_args]], - **pkwargs) # pyright: ignore[reportArgumentType, reportCallIssue] - - def server_log(in_stream, out_stream): - for line in iter(in_stream.readline, b''): - print(line.decode('utf-8'), end='', file=out_stream) - - thread_stdout = threading.Thread(target=server_log, args=(context.server_process.stdout, sys.stdout)) - thread_stdout.start() - - thread_stderr = threading.Thread(target=server_log, args=(context.server_process.stderr, sys.stderr)) - thread_stderr.start() - - print(f"server pid={context.server_process.pid}, behave pid={os.getpid()}") diff --git a/examples/server/tests/features/wrong_usages.feature b/examples/server/tests/features/wrong_usages.feature deleted file mode 100644 index 61d5f315e1567..0000000000000 --- a/examples/server/tests/features/wrong_usages.feature +++ /dev/null @@ -1,25 +0,0 @@ -# run with: ./tests.sh --no-skipped --tags wrong_usage -@wrong_usage -Feature: Wrong usage of llama.cpp server - - #3969 The user must always set --n-predict option - # to cap the number of tokens any completion request can generate - # or pass n_predict/max_tokens in the request. 
-  Scenario: Infinite loop
-    Given a server listening on localhost:8080
-    And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
-    And 42 as server seed
-    And 2048 KV cache size
-    # Uncomment below to fix the issue
-    #And 64 server max tokens to predict
-    Then the server is starting
-    Then the server is healthy
-    Given a prompt:
-    """
-    Go to: infinite loop
-    """
-    # Uncomment below to fix the issue
-    #And 128 max tokens to predict
-    Given concurrent completion requests
-    Then the server is idle
-    Then all prompts are predicted
diff --git a/examples/server/tests/requirements.txt b/examples/server/tests/requirements.txt
index 5539548720ff1..074b9d47bddce 100644
--- a/examples/server/tests/requirements.txt
+++ b/examples/server/tests/requirements.txt
@@ -1,7 +1,7 @@
 aiohttp~=3.9.3
-behave~=1.2.6
+pytest~=8.3.3
 huggingface_hub~=0.23.2
 numpy~=1.26.4
-openai~=1.30.3
+openai~=1.55.3
 prometheus-client~=0.20.0
 requests~=2.32.3
diff --git a/examples/server/tests/tests.sh b/examples/server/tests/tests.sh
index 72a0fbad827db..1e0777de367fc 100755
--- a/examples/server/tests/tests.sh
+++ b/examples/server/tests/tests.sh
@@ -1,11 +1,14 @@
 #!/bin/bash
 
+# make sure we are in the right directory
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+cd $SCRIPT_DIR
+
 set -eu
 
 if [ $# -lt 1 ]
 then
-  # Start @llama.cpp scenario
-  behave --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags llama.cpp
+  pytest -v -x
 else
-  behave "$@"
+  pytest "$@"
 fi
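+
+# Illustrative invocations (standard pytest selectors; file paths are examples):
+#   ./tests.sh unit/test_basic.py -v
+#   ./tests.sh unit/test_completion.py::test_completion_stream
+#   ./tests.sh -k "chat and not stream"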
res.body[0]["params"]["seed"] == server.seed + + +def test_load_split_model(): + global server + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "tinyllamas/split/stories15M-q8_0-00001-of-00003.gguf" + server.model_alias = "tinyllama-split" + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": 16, + "prompt": "Hello", + "temperature": 0.0, + }) + assert res.status_code == 200 + assert match_regex("(little|girl)+", res.body["content"]) + + +def test_no_webui(): + global server + # default: webui enabled + server.start() + url = f"http://{server.server_host}:{server.server_port}" + res = requests.get(url) + assert res.status_code == 200 + assert "" in res.text + server.stop() + + # with --no-webui + server.no_webui = True + server.start() + res = requests.get(url) + assert res.status_code == 404 diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py new file mode 100644 index 0000000000000..0fa1a17c1f50a --- /dev/null +++ b/examples/server/tests/unit/test_chat_completion.py @@ -0,0 +1,225 @@ +import pytest +from openai import OpenAI +from utils import * + +server = ServerPreset.tinyllama2() + + +@pytest.fixture(scope="module", autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + + +@pytest.mark.parametrize( + "model,system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,finish_reason", + [ + (None, "Book", "What is the best book", 8, "(Suddenly)+", 77, 8, "length"), + ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length"), + ] +) +def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason): + global server + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "model": model, + "max_tokens": max_tokens, + "messages": [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ], + }) + assert res.status_code == 200 + assert "cmpl" in res.body["id"] # make sure the completion id has the expected format + assert res.body["model"] == model if model is not None else server.model_alias + assert res.body["usage"]["prompt_tokens"] == n_prompt + assert res.body["usage"]["completion_tokens"] == n_predicted + choice = res.body["choices"][0] + assert "assistant" == choice["message"]["role"] + assert match_regex(re_content, choice["message"]["content"]) + assert choice["finish_reason"] == finish_reason + + +@pytest.mark.parametrize( + "system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,finish_reason", + [ + ("Book", "What is the best book", 8, "(Suddenly)+", 77, 8, "length"), + ("You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length"), + ] +) +def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason): + global server + server.model_alias = None # try using DEFAULT_OAICOMPAT_MODEL + server.start() + res = server.make_stream_request("POST", "/chat/completions", data={ + "max_tokens": max_tokens, + "messages": [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ], + "stream": True, + }) + content = "" + last_cmpl_id = None + for data in res: + choice = data["choices"][0] + assert "gpt-3.5" in data["model"] # DEFAULT_OAICOMPAT_MODEL, maybe 
changed in the future + if last_cmpl_id is None: + last_cmpl_id = data["id"] + assert last_cmpl_id == data["id"] # make sure the completion id is the same for all events in the stream + if choice["finish_reason"] in ["stop", "length"]: + assert data["usage"]["prompt_tokens"] == n_prompt + assert data["usage"]["completion_tokens"] == n_predicted + assert "content" not in choice["delta"] + assert match_regex(re_content, content) + assert choice["finish_reason"] == finish_reason + else: + assert choice["finish_reason"] is None + content += choice["delta"]["content"] + + +def test_chat_completion_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + res = client.chat.completions.create( + model="gpt-3.5-turbo-instruct", + messages=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_tokens=8, + seed=42, + temperature=0.8, + ) + assert res.choices[0].finish_reason == "length" + assert res.choices[0].message.content is not None + assert match_regex("(Suddenly)+", res.choices[0].message.content) + + +@pytest.mark.parametrize("response_format,n_predicted,re_content", [ + ({"type": "json_object", "schema": {"const": "42"}}, 6, "\"42\""), + ({"type": "json_object", "schema": {"items": [{"type": "integer"}]}}, 10, "[ -3000 ]"), + ({"type": "json_object"}, 10, "(\\{|John)+"), + ({"type": "sound"}, 0, None), + # invalid response format (expected to fail) + ({"type": "json_object", "schema": 123}, 0, None), + ({"type": "json_object", "schema": {"type": 123}}, 0, None), + ({"type": "json_object", "schema": {"type": "hiccup"}}, 0, None), +]) +def test_completion_with_response_format(response_format: dict, n_predicted: int, re_content: str | None): + global server + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": n_predicted, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "Write an example"}, + ], + "response_format": response_format, + }) + if re_content is not None: + assert res.status_code == 200 + choice = res.body["choices"][0] + assert match_regex(re_content, choice["message"]["content"]) + else: + assert res.status_code != 200 + assert "error" in res.body + + +@pytest.mark.parametrize("messages", [ + None, + "string", + [123], + [{}], + [{"role": 123}], + [{"role": "system", "content": 123}], + # [{"content": "hello"}], # TODO: should not be a valid case + [{"role": "system", "content": "test"}, {}], +]) +def test_invalid_chat_completion_req(messages): + global server + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "messages": messages, + }) + assert res.status_code == 400 or res.status_code == 500 + assert "error" in res.body + + +def test_chat_completion_with_timings_per_token(): + global server + server.start() + res = server.make_stream_request("POST", "/chat/completions", data={ + "max_tokens": 10, + "messages": [{"role": "user", "content": "test"}], + "stream": True, + "timings_per_token": True, + }) + for data in res: + assert "timings" in data + assert "prompt_per_second" in data["timings"] + assert "predicted_per_second" in data["timings"] + assert "predicted_n" in data["timings"] + assert data["timings"]["predicted_n"] <= 10 + + +def test_logprobs(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + res = 
client.chat.completions.create( + model="gpt-3.5-turbo-instruct", + temperature=0.0, + messages=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_tokens=5, + logprobs=True, + top_logprobs=10, + ) + output_text = res.choices[0].message.content + aggregated_text = '' + assert res.choices[0].logprobs is not None + assert res.choices[0].logprobs.content is not None + for token in res.choices[0].logprobs.content: + aggregated_text += token.token + assert token.logprob <= 0.0 + assert token.bytes is not None + assert len(token.top_logprobs) > 0 + assert aggregated_text == output_text + + +def test_logprobs_stream(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + res = client.chat.completions.create( + model="gpt-3.5-turbo-instruct", + temperature=0.0, + messages=[ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ], + max_tokens=5, + logprobs=True, + top_logprobs=10, + stream=True, + ) + output_text = '' + aggregated_text = '' + for data in res: + choice = data.choices[0] + if choice.finish_reason is None: + if choice.delta.content: + output_text += choice.delta.content + assert choice.logprobs is not None + assert choice.logprobs.content is not None + for token in choice.logprobs.content: + aggregated_text += token.token + assert token.logprob <= 0.0 + assert token.bytes is not None + assert token.top_logprobs is not None + assert len(token.top_logprobs) > 0 + assert aggregated_text == output_text diff --git a/examples/server/tests/unit/test_completion.py b/examples/server/tests/unit/test_completion.py new file mode 100644 index 0000000000000..b88d45f18547f --- /dev/null +++ b/examples/server/tests/unit/test_completion.py @@ -0,0 +1,337 @@ +import pytest +import time +from utils import * + +server = ServerPreset.tinyllama2() + + +@pytest.fixture(scope="module", autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + +@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated,return_tokens", [ + ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False, False), + ("Write a joke about AI from a very long prompt which will not be truncated", 256, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False, True), +]) +def test_completion(prompt: str, n_predict: int, re_content: str, n_prompt: int, n_predicted: int, truncated: bool, return_tokens: bool): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": n_predict, + "prompt": prompt, + "return_tokens": return_tokens, + }) + assert res.status_code == 200 + assert res.body["timings"]["prompt_n"] == n_prompt + assert res.body["timings"]["predicted_n"] == n_predicted + assert res.body["truncated"] == truncated + assert type(res.body["has_new_line"]) == bool + assert match_regex(re_content, res.body["content"]) + if return_tokens: + assert len(res.body["tokens"]) > 0 + assert all(type(tok) == int for tok in res.body["tokens"]) + else: + assert res.body["tokens"] == [] + + +@pytest.mark.parametrize("prompt,n_predict,re_content,n_prompt,n_predicted,truncated", [ + ("I believe the meaning of life is", 8, "(going|bed)+", 18, 8, False), + ("Write a joke about AI from a very long prompt which will not be truncated", 256, "(princesses|everyone|kids|Anna|forest)+", 46, 64, False), +]) +def test_completion_stream(prompt: str, n_predict: int, 
re_content: str, n_prompt: int, n_predicted: int, truncated: bool): + global server + server.start() + res = server.make_stream_request("POST", "/completion", data={ + "n_predict": n_predict, + "prompt": prompt, + "stream": True, + }) + content = "" + for data in res: + assert "stop" in data and type(data["stop"]) == bool + if data["stop"]: + assert data["timings"]["prompt_n"] == n_prompt + assert data["timings"]["predicted_n"] == n_predicted + assert data["truncated"] == truncated + assert data["stop_type"] == "limit" + assert type(data["has_new_line"]) == bool + assert "generation_settings" in data + assert server.n_predict is not None + assert data["generation_settings"]["n_predict"] == min(n_predict, server.n_predict) + assert data["generation_settings"]["seed"] == server.seed + assert match_regex(re_content, content) + else: + assert len(data["tokens"]) > 0 + assert all(type(tok) == int for tok in data["tokens"]) + content += data["content"] + + +def test_completion_stream_vs_non_stream(): + global server + server.start() + res_stream = server.make_stream_request("POST", "/completion", data={ + "n_predict": 8, + "prompt": "I believe the meaning of life is", + "stream": True, + }) + res_non_stream = server.make_request("POST", "/completion", data={ + "n_predict": 8, + "prompt": "I believe the meaning of life is", + }) + content_stream = "" + for data in res_stream: + content_stream += data["content"] + assert content_stream == res_non_stream.body["content"] + + +@pytest.mark.parametrize("n_slots", [1, 2]) +def test_consistent_result_same_seed(n_slots: int): + global server + server.n_slots = n_slots + server.start() + last_res = None + for _ in range(4): + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": 42, + "temperature": 1.0, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + if last_res is not None: + assert res.body["content"] == last_res.body["content"] + last_res = res + + +@pytest.mark.parametrize("n_slots", [1, 2]) +def test_different_result_different_seed(n_slots: int): + global server + server.n_slots = n_slots + server.start() + last_res = None + for seed in range(4): + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": seed, + "temperature": 1.0, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + if last_res is not None: + assert res.body["content"] != last_res.body["content"] + last_res = res + + +@pytest.mark.parametrize("n_batch", [16, 32]) +@pytest.mark.parametrize("temperature", [0.0, 1.0]) +def test_consistent_result_different_batch_size(n_batch: int, temperature: float): + global server + server.n_batch = n_batch + server.start() + last_res = None + for _ in range(4): + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": 42, + "temperature": temperature, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + if last_res is not None: + assert res.body["content"] == last_res.body["content"] + last_res = res + + +@pytest.mark.skip(reason="This test fails on linux, need to be fixed") +def test_cache_vs_nocache_prompt(): + global server + server.start() + res_cache = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": 42, + "temperature": 1.0, + "cache_prompt": True, + }) + res_no_cache = 
server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "seed": 42, + "temperature": 1.0, + "cache_prompt": False, + }) + assert res_cache.body["content"] == res_no_cache.body["content"] + + +def test_completion_with_tokens_input(): + global server + server.temperature = 0.0 + server.start() + prompt_str = "I believe the meaning of life is" + res = server.make_request("POST", "/tokenize", data={ + "content": prompt_str, + "add_special": True, + }) + assert res.status_code == 200 + tokens = res.body["tokens"] + + # single completion + res = server.make_request("POST", "/completion", data={ + "prompt": tokens, + }) + assert res.status_code == 200 + assert type(res.body["content"]) == str + + # batch completion + res = server.make_request("POST", "/completion", data={ + "prompt": [tokens, tokens], + }) + assert res.status_code == 200 + assert type(res.body) == list + assert len(res.body) == 2 + assert res.body[0]["content"] == res.body[1]["content"] + + # mixed string and tokens + res = server.make_request("POST", "/completion", data={ + "prompt": [tokens, prompt_str], + }) + assert res.status_code == 200 + assert type(res.body) == list + assert len(res.body) == 2 + assert res.body[0]["content"] == res.body[1]["content"] + + # mixed string and tokens in one sequence + res = server.make_request("POST", "/completion", data={ + "prompt": [1, 2, 3, 4, 5, 6, prompt_str, 7, 8, 9, 10, prompt_str], + }) + assert res.status_code == 200 + assert type(res.body["content"]) == str + + +@pytest.mark.parametrize("n_slots,n_requests", [ + (1, 3), + (2, 2), + (2, 4), + (4, 2), # some slots must be idle + (4, 6), +]) +def test_completion_parallel_slots(n_slots: int, n_requests: int): + global server + server.n_slots = n_slots + server.temperature = 0.0 + server.start() + + PROMPTS = [ + ("Write a very long book.", "(very|special|big)+"), + ("Write another a poem.", "(small|house)+"), + ("What is LLM?", "(Dad|said)+"), + ("The sky is blue and I love it.", "(climb|leaf)+"), + ("Write another very long music lyrics.", "(friends|step|sky)+"), + ("Write a very long joke.", "(cat|Whiskers)+"), + ] + def check_slots_status(): + should_all_slots_busy = n_requests >= n_slots + time.sleep(0.1) + res = server.make_request("GET", "/slots") + n_busy = sum([1 for slot in res.body if slot["is_processing"]]) + if should_all_slots_busy: + assert n_busy == n_slots + else: + assert n_busy <= n_slots + + tasks = [] + for i in range(n_requests): + prompt, re_content = PROMPTS[i % len(PROMPTS)] + tasks.append((server.make_request, ("POST", "/completion", { + "prompt": prompt, + "seed": 42, + "temperature": 1.0, + }))) + tasks.append((check_slots_status, ())) + results = parallel_function_calls(tasks) + + # check results + for i in range(n_requests): + prompt, re_content = PROMPTS[i % len(PROMPTS)] + res = results[i] + assert res.status_code == 200 + assert type(res.body["content"]) == str + assert len(res.body["content"]) > 10 + # FIXME: the result is not deterministic when using other slot than slot 0 + # assert match_regex(re_content, res.body["content"]) + + +def test_n_probs(): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "n_probs": 10, + "temperature": 0.0, + "n_predict": 5, + }) + assert res.status_code == 200 + assert "completion_probabilities" in res.body + assert len(res.body["completion_probabilities"]) == 5 + for tok in res.body["completion_probabilities"]: + assert "id" in tok and 
tok["id"] > 0 + assert "token" in tok and type(tok["token"]) == str + assert "logprob" in tok and tok["logprob"] <= 0.0 + assert "bytes" in tok and type(tok["bytes"]) == list + assert len(tok["top_logprobs"]) == 10 + for prob in tok["top_logprobs"]: + assert "id" in prob and prob["id"] > 0 + assert "token" in prob and type(prob["token"]) == str + assert "logprob" in prob and prob["logprob"] <= 0.0 + assert "bytes" in prob and type(prob["bytes"]) == list + + +def test_n_probs_stream(): + global server + server.start() + res = server.make_stream_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "n_probs": 10, + "temperature": 0.0, + "n_predict": 5, + "stream": True, + }) + for data in res: + if data["stop"] == False: + assert "completion_probabilities" in data + assert len(data["completion_probabilities"]) == 1 + for tok in data["completion_probabilities"]: + assert "id" in tok and tok["id"] > 0 + assert "token" in tok and type(tok["token"]) == str + assert "logprob" in tok and tok["logprob"] <= 0.0 + assert "bytes" in tok and type(tok["bytes"]) == list + assert len(tok["top_logprobs"]) == 10 + for prob in tok["top_logprobs"]: + assert "id" in prob and prob["id"] > 0 + assert "token" in prob and type(prob["token"]) == str + assert "logprob" in prob and prob["logprob"] <= 0.0 + assert "bytes" in prob and type(prob["bytes"]) == list + + +def test_n_probs_post_sampling(): + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "n_probs": 10, + "temperature": 0.0, + "n_predict": 5, + "post_sampling_probs": True, + }) + assert res.status_code == 200 + assert "completion_probabilities" in res.body + assert len(res.body["completion_probabilities"]) == 5 + for tok in res.body["completion_probabilities"]: + assert "id" in tok and tok["id"] > 0 + assert "token" in tok and type(tok["token"]) == str + assert "prob" in tok and 0.0 < tok["prob"] <= 1.0 + assert "bytes" in tok and type(tok["bytes"]) == list + assert len(tok["top_probs"]) == 10 + for prob in tok["top_probs"]: + assert "id" in prob and prob["id"] > 0 + assert "token" in prob and type(prob["token"]) == str + assert "prob" in prob and 0.0 <= prob["prob"] <= 1.0 + assert "bytes" in prob and type(prob["bytes"]) == list + # because the test model usually output token with either 100% or 0% probability, we need to check all the top_probs + assert any(prob["prob"] == 1.0 for prob in tok["top_probs"]) diff --git a/examples/server/tests/unit/test_ctx_shift.py b/examples/server/tests/unit/test_ctx_shift.py new file mode 100644 index 0000000000000..be93a6d31f410 --- /dev/null +++ b/examples/server/tests/unit/test_ctx_shift.py @@ -0,0 +1,67 @@ +import pytest +from utils import * + +server = ServerPreset.tinyllama2() + + +LONG_TEXT = """ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. +Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. +Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. 
+""".strip() + +@pytest.fixture(scope="module", autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.n_ctx = 256 + server.n_slots = 2 + + +def test_ctx_shift_enabled(): + # the prompt is 301 tokens + # the slot context is 256/2 = 128 tokens + # the prompt is truncated to keep the last 109 tokens + # 64 tokens are generated thanks to shifting the context when it gets full + global server + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": 64, + "prompt": LONG_TEXT, + }) + assert res.status_code == 200 + assert res.body["timings"]["prompt_n"] == 109 + assert res.body["timings"]["predicted_n"] == 64 + assert res.body["truncated"] is True + + +@pytest.mark.parametrize("n_predict,n_token_output,truncated", [ + (64, 64, False), + (-1, 120, True), +]) +def test_ctx_shift_disabled_short_prompt(n_predict: int, n_token_output: int, truncated: bool): + global server + server.disable_ctx_shift = True + server.n_predict = -1 + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": n_predict, + "prompt": "Hi how are you", + }) + assert res.status_code == 200 + assert res.body["timings"]["predicted_n"] == n_token_output + assert res.body["truncated"] == truncated + + +def test_ctx_shift_disabled_long_prompt(): + global server + server.disable_ctx_shift = True + server.start() + res = server.make_request("POST", "/completion", data={ + "n_predict": 64, + "prompt": LONG_TEXT, + }) + assert res.status_code != 200 + assert "error" in res.body + assert "exceeds the available context size" in res.body["error"]["message"] diff --git a/examples/server/tests/unit/test_embedding.py b/examples/server/tests/unit/test_embedding.py new file mode 100644 index 0000000000000..43e372fc70d71 --- /dev/null +++ b/examples/server/tests/unit/test_embedding.py @@ -0,0 +1,196 @@ +import pytest +from openai import OpenAI +from utils import * + +server = ServerPreset.bert_bge_small() + +EPSILON = 1e-3 + +@pytest.fixture(scope="module", autouse=True) +def create_server(): + global server + server = ServerPreset.bert_bge_small() + + +def test_embedding_single(): + global server + server.pooling = 'last' + server.start() + res = server.make_request("POST", "/v1/embeddings", data={ + "input": "I believe the meaning of life is", + }) + assert res.status_code == 200 + assert len(res.body['data']) == 1 + assert 'embedding' in res.body['data'][0] + assert len(res.body['data'][0]['embedding']) > 1 + + # make sure embedding vector is normalized + assert abs(sum([x ** 2 for x in res.body['data'][0]['embedding']]) - 1) < EPSILON + + +def test_embedding_multiple(): + global server + server.pooling = 'last' + server.start() + res = server.make_request("POST", "/v1/embeddings", data={ + "input": [ + "I believe the meaning of life is", + "Write a joke about AI from a very long prompt which will not be truncated", + "This is a test", + "This is another test", + ], + }) + assert res.status_code == 200 + assert len(res.body['data']) == 4 + for d in res.body['data']: + assert 'embedding' in d + assert len(d['embedding']) > 1 + + +@pytest.mark.parametrize( + "input,is_multi_prompt", + [ + # do not crash on empty input + ("", False), + # single prompt + ("string", False), + ([12, 34, 56], False), + ([12, 34, "string", 56, 78], False), + # multiple prompts + (["string1", "string2"], True), + (["string1", [12, 34, 56]], True), + ([[12, 34, 56], [12, 34, 56]], True), + ([[12, 34, 56], [12, "string", 34, 56]], True), + ] +) +def 
+
+
+@pytest.mark.parametrize("n_predict,n_token_output,truncated", [
+    (64, 64, False),
+    (-1, 120, True),
+])
+def test_ctx_shift_disabled_short_prompt(n_predict: int, n_token_output: int, truncated: bool):
+    global server
+    server.disable_ctx_shift = True
+    server.n_predict = -1
+    server.start()
+    res = server.make_request("POST", "/completion", data={
+        "n_predict": n_predict,
+        "prompt": "Hi how are you",
+    })
+    assert res.status_code == 200
+    assert res.body["timings"]["predicted_n"] == n_token_output
+    assert res.body["truncated"] == truncated
+
+
+def test_ctx_shift_disabled_long_prompt():
+    global server
+    server.disable_ctx_shift = True
+    server.start()
+    res = server.make_request("POST", "/completion", data={
+        "n_predict": 64,
+        "prompt": LONG_TEXT,
+    })
+    assert res.status_code != 200
+    assert "error" in res.body
+    assert "exceeds the available context size" in res.body["error"]["message"]
diff --git a/examples/server/tests/unit/test_embedding.py b/examples/server/tests/unit/test_embedding.py
new file mode 100644
index 0000000000000..43e372fc70d71
--- /dev/null
+++ b/examples/server/tests/unit/test_embedding.py
@@ -0,0 +1,196 @@
+import pytest
+from openai import OpenAI
+from utils import *
+
+server = ServerPreset.bert_bge_small()
+
+EPSILON = 1e-3
+
+@pytest.fixture(scope="module", autouse=True)
+def create_server():
+    global server
+    server = ServerPreset.bert_bge_small()
+
+
+def test_embedding_single():
+    global server
+    server.pooling = 'last'
+    server.start()
+    res = server.make_request("POST", "/v1/embeddings", data={
+        "input": "I believe the meaning of life is",
+    })
+    assert res.status_code == 200
+    assert len(res.body['data']) == 1
+    assert 'embedding' in res.body['data'][0]
+    assert len(res.body['data'][0]['embedding']) > 1
+
+    # make sure embedding vector is normalized
+    assert abs(sum([x ** 2 for x in res.body['data'][0]['embedding']]) - 1) < EPSILON
+
+
+def test_embedding_multiple():
+    global server
+    server.pooling = 'last'
+    server.start()
+    res = server.make_request("POST", "/v1/embeddings", data={
+        "input": [
+            "I believe the meaning of life is",
+            "Write a joke about AI from a very long prompt which will not be truncated",
+            "This is a test",
+            "This is another test",
+        ],
+    })
+    assert res.status_code == 200
+    assert len(res.body['data']) == 4
+    for d in res.body['data']:
+        assert 'embedding' in d
+        assert len(d['embedding']) > 1
+
+
+@pytest.mark.parametrize(
+    "input,is_multi_prompt",
+    [
+        # do not crash on empty input
+        ("", False),
+        # single prompt
+        ("string", False),
+        ([12, 34, 56], False),
+        ([12, 34, "string", 56, 78], False),
+        # multiple prompts
+        (["string1", "string2"], True),
+        (["string1", [12, 34, 56]], True),
+        ([[12, 34, 56], [12, 34, 56]], True),
+        ([[12, 34, 56], [12, "string", 34, 56]], True),
+    ]
+)
+def test_embedding_mixed_input(input, is_multi_prompt: bool):
+    global server
+    server.start()
+    res = server.make_request("POST", "/v1/embeddings", data={"input": input})
+    assert res.status_code == 200
+    data = res.body['data']
+    if is_multi_prompt:
+        assert len(data) == len(input)
+        for d in data:
+            assert 'embedding' in d
+            assert len(d['embedding']) > 1
+    else:
+        assert 'embedding' in data[0]
+        assert len(data[0]['embedding']) > 1
+
+
+def test_embedding_pooling_none():
+    global server
+    server.pooling = 'none'
+    server.start()
+    res = server.make_request("POST", "/embeddings", data={
+        "input": "hello hello hello",
+    })
+    assert res.status_code == 200
+    assert 'embedding' in res.body[0]
+    assert len(res.body[0]['embedding']) == 5  # 3 text tokens + 2 special
+
+    # make sure embedding vector is not normalized
+    for emb in res.body[0]['embedding']:
+        assert abs(sum([x ** 2 for x in emb]) - 1) > EPSILON
+
+
+def test_embedding_pooling_none_oai():
+    global server
+    server.pooling = 'none'
+    server.start()
+    res = server.make_request("POST", "/v1/embeddings", data={
+        "input": "hello hello hello",
+    })
+
+    # /v1/embeddings does not support pooling type 'none'
+    assert res.status_code == 400
+    assert "error" in res.body
+
+
+def test_embedding_openai_library_single():
+    global server
+    server.pooling = 'last'
+    server.start()
+    client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
+    res = client.embeddings.create(model="text-embedding-3-small", input="I believe the meaning of life is")
+    assert len(res.data) == 1
+    assert len(res.data[0].embedding) > 1
+
+
+def test_embedding_openai_library_multiple():
+    global server
+    server.pooling = 'last'
+    server.start()
+    client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1")
+    res = client.embeddings.create(model="text-embedding-3-small", input=[
+        "I believe the meaning of life is",
+        "Write a joke about AI from a very long prompt which will not be truncated",
+        "This is a test",
+        "This is another test",
+    ])
+    assert len(res.data) == 4
+    for d in res.data:
+        assert len(d.embedding) > 1
+
+
+def test_embedding_error_prompt_too_long():
+    global server
+    server.pooling = 'last'
+    server.start()
+    res = server.make_request("POST", "/v1/embeddings", data={
+        "input": "This is a test " * 512,
+    })
+    assert res.status_code != 200
+    assert "too large" in res.body["error"]["message"]
+
+
+def test_same_prompt_give_same_result():
+    server.pooling = 'last'
+    server.start()
+    res = server.make_request("POST", "/v1/embeddings", data={
+        "input": [
+            "I believe the meaning of life is",
+            "I believe the meaning of life is",
+            "I believe the meaning of life is",
+            "I believe the meaning of life is",
+            "I believe the meaning of life is",
+        ],
+    })
+    assert res.status_code == 200
+    assert len(res.body['data']) == 5
+    for i in range(1, len(res.body['data'])):
+        v0 = res.body['data'][0]['embedding']
+        vi = res.body['data'][i]['embedding']
+        for x, y in zip(v0, vi):
+            assert abs(x - y) < EPSILON
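+
+# Since the pooled vectors are L2-normalized (see test_embedding_single), the
+# element-wise check above roughly amounts to requiring a dot product close to
+# 1, e.g. (illustrative):
+#
+#   assert abs(sum(x * y for x, y in zip(v0, vi)) - 1) < EPSILON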
+
+
+@pytest.mark.parametrize(
+    "content,n_tokens",
+    [
+        ("I believe the meaning of life is", 9),
+        ("This is a test", 6),
+    ]
+)
+def test_embedding_usage_single(content, n_tokens):
+    global server
+    server.start()
+    res = server.make_request("POST", "/v1/embeddings", data={"input": content})
+    assert res.status_code == 200
+    assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
+    assert res.body['usage']['prompt_tokens'] == n_tokens
+
+
+def test_embedding_usage_multiple():
+    global server
+    server.start()
+    res = server.make_request("POST", "/v1/embeddings", data={
+        "input": [
+            "I believe the meaning of life is",
+            "I believe the meaning of life is",
+        ],
+    })
+    assert res.status_code == 200
+    assert res.body['usage']['prompt_tokens'] == res.body['usage']['total_tokens']
+    assert res.body['usage']['prompt_tokens'] == 2 * 9
diff --git a/examples/server/tests/unit/test_infill.py b/examples/server/tests/unit/test_infill.py
new file mode 100644
index 0000000000000..ad4b8192a7875
--- /dev/null
+++ b/examples/server/tests/unit/test_infill.py
@@ -0,0 +1,77 @@
+import pytest
+from utils import *
+
+server = ServerPreset.tinyllama_infill()
+
+@pytest.fixture(scope="module", autouse=True)
+def create_server():
+    global server
+    server = ServerPreset.tinyllama_infill()
+
+
+def test_infill_without_input_extra():
+    global server
+    server.start()
+    res = server.make_request("POST", "/infill", data={
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
+        "prompt": "    int n_threads = llama_",
+        "input_suffix": "}\n",
+    })
+    assert res.status_code == 200
+    assert match_regex("(Ann|small|shiny)+", res.body["content"])
+
+
+def test_infill_with_input_extra():
+    global server
+    server.start()
+    res = server.make_request("POST", "/infill", data={
+        "input_extra": [{
+            "filename": "llama.h",
+            "text": "LLAMA_API int32_t llama_n_threads();\n"
+        }],
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
+        "prompt": "    int n_threads = llama_",
+        "input_suffix": "}\n",
+    })
+    assert res.status_code == 200
+    assert match_regex("(Dad|excited|park)+", res.body["content"])
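+
+# input_extra supplies extra file chunks (filename + text) as additional
+# context for fill-in-the-middle; see test_with_qwen_model below, where the
+# declared llama_n_threads() prototype steers a real coder model's infill.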
+
+
+@pytest.mark.parametrize("input_extra", [
+    {},
+    {"filename": "ok"},
+    {"filename": 123},
+    {"filename": 123, "text": "abc"},
+    {"filename": 123, "text": 456},
+])
+def test_invalid_input_extra_req(input_extra):
+    global server
+    server.start()
+    res = server.make_request("POST", "/infill", data={
+        "input_extra": [input_extra],
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
+        "prompt": "    int n_threads = llama_",
+        "input_suffix": "}\n",
+    })
+    assert res.status_code == 400
+    assert "error" in res.body
+
+
+@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test")
+def test_with_qwen_model():
+    global server
+    server.model_file = None
+    server.model_hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-IQ3_XXS-GGUF"
+    server.model_hf_file = "qwen2.5-coder-1.5b-iq3_xxs-imat.gguf"
+    server.start(timeout_seconds=600)
+    res = server.make_request("POST", "/infill", data={
+        "input_extra": [{
+            "filename": "llama.h",
+            "text": "LLAMA_API int32_t llama_n_threads();\n"
+        }],
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n",
+        "prompt": "    int n_threads = llama_",
+        "input_suffix": "}\n",
+    })
+    assert res.status_code == 200
+    assert res.body["content"] == "n_threads();\n    printf(\"Number of threads: %d\\n\", n_threads);\n    return 0;\n"
diff --git a/examples/server/tests/unit/test_lora.py b/examples/server/tests/unit/test_lora.py
new file mode 100644
index 0000000000000..7496154493917
--- /dev/null
+++ b/examples/server/tests/unit/test_lora.py
@@ -0,0 +1,42 @@
+import pytest
+import os
+from utils import *
+
+server = ServerPreset.stories15m_moe()
+
+LORA_FILE_URL = "https://huggingface.co/ggml-org/stories15M_MOE/resolve/main/moe_shakespeare15M.gguf"
+
+@pytest.fixture(scope="module", autouse=True)
+def create_server():
+    global server
+    server = ServerPreset.stories15m_moe()
+    # download lora file if needed
+    file_name = LORA_FILE_URL.split('/').pop()
+    lora_file = f'../../../{file_name}'
+    if not os.path.exists(lora_file):
+        print(f"Downloading {LORA_FILE_URL} to {lora_file}")
+        with open(lora_file, 'wb') as f:
+            f.write(requests.get(LORA_FILE_URL).content)
+        print(f"Done downloading lora file")
+    server.lora_files = [lora_file]
+
+
+@pytest.mark.parametrize("scale,re_content", [
+    # without applying lora, the model should behave like a bedtime story generator
+    (0.0, "(little|girl|three|years|old)+"),
+    # with lora, the model should behave like a Shakespearean text generator
+    (1.0, "(eye|love|glass|sun)+"),
+])
+def test_lora(scale: float, re_content: str):
+    global server
+    server.start()
+    res_lora_control = server.make_request("POST", "/lora-adapters", data=[
+        {"id": 0, "scale": scale}
+    ])
+    assert res_lora_control.status_code == 200
+    res = server.make_request("POST", "/completion", data={
+        "prompt": "Look in thy glass",
+    })
+    assert res.status_code == 200
+    assert match_regex(re_content, res.body["content"])
+
diff --git a/examples/server/tests/unit/test_rerank.py b/examples/server/tests/unit/test_rerank.py
new file mode 100644
index 0000000000000..7203d79435702
--- /dev/null
+++ b/examples/server/tests/unit/test_rerank.py
@@ -0,0 +1,78 @@
+import pytest
+from utils import *
+
+server = ServerPreset.jina_reranker_tiny()
+
+
+@pytest.fixture(scope="module", autouse=True)
+def create_server():
+    global server
+    server = ServerPreset.jina_reranker_tiny()
+
+
+def test_rerank():
+    global server
+    server.start()
+    res = server.make_request("POST", "/rerank", data={
+        "query": "Machine learning is",
+        "documents": [
+            "A machine is a physical system that uses power to apply forces and control movement to perform an action. The term is commonly applied to artificial devices, such as those employing engines or motors, but also to natural biological macromolecules, such as molecular machines.",
+            "Learning is the process of acquiring new understanding, knowledge, behaviors, skills, values, attitudes, and preferences. The ability to learn is possessed by humans, non-human animals, and some machines; there is also evidence for some kind of learning in certain plants.",
+            "Machine learning is a field of study in artificial intelligence concerned with the development and study of statistical algorithms that can learn from data and generalize to unseen data, and thus perform tasks without explicit instructions.",
+            "Paris, capitale de la France, est une grande ville européenne et un centre mondial de l'art, de la mode, de la gastronomie et de la culture. Son paysage urbain du XIXe siècle est traversé par de larges boulevards et la Seine."
+        ]
+    })
+    assert res.status_code == 200
+    assert len(res.body["results"]) == 4
+
+    most_relevant = res.body["results"][0]
+    least_relevant = res.body["results"][0]
+    for doc in res.body["results"]:
+        if doc["relevance_score"] > most_relevant["relevance_score"]:
+            most_relevant = doc
+        if doc["relevance_score"] < least_relevant["relevance_score"]:
+            least_relevant = doc
+
+    assert most_relevant["relevance_score"] > least_relevant["relevance_score"]
+    assert most_relevant["index"] == 2
+    assert least_relevant["index"] == 3
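+
+# Each result carries an "index" into the original documents list, so a caller
+# would typically reorder by score and map back, e.g. (illustrative):
+#
+#   ranked = sorted(res.body["results"], key=lambda r: r["relevance_score"], reverse=True)
+#   best_doc = documents[ranked[0]["index"]]  # 'documents' being the list sent above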
"Access-Control-Allow-Origin", "localhost"), + ("web.mydomain.fr", "Access-Control-Allow-Origin", "web.mydomain.fr"), + ("origin", "Access-Control-Allow-Credentials", "true"), + ("web.mydomain.fr", "Access-Control-Allow-Methods", "GET, POST"), + ("web.mydomain.fr", "Access-Control-Allow-Headers", "*"), +]) +def test_cors_options(origin: str, cors_header: str, cors_header_value: str): + global server + server.start() + res = server.make_request("OPTIONS", "/completions", headers={ + "Origin": origin, + "Access-Control-Request-Method": "POST", + "Access-Control-Request-Headers": "Authorization", + }) + assert res.status_code == 200 + assert cors_header in res.headers + assert res.headers[cors_header] == cors_header_value diff --git a/examples/server/tests/unit/test_slot_save.py b/examples/server/tests/unit/test_slot_save.py new file mode 100644 index 0000000000000..38704f5ece35a --- /dev/null +++ b/examples/server/tests/unit/test_slot_save.py @@ -0,0 +1,98 @@ +import pytest +from utils import * + +server = ServerPreset.tinyllama2() + +@pytest.fixture(scope="module", autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.slot_save_path = "./tmp" + server.temperature = 0.0 + + +def test_slot_save_restore(): + global server + server.start() + + # First prompt in slot 1 should be fully processed + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of France?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Whiskers|Flana)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 21 # all tokens are processed + + # Save state of slot 1 + res = server.make_request("POST", "/slots/1?action=save", data={ + "filename": "slot1.bin", + }) + assert res.status_code == 200 + assert res.body["n_saved"] == 84 + + # Since we have cache, this should only process the last tokens + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of Germany?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Jack|said)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 6 # only different part is processed + + # Loading the saved cache into slot 0 + res = server.make_request("POST", "/slots/0?action=restore", data={ + "filename": "slot1.bin", + }) + assert res.status_code == 200 + assert res.body["n_restored"] == 84 + + # Since we have cache, slot 0 should only process the last tokens + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of Germany?", + "id_slot": 0, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Jack|said)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 6 # only different part is processed + + # For verification that slot 1 was not corrupted during slot 0 load, same thing should work + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of Germany?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Jack|said)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 1 + + +def test_slot_erase(): + global server + server.start() + + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of France?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Whiskers|Flana)+", res.body["content"]) + 
assert res.body["timings"]["prompt_n"] == 21 # all tokens are processed + + # erase slot 1 + res = server.make_request("POST", "/slots/1?action=erase") + assert res.status_code == 200 + + # re-run the same prompt, it should process all tokens again + res = server.make_request("POST", "/completion", data={ + "prompt": "What is the capital of France?", + "id_slot": 1, + "cache_prompt": True, + }) + assert res.status_code == 200 + assert match_regex("(Whiskers|Flana)+", res.body["content"]) + assert res.body["timings"]["prompt_n"] == 21 # all tokens are processed diff --git a/examples/server/tests/unit/test_speculative.py b/examples/server/tests/unit/test_speculative.py new file mode 100644 index 0000000000000..3bb5733cbdf48 --- /dev/null +++ b/examples/server/tests/unit/test_speculative.py @@ -0,0 +1,134 @@ +import pytest +from utils import * + +# We use a F16 MOE gguf as main model, and q4_0 as draft model + +server = ServerPreset.stories15m_moe() + +MODEL_DRAFT_FILE_URL = "https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories15M-q4_0.gguf" + +def create_server(): + global server + server = ServerPreset.stories15m_moe() + # download draft model file if needed + file_name = MODEL_DRAFT_FILE_URL.split('/').pop() + model_draft_file = f'../../../{file_name}' + if not os.path.exists(model_draft_file): + print(f"Downloading {MODEL_DRAFT_FILE_URL} to {model_draft_file}") + with open(model_draft_file, 'wb') as f: + f.write(requests.get(MODEL_DRAFT_FILE_URL).content) + print(f"Done downloading draft model file") + # set default values + server.model_draft = model_draft_file + server.draft_min = 4 + server.draft_max = 8 + + +@pytest.fixture(scope="module", autouse=True) +def fixture_create_server(): + return create_server() + + +def test_with_and_without_draft(): + global server + server.model_draft = None # disable draft model + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "temperature": 0.0, + "top_k": 1, + }) + assert res.status_code == 200 + content_no_draft = res.body["content"] + server.stop() + + # create new server with draft model + create_server() + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "temperature": 0.0, + "top_k": 1, + }) + assert res.status_code == 200 + content_draft = res.body["content"] + + assert content_no_draft == content_draft + + +def test_different_draft_min_draft_max(): + global server + test_values = [ + (1, 2), + (1, 4), + (4, 8), + (4, 12), + (8, 16), + ] + last_content = None + for draft_min, draft_max in test_values: + server.stop() + server.draft_min = draft_min + server.draft_max = draft_max + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "I believe the meaning of life is", + "temperature": 0.0, + "top_k": 1, + }) + assert res.status_code == 200 + if last_content is not None: + assert last_content == res.body["content"] + last_content = res.body["content"] + + +def test_slot_ctx_not_exceeded(): + global server + server.n_ctx = 64 + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "Hello " * 56, + "temperature": 0.0, + "top_k": 1, + "speculative.p_min": 0.0, + }) + assert res.status_code == 200 + assert len(res.body["content"]) > 0 + + +def test_with_ctx_shift(): + global server + server.n_ctx = 64 + server.start() + res = server.make_request("POST", "/completion", data={ + "prompt": "Hello " * 56, + "temperature": 0.0, 
+ "top_k": 1, + "n_predict": 64, + "speculative.p_min": 0.0, + }) + assert res.status_code == 200 + assert len(res.body["content"]) > 0 + assert res.body["tokens_predicted"] == 64 + assert res.body["truncated"] == True + + +@pytest.mark.parametrize("n_slots,n_requests", [ + (1, 2), + (2, 2), +]) +def test_multi_requests_parallel(n_slots: int, n_requests: int): + global server + server.n_slots = n_slots + server.start() + tasks = [] + for _ in range(n_requests): + tasks.append((server.make_request, ("POST", "/completion", { + "prompt": "I believe the meaning of life is", + "temperature": 0.0, + "top_k": 1, + }))) + results = parallel_function_calls(tasks) + for res in results: + assert res.status_code == 200 + assert match_regex("(wise|kind|owl|answer)+", res.body["content"]) diff --git a/examples/server/tests/unit/test_tokenize.py b/examples/server/tests/unit/test_tokenize.py new file mode 100644 index 0000000000000..382457c9d602f --- /dev/null +++ b/examples/server/tests/unit/test_tokenize.py @@ -0,0 +1,59 @@ +import pytest +from utils import * + +server = ServerPreset.tinyllama2() + + +@pytest.fixture(scope="module", autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + + +def test_tokenize_detokenize(): + global server + server.start() + # tokenize + content = "What is the capital of France ?" + res_tok = server.make_request("POST", "/tokenize", data={ + "content": content + }) + assert res_tok.status_code == 200 + assert len(res_tok.body["tokens"]) > 5 + # detokenize + res_detok = server.make_request("POST", "/detokenize", data={ + "tokens": res_tok.body["tokens"], + }) + assert res_detok.status_code == 200 + assert res_detok.body["content"].strip() == content + + +def test_tokenize_with_bos(): + global server + server.start() + # tokenize + content = "What is the capital of France ?" 
+ bosId = 1 + res_tok = server.make_request("POST", "/tokenize", data={ + "content": content, + "add_special": True, + }) + assert res_tok.status_code == 200 + assert res_tok.body["tokens"][0] == bosId + + +def test_tokenize_with_pieces(): + global server + server.start() + # tokenize + content = "This is a test string with unicode åŖ½ and emoji šŸ¤—" + res_tok = server.make_request("POST", "/tokenize", data={ + "content": content, + "with_pieces": True, + }) + assert res_tok.status_code == 200 + for token in res_tok.body["tokens"]: + assert "id" in token + assert token["id"] > 0 + assert "piece" in token + assert len(token["piece"]) > 0 diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py new file mode 100644 index 0000000000000..277125e88b534 --- /dev/null +++ b/examples/server/tests/utils.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# type: ignore[reportUnusedImport] + +import subprocess +import os +import re +import json +import sys +import requests +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import ( + Any, + Callable, + ContextManager, + Iterable, + Iterator, + List, + Literal, + Tuple, + Set, +) +from re import RegexFlag + + +class ServerResponse: + headers: dict + status_code: int + body: dict | Any + + +class ServerProcess: + # default options + debug: bool = False + server_port: int = 8080 + server_host: str = "127.0.0.1" + model_hf_repo: str = "ggml-org/models" + model_hf_file: str = "tinyllamas/stories260K.gguf" + model_alias: str = "tinyllama-2" + temperature: float = 0.8 + seed: int = 42 + + # custom options + model_alias: str | None = None + model_url: str | None = None + model_file: str | None = None + model_draft: str | None = None + n_threads: int | None = None + n_gpu_layer: int | None = None + n_batch: int | None = None + n_ubatch: int | None = None + n_ctx: int | None = None + n_ga: int | None = None + n_ga_w: int | None = None + n_predict: int | None = None + n_prompts: int | None = 0 + slot_save_path: str | None = None + id_slot: int | None = None + cache_prompt: bool | None = None + n_slots: int | None = None + server_continuous_batching: bool | None = False + server_embeddings: bool | None = False + server_reranking: bool | None = False + server_metrics: bool | None = False + server_slots: bool | None = False + pooling: str | None = None + draft: int | None = None + api_key: str | None = None + response_format: str | None = None + lora_files: List[str] | None = None + disable_ctx_shift: int | None = False + draft_min: int | None = None + draft_max: int | None = None + no_webui: bool | None = None + + # session variables + process: subprocess.Popen | None = None + + def __init__(self): + if "N_GPU_LAYERS" in os.environ: + self.n_gpu_layer = int(os.environ["N_GPU_LAYERS"]) + if "DEBUG" in os.environ: + self.debug = True + if "PORT" in os.environ: + self.server_port = int(os.environ["PORT"]) + + def start(self, timeout_seconds: int = 10) -> None: + if "LLAMA_SERVER_BIN_PATH" in os.environ: + server_path = os.environ["LLAMA_SERVER_BIN_PATH"] + elif os.name == "nt": + server_path = "../../../build/bin/Release/llama-server.exe" + else: + server_path = "../../../build/bin/llama-server" + server_args = [ + "--host", + self.server_host, + "--port", + self.server_port, + "--temp", + self.temperature, + "--seed", + self.seed, + ] + if self.model_file: + server_args.extend(["--model", self.model_file]) + if self.model_url: + server_args.extend(["--model-url", self.model_url]) + 
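+        # every optional attribute set on the preset maps 1:1 to a llama-server CLI
+        # flag below; attributes left as None are simply omitted from the command line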
if self.model_draft: + server_args.extend(["--model-draft", self.model_draft]) + if self.model_hf_repo: + server_args.extend(["--hf-repo", self.model_hf_repo]) + if self.model_hf_file: + server_args.extend(["--hf-file", self.model_hf_file]) + if self.n_batch: + server_args.extend(["--batch-size", self.n_batch]) + if self.n_ubatch: + server_args.extend(["--ubatch-size", self.n_ubatch]) + if self.n_threads: + server_args.extend(["--threads", self.n_threads]) + if self.n_gpu_layer: + server_args.extend(["--n-gpu-layers", self.n_gpu_layer]) + if self.draft is not None: + server_args.extend(["--draft", self.draft]) + if self.server_continuous_batching: + server_args.append("--cont-batching") + if self.server_embeddings: + server_args.append("--embedding") + if self.server_reranking: + server_args.append("--reranking") + if self.server_metrics: + server_args.append("--metrics") + if self.server_slots: + server_args.append("--slots") + if self.pooling: + server_args.extend(["--pooling", self.pooling]) + if self.model_alias: + server_args.extend(["--alias", self.model_alias]) + if self.n_ctx: + server_args.extend(["--ctx-size", self.n_ctx]) + if self.n_slots: + server_args.extend(["--parallel", self.n_slots]) + if self.n_predict: + server_args.extend(["--n-predict", self.n_predict]) + if self.slot_save_path: + server_args.extend(["--slot-save-path", self.slot_save_path]) + if self.n_ga: + server_args.extend(["--grp-attn-n", self.n_ga]) + if self.n_ga_w: + server_args.extend(["--grp-attn-w", self.n_ga_w]) + if self.debug: + server_args.append("--verbose") + if self.lora_files: + for lora_file in self.lora_files: + server_args.extend(["--lora", lora_file]) + if self.disable_ctx_shift: + server_args.extend(["--no-context-shift"]) + if self.api_key: + server_args.extend(["--api-key", self.api_key]) + if self.draft_max: + server_args.extend(["--draft-max", self.draft_max]) + if self.draft_min: + server_args.extend(["--draft-min", self.draft_min]) + if self.no_webui: + server_args.append("--no-webui") + + args = [str(arg) for arg in [server_path, *server_args]] + print(f"bench: starting server with: {' '.join(args)}") + + flags = 0 + if "nt" == os.name: + flags |= subprocess.DETACHED_PROCESS + flags |= subprocess.CREATE_NEW_PROCESS_GROUP + flags |= subprocess.CREATE_NO_WINDOW + + self.process = subprocess.Popen( + [str(arg) for arg in [server_path, *server_args]], + creationflags=flags, + stdout=sys.stdout, + stderr=sys.stdout, + env={**os.environ, "LLAMA_CACHE": "tmp"}, + ) + server_instances.add(self) + + print(f"server pid={self.process.pid}, pytest pid={os.getpid()}") + + # wait for server to start + start_time = time.time() + while time.time() - start_time < timeout_seconds: + try: + response = self.make_request("GET", "/health", headers={ + "Authorization": f"Bearer {self.api_key}" if self.api_key else None + }) + if response.status_code == 200: + self.ready = True + return # server is ready + except Exception as e: + pass + print(f"Waiting for server to start...") + time.sleep(0.5) + raise TimeoutError(f"Server did not start within {timeout_seconds} seconds") + + def stop(self) -> None: + if self in server_instances: + server_instances.remove(self) + if self.process: + print(f"Stopping server with pid={self.process.pid}") + self.process.kill() + self.process = None + + def make_request( + self, + method: str, + path: str, + data: dict | Any | None = None, + headers: dict | None = None, + ) -> ServerResponse: + url = f"http://{self.server_host}:{self.server_port}{path}" + parse_body = False + if 
method == "GET": + response = requests.get(url, headers=headers) + parse_body = True + elif method == "POST": + response = requests.post(url, headers=headers, json=data) + parse_body = True + elif method == "OPTIONS": + response = requests.options(url, headers=headers) + else: + raise ValueError(f"Unimplemented method: {method}") + result = ServerResponse() + result.headers = dict(response.headers) + result.status_code = response.status_code + result.body = response.json() if parse_body else None + print("Response from server", json.dumps(result.body, indent=2)) + return result + + def make_stream_request( + self, + method: str, + path: str, + data: dict | None = None, + headers: dict | None = None, + ) -> Iterator[dict]: + url = f"http://{self.server_host}:{self.server_port}{path}" + if method == "POST": + response = requests.post(url, headers=headers, json=data, stream=True) + else: + raise ValueError(f"Unimplemented method: {method}") + for line_bytes in response.iter_lines(): + line = line_bytes.decode("utf-8") + if '[DONE]' in line: + break + elif line.startswith('data: '): + data = json.loads(line[6:]) + print("Partial response from server", json.dumps(data, indent=2)) + yield data + + +server_instances: Set[ServerProcess] = set() + + +class ServerPreset: + @staticmethod + def tinyllama2() -> ServerProcess: + server = ServerProcess() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "tinyllamas/stories260K.gguf" + server.model_alias = "tinyllama-2" + server.n_ctx = 256 + server.n_batch = 32 + server.n_slots = 2 + server.n_predict = 64 + server.seed = 42 + return server + + @staticmethod + def bert_bge_small() -> ServerProcess: + server = ServerProcess() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "bert-bge-small/ggml-model-f16.gguf" + server.model_alias = "bert-bge-small" + server.n_ctx = 512 + server.n_batch = 128 + server.n_ubatch = 128 + server.n_slots = 2 + server.seed = 42 + server.server_embeddings = True + return server + + @staticmethod + def tinyllama_infill() -> ServerProcess: + server = ServerProcess() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "tinyllamas/stories260K-infill.gguf" + server.model_alias = "tinyllama-infill" + server.n_ctx = 2048 + server.n_batch = 1024 + server.n_slots = 1 + server.n_predict = 64 + server.temperature = 0.0 + server.seed = 42 + return server + + @staticmethod + def stories15m_moe() -> ServerProcess: + server = ServerProcess() + server.model_hf_repo = "ggml-org/stories15M_MOE" + server.model_hf_file = "stories15M_MOE-F16.gguf" + server.model_alias = "stories15m-moe" + server.n_ctx = 2048 + server.n_batch = 1024 + server.n_slots = 1 + server.n_predict = 64 + server.temperature = 0.0 + server.seed = 42 + return server + + @staticmethod + def jina_reranker_tiny() -> ServerProcess: + server = ServerProcess() + server.model_hf_repo = "ggml-org/models" + server.model_hf_file = "jina-reranker-v1-tiny-en/ggml-model-f16.gguf" + server.model_alias = "jina-reranker" + server.n_ctx = 512 + server.n_batch = 512 + server.n_slots = 1 + server.seed = 42 + server.server_reranking = True + return server + + +def parallel_function_calls(function_list: List[Tuple[Callable[..., Any], Tuple[Any, ...]]]) -> List[Any]: + """ + Run multiple functions in parallel and return results in the same order as calls. Equivalent to Promise.all in JS. 
+ + Example usage: + + results = parallel_function_calls([ + (func1, (arg1, arg2)), + (func2, (arg3, arg4)), + ]) + """ + results = [None] * len(function_list) + exceptions = [] + + def worker(index, func, args): + try: + result = func(*args) + results[index] = result + except Exception as e: + exceptions.append((index, str(e))) + + with ThreadPoolExecutor() as executor: + futures = [] + for i, (func, args) in enumerate(function_list): + future = executor.submit(worker, i, func, args) + futures.append(future) + + # Wait for all futures to complete + for future in as_completed(futures): + pass + + # Check if there were any exceptions + if exceptions: + print("Exceptions occurred:") + for index, error in exceptions: + print(f"Function at index {index}: {error}") + + return results + + +def match_regex(regex: str, text: str) -> bool: + return ( + re.compile( + regex, flags=RegexFlag.IGNORECASE | RegexFlag.MULTILINE | RegexFlag.DOTALL + ).search(text) + is not None + ) + +def is_slow_test_allowed(): + return os.environ.get("SLOW_TESTS") == "1" or os.environ.get("SLOW_TESTS") == "ON" diff --git a/examples/server/themes/buttons-top/index.html b/examples/server/themes/buttons-top/index.html index 2797c37c96456..3fb88fcc88d31 100644 --- a/examples/server/themes/buttons-top/index.html +++ b/examples/server/themes/buttons-top/index.html @@ -222,7 +222,6 @@ temperature: 0.7, repeat_last_n: 256, // 0 = disable penalty, -1 = context size repeat_penalty: 1.18, // 1.0 = disabled - penalize_nl: false, top_k: 40, // <= 0 to use vocab size top_p: 0.95, // 1.0 = disabled min_p: 0.05, // 0 = disabled @@ -779,7 +778,6 @@ ${FloatField({ label: "Temperature", max: 2.0, min: 0.0, name: "temperature", step: 0.01, value: params.value.temperature })} ${FloatField({ label: "Penalize repeat sequence", max: 2.0, min: 0.0, name: "repeat_penalty", step: 0.01, value: params.value.repeat_penalty })} ${IntField({ label: "Consider N tokens for penalize", max: 2048, min: 0, name: "repeat_last_n", value: params.value.repeat_last_n })} - ${BoolField({ label: "Penalize repetition of newlines", name: "penalize_nl", value: params.value.penalize_nl })} ${IntField({ label: "Top-K sampling", max: 100, min: -1, name: "top_k", value: params.value.top_k })} ${FloatField({ label: "Top-P sampling", max: 1.0, min: 0.0, name: "top_p", step: 0.01, value: params.value.top_p })} ${FloatField({ label: "Min-P sampling", max: 1.0, min: 0.0, name: "min_p", step: 0.01, value: params.value.min_p })} diff --git a/examples/server/themes/wild/index.html b/examples/server/themes/wild/index.html index dbe23c4024155..73f36d4b29fdd 100644 --- a/examples/server/themes/wild/index.html +++ b/examples/server/themes/wild/index.html @@ -225,7 +225,6 @@ temperature: 0.7, repeat_last_n: 256, // 0 = disable penalty, -1 = context size repeat_penalty: 1.18, // 1.0 = disabled - penalize_nl: false, top_k: 40, // <= 0 to use vocab size top_p: 0.95, // 1.0 = disabled min_p: 0.05, // 0 = disabled @@ -782,7 +781,6 @@ ${FloatField({ label: "Temperature", max: 2.0, min: 0.0, name: "temperature", step: 0.01, value: params.value.temperature })} ${FloatField({ label: "Penalize repeat sequence", max: 2.0, min: 0.0, name: "repeat_penalty", step: 0.01, value: params.value.repeat_penalty })} ${IntField({ label: "Consider N tokens for penalize", max: 2048, min: 0, name: "repeat_last_n", value: params.value.repeat_last_n })} - ${BoolField({ label: "Penalize repetition of newlines", name: "penalize_nl", value: params.value.penalize_nl })} ${IntField({ label: "Top-K sampling", max: 
100, min: -1, name: "top_k", value: params.value.top_k })} ${FloatField({ label: "Top-P sampling", max: 1.0, min: 0.0, name: "top_p", step: 0.01, value: params.value.top_p })} ${FloatField({ label: "Min-P sampling", max: 1.0, min: 0.0, name: "min_p", step: 0.01, value: params.value.min_p })} diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index c47ed3e47a76d..94bb285b6f2d1 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -20,11 +20,11 @@ #include #include #include +#include -#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613" +#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo" using json = nlohmann::ordered_json; -using llama_tokens = std::vector; #define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__) #define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__) @@ -41,17 +41,6 @@ using llama_tokens = std::vector; #define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__) #define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__) -// https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11 -enum error_type { - ERROR_TYPE_INVALID_REQUEST, - ERROR_TYPE_AUTHENTICATION, - ERROR_TYPE_SERVER, - ERROR_TYPE_NOT_FOUND, - ERROR_TYPE_PERMISSION, - ERROR_TYPE_UNAVAILABLE, // custom error - ERROR_TYPE_NOT_SUPPORTED, // custom error -}; - template static T json_value(const json & body, const std::string & key, const T & default_value) { // Fallback null to default value @@ -149,6 +138,7 @@ static llama_tokens tokenize_mixed(const llama_context * ctx, const json & json_ * and multiple prompts (multi-tasks): * - "prompt": ["string1", "string2"] * - "prompt": ["string1", [12, 34, 56]] + * - "prompt": [[12, 34, 56], [78, 90, 12]] * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56]] */ static std::vector tokenize_input_prompts(llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) { @@ -175,9 +165,42 @@ static std::vector tokenize_input_prompts(llama_context * ctx, con } else { throw std::runtime_error("\"prompt\" must be a string, an list of tokens, a list of mixed strings & tokens, or a list of prompts"); } + if (result.empty()) { + throw std::runtime_error("\"prompt\" must not be empty"); + } return result; } +// return the last index of character that can form a valid string +// if the last character is potentially cut in half, return the index before the cut +// if validate_utf8(text) == text.size(), then the whole text is valid utf8 +static size_t validate_utf8(const std::string& text) { + size_t len = text.size(); + if (len == 0) return 0; + + // Check the last few bytes to see if a multi-byte character is cut off + for (size_t i = 1; i <= 4 && i <= len; ++i) { + unsigned char c = text[len - i]; + // Check for start of a multi-byte sequence from the end + if ((c & 0xE0) == 0xC0) { + // 2-byte character start: 110xxxxx + // Needs at least 2 bytes + if (i < 2) return len - i; + } else if ((c & 0xF0) == 0xE0) { + // 3-byte character start: 1110xxxx + // Needs at least 3 bytes + if (i < 3) return len - i; + } else if ((c & 0xF8) == 0xF0) { + // 4-byte character start: 11110xxx + // Needs at least 4 bytes + if (i < 4) return len - i; + } + } + + // If no cut-off multi-byte character is found, return full length + return len; +} + // // template utils // @@ -338,12 +361,12 @@ static std::string 
llama_get_chat_template(const struct llama_model * model) { std::string template_key = "tokenizer.chat_template"; // call with NULL buffer to get the total size of the string int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0); - if (res < 0) { + if (res < 2) { return ""; } else { - std::vector model_template(res, 0); + std::vector model_template(res + 1, 0); llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size()); - return std::string(model_template.data(), model_template.size()); + return std::string(model_template.data(), model_template.size() - 1); } } @@ -439,62 +462,6 @@ static std::string gen_chatcmplid() { // other common utils // -static size_t longest_common_prefix(const llama_tokens & a, const llama_tokens & b) { - size_t i; - for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++) {} - - return i; -} - -static size_t longest_common_subsequence(const llama_tokens & a, const llama_tokens & b) { - // check for empty sequences - if (a.empty() || b.empty()) { - return 0; - } - - // get the lengths of the input sequences - size_t a_len = a.size(); - size_t b_len = b.size(); - - // initialize the maximum length of the longest common subsequence (LCS) - size_t max_length = 0; - - // use two rows instead of a 2D matrix to optimize space - std::vector prev_row(b_len + 1, 0); - std::vector curr_row(b_len + 1, 0); - - // iterate through the elements of a - for (size_t i = 1; i <= a_len; i++) { - // iterate through the elements of b - for (size_t j = 1; j <= b_len; j++) { - // if elements at the current positions match - if (a[i - 1] == b[j - 1]) { - // if it's the first element of either sequences, set LCS length to 1 - if (i == 1 || j == 1) { - curr_row[j] = 1; - } else { - // increment LCS length by 1 compared to the previous element - curr_row[j] = prev_row[j - 1] + 1; - } - - // update max_length if necessary - if (curr_row[j] > max_length) { - max_length = curr_row[j]; - } - } else { - // reset LCS length if elements don't match - curr_row[j] = 0; - } - } - - // update the previous row for the next iteration - prev_row = curr_row; - } - - // return the maximum length of the LCS - return max_length; -} - static bool ends_with(const std::string & str, const std::string & suffix) { return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix); } @@ -542,48 +509,11 @@ static std::string tokens_to_output_formatted_string(const llama_context * ctx, return out; } -struct completion_token_output { - llama_token tok; - std::string text_to_send; - - struct token_prob { - llama_token tok; - float prob; - }; - - std::vector probs; -}; - -// convert a vector of completion_token_output to json -static json probs_vector_to_json(const llama_context * ctx, const std::vector & probs) { - json out = json::array(); - - for (const auto & prob : probs) { - json probs_for_token = json::array(); - - for (const auto & p : prob.probs) { - const std::string tok_str = tokens_to_output_formatted_string(ctx, p.tok); - probs_for_token.push_back(json { - {"tok_str", tok_str}, - {"prob", p.prob}, - }); - } - - const std::string tok_str = tokens_to_output_formatted_string(ctx, prob.tok); - out.push_back(json { - {"content", tok_str}, - {"probs", probs_for_token}, - }); - } - - return out; -} - static bool server_sent_event(httplib::DataSink & sink, const char * event, const json & data) { const std::string str = std::string(event) + ": " + data.dump(-1, ' ', false, json::error_handler_t::replace) + - 
"\n\n"; // note: these newlines are important (not sure why though, if you know, add a comment to explain) + "\n\n"; // required by RFC 8895 - A message is terminated by a blank line (two line terminators in a row). LOG_DBG("data stream, to_send: %s", str.c_str()); @@ -600,8 +530,6 @@ static json oaicompat_completion_params_parse( const std::string & chat_template) { json llama_params; - llama_params["__oaicompat"] = true; - // Apply chat template to the list of messages llama_params["prompt"] = format_chat(model, chat_template, body.at("messages")); @@ -661,157 +589,9 @@ static json oaicompat_completion_params_parse( return llama_params; } -static json format_final_response_oaicompat(const json & request, const json & result, const std::string & completion_id, bool streaming = false, bool verbose = false) { - bool stopped_word = result.count("stopped_word") != 0; - bool stopped_eos = json_value(result, "stopped_eos", false); - int num_tokens_predicted = json_value(result, "tokens_predicted", 0); - int num_prompt_tokens = json_value(result, "tokens_evaluated", 0); - std::string content = json_value(result, "content", std::string("")); - - std::string finish_reason = "length"; - if (stopped_word || stopped_eos) { - finish_reason = "stop"; - } - - json choices = - streaming ? json::array({json{{"finish_reason", finish_reason}, - {"index", 0}, - {"delta", json::object()}}}) - : json::array({json{{"finish_reason", finish_reason}, - {"index", 0}, - {"message", json{{"content", content}, - {"role", "assistant"}}}}}); - - std::time_t t = std::time(0); - - json res = json { - {"choices", choices}, - {"created", t}, - {"model", - json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))}, - {"object", streaming ? "chat.completion.chunk" : "chat.completion"}, - {"usage", json { - {"completion_tokens", num_tokens_predicted}, - {"prompt_tokens", num_prompt_tokens}, - {"total_tokens", num_tokens_predicted + num_prompt_tokens} - }}, - {"id", completion_id} - }; - - // extra fields for debugging purposes - if (verbose) { - res["__verbose"] = result; - } - - if (result.contains("completion_probabilities")) { - res["completion_probabilities"] = json_value(result, "completion_probabilities", json::array()); - } - - return res; -} - -// return value is vector as there is one case where we might need to generate two responses -static std::vector format_partial_response_oaicompat(const json & result, const std::string & completion_id) { - if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) { - return std::vector({result}); - } - - bool first = json_value(result, "oaicompat_token_ctr", 0) == 0; - std::string modelname = json_value(result, "model", std::string(DEFAULT_OAICOMPAT_MODEL)); - - bool stopped_word = json_value(result, "stopped_word", false); - bool stopped_eos = json_value(result, "stopped_eos", false); - bool stopped_limit = json_value(result, "stopped_limit", false); - std::string content = json_value(result, "content", std::string("")); - - std::string finish_reason; - if (stopped_word || stopped_eos) { - finish_reason = "stop"; - } - if (stopped_limit) { - finish_reason = "length"; - } - - std::time_t t = std::time(0); - - json choices; - - if (!finish_reason.empty()) { - choices = json::array({json{{"finish_reason", finish_reason}, - {"index", 0}, - {"delta", json::object()}}}); - } else { - if (first) { - if (content.empty()) { - choices = json::array({json{{"finish_reason", nullptr}, - {"index", 0}, - {"delta", json{{"role", "assistant"}}}}}); - } else { - // We 
have to send this as two updates to conform to openai behavior - json initial_ret = json{{"choices", json::array({json{ - {"finish_reason", nullptr}, - {"index", 0}, - {"delta", json{ - {"role", "assistant"} - }}}})}, - {"created", t}, - {"id", completion_id}, - {"model", modelname}, - {"object", "chat.completion.chunk"}}; - - json second_ret = json{ - {"choices", json::array({json{{"finish_reason", nullptr}, - {"index", 0}, - {"delta", json{ - {"content", content}}} - }})}, - {"created", t}, - {"id", completion_id}, - {"model", modelname}, - {"object", "chat.completion.chunk"}}; - - return std::vector({initial_ret, second_ret}); - } - } else { - // Some idiosyncrasy in task processing logic makes several trailing calls - // with empty content, we ignore these at the calee site. - if (content.empty()) { - return std::vector({json::object()}); - } - - choices = json::array({json{ - {"finish_reason", nullptr}, - {"index", 0}, - {"delta", - json{ - {"content", content}, - }}, - }}); - } - } - - json ret = json { - {"choices", choices}, - {"created", t}, - {"id", completion_id}, - {"model", modelname}, - {"object", "chat.completion.chunk"} - }; - if (!finish_reason.empty()) { - int num_tokens_predicted = json_value(result, "tokens_predicted", 0); - int num_prompt_tokens = json_value(result, "tokens_evaluated", 0); - ret.push_back({"usage", json { - {"completion_tokens", num_tokens_predicted}, - {"prompt_tokens", num_prompt_tokens}, - {"total_tokens", num_tokens_predicted + num_prompt_tokens} - }}); - } - - return std::vector({ret}); -} - static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) { json data = json::array(); + int32_t n_tokens = 0; int i = 0; for (const auto & elem : embeddings) { data.push_back(json{ @@ -819,14 +599,16 @@ static json format_embeddings_response_oaicompat(const json & request, const jso {"index", i++}, {"object", "embedding"} }); + + n_tokens += json_value(elem, "tokens_evaluated", 0); } json res = json { {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))}, {"object", "list"}, - {"usage", json { // TODO: fill - {"prompt_tokens", 0}, - {"total_tokens", 0} + {"usage", json { + {"prompt_tokens", n_tokens}, + {"total_tokens", n_tokens} }}, {"data", data} }; @@ -836,20 +618,23 @@ static json format_embeddings_response_oaicompat(const json & request, const jso static json format_response_rerank(const json & request, const json & ranks) { json data = json::array(); + int32_t n_tokens = 0; int i = 0; for (const auto & rank : ranks) { data.push_back(json{ {"index", i++}, {"relevance_score", json_value(rank, "score", 0.0)}, }); + + n_tokens += json_value(rank, "tokens_evaluated", 0); } json res = json { {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))}, {"object", "list"}, - {"usage", json { // TODO: fill - {"prompt_tokens", 0}, - {"total_tokens", 0} + {"usage", json { + {"prompt_tokens", n_tokens}, + {"total_tokens", n_tokens} }}, {"results", data} }; @@ -902,42 +687,47 @@ static json format_detokenized_response(const std::string & content) { }; } -static json format_error_response(const std::string & message, const enum error_type type) { - std::string type_str; - int code = 500; - switch (type) { - case ERROR_TYPE_INVALID_REQUEST: - type_str = "invalid_request_error"; - code = 400; - break; - case ERROR_TYPE_AUTHENTICATION: - type_str = "authentication_error"; - code = 401; - break; - case ERROR_TYPE_NOT_FOUND: - type_str = "not_found_error"; - code = 404; - break; - case 
ERROR_TYPE_SERVER: - type_str = "server_error"; - code = 500; - break; - case ERROR_TYPE_PERMISSION: - type_str = "permission_error"; - code = 403; - break; - case ERROR_TYPE_NOT_SUPPORTED: - type_str = "not_supported_error"; - code = 501; - break; - case ERROR_TYPE_UNAVAILABLE: - type_str = "unavailable_error"; - code = 503; - break; +static json format_logit_bias(const std::vector & logit_bias) { + json data = json::array(); + for (const auto & lb : logit_bias) { + data.push_back(json{ + {"bias", lb.bias}, + {"token", lb.token}, + }); } - return json { - {"code", code}, - {"message", message}, - {"type", type_str}, - }; + return data; +} + +static std::string safe_json_to_str(json data) { + return data.dump(-1, ' ', false, json::error_handler_t::replace); +} + +static std::vector get_token_probabilities(llama_context * ctx, int idx) { + std::vector cur; + const auto * logits = llama_get_logits_ith(ctx, idx); + const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + + cur.resize(n_vocab); + for (llama_token token_id = 0; token_id < n_vocab; token_id++) { + cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f}; + } + + // sort tokens by logits + std::sort(cur.begin(), cur.end(), [](const llama_token_data & a, const llama_token_data & b) { + return a.logit > b.logit; + }); + + // apply softmax + float max_l = cur[0].logit; + float cum_sum = 0.0f; + for (size_t i = 0; i < cur.size(); ++i) { + float p = expf(cur[i].logit - max_l); + cur[i].p = p; + cum_sum += p; + } + for (size_t i = 0; i < cur.size(); ++i) { + cur[i].p /= cum_sum; + } + + return cur; } diff --git a/examples/server/webui/index.html b/examples/server/webui/index.html new file mode 100644 index 0000000000000..dcdd410797d06 --- /dev/null +++ b/examples/server/webui/index.html @@ -0,0 +1,310 @@ + + + + + + + šŸ¦™ llama.cpp - chat + + + +
+ [index.html body omitted: Vue single-page chat UI — a "Conversations" sidebar with a "New conversation" button, per-conversation titles rendered from {{ conv.messages[0].content }}, and the note "Conversations are saved to browser's localStorage"; a "llama.cpp" header bar; a message list with the placeholder "Send a message to start"; and the chat input form]
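The stream framing the webui and these tests rely on is plain server-sent events: `server_sent_event()` in utils.hpp terminates every message with a blank line, and `make_stream_request()` in utils.py strips the `data: ` prefix and stops at `[DONE]`. As a standalone illustration, here is a minimal streaming client sketch; it assumes a llama-server listening locally, and the URL, prompt, and n_predict values are invented for the example rather than taken from this PR:

```python
import json

import requests

# minimal streaming client for llama-server's /completion endpoint,
# mirroring make_stream_request() above (host/port/prompt are assumptions)
url = "http://127.0.0.1:8080/completion"
payload = {"prompt": "I believe the meaning of life is", "n_predict": 32, "stream": True}

with requests.post(url, json=payload, stream=True) as response:
    for line_bytes in response.iter_lines():
        line = line_bytes.decode("utf-8")
        if "[DONE]" in line:
            break  # sentinel sent when the stream is finished
        if line.startswith("data: "):
            chunk = json.loads(line[len("data: "):])
            # each SSE message carries one JSON chunk with a "content" delta
            print(chunk.get("content", ""), end="", flush=True)
```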
+ + + + + + + + + + + + + diff --git a/examples/server/webui/package-lock.json b/examples/server/webui/package-lock.json new file mode 100644 index 0000000000000..bbebccbf20c64 --- /dev/null +++ b/examples/server/webui/package-lock.json @@ -0,0 +1,3309 @@ +{ + "name": "webui", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "webui", + "version": "0.0.0", + "dependencies": { + "@sec-ant/readable-stream": "^0.6.0", + "@vscode/markdown-it-katex": "^1.1.1", + "autoprefixer": "^10.4.20", + "daisyui": "^4.12.14", + "highlight.js": "^11.10.0", + "katex": "^0.16.15", + "markdown-it": "^14.1.0", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.15", + "textlinestream": "^1.1.1", + "vite-plugin-singlefile": "^2.0.3", + "vue": "^3.5.13" + }, + "devDependencies": { + "sass-embedded": "^1.83.0", + "vite": "^5.4.10" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@bufbuild/protobuf": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/@bufbuild/protobuf/-/protobuf-2.2.3.tgz", + "integrity": "sha512-tFQoXHJdkEOSwj5tRIZSPNUuXK3RaR7T1nUrPgbYX1pUbvqqaaZAsfo+NXBPsz5rZMSKVFrgK1WL8Q/MSLvprg==", + "devOptional": true, + "license": "(Apache-2.0 AND BSD-3-Clause)" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + 
"engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + 
], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.28.0.tgz", + "integrity": 
"sha512-wLJuPLT6grGZsy34g4N1yRfYeouklTgPhH1gWXCYspenKYD0s3cR99ZevOGw5BexMNywkbV3UkjADisozBmpPQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.28.0.tgz", + "integrity": "sha512-eiNkznlo0dLmVG/6wf+Ifi/v78G4d4QxRhuUl+s8EWZpDewgk7PX3ZyECUXU0Zq/Ca+8nU8cQpNC4Xgn2gFNDA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.28.0.tgz", + "integrity": "sha512-8hxgfReVs7k9Js1uAIhS6zq3I+wKQETInnWQtgzt8JfGx51R1N6DRVy3F4o0lQwumbErRz52YqwjfvuwRxGv1w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.28.0.tgz", + "integrity": "sha512-lA1zZB3bFx5oxu9fYud4+g1mt+lYXCoch0M0V/xhqLoGatbzVse0wlSQ1UYOWKpuSu3gyN4qEc0Dxf/DII1bhQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.28.0.tgz", + "integrity": "sha512-aI2plavbUDjCQB/sRbeUZWX9qp12GfYkYSJOrdYTL/C5D53bsE2/nBPuoiJKoWp5SN78v2Vr8ZPnB+/VbQ2pFA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.28.0.tgz", + "integrity": "sha512-WXveUPKtfqtaNvpf0iOb0M6xC64GzUX/OowbqfiCSXTdi/jLlOmH0Ba94/OkiY2yTGTwteo4/dsHRfh5bDCZ+w==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.28.0.tgz", + "integrity": "sha512-yLc3O2NtOQR67lI79zsSc7lk31xjwcaocvdD1twL64PK1yNaIqCeWI9L5B4MFPAVGEVjH5k1oWSGuYX1Wutxpg==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.28.0.tgz", + "integrity": "sha512-+P9G9hjEpHucHRXqesY+3X9hD2wh0iNnJXX/QhS/J5vTdG6VhNYMxJ2rJkQOxRUd17u5mbMLHM7yWGZdAASfcg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.28.0.tgz", + "integrity": "sha512-1xsm2rCKSTpKzi5/ypT5wfc+4bOGa/9yI/eaOLW0oMs7qpC542APWhl4A37AENGZ6St6GBMWhCCMM6tXgTIplw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.28.0.tgz", + "integrity": 
"sha512-zgWxMq8neVQeXL+ouSf6S7DoNeo6EPgi1eeqHXVKQxqPy1B2NvTbaOUWPn/7CfMKL7xvhV0/+fq/Z/J69g1WAQ==", + "cpu": [ + "ppc64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.28.0.tgz", + "integrity": "sha512-VEdVYacLniRxbRJLNtzwGt5vwS0ycYshofI7cWAfj7Vg5asqj+pt+Q6x4n+AONSZW/kVm+5nklde0qs2EUwU2g==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.28.0.tgz", + "integrity": "sha512-LQlP5t2hcDJh8HV8RELD9/xlYtEzJkm/aWGsauvdO2ulfl3QYRjqrKW+mGAIWP5kdNCBheqqqYIGElSRCaXfpw==", + "cpu": [ + "s390x" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.28.0.tgz", + "integrity": "sha512-Nl4KIzteVEKE9BdAvYoTkW19pa7LR/RBrT6F1dJCV/3pbjwDcaOq+edkP0LXuJ9kflW/xOK414X78r+K84+msw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.28.0.tgz", + "integrity": "sha512-eKpJr4vBDOi4goT75MvW+0dXcNUqisK4jvibY9vDdlgLx+yekxSm55StsHbxUsRxSTt3JEQvlr3cGDkzcSP8bw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.28.0.tgz", + "integrity": "sha512-Vi+WR62xWGsE/Oj+mD0FNAPY2MEox3cfyG0zLpotZdehPFXwz6lypkGs5y38Jd/NVSbOD02aVad6q6QYF7i8Bg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.28.0.tgz", + "integrity": "sha512-kN/Vpip8emMLn/eOza+4JwqDZBL6MPNpkdaEsgUtW1NYN3DZvZqSQrbKzJcTL6hd8YNmFTn7XGWMwccOcJBL0A==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.28.0.tgz", + "integrity": "sha512-Bvno2/aZT6usSa7lRDL2+hMjVAGjuqaymF1ApZm31JXzniR/hvr14jpU+/z4X6Gt5BPlzosscyJZGUvguXIqeQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@sec-ant/readable-stream": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.6.0.tgz", + "integrity": "sha512-uiBh8DrB5FN35gP6/o8JEhEQ7/ci1jUsOZO/VMUjyvTpjtV54VstOXVj1TvTj/wsT23pfX6butxxh3qufsW3+g==", + "license": "MIT" + }, + "node_modules/@vscode/markdown-it-katex": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@vscode/markdown-it-katex/-/markdown-it-katex-1.1.1.tgz", + "integrity": "sha512-3KTlbsRBPJQLE2YmLL7K6nunTlU+W9T5+FjfNdWuIUKgxSS6HWLQHaO3L4MkJi7z7MpIPpY+g4N+cWNBPE/MSA==", + "license": "MIT", + 
"dependencies": { + "katex": "^0.16.4" + } + }, + "node_modules/@vue/compiler-dom": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.5.13.tgz", + "integrity": "sha512-ZOJ46sMOKUjO3e94wPdCzQ6P1Lx/vhp2RSvfaab88Ajexs0AHeV0uasYhi99WPaogmBlRHNRuly8xV75cNTMDA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-core": "3.5.13", + "@vue/shared": "3.5.13" + } + }, + "node_modules/@vue/compiler-dom/node_modules/@babel/helper-string-parser": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", + "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@vue/compiler-dom/node_modules/@babel/helper-validator-identifier": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@vue/compiler-dom/node_modules/@babel/parser": { + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz", + "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.26.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@vue/compiler-dom/node_modules/@babel/types": { + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.0.tgz", + "integrity": "sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@vue/compiler-dom/node_modules/@vue/compiler-core": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.5.13.tgz", + "integrity": "sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.25.3", + "@vue/shared": "3.5.13", + "entities": "^4.5.0", + "estree-walker": "^2.0.2", + "source-map-js": "^1.2.0" + } + }, + "node_modules/@vue/compiler-dom/node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" + }, + "node_modules/@vue/compiler-dom/node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@vue/compiler-sfc": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.5.13.tgz", + "integrity": "sha512-6VdaljMpD82w6c2749Zhf5T9u5uLBWKnVue6XWxprDobftnletJ8+oel7sexFfM3qIxNmVE7LSFGTpv6obNyaQ==", + "license": "MIT", 
+ "dependencies": { + "@babel/parser": "^7.25.3", + "@vue/compiler-core": "3.5.13", + "@vue/compiler-dom": "3.5.13", + "@vue/compiler-ssr": "3.5.13", + "@vue/shared": "3.5.13", + "estree-walker": "^2.0.2", + "magic-string": "^0.30.11", + "postcss": "^8.4.48", + "source-map-js": "^1.2.0" + } + }, + "node_modules/@vue/compiler-sfc/node_modules/@babel/helper-string-parser": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", + "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@vue/compiler-sfc/node_modules/@babel/helper-validator-identifier": { + "version": "7.25.9", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@vue/compiler-sfc/node_modules/@babel/parser": { + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz", + "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.26.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@vue/compiler-sfc/node_modules/@babel/types": { + "version": "7.26.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.26.0.tgz", + "integrity": "sha512-Z/yiTPj+lDVnF7lWeKCIJzaIkI0vYO87dMpZ4bg4TDrFe4XXLFWL1TbXU27gBP3QccxV9mZICCrnjnYlJjXHOA==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@vue/compiler-sfc/node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "license": "MIT" + }, + "node_modules/@vue/compiler-sfc/node_modules/@vue/compiler-core": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.5.13.tgz", + "integrity": "sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.25.3", + "@vue/shared": "3.5.13", + "entities": "^4.5.0", + "estree-walker": "^2.0.2", + "source-map-js": "^1.2.0" + } + }, + "node_modules/@vue/compiler-sfc/node_modules/@vue/compiler-ssr": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.5.13.tgz", + "integrity": "sha512-wMH6vrYHxQl/IybKJagqbquvxpWCuVYpoUJfCqFZwa/JY1GdATAQ+TgVtgrwwMZ0D07QhA99rs/EAAWfvG6KpA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.5.13", + "@vue/shared": "3.5.13" + } + }, + "node_modules/@vue/compiler-sfc/node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" + }, + 
"node_modules/@vue/compiler-sfc/node_modules/magic-string": { + "version": "0.30.14", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.14.tgz", + "integrity": "sha512-5c99P1WKTed11ZC0HMJOj6CDIue6F8ySu+bJL+85q1zBEIY8IklrJ1eiKC2NDRh3Ct3FcvmJPyQHb9erXMTJNw==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0" + } + }, + "node_modules/@vue/compiler-sfc/node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@vue/runtime-dom": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.5.13.tgz", + "integrity": "sha512-dLaj94s93NYLqjLiyFzVs9X6dWhTdAlEAciC3Moq7gzAc13VJUdCnjjRurNM6uTLFATRHexHCTu/Xp3eW6yoog==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.5.13", + "@vue/runtime-core": "3.5.13", + "@vue/shared": "3.5.13", + "csstype": "^3.1.3" + } + }, + "node_modules/@vue/runtime-dom/node_modules/@vue/reactivity": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.5.13.tgz", + "integrity": "sha512-NaCwtw8o48B9I6L1zl2p41OHo/2Z4wqYGGIK1Khu5T7yxrn+ATOixn/Udn2m+6kZKB/J7cuT9DbWWhRxqixACg==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.5.13" + } + }, + "node_modules/@vue/runtime-dom/node_modules/@vue/runtime-core": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.5.13.tgz", + "integrity": "sha512-Fj4YRQ3Az0WTZw1sFe+QDb0aXCerigEpw418pw1HBUKFtnQHWzwojaukAs2X/c9DQz4MQ4bsXTGlcpGxU/RCIw==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.5.13", + "@vue/shared": "3.5.13" + } + }, + "node_modules/@vue/runtime-dom/node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "license": "MIT" + }, + "node_modules/@vue/server-renderer": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.5.13.tgz", + "integrity": "sha512-wAi4IRJV/2SAW3htkTlB+dHeRmpTiVIK1OGLWV1yeStVSebSQQOwGwIq0D3ZIoBj2C2qpgz5+vX9iEBkTdk5YA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-ssr": "3.5.13", + "@vue/shared": "3.5.13" + }, + "peerDependencies": { + "vue": "3.5.13" + } + }, + "node_modules/@vue/server-renderer/node_modules/@vue/compiler-ssr": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.5.13.tgz", + "integrity": "sha512-wMH6vrYHxQl/IybKJagqbquvxpWCuVYpoUJfCqFZwa/JY1GdATAQ+TgVtgrwwMZ0D07QhA99rs/EAAWfvG6KpA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.5.13", + "@vue/shared": "3.5.13" + } + }, + "node_modules/@vue/shared": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.5.13.tgz", + "integrity": "sha512-/hnE/qP5ZoGpol0a5mDi45bOd7t3tjYJBjsgCsivow7D48cJeV5l05RD82lPqi7gRiphZM37rnhW1l6ZoCNNnQ==", + "license": "MIT" + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "license": "MIT" + 
}, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/autoprefixer": { + "version": "10.4.20", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", + "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.23.3", + "caniuse-lite": "^1.0.30001646", + "fraction.js": "^4.3.7", + "normalize-range": "^0.1.2", + "picocolors": "^1.0.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/browserslist": { + "version": "4.24.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz", + "integrity": "sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "caniuse-lite": "^1.0.30001669", + "electron-to-chromium": "^1.5.41", + "node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.1" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/browserslist/node_modules/electron-to-chromium": { + "version": "1.5.67", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.67.tgz", + "integrity": "sha512-nz88NNBsD7kQSAGGJyp8hS6xSPtWwqNogA0mjtc2nUYeEf3nURK9qpV18TuBdDmEDgVWotS8Wkzf+V52dSQ/LQ==", + "license": "ISC" + }, + "node_modules/browserslist/node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/browserslist/node_modules/node-releases": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==", + "license": "MIT" + }, + "node_modules/browserslist/node_modules/update-browserslist-db": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.1.tgz", + "integrity": "sha512-R8UzCaa9Az+38REPiJ1tXlImTJXlVfgHZsglwBD/k6nj76ctsH1E3q4doGrukiLQd3sGQYu56r5+lo5r94l29A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" 
+ } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/buffer-builder": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/buffer-builder/-/buffer-builder-0.2.0.tgz", + "integrity": "sha512-7VPMEPuYznPSoR21NE1zvd2Xna6c/CloiZCfcMXR1Jny6PjX0N4Nsa38zcBFo/FMK+BlA+FLKbJCQ0i2yxp+Xg==", + "devOptional": true, + "license": "MIT/X11" + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001684", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001684.tgz", + "integrity": "sha512-G1LRwLIQjBQoyq0ZJGqGIJUXzJ8irpbjHLpVRXDvBEScFJ9b17sgK6vlx0GAJFE21okD7zXl08rRRUfq6HdoEQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/chokidar/node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/chokidar/node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chokidar/node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/chokidar/node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chokidar/node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/chokidar/node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/chokidar/node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/chokidar/node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/colorjs.io": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/colorjs.io/-/colorjs.io-0.5.2.tgz", + "integrity": "sha512-twmVoizEW7ylZSN32OgKdXRmo1qg+wT5/6C3xu5b9QsWzSFAhHLn2xd8ro0diCsKfCj1RdaTP/nrcW+vAoQPIw==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/css-selector-tokenizer": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/css-selector-tokenizer/-/css-selector-tokenizer-0.8.0.tgz", + "integrity": "sha512-Jd6Ig3/pe62/qe5SBPTN8h8LeUg/pT4lLgtavPf7updwwHpvFzxvOQBHYj2LZDMjUnBzgvIUSjRcf6oT5HzHFg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "fastparse": "^1.1.2" + } + }, + "node_modules/css-selector-tokenizer/node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/css-selector-tokenizer/node_modules/fastparse": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/fastparse/-/fastparse-1.1.2.tgz", + "integrity": "sha512-483XLLxTVIwWK3QTrMGRqUfUpoOs/0hbQrl2oz4J0pAcm3A3bu84wxTFqGqkJzewCLdME38xJLJAxBABfQT8sQ==", + "license": "MIT" + }, + "node_modules/culori": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/culori/-/culori-3.3.0.tgz", + "integrity": "sha512-pHJg+jbuFsCjz9iclQBqyL3B2HLCBF71BwVNujUYEvCeQMvV97R59MNK3R2+jgJ3a1fcZgI9B3vYgz8lzr/BFQ==", + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + } + }, + "node_modules/daisyui": { + "version": "4.12.14", + "resolved": "https://registry.npmjs.org/daisyui/-/daisyui-4.12.14.tgz", + "integrity": "sha512-hA27cdBasdwd4/iEjn+aidoCrRroDuo3G5W9NDKaVCJI437Mm/3eSL/2u7MkZ0pt8a+TrYF3aT2pFVemTS3how==", + "license": "MIT", + "dependencies": { + "css-selector-tokenizer": "^0.8", + "culori": "^3", + "picocolors": "^1", + "postcss-js": "^4" + }, + "engines": { + "node": ">=16.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/daisyui" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "license": "Apache-2.0" + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "license": "MIT" + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/esbuild/node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + 
"darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fast-glob/node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/fast-glob/node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fast-glob/node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-glob/node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/fast-glob/node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/fast-glob/node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "license": "MIT", 
+ "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/fast-glob/node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/fraction.js": { + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "patreon", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "devOptional": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/highlight.js": { + "version": "11.10.0", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.10.0.tgz", + "integrity": "sha512-SYVnVFswQER+zu1laSya563s+F8VDGt7o35d4utbamowvUNLLMovFqwCLSocpZTz3MgaSRA1IbqRWZv97dtErQ==", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/immutable": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-5.0.3.tgz", + "integrity": "sha512-P8IdPQHq3lA1xVeBRi5VPqUm5HDgKnx0Ru51wZz5mjxHr5n3RWhjIpOFU7ybkUxfB+5IToy+OLaHYDBIWsv+uw==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob/node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/jiti": { + "version": "1.21.6", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", + "integrity": 
"sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/katex": { + "version": "0.16.15", + "resolved": "https://registry.npmjs.org/katex/-/katex-0.16.15.tgz", + "integrity": "sha512-yE9YJIEAk2aZ+FL/G8r+UGw0CTUzEA8ZFy6E+8tc3spHUKq3qBnzCkI1CQwGoI9atJhVyFPEypQsTY7mJ1Pi9w==", + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], + "license": "MIT", + "dependencies": { + "commander": "^8.3.0" + }, + "bin": { + "katex": "cli.js" + } + }, + "node_modules/lilconfig": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", + "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", + "license": "MIT", + "engines": { + "node": ">=10" + } + }, + "node_modules/linkify-it": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-5.0.0.tgz", + "integrity": "sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==", + "license": "MIT", + "dependencies": { + "uc.micro": "^2.0.0" + } + }, + "node_modules/markdown-it": { + "version": "14.1.0", + "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.0.tgz", + "integrity": "sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1", + "entities": "^4.4.0", + "linkify-it": "^5.0.0", + "mdurl": "^2.0.0", + "punycode.js": "^2.3.1", + "uc.micro": "^2.1.0" + }, + "bin": { + "markdown-it": "bin/markdown-it.mjs" + } + }, + "node_modules/mdurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-2.0.0.tgz", + "integrity": "sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==", + "license": "MIT" + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/micromatch/node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/micromatch/node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.1", + "resolved": 
"https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/micromatch/node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/normalize-range": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", + "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/postcss": { + "version": "8.4.49", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.49.tgz", + "integrity": "sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-import/node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/postcss-import/node_modules/read-cache": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/postcss-js": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", + "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-js/node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/postcss-load-config": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz", + "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.0.0", + "yaml": "^2.3.4" + }, + "engines": { + "node": ">= 14" + }, + "peerDependencies": { + "postcss": ">=8.0.9", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/postcss-load-config/node_modules/lilconfig": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/postcss-load-config/node_modules/yaml": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.6.1.tgz", + "integrity": "sha512-7r0XPzioN/Q9kXBro/XPnA6kznR73DHq+GXh5ON7ZozRO6aMjbmiBuKste2wslTFkC5d1dw0GooOCepZXJ2SAg==", + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "license": "MIT", + 
"dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-selector-parser/node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-selector-parser/node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "license": "MIT" + }, + "node_modules/postcss/node_modules/nanoid": { + "version": "3.3.8", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", + "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/postcss/node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/punycode.js": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode.js/-/punycode.js-2.3.1.tgz", + "integrity": "sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/resolve": { + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve/node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve/node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/resolve/node_modules/is-core-module": { + "version": 
"2.15.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.1.tgz", + "integrity": "sha512-z0vtXSwucUJtANQWldhbtbt7BnL0vxiFjIdDLAatwhDYty2bad6s+rijD6Ri4YuYJubLzIJLUidCh09e1djEVQ==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve/node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "license": "MIT" + }, + "node_modules/resolve/node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/rollup": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.28.0.tgz", + "integrity": "sha512-G9GOrmgWHBma4YfCcX8PjH0qhXSdH8B4HDE2o4/jaxj93S4DPCIDoLcXz99eWMji4hB29UFCEd7B2gwGJDR9cQ==", + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.6" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.28.0", + "@rollup/rollup-android-arm64": "4.28.0", + "@rollup/rollup-darwin-arm64": "4.28.0", + "@rollup/rollup-darwin-x64": "4.28.0", + "@rollup/rollup-freebsd-arm64": "4.28.0", + "@rollup/rollup-freebsd-x64": "4.28.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.28.0", + "@rollup/rollup-linux-arm-musleabihf": "4.28.0", + "@rollup/rollup-linux-arm64-gnu": "4.28.0", + "@rollup/rollup-linux-arm64-musl": "4.28.0", + "@rollup/rollup-linux-powerpc64le-gnu": "4.28.0", + "@rollup/rollup-linux-riscv64-gnu": "4.28.0", + "@rollup/rollup-linux-s390x-gnu": "4.28.0", + "@rollup/rollup-linux-x64-gnu": "4.28.0", + "@rollup/rollup-linux-x64-musl": "4.28.0", + "@rollup/rollup-win32-arm64-msvc": "4.28.0", + "@rollup/rollup-win32-ia32-msvc": "4.28.0", + "@rollup/rollup-win32-x64-msvc": "4.28.0", + "fsevents": "~2.3.2" + } + }, + "node_modules/rollup/node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.28.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.28.0.tgz", + "integrity": "sha512-lmKx9yHsppblnLQZOGxdO66gT77bvdBtr/0P+TPOseowE7D9AJoBw8ZDULRasXRWf1Z86/gcOdpBrV6VDUY36Q==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/rollup/node_modules/@types/estree": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", + "integrity": "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==", + "license": "MIT" + }, + "node_modules/rxjs": { + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", + "devOptional": true, + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/sass-embedded": { + "version": "1.83.0", + "resolved": 
"https://registry.npmjs.org/sass-embedded/-/sass-embedded-1.83.0.tgz", + "integrity": "sha512-/8cYZeL39evUqe0o//193na51Q1VWZ61qhxioQvLJwOtWIrX+PgNhCyD8RSuTtmzc4+6+waFZf899bfp/MCUwA==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "@bufbuild/protobuf": "^2.0.0", + "buffer-builder": "^0.2.0", + "colorjs.io": "^0.5.0", + "immutable": "^5.0.2", + "rxjs": "^7.4.0", + "supports-color": "^8.1.1", + "sync-child-process": "^1.0.2", + "varint": "^6.0.0" + }, + "bin": { + "sass": "dist/bin/sass.js" + }, + "engines": { + "node": ">=16.0.0" + }, + "optionalDependencies": { + "sass-embedded-android-arm": "1.83.0", + "sass-embedded-android-arm64": "1.83.0", + "sass-embedded-android-ia32": "1.83.0", + "sass-embedded-android-riscv64": "1.83.0", + "sass-embedded-android-x64": "1.83.0", + "sass-embedded-darwin-arm64": "1.83.0", + "sass-embedded-darwin-x64": "1.83.0", + "sass-embedded-linux-arm": "1.83.0", + "sass-embedded-linux-arm64": "1.83.0", + "sass-embedded-linux-ia32": "1.83.0", + "sass-embedded-linux-musl-arm": "1.83.0", + "sass-embedded-linux-musl-arm64": "1.83.0", + "sass-embedded-linux-musl-ia32": "1.83.0", + "sass-embedded-linux-musl-riscv64": "1.83.0", + "sass-embedded-linux-musl-x64": "1.83.0", + "sass-embedded-linux-riscv64": "1.83.0", + "sass-embedded-linux-x64": "1.83.0", + "sass-embedded-win32-arm64": "1.83.0", + "sass-embedded-win32-ia32": "1.83.0", + "sass-embedded-win32-x64": "1.83.0" + } + }, + "node_modules/sass-embedded-android-arm": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-android-arm/-/sass-embedded-android-arm-1.83.0.tgz", + "integrity": "sha512-uwFSXzJlfbd4Px189xE5l+cxN8+TQpXdQgJec7TIrb4HEY7imabtpYufpVdqUVwT1/uiis5V4+qIEC4Vl5XObQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-android-arm64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-android-arm64/-/sass-embedded-android-arm64-1.83.0.tgz", + "integrity": "sha512-GBiCvM4a2rkWBLdYDxI6XYnprfk5U5c81g69RC2X6kqPuzxzx8qTArQ9M6keFK4+iDQ5N9QTwFCr0KbZTn+ZNQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-android-ia32": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-android-ia32/-/sass-embedded-android-ia32-1.83.0.tgz", + "integrity": "sha512-5ATPdGo2SICqAhiJl/Z8KQ23zH4sGgobGgux0TnrNtt83uHZ+r+To/ubVJ7xTkZxed+KJZnIpolGD8dQyQqoTg==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-android-riscv64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-android-riscv64/-/sass-embedded-android-riscv64-1.83.0.tgz", + "integrity": "sha512-aveknUOB8GZewOzVn2Uwk+DKcncTR50Q6vtzslNMGbYnxtgQNHzy8A1qVEviNUruex+pHofppeMK4iMPFAbiEQ==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-android-x64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-android-x64/-/sass-embedded-android-x64-1.83.0.tgz", + "integrity": "sha512-WqIay/72ncyf9Ph4vS742J3a73wZihWmzFUwpn1OD6lme1Aj4eWzWIve5IVnlTEJgcZcDHu6ECID9IZgehJKoA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + 
"android" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-darwin-arm64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-darwin-arm64/-/sass-embedded-darwin-arm64-1.83.0.tgz", + "integrity": "sha512-XQl9QqgxFFIPm/CzHhmppse5o9ocxrbaAdC2/DAnlAqvYWBBtgFqPjGoYlej13h9SzfvNoogx+y9r+Ap+e+hYg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-darwin-x64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-darwin-x64/-/sass-embedded-darwin-x64-1.83.0.tgz", + "integrity": "sha512-ERQ7Tvp1kFOW3ux4VDFIxb7tkYXHYc+zJpcrbs0hzcIO5ilIRU2tIOK1OrNwrFO6Qxyf7AUuBwYKLAtIU/Nz7g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-arm": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-arm/-/sass-embedded-linux-arm-1.83.0.tgz", + "integrity": "sha512-baG9RYBJxUFmqwDNC9h9ZFElgJoyO3jgHGjzEZ1wHhIS9anpG+zZQvO8bHx3dBpKEImX+DBeLX+CxsFR9n81gQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-arm64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-arm64/-/sass-embedded-linux-arm64-1.83.0.tgz", + "integrity": "sha512-syEAVTJt4qhaMLxrSwOWa46zdqHJdnqJkLUK+t9aCr8xqBZLPxSUeIGji76uOehQZ1C+KGFj6n9xstHN6wzOJw==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-ia32": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-ia32/-/sass-embedded-linux-ia32-1.83.0.tgz", + "integrity": "sha512-RRBxQxMpoxu5+XcSSc6QR/o9asEwUzR8AbCS83RaXcdTIHTa/CccQsiAoDDoPlRsMTLqnzs0LKL4CfOsf7zBbA==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-musl-arm": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-musl-arm/-/sass-embedded-linux-musl-arm-1.83.0.tgz", + "integrity": "sha512-Yc7u2TelCfBab+PRob9/MNJFh3EooMiz4urvhejXkihTiKSHGCv5YqDdtWzvyb9tY2Jb7YtYREVuHwfdVn3dTQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-musl-arm64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-musl-arm64/-/sass-embedded-linux-musl-arm64-1.83.0.tgz", + "integrity": "sha512-Y7juhPHClUO2H5O+u+StRy6SEAcwZ+hTEk5WJdEmo1Bb1gDtfHvJaWB/iFZJ2tW0W1e865AZeUrC4OcOFjyAQA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-musl-ia32": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-musl-ia32/-/sass-embedded-linux-musl-ia32-1.83.0.tgz", + "integrity": "sha512-arQeYwGmwXV8byx5G1PtSzZWW1jbkfR5qrIHMEbTFSAvAxpqjgSvCvrHMOFd73FcMxVaYh4BX9LQNbKinkbEdg==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + 
"node_modules/sass-embedded-linux-musl-riscv64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-musl-riscv64/-/sass-embedded-linux-musl-riscv64-1.83.0.tgz", + "integrity": "sha512-E6uzlIWz59rut+Z3XR6mLG915zNzv07ISvj3GUNZENdHM7dF8GQ//ANoIpl5PljMQKp89GnYdvo6kj2gnaBf/g==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-musl-x64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-musl-x64/-/sass-embedded-linux-musl-x64-1.83.0.tgz", + "integrity": "sha512-eAMK6tyGqvqr21r9g8BnR3fQc1rYFj85RGduSQ3xkITZ6jOAnOhuU94N5fwRS852Hpws0lXhET+7JHXgg3U18w==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-riscv64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-riscv64/-/sass-embedded-linux-riscv64-1.83.0.tgz", + "integrity": "sha512-Ojpi78pTv02sy2fUYirRGXHLY3fPnV/bvwuC2i5LwPQw2LpCcFyFTtN0c5h4LJDk9P6wr+/ZB/JXU8tHIOlK+Q==", + "cpu": [ + "riscv64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-linux-x64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-linux-x64/-/sass-embedded-linux-x64-1.83.0.tgz", + "integrity": "sha512-3iLjlXdoPfgZRtX4odhRvka1BQs5mAXqfCtDIQBgh/o0JnGPzJIWWl9bYLpHxK8qb+uyVBxXYgXpI0sCzArBOw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-win32-arm64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-win32-arm64/-/sass-embedded-win32-arm64-1.83.0.tgz", + "integrity": "sha512-iOHw/8/t2dlTW3lOFwG5eUbiwhEyGWawivlKWJ8lkXH7fjMpVx2VO9zCFAm8RvY9xOHJ9sf1L7g5bx3EnNP9BQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-win32-ia32": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-win32-ia32/-/sass-embedded-win32-ia32-1.83.0.tgz", + "integrity": "sha512-2PxNXJ8Pad4geVcTXY4rkyTr5AwbF8nfrCTDv0ulbTvPhzX2mMKEGcBZUXWn5BeHZTBc6whNMfS7d5fQXR9dDQ==", + "cpu": [ + "ia32" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sass-embedded-win32-x64": { + "version": "1.83.0", + "resolved": "https://registry.npmjs.org/sass-embedded-win32-x64/-/sass-embedded-win32-x64-1.83.0.tgz", + "integrity": "sha512-muBXkFngM6eLTNqOV0FQi7Dv9s+YRQ42Yem26mosdan/GmJQc81deto6uDTgrYn+bzFNmiXcOdfm+0MkTWK3OQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/sucrase": { + "version": "3.35.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", + "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": 
"bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sucrase/node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/sucrase/node_modules/@jridgewell/gen-mapping": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/sucrase/node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/sucrase/node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/sucrase/node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/sucrase/node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/sucrase/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/sucrase/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": 
"sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/sucrase/node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/sucrase/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/sucrase/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/sucrase/node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/sucrase/node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/foreground-child": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", + "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", + "license": "ISC", + "dependencies": { + 
"cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sucrase/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sucrase/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC" + }, + "node_modules/sucrase/node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/sucrase/node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "license": "ISC" + }, + "node_modules/sucrase/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sucrase/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sucrase/node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "license": "MIT", + "dependencies": { + 
"any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/sucrase/node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/sucrase/node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "license": "BlueOak-1.0.0" + }, + "node_modules/sucrase/node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sucrase/node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/sucrase/node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sucrase/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + 
}, + "node_modules/sucrase/node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/sucrase/node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/sucrase/node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/sucrase/node_modules/ts-interface-checker": 
{ + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "license": "Apache-2.0" + }, + "node_modules/sucrase/node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/sucrase/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/sucrase/node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/sucrase/node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/sucrase/node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/sucrase/node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase/node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": 
"sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/sync-child-process": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/sync-child-process/-/sync-child-process-1.0.2.tgz", + "integrity": "sha512-8lD+t2KrrScJ/7KXCSyfhT3/hRq78rC0wBFqNJXv3mZyn6hW2ypM05JmlSvtqRbeq6jqA94oHbxAr2vYsJ8vDA==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "sync-message-port": "^1.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/sync-message-port": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/sync-message-port/-/sync-message-port-1.1.3.tgz", + "integrity": "sha512-GTt8rSKje5FilG+wEdfCkOcLL7LWqpMlr2c3LRuKt/YXxcJ52aGSbGBAdI4L3aaqfrBt6y711El53ItyH1NWzg==", + "devOptional": true, + "license": "MIT", + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.15", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.15.tgz", + "integrity": "sha512-r4MeXnfBmSOuKUWmXe6h2CcyfzJCEk4F0pptO5jlnYSIViUkVmsawj80N5h2lO3gwcmSb4n3PuN+e+GC1Guylw==", + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.6", + "lilconfig": "^2.1.0", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/textlinestream": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/textlinestream/-/textlinestream-1.1.1.tgz", + "integrity": "sha512-iBHbi7BQxrFmwZUQJsT0SjNzlLLsXhvW/kg7EyOMVMBIrlnj/qYofwo1LVLZi+3GbUEo96Iu2eqToI2+lZoAEQ==", + "license": "MIT" + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "devOptional": true, + "license": "0BSD" + }, + "node_modules/uc.micro": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-2.1.0.tgz", + "integrity": "sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==", + "license": "MIT" + }, + "node_modules/varint": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/varint/-/varint-6.0.0.tgz", + "integrity": "sha512-cXEIW6cfr15lFv563k4GuVuW/fiwjknytD37jIOLSdSWuOI6WnO/oKwmP2FQTU2l01LP8/M5TSAJpzUaGe3uWg==", + "devOptional": true, + "license": "MIT" + }, + 
"node_modules/vite": { + "version": "5.4.11", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.11.tgz", + "integrity": "sha512-c7jFQRklXua0mTzneGW9QVyxFjUgwcihC4bXEtujIo2ouWCe1Ajt/amn2PCxYnhYfd5k09JX3SB7OYWFKYqj8Q==", + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-plugin-singlefile": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/vite-plugin-singlefile/-/vite-plugin-singlefile-2.0.3.tgz", + "integrity": "sha512-OEBEwdX8nCGPSdtaB1D7rryYnT+YfPTS8ojL1TDyeUF+bWDCTfRriQqw6T0vl9EbKI/KMg7szN3awst6cLrKkA==", + "license": "MIT", + "dependencies": { + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">18.0.0" + }, + "peerDependencies": { + "rollup": "^4.24.3", + "vite": "^5.4.10" + } + }, + "node_modules/vue": { + "version": "3.5.13", + "resolved": "https://registry.npmjs.org/vue/-/vue-3.5.13.tgz", + "integrity": "sha512-wmeiSMxkZCSc+PM2w2VRsOYAZC8GdipNFRTsLSfodVqI9mbejKeXEGr8SckuLnrQPGe3oJN5c3K0vpoU9q/wCQ==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.5.13", + "@vue/compiler-sfc": "3.5.13", + "@vue/runtime-dom": "3.5.13", + "@vue/server-renderer": "3.5.13", + "@vue/shared": "3.5.13" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + } + } +} diff --git a/examples/server/webui/package.json b/examples/server/webui/package.json new file mode 100644 index 0000000000000..2836cce00d41d --- /dev/null +++ b/examples/server/webui/package.json @@ -0,0 +1,30 @@ +{ + "name": "webui", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vite build", + "preview": "vite preview", + "analyze": "ANALYZE=1 npx vite-bundle-visualizer" + }, + "devDependencies": { + "sass-embedded": "^1.83.0", + "vite": "^5.4.10" + }, + "dependencies": { + "@sec-ant/readable-stream": "^0.6.0", + "@vscode/markdown-it-katex": "^1.1.1", + "autoprefixer": "^10.4.20", + "daisyui": "^4.12.14", + "highlight.js": "^11.10.0", + "katex": "^0.16.15", + "markdown-it": "^14.1.0", + "postcss": "^8.4.49", + "tailwindcss": "^3.4.15", + "textlinestream": "^1.1.1", + "vite-plugin-singlefile": "^2.0.3", + "vue": "^3.5.13" + } +} diff --git a/examples/server/webui/postcss.config.js b/examples/server/webui/postcss.config.js new file mode 100644 index 0000000000000..2e7af2b7f1a6f --- /dev/null +++ b/examples/server/webui/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +} diff --git a/examples/server/webui/public/demo-conversation.json b/examples/server/webui/public/demo-conversation.json new file mode 100644 index 0000000000000..75ab599dd6ee5 --- /dev/null +++ 
b/examples/server/webui/public/demo-conversation.json @@ -0,0 +1,33 @@ +{ + "demo": true, + "id": "conv-1734086746930", + "lastModified": 1734087548943, + "messages": [ + { + "id": 1734086764521, + "role": "user", + "content": "this is a demo conversation, used in dev mode" + }, + { + "id": 1734087548327, + "role": "assistant", + "content": "This is the formula:\n\n$\\frac{e^{x_i}}{\\sum_{j=1}^{n}e^{x_j}}$\n\nGiven an input vector \\(\\mathbf{x} = [x_1, x_2, \\ldots, x_n]\\)\n\n\\[\ny_i = \\frac{e^{x_i}}{\\sum_{j=1}^n e^{x_j}}\n\\]\n\nCode block latex:\n```latex\n\\frac{e^{x_i}}{\\sum_{j=1}^{n}e^{x_j}}\n```\n\nTest dollar sign: $1234 $4567\n\nInvalid latex syntax: $E = mc^$ and $$E = mc^$$", + "timings": { + "prompt_n": 1, + "prompt_ms": 28.923, + "predicted_n": 25, + "predicted_ms": 573.016 + } + }, + { + "id": 1734087548328, + "role": "user", + "content": "this is a demo conversation, used in dev mode" + }, + { + "id": 1734087548329, + "role": "assistant", + "content": "Code block:\n```js\nconsole.log('hello world')\n```\n```sh\nls -la /dev\n```" + } + ] +} diff --git a/examples/server/webui/src/highlight-config.js b/examples/server/webui/src/highlight-config.js new file mode 100644 index 0000000000000..96c7028f98b25 --- /dev/null +++ b/examples/server/webui/src/highlight-config.js @@ -0,0 +1,60 @@ +import hljs from 'highlight.js/lib/core'; + +// only import commonly used languages to reduce bundle size + +import python from 'highlight.js/lib/languages/python'; +import javascript from 'highlight.js/lib/languages/javascript'; +import json from 'highlight.js/lib/languages/json'; +import bash from 'highlight.js/lib/languages/bash'; +import yaml from 'highlight.js/lib/languages/yaml'; +import markdown from 'highlight.js/lib/languages/markdown'; +import scss from 'highlight.js/lib/languages/scss'; +import xml from 'highlight.js/lib/languages/xml'; +import ruby from 'highlight.js/lib/languages/ruby'; +import go from 'highlight.js/lib/languages/go'; +import java from 'highlight.js/lib/languages/java'; +import rust from 'highlight.js/lib/languages/rust'; +import scala from 'highlight.js/lib/languages/scala'; +import cpp from 'highlight.js/lib/languages/cpp'; +import csharp from 'highlight.js/lib/languages/csharp'; +import swift from 'highlight.js/lib/languages/swift'; +import dart from 'highlight.js/lib/languages/dart'; +import elixir from 'highlight.js/lib/languages/elixir'; +import kotlin from 'highlight.js/lib/languages/kotlin'; +import lua from 'highlight.js/lib/languages/lua'; +import php from 'highlight.js/lib/languages/php'; +import latex from 'highlight.js/lib/languages/latex'; + +hljs.registerLanguage('python', python); +hljs.registerLanguage('javascript', javascript); +hljs.registerLanguage('json', json); +hljs.registerLanguage('yaml', yaml); +hljs.registerLanguage('markdown', markdown); +hljs.registerLanguage('xml', xml); +hljs.registerLanguage('ruby', ruby); +hljs.registerLanguage('go', go); +hljs.registerLanguage('java', java); +hljs.registerLanguage('rust', rust); +hljs.registerLanguage('scala', scala); +hljs.registerLanguage('csharp', csharp); +hljs.registerLanguage('swift', swift); +hljs.registerLanguage('dart', dart); +hljs.registerLanguage('elixir', elixir); +hljs.registerLanguage('kotlin', kotlin); +hljs.registerLanguage('lua', lua); +hljs.registerLanguage('php', php); +hljs.registerLanguage('latex', latex); + +// reuse some languages to further reduce bundle size + +hljs.registerLanguage('shell', bash); +hljs.registerLanguage('bash', bash); +hljs.registerLanguage('sh', bash); 
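+// note: below, the scss grammar is reused for plain css (scss is a superset of css), and the cpp grammar also covers c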
+ +hljs.registerLanguage('css', scss); +hljs.registerLanguage('scss', scss); + +hljs.registerLanguage('c', cpp); +hljs.registerLanguage('cpp', cpp); + +export default hljs; diff --git a/examples/server/webui/src/katex-gpt.js b/examples/server/webui/src/katex-gpt.js new file mode 100644 index 0000000000000..7c7c5e22c1cf0 --- /dev/null +++ b/examples/server/webui/src/katex-gpt.js @@ -0,0 +1,66 @@ +import katex from 'katex'; + +// Adapted from https://github.com/SchneeHertz/markdown-it-katex-gpt +// MIT license + +const defaultOptions = { + delimiters: [ + { left: '\\[', right: '\\]', display: true }, + { left: '\\(', right: '\\)', display: false }, + ], +}; + +export function renderLatexHTML(content, display = false) { + return katex.renderToString(content, { + throwOnError: false, + output: 'mathml', + displayMode: display, + }); +} + +function escapedBracketRule(options) { + return (state, silent) => { + const max = state.posMax; + const start = state.pos; + + for (const { left, right, display } of options.delimiters) { + + // Check if it starts with the left delimiter + if (!state.src.slice(start).startsWith(left)) continue; + + // Skip the length of the left delimiter + let pos = start + left.length; + + // Find the matching right delimiter + while (pos < max) { + if (state.src.slice(pos).startsWith(right)) { + break; + } + pos++; + } + + // No matching right delimiter found, skip to the next match + if (pos >= max) continue; + + // If not in silent mode, convert LaTeX formula to MathML + if (!silent) { + const content = state.src.slice(start + left.length, pos); + try { + const renderedContent = renderLatexHTML(content, display); + const token = state.push('html_inline', '', 0); + token.content = renderedContent; + } catch (e) { + console.error(e); + } + } + + // Update position, skip the length of the right delimiter + state.pos = pos + right.length; + return true; + } + } +} + +export default function (md, options = defaultOptions) { + md.inline.ruler.after('text', 'escaped_bracket', escapedBracketRule(options)); +} diff --git a/examples/server/webui/src/main.js b/examples/server/webui/src/main.js new file mode 100644 index 0000000000000..997ab0ff5d769 --- /dev/null +++ b/examples/server/webui/src/main.js @@ -0,0 +1,585 @@ +import './styles.scss'; +import { createApp, defineComponent, shallowRef, computed, h } from 'vue/dist/vue.esm-bundler.js'; +import MarkdownIt from 'markdown-it'; +import TextLineStream from 'textlinestream'; + +// math formula rendering +import 'katex/dist/katex.min.css'; +import markdownItKatexGpt from './katex-gpt'; +import markdownItKatexNormal from '@vscode/markdown-it-katex'; + +// code highlighting +import hljs from './highlight-config'; +import daisyuiThemes from 'daisyui/src/theming/themes'; + +// ponyfill for missing ReadableStream asyncIterator on Safari +import { asyncIterator } from "@sec-ant/readable-stream/ponyfill/asyncIterator"; + +const isDev = import.meta.env.MODE === 'development'; + +// utility functions +const isString = (x) => !!x.toLowerCase; +const isBoolean = (x) => x === true || x === false; +const isNumeric = (n) => !isString(n) && !isNaN(n) && !isBoolean(n); +const escapeAttr = (str) => str.replace(/>/g, '&gt;').replace(/"/g, '&quot;'); +const copyStr = (str) => navigator.clipboard.writeText(str); + +// constants +const BASE_URL = isDev + ?
(localStorage.getItem('base') || 'http://localhost:8080') // for debugging + : (new URL('.', document.baseURI).href).toString().replace(/\/$/, ''); // for production +console.log({ BASE_URL }); + +const CONFIG_DEFAULT = { + // Note: in order not to introduce breaking changes, please keep the same data type (number, string, etc) if you want to change the default value. Do not use null or undefined as a default value. + apiKey: '', + systemMessage: 'You are a helpful assistant.', + showTokensPerSecond: false, + // make sure these default values are in sync with `common.h` + samplers: 'edkypmxt', + temperature: 0.8, + dynatemp_range: 0.0, + dynatemp_exponent: 1.0, + top_k: 40, + top_p: 0.95, + min_p: 0.05, + xtc_probability: 0.0, + xtc_threshold: 0.1, + typical_p: 1.0, + repeat_last_n: 64, + repeat_penalty: 1.0, + presence_penalty: 0.0, + frequency_penalty: 0.0, + dry_multiplier: 0.0, + dry_base: 1.75, + dry_allowed_length: 2, + dry_penalty_last_n: -1, + max_tokens: -1, + custom: '', // custom json-stringified object +}; +const CONFIG_INFO = { + apiKey: 'Set the API Key if you are using the --api-key option for the server.', + systemMessage: 'The starting message that defines how the model should behave.', + samplers: 'The order in which samplers are applied, in a simplified way. Default is "edkypmxt": penalties->dry->top_k->typ_p->top_p->min_p->xtc->temperature', + temperature: 'Controls the randomness of the generated text by affecting the probability distribution of the output tokens. Higher = more random, lower = more focused.', + dynatemp_range: 'Add-on for the temperature sampler. The value added to the range of dynamic temperature, which adjusts probabilities by the entropy of tokens.', + dynatemp_exponent: 'Add-on for the temperature sampler. Smooths out the probability redistribution based on the most probable token.', + top_k: 'Keeps only the k top tokens.', + top_p: 'Limits tokens to those that together have a cumulative probability of at least p.', + min_p: 'Limits tokens based on the minimum probability for a token to be considered, relative to the probability of the most likely token.', + xtc_probability: 'XTC sampler cuts out top tokens; this parameter controls the chance of cutting tokens at all. 0 disables XTC.', + xtc_threshold: 'XTC sampler cuts out top tokens; this parameter controls the token probability that is required to cut that token.', + typical_p: 'Sorts and limits tokens based on the difference between log-probability and entropy.', + repeat_last_n: 'Last n tokens to consider for penalizing repetition.', + repeat_penalty: 'Controls the repetition of token sequences in the generated text.', + presence_penalty: 'Limits tokens based on whether they appear in the output or not.', + frequency_penalty: 'Limits tokens based on how often they appear in the output.', + dry_multiplier: 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling multiplier.', + dry_base: 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling base value.', + dry_allowed_length: 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the allowed length for DRY sampling.', + dry_penalty_last_n: 'DRY sampling reduces repetition in generated text even across long contexts.
This parameter sets the DRY penalty for the last n tokens.', + max_tokens: 'The maximum number of tokens per output.', + custom: '', // custom json-stringified object +}; +// config keys having numeric values (e.g. temperature, top_k, top_p) +const CONFIG_NUMERIC_KEYS = Object.entries(CONFIG_DEFAULT).filter(e => isNumeric(e[1])).map(e => e[0]); +// list of themes supported by daisyui +const THEMES = ['light', 'dark'] + // make sure light & dark are always at the beginning + .concat(Object.keys(daisyuiThemes).filter(t => t !== 'light' && t !== 'dark')); + +// markdown support +const VueMarkdown = defineComponent( + (props) => { + const md = shallowRef(new MarkdownIt({ + breaks: true, + highlight: function (str, lang) { // Add highlight.js + if (lang && hljs.getLanguage(lang)) { + try { + return '
<pre><code class="hljs">' +
+                   hljs.highlight(str, { language: lang, ignoreIllegals: true }).value +
+                   '</code></pre>'; + } catch (__) {} + } + return '<pre><code class="hljs">' + md.value.utils.escapeHtml(str) + '</code></pre>'; + } + })); + // support latex with double dollar sign and square brackets + md.value.use(markdownItKatexGpt, { + delimiters: [ + { left: '\\[', right: '\\]', display: true }, + { left: '\\(', right: '\\)', display: false }, + { left: '$$', right: '$$', display: false }, + // do not add the single dollar sign here, otherwise it will be confused with the dollar sign used for money + ], + throwOnError: false, + }); + // support latex with single dollar sign + md.value.use(markdownItKatexNormal, { throwOnError: false }); + // add copy button to code blocks + const origFenceRenderer = md.value.renderer.rules.fence; + md.value.renderer.rules.fence = (tokens, idx, ...args) => { + const content = tokens[idx].content; + const origRendered = origFenceRenderer(tokens, idx, ...args); + return `<div class="relative my-4">
+      <div class="text-right sticky top-4 mb-2 mr-2 h-0"> +        <button class="badge btn-mini" onclick="copyStr(${escapeAttr(JSON.stringify(content))})">📋 Copy</button> +      </div> +      ${origRendered} +    </div>
`; + }; + window.copyStr = copyStr; + const content = computed(() => md.value.render(props.source)); + return () => h("div", { innerHTML: content.value }); + }, + { props: ["source"] } +); + +// input field to be used by settings modal +const SettingsModalShortInput = defineComponent({ + template: document.getElementById('settings-modal-short-input').innerHTML, + props: { + label: { type: String, required: false }, + configKey: String, + configDefault: Object, + configInfo: Object, + modelValue: [Object, String, Number], + }, +}); + +// message bubble component +const MessageBubble = defineComponent({ + components: { + VueMarkdown + }, + template: document.getElementById('message-bubble').innerHTML, + props: { + config: Object, + msg: Object, + isGenerating: Boolean, + editUserMsgAndRegenerate: Function, + regenerateMsg: Function, + }, + data() { + return { + editingContent: null, + }; + }, + computed: { + timings() { + if (!this.msg.timings) return null; + return { + ...this.msg.timings, + prompt_per_second: this.msg.timings.prompt_n / (this.msg.timings.prompt_ms / 1000), + predicted_per_second: this.msg.timings.predicted_n / (this.msg.timings.predicted_ms / 1000), + }; + } + }, + methods: { + copyMsg() { + copyStr(this.msg.content); + }, + editMsg() { + this.editUserMsgAndRegenerate({ + ...this.msg, + content: this.editingContent, + }); + this.editingContent = null; + }, + }, +}); + +// conversations are stored in localStorage +// format: { [convId]: { id: string, lastModified: number, messages: [...] } } +// convId is a string prefixed with 'conv-' +const StorageUtils = { + // manage conversations + getAllConversations() { + const res = []; + for (const key in localStorage) { + if (key.startsWith('conv-')) { + res.push(JSON.parse(localStorage.getItem(key))); + } + } + res.sort((a, b) => b.lastModified - a.lastModified); + return res; + }, + // can return null if convId does not exist + getOneConversation(convId) { + return JSON.parse(localStorage.getItem(convId) || 'null'); + }, + // if convId does not exist, create one + appendMsg(convId, msg) { + if (msg.content === null) return; + const conv = StorageUtils.getOneConversation(convId) || { + id: convId, + lastModified: Date.now(), + messages: [], + }; + conv.messages.push(msg); + conv.lastModified = Date.now(); + localStorage.setItem(convId, JSON.stringify(conv)); + }, + getNewConvId() { + return `conv-${Date.now()}`; + }, + remove(convId) { + localStorage.removeItem(convId); + }, + filterAndKeepMsgs(convId, predicate) { + const conv = StorageUtils.getOneConversation(convId); + if (!conv) return; + conv.messages = conv.messages.filter(predicate); + conv.lastModified = Date.now(); + localStorage.setItem(convId, JSON.stringify(conv)); + }, + popMsg(convId) { + const conv = StorageUtils.getOneConversation(convId); + if (!conv) return; + const msg = conv.messages.pop(); + conv.lastModified = Date.now(); + if (conv.messages.length === 0) { + StorageUtils.remove(convId); + } else { + localStorage.setItem(convId, JSON.stringify(conv)); + } + return msg; + }, + + // manage config + getConfig() { + const savedVal = JSON.parse(localStorage.getItem('config') || '{}'); + // to prevent breaking changes in the future, we always provide default values for missing keys + return { + ...CONFIG_DEFAULT, + ...savedVal, + }; + }, + setConfig(config) { + localStorage.setItem('config', JSON.stringify(config)); + }, + getTheme() { + return localStorage.getItem('theme') || 'auto'; + }, + setTheme(theme) { + if (theme === 'auto') { +
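+      // no stored override: setSelectedTheme() then falls back to data-color-scheme='auto', which follows the OS preference via the prefers-color-scheme rules in styles.scss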
localStorage.removeItem('theme'); + } else { + localStorage.setItem('theme', theme); + } + }, +}; + +// scroll to bottom of chat messages +// if requiresNearBottom is true, only auto-scroll if user is near bottom +const chatScrollToBottom = (requiresNearBottom) => { + const msgListElem = document.getElementById('messages-list'); + const spaceToBottom = msgListElem.scrollHeight - msgListElem.scrollTop - msgListElem.clientHeight; + if (!requiresNearBottom || (spaceToBottom < 100)) { + setTimeout(() => msgListElem.scrollTo({ top: msgListElem.scrollHeight }), 1); + } +}; + +// wrapper for SSE +async function* sendSSEPostRequest(url, fetchOptions) { + const res = await fetch(url, fetchOptions); + const lines = res.body + .pipeThrough(new TextDecoderStream()) + .pipeThrough(new TextLineStream()); + for await (const line of asyncIterator(lines)) { + if (isDev) console.log({line}); + if (line.startsWith('data:') && !line.endsWith('[DONE]')) { + const data = JSON.parse(line.slice(5)); + yield data; + } else if (line.startsWith('error:')) { + const data = JSON.parse(line.slice(6)); + throw new Error(data.message || 'Unknown error'); + } + } +}; + +const mainApp = createApp({ + components: { + VueMarkdown, + SettingsModalShortInput, + MessageBubble, + }, + data() { + return { + conversations: StorageUtils.getAllConversations(), + messages: [], // { id: number, role: 'user' | 'assistant', content: string } + viewingConvId: StorageUtils.getNewConvId(), + inputMsg: '', + isGenerating: false, + pendingMsg: null, // the on-going message from assistant + stopGeneration: () => {}, + selectedTheme: StorageUtils.getTheme(), + config: StorageUtils.getConfig(), + showConfigDialog: false, + // const + themes: THEMES, + configDefault: {...CONFIG_DEFAULT}, + configInfo: {...CONFIG_INFO}, + isDev, + } + }, + computed: {}, + mounted() { + document.getElementById('app').classList.remove('opacity-0'); // show app + // scroll to the bottom when the pending message height is updated + const pendingMsgElem = document.getElementById('pending-msg'); + const resizeObserver = new ResizeObserver(() => { + if (this.isGenerating) chatScrollToBottom(true); + }); + resizeObserver.observe(pendingMsgElem); + this.setSelectedTheme(this.selectedTheme); + }, + watch: { + viewingConvId: function(val, oldVal) { + if (val != oldVal) { + this.fetchMessages(); + chatScrollToBottom(); + this.hideSidebar(); + } + } + }, + methods: { + hideSidebar() { + document.getElementById('toggle-drawer').checked = false; + }, + setSelectedTheme(theme) { + this.selectedTheme = theme; + document.body.setAttribute('data-theme', theme); + document.body.setAttribute('data-color-scheme', daisyuiThemes[theme]?.['color-scheme'] ?? 
'auto'); + StorageUtils.setTheme(theme); + }, + newConversation() { + if (this.isGenerating) return; + this.viewingConvId = StorageUtils.getNewConvId(); + }, + setViewingConv(convId) { + if (this.isGenerating) return; + this.viewingConvId = convId; + }, + deleteConv(convId) { + if (this.isGenerating) return; + if (window.confirm('Are you sure you want to delete this conversation?')) { + StorageUtils.remove(convId); + if (this.viewingConvId === convId) { + this.viewingConvId = StorageUtils.getNewConvId(); + } + this.fetchConversation(); + this.fetchMessages(); + } + }, + downloadConv(convId) { + const conversation = StorageUtils.getOneConversation(convId); + if (!conversation) { + alert('Conversation not found.'); + return; + } + const conversationJson = JSON.stringify(conversation, null, 2); + const blob = new Blob([conversationJson], { type: 'application/json' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `conversation_${convId}.json`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + }, + async sendMessage() { + if (!this.inputMsg) return; + const currConvId = this.viewingConvId; + + StorageUtils.appendMsg(currConvId, { + id: Date.now(), + role: 'user', + content: this.inputMsg, + }); + this.fetchConversation(); + this.fetchMessages(); + this.inputMsg = ''; + this.generateMessage(currConvId); + chatScrollToBottom(); + }, + async generateMessage(currConvId) { + if (this.isGenerating) return; + this.pendingMsg = { id: Date.now()+1, role: 'assistant', content: null }; + this.isGenerating = true; + + try { + const abortController = new AbortController(); + this.stopGeneration = () => abortController.abort(); + const params = { + messages: [ + { role: 'system', content: this.config.systemMessage }, + ...this.messages, + ], + stream: true, + cache_prompt: true, + samplers: this.config.samplers, + temperature: this.config.temperature, + dynatemp_range: this.config.dynatemp_range, + dynatemp_exponent: this.config.dynatemp_exponent, + top_k: this.config.top_k, + top_p: this.config.top_p, + min_p: this.config.min_p, + typical_p: this.config.typical_p, + xtc_probability: this.config.xtc_probability, + xtc_threshold: this.config.xtc_threshold, + repeat_last_n: this.config.repeat_last_n, + repeat_penalty: this.config.repeat_penalty, + presence_penalty: this.config.presence_penalty, + frequency_penalty: this.config.frequency_penalty, + dry_multiplier: this.config.dry_multiplier, + dry_base: this.config.dry_base, + dry_allowed_length: this.config.dry_allowed_length, + dry_penalty_last_n: this.config.dry_penalty_last_n, + max_tokens: this.config.max_tokens, + timings_per_token: !!this.config.showTokensPerSecond, + ...(this.config.custom.length ? JSON.parse(this.config.custom) : {}), + }; + const chunks = sendSSEPostRequest(`${BASE_URL}/v1/chat/completions`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + ...(this.config.apiKey ?
{'Authorization': `Bearer ${this.config.apiKey}`} : {}) + }, + body: JSON.stringify(params), + signal: abortController.signal, + }); + for await (const chunk of chunks) { + const stop = chunk.stop; + const addedContent = chunk.choices[0].delta.content; + const lastContent = this.pendingMsg.content || ''; + if (addedContent) { + this.pendingMsg = { + id: this.pendingMsg.id, + role: 'assistant', + content: lastContent + addedContent, + }; + } + const timings = chunk.timings; + if (timings && this.config.showTokensPerSecond) { + // only extract what's really needed, to save some space + this.pendingMsg.timings = { + prompt_n: timings.prompt_n, + prompt_ms: timings.prompt_ms, + predicted_n: timings.predicted_n, + predicted_ms: timings.predicted_ms, + }; + } + } + + StorageUtils.appendMsg(currConvId, this.pendingMsg); + this.fetchConversation(); + this.fetchMessages(); + setTimeout(() => document.getElementById('msg-input').focus(), 1); + } catch (error) { + if (error.name === 'AbortError') { + // user stopped the generation via the stopGeneration() function + StorageUtils.appendMsg(currConvId, this.pendingMsg); + this.fetchConversation(); + this.fetchMessages(); + } else { + console.error(error); + alert(error); + // pop last user message + const lastUserMsg = StorageUtils.popMsg(currConvId); + this.inputMsg = lastUserMsg ? lastUserMsg.content : ''; + } + } + + this.pendingMsg = null; + this.isGenerating = false; + this.stopGeneration = () => {}; + this.fetchMessages(); + chatScrollToBottom(); + }, + + // message actions + regenerateMsg(msg) { + if (this.isGenerating) return; + // TODO: somehow keep old history (like how ChatGPT has different "tree"). This can be done by adding "sub-conversations" with a "subconv-" prefix, and each new message will have a list of subconvIds + const currConvId = this.viewingConvId; + StorageUtils.filterAndKeepMsgs(currConvId, (m) => m.id < msg.id); + this.fetchConversation(); + this.fetchMessages(); + this.generateMessage(currConvId); + }, + editUserMsgAndRegenerate(msg) { + if (this.isGenerating) return; + const currConvId = this.viewingConvId; + const newContent = msg.content; + StorageUtils.filterAndKeepMsgs(currConvId, (m) => m.id < msg.id); + StorageUtils.appendMsg(currConvId, { + id: Date.now(), + role: 'user', + content: newContent, + }); + this.fetchConversation(); + this.fetchMessages(); + this.generateMessage(currConvId); + }, + + // settings dialog methods + closeAndSaveConfigDialog() { + try { + if (this.config.custom.length) JSON.parse(this.config.custom); + } catch (error) { + alert('Invalid JSON for custom config. Please either fix it or leave it empty.'); + return; + } + for (const key of CONFIG_NUMERIC_KEYS) { + if (isNaN(this.config[key]) || this.config[key].toString().trim().length === 0) { + alert(`Invalid number for ${key} (expected an integer or a float)`); + return; + } + this.config[key] = parseFloat(this.config[key]); + } + this.showConfigDialog = false; + StorageUtils.setConfig(this.config); + }, + closeAndDiscardConfigDialog() { + this.showConfigDialog = false; + this.config = StorageUtils.getConfig(); + }, + resetConfigDialog() { + if (window.confirm('Are you sure you want to reset all settings?')) { + this.config = {...CONFIG_DEFAULT}; + } + }, + + // sync state functions + fetchConversation() { + this.conversations = StorageUtils.getAllConversations(); + }, + fetchMessages() { + this.messages = StorageUtils.getOneConversation(this.viewingConvId)?.messages ??
[]; + }, + + // debug functions + async debugImportDemoConv() { + const res = await fetch('/demo-conversation.json'); + const demoConv = await res.json(); + StorageUtils.remove(demoConv.id); + for (const msg of demoConv.messages) { + StorageUtils.appendMsg(demoConv.id, msg); + } + this.fetchConversation(); + } + }, +}); +mainApp.config.errorHandler = alert; +try { + mainApp.mount('#app'); +} catch (err) { + console.error(err); + document.getElementById('app').innerHTML = `
+  <div style="margin:2em auto">
+    Failed to start app. Please try clearing localStorage and try again.<br/>
+    <br/>
+    <button class="btn" onClick="localStorage.clear(); window.location.reload();">Clear localStorage</button>
+  </div>
`; +} diff --git a/examples/server/webui/src/styles.scss b/examples/server/webui/src/styles.scss new file mode 100644 index 0000000000000..34fe2aaf01ceb --- /dev/null +++ b/examples/server/webui/src/styles.scss @@ -0,0 +1,48 @@ +@use "sass:meta"; + +@tailwind base; +@tailwind components; +@tailwind utilities; + +.markdown { + h1, h2, h3, h4, h5, h6, ul, ol, li { all: revert; } + pre { + @apply whitespace-pre-wrap rounded-lg p-2; + border: 1px solid currentColor; + } + /* TODO: fix markdown table */ +} + +.show-on-hover { + @apply md:opacity-0 md:group-hover:opacity-100; +} +.btn-mini { + @apply cursor-pointer hover:shadow-md; +} +.chat-screen { max-width: 900px; } + +.chat-bubble-base-300 { + --tw-bg-opacity: 1; + --tw-text-opacity: 1; + @apply bg-base-300 text-base-content; +} + +/* Highlight.js */ +[data-color-scheme='light'] { + @include meta.load-css('highlight.js/styles/stackoverflow-light'); +} +[data-color-scheme='dark'] { + @include meta.load-css('highlight.js/styles/stackoverflow-dark'); +} +[data-color-scheme='auto'] { + @media (prefers-color-scheme: light) { + @include meta.load-css('highlight.js/styles/stackoverflow-light'); + } + @media (prefers-color-scheme: dark) { + @include meta.load-css('highlight.js/styles/stackoverflow-dark'); + } +} +.hljs { + background: transparent !important; + padding: 0.5em !important; +} diff --git a/examples/server/webui/tailwind.config.js b/examples/server/webui/tailwind.config.js new file mode 100644 index 0000000000000..c43066a19e7ad --- /dev/null +++ b/examples/server/webui/tailwind.config.js @@ -0,0 +1,16 @@ +/** @type {import('tailwindcss').Config} */ +export default { + content: [ + "./index.html", + "./src/**/*.{js,ts,jsx,tsx}", + ], + theme: { + extend: {}, + }, + plugins: [ + require('daisyui'), + ], + daisyui: { + themes: ['light', 'dark', 'cupcake', 'bumblebee', 'emerald', 'corporate', 'synthwave', 'retro', 'cyberpunk', 'valentine', 'halloween', 'garden', 'forest', 'aqua', 'lofi', 'pastel', 'fantasy', 'wireframe', 'black', 'luxury', 'dracula', 'cmyk', 'autumn', 'business', 'acid', 'lemonade', 'night', 'coffee', 'winter', 'dim', 'nord', 'sunset'], + } +} diff --git a/examples/server/webui/vite.config.js b/examples/server/webui/vite.config.js new file mode 100644 index 0000000000000..6619a630d47f6 --- /dev/null +++ b/examples/server/webui/vite.config.js @@ -0,0 +1,59 @@ + +import { viteSingleFile } from 'vite-plugin-singlefile'; +import path from 'path'; +import fs from 'fs'; +import zlib from 'zlib'; + +const MAX_BUNDLE_SIZE = 1.5 * 1024 * 1024; // only increase when absolutely necessary + +const GUIDE_FOR_FRONTEND = ` +<!-- +  This is a single file, automatically generated by the build process. +  Do not edit this file directly. +  To make changes, refer to the "Web UI" section in the README. +--> +`.trim(); + +const BUILD_PLUGINS = [ + viteSingleFile(), + (function llamaCppPlugin() { + let config; + return { + name: 'llamacpp:build', + apply: 'build', + async configResolved(_config) { + config = _config; + }, + writeBundle() { + const outputIndexHtml = path.join(config.build.outDir, 'index.html'); + const content = GUIDE_FOR_FRONTEND + '\n' + fs.readFileSync(outputIndexHtml, 'utf-8'); + const compressed = zlib.gzipSync(Buffer.from(content, 'utf-8'), { level: 9 }); + + // because the gzip header contains machine-specific info, we must remove this data from the header + // timestamp + compressed[0x4] = 0; + compressed[0x5] = 0; + compressed[0x6] = 0; + compressed[0x7] = 0; + // OS + compressed[0x9] = 0; + + if (compressed.byteLength > MAX_BUNDLE_SIZE) { + throw new Error( + `Bundle size is too large (${Math.ceil(compressed.byteLength / 1024)} KB).\n` + + `Please reduce the size of the frontend or increase
MAX_BUNDLE_SIZE in vite.config.js.\n`, + ); + } + + const targetOutputFile = path.join(config.build.outDir, '../../public/index.html.gz'); + fs.writeFileSync(targetOutputFile, compressed); + } + } + })(), +]; + +/** @type {import('vite').UserConfig} */ +export default { + plugins: process.env.ANALYZE ? [] : BUILD_PLUGINS, +}; diff --git a/examples/simple-chat/CMakeLists.txt b/examples/simple-chat/CMakeLists.txt index 87723533b3226..567f7fbbbf43a 100644 --- a/examples/simple-chat/CMakeLists.txt +++ b/examples/simple-chat/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-simple-chat) add_executable(${TARGET} simple-chat.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/simple-chat/simple-chat.cpp b/examples/simple-chat/simple-chat.cpp index 5f9973163732d..7f4da666b08ec 100644 --- a/examples/simple-chat/simple-chat.cpp +++ b/examples/simple-chat/simple-chat.cpp @@ -62,6 +62,9 @@ int main(int argc, char ** argv) { } }, nullptr); + // load dynamic backends + ggml_backend_load_all(); + // initialize the model llama_model_params model_params = llama_model_default_params(); model_params.n_gpu_layers = ngl; diff --git a/examples/simple/CMakeLists.txt b/examples/simple/CMakeLists.txt index b63afbb8b3081..104ecabfd7236 100644 --- a/examples/simple/CMakeLists.txt +++ b/examples/simple/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-simple) add_executable(${TARGET} simple.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/simple/README.md b/examples/simple/README.md index 0ff3425359a41..937008b243ee4 100644 --- a/examples/simple/README.md +++ b/examples/simple/README.md @@ -3,7 +3,7 @@ The purpose of this example is to demonstrate a minimal usage of llama.cpp for generating text with a given prompt. ```bash -./llama-simple -m ./models/llama-7b-v2/ggml-model-f16.gguf -p "Hello my name is" +./llama-simple -m ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is" ... 
diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp
index 59760fe95db22..3288c0250a001 100644
--- a/examples/simple/simple.cpp
+++ b/examples/simple/simple.cpp
@@ -74,6 +74,10 @@ int main(int argc, char ** argv) {
         }
     }

+    // load dynamic backends
+
+    ggml_backend_load_all();
+
     // initialize the model

     llama_model_params model_params = llama_model_default_params();
diff --git a/examples/speculative-simple/CMakeLists.txt b/examples/speculative-simple/CMakeLists.txt
new file mode 100644
index 0000000000000..aeaea74fcd1f1
--- /dev/null
+++ b/examples/speculative-simple/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(TARGET llama-speculative-simple)
+add_executable(${TARGET} speculative-simple.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/examples/speculative-simple/README.md b/examples/speculative-simple/README.md
new file mode 100644
index 0000000000000..e3a6c6b4aa0bf
--- /dev/null
+++ b/examples/speculative-simple/README.md
@@ -0,0 +1,12 @@
+# llama.cpp/examples/speculative-simple
+
+Demonstration of basic greedy speculative decoding
+
+```bash
+./bin/llama-speculative-simple \
+    -m  ../models/qwen2.5-32b-coder-instruct/ggml-model-q8_0.gguf \
+    -md ../models/qwen2.5-1.5b-coder-instruct/ggml-model-q4_0.gguf \
+    -f test.txt -c 0 -ngl 99 --color \
+    --sampling-seq k --top-k 1 -fa --temp 0.0 \
+    -ngld 99 --draft-max 16 --draft-min 5 --draft-p-min 0.9
+```
diff --git a/examples/speculative-simple/speculative-simple.cpp b/examples/speculative-simple/speculative-simple.cpp
new file mode 100644
index 0000000000000..8ca84f7afacbc
--- /dev/null
+++ b/examples/speculative-simple/speculative-simple.cpp
@@ -0,0 +1,265 @@
+#include "arg.h"
+#include "common.h"
+#include "sampling.h"
+#include "speculative.h"
+#include "log.h"
+#include "llama.h"
+
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <vector>
+
+int main(int argc, char ** argv) {
+    common_params params;
+
+    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SPECULATIVE)) {
+        return 1;
+    }
+
+    if (params.n_predict < -1) {
+        LOG_ERR("%s: --n-predict must be >= -1\n", __func__);
+        return 1;
+    }
+
+    common_init();
+
+    if (params.speculative.model.empty()) {
+        LOG_ERR("%s: --model-draft is required\n", __func__);
+        return 1;
+    }
+
+    // init llama.cpp
+    llama_backend_init();
+    llama_numa_init(params.numa);
+
+    llama_model * model_tgt = NULL;
+    llama_model * model_dft = NULL;
+
+    llama_context * ctx_tgt = NULL;
+    llama_context * ctx_dft = NULL;
+
+    // load the target model
+    common_init_result llama_init_tgt = common_init_from_params(params);
+
+    model_tgt = llama_init_tgt.model;
+    ctx_tgt   = llama_init_tgt.context;
+
+    // load the draft model
+    params.devices      = params.speculative.devices;
+    params.model        = params.speculative.model;
+    params.n_ctx        = params.speculative.n_ctx;
+    params.n_batch      = params.speculative.n_ctx > 0 ? params.speculative.n_ctx : params.n_batch;
+    params.n_gpu_layers = params.speculative.n_gpu_layers;
+
+    if (params.speculative.cpuparams.n_threads > 0) {
+        params.cpuparams.n_threads = params.speculative.cpuparams.n_threads;
+    }
+
+    params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads;
+    common_init_result llama_init_dft = common_init_from_params(params);
+
+    model_dft = llama_init_dft.model;
+    ctx_dft   = llama_init_dft.context;
+
+    if (!common_speculative_are_compatible(ctx_tgt, ctx_dft)) {
+        return 1;
+    }
+
+    // Tokenize the prompt
+    std::vector<llama_token> inp;
+    inp = common_tokenize(ctx_tgt, params.prompt, true, true);
+
+    if (llama_n_ctx(ctx_tgt) < (uint32_t) inp.size()) {
+        LOG_ERR("%s: the prompt exceeds the context size (%d tokens, ctx %d)\n", __func__, (int) inp.size(), llama_n_ctx(ctx_tgt));
+
+        return 1;
+    }
+
+    if (llama_n_batch(ctx_tgt) < (uint32_t) inp.size()) {
+        LOG_ERR("%s: the prompt exceeds the batch size (%d tokens, batch %d)\n", __func__, (int) inp.size(), llama_n_batch(ctx_tgt));
+
+        return 1;
+    }
+
+    LOG("\n\n");
+
+    for (auto id : inp) {
+        LOG("%s", common_token_to_piece(ctx_tgt, id).c_str());
+    }
+
+    // how many tokens to draft each time
+    int n_draft     = params.speculative.n_max;
+    int n_draft_min = params.speculative.n_min;
+
+    float p_min = params.speculative.p_min;
+
+    int n_predict = 0;
+    int n_drafted = 0;
+    int n_accept  = 0;
+
+    // used to determine end of generation
+    bool has_eos = false;
+
+    // ================================================
+    // everything until here is standard initialization
+    // the relevant stuff for speculative decoding starts here
+
+    const auto t_enc_start = ggml_time_us();
+
+    // target model sampling context
+    struct common_sampler * smpl = common_sampler_init(model_tgt, params.sampling);
+
+    // eval the prompt
+    llama_decode(ctx_tgt, llama_batch_get_one(inp.data(), inp.size() - 1));
+
+    // note: keep the last token separate!
+    llama_token id_last = inp.back();
+
+    // all tokens currently in the target context
+    llama_tokens prompt_tgt(inp.begin(), inp.end() - 1);
+    prompt_tgt.reserve(llama_n_ctx(ctx_tgt));
+
+    int n_past = inp.size() - 1;
+
+    // init the speculator
+    struct common_speculative_params params_spec;
+    params_spec.n_draft = n_draft;
+    params_spec.n_reuse = llama_n_ctx(ctx_dft) - n_draft;
+    params_spec.p_min   = p_min;
+
+    struct common_speculative * spec = common_speculative_init(ctx_dft);
+
+    llama_batch batch_tgt = llama_batch_init(llama_n_batch(ctx_tgt), 0, 1);
+
+    const auto t_enc_end = ggml_time_us();
+
+    const auto t_dec_start = ggml_time_us();
+
+    while (true) {
+        // optionally, generate draft tokens that can be appended to the target batch
+        //
+        // this is the most important part of the speculation. the more probable tokens that are provided here
+        // the better the performance will be. in theory, this computation can be performed asynchronously and even
+        // offloaded to a remote device. it doesn't even have to be based on an LLM. instead, it can provide tokens
+        // from a cache or lookup tables.
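+        //
+        // as a rough sketch (not part of this example's code): such a non-LLM draft source could be a
+        // hypothetical lookup helper that drafts the tokens which followed the current token earlier
+        // in the context, e.g.:
+        //
+        //   // hypothetical: find the last occurrence of id_last in prompt_tgt and
+        //   // return up to params_spec.n_draft of the tokens that followed it
+        //   llama_tokens draft_from_history(const llama_tokens & prompt_tgt, llama_token id_last);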
+ // + llama_tokens draft = common_speculative_gen_draft(spec, params_spec, prompt_tgt, id_last); + + //LOG_DBG("draft: %s\n", string_from(ctx_dft, draft).c_str()); + + // always have a token to evaluate from before - id_last + common_batch_clear(batch_tgt); + common_batch_add (batch_tgt, id_last, n_past++, { 0 }, true); + + // evaluate the target model on [id_last, draft0, draft1, ..., draftN-1] + { + // do not waste time on small drafts + if (draft.size() < (size_t) n_draft_min) { + draft.clear(); + } + + for (size_t i = 0; i < draft.size(); ++i) { + common_batch_add(batch_tgt, draft[i], n_past + i, { 0 }, true); + } + + //LOG_DBG("target batch: %s\n", string_from(ctx_tgt, batch_tgt).c_str()); + + llama_decode(ctx_tgt, batch_tgt); + } + + // sample from the full target batch and return the accepted tokens based on the target sampler + // + // for each token to be accepted, the sampler would have to sample that same token + // in such cases, instead of decoding the sampled token as we normally do, we simply continue with the + // available logits from the batch and sample the next token until we run out of logits or the sampler + // disagrees with the draft + // + const auto ids = common_sampler_sample_and_accept_n(smpl, ctx_tgt, draft); + + //LOG_DBG("ids: %s\n", string_from(ctx_tgt, ids).c_str()); + + GGML_ASSERT(ids.size() > 0); // there will always be at least one accepted token + + n_past += ids.size() - 1; + n_drafted += draft.size(); // note: we ignore the discarded small drafts + n_accept += ids.size() - 1; + n_predict += ids.size(); + + // process the accepted tokens and update contexts + // + // this is the standard token post-processing that we normally do + // in this case, we do it for a group of accepted tokens at once + // + for (size_t i = 0; i < ids.size(); ++i) { + prompt_tgt.push_back(id_last); + + id_last = ids[i]; + + if (llama_token_is_eog(model_tgt, id_last)) { + has_eos = true; + break; + } + + const std::string token_str = common_token_to_piece(ctx_tgt, id_last); + + if (params.use_color && i + 1 < ids.size()) { + LOG("\u001b[%dm%s\u001b[37m", (36 - 0 % 6), token_str.c_str()); + } else { + LOG("%s", token_str.c_str()); + } + } + + LOG_DBG("accepted %d/%d draft tokens, the last target token is: (%d)\n", (int) ids.size() - 1, (int) draft.size(), id_last); + + { + LOG_DBG("clear kv cache from any extra tokens, n_past = %d\n", n_past); + + llama_kv_cache_seq_rm(ctx_tgt, 0, n_past, -1); + } + + if ((params.n_predict >= 0 && n_predict > params.n_predict) || has_eos) { + break; + } + } + + auto t_dec_end = ggml_time_us(); + + const int n_input = inp.size(); + + LOG("\n\n"); + + LOG_INF("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f)); + LOG_INF("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f)); + + LOG_INF("\n"); + LOG_INF("n_draft = %d\n", n_draft); + LOG_INF("n_predict = %d\n", n_predict); + LOG_INF("n_drafted = %d\n", n_drafted); + LOG_INF("n_accept = %d\n", n_accept); + LOG_INF("accept = %.3f%%\n", 100.0f * n_accept / n_drafted); + + LOG_INF("\n"); + LOG_INF("draft:\n\n"); + + llama_perf_context_print(ctx_dft); + + LOG_INF("\n"); + LOG_INF("target:\n\n"); + common_perf_print(ctx_tgt, smpl); + + common_sampler_free(smpl); + common_speculative_free(spec); + + llama_free(ctx_tgt); + llama_free_model(model_tgt); + + llama_free(ctx_dft); + llama_free_model(model_dft); + + 
llama_backend_free(); + + LOG("\n\n"); + + return 0; +} diff --git a/examples/speculative/CMakeLists.txt b/examples/speculative/CMakeLists.txt index aa208e7aaeeb0..c84196bd95b1e 100644 --- a/examples/speculative/CMakeLists.txt +++ b/examples/speculative/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-speculative) add_executable(${TARGET} speculative.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index f6fa4cb5d5354..657336255bbd7 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -12,7 +12,7 @@ #include #include -#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 100 +#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 128 #define SPEC_VOCAB_CHECK_START_TOKEN_ID 5 struct seq_draft { @@ -33,7 +33,7 @@ int main(int argc, char ** argv) { common_params params; // needed to get candidate probs even for temp <= 0.0 - params.sparams.n_probs = 128; + params.sampling.n_probs = 128; if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SPECULATIVE)) { return 1; @@ -41,7 +41,7 @@ int main(int argc, char ** argv) { common_init(); - if (params.model_draft.empty()) { + if (params.speculative.model.empty()) { LOG_ERR("%s: --model-draft is required\n", __func__); return 1; } @@ -50,9 +50,9 @@ int main(int argc, char ** argv) { const int n_seq_dft = params.n_parallel; // probability threshold for splitting a draft branch (only for n_seq_dft > 1) - const float p_split = params.p_split; + const float p_draft_split = params.speculative.p_split; - std::default_random_engine rng(params.sparams.seed == LLAMA_DEFAULT_SEED ? std::random_device()() : params.sparams.seed); + std::default_random_engine rng(params.sampling.seed == LLAMA_DEFAULT_SEED ? 
std::random_device()() : params.sampling.seed); std::uniform_real_distribution<> u_dist; // init llama.cpp @@ -71,13 +71,14 @@ int main(int argc, char ** argv) { ctx_tgt = llama_init_tgt.context; // load the draft model - params.model = params.model_draft; - params.n_gpu_layers = params.n_gpu_layers_draft; - if (params.draft_cpuparams.n_threads > 0) { - params.cpuparams.n_threads = params.draft_cpuparams.n_threads; + params.devices = params.speculative.devices; + params.model = params.speculative.model; + params.n_gpu_layers = params.speculative.n_gpu_layers; + if (params.speculative.cpuparams.n_threads > 0) { + params.cpuparams.n_threads = params.speculative.cpuparams.n_threads; } - params.cpuparams_batch.n_threads = params.draft_cpuparams_batch.n_threads; + params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads; common_init_result llama_init_dft = common_init_from_params(params); model_dft = llama_init_dft.model; ctx_dft = llama_init_dft.context; @@ -165,7 +166,7 @@ int main(int argc, char ** argv) { //GGML_ASSERT(n_vocab == llama_n_vocab(model_dft)); // how many tokens to draft each time - int n_draft = params.n_draft; + int n_draft = params.speculative.n_max; int n_predict = 0; int n_drafted = 0; @@ -178,14 +179,14 @@ int main(int argc, char ** argv) { bool has_eos = false; // target model sampling context (reuse the llama_context's sampling instance) - struct common_sampler * smpl = common_sampler_init(model_tgt, params.sparams); + struct common_sampler * smpl = common_sampler_init(model_tgt, params.sampling); // draft sequence data std::vector drafts(n_seq_dft); for (int s = 0; s < n_seq_dft; ++s) { // allocate llama_sampler for each draft sequence - drafts[s].smpl = common_sampler_init(model_dft, params.sparams); + drafts[s].smpl = common_sampler_init(model_dft, params.sampling); } llama_batch batch_dft = llama_batch_init(params.n_ctx, 0, 1); @@ -225,7 +226,7 @@ int main(int argc, char ** argv) { // for stochastic sampling, attempt to match the token with the drafted tokens { bool accept = false; - if (params.sparams.temp > 0) { + if (params.sampling.temp > 0) { // stochastic verification common_sampler_sample(smpl, ctx_tgt, drafts[s_keep].i_batch_tgt[i_dft], true); @@ -489,7 +490,7 @@ int main(int argc, char ** argv) { // attempt to split the branch if the probability is high enough for (int f = 1; f < 8; ++f) { - if (n_seq_cur < n_seq_dft && cur_p->data[f].p > p_split) { + if (n_seq_cur < n_seq_dft && cur_p->data[f].p > p_draft_split) { LOG_DBG("splitting seq %3d into %3d\n", s, n_seq_cur); llama_kv_cache_seq_rm(ctx_dft, n_seq_cur, -1, -1); diff --git a/examples/tokenize/CMakeLists.txt b/examples/tokenize/CMakeLists.txt index b704dcae18c52..1690b53e5d52b 100644 --- a/examples/tokenize/CMakeLists.txt +++ b/examples/tokenize/CMakeLists.txt @@ -2,4 +2,4 @@ set(TARGET llama-tokenize) add_executable(${TARGET} tokenize.cpp) install(TARGETS ${TARGET} RUNTIME) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp index 12ad542565128..c97e227242c42 100644 --- a/examples/tokenize/tokenize.cpp +++ b/examples/tokenize/tokenize.cpp @@ -394,7 +394,7 @@ int main(int raw_argc, char ** raw_argv) { } if (show_token_count) { - printf("Total number of tokens: %ld\n", tokens.size()); + printf("Total number of tokens: %zu\n", tokens.size()); } // silence valgrind 
    llama_free(ctx);
diff --git a/examples/tts/CMakeLists.txt b/examples/tts/CMakeLists.txt
new file mode 100644
index 0000000000000..c72bd814c3b31
--- /dev/null
+++ b/examples/tts/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(TARGET llama-tts)
+add_executable(${TARGET} tts.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE llama common ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/examples/tts/convert_pt_to_hf.py b/examples/tts/convert_pt_to_hf.py
new file mode 100644
index 0000000000000..8909a65fd1e13
--- /dev/null
+++ b/examples/tts/convert_pt_to_hf.py
@@ -0,0 +1,180 @@
+# convert the https://huggingface.co/novateur/WavTokenizer-large-speech-75token to HF format
+# the goal is to be able to reuse the convert_hf_to_gguf.py after that to create a GGUF file with the WavTokenizer decoder
+#
+# TODO: this script is LLM-generated and probably very inefficient and should be rewritten
+
+import torch
+import json
+import os
+import sys
+import re
+
+from safetensors.torch import save_file
+
+# default
+model_path = './model.pt'
+
+# read from CLI
+if len(sys.argv) > 1:
+    model_path = sys.argv[1]
+
+# get the directory of the input model
+path_dst = os.path.dirname(model_path)
+
+print(f"Loading model from {model_path}")
+
+model = torch.load(model_path, map_location='cpu')
+
+#print(model)
+
+# print all keys
+for key in model.keys():
+    print(key)
+    if key == 'hyper_parameters':
+        #print(model[key])
+        # dump as json pretty
+        print(json.dumps(model[key], indent=4))
+    #if key != 'state_dict' and key != 'optimizer_states':
+    #    print(model[key])
+
+# Check if the loaded model is a state_dict or a model instance
+if isinstance(model, torch.nn.Module):
+    state_dict = model.state_dict()
+else:
+    state_dict = model
+
+# Print the structure of the state_dict to understand its format
+print("State dictionary keys:")
+for key in state_dict.keys():
+    print(key)
+
+# Ensure the state_dict is flat and contains only torch.Tensor objects
+def flatten_state_dict(state_dict, parent_key='', sep='.'):
+    items = []
+    items_new = []
+
+    for k, v in state_dict.items():
+        new_key = f"{parent_key}{sep}{k}" if parent_key else k
+        if isinstance(v, torch.Tensor):
+            items.append((new_key, v))
+        elif isinstance(v, dict):
+            items.extend(flatten_state_dict(v, new_key, sep=sep).items())
+
+    size_total_mb = 0
+
+    for key, value in list(items):
+        # keep only what we need for inference
+        if not key.startswith('state_dict.feature_extractor.encodec.quantizer.') and \
+           not key.startswith('state_dict.backbone.') and \
+           not key.startswith('state_dict.head.out'):
+            print('Skipping key: ', key)
+            continue
+
+        new_key = key
+
+        new_key = new_key.replace('state_dict.', '')
+        new_key = new_key.replace('pos_net', 'posnet')
+
+        # check if matches "backbone.posnet.%d.bias" or "backbone.posnet.%d.weight"
+        if new_key.startswith("backbone.posnet."):
+            match = re.match(r"backbone\.posnet\.(\d+)\.(bias|weight)", new_key)
+            if match:
+                new_key = f"backbone.posnet.{match.group(1)}.norm.{match.group(2)}"
+
+        # "feature_extractor.encodec.quantizer.vq.layers.0._codebook.embed" -> "backbone.embedding.weight"
+        if new_key == "feature_extractor.encodec.quantizer.vq.layers.0._codebook.embed":
+            new_key = "backbone.embedding.weight"
+
+        # these are the only rows used
+        # ref: https://github.com/edwko/OuteTTS/blob/a613e79c489d8256dd657ea9168d78de75895d82/outetts/wav_tokenizer/audio_codec.py#L100
+        if new_key.endswith("norm.scale.weight"):
+            new_key = new_key.replace("norm.scale.weight", "norm.weight")
+            value = value[0]
+
+        if new_key.endswith("norm.shift.weight"):
+            new_key = new_key.replace("norm.shift.weight", "norm.bias")
+            value = value[0]
+
+        if new_key.endswith("gamma"):
+            new_key = new_key.replace("gamma", "gamma.weight")
+
+        # convert from 1D [768] to 2D [768, 1] so that ggml_add can broadcast the bias
+        if (new_key.endswith("norm.weight") or new_key.endswith("norm1.weight") or new_key.endswith("norm2.weight") or new_key.endswith(".bias")) and (new_key.startswith("backbone.posnet") or new_key.startswith("backbone.embed.bias")):
+            value = value.unsqueeze(1)
+
+        if new_key.endswith("dwconv.bias"):
+            value = value.unsqueeze(1)
+
+        size_mb = value.element_size() * value.nelement() / (1024 * 1024)
+        print(f"{size_mb:8.2f} MB - {new_key}: {value.shape}")
+
+        size_total_mb += size_mb
+
+        #print(key, '->', new_key, ': ', value)
+        #print(key, '->', new_key)
+
+        items_new.append((new_key, value))
+
+    print(f"Total size: {size_total_mb:8.2f} MB")
+
+    return dict(items_new)
+
+flattened_state_dict = flatten_state_dict(state_dict)
+
+
+# Convert the model to the safetensors format
+output_path = path_dst + '/model.safetensors'
+save_file(flattened_state_dict, output_path)
+
+print(f"Model has been successfully converted and saved to {output_path}")
+
+# Calculate the total size of the .safetensors file
+total_size = os.path.getsize(output_path)
+
+# Create the weight map
+weight_map = {
+    "model.safetensors": ["*"]  # Assuming all weights are in one file
+}
+
+# Create metadata for the index.json file
+metadata = {
+    "total_size": total_size,
+    "weight_map": weight_map
+}
+
+# Save the metadata to index.json
+index_path = path_dst + '/index.json'
+with open(index_path, 'w') as f:
+    json.dump(metadata, f, indent=4)
+
+print(f"Metadata has been saved to {index_path}")
+
+config = {
+    "architectures": [
+        "WavTokenizerDec"
+    ],
+    "hidden_size": 1282,
+    "n_embd_features": 512,
+    "n_ff": 2304,
+    "vocab_size": 4096,
+    "n_head": 1,
+    "layer_norm_epsilon": 1e-6,
+    "group_norm_epsilon": 1e-6,
+    "group_norm_groups": 32,
+    "max_position_embeddings": 8192, # ?
+ "n_layer": 12, + "posnet": { + "n_embd": 768, + "n_layer": 6 + }, + "convnext": { + "n_embd": 768, + "n_layer": 12 + }, +} + +with open(path_dst + '/config.json', 'w') as f: + json.dump(config, f, indent=4) + +print(f"Config has been saved to {path_dst + 'config.json'}") diff --git a/examples/tts/tts-outetts.py b/examples/tts/tts-outetts.py new file mode 100644 index 0000000000000..0f81192fca52d --- /dev/null +++ b/examples/tts/tts-outetts.py @@ -0,0 +1,175 @@ +import sys +#import json +#import struct +import requests +import re + +def process_text(text: str): + text = re.sub(r'\d+(\.\d+)?', lambda x: x.group(), text.lower()) # TODO this needs to be fixed + text = re.sub(r'[-_/,\.\\]', ' ', text) + text = re.sub(r'[^a-z\s]', '', text) + text = re.sub(r'\s+', ' ', text).strip() + return text.split() + +# usage: +# python tts-outetts.py http://server-llm:port http://server-dec:port "text" + +if len(sys.argv) <= 3: + print("usage: python tts-outetts.py http://server-llm:port http://server-dec:port \"text\"") + exit(1) + +host_llm = sys.argv[1] +host_dec = sys.argv[2] +text = sys.argv[3] + +prefix = """<|im_start|> +<|text_start|>the<|text_sep|>overall<|text_sep|>package<|text_sep|>from<|text_sep|>just<|text_sep|>two<|text_sep|>people<|text_sep|>is<|text_sep|>pretty<|text_sep|>remarkable<|text_sep|>sure<|text_sep|>i<|text_sep|>have<|text_sep|>some<|text_sep|>critiques<|text_sep|>about<|text_sep|>some<|text_sep|>of<|text_sep|>the<|text_sep|>gameplay<|text_sep|>aspects<|text_sep|>but<|text_sep|>its<|text_sep|>still<|text_sep|>really<|text_sep|>enjoyable<|text_sep|>and<|text_sep|>it<|text_sep|>looks<|text_sep|>lovely<|text_sep|>""" + +words = process_text(text) +words = "<|text_sep|>".join([i.strip() for i in words]) +words += "<|text_end|>\n" + +# voice data +# TODO: load from json +#suffix = """<|audio_start|> +#the<|t_0.08|><|code_start|><|257|><|740|><|636|><|913|><|788|><|1703|><|code_end|> +#overall<|t_0.36|><|code_start|><|127|><|201|><|191|><|774|><|700|><|532|><|1056|><|557|><|798|><|298|><|1741|><|747|><|1662|><|1617|><|1702|><|1527|><|368|><|1588|><|1049|><|1008|><|1625|><|747|><|1576|><|728|><|1019|><|1696|><|1765|><|code_end|> +#package<|t_0.56|><|code_start|><|935|><|584|><|1319|><|627|><|1016|><|1491|><|1344|><|1117|><|1526|><|1040|><|239|><|1435|><|951|><|498|><|723|><|1180|><|535|><|789|><|1649|><|1637|><|78|><|465|><|1668|><|901|><|595|><|1675|><|117|><|1009|><|1667|><|320|><|840|><|79|><|507|><|1762|><|1508|><|1228|><|1768|><|802|><|1450|><|1457|><|232|><|639|><|code_end|> +#from<|t_0.19|><|code_start|><|604|><|782|><|1682|><|872|><|1532|><|1600|><|1036|><|1761|><|647|><|1554|><|1371|><|653|><|1595|><|950|><|code_end|> +#just<|t_0.25|><|code_start|><|1782|><|1670|><|317|><|786|><|1748|><|631|><|599|><|1155|><|1364|><|1524|><|36|><|1591|><|889|><|1535|><|541|><|440|><|1532|><|50|><|870|><|code_end|> +#two<|t_0.24|><|code_start|><|1681|><|1510|><|673|><|799|><|805|><|1342|><|330|><|519|><|62|><|640|><|1138|><|565|><|1552|><|1497|><|1552|><|572|><|1715|><|1732|><|code_end|> +#people<|t_0.39|><|code_start|><|593|><|274|><|136|><|740|><|691|><|633|><|1484|><|1061|><|1138|><|1485|><|344|><|428|><|397|><|1562|><|645|><|917|><|1035|><|1449|><|1669|><|487|><|442|><|1484|><|1329|><|1832|><|1704|><|600|><|761|><|653|><|269|><|code_end|> +#is<|t_0.16|><|code_start|><|566|><|583|><|1755|><|646|><|1337|><|709|><|802|><|1008|><|485|><|1583|><|652|><|10|><|code_end|> 
+#pretty<|t_0.32|><|code_start|><|1818|><|1747|><|692|><|733|><|1010|><|534|><|406|><|1697|><|1053|><|1521|><|1355|><|1274|><|816|><|1398|><|211|><|1218|><|817|><|1472|><|1703|><|686|><|13|><|822|><|445|><|1068|><|code_end|> +#remarkable<|t_0.68|><|code_start|><|230|><|1048|><|1705|><|355|><|706|><|1149|><|1535|><|1787|><|1356|><|1396|><|835|><|1583|><|486|><|1249|><|286|><|937|><|1076|><|1150|><|614|><|42|><|1058|><|705|><|681|><|798|><|934|><|490|><|514|><|1399|><|572|><|1446|><|1703|><|1346|><|1040|><|1426|><|1304|><|664|><|171|><|1530|><|625|><|64|><|1708|><|1830|><|1030|><|443|><|1509|><|1063|><|1605|><|1785|><|721|><|1440|><|923|><|code_end|> +#sure<|t_0.36|><|code_start|><|792|><|1780|><|923|><|1640|><|265|><|261|><|1525|><|567|><|1491|><|1250|><|1730|><|362|><|919|><|1766|><|543|><|1|><|333|><|113|><|970|><|252|><|1606|><|133|><|302|><|1810|><|1046|><|1190|><|1675|><|code_end|> +#i<|t_0.08|><|code_start|><|123|><|439|><|1074|><|705|><|1799|><|637|><|code_end|> +#have<|t_0.16|><|code_start|><|1509|><|599|><|518|><|1170|><|552|><|1029|><|1267|><|864|><|419|><|143|><|1061|><|0|><|code_end|> +#some<|t_0.16|><|code_start|><|619|><|400|><|1270|><|62|><|1370|><|1832|><|917|><|1661|><|167|><|269|><|1366|><|1508|><|code_end|> +#critiques<|t_0.60|><|code_start|><|559|><|584|><|1163|><|1129|><|1313|><|1728|><|721|><|1146|><|1093|><|577|><|928|><|27|><|630|><|1080|><|1346|><|1337|><|320|><|1382|><|1175|><|1682|><|1556|><|990|><|1683|><|860|><|1721|><|110|><|786|><|376|><|1085|><|756|><|1523|><|234|><|1334|><|1506|><|1578|><|659|><|612|><|1108|><|1466|><|1647|><|308|><|1470|><|746|><|556|><|1061|><|code_end|> +#about<|t_0.29|><|code_start|><|26|><|1649|><|545|><|1367|><|1263|><|1728|><|450|><|859|><|1434|><|497|><|1220|><|1285|><|179|><|755|><|1154|><|779|><|179|><|1229|><|1213|><|922|><|1774|><|1408|><|code_end|> +#some<|t_0.23|><|code_start|><|986|><|28|><|1649|><|778|><|858|><|1519|><|1|><|18|><|26|><|1042|><|1174|><|1309|><|1499|><|1712|><|1692|><|1516|><|1574|><|code_end|> +#of<|t_0.07|><|code_start|><|197|><|716|><|1039|><|1662|><|64|><|code_end|> +#the<|t_0.08|><|code_start|><|1811|><|1568|><|569|><|886|><|1025|><|1374|><|code_end|> +#gameplay<|t_0.48|><|code_start|><|1269|><|1092|><|933|><|1362|><|1762|><|1700|><|1675|><|215|><|781|><|1086|><|461|><|838|><|1022|><|759|><|649|><|1416|><|1004|><|551|><|909|><|787|><|343|><|830|><|1391|><|1040|><|1622|><|1779|><|1360|><|1231|><|1187|><|1317|><|76|><|997|><|989|><|978|><|737|><|189|><|code_end|> +#aspects<|t_0.56|><|code_start|><|1423|><|797|><|1316|><|1222|><|147|><|719|><|1347|><|386|><|1390|><|1558|><|154|><|440|><|634|><|592|><|1097|><|1718|><|712|><|763|><|1118|><|1721|><|1311|><|868|><|580|><|362|><|1435|><|868|><|247|><|221|><|886|><|1145|><|1274|><|1284|><|457|><|1043|><|1459|><|1818|><|62|><|599|><|1035|><|62|><|1649|><|778|><|code_end|> +#but<|t_0.20|><|code_start|><|780|><|1825|><|1681|><|1007|><|861|><|710|><|702|><|939|><|1669|><|1491|><|613|><|1739|><|823|><|1469|><|648|><|code_end|> +#its<|t_0.09|><|code_start|><|92|><|688|><|1623|><|962|><|1670|><|527|><|599|><|code_end|> +#still<|t_0.27|><|code_start|><|636|><|10|><|1217|><|344|><|713|><|957|><|823|><|154|><|1649|><|1286|><|508|><|214|><|1760|><|1250|><|456|><|1352|><|1368|><|921|><|615|><|5|><|code_end|> +#really<|t_0.36|><|code_start|><|55|><|420|><|1008|><|1659|><|27|><|644|><|1266|><|617|><|761|><|1712|><|109|><|1465|><|1587|><|503|><|1541|><|619|><|197|><|1019|><|817|><|269|><|377|><|362|><|1381|><|507|><|1488|><|4|><|1695|><|code_end|> 
+#enjoyable<|t_0.49|><|code_start|><|678|><|501|><|864|><|319|><|288|><|1472|><|1341|><|686|><|562|><|1463|><|619|><|1563|><|471|><|911|><|730|><|1811|><|1006|><|520|><|861|><|1274|><|125|><|1431|><|638|><|621|><|153|><|876|><|1770|><|437|><|987|><|1653|><|1109|><|898|><|1285|><|80|><|593|><|1709|><|843|><|code_end|> +#and<|t_0.15|><|code_start|><|1285|><|987|><|303|><|1037|><|730|><|1164|><|502|><|120|><|1737|><|1655|><|1318|><|code_end|> +#it<|t_0.09|><|code_start|><|848|><|1366|><|395|><|1601|><|1513|><|593|><|1302|><|code_end|> +#looks<|t_0.27|><|code_start|><|1281|><|1266|><|1755|><|572|><|248|><|1751|><|1257|><|695|><|1380|><|457|><|659|><|585|><|1315|><|1105|><|1776|><|736|><|24|><|736|><|654|><|1027|><|code_end|> +#lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|1481|><|1721|><|1123|><|438|><|1246|><|1251|><|795|><|659|><|1381|><|1658|><|217|><|1772|><|562|><|952|><|107|><|1129|><|1112|><|467|><|550|><|1079|><|840|><|1615|><|1469|><|1380|><|168|><|917|><|836|><|1827|><|437|><|583|><|67|><|595|><|1087|><|1646|><|1493|><|1677|><|code_end|>""" + +# TODO: tokenization is slow for some reason - here is pre-tokenized input +suffix = [ 151667, 198, 1782, 155780, 151669, 151929, 152412, 152308, 152585, 152460, 153375, 151670, 198, 74455, + 155808, 151669, 151799, 151873, 151863, 152446, 152372, 152204, 152728, 152229, 152470, 151970, 153413, + 152419, 153334, 153289, 153374, 153199, 152040, 153260, 152721, 152680, 153297, 152419, 153248, 152400, + 152691, 153368, 153437, 151670, 198, 1722, 155828, 151669, 152607, 152256, 152991, 152299, 152688, 153163, + 153016, 152789, 153198, 152712, 151911, 153107, 152623, 152170, 152395, 152852, 152207, 152461, 153321, + 153309, 151750, 152137, 153340, 152573, 152267, 153347, 151789, 152681, 153339, 151992, 152512, 151751, + 152179, 153434, 153180, 152900, 153440, 152474, 153122, 153129, 151904, 152311, 151670, 198, 1499, 155791, + 151669, 152276, 152454, 153354, 152544, 153204, 153272, 152708, 153433, 152319, 153226, 153043, 152325, + 153267, 152622, 151670, 198, 4250, 155797, 151669, 153454, 153342, 151989, 152458, 153420, 152303, 152271, + 152827, 153036, 153196, 151708, 153263, 152561, 153207, 152213, 152112, 153204, 151722, 152542, 151670, 198, + 19789, 155796, 151669, 153353, 153182, 152345, 152471, 152477, 153014, 152002, 152191, 151734, 152312, 152810, + 152237, 153224, 153169, 153224, 152244, 153387, 153404, 151670, 198, 16069, 155811, 151669, 152265, 151946, + 151808, 152412, 152363, 152305, 153156, 152733, 152810, 153157, 152016, 152100, 152069, 153234, 152317, + 152589, 152707, 153121, 153341, 152159, 152114, 153156, 153001, 153504, 153376, 152272, 152433, 152325, + 151941, 151670, 198, 285, 155788, 151669, 152238, 152255, 153427, 152318, 153009, 152381, 152474, 152680, + 152157, 153255, 152324, 151682, 151670, 198, 32955, 155804, 151669, 153490, 153419, 152364, 152405, 152682, + 152206, 152078, 153369, 152725, 153193, 153027, 152946, 152488, 153070, 151883, 152890, 152489, 153144, + 153375, 152358, 151685, 152494, 152117, 152740, 151670, 198, 37448, 480, 155840, 151669, 151902, 152720, + 153377, 152027, 152378, 152821, 153207, 153459, 153028, 153068, 152507, 153255, 152158, 152921, 151958, + 152609, 152748, 152822, 152286, 151714, 152730, 152377, 152353, 152470, 152606, 152162, 152186, 153071, + 152244, 153118, 153375, 153018, 152712, 153098, 152976, 152336, 151843, 153202, 152297, 151736, 153380, + 153502, 152702, 152115, 153181, 152735, 153277, 153457, 152393, 153112, 152595, 151670, 198, 19098, 155808, + 
151669, 152464, 153452, 152595, 153312, 151937, 151933, 153197, 152239, 153163, 152922, 153402, 152034, + 152591, 153438, 152215, 151673, 152005, 151785, 152642, 151924, 153278, 151805, 151974, 153482, 152718, + 152862, 153347, 151670, 198, 72, 155780, 151669, 151795, 152111, 152746, 152377, 153471, 152309, 151670, 198, + 19016, 155788, 151669, 153181, 152271, 152190, 152842, 152224, 152701, 152939, 152536, 152091, 151815, 152733, + 151672, 151670, 198, 14689, 155788, 151669, 152291, 152072, 152942, 151734, 153042, 153504, 152589, 153333, + 151839, 151941, 153038, 153180, 151670, 198, 36996, 8303, 155832, 151669, 152231, 152256, 152835, 152801, + 152985, 153400, 152393, 152818, 152765, 152249, 152600, 151699, 152302, 152752, 153018, 153009, 151992, + 153054, 152847, 153354, 153228, 152662, 153355, 152532, 153393, 151782, 152458, 152048, 152757, 152428, + 153195, 151906, 153006, 153178, 153250, 152331, 152284, 152780, 153138, 153319, 151980, 153142, 152418, + 152228, 152733, 151670, 198, 9096, 155801, 151669, 151698, 153321, 152217, 153039, 152935, 153400, 152122, + 152531, 153106, 152169, 152892, 152957, 151851, 152427, 152826, 152451, 151851, 152901, 152885, 152594, + 153446, 153080, 151670, 198, 14689, 155795, 151669, 152658, 151700, 153321, 152450, 152530, 153191, 151673, + 151690, 151698, 152714, 152846, 152981, 153171, 153384, 153364, 153188, 153246, 151670, 198, 1055, 155779, + 151669, 151869, 152388, 152711, 153334, 151736, 151670, 198, 1782, 155780, 151669, 153483, 153240, 152241, + 152558, 152697, 153046, 151670, 198, 5804, 1363, 155820, 151669, 152941, 152764, 152605, 153034, 153434, + 153372, 153347, 151887, 152453, 152758, 152133, 152510, 152694, 152431, 152321, 153088, 152676, 152223, + 152581, 152459, 152015, 152502, 153063, 152712, 153294, 153451, 153032, 152903, 152859, 152989, 151748, + 152669, 152661, 152650, 152409, 151861, 151670, 198, 300, 7973, 155828, 151669, 153095, 152469, 152988, + 152894, 151819, 152391, 153019, 152058, 153062, 153230, 151826, 152112, 152306, 152264, 152769, 153390, + 152384, 152435, 152790, 153393, 152983, 152540, 152252, 152034, 153107, 152540, 151919, 151893, 152558, + 152817, 152946, 152956, 152129, 152715, 153131, 153490, 151734, 152271, 152707, 151734, 153321, 152450, + 151670, 198, 8088, 155792, 151669, 152452, 153497, 153353, 152679, 152533, 152382, 152374, 152611, 153341, + 153163, 152285, 153411, 152495, 153141, 152320, 151670, 198, 1199, 155781, 151669, 151764, 152360, 153295, + 152634, 153342, 152199, 152271, 151670, 198, 43366, 155799, 151669, 152308, 151682, 152889, 152016, 152385, + 152629, 152495, 151826, 153321, 152958, 152180, 151886, 153432, 152922, 152128, 153024, 153040, 152593, + 152287, 151677, 151670, 198, 53660, 155808, 151669, 151727, 152092, 152680, 153331, 151699, 152316, 152938, + 152289, 152433, 153384, 151781, 153137, 153259, 152175, 153213, 152291, 151869, 152691, 152489, 151941, + 152049, 152034, 153053, 152179, 153160, 151676, 153367, 151670, 198, 268, 4123, 480, 155821, 151669, 152350, + 152173, 152536, 151991, 151960, 153144, 153013, 152358, 152234, 153135, 152291, 153235, 152143, 152583, + 152402, 153483, 152678, 152192, 152533, 152946, 151797, 153103, 152310, 152293, 151825, 152548, 153442, + 152109, 152659, 153325, 152781, 152570, 152957, 151752, 152265, 153381, 152515, 151670, 198, 437, 155787, + 151669, 152957, 152659, 151975, 152709, 152402, 152836, 152174, 151792, 153409, 153327, 152990, 151670, 198, + 275, 155781, 151669, 152520, 153038, 152067, 153273, 153185, 152265, 152974, 151670, 198, 94273, 
155799,
+          151669, 152953, 152938, 153427, 152244, 151920, 153423, 152929, 152367, 153052, 152129, 152331, 152257,
+          152987, 152777, 153448, 152408, 151696, 152408, 152326, 152699, 151670, 198, 385, 16239, 155828, 151669,
+          152306, 152268, 153438, 153228, 152978, 152957, 153153, 153393, 152795, 152110, 152918, 152923, 152467,
+          152331, 153053, 153330, 151889, 153444, 152234, 152624, 151779, 152801, 152784, 152139, 152222, 152751,
+          152512, 153287, 153141, 153052, 151840, 152589, 152508, 153499, 152109, 152255, 151739, 152267, 152759,
+          153318, 153165, 153349, 151670, ]
+
+response = requests.post(
+    host_llm + "/completion",
+    json={
+        "prompt": [prefix + words, *suffix],
+        "n_predict": 1024,
+        "cache_prompt": True,
+        "return_tokens": True,
+        "samplers": ["top_k"],
+        "top_k": 16,
+        "seed": 1003,
+    }
+)
+
+response_json = response.json()
+
+#print(json.dumps(response_json, indent=4))
+#print(json.dumps(response_json["prompt"], indent=4).replace("\\n", "\n"))
+#print(json.dumps(response_json["timings"], indent=4))
+#print(json.dumps(response_json["tokens"], indent=4))
+
+codes = response_json["tokens"]
+
+codes = [t - 151672 for t in codes if t >= 151672 and t <= 155772]
+
+response = requests.post(
+    host_dec + "/embeddings",
+    json={
+        "input": [*codes],
+    }
+)
+
+response_json = response.json()
+
+#print(json.dumps(response_json, indent=4))
+
+# spectrogram
+embd = response_json[0]["embedding"]
+
+n_codes = len(embd)
+n_embd = len(embd[0])
+
+print('spectrogram generated: n_codes: %d, n_embd: %d' % (n_codes, n_embd))
+
+# post-process the spectrogram to convert to audio
+# TODO: see the tts.cpp:embd_to_audio() and implement it in Python
+print('converting to audio ...')
+print('TODO: see the tts.cpp:embd_to_audio() and implement it in Python')
diff --git a/examples/tts/tts.cpp b/examples/tts/tts.cpp
new file mode 100644
index 0000000000000..7f36b80f0dee1
--- /dev/null
+++ b/examples/tts/tts.cpp
@@ -0,0 +1,932 @@
+#include "arg.h"
+#include "common.h"
+#include "sampling.h"
+#include "log.h"
+#include "llama.h"
+
+#define _USE_MATH_DEFINES // For M_PI on MSVC
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+#include <fstream>
+#include <map>
+#include <regex>
+#include <string>
+#include <thread>
+#include <vector>
+
+//
+//  Terminal utils
+//
+
+#define SQR(X)    ((X) * (X))
+#define UNCUBE(x) x < 48 ? 0 : x < 115 ? 1 : (x - 35) / 40
+
+/**
+ * Quantizes 24-bit RGB to xterm256 code range [16,256).
+ */
+static int rgb2xterm256(int r, int g, int b) {
+    unsigned char cube[] = {0, 0137, 0207, 0257, 0327, 0377};
+    int av, ir, ig, ib, il, qr, qg, qb, ql;
+    av = r * .299 + g * .587 + b * .114 + .5;
+    ql = (il = av > 238 ? 23 : (av - 3) / 10) * 10 + 8;
+    qr = cube[(ir = UNCUBE(r))];
+    qg = cube[(ig = UNCUBE(g))];
+    qb = cube[(ib = UNCUBE(b))];
+    if (SQR(qr - r) + SQR(qg - g) + SQR(qb - b) <=
+        SQR(ql - r) + SQR(ql - g) + SQR(ql - b))
+        return ir * 36 + ig * 6 + ib + 020;
+    return il + 0350;
+}
+
+static std::string set_xterm256_foreground(int r, int g, int b) {
+    int x = rgb2xterm256(r, g, b);
+    std::ostringstream oss;
+    oss << "\033[38;5;" << x << "m";
+    return oss.str();
+}
+
+const std::vector<std::string> k_colors = {
+    set_xterm256_foreground(220,   5,  12),
+    set_xterm256_foreground(232,  96,  28),
+    set_xterm256_foreground(241, 147,  45),
+    set_xterm256_foreground(246, 193,  65),
+    set_xterm256_foreground(247, 240,  86),
+    set_xterm256_foreground(144, 201, 135),
+    set_xterm256_foreground( 78, 178, 101),
+};
+
+static void print_usage(int, char ** argv) {
+    LOG("\nexample usage:\n");
+    LOG("\n    %s -m model.gguf -p \"Hello!\"\n", argv[0]);
+    LOG("\n");
+}
+
+struct wav_header {
+    char riff[4] = {'R', 'I', 'F', 'F'};
+    uint32_t chunk_size;
+    char wave[4] = {'W', 'A', 'V', 'E'};
+    char fmt[4] = {'f', 'm', 't', ' '};
+    uint32_t fmt_chunk_size = 16;
+    uint16_t audio_format = 1; // PCM
+    uint16_t num_channels = 1; // Mono
+    uint32_t sample_rate;
+    uint32_t byte_rate;
+    uint16_t block_align;
+    uint16_t bits_per_sample = 16;
+    char data[4] = {'d', 'a', 't', 'a'};
+    uint32_t data_size;
+};
+
+static void save_wav16(const std::string & fname, const std::vector<float> & data, int sample_rate) {
+    std::ofstream file(fname, std::ios::binary);
+    if (!file) {
+        LOG_ERR("%s: Failed to open file '%s' for writing", __func__, fname.c_str());
+        return;
+    }
+
+    wav_header header;
+    header.sample_rate = sample_rate;
+    header.byte_rate = header.sample_rate * header.num_channels * (header.bits_per_sample / 8);
+    header.block_align = header.num_channels * (header.bits_per_sample / 8);
+    header.data_size = data.size() * (header.bits_per_sample / 8);
+    header.chunk_size = 36 + header.data_size;
+
+    file.write(reinterpret_cast<const char *>(&header), sizeof(header));
+
+    for (const auto & sample : data) {
+        int16_t pcm_sample = static_cast<int16_t>(std::clamp(sample * 32767.0, -32768.0, 32767.0));
+        file.write(reinterpret_cast<const char *>(&pcm_sample), sizeof(pcm_sample));
+    }
+
+    file.close();
+}
+
+static void fill_hann_window(int length, bool periodic, float * output) {
+    int offset = -1;
+    if (periodic) {
+        offset = 0;
+    }
+    for (int i = 0; i < length; i++) {
+        output[i] = 0.5 * (1.0 - cosf((2.0 * M_PI * i) / (length + offset)));
+    }
+}
+
+// very poor-man fft
+static void twiddle(float * real, float * imag, int k, int N) {
+    float angle = 2 * M_PI * k / N;
+    *real = cos(angle);
+    *imag = sin(angle);
+}
+
+// naive inverse transform of a half-spectrum (N = n/2 + 1 interleaved re/im bins) by direct summation
+static void irfft(int n, const float * inp_cplx, float * out_real) {
+    int N = n / 2 + 1;
+
+    std::vector<float> real_input(N);
+    std::vector<float> imag_input(N);
+    for (int i = 0; i < N; ++i) {
+        real_input[i] = inp_cplx[2 * i];
+        imag_input[i] = inp_cplx[2 * i + 1];
+    }
+
+    std::vector<float> real_output(n);
+    std::vector<float> imag_output(n);
+
+    for (int k = 0; k < n; ++k) {
+        real_output[k] = 0.0f;
+        imag_output[k] = 0.0f;
+        for (int m = 0; m < N; ++m) {
+            float twiddle_real;
+            float twiddle_imag;
+
+            twiddle(&twiddle_real, &twiddle_imag, k * m, n);
+
+            real_output[k] += real_input[m] * twiddle_real - imag_input[m] * twiddle_imag;
+            imag_output[k] += real_input[m] * twiddle_imag + imag_input[m] * twiddle_real;
+        }
+    }
+
+    for (int i = 0; i < n; ++i) {
+        out_real[i] = real_output[i] / N;
+    }
+}
+
+//
+//  y = torch.nn.functional.fold(
+//       data, output_size=(1, output_size), kernel_size=(1, self.win_length), stride=(1, self.hop_length),
+//  )[:, 0, 0, pad:-pad]
+//
+// data.shape =  torch.Size([1, 1280, 261])
+// output_size = 84480
+// win_length =  1280
+// hop_length =  320
+// pad =         480
+//
+static void fold(const std::vector<float> & data, int64_t n_out, int64_t n_win, int64_t n_hop, int64_t n_pad, std::vector<float> & output) {
+    int64_t output_height = n_out;
+    int64_t kernel_w      = n_win;
+    int64_t stride_w      = n_hop;
+    int64_t width         = n_out;
+
+    output.resize(width, 0.0f);
+
+    int64_t col_idx = 0;
+    for (int64_t w_col = 0; w_col < width; ++w_col) {
+        int64_t start = w_col * stride_w - n_pad;
+        int64_t end   = start + kernel_w;
+
+        for (int64_t w_im = start; w_im < end; ++w_im) {
+            if (w_im >= 0 && w_im < output_height && col_idx < (int64_t) data.size()) {
+                output[w_im] += data[col_idx];
+            }
+            col_idx++;
+        }
+    }
+
+    output.resize(n_out - 2 * n_pad);
+}
+
+// TODO: not optimized at all
+// (each n_embd row is interpreted as log-magnitude and phase halves of a spectrum; they are
+//  exponentiated, rotated into re/im pairs, inverted with irfft() and overlap-added via fold())
+static std::vector<float> embd_to_audio(
+        const float * embd,
+        const int n_codes,
+        const int n_embd,
+        const int n_thread) {
+    const int n_fft = 1280;
+    const int n_hop = 320;
+    const int n_win = 1280;
+    const int n_pad = (n_win - n_hop)/2;
+    const int n_out = (n_codes - 1)*n_hop + n_win;
+
+    std::vector<float> hann(n_fft);
+
+    fill_hann_window(hann.size(), true, hann.data());
+
+    int n_spec = n_embd*n_codes;
+
+    std::vector<float> E (n_spec);
+    std::vector<float> S (n_spec);
+    std::vector<float> ST(n_spec);
+
+    for (int l = 0; l < n_codes; ++l) {
+        for (int k = 0; k < n_embd; ++k) {
+            E[k*n_codes + l] = embd[l*n_embd + k];
+        }
+    }
+
+    for (int k = 0; k < n_embd/2; ++k) {
+        for (int l = 0; l < n_codes; ++l) {
+            float mag = E[(k           )*n_codes + l];
+            float phi = E[(k + n_embd/2)*n_codes + l];
+
+            mag = exp(mag);
+
+            if (mag > 1e2) {
+                mag = 1e2;
+            }
+            S[2*(k*n_codes + l) + 0] = mag*cosf(phi);
+            S[2*(k*n_codes + l) + 1] = mag*sinf(phi);
+        }
+    }
+
+    for (int l = 0; l < n_codes; ++l) {
+        for (int k = 0; k < n_embd/2; ++k) {
+            ST[l*n_embd + 2*k + 0] = S[2*(k*n_codes + l) + 0];
+            ST[l*n_embd + 2*k + 1] = S[2*(k*n_codes + l) + 1];
+        }
+    }
+
+    std::vector<float> res  (n_codes*n_fft);
+    std::vector<float> hann2(n_codes*n_fft);
+
+    std::vector<std::thread> workers(n_thread);
+    for (int i = 0; i < n_thread; ++i) {
+        workers[i] = std::thread([&, i]() {
+            for (int l = i; l < n_codes; l += n_thread) {
+                irfft(n_fft, ST.data() + l*n_embd, res.data() + l*n_fft);
+                for (int j = 0; j < n_fft; ++j) {
+                    res  [l*n_fft + j] *= hann[j];
+                    hann2[l*n_fft + j]  = hann[j] * hann[j];
+                }
+            }
+        });
+    }
+    for (int i = 0; i < n_thread; ++i) {
+        workers[i].join();
+    }
+
+    std::vector<float> audio;
+    std::vector<float> env;
+
+    fold(res,   n_out, n_win, n_hop, n_pad, audio);
+    fold(hann2, n_out, n_win, n_hop, n_pad, env); // TODO: can be done once
+
+    for (size_t i = 0; i < audio.size(); ++i) {
+        audio[i] /= env[i];
+    }
+
+    return audio;
+}
+
+static const std::map<int, std::string> ones = {
+    {0, "zero"}, {1, "one"}, {2, "two"}, {3, "three"}, {4, "four"},
+    {5, "five"}, {6, "six"}, {7, "seven"}, {8, "eight"}, {9, "nine"},
+    {10, "ten"}, {11, "eleven"}, {12, "twelve"}, {13, "thirteen"}, {14, "fourteen"},
+    {15, "fifteen"}, {16, "sixteen"}, {17, "seventeen"}, {18, "eighteen"}, {19, "nineteen"}
+};
+
+static const std::map<int, std::string> tens = {
+    {2, "twenty"}, {3, "thirty"}, {4, "forty"}, {5, "fifty"},
+    {6, "sixty"}, {7, "seventy"}, {8, "eighty"}, {9, "ninety"}
+};
+
+// Convert a number less than 1000 to words
+static std::string convert_less_than_thousand(int num) {
+    std::string result;
+
+    if (num >= 100) {
+        result += ones.at(num / 100) + " hundred ";
+        num %= 100;
+    }
+
+    if (num >= 20) {
+        result += tens.at(num / 10);
+        if (num % 10 > 0) {
+            result += "-" + ones.at(num % 10);
+        }
+    } else if (num > 0) {
+        result += ones.at(num);
+    }
+
+    return result;
+}
+
+static std::string number_to_words(const std::string & number_str) {
+    try {
+        size_t decimal_pos = number_str.find('.');
+        std::string integer_part = number_str.substr(0, decimal_pos);
+
+        int int_number = std::stoi(integer_part);
+        std::string result;
+
+        if (int_number == 0) {
+            result = "zero";
+        } else {
+            if (int_number >= 1000000000) {
+                int billions = int_number / 1000000000;
+                result += convert_less_than_thousand(billions) + " billion ";
+                int_number %= 1000000000;
+            }
+
+            if (int_number >= 1000000) {
+                int millions = int_number / 1000000;
+                result += convert_less_than_thousand(millions) + " million ";
+                int_number %= 1000000;
+            }
+
+            if (int_number >= 1000) {
+                int thousands = int_number / 1000;
+                result += convert_less_than_thousand(thousands) + " thousand ";
+                int_number %= 1000;
+            }
+
+            if (int_number > 0) {
+                result += convert_less_than_thousand(int_number);
+            }
+        }
+
+        // Handle decimal part
+        if (decimal_pos != std::string::npos) {
+            result += " point";
+            std::string decimal_part = number_str.substr(decimal_pos + 1);
+            for (char digit : decimal_part) {
+                result += " " + ones.at(digit - '0');
+            }
+        }
+
+        return result;
+    } catch (const std::exception & e) {
+        // Skip if fails
+        return " ";
+    }
+}
+
+static std::string replace_numbers_with_words(const std::string & input_text) {
+    std::regex number_pattern(R"(\d+(\.\d+)?)");
+    std::string result;
+    auto it = std::sregex_iterator(input_text.begin(), input_text.end(), number_pattern);
+    auto end = std::sregex_iterator();
+
+    size_t last_pos = 0;
+    for (std::sregex_iterator i = it; i != end; ++i) {
+        const std::smatch & match = *i;
+        result.append(input_text, last_pos, match.position() - last_pos);
+        result.append(number_to_words(match.str()));
+        last_pos = match.position() + match.length();
+    }
+    result.append(input_text, last_pos);
+
+    return result;
+}
+
+// Based on: https://github.com/edwko/OuteTTS/blob/a613e79c489d8256dd657ea9168d78de75895d82/outetts/version/v1/prompt_processor.py#L39
+static std::string process_text(const std::string & text) {
+
+    // For now I skipped text romanization as I am unsure how to handle
+    // uroman and MeCab implementations in C++
+    // maybe something like https://github.com/anyascii/anyascii/ could work.
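+    // (for reference: anyascii transliterates arbitrary Unicode text to plain ASCII, which would suit
+    // the ASCII-only regexes below - mentioned as a possible direction, not something implemented here)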
+    // currently only English would be supported in this function
+
+    std::string processed_text = replace_numbers_with_words(text);
+
+    std::transform(processed_text.begin(), processed_text.end(),
+                   processed_text.begin(), ::tolower);
+
+    std::regex special_chars(R"([-_/,\.\\])");
+    processed_text = std::regex_replace(processed_text, special_chars, " ");
+
+    std::regex non_alpha(R"([^a-z\s])");
+    processed_text = std::regex_replace(processed_text, non_alpha, "");
+
+    std::regex multiple_spaces(R"(\s+)");
+    processed_text = std::regex_replace(processed_text, multiple_spaces, " ");
+
+    processed_text = std::regex_replace(processed_text, std::regex(R"(^\s+|\s+$)"), "");
+
+    /*
+        Replace spaces with the separator token same as in line 365
+
+        for (auto & c : prompt_user) {
+            if (c == ' ') {
+                prompt_clean += "<|text_sep|>";
+    */
+    processed_text = std::regex_replace(processed_text, std::regex(R"(\s)"), "<|text_sep|>");
+
+    return processed_text;
+}
+
+static void prompt_add(llama_tokens & prompt, llama_token token) {
+    prompt.push_back(token);
+}
+
+static void prompt_add(llama_tokens & prompt, const llama_tokens & tokens) {
+    prompt.insert(prompt.end(), tokens.begin(), tokens.end());
+}
+
+static void prompt_add(llama_tokens & prompt, const llama_model * model, const std::string & txt, bool add_special, bool parse_special) {
+    auto tmp = common_tokenize(model, txt, add_special, parse_special);
+    prompt_add(prompt, tmp);
+}
+
+static void prompt_init(llama_tokens & prompt, const llama_model * model) {
+    prompt.clear();
+
+    prompt_add(prompt, model, "<|im_start|>\n", true, true);
+}
+
+int main(int argc, char ** argv) {
+    common_params params;
+
+    params.prompt = "";
+
+    params.n_predict = 4096;
+    params.n_batch   = 8192;
+    params.n_ctx     = 8192;
+
+    params.sampling.top_k = 4;
+    params.sampling.samplers = { COMMON_SAMPLER_TYPE_TOP_K, };
+
+    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_TTS, print_usage)) {
+        return 1;
+    }
+
+    const int n_parallel = params.n_parallel;
+    const int n_predict  = params.n_predict;
+
+    common_init();
+
+    // init LLM
+
+    llama_backend_init();
+    llama_numa_init(params.numa);
+
+    llama_model * model_ttc = NULL; // text-to-codes
+    llama_model * model_cts = NULL; // codes-to-speech
+
+    llama_context * ctx_ttc = NULL;
+    llama_context * ctx_cts = NULL;
+
+    common_init_result llama_init_ttc = common_init_from_params(params);
+    model_ttc = llama_init_ttc.model;
+    ctx_ttc   = llama_init_ttc.context;
+
+    // TODO: refactor in a common struct
+    params.model     = params.vocoder.model;
+    params.model_url = params.vocoder.model_url;
+    params.hf_repo   = params.vocoder.hf_repo;
+    params.hf_file   = params.vocoder.hf_file;
+
+    params.embedding = true;
+
+    common_init_result llama_init_cts = common_init_from_params(params);
+    model_cts = llama_init_cts.model;
+    ctx_cts   = llama_init_cts.context;
+
+    std::vector<common_sampler *> smpl(n_parallel);
+    for (int i = 0; i < n_parallel; ++i) {
+        params.sampling.no_perf = (i != 0);
+        params.sampling.seed = params.sampling.seed + 1;
+
+        smpl[i] = common_sampler_init(model_ttc, params.sampling);
+    }
+
+    LOG_INF("sampler seed: %u\n",     common_sampler_get_seed(smpl[0]));
+    LOG_INF("sampler params: \n%s\n", params.sampling.print().c_str());
+    LOG_INF("sampler chain: %s\n",    common_sampler_print(smpl[0]).c_str());
+
+    LOG_INF("%s: loading done\n", __func__);
+
+    const auto t_main_start = ggml_time_us();
+
+    std::vector<llama_token> codes;
+
+    // process prompt and generate voice codes
+    {
+        LOG_INF("%s: constructing prompt ..\n", __func__);
+
+        std::vector<llama_token> prompt_inp;
+
prompt_init(prompt_inp, model_ttc); + + prompt_add(prompt_inp, model_ttc, "<|text_start|>the<|text_sep|>overall<|text_sep|>package<|text_sep|>from<|text_sep|>just<|text_sep|>two<|text_sep|>people<|text_sep|>is<|text_sep|>pretty<|text_sep|>remarkable<|text_sep|>sure<|text_sep|>i<|text_sep|>have<|text_sep|>some<|text_sep|>critiques<|text_sep|>about<|text_sep|>some<|text_sep|>of<|text_sep|>the<|text_sep|>gameplay<|text_sep|>aspects<|text_sep|>but<|text_sep|>its<|text_sep|>still<|text_sep|>really<|text_sep|>enjoyable<|text_sep|>and<|text_sep|>it<|text_sep|>looks<|text_sep|>lovely<|text_sep|>", false, true); + + // convert the input text into the necessary format expected by OuteTTS + { + std::string prompt_clean = process_text(params.prompt); + + LOG_INF("%s: prompt: '%s'\n", __func__, prompt_clean.c_str()); + + prompt_add(prompt_inp, model_ttc, prompt_clean, false, true); + } + + prompt_add(prompt_inp, model_ttc, "<|text_end|>\n", false, true); + + // disabled to save time on tokenizing each time + // TODO: load voices from the json files +#if 0 + const std::string voice_data = R"(<|audio_start|> +the<|t_0.08|><|code_start|><|257|><|740|><|636|><|913|><|788|><|1703|><|code_end|> +overall<|t_0.36|><|code_start|><|127|><|201|><|191|><|774|><|700|><|532|><|1056|><|557|><|798|><|298|><|1741|><|747|><|1662|><|1617|><|1702|><|1527|><|368|><|1588|><|1049|><|1008|><|1625|><|747|><|1576|><|728|><|1019|><|1696|><|1765|><|code_end|> +package<|t_0.56|><|code_start|><|935|><|584|><|1319|><|627|><|1016|><|1491|><|1344|><|1117|><|1526|><|1040|><|239|><|1435|><|951|><|498|><|723|><|1180|><|535|><|789|><|1649|><|1637|><|78|><|465|><|1668|><|901|><|595|><|1675|><|117|><|1009|><|1667|><|320|><|840|><|79|><|507|><|1762|><|1508|><|1228|><|1768|><|802|><|1450|><|1457|><|232|><|639|><|code_end|> +from<|t_0.19|><|code_start|><|604|><|782|><|1682|><|872|><|1532|><|1600|><|1036|><|1761|><|647|><|1554|><|1371|><|653|><|1595|><|950|><|code_end|> +just<|t_0.25|><|code_start|><|1782|><|1670|><|317|><|786|><|1748|><|631|><|599|><|1155|><|1364|><|1524|><|36|><|1591|><|889|><|1535|><|541|><|440|><|1532|><|50|><|870|><|code_end|> +two<|t_0.24|><|code_start|><|1681|><|1510|><|673|><|799|><|805|><|1342|><|330|><|519|><|62|><|640|><|1138|><|565|><|1552|><|1497|><|1552|><|572|><|1715|><|1732|><|code_end|> +people<|t_0.39|><|code_start|><|593|><|274|><|136|><|740|><|691|><|633|><|1484|><|1061|><|1138|><|1485|><|344|><|428|><|397|><|1562|><|645|><|917|><|1035|><|1449|><|1669|><|487|><|442|><|1484|><|1329|><|1832|><|1704|><|600|><|761|><|653|><|269|><|code_end|> +is<|t_0.16|><|code_start|><|566|><|583|><|1755|><|646|><|1337|><|709|><|802|><|1008|><|485|><|1583|><|652|><|10|><|code_end|> +pretty<|t_0.32|><|code_start|><|1818|><|1747|><|692|><|733|><|1010|><|534|><|406|><|1697|><|1053|><|1521|><|1355|><|1274|><|816|><|1398|><|211|><|1218|><|817|><|1472|><|1703|><|686|><|13|><|822|><|445|><|1068|><|code_end|> +remarkable<|t_0.68|><|code_start|><|230|><|1048|><|1705|><|355|><|706|><|1149|><|1535|><|1787|><|1356|><|1396|><|835|><|1583|><|486|><|1249|><|286|><|937|><|1076|><|1150|><|614|><|42|><|1058|><|705|><|681|><|798|><|934|><|490|><|514|><|1399|><|572|><|1446|><|1703|><|1346|><|1040|><|1426|><|1304|><|664|><|171|><|1530|><|625|><|64|><|1708|><|1830|><|1030|><|443|><|1509|><|1063|><|1605|><|1785|><|721|><|1440|><|923|><|code_end|> 
+sure<|t_0.36|><|code_start|><|792|><|1780|><|923|><|1640|><|265|><|261|><|1525|><|567|><|1491|><|1250|><|1730|><|362|><|919|><|1766|><|543|><|1|><|333|><|113|><|970|><|252|><|1606|><|133|><|302|><|1810|><|1046|><|1190|><|1675|><|code_end|> +i<|t_0.08|><|code_start|><|123|><|439|><|1074|><|705|><|1799|><|637|><|code_end|> +have<|t_0.16|><|code_start|><|1509|><|599|><|518|><|1170|><|552|><|1029|><|1267|><|864|><|419|><|143|><|1061|><|0|><|code_end|> +some<|t_0.16|><|code_start|><|619|><|400|><|1270|><|62|><|1370|><|1832|><|917|><|1661|><|167|><|269|><|1366|><|1508|><|code_end|> +critiques<|t_0.60|><|code_start|><|559|><|584|><|1163|><|1129|><|1313|><|1728|><|721|><|1146|><|1093|><|577|><|928|><|27|><|630|><|1080|><|1346|><|1337|><|320|><|1382|><|1175|><|1682|><|1556|><|990|><|1683|><|860|><|1721|><|110|><|786|><|376|><|1085|><|756|><|1523|><|234|><|1334|><|1506|><|1578|><|659|><|612|><|1108|><|1466|><|1647|><|308|><|1470|><|746|><|556|><|1061|><|code_end|> +about<|t_0.29|><|code_start|><|26|><|1649|><|545|><|1367|><|1263|><|1728|><|450|><|859|><|1434|><|497|><|1220|><|1285|><|179|><|755|><|1154|><|779|><|179|><|1229|><|1213|><|922|><|1774|><|1408|><|code_end|> +some<|t_0.23|><|code_start|><|986|><|28|><|1649|><|778|><|858|><|1519|><|1|><|18|><|26|><|1042|><|1174|><|1309|><|1499|><|1712|><|1692|><|1516|><|1574|><|code_end|> +of<|t_0.07|><|code_start|><|197|><|716|><|1039|><|1662|><|64|><|code_end|> +the<|t_0.08|><|code_start|><|1811|><|1568|><|569|><|886|><|1025|><|1374|><|code_end|> +gameplay<|t_0.48|><|code_start|><|1269|><|1092|><|933|><|1362|><|1762|><|1700|><|1675|><|215|><|781|><|1086|><|461|><|838|><|1022|><|759|><|649|><|1416|><|1004|><|551|><|909|><|787|><|343|><|830|><|1391|><|1040|><|1622|><|1779|><|1360|><|1231|><|1187|><|1317|><|76|><|997|><|989|><|978|><|737|><|189|><|code_end|> +aspects<|t_0.56|><|code_start|><|1423|><|797|><|1316|><|1222|><|147|><|719|><|1347|><|386|><|1390|><|1558|><|154|><|440|><|634|><|592|><|1097|><|1718|><|712|><|763|><|1118|><|1721|><|1311|><|868|><|580|><|362|><|1435|><|868|><|247|><|221|><|886|><|1145|><|1274|><|1284|><|457|><|1043|><|1459|><|1818|><|62|><|599|><|1035|><|62|><|1649|><|778|><|code_end|> +but<|t_0.20|><|code_start|><|780|><|1825|><|1681|><|1007|><|861|><|710|><|702|><|939|><|1669|><|1491|><|613|><|1739|><|823|><|1469|><|648|><|code_end|> +its<|t_0.09|><|code_start|><|92|><|688|><|1623|><|962|><|1670|><|527|><|599|><|code_end|> +still<|t_0.27|><|code_start|><|636|><|10|><|1217|><|344|><|713|><|957|><|823|><|154|><|1649|><|1286|><|508|><|214|><|1760|><|1250|><|456|><|1352|><|1368|><|921|><|615|><|5|><|code_end|> +really<|t_0.36|><|code_start|><|55|><|420|><|1008|><|1659|><|27|><|644|><|1266|><|617|><|761|><|1712|><|109|><|1465|><|1587|><|503|><|1541|><|619|><|197|><|1019|><|817|><|269|><|377|><|362|><|1381|><|507|><|1488|><|4|><|1695|><|code_end|> +enjoyable<|t_0.49|><|code_start|><|678|><|501|><|864|><|319|><|288|><|1472|><|1341|><|686|><|562|><|1463|><|619|><|1563|><|471|><|911|><|730|><|1811|><|1006|><|520|><|861|><|1274|><|125|><|1431|><|638|><|621|><|153|><|876|><|1770|><|437|><|987|><|1653|><|1109|><|898|><|1285|><|80|><|593|><|1709|><|843|><|code_end|> +and<|t_0.15|><|code_start|><|1285|><|987|><|303|><|1037|><|730|><|1164|><|502|><|120|><|1737|><|1655|><|1318|><|code_end|> +it<|t_0.09|><|code_start|><|848|><|1366|><|395|><|1601|><|1513|><|593|><|1302|><|code_end|> 
+looks<|t_0.27|><|code_start|><|1281|><|1266|><|1755|><|572|><|248|><|1751|><|1257|><|695|><|1380|><|457|><|659|><|585|><|1315|><|1105|><|1776|><|736|><|24|><|736|><|654|><|1027|><|code_end|> +lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|1481|><|1721|><|1123|><|438|><|1246|><|1251|><|795|><|659|><|1381|><|1658|><|217|><|1772|><|562|><|952|><|107|><|1129|><|1112|><|467|><|550|><|1079|><|840|><|1615|><|1469|><|1380|><|168|><|917|><|836|><|1827|><|437|><|583|><|67|><|595|><|1087|><|1646|><|1493|><|1677|><|code_end|>)"; + + auto tmp = common_tokenize(model_ttc, voice_data, false, true); + printf("\n\n"); + for (int i = 0; i < tmp.size(); ++i) { + printf("%d, ", tmp[i]); + } + printf("\n\n"); +#else + prompt_add(prompt_inp, llama_tokens { + 151667, 198, 1782, 155780, 151669, 151929, 152412, 152308, 152585, + 152460, 153375, 151670, 198, 74455, 155808, 151669, 151799, + 151873, 151863, 152446, 152372, 152204, 152728, 152229, 152470, + 151970, 153413, 152419, 153334, 153289, 153374, 153199, 152040, + 153260, 152721, 152680, 153297, 152419, 153248, 152400, 152691, + 153368, 153437, 151670, 198, 1722, 155828, 151669, 152607, + 152256, 152991, 152299, 152688, 153163, 153016, 152789, 153198, + 152712, 151911, 153107, 152623, 152170, 152395, 152852, 152207, + 152461, 153321, 153309, 151750, 152137, 153340, 152573, 152267, + 153347, 151789, 152681, 153339, 151992, 152512, 151751, 152179, + 153434, 153180, 152900, 153440, 152474, 153122, 153129, 151904, + 152311, 151670, 198, 1499, 155791, 151669, 152276, 152454, + 153354, 152544, 153204, 153272, 152708, 153433, 152319, 153226, + 153043, 152325, 153267, 152622, 151670, 198, 4250, 155797, + 151669, 153454, 153342, 151989, 152458, 153420, 152303, 152271, + 152827, 153036, 153196, 151708, 153263, 152561, 153207, 152213, + 152112, 153204, 151722, 152542, 151670, 198, 19789, 155796, + 151669, 153353, 153182, 152345, 152471, 152477, 153014, 152002, + 152191, 151734, 152312, 152810, 152237, 153224, 153169, 153224, + 152244, 153387, 153404, 151670, 198, 16069, 155811, 151669, + 152265, 151946, 151808, 152412, 152363, 152305, 153156, 152733, + 152810, 153157, 152016, 152100, 152069, 153234, 152317, 152589, + 152707, 153121, 153341, 152159, 152114, 153156, 153001, 153504, + 153376, 152272, 152433, 152325, 151941, 151670, 198, 285, + 155788, 151669, 152238, 152255, 153427, 152318, 153009, 152381, + 152474, 152680, 152157, 153255, 152324, 151682, 151670, 198, + 32955, 155804, 151669, 153490, 153419, 152364, 152405, 152682, + 152206, 152078, 153369, 152725, 153193, 153027, 152946, 152488, + 153070, 151883, 152890, 152489, 153144, 153375, 152358, 151685, + 152494, 152117, 152740, 151670, 198, 37448, 480, 155840, 151669, + 151902, 152720, 153377, 152027, 152378, 152821, 153207, 153459, + 153028, 153068, 152507, 153255, 152158, 152921, 151958, 152609, + 152748, 152822, 152286, 151714, 152730, 152377, 152353, 152470, + 152606, 152162, 152186, 153071, 152244, 153118, 153375, 153018, + 152712, 153098, 152976, 152336, 151843, 153202, 152297, 151736, + 153380, 153502, 152702, 152115, 153181, 152735, 153277, 153457, + 152393, 153112, 152595, 151670, 198, 19098, 155808, 151669, + 152464, 153452, 152595, 153312, 151937, 151933, 153197, 152239, + 153163, 152922, 153402, 152034, 152591, 153438, 152215, 151673, + 152005, 151785, 152642, 151924, 153278, 151805, 151974, 153482, + 152718, 152862, 153347, 151670, 198, 72, 155780, 151669, 151795, + 152111, 152746, 152377, 153471, 152309, 151670, 198, 19016, + 155788, 151669, 153181, 152271, 152190, 
152842, 152224, 152701, + 152939, 152536, 152091, 151815, 152733, 151672, 151670, 198, + 14689, 155788, 151669, 152291, 152072, 152942, 151734, 153042, + 153504, 152589, 153333, 151839, 151941, 153038, 153180, 151670, + 198, 36996, 8303, 155832, 151669, 152231, 152256, 152835, + 152801, 152985, 153400, 152393, 152818, 152765, 152249, 152600, + 151699, 152302, 152752, 153018, 153009, 151992, 153054, 152847, + 153354, 153228, 152662, 153355, 152532, 153393, 151782, 152458, + 152048, 152757, 152428, 153195, 151906, 153006, 153178, 153250, + 152331, 152284, 152780, 153138, 153319, 151980, 153142, 152418, + 152228, 152733, 151670, 198, 9096, 155801, 151669, 151698, + 153321, 152217, 153039, 152935, 153400, 152122, 152531, 153106, + 152169, 152892, 152957, 151851, 152427, 152826, 152451, 151851, + 152901, 152885, 152594, 153446, 153080, 151670, 198, 14689, + 155795, 151669, 152658, 151700, 153321, 152450, 152530, 153191, + 151673, 151690, 151698, 152714, 152846, 152981, 153171, 153384, + 153364, 153188, 153246, 151670, 198, 1055, 155779, 151669, + 151869, 152388, 152711, 153334, 151736, 151670, 198, 1782, + 155780, 151669, 153483, 153240, 152241, 152558, 152697, 153046, + 151670, 198, 5804, 1363, 155820, 151669, 152941, 152764, 152605, + 153034, 153434, 153372, 153347, 151887, 152453, 152758, 152133, + 152510, 152694, 152431, 152321, 153088, 152676, 152223, 152581, + 152459, 152015, 152502, 153063, 152712, 153294, 153451, 153032, + 152903, 152859, 152989, 151748, 152669, 152661, 152650, 152409, + 151861, 151670, 198, 300, 7973, 155828, 151669, 153095, 152469, + 152988, 152894, 151819, 152391, 153019, 152058, 153062, 153230, + 151826, 152112, 152306, 152264, 152769, 153390, 152384, 152435, + 152790, 153393, 152983, 152540, 152252, 152034, 153107, 152540, + 151919, 151893, 152558, 152817, 152946, 152956, 152129, 152715, + 153131, 153490, 151734, 152271, 152707, 151734, 153321, 152450, + 151670, 198, 8088, 155792, 151669, 152452, 153497, 153353, + 152679, 152533, 152382, 152374, 152611, 153341, 153163, 152285, + 153411, 152495, 153141, 152320, 151670, 198, 1199, 155781, + 151669, 151764, 152360, 153295, 152634, 153342, 152199, 152271, + 151670, 198, 43366, 155799, 151669, 152308, 151682, 152889, + 152016, 152385, 152629, 152495, 151826, 153321, 152958, 152180, + 151886, 153432, 152922, 152128, 153024, 153040, 152593, 152287, + 151677, 151670, 198, 53660, 155808, 151669, 151727, 152092, + 152680, 153331, 151699, 152316, 152938, 152289, 152433, 153384, + 151781, 153137, 153259, 152175, 153213, 152291, 151869, 152691, + 152489, 151941, 152049, 152034, 153053, 152179, 153160, 151676, + 153367, 151670, 198, 268, 4123, 480, 155821, 151669, 152350, + 152173, 152536, 151991, 151960, 153144, 153013, 152358, 152234, + 153135, 152291, 153235, 152143, 152583, 152402, 153483, 152678, + 152192, 152533, 152946, 151797, 153103, 152310, 152293, 151825, + 152548, 153442, 152109, 152659, 153325, 152781, 152570, 152957, + 151752, 152265, 153381, 152515, 151670, 198, 437, 155787, + 151669, 152957, 152659, 151975, 152709, 152402, 152836, 152174, + 151792, 153409, 153327, 152990, 151670, 198, 275, 155781, + 151669, 152520, 153038, 152067, 153273, 153185, 152265, 152974, + 151670, 198, 94273, 155799, 151669, 152953, 152938, 153427, + 152244, 151920, 153423, 152929, 152367, 153052, 152129, 152331, + 152257, 152987, 152777, 153448, 152408, 151696, 152408, 152326, + 152699, 151670, 198, 385, 16239, 155828, 151669, 152306, 152268, + 153438, 153228, 152978, 152957, 153153, 153393, 152795, 152110, + 152918, 152923, 152467, 
152331, 153053, 153330, 151889, 153444,
+        152234, 152624, 151779, 152801, 152784, 152139, 152222, 152751,
+        152512, 153287, 153141, 153052, 151840, 152589, 152508, 153499,
+        152109, 152255, 151739, 152267, 152759, 153318, 153165, 153349,
+        151670,});
+#endif
+
+    // print the prompt token-by-token
+
+    LOG("\n");
+
+    for (auto id : prompt_inp) {
+        LOG("%s", common_token_to_piece(ctx_ttc, id).c_str());
+    }
+
+    LOG_INF("%s: prompt size: %d\n", __func__, (int) prompt_inp.size());
+
+    LOG("\n");
+
+    // create a llama_batch
+    // we use this object to submit token data for decoding
+    llama_batch batch = llama_batch_init(std::max(prompt_inp.size(), (size_t) n_parallel), 0, n_parallel);
+
+    std::vector<llama_seq_id> seq_ids(n_parallel, 0);
+    for (int32_t i = 0; i < n_parallel; ++i) {
+        seq_ids[i] = i;
+    }
+
+    // evaluate the initial prompt
+    for (size_t i = 0; i < prompt_inp.size(); ++i) {
+        common_batch_add(batch, prompt_inp[i], i, seq_ids, false);
+    }
+    GGML_ASSERT(batch.n_tokens == (int) prompt_inp.size());
+
+    // llama_decode will output logits only for the last token of the prompt
+    batch.logits[batch.n_tokens - 1] = true;
+
+    if (llama_decode(ctx_ttc, batch) != 0) {
+        LOG_ERR("%s: llama_decode() failed\n", __func__);
+        return 1;
+    }
+
+    if (n_parallel > 1) {
+        LOG_INF("\n\n%s: generating %d sequences ...\n", __func__, n_parallel);
+    }
+
+    llama_synchronize(ctx_ttc);
+
+    LOG_INF("%s: time for prompt: %.3f ms\n\n", __func__, (ggml_time_us() - t_main_start) / 1000.0f);
+
+    const auto t_dec_start = ggml_time_us();
+
+    // main loop
+
+    // remember the batch index of the last token for each parallel sequence
+    // we need this to determine which logits to sample from
+    std::vector<int32_t> i_batch(n_parallel, batch.n_tokens - 1);
+
+    int n_past   = batch.n_tokens;
+    int n_decode = 0;
+
+    while (n_decode <= n_predict) {
+        // prepare the next batch
+        common_batch_clear(batch);
+
+        // sample the next token for each parallel sequence / stream
+        for (int32_t i = 0; i < n_parallel; ++i) {
+            if (i_batch[i] < 0) {
+                // the stream has already finished
+                continue;
+            }
+
+            const llama_token new_token_id = common_sampler_sample(smpl[i], ctx_ttc, i_batch[i]);
+
+            common_sampler_accept(smpl[i], new_token_id, true);
+
+            codes.push_back(new_token_id);
+
+            const auto * cands = common_sampler_get_candidates(smpl[i]);
+
+            // is it an end of generation?
-> mark the stream as finished + if (llama_token_is_eog(model_ttc, new_token_id) || n_decode == n_predict) { + std::string reason; + if (llama_token_is_eog(model_ttc, new_token_id)) { + reason = "eos"; + } else { + reason = "n_predict"; + } + + i_batch[i] = -1; + + LOG("\n"); + if (n_parallel > 1) { + LOG_CNT("\n"); + LOG_INF("%s: stream %d finished at n_past = %d, reason = '%s'\n", __func__, i, n_past, reason.c_str()); + } + + continue; + } + + { + const float p = cands->data[cands->selected].p; + + const int col = std::max(0, std::min((int) k_colors.size() - 1, (int) ((3*p)*float(k_colors.size())))); + + LOG_CNT("%s%d%s", k_colors[col].c_str(), i, "\033[0m"); + //LOG_CNT("%d", i); + } + + i_batch[i] = batch.n_tokens; + + // push this new token for next evaluation + common_batch_add(batch, new_token_id, n_past, { i }, true); + } + + // all streams are finished + if (batch.n_tokens == 0) { + break; + } + + n_decode += 1; + n_past += 1; + + // evaluate the current batch with the transformer model + if (llama_decode(ctx_ttc, batch)) { + LOG_ERR("%s : failed to eval, return code %d\n", __func__, 1); + return 1; + } + } + + llama_batch_free(batch); + + LOG("\n"); + LOG_INF("%s: time for decoder: %.3f ms\n", __func__, (ggml_time_us() - t_dec_start) / 1000.0f); + } + + common_perf_print(ctx_ttc, smpl[0]); + + //std::vector codes = {198, 88225, 155856, 151669, 152205, + // 153064, 152537, 153421, 153209, 152524, 151689, 152993, 152438, 152695, + // 153091, 152945, 152829, 152534, 152934, 153020, 151997, 152263, 153010, + // 153146, 152399, 153208, 152496, 151793, 152848, 152263, 152571, 153286, + // 152227, 153300, 152934, 152263, 153208, 152263, 152965, 152430, 152296, + // 153146, 152920, 152376, 152556, 153363, 151775, 152044, 152972, 152690, + // 153379, 152368, 152233, 153422, 152490, 151996, 152022, 151694, 152061, + // 153238, 152539, 153356, 152640, 153021, 153123, 151962, 153094, 151670, + // 198, 20339, 13189, 155824, 151669, 152070, 152007, 152910, 151683, + // 152000, 152373, 152760, 152046, 151735, 152334, 152394, 153073, 152908, + // 151856, 151953, 153247, 153293, 151903, 153480, 153168, 152478, 153359, + // 153429, 151905, 151678, 152567, 152411, 152165, 152556, 153075, 153424, + // 151993, 152999, 153078, 152151, 152088, 153389, 152484, 151874, 151670, + // 198, 285, 155784, 151669, 152226, 152126, 152638, 153215, 151729, + // 152959, 153479, 153059, 151838, 151670, 198, 1782, 155783, 151669, + // 153288, 153055, 153314, 152497, 152962, 152741, 152076, 153253, 151670, + // 198, 471, 16488, 155825, 151669, 152060, 152916, 151893, 153469, 152501, + // 152080, 152743, 151932, 153161, 152096, 152761, 152698, 153401, 153242, + // 153336, 152441, 152838, 153467, 152706, 153496, 153310, 152422, 153360, + // 153115, 152763, 151998, 152373, 153450, 152554, 151968, 153323, 152055, + // 152468, 153111, 153358, 152813, 152010, 151770, 152823, 152960, 151670, + // 198, 22627, 155823, 151669, 152814, 152366, 153484, 152931, 153441, + // 152164, 152877, 152915, 153463, 151692, 152911, 152747, 152776, 151831, + // 153449, 151882, 152975, 152031, 152513, 153150, 152448, 152667, 153133, + // 153189, 152619, 153466, 152054, 152106, 153119, 152277, 152439, 153109, + // 152997, 152141, 153154, 153256, 153311, 151922, 151670, 198, 1055, + // 155781, 151669, 152633, 151850, 153060, 153270, 152560, 153348, 152729, + // 151670, 198, 25312, 155803, 151669, 152521, 153403, 152561, 153337, + // 153383, 152199, 153493, 153326, 151830, 152254, 152248, 152349, 152153, + // 153007, 151823, 153037, 152575, 
152457, 152406, 152592, 153116, 153365,
+    //     153456, 151670, 198, 88225, 155817, 151669, 153271, 151925, 152218,
+    //     152418, 152253, 153140, 151903, 153151, 152626, 152338, 152647, 153464,
+    //     152785, 152768, 151711, 152037, 152033, 151804, 152216, 151701, 151855,
+    //     152348, 152995, 152955, 152905, 152342, 152340, 153391, 153453, 152418,
+    //     153415, 151990, 153083, 152884, 151670, 198, 151668, 198, 151645};
+
+    {
+        const std::string inp_txt = common_detokenize(ctx_ttc, codes, true);
+
+        LOG("\n");
+        LOG_INF("codes: '%s'\n", inp_txt.c_str());
+        LOG_INF("%s: codes size: %d\n", __func__, (int) codes.size());
+    }
+
+    // remove all non-audio tokens (i.e. < 151672 || > 155772)
+    codes.erase(std::remove_if(codes.begin(), codes.end(), [](llama_token t) { return t < 151672 || t > 155772; }), codes.end());
+
+    {
+        const std::string inp_txt = common_detokenize(ctx_ttc, codes, true);
+        LOG_INF("codes audio: '%s'\n", inp_txt.c_str());
+        LOG_INF("%s: codes audio size: %d\n", __func__, (int) codes.size());
+    }
+
+    for (auto & token : codes) {
+        token -= 151672;
+    }
+
+    const auto t_voc_start = ggml_time_us();
+
+    const int n_codes = codes.size();
+
+    llama_batch batch = llama_batch_init(n_codes, 0, 1);
+
+    for (size_t i = 0; i < codes.size(); ++i) {
+        common_batch_add(batch, codes[i], i, { 0 }, true); // TODO: all logits?
+    }
+    GGML_ASSERT(batch.n_tokens == n_codes);
+
+    if (llama_decode(ctx_cts, batch) != 0) {
+        LOG_ERR("%s: llama_decode() failed\n", __func__);
+        return 1;
+    }
+
+    llama_synchronize(ctx_cts);
+
+    LOG_INF("%s: time for vocoder: %.3f ms\n", __func__, (ggml_time_us() - t_voc_start) / 1000.0f);
+
+    const auto t_spec_start = ggml_time_us();
+
+#if 1
+    // spectral operations
+    const int n_embd = llama_n_embd(model_cts);
+    const float * embd = llama_get_embeddings(ctx_cts);
+
+    auto audio = embd_to_audio(embd, n_codes, n_embd, params.cpuparams.n_threads);
+
+#else
+    // read the spectrogram from a file for debugging purposes
+    std::vector<float> audio;
+    {
+        std::ifstream fin("out.bin", std::ios::binary);
+        if (!fin) {
+            LOG_ERR("%s: failed to open file '%s'\n", __func__, "out.bin");
+            return 1;
+        }
+
+        std::vector<float> embd;
+
+        int n_codes;
+        int n_embd;
+
+        fin.read(reinterpret_cast<char *>(&n_codes), sizeof(int));
+        fin.read(reinterpret_cast<char *>(&n_embd), sizeof(int));
+
+        embd.resize(n_codes * n_embd);
+        fin.read(reinterpret_cast<char *>(embd.data()), n_codes * n_embd * sizeof(float));
+        fin.close();
+
+        LOG_INF("%s: n_codes: %d, n_embd: %d\n", __func__, n_codes, n_embd);
+
+        audio = embd_to_audio(embd.data(), n_codes, n_embd, params.cpuparams.n_threads);
+    }
+#endif
+
+    const std::string fname = "output.wav";
+
+    const int n_sr = 24000; // sampling rate
+
+    // zero out first 0.25 seconds
+    for (int i = 0; i < 24000/4; ++i) {
+        audio[i] = 0.0f;
+    }
+
+    LOG_INF("%s: time for spectral ops: %.3f ms\n", __func__, (ggml_time_us() - t_spec_start) / 1000.0f);
+    LOG_INF("%s: total time: %.3f ms\n", __func__, (ggml_time_us() - t_main_start) / 1000.0f);
+
+    save_wav16(fname, audio, n_sr);
+
+    LOG_INF("%s: audio written to file '%s'\n", __func__, fname.c_str());
+
+    llama_free(ctx_ttc);
+    llama_free_model(model_ttc);
+
+    llama_free(ctx_cts);
+    llama_free_model(model_cts);
+
+    llama_backend_free();
+
+    return 0;
+}

diff --git a/flake.lock b/flake.lock
index 470363a2f8fad..d114f4422a36a 100644
--- a/flake.lock
+++ b/flake.lock
@@ -20,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1730785428,
-        "narHash": "sha256-Zwl8YgTVJTEum+L+0zVAWvXAGbWAuXHax3KzuejaDyo=",
+        "lastModified": 1732014248,
+        "narHash": "sha256-y/MEyuJ5oBWrWAic/14LaIr/u5E0wRVzyYsouYY3W6w=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "4aa36568d413aca0ea84a1684d2d46f55dbabad7",
+        "rev": "23e89b7da85c3640bbc2173fe04f4bd114342367",
         "type": "github"
       },
       "original": {

diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index a82818d607f5e..e33d974827cbe 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -32,7 +32,15 @@ else()
     endif()
 endif()

+# remove the lib prefix on win32 mingw
+if (WIN32)
+    set(CMAKE_STATIC_LIBRARY_PREFIX "")
+    set(CMAKE_SHARED_LIBRARY_PREFIX "")
+    set(CMAKE_SHARED_MODULE_PREFIX  "")
+endif()
+
 option(BUILD_SHARED_LIBS "ggml: build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT})
+option(GGML_BACKEND_DL   "ggml: build backends as dynamic libraries (requires BUILD_SHARED_LIBS)" OFF)

 #
 # option list

@@ -66,10 +74,10 @@ if (NOT GGML_CUDA_GRAPHS_DEFAULT)
 endif()

 # general
-option(GGML_STATIC "ggml: static link libraries" OFF)
-option(GGML_NATIVE "ggml: enable -march=native flag" ${GGML_NATIVE_DEFAULT})
-option(GGML_LTO    "ggml: enable link time optimization" OFF)
-option(GGML_CCACHE "ggml: use ccache if available" ON)
+option(GGML_STATIC "ggml: static link libraries" OFF)
+option(GGML_NATIVE "ggml: optimize the build for the current system" ${GGML_NATIVE_DEFAULT})
+option(GGML_LTO    "ggml: enable link time optimization" OFF)
+option(GGML_CCACHE "ggml: use ccache if available" ON)

 # debug
 option(GGML_ALL_WARNINGS "ggml: enable all compiler warnings" ON)

@@ -91,28 +99,34 @@ else()
     set(INS_ENB ON)
 endif()

-option(GGML_CPU_HBM     "ggml: use memkind for CPU HBM" OFF)
-option(GGML_CPU_AARCH64 "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON)
-
-option(GGML_AVX         "ggml: enable AVX" ${INS_ENB})
-option(GGML_AVX2        "ggml: enable AVX2" ${INS_ENB})
-option(GGML_AVX512      "ggml: enable AVX512" OFF)
-option(GGML_AVX512_VBMI "ggml: enable AVX512-VBMI" OFF)
-option(GGML_AVX512_VNNI "ggml: enable AVX512-VNNI" OFF)
-option(GGML_AVX512_BF16 "ggml: enable AVX512-BF16" OFF)
-option(GGML_AMX_TILE    "ggml: enable AMX-TILE" OFF)
-option(GGML_AMX_INT8    "ggml: enable AMX-INT8" OFF)
-option(GGML_AMX_BF16    "ggml: enable AMX-BF16" OFF)
-option(GGML_FMA         "ggml: enable FMA" ${INS_ENB})
+option(GGML_CPU_HBM     "ggml: use memkind for CPU HBM" OFF)
+option(GGML_CPU_AARCH64 "ggml: use runtime weight conversion of Q4_0 to Q4_X_X" ON)
+option(GGML_AVX         "ggml: enable AVX" ${INS_ENB})
+option(GGML_AVX_VNNI    "ggml: enable AVX-VNNI" OFF)
+option(GGML_AVX2        "ggml: enable AVX2" ${INS_ENB})
+option(GGML_AVX512      "ggml: enable AVX512F" OFF)
+option(GGML_AVX512_VBMI "ggml: enable AVX512-VBMI" OFF)
+option(GGML_AVX512_VNNI "ggml: enable AVX512-VNNI" OFF)
+option(GGML_AVX512_BF16 "ggml: enable AVX512-BF16" OFF)
 if (NOT MSVC)
-    option(GGML_F16C "ggml: enable F16C" ${INS_ENB}) # in MSVC F16C is implied with AVX2/AVX512
+    # in MSVC, F16C and FMA are implied with AVX2/AVX512
+    option(GGML_FMA  "ggml: enable FMA" ${INS_ENB})
+    option(GGML_F16C "ggml: enable F16C" ${INS_ENB})
+    # MSVC does not seem to support AMX
+    option(GGML_AMX_TILE "ggml: enable AMX-TILE" OFF)
+    option(GGML_AMX_INT8 "ggml: enable AMX-INT8" OFF)
+    option(GGML_AMX_BF16 "ggml: enable AMX-BF16" OFF)
 endif()
-option(GGML_LASX "ggml: enable lasx" ON)
-option(GGML_LSX  "ggml: enable lsx" ON)
-option(GGML_SVE  "ggml: enable SVE" OFF)
+option(GGML_LASX "ggml: enable lasx" ON)
+option(GGML_LSX  "ggml: enable lsx" ON)
+option(GGML_RVV  "ggml: enable rvv" ON)
+
+option(GGML_CPU_ALL_VARIANTS "ggml: build all variants of the CPU backend (requires GGML_BACKEND_DL)" OFF)
+set(GGML_CPU_ARM_ARCH "" CACHE STRING "ggml: CPU
architecture for ARM") + if (WIN32) - set(GGML_WIN_VER "0x602" CACHE STRING "ggml: Windows Version") + set(GGML_WIN_VER "0x602" CACHE STRING "ggml: Windows version") endif() # ggml core @@ -159,11 +173,17 @@ set (GGML_METAL_MACOSX_VERSION_MIN "" CACHE STRING set (GGML_METAL_STD "" CACHE STRING "ggml: metal standard version (-std flag)") option(GGML_OPENMP "ggml: use OpenMP" ON) option(GGML_RPC "ggml: use RPC" OFF) -option(GGML_AMX "ggml: use AMX" OFF) option(GGML_SYCL "ggml: use SYCL" OFF) option(GGML_SYCL_F16 "ggml: use 16 bit floats for sycl calculations" OFF) set (GGML_SYCL_TARGET "INTEL" CACHE STRING "ggml: sycl target device") +set (GGML_SYCL_DEVICE_ARCH "" CACHE STRING + "ggml: sycl device architecture") + +option(GGML_OPENCL "ggml: use OpenCL" OFF) +option(GGML_OPENCL_PROFILING "ggml: use OpenCL profiling (increases overhead)" OFF) +option(GGML_OPENCL_EMBED_KERNELS "ggml: embed kernels" ON) +option(GGML_OPENCL_USE_ADRENO_KERNELS "ggml: use optimized kernels for Adreno" ON) # extra artifacts option(GGML_BUILD_TESTS "ggml: build tests" ${GGML_STANDALONE}) @@ -176,11 +196,7 @@ option(GGML_BUILD_EXAMPLES "ggml: build examples" ${GGML_STANDALONE}) set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD_REQUIRED true) -if (GGML_SYCL) - set(CMAKE_CXX_STANDARD 17) -else() - set(CMAKE_CXX_STANDARD 11) -endif() +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED true) set(THREADS_PREFER_PTHREAD_FLAG ON) @@ -233,12 +249,8 @@ set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}") #if (GGML_METAL) # set_target_properties(ggml PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/src/ggml-metal.metal") #endif() -install(TARGETS ggml PUBLIC_HEADER) - -if (BUILD_SHARED_LIBS) - install(TARGETS ggml LIBRARY) - install(TARGETS ggml-base LIBRARY) -endif() +install(TARGETS ggml LIBRARY PUBLIC_HEADER) +install(TARGETS ggml-base LIBRARY) # FIXME: this should be done in the backend cmake files if (GGML_METAL) diff --git a/ggml/include/ggml-amx.h b/ggml/include/ggml-amx.h deleted file mode 100644 index 042d6d91998f7..0000000000000 --- a/ggml/include/ggml-amx.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include "ggml.h" -#include "ggml-backend.h" - - -#ifdef __cplusplus -extern "C" { -#endif - -// buffer_type API -GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void); - -GGML_BACKEND_API bool ggml_backend_is_amx(ggml_backend_t backend); - -// backend API -GGML_BACKEND_API ggml_backend_t ggml_backend_amx_init(void); - -GGML_BACKEND_API void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads); - -GGML_BACKEND_API ggml_backend_reg_t ggml_backend_amx_reg(void); - -#ifdef __cplusplus -} -#endif diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h index cef164764bb1a..7221a08309274 100644 --- a/ggml/include/ggml-backend.h +++ b/ggml/include/ggml-backend.h @@ -190,6 +190,14 @@ extern "C" { typedef void (*ggml_backend_set_n_threads_t)(ggml_backend_t backend, int n_threads); // Get additional buffer types provided by the device (returns a NULL-terminated array) typedef ggml_backend_buffer_type_t * (*ggml_backend_dev_get_extra_bufts_t)(ggml_backend_dev_t device); + // Set the abort callback for the backend + typedef void (*ggml_backend_set_abort_callback_t)(ggml_backend_t backend, ggml_abort_callback abort_callback, void * abort_callback_data); + // Get a list of feature flags supported by the backend (returns a NULL-terminated array) + struct ggml_backend_feature { + const char * name; + const char * value; + }; + typedef struct 
ggml_backend_feature * (*ggml_backend_get_features_t)(ggml_backend_reg_t reg); // // Backend registry @@ -214,6 +222,14 @@ extern "C" { // = ggml_backend_dev_init(ggml_backend_dev_by_type(GPU) OR ggml_backend_dev_by_type(CPU), NULL) GGML_API ggml_backend_t ggml_backend_init_best(void); + // Load a backend from a dynamic library and register it + GGML_API ggml_backend_reg_t ggml_backend_load(const char * path); + // Unload a backend if loaded dynamically and unregister it + GGML_API void ggml_backend_unload(ggml_backend_reg_t reg); + // Load all known backends from dynamic libraries + GGML_API void ggml_backend_load_all(void); + GGML_API void ggml_backend_load_all_from_path(const char * dir_path); + // // Backend scheduler // diff --git a/ggml/include/ggml-cpu.h b/ggml/include/ggml-cpu.h index 7571ef9798364..3aa71badb5fb0 100644 --- a/ggml/include/ggml-cpu.h +++ b/ggml/include/ggml-cpu.h @@ -7,29 +7,6 @@ extern "C" { #endif - // Scheduling priorities - enum ggml_sched_priority { - GGML_SCHED_PRIO_NORMAL, - GGML_SCHED_PRIO_MEDIUM, - GGML_SCHED_PRIO_HIGH, - GGML_SCHED_PRIO_REALTIME - }; - - // Threadpool params - // Use ggml_threadpool_params_default() or ggml_threadpool_params_init() to populate the defaults - struct ggml_threadpool_params { - bool cpumask[GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings) - int n_threads; // number of threads - enum ggml_sched_priority prio; // thread priority - uint32_t poll; // polling level (0 - no polling, 100 - aggressive polling) - bool strict_cpu; // strict cpu placement - bool paused; // start in paused state - }; - - struct ggml_threadpool; // forward declaration, see ggml.c - - typedef struct ggml_threadpool * ggml_threadpool_t; - // the compute plan that needs to be prepared for ggml_graph_compute() // since https://github.com/ggerganov/ggml/issues/287 struct ggml_cplan { @@ -75,14 +52,11 @@ extern "C" { GGML_BACKEND_API float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3); GGML_BACKEND_API void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value); - GGML_BACKEND_API struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads); - GGML_BACKEND_API void ggml_threadpool_params_init (struct ggml_threadpool_params * p, int n_threads); - GGML_BACKEND_API bool ggml_threadpool_params_match (const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1); - GGML_BACKEND_API struct ggml_threadpool * ggml_threadpool_new (struct ggml_threadpool_params * params); - GGML_BACKEND_API void ggml_threadpool_free (struct ggml_threadpool * threadpool); - GGML_BACKEND_API int ggml_threadpool_get_n_threads(struct ggml_threadpool * threadpool); - GGML_BACKEND_API void ggml_threadpool_pause (struct ggml_threadpool * threadpool); - GGML_BACKEND_API void ggml_threadpool_resume (struct ggml_threadpool * threadpool); + GGML_BACKEND_API struct ggml_threadpool * ggml_threadpool_new (struct ggml_threadpool_params * params); + GGML_BACKEND_API void ggml_threadpool_free (struct ggml_threadpool * threadpool); + GGML_BACKEND_API int ggml_threadpool_get_n_threads (struct ggml_threadpool * threadpool); + GGML_BACKEND_API void ggml_threadpool_pause (struct ggml_threadpool * threadpool); + GGML_BACKEND_API void ggml_threadpool_resume (struct ggml_threadpool * threadpool); // ggml_graph_plan() has to be called before ggml_graph_compute() // when plan.work_size > 0, caller must allocate memory for plan.work_data @@ -104,10 +78,10 @@ 
extern "C" { GGML_BACKEND_API int ggml_cpu_has_sse3 (void); GGML_BACKEND_API int ggml_cpu_has_ssse3 (void); GGML_BACKEND_API int ggml_cpu_has_avx (void); + GGML_BACKEND_API int ggml_cpu_has_avx_vnni (void); GGML_BACKEND_API int ggml_cpu_has_avx2 (void); GGML_BACKEND_API int ggml_cpu_has_f16c (void); GGML_BACKEND_API int ggml_cpu_has_fma (void); - GGML_BACKEND_API int ggml_cpu_has_avx_vnni (void); GGML_BACKEND_API int ggml_cpu_has_avx512 (void); GGML_BACKEND_API int ggml_cpu_has_avx512_vbmi(void); GGML_BACKEND_API int ggml_cpu_has_avx512_vnni(void); @@ -117,6 +91,7 @@ extern "C" { GGML_BACKEND_API int ggml_cpu_has_neon (void); GGML_BACKEND_API int ggml_cpu_has_arm_fma (void); GGML_BACKEND_API int ggml_cpu_has_fp16_va (void); + GGML_BACKEND_API int ggml_cpu_has_dotprod (void); GGML_BACKEND_API int ggml_cpu_has_matmul_int8(void); GGML_BACKEND_API int ggml_cpu_has_sve (void); GGML_BACKEND_API int ggml_cpu_get_sve_cnt (void); // sve vector length in bytes @@ -128,24 +103,14 @@ extern "C" { // Internal types and functions exposed for tests and benchmarks - typedef void (*ggml_from_float_to_mat_t) - (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t nr, int64_t k, int64_t bs); typedef void (*ggml_vec_dot_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx, const void * GGML_RESTRICT y, size_t by, int nrc); - typedef void (*ggml_gemv_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, - const void * GGML_RESTRICT y, int nr, int nc); - typedef void (*ggml_gemm_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, - const void * GGML_RESTRICT y, int nr, int nc); struct ggml_type_traits_cpu { ggml_from_float_t from_float; - ggml_from_float_to_mat_t from_float_to_mat; ggml_vec_dot_t vec_dot; enum ggml_type vec_dot_type; int64_t nrows; // number of rows to process simultaneously - int64_t ncols; // number of columns to process simultaneously - ggml_gemv_t gemv; - ggml_gemm_t gemm; }; GGML_BACKEND_API const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type); @@ -165,13 +130,6 @@ extern "C" { GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cpu_reg(void); -#ifdef GGML_USE_CPU_HBM - GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void); -#endif - - GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void); - GGML_BACKEND_API bool ggml_backend_cpu_buft_is_aarch64(ggml_backend_buffer_type_t buft); - #ifdef __cplusplus } #endif diff --git a/ggml/include/ggml-opencl.h b/ggml/include/ggml-opencl.h new file mode 100644 index 0000000000000..6b61771358f87 --- /dev/null +++ b/ggml/include/ggml-opencl.h @@ -0,0 +1,26 @@ +#ifndef GGML_OPENCL_H +#define GGML_OPENCL_H + +#include "ggml.h" +#include "ggml-backend.h" + +#ifdef __cplusplus +extern "C" { +#endif + +// +// backend API +// +GGML_BACKEND_API ggml_backend_t ggml_backend_opencl_init(void); +GGML_BACKEND_API bool ggml_backend_is_opencl(ggml_backend_t backend); + +GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void); +GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void); + +GGML_BACKEND_API ggml_backend_reg_t ggml_backend_opencl_reg(void); + +#ifdef __cplusplus +} +#endif + +#endif // GGML_OPENCL_H diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 69e6a24344b97..c714fc8c837bb 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -237,7 +237,9 @@ #define GGML_EXIT_SUCCESS 0 #define GGML_EXIT_ABORTED 1 
-#define GGML_ROPE_TYPE_NEOX 2 +#define GGML_ROPE_TYPE_NEOX 2 +#define GGML_ROPE_TYPE_MROPE 8 +#define GGML_ROPE_TYPE_VISION 24 #define GGUF_MAGIC "GGUF" @@ -384,12 +386,15 @@ extern "C" { GGML_TYPE_F64 = 28, GGML_TYPE_IQ1_M = 29, GGML_TYPE_BF16 = 30, - GGML_TYPE_Q4_0_4_4 = 31, - GGML_TYPE_Q4_0_4_8 = 32, - GGML_TYPE_Q4_0_8_8 = 33, + // GGML_TYPE_Q4_0_4_4 = 31, support has been removed from gguf files + // GGML_TYPE_Q4_0_4_8 = 32, + // GGML_TYPE_Q4_0_8_8 = 33, GGML_TYPE_TQ1_0 = 34, GGML_TYPE_TQ2_0 = 35, - GGML_TYPE_COUNT, + // GGML_TYPE_IQ4_NL_4_4 = 36, + // GGML_TYPE_IQ4_NL_4_8 = 37, + // GGML_TYPE_IQ4_NL_8_8 = 38, + GGML_TYPE_COUNT = 39, }; // precision @@ -430,9 +435,6 @@ extern "C" { GGML_FTYPE_MOSTLY_IQ4_XS = 22, // except 1d tensors GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors - GGML_FTYPE_MOSTLY_Q4_0_4_4 = 25, // except 1d tensors - GGML_FTYPE_MOSTLY_Q4_0_4_8 = 26, // except 1d tensors - GGML_FTYPE_MOSTLY_Q4_0_8_8 = 27, // except 1d tensors }; // available tensor operations: @@ -496,6 +498,7 @@ extern "C" { GGML_OP_POOL_2D_BACK, GGML_OP_UPSCALE, // nearest interpolate GGML_OP_PAD, + GGML_OP_PAD_REFLECT_1D, GGML_OP_ARANGE, GGML_OP_TIMESTEP_EMBEDDING, GGML_OP_ARGSORT, @@ -1442,6 +1445,22 @@ extern "C" { float beta_fast, float beta_slow); + GGML_API struct ggml_tensor * ggml_rope_multi( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * c, + int n_dims, + int sections[4], + int mode, + int n_ctx_orig, + float freq_base, + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow); + // in-place, returns view(a) GGML_API struct ggml_tensor * ggml_rope_ext_inplace( struct ggml_context * ctx, @@ -1545,17 +1564,6 @@ extern "C" { int d1, // dilation dimension 1 bool is_2D); - GGML_API struct ggml_tensor * ggml_conv_depthwise_2d( - struct ggml_context * ctx, - struct ggml_tensor * a, // convolution kernel - struct ggml_tensor * b, // data - int s0, // stride dimension 0 - int s1, // stride dimension 1 - int p0, // padding dimension 0 - int p1, // padding dimension 1 - int d0, // dilation dimension 0 - int d1); // dilation dimension 1 - GGML_API struct ggml_tensor * ggml_conv_1d( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel @@ -1573,6 +1581,23 @@ extern "C" { int s, // stride int d); // dilation + // depthwise + // TODO: this is very likely wrong for some cases! 
- needs more testing
+    GGML_API struct ggml_tensor * ggml_conv_1d_dw(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,   // convolution kernel
+            struct ggml_tensor  * b,   // data
+            int                   s0,  // stride
+            int                   p0,  // padding
+            int                   d0); // dilation
+
+    GGML_API struct ggml_tensor * ggml_conv_1d_dw_ph(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,   // convolution kernel
+            struct ggml_tensor  * b,   // data
+            int                   s0,  // stride
+            int                   d0); // dilation
+
     GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,   // convolution kernel
@@ -1592,7 +1617,6 @@ extern "C" {
             int                   d0,  // dilation dimension 0
             int                   d1); // dilation dimension 1
-
     // kernel size is a->ne[0] x a->ne[1]
     // stride is equal to kernel size
     // padding is zero
@@ -1619,6 +1643,18 @@
             struct ggml_tensor * a,
             struct ggml_tensor * b);

+    // depthwise
+    GGML_API struct ggml_tensor * ggml_conv_2d_dw(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,   // convolution kernel
+            struct ggml_tensor  * b,   // data
+            int                   s0,  // stride dimension 0
+            int                   s1,  // stride dimension 1
+            int                   p0,  // padding dimension 0
+            int                   p1,  // padding dimension 1
+            int                   d0,  // dilation dimension 0
+            int                   d1); // dilation dimension 1
+
     GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
@@ -1692,6 +1728,13 @@ extern "C" {
             int                   p2,
             int                   p3);

+    // pad each dimension with reflection: [a, b, c, d] -> [b, a, b, c, d, c]
+    GGML_API struct ggml_tensor * ggml_pad_reflect_1d(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            int                   p0,
+            int                   p1);
+
     // Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151
     // timesteps: [N,]
     // return: [N, dim]
@@ -2194,11 +2237,19 @@ extern "C" {
     GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
     GGML_API void   gguf_get_meta_data(const struct gguf_context * ctx, void * data);

-#ifdef __cplusplus
-// restrict not standard in C++
-#define GGML_RESTRICT
+#ifdef __cplusplus
+// restrict not standard in C++
+#    if defined(__GNUC__)
+#        define GGML_RESTRICT __restrict__
+#    elif defined(__clang__)
+#        define GGML_RESTRICT __restrict
+#    elif defined(_MSC_VER)
+#        define GGML_RESTRICT __restrict
+#    else
+#        define GGML_RESTRICT
+#    endif
 #else
-#define GGML_RESTRICT restrict
+#    define GGML_RESTRICT restrict
 #endif
     typedef void (*ggml_to_float_t)  (const void  * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
     typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void  * GGML_RESTRICT y, int64_t k);

@@ -2215,6 +2266,37 @@ extern "C" {

     GGML_API const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type);

+    // ggml threadpool
+    // TODO: currently, only a few functions are in the base ggml API, while the rest are in the CPU backend
+    //       the goal should be to create an API that other backends can use, and then move everything to the ggml base
+
+    // scheduling priorities
+    enum ggml_sched_priority {
+        GGML_SCHED_PRIO_NORMAL,
+        GGML_SCHED_PRIO_MEDIUM,
+        GGML_SCHED_PRIO_HIGH,
+        GGML_SCHED_PRIO_REALTIME
+    };
+
+    // threadpool params
+    // Use ggml_threadpool_params_default() or ggml_threadpool_params_init() to populate the defaults
+    struct ggml_threadpool_params {
+        bool cpumask[GGML_MAX_N_THREADS]; // mask of cpu cores (all-zeros means use default affinity settings)
+        int n_threads; // number of threads
+        enum ggml_sched_priority prio; // thread priority
+        uint32_t poll; // polling level (0 - no polling, 100 - aggressive polling)
+        bool strict_cpu; // strict cpu placement
+        bool paused; // start in paused state
+    };
+
+    struct ggml_threadpool; // forward declaration, see ggml.c
+
+    typedef struct ggml_threadpool * ggml_threadpool_t;
+
+    GGML_API struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads);
+    GGML_API void                          ggml_threadpool_params_init   (struct ggml_threadpool_params * p, int n_threads);
+    GGML_API bool                          ggml_threadpool_params_match  (const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1);
+
 #ifdef __cplusplus
 }
 #endif

diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt
index ae7d3abc8de32..bf5ee5fc273fc 100644
--- a/ggml/src/CMakeLists.txt
+++ b/ggml/src/CMakeLists.txt
@@ -24,7 +24,7 @@ if (NOT MSVC)
         endif()
     endif()
 endif()

-function(get_flags CCID CCVER)
+function(ggml_get_flags CCID CCVER)
     set(C_FLAGS "")
     set(CXX_FLAGS "")

@@ -41,6 +41,7 @@ function(get_flags CCID CCVER)
     elseif (CCID STREQUAL "GNU")
         set(C_FLAGS -Wdouble-promotion)
         set(CXX_FLAGS -Wno-array-bounds)
+
         if (CCVER VERSION_GREATER_EQUAL 8.1.0)
             list(APPEND CXX_FLAGS -Wextra-semi)
         endif()
@@ -69,7 +70,7 @@ if (GGML_ALL_WARNINGS)
         list(APPEND C_FLAGS   ${WARNING_FLAGS})
         list(APPEND CXX_FLAGS ${WARNING_FLAGS})

-        get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION})
+        ggml_get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION})

         add_compile_options("$<$<COMPILE_LANGUAGE:C>:${C_FLAGS};${GF_C_FLAGS}>"
                             "$<$<COMPILE_LANGUAGE:CXX>:${CXX_FLAGS};${GF_CXX_FLAGS}>")
@@ -193,15 +194,14 @@ endif()

 if (WIN32)
     add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
-
-    if (BUILD_SHARED_LIBS)
-        # TODO: should not use this
-        set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
-    endif()
 endif()

 # ggml

+if (GGML_BACKEND_DL AND NOT BUILD_SHARED_LIBS)
+    message(FATAL_ERROR "GGML_BACKEND_DL requires BUILD_SHARED_LIBS")
+endif()
+
 add_library(ggml-base
             ../include/ggml.h
             ../include/ggml-alloc.h
@@ -215,9 +215,7 @@ add_library(ggml-base
             ggml-threading.cpp
             ggml-threading.h
             ggml-quants.c
-            ggml-quants.h
-            ggml-aarch64.c
-            ggml-aarch64.h)
+            ggml-quants.h)

 target_include_directories(ggml-base PRIVATE .)

@@ -226,44 +224,95 @@ add_library(ggml

 target_link_libraries(ggml PUBLIC ggml-base)

+if (CMAKE_SYSTEM_NAME MATCHES "Linux")
+    target_link_libraries(ggml PRIVATE dl)
+endif()
+
+function(ggml_add_backend_library backend)
+    if (GGML_BACKEND_DL)
+        add_library(${backend} MODULE ${ARGN})
+        # write the shared library to the output directory
+        set_target_properties(${backend} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
+        target_compile_definitions(${backend} PRIVATE GGML_BACKEND_DL)
+    else()
+        add_library(${backend} ${ARGN})
+        target_link_libraries(ggml PUBLIC ${backend})
+        install(TARGETS ${backend} LIBRARY)
+    endif()
+
+    target_link_libraries(${backend} PRIVATE ggml-base)
+    target_include_directories(${backend} PRIVATE ..)
+
+    if (${BUILD_SHARED_LIBS})
+        target_compile_definitions(${backend} PRIVATE GGML_BACKEND_BUILD)
+        target_compile_definitions(${backend} PUBLIC  GGML_BACKEND_SHARED)
+    endif()
+endfunction()
+
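A usage note on ggml_add_backend_library: with GGML_BACKEND_DL enabled each backend becomes a MODULE library placed next to the executables, and nothing links against it at build time; it is discovered at runtime through the loading API added in ggml-backend.h. A hedged sketch of loading one such module explicitly (the module name and path are illustrative and platform-dependent):

    #include "ggml-backend.h"

    void try_load_cuda_backend(void) {
        // returns NULL if the library is missing or does not export a backend
        ggml_backend_reg_t reg = ggml_backend_load("./libggml-cuda.so");
        if (reg == NULL) {
            return;
        }
        // ... use the devices it registered, then optionally drop it again
        ggml_backend_unload(reg);
    }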
 function(ggml_add_backend backend)
     string(TOUPPER "GGML_${backend}" backend_id)
     if (${backend_id})
         string(TOLOWER "ggml-${backend}" backend_target)
         add_subdirectory(${backend_target})
-        # check again in case the backend disabled itself
-        # note that this should NOT be the normal behavior, in case of errors the backend should fail the build
-        # however, currently it is necessary for AMX, since it is enabled by default on llama.cpp
-        if (${backend_id})
-            message(STATUS "Including ${backend} backend")
-            if (${BUILD_SHARED_LIBS})
-                target_compile_definitions(${backend_target} PRIVATE GGML_BACKEND_BUILD)
-                target_compile_definitions(${backend_target} PUBLIC GGML_BACKEND_SHARED)
-                install(TARGETS ${backend_target} LIBRARY)
-            endif()
-            target_link_libraries(ggml PUBLIC ${backend_target})
+        message(STATUS "Including ${backend} backend")
+        if (NOT GGML_BACKEND_DL)
             string(TOUPPER "GGML_USE_${backend}" backend_use)
             target_compile_definitions(ggml PUBLIC ${backend_use})
         endif()
     endif()
 endfunction()

+function(ggml_add_cpu_backend_variant tag_name)
+    set(GGML_CPU_TAG_NAME ${tag_name})
+    # other: OPENMP LLAMAFILE CPU_HBM
+    foreach (feat NATIVE
+                  AVX AVX2 AVX_VNNI FMA F16C
+                  AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16
+                  AMX_TILE AMX_INT8 AMX_BF16)
+        set(GGML_${feat} OFF)
+    endforeach()
+
+    foreach (feat ${ARGN})
+        set(GGML_${feat} ON)
+    endforeach()
+
+    ggml_add_cpu_backend_variant_impl(${tag_name})
+endfunction()
+
 ggml_add_backend(CPU)
-ggml_add_backend(AMX)
+
+if (GGML_CPU_ALL_VARIANTS)
+    if (NOT GGML_BACKEND_DL)
+        message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL")
+    endif()
+    ggml_add_cpu_backend_variant(sandybridge    AVX)
+    ggml_add_cpu_backend_variant(haswell        AVX F16C AVX2 FMA)
+    ggml_add_cpu_backend_variant(skylakex       AVX F16C AVX2 FMA AVX512)
+    ggml_add_cpu_backend_variant(icelake        AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI)
+    if (NOT MSVC)
+        # MSVC doesn't support AVX-VNNI or AMX
+        ggml_add_cpu_backend_variant(alderlake      AVX F16C AVX2 FMA AVX_VNNI)
+        ggml_add_cpu_backend_variant(sapphirerapids AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8)
+    endif()
+else ()
+    ggml_add_cpu_backend_variant_impl("")
+endif()
+
 ggml_add_backend(BLAS)
 ggml_add_backend(CANN)
 ggml_add_backend(CUDA)
 ggml_add_backend(HIP)
 ggml_add_backend(Kompute)
 ggml_add_backend(METAL)
+ggml_add_backend(MUSA)
 ggml_add_backend(RPC)
 ggml_add_backend(SYCL)
 ggml_add_backend(Vulkan)
-ggml_add_backend(MUSA)
+ggml_add_backend(OpenCL)

 foreach (target ggml-base ggml)
     target_include_directories(${target} PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include> $<INSTALL_INTERFACE:include>)
-    target_compile_features   (${target} PRIVATE c_std_11) # don't bump
+    target_compile_features   (${target} PRIVATE c_std_11 cxx_std_17) # don't bump
 endforeach()

 target_link_libraries(ggml-base PRIVATE Threads::Threads)

diff --git a/ggml/src/ggml-aarch64.c b/ggml/src/ggml-aarch64.c
deleted file mode 100644
index 0139120519cea..0000000000000
--- a/ggml/src/ggml-aarch64.c
+++ /dev/null
@@ -1,129 +0,0 @@
-#define GGML_COMMON_DECL_C
-#include "ggml-common.h"
-
-#include "ggml-aarch64.h"
-#include "ggml-impl.h"
-#include "ggml-quants.h"
-#include <string.h>
-
-#define UNUSED GGML_UNUSED
-
-static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave) {
-    block_q4_0x4 out;
-
-    for (int i = 0; i < 4; i++) {
-        out.d[i] = in[i].d;
-    }
-
-    const int end = QK4_0 * 2 / blck_size_interleave;
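    // worked example for the interleave loop below (QK4_0 = 32):
    // each block_q4_0 carries QK4_0/2 = 16 quant bytes, so the four source rows
    // together hold 64 = QK4_0 * 2 bytes and, for blck_size_interleave == 8,
    // the loop runs end = 64 / 8 = 8 times, copying one 8-byte run per step:
    //   i = 0..3 -> src_id 0..3, src_offset 0, dst_offset  0,  8, 16, 24
    //   i = 4..7 -> src_id 0..3, src_offset 8, dst_offset 32, 40, 48, 56
    // i.e. the four rows are woven together eight bytes at a time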
- - if (blck_size_interleave == 8) { - const uint64_t xor_mask = 0x8888888888888888ULL; - for (int i = 0; i < end; ++i) { - int src_id = i % 4; - int src_offset = (i / 4) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - uint64_t elems; - // Using memcpy to avoid unaligned memory accesses - memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); - elems ^= xor_mask; - memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); - } - } else if (blck_size_interleave == 4) { - const uint32_t xor_mask = 0x88888888; - for (int i = 0; i < end; ++i) { - int src_id = i % 4; - int src_offset = (i / 4) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - uint32_t elems; - memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint32_t)); - elems ^= xor_mask; - memcpy(&out.qs[dst_offset], &elems, sizeof(uint32_t)); - } - } else { - GGML_ASSERT(false); - } - - return out; -} - -// interleave 8 block_q4_0s in blocks of blck_size_interleave -// returns an interleaved block_q4_0x8 -// in the interleaved block_q4_0x8, place deltas for 8 block_q4_0 blocks -// first, then interleave quants from 8 block_q4_0s in blocks of blck_size_interleave -static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_interleave) { - block_q4_0x8 out; - - for (int i = 0; i < 8; i++) { - out.d[i] = in[i].d; - } - - const int end = QK4_0 * 4 / blck_size_interleave; - const uint64_t xor_mask = 0x8888888888888888ULL; - - for (int i = 0; i < end; ++i) { - int src_id = i % 8; - int src_offset = (i / 8) * blck_size_interleave; - int dst_offset = i * blck_size_interleave; - - uint64_t elems; - memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t)); - elems ^= xor_mask; - memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t)); - } - - return out; -} - -static size_t quantize_q4_0_nr_bl(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, int nrows_interleaved, int blck_size_interleave) { - assert(n_per_row % QK4_0 == 0); - const int nb = n_per_row / QK4_0; - - void * out_ptr = NULL; - if (nrows_interleaved == 8) { - out_ptr = (block_q4_0x8 *) dst; - } - else if (nrows_interleaved == 4) { - out_ptr = (block_q4_0x4 *) dst; - } - assert(nrows_interleaved <= 8); - block_q4_0 dst_tmp[8]; - - for (int b = 0; b < (nrow * n_per_row); b += nrows_interleaved * n_per_row) { - - for (int64_t x = 0; x < nb; x++) { - - for (int i = 0; i < nrows_interleaved; i++ ) { - quantize_row_q4_0_ref(src + b + i * n_per_row + x * QK4_0, (block_q4_0 *) dst_tmp + i, QK4_0); - } - - if (nrows_interleaved == 8) { - *(block_q4_0x8 *) out_ptr = make_block_q4_0x8(dst_tmp, blck_size_interleave); - out_ptr = (block_q4_0x8 *) out_ptr + 1; - } - else if (nrows_interleaved == 4) { - *(block_q4_0x4 *) out_ptr = make_block_q4_0x4(dst_tmp, blck_size_interleave); - out_ptr = (block_q4_0x4 *) out_ptr + 1; - } - } - } - - return ((nrow * n_per_row) / QK4_0 * sizeof(block_q4_0)); -} - -size_t quantize_q4_0_4x4(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { - UNUSED(quant_weights); - return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 4, 4); -} - -size_t quantize_q4_0_4x8(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { - UNUSED(quant_weights); - return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 4, 8); -} - -size_t quantize_q4_0_8x8(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) { - 
UNUSED(quant_weights); - return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 8, 8); -} diff --git a/ggml/src/ggml-aarch64.h b/ggml/src/ggml-aarch64.h deleted file mode 100644 index a578685911be6..0000000000000 --- a/ggml/src/ggml-aarch64.h +++ /dev/null @@ -1,19 +0,0 @@ -#pragma once - -#include "ggml.h" - -// GGML internal header - -#ifdef __cplusplus -extern "C" { -#endif - -// Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization") -size_t quantize_q4_0_4x4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q4_0_4x8(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); -size_t quantize_q4_0_8x8(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix); - -#ifdef __cplusplus -} -#endif - diff --git a/ggml/src/ggml-alloc.c b/ggml/src/ggml-alloc.c index 2b2240be85dd3..8dc8226ac4932 100644 --- a/ggml/src/ggml-alloc.c +++ b/ggml/src/ggml-alloc.c @@ -534,7 +534,6 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor size_t offset = ggml_dyn_tallocr_alloc(alloc, size, node); hn->buffer_id = buffer_id; hn->offset = offset; - return; } } diff --git a/ggml/src/ggml-amx/CMakeLists.txt b/ggml/src/ggml-amx/CMakeLists.txt deleted file mode 100644 index d6676f3f67b20..0000000000000 --- a/ggml/src/ggml-amx/CMakeLists.txt +++ /dev/null @@ -1,107 +0,0 @@ -if (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR - (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64)$") AND - CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 11.0) - message(STATUS "Using AMX") - - file(GLOB GGML_HEADERS_AMX "*.h") - list(APPEND GGML_HEADERS_AMX "../../include/ggml-amx.h") - - file(GLOB GGML_SOURCES_AMX "*.cpp") - - add_library(ggml-amx - ${GGML_HEADERS_AMX} - ${GGML_SOURCES_AMX}) - - target_link_libraries(ggml-amx PRIVATE ggml-base) - target_include_directories(ggml-amx PRIVATE . ..) - - # this is duplicated from the CPU backend, since the AMX backend also depends on the architecture flags - # TODO: integrate AMX backend into the CPU backend - if (MSVC) - # instruction set detection for MSVC only - if (GGML_NATIVE) - # TODO: improve, should not reference files from the parent folder - include(../ggml-cpu/cmake/FindSIMD.cmake) - endif () - if (GGML_AVX512) - list(APPEND ARCH_FLAGS /arch:AVX512) - # MSVC has no compile-time flags enabling specific - # AVX512 extensions, neither it defines the - # macros corresponding to the extensions. - # Do it manually. 
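    # note on the block below: MSVC's /arch switches do not define the
    # per-extension feature macros that the sources test with #ifdef, so they
    # are injected by hand; each macro is added twice because the first
    # $<COMPILE_LANGUAGE:C> generator expression only applies to C sources and
    # the second $<COMPILE_LANGUAGE:CXX> one only applies to C++ sources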
-        if (GGML_AVX512_VBMI)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
-        endif()
-        if (GGML_AVX512_VNNI)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
-        endif()
-        if (GGML_AVX512_BF16)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512BF16__>)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512BF16__>)
-        endif()
-        if (GGML_AMX_TILE)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_TILE__>)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_TILE__>)
-        endif()
-        if (GGML_AMX_INT8)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_INT8__>)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_INT8__>)
-        endif()
-        if (GGML_AMX_BF16)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_BF16__>)
-            add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_BF16__>)
-        endif()
-    elseif (GGML_AVX2)
-        list(APPEND ARCH_FLAGS /arch:AVX2)
-    elseif (GGML_AVX)
-        list(APPEND ARCH_FLAGS /arch:AVX)
-    endif()
-    else()
-        if (GGML_NATIVE)
-            list(APPEND ARCH_FLAGS -march=native)
-        endif()
-        if (GGML_F16C)
-            list(APPEND ARCH_FLAGS -mf16c)
-        endif()
-        if (GGML_FMA)
-            list(APPEND ARCH_FLAGS -mfma)
-        endif()
-        if (GGML_AVX)
-            list(APPEND ARCH_FLAGS -mavx)
-        endif()
-        if (GGML_AVX2)
-            list(APPEND ARCH_FLAGS -mavx2)
-        endif()
-        if (GGML_AVX512)
-            list(APPEND ARCH_FLAGS -mavx512f)
-            list(APPEND ARCH_FLAGS -mavx512dq)
-            list(APPEND ARCH_FLAGS -mavx512bw)
-        endif()
-        if (GGML_AVX512_VBMI)
-            list(APPEND ARCH_FLAGS -mavx512vbmi)
-        endif()
-        if (GGML_AVX512_VNNI)
-            list(APPEND ARCH_FLAGS -mavx512vnni)
-        endif()
-        if (GGML_AVX512_BF16)
-            list(APPEND ARCH_FLAGS -mavx512bf16)
-        endif()
-        if (GGML_AMX_TILE)
-            list(APPEND ARCH_FLAGS -mamx-tile)
-        endif()
-        if (GGML_AMX_INT8)
-            list(APPEND ARCH_FLAGS -mamx-int8)
-        endif()
-        if (GGML_AMX_BF16)
-            list(APPEND ARCH_FLAGS -mamx-bf16)
-        endif()
-    endif()
-
-    target_compile_options(ggml-amx PRIVATE ${ARCH_FLAGS})
-else()
-    set(GGML_AMX OFF PARENT_SCOPE)
-    message(WARNING "AMX requires x86 and gcc version > 11.0. Turning off GGML_AMX.")
-endif()

diff --git a/ggml/src/ggml-amx/ggml-amx.cpp b/ggml/src/ggml-amx/ggml-amx.cpp
deleted file mode 100644
index 8568e7965fd2e..0000000000000
--- a/ggml/src/ggml-amx/ggml-amx.cpp
+++ /dev/null
@@ -1,446 +0,0 @@
-#include "ggml-amx.h"
-#include "ggml-amx/common.h"
-#include "ggml-amx/mmq.h"
-#include "ggml-backend-impl.h"
-#include "ggml-impl.h"
-
-#if defined(__gnu_linux__)
-#include <sys/syscall.h>
-#include <unistd.h>
-#endif
-
-#include <cstdlib>
-#include <cstring>
-#include <memory>
-
-#if defined(__AMX_INT8__)
-
-// AMX buffer interface
-static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
-    free(buffer->context);
-}
-
-static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
-    return (void *)(buffer->context);
-}
-
-static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
-    memset((char *)tensor->data + offset, value, size);
-
-    GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
-    if (qtype_has_amx_kernels(tensor->type)) {
-        ggml_backend_amx_convert_weight(tensor, data, offset, size);
-    } else {
-        memcpy((char *)tensor->data + offset, data, size);
-    }
-
-    GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
-    GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
-    memcpy(data, (const char *)tensor->data + offset, size);
-
-    GGML_UNUSED(buffer);
-}
-
-static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
-    if (ggml_backend_buffer_is_host(src->buffer)) {
-        if (qtype_has_amx_kernels(src->type)) {
-            ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_backend_amx_get_alloc_size(dst));
-        } else {
-            memcpy(dst->data, src->data, ggml_nbytes(src));
-        }
-        return true;
-    }
-    return false;
-
-    GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
-    memset(buffer->context, value, buffer->size);
-}
-
-static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
-    /* .free_buffer   = */ ggml_backend_amx_buffer_free_buffer,
-    /* .get_base      = */ ggml_backend_amx_buffer_get_base,
-    /* .init_tensor   = */ NULL, // no initialization required
-    /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor,
-    /* .set_tensor    = */ ggml_backend_amx_buffer_set_tensor,
-    /* .get_tensor    = */ ggml_backend_amx_buffer_get_tensor,
-    /* .cpy_tensor    = */ ggml_backend_amx_buffer_cpy_tensor,
-    /* .clear         = */ ggml_backend_amx_buffer_clear,
-    /* .reset         = */ NULL,
-};
-
-static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
-    return "AMX";
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    void * data = aligned_alloc(TENSOR_ALIGNMENT, size);
-    if (data == NULL) {
-        fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
-        return NULL;
-    }
-
-    return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size);
-}
-
-static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
-    return TENSOR_ALIGNMENT;
-
-    GGML_UNUSED(buft);
-}
-
-static size_t
ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) { - return ggml_backend_amx_get_alloc_size(tensor); - - GGML_UNUSED(buft); -} - -static bool ggml_backend_amx_buffer_type_is_host(ggml_backend_buffer_type_t buft) { - return false; - - GGML_UNUSED(buft); -} - -ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() { - static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = { - /* .iface = */ { - /* .get_name = */ ggml_backend_amx_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment, - /* .get_max_size = */ NULL, // defaults to SIZE_MAX - /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size, - /* .is_host = */ ggml_backend_amx_buffer_type_is_host, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_amx_reg(), 0), - /* .context = */ NULL, - }; - - return &ggml_backend_buffer_type_amx; -} - -// backend interface - -static const char * ggml_backend_amx_name(ggml_backend_t backend) { - return "AMX"; - - GGML_UNUSED(backend); -} - -static void ggml_backend_amx_free(ggml_backend_t backend) { - ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend->context; - delete ctx; - delete backend; -} - -static enum ggml_status ggml_backend_amx_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { - ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend->context; - - for (int i = 0; i < cgraph->n_nodes; i++) { - struct ggml_tensor * node = cgraph->nodes[i]; - - switch (node->op) { - case GGML_OP_MUL_MAT: - ggml_backend_amx_mul_mat(ctx, node); - break; - - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - break; - - default: - fprintf(stderr, "%s: unsupported op %s\n", __func__, ggml_op_desc(node)); - GGML_ASSERT(false); - } - } - - return GGML_STATUS_SUCCESS; - - GGML_UNUSED(backend); -} - -static struct ggml_backend_i ggml_backend_amx_i = { - /* .get_name = */ ggml_backend_amx_name, - /* .free = */ ggml_backend_amx_free, - /* .set_tensor_async = */ NULL, - /* .get_tensor_async = */ NULL, - /* .cpy_tensor_async = */ NULL, - /* .synchronize = */ NULL, - /* .graph_plan_create = */ NULL, - /* .graph_plan_free = */ NULL, - /* .graph_plan_update = */ NULL, - /* .graph_plan_compute = */ NULL, - /* .graph_compute = */ ggml_backend_amx_graph_compute, - /* .event_record = */ NULL, - /* .event_wait = */ NULL, -}; - -static ggml_guid_t ggml_backend_amx_guid() { - static ggml_guid guid = { 0x13, 0xb8, 0xa4, 0xc4, 0xba, 0xfe, 0x51, 0x67, 0x87, 0x44, 0x55, 0x15, 0xb2, 0x35, 0x62, 0x3e }; - return &guid; -} - -#define ARCH_GET_XCOMP_PERM 0x1022 -#define ARCH_REQ_XCOMP_PERM 0x1023 -#define XFEATURE_XTILECFG 17 -#define XFEATURE_XTILEDATA 18 - -static bool ggml_amx_init() { -#if defined(__gnu_linux__) - if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) { - fprintf(stderr, "AMX is not ready to be used!\n"); - return false; - } - return true; -#elif defined(_WIN32) - return true; -#endif -} - -ggml_backend_t ggml_backend_amx_init() { - - // invoke a Linux system call to request access to AMX features - ggml_amx_init(); - - // backend context - ggml_backend_amx_context * ctx = new ggml_backend_amx_context; - - // ggml amx backend - ggml_backend_t backend = new ggml_backend { - /* .guid = */ ggml_backend_amx_guid(), - /* .interface = */ ggml_backend_amx_i, - /* .device = */ 
ggml_backend_reg_dev_get(ggml_backend_amx_reg(), 0), - /* .context = */ ctx, - }; - - return backend; -} - -bool ggml_backend_is_amx(ggml_backend_t backend) { - return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_amx_guid()); -} - -void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads) { - GGML_ASSERT(ggml_backend_is_amx(backend_amx)); - - ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend_amx->context; - ctx->n_threads = n_threads; -} - -// device interface - -static const char * ggml_backend_amx_device_get_name(ggml_backend_dev_t dev) { - return "AMX"; - - GGML_UNUSED(dev); -} - -static const char * ggml_backend_amx_device_get_description(ggml_backend_dev_t dev) { - return "Intel Advanced Matrix Extensions"; - - GGML_UNUSED(dev); -} - -static void ggml_backend_amx_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { - // TODO - *free = 0; - *total = 0; - - GGML_UNUSED(dev); -} - -static enum ggml_backend_dev_type ggml_backend_amx_device_get_type(ggml_backend_dev_t dev) { - return GGML_BACKEND_DEVICE_TYPE_ACCEL; - - GGML_UNUSED(dev); -} - -static void ggml_backend_amx_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { - props->name = ggml_backend_amx_device_get_name(dev); - props->description = ggml_backend_amx_device_get_description(dev); - props->type = ggml_backend_amx_device_get_type(dev); - ggml_backend_amx_device_get_memory(dev, &props->memory_free, &props->memory_total); - - // `buffer_from_host_ptr` is intended to be used in mmap, when memory layout unchanged - props->caps = { - /* .async = */ false, - /* .host_buffer = */ false, - /* .buffer_from_host_ptr = */ false, - /* .events = */ false, - }; -} - -static ggml_backend_t ggml_backend_amx_device_init(ggml_backend_dev_t dev, const char * params) { - return ggml_backend_amx_init(); - - GGML_UNUSED(dev); - GGML_UNUSED(params); -} - -static ggml_backend_buffer_type_t ggml_backend_amx_device_get_buffer_type(ggml_backend_dev_t dev) { - return ggml_backend_amx_buffer_type(); - - GGML_UNUSED(dev); -} - -static bool ggml_backend_amx_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { - - // handle only 2d gemm for now - auto is_contiguous_2d = [](const struct ggml_tensor * t) { - return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1; - }; - - switch (op->op) { - case GGML_OP_NONE: - case GGML_OP_RESHAPE: - case GGML_OP_VIEW: - case GGML_OP_PERMUTE: - case GGML_OP_TRANSPOSE: - return true; - - case GGML_OP_MUL_MAT: { - const struct ggml_tensor * src0 = op->src[0]; - const struct ggml_tensor * src1 = op->src[1]; - - const enum ggml_type type = src0->type; - const int64_t ne0 = op->ne[0]; - - // amx kernels enables for Q4_0, Q4_1, Q8_0, F16 - // Q4_K, Q5_K, Q6_K, IQ4_XS enabled for QK_K = 256 - bool has_amx_kernels = qtype_has_amx_kernels(type) || (type == GGML_TYPE_F16); - - bool can_use_amx = - is_contiguous_2d(src0) && // src0 must be contiguous - is_contiguous_2d(src1) && // src1 must be contiguous - src1->type == GGML_TYPE_F32 && // src1 must be float32 - has_amx_kernels && // with amx kernel impls - ne0 % (TILE_N * 2) == 0; // out_features is 32x - - return can_use_amx; - } - default: - return false; - } - - GGML_UNUSED(dev); -} - -static bool ggml_backend_amx_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { - return buft->iface.get_name == ggml_backend_amx_buffer_type_get_name; - - GGML_UNUSED(dev); -} - -static const struct ggml_backend_device_i 
ggml_backend_amx_device_i = { - /* .get_name = */ ggml_backend_amx_device_get_name, - /* .get_description = */ ggml_backend_amx_device_get_description, - /* .get_memory = */ ggml_backend_amx_device_get_memory, - /* .get_type = */ ggml_backend_amx_device_get_type, - /* .get_props = */ ggml_backend_amx_device_get_props, - /* .init_backend = */ ggml_backend_amx_device_init, - /* .get_buffer_type = */ ggml_backend_amx_device_get_buffer_type, - /* .get_host_buffer_type = */ NULL, - /* .buffer_from_host_ptr = */ NULL, - /* .supports_op = */ ggml_backend_amx_device_supports_op, - /* .supports_buft = */ ggml_backend_amx_device_supports_buft, - /* .offload_op = */ NULL, - /* .event_new = */ NULL, - /* .event_free = */ NULL, - /* .event_synchronize = */ NULL, -}; - -// backend reg interface - -static const char * ggml_backend_amx_reg_get_name(ggml_backend_reg_t reg) { - return "AMX"; - - GGML_UNUSED(reg); -} - -static size_t ggml_backend_amx_reg_get_device_count(ggml_backend_reg_t reg) { - return 1; - - GGML_UNUSED(reg); -} - -static ggml_backend_dev_t ggml_backend_amx_reg_get_device(ggml_backend_reg_t reg, size_t index) { - GGML_ASSERT(index == 0); - - static ggml_backend_device ggml_backend_amx_device = { - /* .iface = */ ggml_backend_amx_device_i, - /* .reg = */ reg, - /* .context = */ nullptr, - }; - - return &ggml_backend_amx_device; - - GGML_UNUSED(reg); - GGML_UNUSED(index); -} - -static void * ggml_backend_amx_get_proc_address(ggml_backend_reg_t reg, const char * name) { - if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) { - return (void *)ggml_backend_amx_set_n_threads; - } - return NULL; - - GGML_UNUSED(reg); - GGML_UNUSED(name); -} - -static const struct ggml_backend_reg_i ggml_backend_amx_reg_i = { - /* .get_name = */ ggml_backend_amx_reg_get_name, - /* .get_device_count = */ ggml_backend_amx_reg_get_device_count, - /* .get_device = */ ggml_backend_amx_reg_get_device, - /* .get_proc_address = */ ggml_backend_amx_get_proc_address, -}; - -ggml_backend_reg_t ggml_backend_amx_reg(void) { - static struct ggml_backend_reg ggml_backend_amx_reg = { - /* .iface = */ ggml_backend_amx_reg_i, - /* .context = */ NULL, - }; - - return &ggml_backend_amx_reg; -} - -#else // if defined(__AMX_INT8__) - -ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void) { - return nullptr; -} - -bool ggml_backend_is_amx(ggml_backend_t backend) { - GGML_UNUSED(backend); - return false; -} - -ggml_backend_t ggml_backend_amx_init(void) { - fprintf(stderr, "GGML is not compiled with AMX support!\n"); - return nullptr; -} - -void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads) { - fprintf(stderr, "GGML is not compiled with AMX support!\n"); - - GGML_UNUSED(backend_amx); - GGML_UNUSED(n_threads); -} - -ggml_backend_reg_t ggml_backend_amx_reg(void) { - return nullptr; -} - -#endif diff --git a/ggml/src/ggml-backend-impl.h b/ggml/src/ggml-backend-impl.h index fa8d5b7fb68c9..36d72e95f028c 100644 --- a/ggml/src/ggml-backend-impl.h +++ b/ggml/src/ggml-backend-impl.h @@ -8,6 +8,8 @@ extern "C" { #endif + #define GGML_BACKEND_API_VERSION 1 + // // Backend buffer type // @@ -63,20 +65,20 @@ extern "C" { enum ggml_backend_buffer_usage usage; }; - ggml_backend_buffer_t ggml_backend_buffer_init( + GGML_API ggml_backend_buffer_t ggml_backend_buffer_init( ggml_backend_buffer_type_t buft, struct ggml_backend_buffer_i iface, void * context, size_t size); // do not use directly, use ggml_backend_tensor_copy instead - bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct 
ggml_tensor * dst); + GGML_API bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst); // multi-buffer // buffer that contains a collection of buffers - ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers); - bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer); - void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); + GGML_API ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers); + GGML_API bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); // // Backend (stream) @@ -199,17 +201,55 @@ extern "C" { }; struct ggml_backend_reg { - // int api_version; // TODO: for dynamic loading + int api_version; // initialize to GGML_BACKEND_API_VERSION struct ggml_backend_reg_i iface; void * context; }; - // Internal backend registry API - void ggml_backend_register(ggml_backend_reg_t reg); - void ggml_backend_device_register(ggml_backend_dev_t device); - // TODO: backends can be loaded as a dynamic library, in which case it needs to export this function - // typedef ggml_backend_register_t * (*ggml_backend_init)(void); + GGML_API void ggml_backend_register(ggml_backend_reg_t reg); + GGML_API void ggml_backend_device_register(ggml_backend_dev_t device); + + // Backend dynamic loading support + + // Initialize the backend + typedef ggml_backend_reg_t (*ggml_backend_init_t)(void); + // Optional: obtain a score for the backend based on the system configuration + // Higher scores are preferred, 0 means the backend is not supported in the current system + typedef int (*ggml_backend_score_t)(void); + +#ifdef GGML_BACKEND_DL +# ifdef __cplusplus +# define GGML_BACKEND_DL_IMPL(reg_fn) \ + extern "C" { \ + GGML_BACKEND_API ggml_backend_reg_t ggml_backend_init(void); \ + } \ + ggml_backend_reg_t ggml_backend_init(void) { \ + return reg_fn(); \ + } +# define GGML_BACKEND_DL_SCORE_IMPL(score_fn) \ + extern "C" { \ + GGML_BACKEND_API int ggml_backend_score(void); \ + } \ + int ggml_backend_score(void) { \ + return score_fn(); \ + } +# else +# define GGML_BACKEND_DL_IMPL(reg_fn) \ + GGML_BACKEND_API ggml_backend_reg_t ggml_backend_init(void); \ + ggml_backend_reg_t ggml_backend_init(void) { \ + return reg_fn(); \ + } +# define GGML_BACKEND_DL_SCORE_IMPL(score_fn) \ + GGML_BACKEND_API int ggml_backend_score(void); \ + int ggml_backend_score(void) { \ + return score_fn(); \ + } +# endif +#else +# define GGML_BACKEND_DL_IMPL(reg_fn) +# define GGML_BACKEND_DL_SCORE_IMPL(score_fn) +#endif #ifdef __cplusplus } diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index 63e9d82017457..66927148abcb9 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -1,11 +1,34 @@ #include "ggml-backend-impl.h" #include "ggml-backend.h" -#include "ggml-cpu.h" #include "ggml-impl.h" +#include <algorithm> +#include <codecvt> #include <cstring> +#include <filesystem> +#include <locale> +#include <memory> +#include <string> +#include <type_traits> #include <vector> +#ifdef _WIN32 +# define WIN32_LEAN_AND_MEAN +# ifndef NOMINMAX +# define NOMINMAX +# endif +# include <windows.h> +#elif defined(__APPLE__) +# include <mach-o/dyld.h> +# include <dlfcn.h> +#else +# include <dlfcn.h> +# include <unistd.h> +#endif + // Backend registry +#ifdef GGML_USE_CPU +#include "ggml-cpu.h" +#endif #ifdef GGML_USE_CUDA #include "ggml-cuda.h" @@ -23,6 +46,10 @@ #include "ggml-vulkan.h" #endif +#ifdef GGML_USE_OPENCL +#include "ggml-opencl.h" +#endif + #ifdef GGML_USE_BLAS #include "ggml-blas.h" #endif @@ -31,10 +58,6 @@ #include "ggml-rpc.h" #endif -#ifdef GGML_USE_AMX -# include "ggml-amx.h" -#endif - #ifdef GGML_USE_CANN #include "ggml-cann.h" #endif @@ -43,8 +66,75 @@ #include "ggml-kompute.h" #endif +#ifdef _WIN32 + +using dl_handle = std::remove_pointer_t<HMODULE>; + +struct dl_handle_deleter { + void operator()(HMODULE handle) { + FreeLibrary(handle); + } +}; + +static dl_handle * dl_load_library(const std::wstring & path) { + // suppress error dialogs for missing DLLs + DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS); + SetErrorMode(old_mode | SEM_FAILCRITICALERRORS); + + HMODULE handle = LoadLibraryW(path.c_str()); + + SetErrorMode(old_mode); + + return handle; } + +static dl_handle * dl_load_library(const std::string & path) { + std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter; + return dl_load_library(converter.from_bytes(path)); } + +static void * dl_get_sym(dl_handle * handle, const char * name) { + DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS); + SetErrorMode(old_mode | SEM_FAILCRITICALERRORS); + + void * p = (void *) GetProcAddress(handle, name); + + SetErrorMode(old_mode); + + return p; } + +#else + +using dl_handle = void; + +struct dl_handle_deleter { + void operator()(void * handle) { + dlclose(handle); + } +}; + +static void * dl_load_library(const std::string & path) { + dl_handle * handle = dlopen(path.c_str(), RTLD_NOW | RTLD_LOCAL); + + return handle; } + +static void * dl_get_sym(dl_handle * handle, const char * name) { + return dlsym(handle, name); } + +#endif + +using dl_handle_ptr = std::unique_ptr<dl_handle, dl_handle_deleter>; + +struct ggml_backend_reg_entry { + ggml_backend_reg_t reg; + dl_handle_ptr handle; +}; + struct ggml_backend_registry { - std::vector<ggml_backend_reg_t> backends; + std::vector<ggml_backend_reg_entry> backends; std::vector<ggml_backend_dev_t> devices; ggml_backend_registry() { @@ -60,6 +150,9 @@ struct ggml_backend_registry { #ifdef GGML_USE_VULKAN register_backend(ggml_backend_vk_reg()); #endif +#ifdef GGML_USE_OPENCL + register_backend(ggml_backend_opencl_reg()); +#endif #ifdef GGML_USE_CANN register_backend(ggml_backend_cann_reg()); #endif @@ -69,17 +162,25 @@ struct ggml_backend_registry { #ifdef GGML_USE_RPC register_backend(ggml_backend_rpc_reg()); #endif -#ifdef GGML_USE_AMX - register_backend(ggml_backend_amx_reg()); -#endif #ifdef GGML_USE_KOMPUTE register_backend(ggml_backend_kompute_reg()); #endif - +#ifdef GGML_USE_CPU register_backend(ggml_backend_cpu_reg()); +#endif + } + + ~ggml_backend_registry() { + // FIXME: backends cannot be safely unloaded without a function to destroy all the backend resources, + // since backend threads may still be running and accessing resources from the dynamic library + for (auto & entry : backends) { + if (entry.handle) { + entry.handle.release(); // NOLINT + } + } } - void register_backend(ggml_backend_reg_t reg) { + void register_backend(ggml_backend_reg_t reg, dl_handle_ptr handle = nullptr) { if (!reg) { return; } @@ -88,7 +189,7 @@ struct ggml_backend_registry { GGML_LOG_DEBUG("%s: registered backend %s (%zu devices)\n", __func__, ggml_backend_reg_name(reg), ggml_backend_reg_dev_count(reg)); #endif - backends.push_back(reg); + backends.push_back({ reg, std::move(handle) }); for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) { register_device(ggml_backend_reg_dev_get(reg, i)); } @@ -100,6 +201,76 @@ struct ggml_backend_registry { #endif devices.push_back(device); } + + ggml_backend_reg_t load_backend(const char * path, bool silent) {
dl_handle_ptr handle { dl_load_library(path) }; + if (!handle) { + if (!silent) { + GGML_LOG_ERROR("%s: failed to load %s\n", __func__, path); + } + return nullptr; + } + + auto score_fn = (ggml_backend_score_t) dl_get_sym(handle.get(), "ggml_backend_score"); + if (score_fn && score_fn() == 0) { + if (!silent) { + GGML_LOG_INFO("%s: backend %s is not supported on this system\n", __func__, path); + } + return nullptr; + } + + auto backend_init_fn = (ggml_backend_init_t) dl_get_sym(handle.get(), "ggml_backend_init"); + if (!backend_init_fn) { + if (!silent) { + GGML_LOG_ERROR("%s: failed to find ggml_backend_init in %s\n", __func__, path); + } + return nullptr; + } + + ggml_backend_reg_t reg = backend_init_fn(); + if (!reg || reg->api_version != GGML_BACKEND_API_VERSION) { + if (!silent) { + if (!reg) { + GGML_LOG_ERROR("%s: failed to initialize backend from %s: ggml_backend_init returned NULL\n", __func__, path); + } else { + GGML_LOG_ERROR("%s: failed to initialize backend from %s: incompatible API version (backend: %d, current: %d)\n", + __func__, path, reg->api_version, GGML_BACKEND_API_VERSION); + } + } + return nullptr; + } + + GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), path); + + register_backend(reg, std::move(handle)); + + return reg; + } + + void unload_backend(ggml_backend_reg_t reg, bool silent) { + auto it = std::find_if(backends.begin(), backends.end(), + [reg](const ggml_backend_reg_entry & entry) { return entry.reg == reg; }); + + if (it == backends.end()) { + if (!silent) { + GGML_LOG_ERROR("%s: backend not found\n", __func__); + } + return; + } + + if (!silent) { + GGML_LOG_DEBUG("%s: unloading %s backend\n", __func__, ggml_backend_reg_name(reg)); + } + + // remove devices + devices.erase( + std::remove_if(devices.begin(), devices.end(), + [reg](ggml_backend_dev_t dev) { return ggml_backend_dev_backend_reg(dev) == reg; }), + devices.end()); + + // remove backend + backends.erase(it); + } }; static ggml_backend_registry & get_reg() { @@ -117,23 +288,32 @@ void ggml_backend_device_register(ggml_backend_dev_t device) { } // Backend (reg) enumeration +static bool striequals(const char * a, const char * b) { + for (; *a && *b; a++, b++) { + if (std::tolower(*a) != std::tolower(*b)) { + return false; + } + } + return *a == *b; +} + size_t ggml_backend_reg_count() { return get_reg().backends.size(); } ggml_backend_reg_t ggml_backend_reg_get(size_t index) { GGML_ASSERT(index < ggml_backend_reg_count()); - return get_reg().backends[index]; + return get_reg().backends[index].reg; } ggml_backend_reg_t ggml_backend_reg_by_name(const char * name) { for (size_t i = 0; i < ggml_backend_reg_count(); i++) { ggml_backend_reg_t reg = ggml_backend_reg_get(i); - if (std::strcmp(ggml_backend_reg_name(reg), name) == 0) { + if (striequals(ggml_backend_reg_name(reg), name)) { return reg; } } - return NULL; + return nullptr; } // Device enumeration @@ -149,11 +329,11 @@ ggml_backend_dev_t ggml_backend_dev_get(size_t index) { ggml_backend_dev_t ggml_backend_dev_by_name(const char * name) { for (size_t i = 0; i < ggml_backend_dev_count(); i++) { ggml_backend_dev_t dev = ggml_backend_dev_get(i); - if (strcmp(ggml_backend_dev_name(dev), name) == 0) { + if (striequals(ggml_backend_dev_name(dev), name)) { return dev; } } - return NULL; + return nullptr; } ggml_backend_dev_t ggml_backend_dev_by_type(enum ggml_backend_dev_type type) { @@ -163,14 +343,14 @@ ggml_backend_dev_t ggml_backend_dev_by_type(enum ggml_backend_dev_type type) { return dev; } } - return NULL; 
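// Editor's usage sketch (illustrative, not part of this patch): once
// backends are registered, an application enumerates devices and picks
// one, e.g. via the convenience helpers defined just below:
//
//     ggml_backend_load_all();                       // discover loadable backends
//     for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
//         ggml_backend_dev_t d = ggml_backend_dev_get(i);
//         printf("device %zu: %s\n", i, ggml_backend_dev_name(d));
//     }
//     ggml_backend_t backend = ggml_backend_init_best(); // GPU if present, else CPU
//     // ... build and compute graphs ...
//     ggml_backend_free(backend);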
+ return nullptr; } // Convenience functions ggml_backend_t ggml_backend_init_by_name(const char * name, const char * params) { ggml_backend_dev_t dev = ggml_backend_dev_by_name(name); if (!dev) { - return NULL; + return nullptr; } return ggml_backend_dev_init(dev, params); } @@ -178,7 +358,7 @@ ggml_backend_t ggml_backend_init_by_type(enum ggml_backend_dev_type type, const char * params) { ggml_backend_dev_t dev = ggml_backend_dev_by_type(type); if (!dev) { - return NULL; + return nullptr; } return ggml_backend_dev_init(dev, params); } @@ -189,7 +369,184 @@ ggml_backend_t ggml_backend_init_best(void) { dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); } if (!dev) { - return NULL; + return nullptr; + } + return ggml_backend_dev_init(dev, nullptr); +} + +// Dynamic loading +ggml_backend_reg_t ggml_backend_load(const char * path) { + return get_reg().load_backend(path, false); +} + +void ggml_backend_unload(ggml_backend_reg_t reg) { + get_reg().unload_backend(reg, true); +} + +static std::string get_executable_path() { +#if defined(__APPLE__) + // get executable path + std::vector<char> path; + uint32_t size; + while (true) { + size = path.size(); + if (_NSGetExecutablePath(path.data(), &size) == 0) { + break; + } + path.resize(size); + } + std::string base_path(path.data(), size); + // remove executable name + auto last_slash = base_path.find_last_of('/'); + if (last_slash != std::string::npos) { + base_path = base_path.substr(0, last_slash); + } + return base_path + "/"; +#elif defined(__linux__) + std::string base_path = "."; + std::vector<char> path(1024); + while (true) { + // get executable path + ssize_t len = readlink("/proc/self/exe", path.data(), path.size()); + if (len == -1) { + break; + } + if (len < (ssize_t) path.size()) { + base_path = std::string(path.data(), len); + // remove executable name + auto last_slash = base_path.find_last_of('/'); + if (last_slash != std::string::npos) { + base_path = base_path.substr(0, last_slash); + } + break; + } + path.resize(path.size() * 2); + } + + return base_path + "/"; +#elif defined(_WIN32) + std::vector<char> path(MAX_PATH); + DWORD len = GetModuleFileNameA(NULL, path.data(), path.size()); + if (len == 0) { + return ""; } - return ggml_backend_dev_init(dev, NULL); + std::string base_path(path.data(), len); + // remove executable name + auto last_slash = base_path.find_last_of('\\'); + if (last_slash != std::string::npos) { + base_path = base_path.substr(0, last_slash); + } + return base_path + "\\"; +#endif +} + +static std::string backend_filename_prefix() { +#ifdef _WIN32 + return "ggml-"; +#else + return "libggml-"; +#endif +} + +static std::string backend_filename_suffix() { +#ifdef _WIN32 + return ".dll"; +#else + return ".so"; +#endif +} + +static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, const char * user_search_path) { + // enumerate all the files that match [lib]ggml-name-*.[so|dll] in the search paths + // TODO: search system paths + std::string file_prefix = backend_filename_prefix() + name + "-"; + std::vector<std::string> search_paths; + if (user_search_path == nullptr) { + search_paths.push_back("./"); + search_paths.push_back(get_executable_path()); + } else { +#if defined(_WIN32) + search_paths.push_back(std::string(user_search_path) + "\\"); +#else + search_paths.push_back(std::string(user_search_path) + "/"); +#endif + } + + int best_score = 0; + std::string best_path; + + namespace fs = std::filesystem; + for (const auto & 
search_path : search_paths) { + if (!fs::exists(search_path)) { + continue; + } + fs::directory_iterator dir_it(search_path, fs::directory_options::skip_permission_denied); + for (const auto & entry : dir_it) { + if (entry.is_regular_file()) { + std::string filename = entry.path().filename().string(); + std::string ext = entry.path().extension().string(); + if (filename.find(file_prefix) == 0 && ext == backend_filename_suffix()) { + dl_handle_ptr handle { dl_load_library(entry.path().c_str()) }; + if (!handle && !silent) { + GGML_LOG_ERROR("%s: failed to load %s\n", __func__, entry.path().string().c_str()); + } + if (handle) { + auto score_fn = (ggml_backend_score_t) dl_get_sym(handle.get(), "ggml_backend_score"); + if (score_fn) { + int s = score_fn(); +#ifndef NDEBUG + GGML_LOG_DEBUG("%s: %s score: %d\n", __func__, entry.path().string().c_str(), s); +#endif + if (s > best_score) { + best_score = s; + best_path = entry.path().string(); + } + } else { + if (!silent) { + GGML_LOG_INFO("%s: failed to find ggml_backend_score in %s\n", __func__, entry.path().string().c_str()); + } + } + } + } + } + } + } + + if (best_score == 0) { + // try to load the base backend + for (const auto & search_path : search_paths) { + std::string path = search_path + backend_filename_prefix() + name + backend_filename_suffix(); + if (fs::exists(path)) { + return get_reg().load_backend(path.c_str(), silent); + } + } + return nullptr; + } + + return get_reg().load_backend(best_path.c_str(), silent); +} + +void ggml_backend_load_all() { + ggml_backend_load_all_from_path(nullptr); +} + +void ggml_backend_load_all_from_path(const char * dir_path) { +#ifdef NDEBUG + bool silent = true; +#else + bool silent = false; +#endif + + ggml_backend_load_best("blas", silent, dir_path); + ggml_backend_load_best("cann", silent, dir_path); + ggml_backend_load_best("cuda", silent, dir_path); + ggml_backend_load_best("hip", silent, dir_path); + ggml_backend_load_best("kompute", silent, dir_path); + ggml_backend_load_best("metal", silent, dir_path); + ggml_backend_load_best("rpc", silent, dir_path); + ggml_backend_load_best("sycl", silent, dir_path); + ggml_backend_load_best("vulkan", silent, dir_path); + ggml_backend_load_best("opencl", silent, dir_path); + ggml_backend_load_best("musa", silent, dir_path); + ggml_backend_load_best("cpu", silent, dir_path); } diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp index 9dcde8d11952a..fdb4b986f613b 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp @@ -252,6 +252,7 @@ void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_ten } void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + GGML_ASSERT(tensor); ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; if (size == 0) { @@ -266,6 +267,7 @@ void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, siz } void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { + GGML_ASSERT(tensor); ggml_backend_buffer_t buf = tensor->view_src ? 
tensor->view_src->buffer : tensor->buffer; if (size == 0) { @@ -740,7 +742,8 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) { // since the tensor is pre-allocated, it cannot be moved to another backend - GGML_ABORT("pre-allocated tensor (%s) in a backend that cannot run the operation", tensor->name); + ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; + GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op)); } // graph input @@ -884,9 +887,6 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; int * node_backend_id = &tensor_backend_id(node); - if (ggml_is_view_op(node->op)) { - continue; - } // do not overwrite user assignments if (*node_backend_id == -1) { *node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node); diff --git a/ggml/src/ggml-blas/CMakeLists.txt b/ggml/src/ggml-blas/CMakeLists.txt index ffe39e8bd1c97..0bf3c05d93a89 100644 --- a/ggml/src/ggml-blas/CMakeLists.txt +++ b/ggml/src/ggml-blas/CMakeLists.txt @@ -11,12 +11,9 @@ find_package(BLAS) if (BLAS_FOUND) message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}") - add_library(ggml-blas - ggml-blas.cpp - ) - - target_link_libraries(ggml-blas PRIVATE ggml-base) - target_include_directories(ggml-blas PRIVATE . ..) + ggml_add_backend_library(ggml-blas + ggml-blas.cpp + ) if (${GGML_BLAS_VENDOR} MATCHES "Apple") add_compile_definitions(ACCELERATE_NEW_LAPACK) @@ -75,7 +72,6 @@ if (BLAS_FOUND) message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}") - #add_compile_options(${BLAS_LINKER_FLAGS}) target_compile_options(ggml-blas PRIVATE ${BLAS_LINKER_FLAGS}) if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${GGML_BLAS_VENDOR} MATCHES "Generic" OR ${GGML_BLAS_VENDOR} MATCHES "Intel")) diff --git a/ggml/src/ggml-blas/ggml-blas.cpp b/ggml/src/ggml-blas/ggml-blas.cpp index 648c9d875e346..ec158dfac6e3e 100644 --- a/ggml/src/ggml-blas/ggml-blas.cpp +++ b/ggml/src/ggml-blas/ggml-blas.cpp @@ -506,9 +506,12 @@ static const struct ggml_backend_reg_i ggml_backend_blas_reg_i = { ggml_backend_reg_t ggml_backend_blas_reg(void) { static struct ggml_backend_reg ggml_backend_blas_reg = { - /* .iface = */ ggml_backend_blas_reg_i, - /* .context = */ NULL, + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_blas_reg_i, + /* .context = */ NULL, }; return &ggml_backend_blas_reg; } + +GGML_BACKEND_DL_IMPL(ggml_backend_blas_reg) diff --git a/ggml/src/ggml-cann/CMakeLists.txt b/ggml/src/ggml-cann/CMakeLists.txt index c8e15c6d40ce9..05cf06bfab4fc 100644 --- a/ggml/src/ggml-cann/CMakeLists.txt +++ b/ggml/src/ggml-cann/CMakeLists.txt @@ -3,6 +3,34 @@ if ("cann${CANN_INSTALL_DIR}" STREQUAL "cann" AND DEFINED ENV{ASCEND_TOOLKIT_HOM message(STATUS "CANN: updated CANN_INSTALL_DIR from ASCEND_TOOLKIT_HOME=$ENV{ASCEND_TOOLKIT_HOME}") endif() +# Auto-detect the SoC type and version; if detection fails, the build is aborted +set(SOC_VERSION "") +function(detect_ascend_soc_type SOC_VERSION) + execute_process( + COMMAND bash -c "npu-smi info|awk -F' ' 'NF > 0 && NR==7 {print $3}'" + OUTPUT_VARIABLE npu_info + RESULT_VARIABLE npu_result + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + if("${npu_info}" STREQUAL "" OR ${npu_result}) + message(FATAL_ERROR "Auto-detecting the Ascend SoC type failed; please specify SOC_TYPE manually or check that the Ascend device is working normally.") + endif() + set(${SOC_VERSION} "Ascend${npu_info}" PARENT_SCOPE) +endfunction() + +if(NOT SOC_TYPE) + detect_ascend_soc_type(SOC_VERSION) + set(SOC_TYPE "${SOC_VERSION}") + message(STATUS "CANN: auto-detected SOC_VERSION is ${SOC_VERSION}") +endif() + +string(TOLOWER ${SOC_TYPE} SOC_VERSION) # SOC_VERSION needs to be lowercase + +# Construct the SoC-specific compile option ASCEND_<SoC major SN>, e.g. ASCEND_910B or ASCEND_310P. +string(REGEX MATCH "[0-9]+[a-zA-Z]" SOC_TYPE_MAJOR_SN "${SOC_VERSION}") +set(SOC_TYPE_COMPILE_OPTION "ASCEND_${SOC_TYPE_MAJOR_SN}") +string(TOUPPER ${SOC_TYPE_COMPILE_OPTION} SOC_TYPE_COMPILE_OPTION) + if (CANN_INSTALL_DIR) # Only Support Linux. if (NOT UNIX) @@ -34,11 +62,13 @@ if (CANN_INSTALL_DIR) file(GLOB GGML_SOURCES_CANN "*.cpp") - add_library(ggml-cann ${GGML_SOURCES_CANN}) - target_link_libraries(ggml-cann PRIVATE ggml-base ${CANN_LIBRARIES}) - target_include_directories(ggml-cann PRIVATE . .. ${CANN_INCLUDE_DIRS}) + ggml_add_backend_library(ggml-cann ${GGML_SOURCES_CANN}) + target_link_libraries(ggml-cann PRIVATE ${CANN_LIBRARIES}) + target_include_directories(ggml-cann PRIVATE ${CANN_INCLUDE_DIRS}) target_link_directories(ggml-cann PRIVATE ${CANN_INSTALL_DIR}/lib64) + target_compile_definitions(ggml-cann PRIVATE "-D${SOC_TYPE_COMPILE_OPTION}") + message(STATUS "CANN: CANN_INCLUDE_DIRS = ${CANN_INCLUDE_DIRS}") message(STATUS "CANN: CANN_LIBRARIES = ${CANN_LIBRARIES}") else() diff --git a/ggml/src/ggml-cann/aclnn_ops.cpp b/ggml/src/ggml-cann/aclnn_ops.cpp index a4ec8418e2ab3..b2d857e1e549b 100644 --- a/ggml/src/ggml-cann/aclnn_ops.cpp +++ b/ggml/src/ggml-cann/aclnn_ops.cpp @@ -22,11 +22,14 @@ #include "aclnn_ops.h" +#include #include +#include #include #include #include #include +#include #include #include #include @@ -34,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -53,6 +57,7 @@ #include #include +#include "ggml-impl.h" #include "kernels/ascendc_kernels.h" #define GGML_COMMON_DECL_C @@ -241,10 +246,14 @@ void ggml_cann_concat(ggml_backend_cann_context& ctx, ggml_tensor* dst) { aclTensor* acl_src1 = ggml_cann_create_tensor(src1); aclTensor* acl_dst = ggml_cann_create_tensor(dst); - int64_t concat_dim = 1; + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + int32_t acl_dim = 3 - dim; + aclTensor* tensors[] = {acl_src0, acl_src1}; aclTensorList* tensorList = aclCreateTensorList(tensors, 2); - aclnn_concat(ctx, tensorList, acl_dst, concat_dim); + aclnn_concat(ctx, tensorList, acl_dst, acl_dim); ACL_CHECK(aclDestroyTensorList(tensorList)); ACL_CHECK(aclDestroyTensor(acl_dst)); @@ -1096,9 +1105,9 @@ static aclTensor* aclnn_zero(ggml_backend_cann_context& ctx, void* buffer, } /** - * @brief Creates an ACL tensor initialized with ones using a provided buffer. + * @brief Creates an ACL tensor initialized with value using a provided buffer. * - * This function initializes a tensor with ones using the specified buffer and + * This function initializes a tensor with value using the specified buffer and * tensor parameters. * * @param ctx The context for the CANN backend operations. @@ -1111,12 +1120,12 @@ static aclTensor* aclnn_zero(ggml_backend_cann_context& ctx, void* buffer, * @param type_size The size of each element in the tensor data type. * @param value The value to be used for initializing the tensor (default * is 1.0). - * @return An ACL tensor initialized with ones. 
+ * @return An ACL tensor initialized with value. */ -static aclTensor* aclnn_ones(ggml_backend_cann_context& ctx, void* buffer, - size_t n_bytes, int64_t* ne, int64_t dims, - aclDataType type, size_t type_size, - float value = 1.0f) { +static aclTensor* aclnn_values(ggml_backend_cann_context& ctx, void* buffer, + size_t n_bytes, int64_t* ne, int64_t dims, + aclDataType type, size_t type_size, + float value = 1.0f) { aclTensor* acl_tensor = aclnn_zero(ctx, buffer, n_bytes, ne, dims, type, type_size); float alpha_host = 1.0f; @@ -1158,7 +1167,7 @@ void ggml_cann_rms_norm(ggml_backend_cann_context& ctx, ggml_tensor* dst) { size_t one_tensor_n_bytes = src->ne[0] * ggml_element_size(src); ggml_cann_pool_alloc one_tensor_allocator(ctx.pool(), one_tensor_n_bytes); - aclTensor* acl_gamma = aclnn_ones( + aclTensor* acl_gamma = aclnn_values( ctx, one_tensor_allocator.get(), one_tensor_n_bytes, src->ne, 1, ggml_cann_type_mapping(src->type), ggml_element_size(src)); @@ -1202,9 +1211,9 @@ void ggml_cann_diag_mask(ggml_backend_cann_context& ctx, ggml_tensor* dst, ggml_cann_pool_alloc one_tensor_allocator(ctx.pool(), one_tensor_n_bytes); aclTensor* mask_tensor = - aclnn_ones(ctx, one_tensor_allocator.get(), one_tensor_n_bytes, src->ne, - GGML_MAX_DIMS, ggml_cann_type_mapping(src->type), - ggml_element_size(src), value); + aclnn_values(ctx, one_tensor_allocator.get(), one_tensor_n_bytes, + src->ne, GGML_MAX_DIMS, ggml_cann_type_mapping(src->type), + ggml_element_size(src), value); uint64_t workspaceSize = 0; aclOpExecutor* executor; @@ -1437,10 +1446,6 @@ void ggml_cann_im2col(ggml_backend_cann_context& ctx, ggml_tensor* dst) { ggml_tensor* src0 = dst->src[0]; // kernel ggml_tensor* src1 = dst->src[1]; // input - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32); - GGML_TENSOR_BINARY_OP_LOCALS; // aclnnIm2col only works on 2D. set s1, p1, d1 to 1 to perform 2D @@ -1462,9 +1467,6 @@ const int64_t OH = is_2D ? ne2 : 1; const int64_t OW = ne1; - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - // memory allocated increased to 3x when is_2D == false const int64_t n_bytes_factor = is_2D ? 1 : 3; @@ -1768,6 +1770,92 @@ static void aclnn_sin(ggml_backend_cann_context& ctx, aclTensor* acl_src, ACL_CHECK(aclnnSin(workspaceAddr, workspaceSize, executor, ctx.stream())); } +/** + * @brief Performs element-wise division of tensor1 by tensor2, multiplies the + result by the scalar value, and adds it to self. + * + * Performs element-wise division of tensor1 by tensor2, + * multiplies the result by the scalar value, and adds it to self. + * The operation is defined as: + * \f[ + * \text{out}_i = \text{self}_i + \text{value} \times + \frac{\text{tensor1}_i}{\text{tensor2}_i} + * \f] + + * @param ctx The context for the CANN backend operations. + * @param acl_self The source tensor on which the addcdiv function will be + applied. + * @param tensor1 Numerator tensor. + * @param tensor2 Denominator tensor. + * @param value The scalar coefficient applied to the quotient. + */ +static void aclnn_inplace_addcdiv(ggml_backend_cann_context& ctx, + aclTensor* acl_self, aclTensor* tensor1, + aclTensor* tensor2, float value) { + uint64_t workspaceSize = 0; + aclOpExecutor* executor; + void* workspaceAddr = nullptr; + aclScalar* acl_value = aclCreateScalar(&value, aclDataType::ACL_FLOAT); + + ACL_CHECK(aclnnInplaceAddcdivGetWorkspaceSize( + acl_self, tensor1, tensor2, acl_value, &workspaceSize, &executor)); + if (workspaceSize > 0) { + ggml_cann_pool_alloc workspace_allocator(ctx.pool(), workspaceSize); + workspaceAddr = workspace_allocator.get(); + } + + ACL_CHECK(aclnnInplaceAddcdiv(workspaceAddr, workspaceSize, executor, + ctx.stream())); +} + +/** + * @brief Matrix division, optionally in-place. + * + * This function divides each element of the source tensor `acl_src` by the + * tensor `acl_other` and stores the result in the destination tensor `acl_dst`. + * If `inplace` is true, `acl_dst` will not be used and the operation is + * performed in-place on `acl_src`. The operation is defined as: \f[ + * \text{dst}_i = \frac{\text{acl_src}_i}{\text{acl_other}_i} + * \f] + * + * @param ctx The context for the CANN backend operations. + * @param acl_src Numerator tensor. + * @param acl_other Denominator tensor. + * @param acl_dst The destination tensor where the result will be stored if + * `inplace` is false. + * @param inplace Flag indicating whether to perform the operation in-place on + * `acl_src`. + */ +static void aclnn_div_tensor(ggml_backend_cann_context& ctx, aclTensor* acl_src, + aclTensor* acl_other, aclTensor* acl_dst, + bool inplace) { + uint64_t workspaceSize = 0; + aclOpExecutor* executor; + void* workspaceAddr = nullptr; + + if (inplace) { + ACL_CHECK(aclnnInplaceDivGetWorkspaceSize(acl_src, acl_other, + &workspaceSize, &executor)); + if (workspaceSize > 0) { + ggml_cann_pool_alloc workspace_allocator(ctx.pool(), workspaceSize); + workspaceAddr = workspace_allocator.get(); + } + + ACL_CHECK(aclnnInplaceDiv(workspaceAddr, workspaceSize, executor, + ctx.stream())); + } else { + ACL_CHECK(aclnnDivGetWorkspaceSize(acl_src, acl_other, acl_dst, + &workspaceSize, &executor)); + if (workspaceSize > 0) { + ggml_cann_pool_alloc workspace_allocator(ctx.pool(), workspaceSize); + workspaceAddr = workspace_allocator.get(); + } + + ACL_CHECK( + aclnnDiv(workspaceAddr, workspaceSize, executor, ctx.stream())); + } +} + void ggml_cann_timestep_embedding(ggml_backend_cann_context& ctx, ggml_tensor* dst) { const ggml_tensor* src = dst->src[0]; @@ -2311,7 +2399,16 @@ void ggml_cann_get_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) { ctx.stream())); switch (src0->type) { - case GGML_TYPE_F32: + case GGML_TYPE_F32: { +#ifdef ASCEND_310P + // Special operation for get_row_f32 kernel of 310P: clear the + // content of dest data buffer when row is not aligned to 32 bytes + if ((src0->ne[0] % 8) != 0) { + size_t dst_len = src1->ne[0] * src1->ne[1] * src1->ne[2] * + src0->ne[0] * ggml_type_size(GGML_TYPE_F32); + ACL_CHECK(aclrtMemset((char*)dst->data, dst_len, 0, dst_len)); + } +#endif aclrtlaunch_ascendc_get_row_f32( 24, ctx.stream(), src0->data, src1->data, dst->data, ((ggml_tensor*)src0->extra)->ne, @@ -2320,7 +2417,19 @@ void ggml_cann_get_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) { ((ggml_tensor*)src1->extra)->nb, ((ggml_tensor*)dst->extra)->ne, ((ggml_tensor*)dst->extra)->nb); break; - case GGML_TYPE_F16: + } + case GGML_TYPE_F16: { +#ifdef ASCEND_310P + // Special operation for get_row_f16 kernel of 310P: clear the + // content of dest data 
buffer when row is not aligned to 32 bytes + if ((src0->ne[0] % 16) != 0) { + size_t dst_len = + src1->ne[0] * src1->ne[1] * src1->ne[2] * src0->ne[0] * + ggml_type_size( + GGML_TYPE_F32); // out is also f32, even input is f16 + ACL_CHECK(aclrtMemset((char*)dst->data, dst_len, 0, dst_len)); + } +#endif aclrtlaunch_ascendc_get_row_f16( 24, ctx.stream(), src0->data, src1->data, dst->data, ((ggml_tensor*)src0->extra)->ne, @@ -2329,6 +2438,7 @@ void ggml_cann_get_rows(ggml_backend_cann_context& ctx, ggml_tensor* dst) { ((ggml_tensor*)src1->extra)->nb, ((ggml_tensor*)dst->extra)->ne, ((ggml_tensor*)dst->extra)->nb); break; + } case GGML_TYPE_Q4_0: aclrtlaunch_ascendc_get_row_q4_0( 24, ctx.stream(), src0->data, src1->data, dst->data, @@ -2407,7 +2517,6 @@ static void aclnn_mat_mul(ggml_backend_cann_context& ctx, aclTensor* acl_input, aclTensor* acl_weight, aclTensor* acl_dst) { int8_t cube_math_type = 1; // ALLOW_FP32_DOWN_PRECISION, when input is // fp32, atlas a2 will transpose it to HFLOAT32. - uint64_t workspaceSize = 0; aclOpExecutor* executor; void* workspaceAddr = nullptr; @@ -2425,6 +2534,81 @@ static void aclnn_mat_mul(ggml_backend_cann_context& ctx, aclTensor* acl_input, aclnnMatmul(workspaceAddr, workspaceSize, executor, ctx.stream())); } +/** + * @brief Performs matrix multiplication of two 2D tensors. + * + * This function computes the matrix multiplication of the input tensor + * `acl_input` and the weight tensor `acl_weight`, and stores the result in the + * destination tensor `acl_dst`. + * The operation is defined as: + * \f[ + * \text {acl_dst}=\text {acl_input@acl_weight} + * \f] + * + * @param ctx The context for the CANN backend operations. + * @param acl_input The input tensor for the matrix multiplication. + * @param acl_weight The weight tensor for the matrix multiplication. + * @param acl_dst The destination tensor where the result of the matrix + * multiplication will be stored. + */ +static void aclnn_mat_mul_2d(ggml_backend_cann_context& ctx, + aclTensor* acl_input, aclTensor* acl_weight, + aclTensor* acl_dst) { + int8_t cube_math_type = 2; + uint64_t workspaceSize = 0; + aclOpExecutor* executor; + void* workspaceAddr = nullptr; + + ACL_CHECK(aclnnMmGetWorkspaceSize(acl_input, acl_weight, acl_dst, + cube_math_type, &workspaceSize, + &executor)); + + if (workspaceSize > 0) { + ggml_cann_pool_alloc workspace_allocator(ctx.pool(), workspaceSize); + workspaceAddr = workspace_allocator.get(); + } + + ACL_CHECK(aclnnMm(workspaceAddr, workspaceSize, executor, ctx.stream())); +} + +/** + * @brief Performs matrix multiplication of two 3D tensors. + * + * This function computes the matrix multiplication of the input tensor + * `acl_input` and the weight tensor `acl_weight`, and stores the result in the + * destination tensor `acl_dst`. + * The operation is defined as: + * \f[ + * \text {acl_dst}=\text {acl_input@acl_weight} + * \f] + * + * @param ctx The context for the CANN backend operations. + * @param acl_input The input tensor for the matrix multiplication. + * @param acl_weight The weight tensor for the matrix multiplication. + * @param acl_dst The destination tensor where the result of the matrix + * multiplication will be stored. 
+ */ +static void aclnn_mat_mul_3d(ggml_backend_cann_context& ctx, + aclTensor* acl_input, aclTensor* acl_weight, + aclTensor* acl_dst) { + int8_t cube_math_type = 2; + uint64_t workspaceSize = 0; + aclOpExecutor* executor; + void* workspaceAddr = nullptr; + + ACL_CHECK(aclnnBatchMatMulGetWorkspaceSize(acl_input, acl_weight, acl_dst, + cube_math_type, &workspaceSize, + &executor)); + + if (workspaceSize > 0) { + ggml_cann_pool_alloc workspace_allocator(ctx.pool(), workspaceSize); + workspaceAddr = workspace_allocator.get(); + } + + ACL_CHECK( + aclnnBatchMatMul(workspaceAddr, workspaceSize, executor, ctx.stream())); +} + /** * @brief Performs matrix multiplication with floating-point precision on * tensors using the CANN backend. @@ -2446,20 +2630,39 @@ static void ggml_cann_mat_mul_fp(ggml_backend_cann_context& ctx, // broadcast, when weight ne2 or ne3 is not 1, weight need repeat. BCAST_MUL_MAT_SHAPE(input, weight, dst); - // transpose weight: [1,2,3,4] -> [1,2,4,3] + int64_t n_dims = bcast_dims; + if (bcast_input_ne[3] == bcast_weight_ne[3] && bcast_input_ne[3] == 1) { + if (bcast_input_ne[2] == 1 && bcast_weight_ne[2] == 1) { + n_dims = 2; + } else if (bcast_input_ne[2] == 1) { + n_dims = 3; + } + } + + aclTensor* acl_input_tensor = + ggml_cann_create_tensor(input, bcast_input_ne, bcast_input_nb, n_dims); int64_t transpose_ne[] = {bcast_weight_ne[1], bcast_weight_ne[0], bcast_weight_ne[2], bcast_weight_ne[3], bcast_weight_ne[4], bcast_weight_ne[5]}; size_t transpose_nb[] = {bcast_weight_nb[1], bcast_weight_nb[0], bcast_weight_nb[2], bcast_weight_nb[3], bcast_weight_nb[4], bcast_weight_nb[5]}; - aclTensor* acl_weight_tensor = - ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, bcast_dims); - aclTensor* acl_input_tensor = - ggml_cann_create_tensor(input, BCAST_MUL_MAT_PARAM(input)); - aclTensor* acl_dst = ggml_cann_create_tensor(dst, BCAST_MUL_MAT_PARAM(dst)); - aclnn_mat_mul(ctx, acl_input_tensor, acl_weight_tensor, acl_dst); + ggml_cann_create_tensor(weight, transpose_ne, transpose_nb, n_dims); + aclTensor* acl_dst = + ggml_cann_create_tensor(dst, bcast_dst_ne, bcast_dst_nb, n_dims); + + switch (n_dims) { + case 2: + aclnn_mat_mul_2d(ctx, acl_input_tensor, acl_weight_tensor, acl_dst); + break; + case 3: + aclnn_mat_mul_3d(ctx, acl_input_tensor, acl_weight_tensor, acl_dst); + break; + default: + aclnn_mat_mul(ctx, acl_input_tensor, acl_weight_tensor, acl_dst); + break; + } ACL_CHECK(aclDestroyTensor(acl_weight_tensor)); ACL_CHECK(aclDestroyTensor(acl_input_tensor)); @@ -2480,51 +2683,47 @@ static void ggml_cann_mat_mul_fp(ggml_backend_cann_context& ctx, * multiplication will be stored. */ static void ggml_cann_mul_mat_quant(ggml_backend_cann_context& ctx, - ggml_tensor* dst, - const enum ggml_type type) { + ggml_tensor* dst, + const enum ggml_type type) { ggml_tensor* src0 = dst->src[0]; // weight ggml_tensor* src1 = dst->src[1]; // input - // The shape of the weight is NCHW. Matrix multiplication uses HW dims. HC - // is regarded as batch. weight need transpose. - int64_t weight_ne[] = {src0->ne[1], src0->ne[0]}; + // The shape of the weight is NCHW. + // Matrix multiplication uses HW dims. + // HC is regarded as batch. + // weight need transpose. 
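// Layout sketch (editorial illustration, assuming Q8_0 with QK8_0 == 32):
// each [ne0, ne1] weight matrix is stored as ne1*ne0 quantized elements
// followed by ne1*ne0/QK8_0 fp16 scales, so for ne0 = ne1 = 4096:
//   weight_stride = 4096 * 4096 * 1 byte     = 16 MiB per matrix
//   scale_stride  = 4096 * 4096 / 32 * 2 B   =  1 MiB per matrix
// which matches the weight_stride / scale_stride computed below, with the
// scales placed at scale_offset = weight_size past the start of the data.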
float weight_elem_size; if (type == GGML_TYPE_Q4_0) { weight_elem_size = float(sizeof(uint8_t)) / 2; - } - else if (type == GGML_TYPE_Q8_0) { + } else if (type == GGML_TYPE_Q8_0) { weight_elem_size = float(sizeof(uint8_t)); - } - else { + } else { GGML_ABORT("Only support Q4_0 and Q8_0 MUL_MAT"); } - float weight_nb[] = {weight_elem_size * src0->ne[0], weight_elem_size}; - - // size of one matrix is element_size * height * width. - size_t weight_stride = weight_elem_size * src0->ne[0] * src0->ne[1]; + float weight_nb[] = {src0->ne[0] * weight_elem_size, weight_elem_size}; + size_t weight_stride = src0->ne[1] * src0->ne[0] * weight_elem_size; size_t weight_size = weight_stride * src0->ne[2] * src0->ne[3]; // scale stored at the end of weight. Also need transpose. - GGML_ASSERT(QK4_0 == QK8_0); - int64_t scale_ne[] = {src0->ne[1], src0->ne[0] / QK8_0}; size_t scale_elem_size = sizeof(uint16_t); size_t scale_nb[] = {src0->ne[0] / QK8_0 * scale_elem_size, scale_elem_size}; - size_t scale_stride = scale_elem_size * src0->ne[0] * src0->ne[1] / QK8_0; + size_t scale_stride = src0->ne[1] * src0->ne[0] / QK8_0 * scale_elem_size; char* scale_offset = (char*)src0->data + weight_size; // input - void* input_buffer; size_t input_elem_size = sizeof(uint16_t); int64_t input_ne[] = {src1->ne[0], src1->ne[1]}; - size_t input_nb[] = {input_elem_size, input_elem_size * src1->ne[0]}; - size_t input_stride = input_elem_size * src1->ne[0] * src1->ne[1]; - + size_t input_nb[] = {input_elem_size, input_ne[0] * input_elem_size}; + size_t input_stride = input_ne[0] * input_ne[1] * input_elem_size; ggml_cann_pool_alloc input_alloctor(ctx.pool()); + void* input_buffer = src1->data; + + // case in if (src1->type != GGML_TYPE_F16) { aclTensor* acl_src1_tensor = ggml_cann_create_tensor(src1); - input_alloctor.alloc(ggml_nelements(src1) * input_elem_size); - input_buffer = input_alloctor.get(); + input_buffer = + input_alloctor.alloc(ggml_nelements(src1) * input_elem_size); int64_t* input_cast_ne = src1->ne; size_t input_cast_nb[GGML_MAX_DIMS]; @@ -2537,85 +2736,136 @@ static void ggml_cann_mul_mat_quant(ggml_backend_cann_context& ctx, input_buffer, ACL_FLOAT16, input_elem_size, input_cast_ne, input_cast_nb, GGML_MAX_DIMS); aclnn_cast(ctx, acl_src1_tensor, acl_input_tensor, ACL_FLOAT16); + ACL_CHECK(aclDestroyTensor(acl_input_tensor)); ACL_CHECK(aclDestroyTensor(acl_src1_tensor)); - } else { - input_buffer = src1->data; } // output size_t output_elem_size = sizeof(uint16_t); - int64_t output_ne[] = {dst->ne[0], dst->ne[1]}; - size_t output_nb[] = {output_elem_size, output_elem_size * dst->ne[0]}; - ggml_cann_pool_alloc output_alloctor( - ctx.pool(), ggml_nelements(dst) * output_elem_size); - void* output_buffer = output_alloctor.get(); - size_t output_stride = output_elem_size * dst->ne[0] * dst->ne[1]; + size_t output_nb[] = {output_elem_size, dst->ne[0] * output_elem_size}; + ggml_cann_pool_alloc output_allocator(ctx.pool()); + void* output_buffer = + output_allocator.alloc(ggml_nelements(dst) * output_elem_size); + size_t output_stride = dst->ne[0] * dst->ne[1] * output_elem_size; // aclnn + int64_t max_elem_size = 65535; + int64_t split_size = (src0->ne[1] / max_elem_size) + 1; + ggml_cann_pool_alloc workspace_allocator(ctx.pool()); + aclOpExecutor* executor = nullptr; uint64_t workspaceSize = 0; - aclOpExecutor* executor; void* workspaceAddr = nullptr; - for (int64_t n1 = 0; n1 < src1->ne[3]; n1++) { for (int64_t c1 = 0; c1 < src1->ne[2]; c1++) { int64_t n0 = n1 / (src1->ne[3] / src0->ne[3]); int64_t c0 = c1 / 
(src1->ne[2] / src0->ne[2]); - int64_t batch1 = n1 * src1->ne[2] + c1; - int64_t batch0 = n0 * src0->ne[2] + c0; + int64_t batch1 = (n1 * src1->ne[2]) + c1; + int64_t batch0 = (n0 * src0->ne[2]) + c0; aclTensor* acl_input_tensor = ggml_cann_create_tensor( (char*)input_buffer + batch1 * input_stride, ACL_FLOAT16, input_elem_size, input_ne, input_nb, 2); + + // first split + int64_t weight_ne_offset = 0; + int64_t weight_ne[2] = { + max_elem_size > src0->ne[1] ? src0->ne[1] : max_elem_size, + src0->ne[0]}; + int64_t scale_ne_offset = 0; + int64_t scale_ne[2] = {weight_ne[0], weight_ne[1] / QK8_0}; + int64_t output_ne_offset = 0; + int64_t output_ne[2] = {weight_ne[0], dst->ne[1]}; + aclTensor* acl_weight_tensor = ggml_cann_create_tensor( (char*)src0->data + batch0 * weight_stride, ggml_cann_type_mapping(type), weight_elem_size, weight_ne, - weight_nb, 2); + weight_nb, 2, ACL_FORMAT_ND, weight_ne_offset); aclTensor* acl_scale_tensor = ggml_cann_create_tensor( scale_offset + batch0 * scale_stride, ACL_FLOAT16, - scale_elem_size, scale_ne, scale_nb, 2); + scale_elem_size, scale_ne, scale_nb, 2, ACL_FORMAT_ND, + scale_ne_offset); aclTensor* acl_output_tensor = ggml_cann_create_tensor( (char*)output_buffer + batch1 * output_stride, ACL_FLOAT16, - output_elem_size, output_ne, output_nb, 2); + output_elem_size, output_ne, output_nb, 2, ACL_FORMAT_ND, + output_ne_offset); ACL_CHECK(aclnnWeightQuantBatchMatmulV2GetWorkspaceSize( acl_input_tensor, acl_weight_tensor, acl_scale_tensor, nullptr, nullptr, nullptr, nullptr, QK8_0, acl_output_tensor, &workspaceSize, &executor)); - - if (workspaceSize > 0 && workspaceAddr == nullptr) { - ggml_cann_pool_alloc workspace_allocator(ctx.pool(), - workspaceSize); - workspaceAddr = workspace_allocator.get(); + if (workspaceAddr == nullptr) { + workspaceAddr = workspace_allocator.alloc(workspaceSize); } - ACL_CHECK(aclnnWeightQuantBatchMatmulV2( workspaceAddr, workspaceSize, executor, ctx.stream())); - ACL_CHECK(aclDestroyTensor(acl_input_tensor)); ACL_CHECK(aclDestroyTensor(acl_weight_tensor)); ACL_CHECK(aclDestroyTensor(acl_scale_tensor)); ACL_CHECK(aclDestroyTensor(acl_output_tensor)); + + // other splits + for (int64_t split = 1; split < split_size; split++) { + weight_ne_offset += + weight_elem_size * weight_ne[0] * weight_ne[1]; + weight_ne[0] = max_elem_size * (split + 1) > src0->ne[1] + ? 
src0->ne[1] - (max_elem_size * split) + : max_elem_size; + scale_ne_offset += scale_elem_size * scale_ne[0] * scale_ne[1]; + scale_ne[0] = weight_ne[0]; + output_ne_offset += + output_elem_size * output_ne[0] * output_ne[1]; + output_ne[0] = weight_ne[0]; + + acl_weight_tensor = ggml_cann_create_tensor( + (char*)src0->data + batch0 * weight_stride, + ggml_cann_type_mapping(type), weight_elem_size, weight_ne, + weight_nb, 2, ACL_FORMAT_ND, weight_ne_offset); + acl_scale_tensor = ggml_cann_create_tensor( + scale_offset + batch0 * scale_stride, ACL_FLOAT16, + scale_elem_size, scale_ne, scale_nb, 2, ACL_FORMAT_ND, + scale_ne_offset); + acl_output_tensor = ggml_cann_create_tensor( + (char*)output_buffer + batch1 * output_stride, ACL_FLOAT16, + output_elem_size, output_ne, output_nb, 2, ACL_FORMAT_ND, + output_ne_offset); + + ACL_CHECK(aclnnWeightQuantBatchMatmulV2GetWorkspaceSize( + acl_input_tensor, acl_weight_tensor, acl_scale_tensor, + nullptr, nullptr, nullptr, nullptr, QK8_0, + acl_output_tensor, &workspaceSize, &executor)); + ACL_CHECK(aclnnWeightQuantBatchMatmulV2( + workspaceAddr, workspaceSize, executor, ctx.stream())); + + ACL_CHECK(aclDestroyTensor(acl_weight_tensor)); + ACL_CHECK(aclDestroyTensor(acl_scale_tensor)); + ACL_CHECK(aclDestroyTensor(acl_output_tensor)); + } + + ACL_CHECK(aclDestroyTensor(acl_input_tensor)); } } // cast out - int64_t* output_cast_ne = dst->ne; - size_t output_cast_nb[GGML_MAX_DIMS]; - output_cast_nb[0] = sizeof(uint16_t); - for (int i = 1; i < GGML_MAX_DIMS; i++) { - output_cast_nb[i] = output_cast_nb[i - 1] * output_cast_ne[i - 1]; - } + if (dst->type != GGML_TYPE_F16) { + int64_t* output_cast_ne = dst->ne; + size_t output_cast_nb[GGML_MAX_DIMS]; + output_cast_nb[0] = sizeof(uint16_t); + for (int i = 1; i < GGML_MAX_DIMS; i++) { + output_cast_nb[i] = output_cast_nb[i - 1] * output_cast_ne[i - 1]; + } - aclTensor* acl_output_tensor = - ggml_cann_create_tensor(output_buffer, ACL_FLOAT16, output_elem_size, - output_cast_ne, output_cast_nb, GGML_MAX_DIMS); - aclTensor* acl_dst_tensor = ggml_cann_create_tensor(dst); - aclnn_cast(ctx, acl_output_tensor, acl_dst_tensor, ACL_FLOAT); + aclTensor* acl_output_tensor = ggml_cann_create_tensor( + output_buffer, ACL_FLOAT16, output_elem_size, output_cast_ne, + output_cast_nb, GGML_MAX_DIMS); + aclTensor* acl_dst_tensor = ggml_cann_create_tensor(dst); + aclnn_cast(ctx, acl_output_tensor, acl_dst_tensor, + ggml_cann_type_mapping(dst->type)); - ACL_CHECK(aclDestroyTensor(acl_output_tensor)); - ACL_CHECK(aclDestroyTensor(acl_dst_tensor)); + ACL_CHECK(aclDestroyTensor(acl_output_tensor)); + ACL_CHECK(aclDestroyTensor(acl_dst_tensor)); + } } void ggml_cann_mul_mat(ggml_backend_cann_context& ctx, ggml_tensor* dst) { @@ -2714,12 +2964,14 @@ static void aclnn_index_fill_tensor(ggml_backend_cann_context& ctx, static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, aclTensor* acl_cos_repeat_tensor, aclTensor* acl_sin_repeat_tensor, - float theta_scale, bool is_neox) { + float theta_scale, float freq_scale, + float attn_factor, bool is_neox) { // int sin/cos cache, cache has different repeat method depond on // @param.is_neox ggml_tensor* src0 = dst->src[0]; // input ggml_tensor* src1 = dst->src[1]; // position + ggml_tensor* src2 = dst->src[2]; // freq_factors // arange, [0,1,...,ne0/2] int64_t arange_length = src0->ne[0] / 2; @@ -2748,11 +3000,26 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, ggml_cann_pool_alloc theta_scale_allocator(ctx.pool(), arange_length * 
sizeof(float_t)); void* theta_scale_buffer = theta_scale_allocator.get(); - aclTensor* acl_theta_scale_tensor = aclnn_ones( + aclTensor* acl_theta_scale_tensor = aclnn_values( ctx, theta_scale_buffer, arange_length * sizeof(float_t), arange_ne, GGML_MAX_DIMS, ACL_FLOAT, sizeof(float_t), theta_scale); aclnn_pow_tensor_tensor(ctx, acl_theta_scale_tensor, acl_arange_tensor); + // freq_scale + if (freq_scale != 1) { + aclnn_muls(ctx, acl_theta_scale_tensor, freq_scale, nullptr, true); + } + + // freq_factors + if (src2) { + aclTensor* acl_freq_factors_tensor = ggml_cann_create_tensor( + src2->data, ggml_cann_type_mapping(src2->type), + ggml_type_size(src2->type), arange_ne, arange_nb, GGML_MAX_DIMS); + aclnn_div_tensor(ctx, acl_theta_scale_tensor, acl_freq_factors_tensor, + nullptr, true); + ACL_CHECK(aclDestroyTensor(acl_freq_factors_tensor)); + } + // position GGML_ASSERT(src1->type == GGML_TYPE_I32); int64_t position_length = src1->ne[0]; @@ -2816,6 +3083,12 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, GGML_MAX_DIMS, ACL_FORMAT_ND); aclnn_cos(ctx, acl_permute_tensor, acl_cos_tensor); + // attn_factor + if (attn_factor != 1) { + aclnn_muls(ctx, acl_sin_tensor, attn_factor, nullptr, true); + aclnn_muls(ctx, acl_cos_tensor, attn_factor, nullptr, true); + } + // repeat if (is_neox) { int64_t repeatsArray[] = {1, 1, 1, 2}; @@ -2841,15 +3114,27 @@ static void aclnn_cache_init(ggml_backend_cann_context& ctx, ggml_tensor* dst, ACL_CHECK(aclDestroyTensor(acl_cos_tensor)); } +#ifdef __cplusplus +extern "C" { +#endif +aclnnStatus aclnnRotaryPositionEmbeddingGetWorkspaceSize( + const aclTensor* x, const aclTensor* cos, const aclTensor* sin, + int64_t mode, const aclTensor* yOut, uint64_t* workspaceSize, + aclOpExecutor** executor); +aclnnStatus aclnnRotaryPositionEmbedding(void* workspace, + uint64_t workspaceSize, + aclOpExecutor* executor, + aclrtStream stream); +#ifdef __cplusplus +} +#endif + void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { // TODO: use ascendc // Only test with LLAMA model. 
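// Angle construction sketch (editorial note): for channel pair i
// (0 <= i < n_dims/2) at position p, the sin/cos cache built below computes
//   theta_i(p) = p * freq_scale * freq_base^(-2*i/n_dims) / freq_factors[i]
// and then scales sin(theta) and cos(theta) by attn_factor, matching the
// freq_scale, freq_factors, and attn_factor handling in aclnn_cache_init.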
ggml_tensor* src0 = dst->src[0]; // input ggml_tensor* src2 = dst->src[2]; // freq_factors - // TODO: with freq_factors - GGML_ASSERT(src2 == NULL); - // param float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; // const int n_past = ((int32_t *) dst->op_params)[0]; @@ -2867,13 +3152,11 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { memcpy(&beta_fast, (int32_t*)dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t*)dst->op_params + 10, sizeof(float)); - GGML_ASSERT(n_dims <= ne0); + // TODO: n_dims <= ne0 + GGML_ASSERT(n_dims == ne0); GGML_ASSERT(n_dims % 2 == 0); - // TODO: ext_factor != 0 GGML_ASSERT(ext_factor == 0); - // TODO: freq_scale != 1 - GGML_ASSERT(freq_scale == 1); const float theta_scale = powf(freq_base, -2.0f / n_dims); @@ -2904,7 +3187,13 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { ggml_cann_create_tensor(cos_buffer, ACL_FLOAT, sizeof(float_t), sin_reshape_ne, sin_reshape_nb, GGML_MAX_DIMS); aclnn_cache_init(ctx, dst, acl_cos_reshape_tensor, acl_sin_reshape_tensor, - theta_scale, is_neox); + theta_scale, freq_scale, attn_factor, is_neox); + + aclTensor* acl_src = ggml_cann_create_tensor(src0); + aclTensor* acl_dst = ggml_cann_create_tensor(dst); + +#ifdef ASCEND_310P + // Special ROPE operation for 310P // roll input void* input_roll_buffer; @@ -2947,7 +3236,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { for (int i = 1; i < GGML_MAX_DIMS; i++) { minus_one_nb[i] = minus_one_nb[i - 1] * minus_one_ne[i - 1]; } - acl_minus_one_tensor = aclnn_ones( + acl_minus_one_tensor = aclnn_values( ctx, minus_one_scale_buffer, sizeof(float_t) * src0->ne[0], minus_one_ne, GGML_MAX_DIMS, ACL_FLOAT, sizeof(float_t), 1); int64_t dim = 3; @@ -2974,17 +3263,15 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { ACL_CHECK(aclDestroyTensor(acl_input_roll_tensor)); ACL_CHECK(aclDestroyTensor(acl_input_tensor)); - // init [-1, -1, -1, 1, 1,1,...] 
minus_one_scale_buffer = minus_one_scale_allocator.get(); - int64_t minus_one_ne[4] = {src0->ne[0], 1, 1, 1}; size_t minus_one_nb[GGML_MAX_DIMS]; minus_one_nb[0] = sizeof(float_t); for (int i = 1; i < GGML_MAX_DIMS; i++) { minus_one_nb[i] = minus_one_nb[i - 1] * minus_one_ne[i - 1]; } - acl_minus_one_tensor = aclnn_ones( + acl_minus_one_tensor = aclnn_values( ctx, minus_one_scale_buffer, sizeof(float_t) * src0->ne[0], minus_one_ne, GGML_MAX_DIMS, ACL_FLOAT, sizeof(float_t), 1); // -1 * first half @@ -3026,14 +3313,12 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { acl_input_roll_mul_scale_tensor); // output - aclTensor* acl_src0 = ggml_cann_create_tensor(src0); - aclTensor* acl_dst = ggml_cann_create_tensor(dst); void* output_fp32_buffer; if (src0->type == GGML_TYPE_F32) { - aclnn_inplace_mul(ctx, acl_src0, acl_cos_reshape_tensor); + aclnn_inplace_mul(ctx, acl_src, acl_cos_reshape_tensor); aclnn_inplace_mul(ctx, acl_input_roll_mul_scale_tensor, acl_sin_reshape_tensor); - aclnn_add(ctx, acl_src0, acl_input_roll_mul_scale_tensor, acl_dst); + aclnn_add(ctx, acl_src, acl_input_roll_mul_scale_tensor, acl_dst); // TODO: ne0 != n_dims in mode2 } else if (src0->type == GGML_TYPE_F16) { size_t input_fp32_nb[GGML_MAX_DIMS]; @@ -3060,7 +3345,7 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { aclTensor* output_fp32_tensor = ggml_cann_create_tensor( output_fp32_buffer, ACL_FLOAT, sizeof(float_t), dst->ne, input_fp32_nb, GGML_MAX_DIMS); - aclnn_mul(ctx, acl_src0, acl_cos_reshape_tensor, input_fp32_tensor1); + aclnn_mul(ctx, acl_src, acl_cos_reshape_tensor, input_fp32_tensor1); aclnn_mul(ctx, acl_input_roll_mul_scale_tensor, acl_sin_reshape_tensor, input_fp32_tensor2); aclnn_add(ctx, input_fp32_tensor1, input_fp32_tensor2, @@ -3070,13 +3355,73 @@ void ggml_cann_rope(ggml_backend_cann_context& ctx, ggml_tensor* dst) { ACL_CHECK(aclDestroyTensor(input_fp32_tensor1)); ACL_CHECK(aclDestroyTensor(input_fp32_tensor2)); ACL_CHECK(aclDestroyTensor(output_fp32_tensor)); + ACL_CHECK(aclDestroyTensor(acl_sin_reshape_tensor)); + ACL_CHECK(aclDestroyTensor(acl_minus_one_tensor)); + ACL_CHECK(aclDestroyTensor(acl_input_roll_mul_scale_tensor)); + ACL_CHECK(aclDestroyTensor(acl_input_roll_reshape_tensor)); + ACL_CHECK(aclDestroyTensor(acl_src)); } + return; +#endif - ACL_CHECK(aclDestroyTensor(acl_sin_reshape_tensor)); + // src0 == GGML_TYPE_F16 + // TODO: optimization this `if` code + if (src0->type == GGML_TYPE_F16) { + ggml_cann_pool_alloc sin_final_allocator( + ctx.pool(), src0->ne[0] * src0->ne[2] * ggml_type_size(src0->type)); + ggml_cann_pool_alloc cos_final_allocator( + ctx.pool(), src0->ne[0] * src0->ne[2] * ggml_type_size(src0->type)); + void* sin_final_buffer = sin_final_allocator.get(); + void* cos_final_buffer = cos_final_allocator.get(); + + int64_t sin_final_ne[4] = {src0->ne[0], 1, src0->ne[2], 1}; + size_t sin_final_nb[GGML_MAX_DIMS]; + sin_final_nb[0] = ggml_type_size(src0->type); + for (int i = 1; i < GGML_MAX_DIMS; i++) { + sin_final_nb[i] = sin_final_nb[i - 1] * sin_final_ne[i - 1]; + } + aclTensor* acl_sin_final_tensor = ggml_cann_create_tensor( + sin_final_buffer, ggml_cann_type_mapping(src0->type), + ggml_type_size(src0->type), sin_final_ne, sin_final_nb, + GGML_MAX_DIMS); + aclTensor* acl_cos_final_tensor = ggml_cann_create_tensor( + cos_final_buffer, ggml_cann_type_mapping(src0->type), + ggml_type_size(src0->type), sin_final_ne, sin_final_nb, + GGML_MAX_DIMS); + + aclnn_cast(ctx, acl_sin_reshape_tensor, acl_sin_final_tensor, + 
ggml_cann_type_mapping(src0->type)); + aclnn_cast(ctx, acl_cos_reshape_tensor, acl_cos_final_tensor, + ggml_cann_type_mapping(src0->type)); + ACL_CHECK(aclDestroyTensor(acl_cos_reshape_tensor)); + ACL_CHECK(aclDestroyTensor(acl_sin_reshape_tensor)); + acl_sin_reshape_tensor = acl_sin_final_tensor; + acl_cos_reshape_tensor = acl_cos_final_tensor; + } + + uint64_t workspaceSize = 0; + aclOpExecutor* executor; + + void* workspaceAddr = nullptr; + + int acl_mode = mode; + if (mode == 0) { + acl_mode = 1; + } + + ACL_CHECK(aclnnRotaryPositionEmbeddingGetWorkspaceSize( + acl_src, acl_cos_reshape_tensor, acl_sin_reshape_tensor, acl_mode, + acl_dst, &workspaceSize, &executor)); + if (workspaceSize > 0) { + ggml_cann_pool_alloc workspace_allocator(ctx.pool(), workspaceSize); + workspaceAddr = workspace_allocator.get(); + } + + ACL_CHECK(aclnnRotaryPositionEmbedding(workspaceAddr, workspaceSize, + executor, ctx.stream())); + + ACL_CHECK(aclDestroyTensor(acl_src)); ACL_CHECK(aclDestroyTensor(acl_cos_reshape_tensor)); - ACL_CHECK(aclDestroyTensor(acl_minus_one_tensor)); - ACL_CHECK(aclDestroyTensor(acl_input_roll_mul_scale_tensor)); - ACL_CHECK(aclDestroyTensor(acl_input_roll_reshape_tensor)); - ACL_CHECK(aclDestroyTensor(acl_src0)); + ACL_CHECK(aclDestroyTensor(acl_sin_reshape_tensor)); ACL_CHECK(aclDestroyTensor(acl_dst)); } diff --git a/ggml/src/ggml-cann/common.h b/ggml/src/ggml-cann/common.h index edfa496148ff2..5164cb74ec92e 100644 --- a/ggml/src/ggml-cann/common.h +++ b/ggml/src/ggml-cann/common.h @@ -211,17 +211,20 @@ struct ggml_cann_pool_alloc { struct ggml_backend_cann_context { int32_t device; /**< Device ID. */ std::string name; /**< Name of the device. */ + std::string description; /**< Description of the device. */ aclrtEvent copy_event = nullptr; /**< Event for managing copy operations. */ - aclrtStream streams[GGML_CANN_MAX_STREAMS] = { - {nullptr}}; /**< Array of streams for the device. */ + aclrtStream streams[GGML_CANN_MAX_STREAMS] = {nullptr}; /**< Array of streams for the device. */ /** * @brief Constructor for initializing the context with a given device. * @param device Device ID. */ explicit ggml_backend_cann_context(int device) - : device(device), name("CANN" + std::to_string(device)) {} + : device(device), name("CANN" + std::to_string(device)) { + ggml_cann_set_device(device); + description = aclrtGetSocName(); + } /** * @brief Destructor for cleaning up resources. diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index 776340881434d..d410c02445c27 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -122,6 +122,10 @@ static ggml_cann_device_info ggml_cann_init() { ACL_CHECK(aclrtMemGetAllocationGranularity( &prop, ACL_RT_MEM_ALLOC_GRANULARITY_RECOMMENDED, &info.devices[id].vmm_granularity)); + + size_t free, total; + ggml_backend_cann_get_device_memory(id, &free, &total); + info.devices[id].total_vram = free; } // TODO: add more device info later. @@ -208,6 +212,11 @@ struct ggml_cann_pool_leg : public ggml_cann_pool { * @return A pointer to the allocated buffer. 
*/ void* alloc(size_t size, size_t* actual_size) override { + const size_t alignment = 128; + size = GGML_PAD(size, alignment); + if (size == 0) { + size = alignment; + } #ifdef DEBUG_CANN_MALLOC int nnz = 0; size_t max_size = 0; @@ -246,13 +255,11 @@ struct ggml_cann_pool_leg : public ggml_cann_pool { return ptr; } void* ptr; - size_t look_ahead_size = (size_t)(1.05 * size); - look_ahead_size = 256 * ((look_ahead_size + 255) / 256); ggml_cann_set_device(device); ACL_CHECK( - aclrtMalloc(&ptr, look_ahead_size, ACL_MEM_MALLOC_HUGE_FIRST)); - *actual_size = look_ahead_size; - pool_size += look_ahead_size; + aclrtMalloc(&ptr, size, ACL_MEM_MALLOC_HUGE_FIRST)); + *actual_size = size; + pool_size += size; #ifdef DEBUG_CANN_MALLOC GGML_LOG_INFO( "%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, " @@ -296,7 +303,7 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { /** * @brief The maximum size of the virtual memory pool (32 GB). */ - static const size_t CANN_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB + size_t max_size; /** * @brief The device ID associated with this buffer pool. @@ -341,7 +348,11 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { */ explicit ggml_cann_pool_vmm(int device) : device(device), - granularity(ggml_cann_info().devices[device].vmm_granularity) {} + granularity(ggml_cann_info().devices[device].vmm_granularity) { + auto dev = ggml_cann_info().devices[device]; + granularity = dev.vmm_granularity; + max_size = dev.total_vram; + } /** * @brief Destructor to free all buffers in the virtual memory pool. @@ -370,17 +381,19 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { // round up the allocation size to the alignment to ensure that all // allocations are aligned for all data types const size_t alignment = 128; - size = alignment * ((size + alignment - 1) / alignment); + size = GGML_PAD(size, alignment); + if (size == 0) { + size = alignment; + } size_t avail = pool_size - pool_used; if (size > avail) { // round up to the next multiple of the granularity size_t reserve_size = size - avail; - reserve_size = - granularity * ((reserve_size + granularity - 1) / granularity); + reserve_size = GGML_PAD(reserve_size, granularity); - GGML_ASSERT(pool_size + reserve_size <= CANN_POOL_VMM_MAX_SIZE); + GGML_ASSERT(pool_size + reserve_size <= max_size); // allocate more physical memory aclrtPhysicalMemProp prop = {}; @@ -396,7 +409,7 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { // reserve virtual address space (if not already reserved) if (pool_addr == 0) { ACL_CHECK(aclrtReserveMemAddress( - &pool_addr, CANN_POOL_VMM_MAX_SIZE, 0, NULL, 1)); + &pool_addr, max_size, 0, NULL, 1)); } // map at the end of the pool @@ -409,10 +422,11 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { // add to the pool pool_size += reserve_size; - // GGML_LOG_INFO("cann pool[%d]: size increased to %llu MB ( - // reserved %llu MB)\n", - // device, (unsigned long long) (pool_size/1024/1024), - // (unsigned long long) (reserve_size/1024/1024)); +#ifdef DEBUG_CANN_MALLOC + GGML_LOG_INFO("cann pool[%d]: size increased to %llu MB (reserved %llu MB)\n", + device, (unsigned long long) (pool_size/1024/1024), + (unsigned long long) (reserve_size/1024/1024)); +#endif } GGML_ASSERT(pool_addr != 0); @@ -457,7 +471,6 @@ struct ggml_cann_pool_vmm : public ggml_cann_pool { */ std::unique_ptr ggml_backend_cann_context::new_pool_for_device( int device) { - // return std::unique_ptr(new ggml_cann_pool_leg(device)); return std::unique_ptr(new ggml_cann_pool_vmm(device)); } @@ -1130,10 +1143,10 @@ 
ggml_backend_cann_buffer_type(int32_t device) { static bool ggml_backend_cann_buffer_type_initialized = false; if (!ggml_backend_cann_buffer_type_initialized) { - for (int32_t i = 0; i < GGML_CANN_MAX_DEVICES; i++) { + for (int32_t i = 0; i < ggml_cann_info().device_count; i++) { ggml_backend_cann_buffer_types[i] = { /* .iface = */ ggml_backend_cann_buffer_type_interface, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), device), + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cann_reg(), i), /* .context = */ new ggml_backend_cann_buffer_type_context{ i, "CANN" + std::to_string(i)}, @@ -1199,10 +1212,15 @@ static void * ggml_cann_host_malloc(size_t size) { return nullptr; } + const size_t alignment = 128; + size = GGML_PAD(size, alignment); + if (size == 0) { + size = alignment; + } + void * hostPtr = nullptr; aclError err = aclrtMallocHost((void **) &hostPtr, size); if (err != ACL_SUCCESS) { - GGML_LOG_WARN("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__, size / 1024.0 / 1024.0, aclGetRecentErrMsg()); return nullptr; @@ -1669,12 +1687,14 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, } case GGML_OP_MUL_MAT: { switch (op->src[0]->type) { - case GGML_TYPE_F16: - case GGML_TYPE_F32: case GGML_TYPE_Q8_0: - // TODO: fix me // Current groupsize should not be greater than k-1 in - // aclnnWeightQuantBatchMatmulV2GetWorkspaceSize(). + // aclnnWeightQuantBatchMatmulV2GetWorkspaceSize + if (op->src[0]->ne[0] <= QK8_0) { + return false; + } + case GGML_TYPE_F16: + case GGML_TYPE_F32: case GGML_TYPE_Q4_0: return true; default: @@ -1706,9 +1726,50 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, return false; } } + case GGML_OP_CONT: { + // TODO: support GGML_TYPE_BF16 + switch (op->src[0]->type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + return true; + default: + return false; + } + } + case GGML_OP_ROPE: { + // TODO: with ops-test v == 1 + float * ext_factor = (float*)((int32_t*)op->op_params + 7); + // TODO: n_dims <= ne0 + if (op->src[0]->ne[0] != op->op_params[1]) { + return false; + } + // TODO: ext_factor != 0 + if (*ext_factor != 0) { + return false; + } + + const int mode = ((const int32_t *) op->op_params)[2]; + if (mode & GGML_ROPE_TYPE_MROPE) { + return false; + } + if (mode & GGML_ROPE_TYPE_VISION) { + return false; + } + + return true; + } + case GGML_OP_UPSCALE: { + // aclnnUpsampleNearest2dGetWorkspaceSize not support + // selfDimN[2]/outDimN[2] or selfDimC[3]/outDimC[3] not equal + if (op->src[0]->ne[2] * op->ne[3] != op->src[0]->ne[3] * op->ne[2]) { + return false; + } + return true; + } + case GGML_OP_IM2COL: + case GGML_OP_CONCAT: case GGML_OP_DUP: case GGML_OP_REPEAT: - case GGML_OP_CONCAT: case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: @@ -1722,17 +1783,13 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, case GGML_OP_SCALE: case GGML_OP_SQR: case GGML_OP_CLAMP: - case GGML_OP_CONT: case GGML_OP_DIAG_MASK_INF: case GGML_OP_SOFT_MAX: - case GGML_OP_ROPE: - case GGML_OP_IM2COL: case GGML_OP_POOL_2D: case GGML_OP_SUM_ROWS: case GGML_OP_ARGSORT: case GGML_OP_ACC: case GGML_OP_GROUP_NORM: - case GGML_OP_UPSCALE: case GGML_OP_PAD: case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: @@ -2041,7 +2098,7 @@ static void * ggml_backend_cann_reg_get_proc_address(ggml_backend_reg_t reg, con static const ggml_backend_reg_i ggml_backend_cann_reg_interface = { /* .get_name = */ ggml_backend_cann_reg_get_name, /* .get_device_count = */ 
ggml_backend_cann_reg_get_device_count, - /* .get_device_get = */ ggml_backend_cann_reg_get_device, + /* .get_device = */ ggml_backend_cann_reg_get_device, /* .get_proc_address = */ ggml_backend_cann_reg_get_proc_address, }; @@ -2064,16 +2121,17 @@ ggml_backend_reg_t ggml_backend_cann_reg() { dev_ctx->name = GGML_CANN_NAME + std::to_string(i); ggml_cann_set_device(i); ggml_backend_dev_t dev = new ggml_backend_device { - /* .interface = */ ggml_backend_cann_device_interface, - /* .reg = */ &reg, - /* .context = */ dev_ctx + /* .iface = */ ggml_backend_cann_device_interface, + /* .reg = */ &reg, + /* .context = */ dev_ctx }; ctx->devices.push_back(dev); } reg = ggml_backend_reg { - /* .interface = */ ggml_backend_cann_reg_interface, - /* .context = */ ctx + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_cann_reg_interface, + /* .context = */ ctx }; } @@ -2126,3 +2184,5 @@ void ggml_backend_cann_get_device_memory(int32_t device, size_t* free, ggml_cann_set_device(device); ACL_CHECK(aclrtGetMemInfo(ACL_HBM_MEM, free, total)); } + +GGML_BACKEND_DL_IMPL(ggml_backend_cann_reg) diff --git a/ggml/src/ggml-cann/kernels/CMakeLists.txt b/ggml/src/ggml-cann/kernels/CMakeLists.txt index 5b4fef91b5877..d687220c3c57e 100644 --- a/ggml/src/ggml-cann/kernels/CMakeLists.txt +++ b/ggml/src/ggml-cann/kernels/CMakeLists.txt @@ -1,7 +1,3 @@ -if (NOT SOC_TYPE) - set (SOC_TYPE "Ascend910B3") -endif() - file(GLOB SRC_FILES get_row_f32.cpp get_row_f16.cpp @@ -13,7 +9,6 @@ file(GLOB SRC_FILES dup.cpp ) -string(TOLOWER ${SOC_TYPE} SOC_VERSION) set(ASCEND_CANN_PACKAGE_PATH ${CANN_INSTALL_DIR}) set(RUN_MODE "npu" CACHE STRING "run mode: npu/sim") @@ -30,4 +25,6 @@ ascendc_library(ascendc_kernels STATIC ${SRC_FILES} ) +message(STATUS "CANN: compile ascend kernels with SOC_TYPE:${SOC_TYPE}, SOC_VERSION:${SOC_VERSION}, compile macro:-D${SOC_TYPE_COMPILE_OPTION}.") +ascendc_compile_definitions(ascendc_kernels PRIVATE "-D${SOC_TYPE_COMPILE_OPTION}") # ascendc_compile_definitions(ascendc_kernels PRIVATE -DASCENDC_DUMP) diff --git a/ggml/src/ggml-cann/kernels/dup.cpp b/ggml/src/ggml-cann/kernels/dup.cpp index e2c651152f486..c7ba38d10a0b2 100644 --- a/ggml/src/ggml-cann/kernels/dup.cpp +++ b/ggml/src/ggml-cann/kernels/dup.cpp @@ -5,6 +5,7 @@ using namespace AscendC; #define BUFFER_NUM 2 +const int64_t SUPPORTED_MAX_DIM = 65535; // currently the limit of max block dim supported by dup kernel is 65535 template <typename SRC_T, typename DST_T> class DupByRows { @@ -51,24 +52,36 @@ class DupByRows { __aicore__ inline void copy_in() { LocalTensor<SRC_T> src_local = src_queue.AllocTensor<SRC_T>(); - - DataCopyExtParams dataCopyParams; - dataCopyParams.blockCount = 1; - dataCopyParams.blockLen = num_elem * sizeof(SRC_T); - DataCopyPadExtParams<SRC_T> padParams; - DataCopyPad(src_local, src_gm, dataCopyParams, padParams); - + const size_t elem_per_block = 32 / sizeof(SRC_T); + size_t tail = num_elem % elem_per_block; + size_t cpy_elements_len = tail > 0 ?
num_elem + 1 : num_elem; + DataCopy(src_local, src_gm, cpy_elements_len); src_queue.EnQue(src_local); } __aicore__ inline void copy_out() { LocalTensor dst_local = dst_queue.DeQue(); - +#ifdef ASCEND_310P + const size_t elem_per_block = 32 / sizeof(DST_T); + size_t tail = num_elem % elem_per_block; + size_t len = num_elem & ~(elem_per_block - 1); + if (len > 0) { + DataCopy(dst_gm, dst_local, len); + } + if(tail != 0) { + for (size_t i = tail; i < elem_per_block; i++) { + dst_local[len + i].SetValue(0, 0); + } + SetAtomicAdd(); + DataCopy(dst_gm[len], dst_local[len], elem_per_block); + SetAtomicNone(); + } +#else DataCopyExtParams dataCopyParams; dataCopyParams.blockCount = 1; dataCopyParams.blockLen = num_elem * sizeof(DST_T); DataCopyPad(dst_gm, dst_local, dataCopyParams); - +#endif dst_queue.FreeTensor(dst_local); } diff --git a/ggml/src/ggml-cann/kernels/get_row_f16.cpp b/ggml/src/ggml-cann/kernels/get_row_f16.cpp index c704b5b2ec0f3..416b45104de5b 100644 --- a/ggml/src/ggml-cann/kernels/get_row_f16.cpp +++ b/ggml/src/ggml-cann/kernels/get_row_f16.cpp @@ -14,7 +14,7 @@ class GET_ROW_F16 { int64_t *output_ne_ub, size_t *output_nb_ub) { // TODO, use template for F16/f32 int64_t op_block_num = GetBlockNum(); - int64_t op_block_idx = GetBlockIdx(); + op_block_idx = GetBlockIdx(); for (int i = 0; i < 4; i++) { input_ne[i] = input_ne_ub[i]; @@ -59,32 +59,42 @@ class GET_ROW_F16 { } __aicore__ inline void copy_in(uint32_t offset, size_t len) { + size_t origin_len = len; LocalTensor input_local = input_queue.AllocTensor(); - size_t tail = len % 32; - len = len & ~31; - DataCopy(input_local, input_gm[offset], len); + const size_t elem_per_block = 32 / sizeof(half); + size_t tail = len % elem_per_block; + len = len & ~(elem_per_block - 1); if(tail != 0) { - DataCopyExtParams dataCopyParams; - dataCopyParams.blockCount = 1; - dataCopyParams.blockLen = tail * sizeof(half); - DataCopyPadExtParams padParams; - DataCopyPad(input_local[len], input_gm[offset + len], - dataCopyParams, padParams); + len += elem_per_block; } + DataCopy(input_local, input_gm[offset], len); input_queue.EnQue(input_local); } __aicore__ inline void copy_out(uint32_t offset, size_t len) { LocalTensor output_local = output_queue.DeQue(); - size_t tail = len % 32; - len = len & ~31; - DataCopy(output_gm[offset], output_local, len); + const size_t elem_per_block = 32 / sizeof(float); + size_t tail = len % elem_per_block; + len = len & ~(elem_per_block - 1); + if (len > 0) { + DataCopy(output_gm[offset], output_local, len); + } + if(tail != 0) { +#ifdef ASCEND_310P + for (size_t i = tail; i < elem_per_block; i++) { + output_local[len + i].SetValue(0, 0); + } + SetAtomicAdd(); + DataCopy(output_gm[offset + len], output_local[len], elem_per_block); + SetAtomicNone(); +#else DataCopyExtParams dataCopyParams; dataCopyParams.blockCount = 1; dataCopyParams.blockLen = tail * sizeof(float); DataCopyPad(output_gm[offset + len], output_local[len], dataCopyParams); +#endif } output_queue.FreeTensor(output_local); } @@ -150,6 +160,7 @@ class GET_ROW_F16 { GlobalTensor output_gm; TQue input_queue; TQue output_queue; + int64_t op_block_idx; }; template diff --git a/ggml/src/ggml-cann/kernels/get_row_f32.cpp b/ggml/src/ggml-cann/kernels/get_row_f32.cpp index 9db080af36998..02116905b18e4 100644 --- a/ggml/src/ggml-cann/kernels/get_row_f32.cpp +++ b/ggml/src/ggml-cann/kernels/get_row_f32.cpp @@ -13,7 +13,7 @@ class GET_ROW_F32 { int64_t *indices_ne_ub, size_t *indices_nb_ub, int64_t *output_ne_ub, size_t *output_nb_ub) { int64_t op_block_num = 
GetBlockNum(); - int64_t op_block_idx = GetBlockIdx(); + op_block_idx = GetBlockIdx(); for (int i = 0; i < 4; i++) { input_ne[i] = input_ne_ub[i]; @@ -55,31 +55,40 @@ __aicore__ inline void copy_in(uint32_t offset, size_t len) { LocalTensor input_local = input_queue.AllocTensor(); - size_t tail = len % 32; - len = len & ~31; - DataCopy(input_local, input_gm[offset], len); + const size_t elem_per_block = 32 / sizeof(float); + size_t tail = len % elem_per_block; + len = len & ~(elem_per_block - 1); if(tail != 0) { - DataCopyExtParams dataCopyParams; - dataCopyParams.blockCount = 1; - dataCopyParams.blockLen = tail * sizeof(float); - DataCopyPadExtParams padParams; - DataCopyPad(input_local[len], input_gm[offset + len], - dataCopyParams, padParams); + len += elem_per_block; } + DataCopy(input_local, input_gm[offset], len); input_queue.EnQue(input_local); } __aicore__ inline void copy_out(uint32_t offset, size_t len) { LocalTensor output_local = output_queue.DeQue(); - size_t tail = len % 32; - len = len & ~31; - DataCopy(output_gm[offset], output_local, len); + const size_t elem_per_block = 32 / sizeof(float); + size_t tail = len % elem_per_block; + len = len & ~(elem_per_block - 1); + if (len > 0) { + DataCopy(output_gm[offset], output_local, len); + } + if(tail != 0) { +#ifdef ASCEND_310P + for (size_t i = tail; i < elem_per_block; i++) { + output_local[len + i].SetValue(0, 0); + } + SetAtomicAdd(); + DataCopy(output_gm[offset + len], output_local[len], elem_per_block); + SetAtomicNone(); +#else DataCopyExtParams dataCopyParams; dataCopyParams.blockCount = 1; dataCopyParams.blockLen = tail * sizeof(float); DataCopyPad(output_gm[offset + len], output_local[len], dataCopyParams); +#endif } output_queue.FreeTensor(output_local); } @@ -144,6 +153,7 @@ class GET_ROW_F32 { GlobalTensor output_gm; TQue input_queue; TQue output_queue; + int64_t op_block_idx; }; template diff --git a/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp b/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp index a80bfeec2417d..4fbe722086cf0 100644 --- a/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp +++ b/ggml/src/ggml-cann/kernels/get_row_q4_0.cpp @@ -2,6 +2,15 @@ // optimize me. Use a template to avoid duplicated code. using namespace AscendC; +#ifdef ASCEND_310P // 310P does not support 4bit get row + extern "C" __global__ __aicore__ void ascendc_get_row_q4_0( + GM_ADDR input_gm, GM_ADDR indices_gm, GM_ADDR output_gm, + GM_ADDR input_ne_gm, GM_ADDR indices_ne_gm, GM_ADDR indices_nb_gm, + GM_ADDR output_ne_gm, GM_ADDR output_nb_gm) { + // let the following test cases continue to run; here we just print an error message. Of course the test case that calls this operator will fail. + printf("Ascend310P not support 4bit get row.\n"); + } +#else #define BUFFER_NUM 2 @@ -191,3 +200,5 @@ extern "C" __global__ __aicore__ void ascendc_get_row_q4_0( indices_nb_ub, output_ne_ub, output_nb_ub); op.calculate(); } + +#endif // #ifdef ASCEND_310P diff --git a/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp b/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp index 8423b3f02a8f8..504b43afaa1f4 100644 --- a/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp +++ b/ggml/src/ggml-cann/kernels/quantize_f16_q8_0.cpp @@ -1,6 +1,14 @@ #include "kernel_operator.h" using namespace AscendC; +#ifdef ASCEND_310P + extern "C" __global__ __aicore__ void ascendc_quantize_f16_q8_0( + GM_ADDR input_gm, GM_ADDR output_gm, GM_ADDR input_ne_gm, + GM_ADDR input_nb_gm, GM_ADDR output_ne_gm) { + // let the following test cases continue to run; here we just print an error message.
Of course the test case that calls this operator will fail. + printf("Ascend310P not support f16->8bit quantization.\n"); + } +#else #define BUFFER_NUM 2 #define QK8_0 32 @@ -206,3 +214,5 @@ extern "C" __global__ __aicore__ void ascendc_quantize_f16_q8_0( op.init(input_gm, output_gm, input_ne_ub, input_nb_ub, output_ne_ub); op.calculate(); } + +#endif // #ifdef ASCEND_310P diff --git a/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp b/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp index b7c575093e9c1..05b0bc1df59af 100644 --- a/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp +++ b/ggml/src/ggml-cann/kernels/quantize_f32_q8_0.cpp @@ -1,6 +1,14 @@ #include "kernel_operator.h" using namespace AscendC; +#ifdef ASCEND_310P // 310P does not support f32->8bit quantization + extern "C" __global__ __aicore__ void ascendc_quantize_f32_q8_0( + GM_ADDR input_gm, GM_ADDR output_gm, GM_ADDR input_ne_gm, + GM_ADDR input_nb_gm, GM_ADDR output_ne_gm) { + // let the following test cases continue to run; here we just print an error message. Of course the test case that calls this operator will fail. + printf("Ascend310P not support f32->8bit quantization.\n"); + } +#else #define BUFFER_NUM 2 #define QK8_0 32 @@ -204,3 +212,5 @@ extern "C" __global__ __aicore__ void ascendc_quantize_f32_q8_0( op.init(input_gm, output_gm, input_ne_ub, input_nb_ub, output_ne_ub); op.calculate(); } + +#endif // #ifdef ASCEND_310P diff --git a/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp b/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp index 9c8c86b66ad66..1188937b74461 100644 --- a/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp +++ b/ggml/src/ggml-cann/kernels/quantize_float_to_q4_0.cpp @@ -1,6 +1,21 @@ #include "kernel_operator.h" using namespace AscendC; +#ifdef ASCEND_310P // 310P does not support float->4bit quantization + extern "C" __global__ __aicore__ void ascendc_quantize_f32_to_q4_0( + GM_ADDR input_gm, GM_ADDR output_gm, GM_ADDR input_ne_gm, + GM_ADDR input_nb_gm, GM_ADDR output_ne_gm) { + // let the following test cases continue to run; here we just print an error message. Of course the test case that calls this operator will fail. + printf("Ascend310P not support f32->4bit quantization.\n"); + } + + extern "C" __global__ __aicore__ void ascendc_quantize_f16_to_q4_0( + GM_ADDR input_gm, GM_ADDR output_gm, GM_ADDR input_ne_gm, + GM_ADDR input_nb_gm, GM_ADDR output_ne_gm) { + // let the following test cases continue to run; here we just print an error message. Of course the test case that calls this operator will fail. + printf("Ascend310P not support f16->4bit quantization.\n"); + } +#else #define BUFFER_NUM 2 #define Group_Size 32 @@ -276,3 +291,5 @@ extern "C" __global__ __aicore__ void ascendc_quantize_f32_to_q4_0( op.init(input_gm, output_gm, input_ne_ub, input_nb_ub, output_ne_ub); op.calculate(); } + +#endif // #ifdef ASCEND_310P diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h index 050161393456e..f13fd4dea6f41 100644 --- a/ggml/src/ggml-common.h +++ b/ggml/src/ggml-common.h @@ -6,7 +6,20 @@ typedef uint16_t ggml_half; typedef uint32_t ggml_half2; -#define GGML_COMMON_AGGR +#define GGML_COMMON_AGGR_U +#define GGML_COMMON_AGGR_S + +#define GGML_COMMON_DECL +#elif defined(GGML_COMMON_DECL_CPP) +#include <cstdint> + +typedef uint16_t ggml_half; +typedef uint32_t ggml_half2; + +// std-c++ allows anonymous unions but some compilers warn on them +#define GGML_COMMON_AGGR_U data +// std-c++ does not allow anonymous structs.
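To make the two macros concrete: in C (and the CUDA/HIP/SYCL dialects that allow anonymous unions) some or all of the aggregate names disappear, so quant-block fields are reached directly, while ISO C++ forbids anonymous structs and therefore names both levels `data`. A hand-expanded view of `block_q4_1` under the new `GGML_COMMON_DECL_CPP` branch (types stubbed in so the snippet stands alone):

```cpp
#include <cstdint>

typedef uint16_t ggml_half;   // as in ggml-common.h
typedef uint32_t ggml_half2;
#define QK4_1 32

// With GGML_COMMON_AGGR_U and GGML_COMMON_AGGR_S both expanding to `data`:
typedef struct {
    union {
        struct {
            ggml_half d;       // delta
            ggml_half m;       // min
        } data;                // GGML_COMMON_AGGR_S
        ggml_half2 dm;
    } data;                    // GGML_COMMON_AGGR_U
    uint8_t qs[QK4_1 / 2];     // nibbles / quants
} block_q4_1;

// C++ access: b.data.data.d and b.data.dm.
// In C both macro expansions are empty, and the same fields are b.d and b.dm.
```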
+#define GGML_COMMON_AGGR_S data #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_METAL) @@ -15,7 +28,8 @@ typedef uint32_t ggml_half2; typedef half ggml_half; typedef half2 ggml_half2; -#define GGML_COMMON_AGGR +#define GGML_COMMON_AGGR_U +#define GGML_COMMON_AGGR_S #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_CUDA) @@ -29,7 +43,8 @@ typedef half2 ggml_half2; typedef half ggml_half; typedef half2 ggml_half2; -#define GGML_COMMON_AGGR data +#define GGML_COMMON_AGGR_U +#define GGML_COMMON_AGGR_S data #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_HIP) @@ -39,7 +54,8 @@ typedef half2 ggml_half2; typedef half ggml_half; typedef half2 ggml_half2; -#define GGML_COMMON_AGGR data +#define GGML_COMMON_AGGR_U +#define GGML_COMMON_AGGR_S data #define GGML_COMMON_DECL #elif defined(GGML_COMMON_DECL_SYCL) @@ -49,7 +65,8 @@ typedef half2 ggml_half2; typedef sycl::half ggml_half; typedef sycl::half2 ggml_half2; -#define GGML_COMMON_AGGR data +#define GGML_COMMON_AGGR_U +#define GGML_COMMON_AGGR_S data #define GGML_COMMON_DECL #endif @@ -154,9 +171,9 @@ typedef struct { struct { ggml_half d; // delta ggml_half m; // min - } GGML_COMMON_AGGR; + } GGML_COMMON_AGGR_S; ggml_half2 dm; - }; + } GGML_COMMON_AGGR_U; uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_half) + QK4_1 / 2, "wrong q4_1 block size/padding"); @@ -175,9 +192,9 @@ typedef struct { struct { ggml_half d; // delta ggml_half m; // min - } GGML_COMMON_AGGR; + } GGML_COMMON_AGGR_S; ggml_half2 dm; - }; + } GGML_COMMON_AGGR_U; uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_1 / 2]; // nibbles / quants } block_q5_1; @@ -196,37 +213,13 @@ typedef struct { struct { ggml_half d; // delta ggml_half s; // d * sum(qs[i]) - } GGML_COMMON_AGGR; + } GGML_COMMON_AGGR_S; ggml_half2 ds; - }; + } GGML_COMMON_AGGR_U; int8_t qs[QK8_1]; // quants } block_q8_1; static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding"); -typedef struct { - ggml_half d[4]; // deltas for 4 q4_0 blocks - uint8_t qs[QK4_0 * 2]; // nibbles / quants for 4 q4_0 blocks -} block_q4_0x4; -static_assert(sizeof(block_q4_0x4) == 4 * sizeof(ggml_half) + QK4_0 * 2, "wrong q4_0x4 block size/padding"); - -typedef struct { - ggml_half d[8]; // deltas for 8 q4_0 blocks - uint8_t qs[QK4_0 * 4]; // nibbles / quants for 8 q4_0 blocks -} block_q4_0x8; -static_assert(sizeof(block_q4_0x8) == 8 * sizeof(ggml_half) + QK4_0 * 4, "wrong q4_0x8 block size/padding"); - -typedef struct { - ggml_half d[4]; // deltas for 4 q8_0 blocks - int8_t qs[QK8_0 * 4]; // quants for 4 q8_0 blocks -} block_q8_0x4; -static_assert(sizeof(block_q8_0x4) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong q8_0x4 block size/padding"); - -typedef struct { - ggml_half d[8]; // deltas for 8 q8_0 blocks - int8_t qs[QK8_0 * 8]; // quants for 8 q8_0 blocks -} block_q8_0x8; -static_assert(sizeof(block_q8_0x8) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong q8_0x8 block size/padding"); - // // Ternary quantization // @@ -261,9 +254,9 @@ typedef struct { struct { ggml_half d; // super-block scale for quantized scales ggml_half dmin; // super-block scale for quantized mins - } GGML_COMMON_AGGR; + } GGML_COMMON_AGGR_S; ggml_half2 dm; - }; + } GGML_COMMON_AGGR_U; } block_q2_K; static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_half) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); @@ -288,9 +281,9 @@ typedef struct { struct { ggml_half d; // super-block scale for quantized scales ggml_half dmin; // super-block scale for 
quantized mins - } GGML_COMMON_AGGR; + } GGML_COMMON_AGGR_S; ggml_half2 dm; - }; + } GGML_COMMON_AGGR_U; uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; @@ -305,9 +298,9 @@ typedef struct { struct { ggml_half d; // super-block scale for quantized scales ggml_half dmin; // super-block scale for quantized mins - } GGML_COMMON_AGGR; + } GGML_COMMON_AGGR_S; ggml_half2 dm; - }; + } GGML_COMMON_AGGR_U; uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits @@ -431,6 +424,13 @@ static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_ #define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { #define GGML_TABLE_END() }; +#define GGML_COMMON_IMPL +#elif defined(GGML_COMMON_IMPL_CPP) +#include + +#define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = { +#define GGML_TABLE_END() }; + #define GGML_COMMON_IMPL #elif defined(GGML_COMMON_IMPL_METAL) #include @@ -473,7 +473,7 @@ GGML_TABLE_BEGIN(uint8_t, ksigns_iq2xs, 128) 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255, GGML_TABLE_END() -//#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics +//#if __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A // lowest compute capability for integer intrinsics GGML_TABLE_BEGIN(uint64_t, ksigns64, 128) 0x0000000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0x000000000000ffff, 0xff00000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0xff00000000ffffff, diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index cef41a0743cef..683b90aff8b55 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -1,261 +1,336 @@ -add_library(ggml-cpu - ggml-cpu.c - ggml-cpu.cpp - ggml-cpu-aarch64.c - ggml-cpu-aarch64.h - ggml-cpu-quants.c - ggml-cpu-quants.h - ) +function(ggml_add_cpu_backend_variant_impl tag_name) + if (tag_name) + set(GGML_CPU_NAME ggml-cpu-${tag_name}) + else() + set(GGML_CPU_NAME ggml-cpu) + endif() -target_link_libraries(ggml-cpu PRIVATE ggml-base) -target_include_directories(ggml-cpu PRIVATE . ..) + ggml_add_backend_library(${GGML_CPU_NAME}) -if (APPLE AND GGML_ACCELERATE) - find_library(ACCELERATE_FRAMEWORK Accelerate) - if (ACCELERATE_FRAMEWORK) - message(STATUS "Accelerate framework found") + list (APPEND GGML_CPU_SOURCES + ggml-cpu/ggml-cpu.c + ggml-cpu/ggml-cpu.cpp + ggml-cpu/ggml-cpu-aarch64.cpp + ggml-cpu/ggml-cpu-aarch64.h + ggml-cpu/ggml-cpu-hbm.cpp + ggml-cpu/ggml-cpu-hbm.h + ggml-cpu/ggml-cpu-quants.c + ggml-cpu/ggml-cpu-quants.h + ggml-cpu/ggml-cpu-traits.cpp + ggml-cpu/ggml-cpu-traits.h + ggml-cpu/amx/amx.cpp + ggml-cpu/amx/amx.h + ggml-cpu/amx/mmq.cpp + ggml-cpu/amx/mmq.h + ggml-cpu/ggml-cpu-impl.h + ) - add_compile_definitions(GGML_USE_ACCELERATE) - add_compile_definitions(ACCELERATE_NEW_LAPACK) - add_compile_definitions(ACCELERATE_LAPACK_ILP64) + target_compile_features(${GGML_CPU_NAME} PRIVATE c_std_11 cxx_std_17) + target_include_directories(${GGML_CPU_NAME} PRIVATE . 
ggml-cpu) - target_link_libraries(ggml-cpu PRIVATE ${ACCELERATE_FRAMEWORK}) - else() - message(WARNING "Accelerate framework not found") - endif() -endif() + if (APPLE AND GGML_ACCELERATE) + find_library(ACCELERATE_FRAMEWORK Accelerate) + if (ACCELERATE_FRAMEWORK) + message(STATUS "Accelerate framework found") -if (GGML_OPENMP) - find_package(OpenMP) - if (OpenMP_FOUND) - message(STATUS "OpenMP found") + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_ACCELERATE) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE ACCELERATE_NEW_LAPACK) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE ACCELERATE_LAPACK_ILP64) - add_compile_definitions(GGML_USE_OPENMP) + target_link_libraries(${GGML_CPU_NAME} PRIVATE ${ACCELERATE_FRAMEWORK}) + else() + message(WARNING "Accelerate framework not found") + endif() + endif() - target_link_libraries(ggml-cpu PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX) + if (GGML_OPENMP) + find_package(OpenMP) + if (OpenMP_FOUND) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_OPENMP) - # FIXME: should be replaced with a compiler id check - #if (GGML_MUSA) - # list(APPEND GGML_CPU_EXTRA_INCLUDES "/usr/lib/llvm-14/lib/clang/14.0.0/include") - # list(APPEND GGML_CPU_EXTRA_LIBS_PRIVATE "/usr/lib/llvm-14/lib/libomp.so") - #endif() - else() - message(WARNING "OpenMP not found") + target_link_libraries(${GGML_CPU_NAME} PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX) + else() + message(WARNING "OpenMP not found") + endif() endif() -endif() -if (GGML_LLAMAFILE) - message(STATUS "Using llamafile") + if (GGML_LLAMAFILE) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_LLAMAFILE) - add_compile_definitions(GGML_USE_LLAMAFILE) + list(APPEND GGML_CPU_SOURCES + ggml-cpu/llamafile/sgemm.cpp + ggml-cpu/llamafile/sgemm.h) + endif() - target_sources(ggml-cpu PRIVATE - llamafile/sgemm.cpp - llamafile/sgemm.h) -endif() + if (GGML_CPU_HBM) + find_library(memkind memkind REQUIRED) -if (GGML_CPU_HBM) - find_library(memkind memkind REQUIRED) + message(STATUS "Using memkind for CPU HBM") - message(STATUS "Using memkind for CPU HBM") + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_HBM) - add_compile_definitions(GGML_USE_CPU_HBM) + target_link_libraries(${GGML_CPU_NAME} PUBLIC memkind) + endif() - target_link_libraries(ggml-cpu PUBLIC memkind) -endif() + if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR + CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) -if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR - CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR - (NOT CMAKE_OSX_ARCHITECTURES AND - NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) + message(STATUS "ARM detected") - message(STATUS "ARM detected") + if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang") + message(FATAL_ERROR "MSVC is not supported for ARM, use clang") + else() + check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E) + if (NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "") + list(APPEND ARCH_FLAGS -mfp16-format=ieee) + endif() - if (MSVC) - add_compile_definitions(__aarch64__) # MSVC defines _M_ARM64 instead - add_compile_definitions(__ARM_NEON) - add_compile_definitions(__ARM_FEATURE_FMA) + if (GGML_NATIVE) + # -mcpu=native does not always enable all the features in some compilers, + # so we check for them manually and enable them if available - 
set(CMAKE_REQUIRED_FLAGS_PREV ${CMAKE_REQUIRED_FLAGS}) - string(JOIN " " CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} "/arch:armv8.2") + execute_process( + COMMAND ${CMAKE_C_COMPILER} -mcpu=native -E -v - + INPUT_FILE "/dev/null" + OUTPUT_QUIET + ERROR_VARIABLE ARM_MCPU + RESULT_VARIABLE ARM_MCPU_RESULT + ) + if (NOT ARM_MCPU_RESULT) + string(REGEX MATCH "-mcpu=[^ ']+" ARM_MCPU_FLAG "${ARM_MCPU}") + endif() + if ("${ARM_MCPU_FLAG}" STREQUAL "") + set(ARM_MCPU_FLAG -mcpu=native) + message(STATUS "ARM -mcpu not found, -mcpu=native will be used") + endif() - check_cxx_source_compiles("#include \nint main() { int8x16_t _a, _b; int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_DOTPROD) - if (GGML_COMPILER_SUPPORT_DOTPROD) - add_compile_definitions(__ARM_FEATURE_DOTPROD) - endif () + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) + include(CheckCXXSourceRuns) - check_cxx_source_compiles("#include \nint main() { int8x16_t _a, _b; int32x4_t _s = vmlaq_f32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_MATMUL_INT8) + set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+dotprod") + check_cxx_source_runs( + "#include \nint main() { int8x16_t _a, _b; int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }" + GGML_COMPILER_SUPPORT_DOTPROD) + if (GGML_COMPILER_SUPPORT_DOTPROD) + set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+dotprod") + endif() - if (GGML_COMPILER_SUPPORT_MATMUL_INT8) - add_compile_definitions(__ARM_FEATURE_MATMUL_INT8) - endif () + set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+i8mm") + check_cxx_source_runs( + "#include \nint main() { int8x16_t _a, _b; int32x4_t _s = vmmlaq_s32(_s, _a, _b); return 0; }" + GGML_COMPILER_SUPPORT_I8MM) + if (GGML_COMPILER_SUPPORT_I8MM) + set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+i8mm") + endif() - check_cxx_source_compiles("#include \nint main() { float16_t _a; float16x8_t _s = vdupq_n_f16(_a); return 0; }" GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC) - if (GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC) - add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) - endif () + set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) + list(APPEND ARCH_FLAGS "${ARM_MCPU_FLAG}${ARM_MCPU_FLAG_FIX}") - set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_PREV}) - else() - check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E) - if (NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "") - list(APPEND ARCH_FLAGS -mfp16-format=ieee) - endif() - if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6") - # Raspberry Pi 1, Zero - list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access) - endif() - if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7") - if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Android") - # Android armeabi-v7a - list(APPEND ARCH_FLAGS -mfpu=neon-vfpv4 -mno-unaligned-access -funsafe-math-optimizations) else() - # Raspberry Pi 2 - list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations) + if (GGML_CPU_ARM_ARCH) + list(APPEND ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH}) + endif() + endif() + + # show enabled features + execute_process( + COMMAND ${CMAKE_C_COMPILER} ${ARCH_FLAGS} -dM -E - + INPUT_FILE "/dev/null" + OUTPUT_VARIABLE ARM_FEATURE + RESULT_VARIABLE ARM_FEATURE_RESULT + ) + if (ARM_FEATURE_RESULT) + message(FATAL_ERROR "Failed to get ARM features") + else() + foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC) + string(FIND "${ARM_FEATURE}" "__ARM_FEATURE_${feature} 1" feature_pos) + if (NOT ${feature_pos} EQUAL -1) + message(STATUS "ARM feature ${feature} enabled") + endif() 
+ endforeach() endif() endif() - if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8") - # Android arm64-v8a - # Raspberry Pi 3, 4, Zero 2 (32-bit) - list(APPEND ARCH_FLAGS -mno-unaligned-access) - endif() - if (GGML_SVE) - list(APPEND ARCH_FLAGS -march=armv8.6-a+sve) - endif() - endif() -elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR - (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND - CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64)$")) - message(STATUS "x86 detected") - if (MSVC) - # instruction set detection for MSVC only - if (GGML_NATIVE) - # TODO: improve, should not reference files from the parent folder - include(cmake/FindSIMD.cmake) - endif () - if (GGML_AVX512) - list(APPEND ARCH_FLAGS /arch:AVX512) - # MSVC has no compile-time flags enabling specific - # AVX512 extensions, neither it defines the - # macros corresponding to the extensions. - # Do it manually. - if (GGML_AVX512_VBMI) - add_compile_definitions($<$:__AVX512VBMI__>) - add_compile_definitions($<$:__AVX512VBMI__>) - if (CMAKE_C_COMPILER_ID STREQUAL "Clang") - list(APPEND ARCH_FLAGS -mavx512vbmi) + elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) + + message(STATUS "x86 detected") + + if (MSVC) + # instruction set detection for MSVC only + if (GGML_NATIVE) + include(ggml-cpu/cmake/FindSIMD.cmake) + endif () + if (GGML_AVX512) + list(APPEND ARCH_FLAGS /arch:AVX512) + # /arch:AVX512 includes: __AVX512F__, __AVX512CD__, __AVX512BW__, __AVX512DQ__, and __AVX512VL__ + # MSVC has no compile-time flags enabling specific + # AVX512 extensions, neither it defines the + # macros corresponding to the extensions. + # Do it manually. 
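These manual definitions matter because the SIMD kernels key off the extension macros, not off compiler flags; without them an MSVC /arch:AVX512 build would silently take the generic path. An illustrative guard of the kind involved (not code from the patch):

```cpp
#include <immintrin.h>

// 8-bit dot-product accumulate: the VNNI instruction when the macro is
// visible, otherwise the classic maddubs/madd widening sequence.
static inline __m512i dot_u8s8_accum(__m512i acc, __m512i a, __m512i b) {
#if defined(__AVX512VNNI__)
    return _mm512_dpbusd_epi32(acc, a, b);                             // fused u8*s8 dot
#else
    const __m512i p16 = _mm512_maddubs_epi16(a, b);                    // u8*s8 -> s16 pairs
    const __m512i p32 = _mm512_madd_epi16(p16, _mm512_set1_epi16(1));  // s16 -> s32 sums
    return _mm512_add_epi32(acc, p32);
#endif
}
```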
+ list(APPEND ARCH_DEFINITIONS GGML_AVX512) + if (GGML_AVX512_VBMI) + list(APPEND ARCH_DEFINITIONS __AVX512VBMI__) + if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + list(APPEND ARCH_FLAGS -mavx512vbmi) + endif() + endif() + if (GGML_AVX512_VNNI) + list(APPEND ARCH_DEFINITIONS __AVX512VNNI__ GGML_AVX512_VNNI) + if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + list(APPEND ARCH_FLAGS -mavx512vnni) + endif() + endif() + if (GGML_AVX512_BF16) + list(APPEND ARCH_DEFINITIONS __AVX512BF16__ GGML_AVX512_BF16) + if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + list(APPEND ARCH_FLAGS -mavx512bf16) + endif() + endif() + if (GGML_AMX_TILE) + list(APPEND ARCH_DEFINITIONS __AMX_TILE__ GGML_AMX_TILE) endif() + if (GGML_AMX_INT8) + list(APPEND ARCH_DEFINITIONS __AMX_INT8__ GGML_AMX_INT8) + endif() + if (GGML_AMX_BF16) + list(APPEND ARCH_DEFINITIONS __AMX_BF16__ GGML_AMX_BF16) + endif() + elseif (GGML_AVX2) + list(APPEND ARCH_FLAGS /arch:AVX2) + list(APPEND ARCH_DEFINITIONS GGML_AVX2 GGML_FMA GGML_F16C) + elseif (GGML_AVX) + list(APPEND ARCH_FLAGS /arch:AVX) + list(APPEND ARCH_DEFINITIONS GGML_AVX) + else () + list(APPEND ARCH_FLAGS /arch:SSE4.2) + list(APPEND ARCH_DEFINITIONS GGML_SSE42) + endif() + if (GGML_AVX_VNNI) + # MSVC generates AVX512 with AVX-VNNI intrinsics even with /arch:AVX2 + #list(APPEND ARCH_DEFINITIONS __AVXVNNI__ GGML_AVX_VNNI) endif() - if (GGML_AVX512_VNNI) - add_compile_definitions($<$:__AVX512VNNI__>) - add_compile_definitions($<$:__AVX512VNNI__>) - if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + else () + if (GGML_NATIVE) + list(APPEND ARCH_FLAGS -march=native) + else () + list(APPEND ARCH_FLAGS -msse4.2) + list(APPEND ARCH_DEFINITIONS GGML_SSE42) + if (GGML_F16C) + list(APPEND ARCH_FLAGS -mf16c) + list(APPEND ARCH_DEFINITIONS GGML_F16C) + endif() + if (GGML_FMA) + list(APPEND ARCH_FLAGS -mfma) + list(APPEND ARCH_DEFINITIONS GGML_FMA) + endif() + if (GGML_AVX) + list(APPEND ARCH_FLAGS -mavx) + list(APPEND ARCH_DEFINITIONS GGML_AVX) + endif() + if (GGML_AVX2) + list(APPEND ARCH_FLAGS -mavx2) + list(APPEND ARCH_DEFINITIONS GGML_AVX2) + endif() + if (GGML_AVX_VNNI) + list(APPEND ARCH_FLAGS -mavxvnni) + list(APPEND ARCH_DEFINITIONS GGML_AVX_VNNI) + endif() + if (GGML_AVX512) + list(APPEND ARCH_FLAGS -mavx512f) + list(APPEND ARCH_FLAGS -mavx512cd) + list(APPEND ARCH_FLAGS -mavx512vl) + list(APPEND ARCH_FLAGS -mavx512dq) + list(APPEND ARCH_FLAGS -mavx512bw) + list(APPEND ARCH_DEFINITIONS GGML_AVX512) + endif() + if (GGML_AVX512_VBMI) + list(APPEND ARCH_FLAGS -mavx512vbmi) + list(APPEND ARCH_DEFINITIONS GGML_AVX512_VBMI) + endif() + if (GGML_AVX512_VNNI) list(APPEND ARCH_FLAGS -mavx512vnni) + list(APPEND ARCH_DEFINITIONS GGML_AVX512_VNNI) endif() - endif() - if (GGML_AVX512_BF16) - add_compile_definitions($<$:__AVX512BF16__>) - add_compile_definitions($<$:__AVX512BF16__>) - if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + if (GGML_AVX512_BF16) list(APPEND ARCH_FLAGS -mavx512bf16) + list(APPEND ARCH_DEFINITIONS GGML_AVX512_BF16) + endif() + if (GGML_AMX_TILE) + list(APPEND ARCH_FLAGS -mamx-tile) + list(APPEND ARCH_DEFINITIONS GGML_AMX_TILE) + endif() + if (GGML_AMX_INT8) + list(APPEND ARCH_FLAGS -mamx-int8) + list(APPEND ARCH_DEFINITIONS GGML_AMX_INT8) + endif() + if (GGML_AMX_BF16) + list(APPEND ARCH_FLAGS -mamx-bf16) + list(APPEND ARCH_DEFINITIONS GGML_AMX_BF16) endif() endif() - if (GGML_AMX_TILE) - add_compile_definitions($<$:__AMX_TILE__>) - add_compile_definitions($<$:__AMX_TILE__>) - endif() - if (GGML_AMX_INT8) - add_compile_definitions($<$:__AMX_INT8__>) - add_compile_definitions($<$:__AMX_INT8__>) - 
endif() - if (GGML_AMX_BF16) - add_compile_definitions($<$:__AMX_BF16__>) - add_compile_definitions($<$:__AMX_BF16__>) - endif() - elseif (GGML_AVX2) - list(APPEND ARCH_FLAGS /arch:AVX2) - elseif (GGML_AVX) - list(APPEND ARCH_FLAGS /arch:AVX) - endif() - else() - if (GGML_NATIVE) - list(APPEND ARCH_FLAGS -march=native) - endif() - if (GGML_F16C) - list(APPEND ARCH_FLAGS -mf16c) - endif() - if (GGML_FMA) - list(APPEND ARCH_FLAGS -mfma) - endif() - if (GGML_AVX) - list(APPEND ARCH_FLAGS -mavx) - endif() - if (GGML_AVX2) - list(APPEND ARCH_FLAGS -mavx2) - endif() - if (GGML_AVX512) - list(APPEND ARCH_FLAGS -mavx512f) - list(APPEND ARCH_FLAGS -mavx512dq) - list(APPEND ARCH_FLAGS -mavx512bw) - endif() - if (GGML_AVX512_VBMI) - list(APPEND ARCH_FLAGS -mavx512vbmi) endif() - if (GGML_AVX512_VNNI) - list(APPEND ARCH_FLAGS -mavx512vnni) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") + message(STATUS "PowerPC detected") + execute_process(COMMAND bash -c "grep POWER10 /proc/cpuinfo | head -n 1" OUTPUT_VARIABLE POWER10_M) + string(FIND "${POWER10_M}" "POWER10" substring_index) + if (NOT DEFINED substring_index OR "${substring_index}" STREQUAL "") + set(substring_index -1) endif() - if (GGML_AVX512_BF16) - list(APPEND ARCH_FLAGS -mavx512bf16) + + if (${substring_index} GREATER_EQUAL 0) + list(APPEND ARCH_FLAGS -mcpu=power10) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le") + list(APPEND ARCH_FLAGS -mcpu=powerpc64le) + else() + list(APPEND ARCH_FLAGS -mcpu=native -mtune=native) + # TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be) endif() - if (GGML_AMX_TILE) - list(APPEND ARCH_FLAGS -mamx-tile) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + message(STATUS "loongarch64 detected") + + list(APPEND ARCH_FLAGS -march=loongarch64) + if (GGML_LASX) + list(APPEND ARCH_FLAGS -mlasx) endif() - if (GGML_AMX_INT8) - list(APPEND ARCH_FLAGS -mamx-int8) + if (GGML_LSX) + list(APPEND ARCH_FLAGS -mlsx) endif() - if (GGML_AMX_BF16) - list(APPEND ARCH_FLAGS -mamx-bf16) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") + message(STATUS "RISC-V detected") + if (GGML_RVV) + list(APPEND ARCH_FLAGS -march=rv64gcv -mabi=lp64d) endif() - endif() -elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") - message(STATUS "PowerPC detected") - execute_process(COMMAND bash -c "grep POWER10 /proc/cpuinfo | head -n 1" OUTPUT_VARIABLE POWER10_M) - string(FIND "${POWER10_M}" "POWER10" substring_index) - if (NOT DEFINED substring_index OR "${substring_index}" STREQUAL "") - set(substring_index -1) - endif() - - if (${substring_index} GREATER_EQUAL 0) - list(APPEND ARCH_FLAGS -mcpu=power10) - elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le") - list(APPEND ARCH_FLAGS -mcpu=powerpc64le) else() - list(APPEND ARCH_FLAGS -mcpu=native -mtune=native) - #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be) + message(STATUS "Unknown architecture") endif() -elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") - message(STATUS "loongarch64 detected") - list(APPEND ARCH_FLAGS -march=loongarch64) - if (GGML_LASX) - list(APPEND ARCH_FLAGS -mlasx) - endif() - if (GGML_LSX) - list(APPEND ARCH_FLAGS -mlsx) + if (GGML_CPU_AARCH64) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_AARCH64) endif() -else() - message(STATUS "Unknown architecture") -endif() -if (GGML_CPU_AARCH64) - message(STATUS "Using runtime weight conversion of Q4_0 to Q4_0_x_x to enable optimized GEMM/GEMV kernels") 
- add_compile_definitions(GGML_USE_CPU_AARCH64) -endif() + message(STATUS "Adding CPU backend variant ${GGML_CPU_NAME}: ${ARCH_FLAGS} ${ARCH_DEFINITIONS}") + target_sources(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_SOURCES}) + target_compile_options(${GGML_CPU_NAME} PRIVATE ${ARCH_FLAGS}) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE ${ARCH_DEFINITIONS}) -target_compile_options(ggml-cpu PRIVATE "$<$:${ARCH_FLAGS}>") -target_compile_options(ggml-cpu PRIVATE "$<$:${ARCH_FLAGS}>") + if (GGML_BACKEND_DL) + # The feature detection code is compiled as a separate target so that + # it can be built without the architecture flags + # Since multiple variants of the CPU backend may be included in the same + # build, using set_source_files_properties() to set the arch flags is not possible + set(GGML_CPU_FEATS_NAME ${GGML_CPU_NAME}-feats) + add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/cpu-feats-x86.cpp) + target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . .. ../include) + target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARCH_DEFINITIONS}) + target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED) + set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_link_libraries(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_FEATS_NAME}) + endif() -if (EMSCRIPTEN) - set_target_properties(ggml-cpu PROPERTIES COMPILE_FLAGS "-msimd128") -endif() + if (EMSCRIPTEN) + set_target_properties(${GGML_CPU_NAME} PROPERTIES COMPILE_FLAGS "-msimd128") + endif() +endfunction() diff --git a/ggml/src/ggml-cpu/amx/amx.cpp b/ggml/src/ggml-cpu/amx/amx.cpp new file mode 100644 index 0000000000000..5ec5263ceb4ba --- /dev/null +++ b/ggml/src/ggml-cpu/amx/amx.cpp @@ -0,0 +1,220 @@ +#include "amx.h" +#include "common.h" +#include "mmq.h" +#include "ggml-backend-impl.h" +#include "ggml-backend.h" +#include "ggml-impl.h" +#include "ggml-cpu.h" +#include "ggml-cpu-traits.h" + +#if defined(__gnu_linux__) +#include +#include +#endif + +#include +#include +#include + +#if defined(__AMX_INT8__) && defined(__AVX512VNNI__) + +// AMX type_trais +namespace ggml::cpu::amx { +class tensor_traits : public ggml::cpu::tensor_traits { + bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override { + size = ggml_backend_amx_desired_wsize(op); + return true; + } + + bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override { + if (op->op == GGML_OP_MUL_MAT) { + ggml_backend_amx_mul_mat(params, op); + return true; + } + return false; + } +}; + +static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) { + static tensor_traits traits; + return &traits; +} +} // namespace ggml::cpu::amx + +// AMX buffer interface +static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) { + free(buffer->context); +} + +static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) { + return (void *) (buffer->context); +} + +static void ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { + tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor); + + GGML_UNUSED(buffer); +} + +static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, + uint8_t value, size_t offset, size_t size) { + memset((char *) tensor->data + offset, value, size); + + GGML_UNUSED(buffer); +} + +static void 
ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, + const void * data, size_t offset, size_t size) { + if (qtype_has_amx_kernels(tensor->type)) { + GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type)); + ggml_backend_amx_convert_weight(tensor, data, offset, size); + } else { + memcpy((char *) tensor->data + offset, data, size); + } + + GGML_UNUSED(buffer); +} + +/* +// need to figure what we need to do with buffer->extra. +static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { + GGML_ASSERT(!qtype_has_amx_kernels(tensor->type)); + memcpy(data, (const char *)tensor->data + offset, size); + + GGML_UNUSED(buffer); +} + +static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { + if (ggml_backend_buffer_is_host(src->buffer)) { + if (qtype_has_amx_kernels(src->type)) { + ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst)); + } else { + memcpy(dst->data, src->data, ggml_nbytes(src)); + } + return true; + } + return false; + + GGML_UNUSED(buffer); +} +*/ + +static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + memset(buffer->context, value, buffer->size); +} + +static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = { + /* .free_buffer = */ ggml_backend_amx_buffer_free_buffer, + /* .get_base = */ ggml_backend_amx_buffer_get_base, + /* .init_tensor = */ ggml_backend_amx_buffer_init_tensor, + /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor, + /* .set_tensor = */ ggml_backend_amx_buffer_set_tensor, + /* .get_tensor = */ nullptr, + /* .cpy_tensor = */ nullptr, + /* .clear = */ ggml_backend_amx_buffer_clear, + /* .reset = */ nullptr, +}; + +static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return "AMX"; + + GGML_UNUSED(buft); +} + +static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + void * data = ggml_aligned_malloc(size); + if (data == NULL) { + fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size); + return NULL; + } + + return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size); +} + +static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { + return TENSOR_ALIGNMENT; + + GGML_UNUSED(buft); +} + +namespace ggml::cpu::amx { +class extra_buffer_type : ggml::cpu::extra_buffer_type { + bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { + // handle only 2d gemm for now + auto is_contiguous_2d = [](const struct ggml_tensor * t) { + return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1; + }; + + if (op->op == GGML_OP_MUL_MAT && is_contiguous_2d(op->src[0]) && // src0 must be contiguous + is_contiguous_2d(op->src[1]) && // src1 must be contiguous + op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_amx_buffer_type() && + op->ne[0] % (TILE_N * 2) == 0 && // out_features is 32x + (qtype_has_amx_kernels(op->src[0]->type) || (op->src[0]->type == GGML_TYPE_F16))) { + // src1 must be host buffer + if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { + return false; + } + // src1 must be float32 + if (op->src[1]->type == GGML_TYPE_F32) { + return true; + } + } + return false; + } + + 
ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { + if (op->op == GGML_OP_MUL_MAT && op->src[0]->buffer && + op->src[0]->buffer->buft == ggml_backend_amx_buffer_type()) { + return (ggml::cpu::tensor_traits *) op->src[0]->extra; + } + + return nullptr; + } +}; +} // namespace ggml::cpu::amx + +static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { + return ggml_backend_amx_get_alloc_size(tensor); + + GGML_UNUSED(buft); +} + +#define ARCH_GET_XCOMP_PERM 0x1022 +#define ARCH_REQ_XCOMP_PERM 0x1023 +#define XFEATURE_XTILECFG 17 +#define XFEATURE_XTILEDATA 18 + +static bool ggml_amx_init() { +#if defined(__gnu_linux__) + if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) { + fprintf(stderr, "AMX is not ready to be used!\n"); + return false; + } + return true; +#elif defined(_WIN32) + return true; +#endif +} + +ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() { + static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = { + /* .iface = */ { + /* .get_name = */ ggml_backend_amx_buffer_type_get_name, + /* .alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment, + /* .get_max_size = */ nullptr, // defaults to SIZE_MAX + /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size, + /* .is_host = */ nullptr, + }, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), + /* .context = */ new ggml::cpu::amx::extra_buffer_type(), + }; + + if (!ggml_amx_init()) { + return nullptr; + } + + return &ggml_backend_buffer_type_amx; +} + +#endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__) diff --git a/ggml/src/ggml-cpu/amx/amx.h b/ggml/src/ggml-cpu/amx/amx.h new file mode 100644 index 0000000000000..5b65d76bdc89c --- /dev/null +++ b/ggml/src/ggml-cpu/amx/amx.h @@ -0,0 +1,8 @@ +#include "ggml-backend.h" +#include "ggml-cpu-impl.h" + +// GGML internal header + +#if defined(__AMX_INT8__) && defined(__AVX512VNNI__) +ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void); +#endif diff --git a/ggml/src/ggml-amx/common.h b/ggml/src/ggml-cpu/amx/common.h similarity index 67% rename from ggml/src/ggml-amx/common.h rename to ggml/src/ggml-cpu/amx/common.h index 5db8ce30d9bb4..f392e898518a7 100644 --- a/ggml/src/ggml-amx/common.h +++ b/ggml/src/ggml-cpu/amx/common.h @@ -1,14 +1,13 @@ #pragma once #include "ggml.h" -// hack until AMX is moved into the CPU backend -#include "../ggml-cpu/ggml-cpu-impl.h" // <immintrin.h> +#include "ggml-cpu-impl.h" #include <algorithm> #include <memory> #include <type_traits> -#if defined(_OPENMP) +#if defined(GGML_USE_OPENMP) #include <omp.h> #endif @@ -57,11 +56,11 @@ inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) { } template <typename func_t> -inline void parallel_for(int nth, int n, const func_t& f) { -#if defined(_OPENMP) -#pragma omp parallel num_threads(nth) +inline void parallel_for(int n, const func_t& f) { +#if defined(GGML_USE_OPENMP) +#pragma omp parallel { - //int nth = omp_get_num_threads(); + int nth = omp_get_num_threads(); int ith = omp_get_thread_num(); int tbegin, tend; balance211(n, nth, ith, tbegin, tend); @@ -69,26 +68,24 @@ inline void parallel_for(int nth, int n, const func_t& f) { } #else f(0, n); - - GGML_UNUSED(nth); #endif } +template <typename func_t> +inline void parallel_for_ggml(const ggml_compute_params * params, int n, const func_t & f) { + int tbegin, tend; + balance211(n, params->nth, params->ith, tbegin, tend); + f(tbegin, tend); +} + // quantized types that have AMX support 
inline bool qtype_has_amx_kernels(const enum ggml_type type) { // TODO: fix padding for vnni format return (type == GGML_TYPE_Q4_0) || - (type == GGML_TYPE_Q4_1); - //(type == GGML_TYPE_Q8_0) || - //(type == GGML_TYPE_Q4_K) || - //(type == GGML_TYPE_Q5_K) || - //(type == GGML_TYPE_Q6_K) || - //(type == GGML_TYPE_IQ4_XS); + (type == GGML_TYPE_Q4_1) || + (type == GGML_TYPE_Q8_0) || + (type == GGML_TYPE_Q4_K) || + (type == GGML_TYPE_Q5_K) || + (type == GGML_TYPE_Q6_K) || + (type == GGML_TYPE_IQ4_XS); } - -// ggml backend context -struct ggml_backend_amx_context { - int n_threads = GGML_DEFAULT_N_THREADS; - std::unique_ptr<char[]> work_data; - size_t work_size = 0; -}; diff --git a/ggml/src/ggml-amx/mmq.cpp b/ggml/src/ggml-cpu/amx/mmq.cpp similarity index 96% rename from ggml/src/ggml-amx/mmq.cpp rename to ggml/src/ggml-cpu/amx/mmq.cpp index 529bee25b5e73..0ea91596bc7e2 100644 --- a/ggml/src/ggml-amx/mmq.cpp +++ b/ggml/src/ggml-cpu/amx/mmq.cpp @@ -4,8 +4,11 @@ #pragma GCC diagnostic ignored "-Wunused-local-typedefs" #endif +#include "amx.h" #include "mmq.h" #include "ggml-impl.h" +#include "ggml-cpu-impl.h" +#include "ggml-cpu-quants.h" #include "ggml-quants.h" #include <algorithm> #include <type_traits> @@ -15,10 +18,6 @@ #include <unistd.h> #endif -#if defined(_OPENMP) -#include <omp.h> -#endif - #if (defined(_WIN32) || defined(_WIN64)) #define RESTRICT __restrict #else @@ -33,7 +32,7 @@ #define ALWAYS_INLINE inline #endif -#if defined(__AMX_INT8__) +#if defined(__AMX_INT8__) && defined(__AVX512VNNI__) namespace { @@ -496,13 +495,12 @@ inline void from_float(const float * x, char * vy, int64_t k); template <> inline void from_float<block_q8_0>(const float * x, char * vy, int64_t k) { - // FIXME: using unoptimized reference impl until moved to CPU backend - quantize_row_q8_0_ref(x, (block_q8_0 *)vy, k); + quantize_row_q8_0(x, (block_q8_0 *)vy, k); } template <> inline void from_float<block_q8_1>(const float * x, char * vy, int64_t k) { - quantize_row_q8_1_ref(x, (block_q8_1 *)vy, k); + quantize_row_q8_1(x, (block_q8_1 *)vy, k); } template <> @@ -950,7 +948,7 @@ template> void unpack_B(packed_B_t * RESTRICT tile, const void * RESTRICT packed_B) { GGML_UNUSED(tile); GGML_UNUSED(packed_B); -}; +} template <> void unpack_B(int8_t * RESTRICT tile, const void * RESTRICT packed_B) { @@ -1338,21 +1336,19 @@ struct tinygemm_kernel_avx __m512 vb[COLS]; __m512 vc[ROWS * COLS]; - auto loadc = [&](int idx) { + auto loadc = [&](auto idx) { vc[idx] = _mm512_setzero_ps(); }; Unroll<ROWS * COLS>{}(loadc); - auto compute = [&](int idx, int k) { - // TODO: use `constexpr` here to get rid of integer div - // when upgraded to C++17 - const int row = idx / COLS; - const int col = idx % COLS; + auto compute = [&](auto idx, auto k) { + constexpr int row = idx / COLS; + constexpr int col = idx % COLS; - if (col == 0) { + if constexpr (col == 0) { va = _mm512_loadu_ps(A + row * K + k); } - if (row == 0) { + if constexpr (row == 0) { vb[col] = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(B + col * K + k))); } vc[idx] = _mm512_fmadd_ps(va, vb[col], vc[idx]); @@ -1362,9 +1358,9 @@ struct tinygemm_kernel_avx Unroll<ROWS * COLS>{}(compute, k); } - auto storec = [&](int idx) { - const int row = idx / COLS; - const int col = idx % COLS; + auto storec = [&](auto idx) { + constexpr int row = idx / COLS; + constexpr int col = idx % COLS; C[row * ldc + col] = _mm512_reduce_add_ps(vc[idx]); }; Unroll<ROWS * COLS>{}(storec); @@ -1382,13 +1378,13 @@ struct tinygemm_kernel_avx #define PACKED_INDEX(n, k, KB, tile_size) (n * KB + k) * tile_size template <typename TB> -void convert_B_packed_format(void * RESTRICT packed_B, const TB * RESTRICT B, int N, 
int K, int n_threads) { +void convert_B_packed_format(void * RESTRICT packed_B, const TB * RESTRICT B, int N, int K) { const int NB = N / TILE_N; const int KB = K / BLOCK_K; const int TILE_SIZE = get_tile_size(); // parallel on NB should be enough - parallel_for(n_threads, NB, [&](int begin, int end) { + parallel_for(NB, [&](int begin, int end) { for (int n = begin; n < end; ++n) { for (int k = 0; k < KB; ++k) { int n0 = n * TILE_N; @@ -1427,14 +1423,14 @@ struct tinygemm_kernel_vnni{}(loadc); - auto compute = [&](int col, int i) { + auto compute = [&](auto col, auto i) { // load a and compute compensation - if (col == 0) { + if constexpr (col == 0) { const int32_t * a_ptr = reinterpret_cast(A[0 * KB + i].qs); vcomp = _mm512_setzero_si512(); for (int k = 0; k < 8; ++k) { @@ -1466,7 +1462,7 @@ struct tinygemm_kernel_vnni{}(storec); @@ -1490,14 +1486,14 @@ struct tinygemm_kernel_vnni const __m512i lowMask = _mm512_set1_epi8(0xF); - auto loadc = [&](int col) { + auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); - auto compute = [&](int col, int i) { + auto compute = [&](auto col, auto i) { // load a - if (col == 0) { + if constexpr (col == 0) { const int32_t * a_ptr = reinterpret_cast(A[0 * KB + i].qs); for (int k = 0; k < 8; ++k) { va[k] = _mm512_set1_epi32(a_ptr[k]); @@ -1531,7 +1527,7 @@ struct tinygemm_kernel_vnni } //store to C - auto storec = [&](int col) { + auto storec = [&](auto col) { _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]); }; Unroll{}(storec); @@ -1562,14 +1558,14 @@ struct tinygemm_kernel_vnni(0x80)); - auto loadc = [&](int col) { + auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); - auto compute = [&](int col, int i) { + auto compute = [&](auto col, auto i) { // load a and add offset 128 - if (col == 0) { + if constexpr (col == 0) { const int32_t * a_ptr = reinterpret_cast(A[0 * KB + i].qs); for (int k = 0; k < 8; ++k) { va[k] = _mm512_set1_epi32(a_ptr[k]); @@ -1602,7 +1598,7 @@ struct tinygemm_kernel_vnni{}(storec); @@ -1634,7 +1630,7 @@ struct tinygemm_kernel_vnni{}(loadc); @@ -1648,9 +1644,9 @@ struct tinygemm_kernel_vnni{}(storec); @@ -1735,15 +1731,15 @@ struct tinygemm_kernel_vnni{}(loadc); // Q5_K and Q4_K shares the same vnni formats, refer to notes above. 
- auto compute = [&](int col, int i) { + auto compute = [&](auto col, auto i) { // load a - if (col == 0) { + if constexpr (col == 0) { for (int k_group = 0; k_group < QK_K / 32; ++k_group) { va[k_group] = _mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)(A[0 * KB + i].qs + k_group * 32))); } @@ -1808,7 +1804,7 @@ struct tinygemm_kernel_vnni{}(storec); @@ -1841,13 +1837,13 @@ struct tinygemm_kernel_vnni{}(loadc); - auto compute = [&](int col, int i) { - if (col == 0) { + auto compute = [&](auto col, auto i) { + if constexpr (col == 0) { // load a va[0] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 0)); va[1] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 64)); @@ -1959,13 +1955,13 @@ struct tinygemm_kernel_vnni(0x80)); const __m512i values256 = _mm512_add_epi8(values128, off); - auto loadc = [&](int col) { + auto loadc = [&](auto col) { vc[col] = _mm512_setzero_ps(); }; Unroll{}(loadc); - auto compute = [&](int col, int i) { - if (col == 0) { + auto compute = [&](auto col, auto i) { + if constexpr (col == 0) { // load a va[0] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 0)); va[1] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 64)); @@ -2015,7 +2011,7 @@ struct tinygemm_kernel_vnni{}(storec); @@ -2327,25 +2323,39 @@ size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor) { // pack weight to vnni format void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - - size_t alloc_size = ggml_backend_amx_get_alloc_size(tensor); - GGML_ASSERT(alloc_size == size); + GGML_ASSERT(offset == 0 && size == ggml_nbytes(tensor)); // only full tensor conversion is supported for now const enum ggml_type TYPE = tensor->type; const int K = tensor->ne[0]; // ne0: in_features const int N = tensor->ne[1]; // ne1: out_features -#if defined(_OPENMP) - // the buffer ctx is not initialized when .set_tensor is called - int n_threads = omp_get_num_threads(); -#else - int n_threads = 1; -#endif + GGML_DISPATCH_QTYPES(TYPE, [&] { + convert_B_packed_format((void *)((char *)tensor->data + offset), (const type *)data, N, K); + }); +} + +size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst) { + struct ggml_tensor * src0 = dst->src[0]; + + const enum ggml_type TYPE = src0->type; + + const bool is_floating_type = TYPE == GGML_TYPE_F16; + if (is_floating_type) { + return 0; + } + + const int M = dst->ne[1]; + const int K = src0->ne[0]; + + size_t desired_wsize = 0; GGML_DISPATCH_QTYPES(TYPE, [&] { - convert_B_packed_format((void *)((char *)tensor->data + offset), (const type *)data, N, K, n_threads); + const size_t row_size_A = K / blck_size * sizeof(vec_dot_type); + desired_wsize = M * row_size_A; }); + + return desired_wsize; } // NB: mixed dtype gemm with Advanced Matrix Extensions (Intel AMX) @@ -2356,14 +2366,12 @@ void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * d // // the function performs: dst = src1 @ src0.T // -void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor * dst) { +void ggml_backend_amx_mul_mat(const ggml_compute_params * params, struct ggml_tensor * dst) { struct ggml_tensor * src0 = dst->src[0]; struct ggml_tensor * src1 = dst->src[1]; const enum ggml_type TYPE = src0->type; - const int n_threads = ctx->n_threads; - // f16 only has avx512 kernels for now, // amx kernels will be added once 6th gen xeon is released. 
const bool is_floating_type = TYPE == GGML_TYPE_F16; @@ -2379,7 +2387,7 @@ void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor const int MB = div_up(M, BLOCK_M); const int NB = div_up(N, BLOCK_N); - parallel_for(n_threads, MB * NB, [&](int begin, int end) { + parallel_for_ggml(params, MB * NB, [&](int begin, int end) { GGML_DISPATCH_FLOATING_TYPES(TYPE, [&] { for (int i = begin; i < end; ++i) { int mb = i / NB; @@ -2412,27 +2420,29 @@ void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor } // pointer to work space, used convert A from float to quantized type - void * wdata = nullptr; + void * wdata = params->wdata; //TODO: performance improvement: merge quant A - GGML_DISPATCH_QTYPES(TYPE, [&] { - const size_t row_size_A = K / blck_size * sizeof(vec_dot_type); - const size_t desired_wsize = M * row_size_A; - if (ctx->work_size < desired_wsize) { - ctx->work_data.reset(new char[desired_wsize]); - ctx->work_size = desired_wsize; - } - wdata = ctx->work_data.get(); + if (params->ith == 0) { + GGML_DISPATCH_QTYPES(TYPE, [&] { + const size_t row_size_A = K / blck_size * sizeof(vec_dot_type); + const size_t desired_wsize = M * row_size_A; + if (params->wsize < desired_wsize) { + GGML_ABORT("insufficient work space size"); + } - // Q4_0, Q4_1, Q8_0 handles 1 TILE_K per blck_size - // Q4_K, Q5_K, Q6_K, IQ4_XS handles 8 TILE_K per blck_size - GGML_ASSERT(TILE_K == blck_size || TILE_K * 8 == blck_size); + // Q4_0, Q4_1, Q8_0 handles 1 TILE_K per blck_size + // Q4_K, Q5_K, Q6_K, IQ4_XS handles 8 TILE_K per blck_size + GGML_ASSERT(TILE_K == blck_size || TILE_K * 8 == blck_size); - const float * A_data = static_cast(src1->data); - for (int m = 0; m < M; ++m) { - from_float(A_data + m * K, (char *)wdata + m * row_size_A, K); - } - }); + const float * A_data = static_cast(src1->data); + for (int m = 0; m < M; ++m) { + from_float(A_data + m * K, (char *)wdata + m * row_size_A, K); + } + }); + } + + ggml_barrier(params->threadpool); if (M == 1) { // MB = 1 and handle 8 tiles in each block @@ -2440,7 +2450,7 @@ void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor constexpr int BLOCK_N = TILE_N * kTilesN; const int NB = div_up(N, BLOCK_N); - parallel_for(n_threads, NB, [&](int begin, int end) { + parallel_for_ggml(params, NB, [&](int begin, int end) { GGML_DISPATCH_QTYPES(TYPE, [&] { const int KB = K / blck_size; const int TILE_SIZE = get_tile_size(); @@ -2470,7 +2480,7 @@ void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor const int MB = div_up(M, BLOCK_M); const int NB = div_up(N, BLOCK_N); - parallel_for(n_threads, MB * NB, [&](int begin, int end) { + parallel_for_ggml(params, MB * NB, [&](int begin, int end) { // init tile config for each thread ggml_tile_config_init(); @@ -2498,13 +2508,4 @@ void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor }); } -#else // if defined(__AMX_INT8__) - -void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor * dst) { - fprintf(stderr, "GGML is not compiled with AMX support!\n"); - - GGML_UNUSED(ctx); - GGML_UNUSED(dst); -} - -#endif // if defined(__AMX_INT8__) +#endif // if defined(__AMX_INT8__) && defined(__AVX512VNNI__) diff --git a/ggml/src/ggml-amx/mmq.h b/ggml/src/ggml-cpu/amx/mmq.h similarity index 56% rename from ggml/src/ggml-amx/mmq.h rename to ggml/src/ggml-cpu/amx/mmq.h index cf09206206370..baf7684773453 100644 --- a/ggml/src/ggml-amx/mmq.h +++ b/ggml/src/ggml-cpu/amx/mmq.h @@ -1,17 +1,10 @@ 
#pragma once #include "common.h" -#include <stdint.h> -#ifdef __cplusplus -extern "C" { -#endif +size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst); size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor); void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); -void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor * dst); - -#ifdef __cplusplus -} -#endif +void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst); diff --git a/ggml/src/ggml-cpu/cpu-feats-x86.cpp b/ggml/src/ggml-cpu/cpu-feats-x86.cpp new file mode 100644 index 0000000000000..e8133d411fd14 --- /dev/null +++ b/ggml/src/ggml-cpu/cpu-feats-x86.cpp @@ -0,0 +1,323 @@ +#include "ggml-backend-impl.h" + +#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64)) + +#ifdef _MSC_VER +#include <intrin.h> +#endif + +#include <array> +#include <bitset> +#include <cstring> +#include <string> +#include <vector> + +// ref: https://cdrdv2-public.intel.com/782156/325383-sdm-vol-2abcd.pdf +struct cpuid_x86 { + bool SSE3(void) { return f_1_ecx[0]; } + bool PCLMULQDQ(void) { return f_1_ecx[1]; } + bool MONITOR(void) { return f_1_ecx[3]; } + bool SSSE3(void) { return f_1_ecx[9]; } + bool FMA(void) { return f_1_ecx[12]; } + bool CMPXCHG16B(void) { return f_1_ecx[13]; } + bool SSE41(void) { return f_1_ecx[19]; } + bool SSE42(void) { return f_1_ecx[20]; } + bool MOVBE(void) { return f_1_ecx[22]; } + bool POPCNT(void) { return f_1_ecx[23]; } + bool AES(void) { return f_1_ecx[25]; } + bool XSAVE(void) { return f_1_ecx[26]; } + bool OSXSAVE(void) { return f_1_ecx[27]; } + bool AVX(void) { return f_1_ecx[28]; } + bool F16C(void) { return f_1_ecx[29]; } + bool RDRAND(void) { return f_1_ecx[30]; } + + bool MSR(void) { return f_1_edx[5]; } + bool CX8(void) { return f_1_edx[8]; } + bool SEP(void) { return f_1_edx[11]; } + bool CMOV(void) { return f_1_edx[15]; } + bool CLFSH(void) { return f_1_edx[19]; } + bool MMX(void) { return f_1_edx[23]; } + bool FXSR(void) { return f_1_edx[24]; } + bool SSE(void) { return f_1_edx[25]; } + bool SSE2(void) { return f_1_edx[26]; } + + bool FSGSBASE(void) { return f_7_ebx[0]; } + bool BMI1(void) { return f_7_ebx[3]; } + bool HLE(void) { return is_intel && f_7_ebx[4]; } + bool AVX2(void) { return f_7_ebx[5]; } + bool BMI2(void) { return f_7_ebx[8]; } + bool ERMS(void) { return f_7_ebx[9]; } + bool INVPCID(void) { return f_7_ebx[10]; } + bool RTM(void) { return is_intel && f_7_ebx[11]; } + bool AVX512F(void) { return f_7_ebx[16]; } + bool AVX512DQ(void) { return f_7_ebx[17]; } + bool RDSEED(void) { return f_7_ebx[18]; } + bool ADX(void) { return f_7_ebx[19]; } + bool AVX512PF(void) { return f_7_ebx[26]; } + bool AVX512ER(void) { return f_7_ebx[27]; } + bool AVX512CD(void) { return f_7_ebx[28]; } + bool AVX512BW(void) { return f_7_ebx[30]; } + bool AVX512VL(void) { return f_7_ebx[31]; } + + bool SHA(void) { return f_7_ebx[29]; } + + bool PREFETCHWT1(void) { return f_7_ecx[0]; } + + bool LAHF(void) { return f_81_ecx[0]; } + bool LZCNT(void) { return is_intel && f_81_ecx[5]; } + bool ABM(void) { return is_amd && f_81_ecx[5]; } + bool SSE4a(void) { return is_amd && f_81_ecx[6]; } + bool XOP(void) { return is_amd && f_81_ecx[11]; } + bool TBM(void) { return is_amd && f_81_ecx[21]; } + + bool SYSCALL(void) { return is_intel && f_81_edx[11]; } + bool MMXEXT(void) { return is_amd && f_81_edx[22]; } + bool RDTSCP(void) { return is_intel && f_81_edx[27]; } + bool _3DNOWEXT(void) { return is_amd && f_81_edx[30]; } + bool 
_3DNOW(void) { return is_amd && f_81_edx[31]; } + + bool AVX512_VBMI(void) { return f_7_ecx[1]; } + bool AVX512_VNNI(void) { return f_7_ecx[11]; } + bool AVX512_FP16(void) { return f_7_edx[23]; } + bool AVX512_BF16(void) { return f_7_1_eax[5]; } + bool AVX_VNNI(void) { return f_7_1_eax[4]; } + + bool AMX_TILE(void) { return f_7_edx[24]; } + bool AMX_INT8(void) { return f_7_edx[25]; } + bool AMX_FP16(void) { return f_7_1_eax[21]; } + bool AMX_BF16(void) { return f_7_edx[22]; } + +#ifdef _MSC_VER + static void cpuid(int cpu_info[4], int eax) { + __cpuid(cpu_info, eax); + } + static void cpuidex(int cpu_info[4], int eax, int ecx) { + __cpuidex(cpu_info, eax, ecx); + } +#else + static void cpuid(int cpu_info[4], int eax) { + __asm__ __volatile__( + "cpuid" + : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) + : "a"(eax), "c"(0)); + } + static void cpuidex(int cpu_info[4], int eax, int ecx) { + __asm__ __volatile__( + "cpuid" + : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3]) + : "a"(eax), "c"(ecx)); + } +#endif + + cpuid_x86() { + std::array<int, 4> cpui; + std::vector<std::array<int, 4>> data; + + // calling __cpuid with 0x0 as the function_id argument + // gets the number of the highest valid function ID. + cpuid(cpui.data(), 0); + int n_ids = cpui[0]; + + for (int i = 0; i <= n_ids; ++i) { + cpuidex(cpui.data(), i, 0); + data.push_back(cpui); + } + + // capture vendor string + char vendor[0x20] = {}; + *reinterpret_cast<int *>(vendor) = data[0][1]; + *reinterpret_cast<int *>(vendor + 4) = data[0][3]; + *reinterpret_cast<int *>(vendor + 8) = data[0][2]; + this->vendor = vendor; + if (this->vendor == "GenuineIntel") { + is_intel = true; + } else if (this->vendor == "AuthenticAMD") { + is_amd = true; + } + + // load bitset with flags for function 0x00000001 + if (n_ids >= 1) { + f_1_ecx = data[1][2]; + f_1_edx = data[1][3]; + } + + // load bitset with flags for function 0x00000007 + if (n_ids >= 7) { + f_7_ebx = data[7][1]; + f_7_ecx = data[7][2]; + f_7_edx = data[7][3]; + cpuidex(cpui.data(), 7, 1); + f_7_1_eax = cpui[0]; + } + + // calling __cpuid with 0x80000000 as the function_id argument + // gets the number of the highest valid extended ID. 
+ cpuid(cpui.data(), 0x80000000); + unsigned int n_ex_ids = cpui[0]; + + std::vector<std::array<int, 4>> ext_data; + for (unsigned int i = 0x80000000; i <= n_ex_ids; ++i) { + cpuidex(cpui.data(), i, 0); + ext_data.push_back(cpui); + } + + // load bitset with flags for function 0x80000001 + if (n_ex_ids >= 0x80000001) { + f_81_ecx = ext_data[1][2]; + f_81_edx = ext_data[1][3]; + } + + // interpret CPU brand string if reported + char brand[0x40] = {}; + if (n_ex_ids >= 0x80000004) { + std::memcpy(brand, ext_data[2].data(), sizeof(cpui)); + std::memcpy(brand + 16, ext_data[3].data(), sizeof(cpui)); + std::memcpy(brand + 32, ext_data[4].data(), sizeof(cpui)); + this->brand = brand; + } + } + + bool is_intel = false; + bool is_amd = false; + std::string vendor; + std::string brand; + std::bitset<32> f_1_ecx; + std::bitset<32> f_1_edx; + std::bitset<32> f_7_ebx; + std::bitset<32> f_7_ecx; + std::bitset<32> f_7_edx; + std::bitset<32> f_7_1_eax; + std::bitset<32> f_81_ecx; + std::bitset<32> f_81_edx; +}; + +#if 0 +void test_x86_is() { + cpuid_x86 is; + printf("CPU Vendor: %s\n", is.vendor.c_str()); + printf("Brand: %s\n", is.brand.c_str()); + printf("is_intel: %d\n", is.is_intel); + printf("is_amd: %d\n", is.is_amd); + printf("sse3: %d\n", is.SSE3()); + printf("pclmulqdq: %d\n", is.PCLMULQDQ()); + printf("ssse3: %d\n", is.SSSE3()); + printf("fma: %d\n", is.FMA()); + printf("cmpxchg16b: %d\n", is.CMPXCHG16B()); + printf("sse41: %d\n", is.SSE41()); + printf("sse42: %d\n", is.SSE42()); + printf("movbe: %d\n", is.MOVBE()); + printf("popcnt: %d\n", is.POPCNT()); + printf("aes: %d\n", is.AES()); + printf("xsave: %d\n", is.XSAVE()); + printf("osxsave: %d\n", is.OSXSAVE()); + printf("avx: %d\n", is.AVX()); + printf("f16c: %d\n", is.F16C()); + printf("rdrand: %d\n", is.RDRAND()); + printf("msr: %d\n", is.MSR()); + printf("cx8: %d\n", is.CX8()); + printf("sep: %d\n", is.SEP()); + printf("cmov: %d\n", is.CMOV()); + printf("clflush: %d\n", is.CLFSH()); + printf("mmx: %d\n", is.MMX()); + printf("fxsr: %d\n", is.FXSR()); + printf("sse: %d\n", is.SSE()); + printf("sse2: %d\n", is.SSE2()); + printf("fsgsbase: %d\n", is.FSGSBASE()); + printf("bmi1: %d\n", is.BMI1()); + printf("hle: %d\n", is.HLE()); + printf("avx2: %d\n", is.AVX2()); + printf("bmi2: %d\n", is.BMI2()); + printf("erms: %d\n", is.ERMS()); + printf("invpcid: %d\n", is.INVPCID()); + printf("rtm: %d\n", is.RTM()); + printf("avx512f: %d\n", is.AVX512F()); + printf("rdseed: %d\n", is.RDSEED()); + printf("adx: %d\n", is.ADX()); + printf("avx512pf: %d\n", is.AVX512PF()); + printf("avx512er: %d\n", is.AVX512ER()); + printf("avx512cd: %d\n", is.AVX512CD()); + printf("sha: %d\n", is.SHA()); + printf("prefetchwt1: %d\n", is.PREFETCHWT1()); + printf("lahf: %d\n", is.LAHF()); + printf("lzcnt: %d\n", is.LZCNT()); + printf("abm: %d\n", is.ABM()); + printf("sse4a: %d\n", is.SSE4a()); + printf("xop: %d\n", is.XOP()); + printf("tbm: %d\n", is.TBM()); + printf("syscall: %d\n", is.SYSCALL()); + printf("mmxext: %d\n", is.MMXEXT()); + printf("rdtscp: %d\n", is.RDTSCP()); + printf("3dnowext: %d\n", is._3DNOWEXT()); + printf("3dnow: %d\n", is._3DNOW()); + printf("avx512_vbmi: %d\n", is.AVX512_VBMI()); + printf("avx512_vnni: %d\n", is.AVX512_VNNI()); + printf("avx512_fp16: %d\n", is.AVX512_FP16()); + printf("avx512_bf16: %d\n", is.AVX512_BF16()); + printf("amx_tile: %d\n", is.AMX_TILE()); + printf("amx_int8: %d\n", is.AMX_INT8()); + printf("amx_fp16: %d\n", is.AMX_FP16()); + printf("amx_bf16: %d\n", is.AMX_BF16()); +} +#endif + +static int ggml_backend_cpu_x86_score() { + // FIXME: this does 
not check for OS support + + int score = 0; + cpuid_x86 is; + +#ifdef GGML_FMA + if (!is.FMA()) { return 0; } + score += 1; +#endif +#ifdef GGML_F16C + if (!is.F16C()) { return 0; } + score += 1<<1; +#endif +#ifdef GGML_SSE42 + if (!is.SSE42()) { return 0; } + score += 1<<2; +#endif +#ifdef GGML_AVX + if (!is.AVX()) { return 0; } + score += 1<<4; +#endif +#ifdef GGML_AVX2 + if (!is.AVX2()) { return 0; } + score += 1<<5; +#endif +#ifdef GGML_AVX_VNNI + if (!is.AVX_VNNI()) { return 0; } + score += 1<<6; +#endif +#ifdef GGML_AVX512 + if (!is.AVX512F()) { return 0; } + if (!is.AVX512CD()) { return 0; } + if (!is.AVX512VL()) { return 0; } + if (!is.AVX512DQ()) { return 0; } + if (!is.AVX512BW()) { return 0; } + score += 1<<7; +#endif +#ifdef GGML_AVX512_VBMI + if (!is.AVX512_VBMI()) { return 0; } + score += 1<<8; +#endif +#ifdef GGML_AVX512_BF16 + if (!is.AVX512_BF16()) { return 0; } + score += 1<<9; +#endif +#ifdef GGML_AVX512_VNNI + if (!is.AVX512_VNNI()) { return 0; } + score += 1<<10; +#endif +#ifdef GGML_AMX_INT8 + if (!is.AMX_INT8()) { return 0; } + score += 1<<11; +#endif + + return score; +} + +GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_x86_score) + +#endif // defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64)) diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.c b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp similarity index 80% rename from ggml/src/ggml-cpu/ggml-cpu-aarch64.c rename to ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp index 96a16dfba1f65..a51d1a6c59517 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.c +++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp @@ -1,24 +1,57 @@ -// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates -// SPDX-License-Identifier: MIT -// - -#define GGML_COMMON_IMPL_C +#define GGML_COMMON_IMPL_CPP +#define GGML_COMMON_DECL_CPP #include "ggml-common.h" +#include "ggml-backend-impl.h" #include "ggml-quants.h" #include "ggml-impl.h" #include "ggml-cpu.h" -#include "ggml-cpu/ggml-cpu-impl.h" +#include "ggml-cpu-impl.h" +#include "ggml-cpu-traits.h" -#include <math.h> -#include <string.h> -#include <assert.h> -#include <float.h> -#include <stdlib.h> // for qsort -#include <stdio.h> // for GGML_ASSERT +#include <cmath> +#include <cstring> +#include <cassert> +#include <cfloat> +#include <cstdlib> // for qsort +#include <cstdio> // for GGML_ASSERT #include "ggml-cpu-aarch64.h" +// TODO: move to include file? 
+template <int K> constexpr int QK_0() { + if constexpr (K == 4) { + return QK4_0; + } + if constexpr (K == 8) { + return QK8_0; + } + return -1; +} + +template <int K, int N> struct block { + ggml_half d[N]; // deltas for N qK_0 blocks + int8_t qs[(QK_0<K>() * N * K) / 8]; // quants for N qK_0 blocks +}; + +// control size +static_assert(sizeof(block<4, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 2, "wrong block<4,4> size/padding"); +static_assert(sizeof(block<4, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<4,8> size/padding"); +static_assert(sizeof(block<8, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<8,4> size/padding"); +static_assert(sizeof(block<8, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong block<8,8> size/padding"); + +using block_q4_0x4 = block<4, 4>; +using block_q4_0x8 = block<4, 8>; +using block_q8_0x4 = block<8, 4>; +using block_q8_0x8 = block<8, 8>; + +struct block_iq4_nlx4 { + ggml_half d[4]; // deltas for 4 iq4_nl blocks + uint8_t qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks +}; + +static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wrong iq4_nlx4 block size/padding"); + #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Woverlength-strings" #elif defined(_MSC_VER) @@ -132,7 +165,7 @@ static inline __m512i sum_i16_pairs_int_32x16(const __m512i x) { } static inline __m512i mul_sum_us8_pairs_int32x16(const __m512i ax, const __m512i sy) { -#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) +#if defined(__AVX512VNNI__) const __m512i zero = _mm512_setzero_si512(); return _mm512_dpbusd_epi32(zero, ax, sy); #else @@ -187,12 +220,14 @@ static inline __m256i mul_sum_i8_pairs_int32x8(const __m256i x, const __m256i y) } #endif -static void quantize_q8_0_4x4(const float * restrict x, void * restrict vy, int64_t k) { +static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113}; + +static void quantize_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; - block_q8_0x4 * restrict y = (block_q8_0x4 *) vy; + block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; #if defined(__ARM_NEON) float32x4_t srcv[4][8]; @@ -281,12 +316,12 @@ static void quantize_q8_0_4x4(const float * restrict x, void * restrict vy, int6 #endif } -static void quantize_q8_0_4x8(const float * restrict x, void * restrict vy, int64_t k) { +static void quantize_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) { assert(QK8_0 == 32); assert(k % QK8_0 == 0); const int nb = k / QK8_0; - block_q8_0x4 * restrict y = (block_q8_0x4 *) vy; + block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy; #if defined(__ARM_NEON) float32x4_t srcv[4][8]; @@ -496,7 +531,7 @@ static void quantize_q8_0_4x8(const float * restrict x, void * restrict vy, int6 #endif } -void quantize_mat_q8_0(const float * restrict x, void * restrict vy, int64_t nrow, int64_t n_per_row, int64_t blck_size_interleave) { +static void quantize_mat_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row, int64_t blck_size_interleave) { assert(nrow == 4); UNUSED(nrow); if (blck_size_interleave == 4) { @@ -508,7 +543,7 @@ static void quantize_mat_q8_0(const float * restrict x, void * restrict vy, int64_t nro } } -void ggml_gemv_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) { +static void ggml_gemv_q4_0_4x4_q8_0(int n, float * 
GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; @@ -527,67 +562,47 @@ void ggml_gemv_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * UNUSED(ncols_interleaved); UNUSED(blocklen); -#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - if (ggml_cpu_has_neon()) { - const void * b_ptr = vx; - const void * a_ptr = vy; - float * res_ptr = s; - - __asm__ __volatile__( - "movi v31.16b, #0x4\n" - "movi v30.16b, #0xf0\n" - "add %x[b_ptr], %x[b_ptr], #0x8\n" - "1:" // Column loop - "add x22, %x[a_ptr], #0x2\n" - "movi v29.16b, #0x0\n" - "mov x21, %x[nb]\n" - "2:" // Block loop - "ldr q28, [%x[b_ptr], #0x0]\n" - "ldr q27, [x22, #0x0]\n" - "movi v26.4s, #0x0\n" - "sub x20, x22, #0x2\n" - "ldr q25, [x22, #0x10]\n" - "ldr q24, [%x[b_ptr], #0x10]\n" - "sub x21, x21, #0x1\n" - "add x22, x22, #0x22\n" - "ldr q23, [%x[b_ptr], #0x20]\n" - "ldr q22, [%x[b_ptr], #0x30]\n" - "ld1r { v21.8h }, [x20]\n" - "ldr q20, [%x[b_ptr], #-0x8]\n" - "sshl v16.16b, v28.16b, v31.16b\n" - "and v28.16b, v28.16b, v30.16b\n" - "sshl v19.16b, v24.16b, v31.16b\n" - "and v24.16b, v24.16b, v30.16b\n" - "add %x[b_ptr], %x[b_ptr], #0x48\n" - "sshl v18.16b, v23.16b, v31.16b\n" - "and v23.16b, v23.16b, v30.16b\n" - ".inst 0x4f9be21a // sdot v26.4s, v16.16b, v27.4b[0]\n" - "sshl v17.16b, v22.16b, v31.16b\n" - "and v22.16b, v22.16b, v30.16b\n" - "fcvtl v21.4s, v21.4h\n" - "fcvtl v16.4s, v20.4h\n" - ".inst 0x4f99e39a // sdot v26.4s, v28.16b, v25.4b[0]\n" - "fmul v16.4s, v16.4s, v21.4s\n" - ".inst 0x4fbbe27a // sdot v26.4s, v19.16b, v27.4b[1]\n" - ".inst 0x4fb9e31a // sdot v26.4s, v24.16b, v25.4b[1]\n" - ".inst 0x4f9bea5a // sdot v26.4s, v18.16b, v27.4b[2]\n" - ".inst 0x4f99eafa // sdot v26.4s, v23.16b, v25.4b[2]\n" - ".inst 0x4fbbea3a // sdot v26.4s, v17.16b, v27.4b[3]\n" - ".inst 0x4fb9eada // sdot v26.4s, v22.16b, v25.4b[3]\n" - "scvtf v26.4s, v26.4s, #0x4\n" - "fmla v29.4s, v26.4s, v16.4s\n" - "cbnz x21, 2b\n" - "sub %x[nc], %x[nc], #0x4\n" - "str q29, [%x[res_ptr], #0x0]\n" - "add %x[res_ptr], %x[res_ptr], #0x10\n" - "cbnz %x[nc], 1b\n" - : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc) - : [a_ptr] "r" (a_ptr), [nb] "r" (nb) - : "memory", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x20", "x21", "x22" - ); +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + const block_q4_0x4 * b_ptr = (const block_q4_0x4 *)vx; + + for (int c = 0; c < nc; c += ncols_interleaved) { + const block_q8_0 * a_ptr = (const block_q8_0 *)vy; + float32x4_t acc = vdupq_n_f32(0); + for (int b = 0; b < nb; b++) { + int8x16_t b0 = vld1q_s8((const int8_t *)b_ptr->qs); + int8x16_t b1 = vld1q_s8((const int8_t *)b_ptr->qs + 16); + int8x16_t b2 = vld1q_s8((const int8_t *)b_ptr->qs + 32); + int8x16_t b3 = vld1q_s8((const int8_t *)b_ptr->qs + 48); + float16x4_t bd = vld1_f16((const __fp16 *)b_ptr->d); + + int8x16_t a0 = vld1q_s8(a_ptr->qs); + int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2); + float16x4_t ad = vld1_dup_f16((const __fp16 *)&a_ptr->d); + + int32x4_t ret = vdupq_n_s32(0); + + ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0); + ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1); + ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2); + ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3); + + ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0); + ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1); + ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2); + ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3); + + acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4), + vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd))); + a_ptr++; + b_ptr++; + } + vst1q_f32(s, acc); + s += ncols_interleaved; + } return; } -#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) float sumf[4]; int sumi; @@ -613,7 +628,7 @@ void ggml_gemv_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * } } -void ggml_gemv_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) { +static void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; @@ -723,7 +738,7 @@ void ggml_gemv_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void * } } -void ggml_gemv_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) { +static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; @@ -996,7 +1011,103 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * } } -void ggml_gemm_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) { +static void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + float * res_ptr = s; + + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + float32x4_t sumf = vdupq_n_f32(0); + for (int l = 0; l < nb; l++) { + uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0); + uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16); + uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32); + uint8x16_t b_3 = vld1q_u8(b_ptr[l].qs + 48); + + int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4); + int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F); + int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4); + int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F); + int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4); + int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F); + int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4); + int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F); + + int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0); + int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16); + + int32x4_t sumi = vdupq_n_s32(0); + sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0); + sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0); + sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1); + sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1); + sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2); + sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2); + sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3); + sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3); + + float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d)); + float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); + float32x4_t d = a_d * b_d; + + sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi)); + } + + vst1q_f32(res_ptr + x * 4, sumf); + } + return; + } +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) + { + float sumf[4]; + int sumi; + + const block_q8_0 * a_ptr = (const block_q8_0 *) vy; + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0; + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; + const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; + sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])); + } + sumf[j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d); + } + } + } + for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j]; + } + } +} + +static void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; @@ -1017,7 +1128,7 @@ void ggml_gemm_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * UNUSED(blocklen); #if ! ((defined(_MSC_VER)) && ! 
defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) - if (ggml_cpu_has_neon()) { + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { const void * b_ptr = vx; const void * a_ptr = vy; float * res_ptr = s; @@ -1512,7 +1623,7 @@ void ggml_gemm_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * } } -void ggml_gemm_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) { +static void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 4; @@ -1966,7 +2077,7 @@ void ggml_gemm_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void * } } -void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) { +static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { const int qk = QK8_0; const int nb = n / qk; const int ncols_interleaved = 8; @@ -2486,31 +2597,31 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31) // Shuffle pattern one - right side input - const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) - const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) + const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) + const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) - const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) - const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) + const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) + const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) - const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, 136); //B0(16-19) B1(16-19) 
B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) - const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) + const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) + const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) - const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) - const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) + const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) + const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) // Shuffle pattern two - right side input - const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) - const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) + const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) + const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) - const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) - const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) + const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) 
B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) + const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) - const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) - const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) + const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) + const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) - const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) - const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) + const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) + const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) // Scale values - Load the weight scale values of two block_q4_0x8 const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d); @@ -2544,31 +2655,31 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * // Shuffle pattern one - left side input - const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) - const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) + const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) + const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) 
A2(0-3) A3(0-3) A3(0-3) - const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) - const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) + const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) + const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) - const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) - const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) + const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) + const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) - const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) - const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) + const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) + const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) // Shuffle pattern two - left side input - const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) - const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) + const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) 
A0(4-7) A0(4-7) A1(4-7) A1(4-7) + const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) - const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) - const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) + const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) + const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) - const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) - const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) + const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) + const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) - const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) - const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) + const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) + const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version @@ -2597,10 +2708,10 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * // 
Straighten out to make 4 row vectors - __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, 78)); - __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01); - __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, 78)); - __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11); + __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); + __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); + __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78)); + __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptrs[rp][b].d), loadMask), 68); @@ -2679,31 +2790,31 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31) // Shuffle pattern one - right side input - const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) - const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) + const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3) + const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3) - const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) - const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) + const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11) + const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11) - const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) 
B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) - const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) + const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19) + const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19) - const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) - const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) + const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27) + const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27) // Shuffle pattern two - right side input - const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) - const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) + const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7) + const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7) - const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) - const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) + const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) 
B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15) + const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15) - const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) - const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) + const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23) + const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23) - const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) - const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) + const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31) + const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31) // Scale values - Load the weight scale values of two block_q4_0x8 @@ -2735,31 +2846,31 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * // Shuffle pattern one - left side input - const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) - const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) + const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) + const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) - const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, 160); 
//A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) - const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) + const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) + const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) - const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) - const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) + const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) + const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) - const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) - const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) + const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) + const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) // Shuffle pattern two - left side input - const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) - const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) + const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) + const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, 
(_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) - const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) - const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) + const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) + const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) - const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) - const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) + const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) + const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) - const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) - const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) + const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) + const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane // Resembles MMLAs into 2x2 matrices in ARM Version @@ -2788,10 +2899,10 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * // Straighten out to make 4 row vectors - __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, 
_mm512_shuffle_epi32(iacc_mat_01, 78)); - __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01); - __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, 78)); - __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11); + __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78)); + __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01); + __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78)); + __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11); // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptr[b].d), loadMask), 68); @@ -3386,7 +3497,117 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * } } -// FIXME: this code is duplicated from ggml-aarch64.c +static void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) { + const int qk = QK8_0; + const int nb = n / qk; + const int ncols_interleaved = 4; + const int blocklen = 4; + + assert (n % qk == 0); + assert (nr % 4 == 0); + assert (nc % ncols_interleaved == 0); + + UNUSED(s); + UNUSED(bs); + UNUSED(vx); + UNUSED(vy); + UNUSED(nr); + UNUSED(nc); + UNUSED(nb); + UNUSED(ncols_interleaved); + UNUSED(blocklen); + +#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD) + if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) { + const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl); + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + + float32x4_t sumf[4]; + for (int m = 0; m < 4; m++) { + sumf[m] = vdupq_n_f32(0); + } + + for (int l = 0; l < nb; l++) { + float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d)); + float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d)); + + int32x4_t sumi_0 = vdupq_n_s32(0); + int32x4_t sumi_1 = vdupq_n_s32(0); + int32x4_t sumi_2 = vdupq_n_s32(0); + int32x4_t sumi_3 = vdupq_n_s32(0); + + for (int k = 0; k < 4; k++) { + int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0); + int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64); + + uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k); + int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4); + int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF); + + sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0); + sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1); + sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2); + sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3); + sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0); + sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1); + sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2); + sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3); + } + + sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0)); + sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1)); + sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), 
vcvtq_f32_s32(sumi_2)); + sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3)); + } + + for (int m = 0; m < 4; m++) { + vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]); + } + } + } + return; + } +#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) + { + float sumf[4][4]; + int sumi; + + for (int y = 0; y < nr / 4; y++) { + const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb); + for (int x = 0; x < nc / ncols_interleaved; x++) { + const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb); + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0; + } + for (int l = 0; l < nb; l++) { + for (int k = 0; k < (qk / (2 * blocklen)); k++) { + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) { + sumi = 0; + for (int i = 0; i < blocklen; ++i) { + const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F]; + const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4]; + sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) + + (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])); + } + sumf[m][j] += sumi * GGML_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_FP16_TO_FP32(a_ptr[l].d[m]); + } + } + } + } + for (int m = 0; m < 4; m++) { + for (int j = 0; j < ncols_interleaved; j++) + s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j]; + } + } + } + } +} + static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave) { block_q4_0x4 out; @@ -3456,20 +3677,20 @@ static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_in return out; } -static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * restrict data, size_t data_size) { +static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_0); GGML_ASSERT(interleave_block == 4 || interleave_block == 8); + constexpr int nrows_interleaved = 4; block_q4_0x4 * dst = (block_q4_0x4 *)t->data; const block_q4_0 * src = (const block_q4_0 *)data; block_q4_0 dst_tmp[4]; - int nrow = t->ne[1]; // Number of rows - int nrows_interleaved = 4; + int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK4_0; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); - if (nrow % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { return -1; } @@ -3487,20 +3708,20 @@ static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block GGML_UNUSED(data_size); } -static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor *t, int interleave_block, const void * restrict data, size_t data_size) { +static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { GGML_ASSERT(t->type == GGML_TYPE_Q4_0); GGML_ASSERT(interleave_block == 8); + constexpr int nrows_interleaved = 8; block_q4_0x8 * dst = (block_q4_0x8*)t->data; const block_q4_0 * src = (const block_q4_0*) data; block_q4_0 dst_tmp[8]; - int nrow = t->ne[1]; // Number of rows - int nrows_interleaved = 8; + int nrow = ggml_nrows(t); int nblocks = t->ne[0] / QK4_0; GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0)); - if (nrow % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + if (t->ne[1] % nrows_interleaved != 
0 || t->ne[0] % 8 != 0) { return -1; } @@ -3518,43 +3739,524 @@ static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor *t, int interleave_block, GGML_UNUSED(data_size); } -// Prepare for optimized kernels if applicable -void ggml_aarch64_repack_tensor(struct ggml_tensor * cur, enum ggml_type repack_type, const void * restrict data, size_t data_size) { - if (cur->type == repack_type) { - memcpy(cur->data, data, data_size); - return; +static block_iq4_nlx4 make_block_iq4_nlx4(block_iq4_nl * in, unsigned int blck_size_interleave) { + block_iq4_nlx4 out; + + for (int i = 0; i < 4; i++) { + out.d[i] = in[i].d; } - GGML_ASSERT(cur->type == GGML_TYPE_Q4_0); + const int end = QK4_NL * 2 / blck_size_interleave; - switch (repack_type) { - case GGML_TYPE_Q4_0_8_8: - repack_q4_0_to_q4_0_8_bl(cur, 8, data, data_size); - break; - case GGML_TYPE_Q4_0_4_8: - repack_q4_0_to_q4_0_4_bl(cur, 8, data, data_size); - break; - case GGML_TYPE_Q4_0_4_4: - repack_q4_0_to_q4_0_4_bl(cur, 4, data, data_size); + // TODO: this branch seems wrong + //if (blck_size_interleave == 8) { + // for (int i = 0; i < end; ++i) { + // int src_id = i % 4; + // int src_offset = (i / 4) * blck_size_interleave; + // int dst_offset = i * blck_size_interleave; + + // // Using memcpy to avoid unaligned memory accesses + // memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint64_t)); + // } + //} else + if (blck_size_interleave == 4) { + for (int i = 0; i < end; ++i) { + int src_id = i % 4; + int src_offset = (i / 4) * blck_size_interleave; + int dst_offset = i * blck_size_interleave; + + memcpy(&out.qs[dst_offset], &in[src_id].qs[src_offset], sizeof(uint32_t)); + } + } else { + GGML_ASSERT(false); + } + + return out; +} + +static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) { + GGML_ASSERT(t->type == GGML_TYPE_IQ4_NL); + //GGML_ASSERT(interleave_block == 4 || interleave_block == 8); + GGML_ASSERT(interleave_block == 4); + + block_iq4_nlx4 * dst = (block_iq4_nlx4 *)t->data; + const block_iq4_nl * src = (const block_iq4_nl *)data; + block_iq4_nl dst_tmp[4]; + int nrow = ggml_nrows(t); + int nrows_interleaved = 4; + int nblocks = t->ne[0] / QK4_0; + + GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_iq4_nl)); + + if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) { + return -1; + } + + for (int b = 0; b < nrow; b += nrows_interleaved) { + for (int64_t x = 0; x < nblocks; x++) { + for (int i = 0; i < nrows_interleaved; i++) { + dst_tmp[i] = src[x + i * nblocks]; + } + *dst++ = make_block_iq4_nlx4(dst_tmp, interleave_block); + } + src += nrows_interleaved * nblocks; + } + return 0; + + GGML_UNUSED(data_size); +} + +namespace ggml::cpu::aarch64 { +// repack +template +int repack(struct ggml_tensor *, const void *, size_t); + +// TODO: generalise. 
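To make the interleaved layout concrete before the specialisations that follow: a minimal stand-alone sketch of the 4-byte, 4-row weave performed by make_block_iq4_nlx4 above. The types here are simplified stand-ins for block_iq4_nl / block_iq4_nlx4 (scales omitted), not the real ggml structs:

// Stand-alone illustration of the 4-row interleave done by
// make_block_iq4_nlx4 with blck_size_interleave == 4.
#include <cstdint>
#include <cstring>

constexpr int QK4_NL = 32;                       // elements per block, two nibbles per byte

struct src_block { uint8_t qs[QK4_NL / 2]; };    // 16 bytes of packed nibbles per row
struct dst_block { uint8_t qs[QK4_NL * 2]; };    // 64 bytes: 4 rows woven together

static dst_block interleave_4rows(const src_block in[4]) {
    constexpr int chunk = 4;                     // blck_size_interleave
    dst_block out;
    const int end = QK4_NL * 2 / chunk;          // 16 chunks of 4 bytes
    for (int i = 0; i < end; ++i) {
        const int src_id     = i % 4;            // cycle rows 0,1,2,3,0,1,...
        const int src_offset = (i / 4) * chunk;  // advance inside a row every 4 chunks
        // memcpy avoids unaligned accesses, as in the original
        memcpy(&out.qs[i * chunk], &in[src_id].qs[src_offset], chunk);
    }
    return out;
}

With this layout a kernel can fetch the same 4-byte chunk of four neighbouring rows with one contiguous 16-byte load, which is what the vld1q_u8 in ggml_gemm_iq4_nl_4x4_q8_0 relies on.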
+template <> int repack<block_q4_0, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_4_bl(t, 4, data, data_size);
+}
+
+template <> int repack<block_q4_0, 8, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_4_bl(t, 8, data, data_size);
+}
+
+template <> int repack<block_q4_0, 8, 8>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_8_bl(t, 8, data, data_size);
+}
+
+template <> int repack<block_iq4_nl, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_iq4_nl_to_iq4_nl_4_bl(t, 4, data, data_size);
+}
+
+// TODO: needs to be revisited
+//template <> int repack<block_iq4_nl, 8, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+//    return repack_iq4_nl_to_iq4_nl_4_bl(t, 8, data, data_size);
+//}
+
+// gemv
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS>
+void gemv(int, float *, size_t, const void *, const void *, int, int);
+
+template <> void gemv<block_q4_0, 4, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemv<block_q4_0, 8, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemv<block_q4_0, 8, 8>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <>
+void gemv<block_iq4_nl, 4, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+// gemm
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS>
+void gemm(int, float *, size_t, const void *, const void *, int, int);
+
+template <> void gemm<block_q4_0, 4, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemm<block_q4_0, 8, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemm<block_q4_0, 8, 8>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <>
+void gemm<block_iq4_nl, 4, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+class tensor_traits_base : public ggml::cpu::tensor_traits {
+  public:
+    virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0;
+};
+
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS> class tensor_traits : public tensor_traits_base {
+
+    bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
+        // not really GGML_TYPE_Q8_0, but the same size.
+        switch (op->op) {
+            case GGML_OP_MUL_MAT:
+                size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(op->src[1]));
+                return true;
+            case GGML_OP_MUL_MAT_ID:
+                size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(op->src[1]));
+                size = GGML_PAD(size, sizeof(int64_t));  // + padding for the next block.
+                size += sizeof(int64_t) * (1+op->src[0]->ne[2]) * op->src[1]->ne[2];
+                return true;
+            default:
+                // GGML_ABORT("fatal error");
+                break;
+        }
+        return false;
+    }
+
+    bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
+        switch (op->op) {
+            case GGML_OP_MUL_MAT:
+                forward_mul_mat(params, op);
+                return true;
+            case GGML_OP_MUL_MAT_ID:
+                forward_mul_mat_id(params, op);
+                return true;
         default:
-            GGML_ABORT("Unsupported type");
+                // GGML_ABORT("fatal error");
+                break;
+        }
+        return false;
     }
-}
-
-enum ggml_type ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur) {
+    void forward_mul_mat(ggml_compute_params * params, ggml_tensor * op) {
+        const ggml_tensor * src0 = op->src[0];
+        const ggml_tensor * src1 = op->src[1];
+        ggml_tensor * dst = op;
+
+        GGML_TENSOR_BINARY_OP_LOCALS
+
+        const int ith = params->ith;
+        const int nth = params->nth;
+
+        GGML_ASSERT(ne0 == ne01);
+        GGML_ASSERT(ne1 == ne11);
+        GGML_ASSERT(ne2 == ne12);
+        GGML_ASSERT(ne3 == ne13);
+
+        // dst cannot be transposed or permuted
+        GGML_ASSERT(nb0 == sizeof(float));
+        GGML_ASSERT(nb0 <= nb1);
+        GGML_ASSERT(nb1 <= nb2);
+        GGML_ASSERT(nb2 <= nb3);
+
+        GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+        GGML_ASSERT(ggml_n_dims(op->src[0]) == 2);
+        // GGML_ASSERT(ggml_n_dims(op->src[1]) == 2);
+
+        char * wdata = static_cast<char *>(params->wdata);
+        const size_t nbw1 = ggml_row_size(GGML_TYPE_Q8_0, ne10);
+
+        assert(params->wsize >= nbw1 * ne11);
+
+        const ggml_from_float_t from_float = ggml_get_type_traits_cpu(GGML_TYPE_Q8_0)->from_float;
+
+        int64_t i11_processed = 0;
+        for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) {
+            quantize_mat_q8_0((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10,
+                              INTER_SIZE);
+        }
+        i11_processed = ne11 - ne11 % 4;
+        for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) {
+            from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10);
+        }
+
+        ggml_barrier(params->threadpool);
+
+        const void * src1_wdata = params->wdata;
+        const size_t src1_col_stride = ggml_row_size(GGML_TYPE_Q8_0, ne10);
+        int64_t src0_start = (ith * ne01) / nth;
+        int64_t src0_end = ((ith + 1) * ne01) / nth;
+        src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start;
+        src0_end = (src0_end % NB_COLS) ? src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end;
+        if (src0_start >= src0_end) {
+            return;
+        }
+
+        // If there are more than three rows in src1, use gemm; otherwise, use gemv.
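+        // Worked example: with ne11 = 10 rows in src1, the gemm call below covers
+        // rows 0..7 (ne11 - ne11 % 4 == 8, processed as 4-row tiles) and the gemv
+        // loop finishes rows 8 and 9 one at a time. Each thread writes only the
+        // dst elements for its NB_COLS-aligned [src0_start, src0_end) range of
+        // src0 rows, so no two threads touch the same output.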
+        if (ne11 > 3) {
+            gemm<BLOC_TYPE, INTER_SIZE, NB_COLS>(ne00, (float *) ((char *) dst->data) + src0_start, ne01,
+                 (const char *) src0->data + src0_start * nb01,
+                 (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start);
+        }
+        for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) {
+            gemv<BLOC_TYPE, INTER_SIZE, NB_COLS>(ne00, (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01,
+                 (const char *) src0->data + src0_start * nb01,
+                 (const char *) src1_wdata + (src1_col_stride * iter), 1,
+                 src0_end - src0_start);
+        }
+    }
+
+    void forward_mul_mat_id(ggml_compute_params * params, ggml_tensor * op) {
+        const ggml_tensor * src0 = op->src[0];
+        const ggml_tensor * src1 = op->src[1];
+        const ggml_tensor * ids = op->src[2];
+        ggml_tensor * dst = op;
+
+        GGML_TENSOR_BINARY_OP_LOCALS
+
+        const int ith = params->ith;
+        const int nth = params->nth;
+
+        const ggml_from_float_t from_float = ggml_get_type_traits_cpu(GGML_TYPE_Q8_0)->from_float;
+
+        // we don't support permuted src0 or src1
+        GGML_ASSERT(nb00 == ggml_type_size(src0->type));
+        GGML_ASSERT(nb10 == ggml_type_size(src1->type));
+
+        // dst cannot be transposed or permuted
+        GGML_ASSERT(nb0 == sizeof(float));
+        GGML_ASSERT(nb0 <= nb1);
+        GGML_ASSERT(nb1 <= nb2);
+        GGML_ASSERT(nb2 <= nb3);
+
+        GGML_ASSERT(ne03 == 1);
+        GGML_ASSERT(ne13 == 1);
+        GGML_ASSERT(ne3 == 1);
+
+        GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+        // row groups
+        const int n_ids = ids->ne[0]; // n_expert_used
+        const int n_as = ne02; // n_expert
+
+        const size_t nbw1 = ggml_row_size(GGML_TYPE_Q8_0, ne10);
+        const size_t nbw2 = nbw1*ne11;
+        const size_t nbw3 = nbw2*ne12;
+
+        struct mmid_row_mapping {
+            int32_t i1;
+            int32_t i2;
+        };
+
+        GGML_ASSERT(params->wsize >= (GGML_PAD(nbw3, sizeof(int64_t)) + n_as * sizeof(int64_t) +
+                                      n_as * ne12 * sizeof(mmid_row_mapping)));
+
+        auto wdata = (char *) params->wdata;
+        auto wdata_src1_end = (char *) wdata + GGML_PAD(nbw3, sizeof(int64_t));
+        int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
+        struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12]
+
+        // src1: float32 => block_q8_0
+        for (int64_t i12 = 0; i12 < ne12; ++i12) {
+            for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
+                from_float((float *)((char *) src1->data + i12 * nb12 + i11 * nb11),
+                           (void *) (wdata + i12 * nbw2 + i11 * nbw1),
+                           ne10);
+            }
+        }
+
+#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ne12 + (i1)]
+
+        if (ith == 0) {
+            // initialize matrix_row_counts
+            memset(matrix_row_counts, 0, n_as * sizeof(int64_t));
+
+            // group rows by src0 matrix
+            for (int32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) {
+                for (int32_t id = 0; id < n_ids; ++id) {
+                    const int32_t i02 =
+                        *(const int32_t *) ((const char *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]);
+
+                    GGML_ASSERT(i02 >= 0 && i02 < n_as);
+
+                    MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = { id, iid1 };
+                    matrix_row_counts[i02] += 1;
+                }
+            }
+        }
+
+        ggml_barrier(params->threadpool);
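+        // At this point matrix_row_counts[e] holds how many src1 rows route to
+        // expert e, and MMID_MATRIX_ROW(e, k) records which ones. For example,
+        // with ids selecting experts {0, 2} for token 0 and {2, 3} for token 1,
+        // expert 2 receives the mappings { i1 = 1, i2 = 0 } and { i1 = 0, i2 = 1 },
+        // and matrix_row_counts[2] == 2.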
+        // compute each matrix multiplication in sequence
+        for (int cur_a = 0; cur_a < n_as; ++cur_a) {
+            const int64_t cne1 = matrix_row_counts[cur_a];
+
+            if (cne1 == 0) {
+                continue;
+            }
+
+            auto src0_cur = (const char *) src0->data + cur_a*nb02;
+
+            //const int64_t nr0 = ne01; // src0 rows
+            const int64_t nr1 = cne1; // src1 rows
+
+            int64_t src0_cur_start = (ith * ne01) / nth;
+            int64_t src0_cur_end = ((ith + 1) * ne01) / nth;
+            src0_cur_start =
+                (src0_cur_start % NB_COLS) ? src0_cur_start + NB_COLS - (src0_cur_start % NB_COLS) : src0_cur_start;
+            src0_cur_end = (src0_cur_end % NB_COLS) ? src0_cur_end + NB_COLS - (src0_cur_end % NB_COLS) : src0_cur_end;
+
+            if (src0_cur_start >= src0_cur_end) return;
+
+            for (int ir1 = 0; ir1 < nr1; ir1++) {
+                struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1);
+                const int id = row_mapping.i1; // selected expert index
+
+                const int64_t i11 = id % ne11;
+                const int64_t i12 = row_mapping.i2; // row index in src1
+
+                const int64_t i1 = id;  // selected expert index
+                const int64_t i2 = i12; // row
+
+                auto src1_col = (const char *) wdata + (i11 * nbw1 + i12 * nbw2);
+
+                gemv<BLOC_TYPE, INTER_SIZE, NB_COLS>(
+                    ne00, (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start,
+                    ne01, src0_cur + src0_cur_start * nb01,
+                    src1_col, 1, src0_cur_end - src0_cur_start);
+            }
+        }
+#undef MMID_MATRIX_ROW
+    }
+
+    int repack(struct ggml_tensor * t, const void * data, size_t data_size) override {
+        GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type),
+                       (int) NB_COLS, (int) INTER_SIZE);
+        return ggml::cpu::aarch64::repack<BLOC_TYPE, INTER_SIZE, NB_COLS>(t, data, data_size);
+    }
+};
+
+// instance for Q4
+static const tensor_traits<block_q4_0, 4, 4> q4_0_4x4_q8_0;
+static const tensor_traits<block_q4_0, 8, 4> q4_0_4x8_q8_0;
+static const tensor_traits<block_q4_0, 8, 8> q4_0_8x8_q8_0;
+
+// instance for IQ4
+static const tensor_traits<block_iq4_nl, 4, 4> iq4_nl_4x4_q8_0;
+
+} // namespace ggml::cpu::aarch64
+
+static const ggml::cpu::tensor_traits * ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur) {
     if (cur->type == GGML_TYPE_Q4_0) {
-        // TODO: enable for AVX2 - currently disabled due to bad gemv performance
-        if (/* ggml_cpu_has_avx2() || */ (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) {
-            return GGML_TYPE_Q4_0_8_8;
+        if (ggml_cpu_has_avx2() || (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) {
+            if (cur->ne[1] % 8 == 0) {
+                return &ggml::cpu::aarch64::q4_0_8x8_q8_0;
+            }
         }
         if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) {
-            return GGML_TYPE_Q4_0_4_8;
+            if (cur->ne[1] % 4 == 0) {
+                return &ggml::cpu::aarch64::q4_0_4x8_q8_0;
+            }
+        }
+        if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
+            if (cur->ne[1] % 4 == 0) {
+                return &ggml::cpu::aarch64::q4_0_4x4_q8_0;
+            }
         }
-        if (ggml_cpu_has_neon()) {
-            return GGML_TYPE_Q4_0_4_4;
+    } else if (cur->type == GGML_TYPE_IQ4_NL) {
+        if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
+            if (cur->ne[1] % 4 == 0) {
+                return &ggml::cpu::aarch64::iq4_nl_4x4_q8_0;
+            }
+        }
+    }
+
+    return nullptr;
+}
+
+static void ggml_backend_cpu_aarch64_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+    tensor->extra = (void *) const_cast<ggml::cpu::tensor_traits *>(ggml_aarch64_get_optimal_repack_type(tensor));
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_cpu_aarch64_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
+                                                       const void * data, size_t offset, size_t size) {
+    GGML_ASSERT(offset == 0);
+    GGML_ASSERT(size == ggml_nbytes(tensor));
+
+    auto tensor_traits = (ggml::cpu::aarch64::tensor_traits_base *) tensor->extra;
+    auto OK = tensor_traits->repack(tensor, data, size);
+
+    GGML_ASSERT(OK == 0);
+    GGML_UNUSED(buffer);
+}
+
+static const char * ggml_backend_cpu_aarch64_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+    return "CPU_AARCH64";
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_t ggml_backend_cpu_aarch64_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
+
+    if (buffer == nullptr) {
+        return nullptr;
+    }
+
buffer->buft = buft; + buffer->iface.init_tensor = ggml_backend_cpu_aarch64_buffer_init_tensor; + buffer->iface.set_tensor = ggml_backend_cpu_aarch64_buffer_set_tensor; + return buffer; +} + +static size_t ggml_backend_cpu_aarch64_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { + return TENSOR_ALIGNMENT; + + GGML_UNUSED(buft); +} + +namespace ggml::cpu::aarch64 { +class extra_buffer_type : ggml::cpu::extra_buffer_type { + bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override { + if ( op->op == GGML_OP_MUL_MAT && + op->src[0]->buffer && + (ggml_n_dims(op->src[0]) == 2) && + op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type() && + ggml_aarch64_get_optimal_repack_type(op->src[0]) + ) { + if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { + return false; + } + if (op->src[1]->type == GGML_TYPE_F32) { + return true; + } + //if (op->src[1]->type == GGML_TYPE_Q8_0) { + // return true; + //} + // may be possible if Q8_0 packed... + } else if (op->op == GGML_OP_MUL_MAT_ID + && op->src[0]->buffer + && (ggml_n_dims(op->src[0]) == 3) + && op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type() + && ggml_aarch64_get_optimal_repack_type(op->src[0]) + ) { + if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) { + return false; + } + if (op->src[1]->type == GGML_TYPE_F32) { + return true; + } + //if (op->src[1]->type == GGML_TYPE_Q8_0) { + // return true; + //} } + return false; } - return cur->type; + ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override { + if (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_MUL_MAT_ID) { + if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type()) { + return (ggml::cpu::tensor_traits *) op->src[0]->extra; + } + } + return nullptr; + } +}; +} // namespace ggml::cpu::aarch64 + +ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void) { + static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_aarch64 = { + /* .iface = */ { + /* .get_name = */ ggml_backend_cpu_aarch64_buffer_type_get_name, + /* .alloc_buffer = */ ggml_backend_cpu_aarch64_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cpu_aarch64_buffer_type_get_alignment, + /* .get_max_size = */ nullptr, // defaults to SIZE_MAX + /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes + /* .is_host = */ nullptr, + }, + /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), + /* .context = */ new ggml::cpu::aarch64::extra_buffer_type(), + }; + + return &ggml_backend_cpu_buffer_type_aarch64; } diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.h b/ggml/src/ggml-cpu/ggml-cpu-aarch64.h index 53b30c1dd2dfe..6e84c826b4091 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.h +++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.h @@ -1,30 +1,8 @@ #pragma once +#include "ggml-cpu-traits.h" #include "ggml.h" // GGML internal header -#ifdef __cplusplus -extern "C" { -#endif - -// Quantization -void quantize_mat_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t nrows, int64_t n_per_row, int64_t blck_size_interleave); - -// GEMV -void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); -void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); -void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t 
bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); - -// GEMM -void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); -void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); -void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc); - -void ggml_aarch64_repack_tensor(struct ggml_tensor * cur, enum ggml_type repack_type, const void * data, size_t data_size); -enum ggml_type ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur); - -#ifdef __cplusplus -} -#endif - +ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void); diff --git a/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp b/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp new file mode 100644 index 0000000000000..fa8dea2af9c72 --- /dev/null +++ b/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp @@ -0,0 +1,55 @@ +#ifdef GGML_USE_CPU_HBM + +#include "ggml-backend.h" +#include "ggml-backend-impl.h" +#include "ggml-cpu.h" +#include "ggml-impl.h" + +#include "ggml-cpu-hbm.h" + +// buffer type HBM + +#include + +static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return "CPU_HBM"; + + GGML_UNUSED(buft); +} + +static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) { + hbw_free(buffer->context); +} + +static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, + size_t size) { + void * ptr; + int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size); + if (result != 0) { + GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size); + return NULL; + } + + ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); + buffer->buft = buft; + buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer; + + return buffer; +} + +ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) { + static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = { + /* .iface = */ { + /* .get_name = */ ggml_backend_cpu_hbm_buffer_type_get_name, + /* .alloc_buffer = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, + /* .get_max_size = */ nullptr, // defaults to SIZE_MAX + /* .get_alloc_size = */ nullptr, // defaults to ggml_nbytes + /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, + }, + /* .context = */ nullptr, + }; + + return &ggml_backend_cpu_buffer_type_hbm; +} +#endif diff --git a/ggml/src/ggml-cpu/ggml-cpu-hbm.h b/ggml/src/ggml-cpu/ggml-cpu-hbm.h new file mode 100644 index 0000000000000..09a1f09d72be2 --- /dev/null +++ b/ggml/src/ggml-cpu/ggml-cpu-hbm.h @@ -0,0 +1,8 @@ +#pragma once + +#include "ggml-backend.h" +#include "ggml.h" + +// GGML CPU internal header + +ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void); diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h index 27a530b227954..d71076ad12b1f 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-impl.h +++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h @@ -15,6 +15,18 @@ extern "C" { #endif +struct ggml_compute_params { + // ith = thread index, nth = number of threads + int ith, nth; + + // work buffer for all threads + size_t wsize; + void * wdata; + + struct ggml_threadpool * threadpool; +}; + 
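As context for the struct above: every CPU op implementation receives a ggml_compute_params and is expected to partition its own work by thread index. A minimal sketch of the usual pattern (a hypothetical helper, not part of this patch):

// Hypothetical kernel skeleton: thread ith of nth claims a contiguous slice of
// rows; wdata is the shared scratch sized via the work_size hook.
static void example_rows(const struct ggml_compute_params * params, int64_t nrows) {
    const int64_t dr  = (nrows + params->nth - 1) / params->nth;  // rows per thread
    const int64_t ir0 = dr * params->ith;                         // first row
    const int64_t ir1 = ir0 + dr < nrows ? ir0 + dr : nrows;      // one past last row
    for (int64_t ir = ir0; ir < ir1; ir++) {
        // ... process row ir; slices are disjoint, so threads only need to
        // synchronize at the next ggml_barrier(params->threadpool)
    }
}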
+ #if defined(_MSC_VER) #define m512bh(p) p @@ -366,6 +378,9 @@ static __m256 __lasx_xvreplfr2vr_s(float val) { } #endif +// TODO: move to ggml-threading +void ggml_barrier(struct ggml_threadpool * tp); + #ifdef __cplusplus } #endif diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c index f0e276b698795..634c5fa1162c3 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c @@ -1791,11 +1791,12 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - float32_t _scale[4] = { GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; - + float32_t _scale[4] = { + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + }; float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); @@ -1811,13 +1812,15 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); + l1, r1)), l2, r2)), l3, r3))), scale); } - float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); + + float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - vst1_f32(s, vget_low_f32(sumv2)); + vst1_f32(s, vget_low_f32 (sumv2)); vst1_f32(s + bs, vget_high_f32(sumv2)); + return; } #endif @@ -2345,10 +2348,12 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r const block_q8_1 * restrict b_y0 = &vy0[i]; const block_q8_1 * restrict b_y1 = &vy1[i]; - float32_t summs_t[4] = {GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s)}; + float32_t summs_t[4] = { + GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), + GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), + GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), + GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s) + }; summs0 = vaddq_f32(summs0, vld1q_f32(summs_t)); const uint8x16_t m4b = vdupq_n_u8(0x0F); @@ -2369,10 +2374,12 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); // mmla into int32x4_t - float32_t _scale[4] = {GGML_FP16_TO_FP32(b_x0->d)*b_y0->d, - GGML_FP16_TO_FP32(b_x0->d)*b_y1->d, - GGML_FP16_TO_FP32(b_x1->d)*b_y0->d, - GGML_FP16_TO_FP32(b_x1->d)*b_y1->d}; + float32_t _scale[4] = { + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + }; float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); @@ 
-2387,15 +2394,17 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); + l1, r1)), l2, r2)), l3, r3))), scale); } - float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); + float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); + sumv2 = vaddq_f32(sumv2, summs0); vst1_f32(s, vget_low_f32 (sumv2)); vst1_f32(s + bs, vget_high_f32(sumv2)); + return; } #endif @@ -3372,10 +3381,12 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - float32_t _scale[4] = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + float32_t _scale[4] = { + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d) + }; float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); @@ -3391,13 +3402,15 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h))); sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)), - l1, r1)), l2, r2)), l3, r3))), scale); + l1, r1)), l2, r2)), l3, r3))), scale); } - float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); + + float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2); float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - vst1_f32(s, vget_low_f32(sumv2)); + vst1_f32(s, vget_low_f32 (sumv2)); vst1_f32(s + bs, vget_high_f32(sumv2)); + return; } #endif diff --git a/ggml/src/ggml-cpu/ggml-cpu-traits.cpp b/ggml/src/ggml-cpu/ggml-cpu-traits.cpp new file mode 100644 index 0000000000000..62a0712dabbf6 --- /dev/null +++ b/ggml/src/ggml-cpu/ggml-cpu-traits.cpp @@ -0,0 +1,36 @@ +#include "ggml-cpu-traits.h" + +#include "ggml-backend-impl.h" +#include "ggml-backend.h" + +namespace ggml::cpu { +tensor_traits::~tensor_traits() {} + +extra_buffer_type::~extra_buffer_type() {} +} // namespace ggml::cpu + +bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) { + for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) { + if (extra && extra->context) { + auto buf_extra = (ggml::cpu::extra_buffer_type *) extra->context; + auto tensor_traits = buf_extra->get_tensor_traits(op); + if (tensor_traits && tensor_traits->compute_forward(params, op)) { + return true; + } + } + } + return false; +} + +bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size) { + for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) { + if (extra && extra->context) { + auto buf_extra = (ggml::cpu::extra_buffer_type *) extra->context; + auto tensor_traits = buf_extra->get_tensor_traits(op); + if (tensor_traits && 
tensor_traits->work_size(n_threads, op, *size)) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
diff --git a/ggml/src/ggml-cpu/ggml-cpu-traits.h b/ggml/src/ggml-cpu/ggml-cpu-traits.h
new file mode 100644
index 0000000000000..99a6186b1d6b5
--- /dev/null
+++ b/ggml/src/ggml-cpu/ggml-cpu-traits.h
@@ -0,0 +1,38 @@
+#pragma once
+#include "ggml-backend-impl.h"
+#include "ggml-cpu-impl.h"
+#include "ggml.h"
+
+#ifdef __cplusplus
+#    include <vector>
+extern "C" {
+#endif
+
+// return true if op is handled by an extra "accelerator"
+bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op);
+bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size);
+
+#ifdef __cplusplus
+}
+
+namespace ggml::cpu {
+// register in tensor->extra
+class tensor_traits {
+  public:
+    virtual ~tensor_traits();
+    virtual bool work_size(int n_threads, const struct ggml_tensor * op, size_t & size) = 0;
+    virtual bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) = 0;
+};
+
+class extra_buffer_type {
+  public:
+    virtual ~extra_buffer_type();
+    virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0;
+    virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0;
+};
+}  // namespace ggml::cpu
+
+// implemented in ggml-cpu.cpp.
+std::vector<ggml_backend_buffer_type_t> & ggml_backend_cpu_get_extra_buffers_type();
+
+#endif
diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index 0d23669c25bea..67e67a08968a1 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -3,13 +3,14 @@
 #include "ggml-backend-impl.h"
 #include "ggml-backend.h"
-#include "ggml-cpu-aarch64.h"
+#include "ggml-cpu-traits.h"
 #include "ggml-cpu-impl.h"
 #include "ggml-cpu.h"
 #include "ggml-impl.h"
 #include "ggml-quants.h"
 #include "ggml-cpu-quants.h"
 #include "ggml-threading.h"
+#include "amx/amx.h"
 #include "ggml.h"
 
 #if defined(_MSC_VER) || defined(__MINGW32__)
@@ -109,10 +110,11 @@ static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];
 #if defined(__ARM_ARCH)
 struct ggml_arm_arch_features_type {
     int has_neon;
+    int has_dotprod;
     int has_i8mm;
     int has_sve;
     int sve_cnt;
-} ggml_arm_arch_features = {-1, -1, -1, 0};
+} ggml_arm_arch_features = {-1, -1, -1, -1, 0};
 #endif
 
@@ -124,8 +126,7 @@ struct ggml_arm_arch_features_type {
 #endif
 
 #include <windows.h>
-
-#if !defined(__clang__)
+#if defined(_MSC_VER) && !defined(__clang__)
 #define GGML_CACHE_ALIGN __declspec(align(GGML_CACHE_LINE))
 
 typedef volatile LONG atomic_int;
@@ -222,10 +223,6 @@ typedef void * thread_ret_t;
 
 typedef pthread_t ggml_thread_t;
 
-#ifdef GGML_USE_CPU_HBM
-#include <hbwmalloc.h>
-#endif
-
 #if defined(__APPLE__)
 #include <unistd.h>
 #include <mach/mach.h>
@@ -299,7 +296,6 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
     },
     [GGML_TYPE_Q8_0] = {
         .from_float        = quantize_row_q8_0,
-        .from_float_to_mat = quantize_mat_q8_0,
         .vec_dot           = ggml_vec_dot_q8_0_q8_0,
         .vec_dot_type      = GGML_TYPE_Q8_0,
 #if defined (__ARM_FEATURE_MATMUL_INT8)
@@ -407,33 +403,6 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
         .vec_dot_type = GGML_TYPE_BF16,
         .nrows        = 1,
     },
-    [GGML_TYPE_Q4_0_4_4] = {
-        .from_float   = NULL,
-        .vec_dot      = NULL,
-        .vec_dot_type = GGML_TYPE_Q8_0,
-        .nrows        = 1,
-        .ncols        = 4,
-        .gemv         = ggml_gemv_q4_0_4x4_q8_0,
-        .gemm         = ggml_gemm_q4_0_4x4_q8_0,
-    },
-    [GGML_TYPE_Q4_0_4_8] = {
-        .from_float   = NULL,
-        .vec_dot      = NULL,
-        .vec_dot_type = GGML_TYPE_Q8_0,
-        .nrows        = 1,
-        .ncols        = 4,
-        .gemv         = ggml_gemv_q4_0_4x8_q8_0,
-        .gemm         = ggml_gemm_q4_0_4x8_q8_0,
-    },
-    [GGML_TYPE_Q4_0_8_8] = {
-        .from_float   = NULL,
-        .vec_dot      = NULL,
-        .vec_dot_type = GGML_TYPE_Q8_0,
-        .nrows        = 1,
-        .ncols        = 8,
-        .gemv         = ggml_gemv_q4_0_8x8_q8_0,
-        .gemm         = ggml_gemm_q4_0_8x8_q8_0,
-    },
     [GGML_TYPE_TQ1_0] = {
         .from_float   = quantize_row_tq1_0,
         .vec_dot      = ggml_vec_dot_tq1_0_q8_K,
@@ -485,21 +454,21 @@ const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type
 #define GGML_F32x4_ADD          vaddq_f32
 #define GGML_F32x4_MUL          vmulq_f32
 #define GGML_F32x4_REDUCE_ONE(x) vaddvq_f32(x)
-#define GGML_F32x4_REDUCE(res, x)                  \
-{                                                  \
-    int offset = GGML_F32_ARR >> 1;                \
-    for (int i = 0; i < offset; ++i) {             \
-        (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
-    }                                              \
-    offset >>= 1;                                  \
-    for (int i = 0; i < offset; ++i) {             \
-        (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
-    }                                              \
-    offset >>= 1;                                  \
-    for (int i = 0; i < offset; ++i) {             \
-        (x)[i] = vaddq_f32((x)[i], (x)[offset+i]); \
-    }                                              \
-    (res) = GGML_F32x4_REDUCE_ONE((x)[0]);         \
+#define GGML_F32x4_REDUCE(res, x)                       \
+{                                                       \
+    int offset = GGML_F32_ARR >> 1;                     \
+    for (int i = 0; i < offset; ++i) {                  \
+        (x)[i] = vaddq_f32((x)[i], (x)[offset+i]);      \
+    }                                                   \
+    offset >>= 1;                                       \
+    for (int i = 0; i < offset; ++i) {                  \
+        (x)[i] = vaddq_f32((x)[i], (x)[offset+i]);      \
+    }                                                   \
+    offset >>= 1;                                       \
+    for (int i = 0; i < offset; ++i) {                  \
+        (x)[i] = vaddq_f32((x)[i], (x)[offset+i]);      \
+    }                                                   \
+    (res) = (ggml_float) GGML_F32x4_REDUCE_ONE((x)[0]); \
 }
 
 #define GGML_F32_VEC        GGML_F32x4
@@ -614,7 +583,7 @@ do {                                                  \
     for (int i = 0; i < offset; ++i) {                \
         x[i] = _mm512_add_ps(x[i], x[offset+i]);      \
     }                                                 \
-    res = _mm512_reduce_add_ps(x[0]);                 \
+    res = (ggml_float) _mm512_reduce_add_ps(x[0]);    \
 } while (0)
 
 // TODO: is this optimal ?
@@ -664,7 +633,7 @@ do {                                                  \
     for (int i = 0; i < offset; ++i) {                \
         x[i] = _mm512_add_ps(x[i], x[offset+i]);      \
     }                                                 \
-    res = _mm512_reduce_add_ps(x[0]);                 \
+    res = (ggml_float) _mm512_reduce_add_ps(x[0]);    \
 } while (0)
 
 #define GGML_F16_VEC                GGML_F32Cx16
@@ -675,8 +644,8 @@
 #define GGML_F16_VEC_FMA            GGML_F32Cx16_FMA
 #define GGML_F16_VEC_ADD            GGML_F32Cx16_ADD
 #define GGML_F16_VEC_MUL            GGML_F32Cx16_MUL
-#define GGML_F16_VEC_REDUCE         GGML_F32Cx16_REDUCE
+#define GGML_F16_VEC_REDUCE         GGML_F32Cx16_REDUCE
 
 #elif defined(__AVX__)
 
 #define GGML_SIMD
@@ -745,7 +714,7 @@
 #define GGML_F32Cx8_LOAD(x)     _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)(x)))
 #define GGML_F32Cx8_STORE(x, y) _mm_storeu_si128((__m128i *)(x), _mm256_cvtps_ph(y, 0))
 #else
-static inline __m256 __avx_f32cx8_load(ggml_fp16_t *x) {
+static inline __m256 __avx_f32cx8_load(const ggml_fp16_t * x) {
     float tmp[8];
 
     for (int i = 0; i < 8; i++) {
@@ -1168,28 +1137,28 @@ static inline void __lasx_f32cx8_store(ggml_fp16_t * x, __m256 y) {
 #define GGML_F32x4_FMA(a, b, c) __lsx_vfmadd_s(b, c, a)
 #define GGML_F32x4_ADD          __lsx_vfadd_s
 #define GGML_F32x4_MUL          __lsx_vfmul_s
-#define GGML_F32x4_REDUCE(res, x) \
-{ \
-    int offset = GGML_F32_ARR >> 1; \
-    for (int i = 0; i < offset; ++i) { \
-        x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \
-    } \
-    offset >>= 1; \
-    for (int i = 0; i < offset; ++i) { \
-        x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \
-    } \
-    offset >>= 1; \
-    for (int i = 0; i < offset; ++i) { \
-        x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \
-    } \
-    __m128i tmp = __lsx_vsrli_d((__m128i)x[0], 32); \
-    tmp = (__m128i)__lsx_vfadd_s((__m128)tmp, x[0]); \
-    tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp); \
-    const __m128 t0 = __lsx_vshuf4i_w(tmp, 0x88); \
-    tmp = __lsx_vsrli_d((__m128i)t0, 32); \
-    tmp = (__m128i)__lsx_vfadd_s((__m128)tmp, t0); \
-    tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp); \
-    res = (ggml_float) __lsx_vpickve2gr_w(__lsx_vshuf4i_w(tmp, 0x88), 0); \
+#define GGML_F32x4_REDUCE(res, x)                                         \
+{                                                                         \
+    int offset = GGML_F32_ARR >> 1;                                       \
+    for (int i = 0; i < offset; ++i) {                                    \
+        x[i] = __lsx_vfadd_s(x[i], x[offset + i]);                        \
+    }                                                                     \
+    offset >>= 1;                                                         \
+    for (int i = 0; i < offset; ++i) {                                    \
+        x[i] = __lsx_vfadd_s(x[i], x[offset + i]);                        \
+    }                                                                     \
+    offset >>= 1;                                                         \
+    for (int i = 0; i < offset; ++i) {                                    \
+        x[i] = __lsx_vfadd_s(x[i], x[offset + i]);                        \
+    }                                                                     \
+    __m128i tmp     = __lsx_vsrli_d((__m128i) x[0], 32);                  \
+    tmp             = (__m128i) __lsx_vfadd_s((__m128) tmp, x[0]);        \
+    tmp             = __lsx_vpickev_w(__lsx_vldi(0), tmp);                \
+    const __m128 t0 = __lsx_vshuf4i_w(tmp, 0x88);                         \
+    tmp             = __lsx_vsrli_d((__m128i) t0, 32);                    \
+    tmp             = (__m128i) __lsx_vfadd_s((__m128) tmp, t0);          \
+    tmp             = __lsx_vpickev_w(__lsx_vldi(0), tmp);                \
+    res = (ggml_float) __lsx_vpickve2gr_w(__lsx_vshuf4i_w(tmp, 0x88), 0); \
 }
 
 #define GGML_F32_VEC        GGML_F32x4
@@ -1357,31 +1326,18 @@ struct ggml_compute_state {
     int ith;
 };
 
-struct ggml_compute_params {
-    // ith = thread index, nth = number of threads
-    int ith, nth;
-
-    // work buffer for all threads
-    size_t wsize;
-    void * wdata;
-
-    struct ggml_threadpool * threadpool;
-};
-
 //
 // fundamental operations
 //
 
 inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
 inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
 
-inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
+inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t   v) { for (int i = 0; i < n; ++i) x[i] = v;    }
+inline static void ggml_vec_cpy_i32(const int n, int32_t * y, const int32_t * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
 
 inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
 inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
 inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i]  = x[i] + y[i]; }
 inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float   v) { for (int i = 0; i < n; ++i) z[i]  = x[i] + v;    }
 inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x)                  { for (int i = 0; i < n; ++i) y[i] += x[i];        }
@@ -2276,7 +2232,7 @@ struct ggml_state {
 
 static struct ggml_state g_state = {0};
 
-static void ggml_barrier(struct ggml_threadpool * tp) {
+void ggml_barrier(struct ggml_threadpool * tp) {
     int n_threads = atomic_load_explicit(&tp->n_threads_cur, memory_order_relaxed);
     if (n_threads == 1) {
         return;
@@ -2430,7 +2386,7 @@ bool ggml_is_numa(void) {
 #endif
 
 #if !defined(HWCAP2_I8MM)
-#define HWCAP2_I8MM 0
+#define HWCAP2_I8MM (1 << 13)
 #endif
 
 static void ggml_init_arm_arch_features(void) {
@@ -2439,6 +2395,7 @@ static void ggml_init_arm_arch_features(void) {
     uint32_t hwcap2 = getauxval(AT_HWCAP2);
 
     ggml_arm_arch_features.has_neon    = !!(hwcap & HWCAP_ASIMD);
+    ggml_arm_arch_features.has_dotprod = !!(hwcap & HWCAP_ASIMDDP);
     ggml_arm_arch_features.has_i8mm    = !!(hwcap2 & HWCAP2_I8MM);
     ggml_arm_arch_features.has_sve     = !!(hwcap & HWCAP_SVE);
 
@@ -2453,6 +2410,11 @@ static void ggml_init_arm_arch_features(void) {
     }
     ggml_arm_arch_features.has_neon = oldp;
 
+    if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) != 0) {
+        oldp = 0;
+    }
+    ggml_arm_arch_features.has_dotprod = oldp;
+
     if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) != 0) {
         oldp = 0;
     }
@@ -4505,9 +4467,6 @@ static void ggml_compute_forward_add(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
             {
                 ggml_compute_forward_add_q_f32(params, dst);
             } break;
@@ -4885,9 +4844,6 @@ static void ggml_compute_forward_add1(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
            {
                ggml_compute_forward_add1_q_f32(params, dst);
            } break;
@@ -5015,9 +4971,6 @@ static void ggml_compute_forward_acc(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
         default:
             {
                 GGML_ABORT("fatal error");
@@ -7433,20 +7386,9 @@ static void ggml_compute_forward_mul_mat(
     const int ith = params->ith;
     const int nth = params->nth;
 
-    enum ggml_type type = src0->type;
-
-    if (src0->buffer && ggml_backend_cpu_buft_is_aarch64(src0->buffer->buft)) {
-        type = (enum ggml_type)(intptr_t)src0->extra;
-    }
-
-    enum ggml_type    const vec_dot_type      = type_traits_cpu[type].vec_dot_type;
+    enum ggml_type    const vec_dot_type      = type_traits_cpu[src0->type].vec_dot_type;
     ggml_from_float_t const from_float        = type_traits_cpu[vec_dot_type].from_float;
-    ggml_from_float_to_mat_t const from_float_to_mat = type_traits_cpu[vec_dot_type].from_float_to_mat;
-    int64_t           const vec_dot_num_rows  = type_traits_cpu[type].nrows;
-    int64_t           const matmul_num_cols   = type_traits_cpu[type].ncols;
-    int64_t           const blck_size_interleave = ggml_get_type_traits(type)->blck_size_interleave;
-    ggml_gemv_t       const gemv              = type_traits_cpu[type].gemv;
-    ggml_gemm_t       const gemm              = type_traits_cpu[type].gemm;
+    int64_t           const vec_dot_num_rows  = type_traits_cpu[src0->type].nrows;
 
     GGML_ASSERT(ne0 == ne01);
     GGML_ASSERT(ne1 == ne11);
@@ -7454,7 +7396,7 @@ static void ggml_compute_forward_mul_mat(
     GGML_ASSERT(ne3 == ne13);
 
     // we don't support permuted src0 or src1
-    GGML_ASSERT(nb00 == ggml_type_size(type));
+    GGML_ASSERT(nb00 == ggml_type_size(src0->type));
     GGML_ASSERT(nb10 == ggml_type_size(src1->type));
 
     // dst cannot be transposed or permuted
@@ -7466,6 +7408,7 @@ static void ggml_compute_forward_mul_mat(
     // nb01 >= nb00 - src0 is not transposed
     //   compute by src0 rows
 
+    // TODO: extract to "extra_op"
 #if GGML_USE_LLAMAFILE
     // broadcast factors
     const int64_t r2 = ne12 / ne02;
@@ -7476,15 +7419,15 @@ static void ggml_compute_forward_mul_mat(
     if (src1_cont) {
         for (int64_t i13 = 0; i13 < ne13; i13++)
             for (int64_t i12 = 0; i12 < ne12; i12++)
-                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(type),
+                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
                                      (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
-                                     nb01/ggml_type_size(type),
+                                     nb01/ggml_type_size(src0->type),
                                      (const char *)src1->data + i12*nb12 + i13*nb13,
                                      nb11/ggml_type_size(src1->type),
                                      (char *)dst->data + i12*nb2 + i13*nb3,
                                      nb1/ggml_type_size(dst->type),
                                      ith, nth,
-                                     type,
+                                     src0->type,
                                      src1->type,
                                      dst->type))
                     goto UseGgmlGemm1;
@@ -7505,19 +7448,10 @@ UseGgmlGemm1:;
 
         for (int64_t i13 = 0; i13 < ne13; ++i13) {
             for (int64_t i12 = 0; i12 < ne12; ++i12) {
-                int64_t i11_processed = 0;
-                if ((ggml_n_dims(src1) == 2) && from_float_to_mat && gemm) {
-                    for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) {
-                        from_float_to_mat((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
-                                          (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
-                                          4, ne10, blck_size_interleave);
-                    }
-                    i11_processed = ne11 - ne11 % 4;
-                }
-                for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) {
+                for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
                     from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
-                           (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
-                           ne10);
+                               (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
+                               ne10);
                 }
             }
         }
@@ -7537,15 +7471,15 @@ UseGgmlGemm1:;
 
         for (int64_t i13 = 0; i13 < ne13; i13++)
             for (int64_t i12 = 0; i12 < ne12; i12++)
-                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(type),
+                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
                                      (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
-                                     nb01/ggml_type_size(type),
+                                     nb01/ggml_type_size(src0->type),
                                      (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size,
                                      row_size/ggml_type_size(vec_dot_type),
                                      (char *)dst->data + i12*nb2 + i13*nb3,
                                      nb1/ggml_type_size(dst->type),
                                      ith, nth,
-                                     type,
+                                     src0->type,
                                      vec_dot_type,
                                      dst->type))
                     goto UseGgmlGemm2;
@@ -7560,14 +7494,6 @@ UseGgmlGemm2:;
     // This is the size of the rest of the dimensions of the result
     const int64_t nr1 = ne1 * ne2 * ne3;
 
-    // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
-    int64_t num_rows_per_vec_dot = vec_dot_num_rows;
-    // TODO: currently the mmla kernels support only even numbered rows/cols.
-    // this check can be removed once they are extended to support odd numbered rows/cols too
-    if ((nr0 % 2 != 0) || (ne11 % 2 != 0)) {
-        num_rows_per_vec_dot = 1;
-    }
-
     // Now select a reasonable chunk size.
     int chunk_size = 16;
@@ -7595,28 +7521,6 @@ UseGgmlGemm2:;
     const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
    const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
 
-    if ((ggml_n_dims(src0) == 2) && gemv) {
-        const void * src1_wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
-        const size_t src1_col_stride = ggml_is_contiguous(src1) || src1->type != vec_dot_type ? ggml_row_size(vec_dot_type, ne10) : nb11;
-        int64_t src0_start = (ith * ne01) / nth;
-        int64_t src0_end   = ((ith + 1) * ne01) / nth;
-        src0_start = (src0_start % matmul_num_cols) ? src0_start + matmul_num_cols - (src0_start % matmul_num_cols): src0_start;
-        src0_end   = (src0_end   % matmul_num_cols) ? src0_end   + matmul_num_cols - (src0_end   % matmul_num_cols): src0_end;
-        if (src0_start >= src0_end) return;
-
-        // If there are more than three rows in src1, use gemm; otherwise, use gemv.
-        if (gemm && (ne11 > 3)) {
-            gemm(ne00, (float *)((char *) dst->data) + src0_start, ne01, (const char *) src0->data + src0_start * nb01,
-                 (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start);
-        }
-        for (int iter = gemm ? ne11 - ne11 % 4 : 0; iter < ne11; iter++) {
-            gemv(ne00, (float *)((char *) dst->data + (iter * nb1)) + src0_start, ne01,
-                 (const char *) src0->data + src0_start * nb01, (const char *) src1_wdata + (src1_col_stride * iter), 1,
-                 src0_end - src0_start);
-        }
-        return;
-    }
-
     // The first chunk comes from our thread_id, the rest will get auto-assigned.
     int current_chunk = ith;
@@ -7630,7 +7534,16 @@ UseGgmlGemm2:;
         const int64_t ir1_start = dr1 * ith1;
         const int64_t ir1_end   = MIN(ir1_start + dr1, nr1);
 
-        ggml_compute_forward_mul_mat_one_chunk(params, dst, type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);
+        // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
+        int64_t num_rows_per_vec_dot = vec_dot_num_rows;
+
+        // these checks are needed to avoid crossing dim1 boundaries
+        // can be optimized, but the logic would become more complicated, so keeping it like this for simplicity
+        if ((nr0 % 2 != 0) || (ne11 % 2 != 0) || ((ir0_end - ir0_start) % 2 != 0) || ((ir1_end - ir1_start) % 2 != 0)) {
+            num_rows_per_vec_dot = 1;
+        }
+
+        ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);
 
         if (nth >= nchunk0 * nchunk1) {
             break;
@@ -7662,8 +7575,6 @@ static void ggml_compute_forward_mul_mat_id(
     ggml_vec_dot_t    const vec_dot      = type_traits_cpu[type].vec_dot;
     enum ggml_type    const vec_dot_type = type_traits_cpu[type].vec_dot_type;
     ggml_from_float_t const from_float   = type_traits_cpu[vec_dot_type].from_float;
-    int64_t           const matmul_num_cols = type_traits_cpu[type].ncols;
-    ggml_gemv_t       const gemv         = type_traits_cpu[type].gemv;
 
     // we don't support permuted src0 or src1
     GGML_ASSERT(nb00 == ggml_type_size(type));
@@ -7749,34 +7660,6 @@ static void ggml_compute_forward_mul_mat_id(
         const int64_t nr0 = ne01; // src0 rows
         const int64_t nr1 = cne1; // src1 rows
 
-        if (((ggml_n_dims(src0) - 1) == 2) && gemv) {
-            int64_t src0_cur_start = (ith * ne01) / nth;
-            int64_t src0_cur_end   = ((ith + 1) * ne01) / nth;
-            src0_cur_start = (src0_cur_start % matmul_num_cols) ? src0_cur_start + matmul_num_cols - (src0_cur_start % matmul_num_cols): src0_cur_start;
-            src0_cur_end   = (src0_cur_end   % matmul_num_cols) ? src0_cur_end   + matmul_num_cols - (src0_cur_end   % matmul_num_cols): src0_cur_end;
-            if (src0_cur_start >= src0_cur_end) return;
-
-            for (int ir1 = 0; ir1 < nr1; ir1++) {
-                struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1);
-                const int id = row_mapping.i1; // selected expert index
-
-                const int64_t i11 = id % ne11;
-                const int64_t i12 = row_mapping.i2; // row index in src1
-
-                const int64_t i1 = id;  // selected expert index
-                const int64_t i2 = i12; // row
-
-                const char * src1_col = (const char *) wdata +
-                    (src1_cont || src1->type != vec_dot_type
-                     ? (i11 + i12 * ne11) * row_size
-                     : (i11 * nb11 + i12 * nb12));
-
-                gemv(ne00, (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start, ne01,
-                     (const char *) src0_cur + src0_cur_start * nb01, src1_col, 1, src0_cur_end - src0_cur_start);
-            }
-            continue;
-        }
-
         // distribute the thread work across the inner or outer loop based on which one is larger
         const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
@@ -8084,9 +7967,6 @@ static void ggml_compute_forward_out_prod(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
             {
                 ggml_compute_forward_out_prod_q_f32(params, dst);
             } break;
@@ -8239,6 +8119,77 @@ static void ggml_compute_forward_set_f32(
     }
 }
 
+static void ggml_compute_forward_set_i32(
+        const struct ggml_compute_params * params,
+        struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+    const struct ggml_tensor * src1 = dst->src[1];
+
+    GGML_ASSERT(ggml_are_same_shape(src0, dst));
+    GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
+
+    // view src0 and dst with these strides and data offset inbytes during set
+    // nb0 is implicitly element_size because src0 and dst are contiguous
+    size_t nb1     = ((int32_t *) dst->op_params)[0];
+    size_t nb2     = ((int32_t *) dst->op_params)[1];
+    size_t nb3     = ((int32_t *) dst->op_params)[2];
+    size_t offset  = ((int32_t *) dst->op_params)[3];
+    bool   inplace = (bool) ((int32_t *) dst->op_params)[4];
+
+    if (!inplace) {
+        if (params->ith == 0) {
+            // memcpy needs to be synchronized across threads to avoid race conditions.
+            // => do it in INIT phase
+            memcpy(
+                ((char *)  dst->data),
+                ((char *) src0->data),
+                ggml_nbytes(dst));
+        }
+        ggml_barrier(params->threadpool);
+    }
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int nr = ggml_nrows(src1);
+    const int nc = src1->ne[0];
+
+    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
+    GGML_TENSOR_LOCALS(size_t,  nb1, src1, nb)
+
+    // src0 and dst as viewed during set
+    const size_t nb0 = ggml_element_size(src0);
+
+    const int im0 = (ne10 == 0 ? 0 : ne10-1);
+    const int im1 = (ne11 == 0 ? 0 : ne11-1);
+    const int im2 = (ne12 == 0 ? 0 : ne12-1);
+    const int im3 = (ne13 == 0 ? 0 : ne13-1);
+
+    GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 <= ggml_nbytes(dst));
+
+    GGML_ASSERT(nb10 == sizeof(int32_t));
+
+    // rows per thread
+    const int dr = (nr + nth - 1)/nth;
+
+    // row range for this thread
+    const int ir0 = dr*ith;
+    const int ir1 = MIN(ir0 + dr, nr);
+
+    for (int ir = ir0; ir < ir1; ++ir) {
+        // src0 and dst are viewed with shape of src1 and offset
+        // => same indices
+        const int i3 = ir/(ne12*ne11);
+        const int i2 = (ir - i3*ne12*ne11)/ne11;
+        const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
+
+        ggml_vec_cpy_i32(nc,
+                (int32_t *) ((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + offset),
+                (int32_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
+    }
+}
+
 static void ggml_compute_forward_set(
         const struct ggml_compute_params * params,
         struct ggml_tensor * dst) {
 
@@ -8250,6 +8201,10 @@ static void ggml_compute_forward_set(
             {
                 ggml_compute_forward_set_f32(params, dst);
             } break;
+        case GGML_TYPE_I32:
+            {
+                ggml_compute_forward_set_i32(params, dst);
+            } break;
         case GGML_TYPE_F16:
         case GGML_TYPE_BF16:
         case GGML_TYPE_Q4_0:
@@ -8274,9 +8229,6 @@ static void ggml_compute_forward_set(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
         default:
             {
                 GGML_ABORT("fatal error");
@@ -8538,9 +8490,6 @@ static void ggml_compute_forward_get_rows(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
             {
                 ggml_compute_forward_get_rows_q(params, dst);
             } break;
@@ -9130,9 +9079,6 @@ static void ggml_compute_forward_clamp(
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
         case GGML_TYPE_Q8_K:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
         case GGML_TYPE_I8:
         case GGML_TYPE_I16:
         case GGML_TYPE_I32:
@@ -9187,6 +9133,64 @@ static void ggml_rope_cache_init(
     }
 }
 
+static void ggml_mrope_cache_init(
+     float theta_base_t, float theta_base_h, float theta_base_w, float theta_base_e, int sections[4], bool indep_sects,
+     float freq_scale, const float * freq_factors, float corr_dims[2], int64_t ne0, float ext_factor, float mscale,
+     float * cache, float sin_sign, float theta_scale) {
+    // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py
+    float theta_t = theta_base_t;
+    float theta_h = theta_base_h;
+    float theta_w = theta_base_w;
+    float theta_e = theta_base_e;  // extra position id for vision encoder
+    int sect_dims = sections[0] + sections[1] + sections[2] + sections[3];
+    int sec_w = sections[1] + sections[0];
+    int sec_e = sections[2] + sec_w;
+    GGML_ASSERT(sect_dims <= ne0);
+
+    for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
+        const float ff = freq_factors ? freq_factors[i0/2] : 1.0f;
+
+        int sector = (i0 / 2) % sect_dims;
+        if (indep_sects) {
+            // compute theta independently for each dim sections
+            // (i.e. reset corresponding theta when `i0` goes from one section to another)
+            if (sector == 0) {
+                theta_t = theta_base_t;
+            }
+            else if (sector == sections[0]) {
+                theta_h = theta_base_h;
+            }
+            else if (sector == sec_w) {
+                theta_w = theta_base_w;
+            }
+            else if (sector == sec_e) {
+                theta_e = theta_base_e;
+            }
+        }
+
+        float theta = theta_t;
+        if (sector >= sections[0] && sector < sec_w) {
+            theta = theta_h;
+        }
+        else if (sector >= sec_w && sector < sec_w + sections[2]) {
+            theta = theta_w;
+        }
+        else if (sector >= sec_w + sections[2]) {
+            theta = theta_e;
+        }
+
+        rope_yarn(
+            theta/ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]
+        );
+        cache[i0 + 1] *= sin_sign;
+
+        theta_t *= theta_scale;
+        theta_w *= theta_scale;
+        theta_h *= theta_scale;
+        theta_e *= theta_scale;
+    }
+}
+
 static void ggml_compute_forward_rope_f32(
         const struct ggml_compute_params * params,
         struct ggml_tensor * dst,
@@ -9197,6 +9201,7 @@ static void ggml_compute_forward_rope_f32(
     const struct ggml_tensor * src2 = dst->src[2];
 
     float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
+    int sections[4];
 
     //const int n_past     = ((int32_t *) dst->op_params)[0];
     const int n_dims     = ((int32_t *) dst->op_params)[1];
@@ -9210,6 +9215,7 @@ static void ggml_compute_forward_rope_f32(
     memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
     memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
     memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
+    memcpy(&sections,    (int32_t *) dst->op_params + 11, sizeof(int)*4);
 
     GGML_TENSOR_UNARY_OP_LOCALS
 
@@ -9242,6 +9248,16 @@ static void ggml_compute_forward_rope_f32(
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
 
     const bool is_neox   = mode & GGML_ROPE_TYPE_NEOX;
+    const bool is_mrope  = mode & GGML_ROPE_TYPE_MROPE;  // ggml_rope_multi, multimodal rotary position embedding
+    const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
+
+    if (is_mrope) {
+        GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0);
+    }
+
+    if (is_vision) {
+        GGML_ASSERT(n_dims == ne0/2);
+    }
 
     const float * freq_factors = NULL;
     if (src2 != NULL) {
@@ -9257,18 +9273,63 @@ static void ggml_compute_forward_rope_f32(
 
     const int32_t * pos = (const int32_t *) src1->data;
 
-    for (int64_t i3 = 0; i3 < ne3; i3++) {
-        for (int64_t i2 = 0; i2 < ne2; i2++) {
-            const int64_t p = pos[i2];
+    for (int64_t i3 = 0; i3 < ne3; i3++) {       // batch
+        for (int64_t i2 = 0; i2 < ne2; i2++) {   // seq-len
 
             float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
-            ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
+            if (!is_mrope) {
+                const int64_t p = pos[i2];
+                ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
+            }
+            else {
+                const int64_t p_t = pos[i2];
+                const int64_t p_h = pos[i2 + ne2];
+                const int64_t p_w = pos[i2 + ne2 * 2];
+                const int64_t p_e = pos[i2 + ne2 * 3];
+                ggml_mrope_cache_init(
+                    p_t, p_h, p_w, p_e, sections, is_vision,
+                    freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
+            }
 
-            for (int64_t i1 = 0; i1 < ne1; i1++) {
+            for (int64_t i1 = 0; i1 < ne1; i1++) {  // attn-heads
                 if (ir++ < ir0) continue;
                 if (ir   > ir1) break;
 
-                if (!is_neox) {
+                if (is_neox || is_mrope) {
+                    if (is_vision){
+                        for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
+                            const int64_t ic = i0/2;
+
+                            const float cos_theta = cache[i0 + 0];
+                            const float sin_theta = cache[i0 + 1];
+
+                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
+                            float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + ic*nb0);
+
+                            const float x0 = src[0];
+                            const float x1 = src[n_dims];
+
+                            dst_data[0]      = x0*cos_theta - x1*sin_theta;
+                            dst_data[n_dims] = x0*sin_theta + x1*cos_theta;
+                        }
+                    } else {
+                        for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
+                            const int64_t ic = i0/2;
+
+                            const float cos_theta = cache[i0 + 0];
+                            const float sin_theta = cache[i0 + 1];
+
+                            const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
+                            float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + ic*nb0);
+
+                            const float x0 = src[0];
+                            const float x1 = src[n_dims/2];
+
+                            dst_data[0]        = x0*cos_theta - x1*sin_theta;
+                            dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
+                        }
+                    }
+                } else {
                     for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
                         const float cos_theta = cache[i0 + 0];
                         const float sin_theta = cache[i0 + 1];
@@ -9282,8 +9343,10 @@ static void ggml_compute_forward_rope_f32(
                         dst_data[0] = x0*cos_theta - x1*sin_theta;
                         dst_data[1] = x0*sin_theta + x1*cos_theta;
                     }
-                } else {
-                    for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
+                }
+
+                if (is_vision) {
+                    for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
                         const int64_t ic = i0/2;
 
                         const float cos_theta = cache[i0 + 0];
@@ -9293,19 +9356,20 @@ static void ggml_compute_forward_rope_f32(
                         float * dst_data  = (float *)((char *)  dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0);
 
                         const float x0 = src[0];
-                        const float x1 = src[n_dims/2];
+                        const float x1 = src[n_dims];
 
-                        dst_data[0]        = x0*cos_theta - x1*sin_theta;
-                        dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
+                        dst_data[0]      = x0*cos_theta - x1*sin_theta;
+                        dst_data[n_dims] = x0*sin_theta + x1*cos_theta;
                     }
-                }
-
-                for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
-                    const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
-                    float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);
+                } else {
+                    // fill the remain channels with data from src tensor
+                    for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
+                        const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+                        float * dst_data  = (float *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);
 
-                    dst_data[0] = src[0];
-                    dst_data[1] = src[1];
+                        dst_data[0] = src[0];
+                        dst_data[1] = src[1];
+                    }
                 }
             }
         }
@@ -9323,6 +9387,7 @@ static void ggml_compute_forward_rope_f16(
     const struct ggml_tensor * src2 = dst->src[2];
 
     float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
+    int sections[4];
 
     //const int n_past     = ((int32_t *) dst->op_params)[0];
     const int n_dims     = ((int32_t *) dst->op_params)[1];
@@ -9335,6 +9400,8 @@ static void ggml_compute_forward_rope_f16(
     memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
     memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
     memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));
+    memcpy(&sections,    (int32_t *) dst->op_params + 11, sizeof(int)*4);
+
 
     GGML_TENSOR_UNARY_OP_LOCALS
 
@@ -9367,6 +9434,16 @@ static void ggml_compute_forward_rope_f16(
     ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
 
     const bool is_neox   = mode & GGML_ROPE_TYPE_NEOX;
+    const bool is_mrope  = mode & GGML_ROPE_TYPE_MROPE;
+    const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
+
+    if (is_mrope) {
+        GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0);
+    }
+
+    if (is_vision) {
+        GGML_ASSERT(n_dims == ne0/2);
+    }
 
     const float * freq_factors = NULL;
     if (src2 != NULL) {
@@ -9384,16 +9461,61 @@ static void ggml_compute_forward_rope_f16(
 
     for (int64_t i3 = 0; i3 < ne3; i3++) {
         for (int64_t i2 = 0; i2 < ne2; i2++) {
-            const int64_t p = pos[i2];
 
             float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
-            ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
+            if (!is_mrope) {
+                const int64_t p = pos[i2];
+                ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
+            }
+            else {
+                const int64_t p_t = pos[i2];
+                const int64_t p_h = pos[i2 + ne2];
+                const int64_t p_w = pos[i2 + ne2 * 2];
+                const int64_t p_e = pos[i2 + ne2 * 3];
+                ggml_mrope_cache_init(
+                    p_t, p_h, p_w, p_e, sections, is_vision,
+                    freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
+            }
 
             for (int64_t i1 = 0; i1 < ne1; i1++) {
                 if (ir++ < ir0) continue;
                 if (ir   > ir1) break;
 
-                if (!is_neox) {
+                if (is_neox || is_mrope) {
+                    if (is_vision) {
+                        for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
+                            const int64_t ic = i0/2;
+
+                            const float cos_theta = cache[i0 + 0];
+                            const float sin_theta = cache[i0 + 1];
+
+                            const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
+                            ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + ic*nb0);
+
+                            const float x0 = GGML_FP16_TO_FP32(src[0]);
+                            const float x1 = GGML_FP16_TO_FP32(src[n_dims]);
+
+                            dst_data[0]      = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
+                            dst_data[n_dims] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
+                        }
+                    } else {
+                        for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
+                            const int64_t ic = i0/2;
+
+                            const float cos_theta = cache[i0 + 0];
+                            const float sin_theta = cache[i0 + 1];
+
+                            const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
+                            ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + ic*nb0);
+
+                            const float x0 = GGML_FP16_TO_FP32(src[0]);
+                            const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
+
+                            dst_data[0]        = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
+                            dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
+                        }
+                    }
+                } else {
                     for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
                         const float cos_theta = cache[i0 + 0];
                         const float sin_theta = cache[i0 + 1];
@@ -9407,8 +9529,10 @@ static void ggml_compute_forward_rope_f16(
                         dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
                         dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                     }
-                } else {
-                    for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
+                }
+
+                if (is_vision) {
+                    for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
                         const int64_t ic = i0/2;
 
                         const float cos_theta = cache[i0 + 0];
@@ -9418,19 +9542,19 @@ static void ggml_compute_forward_rope_f16(
                         ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0);
 
                         const float x0 = GGML_FP16_TO_FP32(src[0]);
-                        const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
+                        const float x1 = GGML_FP16_TO_FP32(src[n_dims]);
 
-                        dst_data[0]        = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
-                        dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
+                        dst_data[0]      = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
+                        dst_data[n_dims] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
                     }
-                }
-
-                for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
-                    const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
-                    ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);
+                } else {
+                    for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
+                        const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+                        ggml_fp16_t * dst_data  = (ggml_fp16_t *)((char *)  dst->data + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);
 
-                    dst_data[0] = src[0];
-                    dst_data[1] = src[1];
+                        dst_data[0] = src[0];
+                        dst_data[1] = src[1];
+                    }
                 }
             }
         }
@@ -10429,6 +10553,40 @@ static void ggml_compute_forward_pad(
     }
 }
 
+// ggml_compute_forward_pad_reflect_1d
+
+static void ggml_compute_forward_pad_reflect_1d(
+        const struct ggml_compute_params * params,
+              struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    const int p0 = opts[0];
+    const int p1 = opts[1];
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    for (int64_t i3 = 0; i3 < ne3; i3++) {
+        for (int64_t i2 = 0; i2 < ne2; i2++) {
+            for (int64_t i1 = ith; i1 < ne1; i1 += nth) {
+                float * left  = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 +         p0*nb0);
+                float * right = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + (ne0-p1-1)*nb0);
+
+                ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01));
+
+                for (int i0 = 1; i0 <= p0; i0++) { left[-i0]  = left[i0];   }
+                for (int i0 = 1; i0 <= p1; i0++) { right[i0]  = right[-i0]; }
+            }
+        }
+    }
+}
+
 // ggml_compute_forward_arange
 
@@ -12304,6 +12462,9 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
         return;
     }
 
+    // extra_buffer op?
+    if (ggml_cpu_extra_compute_forward(params, tensor)) return;
+
     switch (tensor->op) {
         case GGML_OP_DUP:
             {
@@ -12525,6 +12686,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
             {
                 ggml_compute_forward_pad(params, tensor);
             } break;
+        case GGML_OP_PAD_REFLECT_1D:
+            {
+                ggml_compute_forward_pad_reflect_1d(params, tensor);
+            } break;
         case GGML_OP_ARANGE:
             {
                 ggml_compute_forward_arange(params, tensor);
@@ -12867,6 +13032,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
             } break;
         case GGML_OP_UPSCALE:
         case GGML_OP_PAD:
+        case GGML_OP_PAD_REFLECT_1D:
         case GGML_OP_ARANGE:
        case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_ARGSORT:
@@ -12956,7 +13122,7 @@ static thread_ret_t ggml_graph_compute_secondary_thread(void* data);
 #include "windows.h"
 
 // TODO: support > 64 CPUs
-bool ggml_thread_apply_affinity(bool * mask) {
+static bool ggml_thread_apply_affinity(bool * mask) {
     HANDLE    h = GetCurrentThread();
     uint64_t  bitmask = 0ULL;
 
@@ -13246,140 +13412,142 @@ struct ggml_cplan ggml_graph_plan(
 
         size_t cur = 0;
 
-        switch (node->op) {
-            case GGML_OP_CPY:
-            case GGML_OP_DUP:
-                {
-                    if (ggml_is_quantized(node->type) ||
-                        // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32
-                        (node->src[0]->type == GGML_TYPE_F16  && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) ||
-                        (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16)) {
-                        cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
-                    }
-                } break;
-            case GGML_OP_ADD:
-            case GGML_OP_ADD1:
-                {
-                    if (ggml_is_quantized(node->src[0]->type)) {
-                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
-                    }
-                } break;
-            case GGML_OP_ACC:
-                {
-                    if (ggml_is_quantized(node->src[0]->type)) {
-                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
-                    }
-                } break;
-            case GGML_OP_COUNT_EQUAL:
-                {
-                    cur = ggml_type_size(node->type)*n_tasks;
-                } break;
-            case GGML_OP_MUL_MAT:
-                {
-                    const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type;
-
-                    if (node->src[1]->type != vec_dot_type) {
-                        cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
-                    }
-                } break;
-            case GGML_OP_MUL_MAT_ID:
-                {
-                    cur = 0;
-                    const struct ggml_tensor * src0 = node->src[0];
-                    const struct ggml_tensor * src1 = node->src[1];
-                    const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
-                    if (src1->type != vec_dot_type) {
-                        cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
-                    }
-                    const int n_as = src0->ne[2];
-                    cur += GGML_PAD(cur, sizeof(int64_t));       // align
-                    cur += n_as * sizeof(int64_t);               // matrix_row_counts
-                    cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
-                } break;
-            case GGML_OP_OUT_PROD:
-                {
-                    if (ggml_is_quantized(node->src[0]->type)) {
-                        cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
-                    }
-                } break;
-            case GGML_OP_SOFT_MAX:
-            case GGML_OP_ROPE:
-                {
-                    cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
-                } break;
-            case GGML_OP_CONV_TRANSPOSE_1D:
-                {
-                    GGML_ASSERT(node->src[0]->ne[3] == 1);
-                    GGML_ASSERT(node->src[1]->ne[2] == 1);
-                    GGML_ASSERT(node->src[1]->ne[3] == 1);
-
-                    const int64_t ne00 = node->src[0]->ne[0];  // K
-                    const int64_t ne01 = node->src[0]->ne[1];  // Cout
-                    const int64_t ne02 = node->src[0]->ne[2];  // Cin
-
-                    const int64_t ne10 = node->src[1]->ne[0];  // L
-                    const int64_t ne11 = node->src[1]->ne[1];  // Cin
-
-                    if ((node->src[0]->type == GGML_TYPE_F16 ||
-                         node->src[0]->type == GGML_TYPE_BF16) &&
-                        node->src[1]->type == GGML_TYPE_F32) {
-                        cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
-                        cur += sizeof(ggml_fp16_t)*ne10*ne11;
-                    } else if (node->src[0]->type == GGML_TYPE_F32 &&
-                               node->src[1]->type == GGML_TYPE_F32) {
-                        cur += sizeof(float)*ne00*ne01*ne02;
-                        cur += sizeof(float)*ne10*ne11;
-                    } else {
-                        GGML_ABORT("fatal error");
-                    }
-                } break;
-            case GGML_OP_CONV_TRANSPOSE_2D:
-                {
-                    const int64_t ne00 = node->src[0]->ne[0]; // W
-                    const int64_t ne01 = node->src[0]->ne[1]; // H
-                    const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
-                    const int64_t ne03 = node->src[0]->ne[3]; // Channels In
-
-                    const int64_t ne10 = node->src[1]->ne[0]; // W
-                    const int64_t ne11 = node->src[1]->ne[1]; // H
-                    const int64_t ne12 = node->src[1]->ne[2]; // Channels In
-
-                    cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
-                    cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
-                } break;
-            case GGML_OP_FLASH_ATTN_EXT:
-                {
-                    const int64_t ne00 = node->src[0]->ne[0]; // D
-
-                    cur = 3*sizeof(float)*ne00*n_tasks; // 3x head size/thread
-                } break;
-            case GGML_OP_FLASH_ATTN_BACK:
-                {
-                    const int64_t    D = node->src[0]->ne[0];
-                    const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
-                    const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
-                    if (node->src[1]->type == GGML_TYPE_F32) {
-                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
-                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
-                    } else if (node->src[1]->type == GGML_TYPE_F16) {
-                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
-                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
-                    } else if (node->src[1]->type == GGML_TYPE_BF16) {
-                        cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
-                        cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
-                    }
-                } break;
-
-            case GGML_OP_CROSS_ENTROPY_LOSS:
-                {
-                    cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
-                } break;
-            case GGML_OP_COUNT:
-                {
-                    GGML_ABORT("fatal error");
-                }
-            default:
-                break;
+        if (!ggml_cpu_extra_work_size(n_threads, node, &cur)) {
+
+            switch (node->op) {
+                case GGML_OP_CPY:
+                case GGML_OP_DUP:
+                    {
+                        if (ggml_is_quantized(node->type) ||
+                            // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32
+                            (node->src[0]->type == GGML_TYPE_F16  && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) ||
+                            (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16)) {
+                            cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
+                        }
+                    } break;
+                case GGML_OP_ADD:
+                case GGML_OP_ADD1:
+                    {
+                        if (ggml_is_quantized(node->src[0]->type)) {
+                            cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
+                        }
+                    } break;
+                case GGML_OP_ACC:
+                    {
+                        if (ggml_is_quantized(node->src[0]->type)) {
+                            cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
+                        }
+                    } break;
+                case GGML_OP_COUNT_EQUAL:
+                    {
+                        cur = ggml_type_size(node->type)*n_tasks;
+                    } break;
+                case GGML_OP_MUL_MAT:
+                    {
+                        const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type;
+
+                        if (node->src[1]->type != vec_dot_type) {
+                            cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
+                        }
+                    } break;
+                case GGML_OP_MUL_MAT_ID:
+                    {
+                        cur = 0;
+                        const struct ggml_tensor * src0 = node->src[0];
+                        const struct ggml_tensor * src1 = node->src[1];
+                        const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
+                        if (src1->type != vec_dot_type) {
+                            cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
+                        }
+                        const int n_as = src0->ne[2];
+                        cur += GGML_PAD(cur, sizeof(int64_t));       // align
+                        cur += n_as * sizeof(int64_t);               // matrix_row_counts
+                        cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
+                    } break;
+                case GGML_OP_OUT_PROD:
+                    {
+                        if (ggml_is_quantized(node->src[0]->type)) {
+                            cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
+                        }
+                    } break;
+                case GGML_OP_SOFT_MAX:
+                case GGML_OP_ROPE:
+                    {
+                        cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
+                    } break;
+                case GGML_OP_CONV_TRANSPOSE_1D:
+                    {
+                        GGML_ASSERT(node->src[0]->ne[3] == 1);
+                        GGML_ASSERT(node->src[1]->ne[2] == 1);
+                        GGML_ASSERT(node->src[1]->ne[3] == 1);
+
+                        const int64_t ne00 = node->src[0]->ne[0];  // K
+                        const int64_t ne01 = node->src[0]->ne[1];  // Cout
+                        const int64_t ne02 = node->src[0]->ne[2];  // Cin
+                        const int64_t ne10 = node->src[1]->ne[0];  // L
+                        const int64_t ne11 = node->src[1]->ne[1];  // Cin
+
+                        if ((node->src[0]->type == GGML_TYPE_F16 ||
+                             node->src[0]->type == GGML_TYPE_BF16) &&
+                            node->src[1]->type == GGML_TYPE_F32) {
+                            cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
+                            cur += sizeof(ggml_fp16_t)*ne10*ne11;
+                        } else if (node->src[0]->type == GGML_TYPE_F32 &&
+                                   node->src[1]->type == GGML_TYPE_F32) {
+                            cur += sizeof(float)*ne00*ne01*ne02;
+                            cur += sizeof(float)*ne10*ne11;
+                        } else {
+                            GGML_ABORT("fatal error");
+                        }
+                    } break;
+                case GGML_OP_CONV_TRANSPOSE_2D:
+                    {
+                        const int64_t ne00 = node->src[0]->ne[0]; // W
+                        const int64_t ne01 = node->src[0]->ne[1]; // H
+                        const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
+                        const int64_t ne03 = node->src[0]->ne[3]; // Channels In
+
+                        const int64_t ne10 = node->src[1]->ne[0]; // W
+                        const int64_t ne11 = node->src[1]->ne[1]; // H
+                        const int64_t ne12 = node->src[1]->ne[2]; // Channels In
+
+                        cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
+                        cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
+                    } break;
+                case GGML_OP_FLASH_ATTN_EXT:
+                    {
+                        const int64_t ne00 = node->src[0]->ne[0]; // D
+
+                        cur = 3*sizeof(float)*ne00*n_tasks; // 3x head size/thread
+                    } break;
+                case GGML_OP_FLASH_ATTN_BACK:
+                    {
+                        const int64_t    D = node->src[0]->ne[0];
+                        const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
+                        const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
+                        if (node->src[1]->type == GGML_TYPE_F32) {
+                            cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
+                            cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
+                        } else if (node->src[1]->type == GGML_TYPE_F16) {
+                            cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
+                            cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
+                        } else if (node->src[1]->type == GGML_TYPE_BF16) {
+                            cur  = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
+                            cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
+                        }
+                    } break;
+
+                case GGML_OP_CROSS_ENTROPY_LOSS:
+                    {
+                        cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
+                    } break;
+                case GGML_OP_COUNT:
+                    {
+                        GGML_ABORT("fatal error");
+                    }
+                default:
+                    break;
+            }
         }
 
         work_size = MAX(work_size, cur);
@@ -13578,29 +13746,6 @@ static void ggml_graph_compute_kickoff(struct ggml_threadpool * threadpool, int
 
 #endif // GGML_USE_OPENMP
 
-void ggml_threadpool_params_init(struct ggml_threadpool_params * p, int n_threads) {
-    p->n_threads  = n_threads;
-    p->prio       = 0;     // default priority (usually means normal or inherited)
-    p->poll       = 50;    // hybrid-polling enabled
-    p->strict_cpu = false; // no strict placement (all threads share same cpumask)
-    p->paused     = false; // threads are ready to go
GGML_MAX_N_THREADS); // all-zero means use the default affinity (usually inherited) -} - -struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads) { - struct ggml_threadpool_params p; - ggml_threadpool_params_init(&p, n_threads); - return p; -} - -bool ggml_threadpool_params_match(const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1) { - if (p0->n_threads != p1->n_threads ) return false; - if (p0->prio != p1->prio ) return false; - if (p0->poll != p1->poll ) return false; - if (p0->strict_cpu != p1->strict_cpu ) return false; - return memcmp(p0->cpumask, p1->cpumask, GGML_MAX_N_THREADS) == 0; -} - static struct ggml_threadpool * ggml_threadpool_new_impl( struct ggml_threadpool_params * tpp, struct ggml_cgraph * cgraph, @@ -13896,15 +14041,23 @@ int ggml_cpu_has_vsx(void) { } int ggml_cpu_has_neon(void) { -#if defined(__ARM_ARCH) +#if defined(__ARM_ARCH) && defined(__ARM_NEON) return ggml_arm_arch_features.has_neon; #else return 0; #endif } +int ggml_cpu_has_dotprod(void) { +#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD) + return ggml_arm_arch_features.has_dotprod; +#else + return 0; +#endif +} + int ggml_cpu_has_sve(void) { -#if defined(__ARM_ARCH) +#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) return ggml_arm_arch_features.has_sve; #else return 0; @@ -13912,7 +14065,7 @@ int ggml_cpu_has_sve(void) { } int ggml_cpu_has_matmul_int8(void) { -#if defined(__ARM_ARCH) +#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8) return ggml_arm_arch_features.has_i8mm; #else return 0; @@ -13920,7 +14073,7 @@ int ggml_cpu_has_matmul_int8(void) { } int ggml_cpu_get_sve_cnt(void) { -#if defined(__ARM_ARCH) +#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE) return ggml_arm_arch_features.sve_cnt; #else return 0; diff --git a/ggml/src/ggml-cpu/ggml-cpu.cpp b/ggml/src/ggml-cpu/ggml-cpu.cpp index 573b7c5b9b375..f11399cc628ca 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu.cpp @@ -2,11 +2,18 @@ #include "ggml-backend-impl.h" #include "ggml-cpu.h" #include "ggml-cpu-aarch64.h" +#include "ggml-cpu-traits.h" #include "ggml-impl.h" +#include "amx/amx.h" + #include #include #include +#ifdef GGML_USE_CPU_HBM +#include "ggml-cpu-hbm.h" +#endif + #if defined(__APPLE__) #include #include @@ -22,124 +29,20 @@ // ggml-backend interface -#ifdef GGML_USE_CPU_HBM - -// buffer type HBM - -#include - -static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "CPU_HBM"; - - GGML_UNUSED(buft); -} - -static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) { - hbw_free(buffer->context); -} - -static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - void * ptr; - int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size); - if (result != 0) { - GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size); - return NULL; - } - - ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); - buffer->buft = buft; - buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer; - - return buffer; -} - -ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) { - static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = { - /* .iface = */ { - /* .get_name = */ ggml_backend_cpu_hbm_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer, - /* .get_alignment = */ 
ggml_backend_cpu_buffer_type_get_alignment, - /* .get_max_size = */ NULL, // defaults to SIZE_MAX - /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes - /* .is_host = */ ggml_backend_cpu_buffer_type_is_host, - }, - /* .context = */ NULL, - }; - - return &ggml_backend_cpu_buffer_type_hbm; -} -#endif - -// buffer type AARCH64 - -static void ggml_backend_cpu_aarch64_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { - tensor->extra = (void *)ggml_aarch64_get_optimal_repack_type(tensor); // NOLINT - - GGML_UNUSED(buffer); -} - -static void ggml_backend_cpu_aarch64_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - GGML_ASSERT(offset == 0); - GGML_ASSERT(size == ggml_nbytes(tensor)); - - enum ggml_type repack_type = (enum ggml_type)(intptr_t)tensor->extra; - - ggml_aarch64_repack_tensor(tensor, repack_type, data, size); - - GGML_UNUSED(buffer); -} - -static const char * ggml_backend_cpu_aarch64_buffer_type_get_name(ggml_backend_buffer_type_t buft) { - return "CPU_AARCH64"; - - GGML_UNUSED(buft); -} - -static ggml_backend_buffer_t ggml_backend_cpu_aarch64_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - auto * buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); - - if (buffer == NULL) { - return NULL; - } - - buffer->buft = buft; - buffer->iface.init_tensor = ggml_backend_cpu_aarch64_buffer_init_tensor; - buffer->iface.set_tensor = ggml_backend_cpu_aarch64_buffer_set_tensor; - - return buffer; -} - -ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void) { - static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_aarch64 = { - /* .iface = */ { - /* .get_name = */ ggml_backend_cpu_aarch64_buffer_type_get_name, - /* .alloc_buffer = */ ggml_backend_cpu_aarch64_buffer_type_alloc_buffer, - /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, - /* .get_max_size = */ NULL, // defaults to SIZE_MAX - /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes - /* .is_host = */ NULL, - }, - /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0), - /* .context = */ NULL, - }; - - return &ggml_backend_cpu_buffer_type_aarch64; -} - -bool ggml_backend_cpu_buft_is_aarch64(ggml_backend_buffer_type_t buft) { - return buft == ggml_backend_cpu_aarch64_buffer_type(); -} - -static ggml_backend_buffer_type_t * ggml_backend_cpu_get_extra_bufts(ggml_backend_dev_t device) { +std::vector& ggml_backend_cpu_get_extra_buffers_type() { static std::vector bufts = []() { std::vector bufts; -#ifdef GGML_USE_CPU_HBM - bufts.push_back(ggml_backend_cpu_hbm_buffer_type()); +#if defined(__AMX_INT8__) && defined(__AVX512VNNI__) + if (ggml_backend_amx_buffer_type()) { + bufts.push_back(ggml_backend_amx_buffer_type()); + } #endif #ifdef GGML_USE_CPU_AARCH64 - bufts.push_back(ggml_backend_cpu_aarch64_buffer_type()); + if (ggml_backend_cpu_aarch64_buffer_type()) { + bufts.push_back(ggml_backend_cpu_aarch64_buffer_type()); + } #endif bufts.push_back(NULL); @@ -147,11 +50,22 @@ static ggml_backend_buffer_type_t * ggml_backend_cpu_get_extra_bufts(ggml_backen return bufts; }(); - return bufts.data(); + return bufts; +} + +static ggml_backend_buffer_type_t * ggml_backend_cpu_device_get_extra_buffers_type(ggml_backend_dev_t device) { + return ggml_backend_cpu_get_extra_buffers_type().data(); GGML_UNUSED(device); } +static bool ggml_backend_cpu_is_extra_buffer_type(ggml_backend_buffer_type_t buft) { + for (auto extra : 
ggml_backend_cpu_get_extra_buffers_type()) { + if (extra && extra == buft) return true; + } + return false; +} + // CPU backend - backend (stream) struct ggml_backend_cpu_context { @@ -456,14 +370,23 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st const struct ggml_tensor * src0 = op->src[0]; const struct ggml_tensor * src1 = op->src[1]; - if (src0 && src0->buffer && ggml_backend_cpu_buft_is_aarch64(src0->buffer->buft)) { - if (op->op != GGML_OP_MUL_MAT || src0->type != GGML_TYPE_Q4_0 || ggml_aarch64_get_optimal_repack_type(src0) == GGML_TYPE_Q4_0) { - return false; + if (op->op == GGML_OP_NONE || op->op == GGML_OP_RESHAPE || op->op == GGML_OP_VIEW || op->op == GGML_OP_PERMUTE || op->op == GGML_OP_TRANSPOSE) { + return true; + } + + // extra_buffer_op? + for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) { + if (extra) { + auto buf_extra = (ggml::cpu::extra_buffer_type*) extra->context; + if (buf_extra && buf_extra->supports_op(dev, op)) { + return true; + } } } - for (int i = 1; i < GGML_MAX_SRC; i++) { - if (op->src[i] && op->src[i]->buffer && ggml_backend_cpu_buft_is_aarch64(op->src[i]->buffer->buft)) { + // the other case need host buffer. + for (int i = 0; i < GGML_MAX_SRC; i++) { + if (op->src[i] && op->src[i]->buffer && !ggml_backend_buft_is_host(op->src[i]->buffer->buft)) { return false; } } @@ -471,8 +394,11 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st switch (op->op) { case GGML_OP_CPY: return + op->type != GGML_TYPE_IQ3_XXS && + op->type != GGML_TYPE_IQ3_S && op->type != GGML_TYPE_IQ2_XXS && op->type != GGML_TYPE_IQ2_XS && + op->type != GGML_TYPE_IQ2_S && op->type != GGML_TYPE_IQ1_S && op->type != GGML_TYPE_IQ1_M; // missing type_traits.from_float case GGML_OP_MUL_MAT: @@ -486,13 +412,10 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st default: return true; } - - GGML_UNUSED(dev); } static bool ggml_backend_cpu_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { - return ggml_backend_buft_is_host(buft) || ggml_backend_cpu_buft_is_aarch64(buft); - + return ggml_backend_buft_is_host(buft) || ggml_backend_cpu_is_extra_buffer_type(buft); GGML_UNUSED(dev); } @@ -541,16 +464,12 @@ static ggml_backend_dev_t ggml_backend_cpu_reg_get_device(ggml_backend_reg_t reg return &ggml_backend_cpu_device; } -struct ggml_backend_feature { - const char * name; - const char * value; -}; - -// Not used yet // This is intended to replace the the ggml_cpu_has_* functions when loading the CPU backend dynamically, -// and additionally to allow other backends to expose their own list of features that applications can query using the same API. 
+// and additionally to allow other backends to expose their own list of features that applications can query using the same API static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t reg) { static std::vector features = []() { + ggml_cpu_init(); + std::vector features; if (ggml_cpu_has_sse3()) { features.push_back({ "SSE3", "1" }); @@ -561,6 +480,9 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r if (ggml_cpu_has_avx()) { features.push_back({ "AVX", "1" }); } + if (ggml_cpu_has_avx_vnni()) { + features.push_back({ "AVX_VNNI", "1" }); + } if (ggml_cpu_has_avx2()) { features.push_back({ "AVX2", "1" }); } @@ -570,9 +492,6 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r if (ggml_cpu_has_fma()) { features.push_back({ "FMA", "1" }); } - if (ggml_cpu_has_avx_vnni()) { - features.push_back({ "AVX_VNNI", "1" }); - } if (ggml_cpu_has_avx512()) { features.push_back({ "AVX512", "1" }); } @@ -603,6 +522,12 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r if (ggml_cpu_has_sve()) { features.push_back({ "SVE", "1" }); } + if (ggml_cpu_has_dotprod()) { + features.push_back({ "DOTPROD", "1" }); + } + if (ggml_cpu_has_matmul_int8()) { + features.push_back({ "MATMUL_INT8", "1" }); + } if (ggml_cpu_get_sve_cnt() > 0) { static std::string sve_cnt = std::to_string(ggml_cpu_get_sve_cnt()); features.push_back({ "SVE_CNT", sve_cnt.c_str() }); @@ -619,6 +544,18 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r if (ggml_cpu_has_llamafile()) { features.push_back({ "LLAMAFILE", "1" }); } + #ifdef GGML_USE_ACCELERATE + features.push_back({ "ACCELERATE", "1" }); + #endif + #ifdef GGML_USE_CPU_HBM + features.push_back({ "CPU_HBM", "1" }); + #endif + #ifdef GGML_USE_OPENMP + features.push_back({ "OPENMP", "1" }); + #endif + #ifdef GGML_USE_CPU_AARCH64 + features.push_back({ "AARCH64_REPACK", "1" }); + #endif features.push_back({ nullptr, nullptr }); @@ -632,10 +569,35 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r static void * ggml_backend_cpu_get_proc_address(ggml_backend_reg_t reg, const char * name) { if (strcmp(name, "ggml_backend_set_n_threads") == 0) { - return (void *)ggml_backend_cpu_set_n_threads; + ggml_backend_set_n_threads_t fct = ggml_backend_cpu_set_n_threads; + return (void *)fct; } if (strcmp(name, "ggml_backend_dev_get_extra_bufts") == 0) { - return (void *)ggml_backend_cpu_get_extra_bufts; + ggml_backend_dev_get_extra_bufts_t fct = ggml_backend_cpu_device_get_extra_buffers_type; + return (void *)fct; + } + if (strcmp(name, "ggml_backend_get_features") == 0) { + return (void *)ggml_backend_cpu_get_features; + } + if (strcmp(name, "ggml_backend_set_abort_callback") == 0) { + return (void *)ggml_backend_cpu_set_abort_callback; + } + if (strcmp(name, "ggml_backend_cpu_numa_init") == 0) { + return (void *)ggml_numa_init; + } + if (strcmp(name, "ggml_backend_cpu_is_numa") == 0) { + return (void *)ggml_is_numa; + } + + // threadpool - TODO: move to ggml-base + if (strcmp(name, "ggml_threadpool_new") == 0) { + return (void *)ggml_threadpool_new; + } + if (strcmp(name, "ggml_threadpool_free") == 0) { + return (void *)ggml_threadpool_free; + } + if (strcmp(name, "ggml_backend_cpu_set_threadpool") == 0) { + return (void *)ggml_backend_cpu_set_threadpool; } return NULL; @@ -655,9 +617,12 @@ ggml_backend_reg_t ggml_backend_cpu_reg(void) { ggml_cpu_init(); static struct ggml_backend_reg ggml_backend_cpu_reg = { - /* 
.iface = */ ggml_backend_cpu_reg_i, - /* .context = */ NULL, + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_cpu_reg_i, + /* .context = */ NULL, }; return &ggml_backend_cpu_reg; } + +GGML_BACKEND_DL_IMPL(ggml_backend_cpu_reg) diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp index b2ce2e6649479..f80a727819283 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -50,8 +50,7 @@ #include "sgemm.h" #include "ggml-impl.h" -// hack until moved into the CPU backend -#include "../ggml-cpu-impl.h" +#include "ggml-cpu-impl.h" #include "ggml-quants.h" #ifdef _MSC_VER @@ -205,6 +204,7 @@ template <> inline float32x4_t load(const float *p) { return vld1q_f32(p); } #if !defined(_MSC_VER) +// FIXME: this should check for __ARM_FEATURE_FP16_VECTOR_ARITHMETIC template <> inline float16x8_t load(const ggml_fp16_t *p) { return vld1q_f16((const float16_t *)p); } diff --git a/ggml/src/ggml-cuda/CMakeLists.txt b/ggml/src/ggml-cuda/CMakeLists.txt index e592f79896db2..14761650f8a8e 100644 --- a/ggml/src/ggml-cuda/CMakeLists.txt +++ b/ggml/src/ggml-cuda/CMakeLists.txt @@ -12,7 +12,7 @@ if (CUDAToolkit_FOUND) # 61 == Pascal, __dp4a instruction (per-byte integer dot product) # 70 == V100, FP16 tensor cores # 75 == Turing, int8 tensor cores - if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6") + if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6" AND CMAKE_VERSION VERSION_GREATER_EQUAL "3.24") set(CMAKE_CUDA_ARCHITECTURES "native") elseif(GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16) set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75") @@ -46,13 +46,10 @@ if (CUDAToolkit_FOUND) list(APPEND GGML_SOURCES_CUDA ${SRCS}) endif() - add_library(ggml-cuda - ${GGML_HEADERS_CUDA} - ${GGML_SOURCES_CUDA} - ) - - target_link_libraries(ggml-cuda PRIVATE ggml-base) - target_include_directories(ggml-cuda PRIVATE . ..) 
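# Note (assumption, not stated in this hunk): ggml_add_backend_library appears
# to be a helper introduced elsewhere in this patch, presumably in
# ggml/src/CMakeLists.txt. It replaces the removed add_library /
# target_link_libraries / target_include_directories boilerplate, linking
# ggml-base and setting the common include paths once for every backend target.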
+ ggml_add_backend_library(ggml-cuda
+                          ${GGML_HEADERS_CUDA}
+                          ${GGML_SOURCES_CUDA}
+                         )
 
 add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${GGML_CUDA_PEER_MAX_BATCH_SIZE})
 
@@ -135,7 +132,7 @@ if (CUDAToolkit_FOUND)
 
         message("-- CUDA host compiler is ${CUDA_CCID} ${CUDA_CCVER}")
 
-        get_flags(${CUDA_CCID} ${CUDA_CCVER})
+        ggml_get_flags(${CUDA_CCID} ${CUDA_CCVER})
         list(APPEND CUDA_CXX_FLAGS ${CXX_FLAGS} ${GF_CXX_FLAGS})  # This is passed to -Xcompiler later
     endif()
 
@@ -149,7 +146,7 @@ if (CUDAToolkit_FOUND)
         list(APPEND CUDA_FLAGS -Xcompiler ${CUDA_CXX_FLAGS_JOINED})
     endif()
 
-    add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:${CUDA_FLAGS}>")
+    target_compile_options(ggml-cuda PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:${CUDA_FLAGS}>")
 else()
     message(FATAL_ERROR "CUDA Toolkit not found")
 endif()
diff --git a/ggml/src/ggml-cuda/argmax.cu b/ggml/src/ggml-cuda/argmax.cu
index aab04eca7a385..5340eedc08916 100644
--- a/ggml/src/ggml-cuda/argmax.cu
+++ b/ggml/src/ggml-cuda/argmax.cu
@@ -1,57 +1,69 @@
-#include "common.cuh"
+#include <algorithm>
+#include <cstdint>
+
+#include "argmax.cuh"
+#include "common.cuh"
 #include "sum.cuh"
 
-#include <cstdint>
+static __global__ void argmax_f32(const float * __restrict__ x, int32_t * __restrict__ dst, const int64_t ncols) {
+    const int64_t row = blockIdx.x;
 
-static __global__ void argmax_f32(
-    const float * x, int32_t * dst, const int64_t ncols, const int64_t nrows) {
+    float maxval = -FLT_MAX;
+    int   argmax = -1;
+    const float * rowx = x + row * ncols;
 
-    int argmax_thread = 0;
-    const int64_t row0 = (int64_t)blockIdx.x*WARP_SIZE;
+    for (int32_t col = threadIdx.x; col < ncols; col += blockDim.x) {
+        const float val = rowx[col];
+        if (val > maxval) {
+            maxval = val;
+            argmax = col;
+        }
+    }
 
 #pragma unroll
-    for (int64_t row1 = 0; row1 < WARP_SIZE; ++row1) {
-        const int64_t row = row0 + row1;
-
-        if (row >= nrows) {
-            break;
+    for (int offset = 16; offset > 0; offset >>= 1) {
+        const float val = __shfl_xor_sync(0xFFFFFFFF, maxval, offset, WARP_SIZE);
+        const int   col = __shfl_xor_sync(0xFFFFFFFF, argmax, offset, WARP_SIZE);
+        if (val > maxval) {
+            maxval = val;
+            argmax = col;
         }
+    }
 
-        float maxval = -FLT_MAX;
-        int   argmax = -1;
-
-        for (int32_t col = threadIdx.x; col < ncols; col += WARP_SIZE) {
-            const float val        = x[row*ncols + col];
-            const int   bigger     = val > maxval;
-            const int   not_bigger = bigger ^ 0x00000001;
-
-            maxval = maxval*not_bigger + val*bigger;
-            argmax = argmax*not_bigger + col*bigger;
+    const int n_warps = blockDim.x / WARP_SIZE;
+    const int lane_id = threadIdx.x % WARP_SIZE;
+    const int warp_id = threadIdx.x / WARP_SIZE;
+    if (n_warps > 1) {
+        constexpr int    max_warps = 1024 / WARP_SIZE;
+        __shared__ float shared_maxval[max_warps];
+        __shared__ int   shared_argmax[max_warps];
+        if (lane_id == 0) {
+            shared_maxval[warp_id] = maxval;
+            shared_argmax[warp_id] = argmax;
         }
+        __syncthreads();
+
+        if (warp_id == 0) {
+            if (lane_id < n_warps) {
+                maxval = shared_maxval[lane_id];
+                argmax = shared_argmax[lane_id];
+            }
 #pragma unroll
-        for (int mask = 16; mask > 0; mask >>= 1) {
-            const float val        = __shfl_xor_sync(0xFFFFFFFF, maxval, mask, WARP_SIZE);
-            const int   col        = __shfl_xor_sync(0xFFFFFFFF, argmax, mask, WARP_SIZE);
-            const int   bigger     = val > maxval;
-            const int   not_bigger = bigger ^ 0x00000001;
-
-            maxval = maxval*not_bigger + val*bigger;
-            argmax = argmax*not_bigger + col*bigger;
+            for (int offset = 16; offset > 0; offset >>= 1) {
+                const float val = __shfl_xor_sync(0xFFFFFFFF, maxval, offset, WARP_SIZE);
+                const int   col = __shfl_xor_sync(0xFFFFFFFF, argmax, offset, WARP_SIZE);
+                if (val > maxval) {
+                    maxval = val;
+                    argmax = col;
+                }
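// Aside (illustration, not part of the patch): both reduction loops in this
// new kernel use the same butterfly pattern. __shfl_xor_sync with offsets
// 16, 8, 4, 2, 1 exchanges registers between lanes whose IDs differ by
// `offset`, so after log2(32) = 5 steps every lane holds the warp-wide
// (maxval, argmax) pair. The same idea in isolation, for a plain float max:
//
//     __device__ float warp_max(float v) {
//         for (int offset = 16; offset > 0; offset >>= 1) {
//             v = fmaxf(v, __shfl_xor_sync(0xFFFFFFFF, v, offset, 32));
//         }
//         return v;  // all 32 lanes now agree on the maximum
//     }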
            }
        }
-
-        const int store = row1 == threadIdx.x;
-        argmax_thread += store*argmax;
    }
 
-    const int row = row0 + threadIdx.x;
-
-    if (row >= nrows) {
-        return;
+    if (warp_id == 0 && lane_id == 0) {
+        dst[row] = argmax;
    }
-
-    dst[row] = argmax_thread;
 }
 
 void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -70,10 +82,10 @@ void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
 
     cudaStream_t stream = ctx.stream();
 
-    const int64_t num_blocks = (nrows + WARP_SIZE - 1) / WARP_SIZE;
-
-    const dim3 blocks_dim(WARP_SIZE, 1, 1);
+    const int64_t num_blocks = nrows;
+    const int64_t num_threads = std::min<int64_t>(1024, (ne00 + WARP_SIZE - 1) / WARP_SIZE * WARP_SIZE);
+    const dim3 blocks_dim(num_threads, 1, 1);
     const dim3 blocks_num(num_blocks, 1, 1);
 
-    argmax_f32<<<blocks_num, blocks_dim, 0, stream>>>(src0_d, dst_d, ne00, nrows);
+    argmax_f32<<<blocks_num, blocks_dim, 0, stream>>>(src0_d, dst_d, ne00);
 }
diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh
index e146c691c6f87..2c0a562267281 100644
--- a/ggml/src/ggml-cuda/common.cuh
+++ b/ggml/src/ggml-cuda/common.cuh
@@ -41,17 +41,28 @@
 #define CUDART_HMAX   11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed)
 #define CUDART_HMASK  12000 // CUDA 12.0, min. ver. for half2 -> uint mask comparisons
 
-#define CC_PASCAL     600
-#define MIN_CC_DP4A   610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
-#define CC_VOLTA      700
-#define CC_TURING     750
-#define CC_AMPERE     800
-#define CC_OFFSET_AMD 1000000
-#define CC_RDNA1      (CC_OFFSET_AMD + 1010)
-#define CC_RDNA2      (CC_OFFSET_AMD + 1030)
-#define CC_RDNA3      (CC_OFFSET_AMD + 1100)
-#define CC_QY1        210
-#define CC_QY2        220
+#define GGML_CUDA_CC_PASCAL     600
+#define GGML_CUDA_CC_DP4A       610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
+#define GGML_CUDA_CC_VOLTA      700
+#define GGML_CUDA_CC_TURING     750
+#define GGML_CUDA_CC_AMPERE     800
+#define GGML_CUDA_CC_OFFSET_AMD 1000000
+
+// GCN/CDNA, wave size is 64
+#define GGML_CUDA_CC_GCN4       (GGML_CUDA_CC_OFFSET_AMD + 803)  // Tonga, Fiji, Polaris, minimum for fast fp16
+#define GGML_CUDA_CC_VEGA       (GGML_CUDA_CC_OFFSET_AMD + 900)  // Vega56/64, minimum for fp16 dual issue
+#define GGML_CUDA_CC_VEGA20     (GGML_CUDA_CC_OFFSET_AMD + 906)  // MI50/Radeon VII, minimum for dp4a
+#define GGML_CUDA_CC_CDNA       (GGML_CUDA_CC_OFFSET_AMD + 908)  // MI100, minimum for MFMA, acc registers
+#define GGML_CUDA_CC_CDNA2      (GGML_CUDA_CC_OFFSET_AMD + 910)  // MI210, minimum acc register renaming
+#define GGML_CUDA_CC_CDNA3      (GGML_CUDA_CC_OFFSET_AMD + 942)  // MI300
+
+// RDNA removes MFMA, dp4a, xnack, acc registers, wave size is 32
+#define GGML_CUDA_CC_RDNA1      (GGML_CUDA_CC_OFFSET_AMD + 1010) // RX 5000
+#define GGML_CUDA_CC_RDNA2      (GGML_CUDA_CC_OFFSET_AMD + 1030) // RX 6000, minimum for dp4a
+#define GGML_CUDA_CC_RDNA3      (GGML_CUDA_CC_OFFSET_AMD + 1100) // RX 7000, minimum for WMMA
+
+#define GGML_CUDA_CC_QY1        210
+#define GGML_CUDA_CC_QY2        220
 
 #define MATRIX_ROW_PADDING 512 // last row of quant.
matrices is a multiple of this to avoid out-of-bounds memory accesses @@ -120,36 +131,36 @@ typedef float dfloat; // dequantize float typedef float2 dfloat2; #endif // GGML_CUDA_F16 -#if (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= CC_PASCAL +#if (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL #define FP16_AVAILABLE -#endif // (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= CC_PASCAL +#endif // (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL #if defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610 #define FAST_FP16_AVAILABLE #endif // defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610 -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #define FP16_MMA_AVAILABLE -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_TURING +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING #define INT8_MMA_AVAILABLE -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_TURING +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING -#if !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ <= CC_QY1) +#if !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ <= GGML_CUDA_CC_QY1) #define FLASH_ATTN_AVAILABLE -#endif // !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ <= CC_QY1) +#endif // !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ <= GGML_CUDA_CC_QY1) static constexpr bool fast_fp16_available(const int cc) { - return cc >= CC_PASCAL && cc != 610; + return cc >= GGML_CUDA_CC_PASCAL && cc != 610; } static constexpr bool fp16_mma_available(const int cc) { - return cc < CC_OFFSET_AMD && cc >= CC_VOLTA; + return cc < GGML_CUDA_CC_OFFSET_AMD && cc >= GGML_CUDA_CC_VOLTA; } static constexpr bool int8_mma_available(const int cc) { - return cc < CC_OFFSET_AMD && cc >= CC_TURING; + return cc < GGML_CUDA_CC_OFFSET_AMD && cc >= GGML_CUDA_CC_TURING; } [[noreturn]] @@ -176,30 +187,30 @@ static __device__ void no_device_code( #endif // __CUDA_ARCH__ static __device__ __forceinline__ int warp_reduce_sum(int x) { -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_AMPERE +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE return __reduce_add_sync(0xffffffff, x); #else #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - x += __shfl_xor_sync(0xffffffff, x, mask, 32); + for (int offset = 16; offset > 0; offset >>= 1) { + x += __shfl_xor_sync(0xffffffff, x, offset, 32); } return x; -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_AMPERE +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE } static __device__ __forceinline__ float warp_reduce_sum(float x) { #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - x += __shfl_xor_sync(0xffffffff, x, mask, 32); + for (int offset = 16; offset > 0; offset >>= 1) { + x += __shfl_xor_sync(0xffffffff, x, offset, 32); } return x; } static __device__ 
__forceinline__ float2 warp_reduce_sum(float2 a) { #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - a.x += __shfl_xor_sync(0xffffffff, a.x, mask, 32); - a.y += __shfl_xor_sync(0xffffffff, a.y, mask, 32); + for (int offset = 16; offset > 0; offset >>= 1) { + a.x += __shfl_xor_sync(0xffffffff, a.x, offset, 32); + a.y += __shfl_xor_sync(0xffffffff, a.y, offset, 32); } return a; } @@ -209,16 +220,16 @@ static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - const half2 a_other = __shfl_xor_sync(0xffffffff, a, mask, 32); + for (int offset = 16; offset > 0; offset >>= 1) { + const half2 a_other = __shfl_xor_sync(0xffffffff, a, offset, 32); reinterpret_cast(a.x) += __low2half(a_other); reinterpret_cast(a.y) += __high2half(a_other); } return a; #else #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, mask, 32)); + for (int offset = 16; offset > 0; offset >>= 1) { + a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, offset, 32)); } return a; #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) @@ -231,8 +242,8 @@ static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { static __device__ __forceinline__ float warp_reduce_max(float x) { #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, mask, 32)); + for (int offset = 16; offset > 0; offset >>= 1) { + x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, offset, 32)); } return x; } @@ -273,16 +284,16 @@ static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const hal } static __device__ __forceinline__ half2 warp_reduce_max(half2 x) { -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL #pragma unroll - for (int mask = 16; mask > 0; mask >>= 1) { - x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, mask, 32)); + for (int offset = 16; offset > 0; offset >>= 1) { + x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, offset, 32)); } return x; #else GGML_UNUSED(x); NO_DEVICE_CODE; -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_PASCAL +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL } #if CUDART_VERSION < CUDART_HMASK @@ -322,13 +333,13 @@ static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, i #else // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) -#if __CUDA_ARCH__ >= MIN_CC_DP4A +#if __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A return __dp4a(a, b, c); -#else // __CUDA_ARCH__ >= MIN_CC_DP4A +#else // __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A const int8_t * a8 = (const int8_t *) &a; const int8_t * b8 = (const int8_t *) &b; return c + a8[0]*b8[0] + a8[1]*b8[1] + a8[2]*b8[2] + a8[3]*b8[3]; -#endif // __CUDA_ARCH__ >= MIN_CC_DP4A +#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_DP4A #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) } diff --git a/ggml/src/ggml-cuda/concat.cu b/ggml/src/ggml-cuda/concat.cu index dac10ec36b0bd..2f42b8a9538e2 100644 --- a/ggml/src/ggml-cuda/concat.cu +++ b/ggml/src/ggml-cuda/concat.cu @@ -94,7 +94,9 @@ static void concat_f32_cuda(const float * x, const float * y, float * dst, int n } // non-contiguous kernel (slow) -static __global__ void concat_f32_non_cont( +template +static 
__global__ void __launch_bounds__(CUDA_CONCAT_BLOCK_SIZE) + concat_f32_non_cont( const char * src0, const char * src1, char * dst, @@ -121,22 +123,28 @@ static __global__ void concat_f32_non_cont( uint64_t nb0, uint64_t nb1, uint64_t nb2, - uint64_t nb3, - int32_t dim) { + uint64_t nb3){ + static_assert(dim >= 0 && dim <= 3); + const int64_t i3 = blockIdx.z; const int64_t i2 = blockIdx.y; const int64_t i1 = blockIdx.x; - int64_t o[4] = {0, 0, 0, 0}; - o[dim] = dim == 0 ? ne00 : (dim == 1 ? ne01 : (dim == 2 ? ne02 : ne03)); - const float * x; - for (int i0 = threadIdx.x; i0 < ne0; i0 += blockDim.x) { + for (int64_t i0 = threadIdx.x; i0 < ne0; i0 += blockDim.x) { if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { x = (const float *)(src0 + (i3 )*nb03 + (i2 )*nb02 + (i1 )*nb01 + (i0 )*nb00); } else { - x = (const float *)(src1 + (i3 - o[3])*nb13 + (i2 - o[2])*nb12 + (i1 - o[1])*nb11 + (i0 - o[0])*nb10); + if constexpr (dim == 0) { + x = (const float *) (src1 + i3 * nb13 + i2 * nb12 + i1 * nb11 + (i0 - ne00) * nb10); + } else if constexpr (dim == 1) { + x = (const float *) (src1 + i3 * nb13 + i2 * nb12 + (i1 - ne01) * nb11 + i0 * nb10); + } else if constexpr (dim == 2) { + x = (const float *) (src1 + i3 * nb13 + (i2 - ne02) * nb12 + i1 * nb11 + i0 * nb10); + } else if constexpr (dim == 3) { + x = (const float *) (src1 + (i3 - ne03) * nb13 + i2 * nb12 + i1 * nb11 + i0 * nb10); + } } float * y = (float *)(dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); @@ -182,15 +190,32 @@ void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { } } else { dim3 grid_dim(dst->ne[1], dst->ne[2], dst->ne[3]); - concat_f32_non_cont<<>>( - (const char *)src0->data, - (const char *)src1->data, - ( char *)dst->data, + auto launch_kernel = [&](auto dim) { + concat_f32_non_cont<<>>( + (const char *) src0->data, (const char *) src1->data, (char *) dst->data, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3], - dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], - dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], dim); + dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3]); + }; + switch (dim) { + case 0: + launch_kernel(std::integral_constant{}); + break; + case 1: + launch_kernel(std::integral_constant{}); + break; + case 2: + launch_kernel(std::integral_constant{}); + break; + case 3: + launch_kernel(std::integral_constant{}); + break; + default: + GGML_ABORT("Invalid dim: %d", dim); + break; + } } } diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu index c0a4447075c6e..3896f956d7338 100644 --- a/ggml/src/ggml-cuda/convert.cu +++ b/ggml/src/ggml-cuda/convert.cu @@ -26,7 +26,7 @@ static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __ template static __global__ void dequantize_block_q8_0_f16(const void * __restrict__ vx, half * __restrict__ y, const int64_t k) { -#if __CUDA_ARCH__ >= CC_PASCAL +#if __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL constexpr int nint = CUDA_Q8_0_NE_ALIGN/sizeof(int) + WARP_SIZE; const int64_t i0 = CUDA_Q8_0_NE_ALIGN*blockIdx.x; @@ -64,7 +64,7 @@ static __global__ void dequantize_block_q8_0_f16(const void * __restrict__ vx, h GGML_UNUSED(y); GGML_UNUSED(k); NO_DEVICE_CODE; -#endif // __CUDA_ARCH__ >= CC_PASCAL +#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL } template @@ -599,7 +599,7 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) 
{ case GGML_TYPE_Q5_1: return dequantize_block_cuda; case GGML_TYPE_Q8_0: - if (ggml_cuda_info().devices[ggml_cuda_get_device()].cc >= CC_PASCAL) { + if (ggml_cuda_info().devices[ggml_cuda_get_device()].cc >= GGML_CUDA_CC_PASCAL) { return dequantize_block_q8_0_f16_cuda; } return dequantize_block_cuda; diff --git a/ggml/src/ggml-cuda/fattn-vec-f16.cuh b/ggml/src/ggml-cuda/fattn-vec-f16.cuh index 5ec3b91ae2b39..34a2992c769b9 100644 --- a/ggml/src/ggml-cuda/fattn-vec-f16.cuh +++ b/ggml/src/ggml-cuda/fattn-vec-f16.cuh @@ -220,7 +220,6 @@ static __global__ void flash_attn_vec_ext_f16( for (int j = 0; j < ncols; ++j) { half kqmax_new_j = ncols == 1 ? kqmax_new : kqmax_new_arr[j]; - kqmax_new_j = warp_reduce_max(kqmax_new_j); if (threadIdx.x == 0) { kqmax_shared[j][threadIdx.y] = kqmax_new_j; } diff --git a/ggml/src/ggml-cuda/fattn-vec-f32.cuh b/ggml/src/ggml-cuda/fattn-vec-f32.cuh index 3d93f4a8acdf2..a28fc8b7fc893 100644 --- a/ggml/src/ggml-cuda/fattn-vec-f32.cuh +++ b/ggml/src/ggml-cuda/fattn-vec-f32.cuh @@ -206,7 +206,6 @@ static __global__ void flash_attn_vec_ext_f32( for (int j = 0; j < ncols; ++j) { float kqmax_new_j = kqmax_new_arr[j]; - kqmax_new_j = warp_reduce_max(kqmax_new_j); if (threadIdx.x == 0) { kqmax_shared[j][threadIdx.y] = kqmax_new_j; } diff --git a/ggml/src/ggml-cuda/fattn.cu b/ggml/src/ggml-cuda/fattn.cu index 0e7ebbc539352..0b26b0f8e0595 100644 --- a/ggml/src/ggml-cuda/fattn.cu +++ b/ggml/src/ggml-cuda/fattn.cu @@ -304,7 +304,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst const enum ggml_prec prec = ggml_flash_attn_ext_get_prec(KQV); // On AMD the tile kernels perform poorly, use the vec kernel instead: - if (cc >= CC_OFFSET_AMD) { + if (cc >= GGML_CUDA_CC_OFFSET_AMD) { if (prec == GGML_PREC_DEFAULT && fast_fp16_available(cc)) { ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); } else { diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index dd94ab03d5b6c..c180adc84b861 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -177,7 +177,7 @@ static ggml_cuda_device_info ggml_cuda_init() { info.devices[id].smpb = prop.sharedMemPerBlock; #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) info.devices[id].smpbo = prop.sharedMemPerBlock; - info.devices[id].cc = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; + info.devices[id].cc = 100*prop.major + 10*prop.minor + GGML_CUDA_CC_OFFSET_AMD; #else info.devices[id].smpbo = prop.sharedMemPerBlockOptin; info.devices[id].cc = 100*prop.major + 10*prop.minor; @@ -1081,7 +1081,7 @@ static void ggml_cuda_op_mul_mat_cublas( const int compute_capability = ggml_cuda_info().devices[id].cc; - if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { + if (compute_capability >= GGML_CUDA_CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 ggml_cuda_pool_alloc src0_as_f16(ctx.pool(id)); if (src0->type != GGML_TYPE_F16) { @@ -1107,6 +1107,11 @@ static void ggml_cuda_op_mul_mat_cublas( const half alpha_f16 = 1.0f; const half beta_f16 = 0.0f; + cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; + if (ggml_cuda_info().devices[ctx.device].cc == GGML_CUDA_CC_CDNA) { + cu_compute_type = CUBLAS_COMPUTE_32F; + } + 
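// Aside (sketch, hypothetical helper name): the override above demotes the
// accumulator to FP32 on CDNA, presumably because 16F GEMM compute through
// hipBLAS is slow or unreliable on that architecture. The same selection,
// factored out:
//
//     static cublasComputeType_t ggml_cuda_mm_compute_type(const int cc) {
//         return cc == GGML_CUDA_CC_CDNA ? CUBLAS_COMPUTE_32F : CUBLAS_COMPUTE_16F;
//     }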
CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); CUBLAS_CHECK( cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, @@ -1114,7 +1119,7 @@ static void ggml_cuda_op_mul_mat_cublas( &alpha_f16, src0_ptr, CUDA_R_16F, ne00, src1_ptr, CUDA_R_16F, ne10, &beta_f16, dst_f16.get(), CUDA_R_16F, ldc, - CUBLAS_COMPUTE_16F, + cu_compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); @@ -1607,6 +1612,10 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; cudaDataType_t cu_data_type = CUDA_R_16F; + if (ggml_cuda_info().devices[ctx.device].cc == GGML_CUDA_CC_CDNA) { + cu_compute_type = CUBLAS_COMPUTE_32F; + } + // dst strides size_t nbd2 = dst->nb[2]; size_t nbd3 = dst->nb[3]; @@ -2348,7 +2357,7 @@ static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, std::vector ggml_cuda_cpy_fn_ptrs; if (cuda_ctx->cuda_graph->graph == nullptr) { - if (ggml_cuda_info().devices[cuda_ctx->device].cc < CC_AMPERE) { + if (ggml_cuda_info().devices[cuda_ctx->device].cc < GGML_CUDA_CC_AMPERE) { cuda_ctx->cuda_graph->disable_due_to_gpu_arch = true; #ifndef NDEBUG GGML_LOG_DEBUG("%s: disabling CUDA graphs due to GPU architecture\n", __func__); @@ -3019,7 +3028,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g return true; } const int cc = ggml_cuda_info().devices[dev_ctx->device].cc; - return cc >= CC_VOLTA && cc < CC_OFFSET_AMD && op->src[1]->type == GGML_TYPE_F16 && op->src[2]->type == GGML_TYPE_F16; + return cc >= GGML_CUDA_CC_VOLTA && cc < GGML_CUDA_CC_OFFSET_AMD && op->src[1]->type == GGML_TYPE_F16 && op->src[2]->type == GGML_TYPE_F16; } case GGML_OP_CROSS_ENTROPY_LOSS: case GGML_OP_CROSS_ENTROPY_LOSS_BACK: @@ -3126,6 +3135,61 @@ static ggml_backend_dev_t ggml_backend_cuda_reg_get_device(ggml_backend_reg_t re return ctx->devices[index]; } +static ggml_backend_feature * ggml_backend_cuda_get_features(ggml_backend_reg_t reg) { + static std::vector features = []() { + std::vector features; + #define _STRINGIFY(...) #__VA_ARGS__ + #define STRINGIFY(...) 
_STRINGIFY(__VA_ARGS__) + + #ifdef __CUDA_ARCH_LIST__ + features.push_back({ "ARCHS", STRINGIFY(__CUDA_ARCH_LIST__) }); + #endif + + #ifdef GGML_CUDA_FORCE_MMQ + features.push_back({ "FORCE_MMQ", "1" }); + #endif + + #ifdef GGML_CUDA_FORCE_CUBLAS + features.push_back({ "FORCE_CUBLAS", "1" }); + #endif + + #ifdef GGML_CUDA_NO_VMM + features.push_back({ "NO_VMM", "1" }); + #endif + + #ifdef GGML_CUDA_NO_PEER_COPY + features.push_back({ "NO_PEER_COPY", "1" }); + #endif + + #ifdef GGML_CUDA_F16 + features.push_back({ "F16", "1" }); + #endif + + #ifdef GGML_CUDA_USE_GRAPHS + features.push_back({ "USE_GRAPHS", "1" }); + #endif + + #ifdef GGML_CUDA_PEER_MAX_BATCH_SIZE + features.push_back({ "PEER_MAX_BATCH_SIZE", STRINGIFY(GGML_CUDA_PEER_MAX_BATCH_SIZE) }); + #endif + + #ifdef GGML_CUDA_FA_ALL_QUANTS + features.push_back({ "FA_ALL_QUANTS", "1" }); + #endif + + #undef _STRINGIFY + #undef STRINGIFY + + features.push_back({ nullptr, nullptr }); + + return features; + }(); + + return features.data(); + + GGML_UNUSED(reg); +} + static void * ggml_backend_cuda_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) { GGML_UNUSED(reg); if (strcmp(name, "ggml_backend_split_buffer_type") == 0) { @@ -3137,13 +3201,16 @@ static void * ggml_backend_cuda_reg_get_proc_address(ggml_backend_reg_t reg, con if (strcmp(name, "ggml_backend_unregister_host_buffer") == 0) { return (void *)ggml_backend_cuda_unregister_host_buffer; } + if (strcmp(name, "ggml_backend_get_features") == 0) { + return (void *)ggml_backend_cuda_get_features; + } return nullptr; } static const ggml_backend_reg_i ggml_backend_cuda_reg_interface = { /* .get_name = */ ggml_backend_cuda_reg_get_name, /* .get_device_count = */ ggml_backend_cuda_reg_get_device_count, - /* .get_device_get = */ ggml_backend_cuda_reg_get_device, + /* .get_device = */ ggml_backend_cuda_reg_get_device, /* .get_proc_address = */ ggml_backend_cuda_reg_get_proc_address, }; @@ -3169,16 +3236,17 @@ ggml_backend_reg_t ggml_backend_cuda_reg() { dev_ctx->description = prop.name; ggml_backend_dev_t dev = new ggml_backend_device { - /* .interface = */ ggml_backend_cuda_device_interface, - /* .reg = */ ®, - /* .context = */ dev_ctx + /* .iface = */ ggml_backend_cuda_device_interface, + /* .reg = */ ®, + /* .context = */ dev_ctx }; ctx->devices.push_back(dev); } reg = ggml_backend_reg { - /* .interface = */ ggml_backend_cuda_reg_interface, - /* .context = */ ctx + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_cuda_reg_interface, + /* .context = */ ctx }; } @@ -3209,3 +3277,5 @@ ggml_backend_t ggml_backend_cuda_init(int device) { return cuda_backend; } + +GGML_BACKEND_DL_IMPL(ggml_backend_cuda_reg) diff --git a/ggml/src/ggml-cuda/mma.cuh b/ggml/src/ggml-cuda/mma.cuh index a452a3cc3d152..7d11540afd237 100644 --- a/ggml/src/ggml-cuda/mma.cuh +++ b/ggml/src/ggml-cuda/mma.cuh @@ -171,7 +171,7 @@ struct mma_int_C_I16J8 { __device__ __forceinline__ void mma_K4(const mma_int_A_I16K4 & mma_A, const mma_int_B_J8K4 & mma_B) { #ifdef INT8_MMA_AVAILABLE -#if __CUDA_ARCH__ >= CC_AMPERE +#if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(x[0]), "+r"(x[1]), "+r"(x[2]), "+r"(x[3]) : "r"(mma_A.x[0]), "r"(mma_A.x[1]), "r"(mma_B.x[0])); @@ -183,7 +183,7 @@ struct mma_int_C_I16J8 { asm("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0, %1}, {%2}, {%3}, {%0, %1};" : "+r"(x[2]), "+r"(x[3]) : "r"(mma_A.x[1]), "r"(mma_B.x[0])); -#endif // __CUDA_ARCH__ >= CC_AMPERE 
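// Aside (illustration only): this guard selects between the single-instruction
// warp reduction (redux.sync, available from Ampere on) and the generic
// shuffle loop. The dispatch in isolation:
//
//     static __device__ int warp_sum(int x) {
//     #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE
//         return __reduce_add_sync(0xffffffff, x);
//     #else
//         for (int offset = 16; offset > 0; offset >>= 1) {
//             x += __shfl_xor_sync(0xffffffff, x, offset, 32);
//         }
//         return x;
//     #endif
//     }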
+#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #else GGML_UNUSED(mma_A); GGML_UNUSED(mma_B); @@ -193,7 +193,7 @@ struct mma_int_C_I16J8 { __device__ __forceinline__ void mma_K8(const mma_int_A_I16K8 & mma_A, const mma_int_B_J8K8 & mma_B) { #ifdef INT8_MMA_AVAILABLE -#if __CUDA_ARCH__ >= CC_AMPERE +#if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(x[0]), "+r"(x[1]), "+r"(x[2]), "+r"(x[3]) : "r"(mma_A.x[0]), "r"(mma_A.x[1]), "r"(mma_A.x[2]), "r"(mma_A.x[3]), "r"(mma_B.x[0]), "r"(mma_B.x[1])); @@ -211,7 +211,7 @@ struct mma_int_C_I16J8 { asm("mma.sync.aligned.m8n8k16.row.col.s32.s8.s8.s32 {%0, %1}, {%2}, {%3}, {%0, %1};" : "+r"(x[2]), "+r"(x[3]) : "r"(mma_A.x[3]), "r"(mma_B.x[1])); -#endif // __CUDA_ARCH__ >= CC_AMPERE +#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE #else GGML_UNUSED(mma_A); GGML_UNUSED(mma_B); diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu index ae5c68ab35129..270251df4f115 100644 --- a/ggml/src/ggml-cuda/mmq.cu +++ b/ggml/src/ggml-cuda/mmq.cu @@ -27,7 +27,7 @@ void ggml_cuda_op_mul_mat_q( // The stream-k decomposition is only faster for recent NVIDIA GPUs. // Also its fixup needs to allocate a temporary buffer in the memory pool. // There are multiple parallel CUDA streams for src1_ncols != ne11 which would introduce a race condition for this buffer. - const bool use_stream_k = compute_capability >= CC_VOLTA && compute_capability < CC_OFFSET_AMD && src1_ncols == ne11; + const bool use_stream_k = compute_capability >= GGML_CUDA_CC_VOLTA && compute_capability < GGML_CUDA_CC_OFFSET_AMD && src1_ncols == ne11; const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst, use_stream_k}; switch (src0->type) { @@ -136,7 +136,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return true; } - if (cc < MIN_CC_DP4A) { + if (cc < GGML_CUDA_CC_DP4A) { return false; } @@ -144,9 +144,9 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return true; #endif //GGML_CUDA_FORCE_MMQ - if (cc < CC_OFFSET_AMD) { - return cc < CC_VOLTA || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; + if (cc < GGML_CUDA_CC_OFFSET_AMD) { + return cc < GGML_CUDA_CC_VOLTA || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } - return cc < CC_RDNA3 || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; + return (cc < GGML_CUDA_CC_RDNA3 && cc != GGML_CUDA_CC_CDNA && cc != GGML_CUDA_CC_VEGA20) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh index 425acb20da311..3cd508a1d4a1c 100644 --- a/ggml/src/ggml-cuda/mmq.cuh +++ b/ggml/src/ggml-cuda/mmq.cuh @@ -89,9 +89,9 @@ struct tile_x_sizes { static constexpr int get_mmq_x_max_host(const int cc) { return int8_mma_available(cc) ? 128 : #ifdef GGML_CUDA_FORCE_MMQ - cc >= CC_VOLTA && cc < CC_OFFSET_AMD ? 128 : 64; + cc >= GGML_CUDA_CC_VOLTA && cc < GGML_CUDA_CC_OFFSET_AMD ? 128 : 64; #else - cc >= CC_VOLTA && cc < CC_OFFSET_AMD ? MMQ_DP4A_MAX_BATCH_SIZE : 64; + cc >= GGML_CUDA_CC_VOLTA && cc < GGML_CUDA_CC_OFFSET_AMD ? 
MMQ_DP4A_MAX_BATCH_SIZE : 64; #endif // GGML_CUDA_FORCE_MMQ } @@ -104,23 +104,23 @@ static constexpr __device__ int get_mmq_x_max_device() { return 128; #else // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) -#if __CUDA_ARCH__ >= CC_VOLTA +#if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #ifdef GGML_CUDA_FORCE_MMQ return MMQ_DP4A_MAX_BATCH_SIZE; #else // GGML_CUDA_FORCE_MMQ return 128; #endif // GGML_CUDA_FORCE_MMQ -#else // __CUDA_ARCH__ >= CC_VOLTA +#else // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA return 64; -#endif // __CUDA_ARCH__ >= CC_VOLTA +#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) #endif // INT8_MMA_AVAILABLE } static constexpr int get_mmq_y_host(const int cc) { - return cc >= CC_OFFSET_AMD ? (cc == CC_RDNA1 ? 64 : 128) : (cc >= CC_VOLTA ? 128 : 64); + return cc >= GGML_CUDA_CC_OFFSET_AMD ? (cc == GGML_CUDA_CC_RDNA1 ? 64 : 128) : (cc >= GGML_CUDA_CC_VOLTA ? 128 : 64); } static constexpr __device__ int get_mmq_y_device() { @@ -131,11 +131,11 @@ static constexpr __device__ int get_mmq_y_device() { return 128; #endif // defined RDNA1 #else -#if __CUDA_ARCH__ >= CC_VOLTA +#if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA return 128; #else return 64; -#endif // __CUDA_ARCH__ >= CC_VOLTA +#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) } @@ -2570,15 +2570,15 @@ static __device__ void mul_mat_q_process_tile( template #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) -#if defined(RDNA3) || defined(RDNA2) +#if defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) __launch_bounds__(WARP_SIZE*nwarps, 2) -#endif // defined(RDNA3) || defined(RDNA2) +#endif // defined(RDNA3) || defined(RDNA2) || defined(CDNA) || defined(GCN) #else -#if __CUDA_ARCH__ >= CC_VOLTA +#if __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA __launch_bounds__(WARP_SIZE*nwarps, 1) #else __launch_bounds__(WARP_SIZE*nwarps, 2) -#endif // __CUDA_ARCH__ >= CC_VOLTA +#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) static __global__ void mul_mat_q( const char * __restrict__ x, const char * __restrict__ yc, float * __restrict__ dst, float * __restrict__ tmp_fixup, @@ -2594,7 +2594,7 @@ static __global__ void mul_mat_q( constexpr int mmq_y = get_mmq_y_device(); // On AMD or old CUDA the performance with stream-k was worse, use conventional tiling instead: -#if (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ < CC_VOLTA +#if (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ < GGML_CUDA_CC_VOLTA { constexpr bool fixup = false; mul_mat_q_process_tile @@ -2602,7 +2602,7 @@ static __global__ void mul_mat_q( blockIdx.x, blockIdx.y, 0, ne00/qk); return; } -#endif // (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ < CC_VOLTA +#endif // (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ < GGML_CUDA_CC_VOLTA const int64_t blocks_per_ne00 = ne00 / qk; constexpr int blocks_per_iter = MMQ_ITER_K / qk; @@ -2825,7 +2825,7 @@ void mul_mat_q_case(ggml_backend_cuda_context & ctx, const mmq_args & args, cuda const int mmq_x_max = get_mmq_x_max_host(cc); const int mmq_y = get_mmq_y_host(cc); const int block_num_y = (args.ne01 + mmq_y - 1) / mmq_y; - const bool use_stream_k = cc >= CC_VOLTA && cc < CC_OFFSET_AMD; + const bool use_stream_k = cc >= GGML_CUDA_CC_VOLTA && cc < GGML_CUDA_CC_OFFSET_AMD; int mmq_x_best = 0; int nparts_best = INT_MAX; diff --git 
a/ggml/src/ggml-cuda/mmv.cu b/ggml/src/ggml-cuda/mmv.cu index cfe91f4283fa6..a4b4f6bc10d98 100644 --- a/ggml/src/ggml-cuda/mmv.cu +++ b/ggml/src/ggml-cuda/mmv.cu @@ -57,7 +57,7 @@ static __global__ void mul_mat_vec( if (block_size > WARP_SIZE) { buf_iw[tid/WARP_SIZE] = sumf; __syncthreads(); - if (tid > WARP_SIZE) { + if (tid >= WARP_SIZE) { return; } sumf = buf_iw[tid]; diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index 735975c160dd0..e3b912d875b5b 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -142,7 +142,7 @@ static void mul_mat_vec_q_cuda( int64_t nwarps = 1; int64_t rows_per_cuda_block = 1; - if (ggml_cuda_info().devices[id].cc < CC_RDNA2) { // NVIDIA and AMD older than RDNA2 + if (ggml_cuda_info().devices[id].cc < GGML_CUDA_CC_CDNA || ggml_cuda_info().devices[id].cc == GGML_CUDA_CC_RDNA1) { // NVIDIA and AMD older than RDNA2 but not CDNA switch(ncols_y) { case 1: nwarps = 4; diff --git a/ggml/src/ggml-cuda/quantize.cu b/ggml/src/ggml-cuda/quantize.cu index 45408ce8684e4..1702e4ce2feba 100644 --- a/ggml/src/ggml-cuda/quantize.cu +++ b/ggml/src/ggml-cuda/quantize.cu @@ -69,8 +69,8 @@ static __global__ void quantize_mmq_q8_1( // Exchange max. abs. value between vals_per_scale/4 threads. #pragma unroll - for (int mask = vals_per_scale/8; mask > 0; mask >>= 1) { - amax = fmaxf(amax, __shfl_xor_sync(0xFFFFFFFF, amax, mask, WARP_SIZE)); + for (int offset = vals_per_scale/8; offset > 0; offset >>= 1) { + amax = fmaxf(amax, __shfl_xor_sync(0xFFFFFFFF, amax, offset, WARP_SIZE)); } float sum; @@ -79,8 +79,8 @@ static __global__ void quantize_mmq_q8_1( // Exchange calculate sum across vals_per_sum/4 threads. #pragma unroll - for (int mask = vals_per_sum/8; mask > 0; mask >>= 1) { - sum += __shfl_xor_sync(0xFFFFFFFF, sum, mask, WARP_SIZE); + for (int offset = vals_per_sum/8; offset > 0; offset >>= 1) { + sum += __shfl_xor_sync(0xFFFFFFFF, sum, offset, WARP_SIZE); } } diff --git a/ggml/src/ggml-cuda/rope.cu b/ggml/src/ggml-cuda/rope.cu index 88f586d689cfd..2c84778d29c9b 100644 --- a/ggml/src/ggml-cuda/rope.cu +++ b/ggml/src/ggml-cuda/rope.cu @@ -4,6 +4,11 @@ struct rope_corr_dims { float v[2]; }; + +struct mrope_sections { + int v[4]; +}; + static __device__ float rope_yarn_ramp(const float low, const float high, const int i0) { const float y = (i0 / 2 - low) / max(0.001f, high - low); return 1.0f - min(1.0f, max(0.0f, y)); @@ -108,6 +113,105 @@ static __global__ void rope_neox( dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta; } +template +static __global__ void rope_multi( + const T * x, T * dst, int ne0, int ne2, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows, + float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, const float * freq_factors, mrope_sections sections) { + const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y); + + if (i0 >= ne0) { + return; + } + + const int row = blockDim.x*blockIdx.x + threadIdx.x; + + if (i0 >= n_dims) { + const int i = row*ne0 + i0; + + dst[i + 0] = x[i + 0]; + dst[i + 1] = x[i + 1]; + + return; + } + + const int i = row*ne0 + i0/2; + const int i2 = row/p_delta_rows; + + int sect_dims = sections.v[0] + sections.v[1] + sections.v[2] + sections.v[3]; + int sec_w = sections.v[1] + sections.v[0]; + int sector = (i0 / 2) % sect_dims; + + float theta_base = 0.0; + if (sector < sections.v[0]) { + theta_base = pos[i2]*powf(theta_scale, i0/2.0f); + } + else if (sector >= sections.v[0] && sector < sec_w) { + theta_base = pos[i2 + ne2 * 1]*powf(theta_scale, i0/2.0f); + 
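// Aside (illustration only): mrope splits the rotary dimensions into up to
// four sections (e.g. temporal/height/width/extra position channels for
// multimodal models); the `sector` computed above picks which plane of `pos`
// feeds theta_base. Equivalent compact lookup, mirroring the branches below:
//
//     int section = 3;
//     if      (sector < sections.v[0])                                 section = 0;
//     else if (sector < sections.v[0] + sections.v[1])                 section = 1;
//     else if (sector < sections.v[0] + sections.v[1] + sections.v[2]) section = 2;
//     theta_base = pos[i2 + ne2*section] * powf(theta_scale, i0/2.0f);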
} + else if (sector >= sec_w && sector < sec_w + sections.v[2]) { + theta_base = pos[i2 + ne2 * 2]*powf(theta_scale, i0/2.0f); + } + else if (sector >= sec_w + sections.v[2]) { + theta_base = pos[i2 + ne2 * 3]*powf(theta_scale, i0/2.0f); + } + + const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f; + + float cos_theta; + float sin_theta; + + rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); + + const float x0 = x[i + 0]; + const float x1 = x[i + n_dims/2]; + + dst[i + 0] = x0*cos_theta - x1*sin_theta; + dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta; +} + +template +static __global__ void rope_vision( + const T * x, T * dst, int ne0, int ne2, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows, + float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, const float * freq_factors, mrope_sections sections) { + const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y); + + if (i0 >= ne0) { + return; + } + + const int row = blockDim.x*blockIdx.x + threadIdx.x; + + const int i = row*ne0 + i0/2; + const int i2 = row/p_delta_rows; // i2-th tokens + + int sect_dims = sections.v[0] + sections.v[1]; + int sec_w = sections.v[1] + sections.v[0]; + int sector = (i0 / 2) % sect_dims; + + float theta_base = 0.0; + if (sector < sections.v[0]) { + const int p = sector; + theta_base = pos[i2]*powf(theta_scale, p); + } + else if (sector >= sections.v[0] && sector < sec_w) { + const int p = sector - sections.v[0]; + theta_base = pos[i2 + ne2]*powf(theta_scale, p); + } + + const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f; + + float cos_theta; + float sin_theta; + + rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta); + + const float x0 = x[i + 0]; + const float x1 = x[i + n_dims]; + + dst[i + 0] = x0*cos_theta - x1*sin_theta; + dst[i + n_dims] = x0*sin_theta + x1*cos_theta; +} + template static void rope_norm_cuda( const T * x, T * dst, int ne0, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows, @@ -156,6 +260,56 @@ static void rope_neox_cuda( } } +template +static void rope_multi_cuda( + const T * x, T * dst, int ne0, int ne2, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, mrope_sections sections, cudaStream_t stream) { + GGML_ASSERT(ne0 % 2 == 0); + const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); + const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); + const dim3 block_nums(nr, n_blocks_x, 1); + + const float theta_scale = powf(freq_base, -2.0f/n_dims); + + if (freq_factors == nullptr) { + rope_multi<<>>( + x, dst, ne0, ne2, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims, + theta_scale, freq_factors, sections + ); + } else { + rope_multi<<>>( + x, dst, ne0, ne2, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims, + theta_scale, freq_factors, sections + ); + } +} + +template +static void rope_vision_cuda( + const T * x, T * dst, int ne0, int ne2, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, mrope_sections sections, cudaStream_t stream) { + GGML_ASSERT(ne0 % 2 == 0); + const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1); + const int n_blocks_x = (ne0 
+ 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE); + const dim3 block_nums(nr, n_blocks_x, 1); + // break down (head_dim, heads, seq) into (CUDA_ROPE_BLOCK_SIZE, x, heads * seq) + // where x ~= ceil(head_dim / CUDA_ROPE_BLOCK_SIZE); + + const float theta_scale = powf(freq_base, -2.0f/n_dims); + + if (freq_factors == nullptr) { + rope_vision<<>>( + x, dst, ne0, ne2, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims, + theta_scale, freq_factors, sections + ); + } else { + rope_vision<<>>( + x, dst, ne0, ne2, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims, + theta_scale, freq_factors, sections + ); + } +} + static void rope_norm_cuda_f16( const half * x, half * dst, int ne0, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) { @@ -185,6 +339,38 @@ static void rope_neox_cuda_f32( rope_neox_cuda(x, dst, ne0, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream); } +static void rope_multi_cuda_f16( + const half * x, half * dst, int ne0, int ne2, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, mrope_sections sections, cudaStream_t stream +) { + + rope_multi_cuda(x, dst, ne0, ne2, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, stream); +} + +static void rope_multi_cuda_f32( + const float * x, float * dst, int ne0, int ne2, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, mrope_sections sections, cudaStream_t stream +) { + + rope_multi_cuda(x, dst, ne0, ne2, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, stream); +} + +static void rope_vision_cuda_f16( + const half * x, half * dst, int ne0, int ne2, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, mrope_sections sections, cudaStream_t stream +) { + + rope_vision_cuda(x, dst, ne0, ne2, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, stream); +} + +static void rope_vision_cuda_f32( + const float * x, float * dst, int ne0, int ne2, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows, + float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, mrope_sections sections, cudaStream_t stream +) { + + rope_vision_cuda(x, dst, ne0, ne2, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections, stream); +} + void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const ggml_tensor * src0 = dst->src[0]; const ggml_tensor * src1 = dst->src[1]; @@ -201,8 +387,9 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); GGML_ASSERT(src0->type == dst->type); - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; + const int64_t ne00 = 
src0->ne[0]; // head dims + const int64_t ne01 = src0->ne[1]; // num heads + const int64_t ne02 = src0->ne[2]; // num heads const int64_t nr = ggml_nrows(src0); //const int n_past = ((int32_t *) dst->op_params)[0]; @@ -210,6 +397,7 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int mode = ((int32_t *) dst->op_params)[2]; //const int n_ctx = ((int32_t *) dst->op_params)[3]; const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; + mrope_sections sections; // RoPE alteration for extended context float freq_base; @@ -225,8 +413,19 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); + memcpy(§ions.v, (int32_t *) dst->op_params + 11, sizeof(int)*4); const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; + const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; + const bool is_vision = mode == GGML_ROPE_TYPE_VISION; + + if (is_mrope) { + GGML_ASSERT(sections.v[0] > 0 || sections.v[1] > 0 || sections.v[2] > 0); + } + + if (is_vision) { + GGML_ASSERT(n_dims == ne00/2); + } const int32_t * pos = (const int32_t *) src1_d; @@ -253,6 +452,34 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { } else { GGML_ABORT("fatal error"); } + } else if (is_mrope && !is_vision) { + if (src0->type == GGML_TYPE_F32) { + rope_multi_cuda_f32( + (const float *)src0_d, (float *)dst_d, ne00, ne02, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, freq_factors, sections, stream + ); + } else if (src0->type == GGML_TYPE_F16) { + rope_multi_cuda_f16( + (const half *)src0_d, (half *)dst_d, ne00, ne02, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, freq_factors, sections, stream + ); + } else { + GGML_ABORT("fatal error"); + } + } else if (is_vision) { + if (src0->type == GGML_TYPE_F32) { + rope_vision_cuda_f32( + (const float *)src0_d, (float *)dst_d, ne00, ne02, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, freq_factors, sections, stream + ); + } else if (src0->type == GGML_TYPE_F16) { + rope_vision_cuda_f16( + (const half *)src0_d, (half *)dst_d, ne00, ne02, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor, + attn_factor, corr_dims, freq_factors, sections, stream + ); + } else { + GGML_ABORT("fatal error"); + } } else { if (src0->type == GGML_TYPE_F32) { rope_norm_cuda_f32( diff --git a/ggml/src/ggml-cuda/sum.cu b/ggml/src/ggml-cuda/sum.cu index 31cfe53941fe0..e0dafc1d2049d 100644 --- a/ggml/src/ggml-cuda/sum.cu +++ b/ggml/src/ggml-cuda/sum.cu @@ -3,8 +3,6 @@ #endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11700 #ifdef USE_CUB -// On Windows CUB uses libraries with variables called CC_PASCAL which conflict with the define in common.cuh. -// For this reason CUB must be included BEFORE anything else. 
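// NOTE: the include-order workaround deleted above becomes obsolete with this
// patch: the compute-capability constants now carry the GGML_CUDA_CC_ prefix,
// so they can no longer collide with CUB's own CC_PASCAL symbol on Windows.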
#include <cub/cub.cuh>
 using namespace cub;
 #endif // USE_CUB
diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h
index 1f3c70c2e6934..3205534d66f10 100644
--- a/ggml/src/ggml-cuda/vendors/hip.h
+++ b/ggml/src/ggml-cuda/vendors/hip.h
@@ -95,6 +95,14 @@
 #define __CUDA_ARCH__ 1300
 
+#if defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__)
+#define GCN
+#endif
+
+#if defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx942__)
+#define CDNA
+#endif
+
 #if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || defined(__gfx1103__) || \
     defined(__gfx1150__) || defined(__gfx1151__)
 #define RDNA3
diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt
index fccf8eb8440b8..b15fbd24d6b36 100644
--- a/ggml/src/ggml-hip/CMakeLists.txt
+++ b/ggml/src/ggml-hip/CMakeLists.txt
@@ -64,12 +64,10 @@ else()
     list(APPEND GGML_SOURCES_ROCM ${SRCS})
 endif()
 
-add_library(ggml-hip
-            ${GGML_HEADERS_ROCM}
-            ${GGML_SOURCES_ROCM})
-
-target_link_libraries(ggml-hip PRIVATE ggml-base)
-target_include_directories(ggml-hip PRIVATE . ..)
+ggml_add_backend_library(ggml-hip
+                         ${GGML_HEADERS_ROCM}
+                         ${GGML_SOURCES_ROCM}
+                        )
 
 # TODO: do not use CUDA definitions for HIP
 target_compile_definitions(ggml PUBLIC GGML_USE_CUDA)
diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h
index 92a64fe5a5930..549772c57c90a 100644
--- a/ggml/src/ggml-impl.h
+++ b/ggml/src/ggml-impl.h
@@ -14,7 +14,7 @@
 #include <arm_sve.h>
 #endif // __ARM_FEATURE_SVE
 
-#if defined(__ARM_NEON)
+#if defined(__ARM_NEON) && !defined(__CUDACC__)
 // if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
 //
 //   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
 #include <arm_neon.h>
@@ -30,11 +30,13 @@
 extern "C" {
 #endif
 
-#undef MIN
-#undef MAX
+#ifndef MIN
+#    define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
 
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#ifndef MAX
+#    define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
 
 // required for mmap as gguf only guarantees 32-byte alignment
 #define TENSOR_ALIGNMENT 32
@@ -72,8 +74,8 @@ static inline int ggml_up(int n, int m) {
 
 //
 GGML_ATTRIBUTE_FORMAT(2, 3)
-void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
-void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
+GGML_API void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
+GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);
 
 #define GGML_LOG(...)       ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__)
 #define GGML_LOG_INFO(...)
ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__) @@ -295,24 +297,27 @@ struct ggml_cgraph { enum ggml_cgraph_eval_order order; }; +// returns a slice of cgraph with nodes [i0, i1) +// the slice does not have leafs or gradients +// if you need the gradients, get them from the original graph struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1); // Memory allocation -void * ggml_aligned_malloc(size_t size); -void ggml_aligned_free(void * ptr, size_t size); +GGML_API void * ggml_aligned_malloc(size_t size); +GGML_API void ggml_aligned_free(void * ptr, size_t size); // FP16 to FP32 conversion #if defined(__ARM_NEON) - #ifdef _MSC_VER + #if defined(_MSC_VER) || (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) typedef uint16_t ggml_fp16_internal_t; #else typedef __fp16 ggml_fp16_internal_t; #endif #endif -#if defined(__ARM_NEON) && !defined(_MSC_VER) +#if defined(__ARM_NEON) && !defined(_MSC_VER) && !(defined(__CUDACC__) && __CUDACC_VER_MAJOR__ <= 11) #define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x) #define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x) @@ -546,6 +551,22 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) { #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x) #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x) +// expose GGUF internals for test code + +GGML_API size_t gguf_type_size(enum gguf_type type); + +GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params); + +struct gguf_buf { + void * data; + size_t size; + size_t offset; +}; +GGML_API struct gguf_buf gguf_buf_init(size_t size); +GGML_API void gguf_buf_free(struct gguf_buf buf); + +GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta); + #ifdef __cplusplus } #endif diff --git a/ggml/src/ggml-kompute/CMakeLists.txt b/ggml/src/ggml-kompute/CMakeLists.txt index 0bd027c7f537e..c9109d5e8ee19 100644 --- a/ggml/src/ggml-kompute/CMakeLists.txt +++ b/ggml/src/ggml-kompute/CMakeLists.txt @@ -6,13 +6,13 @@ if (NOT glslc_executable) message(FATAL_ERROR "glslc not found") endif() -add_library(ggml-kompute - ggml-kompute.cpp - ../../include/ggml-kompute.h - ) +ggml_add_backend_library(ggml-kompute + ggml-kompute.cpp + ../../include/ggml-kompute.h + ) target_link_libraries(ggml-kompute PRIVATE ggml-base kompute) -target_include_directories(ggml-kompute PRIVATE . .. 
${CMAKE_CURRENT_BINARY_DIR}) +target_include_directories(ggml-kompute PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) add_compile_definitions(VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1) @@ -105,8 +105,10 @@ if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/kompute/CMakeLists.txt") kompute-shaders/op_getrows_q4_0.comp kompute-shaders/op_getrows_q4_1.comp kompute-shaders/op_getrows_q6_k.comp - kompute-shaders/op_rope_f16.comp - kompute-shaders/op_rope_f32.comp + kompute-shaders/op_rope_norm_f16.comp + kompute-shaders/op_rope_norm_f32.comp + kompute-shaders/op_rope_neox_f16.comp + kompute-shaders/op_rope_neox_f32.comp kompute-shaders/op_cpy_f16_f16.comp kompute-shaders/op_cpy_f16_f32.comp kompute-shaders/op_cpy_f32_f16.comp @@ -139,8 +141,10 @@ if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/kompute/CMakeLists.txt") shaderop_getrows_q4_0.h shaderop_getrows_q4_1.h shaderop_getrows_q6_k.h - shaderop_rope_f16.h - shaderop_rope_f32.h + shaderop_rope_norm_f16.h + shaderop_rope_norm_f32.h + shaderop_rope_neox_f16.h + shaderop_rope_neox_f32.h shaderop_cpy_f16_f16.h shaderop_cpy_f16_f32.h shaderop_cpy_f32_f16.h diff --git a/ggml/src/ggml-kompute/ggml-kompute.cpp b/ggml/src/ggml-kompute/ggml-kompute.cpp index 2fea9e4cc8d38..50579227183d3 100644 --- a/ggml/src/ggml-kompute/ggml-kompute.cpp +++ b/ggml/src/ggml-kompute/ggml-kompute.cpp @@ -28,8 +28,10 @@ #include "shaderop_getrows_q4_0.h" #include "shaderop_getrows_q4_1.h" #include "shaderop_getrows_q6_k.h" -#include "shaderop_rope_f16.h" -#include "shaderop_rope_f32.h" +#include "shaderop_rope_norm_f16.h" +#include "shaderop_rope_norm_f32.h" +#include "shaderop_rope_neox_f16.h" +#include "shaderop_rope_neox_f32.h" #include "shaderop_cpy_f16_f16.h" #include "shaderop_cpy_f16_f32.h" #include "shaderop_cpy_f32_f16.h" @@ -345,7 +347,7 @@ void ggml_vk_allocate_descriptor_pool(struct ggml_kompute_context * ctx, size_t std::vector descriptorPoolSizes = { vk::DescriptorPoolSize( vk::DescriptorType::eStorageBuffer, - 3 * size // Descriptor count is number of possible tensors to pass into an algorithm + 4 * size // Descriptor count is number of possible tensors to pass into an algorithm ) }; @@ -788,7 +790,8 @@ static void ggml_vk_soft_max( const std::shared_ptr& out, uint32_t inAOff, uint32_t inBOff, uint32_t outOff, int32_t ne00, int32_t ne01, int32_t ne02, uint32_t ne03, - float scale + float scale, float max_bias, float m0, float m1, + uint32_t n_head_log2 ) { const static auto spirv = getSpirvShader(kp::shader_data::op_softmax_comp_spv, kp::shader_data::op_softmax_comp_spv_len); @@ -796,12 +799,14 @@ static void ggml_vk_soft_max( struct PushConstants { uint32_t inAOff, inBOff, outOff; int32_t ne00, ne01, ne02; - float scale; + float scale, max_bias, m0, m1; + uint32_t n_head_log2; int32_t mask; } pushConsts { safe_divide(inAOff, 4), safe_divide(inBOff, 4), safe_divide(outOff, 4), ne00, ne01, ne02, - scale, + scale, max_bias, m0, m1, + n_head_log2, bool(inB) }; @@ -911,9 +916,9 @@ static void ggml_vk_mul_mat_f16( const std::shared_ptr& out, uint32_t inAOff, uint32_t inBOff, uint32_t outOff, int32_t ne00, int32_t ne01, int32_t ne02, - uint32_t nb00, uint32_t nb01, uint32_t nb02, + uint32_t nb00, uint32_t nb01, uint32_t nb02, uint32_t nb03, int32_t ne10, int32_t ne11, int32_t ne12, int32_t ne13, - uint32_t nb10, uint32_t nb11, uint32_t nb12, + uint32_t nb10, uint32_t nb11, uint32_t nb12, uint32_t nb13, int32_t ne0, int32_t ne1, uint32_t r2, uint32_t r3 ) { @@ -923,17 +928,17 @@ static void ggml_vk_mul_mat_f16( struct PushConstants { uint32_t inAOff, inBOff, outOff; int32_t ne00, ne01, ne02; - 
uint32_t nb00, nb01, nb02; + uint32_t nb00, nb01, nb02, nb03; int32_t ne10, ne11, ne12; - uint32_t nb10, nb11, nb12; + uint32_t nb10, nb11, nb12, nb13; int32_t ne0, ne1; uint32_t r2, r3; } pushConsts { safe_divide(inAOff, 2), safe_divide(inBOff, 4), safe_divide(outOff, 4), ne00, ne01, ne02, - nb00, nb01, nb02, + nb00, nb01, nb02, nb03, ne10, ne11, ne12, - nb10, nb11, nb12, + nb10, nb11, nb12, nb13, ne0, ne1, r2, r3 }; @@ -1013,6 +1018,8 @@ static void ggml_vk_mul_mat_impl( int32_t ne00, int32_t ne01, int32_t ne02, int32_t ne10, int32_t ne11, int32_t ne12, int32_t ne13, int32_t ne0, int32_t ne1, + uint32_t nb01, uint32_t nb02, uint32_t nb03, + uint32_t nb11, uint32_t nb12, uint32_t nb13, uint32_t r2, uint32_t r3 ) { struct PushConstants { @@ -1020,19 +1027,23 @@ static void ggml_vk_mul_mat_impl( int32_t ne00, ne01, ne02; int32_t ne10, ne12; int32_t ne0, ne1; + uint32_t nb01, nb02, nb03; + uint32_t nb11, nb12, nb13; uint32_t r2, r3; } pushConsts { safe_divide(inAOff, block_size), safe_divide(inBOff, 4), safe_divide(outOff, 4), ne00, ne01, ne02, ne10, ne12, ne0, ne1, + nb01, nb02, nb03, + nb11, nb12, nb13, r2, r3 }; auto name = std::string(__func__) + "_" + suffix; std::shared_ptr s_algo = nullptr; if (!komputeManager()->hasAlgorithm(name)) { - const uint32_t local_x = ggml_vk_current_device().subgroupSize * 2; + const uint32_t local_x = (ggml_vk_current_device().subgroupSize * 2) / 8; s_algo = komputeManager()->algorithm(name, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned((ne01 + 7)/8), unsigned(ne11), unsigned(ne12*ne13)}, {local_x}, {pushConsts}); } else { s_algo = komputeManager()->getAlgorithm(name); @@ -1074,19 +1085,26 @@ static void ggml_vk_mul_mat_q4_k( const std::shared_ptr& inB, const std::shared_ptr& out, uint32_t inAOff, uint32_t inBOff, uint32_t outOff, - int32_t ne00, int32_t ne01, int32_t ne02, int32_t ne10, - int32_t ne11, int32_t ne12, int32_t ne13, int32_t ne0, - int32_t ne1, int32_t r2, int32_t r3 + int32_t ne00, int32_t ne01, int32_t ne02, + int32_t ne10, int32_t ne11, int32_t ne12, int32_t ne13, + int32_t ne0, int32_t ne1, + uint32_t nb01, uint32_t nb02, uint32_t nb03, + uint32_t nb11, uint32_t nb12, uint32_t nb13, + uint32_t r2, uint32_t r3 ) { const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_q4_k_comp_spv, kp::shader_data::op_mul_mat_q4_k_comp_spv_len); struct PushConstants { uint32_t inAOff, inBOff, outOff; - int32_t ne00, ne10, ne0, ne1, ne01, ne02, ne12, r2, r3; + int32_t ne00, ne10, ne0, ne1, ne01, ne02, ne12; + uint32_t nb01, nb02, nb03, nb11, nb12, nb13; + uint32_t r2, r3; } pushConsts { - 0, 0, 0, - ne00, ne10, ne0, ne1, ne01, ne02, ne12, r2, r3 + inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4), + ne00, ne10, ne0, ne1, ne01, ne02, ne12, + nb01, nb02, nb03, nb11, nb12, nb13, + r2, r3 }; std::shared_ptr s_algo = nullptr; @@ -1108,28 +1126,37 @@ static void ggml_vk_mul_mat_q6_k( const std::shared_ptr& inB, const std::shared_ptr& out, uint32_t inAOff, uint32_t inBOff, uint32_t outOff, - int32_t ne00, int32_t ne10, int32_t ne0, int32_t ne1, - int32_t ne01, int32_t ne11, int32_t ne12, int32_t ne02 + int32_t ne00, int32_t ne01, int32_t ne02, + int32_t ne10, int32_t ne11, int32_t ne12, int32_t ne13, + int32_t ne0, int32_t ne1, + uint32_t nb01, uint32_t nb02, uint32_t nb03, + uint32_t nb11, uint32_t nb12, uint32_t nb13, + uint32_t r2, uint32_t r3 ) { const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_q6_k_comp_spv, kp::shader_data::op_mul_mat_q6_k_comp_spv_len); struct PushConstants { uint32_t inAOff, 
inBOff, outOff; - int32_t ne00, ne10, ne0, ne1, ne01, gqa; + int32_t ne00, ne10, ne0, ne1, ne01, ne02, ne12; + uint32_t nb01, nb02, nb03, nb11, nb12, nb13; + uint32_t r2, r3; } pushConsts { inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4), - ne00, ne10, ne0, ne1, ne01, ne12/ne02 + ne00, ne10, ne0, ne1, ne01, ne02, ne12, + nb01, nb02, nb03, nb11, nb12, nb13, + r2, r3 }; std::shared_ptr s_algo = nullptr; if (!komputeManager()->hasAlgorithm(__func__)) { - const uint32_t local_x = ggml_vk_current_device().subgroupSize * 2; - s_algo = komputeManager()->algorithm(__func__, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned((ne01 + 1)/2), unsigned(ne11), unsigned(ne12)}, {local_x}, {pushConsts}); + const uint32_t local_x = 2; + const uint32_t local_y = ggml_vk_current_device().subgroupSize; + s_algo = komputeManager()->algorithm(__func__, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned((ne01 + 1)/2), unsigned(ne11), unsigned(ne12)*unsigned(ne13)}, {local_x, local_y}, {pushConsts}); } else { s_algo = komputeManager()->getAlgorithm(__func__); s_algo->setTensors({inA, inB, out}); - s_algo->setWorkgroup({unsigned((ne01 + 1)/2), unsigned(ne11), unsigned(ne12)}); + s_algo->setWorkgroup({unsigned((ne01 + 1)/2), unsigned(ne11), unsigned(ne12)*unsigned(ne13)}); s_algo->setPushConstants({pushConsts}); s_algo->updateDescriptors(s_kompute_context->pool.get()); } @@ -1217,10 +1244,11 @@ static void ggml_vk_rope( kp::Sequence& seq, const std::shared_ptr& inA, const std::shared_ptr& inB, + const std::shared_ptr& inC, const std::shared_ptr& out, - uint32_t inAOff, uint32_t inBOff, uint32_t outOff, + uint32_t inAOff, uint32_t inBOff, uint32_t inCOff, uint32_t outOff, ggml_type src0t, int32_t n_dims, int32_t mode, int32_t n_ctx_orig, - float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow, + float freq_base, float freq_scale, bool has_freq_factors, float ext_factor, float attn_factor, float beta_fast, float beta_slow, int32_t ne01, int32_t ne02, int32_t ne03, uint32_t nb00, uint32_t nb01, uint32_t nb02, uint32_t nb03, int32_t ne0, @@ -1228,11 +1256,17 @@ static void ggml_vk_rope( ) { GGML_ASSERT(src0t == GGML_TYPE_F16 || src0t == GGML_TYPE_F32); - static const auto spirv_f16 = getSpirvShader( - kp::shader_data::op_rope_f16_comp_spv, kp::shader_data::op_rope_f16_comp_spv_len + static const auto spirv_norm_f16 = getSpirvShader( + kp::shader_data::op_rope_norm_f16_comp_spv, kp::shader_data::op_rope_norm_f16_comp_spv_len + ); + static const auto spirv_norm_f32 = getSpirvShader( + kp::shader_data::op_rope_norm_f32_comp_spv, kp::shader_data::op_rope_norm_f32_comp_spv_len ); - static const auto spirv_f32 = getSpirvShader( - kp::shader_data::op_rope_f32_comp_spv, kp::shader_data::op_rope_f32_comp_spv_len + static const auto spirv_neox_f16 = getSpirvShader( + kp::shader_data::op_rope_neox_f16_comp_spv, kp::shader_data::op_rope_neox_f16_comp_spv_len + ); + static const auto spirv_neox_f32 = getSpirvShader( + kp::shader_data::op_rope_neox_f32_comp_spv, kp::shader_data::op_rope_neox_f32_comp_spv_len ); int type_size = src0t == GGML_TYPE_F16 ? 
2 : 4; @@ -1247,32 +1281,40 @@ static void ggml_vk_rope( GGML_ASSERT(nb0 % type_size == 0); struct PushConstants { - uint32_t inAOff, inBOff, outOff; + uint32_t inAOff, inBOff, inCOff, outOff; int32_t n_dims, mode, n_ctx_orig; - float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; + float freq_base, freq_scale; + bool has_freq_factors; + float ext_factor, attn_factor, beta_fast, beta_slow; uint32_t nb00, nb01, nb02, nb03; int32_t ne0; uint32_t nb0, nb1, nb2, nb3; } pushConsts { - safe_divide(inAOff, type_size), safe_divide(inBOff, 4), safe_divide(outOff, type_size), + safe_divide(inAOff, type_size), safe_divide(inBOff, 4), safe_divide(inCOff, type_size), safe_divide(outOff, type_size), n_dims, mode, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, + freq_base, freq_scale, + has_freq_factors, + ext_factor, attn_factor, beta_fast, beta_slow, nb00, nb01, nb02, nb03, ne0, nb0, nb1, nb2, nb3 }; - auto name = std::string(__func__) + (src0t == GGML_TYPE_F16 ? "_f16" : "_f32"); + auto & inC_ = inC ? inC : inA; + const bool is_neox = mode & GGML_ROPE_TYPE_NEOX; + const bool is_f16 = src0t == GGML_TYPE_F16; + + auto name = std::string(__func__) + (is_neox ? "_neox" : "_norm") + (src0t == GGML_TYPE_F16 ? "_f16" : "_f32"); std::shared_ptr s_algo = nullptr; if (!komputeManager()->hasAlgorithm(name)) { + auto & spirv = is_neox ? is_f16 ? spirv_neox_f16 : spirv_neox_f32 : is_f16 ? spirv_norm_f16 : spirv_norm_f32; s_algo = komputeManager()->algorithm( - name, s_kompute_context->pool.get(), {inA, inB, out}, - src0t == GGML_TYPE_F16 ? spirv_f16 : spirv_f32, + name, s_kompute_context->pool.get(), {inA, inB, inC_, out}, spirv, {unsigned(ne01), unsigned(ne02), unsigned(ne03)}, {}, {pushConsts} ); } else { s_algo = komputeManager()->getAlgorithm(name); - s_algo->setTensors({inA, inB, out}); + s_algo->setTensors({inA, inB, inC_, out}); s_algo->setWorkgroup({unsigned(ne01), unsigned(ne02), unsigned(ne03)}); s_algo->setPushConstants({pushConsts}); s_algo->updateDescriptors(s_kompute_context->pool.get()); @@ -1351,11 +1393,15 @@ static void ggml_vk_cpy_f16_f32(Args&&... 
args) { } static bool ggml_backend_kompute_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { + int64_t n = ggml_nelements(op); switch (op->op) { case GGML_OP_UNARY: + if (n % 4 != 0) return false; switch (ggml_get_unary_op(op)) { - case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_GELU: + if (n % 8 != 0) return false; + // fall through + case GGML_UNARY_OP_RELU: case GGML_UNARY_OP_SILU: return ggml_is_contiguous(op->src[0]); default: @@ -1373,8 +1419,18 @@ static bool ggml_backend_kompute_device_supports_op(ggml_backend_dev_t dev, cons case GGML_OP_SOFT_MAX: case GGML_OP_RMS_NORM: case GGML_OP_NORM: - case GGML_OP_ROPE: return true; + case GGML_OP_ROPE: + { + const int mode = ((const int32_t *) op->op_params)[2]; + if (mode & GGML_ROPE_TYPE_MROPE) { + return false; + } + if (mode & GGML_ROPE_TYPE_VISION) { + return false; + } + return true; + } case GGML_OP_DUP: case GGML_OP_CPY: case GGML_OP_CONT: @@ -1413,8 +1469,8 @@ static bool ggml_backend_kompute_device_supports_op(ggml_backend_dev_t dev, cons switch (op->src[0]->type) { case GGML_TYPE_F32: - case GGML_TYPE_Q6_K: return op->ne[3] == 1; + case GGML_TYPE_Q6_K: case GGML_TYPE_F16: case GGML_TYPE_Q8_0: case GGML_TYPE_Q4_0: @@ -1515,9 +1571,11 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml const static std::shared_ptr nullTensor = nullptr; uint32_t off_src0 = 0; uint32_t off_src1 = 0; + uint32_t off_src2 = 0; uint32_t off_dst = 0; const std::shared_ptr& id_src0 = src0 ? ggml_vk_get_tensor(src0, &off_src0) : nullTensor; const std::shared_ptr& id_src1 = src1 ? ggml_vk_get_tensor(src1, &off_src1) : nullTensor; + const std::shared_ptr& id_src2 = src2 ? ggml_vk_get_tensor(src2, &off_src2) : nullTensor; const std::shared_ptr& id_dst = dst ? ggml_vk_get_tensor(dst, &off_dst) : nullTensor; switch (dst->op) { @@ -1593,11 +1651,16 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml #pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021") GGML_ASSERT(!src1 || src1t == GGML_TYPE_F32); -#pragma message("TODO: add ALiBi support") -#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/7192") - GGML_ASSERT(max_bias == 0.0f); + const int64_t nrows_x = ggml_nrows(src0); + const int64_t nrows_y = src0->ne[1]; + + const uint32_t n_head = nrows_x/nrows_y; + const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); - ggml_vk_soft_max(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, ne02, ne03, scale); + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + ggml_vk_soft_max(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, ne02, ne03, scale, max_bias, m0, m1, n_head_log2); } break; case GGML_OP_DIAG_MASK_INF: { @@ -1649,38 +1712,44 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml case GGML_TYPE_F16: ggml_vk_mul_mat_f16( seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, - ne00, ne01, ne02, nb00, nb01, nb02, ne10, ne11, ne12, ne13, nb10, nb11, nb12, + ne00, ne01, ne02, nb00, nb01, nb02, nb03, + ne10, ne11, ne12, ne13, nb10, nb11, nb12, nb13, ne0, ne1, r2, r3 ); break; case GGML_TYPE_Q8_0: ggml_vk_mul_mat_q8_0( seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, - ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, r2, r3 + ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, + nb01, nb02, nb03, nb11, nb12, nb13, r2, r3 ); break; case GGML_TYPE_Q4_0: 
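// a self-contained C++ sketch of the ALiBi slope math this patch wires through
// ggml_vk_soft_max() (host side above, op_softmax.comp further below); the
// helper name and standalone form are illustrative, not code from the patch:

#include <cmath>
#include <cstdint>

static float alibi_slope(uint32_t h, uint32_t n_head, float max_bias) {
    // heads below the nearest power of two use base m0, the rest use m1
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));

    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    return h < n_head_log2 ? powf(m0, (float) (h + 1))
                           : powf(m1, (float) (2*(h - n_head_log2) + 1));
}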
ggml_vk_mul_mat_q4_0( seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, - ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, r2, r3 + ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, + nb01, nb02, nb03, nb11, nb12, nb13, r2, r3 ); break; case GGML_TYPE_Q4_1: ggml_vk_mul_mat_q4_1( seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, - ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, r2, r3 + ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, + nb01, nb02, nb03, nb11, nb12, nb13, r2, r3 ); break; case GGML_TYPE_Q4_K: ggml_vk_mul_mat_q4_k( seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, - ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, ne12/ne02, ne13/ne03 + ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, + nb01, nb02, nb03, nb11, nb12, nb13, r2, r3 ); break; case GGML_TYPE_Q6_K: ggml_vk_mul_mat_q6_k( seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, - ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02 + ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, + nb01, nb02, nb03, nb11, nb12, nb13, r2, r3 ); break; default: { @@ -1709,13 +1778,6 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml } break; case GGML_OP_ROPE: { -#pragma message("TODO: implement phi3 frequency factors support") -#pragma message(" https://github.com/ggerganov/llama.cpp/pull/7225") - GGML_ASSERT(dst->src[2] == nullptr && "phi3 frequency factors not implemented yet"); - -#pragma message("TODO: update rope NORM mode to match NEOX mode") -#pragma message(" https://github.com/ggerganov/llama.cpp/pull/7634") - GGML_ASSERT(ne10 == ne02); GGML_ASSERT(src0t == dstt); // const int n_past = ((int32_t *) dst->op_params)[0]; @@ -1724,6 +1786,8 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml // skip 3, n_ctx used in GLM RoPE, unimplemented in Vulkan const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; + const bool has_freq_factors = dst->src[2] != nullptr; + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); @@ -1732,8 +1796,8 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); ggml_vk_rope( - seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, src0t, n_dims, mode, n_ctx_orig, - freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, + seq, id_src0, id_src1, id_src2, id_dst, off_src0, off_src1, off_src2, off_dst, src0t, n_dims, mode, n_ctx_orig, + freq_base, freq_scale, has_freq_factors, ext_factor, attn_factor, beta_fast, beta_slow, ne01, ne02, ne03, nb00, nb01, nb02, nb03, ne0, nb0, nb1, nb2, nb3 ); } break; @@ -2176,9 +2240,12 @@ static const struct ggml_backend_reg_i ggml_backend_kompute_reg_i = { ggml_backend_reg_t ggml_backend_kompute_reg() { static ggml_backend_reg reg = { - /* .iface = */ ggml_backend_kompute_reg_i, - /* .context = */ nullptr, + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_kompute_reg_i, + /* .context = */ nullptr, }; return ® } + +GGML_BACKEND_DL_IMPL(ggml_backend_kompute_reg) diff --git a/ggml/src/ggml-kompute/kompute-shaders/common.comp b/ggml/src/ggml-kompute/kompute-shaders/common.comp index 2aaddf704a758..dbe4cf804e6c0 100644 --- a/ggml/src/ggml-kompute/kompute-shaders/common.comp +++ 
b/ggml/src/ggml-kompute/kompute-shaders/common.comp @@ -3,6 +3,7 @@ #extension GL_EXT_shader_explicit_arithmetic_types_float16: require #extension GL_EXT_shader_explicit_arithmetic_types_int8: require #extension GL_EXT_shader_explicit_arithmetic_types_int16: require +#extension GL_EXT_shader_explicit_arithmetic_types_int64: require #extension GL_EXT_control_flow_attributes: enable #extension GL_KHR_shader_subgroup_arithmetic : require #extension GL_EXT_debug_printf : enable diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp index 8f0a9031f7a37..0ab1b2fc20eeb 100644 --- a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp +++ b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_f16.comp @@ -20,12 +20,14 @@ layout (push_constant) uniform parameter { uint nb00; uint nb01; uint nb02; + uint nb03; int ne10; int ne11; int ne12; uint nb10; uint nb11; uint nb12; + uint nb13; int ne0; int ne1; uint r2; @@ -42,7 +44,7 @@ void main() { const uint i12 = im%pcs.ne12; const uint i13 = im/pcs.ne12; - const uint offset0 = r0*pcs.nb01 + (i12/pcs.r2)*pcs.nb02 + (i13/pcs.r3)*pcs.nb02*pcs.ne02; + const uint offset0 = r0*pcs.nb01 + (i12/pcs.r2)*pcs.nb02 + (i13/pcs.r3)*pcs.nb03; const uint x = offset0 / 2 + pcs.inAOff; // Based from inA @@ -52,7 +54,7 @@ void main() { break; } - const uint y = (r1*pcs.nb11 + im*pcs.nb12) / 4 + pcs.inBOff; // Based from inB + const uint y = (r1*pcs.nb11 + i12*pcs.nb12 + i13*pcs.nb13) / 4 + pcs.inBOff; float sumf = 0; for (uint i = gl_SubgroupInvocationID.x; i < pcs.ne00; i += gl_SubgroupSize) { diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp index fc8e45aa97776..a5752a3a0065f 100644 --- a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp +++ b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q4_k.comp @@ -24,8 +24,14 @@ layout (push_constant) uniform parameter { int ne01; int ne02; int ne12; - int r2; - int r3; + uint nb01; + uint nb02; + uint nb03; + uint nb11; + uint nb12; + uint nb13; + uint r2; + uint r3; } pcs; void main() { @@ -50,10 +56,11 @@ void main() { const uint i12 = im%pcs.ne12; const uint i13 = im/pcs.ne12; - const uint offset0 = (i12/pcs.r2)*(nb*pcs.ne01) + (i13/pcs.r3)*(nb*pcs.ne01*pcs.ne02); + const uint offset0 = first_row*(pcs.nb01/SIZE_OF_BLOCK) + (i12/pcs.r2)*(pcs.nb02/SIZE_OF_BLOCK) + (i13/pcs.r3)*(pcs.nb03/SIZE_OF_BLOCK); + const uint offset1 = r1*pcs.nb11 + (i12 )*pcs.nb12 + (i13 )*pcs.nb13; - const uint xblk = ib_row + offset0 + pcs.inAOff; - const uint y = r1*pcs.ne10 + im*pcs.ne00*pcs.ne1 + pcs.inBOff; + const uint xblk = offset0 + pcs.inAOff; + const uint y = (offset1 / 4) + pcs.inBOff; float yl[16]; float yh[16]; @@ -74,7 +81,7 @@ void main() { } for (int row = 0; row < N_DST; row++) { - uint row_idx = row * nb; + uint row_idx = row * (pcs.nb01 / SIZE_OF_BLOCK); uint16_t sc_0 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 0); uint16_t sc_1 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 2); diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp index c9baebdf4baac..d331d1a70572e 100644 --- a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp +++ b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mat_q6_k.comp @@ -21,7 +21,16 @@ layout (push_constant) uniform parameter { int ne0; int ne1; int ne01; - int gqa; + int ne02; + int ne12; + uint nb01; + uint nb02; + uint nb03; + uint nb11; 
+ uint nb12; + uint nb13; + uint r2; + uint r3; } pcs; void main() { @@ -34,12 +43,15 @@ void main() { const uint r0 = gl_WorkGroupID.x; const uint r1 = gl_WorkGroupID.y; - const uint r2 = gl_WorkGroupID.z; + const uint im = gl_WorkGroupID.z; const uint row = (r0 * gl_NumSubgroups + gl_SubgroupID); - const uint offset0 = r2/pcs.gqa*(nb*pcs.ne0); - const uint x = row * nb + offset0; // Based from inA without base offset - const uint yy = r1*pcs.ne10 + r2*pcs.ne00*pcs.ne1+pcs.inBOff; // Based from inB + + const uint i12 = im%pcs.ne12; + const uint i13 = im/pcs.ne12; + + const uint x = row*(pcs.nb01/SIZE_OF_BLOCK) + (i12/pcs.r2)*(pcs.nb02/SIZE_OF_BLOCK) + (i13/pcs.r3)*(pcs.nb03/SIZE_OF_BLOCK); + const uint yy = (r1*pcs.nb11 + i12*pcs.nb12 + i13*pcs.nb13) / 4 + pcs.inBOff; float sumf = 0; @@ -89,6 +101,6 @@ void main() { const float tot = subgroupAdd(sumf); if (subgroupElect()) { - out_[r1*pcs.ne0 + r2*pcs.ne0*pcs.ne1 + row + pcs.outOff] = tot; + out_[r1*pcs.ne0 + im*pcs.ne0*pcs.ne1 + row + pcs.outOff] = tot; } } diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp index 440b5ab2c81f8..a6517cc1f1993 100644 --- a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp +++ b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n.comp @@ -14,10 +14,15 @@ void main() { const uint i12 = im%pcs.ne12; const uint i13 = im/pcs.ne12; - const uint offset0 = first_row * nb + (i12/pcs.r2)*(nb*pcs.ne01) + (i13/pcs.r3)*(nb*pcs.ne01*pcs.ne02); + // pointers to src0 rows + uint ax[N_ROWS]; + for (int row = 0; row < N_ROWS; ++row) { + const uint offset0 = (first_row + row)*(pcs.nb01/SIZE_OF_BLOCK) + (i12/pcs.r2)*(pcs.nb02/SIZE_OF_BLOCK) + (i13/pcs.r3)*(pcs.nb03/SIZE_OF_BLOCK); + + ax[row] = offset0 + pcs.inAOff; + } - const uint x = offset0; // Based from inA without base offset - const uint y = r1*uint(pcs.ne10)+im*pcs.ne00*pcs.ne1+pcs.inBOff; // Based from inB + const uint y = (r1*pcs.nb11 + i12*pcs.nb12 + i13*pcs.nb13) / 4 + pcs.inBOff; float sumf[N_ROWS] = {0.0f, 0.0f, 0.0f, 0.0f}; @@ -32,8 +37,7 @@ void main() { for (uint ib = ix; ib < nb; ib += 16) { for (int row = 0; row < N_ROWS; row++) { - const uint block_index = x + ib + row * nb; - sumf[row] += block_q_n_dot_y(block_index, yb, il); + sumf[row] += block_q_n_dot_y(ax[row] + ib, yb, il); } yb += BLOCKS_IN_QUANT * 16; diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp index 7912b09ac69c4..a9a2f22180ffd 100644 --- a/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp +++ b/ggml/src/ggml-kompute/kompute-shaders/op_mul_mv_q_n_pre.comp @@ -1,5 +1,5 @@ layout(local_size_x_id = 0) in; -layout(local_size_y = 1) in; +layout(local_size_y = 8) in; layout(local_size_z = 1) in; layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; }; @@ -17,6 +17,12 @@ layout (push_constant) uniform parameter { int ne12; int ne0; int ne1; + uint nb01; + uint nb02; + uint nb03; + uint nb11; + uint nb12; + uint nb13; uint r2; uint r3; } pcs; diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_rope_f16.comp b/ggml/src/ggml-kompute/kompute-shaders/op_rope_f16.comp deleted file mode 100644 index 0ecfb2eab527c..0000000000000 --- a/ggml/src/ggml-kompute/kompute-shaders/op_rope_f16.comp +++ /dev/null @@ -1,73 +0,0 @@ -#version 450 - -#include "rope_common.comp" - -layout(binding = 0) buffer restrict readonly tensorInA { float16_t inA[]; }; -layout(binding = 1) buffer restrict readonly tensorInB { int inB[]; }; 
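// the indexing change shared by the mul_mat/mul_mv shaders above, restated as
// plain C++ (a sketch; the function name is made up): with r2 = ne12/ne02 and
// r3 = ne13/ne03, each src1 batch (i12, i13) is mapped onto its broadcast src0
// slice by integer division, and the nb0x byte strides replace the old
// nb*ne01*ne02 products so that non-contiguous src0 layouts keep working
// (the quantized shaders further divide by SIZE_OF_BLOCK to get block units):

#include <cstdint>

static uint32_t src0_row_offset_bytes(uint32_t row,  uint32_t i12,  uint32_t i13,
                                      uint32_t r2,   uint32_t r3,
                                      uint32_t nb01, uint32_t nb02, uint32_t nb03) {
    return row*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03;
}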
-layout(binding = 2) buffer restrict writeonly tensorOut { float16_t out_[]; }; - -void main() { - const uint i3 = gl_WorkGroupID.z; - const uint i2 = gl_WorkGroupID.y; - const uint i1 = gl_WorkGroupID.x; - - const bool is_neox = (pcs.mode & GGML_ROPE_TYPE_NEOX) != 0; - - float corr_dims[2]; - rope_yarn_corr_dims(pcs.n_dims, pcs.n_ctx_orig, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims); - - const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims); - - const int p = inB[pcs.inBOff + i2]; - - float theta = float(p); - - if (!is_neox) { - for (uint i0 = 0; i0 < pcs.ne0; i0 += 2) { - float cos_theta, sin_theta; - rope_yarn(theta, pcs.freq_scale, corr_dims, i0, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta); - - theta *= theta_scale; - - const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in - const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_ - - const float x0 = float(inA[src]); - const float x1 = float(inA[src+1]); - - out_[dst_data] = float16_t(x0*cos_theta - x1*sin_theta); - out_[dst_data+1] = float16_t(x0*sin_theta + x1*cos_theta); - } - } else { - const float inv_ndims = -1.f/pcs.n_dims; - for (uint ic = 0; ic < pcs.n_dims; ic += 2) { - const uint cur_rot = ic; - - float cos_theta, sin_theta; - rope_yarn(theta, pcs.freq_scale, corr_dims, cur_rot, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta); - - theta *= theta_scale; - - const uint i0 = ic/2; - - const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in - const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_ - - const float x0 = float(inA[src]); - const float x1 = float(inA[src+pcs.n_dims/2]); - - out_[dst_data] = float16_t(x0*cos_theta - x1*sin_theta); - out_[dst_data+pcs.n_dims/2] = float16_t(x0*sin_theta + x1*cos_theta); - } - - for (uint ic = pcs.n_dims; ic < pcs.ne0; ic += 2) { - const uint i0 = ic; - - const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in - const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_ - - out_[dst_data + 0] = inA[src + 0]; - out_[dst_data + 1] = inA[src + 1]; - } - } -} diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_rope_f32.comp b/ggml/src/ggml-kompute/kompute-shaders/op_rope_f32.comp deleted file mode 100644 index cec0fd9a5d10c..0000000000000 --- a/ggml/src/ggml-kompute/kompute-shaders/op_rope_f32.comp +++ /dev/null @@ -1,73 +0,0 @@ -#version 450 - -#include "rope_common.comp" - -layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; }; -layout(binding = 1) buffer restrict readonly tensorInB { int inB[]; }; -layout(binding = 2) buffer restrict writeonly tensorOut { float out_[]; }; - -void main() { - const uint i3 = gl_WorkGroupID.z; - const uint i2 = gl_WorkGroupID.y; - const uint i1 = gl_WorkGroupID.x; - - const bool is_neox = (pcs.mode & GGML_ROPE_TYPE_NEOX) != 0; - - float corr_dims[2]; - rope_yarn_corr_dims(pcs.n_dims, pcs.n_ctx_orig, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims); - - const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims); - - const int p = inB[pcs.inBOff + i2]; - - float theta = float(p); - - if (!is_neox) { - for (uint i0 = 0; i0 < pcs.ne0; i0 += 2) { - float cos_theta, sin_theta; - rope_yarn(theta, pcs.freq_scale, corr_dims, i0, pcs.ext_factor, 
pcs.attn_factor, cos_theta, sin_theta); - - theta *= theta_scale; - - const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in - const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_ - - const float x0 = inA[src]; - const float x1 = inA[src+1]; - - out_[dst_data] = x0*cos_theta - x1*sin_theta; - out_[dst_data+1] = x0*sin_theta + x1*cos_theta; - } - } else { - const float inv_ndims = -1.f/pcs.n_dims; - for (uint ic = 0; ic < pcs.n_dims; ic += 2) { - const uint cur_rot = ic; - - float cos_theta, sin_theta; - rope_yarn(theta, pcs.freq_scale, corr_dims, cur_rot, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta); - - theta *= theta_scale; - - const uint i0 = ic/2; - - const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in - const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_ - - const float x0 = inA[src]; - const float x1 = inA[src+pcs.n_dims/2]; - - out_[dst_data] = x0*cos_theta - x1*sin_theta; - out_[dst_data+pcs.n_dims/2] = x0*sin_theta + x1*cos_theta; - } - - for (uint ic = pcs.n_dims; ic < pcs.ne0; ic += 2) { - const uint i0 = ic; - - const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in - const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_ - - out_[dst_data + 0] = inA[src + 0]; - out_[dst_data + 1] = inA[src + 1]; - } - } -} diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp b/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp new file mode 100644 index 0000000000000..63659cbfe5524 --- /dev/null +++ b/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f16.comp @@ -0,0 +1,52 @@ +#version 450 + +#include "rope_common.comp" + +layout(binding = 0) buffer restrict readonly tensorInA { float16_t inA[]; }; +layout(binding = 1) buffer restrict readonly tensorInB { int inB[]; }; +layout(binding = 2) buffer restrict readonly tensorInC { float inC[]; }; +layout(binding = 3) buffer restrict writeonly tensorOut { float16_t out_[]; }; + +void main() { + const uint i3 = gl_WorkGroupID.z; + const uint i2 = gl_WorkGroupID.y; + const uint i1 = gl_WorkGroupID.x; + + float corr_dims[2]; + rope_yarn_corr_dims(pcs.n_dims, pcs.n_ctx_orig, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims); + + const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims); + + float theta_base = float(inB[pcs.inBOff + i2]); + float inv_ndims = -1.f/pcs.n_dims; + + float cos_theta; + float sin_theta; + + for (uint i0 = 2*gl_LocalInvocationIndex; i0 < pcs.ne0; i0 += 2*gl_WorkGroupSize.x) { + if (i0 < pcs.n_dims) { + uint ic = i0/2; + + float theta = theta_base * pow(pcs.freq_base, inv_ndims*i0); + + const float freq_factor = pcs.has_freq_factors ? 
inC[pcs.inCOff + ic] : 1.0f; + + rope_yarn(theta/freq_factor, pcs.freq_scale, corr_dims, i0, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta); + + const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + ic*pcs.nb00) / 2) + pcs.inAOff; // Based from in + const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + ic*pcs.nb0) / 2) + pcs.outOff; // Based from out_ + + const float x0 = float(inA[src]); + const float x1 = float(inA[src+pcs.n_dims/2]); + + out_[dst_data] = float16_t(x0*cos_theta - x1*sin_theta); + out_[dst_data+pcs.n_dims/2] = float16_t(x0*sin_theta + x1*cos_theta); + } else { + const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in + const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_ + + out_[dst_data] = inA[src]; + out_[dst_data+1] = inA[src+1]; + } + } +} diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp b/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp new file mode 100644 index 0000000000000..4df56204d7233 --- /dev/null +++ b/ggml/src/ggml-kompute/kompute-shaders/op_rope_neox_f32.comp @@ -0,0 +1,52 @@ +#version 450 + +#include "rope_common.comp" + +layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; }; +layout(binding = 1) buffer restrict readonly tensorInB { int inB[]; }; +layout(binding = 2) buffer restrict readonly tensorInC { float inC[]; }; +layout(binding = 3) buffer restrict writeonly tensorOut { float out_[]; }; + +void main() { + const uint i3 = gl_WorkGroupID.z; + const uint i2 = gl_WorkGroupID.y; + const uint i1 = gl_WorkGroupID.x; + + float corr_dims[2]; + rope_yarn_corr_dims(pcs.n_dims, pcs.n_ctx_orig, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims); + + const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims); + + float theta_base = float(inB[pcs.inBOff + i2]); + float inv_ndims = -1.f/pcs.n_dims; + + float cos_theta; + float sin_theta; + + for (uint i0 = 2*gl_LocalInvocationIndex; i0 < pcs.ne0; i0 += 2*gl_WorkGroupSize.x) { + if (i0 < pcs.n_dims) { + uint ic = i0/2; + + float theta = theta_base * pow(pcs.freq_base, inv_ndims*i0); + + const float freq_factor = pcs.has_freq_factors ? 
inC[pcs.inCOff + ic] : 1.0f; + + rope_yarn(theta/freq_factor, pcs.freq_scale, corr_dims, i0, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta); + + const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + ic*pcs.nb00) / 4) + pcs.inAOff; // Based from in + const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + ic*pcs.nb0) / 4) + pcs.outOff; // Based from out_ + + const float x0 = inA[src]; + const float x1 = inA[src+pcs.n_dims/2]; + + out_[dst_data] = x0*cos_theta - x1*sin_theta; + out_[dst_data+pcs.n_dims/2] = x0*sin_theta + x1*cos_theta; + } else { + const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in + const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_ + + out_[dst_data] = inA[src]; + out_[dst_data+1] = inA[src+1]; + } + } +} diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp b/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp new file mode 100644 index 0000000000000..a3c0eda8bd399 --- /dev/null +++ b/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f16.comp @@ -0,0 +1,52 @@ +#version 450 + +#include "rope_common.comp" + +layout(binding = 0) buffer restrict readonly tensorInA { float16_t inA[]; }; +layout(binding = 1) buffer restrict readonly tensorInB { int inB[]; }; +layout(binding = 2) buffer restrict readonly tensorInC { float inC[]; }; +layout(binding = 3) buffer restrict writeonly tensorOut { float16_t out_[]; }; + +void main() { + const uint i3 = gl_WorkGroupID.z; + const uint i2 = gl_WorkGroupID.y; + const uint i1 = gl_WorkGroupID.x; + + float corr_dims[2]; + rope_yarn_corr_dims(pcs.n_dims, pcs.n_ctx_orig, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims); + + const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims); + + float theta_base = float(inB[pcs.inBOff + i2]); + float inv_ndims = -1.f/pcs.n_dims; + + float cos_theta; + float sin_theta; + + for (uint i0 = 2*gl_LocalInvocationIndex; i0 < pcs.ne0; i0 += 2*gl_WorkGroupSize.x) { + if (i0 < pcs.n_dims) { + uint ic = i0/2; + + float theta = theta_base * pow(pcs.freq_base, inv_ndims*i0); + + const float freq_factor = pcs.has_freq_factors ? 
inC[pcs.inCOff + ic] : 1.0f; + + rope_yarn(theta/freq_factor, pcs.freq_scale, corr_dims, i0, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta); + + const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in + const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_ + + const float x0 = float(inA[src]); + const float x1 = float(inA[src+1]); + + out_[dst_data] = float16_t(x0*cos_theta - x1*sin_theta); + out_[dst_data+1] = float16_t(x0*sin_theta + x1*cos_theta); + } else { + const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 2) + pcs.inAOff; // Based from in + const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 2) + pcs.outOff; // Based from out_ + + out_[dst_data] = inA[src]; + out_[dst_data+1] = inA[src+1]; + } + } +} diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp b/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp new file mode 100644 index 0000000000000..b7963ae725390 --- /dev/null +++ b/ggml/src/ggml-kompute/kompute-shaders/op_rope_norm_f32.comp @@ -0,0 +1,52 @@ +#version 450 + +#include "rope_common.comp" + +layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; }; +layout(binding = 1) buffer restrict readonly tensorInB { int inB[]; }; +layout(binding = 2) buffer restrict readonly tensorInC { float inC[]; }; +layout(binding = 3) buffer restrict writeonly tensorOut { float out_[]; }; + +void main() { + const uint i3 = gl_WorkGroupID.z; + const uint i2 = gl_WorkGroupID.y; + const uint i1 = gl_WorkGroupID.x; + + float corr_dims[2]; + rope_yarn_corr_dims(pcs.n_dims, pcs.n_ctx_orig, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims); + + const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims); + + float theta_base = float(inB[pcs.inBOff + i2]); + float inv_ndims = -1.f/pcs.n_dims; + + float cos_theta; + float sin_theta; + + for (uint i0 = 2*gl_LocalInvocationIndex; i0 < pcs.ne0; i0 += 2*gl_WorkGroupSize.x) { + if (i0 < pcs.n_dims) { + uint ic = i0/2; + + float theta = theta_base * pow(pcs.freq_base, inv_ndims*i0); + + const float freq_factor = pcs.has_freq_factors ? 
inC[pcs.inCOff + ic] : 1.0f; + + rope_yarn(theta/freq_factor, pcs.freq_scale, corr_dims, i0, pcs.ext_factor, pcs.attn_factor, cos_theta, sin_theta); + + const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in + const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_ + + const float x0 = inA[src]; + const float x1 = inA[src+1]; + + out_[dst_data] = x0*cos_theta - x1*sin_theta; + out_[dst_data+1] = x0*sin_theta + x1*cos_theta; + } else { + const uint src = uint((i3*pcs.nb03 + i2*pcs.nb02 + i1*pcs.nb01 + i0*pcs.nb00) / 4) + pcs.inAOff; // Based from in + const uint dst_data = uint((i3*pcs.nb3 + i2*pcs.nb2 + i1*pcs.nb1 + i0*pcs.nb0) / 4) + pcs.outOff; // Based from out_ + + out_[dst_data] = inA[src]; + out_[dst_data+1] = inA[src+1]; + } + } +} diff --git a/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp b/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp index 7bc9176cabaae..4165295bf4b3c 100644 --- a/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp +++ b/ggml/src/ggml-kompute/kompute-shaders/op_softmax.comp @@ -18,6 +18,10 @@ layout(push_constant) uniform PushConstants { int ne01; int ne02; float scale; + float max_bias; + float m0; + float m1; + uint n_head_log2; int mask; } pcs; @@ -34,17 +38,29 @@ void main() { const uint pmask = i01*pcs.ne00 + pcs.inBOff; // Based from inB const uint pdst = extra_off + pcs.outOff; // Based from out_ + float slope = 1.0f; + + // ALiBi + if (pcs.max_bias > 0.0f) { + int64_t h = i02; + + float base = h < pcs.n_head_log2 ? pcs.m0 : pcs.m1; + int64_t exp = h < pcs.n_head_log2 ? h + 1 : 2*(h - pcs.n_head_log2) + 1; + + slope = pow(base, float(exp)); + } + // parallel max float localMax = uintBitsToFloat(0xFF800000); for (uint i00 = gl_SubgroupInvocationID.x; i00 < pcs.ne00; i00 += 32) { - localMax = max(localMax, inA[psrc0 + i00]*pcs.scale + (pcs.mask!=0 ? inB[pmask + i00] : 0.0f)); + localMax = max(localMax, inA[psrc0 + i00]*pcs.scale + (pcs.mask!=0 ? slope*inB[pmask + i00] : 0.0f)); } float max_ = subgroupMax(localMax); // parallel sum float localSum = 0.0f; for (uint i00 = gl_SubgroupInvocationID.x; i00 < pcs.ne00; i00 += 32) { - const float exp_psrc0 = exp(inA[psrc0 + i00]*pcs.scale + (pcs.mask!=0 ? inB[pmask + i00] : 0.0f) - max_); + const float exp_psrc0 = exp(inA[psrc0 + i00]*pcs.scale + (pcs.mask!=0 ? 
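// the four new rope shaders above (norm/neox x f16/f32) share one angle
// computation; a C++ rendering for reference (a sketch only, names made up):
// theta_base is the position read from inB, and the optional frequency factors
// in inC divide theta before the YaRN correction applied by rope_yarn():

#include <cmath>

static float rope_theta(float theta_base, float freq_base, int n_dims, int i0,
                        const float * freq_factors /* may be nullptr */) {
    const float inv_ndims = -1.0f / (float) n_dims;
    const float theta     = theta_base * powf(freq_base, inv_ndims * (float) i0);
    const float ff        = freq_factors ? freq_factors[i0/2] : 1.0f;
    return theta / ff;
}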
slope*inB[pmask + i00] : 0.0f) - max_); localSum += exp_psrc0; out_[pdst + i00] = exp_psrc0; } diff --git a/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp b/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp index df4702896d46f..0fca640dcc232 100644 --- a/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp +++ b/ggml/src/ggml-kompute/kompute-shaders/rope_common.comp @@ -8,12 +8,14 @@ layout(local_size_x = 1) in; layout (push_constant) uniform parameter { uint inAOff; uint inBOff; + uint inCOff; uint outOff; int n_dims; int mode; int n_ctx_orig; float freq_base; float freq_scale; + bool has_freq_factors; float ext_factor; float attn_factor; float beta_fast; diff --git a/ggml/src/ggml-metal/CMakeLists.txt b/ggml/src/ggml-metal/CMakeLists.txt index b237d79f47ddb..1bad272068244 100644 --- a/ggml/src/ggml-metal/CMakeLists.txt +++ b/ggml/src/ggml-metal/CMakeLists.txt @@ -4,19 +4,16 @@ find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) message(STATUS "Metal framework found") -add_library(ggml-metal - ggml-metal.m - ) +ggml_add_backend_library(ggml-metal + ggml-metal.m + ) target_link_libraries(ggml-metal PRIVATE - ggml-base ${FOUNDATION_LIBRARY} ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK} ) -target_include_directories(ggml-metal PRIVATE . ..) - if (GGML_METAL_NDEBUG) add_compile_definitions(GGML_METAL_NDEBUG) endif() diff --git a/ggml/src/ggml-metal/ggml-metal-impl.h b/ggml/src/ggml-metal/ggml-metal-impl.h index 53c13549650c8..e3dc25f1686fb 100644 --- a/ggml/src/ggml-metal/ggml-metal-impl.h +++ b/ggml/src/ggml-metal/ggml-metal-impl.h @@ -102,6 +102,21 @@ typedef struct { uint64_t nb3; } ggml_metal_kargs_cpy; +typedef struct { + int64_t ne10; + int64_t ne11; + int64_t ne12; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb13; + uint64_t nb1; + uint64_t nb2; + uint64_t nb3; + uint64_t offs; + bool inplace; +} ggml_metal_kargs_set; + typedef struct { int32_t ne00; int32_t ne01; @@ -192,6 +207,30 @@ typedef struct { int16_t r3; } ggml_metal_kargs_mul_mv; +typedef struct { + int32_t ne00; + int32_t ne01; + int32_t ne02; + uint64_t nb00; + uint64_t nb01; + uint64_t nb02; + uint64_t nb03; + int32_t ne10; + int32_t ne11; + int32_t ne12; + uint64_t nb10; + uint64_t nb11; + uint64_t nb12; + uint64_t nb13; + int32_t ne0; + int32_t ne1; + int16_t r2; + int16_t r3; + int16_t nsg; + int16_t nxpsg; + int16_t r1ptg; +} ggml_metal_kargs_mul_mv_ext; + typedef struct { int32_t nei0; int32_t nei1; diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index 58fee4bfd1296..28f590f9216be 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -126,6 +126,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, GGML_METAL_KERNEL_TYPE_SILU, GGML_METAL_KERNEL_TYPE_SILU_4, + GGML_METAL_KERNEL_TYPE_ELU, GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16, GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4, GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32, @@ -174,6 +175,46 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_3, + 
GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_5, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_2, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_3, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_4, + GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_5, GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, @@ -265,8 +306,11 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_IM2COL_F32, GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F16, GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F32, + GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F32_F32, + GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F16_F32, GGML_METAL_KERNEL_TYPE_UPSCALE_F32, GGML_METAL_KERNEL_TYPE_PAD_F32, + GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32, GGML_METAL_KERNEL_TYPE_ARANGE_F32, GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, @@ -328,6 +372,8 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H256, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H256, GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H256, + GGML_METAL_KERNEL_TYPE_SET_I32, + GGML_METAL_KERNEL_TYPE_SET_F32, GGML_METAL_KERNEL_TYPE_CPY_F32_F32, GGML_METAL_KERNEL_TYPE_CPY_F32_F16, GGML_METAL_KERNEL_TYPE_CPY_F32_BF16, @@ -349,6 +395,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte GGML_METAL_KERNEL_TYPE_SUM_ROWS, GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, + GGML_METAL_KERNEL_TYPE_ARGMAX, GGML_METAL_KERNEL_TYPE_COUNT }; @@ -463,6 +510,35 @@ @implementation GGMLMetalClass #endif NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"]; + if (path_lib == nil) { + // Try to find the resource in the directory where the current binary located. 
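// the Objective-C fallback that follows is dense; this C++ sketch restates the
// assumed search order (illustrative only, not code from the patch): look for
// default.metallib next to the running binary and, if it is a symlink, resolve
// a relative link target against the binary's directory before giving up.

#include <filesystem>
#include <optional>

static std::optional<std::filesystem::path> find_default_metallib(const std::filesystem::path & binary_path) {
    namespace fs = std::filesystem;

    fs::path lib = binary_path.parent_path() / "default.metallib";
    if (fs::is_symlink(lib)) {
        const fs::path target = fs::read_symlink(lib);
        // relative link targets are resolved against the binary's directory
        lib = target.is_absolute() ? target : binary_path.parent_path() / target;
    }
    if (!fs::exists(lib)) {
        return std::nullopt;
    }
    return lib;
}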
+ NSString * current_binary = [[NSProcessInfo processInfo] arguments][0]; + NSString * bin_dir = [current_binary stringByDeletingLastPathComponent]; + NSString * default_metallib_path = [NSString pathWithComponents:@[bin_dir, @"default.metallib"]]; + if ([[NSFileManager defaultManager] isReadableFileAtPath:default_metallib_path]) { + GGML_LOG_INFO("%s: found '%s'\n", __func__, [default_metallib_path UTF8String]); + NSDictionary * atts = [[NSFileManager defaultManager] attributesOfItemAtPath:default_metallib_path error:&error]; + if (atts && atts[NSFileType] == NSFileTypeSymbolicLink) { + // Optionally, if this is a symlink, try to resolve it. + default_metallib_path = [[NSFileManager defaultManager] destinationOfSymbolicLinkAtPath:default_metallib_path error:&error]; + if (default_metallib_path && [default_metallib_path length] > 0 && ![[default_metallib_path substringToIndex:1] isEqualToString:@"/"]) { + // It is a relative path, adding the binary directory as directory prefix. + default_metallib_path = [NSString pathWithComponents:@[bin_dir, default_metallib_path]]; + } + if (!default_metallib_path || ![[NSFileManager defaultManager] isReadableFileAtPath:default_metallib_path]) { + // Link to the resource could not be resolved. + default_metallib_path = nil; + } else { + GGML_LOG_INFO("%s: symlink resolved '%s'\n", __func__, [default_metallib_path UTF8String]); + } + } + } else { + // The resource couldn't be found in the binary's directory. + default_metallib_path = nil; + } + path_lib = default_metallib_path; + } + if (try_metallib && path_lib != nil) { // pre-compiled library found NSURL * libURL = [NSURL fileURLWithPath:path_lib]; @@ -649,6 +725,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, gelu_quick_4, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU_4, silu_4, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ELU, elu, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16, soft_max_f16, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F16_4, soft_max_f16_4, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32, soft_max_f32, has_simdgroup_reduction); @@ -697,6 +774,46 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_2, mul_mv_ext_f16_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_3, mul_mv_ext_f16_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_4, mul_mv_ext_f16_f32_r1_4, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_5, mul_mv_ext_f16_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_2, mul_mv_ext_q4_0_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_3, mul_mv_ext_q4_0_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_4, mul_mv_ext_q4_0_f32_r1_4, has_simdgroup_reduction); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_5, mul_mv_ext_q4_0_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_2, mul_mv_ext_q4_1_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_3, mul_mv_ext_q4_1_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_4, mul_mv_ext_q4_1_f32_r1_4, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_5, mul_mv_ext_q4_1_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_2, mul_mv_ext_q5_0_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_3, mul_mv_ext_q5_0_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_4, mul_mv_ext_q5_0_f32_r1_4, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_5, mul_mv_ext_q5_0_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_2, mul_mv_ext_q5_1_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_3, mul_mv_ext_q5_1_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_4, mul_mv_ext_q5_1_f32_r1_4, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_5, mul_mv_ext_q5_1_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_2, mul_mv_ext_q8_0_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_3, mul_mv_ext_q8_0_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_4, mul_mv_ext_q8_0_f32_r1_4, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5, mul_mv_ext_q8_0_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2, mul_mv_ext_q4_K_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_3, mul_mv_ext_q4_K_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_4, mul_mv_ext_q4_K_f32_r1_4, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_5, mul_mv_ext_q4_K_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_2, mul_mv_ext_q5_K_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_3, mul_mv_ext_q5_K_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_4, mul_mv_ext_q5_K_f32_r1_4, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_5, mul_mv_ext_q5_K_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_2, mul_mv_ext_q6_K_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_3, mul_mv_ext_q6_K_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_4, mul_mv_ext_q6_K_f32_r1_4, has_simdgroup_reduction); + 
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_5, mul_mv_ext_q6_K_f32_r1_5, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_2, mul_mv_ext_iq4_nl_f32_r1_2, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_3, mul_mv_ext_iq4_nl_f32_r1_3, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_4, mul_mv_ext_iq4_nl_f32_r1_4, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_5, mul_mv_ext_iq4_nl_f32_r1_5, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, mul_mv_q2_K_f32, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, mul_mv_q3_K_f32, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, mul_mv_q4_K_f32, has_simdgroup_reduction); @@ -788,8 +905,11 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F16, im2col_ext_f16, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_EXT_F32, im2col_ext_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F32_F32, conv_transpose_1d_f32_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F16_F32, conv_transpose_1d_f16_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32, pad_reflect_1d_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, timestep_embedding_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32, arange_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true); @@ -851,6 +971,8 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_0_H256, flash_attn_ext_vec_q5_0_h256, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q5_1_H256, flash_attn_ext_vec_q5_1_h256, has_simdgroup_reduction); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_Q8_0_H256, flash_attn_ext_vec_q8_0_h256, has_simdgroup_reduction); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_F32, set_f32, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SET_I32, set_i32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_BF16, cpy_f32_bf16, use_bfloat); @@ -870,6 +992,7 @@ @implementation GGMLMetalClass GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIN, sin, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true); + GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, pool_2d_avg_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, pool_2d_max_f32, true); } @@ -968,6 +1091,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_UNARY_OP_GELU: case GGML_UNARY_OP_GELU_QUICK: case GGML_UNARY_OP_SILU: + case GGML_UNARY_OP_ELU: return ggml_is_contiguous(op->src[0]); 
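// both backends in this patch (kompute earlier, metal in the hunk just below)
// gate GGML_OP_ROPE on the same mode flags; factored out here as a sketch
// (hypothetical helper, not part of the patch):

#include "ggml.h"

static bool rope_mode_supported(const struct ggml_tensor * op) {
    const int mode = ((const int32_t *) op->op_params)[2];
    if (mode & GGML_ROPE_TYPE_MROPE) {  // multi-section rope: not implemented
        return false;
    }
    if (mode & GGML_ROPE_TYPE_VISION) { // vision rope: not implemented
        return false;
    }
    return true;
}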
default: return false; @@ -986,6 +1110,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_OP_REPEAT: case GGML_OP_SCALE: case GGML_OP_CLAMP: + case GGML_OP_CONV_TRANSPOSE_1D: return true; case GGML_OP_SQR: case GGML_OP_SQRT: @@ -994,12 +1119,24 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex return ggml_is_contiguous(op->src[0]); case GGML_OP_SUM_ROWS: case GGML_OP_SOFT_MAX: - case GGML_OP_RMS_NORM: case GGML_OP_GROUP_NORM: return has_simdgroup_reduction; + case GGML_OP_RMS_NORM: + return has_simdgroup_reduction && (op->ne[0] % 4 == 0); + case GGML_OP_ARGMAX: case GGML_OP_NORM: - case GGML_OP_ROPE: return true; + case GGML_OP_ROPE: + { + const int mode = ((const int32_t *) op->op_params)[2]; + if (mode & GGML_ROPE_TYPE_MROPE) { + return false; + } + if (mode & GGML_ROPE_TYPE_VISION) { + return false; + } + return true; + } case GGML_OP_IM2COL: return op->src[0]->type == GGML_TYPE_F16; case GGML_OP_POOL_1D: @@ -1007,6 +1144,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex case GGML_OP_POOL_2D: case GGML_OP_UPSCALE: case GGML_OP_PAD: + case GGML_OP_PAD_REFLECT_1D: case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_ARGSORT: @@ -1064,6 +1202,16 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex return false; }; } + case GGML_OP_SET: + { + switch (op->src[0]->type) { + case GGML_TYPE_F32: + case GGML_TYPE_I32: + return true; + default: + return false; + }; + } case GGML_OP_DIAG_MASK_INF: case GGML_OP_GET_ROWS: { @@ -1589,6 +1737,18 @@ static void ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; } break; + case GGML_UNARY_OP_ELU: + { + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ELU].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + + const int64_t n = ggml_nelements(dst); + + [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; default: { GGML_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(dst->op)); @@ -1912,340 +2072,490 @@ static void ggml_metal_encode_node( // find the break-even point where the matrix-matrix kernel becomes more efficient compared // to the matrix-vector kernel - int ne11_mm_min = 1; + const int ne11_mm_min = 4; + + // first try to use small-batch mat-mv kernels + // these should be efficient for BS [2, ~8] + if (src1t == GGML_TYPE_F32 && (ne00%256 == 0) && + ( + ( + ( + src0t == GGML_TYPE_F16 || // TODO: helper function + src0t == GGML_TYPE_Q4_0 || + src0t == GGML_TYPE_Q4_1 || + src0t == GGML_TYPE_Q5_0 || + src0t == GGML_TYPE_Q5_1 || + src0t == GGML_TYPE_Q8_0 || + src0t == GGML_TYPE_IQ4_NL || + false) && (ne11 >= 2 && ne11 <= 8) + ) || + ( + ( + src0t == GGML_TYPE_Q4_K || + src0t == GGML_TYPE_Q5_K || + src0t == GGML_TYPE_Q6_K || + false) && (ne11 >= 4 && ne11 <= 8) + ) + ) + ) { + // TODO: determine the optimal parameters based on grid utilization + // I still don't know why we should not always use the maximum available threads: + // + // nsg = pipeline.maxTotalThreadsPerThreadgroup / 32 + // + // my current hypothesis is that the work grid is not evenly divisible for different nsg + // values and there can be some tail effects when nsg is high. 
need to confirm this
+ //
+ const int nsg = 2; // num simdgroups per threadgroup
+ const int nxpsg = ne11 < 3 ? 16 : 8; // num threads along row per simdgroup
+ const int nypsg = 32/nxpsg; // num threads along col per simdgroup (i.e. a simdgroup processes that many src0 rows at a time)
+ const int r0ptg = nypsg*nsg; // num src0 rows per threadgroup
+ int r1ptg = 4; // num src1 rows per threadgroup
+
+ // note: not sure how optimal these are across different hardware; there might be something cleverer
+ switch (ne11) {
+ case 2:
+ r1ptg = 2; break;
+ case 3:
+ case 6:
+ r1ptg = 3; break;
+ case 4:
+ case 7:
+ case 8:
+ r1ptg = 4; break;
+ case 5:
+ r1ptg = 5; break;
+ };
+ // worked example: ne11 = 3 gives nxpsg = 8, nypsg = 4, r0ptg = 8, r1ptg = 3, i.e. each
+ // 32x2-thread threadgroup produces an 8x3 tile of dst (cf. the dispatchThreadgroups call below)
-#if 0
- // the numbers below are measured on M2 Ultra for 7B and 13B models
- // these numbers do not translate to other devices or model sizes
- // TODO: need to find a better approach
- if ([device.name isEqualToString:@"Apple M2 Ultra"]) {
- switch (src0t) {
- case GGML_TYPE_F16: ne11_mm_min = 2; break;
- case GGML_TYPE_Q8_0: ne11_mm_min = 7; break;
- case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
- case GGML_TYPE_Q3_K: ne11_mm_min = 7; break;
- case GGML_TYPE_Q4_0:
- case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
- case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
- case GGML_TYPE_Q5_0: // not tested yet
- case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
- case GGML_TYPE_Q5_K: ne11_mm_min = 7; break;
- case GGML_TYPE_Q6_K: ne11_mm_min = 7; break;
- default: ne11_mm_min = 1; break;
- }
- }
-#endif
+ id<MTLComputePipelineState> pipeline = nil;
- // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
- // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
- if ([device supportsFamily:MTLGPUFamilyApple7] &&
- !ggml_is_transposed(src0) &&
- !ggml_is_transposed(src1) &&
- src1t == GGML_TYPE_F32 &&
- ne00 % 32 == 0 && ne00 >= 64 &&
- (ne11 > ne11_mm_min || (ggml_is_quantized(src0t) && ne12 > 1))) {
- //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
-
- // some Metal matrix data types require aligned pointers
- // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
- switch (src0->type) {
- case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break;
- case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break;
- case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8 == 0); break;
- default: break;
- }
+ switch (src0->type) {
+ case GGML_TYPE_F16:
+ switch (r1ptg) {
+ case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_2].pipeline; break;
+ case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_3].pipeline; break;
+ case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_4].pipeline; break;
+ case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_F16_F32_R1_5].pipeline; break;
+ default: GGML_ABORT("not implemented");
+ } break;
+ case GGML_TYPE_Q4_0:
+ switch (r1ptg) {
+ case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_2].pipeline; break;
+ case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_3].pipeline; break;
+ case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_4].pipeline; break;
+ case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_0_F32_R1_5].pipeline; break;
+ default: GGML_ABORT("not implemented");
+ } break;
+ case GGML_TYPE_Q4_1:
+ switch (r1ptg) {
+ case 2: pipeline =
ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_1_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; + case GGML_TYPE_Q5_0: + switch (r1ptg) { + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_0_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; + case GGML_TYPE_Q5_1: + switch (r1ptg) { + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_1_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; + case GGML_TYPE_Q8_0: + switch (r1ptg) { + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q8_0_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; + case GGML_TYPE_Q4_K: + switch (r1ptg) { + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q4_K_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; + case GGML_TYPE_Q5_K: + switch (r1ptg) { + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q5_K_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; + case GGML_TYPE_Q6_K: + switch (r1ptg) { + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_2].pipeline; break; + case 3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_Q6_K_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; + case GGML_TYPE_IQ4_NL: + switch (r1ptg) { + case 2: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_2].pipeline; break; + case 
3: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_3].pipeline; break; + case 4: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_4].pipeline; break; + case 5: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_EXT_IQ4_NL_F32_R1_5].pipeline; break; + default: GGML_ABORT("not implemented"); + } break; + default: GGML_ABORT("not implemented"); + } - id pipeline = nil; - - switch (src0->type) { - case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32 ].pipeline; break; - case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32 ].pipeline; break; - case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32 ].pipeline; break; - case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32 ].pipeline; break; - case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32 ].pipeline; break; - case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32 ].pipeline; break; - case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32 ].pipeline; break; - case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32 ].pipeline; break; - case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32 ].pipeline; break; - case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32 ].pipeline; break; - case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32 ].pipeline; break; - case GGML_TYPE_Q5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32 ].pipeline; break; - case GGML_TYPE_Q6_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32 ].pipeline; break; - case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32].pipeline; break; - case GGML_TYPE_IQ2_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32 ].pipeline; break; - case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32].pipeline; break; - case GGML_TYPE_IQ3_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32 ].pipeline; break; - case GGML_TYPE_IQ2_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32 ].pipeline; break; - case GGML_TYPE_IQ1_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32 ].pipeline; break; - case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32 ].pipeline; break; - case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break; - case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break; - default: GGML_ABORT("MUL MAT-MAT not implemented"); - } + ggml_metal_kargs_mul_mv_ext args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne10 =*/ ne10, + /*.ne11 =*/ ne11, + /*.ne12 =*/ ne12, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.r2 =*/ r2, + /*.r3 =*/ r3, + /*.nsg =*/ nsg, + /*.nxpsg =*/ nxpsg, + /*.r1ptg =*/ r1ptg, + }; - ggml_metal_kargs_mul_mm args = { - /*.ne00 =*/ ne00, - /*.ne02 =*/ ne02, - /*.nb01 =*/ nb01, - /*.nb02 =*/ nb02, - /*.nb03 =*/ nb03, - /*.ne12 =*/ ne12, - /*.nb10 =*/ nb10, - /*.nb11 =*/ nb11, - /*.nb12 =*/ nb12, - /*.nb13 =*/ nb13, - /*.ne0 =*/ ne0, - /*.ne1 
=*/ ne1, - /*.r2 =*/ r2, - /*.r3 =*/ r3, - }; + [encoder setComputePipelineState:pipeline]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; + + //printf("ne01 = %lld nr0ptg = %d\n", ne01, nr0ptg); + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + r0ptg - 1)/r0ptg, (ne11 + r1ptg - 1)/r1ptg, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(32, nsg, 1)]; + } else + // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs + // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel + if ([device supportsFamily:MTLGPUFamilyApple7] && + !ggml_is_transposed(src0) && + !ggml_is_transposed(src1) && + src1t == GGML_TYPE_F32 && + ne00 % 32 == 0 && ne00 >= 64 && + (ne11 > ne11_mm_min || (ggml_is_quantized(src0t) && ne12 > 1))) { + //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12); + + // some Metal matrix data types require aligned pointers + // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5) + switch (src0->type) { + case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break; + case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break; + case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8 == 0); break; + default: break; + } + + id pipeline = nil; - [encoder setComputePipelineState:pipeline]; - [encoder setBytes:&args length:sizeof(args) atIndex:0]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; - [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; - - [encoder setThreadgroupMemoryLength:8192 atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake( (ne11 + 31)/32, (ne01 + 63)/64, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)]; - } else { - int nth0 = 32; - int nth1 = 1; - int nrows = 1; - //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12); - - id pipeline = nil; - - // use custom matrix x vector kernel - switch (src0t) { - case GGML_TYPE_F32: - { - GGML_ASSERT(src1t == GGML_TYPE_F32); - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline; + switch (src0->type) { + case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32 ].pipeline; break; + case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32 ].pipeline; break; + case GGML_TYPE_BF16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_BF16_F32 ].pipeline; break; + case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32 ].pipeline; break; + case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32 ].pipeline; break; + case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32 ].pipeline; break; + case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32 ].pipeline; break; + case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32 ].pipeline; break; + case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32 ].pipeline; break; + case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32 ].pipeline; break; + case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32 ].pipeline; break; + case GGML_TYPE_Q5_K: pipeline = 
ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32 ].pipeline; break; + case GGML_TYPE_Q6_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32 ].pipeline; break; + case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32].pipeline; break; + case GGML_TYPE_IQ2_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32 ].pipeline; break; + case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32].pipeline; break; + case GGML_TYPE_IQ3_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32 ].pipeline; break; + case GGML_TYPE_IQ2_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32 ].pipeline; break; + case GGML_TYPE_IQ1_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32 ].pipeline; break; + case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32 ].pipeline; break; + case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break; + case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break; + default: GGML_ABORT("MUL MAT-MAT not implemented"); + } + + ggml_metal_kargs_mul_mm args = { + /*.ne00 =*/ ne00, + /*.ne02 =*/ ne02, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne12 =*/ ne12, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.r2 =*/ r2, + /*.r3 =*/ r3, + }; + + [encoder setComputePipelineState:pipeline]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; + + [encoder setThreadgroupMemoryLength:8192 atIndex:0]; + [encoder dispatchThreadgroups:MTLSizeMake( (ne11 + 31)/32, (ne01 + 63)/64, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)]; + } else { + int nth0 = 32; + int nth1 = 1; + int nrows = 1; + //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12); + + id pipeline = nil; + + // use custom matrix x vector kernel + switch (src0t) { + case GGML_TYPE_F32: + { + GGML_ASSERT(src1t == GGML_TYPE_F32); + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline; + nrows = 4; + } break; + case GGML_TYPE_F16: + { + nth0 = 32; + nth1 = 1; + if (src1t == GGML_TYPE_F32) { + if (ne11 * ne12 < 4) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline; + } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline; + nrows = ne11; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32].pipeline; nrows = 4; - } break; - case GGML_TYPE_F16: - { - nth0 = 32; - nth1 = 1; - if (src1t == GGML_TYPE_F32) { - if (ne11 * ne12 < 4) { - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline; - } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline; - nrows = ne11; - } else { - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32].pipeline; - nrows = 4; - } - } else { - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16].pipeline; - nrows = 4; - } - } break; - case GGML_TYPE_BF16: - { - nth0 = 32; - nth1 = 1; - if (src1t == GGML_TYPE_F32) { - if (ne11 * ne12 < 4) { - pipeline = 
ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW].pipeline; - } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4].pipeline; - nrows = ne11; - } else { - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32].pipeline; - nrows = 4; - } - } else { - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16].pipeline; - nrows = 4; - } - } break; - case GGML_TYPE_Q4_0: - { - nth0 = 8; - nth1 = 8; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32].pipeline; - } break; - case GGML_TYPE_Q4_1: - { - nth0 = 8; - nth1 = 8; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32].pipeline; - } break; - case GGML_TYPE_Q5_0: - { - nth0 = 8; - nth1 = 8; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32].pipeline; - } break; - case GGML_TYPE_Q5_1: - { - nth0 = 8; - nth1 = 8; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32].pipeline; - } break; - case GGML_TYPE_Q8_0: - { - nth0 = 8; - nth1 = 8; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32].pipeline; - } break; - case GGML_TYPE_Q2_K: - { - nth0 = 2; - nth1 = 32; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32].pipeline; - } break; - case GGML_TYPE_Q3_K: - { - nth0 = 2; - nth1 = 32; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32].pipeline; - } break; - case GGML_TYPE_Q4_K: - { - nth0 = 4; //1; - nth1 = 8; //32; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32].pipeline; - } break; - case GGML_TYPE_Q5_K: - { - nth0 = 2; - nth1 = 32; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32].pipeline; - } break; - case GGML_TYPE_Q6_K: - { - nth0 = 2; - nth1 = 32; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32].pipeline; - } break; - case GGML_TYPE_IQ2_XXS: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32].pipeline; - } break; - case GGML_TYPE_IQ2_XS: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32].pipeline; - } break; - case GGML_TYPE_IQ3_XXS: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32].pipeline; - } break; - case GGML_TYPE_IQ3_S: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32].pipeline; - } break; - case GGML_TYPE_IQ2_S: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32].pipeline; - } break; - case GGML_TYPE_IQ1_S: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32].pipeline; - } break; - case GGML_TYPE_IQ1_M: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32].pipeline; - } break; - case GGML_TYPE_IQ4_NL: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32].pipeline; - } break; - case GGML_TYPE_IQ4_XS: - { - nth0 = 4; - nth1 = 16; - pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline; - } break; - default: - { - GGML_LOG_ERROR("Asserting on type %d\n", (int)src0t); - GGML_ABORT("not implemented"); } - }; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16].pipeline; + nrows = 4; + } + } break; + case GGML_TYPE_BF16: + { + nth0 = 32; + nth1 = 1; + if (src1t == GGML_TYPE_F32) { + if (ne11 * ne12 < 4) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_1ROW].pipeline; + } else if (ne00 >= 
128 && ne01 >= 8 && ne00%4 == 0) { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32_L4].pipeline; + nrows = ne11; + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_F32].pipeline; + nrows = 4; + } + } else { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_BF16_BF16].pipeline; + nrows = 4; + } + } break; + case GGML_TYPE_Q4_0: + { + nth0 = 8; + nth1 = 8; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32].pipeline; + } break; + case GGML_TYPE_Q4_1: + { + nth0 = 8; + nth1 = 8; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32].pipeline; + } break; + case GGML_TYPE_Q5_0: + { + nth0 = 8; + nth1 = 8; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32].pipeline; + } break; + case GGML_TYPE_Q5_1: + { + nth0 = 8; + nth1 = 8; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32].pipeline; + } break; + case GGML_TYPE_Q8_0: + { + nth0 = 8; + nth1 = 8; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32].pipeline; + } break; + case GGML_TYPE_Q2_K: + { + nth0 = 2; + nth1 = 32; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32].pipeline; + } break; + case GGML_TYPE_Q3_K: + { + nth0 = 2; + nth1 = 32; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32].pipeline; + } break; + case GGML_TYPE_Q4_K: + { + nth0 = 4; //1; + nth1 = 8; //32; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32].pipeline; + } break; + case GGML_TYPE_Q5_K: + { + nth0 = 2; + nth1 = 32; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32].pipeline; + } break; + case GGML_TYPE_Q6_K: + { + nth0 = 2; + nth1 = 32; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32].pipeline; + } break; + case GGML_TYPE_IQ2_XXS: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32].pipeline; + } break; + case GGML_TYPE_IQ2_XS: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32].pipeline; + } break; + case GGML_TYPE_IQ3_XXS: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32].pipeline; + } break; + case GGML_TYPE_IQ3_S: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32].pipeline; + } break; + case GGML_TYPE_IQ2_S: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32].pipeline; + } break; + case GGML_TYPE_IQ1_S: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32].pipeline; + } break; + case GGML_TYPE_IQ1_M: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32].pipeline; + } break; + case GGML_TYPE_IQ4_NL: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32].pipeline; + } break; + case GGML_TYPE_IQ4_XS: + { + nth0 = 4; + nth1 = 16; + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline; + } break; + default: + { + GGML_LOG_ERROR("Asserting on type %d\n", (int)src0t); + GGML_ABORT("not implemented"); + } + }; - ggml_metal_kargs_mul_mv args = { - /*.ne00 =*/ ne00, - /*.ne01 =*/ ne01, - /*.ne02 =*/ ne02, - /*.nb00 =*/ nb00, - /*.nb01 =*/ nb01, - /*.nb02 =*/ nb02, - /*.nb03 =*/ nb03, - /*.ne10 =*/ ne10, - /*.ne11 =*/ ne11, - /*.ne12 =*/ ne12, - /*.nb10 =*/ nb10, - /*.nb11 =*/ nb11, - /*.nb12 =*/ nb12, - /*.nb13 =*/ nb13, - /*.ne0 =*/ ne0, - /*.ne1 =*/ ne1, - /*.r2 =*/ r2, - /*.r3 =*/ r3, - }; + ggml_metal_kargs_mul_mv 
args = { + /*.ne00 =*/ ne00, + /*.ne01 =*/ ne01, + /*.ne02 =*/ ne02, + /*.nb00 =*/ nb00, + /*.nb01 =*/ nb01, + /*.nb02 =*/ nb02, + /*.nb03 =*/ nb03, + /*.ne10 =*/ ne10, + /*.ne11 =*/ ne11, + /*.ne12 =*/ ne12, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.ne0 =*/ ne0, + /*.ne1 =*/ ne1, + /*.r2 =*/ r2, + /*.r3 =*/ r3, + }; - [encoder setComputePipelineState:pipeline]; - [encoder setBytes:&args length:sizeof(args) atIndex:0]; - [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; - [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; - [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; + [encoder setComputePipelineState:pipeline]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; - if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q5_0 || - src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 || src0t == GGML_TYPE_Q2_K || - src0t == GGML_TYPE_IQ1_S || src0t == GGML_TYPE_IQ1_M || src0t == GGML_TYPE_IQ2_S) { - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } - else if (src0t == GGML_TYPE_IQ2_XXS || src0t == GGML_TYPE_IQ2_XS) { - const int mem_size = src0t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128; - [encoder setThreadgroupMemoryLength:mem_size atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } - else if (src0t == GGML_TYPE_IQ3_XXS || src0t == GGML_TYPE_IQ3_S) { - const int mem_size = src0t == GGML_TYPE_IQ3_XXS ? 256*4+128 : 512*4; - [encoder setThreadgroupMemoryLength:mem_size atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } - else if (src0t == GGML_TYPE_IQ4_NL || src0t == GGML_TYPE_IQ4_XS) { - const int mem_size = 32*sizeof(float); - [encoder setThreadgroupMemoryLength:mem_size atIndex:0]; - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } - else if (src0t == GGML_TYPE_Q4_K) { - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } - else if (src0t == GGML_TYPE_Q3_K) { - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } - else if (src0t == GGML_TYPE_Q5_K) { - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } - else if (src0t == GGML_TYPE_Q6_K) { - [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } else { - const int64_t ny = (ne11 + nrows - 1)/nrows; - [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; - } - } + if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q5_0 || + src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 || src0t == GGML_TYPE_Q2_K || + src0t == GGML_TYPE_IQ1_S || src0t == GGML_TYPE_IQ1_M || src0t == GGML_TYPE_IQ2_S) { + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } + else if (src0t == GGML_TYPE_IQ2_XXS || src0t == 
GGML_TYPE_IQ2_XS) { + const int mem_size = src0t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128; + [encoder setThreadgroupMemoryLength:mem_size atIndex:0]; + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } + else if (src0t == GGML_TYPE_IQ3_XXS || src0t == GGML_TYPE_IQ3_S) { + const int mem_size = src0t == GGML_TYPE_IQ3_XXS ? 256*4+128 : 512*4; + [encoder setThreadgroupMemoryLength:mem_size atIndex:0]; + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } + else if (src0t == GGML_TYPE_IQ4_NL || src0t == GGML_TYPE_IQ4_XS) { + const int mem_size = 32*sizeof(float); + [encoder setThreadgroupMemoryLength:mem_size atIndex:0]; + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } + else if (src0t == GGML_TYPE_Q4_K) { + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } + else if (src0t == GGML_TYPE_Q3_K) { + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } + else if (src0t == GGML_TYPE_Q5_K) { + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } + else if (src0t == GGML_TYPE_Q6_K) { + [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } else { + const int64_t ny = (ne11 + nrows - 1)/nrows; + [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)]; + } + } } break; case GGML_OP_MUL_MAT_ID: { @@ -2657,7 +2967,6 @@ static void ggml_metal_encode_node( } break; case GGML_OP_GROUP_NORM: { - GGML_ASSERT(ne00 % 4 == 0); GGML_ASSERT(ggml_is_contiguous(src0)); float eps; @@ -2727,7 +3036,9 @@ static void ggml_metal_encode_node( } break; case GGML_OP_ROPE: { - GGML_ASSERT(ne10 == ne02); + // make sure we have one or more position id(ne10) per token(ne02) + GGML_ASSERT(ne10 % ne02 == 0); + GGML_ASSERT(ne10 >= ne02); const int nth = MIN(1024, ne00); @@ -2893,6 +3204,49 @@ static void ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(IC, OH, OW) threadsPerThreadgroup:MTLSizeMake(N, KH, KW)]; } } break; + case GGML_OP_CONV_TRANSPOSE_1D: + { + GGML_ASSERT(ggml_is_contiguous(src0)); + GGML_ASSERT(ggml_is_contiguous(src1)); + GGML_ASSERT(src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + const int32_t s0 = ((const int32_t *)(dst->op_params))[0]; + + const int32_t IC = src1->ne[1]; + const int32_t IL = src1->ne[0]; + + const int32_t K = src0->ne[0]; + + const int32_t OL = dst->ne[0]; + const int32_t OC = dst->ne[1]; + + id pipeline; + + switch (src0->type) { + case GGML_TYPE_F32: { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F32_F32].pipeline; + } break; + case GGML_TYPE_F16: { + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONV_TRANSPOSE_1D_F16_F32].pipeline; + } break; + default: GGML_ABORT("fatal error"); + }; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:2]; + [encoder setBytes:&IC length:sizeof( int32_t) atIndex:3]; + 
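+ // for reference (assuming ggml's usual conv_transpose_1d constraints p0 = 0, d0 = 1):
+ // OL = (IL - 1)*s0 + K, and the dispatch below launches an OL x OC grid with one
+ // threadgroup per output element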
[encoder setBytes:&IL length:sizeof( int32_t) atIndex:4]; + [encoder setBytes:&K length:sizeof( int32_t) atIndex:5]; + [encoder setBytes:&s0 length:sizeof( int32_t) atIndex:6]; + [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:7]; + [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:8]; + + [encoder dispatchThreadgroups:MTLSizeMake(OL, OC, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)]; + } break; case GGML_OP_UPSCALE: { GGML_ASSERT(src0->type == GGML_TYPE_F32); @@ -2962,6 +3316,38 @@ static void ggml_metal_encode_node( const int nth = MIN(1024, ne0); + [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + } break; + case GGML_OP_PAD_REFLECT_1D: + { + GGML_ASSERT(src0->type == GGML_TYPE_F32); + + const int32_t p0 = ((const int32_t *)(dst->op_params))[0]; + const int32_t p1 = ((const int32_t *)(dst->op_params))[1]; + + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2]; + [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3]; + [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4]; + [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5]; + [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:6]; + [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7]; + [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8]; + [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9]; + [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10]; + [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:11]; + [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:12]; + [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:13]; + [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:14]; + [encoder setBytes:&p0 length:sizeof(p0) atIndex:15]; + [encoder setBytes:&p1 length:sizeof(p1) atIndex:16]; + + const int nth = MIN(1024, ne0); + [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; case GGML_OP_ARANGE: @@ -3493,6 +3879,68 @@ static void ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; } break; + case GGML_OP_SET: + { + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // src0 and dst as viewed during set + const size_t dst_nb0 = ggml_element_size(src0); + + const size_t dst_nb1 = ((int32_t *) dst->op_params)[0]; + const size_t dst_nb2 = ((int32_t *) dst->op_params)[1]; + const size_t dst_nb3 = ((int32_t *) dst->op_params)[2]; + const size_t offset = ((int32_t *) dst->op_params)[3]; + const bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + + const int im0 = (ne10 == 0 ? 0 : ne10-1); + const int im1 = (ne11 == 0 ? 0 : ne11-1); + const int im2 = (ne12 == 0 ? 0 : ne12-1); + const int im3 = (ne13 == 0 ? 
0 : ne13-1); + + GGML_ASSERT(offset + im0*dst_nb0 + im1*dst_nb1 + im2*dst_nb2 + im3*dst_nb3 <= ggml_nbytes(dst)); + + id pipeline = nil; + + switch (src0t) { + case GGML_TYPE_F32: + GGML_ASSERT(nb10 == sizeof(float)); + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_F32].pipeline; break; + case GGML_TYPE_I32: + GGML_ASSERT(nb10 == sizeof(int32_t)); + pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SET_I32].pipeline; break; + default: GGML_ABORT("fatal error"); + } + + ggml_metal_kargs_set args = { + /*.ne10 =*/ ne10, + /*.ne11 =*/ ne11, + /*.ne12 =*/ ne12, + /*.nb10 =*/ nb10, + /*.nb11 =*/ nb11, + /*.nb12 =*/ nb12, + /*.nb13 =*/ nb13, + /*.nb1 =*/ dst_nb1, + /*.nb2 =*/ dst_nb2, + /*.nb3 =*/ dst_nb3, + /*.offs =*/ offset, + /*.inplace =*/ inplace, + }; + + const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne10); + + [encoder setComputePipelineState:pipeline]; + [encoder setBytes:&args length:sizeof(args) atIndex:0]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1]; + [encoder setBuffer:id_src1 offset:offs_src1 atIndex:2]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:3]; + + [encoder dispatchThreadgroups:MTLSizeMake(ne11, ne12, ne13) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + } break; case GGML_OP_POOL_2D: { GGML_ASSERT(ggml_is_contiguous(src0)); @@ -3552,6 +4000,31 @@ static void ggml_metal_encode_node( [encoder dispatchThreadgroups:MTLSizeMake(n_tg, 1, 1) threadsPerThreadgroup:MTLSizeMake(n_threads, 1, 1)]; } break; + case GGML_OP_ARGMAX: + { + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(nb00 == ggml_type_size(src0->type)); + + const int64_t nrows = ggml_nrows(src0); + + int nth = 32; // SIMD width + while (nth < ne00 && nth*ne01*ne02*ne03 < 256) { + nth *= 2; + } + + id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGMAX].pipeline; + + [encoder setComputePipelineState:pipeline]; + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; + [encoder setBuffer:id_dst offset:offs_dst atIndex:1]; + [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2]; + [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3]; + [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0]; + [encoder setThreadgroupMemoryLength:32*sizeof(int32_t) atIndex:1]; + + [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)]; + } break; default: { GGML_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(dst->op)); @@ -4357,19 +4830,45 @@ static ggml_backend_dev_t ggml_backend_metal_reg_device_get(ggml_backend_reg_t r GGML_UNUSED(index); } +static struct ggml_backend_feature g_ggml_backend_metal_features[] = { +#if defined(GGML_METAL_EMBED_LIBRARY) + { "EMBED_LIBRARY", "1" }, +#endif +#if defined(GGML_METAL_USE_BF16) + { "BF16", "1" }, +#endif + { nil, nil }, +}; + +static struct ggml_backend_feature * ggml_backend_metal_get_features(ggml_backend_reg_t reg) { + return g_ggml_backend_metal_features; + + GGML_UNUSED(reg); +} + +static void * ggml_backend_metal_get_proc_address(ggml_backend_reg_t reg, const char * name) { + if (strcmp(name, "ggml_backend_get_features") == 0) { + return (void *)ggml_backend_metal_get_features; + } + + return NULL; + + GGML_UNUSED(reg); +} static struct ggml_backend_reg_i ggml_backend_metal_reg_i = { /* .get_name = */ ggml_backend_metal_reg_get_name, /* .device_count = */ ggml_backend_metal_reg_device_count, /* .device_get = */ ggml_backend_metal_reg_device_get, - /* .get_proc_address = */ NULL, + /* 
.get_proc_address = */ ggml_backend_metal_get_proc_address, }; ggml_backend_reg_t ggml_backend_metal_reg(void) { // TODO: make this thread-safe somehow? { g_ggml_backend_metal_reg = (struct ggml_backend_reg) { - /* .iface = */ ggml_backend_metal_reg_i, - /* .context = */ NULL, + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_metal_reg_i, + /* .context = */ NULL, }; g_ggml_backend_metal_device = (struct ggml_backend_device) { @@ -4381,3 +4880,5 @@ ggml_backend_reg_t ggml_backend_metal_reg(void) { return &g_ggml_backend_metal_reg; } + +GGML_BACKEND_DL_IMPL(ggml_backend_metal_reg) diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal index 86fdf1c18cfb6..8ba43904d0c1b 100644 --- a/ggml/src/ggml-metal/ggml-metal.metal +++ b/ggml/src/ggml-metal/ggml-metal.metal @@ -47,6 +47,11 @@ void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) reg = (type4x4)(*src); } +template +void dequantize_f16_t4(device const half4 * src, short il, thread type4 & reg) { + reg = (type4)(*(src + il)); +} + #if defined(GGML_METAL_USE_BF16) template void dequantize_bf16(device const bfloat4x4 * src, short il, thread type4x4 & reg) { @@ -55,7 +60,7 @@ void dequantize_bf16(device const bfloat4x4 * src, short il, thread type4x4 & re #endif template -void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg) { +void dequantize_q4_0(device const block_q4_0 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 1); const float d1 = il ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; @@ -73,8 +78,23 @@ void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg reg = (type4x4) reg_f; } +template +void dequantize_q4_0_t4(device const block_q4_0 * xb, short il, thread type4 & reg) { + device const uint16_t * qs = ((device const uint16_t *)xb + 1); + const float d1 = (il/4) ? (xb->d / 16.h) : xb->d; + const float d2 = d1 / 256.f; + const float md = -8.h * xb->d; + const ushort mask0 = (il/4) ? 0x00F0 : 0x000F; + const ushort mask1 = mask0 << 8; + + for (int i = 0; i < 2; i++) { + reg[2*i + 0] = d1 * (qs[2*(il%4) + i] & mask0) + md; + reg[2*i + 1] = d2 * (qs[2*(il%4) + i] & mask1) + md; + } +} + template -void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg) { +void dequantize_q4_1(device const block_q4_1 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 2); const float d1 = il ? (xb->d / 16.h) : xb->d; const float d2 = d1 / 256.f; @@ -92,8 +112,23 @@ void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg reg = (type4x4) reg_f; } +template +void dequantize_q4_1_t4(device const block_q4_1 * xb, short il, thread type4 & reg) { + device const uint16_t * qs = ((device const uint16_t *)xb + 2); + const float d1 = (il/4) ? (xb->d / 16.h) : xb->d; + const float d2 = d1 / 256.f; + const float m = xb->m; + const ushort mask0 = (il/4) ? 
0x00F0 : 0x000F; + const ushort mask1 = mask0 << 8; + + for (int i = 0; i < 2; i++) { + reg[2*i + 0] = d1 * (qs[2*(il%4) + i] & mask0) + m; + reg[2*i + 1] = d2 * (qs[2*(il%4) + i] & mask1) + m; + } +} + template -void dequantize_q5_0(device const block_q5_0 *xb, short il, thread type4x4 & reg) { +void dequantize_q5_0(device const block_q5_0 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 3); const float d = xb->d; const float md = -16.h * xb->d; @@ -124,8 +159,38 @@ void dequantize_q5_0(device const block_q5_0 *xb, short il, thread type4x4 & reg reg = (type4x4) reg_f; } +template +void dequantize_q5_0_t4(device const block_q5_0 * xb, short il, thread type4 & reg) { + device const uint16_t * qs = ((device const uint16_t *)xb + 3); + const float d = xb->d; + const float md = -16.h * xb->d; + const ushort mask = (il/4) ? 0x00F0 : 0x000F; + + const uint32_t qh = *((device const uint32_t *)xb->qh); + + const int x_mv = (il/4) ? 4 : 0; + + const int gh_mv = (il/4) ? 12 : 0; + const int gh_bk = (il/4) ? 0 : 4; + + for (int ii = 0; ii < 2; ii++) { + int i = 2*(il%4) + ii; + + // extract the 5-th bits for x0 and x1 + const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; + const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; + + // combine the 4-bits from qs with the 5th bit + const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); + const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); + + reg[2*ii + 0] = d * x0 + md; + reg[2*ii + 1] = d * x1 + md; + } +} + template -void dequantize_q5_1(device const block_q5_1 *xb, short il, thread type4x4 & reg) { +void dequantize_q5_1(device const block_q5_1 * xb, short il, thread type4x4 & reg) { device const uint16_t * qs = ((device const uint16_t *)xb + 4); const float d = xb->d; const float m = xb->m; @@ -156,10 +221,40 @@ void dequantize_q5_1(device const block_q5_1 *xb, short il, thread type4x4 & reg reg = (type4x4) reg_f; } +template +void dequantize_q5_1_t4(device const block_q5_1 * xb, short il, thread type4 & reg) { + device const uint16_t * qs = ((device const uint16_t *)xb + 4); + const float d = xb->d; + const float m = xb->m; + const ushort mask = (il/4) ? 0x00F0 : 0x000F; + + const uint32_t qh = *((device const uint32_t *)xb->qh); + + const int x_mv = (il/4) ? 4 : 0; + + const int gh_mv = (il/4) ? 12 : 0; + const int gh_bk = (il/4) ? 
0 : 4; + + for (int ii = 0; ii < 2; ii++) { + int i = 2*(il%4) + ii; + + // extract the 5-th bits for x0 and x1 + const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10; + const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10; + + // combine the 4-bits from qs with the 5th bit + const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0); + const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1); + + reg[2*ii + 0] = d * x0 + m; + reg[2*ii + 1] = d * x1 + m; + } +} + template void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) { device const int8_t * qs = ((device const int8_t *)xb->qs); - const half d = xb->d; + const float d = xb->d; float4x4 reg_f; @@ -170,6 +265,16 @@ void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg reg = (type4x4) reg_f; } +template +void dequantize_q8_0_t4(device const block_q8_0 *xb, short il, thread type4 & reg) { + device const int8_t * qs = ((device const int8_t *)xb->qs); + const float d = xb->d; + + for (int i = 0; i < 4; i++) { + reg[i] = (qs[4*(il%4) + i + 16*(il/4)] * d); + } +} + template void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) { const float d = xb->d; @@ -224,7 +329,7 @@ static inline uchar2 get_scale_min_k4_just2(int j, int k, device const uchar * q } template -void dequantize_q4_K(device const block_q4_K *xb, short il, thread type4x4 & reg) { +void dequantize_q4_K(device const block_q4_K * xb, short il, thread type4x4 & reg) { device const uchar * q = xb->qs; short is = (il/4) * 2; @@ -236,7 +341,7 @@ void dequantize_q4_K(device const block_q4_K *xb, short il, thread type4x4 & reg const float dl = d * sc[0]; const float ml = min * sc[1]; - const ushort mask = il<2 ? 0x0F : 0xF0; + const ushort mask = il < 2 ? 0x0F : 0xF0; for (int i = 0; i < 16; ++i) { reg[i/4][i%4] = dl * (q[i] & mask) - ml; } @@ -469,6 +574,19 @@ void dequantize_iq4_nl(device const block_iq4_nl * xb, short il, thread type4x4 } } +template +void dequantize_iq4_nl_t4(device const block_iq4_nl * xb, short il, thread type4 & reg) { + device const uint16_t * q4 = (device const uint16_t *)xb->qs; + const float d = xb->d; + uint32_t aux32; + thread const uint8_t * q8 = (thread const uint8_t *)&aux32; + aux32 = ((q4[2*(il%4)] | (q4[2*(il%4)+1] << 16)) >> 4*(il/4)) & 0x0f0f0f0f; + reg[0] = d * kvalues_iq4nl_f[q8[0]]; + reg[1] = d * kvalues_iq4nl_f[q8[1]]; + reg[2] = d * kvalues_iq4nl_f[q8[2]]; + reg[3] = d * kvalues_iq4nl_f[q8[3]]; +} + template void dequantize_iq4_xs(device const block_iq4_xs * xb, short il, thread type4x4 & reg) { // il is 0...15 for QK_K = 256 => index of block of 32 is il/2 @@ -782,6 +900,14 @@ kernel void kernel_silu_4( dst[tpig] = x / (1.0f + exp(-x)); } +kernel void kernel_elu( + device const float * src0, + device float * dst, + uint tpig[[thread_position_in_grid]]) { + device const float & x = src0[tpig]; + dst[tpig] = (x > 0.0f) ? 
x : (exp(x) - 1.0f); +} + kernel void kernel_sqr( device const float * src0, device float * dst, @@ -1240,6 +1366,63 @@ kernel void kernel_ssm_scan_f32( } } +kernel void kernel_argmax( + device const void * x, + device int32_t * dst, + constant int64_t & ncols, + constant uint64_t & nb01, + threadgroup float * shared_maxval [[threadgroup(0)]], + threadgroup int32_t * shared_argmax [[threadgroup(1)]], + uint tgpig[[threadgroup_position_in_grid]], + uint tpitg[[thread_position_in_threadgroup]], + uint sgitg[[simdgroup_index_in_threadgroup]], + uint tiisg[[thread_index_in_simdgroup]], + uint ntg[[threads_per_threadgroup]]) { + device const float * x_row = (device const float *) ((device const char *) x + tgpig * nb01); + + float lmax = -INFINITY; + int32_t larg = -1; + + for (int i00 = tpitg; i00 < ncols; i00 += ntg) { + if (x_row[i00] > lmax) { + lmax = x_row[i00]; + larg = i00; + } + } + + // find the argmax value in the block + float max_val = simd_max(lmax); + int32_t arg_val = simd_max(select(-1, larg, lmax == max_val)); + + if (ntg > N_SIMDWIDTH) { + if (sgitg == 0) { + shared_maxval[tiisg] = -INFINITY; + shared_argmax[tiisg] = -1; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + if (tiisg == 0) { + shared_maxval[sgitg] = max_val; + shared_argmax[sgitg] = arg_val; + } + + threadgroup_barrier(mem_flags::mem_threadgroup); + + max_val = shared_maxval[tiisg]; + arg_val = shared_argmax[tiisg]; + + float max_val_reduced = simd_max(max_val); + int32_t arg_val_reduced = simd_max(select(-1, arg_val, max_val == max_val_reduced)); + + dst[tgpig] = arg_val_reduced; + + return; + } + + dst[tgpig] = arg_val; +} + kernel void kernel_norm( constant ggml_metal_kargs_norm & args, device const char * src0, @@ -1744,6 +1927,301 @@ kernel void kernel_mul_mv_q8_0_f32( kernel_mul_mv_q8_0_f32_impl(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg); } +// mat-vec kernel processing in chunks of float4 +// chpb - chunks per quantization block +template +void kernel_mul_mv_ext_q4_f32_impl( + constant ggml_metal_kargs_mul_mv_ext & args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]]) { + const short chpt = 4; // chunks per thread + + //const short nxpsg = (32); + const short nypsg = (32/nxpsg); + + const short tx = tiisg%nxpsg; + const short ty = tiisg/nxpsg; + + const int i01 = tgpig.x*(nypsg*args.nsg) + nypsg*sgitg + ty; + const int i11 = tgpig.y*r1ptg; + const int i1m = tgpig.z; + + const int i12 = i1m%args.ne12; + const int i13 = i1m/args.ne12; + + const uint64_t offset0 = i01*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; + const uint64_t offset1 = i11*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; + + device const q_t * xq = (i01 < args.ne01) ? (device const q_t *) (src0 + offset0) + tx/chpb : (device const q_t *) src0; + + device const float4 * y4[r1ptg]; + + for (int ir1 = 0; ir1 < r1ptg; ++ir1) { + y4[ir1] = (i11 + ir1 < args.ne11) ? (device const float4 *) (src1 + offset1 + ir1*args.nb11) + tx : (device const float4 *) src1; + } + + float sumf[r1ptg] = { [ 0 ... 
r1ptg - 1 ] = 0.0f }; + + short cch = tx%chpb; // current chunk index + + for (int ich = tx; 4*ich < args.ne00; ich += chpt*nxpsg) { + float4 lx[chpt]; + +#pragma unroll(chpt) + for (short ch = 0; ch < chpt; ++ch) { + deq_t4(xq, cch, lx[ch]); + + cch += nxpsg; + if (cch >= chpb) { + xq += cch/chpb; + cch %= chpb; + } + } + +#pragma unroll(chpt) + for (short ch = 0; ch < chpt; ++ch) { +#pragma unroll(r1ptg) + for (short ir1 = 0; ir1 < r1ptg; ++ir1) { + sumf[ir1] += dot(lx[ch], y4[ir1][ch*nxpsg]); + + } + } + +#pragma unroll(r1ptg) + for (short ir1 = 0; ir1 < r1ptg; ++ir1) { + y4[ir1] += chpt*nxpsg; + } + } + + // reduce only the threads in each row + for (short ir1 = 0; ir1 < r1ptg; ++ir1) { + if (nxpsg >= 32) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 16); + } + if (nxpsg >= 16) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 8); + } + if (nxpsg >= 8) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 4); + } + if (nxpsg >= 4) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 2); + } + if (nxpsg >= 2) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 1); + } + + //sumf[ir1] = simd_sum(sumf[ir1]); + } + + if (tx == 0) { + for (short ir1 = 0; ir1 < r1ptg && i11 + ir1 < args.ne11; ++ir1) { + device float * dst_f32 = (device float *) dst + (uint64_t)i1m*args.ne0*args.ne1 + (uint64_t)(i11 + ir1)*args.ne0; + + if (i01 < args.ne01) { + dst_f32[i01] = sumf[ir1]; + } + } + } +} + +// mat-vec kernel processing in chunks of float4x4 +template +void kernel_mul_mv_ext_q4x4_f32_impl( + constant ggml_metal_kargs_mul_mv_ext & args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]]) { + const short chpt = 1; + + //const short nxpsg = (32); + const short nypsg = (32/nxpsg); + + const short tx = tiisg%nxpsg; + const short ty = tiisg/nxpsg; + + const int i01 = tgpig.x*(nypsg*args.nsg) + nypsg*sgitg + ty; + const int i11 = tgpig.y*r1ptg; + const int i1m = tgpig.z; + + const int i12 = i1m%args.ne12; + const int i13 = i1m/args.ne12; + + const uint64_t offset0 = i01*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03; + const uint64_t offset1 = i11*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13; + + device const q_t * xq = (i01 < args.ne01) ? (device const q_t *) (src0 + offset0) + tx/chpb : (device const q_t *) src0; + + device const float4x4 * y4x4[r1ptg]; + + for (int ir1 = 0; ir1 < r1ptg; ++ir1) { + y4x4[ir1] = (i11 + ir1 < args.ne11) ? (device const float4x4 *) (src1 + offset1 + ir1*args.nb11) + tx : (device const float4x4 *) src1; + } + + float sumf[r1ptg] = { [ 0 ... 
r1ptg - 1 ] = 0.0f }; + + short cch = tx%chpb; + + for (int ich = tx; 16*ich < args.ne00; ich += chpt*nxpsg) { + float4x4 lx[chpt]; + +#pragma unroll(chpt) + for (short ch = 0; ch < chpt; ++ch) { + deq_t4x4(xq, cch, lx[ch]); + + cch += nxpsg; + if (cch >= chpb) { + xq += cch/chpb; + cch %= chpb; + } + } + +#pragma unroll(chpt) + for (short ch = 0; ch < chpt; ++ch) { +#pragma unroll(r1ptg) + for (short ir1 = 0; ir1 < r1ptg; ++ir1) { + sumf[ir1] += + dot(lx[ch][0], y4x4[ir1][ch*nxpsg][0]) + + dot(lx[ch][1], y4x4[ir1][ch*nxpsg][1]) + + dot(lx[ch][2], y4x4[ir1][ch*nxpsg][2]) + + dot(lx[ch][3], y4x4[ir1][ch*nxpsg][3]); + + } + } + +#pragma unroll(r1ptg) + for (short ir1 = 0; ir1 < r1ptg; ++ir1) { + y4x4[ir1] += chpt*nxpsg; + } + } + + for (short ir1 = 0; ir1 < r1ptg; ++ir1) { + if (nxpsg >= 32) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 16); + } + if (nxpsg >= 16) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 8); + } + if (nxpsg >= 8) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 4); + } + if (nxpsg >= 4) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 2); + } + if (nxpsg >= 2) { + sumf[ir1] += simd_shuffle_down(sumf[ir1], 1); + } + + //sumf[ir1] = simd_sum(sumf[ir1]); + } + + if (tx == 0) { + for (short ir1 = 0; ir1 < r1ptg && i11 + ir1 < args.ne11; ++ir1) { + device float * dst_f32 = (device float *) dst + (uint64_t)i1m*args.ne0*args.ne1 + (uint64_t)(i11 + ir1)*args.ne0; + + if (i01 < args.ne01) { + dst_f32[i01] = sumf[ir1]; + } + } + } +} + +// dispatchers needed for compile-time nxpsg +// epb - elements per quantization block +template +kernel void kernel_mul_mv_ext_q4_f32_disp( + constant ggml_metal_kargs_mul_mv_ext & args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]]) { + switch (args.nxpsg) { + case 4: kernel_mul_mv_ext_q4_f32_impl<4, r1ptg, q_t, epb/4, deq_t4>(args, src0, src1, dst, tgpig, tiisg, sgitg); break; + case 8: kernel_mul_mv_ext_q4_f32_impl<8, r1ptg, q_t, epb/4, deq_t4>(args, src0, src1, dst, tgpig, tiisg, sgitg); break; + case 16: kernel_mul_mv_ext_q4_f32_impl<16, r1ptg, q_t, epb/4, deq_t4>(args, src0, src1, dst, tgpig, tiisg, sgitg); break; + case 32: kernel_mul_mv_ext_q4_f32_impl<32, r1ptg, q_t, epb/4, deq_t4>(args, src0, src1, dst, tgpig, tiisg, sgitg); break; + } +} + +template +kernel void kernel_mul_mv_ext_q4x4_f32_disp( + constant ggml_metal_kargs_mul_mv_ext & args, + device const char * src0, + device const char * src1, + device char * dst, + uint3 tgpig[[threadgroup_position_in_grid]], + ushort tiisg[[thread_index_in_simdgroup]], + ushort sgitg[[simdgroup_index_in_threadgroup]]) { + switch (args.nxpsg) { + case 4: kernel_mul_mv_ext_q4x4_f32_impl<4, r1ptg, q_t, epb/16, deq_t4x4>(args, src0, src1, dst, tgpig, tiisg, sgitg); break; + case 8: kernel_mul_mv_ext_q4x4_f32_impl<8, r1ptg, q_t, epb/16, deq_t4x4>(args, src0, src1, dst, tgpig, tiisg, sgitg); break; + case 16: kernel_mul_mv_ext_q4x4_f32_impl<16, r1ptg, q_t, epb/16, deq_t4x4>(args, src0, src1, dst, tgpig, tiisg, sgitg); break; + case 32: kernel_mul_mv_ext_q4x4_f32_impl<32, r1ptg, q_t, epb/16, deq_t4x4>(args, src0, src1, dst, tgpig, tiisg, sgitg); break; + } +} + +typedef decltype(kernel_mul_mv_ext_q4_f32_disp <2, block_q8_0, 32, dequantize_q8_0_t4>) mul_mv_ext_q4_f32_t; +typedef decltype(kernel_mul_mv_ext_q4x4_f32_disp<2, block_q4_K, 256, dequantize_q4_K>) mul_mv_ext_q4x4_f32_t; + +template [[host_name("kernel_mul_mv_ext_f16_f32_r1_2")]] 
kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, half4, 4, dequantize_f16_t4>; +template [[host_name("kernel_mul_mv_ext_f16_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, half4, 4, dequantize_f16_t4>; +template [[host_name("kernel_mul_mv_ext_f16_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, half4, 4, dequantize_f16_t4>; +template [[host_name("kernel_mul_mv_ext_f16_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, half4, 4, dequantize_f16_t4>; + +template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q4_0, 32, dequantize_q4_0_t4>; +template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q4_0, 32, dequantize_q4_0_t4>; +template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q4_0, 32, dequantize_q4_0_t4>; +template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q4_0, 32, dequantize_q4_0_t4>; + +template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q4_1, 32, dequantize_q4_1_t4>; +template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q4_1, 32, dequantize_q4_1_t4>; +template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q4_1, 32, dequantize_q4_1_t4>; +template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q4_1, 32, dequantize_q4_1_t4>; + +template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q5_0, 32, dequantize_q5_0_t4>; +template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q5_0, 32, dequantize_q5_0_t4>; +template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q5_0, 32, dequantize_q5_0_t4>; +template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q5_0, 32, dequantize_q5_0_t4>; + +template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q5_1, 32, dequantize_q5_1_t4>; +template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q5_1, 32, dequantize_q5_1_t4>; +template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q5_1, 32, dequantize_q5_1_t4>; +template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q5_1, 32, dequantize_q5_1_t4>; + +template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q8_0, 32, dequantize_q8_0_t4>; +template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q8_0, 32, dequantize_q8_0_t4>; +template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q8_0, 32, dequantize_q8_0_t4>; +template 
[[host_name("kernel_mul_mv_ext_q8_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q8_0, 32, dequantize_q8_0_t4>; + +template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_iq4_nl, 32, dequantize_iq4_nl_t4>; +template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_iq4_nl, 32, dequantize_iq4_nl_t4>; +template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_iq4_nl, 32, dequantize_iq4_nl_t4>; +template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_iq4_nl, 32, dequantize_iq4_nl_t4>; + +template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q4_K, 256, dequantize_q4_K>; +template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q4_K, 256, dequantize_q4_K>; +template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q4_K, 256, dequantize_q4_K>; +template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q4_K, 256, dequantize_q4_K>; + +template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q5_K, 256, dequantize_q5_K>; +template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q5_K, 256, dequantize_q5_K>; +template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q5_K, 256, dequantize_q5_K>; +template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q5_K, 256, dequantize_q5_K>; + +template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q6_K, 256, dequantize_q6_K>; +template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q6_K, 256, dequantize_q6_K>; +template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q6_K, 256, dequantize_q6_K>; +template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q6_K, 256, dequantize_q6_K>; + #define N_MV_T_T 4 template @@ -2137,20 +2615,34 @@ kernel void kernel_im2col( uint3 tgpg[[threadgroups_per_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { - const int32_t iiw = tgpig[2] * s0 + tpitg[2] * d0 - p0; - const int32_t iih = tgpig[1] * s1 + tpitg[1] * d1 - p1; +// const int64_t IC = tgpg[0]; + const int64_t OH = tgpg[1]; + const int64_t OW = tgpg[2]; - const int32_t offset_dst = - (tpitg[0] * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW + - (tgpig[0] * (ntg[1] * ntg[2]) + tpitg[1] * ntg[2] + tpitg[2]); +// const int64_t N = ntg[0]; + const int64_t KH = ntg[1]; + const int64_t KW = ntg[2]; + + const int64_t in = tpitg[0]; + const int64_t ikh = tpitg[1]; + const int64_t ikw = tpitg[2]; + + const int64_t iic = tgpig[0]; + const int64_t ioh = tgpig[1]; + const int64_t iow = 
tgpig[2];
+
+    const int64_t iiw = iow*s0 + ikw*d0 - p0;
+    const int64_t iih = ioh*s1 + ikh*d1 - p1;
+
+    const int64_t offset_dst = (in*OH*OW + ioh*OW + iow)*CHW + (iic*(KH*KW) + ikh*KW + ikw);
 
     device T * pdst = (device T *) (dst);
 
     if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
         pdst[offset_dst] = 0.0f;
     } else {
-        const int32_t offset_src = tpitg[0] * ofs0 + tgpig[0] * ofs1;
-        pdst[offset_dst] = x[offset_src + iih * IW + iiw];
+        const int64_t offset_src = in*ofs0 + iic*ofs1 + iih*IW + iiw;
+        pdst[offset_dst] = x[offset_src];
     }
 }
 
@@ -2201,25 +2693,25 @@ kernel void kernel_im2col_ext(
         uint3 tgpg[[threadgroups_per_grid]],        // tgpg[0] = D x IC x KH x KW, CHW = IC x KH x KW
         uint3 tpitg[[thread_position_in_threadgroup]],
         uint3 ntg[[threads_per_threadgroup]]) {     // [M, 1, 1]
-    const int32_t KHW = KH * KW;         // KHW == ntg[1] * ntg[2], KW == ntg[2]
+    const int64_t KHW = KH * KW;         // KHW == ntg[1] * ntg[2], KW == ntg[2]
 
-    const int32_t d = tgpig[0] / CHW;
-    const int32_t chw = tgpig[0] % CHW;
-    const int32_t tgpig_0 = chw / KHW;   // 0 ~ (IC - 1)
-    const int32_t HW = tgpig[0] % KHW;
+    const int64_t d = tgpig[0] / CHW;
+    const int64_t chw = tgpig[0] % CHW;
+    const int64_t tgpig_0 = chw / KHW;   // 0 ~ (IC - 1)
+    const int64_t HW = tgpig[0] % KHW;
 
-    const int32_t tpitg_0 = (d * ntg[0]) + tpitg[0];
+    const int64_t tpitg_0 = (d * ntg[0]) + tpitg[0];
     if (tpitg_0 >= N) {
         return;
     }
 
-    const int32_t tpitg_1 = HW / KW;
-    const int32_t tpitg_2 = HW % KW;
+    const int64_t tpitg_1 = HW / KW;
+    const int64_t tpitg_2 = HW % KW;
 
-    const int32_t iiw = tgpig[2] * s0 + tpitg_2 * d0 - p0;
-    const int32_t iih = tgpig[1] * s1 + tpitg_1 * d1 - p1;
+    const int64_t iiw = tgpig[2] * s0 + tpitg_2 * d0 - p0;
+    const int64_t iih = tgpig[1] * s1 + tpitg_1 * d1 - p1;
 
-    const int32_t offset_dst =
+    const int64_t offset_dst =
         (tpitg_0 * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * CHW +
         (tgpig_0 * KHW + tpitg_1 * KW + tpitg_2);
 
@@ -2228,7 +2720,7 @@ kernel void kernel_im2col_ext(
     if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
         pdst[offset_dst] = 0.0f;
     } else {
-        const int32_t offset_src = tpitg_0 * ofs0 + tgpig_0 * ofs1;
+        const int64_t offset_src = tpitg_0 * ofs0 + tgpig_0 * ofs1;
         pdst[offset_dst] = x[offset_src + iih * IW + iiw];
     }
 }
@@ -2236,6 +2728,79 @@ kernel void kernel_im2col_ext(
 template [[host_name("kernel_im2col_ext_f32")]] kernel im2col_ext_t kernel_im2col_ext<float>;
 template [[host_name("kernel_im2col_ext_f16")]] kernel im2col_ext_t kernel_im2col_ext<half>;
 
+typedef void (conv_transpose_1d_t)(
+        device const float * src0,
+        device const float * src1,
+        device        char * dst,
+        constant   int32_t & IC,
+        constant   int32_t & IL,
+        constant   int32_t & K,
+        constant   int32_t & s0,
+        constant  uint64_t & nb0,
+        constant  uint64_t & nb1,
+        uint3 tgpig[[threadgroup_position_in_grid]],
+        uint3 tgpg[[threadgroups_per_grid]]);
+
+template <typename T>
+kernel void kernel_conv_transpose_1d(
+        device const     T * src0,
+        device const float * src1,
+        device        char * dst,
+        constant   int32_t & IC,
+        constant   int32_t & IL,
+        constant   int32_t & K,
+        constant   int32_t & s0,
+        constant  uint64_t & nb0,
+        constant  uint64_t & nb1,
+        uint3 tgpig[[threadgroup_position_in_grid]],
+        uint3 tgpg[[threadgroups_per_grid]]) {
+
+    float v = 0.0f;
+
+    for (int64_t c = 0; c < IC; c++) {
+        const int32_t kernel_offset = c * tgpg[1] * K + K * tgpig[1];
+        const int32_t input_offset = c * IL;
+
+        for (int64_t i = 0; i < IL; i++) {
+            if (tgpig[0] >= i * s0 && tgpig[0] < i * s0 + K) {
+                v += src0[kernel_offset + tgpig[0] - i * s0] * src1[input_offset + i];
+            }
+        }
+    }
+
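+    // Gather formulation of the transposed convolution: each threadgroup owns
+    // one output element tgpig[0] and accumulates, over all channels c, the
+    // contribution of every input position i whose kernel window
+    // [i*s0, i*s0 + K) covers that output index.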
+    device float * dst_ptr = (device float *) (dst + tgpig[0] * nb0 + tgpig[1] * nb1);
+
+    dst_ptr[0] = v;
+}
+
+template [[host_name("kernel_conv_transpose_1d_f32_f32")]]
+kernel void kernel_conv_transpose_1d<float>(
+        device const float * src0,
+        device const float * src1,
+        device        char * dst,
+        constant   int32_t & IC,
+        constant   int32_t & IL,
+        constant   int32_t & K,
+        constant   int32_t & s0,
+        constant  uint64_t & nb0,
+        constant  uint64_t & nb1,
+        uint3 tgpig[[threadgroup_position_in_grid]],
+        uint3 tgpg[[threadgroups_per_grid]]);
+
+template [[host_name("kernel_conv_transpose_1d_f16_f32")]]
+kernel void kernel_conv_transpose_1d<half>(
+        device const  half * src0,
+        device const float * src1,
+        device        char * dst,
+        constant   int32_t & IC,
+        constant   int32_t & IL,
+        constant   int32_t & K,
+        constant   int32_t & s0,
+        constant  uint64_t & nb0,
+        constant  uint64_t & nb1,
+        uint3 tgpig[[threadgroup_position_in_grid]],
+        uint3 tgpg[[threadgroups_per_grid]]);
+
 kernel void kernel_upscale_f32(
     device  const char * src0,
     device        char * dst,
@@ -2332,6 +2897,53 @@ kernel void kernel_pad_f32(
     }
 }
 
+kernel void kernel_pad_reflect_1d_f32(
+    device  const char * src0,
+    device        char * dst,
+    constant   int64_t & ne00,
+    constant   int64_t & ne01,
+    constant   int64_t & ne02,
+    constant   int64_t & ne03,
+    constant   int64_t & ne0,
+    constant  uint64_t & nb00,
+    constant  uint64_t & nb01,
+    constant  uint64_t & nb02,
+    constant  uint64_t & nb03,
+    constant  uint64_t & nb0,
+    constant  uint64_t & nb1,
+    constant  uint64_t & nb2,
+    constant  uint64_t & nb3,
+    constant   int32_t & p0,
+    constant   int32_t & p1,
+    uint3 tgpig[[threadgroup_position_in_grid]],
+    uint3  tgpg[[threadgroups_per_grid]],
+    uint3 tpitg[[thread_position_in_threadgroup]],
+    uint3   ntg[[threads_per_threadgroup]]) {
+
+    const int64_t i3 = tgpig.z;
+    const int64_t i2 = tgpig.y;
+    const int64_t i1 = tgpig.x;
+
+    const int64_t i03 = i3;
+    const int64_t i02 = i2;
+    const int64_t i01 = i1;
+
+    device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01);
+    device       float * dst_ptr  = (device       float *) (dst  +  i3*nb3  +  i2*nb2  +  i1*nb1);
+
+    if (i1 < ne01 && i2 < ne02 && i3 < ne03) {
+        for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
+            if (i0 < p0) {
+                dst_ptr[i0] = src0_ptr[p0 - i0];
+            } else if (i0 < ne0 - p1) {
+                dst_ptr[i0] = src0_ptr[i0 - p0];
+            } else {
+                dst_ptr[i0] = src0_ptr[(ne0 - p1 - p0) - (p1 + 1 - (ne0 - i0)) - 1];
+            }
+        }
+    }
+}
+
 kernel void kernel_arange_f32(
     device        char * dst,
     constant   int64_t & ne0,
@@ -3315,6 +3927,38 @@ template [[host_name("kernel_flash_attn_ext_vec_q8_0_h256")]] kernel flash_attn_
 
 #undef FA_TYPES
 
+template <typename T>
+kernel void kernel_set(
+        constant ggml_metal_kargs_set & args,
+        device const char * src0,
+        device const char * src1,
+        device       char * dst,
+        uint3   tgpig[[threadgroup_position_in_grid]],
+        ushort3 tpitg[[thread_position_in_threadgroup]],
+        ushort3   ntg[[threads_per_threadgroup]]) {
+    const int i13 = tgpig[2];
+    const int i12 = tgpig[1];
+    const int i11 = tgpig[0];
+
+    const int64_t n = i13*args.ne12*args.ne11*args.ne10 + i12*args.ne11*args.ne10 + i11*args.ne10;
+
+    const int64_t i3 = n / (args.ne12*args.ne11*args.ne10);
+    const int64_t i2 = (n - i3*args.ne12*args.ne11*args.ne10) / (args.ne11*args.ne10);
+    const int64_t i1 = (n - i3*args.ne12*args.ne11*args.ne10 - i2*args.ne11*args.ne10) / args.ne10;
+
+    device T * dst_data = (device T *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + args.offs);
+
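+    // The threads of this threadgroup cooperatively copy the ne10 elements of
+    // the current src1 row into the destination view; args.offs is the extra
+    // byte offset of the view within dst.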
+    for (int64_t i10 = tpitg.x; i10 < args.ne10; i10 += ntg.x) {
+        device const T * src = (device T *) (src1 + i13*args.nb13 + i12*args.nb12 + i11*args.nb11 + i10*args.nb10);
+        dst_data[i10] = (T) src[0];
+    }
+}
+
+typedef decltype(kernel_set<float>) kernel_set_t;
+
+template [[host_name("kernel_set_f32")]] kernel kernel_set_t kernel_set<float>;
+template [[host_name("kernel_set_i32")]] kernel kernel_set_t kernel_set<int32_t>;
+
 template <typename T0, typename T1>
 kernel void kernel_cpy(
         constant ggml_metal_kargs_cpy & args,
@@ -5425,12 +6069,12 @@ kernel void kernel_mul_mm(
     const int im = tgpig.z;
 
     // if this block is of 64x32 shape or smaller
-    short n_rows = (args.ne0 - r0*BLOCK_SIZE_M < BLOCK_SIZE_M) ? (args.ne0 - r0*BLOCK_SIZE_M) : BLOCK_SIZE_M;
-    short n_cols = (args.ne1 - r1*BLOCK_SIZE_N < BLOCK_SIZE_N) ? (args.ne1 - r1*BLOCK_SIZE_N) : BLOCK_SIZE_N;
+    const short n_rows = (args.ne0 - r0*BLOCK_SIZE_M < BLOCK_SIZE_M) ? (args.ne0 - r0*BLOCK_SIZE_M) : BLOCK_SIZE_M;
+    const short n_cols = (args.ne1 - r1*BLOCK_SIZE_N < BLOCK_SIZE_N) ? (args.ne1 - r1*BLOCK_SIZE_N) : BLOCK_SIZE_N;
 
     // a thread shouldn't load data outside of the matrix
-    short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
-    short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
+    const short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
+    const short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
 
     simdgroup_T8x8     ma[4];
     simdgroup_float8x8 mb[2];
@@ -5445,20 +6089,23 @@ kernel void kernel_mul_mm(
     const int i12 = im%args.ne12;
     const int i13 = im/args.ne12;
 
-    uint64_t offset0 = (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
-    short    offset1 = il/nl;
+    const uint64_t offset0 = (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+    const short    offset1 = il/nl;
+
+    device const block_q * x = (device const block_q *)(src0 +
+        args.nb01*(r0*BLOCK_SIZE_M + thread_row) + offset0) + offset1;
 
-    device const block_q * x = (device const block_q *)(src0 + (r0*BLOCK_SIZE_M + thread_row)*args.nb01 + offset0) + offset1;
     device const float   * y = (device const float   *)(src1
         + args.nb13*i13
         + args.nb12*i12
-        + args.nb11*(r1 * BLOCK_SIZE_N + thread_col)
+        + args.nb11*(r1*BLOCK_SIZE_N + thread_col)
         + args.nb10*(BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL)));
 
     for (int loop_k = 0; loop_k < args.ne00; loop_k += BLOCK_SIZE_K) {
         // load data and store to threadgroup memory
         T4x4 temp_a;
         dequantize_func(x, il, temp_a);
+
         threadgroup_barrier(mem_flags::mem_threadgroup);
 
 #pragma unroll(16)
@@ -5468,44 +6115,46 @@ kernel void kernel_mul_mm(
             + (tiitg/THREAD_PER_ROW)%8 + (i&7)*8) = temp_a[i/4][i%4];
         }
 
-        *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL)*8*32 + 8*(tiitg/THREAD_PER_COL)) = *((device float2x4 *) y);
+        *(threadgroup float2x4 *)(sb + 32*8*(tiitg%THREAD_PER_COL) + 8*(tiitg/THREAD_PER_COL)) = *((device float2x4 *) y);
 
         il = (il + 2 < nl) ? il + 2 : il % 2;
-        x  = (il < 2) ? x + (2+nl-1)/nl : x;
+        x  = (il < 2) ? 
x + (2 + nl - 1)/nl : x; y += BLOCK_SIZE_K; threadgroup_barrier(mem_flags::mem_threadgroup); // load matrices from threadgroup memory and conduct outer products - threadgroup T * lsma = (sa + THREAD_MAT_M*SG_MAT_SIZE*(sgitg%2)); - threadgroup float * lsmb = (sb + THREAD_MAT_N*SG_MAT_SIZE*(sgitg/2)); + threadgroup const T * lsma = (sa + THREAD_MAT_M*SG_MAT_SIZE*(sgitg%2)); + threadgroup const float * lsmb = (sb + THREAD_MAT_N*SG_MAT_SIZE*(sgitg/2)); #pragma unroll(4) - for (short ik = 0; ik < BLOCK_SIZE_K / 8; ik++) { + for (short ik = 0; ik < BLOCK_SIZE_K/8; ik++) { #pragma unroll(4) for (short i = 0; i < 4; i++) { simdgroup_load(ma[i], lsma + SG_MAT_SIZE * i); } + simdgroup_barrier(mem_flags::mem_none); + #pragma unroll(2) for (short i = 0; i < 2; i++) { simdgroup_load(mb[i], lsmb + SG_MAT_SIZE * i); } - lsma += BLOCK_SIZE_M/SG_MAT_ROW * SG_MAT_SIZE; - lsmb += BLOCK_SIZE_N/SG_MAT_ROW * SG_MAT_SIZE; - #pragma unroll(8) for (short i = 0; i < 8; i++){ simdgroup_multiply_accumulate(mc[i], mb[i/4], ma[i%4], mc[i]); } + + lsma += (BLOCK_SIZE_M/SG_MAT_ROW)*SG_MAT_SIZE; + lsmb += (BLOCK_SIZE_N/SG_MAT_ROW)*SG_MAT_SIZE; } } if ((r0 + 1) * BLOCK_SIZE_M <= args.ne0 && (r1 + 1) * BLOCK_SIZE_N <= args.ne1) { device float * C = (device float *) dst + - (BLOCK_SIZE_M * r0 + 32 * (sgitg & 1)) + \ - (BLOCK_SIZE_N * r1 + 16 * (sgitg >> 1)) * args.ne0 + im*args.ne1*args.ne0; + (BLOCK_SIZE_M * r0 + 32*(sgitg & 1)) + \ + (BLOCK_SIZE_N * r1 + 16*(sgitg >> 1)) * args.ne0 + im*args.ne1*args.ne0; for (short i = 0; i < 8; i++) { simdgroup_store(mc[i], C + 8 * (i%4) + 8 * args.ne0 * (i/4), args.ne0); @@ -5514,7 +6163,7 @@ kernel void kernel_mul_mm( // block is smaller than 64x32, we should avoid writing data outside of the matrix threadgroup_barrier(mem_flags::mem_threadgroup); threadgroup float * temp_str = ((threadgroup float *) shmem) \ - + 32 * (sgitg&1) + (16 * (sgitg>>1))*BLOCK_SIZE_M; + + 32*(sgitg&1) + (16*(sgitg >> 1))*BLOCK_SIZE_M; for (short i = 0; i < 8; i++) { simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*BLOCK_SIZE_M*(i/4), BLOCK_SIZE_M); } diff --git a/ggml/src/ggml-musa/CMakeLists.txt b/ggml/src/ggml-musa/CMakeLists.txt index f3c0136920540..415b2b2e09c1b 100644 --- a/ggml/src/ggml-musa/CMakeLists.txt +++ b/ggml/src/ggml-musa/CMakeLists.txt @@ -20,6 +20,11 @@ find_package(MUSAToolkit) if (MUSAToolkit_FOUND) message(STATUS "MUSA Toolkit found") + if (NOT DEFINED MUSA_ARCHITECTURES) + set(MUSA_ARCHITECTURES "21;22") + endif() + message(STATUS "Using MUSA architectures: ${MUSA_ARCHITECTURES}") + file(GLOB GGML_HEADERS_MUSA "../ggml-cuda/*.cuh") list(APPEND GGML_HEADERS_MUSA "../../include/ggml-cuda.h") @@ -44,15 +49,17 @@ if (MUSAToolkit_FOUND) set_source_files_properties(${GGML_SOURCES_MUSA} PROPERTIES LANGUAGE CXX) foreach(SOURCE ${GGML_SOURCES_MUSA}) - set_property(SOURCE ${SOURCE} PROPERTY COMPILE_FLAGS "-x musa -mtgpu --cuda-gpu-arch=mp_21 --cuda-gpu-arch=mp_22") + set(COMPILE_FLAGS "-x musa -mtgpu") + foreach(ARCH ${MUSA_ARCHITECTURES}) + set(COMPILE_FLAGS "${COMPILE_FLAGS} --cuda-gpu-arch=mp_${ARCH}") + endforeach() + set_property(SOURCE ${SOURCE} PROPERTY COMPILE_FLAGS ${COMPILE_FLAGS}) endforeach() - add_library(ggml-musa - ${GGML_HEADERS_MUSA} - ${GGML_SOURCES_MUSA}) - - target_link_libraries(ggml-musa PRIVATE ggml-base) - target_include_directories(ggml-musa PRIVATE . ..) 
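+    # ggml_add_backend_library is ggml's shared CMake helper for declaring
+    # backend targets (the new OpenCL backend below uses it as well); it
+    # subsumes the manual add_library/target_link_libraries/
+    # target_include_directories calls removed above.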
+ ggml_add_backend_library(ggml-musa + ${GGML_HEADERS_MUSA} + ${GGML_SOURCES_MUSA} + ) # TODO: do not use CUDA definitions for MUSA target_compile_definitions(ggml PUBLIC GGML_USE_CUDA) diff --git a/ggml/src/ggml-opencl/CMakeLists.txt b/ggml/src/ggml-opencl/CMakeLists.txt new file mode 100644 index 0000000000000..45328a6579320 --- /dev/null +++ b/ggml/src/ggml-opencl/CMakeLists.txt @@ -0,0 +1,147 @@ +find_package(OpenCL REQUIRED) +find_package(Python3 REQUIRED) + +set(TARGET_NAME ggml-opencl) + +ggml_add_backend_library(${TARGET_NAME} + ggml-opencl.cpp + ../../include/ggml-opencl.h) +target_link_libraries(${TARGET_NAME} PRIVATE ${OpenCL_LIBRARIES}) +target_include_directories(${TARGET_NAME} PRIVATE ${OpenCL_INCLUDE_DIRS}) + +if (GGML_OPENCL_PROFILING) + message(STATUS "OpenCL profiling enabled (increases CPU overhead)") + add_compile_definitions(GGML_OPENCL_PROFILING) +endif () + +add_compile_definitions(GGML_OPENCL_SOA_Q) + +if (GGML_OPENCL_USE_ADRENO_KERNELS) + message(STATUS "OpenCL will use matmul kernels optimized for Adreno") + add_compile_definitions(GGML_OPENCL_USE_ADRENO_KERNELS) +endif () + +if (GGML_OPENCL_EMBED_KERNELS) + add_compile_definitions(GGML_OPENCL_EMBED_KERNELS) + + set(OPENCL_CL_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl.cl.h") + set(OPENCL_MM_CL_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_mm.cl.h") + set(OPENCL_CVT_CL_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_cvt.cl.h") + + set(OPENCL_GEMV_NOSHUFFLE_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_gemv_noshuffle.cl.h") + set(OPENCL_GEMV_NOSHUFFLE_GENERAL_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_gemv_noshuffle_general.cl.h") + set(OPENCL_MUL_MAT_Ab_Bi_8x4_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_mul_mat_Ab_Bi_8x4.cl.h") + set(OPENCL_TRANSPOSE_16_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_transpose_16.cl.h") + set(OPENCL_TRANSPOSE_32_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_transpose_32.cl.h") + set(OPENCL_TRANSPOSE_32_16_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-opencl_transpose_32_16.cl.h") + + set(EMBED_KERNEL_SCRIPT "${CMAKE_CURRENT_SOURCE_DIR}/kernels/embed_kernel.py") + file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/autogenerated") + + include_directories("${CMAKE_BINARY_DIR}/autogenerated") + + # Python must be accessible from command line + add_custom_command( + OUTPUT ${OPENCL_CL_SOURCE_EMBED} + COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} + ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl.cl + ${OPENCL_CL_SOURCE_EMBED} + DEPENDS kernels/ggml-opencl.cl ${EMBED_KERNEL_SCRIPT} + COMMENT "Generate ggml-opencl.cl.h" + ) + + add_custom_command( + OUTPUT ${OPENCL_MM_CL_SOURCE_EMBED} + COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} + ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_mm.cl + ${OPENCL_MM_CL_SOURCE_EMBED} + DEPENDS kernels/ggml-opencl_mm.cl ${EMBED_KERNEL_SCRIPT} + COMMENT "Generate ggml-opencl_mm.cl.h" + ) + + add_custom_command( + OUTPUT ${OPENCL_CVT_CL_SOURCE_EMBED} + COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} + ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_cvt.cl + ${OPENCL_CVT_CL_SOURCE_EMBED} + DEPENDS kernels/ggml-opencl_cvt.cl ${EMBED_KERNEL_SCRIPT} + COMMENT "Generate ggml-opencl_cvt.cl.h" + ) + + add_custom_command( + OUTPUT ${OPENCL_GEMV_NOSHUFFLE_SOURCE_EMBED} + COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT} + ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_gemv_noshuffle.cl + ${OPENCL_GEMV_NOSHUFFLE_SOURCE_EMBED} + DEPENDS 
kernels/ggml-opencl_gemv_noshuffle.cl ${EMBED_KERNEL_SCRIPT}
+        COMMENT "Generate ggml-opencl_gemv_noshuffle.cl.h"
+    )
+
+    add_custom_command(
+        OUTPUT ${OPENCL_GEMV_NOSHUFFLE_GENERAL_SOURCE_EMBED}
+        COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT}
+            ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_gemv_noshuffle_general.cl
+            ${OPENCL_GEMV_NOSHUFFLE_GENERAL_SOURCE_EMBED}
+        DEPENDS kernels/ggml-opencl_gemv_noshuffle_general.cl ${EMBED_KERNEL_SCRIPT}
+        COMMENT "Generate ggml-opencl_gemv_noshuffle_general.cl.h"
+    )
+
+    add_custom_command(
+        OUTPUT ${OPENCL_MUL_MAT_Ab_Bi_8x4_SOURCE_EMBED}
+        COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT}
+            ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl
+            ${OPENCL_MUL_MAT_Ab_Bi_8x4_SOURCE_EMBED}
+        DEPENDS kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl ${EMBED_KERNEL_SCRIPT}
+        COMMENT "Generate ggml-opencl_mul_mat_Ab_Bi_8x4.cl.h"
+    )
+
+    add_custom_command(
+        OUTPUT ${OPENCL_TRANSPOSE_16_SOURCE_EMBED}
+        COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT}
+            ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_transpose_16.cl
+            ${OPENCL_TRANSPOSE_16_SOURCE_EMBED}
+        DEPENDS kernels/ggml-opencl_transpose_16.cl ${EMBED_KERNEL_SCRIPT}
+        COMMENT "Generate ggml-opencl_transpose_16.cl.h"
+    )
+
+    add_custom_command(
+        OUTPUT ${OPENCL_TRANSPOSE_32_SOURCE_EMBED}
+        COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT}
+            ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_transpose_32.cl
+            ${OPENCL_TRANSPOSE_32_SOURCE_EMBED}
+        DEPENDS kernels/ggml-opencl_transpose_32.cl ${EMBED_KERNEL_SCRIPT}
+        COMMENT "Generate ggml-opencl_transpose_32.cl.h"
+    )
+
+    add_custom_command(
+        OUTPUT ${OPENCL_TRANSPOSE_32_16_SOURCE_EMBED}
+        COMMAND ${Python3_EXECUTABLE} ${EMBED_KERNEL_SCRIPT}
+            ${CMAKE_CURRENT_SOURCE_DIR}/kernels/ggml-opencl_transpose_32_16.cl
+            ${OPENCL_TRANSPOSE_32_16_SOURCE_EMBED}
+        DEPENDS kernels/ggml-opencl_transpose_32_16.cl ${EMBED_KERNEL_SCRIPT}
+        COMMENT "Generate ggml-opencl_transpose_32_16.cl.h"
+    )
+
+    target_sources(${TARGET_NAME} PRIVATE
+        ${OPENCL_CL_SOURCE_EMBED}
+        ${OPENCL_MM_CL_SOURCE_EMBED}
+        ${OPENCL_CVT_CL_SOURCE_EMBED}
+        ${OPENCL_GEMV_NOSHUFFLE_SOURCE_EMBED}
+        ${OPENCL_GEMV_NOSHUFFLE_GENERAL_SOURCE_EMBED}
+        ${OPENCL_MUL_MAT_Ab_Bi_8x4_SOURCE_EMBED}
+        ${OPENCL_TRANSPOSE_16_SOURCE_EMBED}
+        ${OPENCL_TRANSPOSE_32_SOURCE_EMBED}
+        ${OPENCL_TRANSPOSE_32_16_SOURCE_EMBED})
+else ()
+    # copy ggml-opencl.cl to bin directory
+    configure_file(kernels/ggml-opencl.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl.cl COPYONLY)
+    configure_file(kernels/ggml-opencl_mm.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_mm.cl COPYONLY)
+    configure_file(kernels/ggml-opencl_cvt.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_cvt.cl COPYONLY)
+
+    configure_file(kernels/ggml-opencl_gemv_noshuffle.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_gemv_noshuffle.cl COPYONLY)
+    configure_file(kernels/ggml-opencl_gemv_noshuffle_general.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_gemv_noshuffle_general.cl COPYONLY)
+    configure_file(kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_mul_mat_Ab_Bi_8x4.cl COPYONLY)
+    configure_file(kernels/ggml-opencl_transpose_16.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_transpose_16.cl COPYONLY)
+    configure_file(kernels/ggml-opencl_transpose_32.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_transpose_32.cl COPYONLY)
+    configure_file(kernels/ggml-opencl_transpose_32_16.cl ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-opencl_transpose_32_16.cl COPYONLY)
+endif ()
diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp
new file mode 100644
index 0000000000000..c77d629f08760
--- /dev/null
+++ b/ggml/src/ggml-opencl/ggml-opencl.cpp
@@ -0,0 +1,4004 @@
+#define CL_TARGET_OPENCL_VERSION 220
+#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
+
+// suppress warnings in CL headers for GCC and Clang
+#pragma GCC diagnostic ignored "-Woverlength-strings"
+#ifdef __clang__
+#pragma GCC diagnostic ignored "-Wgnu-anonymous-struct"
+#endif
+
+#include "ggml-opencl.h"
+#include "ggml-backend.h"
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+#include "ggml.h"
+
+#include <CL/cl.h>
+
+#include <string.h>
+
+#include <cstddef>
+#include <cstdint>
+#include <atomic>
+#include <fstream>
+#include <limits>
+#include <vector>
+#include <string>
+#include <cmath>
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define UNUSED(x) (void)(x)
+
+#define CL_CHECK(err)                                               \
+    do {                                                            \
+        cl_int err_ = (err);                                        \
+        if (err_ != CL_SUCCESS) {                                   \
+            GGML_LOG_ERROR("ggml_opencl: %s error %d at %s:%d\n",   \
+                #err, err_, __FILE__, __LINE__);                    \
+            GGML_ASSERT(0);                                         \
+        }                                                           \
+    } while (0)
+
+//------------------------------------------------------------------------------
+// OpenCL
+//------------------------------------------------------------------------------
+
+bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor);
+
+enum GPU_FAMILY {
+    ADRENO,
+    INTEL,
+    UNKNOWN,
+};
+
+enum ADRENO_GPU_GEN {
+    ADRENO_UNKNOWN,
+    A7X,
+    A8X,
+    X1E,
+};
+
+static ADRENO_GPU_GEN get_adreno_gpu_gen(const char *device_name) {
+    if (strstr(device_name, "730") ||
+        strstr(device_name, "740") ||
+        strstr(device_name, "750")) {
+        return ADRENO_GPU_GEN::A7X;
+    }
+
+    if (strstr(device_name, "830")) {
+        return ADRENO_GPU_GEN::A8X;
+    }
+
+    if (strstr(device_name, "X1")) {
+        return ADRENO_GPU_GEN::X1E;
+    }
+
+    return ADRENO_GPU_GEN::ADRENO_UNKNOWN;
+}
+
+static int get_adreno_cl_compiler_version(const char *driver_version) {
+    std::string driver_ver_str(driver_version);
+    size_t compiler_ver_pos = driver_ver_str.find("E031");
+    size_t compiler_ver_len = 13;
+    size_t compiler_ver_offset = 5;
+
+    if (compiler_ver_pos == std::string::npos) {
+        compiler_ver_pos = driver_ver_str.find("DX");
+        if (compiler_ver_pos == std::string::npos) {
+            return -1;
+        }
+        compiler_ver_len = 11;
+        compiler_ver_offset = 3;
+    }
+
+    std::string compiler_ver_str = driver_ver_str.substr(compiler_ver_pos, compiler_ver_len);
+    std::string major_ver_str = compiler_ver_str.substr(compiler_ver_offset, 2);
+    return std::atoi(major_ver_str.c_str());
+}
+
+// backend device context
+struct ggml_backend_opencl_device_context {
+    cl_platform_id platform;
+    std::string platform_name;
+
+    cl_device_id device;
+    std::string device_name;
+};
+
+// backend context
+struct ggml_backend_opencl_context {
+    cl_device_id device;
+    std::string device_name;
+
+    std::string driver_version;
+
+    GPU_FAMILY gpu_family;
+    ADRENO_GPU_GEN adreno_gen;
+
+    cl_int alignment;
+    size_t max_alloc_size;
+    bool fp16_support;
+
+    int adreno_wave_size;
+
+    cl_context context;
+    cl_command_queue queue;
+
+    cl_program program;
+    cl_program program_1;
+    cl_program program_2;
+
+    cl_kernel kernel_add, kernel_add_row;
+    cl_kernel kernel_mul, kernel_mul_row;
+    cl_kernel kernel_scale;
+    cl_kernel kernel_silu, kernel_silu_4;
+    cl_kernel kernel_gelu, kernel_gelu_4;
+    cl_kernel kernel_relu;
+    cl_kernel kernel_clamp;
+    cl_kernel kernel_norm;
+    cl_kernel kernel_rms_norm;
+    cl_kernel kernel_diag_mask_inf, kernel_diag_mask_inf_8;
+    cl_kernel kernel_soft_max, kernel_soft_max_4;
+    cl_kernel kernel_get_rows_f32, kernel_get_rows_f16, kernel_get_rows_q4_0;
+    cl_kernel kernel_rope_norm_f32, kernel_rope_norm_f16, kernel_rope_neox_f32, kernel_rope_neox_f16;
+    cl_kernel kernel_cpy_f16_f16, kernel_cpy_f16_f32, kernel_cpy_f32_f16, kernel_cpy_f32_f32;
+    cl_kernel kernel_mul_mat_f32_f32;
+    cl_kernel kernel_mul_mat_f16_f16;
+    cl_kernel kernel_mul_mat_f16_f32_1row;
+    cl_kernel kernel_mul_mat_f16_f32;
+    cl_kernel kernel_mul_mat_f16_f32_l4;
+    cl_kernel kernel_mul_mat_q4_0_f32, kernel_mul_mat_q4_0_f32_v;
+    cl_kernel kernel_convert_block_q4_0, kernel_restore_block_q4_0, kernel_mul_mat_q4_0_f32_flat;
+    cl_kernel kernel_mul_mat_q4_0_f32_8x_flat;
+    cl_kernel kernel_convert_block_q4_0_noshuffle, kernel_mul_mat_q4_0_f32_flat_v0,
+              kernel_mul_mat_q4_0_f32_flat_img_v0;
+    cl_kernel kernel_mul_mat_q4_0_f32_1d_8x_flat, kernel_mul_mat_q4_0_f32_1d_16x_flat;
+    cl_kernel kernel_mul_mv_q6_K_f32;
+
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+    // Transpose kernels
+    cl_program program_transpose_32;
+    cl_program program_transpose_32_16;
+    cl_program program_transpose_16;
+    cl_kernel kernel_transpose_32;
+    cl_kernel kernel_transpose_32_16;
+    cl_kernel kernel_transpose_16;
+
+    cl_mem A_s_d_max;   // max scale buffer size for transpose
+    cl_mem A_q_d_max;   // max weight buffer size for transpose
+    cl_mem B_d_max;     // max activation buffer size for transpose
+
+    // Gemm and Gemv related programs, kernels, etc
+    cl_program program_CL_gemm;
+    cl_program program_CL_gemv_general;
+    cl_program program_CL_gemv_4096_1_11008;
+    cl_program program_CL_gemv_4096_1_4096;
+    cl_program program_CL_gemv_11008_1_4096;
+    cl_program program_CL_gemv_32000_1_4096;
+    cl_kernel CL_mul_mat_Ab_Bi_8x4;
+    cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_general;
+    cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_11008;
+    cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_4096;
+    cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_11008_1_4096;
+    cl_kernel CL_mul_mat_vec_q4_0_f32_1d_4x_flat_32000_1_4096;
+#endif // GGML_OPENCL_USE_ADRENO_KERNELS
+};
+
+static ggml_backend_device g_ggml_backend_opencl_device;
+static ggml_backend_opencl_device_context g_ggml_ctx_dev_main {
+    /*.platform      =*/ nullptr,
+    /*.platform_name =*/ "",
+    /*.device        =*/ nullptr,
+    /*.device_name   =*/ "",
+};
+
+static int ggml_backend_opencl_n_devices = 0;
+
+// Profiling
+#ifdef GGML_OPENCL_PROFILING
+struct ProfilingInfo {
+    std::string op_name;
+    std::string kernel_name;
+    // Kernel execution time in nanoseconds.
+    cl_ulong duration_ns;
+    // Global and local work sizes.
+    size_t global_size[3];
+    size_t local_size[3];
+    // Op output size.
+    size_t output_size[4];
+};
+
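+// Collected profiling entries; when GGML_OPENCL_PROFILING is enabled (see the
+// CMake option earlier in this diff), ggml_cl2_free() below writes one CSV row
+// per entry to cl_profiling.csv.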
+std::vector<ProfilingInfo> g_profiling_info;
+#endif
+
+inline std::string read_file(const std::string &path) {
+    std::ifstream ifs(path);
+    if (!ifs) {
+        return "";
+    }
+    std::string text;
+    ifs.seekg(0, std::ios::end);
+    text.resize(ifs.tellg());
+    ifs.seekg(0, std::ios::beg);
+    ifs.read(&text[0], text.size());
+    return text;
+}
+
+static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, const char* program_buffer, const std::string &compile_opts) {
+    cl_program p;
+    char *program_log;
+    size_t program_size;
+    size_t log_size;
+    int err;
+
+    program_size = strlen(program_buffer);
+
+    p = clCreateProgramWithSource(ctx, 1, (const char**)&program_buffer, &program_size, &err);
+    if(err < 0) {
+        GGML_LOG_ERROR("OpenCL error creating program\n");
+        exit(1);
+    }
+
+    err = clBuildProgram(p, 0, NULL, compile_opts.c_str(), NULL, NULL);
+    if(err < 0) {
+        clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
+        program_log = (char*) malloc(log_size + 1);
+        program_log[log_size] = '\0';
+        clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL);
+        GGML_LOG_ERROR("ggml_opencl: kernel compile error:\n\n%s\n", program_log);
+        free(program_log);
+        exit(1);
+    }
+
+    return p;
+}
+
+static ggml_backend_opencl_context * ggml_cl2_init(ggml_backend_dev_t dev) {
+    static bool initialized = false;
+    static ggml_backend_opencl_context *backend_ctx = nullptr;
+
+    if (initialized) {
+        return backend_ctx;
+    }
+
+    ggml_backend_opencl_device_context *dev_ctx = (ggml_backend_opencl_device_context *)dev->context;
+    GGML_ASSERT(dev_ctx);
+    GGML_ASSERT(dev_ctx->platform == nullptr);
+    GGML_ASSERT(dev_ctx->device == nullptr);
+    GGML_ASSERT(backend_ctx == nullptr);
+
+    initialized = true;
+    backend_ctx = new ggml_backend_opencl_context();
+    backend_ctx->gpu_family = GPU_FAMILY::UNKNOWN;
+
+    cl_int err;
+
+#ifdef GGML_OPENCL_PROFILING
+    GGML_LOG_INFO("ggml_opencl: OpenCL profiling enabled\n");
+#endif
+
+    struct cl_device;
+    struct cl_platform {
+        cl_platform_id id;
+        unsigned number;
+        char name[128];
+        char vendor[128];
+        struct cl_device * devices;
+        unsigned n_devices;
+        struct cl_device * default_device;
+    };
+
+    struct cl_device {
+        struct cl_platform * platform;
+        cl_device_id id;
+        unsigned number;
+        cl_device_type type;
+        char name[128];
+    };
+
+    enum { NPLAT = 16, NDEV = 16 };
+
+    struct cl_platform platforms[NPLAT];
+    unsigned n_platforms = 0;
+    struct cl_device devices[NDEV];
+    unsigned n_devices = 0;
+    struct cl_device * default_device = NULL;
+
+    cl_platform_id platform_ids[NPLAT];
+    if (clGetPlatformIDs(NPLAT, platform_ids, &n_platforms) != CL_SUCCESS) {
+        GGML_LOG_ERROR("ggml_opencl: platform IDs not available.\n");
+        return backend_ctx;
+    }
+
+    for (unsigned i = 0; i < n_platforms; i++) {
+        struct cl_platform * p = &platforms[i];
+        p->number = i;
+        p->id = platform_ids[i];
+        CL_CHECK(clGetPlatformInfo(p->id, CL_PLATFORM_NAME, sizeof(p->name), &p->name, NULL));
+        CL_CHECK(clGetPlatformInfo(p->id, CL_PLATFORM_VENDOR, sizeof(p->vendor), &p->vendor, NULL));
+
+        cl_device_id device_ids[NDEV];
+        cl_int clGetDeviceIDsError = clGetDeviceIDs(p->id, CL_DEVICE_TYPE_ALL, NDEV, device_ids, &p->n_devices);
+        if (clGetDeviceIDsError == CL_DEVICE_NOT_FOUND) {
+            p->n_devices = 0;
+        } else {
+            CL_CHECK(clGetDeviceIDsError);
+        }
+        p->devices = p->n_devices > 0 ? &devices[n_devices] : NULL;
+        p->default_device = NULL;
+
+        for (unsigned j = 0; j < p->n_devices; j++) {
+            struct cl_device * d = &devices[n_devices];
+            d->number = n_devices++;
+            d->id = device_ids[j];
+            d->platform = p;
+            CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_NAME, sizeof(d->name), &d->name, NULL));
+            CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_TYPE, sizeof(d->type), &d->type, NULL));
+
+            if (p->default_device == NULL && d->type == CL_DEVICE_TYPE_GPU) {
+                p->default_device = d;
+            }
+        }
+
+        if (default_device == NULL && p->default_device != NULL) {
+            default_device = p->default_device;
+        }
+    }
+
+    if (n_devices == 0) {
+        GGML_LOG_ERROR("ggml_opencl: could not find any OpenCL devices.\n");
+        return backend_ctx;
+    }
+
+    char * user_platform_string = getenv("GGML_OPENCL_PLATFORM");
+    char * user_device_string = getenv("GGML_OPENCL_DEVICE");
+    int user_platform_number = -1;
+    int user_device_number = -1;
+
+    unsigned n;
+    if (user_platform_string != NULL && sscanf(user_platform_string, " %u", &n) == 1 && n < n_platforms) {
+        user_platform_number = (int)n;
+    }
+    if (user_device_string != NULL && sscanf(user_device_string, " %u", &n) == 1 && n < n_devices) {
+        user_device_number = (int)n;
+    }
+    if (user_platform_number != -1 && user_device_number != -1) {
+        cl_platform* platform = &platforms[user_platform_number];
+        if ((unsigned)user_device_number >= platform->n_devices) {
+            GGML_LOG_ERROR("ggml_opencl: invalid device number %d\n", user_device_number);
+            exit(1);
+        }
+        default_device = &platform->devices[user_device_number];
+    } else {
+
+        struct cl_device * selected_devices = devices;
+        unsigned n_selected_devices = n_devices;
+
+        if (user_platform_number == -1 && user_platform_string != NULL && user_platform_string[0] != 0) {
+            for (unsigned i = 0; i < n_platforms; i++) {
+                struct cl_platform * p = &platforms[i];
+                if (strstr(p->name, user_platform_string) != NULL ||
+                    strstr(p->vendor, user_platform_string) != NULL) {
+                    user_platform_number = (int)i;
+                    break;
+                }
+            }
+            if (user_platform_number == -1) {
+                GGML_LOG_ERROR("ggml_opencl: no platform matching '%s' was found.\n", user_platform_string);
+                exit(1);
+            }
+        }
+        if (user_platform_number != -1) {
+            struct cl_platform * p = &platforms[user_platform_number];
+            selected_devices = p->devices;
+            n_selected_devices = p->n_devices;
+            default_device = p->default_device;
+            if (n_selected_devices == 0) {
+                GGML_LOG_ERROR("ggml_opencl: selected platform '%s' does not have any devices.\n", p->name);
+                exit(1);
+            }
+        }
+
+        if (user_device_number == -1 && user_device_string != NULL && user_device_string[0] != 0) {
+            for (unsigned i = 0; i < n_selected_devices; i++) {
+                struct cl_device * d = &selected_devices[i];
+                if (strstr(d->name, user_device_string) != NULL) {
+                    user_device_number = d->number;
+                    break;
+                }
+            }
+            if (user_device_number == -1) {
+                GGML_LOG_ERROR("ggml_opencl: no device matching '%s' was found.\n", user_device_string);
+                exit(1);
+            }
+        }
+        if (user_device_number != -1) {
+            selected_devices = &devices[user_device_number];
+            n_selected_devices = 1;
+            default_device = &selected_devices[0];
+        }
+
+        GGML_ASSERT(n_selected_devices > 0);
+
+        if (default_device == NULL) {
+            default_device = &selected_devices[0];
+        }
+    }
+
+    GGML_LOG_INFO("ggml_opencl: selecting platform: '%s'\n", default_device->platform->name);
+    GGML_LOG_INFO("ggml_opencl: selecting device: '%s'\n", default_device->name);
+    if (default_device->type != CL_DEVICE_TYPE_GPU) {
+        GGML_LOG_WARN("ggml_opencl: warning, not a GPU: '%s'.\n", default_device->name);
+    }
+
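+    // The platform/device pair can be overridden at runtime via the
+    // GGML_OPENCL_PLATFORM and GGML_OPENCL_DEVICE environment variables,
+    // matched either by index or by name substring (see the sscanf/strstr
+    // logic above). Hypothetical example invocation:
+    //   GGML_OPENCL_PLATFORM=0 GGML_OPENCL_DEVICE=0 ./my-app
+
+    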
dev_ctx->platform = default_device->platform->id; + dev_ctx->device = default_device->id; + backend_ctx->device = default_device->id; + + if (strstr(default_device->name, "Adreno")) { + backend_ctx->gpu_family = GPU_FAMILY::ADRENO; + backend_ctx->adreno_gen = get_adreno_gpu_gen(default_device->name); + + // Default wave size is 128, A8x uses 64. + if (backend_ctx->adreno_gen == ADRENO_GPU_GEN::A8X) { + backend_ctx->adreno_wave_size = 64; + } else if (backend_ctx->adreno_gen == ADRENO_GPU_GEN::A7X || + backend_ctx->adreno_gen == ADRENO_GPU_GEN::X1E) { + backend_ctx->adreno_wave_size = 128; + } else { + backend_ctx->adreno_wave_size = 128; + GGML_LOG_WARN("ggml_opencl: Unsupported Adreno GPU: %s, " + "using wave size %d, " + "may not work as expected\n", + backend_ctx->device_name.c_str(), backend_ctx->adreno_wave_size); + } + } else if (strstr(default_device->name, "Intel")) { + backend_ctx->gpu_family = GPU_FAMILY::INTEL; + } else { + GGML_LOG_ERROR("Unsupported GPU: %s\n", default_device->name); + backend_ctx->gpu_family = GPU_FAMILY::UNKNOWN; + return backend_ctx; + } + +#ifdef GGML_OPENCL_USE_ADRENO_KERNELS + if (backend_ctx->gpu_family != GPU_FAMILY::ADRENO) { + GGML_LOG_ERROR("ggml_opencl: Adreno-specific kernels should not be enabled for non-Adreno GPUs; " + "run on an Adreno GPU or recompile with CMake option `-DGGML_OPENCL_USE_ADRENO_KERNELS=OFF`\n"); + return backend_ctx; + } +#endif + + // Populate backend device name + dev_ctx->platform_name = default_device->platform->name; + dev_ctx->device_name = default_device->name; + backend_ctx->device_name = default_device->name; + + // A local ref of cl_device_id for convenience + cl_device_id device = backend_ctx->device; + + // Check device OpenCL version, OpenCL 2.0 or above is required + size_t device_ver_str_size; + clGetDeviceInfo(device, CL_DEVICE_VERSION, 0, NULL, &device_ver_str_size); + char *device_ver_buffer = (char *)alloca(device_ver_str_size + 1); + clGetDeviceInfo(device, CL_DEVICE_VERSION, device_ver_str_size, device_ver_buffer, NULL); + device_ver_buffer[device_ver_str_size] = '\0'; + GGML_LOG_INFO("ggml_opencl: device OpenCL version: %s\n", device_ver_buffer); + + if (strstr(device_ver_buffer, "OpenCL 2") == NULL && + strstr(device_ver_buffer, "OpenCL 3") == NULL) { + GGML_LOG_ERROR("ggml_opencl: OpenCL 2.0 or above is required\n"); + return backend_ctx; + } + + // Check driver version + size_t driver_version_str_size; + clGetDeviceInfo(device, CL_DRIVER_VERSION, 0, NULL, &driver_version_str_size); + char *driver_version = (char *)alloca(driver_version_str_size + 1); + clGetDeviceInfo(device, CL_DRIVER_VERSION, driver_version_str_size, driver_version, NULL); + driver_version[driver_version_str_size] = '\0'; + GGML_LOG_INFO("ggml_opencl: OpenCL driver: %s\n", driver_version); + backend_ctx->driver_version = driver_version; + + int adreno_cl_compiler_version = get_adreno_cl_compiler_version(driver_version); + bool has_vector_subgroup_broadcast = + adreno_cl_compiler_version >= 47 || adreno_cl_compiler_version == 17; + GGML_LOG_INFO("ggml_opencl: vector subgroup broadcast support: %s\n", + has_vector_subgroup_broadcast ? 
"true" : "false"); + + size_t ext_str_size; + clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, 0, NULL, &ext_str_size); + char *ext_buffer = (char *)alloca(ext_str_size + 1); + clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, ext_str_size, ext_buffer, NULL); + ext_buffer[ext_str_size] = '\0'; // ensure it is null terminated + // Check if ext_buffer contains cl_khr_fp16 + backend_ctx->fp16_support = strstr(ext_buffer, "cl_khr_fp16") != NULL; + GGML_LOG_INFO("ggml_opencl: device FP16 support: %s\n", backend_ctx->fp16_support ? "true" : "false"); + + // fp16 is required + if (!backend_ctx->fp16_support) { + GGML_LOG_ERROR("ggml_opencl: device does not support FP16\n"); + return backend_ctx; + } + + // If OpenCL 3.0 is supported, then check for cl_khr_subgroups, which becomes + // optional in OpenCL 3.0 (cl_khr_subgroup is mandatory in OpenCL 2.x) + if (strstr(device_ver_buffer, "OpenCL 3") && + strstr(ext_buffer, "cl_khr_subgroups") == NULL && + strstr(ext_buffer, "cl_intel_subgroups") == NULL) { + GGML_LOG_ERROR("ggml_opencl: device does not support subgroups (cl_khr_subgroups or cl_intel_subgroups) " + "(note that subgroups is an optional feature in OpenCL 3.0)\n"); + return backend_ctx; + } + + CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_MEM_BASE_ADDR_ALIGN, sizeof(cl_uint), &backend_ctx->alignment, NULL)); + GGML_LOG_INFO("ggml_opencl: mem base addr align: %u\n", backend_ctx->alignment); + + clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(size_t), &backend_ctx->max_alloc_size, NULL); + GGML_LOG_INFO("ggml_opencl: max mem alloc size: %zu MB\n", backend_ctx->max_alloc_size/1024/1024); + + // Check SVM. + cl_device_svm_capabilities svm_caps; + CL_CHECK(clGetDeviceInfo(device, CL_DEVICE_SVM_CAPABILITIES, sizeof(cl_device_svm_capabilities), &svm_caps, 0)); + GGML_LOG_INFO("ggml_opencl: SVM coarse grain buffer support: %s\n", + svm_caps & CL_DEVICE_SVM_COARSE_GRAIN_BUFFER ? "true" : "false"); + GGML_LOG_INFO("ggml_opencl: SVM fine grain buffer support: %s\n", + svm_caps & CL_DEVICE_SVM_FINE_GRAIN_BUFFER ? "true" : "false"); + GGML_LOG_INFO("ggml_opencl: SVM fine grain system support: %s\n", + svm_caps & CL_DEVICE_SVM_FINE_GRAIN_SYSTEM ? "true" : "false"); + GGML_LOG_INFO("ggml_opencl: SVM atomics support: %s\n", + svm_caps & CL_DEVICE_SVM_ATOMICS ? "true" : "false"); + + // Print out configurations +#ifdef GGML_OPENCL_SOA_Q + GGML_LOG_INFO("ggml_opencl: flattening quantized weights representation as struct of arrays (GGML_OPENCL_SOA_Q)\n"); +#endif // GGML_OPENCL_SOA_Q + +#ifdef GGML_OPENCL_USE_ADRENO_KERNELS + GGML_LOG_INFO("ggml_opencl: using kernels optimized for Adreno (GGML_OPENCL_USE_ADRENO_KERNELS)\n"); +#endif // GGML_OPENCL_USE_ADRENO_KERNELS + + cl_context_properties properties[] = { + (intptr_t)CL_CONTEXT_PLATFORM, (intptr_t)dev_ctx->platform, 0 + }; + + CL_CHECK((backend_ctx->context = clCreateContext(properties, 1, &device, NULL, NULL, &err), err)); + + // A local ref of cl_context for convenience + cl_context context = backend_ctx->context; + + //CL_CHECK((queue = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, &err), + // (err != CL_INVALID_QUEUE_PROPERTIES && err != CL_INVALID_VALUE ? 
err : + // (queue = clCreateCommandQueue(context, device, 0, &err), err) + //))); + cl_command_queue_properties command_queue_props = 0; +#ifdef GGML_OPENCL_PROFILING + command_queue_props |= CL_QUEUE_PROFILING_ENABLE; +#endif + CL_CHECK((backend_ctx->queue = clCreateCommandQueue(context, device, command_queue_props, &err), err)); + +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src { + #include "ggml-opencl.cl.h" + }; +#else + const std::string kernel_src = read_file("ggml-opencl.cl"); +#endif + + std::string compile_opts = + "-cl-std=CL2.0 -cl-mad-enable -cl-unsafe-math-optimizations " + "-cl-finite-math-only -cl-fast-relaxed-math "; + backend_ctx->program = build_program_from_source(context, device, kernel_src.c_str(), compile_opts); + + // Non matmul kernels. + CL_CHECK((backend_ctx->kernel_get_rows_f32 = clCreateKernel(backend_ctx->program, "kernel_get_rows_f32", &err), err)); + CL_CHECK((backend_ctx->kernel_get_rows_f16 = clCreateKernel(backend_ctx->program, "kernel_get_rows_f16", &err), err)); + CL_CHECK((backend_ctx->kernel_get_rows_q4_0 = clCreateKernel(backend_ctx->program, "kernel_get_rows_q4_0", &err), err)); + CL_CHECK((backend_ctx->kernel_add = clCreateKernel(backend_ctx->program, "kernel_add", &err), err)); + CL_CHECK((backend_ctx->kernel_add_row = clCreateKernel(backend_ctx->program, "kernel_add_row", &err), err)); + CL_CHECK((backend_ctx->kernel_mul = clCreateKernel(backend_ctx->program, "kernel_mul", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_row = clCreateKernel(backend_ctx->program, "kernel_mul_row", &err), err)); + CL_CHECK((backend_ctx->kernel_scale = clCreateKernel(backend_ctx->program, "kernel_scale", &err), err)); + CL_CHECK((backend_ctx->kernel_silu = clCreateKernel(backend_ctx->program, "kernel_silu", &err), err)); + CL_CHECK((backend_ctx->kernel_silu_4 = clCreateKernel(backend_ctx->program, "kernel_silu_4", &err), err)); + CL_CHECK((backend_ctx->kernel_gelu = clCreateKernel(backend_ctx->program, "kernel_gelu", &err), err)); + CL_CHECK((backend_ctx->kernel_gelu_4 = clCreateKernel(backend_ctx->program, "kernel_gelu_4", &err), err)); + CL_CHECK((backend_ctx->kernel_relu = clCreateKernel(backend_ctx->program, "kernel_relu", &err), err)); + CL_CHECK((backend_ctx->kernel_clamp = clCreateKernel(backend_ctx->program, "kernel_clamp", &err), err)); + CL_CHECK((backend_ctx->kernel_norm = clCreateKernel(backend_ctx->program, "kernel_norm", &err), err)); + CL_CHECK((backend_ctx->kernel_rms_norm = clCreateKernel(backend_ctx->program, "kernel_rms_norm", &err), err)); + CL_CHECK((backend_ctx->kernel_diag_mask_inf = clCreateKernel(backend_ctx->program, "kernel_diag_mask_inf", &err), err)); + CL_CHECK((backend_ctx->kernel_diag_mask_inf_8 = clCreateKernel(backend_ctx->program, "kernel_diag_mask_inf_8", &err), err)); + CL_CHECK((backend_ctx->kernel_soft_max = clCreateKernel(backend_ctx->program, "kernel_soft_max", &err), err)); + CL_CHECK((backend_ctx->kernel_soft_max_4 = clCreateKernel(backend_ctx->program, "kernel_soft_max_4", &err), err)); + CL_CHECK((backend_ctx->kernel_rope_norm_f32 = clCreateKernel(backend_ctx->program, "kernel_rope_norm_f32", &err), err)); + CL_CHECK((backend_ctx->kernel_rope_norm_f16 = clCreateKernel(backend_ctx->program, "kernel_rope_norm_f16", &err), err)); + CL_CHECK((backend_ctx->kernel_rope_neox_f32 = clCreateKernel(backend_ctx->program, "kernel_rope_neox_f32", &err), err)); + CL_CHECK((backend_ctx->kernel_rope_neox_f16 = clCreateKernel(backend_ctx->program, "kernel_rope_neox_f16", &err), err)); + 
CL_CHECK((backend_ctx->kernel_cpy_f16_f16 = clCreateKernel(backend_ctx->program, "kernel_cpy_f16_f16", &err), err)); + CL_CHECK((backend_ctx->kernel_cpy_f16_f32 = clCreateKernel(backend_ctx->program, "kernel_cpy_f16_f32", &err), err)); + CL_CHECK((backend_ctx->kernel_cpy_f32_f16 = clCreateKernel(backend_ctx->program, "kernel_cpy_f32_f16", &err), err)); + CL_CHECK((backend_ctx->kernel_cpy_f32_f32 = clCreateKernel(backend_ctx->program, "kernel_cpy_f32_f32", &err), err)); + + // Matmul kernels. + CL_CHECK((backend_ctx->kernel_mul_mat_f32_f32 = clCreateKernel(backend_ctx->program, "kernel_mul_mat_f32_f32", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_f16_f16 = clCreateKernel(backend_ctx->program, "kernel_mul_mat_f16_f16", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_f16_f32_1row = clCreateKernel(backend_ctx->program, "kernel_mul_mat_f16_f32_1row", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_f16_f32 = clCreateKernel(backend_ctx->program, "kernel_mul_mat_f16_f32", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_f16_f32_l4 = clCreateKernel(backend_ctx->program, "kernel_mul_mat_f16_f32_l4", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32 = clCreateKernel(backend_ctx->program, "kernel_mul_mat_q4_0_f32", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_v = clCreateKernel(backend_ctx->program, "kernel_mul_mat_q4_0_f32_v", &err), err)); + + CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_flat = clCreateKernel(backend_ctx->program, "kernel_mul_mat_q4_0_f32_flat", &err), err)); + CL_CHECK((backend_ctx->kernel_convert_block_q4_0 = clCreateKernel(backend_ctx->program, "kernel_convert_block_q4_0", &err), err)); + CL_CHECK((backend_ctx->kernel_restore_block_q4_0 = clCreateKernel(backend_ctx->program, "kernel_restore_block_q4_0", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_8x_flat = clCreateKernel(backend_ctx->program, "kernel_mul_mat_q4_0_f32_8x_flat", &err), err)); + + // Load additional mulmat kernels. +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src_1 { + #include "ggml-opencl_mm.cl.h" + }; +#else + const std::string kernel_src_1 = read_file("ggml-opencl_mm.cl"); +#endif + backend_ctx->program_1 = build_program_from_source(context, device, kernel_src_1.c_str(), compile_opts); + + CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_1d_8x_flat = clCreateKernel(backend_ctx->program_1, "kernel_mul_mat_q4_0_f32_1d_8x_flat", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_1d_16x_flat = clCreateKernel(backend_ctx->program_1, "kernel_mul_mat_q4_0_f32_1d_16x_flat", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mv_q6_K_f32 = clCreateKernel(backend_ctx->program_1, "kernel_mul_mv_q6_K_f32", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_flat_v0 = clCreateKernel(backend_ctx->program_1, "kernel_mul_mat_q4_0_f32_flat_v0", &err), err)); + CL_CHECK((backend_ctx->kernel_mul_mat_q4_0_f32_flat_img_v0 = clCreateKernel(backend_ctx->program_1, "kernel_mul_mat_q4_0_f32_flat_img_v0", &err), err)); + + // Load additional data conversion kernels. 
+#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src_2 { + #include "ggml-opencl_cvt.cl.h" + }; +#else + const std::string kernel_src_2 = read_file("ggml-opencl_cvt.cl"); +#endif + backend_ctx->program_2 = build_program_from_source(context, device, kernel_src_2.c_str(), compile_opts); + + CL_CHECK((backend_ctx->kernel_convert_block_q4_0_noshuffle = clCreateKernel(backend_ctx->program_2, "kernel_convert_block_q4_0_noshuffle", &err), err)); + + // Kernels for Adreno +#ifdef GGML_OPENCL_USE_ADRENO_KERNELS +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string transpose_32_src { + #include "ggml-opencl_transpose_32.cl.h" + }; +#else + const std::string transpose_32_src = read_file("ggml-opencl_transpose_32.cl"); +#endif + backend_ctx->program_transpose_32 = build_program_from_source(context, device, transpose_32_src.c_str(), compile_opts); + CL_CHECK((backend_ctx->kernel_transpose_32 = clCreateKernel(backend_ctx->program_transpose_32, "kernel_transpose_32", &err), err)); + +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string transpose_32_16_src { + #include "ggml-opencl_transpose_32_16.cl.h" + }; +#else + const std::string transpose_32_16_src = read_file("ggml-opencl_transpose_32_16.cl"); +#endif + backend_ctx->program_transpose_32_16 = build_program_from_source(context, device, transpose_32_16_src.c_str(), compile_opts); + CL_CHECK((backend_ctx->kernel_transpose_32_16 = clCreateKernel(backend_ctx->program_transpose_32_16, "kernel_transpose_32_16", &err), err)); + +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string transpose_16_src { + #include "ggml-opencl_transpose_16.cl.h" + }; +#else + const std::string transpose_16_src = read_file("ggml-opencl_transpose_16.cl"); +#endif + backend_ctx->program_transpose_16 = build_program_from_source(context, device, transpose_16_src.c_str(), compile_opts); + CL_CHECK((backend_ctx->kernel_transpose_16 = clCreateKernel(backend_ctx->program_transpose_16, "kernel_transpose_16", &err), err)); + + // Gemv general + std::string CL_gemv_compile_opts = + " -cl-std=CL2.0 " + " -cl-mad-enable " + " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); + if (has_vector_subgroup_broadcast) { + CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; + } +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src_CL_gemv_general { + #include "ggml-opencl_gemv_noshuffle_general.cl.h" + }; +#else + const std::string kernel_src_CL_gemv_general = read_file("ggml-opencl_gemv_noshuffle_general.cl"); +#endif + + backend_ctx->program_CL_gemv_general = build_program_from_source( + context, device, kernel_src_CL_gemv_general.c_str(), CL_gemv_compile_opts); + CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_general = clCreateKernel(backend_ctx->program_CL_gemv_general, "kernel_gemv_noshuffle", &err), err)); + + // Gemv 2048, 16384 + CL_gemv_compile_opts = + " -cl-std=CL2.0 " + " -cl-mad-enable " + " -DLINE_STRIDE_A=2048 " + " -DBLOCK_STRIDE_A=16384 " + " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size); + if (has_vector_subgroup_broadcast) { + CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT "; + } +#ifdef GGML_OPENCL_EMBED_KERNELS + const std::string kernel_src_CL_gemv { + #include "ggml-opencl_gemv_noshuffle.cl.h" + }; +#else + const std::string kernel_src_CL_gemv = read_file("ggml-opencl_gemv_noshuffle.cl"); +#endif + + backend_ctx->program_CL_gemv_4096_1_4096 = build_program_from_source( + context, device, kernel_src_CL_gemv.c_str(), CL_gemv_compile_opts); + 
CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_4096 = clCreateKernel(backend_ctx->program_CL_gemv_4096_1_4096, "kernel_gemv_noshuffle", &err), err));
+
+    // Gemv 2048, 16384
+    CL_gemv_compile_opts =
+        " -cl-std=CL2.0 "
+        " -cl-mad-enable "
+        " -DLINE_STRIDE_A=2048 "
+        " -DBLOCK_STRIDE_A=16384 "
+        " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size);
+    if (has_vector_subgroup_broadcast) {
+        CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT ";
+    }
+
+    backend_ctx->program_CL_gemv_4096_1_11008 = build_program_from_source(
+        context, device, kernel_src_CL_gemv.c_str(), CL_gemv_compile_opts);
+    CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_11008 = clCreateKernel(backend_ctx->program_CL_gemv_4096_1_11008, "kernel_gemv_noshuffle", &err), err));
+
+    // Gemv 5504, 44032
+    CL_gemv_compile_opts =
+        " -cl-std=CL2.0 "
+        " -cl-mad-enable "
+        " -DLINE_STRIDE_A=5504 "
+        " -DBLOCK_STRIDE_A=44032 "
+        " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size);
+    if (has_vector_subgroup_broadcast) {
+        CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT ";
+    }
+
+    backend_ctx->program_CL_gemv_11008_1_4096 = build_program_from_source(
+        context, device, kernel_src_CL_gemv.c_str(), CL_gemv_compile_opts);
+    CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_11008_1_4096 = clCreateKernel(backend_ctx->program_CL_gemv_11008_1_4096, "kernel_gemv_noshuffle", &err), err));
+
+    // Gemv 16000, 128000
+    CL_gemv_compile_opts =
+        " -cl-std=CL2.0 "
+        " -cl-mad-enable "
+        " -DLINE_STRIDE_A=16000 "
+        " -DBLOCK_STRIDE_A=128000 "
+        " -DSIMDGROUP_WIDTH=" + std::to_string(backend_ctx->adreno_wave_size);
+    if (has_vector_subgroup_broadcast) {
+        CL_gemv_compile_opts += " -DVECTOR_SUB_GROUP_BROADCAT ";
+    }
+
+    backend_ctx->program_CL_gemv_32000_1_4096 = build_program_from_source(context, device, kernel_src_CL_gemv.c_str(), CL_gemv_compile_opts);
+    CL_CHECK((backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_32000_1_4096 = clCreateKernel(backend_ctx->program_CL_gemv_32000_1_4096, "kernel_gemv_noshuffle", &err), err));
+
+    // Gemm
+#ifdef GGML_OPENCL_EMBED_KERNELS
+    const std::string kernel_src_CL_gemm {
+        #include "ggml-opencl_mul_mat_Ab_Bi_8x4.cl.h"
+    };
+#else
+    const std::string kernel_src_CL_gemm = read_file("ggml-opencl_mul_mat_Ab_Bi_8x4.cl");
+#endif
+    backend_ctx->program_CL_gemm = build_program_from_source(context, device, kernel_src_CL_gemm.c_str(), compile_opts);
+    CL_CHECK((backend_ctx->CL_mul_mat_Ab_Bi_8x4 = clCreateKernel(backend_ctx->program_CL_gemm, "kernel_mul_mat_Ab_Bi_8x4", &err), err));
+
+    // Allocate intermediate buffers and images
+    size_t max_A_q_d_bytes = 311164928;
+    size_t max_A_s_d_bytes = 38895616;
+    size_t max_B_d_bytes = 45088768;
+
+    CL_CHECK((backend_ctx->A_q_d_max = clCreateBuffer(context, 0, max_A_q_d_bytes, NULL, &err), err));
+    CL_CHECK((backend_ctx->A_s_d_max = clCreateBuffer(context, 0, max_A_s_d_bytes, NULL, &err), err));
+    CL_CHECK((backend_ctx->B_d_max   = clCreateBuffer(context, 0, max_B_d_bytes,   NULL, &err), err));
+#endif // GGML_OPENCL_USE_ADRENO_KERNELS
+
+    // For now we support a single device
+    ggml_backend_opencl_n_devices = 1;
+
+    return backend_ctx;
+}
+
+static void ggml_cl2_free(void) {
+#ifdef GGML_OPENCL_PROFILING
+    FILE * fperf = fopen("cl_profiling.csv", "w");
+    if (!fperf) {
+        GGML_LOG_ERROR("Failed to open cl_profiling.csv\n");
+        return;
+    }
+
+    float total_kernel_time = 0;
+    fprintf(fperf, "op name, kernel name, duration (ms), global size, local size, output size\n");
+    for (const ProfilingInfo & info : g_profiling_info) {
+        total_kernel_time += info.duration_ns/1.e6f;
+        fprintf(fperf, "%s,%s,%f,%zux%zux%zu,%zux%zux%zu,%zux%zux%zux%zu\n",
+            info.op_name.c_str(), info.kernel_name.c_str(), info.duration_ns/1.e6f,
+            info.global_size[0], info.global_size[1], info.global_size[2],
+            info.local_size[0], info.local_size[1], info.local_size[2],
+            info.output_size[0], info.output_size[1], info.output_size[2], info.output_size[3]);
+    }
+    fclose(fperf);
+
+    GGML_LOG_INFO("ggml_opencl: total kernel time: %f ms\n", total_kernel_time);
+#endif
+}
+
+//------------------------------------------------------------------------------
+// Tensor extra management
+//------------------------------------------------------------------------------
+struct ggml_tensor_extra_cl {
+    // The buffer object that holds the data.
+    cl_mem data_device;
+    // The offset into the buffer object. This is primarily for the scratch
+    // buffer and view operations.
+    // NB: this offset no longer includes view offset (view_offs). Whenever this
+    // offset is used, view_offs should be considered.
+    cl_ulong offset;
+    // The actual size of the cl_mem object. This is needed when returning the
+    // block to the pool.
+    size_t actual_size;
+
+    void reset() {
+        data_device = nullptr;
+        offset = 0;
+        actual_size = 0;
+    }
+};
+
+// Additional tensor extra structs for quantized tensors.
+// These tensors are loaded from files and should not be allocated in scratch --
+// they should always be allocated from the pool. Hence, they do not have an
+// `offset`, which would indicate their location in the scratch buffer.
+struct ggml_tensor_extra_cl_q4_0 {
+    // Quantized values.
+    cl_mem q = nullptr;
+    // Quantized values in image1d_buffer_t.
+    cl_mem q_img = nullptr;
+    // Scales.
+    cl_mem d = nullptr;
+    // Scales in image1d_buffer_t.
+    cl_mem d_img = nullptr;
+    // Size of quantized values.
+    size_t size_q = 0;
+    // Size of scales.
+    size_t size_d = 0;
+
+    ~ggml_tensor_extra_cl_q4_0() {
+        reset();
+    }
+
+    void reset() {
+        // q and d are subbuffers into the bigger buffer allocated in ggml_backend_buffer.
+        // They must be properly released so that the original buffer can be
+        // properly released to avoid a memory leak.
+        if (q != nullptr) {
+            CL_CHECK(clReleaseMemObject(q));
+            q = nullptr;
+        }
+        if (d != nullptr) {
+            CL_CHECK(clReleaseMemObject(d));
+            d = nullptr;
+        }
+        // Currently, q_img and d_img are only initialized when SMALL_ALLOC is
+        // enabled. They point to the images in ggml_backend_opencl_buffer_context.
+        // So, there is no need to release them here.
+        // TODO: initialize them for the non-SMALL_ALLOC path, or remove them.
+ q_img = nullptr; + d_img = nullptr; + size_q = 0; + size_d = 0; + } +}; + +//------------------------------------------------------------------------------ +// Backend API +//------------------------------------------------------------------------------ + +// +// backend +// +static const char * ggml_backend_opencl_name(ggml_backend_t backend) { + return "OpenCL"; + + UNUSED(backend); +} + +static void ggml_backend_opencl_free(ggml_backend_t backend) { + ggml_cl2_free(); + + GGML_UNUSED(backend); +} + +static void ggml_backend_opencl_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + GGML_UNUSED(backend); + GGML_UNUSED(tensor); + GGML_UNUSED(data); + GGML_UNUSED(offset); + GGML_UNUSED(size); +} + +static void ggml_backend_opencl_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { + GGML_UNUSED(backend); + GGML_UNUSED(tensor); + GGML_UNUSED(data); + GGML_UNUSED(offset); + GGML_UNUSED(size); +} + +static bool ggml_backend_opencl_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) { + GGML_UNUSED(backend); + GGML_UNUSED(src); + GGML_UNUSED(dst); + return false; +} + +static void ggml_backend_opencl_synchronize(ggml_backend_t backend) { + GGML_UNUSED(backend); +} + +static ggml_status ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { + for (int i = 0; i < cgraph->n_nodes; i++) { + ggml_tensor * node = cgraph->nodes[i]; + + if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { + continue; + } + + bool ok = ggml_cl_compute_forward(backend, node); + if (!ok) { + GGML_LOG_ERROR("%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); + } + GGML_ASSERT(ok); + } + + return GGML_STATUS_SUCCESS; +} + +static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { + GGML_UNUSED(dev); + + switch (op->op) { + case GGML_OP_NONE: + return true; + case GGML_OP_GET_ROWS: + switch (op->src[0]->type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + return true; + case GGML_TYPE_Q4_0: +#ifdef GGML_OPENCL_SOA_Q + // We do not support flattened Q4_0 (and possibly other Q's) + return false; +#else // GGML_OPENCL_SOA_Q + return true; +#endif // GGML_OPENCL_SOA_Q + default: + return false; + } + case GGML_OP_CPY: + case GGML_OP_DUP: + case GGML_OP_CONT: + switch (op->src[0]->type) { + case GGML_TYPE_F32: + switch (op->type) { + case GGML_TYPE_F16: + case GGML_TYPE_F32: + return true; + default: + return false; + } + case GGML_TYPE_F16: + switch (op->type) { + case GGML_TYPE_F16: + case GGML_TYPE_F32: + return true; + default: + return false; + } + default: + return false; + } + case GGML_OP_ADD: + case GGML_OP_SCALE: + case GGML_OP_MUL: + return true; + case GGML_OP_UNARY: + switch (ggml_get_unary_op(op)) { + case GGML_UNARY_OP_GELU: + case GGML_UNARY_OP_SILU: + case GGML_UNARY_OP_RELU: + return ggml_is_contiguous(op->src[0]); + default: + return false; + } + case GGML_OP_CLAMP: + case GGML_OP_SOFT_MAX: + case GGML_OP_NORM: + case GGML_OP_RMS_NORM: + return true; + case GGML_OP_MUL_MAT: + if (op->src[0]->type == GGML_TYPE_F16) { + return true; + } else if (op->src[0]->type == GGML_TYPE_F32) { + return op->src[1]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); + } else if (op->src[0]->type == GGML_TYPE_Q4_0 || + op->src[0]->type 
== GGML_TYPE_Q6_K) { + return op->src[1]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]); + } + return false; + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + return true; + case GGML_OP_DIAG_MASK_INF: + return op->ne[3] == 1; + case GGML_OP_ROPE: + return true; + default: + return false; + } +} + +// Forward declaration - implementation appears later in the file. +static const char * ggml_backend_opencl_buffer_type_get_name(ggml_backend_buffer_type_t buffer_type); + +static ggml_guid_t ggml_backend_opencl_guid() { + static ggml_guid guid = { 0xde, 0xe0, 0x70, 0xa2, 0x73, 0x4e, 0x4d, 0xbc, 0xb0, 0xc7, 0x4f, 0xd4, 0x6d, 0x4e, 0x90, 0xfe }; + return &guid; +} + +static ggml_backend_i ggml_backend_opencl_i = { + /* .get_name = */ ggml_backend_opencl_name, + /* .free = */ ggml_backend_opencl_free, + /* .set_tensor_async = */ NULL, /* ggml_backend_opencl_set_tensor_async */ + /* .get_tensor_async = */ NULL, /* ggml_backend_opencl_get_tensor_async */ + /* .cpy_tensor_async = */ NULL, /* ggml_backend_opencl_cpy_tensor_async */ + /* .synchronize = */ NULL, /* ggml_backend_opencl_synchronize */ + /* .graph_plan_create = */ NULL, + /* .graph_plan_free = */ NULL, + /* .graph_plan_update = */ NULL, + /* .graph_plan_compute = */ NULL, + /* .graph_compute = */ ggml_backend_opencl_graph_compute, + /* .event_record = */ NULL, + /* .event_wait = */ NULL, +}; + +ggml_backend_t ggml_backend_opencl_init(void) { + ggml_backend_dev_t dev = ggml_backend_reg_dev_get(ggml_backend_opencl_reg(), 0); + ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(dev); + + ggml_backend_t backend = new ggml_backend { + /* .guid = */ ggml_backend_opencl_guid(), + /* .interface = */ ggml_backend_opencl_i, + /* .device = */ dev, + /* .context = */ backend_ctx + }; + + return backend; +} + +bool ggml_backend_is_opencl(ggml_backend_t backend) { + return backend && backend->iface.get_name == ggml_backend_opencl_name; +} + +// +// buffer +// +struct ggml_backend_opencl_buffer_context { + // A buffer context can hold multiple cl_mem objects. This is for flattening + // quantized weights and should be used with GGML_OPENCL_SMALL_ALLOC where + // each tensor is allocated a separate buffer. When flattening is enabled + // with small allocation, each tensor is backed by two cl_mem objects (for + // quants and scales) packed into a backend_opencl_buffer. 
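+    //
+    // For illustration (a sketch, not normative): with flattening and small
+    // allocation enabled, a single Q4_0 tensor ends up backed by one cl_mem
+    // for its quants and one for its scales in this context, instead of one
+    // slice of a large shared buffer.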
+    ggml_backend_opencl_buffer_context(cl_mem buf)
+        : name("OpenCL") {
+        buffer.push_back(buf);
+    }
+
+    ~ggml_backend_opencl_buffer_context() {
+        for (cl_mem buf : buffer) {
+            CL_CHECK(clReleaseMemObject(buf));
+        }
+        for (cl_mem im : img) {
+            CL_CHECK(clReleaseMemObject(im));
+        }
+
+        // Delete all extras to trigger their destructors
+        for (ggml_tensor_extra_cl * e : temp_tensor_extras) {
+            delete e;
+        }
+        for (ggml_tensor_extra_cl * e : temp_tensor_extras_in_use) {
+            delete e;
+        }
+        for (ggml_tensor_extra_cl_q4_0 * e : temp_tensor_extras_q4_0) {
+            delete e;
+        }
+        for (ggml_tensor_extra_cl_q4_0 * e : temp_tensor_extras_q4_0_in_use) {
+            delete e;
+        }
+    }
+
+    ggml_tensor_extra_cl * ggml_opencl_alloc_temp_tensor_extra() {
+        ggml_tensor_extra_cl * extra;
+        if (temp_tensor_extras.empty()) {
+            extra = new ggml_tensor_extra_cl();
+        } else {
+            extra = temp_tensor_extras.back();
+            temp_tensor_extras.pop_back();
+        }
+
+        temp_tensor_extras_in_use.push_back(extra);
+
+        extra->reset();
+        return extra;
+    }
+
+    ggml_tensor_extra_cl_q4_0 * ggml_opencl_alloc_temp_tensor_extra_q4_0() {
+        ggml_tensor_extra_cl_q4_0 * extra;
+        if (temp_tensor_extras_q4_0.empty()) {
+            extra = new ggml_tensor_extra_cl_q4_0();
+        } else {
+            extra = temp_tensor_extras_q4_0.back();
+            temp_tensor_extras_q4_0.pop_back();
+        }
+
+        temp_tensor_extras_q4_0_in_use.push_back(extra);
+
+        extra->reset();
+        return extra;
+    }
+
+    void reset() {
+        for (ggml_tensor_extra_cl * e : temp_tensor_extras_in_use) {
+            temp_tensor_extras.push_back(e);
+        }
+        temp_tensor_extras_in_use.clear();
+
+        for (ggml_tensor_extra_cl_q4_0 * e : temp_tensor_extras_q4_0_in_use) {
+            temp_tensor_extras_q4_0.push_back(e);
+        }
+        temp_tensor_extras_q4_0_in_use.clear();
+    }
+
+    // Pools for extras. Available extras are in `temp_tensor_extras`. Extras
+    // being used are in `temp_tensor_extras_in_use`. At the first run, new
+    // extras get created and put in `in_use`. When the buffer is reset via
+    // the `reset` callback, all extras in `in_use` get moved to available extras
+    // for reuse.
+    std::vector<ggml_tensor_extra_cl *> temp_tensor_extras;
+    std::vector<ggml_tensor_extra_cl *> temp_tensor_extras_in_use;
+    std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0;
+    std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0_in_use;
+
+    // The buffer_context is initially created by ggml_backend_buft_alloc_buffer
+    // before any tensor is initialized (at the beginning of alloc_tensor_range).
+    // Hence, there is always a buffer object in this vector. When each tensor is
+    // being initialized, this original buffer object will be released if both
+    // flattening and small allocation are enabled, and additional buffer
+    // objects will be created in init_tensor to represent flattened quantized
+    // weights.
+    std::vector<cl_mem> buffer;
+    // These are image1d_buffer_t objects that wrap around the quants and scales.
+    // For Q4_0 quantization, there should be two of them - one for quants and
+    // one for scales. They should be populated only when flattening and small
+    // allocation are enabled.
+    std::vector<cl_mem> img;
+    std::string name;
+};
+
+static void * const cl_ptr_base = (void *)(uintptr_t) 0x1000;
+
+static void ggml_backend_opencl_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+    ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+    delete ctx;
+}
+
+static void * ggml_backend_opencl_buffer_get_base(ggml_backend_buffer_t buffer) {
+    return cl_ptr_base;
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+    ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+
+    ggml_cl2_init(buffer->buft->device);
+
+    if (tensor->view_src != nullptr) {
+        GGML_ASSERT(tensor->view_src->buffer->buft == buffer->buft);
+
+        ggml_tensor_extra_cl * view_extra = (ggml_tensor_extra_cl *) tensor->view_src->extra;
+        GGML_ASSERT(view_extra && "view_extra is nullptr?");
+
+        // Reuse extra of the parent tensor. The offset of this view tensor
+        // becomes `extra->offset + view_offs` and needs to be calculated when
+        // it is used. This change is needed because of the change to
+        // ggml_alloc.c in https://github.com/ggerganov/llama.cpp/pull/7640.
+        // `buffer` passed in here will always be `tensor->buffer`. It is OK
+        // to allocate extras from the same buffer context for ordinary
+        // intermediate tensors. But for views into kv cache tensors, doing so
+        // would mess up the extras used by the kv cache.
+        // Before #7640, `buffer` was for intermediate tensors, which was always
+        // different from that of kv cache tensors.
+        //
+        // NB: now extra->offset no longer accounts for view_offs.
+        // NB: this should not apply to weight tensors for end-to-end runs, but
+        // may apply for test-backend-ops.
+        // FIXME: if any unexpected results are seen, double check the offset -
+        // there could be other places that need fixing.
+        tensor->extra = view_extra;
+    } else {
+        {
+            size_t offset = (char *)tensor->data - (char *)cl_ptr_base;
+
+            ggml_tensor_extra_cl * extra = ctx->ggml_opencl_alloc_temp_tensor_extra();
+            extra->offset = offset;
+            extra->data_device = ctx->buffer[0];
+            extra->actual_size = ggml_nbytes(tensor);
+
+            tensor->extra = extra;
+        }
+    }
+}
+
+// The optimized gemm and gemv kernels are used for large matrices without batch.
+// tensor is the quantized weights matrix.
+inline bool use_adreno_kernels(const ggml_tensor *tensor) {
+    return tensor->ne[0] >= 512 && tensor->ne[1] >= 512 &&
+            tensor->ne[2] == 1 && tensor->ne[3] == 1;
+}
+
+static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+    ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(buffer->buft->device);
+
+    cl_context context = backend_ctx->context;
+    cl_command_queue queue = backend_ctx->queue;
+
+#ifdef GGML_OPENCL_SOA_Q
+    // We separate the quantized bits and scale from block_q4_0 by using an
+    // additional kernel, where each thread handles a block. We first read the
+    // original weights into a temporary buffer, then create two separate
+    // buffers for quantized bits and scales, which are then populated by the
+    // conversion kernel.
+    if (tensor->type == GGML_TYPE_Q4_0) {
+        // Tensors should have been preallocated, therefore they should
+        // already have ggml_tensor_extra_cl as extra.
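+        // Layout recap (follows from block_q4_0: one fp16 scale plus
+        // QK4_0/2 = 16 quant bytes per 32-weight block): size_d and size_q
+        // computed below partition ggml_nbytes(tensor) exactly, which the
+        // assert checks.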
+        ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra;
+        GGML_ASSERT(extra_orig && "Tensors in OpenCL backend should have been allocated and initialized");
+
+        // Allocate the new extra and create aliases from the original.
+        ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+        ggml_tensor_extra_cl_q4_0 * extra = ctx->ggml_opencl_alloc_temp_tensor_extra_q4_0();
+
+        size_t size_d = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
+        size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/2;
+        GGML_ASSERT(size_d + size_q == ggml_nbytes(tensor) && "Incorrect tensor size");
+
+        cl_int err;
+        cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
+            ggml_nbytes(tensor), NULL, &err);
+        CL_CHECK(err);
+        CL_CHECK(clEnqueueWriteBuffer(
+            queue, data_device, CL_TRUE, 0,
+            ggml_nbytes(tensor), data, 0, NULL, NULL));
+
+        // We always honor the specified offset arg, although for weights
+        // the offset arg should be 0 (we do not assert this).
+        //GGML_ASSERT(offset == 0);
+
+        // We create subbuffers from the original tensor buffer for scales and
+        // quants - i.e., scales and quants are aliases into the buffer object
+        // that backs the original tensor. This is a cleaner way to adapt to the
+        // new memory management.
+        // In the old code, we allocated new buffers for scales and quants
+        // respectively, which could still be done but would result in double
+        // allocation; properly deallocating the preallocated buffer that backs
+        // the tensors is tricky and would leak the backend specific information
+        // into the general backend code.
+        // Does this create misaligned subbuffers (alignment is 1024) in certain
+        // cases?
+        cl_buffer_region region;
+
+        // The original tensor memory is divided into scales and quants, i.e.,
+        // we first store scales, then quants.
+        // Create subbuffer for scales.
+        region.origin = extra_orig->offset + tensor->view_offs + offset;
+        region.size = size_d;
+        extra->d = clCreateSubBuffer(
+            extra_orig->data_device, CL_MEM_READ_WRITE,
+            CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
+        CL_CHECK(err);
+
+        // Create subbuffer for quants.
+        region.origin = extra_orig->offset + tensor->view_offs + offset + size_d;
+        region.size = size_q;
+        extra->q = clCreateSubBuffer(
+            extra_orig->data_device, CL_MEM_READ_WRITE,
+            CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
+        CL_CHECK(err);
+
+        //cl_kernel kernel = backend_ctx->kernel_convert_block_q4_0;
+    #ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+        cl_kernel kernel = backend_ctx->kernel_convert_block_q4_0;
+
+        // The optimized kernels need weights in natural order, so unshuffle.
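+        // (use_adreno_kernels(), defined above, requires ne[0] >= 512,
+        // ne[1] >= 512 and no batching, i.e. ne[2] == ne[3] == 1.)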
+        if (use_adreno_kernels(tensor)) {
+            kernel = backend_ctx->kernel_convert_block_q4_0_noshuffle;
+        }
+    #else
+        cl_kernel kernel = backend_ctx->kernel_convert_block_q4_0;
+    #endif // GGML_OPENCL_USE_ADRENO_KERNELS
+        CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device));
+        CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q));
+        CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->d));
+
+        size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+        size_t local_work_size[] = {64, 1, 1};
+
+        cl_event evt;
+        CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+        CL_CHECK(clWaitForEvents(1, &evt));
+        CL_CHECK(clReleaseMemObject(data_device));
+
+        tensor->extra = extra;
+
+        // transpose the weights and scales
+    #ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+        // Only do transpose for large, non batched matrix
+        // TODO: use preallocated images instead of sub-buffer then image
+        if (use_adreno_kernels(tensor)) {
+            // <----------------------------------------------------------------------------------> //
+            // start transpose
+            // <----------------------------------------------------------------------------------> //
+            int M = tensor->ne[1];   // ne01
+            int K = tensor->ne[0];   // ne00
+
+            // transpose is out of place, so we need to allocate transposed buffers
+            // <----------------------------------------------------------------------------------> //
+            // use sub_buffer of max buffer size instead
+
+            size_t q_size_bytes = K * M / 8 * sizeof(float);
+            cl_buffer_region region;
+            region.origin = 0;
+            region.size = q_size_bytes;
+            cl_mem qT_d = clCreateSubBuffer(
+                backend_ctx->A_q_d_max,
+                0,
+                CL_BUFFER_CREATE_TYPE_REGION,
+                &region,
+                &err);
+            // cl_mem qT_d = clCreateBuffer(context, CL_MEM_READ_WRITE, q_size_bytes, NULL, &err);
+            CL_CHECK(err);
+
+            // size_t d_size_bytes = M * (K / 32) / 2 * sizeof(float);
+            size_t d_size_bytes = M * (K / 32) * 2;
+            region.origin = 0;
+            region.size = d_size_bytes;
+            cl_mem dT_d = clCreateSubBuffer(
+                backend_ctx->A_s_d_max,
+                0,
+                CL_BUFFER_CREATE_TYPE_REGION,
+                &region,
+                &err);
+            // cl_mem dT_d = clCreateBuffer(context, CL_MEM_READ_WRITE, d_size_bytes, NULL, &err);
+            CL_CHECK(err);
+
+            // <----------------------------------------------------------------------------------> //
+
+
+            // create images from the buffers
+            // <----------------------------------------------------------------------------------> //
+            cl_mem q_d_image1D;
+            cl_mem d_d_image1D;
+            cl_mem qT_d_image1D;
+            cl_mem dT_d_image1D;
+
+            cl_image_format img_fmt_1d = { CL_RGBA, CL_FLOAT };
+            cl_image_desc img_desc_1d;
+
+            memset(&img_desc_1d, 0, sizeof(img_desc_1d));
+            img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+            img_desc_1d.image_width = M * K / 8 / 4;
+            img_desc_1d.buffer = extra->q;
+            q_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err);
+            CL_CHECK(err);
+
+            img_fmt_1d = { CL_RGBA, CL_FLOAT };
+            memset(&img_desc_1d, 0, sizeof(img_desc_1d));
+            img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+            img_desc_1d.image_width = M * K / 8 / 4;
+            img_desc_1d.buffer = qT_d;
+            qT_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err);
+            CL_CHECK(err);
+
+            img_fmt_1d = { CL_RGBA, CL_FLOAT };
+            memset(&img_desc_1d, 0, sizeof(img_desc_1d));
+            img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+            img_desc_1d.image_width = M * K / 32 / 4 / 2;
+            img_desc_1d.buffer = extra->d;
+            d_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err);
+            CL_CHECK(err);
+
+            img_fmt_1d = { CL_RGBA, CL_FLOAT };
+            memset(&img_desc_1d, 0, sizeof(img_desc_1d));
+            img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+            img_desc_1d.image_width = M * K / 32 / 4 / 2;
+            img_desc_1d.buffer = dT_d;
+            dT_d_image1D = clCreateImage(context, 0, &img_fmt_1d, &img_desc_1d, NULL, &err);
+            CL_CHECK(err);
+            // <----------------------------------------------------------------------------------> //
+
+            // set up and call the transpose kernels
+            // <----------------------------------------------------------------------------------> //
+            // weights
+            int height_q = M / 8;
+            int width_q = K / 8 / 4;
+            kernel = backend_ctx->kernel_transpose_16;
+
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &q_d_image1D));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &qT_d_image1D));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_q));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &width_q));
+
+            size_t local_size_q[3] = {4, 16, 1};
+            size_t global_size_q[3] = {static_cast<size_t>(width_q), static_cast<size_t>(height_q), 1};
+            CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_size_q, local_size_q, 0, NULL, &evt));
+            CL_CHECK(clWaitForEvents(1, &evt));
+
+            // scales
+            int height_s = M / 8;
+            int width_s = K / 32 / 8;
+
+            kernel = backend_ctx->kernel_transpose_16;
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &d_d_image1D));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &dT_d_image1D));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_s));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &width_s));
+
+            size_t local_size_s[3] = {4, 16, 1};
+            size_t global_size_s[3] = {static_cast<size_t>(width_s), static_cast<size_t>(height_s), 1};
+            CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_size_s, local_size_s, 0, NULL, &evt));
+            CL_CHECK(clWaitForEvents(1, &evt));
+            // <----------------------------------------------------------------------------------> //
+
+            // copy transposed buffer contents to original buffers
+            // <----------------------------------------------------------------------------------> //
+            // weights
+            CL_CHECK(clEnqueueCopyBuffer(queue, qT_d, extra->q, 0, 0, q_size_bytes, 0, NULL, &evt));
+            CL_CHECK(clWaitForEvents(1, &evt));
+
+            // scales
+            CL_CHECK(clEnqueueCopyBuffer(queue, dT_d, extra->d, 0, 0, d_size_bytes, 0, NULL, &evt));
+            CL_CHECK(clWaitForEvents(1, &evt));
+            // <----------------------------------------------------------------------------------> //
+
+            // deallocate transpose buffers
+            // <----------------------------------------------------------------------------------> //
+            CL_CHECK(clReleaseMemObject(qT_d));
+            CL_CHECK(clReleaseMemObject(dT_d));
+
+            // deallocate temporary images
+            CL_CHECK(clReleaseMemObject(q_d_image1D));
+            CL_CHECK(clReleaseMemObject(d_d_image1D));
+            CL_CHECK(clReleaseMemObject(qT_d_image1D));
+            CL_CHECK(clReleaseMemObject(dT_d_image1D));
+            // <----------------------------------------------------------------------------------> //
+            // end transpose
+            // <----------------------------------------------------------------------------------> //
+        }
+    #endif // GGML_OPENCL_USE_ADRENO_KERNELS
+
+        return;
+    }
+#endif // GGML_OPENCL_SOA_Q
+
+    ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra;
+    GGML_ASSERT(extra);
+
+    CL_CHECK(clEnqueueWriteBuffer(
+        queue, extra->data_device, CL_TRUE, extra->offset + offset,
+        size, data, 0, NULL, NULL));
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor *
tensor, void * data, size_t offset, size_t size) { + GGML_ASSERT(tensor->extra); + + ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(buffer->buft->device); + + cl_context context = backend_ctx->context; + cl_command_queue queue = backend_ctx->queue; + + // Make sure all previously submitted commands are finished. + CL_CHECK(clFinish(queue)); + +#ifdef GGML_OPENCL_SOA_Q + // In end-to-end runs, get_tensor is usually used to get back the logits, + // where we can simply do clEnqueueReadBuffer since they are f32. + // However, in test-backend-ops, the GPU graph is copied to the CPU backend, + // which requires reading back quantized weight tensors. + // To properly support this, we need to restore block_q4_0 struct arrays + // from the flattened buffers. + if (tensor->type == GGML_TYPE_Q4_0) { + ggml_tensor_extra_cl_q4_0 * extra = (ggml_tensor_extra_cl_q4_0 *)tensor->extra; + + cl_int err; + cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE, + ggml_nbytes(tensor), NULL, &err); + CL_CHECK(err); + + cl_kernel kernel = backend_ctx->kernel_restore_block_q4_0; + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->d)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device)); + + size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1}; + size_t local_work_size[] = {1, 1, 1}; + + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, + global_work_size, local_work_size, 0, NULL, &evt)); + CL_CHECK(clWaitForEvents(1, &evt)); + CL_CHECK(clEnqueueReadBuffer( + queue, data_device, CL_TRUE, offset, + size, data, 0, NULL, NULL)); + CL_CHECK(clReleaseMemObject(data_device)); + return; + } +#endif // GGML_OPENCL_SOA_Q + + ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra; + + CL_CHECK(clEnqueueReadBuffer( + queue, extra->data_device, CL_TRUE, extra->offset + tensor->view_offs + offset, + size, data, 0, NULL, NULL)); + + GGML_UNUSED(buffer); +} + +static void ggml_backend_opencl_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + ggml_backend_dev_t dev = buffer->buft->device; + ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(dev); + cl_command_queue queue = backend_ctx->queue; + + ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; + for (cl_mem buf : ctx->buffer) { + CL_CHECK(clEnqueueFillBuffer(queue, buf, &value, sizeof(value), 0, buffer->size, 0, NULL, NULL)); + } + CL_CHECK(clFinish(queue)); +} + +static void ggml_backend_opencl_buffer_reset(ggml_backend_buffer_t buffer) { + ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; + ctx->reset(); +} + +static ggml_backend_buffer_i ggml_backend_opencl_buffer_interface = { + /* .free_buffer = */ ggml_backend_opencl_buffer_free_buffer, + /* .get_base = */ ggml_backend_opencl_buffer_get_base, + /* .init_tensor = */ ggml_backend_opencl_buffer_init_tensor, + /* .memset_tensor = */ NULL, + /* .set_tensor = */ ggml_backend_opencl_buffer_set_tensor, + /* .get_tensor = */ ggml_backend_opencl_buffer_get_tensor, + /* .cpy_tensor = */ NULL, + /* .clear = */ ggml_backend_opencl_buffer_clear, + /* .reset = */ ggml_backend_opencl_buffer_reset, +}; + +// +// buffer type +// + +static const char * ggml_backend_opencl_buffer_type_get_name(ggml_backend_buffer_type_t buffer_type) { + return "OpenCL"; + + GGML_UNUSED(buffer_type); +} + +static ggml_backend_buffer_t 
ggml_backend_opencl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buffer_type, size_t size) { + ggml_backend_opencl_context *backend_ctx = ggml_cl2_init(buffer_type->device); + + // clCreateBuffer returns -61 for size 0 + size = std::max(size, (size_t)1); + + cl_int err; + cl_mem mem = clCreateBuffer(backend_ctx->context, CL_MEM_READ_WRITE, size, NULL, &err); + if (err != CL_SUCCESS) { + GGML_LOG_INFO("%s: failed to allocate %.2f MiB\n", __func__, size / 1024.0 / 1024.0); + return nullptr; + } + + ggml_backend_opencl_buffer_context * ctx = new ggml_backend_opencl_buffer_context(mem); + + return ggml_backend_buffer_init(buffer_type, ggml_backend_opencl_buffer_interface, ctx, size); +} + +static size_t ggml_backend_opencl_buffer_type_get_alignment(ggml_backend_buffer_type_t buffer_type) { + // FIXME: not thread safe, device may not be initialized yet + static cl_uint alignment = -1; + if (alignment == (cl_uint)-1) { + ggml_backend_opencl_context * backend_ctx = ggml_cl2_init(buffer_type->device); + alignment = backend_ctx->alignment; + } + return alignment; +} + +static size_t ggml_backend_opencl_buffer_type_get_max_size(ggml_backend_buffer_type_t buffer_type) { + static size_t max_size = -1; + if (max_size == (size_t)-1) { + ggml_backend_opencl_context * backend_ctx = ggml_cl2_init(buffer_type->device); + max_size = backend_ctx->max_alloc_size; + } + return max_size; +} + +static bool ggml_backend_opencl_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) { + return ggml_backend_is_opencl(backend); + + UNUSED(buft); +} + +static ggml_backend_buffer_type_i ggml_backend_opencl_buffer_type_interface = { + /* .get_name = */ ggml_backend_opencl_buffer_type_get_name, + /* .alloc_buffer = */ ggml_backend_opencl_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_opencl_buffer_type_get_alignment, + /* .get_max_size = */ ggml_backend_opencl_buffer_type_get_max_size, + /* .get_alloc_size = */ NULL, + /* .is_host = */ NULL, +}; + +ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type() { + static ggml_backend_buffer_type buffer_type = { + /* .iface = */ ggml_backend_opencl_buffer_type_interface, + /* .device = */ &g_ggml_backend_opencl_device, + /* .context = */ nullptr, + }; + + return &buffer_type; +} + +// +// backend device +// + +static const char * ggml_backend_opencl_device_get_name(ggml_backend_dev_t dev) { + return "GPUOpenCL"; + + GGML_UNUSED(dev); +} + +static const char * ggml_backend_opencl_device_get_description(ggml_backend_dev_t dev) { + ggml_backend_opencl_device_context *dev_ctx = (ggml_backend_opencl_device_context *) dev->context; + return dev_ctx->device_name.c_str(); +} + +static void ggml_backend_opencl_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { + *free = 1; + *total = 1; + + GGML_UNUSED(dev); +} + +static enum ggml_backend_dev_type ggml_backend_opencl_device_get_type(ggml_backend_dev_t dev) { + return GGML_BACKEND_DEVICE_TYPE_GPU; + + GGML_UNUSED(dev); +} + +static void ggml_backend_opencl_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { + props->name = ggml_backend_opencl_device_get_name(dev); + props->description = ggml_backend_opencl_device_get_description(dev); + props->type = ggml_backend_opencl_device_get_type(dev); + ggml_backend_opencl_device_get_memory(dev, &props->memory_free, &props->memory_total); + props->caps = ggml_backend_dev_caps { + /* .async = */ false, + /* .host_buffer = */ false, + /* .buffer_from_host_ptr = */ false, + /* .events = */ 
false, + }; +} + +static ggml_backend_t ggml_backend_opencl_device_init(ggml_backend_dev_t dev, const char * params) { + ggml_backend_opencl_context * backend_ctx = ggml_cl2_init(dev); + + ggml_backend_t backend = new ggml_backend { + /* .guid = */ ggml_backend_opencl_guid(), + /* .interface = */ ggml_backend_opencl_i, + /* .device = */ dev, + /* .context = */ backend_ctx, + }; + + return backend; + + GGML_UNUSED(params); +} + +static ggml_backend_buffer_type_t ggml_backend_opencl_device_get_buffer_type(ggml_backend_dev_t dev) { + return ggml_backend_opencl_buffer_type(); + + GGML_UNUSED(dev); +} + +static ggml_backend_buffer_t ggml_backend_opencl_device_buffer_from_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) { + GGML_UNUSED(dev); + GGML_UNUSED(ptr); + GGML_UNUSED(size); + GGML_UNUSED(max_tensor_size); + return nullptr; +} + +static bool ggml_backend_opencl_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { + return ggml_opencl_supports_op(dev, op); +} + +static bool ggml_backend_opencl_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) { + return buft->iface.get_name == ggml_backend_opencl_buffer_type_get_name; + + GGML_UNUSED(dev); +} + +static struct ggml_backend_device_i ggml_backend_opencl_device_i = { + /* .get_name = */ ggml_backend_opencl_device_get_name, + /* .get_description = */ ggml_backend_opencl_device_get_description, + /* .get_memory = */ ggml_backend_opencl_device_get_memory, + /* .get_type = */ ggml_backend_opencl_device_get_type, + /* .get_props = */ ggml_backend_opencl_device_get_props, + /* .init_backend = */ ggml_backend_opencl_device_init, + /* .get_buffer_type = */ ggml_backend_opencl_device_get_buffer_type, + /* .get_host_buffer_type = */ NULL, + /* .buffer_from_host_ptr = */ ggml_backend_opencl_device_buffer_from_ptr, + /* .supports_op = */ ggml_backend_opencl_device_supports_op, + /* .supports_buft = */ ggml_backend_opencl_device_supports_buft, + /* .offload_op = */ NULL, + /* .event_new = */ NULL, + /* .event_free = */ NULL, + /* .event_synchronize = */ NULL, +}; + +// Backend registry + +static const char * ggml_backend_opencl_reg_get_name(ggml_backend_reg_t reg) { + return "OpenCL"; + + GGML_UNUSED(reg); +} + +static size_t ggml_backend_opencl_reg_device_count(ggml_backend_reg_t reg) { + return ggml_backend_opencl_n_devices; + + GGML_UNUSED(reg); +} + +static ggml_backend_dev_t ggml_backend_opencl_reg_device_get(ggml_backend_reg_t reg, size_t index) { + GGML_ASSERT(index == 0); + + return &g_ggml_backend_opencl_device; + + GGML_UNUSED(reg); + GGML_UNUSED(index); +} + +static struct ggml_backend_reg_i ggml_backend_opencl_reg_i = { + /* .get_name = */ ggml_backend_opencl_reg_get_name, + /* .device_count = */ ggml_backend_opencl_reg_device_count, + /* .device_get = */ ggml_backend_opencl_reg_device_get, + /* .get_proc_address = */ NULL, +}; + +ggml_backend_reg_t ggml_backend_opencl_reg(void) { + // TODO: make this thread-safe somehow? 
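+    // One possible approach (not applied here): initialize via a function-local
+    // static struct or guard the setup with std::call_once so that concurrent
+    // first calls cannot race on `initialized`.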
+    static ggml_backend_reg reg;
+    static bool initialized = false;
+
+    if (!initialized) {
+        reg = ggml_backend_reg {
+            /* .api_version = */ GGML_BACKEND_API_VERSION,
+            /* .iface   = */ ggml_backend_opencl_reg_i,
+            /* .context = */ NULL,
+        };
+
+        g_ggml_backend_opencl_device = ggml_backend_device {
+            /* .iface   = */ ggml_backend_opencl_device_i,
+            /* .reg     = */ &reg,
+            /* .context = */ &g_ggml_ctx_dev_main,
+        };
+
+        ggml_cl2_init(&g_ggml_backend_opencl_device);
+
+        initialized = true;
+    }
+
+    return &reg;
+}
+
+GGML_BACKEND_DL_IMPL(ggml_backend_opencl_reg)
+
+//------------------------------------------------------------------------------
+// Debugging utils
+//------------------------------------------------------------------------------
+#if 0
+#define QK4_0 32
+typedef struct {
+    ggml_fp16_t d;          // delta
+    uint8_t qs[QK4_0 / 2];  // nibbles / quants
+} block_q4_0;
+static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2,
+    "wrong q4_0 block size/padding");
+
+#include <math.h>
+#ifdef __cplusplus
+#include "half.hpp"
+#endif
+
+static void dump_tensor(ggml_backend_t backend, const struct ggml_tensor * tensor) {
+    void * buf = malloc(ggml_nbytes(tensor));
+
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+    cl_command_queue queue = backend_ctx->queue;
+#ifdef GGML_OPENCL_SOA_Q
+    void * buf_q;
+    void * buf_d;
+#endif
+
+#ifdef GGML_USE_OPENCL
+    // Make sure everything is done.
+    CL_CHECK(clFinish(queue));
+
+#ifdef GGML_OPENCL_SOA_Q
+    if (tensor->type == GGML_TYPE_Q4_0) {
+        ggml_tensor_extra_cl_q4_0 * extra = (ggml_tensor_extra_cl_q4_0 *) tensor->extra;
+        GGML_ASSERT(extra);
+
+        size_t size_q = ggml_nelements(tensor)/QK4_0 * QK4_0/2;
+        size_t size_d = ggml_nelements(tensor)/QK4_0 * sizeof(ggml_fp16_t);
+        GGML_ASSERT(size_q + size_d == ggml_nbytes(tensor));
+        buf_q = malloc(size_q);
+        buf_d = malloc(size_d);
+
+        CL_CHECK(clEnqueueReadBuffer(queue, extra->q, CL_TRUE, 0, size_q, buf_q, 0, NULL, NULL));
+        CL_CHECK(clEnqueueReadBuffer(queue, extra->d, CL_TRUE, 0, size_d, buf_d, 0, NULL, NULL));
+        CL_CHECK(clFinish(queue));
+    } else {
+        // Read out the tensor from GPU memory.
+        ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra;
+        GGML_ASSERT(extra);
+
+        CL_CHECK(clEnqueueReadBuffer(queue, extra->data_device, CL_TRUE,
+            extra->offset, ggml_nbytes(tensor), buf, 0, NULL, NULL));
+        CL_CHECK(clFinish(queue));
+    }
+#else
+    // Read out the tensor from GPU memory.
+    ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra;
+    GGML_ASSERT(extra);
+
+    CL_CHECK(clEnqueueReadBuffer(queue, extra->data_device, CL_TRUE,
+        extra->offset, ggml_nbytes(tensor), buf, 0, NULL, NULL));
+    CL_CHECK(clFinish(queue));
+#endif // GGML_OPENCL_SOA_Q
+#endif // GGML_USE_OPENCL
+
+    // Open file and dump.
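+    // Values are written one per line; Q4_0 blocks are dumped as the fp16
+    // scale in hex followed by the block's 16 quant bytes.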
+ char fname[512]; + sprintf(fname, "./tensor-dumps/%s.txt", tensor->name); + FILE * f = fopen(fname, "w"); + if (!f) { + printf("Failed to open %s\n", fname); + return; + } + + if (tensor->type == GGML_TYPE_F32) { + float * data = (float *) buf; + for (int i = 0; i < ggml_nelements(tensor); ++i) { + if (isnan(data[i])) { + printf("NaN found: %s\n", tensor->name); + break; + } + fprintf(f, "%f\n", data[i]); + } + } else if (tensor->type == GGML_TYPE_I32) { + int * data = (int *) buf; + for (int i = 0; i < ggml_nelements(tensor); ++i) { + if (isnan(data[i])) { + printf("NaN found: %s\n", tensor->name); + break; + } + fprintf(f, "%d\n", data[i]); + } + } else if (tensor->type == GGML_TYPE_F16) { +#ifdef __cplusplus + half_float::half * data = (half_float::half *) buf; + for (int i = 0; i < ggml_nelements(tensor); ++i) { + if (std::isnan(data[i])) { + printf("NaN found: %s\n", tensor->name); + break; + } + fprintf(f, "%f\n", float(data[i])); + } +#endif + } else if (tensor->type == GGML_TYPE_Q4_0) { +#ifdef GGML_OPENCL_SOA_Q + ggml_fp16_t * data_d = (ggml_fp16_t *)buf_d; + unsigned char * data_q = (unsigned char *)buf_q; + + for (int i = 0; i < ggml_nelements(tensor)/QK4_0; ++i) { + fprintf(f, "%04x, ", data_d[i]); + for (int k = 0; k < QK4_0/2; ++k) { + fprintf(f, "%02x, ", data_q[k]); + } + fprintf(f, "\n"); + data_q += QK4_0/2; + } + free(buf_d); + free(buf_q); +#else + block_q4_0 * data = (block_q4_0 *) buf; + for (int i = 0; i < ggml_nelements(tensor)/QK4_0; ++i) { + fprintf(f, "%04x, ", data[i].d); + for (int k = 0; k < QK4_0/2; ++k) { + fprintf(f, "%02x, ", data[i].qs[k]); + } + fprintf(f, "\n"); + } +#endif // GGML_OPENCL_SOA_Q + } + free(buf); + fflush(f); + fclose(f); +} +#else +#define dump_tensor(tensor) +#endif + +//------------------------------------------------------------------------------ +// Profiling utility +//------------------------------------------------------------------------------ +#ifdef GGML_OPENCL_PROFILING +void populateProfilingInfo( + ProfilingInfo& info, cl_event evt, cl_kernel kernel, + size_t global_size[3], size_t local_size[3], + const ggml_tensor * tensor) { + cl_ulong start; + cl_ulong end; + CL_CHECK(clWaitForEvents(1, &evt)); + CL_CHECK(clGetEventProfilingInfo( + evt, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &start, NULL)); + CL_CHECK(clGetEventProfilingInfo( + evt, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &end, NULL)); + + char kernel_name[512]; + CL_CHECK(clGetKernelInfo(kernel, CL_KERNEL_FUNCTION_NAME, + sizeof(kernel_name), kernel_name, NULL)); + + info.duration_ns = end - start; + info.op_name = tensor->name; + info.kernel_name = kernel_name; + info.local_size[0] = local_size[0]; + info.local_size[1] = local_size[1]; + info.local_size[2] = local_size[2]; + info.global_size[0] = global_size[0]; + info.global_size[1] = global_size[1]; + info.global_size[2] = global_size[2]; + info.output_size[0] = tensor->ne[0]; + info.output_size[1] = tensor->ne[1]; + info.output_size[2] = tensor->ne[2]; + info.output_size[3] = tensor->ne[3]; +} +#endif + +//------------------------------------------------------------------------------ +// Ops +//------------------------------------------------------------------------------ + +static bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { + const int64_t ne10 = src1->ne[0]; + + const int64_t ne0 = dst->ne[0]; + const int64_t ne1 = dst->ne[1]; + + // TODO: find the optimal values for these + return (src0->type == GGML_TYPE_F32 || src0->type == 
GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && + src1->type == GGML_TYPE_F32 && + dst->type == GGML_TYPE_F32 && + (ne0 >= 32 && ne1 >= 32 && ne10 >= 32); +} + +static void ggml_cl_nop(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + UNUSED(backend); + UNUSED(src0); + UNUSED(src1); + UNUSED(dst); +} + +static void ggml_cl_get_rows(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(src1); + GGML_ASSERT(src1->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + const int ne00 = src0 ? src0->ne[0] : 0; + const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + const cl_ulong nb02 = src0 ? src0->nb[2] : 0; + const int ne10 = src1 ? src1->ne[0] : 0; + const cl_ulong nb10 = src1 ? src1->nb[0] : 0; + const int ne11 = src1 ? src1->ne[1] : 0; + const cl_ulong nb11 = src1 ? src1->nb[1] : 0; + const cl_ulong nb1 = dst ? dst->nb[1] : 0; + const cl_ulong nb2 = dst ? dst->nb[2] : 0; + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offset1 = extra1->offset + src1->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + cl_kernel kernel; + + switch (src0->type) { + case GGML_TYPE_F32: + kernel = backend_ctx->kernel_get_rows_f32; + break; + case GGML_TYPE_F16: + kernel = backend_ctx->kernel_get_rows_f16; + break; + case GGML_TYPE_Q4_0: + kernel = backend_ctx->kernel_get_rows_q4_0; + break; + default: + GGML_ASSERT(false && "not implemented"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb10)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb11)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb1)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb2)); + + size_t global_work_size[] = {(size_t)ne10, (size_t)ne11, 1}; + size_t local_work_size[] = {1, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +static void ggml_cl_add(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + 
GGML_ASSERT(src1); + GGML_ASSERT(src1->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + const int ne00 = src0 ? src0->ne[0] : 0; + const int ne01 = src0 ? src0->ne[1] : 0; + const int ne02 = src0 ? src0->ne[2] : 0; + const int ne03 = src0 ? src0->ne[3] : 0; + + const cl_ulong nb00 = src0 ? src0->nb[0] : 0; + const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + const cl_ulong nb02 = src0 ? src0->nb[2] : 0; + const cl_ulong nb03 = src0 ? src0->nb[3] : 0; + + const int ne10 = src1 ? src1->ne[0] : 0; + const int ne11 = src1 ? src1->ne[1] : 0; + const int ne12 = src1 ? src1->ne[2] : 0; + const int ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13); + + const cl_ulong nb10 = src1 ? src1->nb[0] : 0; + const cl_ulong nb11 = src1 ? src1->nb[1] : 0; + const cl_ulong nb12 = src1 ? src1->nb[2] : 0; + const cl_ulong nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13); + + const int ne0 = dst ? dst->ne[0] : 0; + const int ne1 = dst ? dst->ne[1] : 0; + const int ne2 = dst ? dst->ne[2] : 0; + const int ne3 = dst ? dst->ne[3] : 0; + + const cl_ulong nb0 = dst ? dst->nb[0] : 0; + const cl_ulong nb1 = dst ? dst->nb[1] : 0; + const cl_ulong nb2 = dst ? dst->nb[2] : 0; + const cl_ulong nb3 = dst ? dst->nb[3] : 0; + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offset1 = extra1->offset + src1->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + bool bcast_row = false; + cl_kernel kernel; + + if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) { + GGML_ASSERT(ggml_is_contiguous(src0)); + + // src1 is a row + GGML_ASSERT(ne11 == 1); + + bcast_row = true; + int ne = ne00 / 4; + kernel = backend_ctx->kernel_add_row; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne)); + } else { + kernel = backend_ctx->kernel_add; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne03)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb00)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 14, 
sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne11)); + CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne13)); + CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb10)); + CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb11)); + CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb12)); + CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb13)); + CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &ne2)); + CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &ne3)); + CL_CHECK(clSetKernelArg(kernel, 26, sizeof(cl_ulong), &nb0)); + CL_CHECK(clSetKernelArg(kernel, 27, sizeof(cl_ulong), &nb1)); + CL_CHECK(clSetKernelArg(kernel, 28, sizeof(cl_ulong), &nb2)); + CL_CHECK(clSetKernelArg(kernel, 29, sizeof(cl_ulong), &nb3)); + } + + if (bcast_row) { + int n = ggml_nelements(dst)/4; + size_t global_work_size[] = {(size_t)n, 1, 1}; + size_t local_work_size[] = {64, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + } else { + unsigned int nth = MIN(64, ne0); + size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03}; + size_t local_work_size[] = {nth, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + } +} + +static void ggml_cl_mul(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(src1); + GGML_ASSERT(src1->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + const int ne00 = src0 ? src0->ne[0] : 0; + const int ne01 = src0 ? src0->ne[1] : 0; + const int ne02 = src0 ? src0->ne[2] : 0; + const int ne03 = src0 ? src0->ne[3] : 0; + + const cl_ulong nb00 = src0 ? src0->nb[0] : 0; + const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + const cl_ulong nb02 = src0 ? src0->nb[2] : 0; + const cl_ulong nb03 = src0 ? src0->nb[3] : 0; + + const int ne10 = src1 ? src1->ne[0] : 0; + const int ne11 = src1 ? src1->ne[1] : 0; + const int ne12 = src1 ? src1->ne[2] : 0; + const int ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13); + + const cl_ulong nb10 = src1 ? src1->nb[0] : 0; + const cl_ulong nb11 = src1 ? src1->nb[1] : 0; + const cl_ulong nb12 = src1 ? src1->nb[2] : 0; + const cl_ulong nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13); + + const int ne0 = dst ? dst->ne[0] : 0; + const int ne1 = dst ? dst->ne[1] : 0; + const int ne2 = dst ? dst->ne[2] : 0; + const int ne3 = dst ? dst->ne[3] : 0; + + const cl_ulong nb0 = dst ? dst->nb[0] : 0; + const cl_ulong nb1 = dst ? dst->nb[1] : 0; + const cl_ulong nb2 = dst ? dst->nb[2] : 0; + const cl_ulong nb3 = dst ? 
dst->nb[3] : 0; + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offset1 = extra1->offset + src1->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + bool bcast_row = false; + cl_kernel kernel; + + if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) { + GGML_ASSERT(ggml_is_contiguous(src0)); + + // src1 is a row + GGML_ASSERT(ne11 == 1); + + bcast_row = true; + int ne = ne00 / 4; + kernel = backend_ctx->kernel_mul_row; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne)); + } else { + kernel = backend_ctx->kernel_mul; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne03)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb00)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne11)); + CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne13)); + CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb10)); + CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb11)); + CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb12)); + CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb13)); + CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &ne2)); + CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &ne3)); + CL_CHECK(clSetKernelArg(kernel, 26, sizeof(cl_ulong), &nb0)); + CL_CHECK(clSetKernelArg(kernel, 27, sizeof(cl_ulong), &nb1)); + CL_CHECK(clSetKernelArg(kernel, 28, sizeof(cl_ulong), &nb2)); + CL_CHECK(clSetKernelArg(kernel, 29, sizeof(cl_ulong), &nb3)); + } + + if (bcast_row) { + int n = ggml_nelements(dst)/4; + size_t global_work_size[] = {(size_t)n, 1, 1}; + size_t local_work_size[] = {64, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + 
CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+
+        g_profiling_info.emplace_back();
+        populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst);
+#else
+        CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL));
+#endif
+    } else {
+        unsigned int nth = MIN(64, ne0);
+        size_t global_work_size[] = {ne01*nth, (size_t)ne02, (size_t)ne03};
+        size_t local_work_size[] = {nth, 1, 1};
+
+#ifdef GGML_OPENCL_PROFILING
+        cl_event evt;
+        CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+
+        g_profiling_info.emplace_back();
+        populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst);
+#else
+        CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL));
+#endif
+    }
+}
+
+static void ggml_cl_gelu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_ASSERT(src0);
+    GGML_ASSERT(src0->extra);
+    GGML_ASSERT(dst);
+    GGML_ASSERT(dst->extra);
+
+    UNUSED(src1);
+
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+    cl_command_queue queue = backend_ctx->queue;
+
+    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+    cl_ulong offset0 = extra0->offset + src0->view_offs;
+    cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+    cl_kernel kernel;
+
+    int n = ggml_nelements(dst);
+
+    if (n % 4 == 0) {
+        kernel = backend_ctx->kernel_gelu_4;
+        n /= 4;
+    } else {
+        kernel = backend_ctx->kernel_gelu;
+    }
+
+    CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+    CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
+
+    size_t global_work_size[] = {(size_t)n, 1, 1};
+    size_t local_work_size[] = {64, 1, 1};
+
+#ifdef GGML_OPENCL_PROFILING
+    cl_event evt;
+    CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+
+    g_profiling_info.emplace_back();
+    populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst);
+#else
+    CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL));
+#endif
+}
+
+static void ggml_cl_silu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_ASSERT(src0);
+    GGML_ASSERT(src0->extra);
+    GGML_ASSERT(dst);
+    GGML_ASSERT(dst->extra);
+
+    UNUSED(src1);
+
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+    cl_command_queue queue = backend_ctx->queue;
+
+    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+    cl_ulong offset0 = extra0->offset + src0->view_offs;
+    cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+    cl_kernel kernel;
+
+    int n = ggml_nelements(dst);
+
+    if (n % 4 == 0) {
+        kernel = backend_ctx->kernel_silu_4;
+        n /= 4;
+    } else {
+        kernel = backend_ctx->kernel_silu;
+    }
+
+    CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+
CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + + size_t global_work_size[] = {(size_t)n, 1, 1}; + size_t local_work_size[] = {64, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +static void ggml_cl_relu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + UNUSED(src1); + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + cl_kernel kernel = backend_ctx->kernel_relu; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + + const int64_t n = ggml_nelements(dst); + + size_t global_work_size[] = {(size_t)n, 1, 1}; + size_t local_work_size[] = {64, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +static void ggml_cl_clamp(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + UNUSED(src1); + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + float min; + float max; + memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float)); + memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float)); + + cl_kernel kernel = backend_ctx->kernel_clamp; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(float), &min)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(float), &max)); + + const int64_t n = ggml_nelements(dst); + + size_t global_work_size[] = {(size_t)n, 1, 1}; + size_t 
local_work_size[] = {64, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +static void ggml_cl_norm(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + UNUSED(src1); + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + const int ne00 = src0 ? src0->ne[0] : 0; + const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + + const int nth = MIN(64, ne00); + + cl_kernel kernel = backend_ctx->kernel_norm; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(float), &eps)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(float)*nth, NULL)); + + const int64_t nrows = ggml_nrows(src0); + + size_t global_work_size[] = {(size_t)nrows*nth, 1, 1}; + size_t local_work_size[] = {(size_t)nth, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +static void ggml_cl_rms_norm(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + UNUSED(src1); + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_backend_opencl_device_context * dev_ctx = + (ggml_backend_opencl_device_context *)backend->device->context; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + const int ne00 = src0 ? src0->ne[0] : 0; + const cl_ulong nb01 = src0 ? 
src0->nb[1] : 0; + + GGML_ASSERT(ne00 % 4 == 0); + GGML_ASSERT(ggml_is_contiguous_1(src0)); + + const int nth = MIN(64, ne00); + + const int64_t nrows = ggml_nrows(src0); + + size_t global_work_size[] = {(size_t)nrows*nth, 1, 1}; + size_t local_work_size[] = {(size_t)nth, 1, 1}; + + cl_kernel kernel = backend_ctx->kernel_rms_norm; + + // Note, this kernel declares local memory in kernel args and the size + // depends on subgroup size. + // Retrieve subgroup size. + // Note, this requires OpenCL 2.1 and above + size_t sgs; + CL_CHECK(clGetKernelSubGroupInfo(kernel, dev_ctx->device, + CL_KERNEL_MAX_SUB_GROUP_SIZE_FOR_NDRANGE, + sizeof(local_work_size), local_work_size, + sizeof(size_t), &sgs, NULL)); + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(float), &eps)); + // This is local memory - the size depends on subgroup size. + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(float)*nth/sgs, NULL)); + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(src1); + GGML_ASSERT(src1->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT; + const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT; + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offset1 = extra1->offset + src1->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + +#ifdef GGML_OPENCL_SOA_Q + ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra; +#endif + + const int ne00 = src0 ? src0->ne[0] : 0; + const int ne01 = src0 ? src0->ne[1] : 0; + const int ne02 = src0 ? src0->ne[2] : 0; + const int ne03 = src0 ? src0->ne[3] : 0; + + const cl_ulong nb00 = src0 ? src0->nb[0] : 0; + const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + const cl_ulong nb02 = src0 ? src0->nb[2] : 0; + const cl_ulong nb03 = src0 ? src0->nb[3] : 0; + + const int ne10 = src1 ? src1->ne[0] : 0; + const int ne11 = src1 ? src1->ne[1] : 0; + const int ne12 = src1 ? src1->ne[2] : 0; + const int ne13 = src1 ? src1->ne[3] : 0; + + const cl_ulong nb10 = src1 ? src1->nb[0] : 0; + const cl_ulong nb11 = src1 ? src1->nb[1] : 0; + const cl_ulong nb12 = src1 ? src1->nb[2] : 0; + const cl_ulong nb13 = src1 ? 
src1->nb[3] : 0;
+
+    const int ne0 = dst ? dst->ne[0] : 0;
+    const int ne1 = dst ? dst->ne[1] : 0;
+
+    int r2 = ne12/ne02;
+    int r3 = ne13/ne03;
+
+    GGML_ASSERT(ne00 == ne10);
+
+    int nth0 = 32;
+    int nth1 = 1;
+    int nrows = 1;
+    // The number of values produced by each subgroup
+    int ndst = 4;
+
+    cl_kernel kernel;
+
+#ifdef GGML_OPENCL_USE_ADRENO_KERNELS
+    cl_context context = backend_ctx->context;
+
+    if (ne01 && ne1 && use_adreno_kernels(src0)) {
+
+        // init CL objects
+        // <--------------------------------------------> //
+        cl_int status;
+        cl_image_format img_fmt_1d;
+        cl_image_desc img_desc_1d;
+        cl_buffer_region region;
+        cl_mem A_image1d;
+        cl_mem B_image1d;
+        cl_mem B_sub_buffer;
+        cl_mem C_d;
+        // for B transpose
+        cl_mem B_d;
+        cl_mem B_d_input_image;
+        // <--------------------------------------------> //
+
+        // define matrix dimensions
+        // <--------------------------------------------> //
+        int M = ne01;
+        int N = ne1;
+        int K = ne00;
+        int padding;
+        // <--------------------------------------------> //
+
+        // q4_0 x fp32
+        if (src0t == GGML_TYPE_Q4_0 && src1t == GGML_TYPE_F32) {
+            // TODO: remove duplicate definitions of image description + format -- move to top
+
+            // create an image for A
+            // <--------------------------------------------> //
+            if (N == 1) {
+                img_fmt_1d = { CL_R, CL_UNSIGNED_INT32};
+            } else {
+                img_fmt_1d = { CL_R, CL_FLOAT};
+            }
+            memset(&img_desc_1d, 0, sizeof(img_desc_1d));
+            img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+            img_desc_1d.image_width = M * K / 2 / 4; // Divide by 4 for char -> float
+            img_desc_1d.buffer = extra0_q4_0->q;
+            A_image1d = clCreateImage(
+                context,
+                CL_MEM_READ_ONLY,
+                &img_fmt_1d,
+                &img_desc_1d,
+                NULL,
+                &status);
+            CL_CHECK(status);
+            // <--------------------------------------------> //
+
+            // create a sub_buffer for B
+            // <--------------------------------------------> //
+            region.origin = (extra1->offset);
+            region.size = K * N * sizeof(float);
+            B_sub_buffer = clCreateSubBuffer(
+                extra1->data_device,
+                0,
+                CL_BUFFER_CREATE_TYPE_REGION,
+                &region,
+                &status);
+            CL_CHECK(status);
+            // <--------------------------------------------> //
+
+            // transpose activation for Skyler's gemm
+            if (N != 1) {
+                // how many extra elements beyond multiple of 8
+                int extra_elements = N % 8;
+
+                // how much padding to add
+                padding = 0;
+                if (extra_elements > 0) {
+                    padding = 8 - extra_elements;
+                }
+
+                // Specify the starting offset (in bytes)
+                region.origin = 0;
+                // Specify the size of the sub-buffer (divide by 2 for FP16)
+                region.size = K * (N + padding) * sizeof(float)/2;
+                B_d = clCreateSubBuffer(
+                    backend_ctx->B_d_max,
+                    0,
+                    CL_BUFFER_CREATE_TYPE_REGION,
+                    &region,
+                    &status);
+                CL_CHECK(status);
+
+                cl_image_format image_format_B_d_input = { CL_RGBA, CL_FLOAT };
+                cl_image_desc image_desc_B_d_input = {
+                    CL_MEM_OBJECT_IMAGE1D_BUFFER,
+                    static_cast<size_t>(K * N / 4),
+                    0, 0, 0, 0, 0, 0, 0, { B_sub_buffer }
+                };
+                B_d_input_image = clCreateImage(
+                    context,
+                    0,
+                    &image_format_B_d_input,
+                    &image_desc_B_d_input,
+                    NULL,
+                    &status);
+                CL_CHECK(status);
+
+                cl_image_format image_format_B_d_output = { CL_RGBA, CL_HALF_FLOAT }; // (CL_HALF_FLOAT for FP16)
+                cl_image_desc image_desc_B_d_output = {
+                    CL_MEM_OBJECT_IMAGE1D_BUFFER,
+                    static_cast<size_t>(K * (N + padding)/4),
+                    0, 0, 0, 0, 0, 0, 0, { B_d }
+                };
+                B_image1d = clCreateImage(
+                    context,
+                    0,
+                    &image_format_B_d_output,
+                    &image_desc_B_d_output,
+                    NULL,
+                    &status);
+                CL_CHECK(status);
+
+                int height_B = N/4;
+                int width_B = K/4;
+                int padded_height_B = (N + padding)/4;
+
+                kernel = backend_ctx->kernel_transpose_32_16;
+                CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &B_d_input_image));
+                CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &B_image1d));
+                CL_CHECK(clSetKernelArg(kernel, 2, sizeof(int), &height_B));
+                CL_CHECK(clSetKernelArg(kernel, 3, sizeof(int), &width_B));
+                CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &padded_height_B));
+
+                size_t local_size_t[2] = { 1, 16 };
+                // WGS tuning
+                if (ne0 == 4096 && ne1 == 128 && ne10 == 4096) {
+                    local_size_t[0] = 4;
+                    local_size_t[1] = 8;
+                } else if (ne0 == 11008 && ne1 == 128 && ne10 == 4096) {
+                    local_size_t[0] = 2;
+                    local_size_t[1] = 8;
+                } else if (ne0 == 4096 && ne1 == 128 && ne10 == 11008) {
+                    local_size_t[0] = 1;
+                    local_size_t[1] = 8;
+                } else if (ne0 == 32000 && ne1 == 128 && ne10 == 4096) {
+                    local_size_t[0] = 2;
+                    local_size_t[1] = 8;
+                }
+
+                size_t global_size_t[2] = {
+                    static_cast<size_t>(width_B),
+                    static_cast<size_t>(padded_height_B)
+                };
+
+    #ifdef GGML_OPENCL_PROFILING
+                cl_event evt;
+                CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global_size_t, local_size_t, 0, NULL, &evt));
+
+                g_profiling_info.emplace_back();
+                populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_size_t, local_size_t, dst);
+    #else
+                CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 2, NULL, global_size_t, local_size_t, 0, NULL, NULL));
+    #endif
+            } else {
+                // no need to transpose B in other cases
+                // create an image for B from sub_buffer
+                // <--------------------------------------------> //
+                img_fmt_1d = {CL_RGBA, CL_FLOAT};
+
+                memset(&img_desc_1d, 0, sizeof(img_desc_1d));
+                img_desc_1d.image_width = K * N / 4;
+                img_desc_1d.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
+                img_desc_1d.buffer = B_sub_buffer;
+                B_image1d = clCreateImage(
+                    context,
+                    CL_MEM_READ_ONLY,
+                    &img_fmt_1d,
+                    &img_desc_1d,
+                    NULL,
+                    &status);
+                CL_CHECK(status);
+                // <--------------------------------------------> //
+            }
+
+            // choose gemm or gemv kernel
+            // <--------------------------------------------> //
+            if (N == 1) {
+                kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_general;
+                if (M == 4096 && K == 4096) {
+                    kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_4096;
+                } else if (M == 4096 && K == 11008) {
+                    kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_4096_1_11008;
+                } else if (M == 11008 && K == 4096) {
+                    kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_11008_1_4096;
+                } else if (M == 32000 && K == 4096) {
+                    kernel = backend_ctx->CL_mul_mat_vec_q4_0_f32_1d_4x_flat_32000_1_4096;
+                }
+            } else {
+                kernel = backend_ctx->CL_mul_mat_Ab_Bi_8x4;
+            }
+            // <--------------------------------------------> //
+
+            // set kernel args
+            // <--------------------------------------------> //
+            cl_uint k_arg = 0;
+
+            if (N == 1) {
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &A_image1d));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &extra0_q4_0->d));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &B_image1d));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_ulong), &extra1->offset));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_mem), &extrad->data_device));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(cl_ulong), &extrad->offset));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne00));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne01));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne02));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne10));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne12));
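+                // Remaining scalar args mirror the generic mul_mat kernels:
+                // output dims ne0/ne1 and the broadcast ratios r2/r3.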
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne0));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &ne1));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &r2));
+                CL_CHECK(clSetKernelArg(kernel, k_arg++, sizeof(int), &r3));
+            } else {
+                region.origin = extrad->offset; // Specify the starting offset (in bytes)
+                region.size = M * N * sizeof(float); // Specify the size of the sub-buffer
+                C_d = clCreateSubBuffer(extrad->data_device, CL_MEM_WRITE_ONLY, CL_BUFFER_CREATE_TYPE_REGION, &region, &status);
+                CL_CHECK(status);
+
+                int padded_N = ne1 + padding;
+
+                CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q)); // A_q_d
+                CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d)); // A_s_d
+                CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &B_image1d)); // B_d
+                CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_mem), &C_d)); // C_d
+                CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne01)); // M
+                CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &padded_N)); // N with padding
+                CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); // K
+                CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne1)); // N without padding
+            }
+            // <--------------------------------------------> //
+
+            // choose workgroup size
+            // <--------------------------------------------> //
+            size_t global_work_size[3] = {
+                64, static_cast<size_t>((M+63)/64), static_cast<size_t>((N+31)/32)};
+            size_t local_work_size[3] = {64, 2, 4};
+
+            global_work_size[0] = (size_t)(ceil((float)ne1/8));
+            global_work_size[1] = (size_t)(ne01/4);
+            global_work_size[2] = (size_t)(1);
+
+            local_work_size[0] = (size_t)(1); // 4x32 for FP32
+            local_work_size[1] = (size_t)(128);
+            local_work_size[2] = (size_t)(1);
+
+            // WGS tuning
+            if (ne0 == 4096 && ne1 == 128 && ne10 == 4096) {
+                local_work_size[0] = 1;
+                local_work_size[1] = 128;
+            } else if (ne0 == 11008 && ne1 == 128 && ne10 == 4096) {
+                local_work_size[0] = 2;
+                local_work_size[1] = 64;
+            } else if (ne0 == 4096 && ne1 == 128 && ne10 == 11008) {
+                local_work_size[0] = 2;
+                local_work_size[1] = 64;
+            } else if (ne0 == 32000 && ne1 == 128 && ne10 == 4096) {
+                local_work_size[0] = 2;
+                local_work_size[1] = 64;
+            }
+
+            if (N == 1) {
+                local_work_size[0] = backend_ctx->adreno_wave_size; // localsize
+                local_work_size[1] = 4; // reduce factor
+                local_work_size[2] = 1;
+
+                global_work_size[0] = M / 2;
+                global_work_size[1] = 4; // reduce factor
+                global_work_size[2] = 1;
+            }
+            // <--------------------------------------------> //
+
+            // enqueue kernel with profiling
+            // <--------------------------------------------> //
+    #ifdef GGML_OPENCL_PROFILING
+            cl_event evt;
+            CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+
+            g_profiling_info.emplace_back();
+            populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst);
+    // enqueue kernel without profiling
+    #else
+            CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL));
+    #endif
+            // <--------------------------------------------> //
+
+            // deallocate sub buffers and images
+            // <--------------------------------------------> //
+            CL_CHECK(clReleaseMemObject(A_image1d));
+            CL_CHECK(clReleaseMemObject(B_sub_buffer));
+            CL_CHECK(clReleaseMemObject(B_image1d));
+
+            if (N != 1) {
+                CL_CHECK(clReleaseMemObject(B_d));
+                CL_CHECK(clReleaseMemObject(B_d_input_image));
+                CL_CHECK(clReleaseMemObject(C_d));
+            }
+            // <--------------------------------------------> //
+
+            return;
+        }
+    } // if (ne01 &&
ne1) +#endif // GGML_OPENCL_USE_ADRENO_KERNELS + + if (!ggml_is_transposed(src0) && + !ggml_is_transposed(src1) && + src1t == GGML_TYPE_F32 && + ne00%32 == 0 && + ne11 > 2) { +#ifdef GGML_OPENCL_SOA_Q + // Set up kernel. + switch(src0t) { + case GGML_TYPE_Q4_0: + // This should have been satisfied. + GGML_ASSERT(ne11 == ne1); + GGML_ASSERT(ne01 == ne0); + + if (backend_ctx->gpu_family == INTEL) { + nth0 = 16; + nth1 = 1; + + kernel = backend_ctx->kernel_mul_mat_q4_0_f32_1d_16x_flat; + } else if (backend_ctx->gpu_family == ADRENO) { + nth0 = 64; + nth1 = 1; + + kernel = backend_ctx->kernel_mul_mat_q4_0_f32_1d_8x_flat; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3)); + break; + default: + break; + } + + // Launch kernel. + if (src0t == GGML_TYPE_Q4_0) { + size_t global_work_size[] = {(size_t)(ne01 + 7)/8*nth0, (size_t)ne11*nth1, (size_t)ne12*ne13}; + size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; + + if (backend_ctx->gpu_family == INTEL) { + // Set global size for Intel. It uses 16x output values. + global_work_size[0] = (size_t)(ne01 + 15)/16*nth0; + global_work_size[1] = (size_t)ne11*nth1; + global_work_size[2] = (size_t)ne12*ne13; + } + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + return; + } +#else // GGML_OPENCL_SOA_Q + // TODO: add block_q4_0 variant. 
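+    // Without GGML_OPENCL_SOA_Q there is no batched Q4_0 kernel yet, so
+    // execution falls through to the generic matrix-vector path below.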
+#endif // GGML_OPENCL_SOA_Q + } + + // use custom matrix x vector kernel + switch (src0t) { + case GGML_TYPE_F32: + //GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(src1t == GGML_TYPE_F32); + kernel = backend_ctx->kernel_mul_mat_f32_f32; + nrows = 4; + + if (backend_ctx->gpu_family == INTEL) { + nth0 = 32; + nth1 = 1; + } else if (backend_ctx->gpu_family == ADRENO) { + nth0 = 64; + nth1 = 1; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb00)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne11)); + CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb10)); + CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb11)); + CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb12)); + CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb13)); + CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2)); + CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3)); + break; + case GGML_TYPE_F16: + //GGML_ASSERT(ne02 == ne12); + if (backend_ctx->gpu_family == INTEL) { + nth0 = 32; + nth1 = 1; + } else if (backend_ctx->gpu_family == ADRENO) { + nth0 = 64; + nth1 = 1; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + if (src1t == GGML_TYPE_F32) { + if (ne11 * ne12 < 4) { + kernel = backend_ctx->kernel_mul_mat_f16_f32_1row; + } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) { + kernel = backend_ctx->kernel_mul_mat_f16_f32_l4; + nrows = ne11; + } else { + kernel = backend_ctx->kernel_mul_mat_f16_f32; + nrows = 4; + } + } else { + kernel = backend_ctx->kernel_mul_mat_f16_f16; + nrows = 4; + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb00)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 13, 
sizeof(int), &ne10));
+            CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne11));
+            CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne12));
+            CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb10));
+            CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb11));
+            CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb12));
+            CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb13));
+            CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne0));
+            CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne1));
+            CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2));
+            CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3));
+            break;
+        case GGML_TYPE_Q4_0:
+            // This should have been satisfied.
+            GGML_ASSERT(ne11 == ne1);
+            GGML_ASSERT(ne01 == ne0);
+
+#ifdef GGML_OPENCL_SOA_Q
+            if (backend_ctx->gpu_family == INTEL) {
+                nth0 = 16;
+                nth1 = 1;
+
+                kernel = backend_ctx->kernel_mul_mat_q4_0_f32_8x_flat;
+                ndst = 8;
+            } else if (backend_ctx->gpu_family == ADRENO) {
+                nth0 = 64;
+                nth1 = 1;
+
+                kernel = backend_ctx->kernel_mul_mat_q4_0_f32_8x_flat;
+                ndst = 8;
+            } else {
+                GGML_ASSERT(false && "TODO: Unknown GPU");
+            }
+
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q4_0->q));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q4_0->d));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+            CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
+            CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
+            CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
+            CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02));
+            CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10));
+            CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12));
+            CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0));
+            CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1));
+            CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2));
+            CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3));
+#else // GGML_OPENCL_SOA_Q
+            if (backend_ctx->gpu_family == INTEL) {
+                // Use 1D local size. Each workgroup is a SIMD group. Each SIMD
+                // group produces N_DST (4 for Q4_0 kernel) values in the result.
+                // The number of workgroups on dim 0 (the leading dimension) is
+                // the nearest multiple of 4 that covers ne0 (equals ne01).
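+                // e.g. ne01 = 30 rows with N_DST = 4 and 16-wide subgroups
+                // launch (30 + 3)/4 * 16 = 128 work-items on dim 0, i.e. 8
+                // workgroups of 16.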
+ nth0 = 16; + nth1 = 1; + + kernel = backend_ctx->kernel_mul_mat_q4_0_f32; + ndst = 4; + } else if (backend_ctx->gpu_family == ADRENO) { + nth0 = 64; + nth1 = 1; + + kernel = backend_ctx->kernel_mul_mat_q4_0_f32_v; + ndst = 4; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3)); +#endif // GGML_OPENCL_SOA_Q + break; + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + kernel = backend_ctx->kernel_mul_mv_q6_K_f32; + + if (backend_ctx->gpu_family == INTEL) { + nth0 = 2; + nth1 = 16; + } else if (backend_ctx->gpu_family == ADRENO) { + nth0 = 2; + nth1 = 64; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &r2)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3)); + break; + default: + GGML_ASSERT(false && "not implemented"); + } + + if (src0t == GGML_TYPE_Q4_0 || + src0t == GGML_TYPE_Q4_1 || + src0t == GGML_TYPE_Q8_0 || + src0t == GGML_TYPE_Q2_K) { + // Each SIMD group produces N_DST values in the result. Assuming each + // workgroup has N_SIMDGROUP SIMD groups, then each workgroup will + // produce N_DST*N_SIMDGROUP values in the result. Hence, the grid size + // (number of workgroups) will be a nearest multiple of + // N_DST*N_SIMDGROUP to cover the size of the dimension. Below, 4 is + // N_DST*N_SIMDGROUP (see the kernel for Q4_0 matmul). 
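+        // ndst, set per kernel above, is that per-workgroup output count; dim 0
+        // below rounds ne01 up to a multiple of ndst before scaling by nth0.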
+ size_t global_work_size[] = {(size_t)(ne01 + ndst-1)/ndst*nth0, (size_t)ne11*nth1, (size_t)ne12*ne13}; + size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + } else if (src0t == GGML_TYPE_Q4_K) { + GGML_ASSERT(false && "not implemented"); + } else if (src0t == GGML_TYPE_Q3_K) { + GGML_ASSERT(false && "not implemented"); + } else if (src0t == GGML_TYPE_Q5_K) { + GGML_ASSERT(false && "not implemented"); + } else if (src0t == GGML_TYPE_Q6_K) { + size_t global_work_size[] = {(size_t)(ne01+1)/2*nth0, (size_t)ne11*nth1, (size_t)ne12*ne13}; + size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + } else { + int64_t ny = (ne11 + nrows - 1)/nrows; + + size_t global_work_size[] = {(size_t)ne01*nth0, (size_t)ny*nth1, (size_t)ne12*ne13}; + size_t local_work_size[] = {(size_t)nth0, (size_t)nth1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + } +} + +static void ggml_cl_scale(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + GGML_UNUSED(src1); + + GGML_ASSERT(ggml_is_contiguous(src0)); + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + float scale; + memcpy(&scale, dst->op_params, sizeof(scale)); + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + cl_kernel kernel = backend_ctx->kernel_scale; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(float), &scale)); + + int n = ggml_nelements(dst)/4; + + size_t global_work_size[] = {(size_t)n, 1, 1}; + size_t local_work_size[] = {64, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + 
g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +static void ggml_cl_cpy(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(src1); + GGML_ASSERT(src1->extra); + + // GGML_OP_CPY happens between src0 and src1. + // GGML_OP_DUP and GGML_OP_CONT happen between src0 and dst. + UNUSED(dst); + + const int ne00 = src0 ? src0->ne[0] : 0; + const int ne01 = src0 ? src0->ne[1] : 0; + const int ne02 = src0 ? src0->ne[2] : 0; + const int ne03 = src0 ? src0->ne[3] : 0; + + const cl_ulong nb00 = src0 ? src0->nb[0] : 0; + const cl_ulong nb01 = src0 ? src0->nb[1] : 0; + const cl_ulong nb02 = src0 ? src0->nb[2] : 0; + const cl_ulong nb03 = src0 ? src0->nb[3] : 0; + + const int ne10 = src1 ? src1->ne[0] : 0; + const int ne11 = src1 ? src1->ne[1] : 0; + const int ne12 = src1 ? src1->ne[2] : 0; + const int ne13 = src1 ? src1->ne[3] : 0; + + const cl_ulong nb10 = src1 ? src1->nb[0] : 0; + const cl_ulong nb11 = src1 ? src1->nb[1] : 0; + const cl_ulong nb12 = src1 ? src1->nb[2] : 0; + const cl_ulong nb13 = src1 ? src1->nb[3] : 0; + + const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT; + const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT; + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offset1 = extra1->offset + src1->view_offs; + + cl_kernel kernel; + + switch (src0t) { + case GGML_TYPE_F32: + switch (src1t) { + case GGML_TYPE_F16: + kernel = backend_ctx->kernel_cpy_f32_f16; + break; + case GGML_TYPE_F32: + kernel = backend_ctx->kernel_cpy_f32_f32; + break; + default: + GGML_ASSERT(false && "not implemented"); + } + break; + case GGML_TYPE_F16: + switch (src1t) { + case GGML_TYPE_F16: + kernel = backend_ctx->kernel_cpy_f16_f16; + break; + case GGML_TYPE_F32: + kernel = backend_ctx->kernel_cpy_f16_f32; + break; + default: + GGML_ASSERT(false && "not implemented"); + } + break; + default: + GGML_ASSERT(false && "not implemented"); + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne03)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb00)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne10)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne11)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne12)); + CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne13)); + 
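    // For GGML_OP_CPY src1 is the destination; its strides are passed so the
+    // kernel can write through non-contiguous layouts.
+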
CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb10)); + CL_CHECK(clSetKernelArg(kernel, 17, sizeof(cl_ulong), &nb11)); + CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb12)); + CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb13)); + + const int nth = MIN(64, ne00); + + size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; + size_t local_work_size[] = {(size_t)nth, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, src1); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +static void ggml_cl_dup(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + ggml_cl_cpy(backend, src0, dst, nullptr); + UNUSED(src1); +} + +static void ggml_cl_diag_mask_inf(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + UNUSED(src1); + + int n_past = ((int32_t *)(dst->op_params))[0]; + + const int ne00 = src0 ? src0->ne[0] : 0; + const int ne01 = src0 ? src0->ne[1] : 0; + const int ne02 = src0 ? src0->ne[2] : 0; + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + cl_kernel kernel; + + if (ne00%8 == 0) { + kernel = backend_ctx->kernel_diag_mask_inf_8; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &n_past)); + + size_t global_work_size[] = {(size_t)ne00*ne01*ne02/8, 1, 1}; + size_t local_work_size[] = {64, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + } else { + kernel = backend_ctx->kernel_diag_mask_inf; + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &n_past)); + + size_t global_work_size[] = {(size_t)ne00, (size_t)ne01, 
(size_t)ne02}; + size_t local_work_size[] = {64, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif + } +} + +static void ggml_cl_soft_max(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { + GGML_ASSERT(src0); + GGML_ASSERT(src0->extra); + GGML_ASSERT(dst); + GGML_ASSERT(dst->extra); + + // Softmax can now fuse KQ mask and KQ scale, which used to be two additional + // ops before softmax. It now also fuses alibi if `max_bias > 0`. For llama, + // alibi is not used; however, for some other models, it is used. + // KQ_mask + if (src1) { + GGML_ASSERT(src1); + GGML_ASSERT(src1->extra); + } + + ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context; + cl_command_queue queue = backend_ctx->queue; + + ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra; + ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra; + + ggml_tensor_extra_cl * extra1 = src1 ? (ggml_tensor_extra_cl *)src1->extra : nullptr; + + cl_ulong offset0 = extra0->offset + src0->view_offs; + cl_ulong offsetd = extrad->offset + dst->view_offs; + + cl_ulong offset1 = extra1 ? extra1->offset + src1->view_offs : offset0; + + const int ne00 = src0 ? src0->ne[0] : 0; + const int ne01 = src0 ? src0->ne[1] : 0; + const int ne02 = src0 ? src0->ne[2] : 0; + const int ne03 = src0 ? src0->ne[3] : 0; + + float scale, max_bias; + memcpy(&scale, dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, dst->op_params + 1, sizeof(float)); + + const int nrows_x = ggml_nrows(src0); + const int nrows_y = src0->ne[1]; + + const int n_head = nrows_x/nrows_y; + const int n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); + + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + // Local size must be wave size. Each workgroup is a wave, working on a row, + // where a row corresponds to leading dimension. + int nth = MIN(32, ne00); + + if (backend_ctx->gpu_family == INTEL) { + // This is the same as the initial value. + nth = MIN(32, ne00); + } + else if (backend_ctx->gpu_family == ADRENO) { + nth = 64; + } else { + GGML_ASSERT(false && "TODO: Unknown GPU"); + } + + cl_kernel kernel; + + if (ne00%4 == 0) { + kernel = backend_ctx->kernel_soft_max_4; + } else { + kernel = backend_ctx->kernel_soft_max; + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), extra1 ? 
&extra1->data_device : &extra0->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+    CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
+    CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
+    CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
+    CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne02));
+    CL_CHECK(clSetKernelArg(kernel, 9, sizeof(float), &scale));
+    CL_CHECK(clSetKernelArg(kernel, 10, sizeof(float), &max_bias));
+    CL_CHECK(clSetKernelArg(kernel, 11, sizeof(float), &m0));
+    CL_CHECK(clSetKernelArg(kernel, 12, sizeof(float), &m1));
+    CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &n_head_log2));
+
+    size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03};
+    size_t local_work_size[] = {(size_t)nth, 1, 1};
+
+#ifdef GGML_OPENCL_PROFILING
+    cl_event evt;
+    CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+
+    g_profiling_info.emplace_back();
+    populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst);
+#else
+    CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL));
+#endif
+}
+
+static void ggml_cl_rope(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_ASSERT(src0);
+    GGML_ASSERT(src0->extra);
+    GGML_ASSERT(src1);
+    GGML_ASSERT(src1->extra);
+    GGML_ASSERT(dst);
+    GGML_ASSERT(dst->extra);
+
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+    cl_command_queue queue = backend_ctx->queue;
+
+    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+    ggml_tensor_extra_cl * extra1 = (ggml_tensor_extra_cl *)src1->extra;
+    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+    cl_ulong offset0 = extra0->offset + src0->view_offs;
+    cl_ulong offset1 = extra1->offset + src1->view_offs;
+    cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+    ggml_tensor * src2 = dst->src[2];
+    ggml_tensor_extra_cl * extra2 = src2 ? (ggml_tensor_extra_cl *)src2->extra : nullptr;
+
+    cl_ulong offset2 = extra2 ? extra2->offset + src2->view_offs : offset0;
+
+    const int ne00 = src0 ? src0->ne[0] : 0;
+    const int ne01 = src0 ? src0->ne[1] : 0;
+    const int ne02 = src0 ? src0->ne[2] : 0;
+    const int ne03 = src0 ? src0->ne[3] : 0;
+
+    const cl_ulong nb00 = src0 ? src0->nb[0] : 0;
+    const cl_ulong nb01 = src0 ? src0->nb[1] : 0;
+    const cl_ulong nb02 = src0 ? src0->nb[2] : 0;
+    const cl_ulong nb03 = src0 ? src0->nb[3] : 0;
+
+    const int ne10 = src1 ? src1->ne[0] : 0;
+    const int ne11 = src1 ? src1->ne[1] : 0; UNUSED(ne11);
+    const int ne12 = src1 ? src1->ne[2] : 0; UNUSED(ne12);
+    const int ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13);
+
+    const int ne0 = dst ? dst->ne[0] : 0;
+    const int ne1 = dst ? dst->ne[1] : 0;
+    const int ne2 = dst ? dst->ne[2] : 0;
+    const int ne3 = dst ? dst->ne[3] : 0;
+
+    const cl_ulong nb0 = dst ? dst->nb[0] : 0;
+    const cl_ulong nb1 = dst ? dst->nb[1] : 0;
+    const cl_ulong nb2 = dst ? dst->nb[2] : 0;
+    const cl_ulong nb3 = dst ?
dst->nb[3] : 0; + + GGML_ASSERT(ne10 == ne02); + + int nth = MIN(64, ne00); + + const int n_past = ((int *) dst->op_params)[0]; + const int n_dims = ((int *) dst->op_params)[1]; + const int mode = ((int *) dst->op_params)[2]; + const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; + + float freq_base; + float freq_scale; + float ext_factor; + float attn_factor; + float beta_fast; + float beta_slow; + + memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); + + const bool is_neox = mode & 2; + + cl_kernel kernel; + + if (!is_neox) { + switch (src0->type) { + case GGML_TYPE_F32: + kernel = backend_ctx->kernel_rope_norm_f32; + break; + case GGML_TYPE_F16: + kernel = backend_ctx->kernel_rope_norm_f16; + break; + default: + GGML_ASSERT(false); + }; + } else { + switch (src0->type) { + case GGML_TYPE_F32: + kernel = backend_ctx->kernel_rope_neox_f32; + break; + case GGML_TYPE_F16: + kernel = backend_ctx->kernel_rope_neox_f16; + break; + default: + GGML_ASSERT(false); + }; + } + + CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0)); + CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device)); + CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1)); + CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), extra2 ? &extra2->data_device : &extra0->data_device)); + CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2)); + CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device)); + CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd)); + CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00)); + CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01)); + CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne02)); + CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne03)); + CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb00)); + CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb01)); + CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb02)); + CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb03)); + CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne0)); + CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne1)); + CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne2)); + CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne3)); + CL_CHECK(clSetKernelArg(kernel, 20, sizeof(cl_ulong), &nb0)); + CL_CHECK(clSetKernelArg(kernel, 21, sizeof(cl_ulong), &nb1)); + CL_CHECK(clSetKernelArg(kernel, 22, sizeof(cl_ulong), &nb2)); + CL_CHECK(clSetKernelArg(kernel, 23, sizeof(cl_ulong), &nb3)); + CL_CHECK(clSetKernelArg(kernel, 24, sizeof(int), &n_past)); + CL_CHECK(clSetKernelArg(kernel, 25, sizeof(int), &n_dims)); + CL_CHECK(clSetKernelArg(kernel, 26, sizeof(int), &n_ctx_orig)); + CL_CHECK(clSetKernelArg(kernel, 27, sizeof(float), &freq_base)); + CL_CHECK(clSetKernelArg(kernel, 28, sizeof(float), &freq_scale)); + CL_CHECK(clSetKernelArg(kernel, 29, sizeof(float), &ext_factor)); + CL_CHECK(clSetKernelArg(kernel, 30, sizeof(float), &attn_factor)); + CL_CHECK(clSetKernelArg(kernel, 31, sizeof(float), &beta_fast)); + CL_CHECK(clSetKernelArg(kernel, 32, sizeof(float), &beta_slow)); + + 
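    // Launch one workgroup of nth work-items per src0 row, replicated over
+    // the ne02 x ne03 planes.
+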
size_t global_work_size[] = {(size_t)ne01*nth, (size_t)ne02, (size_t)ne03}; + size_t local_work_size[] = {(size_t)nth, 1, 1}; + +#ifdef GGML_OPENCL_PROFILING + cl_event evt; + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt)); + + g_profiling_info.emplace_back(); + populateProfilingInfo(g_profiling_info.back(), evt, kernel, global_work_size, local_work_size, dst); +#else + CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, NULL)); +#endif +} + +//------------------------------------------------------------------------------ +// Op offloading +//------------------------------------------------------------------------------ + +typedef void (*ggml_cl_func_t)(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); + +bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor) { + ggml_cl_func_t func = nullptr; + + ggml_tensor * src0 = tensor->src[0]; + ggml_tensor * src1 = tensor->src[1]; + + const bool any_on_device = tensor->extra + || (src0 != nullptr && src0->extra) + || (src1 != nullptr && src1->extra); + + switch (tensor->op) { + case GGML_OP_GET_ROWS: + if (!any_on_device) { + return false; + } + func = ggml_cl_get_rows; + break; + case GGML_OP_CPY: + if (!any_on_device) { + return false; + } + func = ggml_cl_cpy; + break; + case GGML_OP_DUP: + case GGML_OP_CONT: + if (!any_on_device) { + return false; + } + func = ggml_cl_dup; + break; + case GGML_OP_ADD: + if (!any_on_device) { + return false; + } + GGML_ASSERT(ggml_is_contiguous(src0)); + GGML_ASSERT(ggml_is_contiguous(src1)); + func = ggml_cl_add; + break; + case GGML_OP_MUL: + if (!any_on_device) { + return false; + } + func = ggml_cl_mul; + break; + case GGML_OP_UNARY: + switch (ggml_get_unary_op(tensor)) { + case GGML_UNARY_OP_GELU: + if (!any_on_device) { + return false; + } + func = ggml_cl_gelu; + break; + case GGML_UNARY_OP_SILU: + if (!any_on_device) { + return false; + } + func = ggml_cl_silu; + break; + case GGML_UNARY_OP_RELU: + if (!any_on_device) { + return false; + } + func = ggml_cl_relu; + break; + default: + return false; + } break; + case GGML_OP_CLAMP: + if (!any_on_device) { + return false; + } + func = ggml_cl_clamp; + break; + case GGML_OP_NORM: + if (!any_on_device) { + return false; + } + func = ggml_cl_norm; + break; + case GGML_OP_RMS_NORM: + if (!any_on_device) { + return false; + } + func = ggml_cl_rms_norm; + break; + case GGML_OP_MUL_MAT: + if (!any_on_device && !ggml_cl_can_mul_mat(tensor->src[0], tensor->src[1], tensor)) { + return false; + } + func = ggml_cl_mul_mat; + break; + case GGML_OP_SCALE: + if (!any_on_device) { + return false; + } + func = ggml_cl_scale; + break; + case GGML_OP_RESHAPE: + case GGML_OP_VIEW: + case GGML_OP_PERMUTE: + case GGML_OP_TRANSPOSE: + if (!any_on_device) { + return false; + } + func = ggml_cl_nop; + break; + case GGML_OP_DIAG_MASK_INF: + if (!any_on_device) { + return false; + } + func = ggml_cl_diag_mask_inf; + break; + case GGML_OP_SOFT_MAX: + if (!any_on_device) { + return false; + } + func = ggml_cl_soft_max; + break; + case GGML_OP_ROPE: + if (!any_on_device) { + return false; + } + func = ggml_cl_rope; + break; + default: + return false; + } + + func(backend, tensor->src[0], tensor->src[1], tensor); + return true; +} diff --git a/ggml/src/ggml-opencl/kernels/embed_kernel.py b/ggml/src/ggml-opencl/kernels/embed_kernel.py new file mode 100644 index 0000000000000..b5d1d7242b624 --- /dev/null +++ 
b/ggml/src/ggml-opencl/kernels/embed_kernel.py
@@ -0,0 +1,26 @@
+#
+
+import sys
+import logging
+logger = logging.getLogger("opencl-embed-kernel")
+
+
+def main():
+    logging.basicConfig(level=logging.INFO)
+
+    if len(sys.argv) != 3:
+        logger.info("Usage: python embed_kernel.py <input_file> <output_file>")
+        sys.exit(1)
+
+    ifile = open(sys.argv[1], "r")
+    ofile = open(sys.argv[2], "w")
+
+    for i in ifile:
+        ofile.write('R"({})"\n'.format(i))
+
+    ifile.close()
+    ofile.close()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl.cl
new file mode 100644
index 0000000000000..d1cdf709babc5
--- /dev/null
+++ b/ggml/src/ggml-opencl/kernels/ggml-opencl.cl
@@ -0,0 +1,2683 @@
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+#elif defined(cl_amd_fp16)
+#pragma OPENCL EXTENSION cl_amd_fp16 : enable
+#else
+#error "Half precision floating point not supported by OpenCL implementation on your device."
+#endif
+
+#ifdef cl_khr_subgroups
+#pragma OPENCL EXTENSION cl_khr_subgroups : enable
+#elif defined(cl_intel_subgroups)
+#pragma OPENCL EXTENSION cl_intel_subgroups : enable
+#else
+#error "Subgroups not supported on your device."
+#endif
+
+#ifdef cl_intel_required_subgroup_size
+// Always use subgroup size of 32 on Intel.
+#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
+#define INTEL_GPU 1
+#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
+#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
+#elif defined(cl_qcom_reqd_sub_group_size)
+// Always use subgroup size of 64 on Adreno.
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
+#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
+#else
+// TODO: do not know how to choose subgroup size on other GPUs.
+#error "Selecting subgroup size is not supported on your device."
+#endif + +#define QK4_0 32 +#define QR4_0 2 +#define QK4_1 32 +#define QR4_1 2 +#define QK5_0 32 +#define QR5_0 2 +#define QK5_1 32 +#define QR5_1 2 +#define QK8_0 32 +#define QR8_0 1 +#define QK_K 256 +#define K_QUANTS_PER_ITERATION 2 + +typedef char int8_t; +typedef uchar uint8_t; +typedef short int16_t; +typedef ushort uint16_t; +typedef int int32_t; +typedef uint uint32_t; + +//------------------------------------------------------------------------------ +// block_q4_0 +//------------------------------------------------------------------------------ +struct block_q4_0 +{ + half d; + uint8_t qs[QK4_0 / 2]; +}; + +//------------------------------------------------------------------------------ +// block_q4_1 +//------------------------------------------------------------------------------ +struct block_q4_1 +{ + half d; + half m; + uint8_t qs[QK4_1 / 2]; +}; + +//------------------------------------------------------------------------------ +// block_q5_0 +//------------------------------------------------------------------------------ +struct block_q5_0 +{ + half d; + uint32_t qh; + uint8_t qs[QK5_0 / 2]; +}; + +//------------------------------------------------------------------------------ +// block_q5_1 +//------------------------------------------------------------------------------ +struct block_q5_1 +{ + half d; + half m; + uint32_t qh; + uint8_t qs[QK5_1 / 2]; +}; + +//------------------------------------------------------------------------------ +// block_q8_0 +//------------------------------------------------------------------------------ +struct block_q8_0 +{ + half d; + int8_t qs[QK8_0]; +}; + +//------------------------------------------------------------------------------ +// block_q2_K +//------------------------------------------------------------------------------ +struct block_q2_K +{ + uint8_t scales[16]; + uint8_t qs[64]; + half d; + half dmin; +}; + +//------------------------------------------------------------------------------ +// block_q3_K +//------------------------------------------------------------------------------ +struct block_q3_K +{ + uint8_t hmask[32]; + uint8_t qs[64]; + uint8_t scales[12]; + half d; +}; + +//------------------------------------------------------------------------------ +// block_q4_K +//------------------------------------------------------------------------------ +struct block_q4_K +{ + half d; + half dmin; + uint8_t scales[12]; + uint8_t qs[128]; +}; + +//------------------------------------------------------------------------------ +// block_q5_K +//------------------------------------------------------------------------------ +struct block_q5_K +{ + half d; + half dmin; + uint8_t scales[12]; + uint8_t qh[32]; + uint8_t qs[128]; +}; + +//------------------------------------------------------------------------------ +// block_q6_K +//------------------------------------------------------------------------------ +struct block_q6_K +{ + uint8_t ql[128]; + uint8_t qh[64]; + int8_t scales[16]; + half d; +}; + +//------------------------------------------------------------------------------ +// dequantize_q4_0_f32, dequantize_q4_0_f16 +//------------------------------------------------------------------------------ +void dequantize_q4_0_f32(global struct block_q4_0 * xb, short il, float16 * reg) { + global ushort * qs = ((global ushort *)xb + 1); + float d1 = il ? (xb->d / 16.h) : xb->d; + float d2 = d1 / 256.f; + float md = -8.h * xb->d; + ushort mask0 = il ? 
0x00F0 : 0x000F; + ushort mask1 = mask0 << 8; + + reg->s0 = d1 * (qs[0] & mask0) + md; + reg->s1 = d2 * (qs[0] & mask1) + md; + + reg->s2 = d1 * (qs[1] & mask0) + md; + reg->s3 = d2 * (qs[1] & mask1) + md; + + reg->s4 = d1 * (qs[2] & mask0) + md; + reg->s5 = d2 * (qs[2] & mask1) + md; + + reg->s6 = d1 * (qs[3] & mask0) + md; + reg->s7 = d2 * (qs[3] & mask1) + md; + + reg->s8 = d1 * (qs[4] & mask0) + md; + reg->s9 = d2 * (qs[4] & mask1) + md; + + reg->sa = d1 * (qs[5] & mask0) + md; + reg->sb = d2 * (qs[5] & mask1) + md; + + reg->sc = d1 * (qs[6] & mask0) + md; + reg->sd = d2 * (qs[6] & mask1) + md; + + reg->se = d1 * (qs[7] & mask0) + md; + reg->sf = d2 * (qs[7] & mask1) + md; +} + +void dequantize_q4_0_f16(global struct block_q4_0 * xb, short il, half16 * reg) { + global ushort * qs = ((global ushort *)xb + 1); + half d1 = il ? (xb->d / 16.h) : xb->d; + half d2 = d1 / 256.h; + half md = -8.h * xb->d; + ushort mask0 = il ? 0x00F0 : 0x000F; + ushort mask1 = mask0 << 8; + + reg->s0 = d1 * (qs[0] & mask0) + md; + reg->s1 = d2 * (qs[0] & mask1) + md; + + reg->s2 = d1 * (qs[1] & mask0) + md; + reg->s3 = d2 * (qs[1] & mask1) + md; + + reg->s4 = d1 * (qs[2] & mask0) + md; + reg->s5 = d2 * (qs[2] & mask1) + md; + + reg->s6 = d1 * (qs[3] & mask0) + md; + reg->s7 = d2 * (qs[3] & mask1) + md; + + reg->s8 = d1 * (qs[4] & mask0) + md; + reg->s9 = d2 * (qs[4] & mask1) + md; + + reg->sa = d1 * (qs[5] & mask0) + md; + reg->sb = d2 * (qs[5] & mask1) + md; + + reg->sc = d1 * (qs[6] & mask0) + md; + reg->sd = d2 * (qs[6] & mask1) + md; + + reg->se = d1 * (qs[7] & mask0) + md; + reg->sf = d2 * (qs[7] & mask1) + md; +} + +//------------------------------------------------------------------------------ +// add +//------------------------------------------------------------------------------ + +// general-purpose kernel for addition of two tensors +// pros: works for non-contiguous tensors, supports broadcast across dims 1, 2 and 3 +// cons: not very efficient +kernel void kernel_add( + global char * src0, + ulong offset0, + global char * src1, + ulong offset1, + global char * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne10, + int ne11, + int ne12, + int ne13, + ulong nb10, + ulong nb11, + ulong nb12, + ulong nb13, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3 +) { + src0 = src0 + offset0; + src1 = src1 + offset1; + dst = dst + offsetd; + + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + int i13 = i03 % ne13; + int i12 = i02 % ne12; + int i11 = i01 % ne11; + + global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; + global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; + global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; + + for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { + const int i10 = i0 % ne10; + *((global float *)(dst_ptr + i0*nb0)) = *((global float *)(src0_ptr + i0*nb00)) + *((global float *)(src1_ptr + i10*nb10)); + } +} + +// assumption: src1 is a row +// broadcast src1 into src0 +kernel void kernel_add_row( + global float4 * src0, + ulong offset0, + global float4 * src1, + ulong offset1, + global float4 * dst, + ulong offsetd, + int ne +) { + src0 = (global float4*)((global char*)src0 + offset0); + src1 = (global float4*)((global char*)src1 + offset1); + dst = (global float4*)((global char*)dst + offsetd); + + // This performs better than using %. 
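+    // Illustrative example: with ne = 4 and gid = 10, gid/ne = 2 in integer
+    // arithmetic, so idx1 = 10 - 2*4 = 2, which equals 10 % 4; the div/mul/sub
+    // form avoids an integer modulo, which tends to be slower on GPUs.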
+ uint gid = get_global_id(0); + uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne + dst[gid] = src0[gid] + src1[idx1]; +} + +//------------------------------------------------------------------------------ +// mul +//------------------------------------------------------------------------------ +kernel void kernel_mul( + global char * src0, + ulong offset0, + global char * src1, + ulong offset1, + global char * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne10, + int ne11, + int ne12, + int ne13, + ulong nb10, + ulong nb11, + ulong nb12, + ulong nb13, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3 +) { + src0 = src0 + offset0; + src1 = src1 + offset1; + dst = dst + offsetd; + + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + int i13 = i03 % ne13; + int i12 = i02 % ne12; + int i11 = i01 % ne11; + + global char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01; + global char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11; + global char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1; + + for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) { + const int i10 = i0 % ne10; + *((global float *)(dst_ptr + i0*nb0)) = *((global float *)(src0_ptr + i0*nb00)) * *((global float *)(src1_ptr + i10*nb10)); + } +} + +// assumption: src1 is a row +// broadcast src1 into src0 +kernel void kernel_mul_row( + global float4 * src0, + ulong offset0, + global float4 * src1, + ulong offset1, + global float4 * dst, + ulong offsetd, + int ne +) { + src0 = (global float4*)((global char*)src0 + offset0); + src1 = (global float4*)((global char*)src1 + offset1); + dst = (global float4*)((global char*)dst + offsetd); + + // This performs better than using %. 
+ uint gid = get_global_id(0); + uint idx1 = gid - (gid/ne)*ne; // get_global_id(0) % ne + dst[gid] = src0[gid] * src1[idx1]; +} + +//------------------------------------------------------------------------------ +// scale +//------------------------------------------------------------------------------ +kernel void kernel_scale( + global float4 * src0, + ulong offset0, + global float4 * dst, + ulong offsetd, + float scale +) { + src0 = (global float4*)((global char*)src0 + offset0); + dst = (global float4*)((global char*)dst + offsetd); + dst[get_global_id(0)] = src0[get_global_id(0)] * scale; +} + +//------------------------------------------------------------------------------ +// gelu +//------------------------------------------------------------------------------ +#define GELU_COEF_A 0.044715f +#define SQRT_2_OVER_PI 0.79788456080286535587989211986876f + +kernel void kernel_gelu( + global float * src0, + ulong offset0, + global float * dst, + ulong offsetd +) { + src0 = (global float*)((global char*)src0 + offset0); + dst = (global float*)((global char*)dst + offsetd); + + float x = src0[get_global_id(0)]; + + dst[get_global_id(0)] = 0.5f*x*(1.0f + tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); +} + +kernel void kernel_gelu_4( + global float4 * src0, + ulong offset0, + global float4 * dst, + ulong offsetd +) { + src0 = (global float4*)((global char*)src0 + offset0); + dst = (global float4*)((global char*)dst + offsetd); + + float4 x = src0[get_global_id(0)]; + + dst[get_global_id(0)] = 0.5f*x*(1.0f + tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x))); +} + +//------------------------------------------------------------------------------ +// silu +//------------------------------------------------------------------------------ +kernel void kernel_silu( + global float * src0, + ulong offset0, + global float * dst, + ulong offsetd +) { + src0 = (global float*)((global char*)src0 + offset0); + dst = (global float*)((global char*)dst + offsetd); + + float x = src0[get_global_id(0)]; + dst[get_global_id(0)] = x / (1.0f + exp(-x)); +} + +kernel void kernel_silu_4( + global float4 * src0, + ulong offset0, + global float4 * dst, + ulong offsetd +) { + src0 = (global float4*)((global char*)src0 + offset0); + dst = (global float4*)((global char*)dst + offsetd); + + float4 x = src0[get_global_id(0)]; + dst[get_global_id(0)] = x / (1.0f + exp(-x)); +} + +//------------------------------------------------------------------------------ +// relu +//------------------------------------------------------------------------------ +kernel void kernel_relu( + global float * src0, + ulong offset0, + global float * dst, + ulong offsetd +) { + src0 = (global float*)((global char*)src0 + offset0); + dst = (global float*)((global char*)dst + offsetd); + + dst[get_global_id(0)] = fmax(0.0f, src0[get_global_id(0)]); +} + +//------------------------------------------------------------------------------ +// clamp +//------------------------------------------------------------------------------ +kernel void kernel_clamp( + global float * src0, + ulong offset0, + global float * dst, + ulong offsetd, + float min, + float max +) { + src0 = (global float*)((global char*)src0 + offset0); + dst = (global float*)((global char*)dst + offsetd); + + dst[get_global_id(0)] = src0[get_global_id(0)] < min ? + min : + (src0[get_global_id(0)] > max ? 
max : src0[get_global_id(0)]);
+}
+
+//------------------------------------------------------------------------------
+// norm
+//------------------------------------------------------------------------------
+kernel void kernel_norm(
+        global void * src0,
+        ulong offset0,
+        global float * dst,
+        ulong offsetd,
+        int ne00,
+        ulong nb01,
+        float eps,
+        local float * sum
+) {
+    src0 = (global void*)((global char*)src0 + offset0);
+    dst = (global float*)((global char*)dst + offsetd);
+
+    global float * x = (global float *) ((global char *) src0 + get_group_id(0)*nb01);
+
+    // MEAN
+    // parallel sum
+    sum[get_local_id(0)] = 0.0f;
+    for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) {
+        sum[get_local_id(0)] += x[i00];
+    }
+    // reduce
+    barrier(CLK_LOCAL_MEM_FENCE);
+    for (uint i = get_local_size(0)/2; i > 0; i /= 2) {
+        if (get_local_id(0) < i) {
+            sum[get_local_id(0)] += sum[get_local_id(0) + i];
+        }
+        barrier(CLK_LOCAL_MEM_FENCE);
+    }
+    float mean = sum[0] / ne00;
+
+    // recenter and VARIANCE
+    barrier(CLK_LOCAL_MEM_FENCE);
+    global float * y = dst + get_group_id(0)*ne00;
+    sum[get_local_id(0)] = 0.0f;
+    for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) {
+        y[i00] = x[i00] - mean;
+        sum[get_local_id(0)] += y[i00] * y[i00];
+    }
+
+    // reduce
+    barrier(CLK_LOCAL_MEM_FENCE);
+    for (uint i = get_local_size(0)/2; i > 0; i /= 2) {
+        if (get_local_id(0) < i) {
+            sum[get_local_id(0)] += sum[get_local_id(0) + i];
+        }
+        barrier(CLK_LOCAL_MEM_FENCE);
+    }
+    float variance = sum[0] / ne00;
+
+    float scale = 1.0f/sqrt(variance + eps);
+    for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) {
+        y[i00] = y[i00] * scale;
+    }
+}
+
+//------------------------------------------------------------------------------
+// rms_norm
+//------------------------------------------------------------------------------
+// This kernel depends on the subgroup size.
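+// Sizing note (illustrative): each subgroup writes one partial sum into the
+// local `sum` buffer, so with, say, a work-group size of 256 and a subgroup
+// size of 32, the buffer must hold at least 256/32 = 8 floats.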
+kernel void kernel_rms_norm( + global void * src0, + ulong offset0, + global float * dst, + ulong offsetd, + int ne00, + ulong nb01, + float eps, + local float * sum // Note, the size depends on number of subgroups +) { + src0 = (global void*)((global char*)src0 + offset0); + dst = (global float*)((global char*)dst + offsetd); + + global float4 * x = (global float4 *) ((global char *) src0 + get_group_id(0)*nb01); + global float * x_scalar = (global float *) x; + float4 sumf = 0; + float all_sum = 0; + + // parallel sum + for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { + sumf += x[i00] * x[i00]; + } + all_sum = sumf.s0 + sumf.s1 + sumf.s2 + sumf.s3; + all_sum = sub_group_reduce_add(all_sum); + if (get_sub_group_local_id() == 0) { + sum[get_sub_group_id()] = all_sum; + } + + barrier(CLK_LOCAL_MEM_FENCE); + // broadcast + for (uint i = get_local_size(0) / get_max_sub_group_size() / 2; i > 0; i /= 2) { + if (get_local_id(0) < i) { + sum[get_local_id(0)] += sum[get_local_id(0) + i]; + } + } + if (get_local_id(0) == 0) { + for (int i = 4 * (ne00 / 4); i < ne00; i++) { + sum[0] += x_scalar[i]; + } + sum[0] /= ne00; + } + + barrier(CLK_LOCAL_MEM_FENCE); + + const float mean = sum[0]; + const float scale = 1.0f/sqrt(mean + eps); + + global float4 * y = (global float4 *) (dst + get_group_id(0)*ne00); + global float * y_scalar = (global float *) y; + for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { + y[i00] = x[i00] * scale; + } + if (get_local_id(0) == 0) { + for (int i00 = 4 * (ne00 / 4); i00 < ne00; i00++) { + y_scalar[i00] = x_scalar[i00] * scale; + } + } +} + +//------------------------------------------------------------------------------ +// diag_mask_inf kernels +//------------------------------------------------------------------------------ +kernel void kernel_diag_mask_inf( + global float * src0, + ulong offset0, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int n_past +) { + src0 = (global float*)((global char*)src0 + offset0); + dst = (global float*)((global char*)dst + offsetd); + + int i02 = get_global_id(2); + int i01 = get_global_id(1); + int i00 = get_global_id(0); + + if (i00 > n_past + i01) { + dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY; + } else { + dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00]; + } +} + +kernel void kernel_diag_mask_inf_8( + global float4 * src0, + ulong offset0, + global float4 * dst, + ulong offsetd, + int ne00, + int ne01, + int n_past +) { + src0 = (global float4*)((global char*)src0 + offset0); + dst = (global float4*)((global char*)dst + offsetd); + + int i = 2*get_global_id(0); + + dst[i+0] = src0[i+0]; + dst[i+1] = src0[i+1]; + int i4 = 4*i; + int i02 = i4/(ne00*ne01); i4 -= i02*ne00*ne01; + int i01 = i4/(ne00); i4 -= i01*ne00; + int i00 = i4; + for (int k = 3; k >= 0; --k) { + if (i00 + 4 + k <= n_past + i01) { + break; + } + (&dst[i+1])[k] = -INFINITY; + if (i00 + k > n_past + i01) { + (&dst[i])[k] = -INFINITY; + } + } +} + +//------------------------------------------------------------------------------ +// softmax +//------------------------------------------------------------------------------ +kernel void kernel_soft_max( + global float * src0, + ulong offset0, + global float * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + float scale, + float max_bias, + float m0, + float m1, + int n_head_log2 +) { + src0 = (global float*)((global char*)src0 + offset0); + src1 = (global float*)((global 
char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + global float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; + global float * pmask = src1 != src0 ? src1 + i01*ne00 : 0; + global float * pdst = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; + + float slope = 1.0f; + + // ALiBi + if (max_bias > 0.0f) { + int h = i02; + + float base = h < n_head_log2 ? m0 : m1; + int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; + + slope = pow(base, exp); + } + + // parallel max + float lmax = -INFINITY; + for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { + lmax = fmax(lmax, psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)); + } + float max = sub_group_reduce_max(lmax); + + // parallel sum + float lsum = 0.0f; + for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { + float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)) - max); + lsum += exp_psrc0; + // Remember the result of exp here. exp is expensive, so we really do not + // wish to compute it twice. + pdst[i00] = exp_psrc0; + } + + const float sum = sub_group_reduce_add(lsum); + + for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { + pdst[i00] /= sum; + } +} + +#ifdef ADRENO_GPU +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_soft_max_4( + global float * src0, + ulong offset0, + global float * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + float scale, + float max_bias, + float m0, + float m1, + int n_head_log2 +) { + src0 = (global float*)((global char*)src0 + offset0); + src1 = (global float*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + global float4 * psrc4 = (global float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); + global float4 * pmask = src1 != src0 ? (global float4 *)(src1 + i01*ne00) : 0; + global float4 * pdst4 = (global float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00); + + float slope = 1.0f; + + // ALiBi + if (max_bias > 0.0f) { + int h = i02; + + float base = h < n_head_log2 ? m0 : m1; + int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; + + slope = pow(base, exp); + } + + // parallel max + float4 lmax4 = -INFINITY; + for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { + lmax4 = fmax(lmax4, psrc4[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)); + } + float lmax = fmax(fmax(lmax4.s0, lmax4.s1), fmax(lmax4.s2, lmax4.s3)); + + const float max = sub_group_reduce_max(lmax); + + // parallel sum + float4 lsum4 = 0.0f; + for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { + const float4 exp_psrc4 = exp((psrc4[i00]*scale + (pmask ? 
slope*pmask[i00] : 0.0f)) - max); + lsum4 += exp_psrc4; + pdst4[i00] = exp_psrc4; + } + float lsum = lsum4.s0 + lsum4.s1 + lsum4.s2 + lsum4.s3; + + const float sum = sub_group_reduce_add(lsum); + + for (int i00 = get_local_id(0); i00 < ne00/4; i00 += get_local_size(0)) { + pdst4[i00] /= sum; + } +} + +//------------------------------------------------------------------------------ +// kernel_rope +//------------------------------------------------------------------------------ +float rope_yarn_ramp(float low, float high, int i0) { + const float y = (i0 / 2 - low) / max(0.001f, high - low); + return 1.0f - min(1.0f, max(0.0f, y)); +} + +// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn +// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. +float2 rope_yarn( + float theta_extrap, float freq_scale, float2 corr_dims, int i0, float ext_factor, float mscale +) { + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta = theta_interp; + if (ext_factor != 0.0f) { + float ramp_mix = rope_yarn_ramp(corr_dims.s0, corr_dims.s1, i0) * ext_factor; + theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * log(1.0f / freq_scale); + } + return (float2)(cos(theta) * mscale, sin(theta) * mscale); +} + +// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get +// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))` +float rope_yarn_corr_factor(int n_dims, int n_ctx_orig, float n_rot, float base) { + return n_dims * log(n_ctx_orig / (n_rot * 2 * M_PI_F)) / (2 * log(base)); +} + +float2 rope_yarn_corr_dims( + int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow +) { + // start and end correction dims + return (float2)( + max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_fast, freq_base))), + min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_slow, freq_base))) + ); +} + +kernel void kernel_rope_norm_f32( + global void * src0, + ulong offset0, + global int * src1, + ulong offset1, + global float * src2, + ulong offset2, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3, + int n_past, + int n_dims, + int n_ctx_orig, + float freq_base, + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow +) { + src0 = (global void*)((global char*)src0 + offset0); + src1 = (global int*)((global char*)src1 + offset1); + src2 = (global float*)((global char*)src2 + offset2); + dst = (global float*)((global char*)dst + offsetd); + + int i3 = get_group_id(2); + int i2 = get_group_id(1); + int i1 = get_group_id(0); + + float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); + + global int * pos = src1; + + float theta_base = (float) pos[i2]; + float inv_ndims = -1.f/n_dims; + + for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { + if (i0 < n_dims) { + int ic = i0/2; + + float theta = theta_base * pow(freq_base, inv_ndims*i0); + + float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; + + float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); + + global float * src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + float x0 = src[0]; + float x1 = src[1]; + + dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; + dst_data[1] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; + } else { + global float * src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; + } + } +} + +kernel void kernel_rope_norm_f16( + global void * src0, + ulong offset0, + global int * src1, + ulong offset1, + global float * src2, + ulong offset2, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3, + int n_past, + int n_dims, + int n_ctx_orig, + float freq_base, + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow +) { + src0 = (global void*)((global char*)src0 + offset0); + src1 = (global int*)((global char*)src1 + offset1); + src2 = (global float*)((global char*)src2 + offset2); + dst = (global float*)((global char*)dst + offsetd); + + int i3 = get_group_id(2); + int i2 = get_group_id(1); + int i1 = get_group_id(0); + + float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); + + global int * pos = src1; + + float theta_base = (float) pos[i2]; + float inv_ndims = -1.f/n_dims; + + for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { + if (i0 < n_dims) { + int ic = i0/2; + + float theta = theta_base * pow(freq_base, inv_ndims*i0); + + float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; + + float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); + + global half * src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + float x0 = src[0]; + float x1 = src[1]; + + dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; + dst_data[1] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; + } else { + global half * src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; + } + } +} + +kernel void kernel_rope_neox_f32( + global void * src0, + ulong offset0, + global int * src1, + ulong offset1, + global float * src2, + ulong offset2, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3, + int n_past, + int n_dims, + int n_ctx_orig, + float freq_base, + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow +) { + src0 = (global void*)((global char*)src0 + offset0); + src1 = (global int*)((global char*)src1 + offset1); + src2 = (global float*)((global char*)src2 + offset2); + dst = (global float*)((global char*)dst + offsetd); + + int i3 = get_group_id(2); + int i2 = get_group_id(1); + int i1 = get_group_id(0); + + float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); + + global int * pos = src1; + + float theta_base = (float) pos[i2]; + float inv_ndims = -1.f/n_dims; + + for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { + if (i0 < n_dims) { + int ic = i0/2; + + const float theta = theta_base * pow(freq_base, inv_ndims*i0); + + const float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; + + float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); + + global float * src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); + global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); + + const float x0 = src[0]; + const float x1 = src[n_dims/2]; + + dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; + dst_data[n_dims/2] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; + } else { + global float * const src = (global float *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + global float * dst_data = (global float *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; + } + } +} + +kernel void kernel_rope_neox_f16( + global void * src0, + ulong offset0, + global int * src1, + ulong offset1, + global float * src2, + ulong offset2, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3, + int n_past, + int n_dims, + int n_ctx_orig, + float freq_base, + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow +) { + src0 = (global void*)((global char*)src0 + offset0); + src1 = (global int*)((global char*)src1 + offset1); + src2 = (global float*)((global char*)src2 + offset2); + dst = (global float*)((global char*)dst + offsetd); + + int i3 = get_group_id(2); + int i2 = get_group_id(1); + int i1 = get_group_id(0); + + float2 corr_dims = rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow); + + global int * pos = src1; + + float theta_base = (float) pos[i2]; + float inv_ndims = -1.f/n_dims; + + for (int i0 = 2*get_local_id(0); i0 < ne0; i0 += 2*get_local_size(0)) { + if (i0 < n_dims) { + int ic = i0/2; + + const float theta = theta_base * pow(freq_base, inv_ndims*i0); + + const float freq_factor = src2 != src0 ? 
src2[ic] : 1.0f; + + float2 cos_sin_theta = rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor); + + global half * src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00); + global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0); + + const float x0 = src[0]; + const float x1 = src[n_dims/2]; + + dst_data[0] = x0*cos_sin_theta.s0 - x1*cos_sin_theta.s1; + dst_data[n_dims/2] = x0*cos_sin_theta.s1 + x1*cos_sin_theta.s0; + } else { + global half * const src = (global half *)((global char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00); + global half * dst_data = (global half *)((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; + } + } +} + +//------------------------------------------------------------------------------ +// cpy +//------------------------------------------------------------------------------ + +kernel void kernel_cpy_f16_f16( + global half * src0, + ulong offset0, + global half * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3 +) { + src0 = (global half*)((global char*)src0 + offset0); + dst = (global half*)((global char*)dst + offsetd); + + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + int n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; + + int i3 = n / (ne2*ne1*ne0); + int i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); + int i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; + int i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); + + global half * dst_data = (global half *) ((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { + global const half * src = (global half *)((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); + dst_data[i00] = src[0]; + } +} + +kernel void kernel_cpy_f16_f32( + global half * src0, + ulong offset0, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3 +) { + + src0 = (global half*)((global char*)src0 + offset0); + dst = (global float*)((global char*)dst + offsetd); + + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + int n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; + + int i3 = n / (ne2*ne1*ne0); + int i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); + int i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; + int i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); + + global float * dst_data = (global float *) ((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { + global half * src = (global half *)((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); + dst_data[i00] = src[0]; + } +} + +kernel void kernel_cpy_f32_f16( + global float * src0, + ulong offset0, + global half * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3 +) { + src0 = (global float*)((global char*)src0 + offset0); + dst = (global half*)((global char*)dst + 
offsetd); + + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + int n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; + + int i3 = n / (ne2*ne1*ne0); + int i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); + int i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; + int i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); + + global half * dst_data = (global half *) ((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { + global const float * src = (global float *)((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); + + dst_data[i00] = src[0]; + } +} + +kernel void kernel_cpy_f32_f32( + global float * src0, + ulong offset0, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne03, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne0, + int ne1, + int ne2, + int ne3, + ulong nb0, + ulong nb1, + ulong nb2, + ulong nb3 +) { + src0 = (global float*)((global char*)src0 + offset0); + dst = (global float*)((global char*)dst + offsetd); + + int i03 = get_group_id(2); + int i02 = get_group_id(1); + int i01 = get_group_id(0); + + int n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00; + + int i3 = n / (ne2*ne1*ne0); + int i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0); + int i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0; + int i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0); + + global float * dst_data = (global float *) ((global char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + for (int i00 = get_local_id(0); i00 < ne00; i00 += get_local_size(0)) { + global const float * src = (global float *)((global char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); + + dst_data[i00] = src[0]; + } +} + +//------------------------------------------------------------------------------ +// get_rows +//------------------------------------------------------------------------------ +kernel void kernel_get_rows_f32( + global void * src0, + ulong offset0, + global int * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + ulong nb01, + ulong nb02, + int ne10, + ulong nb10, + ulong nb11, + ulong nb1, + ulong nb2 +) { + src0 = (global void*)((global char*)src0 + offset0); + src1 = (global int*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + int i10 = get_group_id(0); + int i11 = get_group_id(1); + + int r = ((global int *) ((global char *) src1 + i11*nb11 + i10*nb10))[0]; + + int i02 = i11; + + for (int ind = get_local_id(0); ind < ne00; ind += get_local_size(0)) { + ((global float *) ((global char *) dst + i11*nb2 + i10*nb1))[ind] = + ((global float *) ((global char *) src0 + r*nb01 + i02*nb02))[ind]; + } +} + +kernel void kernel_get_rows_f16( + global void * src0, + ulong offset0, + global int * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + ulong nb01, + ulong nb02, + int ne10, + ulong nb10, + ulong nb11, + ulong nb1, + ulong nb2 +) { + src0 = (global void*)((global char*)src0 + offset0); + src1 = (global int*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + int i10 = get_group_id(0); + int i11 = get_group_id(1); + + int r = ((global int32_t *) ((global char *) src1 + i11*nb11 + i10*nb10))[0]; + + int i02 = i11; + + for (int ind = get_local_id(0); ind < ne00; ind += get_local_size(0)) { + ((global float *) ((global char *) dst + i11*nb2 + i10*nb1))[ind] = + ((global half *) ((global char *) src0 + r*nb01 + i02*nb02))[ind]; + } +} + 
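+// Decoding note (illustrative): one block_q4_0 packs QK4_0 = 32 weights, and
+// dequantize_q4_0_f32 emits 16 floats per call, so a block takes NL = 2 calls
+// (il = 0 decodes the low nibbles, il = 1 the high nibbles); the loop below
+// therefore walks each row in ne00/16 chunks of float16.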
+kernel void kernel_get_rows_q4_0( + global void * src0, + ulong offset0, + global int * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + ulong nb01, + ulong nb02, + int ne10, + ulong nb10, + ulong nb11, + ulong nb1, + ulong nb2 +) { + src0 = (global void*)((global char*)src0 + offset0); + src1 = (global int*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + const int NL = 2; + + int i10 = get_group_id(0); + int i11 = get_group_id(1); + + int r = ((global int32_t *) ((global char *) src1 + i11*nb11 + i10*nb10))[0]; + + int i02 = i11; + + for (int ind = get_local_id(0); ind < ne00/16; ind += get_local_size(0)) { + float16 temp; + dequantize_q4_0_f32( + ((global struct block_q4_0 *) ((global char *) src0 + r*nb01 + i02*nb02)) + ind/NL, ind%NL, &temp); + *(((global float16 *) ((global char *) dst + i11*nb2 + i10*nb1)) + ind) = temp; + } +} + +//------------------------------------------------------------------------------ +// mul_mat_f32_f32 +//------------------------------------------------------------------------------ +#define N_F32_F32 4 + +kernel void kernel_mul_mat_f32_f32( + global char * src0, + ulong offset0, + global char * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne10, + int ne11, + int ne12, + ulong nb10, + ulong nb11, + ulong nb12, + ulong nb13, + int ne0, + int ne1, + int r2, + int r3 +) { + src0 = (global char*)((global char*)src0 + offset0); + src1 = (global char*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + int r0 = get_group_id(0); + int rb = get_group_id(1)*N_F32_F32; + int im = get_group_id(2); + + int i12 = im%ne12; + int i13 = im/ne12; + + ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; + + global float * x = (global float *) (src0 + offset_src0); + + if (ne00 < 128) { + for (int row = 0; row < N_F32_F32; ++row) { + int r1 = rb + row; + if (r1 >= ne11) { + break; + } + + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global float * y = (global float *) (src1 + offset_src1); + + float sumf = 0; + for (int i = get_sub_group_local_id(); i < ne00; i += get_max_sub_group_size()) { + sumf += (float) x[i] * (float) y[i]; + } + + float all_sum = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + } else { + global float4 * x4 = (global float4 *)x; + for (int row = 0; row < N_F32_F32; ++row) { + int r1 = rb + row; + if (r1 >= ne11) { + break; + } + + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global float * y = (global float *) (src1 + offset_src1); + global float4 * y4 = (global float4 *) y; + + float sumf = 0; + for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { + sumf += (float) x4[i].s0 * y4[i].s0; + sumf += (float) x4[i].s1 * y4[i].s1; + sumf += (float) x4[i].s2 * y4[i].s2; + sumf += (float) x4[i].s3 * y4[i].s3; + } + + float all_sum = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + for (int i = 4*(ne00/4); i < ne00; ++i) { + all_sum += (float) x[i] * y[i]; + } + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + } +} + +//------------------------------------------------------------------------------ +// mul_mat_f16_f16 +//------------------------------------------------------------------------------ +#define N_F16_F16 4 + +kernel void kernel_mul_mat_f16_f16( + global char * src0, + 
ulong offset0, + global char * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne10, + int ne11, + int ne12, + ulong nb10, + ulong nb11, + ulong nb12, + ulong nb13, + int ne0, + int ne1, + int r2, + int r3) +{ + src0 = (global char*)((global char*)src0 + offset0); + src1 = (global char*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + int r0 = get_group_id(0); + int rb = get_group_id(1)*N_F16_F16; + int im = get_group_id(2); + + int i12 = im%ne12; + int i13 = im/ne12; + + ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; + + global half * x = (global half *) (src0 + offset_src0); + + if (ne00 < 128) { + for (int row = 0; row < N_F16_F16; ++row) { + int r1 = rb + row; + if (r1 >= ne11) { + break; + } + + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global half * y = (global half *) (src1 + offset_src1); + + float sumf = 0; + for (int i = get_sub_group_local_id(); i < ne00; i += get_max_sub_group_size()) { + sumf += (half) x[i] * (half) y[i]; + } + + float all_sum = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + } else { + global half4 * x4 = (global half4 *)x; + for (int row = 0; row < N_F16_F16; ++row) { + int r1 = rb + row; + if (r1 >= ne11) { + break; + } + + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global half * y = (global half *) (src1 + offset_src1); + global half4 * y4 = (global half4 *) y; + + float sumf = 0; + for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { + sumf += (half) x4[i].s0 * y4[i].s0; + sumf += (half) x4[i].s1 * y4[i].s1; + sumf += (half) x4[i].s2 * y4[i].s2; + sumf += (half) x4[i].s3 * y4[i].s3; + } + + float all_sum = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + for (int i = 4*(ne00/4); i < ne00; ++i) { + all_sum += (half) x[i] * y[i]; + } + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + } +} + +//------------------------------------------------------------------------------ +// mul_mat_f16_f32_1row +//------------------------------------------------------------------------------ +kernel void kernel_mul_mat_f16_f32_1row( + global char * src0, + ulong offset0, + global char * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne10, + int ne11, + int ne12, + ulong nb10, + ulong nb11, + ulong nb12, + ulong nb13, + int ne0, + int ne1, + int r2, + int r3 +) { + src0 = (global char*)((global char*)src0 + offset0); + src1 = (global char*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + int r0 = get_group_id(0); + int r1 = get_group_id(1); + int im = get_group_id(2); + + int i12 = im%ne12; + int i13 = im/ne12; + + ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global half * x = (global half *) (src0 + offset_src0); + global float * y = (global float *) (src1 + offset_src1); + + float sumf = 0; + if (ne00 < 128) { + for (int i = get_sub_group_local_id(); i < ne00; i += get_max_sub_group_size()) { + sumf += (float) x[i] * (float) y[i]; + } + float all_sum = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } else { + global half4 * x4 = (global half4 *) x; + 
global float4 * y4 = (global float4 *) y; + for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { + sumf += (float) x4[i].s0 * y4[i].s0; + sumf += (float) x4[i].s1 * y4[i].s1; + sumf += (float) x4[i].s2 * y4[i].s2; + sumf += (float) x4[i].s3 * y4[i].s3; + } + float all_sum = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + for (int i = 4*(ne00/4); i < ne00; ++i) { + all_sum += (float) x[i] * y[i]; + } + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + +} + +//------------------------------------------------------------------------------ +// mul_mat_f16_f32 +//------------------------------------------------------------------------------ +#define N_F16_F32 4 + +#ifdef ADRENO_GPU +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_mul_mat_f16_f32( + global char * src0, + ulong offset0, + global char * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne10, + int ne11, + int ne12, + ulong nb10, + ulong nb11, + ulong nb12, + ulong nb13, + int ne0, + int ne1, + int r2, + int r3 +) { + src0 = (global char*)((global char*)src0 + offset0); + src1 = (global char*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + int r0 = get_group_id(0); + int rb = get_group_id(1)*N_F16_F32; + int im = get_group_id(2); + + int i12 = im%ne12; + int i13 = im/ne12; + + ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03; + + global half * x = (global half *) (src0 + offset_src0); + + if (ne00 < 128) { + for (int row = 0; row < N_F16_F32; ++row) { + int r1 = rb + row; + if (r1 >= ne11) { + break; + } + + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global float * y = (global float *) (src1 + offset_src1); + + float sumf = 0; + for (int i = get_sub_group_local_id(); i < ne00; i += get_max_sub_group_size()) { + sumf += convert_float(x[i]) * y[i]; + } + + float all_sum = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + } else { + global half4 * x4 = (global half4 *)x; + for (int row = 0; row < N_F16_F32; ++row) { + int r1 = rb + row; + if (r1 >= ne11) { + break; + } + + ulong offset_src1 = r1*nb11 + (i12 )*nb12 + (i13 )*nb13; + + global float * y = (global float *) (src1 + offset_src1); + global float4 * y4 = (global float4 *) y; + + float sumf = 0; + for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) { + sumf += convert_float(x4[i].s0) * y4[i].s0; + sumf += convert_float(x4[i].s1) * y4[i].s1; + sumf += convert_float(x4[i].s2) * y4[i].s2; + sumf += convert_float(x4[i].s3) * y4[i].s3; + } + + float all_sum = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + for (int i = 4*(ne00/4); i < ne00; ++i) { + all_sum += (float) x[i] * y[i]; + } + dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum; + } + } + } +} + +//------------------------------------------------------------------------------ +// mul_mat_f16_f32_l4 +//------------------------------------------------------------------------------ +// Assumes row size (ne00) is a multiple of 4 +#ifdef ADRENO_GPU +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_mul_mat_f16_f32_l4( + global char * src0, + ulong offset0, + global char * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + ulong nb00, + ulong nb01, + ulong nb02, + ulong nb03, + int ne10, + int ne11, + int ne12, + ulong nb10, + ulong nb11, + 
ulong nb12,
+        ulong nb13,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+    src0 = (global char*)((global char*)src0 + offset0);
+    src1 = (global char*)((global char*)src1 + offset1);
+    dst = (global float*)((global char*)dst + offsetd);
+
+    int nrows = ne11;
+    int r0 = get_group_id(0);
+    int im = get_group_id(2);
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    ulong offset_src0 = r0*nb01 + (i12/r2)*nb02 + (i13/r3)*nb03;
+
+    global half4 * x4 = (global half4 *) (src0 + offset_src0);
+
+    for (int r1 = 0; r1 < nrows; ++r1) {
+        ulong offset_src1 = r1*nb11 + (i12   )*nb12 + (i13   )*nb13;
+
+        global float4 * y4 = (global float4 *) (src1 + offset_src1);
+
+        float sumf = 0;
+        for (int i = get_sub_group_local_id(); i < ne00/4; i += get_max_sub_group_size()) {
+            sumf += convert_float(x4[i].s0) * y4[i].s0;
+            sumf += convert_float(x4[i].s1) * y4[i].s1;
+            sumf += convert_float(x4[i].s2) * y4[i].s2;
+            sumf += convert_float(x4[i].s3) * y4[i].s3;
+        }
+
+        float all_sum = sub_group_reduce_add(sumf);
+        if (get_sub_group_local_id() == 0) {
+            dst[im*ne1*ne0 + r1*ne0 + r0] = all_sum;
+        }
+    }
+}
+
+//------------------------------------------------------------------------------
+// mul_vec_q_n_f32
+//------------------------------------------------------------------------------
+// function to calculate the inner product between half a q4_0 block and 16 floats (yl); sumy is SUM(yl[i])
+// il indicates where the q4 quants begin (0 or QK4_0/4)
+// we assume that the yl's have been multiplied with the appropriate scale factor
+// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
+inline float block_q_4_0_dot_y(
+        global struct block_q4_0 * qb_curr,
+        float sumy,
+        private float * yl,
+        int il
+) {
+    float d = qb_curr->d;
+    float2 acc = 0.f;
+    global ushort * qs = ((global ushort *)qb_curr + 1 + il/2);
+    for (int i = 0; i < 8; i+=2) {
+        acc.s0 += yl[i + 0] * (qs[i / 2] & 0x000F)
+                + yl[i + 1] * (qs[i / 2] & 0x0F00);
+        acc.s1 += yl[i + 8] * (qs[i / 2] & 0x00F0)
+                + yl[i + 9] * (qs[i / 2] & 0xF000);
+    }
+    return d * (sumy * -8.f + acc.s0 + acc.s1);
+}
+
+#ifdef INTEL_GPU
+#define N_DST 4 // each SIMD group works on 4 rows
+#define N_SIMDGROUP 1 // number of SIMD groups in a thread group
+#define N_SIMDWIDTH 16 // assuming SIMD group size is 16
+#elif defined (ADRENO_GPU)
+#define N_DST 4
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+
+inline void mul_vec_q_n_f32(
+        global void * src0,
+        global float * src1,
+        global float * dst,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne10,
+        int ne12,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+
+    const ulong nb = ne00/QK4_0;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    // (r0 * N_SIMDGROUP + get_sub_group_id()) is essentially the linear global
+    // id of a SIMD group in the grid.
+    int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST;
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    ulong offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+
+    global struct block_q4_0 * x = (global struct block_q4_0 *) src0 + offset0;
+    global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1;
+
+    float yl[16]; // src1 vector cache
+    float sumf[N_DST]={0.f};
+
+    int ix = get_sub_group_local_id()/2;
+    int il = 8*(get_sub_group_local_id()%2);
+
+    global float * yb = y + ix * QK4_0 + il;
+
+    // each thread in a SIMD group deals with half a block.
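+    // Illustrative mapping (assuming N_SIMDWIDTH == 16): lanes 0..15 give
+    // ix = lane/2 in 0..7 and il alternating between 0 and 8, so adjacent
+    // lane pairs share one block per step - the il == 0 lane covers
+    // activations 0-7 and 16-23 of the block, the il == 8 lane covers 8-15
+    // and 24-31.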
+    for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) {
+        float sumy = 0;
+        for (int i = 0; i < 8; i += 2) {
+            sumy += yb[i] + yb[i+1];
+            yl[i+0] = yb[i+ 0];
+            yl[i+1] = yb[i+ 1]/256.f;
+            sumy += yb[i+16] + yb[i+17];
+            yl[i+8] = yb[i+16]/16.f;
+            yl[i+9] = yb[i+17]/4096.f;
+        }
+
+        for (int row = 0; row < N_DST; row++) {
+            sumf[row] += block_q_4_0_dot_y(x+ib+row*nb, sumy, yl, il);
+        }
+
+        // One thread in a SIMD group (i.e., subgroup) handles half a block,
+        // hence the entire SIMD group handles SIMDWIDTH/2 blocks.
+        // y points to the activation matrix (of type float). Therefore each
+        // thread must advance y by SIMDWIDTH/2 blocks per iteration - in terms
+        // of floats, that is QK4_0 * (SIMDWIDTH/2), where QK4_0 is the block size.
+        yb += QK4_0 * (N_SIMDWIDTH/2);
+    }
+
+    // The above does not work for Adreno - it produces incorrect results for
+    // row = 1, 2, 3 and only row = 0 gives the correct result.
+    // If N_DST is changed, the below array must be initialized accordingly.
+    // This also seems to perform better on Intel.
+    float tot[N_DST] = {
+        sub_group_reduce_add(sumf[0]), sub_group_reduce_add(sumf[1]),
+        sub_group_reduce_add(sumf[2]), sub_group_reduce_add(sumf[3])};
+    for (int row = 0; row < N_DST; ++row) {
+        if (get_sub_group_local_id() == 0 && first_row + row < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot[row];
+        }
+    }
+}
+
+#ifdef INTEL_GPU
+REQD_SUBGROUP_SIZE_16
+#elif defined (ADRENO_GPU)
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_mul_mat_q4_0_f32(
+        global void * src0,
+        ulong offset0,
+        global float * src1,
+        ulong offset1,
+        global float * dst,
+        ulong offsetd,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne10,
+        int ne12,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+    src0 = (global void*)((global char*)src0 + offset0);
+    src1 = (global float*)((global char*)src1 + offset1);
+    dst = (global float*)((global char*)dst + offsetd);
+
+    mul_vec_q_n_f32(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3);
+}
+
+//
+// This variant unrolls the loops and uses vector types instead of pointers.
+// It improves performance on Adreno but not so much on Intel.
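+// Concretely, yl is kept in a float16 register vector instead of a private
+// array, and the per-block accumulation is fully unrolled over all 16
+// components rather than looped.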
+//
+inline float block_q_4_0_dot_y_v(
+        global struct block_q4_0 * qb_curr,
+        float sumy,
+        float16 yl,
+        int il
+) {
+    float d = qb_curr->d;
+    float acc = 0.f;
+    // a block is one half scale d followed by 16 quant bytes; +1 skips d
+    global ushort * qs = ((global ushort *)qb_curr + 1 + il/2);
+
+    acc += yl.s0 * (qs[0] & 0x000F);
+    acc += yl.s1 * (qs[0] & 0x0F00);
+    acc += yl.s8 * (qs[0] & 0x00F0);
+    acc += yl.s9 * (qs[0] & 0xF000);
+
+    acc += yl.s2 * (qs[1] & 0x000F);
+    acc += yl.s3 * (qs[1] & 0x0F00);
+    acc += yl.sa * (qs[1] & 0x00F0);
+    acc += yl.sb * (qs[1] & 0xF000);
+
+    acc += yl.s4 * (qs[2] & 0x000F);
+    acc += yl.s5 * (qs[2] & 0x0F00);
+    acc += yl.sc * (qs[2] & 0x00F0);
+    acc += yl.sd * (qs[2] & 0xF000);
+
+    acc += yl.s6 * (qs[3] & 0x000F);
+    acc += yl.s7 * (qs[3] & 0x0F00);
+    acc += yl.se * (qs[3] & 0x00F0);
+    acc += yl.sf * (qs[3] & 0xF000);
+
+    return d * (sumy * -8.f + acc);
+}
+
+#undef N_DST
+#undef N_SIMDGROUP
+#undef N_SIMDWIDTH
+
+#ifdef INTEL_GPU
+#define N_DST 4 // each SIMD group works on 4 rows
+#define N_SIMDGROUP 1 // number of SIMD groups in a thread group
+#define N_SIMDWIDTH 16 // assuming SIMD group size is 16
+#elif defined (ADRENO_GPU)
+#define N_DST 4
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+
+inline void mul_vec_q_n_f32_v(
+        global void * src0,
+        global float * src1,
+        global float * dst,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne10,
+        int ne12,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+    const ulong nb = ne00/QK4_0;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    // (r0 * N_SIMDGROUP + get_sub_group_id()) is essentially the linear global
+    // id of a SIMD group in the grid.
+    int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST;
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    ulong offset0 = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+
+    global struct block_q4_0 * x = (global struct block_q4_0 *) src0 + offset0;
+    global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1;
+
+    float16 yl; // src1 vector cache
+    float4 sumf = (float4)(0.f, 0.f, 0.f, 0.f);
+
+    int ix = get_sub_group_local_id()/2;
+    int il = 8*(get_sub_group_local_id()%2);
+
+    global float * yb = y + ix * QK4_0 + il;
+
+    // each thread in a SIMD group deals with half a block.
+    for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) {
+        float sumy = 0;
+
+        sumy += yb[0];
+        sumy += yb[1];
+        sumy += yb[2];
+        sumy += yb[3];
+        sumy += yb[4];
+        sumy += yb[5];
+        sumy += yb[6];
+        sumy += yb[7];
+
+        sumy += yb[16];
+        sumy += yb[17];
+        sumy += yb[18];
+        sumy += yb[19];
+        sumy += yb[20];
+        sumy += yb[21];
+        sumy += yb[22];
+        sumy += yb[23];
+
+        yl.s0 = yb[0];
+        yl.s1 = yb[1]/256.f;
+
+        yl.s2 = yb[2];
+        yl.s3 = yb[3]/256.f;
+
+        yl.s4 = yb[4];
+        yl.s5 = yb[5]/256.f;
+
+        yl.s6 = yb[6];
+        yl.s7 = yb[7]/256.f;
+
+        yl.s8 = yb[16]/16.f;
+        yl.s9 = yb[17]/4096.f;
+
+        yl.sa = yb[18]/16.f;
+        yl.sb = yb[19]/4096.f;
+
+        yl.sc = yb[20]/16.f;
+        yl.sd = yb[21]/4096.f;
+
+        yl.se = yb[22]/16.f;
+        yl.sf = yb[23]/4096.f;
+
+        sumf.s0 += block_q_4_0_dot_y_v(x+ib+0*nb, sumy, yl, il);
+        sumf.s1 += block_q_4_0_dot_y_v(x+ib+1*nb, sumy, yl, il);
+        sumf.s2 += block_q_4_0_dot_y_v(x+ib+2*nb, sumy, yl, il);
+        sumf.s3 += block_q_4_0_dot_y_v(x+ib+3*nb, sumy, yl, il);
+
+        // One thread in a SIMD group (i.e., subgroup) handles a half block,
+        // hence the entire SIMD group handles SIMDWIDTH/2 blocks.
+        // y points to the activation matrix (of type float). Therefore, for
+        // one thread, the # of blocks y should advance is SIMDWIDTH/2 (because
+        // SIMDWIDTH/2 blocks are processed by a SIMD group) - in terms of
+        // floats, it is QK4_0 * (SIMDWIDTH/2), where QK4_0 is the block size.
+        yb += QK4_0 * (N_SIMDWIDTH/2);
+    }
+
+    // The above does not work for Adreno - it produces incorrect results for
+    // row = 1, 2, 3 and only row = 0 gives the correct result.
+    // If N_DST is changed, the array below must be initialized accordingly.
+    // This also seems to perform better on Intel.
+    float4 tot = (float4)(
+        sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1),
+        sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3)
+    );
+
+    if (get_sub_group_local_id() == 0) {
+        if (first_row + 0 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0;
+        }
+        if (first_row + 1 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1;
+        }
+        if (first_row + 2 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2;
+        }
+        if (first_row + 3 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3;
+        }
+    }
+}
+
+#ifdef INTEL_GPU
+REQD_SUBGROUP_SIZE_16
+#elif defined (ADRENO_GPU)
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_mul_mat_q4_0_f32_v(
+        global void * src0,
+        ulong offset0,
+        global float * src1,
+        ulong offset1,
+        global float * dst,
+        ulong offsetd,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne10,
+        int ne12,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+    src0 = (global void*)((global char*)src0 + offset0);
+    src1 = (global float*)((global char*)src1 + offset1);
+    dst = (global float*)((global char*)dst + offsetd);
+
+    mul_vec_q_n_f32_v(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3);
+}
+
+//------------------------------------------------------------------------------
+// kernel_convert_block_q4_0
+// Convert the block_q4_0 format to 2 separate arrays (AOS -> SOA).
+// This kernel does not deshuffle the bits.
+//------------------------------------------------------------------------------
+kernel void kernel_convert_block_q4_0(
+    global struct block_q4_0 * src0,
+    global uchar * dst_q,
+    global half * dst_d
+) {
+    global struct block_q4_0 * b = (global struct block_q4_0 *) src0 + get_global_id(0);
+    global uchar * q = (global uchar *) dst_q + QK4_0/2*get_global_id(0);
+    global half * d = (global half *) dst_d + get_global_id(0);
+
+    *d = b->d;
+
+    for (int i = 0; i < QK4_0/2; ++i) {
+        q[i] = b->qs[i];
+    }
+}
+
+kernel void kernel_restore_block_q4_0(
+    global uchar * src_q,
+    global half * src_d,
+    global struct block_q4_0 * dst
+) {
+    global struct block_q4_0 * b = (global struct block_q4_0 *) dst + get_global_id(0);
+    global uchar * q = (global uchar *) src_q + QK4_0/2*get_global_id(0);
+    global half * d = (global half *) src_d + get_global_id(0);
+
+    b->d = *d;
+    for (int i = 0; i < QK4_0/2; ++i) {
+        b->qs[i] = q[i];
+    }
+}
+
+//------------------------------------------------------------------------------
+// mul_vec_q_n_f32_flat
+//
+// This variation uses flat arrays (struct of arrays, SOA) representation for
+// quant tensors.
+//------------------------------------------------------------------------------
+
+// This function requires the original shuffled weights.
+// As a reminder, the original weights are shuffled so that (q[0], q[16]) are
+// packed together in a byte, so are (q[1], q[17]) and so on.
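+// Reading qs as ushorts, qs[0] thus covers weight bytes 0 and 1, i.e. the
+// 4-bit weights q[0], q[1] (low nibbles) and q[16], q[17] (high nibbles).
+// This is why the masks below pair 0x000F/0x0F00 with yl.s0/yl.s1
+// (activations 0 and 1) and 0x00F0/0xF000 with yl.s8/yl.s9 (activations 16
+// and 17); the same pattern repeats for qs[1], qs[2] and qs[3].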
+inline float block_q_4_0_dot_y_flat(
+        global uchar * x,
+        global half * dh,
+        float sumy,
+        float16 yl,
+        int il
+) {
+    float d = *dh;
+    global ushort * qs = ((global ushort *)x + il/2);
+    float acc = 0.f;
+
+    acc += yl.s0 * (qs[0] & 0x000F);
+    acc += yl.s1 * (qs[0] & 0x0F00);
+    acc += yl.s8 * (qs[0] & 0x00F0);
+    acc += yl.s9 * (qs[0] & 0xF000);
+
+    acc += yl.s2 * (qs[1] & 0x000F);
+    acc += yl.s3 * (qs[1] & 0x0F00);
+    acc += yl.sa * (qs[1] & 0x00F0);
+    acc += yl.sb * (qs[1] & 0xF000);
+
+    acc += yl.s4 * (qs[2] & 0x000F);
+    acc += yl.s5 * (qs[2] & 0x0F00);
+    acc += yl.sc * (qs[2] & 0x00F0);
+    acc += yl.sd * (qs[2] & 0xF000);
+
+    acc += yl.s6 * (qs[3] & 0x000F);
+    acc += yl.s7 * (qs[3] & 0x0F00);
+    acc += yl.se * (qs[3] & 0x00F0);
+    acc += yl.sf * (qs[3] & 0xF000);
+
+    return d * (sumy * -8.f + acc);
+}
+
+#undef N_DST
+#undef N_SIMDGROUP
+#undef N_SIMDWIDTH
+
+#ifdef INTEL_GPU
+#define N_DST 4 // each SIMD group works on 4 rows
+#define N_SIMDGROUP 1 // number of SIMD groups in a thread group
+#define N_SIMDWIDTH 16 // assuming SIMD group size is 16
+#elif defined (ADRENO_GPU)
+#define N_DST 4
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+
+inline void mul_vec_q_n_f32_flat(
+        global uchar * src0_q,
+        global half * src0_d,
+        global float * src1,
+        global float * dst,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne10,
+        int ne12,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+    const ulong nb = ne00/QK4_0;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    // (r0 * N_SIMDGROUP + get_sub_group_id()) is the linear global id of
+    // a SIMD group in the grid. Each SIMD group produces N_DST values in the
+    // result, hence uses nb blocks, i.e., the offset becomes first_row*nb.
+    // Currently with llama2 7B, im is always 0.
+    // TODO: how to handle im/gqa*(nb*ne0)?
+    int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST;
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    // The number of scales is the same as the number of blocks.
+    ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+    // Each block contains QK4_0/2 uchars, hence the offset for qs is as follows.
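+    // As a worked instance of the two offsets (assuming QK4_0 == 32): each
+    // block stores 16 quant bytes and one half scale, so block index b maps
+    // to byte offset 16*b in src0_q and to element offset b in src0_d. Note
+    // that offset0_q counts bytes while offset0_d counts scale elements.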
+ ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2; + + global uchar * x = (global uchar *) src0_q + offset0_q; + global half * d = (global half *) src0_d + offset0_d; + global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; + + float16 yl; + float4 sumf = (float4)(0.f, 0.f, 0.f, 0.f); + + int ix = get_sub_group_local_id()/2; + int il = 8*(get_sub_group_local_id()%2); + + global float * yb = y + ix*QK4_0 + il; + + for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { + float sumy = 0.f; + + sumy += yb[0]; + sumy += yb[1]; + sumy += yb[2]; + sumy += yb[3]; + sumy += yb[4]; + sumy += yb[5]; + sumy += yb[6]; + sumy += yb[7]; + + sumy += yb[16]; + sumy += yb[17]; + sumy += yb[18]; + sumy += yb[19]; + sumy += yb[20]; + sumy += yb[21]; + sumy += yb[22]; + sumy += yb[23]; + + yl.s0 = yb[0]; + yl.s1 = yb[1]/256.f; + + yl.s2 = yb[2]; + yl.s3 = yb[3]/256.f; + + yl.s4 = yb[4]; + yl.s5 = yb[5]/256.f; + + yl.s6 = yb[6]; + yl.s7 = yb[7]/256.f; + + yl.s8 = yb[16]/16.f; + yl.s9 = yb[17]/4096.f; + + yl.sa = yb[18]/16.f; + yl.sb = yb[19]/4096.f; + + yl.sc = yb[20]/16.f; + yl.sd = yb[21]/4096.f; + + yl.se = yb[22]/16.f; + yl.sf = yb[23]/4096.f; + + sumf.s0 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il); + sumf.s1 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il); + sumf.s2 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il); + sumf.s3 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il); + + yb += QK4_0 * (N_SIMDWIDTH/2); + } + + float4 tot = (float4)( + sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), + sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3) + ); + + if (get_sub_group_local_id() == 0) { + if (first_row + 0 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; + } + if (first_row + 1 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; + } + if (first_row + 2 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; + } + if (first_row + 3 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; + } + } +} + +#ifdef INTEL_GPU +REQD_SUBGROUP_SIZE_16 +#elif defined (ADRENO_GPU) +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_mul_mat_q4_0_f32_flat( + global uchar * src0_q, + global half * src0_d, + global float * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne10, + int ne12, + int ne0, + int ne1, + int r2, + int r3 +) { + src1 = (global float*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + mul_vec_q_n_f32_flat(src0_q, src0_d, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3); +} + +// +// This variant outputs 8 values. 
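+// Compared with the 4x variant above, this widens the per-SIMD-group row
+// blocking (N_DST = 8), so the same yl activation cache is reused across
+// twice as many weight rows per loop iteration.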
+//
+#undef N_DST
+#undef N_SIMDGROUP
+#undef N_SIMDWIDTH
+
+#ifdef INTEL_GPU
+#define N_DST 8 // each SIMD group works on 8 rows
+#define N_SIMDGROUP 1 // number of SIMD groups in a thread group
+#define N_SIMDWIDTH 16 // assuming SIMD group size is 16
+#elif defined (ADRENO_GPU)
+#define N_DST 8
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+
+inline void mul_vec_q_n_f32_8x_flat(
+        global uchar * src0_q,
+        global half * src0_d,
+        global float * src1,
+        global float * dst,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne10,
+        int ne12,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+    const ulong nb = ne00/QK4_0;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    // (r0 * N_SIMDGROUP + get_sub_group_id()) is the linear global id of
+    // a SIMD group in the grid. Each SIMD group produces N_DST values in the
+    // result, hence uses nb blocks, i.e., the offset becomes first_row*nb.
+    // Currently with llama2 7B, im is always 0.
+    // TODO: how to handle im/gqa*(nb*ne0)?
+    int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST;
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    // The number of scales is the same as the number of blocks.
+    ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+    // Each block contains QK4_0/2 uchars, hence the offset for qs is as follows.
+    ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2;
+
+    global uchar * x = (global uchar *) src0_q + offset0_q;
+    global half * d = (global half *) src0_d + offset0_d;
+    global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1;
+
+    float16 yl;
+    float8 sumf = 0.f;
+
+    int ix = get_sub_group_local_id()/2;
+    int il = 8*(get_sub_group_local_id()%2);
+
+    global float * yb = y + ix*QK4_0 + il;
+
+    for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) {
+        float sumy = 0.f;
+
+        sumy += yb[0];
+        sumy += yb[1];
+        sumy += yb[2];
+        sumy += yb[3];
+        sumy += yb[4];
+        sumy += yb[5];
+        sumy += yb[6];
+        sumy += yb[7];
+
+        sumy += yb[16];
+        sumy += yb[17];
+        sumy += yb[18];
+        sumy += yb[19];
+        sumy += yb[20];
+        sumy += yb[21];
+        sumy += yb[22];
+        sumy += yb[23];
+
+        yl.s0 = yb[0];
+        yl.s1 = yb[1]/256.f;
+
+        yl.s2 = yb[2];
+        yl.s3 = yb[3]/256.f;
+
+        yl.s4 = yb[4];
+        yl.s5 = yb[5]/256.f;
+
+        yl.s6 = yb[6];
+        yl.s7 = yb[7]/256.f;
+
+        yl.s8 = yb[16]/16.f;
+        yl.s9 = yb[17]/4096.f;
+
+        yl.sa = yb[18]/16.f;
+        yl.sb = yb[19]/4096.f;
+
+        yl.sc = yb[20]/16.f;
+        yl.sd = yb[21]/4096.f;
+
+        yl.se = yb[22]/16.f;
+        yl.sf = yb[23]/4096.f;
+
+        sumf.s0 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il);
+        sumf.s1 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il);
+        sumf.s2 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il);
+        sumf.s3 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il);
+
+        sumf.s4 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 4*nb*QK4_0/2, d + ib + 4*nb, sumy, yl, il);
+        sumf.s5 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 5*nb*QK4_0/2, d + ib + 5*nb, sumy, yl, il);
+        sumf.s6 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 6*nb*QK4_0/2, d + ib + 6*nb, sumy, yl, il);
+        sumf.s7 += block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 7*nb*QK4_0/2, d + ib + 7*nb, sumy, yl, il);
+
+        yb += QK4_0 * (N_SIMDWIDTH/2);
+    }
+
+    float8 tot = (float8)(
+        sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1),
+        sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3),
+        sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5),
+        sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7)
+    );
+
+    if (get_sub_group_local_id() == 0) {
+        if (first_row + 0 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0;
+        }
+        if (first_row + 1 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1;
+        }
+        if (first_row + 2 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2;
+        }
+        if (first_row + 3 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3;
+        }
+
+        if (first_row + 4 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4;
+        }
+        if (first_row + 5 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5;
+        }
+        if (first_row + 6 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6;
+        }
+        if (first_row + 7 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7;
+        }
+    }
+}
+
+#ifdef INTEL_GPU
+REQD_SUBGROUP_SIZE_16
+#elif defined (ADRENO_GPU)
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_mul_mat_q4_0_f32_8x_flat(
+        global uchar * src0_q,
+        global half * src0_d,
+        global float * src1,
+        ulong offset1,
+        global float * dst,
+        ulong offsetd,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne10,
+        int ne12,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+    src1 = (global float*)((global char*)src1 + offset1);
+    dst = (global float*)((global char*)dst + offsetd);
+
+    mul_vec_q_n_f32_8x_flat(src0_q, src0_d, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3);
+}
diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl_cvt.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl_cvt.cl
new file mode 100644
index 0000000000000..e2024332f81b9
--- /dev/null
+++ b/ggml/src/ggml-opencl/kernels/ggml-opencl_cvt.cl
@@ -0,0 +1,106 @@
+//------------------------------------------------------------------------------
+// This file contains additional kernels for data conversion.
+// These kernels are used when loading the model, so their performance is less
+// important.
+//------------------------------------------------------------------------------
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+#elif defined(cl_amd_fp16)
+#pragma OPENCL EXTENSION cl_amd_fp16 : enable
+#else
+#error "Half precision floating point not supported by OpenCL implementation on your device."
+#endif
+
+#ifdef cl_khr_subgroups
+#pragma OPENCL EXTENSION cl_khr_subgroups : enable
+#elif defined(cl_intel_subgroups)
+#pragma OPENCL EXTENSION cl_intel_subgroups : enable
+#else
+#error "Subgroups not supported on your device."
+#endif
+
+#ifdef cl_intel_required_subgroup_size
+// Use a required subgroup size of 16 or 32 on Intel.
+#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
+#define INTEL_GPU 1
+#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
+#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
+#elif defined(cl_qcom_reqd_sub_group_size)
+// Always use a subgroup size of 64 on Adreno.
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
+#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
+#else
+// TODO: do not know how to choose the subgroup size on other GPUs.
+#error "Selecting subgroup size is not supported on your device."
+#endif
+
+#define QK4_0 32
+#define QR4_0 2
+#define QK4_1 32
+#define QR4_1 2
+#define QK5_0 32
+#define QR5_0 2
+#define QK5_1 32
+#define QR5_1 2
+#define QK8_0 32
+#define QR8_0 1
+#define QK_K 256
+#define K_QUANTS_PER_ITERATION 2
+
+typedef char int8_t;
+typedef uchar uint8_t;
+typedef short int16_t;
+typedef ushort uint16_t;
+typedef int int32_t;
+typedef uint uint32_t;
+
+//------------------------------------------------------------------------------
+// block_q4_0
+//------------------------------------------------------------------------------
+struct block_q4_0
+{
+    half d;
+    uint8_t qs[QK4_0 / 2];
+};
+
+//------------------------------------------------------------------------------
+// mul_vec_q_n_f32_flat_noshuffle
+//
+// This variation uses a flat-array (struct of arrays, SOA) representation for
+// quant tensors. It also uses a non-shuffled bit order for the weights.
+//
+// The shuffled version is kept in the original file because moving it here
+// seems to result in worse performance on Adreno.
+//------------------------------------------------------------------------------
+
+kernel void kernel_convert_block_q4_0_noshuffle(
+    global struct block_q4_0 * src0,
+    global uchar * dst_q,
+    global half * dst_d
+) {
+    global struct block_q4_0 * b = (global struct block_q4_0 *) src0 + get_global_id(0);
+    global uchar * q = (global uchar *) dst_q + QK4_0/2*get_global_id(0);
+    global half * d = (global half *) dst_d + get_global_id(0);
+
+    *d = b->d;
+    for (int i = 0; i < QK4_0/4; ++i) {
+        uchar x0 = b->qs[2*i + 0];
+        uchar x1 = b->qs[2*i + 1];
+
+        q[i + 0      ] = convert_uchar(x0 & 0x0F) | convert_uchar((x1 & 0x0F) << 4);
+        q[i + QK4_0/4] = convert_uchar((x0 & 0xF0) >> 4) | convert_uchar(x1 & 0xF0);
+
+        // Writing w[j] for the j-th 4-bit weight: the shuffled byte qs[2*i]
+        // holds (w[2*i] | w[2*i+16] << 4) and qs[2*i+1] holds
+        // (w[2*i+1] | w[2*i+17] << 4), so the two stores above produce
+        //   q[i]     = w[2*i]    | (w[2*i+1]  << 4)   (weights  0..15 in bytes 0..7)
+        //   q[i + 8] = w[2*i+16] | (w[2*i+17] << 4)   (weights 16..31 in bytes 8..15)
+
+#ifdef ADRENO_GPU
+        // Workaround for Adreno: the following printf statement must be
+        // present for the kernel to work properly; otherwise it produces
+        // incorrect results. convert_uchar above also seems necessary.
+        // Compare against a large number so that it does not print anything.
+        // get_sub_group_local_id() also works.
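+        // (The guard below is presumably never true for any realistic launch,
+        // so nothing is ever printed; the printf only has to be present in the
+        // compiled kernel for the workaround to take effect.)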
+ if (get_global_id(0) == 65536*4096) { + printf("%04x - %02x\n", *(global ushort*)d, ((x0 & 0xF0) >> 4) | (x1 & 0xF0)); + } +#endif + } +} diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl_gemv_noshuffle.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl_gemv_noshuffle.cl new file mode 100644 index 0000000000000..5e195411d690e --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/ggml-opencl_gemv_noshuffle.cl @@ -0,0 +1,265 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#pragma OPENCL EXTENSION cl_khr_subgroups : enable +#pragma OPENCL EXTENSION cl_qcom_subgroup_uniform_load: enable +#pragma OPENCL EXTENSION cl_qcom_subgroup_constant_load: enable +#pragma OPENCL EXTENSION cl_qcom_extra_vector_types : enable +#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable + +// assume +#define QK4_0 32 +#define N_SIMDGROUP 4 + +#define dequantizeBlockAccum_ns_sgbroadcast_1_hi(total_sums, bits4, scale, y) \ + float shared_y; \ + shared_y = sub_group_broadcast(y.s0, 0); \ + total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s1, 0); \ + total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s2, 0); \ + total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s3, 0); \ + total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s4, 0); \ + total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s5, 0); \ + total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s6, 0); \ + total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s7, 0); \ + total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s0, 1); \ + total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s1, 1); \ + total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s2, 1); \ + total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s3, 1); \ + total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s4, 1); \ + total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s5, 1); \ + total_sums.s0 += (((bits4.s6 & 
0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s6, 1); \ + total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s7, 1); \ + total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + + +#define dequantizeBlockAccum_ns_sgbroadcast_1_lo(total_sums, bits4, scale, y) \ + shared_y = sub_group_broadcast(y.s0, 2); \ + total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s1, 2); \ + total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s2, 2); \ + total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s3, 2); \ + total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s4, 2); \ + total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s5, 2); \ + total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s6, 2); \ + total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s7, 2); \ + total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s0, 3); \ + total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s1, 3); \ + total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s2, 3); \ + total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s3, 3); \ + total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s4, 3); \ + total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s5, 3); \ + total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s6, 3); \ + total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = 
sub_group_broadcast(y.s7, 3); \ + total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + + +#define dequantizeBlockAccum_ns_sgbroadcast_8_hi(total_sums, bits4, scale, y) \ + float8 shared_y; \ + shared_y = sub_group_broadcast(y, 0); \ + total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ + total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ + total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ + total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ + total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ + total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ + total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ + total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ + total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ + total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ + total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ + total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ + total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ + total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ + total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ + total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ + shared_y = sub_group_broadcast(y, 1); \ + total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ + total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ + total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ + total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ + total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ + total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ + total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ + total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ + total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ + total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ + total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ + total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ + total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ + total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ + total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ + total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ + + +#define dequantizeBlockAccum_ns_sgbroadcast_8_lo(total_sums, bits4, scale, y) \ + shared_y = sub_group_broadcast(y, 2); \ + total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ + total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ + total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ + total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ + total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ + total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ + total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * 
shared_y.s6; \ + total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ + total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ + total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ + total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ + total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ + total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ + total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ + total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ + total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ + shared_y = sub_group_broadcast(y, 3); \ + total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ + total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ + total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ + total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ + total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ + total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ + total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ + total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ + total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ + total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ + total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ + total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ + total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ + total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ + total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ + total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ + + +__attribute__((qcom_reqd_sub_group_size("full"))) +__kernel void kernel_gemv_noshuffle( + __read_only image1d_buffer_t src0_q, // quantized A + global half2 * src0_d, // A scales + __read_only image1d_buffer_t src1, // B + ulong offset1, // offset to B (0) + global float * dst, // C + ulong offsetd, // offset to C (0) + uint K, // K + int ne01, // M + int ne02, // 1 + int ne10, // K + int ne12, // 1 + int ne0, // M + int ne1, // N + int r2, // 1 + int r3) +{ + uint groupId = get_local_id(1); + uint gid = get_global_id(0); + ushort slid = get_sub_group_local_id(); + + __private uint4 regA; + __private half2 regS; + __private float8 regB; + + __private float2 totalSum = (float2)(0.0f); + + // loop along K in block granularity, skip 4 blocks every iter + for (uint k = groupId; k < (K / QK4_0); k += N_SIMDGROUP) { + regS = src0_d[gid + k * LINE_STRIDE_A]; // each fiber loads scale of two rows + // first 4 fibers in each wave load 8 B values to its private scope + if (slid < 4) { + regB.s0123 = read_imagef(src1, (slid * 2 + k * 8)); + regB.s4567 = read_imagef(src1, (1 + slid * 2 + k * 8)); + } + + // load half weights for two blocks in consecutive rows + regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 0)).x; + regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 1)).x; + regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 2)).x; + regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 3)).x; +#ifdef 
VECTOR_SUB_GROUP_BROADCAT + dequantizeBlockAccum_ns_sgbroadcast_8_hi(totalSum, as_ushort8(regA), regS, regB); +#else + dequantizeBlockAccum_ns_sgbroadcast_1_hi(totalSum, as_ushort8(regA), regS, regB); +#endif // VECTOR_SUB_GROUP_BROADCAT + + regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 4)).x; + regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 5)).x; + regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 6)).x; + regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 7)).x; +#ifdef VECTOR_SUB_GROUP_BROADCAT + dequantizeBlockAccum_ns_sgbroadcast_8_lo(totalSum, as_ushort8(regA), regS, regB); +#else + dequantizeBlockAccum_ns_sgbroadcast_1_lo(totalSum, as_ushort8(regA), regS, regB); +#endif // VECTOR_SUB_GROUP_BROADCAT + } + + // reduction in local memory, assumes #wave=4 + __local float2 reduceLM[SIMDGROUP_WIDTH * 3]; + if (groupId == 1) reduceLM[SIMDGROUP_WIDTH * 0 + slid] = totalSum; + if (groupId == 2) reduceLM[SIMDGROUP_WIDTH * 1 + slid] = totalSum; + if (groupId == 3) reduceLM[SIMDGROUP_WIDTH * 2 + slid] = totalSum; + barrier(CLK_LOCAL_MEM_FENCE); + if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 0 + slid]; + if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 1 + slid]; + if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 2 + slid]; + + // 2 outputs per fiber in wave 0 + if (groupId == 0) { + dst = (global float*)((global char*)dst + offsetd); + vstore2(totalSum, 0, &(dst[gid * 2])); + } + +} diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl_gemv_noshuffle_general.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl_gemv_noshuffle_general.cl new file mode 100644 index 0000000000000..5bdd4d067639a --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/ggml-opencl_gemv_noshuffle_general.cl @@ -0,0 +1,271 @@ +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#pragma OPENCL EXTENSION cl_khr_subgroups : enable +#pragma OPENCL EXTENSION cl_qcom_subgroup_uniform_load: enable +#pragma OPENCL EXTENSION cl_qcom_subgroup_constant_load: enable +#pragma OPENCL EXTENSION cl_qcom_extra_vector_types : enable +#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable + +// assume +#define QK4_0 32 +#define N_SIMDGROUP 4 + +#define dequantizeBlockAccum_ns_sgbroadcast_1_hi(total_sums, bits4, scale, y) \ + float shared_y; \ + shared_y = sub_group_broadcast(y.s0, 0); \ + total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s1, 0); \ + total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s2, 0); \ + total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s3, 0); \ + total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s4, 0); \ + total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s5, 0); \ + total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = 
sub_group_broadcast(y.s6, 0); \ + total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s7, 0); \ + total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s0, 1); \ + total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s1, 1); \ + total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s2, 1); \ + total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s3, 1); \ + total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s4, 1); \ + total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s5, 1); \ + total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s6, 1); \ + total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s7, 1); \ + total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + + +#define dequantizeBlockAccum_ns_sgbroadcast_1_lo(total_sums, bits4, scale, y) \ + shared_y = sub_group_broadcast(y.s0, 2); \ + total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s1, 2); \ + total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s2, 2); \ + total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s3, 2); \ + total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s4, 2); \ + total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s5, 2); \ + total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s6, 2); \ + total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s7, 2); \ + total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += 
(((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s0, 3); \ + total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s1, 3); \ + total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s2, 3); \ + total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s3, 3); \ + total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s4, 3); \ + total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s5, 3); \ + total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s6, 3); \ + total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y; \ + shared_y = sub_group_broadcast(y.s7, 3); \ + total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y; \ + total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y; \ + + +#define dequantizeBlockAccum_ns_sgbroadcast_8_hi(total_sums, bits4, scale, y) \ + float8 shared_y; \ + shared_y = sub_group_broadcast(y, 0); \ + total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ + total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ + total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ + total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ + total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ + total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ + total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ + total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ + total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ + total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ + total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ + total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ + total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ + total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ + total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ + total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ + shared_y = sub_group_broadcast(y, 1); \ + total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ + total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ + total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ + total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ + total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ + total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ + 
total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ + total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ + total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ + total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ + total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ + total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ + total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ + total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ + total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ + total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ + + +#define dequantizeBlockAccum_ns_sgbroadcast_8_lo(total_sums, bits4, scale, y) \ + shared_y = sub_group_broadcast(y, 2); \ + total_sums.s0 += ((bits4.s0 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ + total_sums.s0 += (((bits4.s0 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ + total_sums.s0 += (((bits4.s0 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ + total_sums.s0 += (((bits4.s0 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ + total_sums.s0 += ((bits4.s2 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ + total_sums.s0 += (((bits4.s2 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ + total_sums.s0 += (((bits4.s2 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ + total_sums.s0 += (((bits4.s2 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ + total_sums.s1 += ((bits4.s1 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ + total_sums.s1 += (((bits4.s1 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ + total_sums.s1 += (((bits4.s1 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ + total_sums.s1 += (((bits4.s1 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ + total_sums.s1 += ((bits4.s3 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ + total_sums.s1 += (((bits4.s3 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ + total_sums.s1 += (((bits4.s3 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ + total_sums.s1 += (((bits4.s3 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ + shared_y = sub_group_broadcast(y, 3); \ + total_sums.s0 += ((bits4.s4 & 0x000F) - 8) * scale.s0 * shared_y.s0; \ + total_sums.s0 += (((bits4.s4 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s1; \ + total_sums.s0 += (((bits4.s4 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s2; \ + total_sums.s0 += (((bits4.s4 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s3; \ + total_sums.s0 += ((bits4.s6 & 0x000F) - 8) * scale.s0 * shared_y.s4; \ + total_sums.s0 += (((bits4.s6 & 0x00F0) >> 4) - 8) * scale.s0 * shared_y.s5; \ + total_sums.s0 += (((bits4.s6 & 0x0F00) >> 8) - 8) * scale.s0 * shared_y.s6; \ + total_sums.s0 += (((bits4.s6 & 0xF000) >> 12) - 8) * scale.s0 * shared_y.s7; \ + total_sums.s1 += ((bits4.s5 & 0x000F) - 8) * scale.s1 * shared_y.s0; \ + total_sums.s1 += (((bits4.s5 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s1; \ + total_sums.s1 += (((bits4.s5 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s2; \ + total_sums.s1 += (((bits4.s5 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s3; \ + total_sums.s1 += ((bits4.s7 & 0x000F) - 8) * scale.s1 * shared_y.s4; \ + total_sums.s1 += (((bits4.s7 & 0x00F0) >> 4) - 8) * scale.s1 * shared_y.s5; \ + total_sums.s1 += (((bits4.s7 & 0x0F00) >> 8) - 8) * scale.s1 * shared_y.s6; \ + total_sums.s1 += (((bits4.s7 & 0xF000) >> 12) - 8) * scale.s1 * shared_y.s7; \ + + +__attribute__((qcom_reqd_sub_group_size("full"))) +__kernel void 
kernel_gemv_noshuffle(
+        __read_only image1d_buffer_t src0_q,  // quantized A
+        global half2  * src0_d,  // A scales
+        __read_only image1d_buffer_t src1,    // B
+        ulong offset1,            // offset to B (0)
+        global float * dst,       // C
+        ulong offsetd,            // offset to C (0)
+        int ne00,                 // K
+        int ne01,                 // M
+        int ne02,                 // 1
+        int ne10,                 // K
+        int ne12,                 // 1
+        int ne0,                  // M
+        int ne1,                  // N
+        int r2,                   // 1
+        int r3)
+{
+    uint groupId = get_local_id(1);
+    uint gid = get_global_id(0);
+    ushort slid = get_sub_group_local_id();
+
+    uint K = ne00;
+    uint M = ne01;
+
+    uint LINE_STRIDE_A = M / 2;
+    uint BLOCK_STRIDE_A = N_SIMDGROUP * M;
+
+    __private uint4 regA;
+    __private half2 regS;
+    __private float8 regB;
+
+    __private float2 totalSum = (float2)(0.0f);
+
+    // loop along K in block granularity, skipping 4 blocks every iteration
+    for (uint k = groupId; k < (K / QK4_0); k += N_SIMDGROUP) {
+        regS = src0_d[gid + k * LINE_STRIDE_A]; // each fiber loads the scales of two rows
+        // the first 4 fibers in each wave load 8 B values into their private scope
+        if (slid < 4) {
+            regB.s0123 = read_imagef(src1, (slid * 2 + k * 8));
+            regB.s4567 = read_imagef(src1, (1 + slid * 2 + k * 8));
+        }
+
+        // load half of the weights for two blocks in consecutive rows
+        regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 0)).x;
+        regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 1)).x;
+        regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 2)).x;
+        regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 3)).x;
+#ifdef VECTOR_SUB_GROUP_BROADCAT
+        dequantizeBlockAccum_ns_sgbroadcast_8_hi(totalSum, as_ushort8(regA), regS, regB);
+#else
+        dequantizeBlockAccum_ns_sgbroadcast_1_hi(totalSum, as_ushort8(regA), regS, regB);
+#endif // VECTOR_SUB_GROUP_BROADCAT
+
+        regA.s0 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 4)).x;
+        regA.s1 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 5)).x;
+        regA.s2 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 6)).x;
+        regA.s3 = read_imageui(src0_q, (gid + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 7)).x;
+#ifdef VECTOR_SUB_GROUP_BROADCAT
+        dequantizeBlockAccum_ns_sgbroadcast_8_lo(totalSum, as_ushort8(regA), regS, regB);
+#else
+        dequantizeBlockAccum_ns_sgbroadcast_1_lo(totalSum, as_ushort8(regA), regS, regB);
+#endif // VECTOR_SUB_GROUP_BROADCAT
+    }
+
+    // reduction in local memory, assumes #wave=4
+    __local float2 reduceLM[SIMDGROUP_WIDTH * 3];
+    if (groupId == 1) reduceLM[SIMDGROUP_WIDTH * 0 + slid] = totalSum;
+    if (groupId == 2) reduceLM[SIMDGROUP_WIDTH * 1 + slid] = totalSum;
+    if (groupId == 3) reduceLM[SIMDGROUP_WIDTH * 2 + slid] = totalSum;
+    barrier(CLK_LOCAL_MEM_FENCE);
+    if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 0 + slid];
+    if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 1 + slid];
+    if (groupId == 0) totalSum += reduceLM[SIMDGROUP_WIDTH * 2 + slid];
+
+    // 2 outputs per fiber in wave 0
+    if (groupId == 0) {
+        dst = (global float*)((global char*)dst + offsetd);
+        vstore2(totalSum, 0, &(dst[gid * 2]));
+    }
+
+}
diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl_mm.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl_mm.cl
new file mode 100644
index 0000000000000..e19e9a2f43629
--- /dev/null
+++ b/ggml/src/ggml-opencl/kernels/ggml-opencl_mm.cl
@@ -0,0 +1,1225 @@
+//------------------------------------------------------------------------------
+// This file contains additional mulmat kernels
+// (and potentially other kernels).
+//------------------------------------------------------------------------------
+#ifdef cl_khr_fp16
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+#elif defined(cl_amd_fp16)
+#pragma OPENCL EXTENSION cl_amd_fp16 : enable
+#else
+#error "Half precision floating point not supported by OpenCL implementation on your device."
+#endif
+
+#ifdef cl_khr_subgroups
+#pragma OPENCL EXTENSION cl_khr_subgroups : enable
+#elif defined(cl_intel_subgroups)
+#pragma OPENCL EXTENSION cl_intel_subgroups : enable
+#else
+#error "Subgroups not supported on your device."
+#endif
+
+#ifdef cl_intel_required_subgroup_size
+// Use a required subgroup size of 16 or 32 on Intel.
+#pragma OPENCL EXTENSION cl_intel_required_subgroup_size : enable
+#define INTEL_GPU 1
+#define REQD_SUBGROUP_SIZE_16 __attribute__((intel_reqd_sub_group_size(16)))
+#define REQD_SUBGROUP_SIZE_32 __attribute__((intel_reqd_sub_group_size(32)))
+#elif defined(cl_qcom_reqd_sub_group_size)
+// Always use a subgroup size of 64 on Adreno.
+#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
+#define ADRENO_GPU 1
+#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
+#define REQD_SUBGROUP_SIZE_128 __attribute__((qcom_reqd_sub_group_size("full")))
+#else
+// TODO: do not know how to choose the subgroup size on other GPUs.
+#error "Selecting subgroup size is not supported on your device."
+#endif
+
+#define QK4_0 32
+#define QR4_0 2
+#define QK4_1 32
+#define QR4_1 2
+#define QK5_0 32
+#define QR5_0 2
+#define QK5_1 32
+#define QR5_1 2
+#define QK8_0 32
+#define QR8_0 1
+#define QK_K 256
+#define K_QUANTS_PER_ITERATION 2
+
+typedef char int8_t;
+typedef uchar uint8_t;
+typedef short int16_t;
+typedef ushort uint16_t;
+typedef int int32_t;
+typedef uint uint32_t;
+
+//------------------------------------------------------------------------------
+// block_q4_0
+//------------------------------------------------------------------------------
+struct block_q4_0
+{
+    half d;
+    uint8_t qs[QK4_0 / 2];
+};
+
+//------------------------------------------------------------------------------
+// block_q6_K
+//------------------------------------------------------------------------------
+// 6-bit quantization
+// weight is represented as x = a * q
+// 16 blocks of 16 elements each
+// Effectively 6.5625 bits per weight: (128 + 64 + 16 + 2) bytes * 8 / 256
+typedef struct {
+    uint8_t ql[QK_K/2];      // quants, lower 4 bits
+    uint8_t qh[QK_K/4];      // quants, upper 2 bits
+    int8_t  scales[QK_K/16]; // scales, quantized with 8 bits
+    half d;                  // super-block scale
+} block_q6_K;
+
+//------------------------------------------------------------------------------
+// These are the variants for mat-mat multiplication, based on the mat-vec
+// multiplication kernel with flattened block_q4_0.
+//------------------------------------------------------------------------------
+
+// Common dot product.
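+// For q4_0 a weight dequantizes as w = d * (q - 8), so the per-block partial
+// sum is d * (sum_i q_i*y_i - 8 * sum_i y_i) = d * (acc - 8*sumy), which is
+// the return expression below. This helper appears to mirror
+// block_q_4_0_dot_y_flat from the mat-vec kernels, presumably duplicated here
+// because each .cl file is compiled as a separate program.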
+inline float mm_block_q_4_0_dot_y_flat(
+        global uchar * x,
+        global half * dh,
+        float sumy,
+        float16 yl,
+        int il
+) {
+    float d = *dh;
+    global ushort * qs = ((global ushort *)x + il/2);
+    float acc = 0.f;
+
+    acc += yl.s0 * (qs[0] & 0x000F);
+    acc += yl.s1 * (qs[0] & 0x0F00);
+    acc += yl.s8 * (qs[0] & 0x00F0);
+    acc += yl.s9 * (qs[0] & 0xF000);
+
+    acc += yl.s2 * (qs[1] & 0x000F);
+    acc += yl.s3 * (qs[1] & 0x0F00);
+    acc += yl.sa * (qs[1] & 0x00F0);
+    acc += yl.sb * (qs[1] & 0xF000);
+
+    acc += yl.s4 * (qs[2] & 0x000F);
+    acc += yl.s5 * (qs[2] & 0x0F00);
+    acc += yl.sc * (qs[2] & 0x00F0);
+    acc += yl.sd * (qs[2] & 0xF000);
+
+    acc += yl.s6 * (qs[3] & 0x000F);
+    acc += yl.s7 * (qs[3] & 0x0F00);
+    acc += yl.se * (qs[3] & 0x00F0);
+    acc += yl.sf * (qs[3] & 0xF000);
+
+    return d * (sumy * -8.f + acc);
+}
+
+#undef N_DST
+#undef N_SIMDGROUP
+#undef N_SIMDWIDTH
+
+#ifdef INTEL_GPU
+#define N_DST 8 // each SIMD group works on 8 rows (in the weights matrix)
+#define N_SIMDGROUP 1 // number of SIMD groups in a thread group
+#define N_SIMDWIDTH 16 // assuming SIMD group size is 16
+#elif defined (ADRENO_GPU)
+#define N_DST 8
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+//
+// This variant performs 1d blocking with 8x output.
+// Each simdgroup outputs 8 values on the `n0` dim (row in the output matrix).
+//
+inline void mul_mat_q_n_f32_1d_8x_flat(
+        global uchar * src0_q,
+        global half * src0_d,
+        global float * src1,
+        global float * dst,
+        int ne00,
+        int ne01,
+        int ne02,
+        int ne10,
+        int ne12,
+        int ne0,
+        int ne1,
+        int r2,
+        int r3
+) {
+    const int nb = ne00/QK4_0;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    // (r0 * N_SIMDGROUP + get_sub_group_id()) is the linear global id of
+    // a SIMD group in the grid. Each SIMD group produces N_DST values in the
+    // result, hence uses nb blocks, i.e., the offset becomes first_row*nb.
+    // Currently with llama2 7B, im is always 0.
+    // TODO: how to handle im/gqa*(nb*ne0)?
+    int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST;
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    // The number of scales is the same as the number of blocks.
+    ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+    // Each block contains QK4_0/2 uchars, hence the offset for qs is as follows.
+ ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2; + + global uchar * x = (global uchar *) src0_q + offset0_q; + global half * d = (global half *) src0_d + offset0_d; + global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; + + float16 yl; + float8 sumf = (float8)(0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f); + + int ix = get_sub_group_local_id()/2; + int il = 8*(get_sub_group_local_id()%2); + + global float * yb = y + ix*QK4_0 + il; + + for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) { + float sumy = 0.f; + + sumy += yb[0]; + sumy += yb[1]; + sumy += yb[2]; + sumy += yb[3]; + sumy += yb[4]; + sumy += yb[5]; + sumy += yb[6]; + sumy += yb[7]; + + sumy += yb[16]; + sumy += yb[17]; + sumy += yb[18]; + sumy += yb[19]; + sumy += yb[20]; + sumy += yb[21]; + sumy += yb[22]; + sumy += yb[23]; + + yl.s0 = yb[0]; + yl.s1 = yb[1]/256.f; + + yl.s2 = yb[2]; + yl.s3 = yb[3]/256.f; + + yl.s4 = yb[4]; + yl.s5 = yb[5]/256.f; + + yl.s6 = yb[6]; + yl.s7 = yb[7]/256.f; + + yl.s8 = yb[16]/16.f; + yl.s9 = yb[17]/4096.f; + + yl.sa = yb[18]/16.f; + yl.sb = yb[19]/4096.f; + + yl.sc = yb[20]/16.f; + yl.sd = yb[21]/4096.f; + + yl.se = yb[22]/16.f; + yl.sf = yb[23]/4096.f; + + sumf.s0 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il); + sumf.s1 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il); + sumf.s2 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il); + sumf.s3 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il); + + sumf.s4 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 4*nb*QK4_0/2, d + ib + 4*nb, sumy, yl, il); + sumf.s5 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 5*nb*QK4_0/2, d + ib + 5*nb, sumy, yl, il); + sumf.s6 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 6*nb*QK4_0/2, d + ib + 6*nb, sumy, yl, il); + sumf.s7 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 7*nb*QK4_0/2, d + ib + 7*nb, sumy, yl, il); + + yb += QK4_0 * (N_SIMDWIDTH/2); + } + + float8 tot = (float8)( + sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), + sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3), + sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5), + sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7) + ); + + if (get_sub_group_local_id() == 0) { + if (first_row + 0 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; + } + if (first_row + 1 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; + } + if (first_row + 2 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; + } + if (first_row + 3 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; + } + + if (first_row + 4 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4; + } + if (first_row + 5 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5; + } + if (first_row + 6 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6; + } + if (first_row + 7 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7; + } + } +} + +#ifdef INTEL_GPU +REQD_SUBGROUP_SIZE_16 +#elif defined (ADRENO_GPU) +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_mul_mat_q4_0_f32_1d_8x_flat( + global uchar * src0_q, + global half * src0_d, + global float * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne10, + int ne12, + int ne0, + int ne1, + int r2, + int r3 +) { + src1 = (global float*)((global char*)src1 + offset1); + dst = (global 
float*)((global char*)dst + offsetd);
+
+    mul_mat_q_n_f32_1d_8x_flat(src0_q, src0_d, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3);
+}
+
+#undef N_DST
+#undef N_SIMDGROUP
+#undef N_SIMDWIDTH
+
+#ifdef INTEL_GPU
+#define N_DST 16 // each SIMD group works on 16 rows (in weights matrix)
+#define N_SIMDGROUP 1 // number of SIMD groups in a thread group
+#define N_SIMDWIDTH 16 // assuming SIMD group size is 16
+#elif defined (ADRENO_GPU)
+#define N_DST 16
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+//
+// This variant performs 1d blocking with 16x output.
+// Each simdgroup outputs 16 values on `n0` dim (row in the output matrix).
+//
+inline void mul_mat_q_n_f32_1d_16x_flat(
+    global uchar * src0_q,
+    global half * src0_d,
+    global float * src1,
+    global float * dst,
+    int ne00,
+    int ne01,
+    int ne02,
+    int ne10,
+    int ne12,
+    int ne0,
+    int ne1,
+    int r2,
+    int r3
+) {
+    const int nb = ne00/QK4_0;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    // (r0 * N_SIMDGROUP + get_sub_group_id()) is the linear global id of
+    // a SIMD group in the grid. Each SIMD group produces N_DST values in the
+    // result, hence uses nb blocks, i.e., the offset becomes first_row*nb.
+    // Currently with llama2 7B, im is always 0.
+    // TODO: how to handle im/gqa*(nb*ne0)?
+    int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST;
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    // The number of scales is the same as the number of blocks.
+    ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+    // Each block contains QK4_0/2 uchars, hence offset for qs is as follows.
+    ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2;
+
+    global uchar * x = (global uchar *) src0_q + offset0_q;
+    global half * d = (global half *) src0_d + offset0_d;
+    global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1;
+
+    float16 yl;
+    float16 sumf = (float16)(0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f,
+                             0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f);
+
+    int ix = get_sub_group_local_id()/2;
+    int il = 8*(get_sub_group_local_id()%2);
+
+    global float * yb = y + ix*QK4_0 + il;
+
+    for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/2) {
+        float sumy = 0.f;
+
+        sumy += yb[0];
+        sumy += yb[1];
+        sumy += yb[2];
+        sumy += yb[3];
+        sumy += yb[4];
+        sumy += yb[5];
+        sumy += yb[6];
+        sumy += yb[7];
+
+        sumy += yb[16];
+        sumy += yb[17];
+        sumy += yb[18];
+        sumy += yb[19];
+        sumy += yb[20];
+        sumy += yb[21];
+        sumy += yb[22];
+        sumy += yb[23];
+
+        yl.s0 = yb[0];
+        yl.s1 = yb[1]/256.f;
+
+        yl.s2 = yb[2];
+        yl.s3 = yb[3]/256.f;
+
+        yl.s4 = yb[4];
+        yl.s5 = yb[5]/256.f;
+
+        yl.s6 = yb[6];
+        yl.s7 = yb[7]/256.f;
+
+        yl.s8 = yb[16]/16.f;
+        yl.s9 = yb[17]/4096.f;
+
+        yl.sa = yb[18]/16.f;
+        yl.sb = yb[19]/4096.f;
+
+        yl.sc = yb[20]/16.f;
+        yl.sd = yb[21]/4096.f;
+
+        yl.se = yb[22]/16.f;
+        yl.sf = yb[23]/4096.f;
+
+        sumf.s0 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 0*nb*QK4_0/2, d + ib + 0*nb, sumy, yl, il);
+        sumf.s1 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 1*nb*QK4_0/2, d + ib + 1*nb, sumy, yl, il);
+        sumf.s2 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 2*nb*QK4_0/2, d + ib + 2*nb, sumy, yl, il);
+        sumf.s3 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 3*nb*QK4_0/2, d + ib + 3*nb, sumy, yl, il);
+
+        sumf.s4 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 4*nb*QK4_0/2, d + ib + 4*nb, sumy, yl, il);
+        sumf.s5 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 5*nb*QK4_0/2, d + ib + 5*nb, sumy, yl, il);
+        sumf.s6 +=
mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 6*nb*QK4_0/2, d + ib + 6*nb, sumy, yl, il); + sumf.s7 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 7*nb*QK4_0/2, d + ib + 7*nb, sumy, yl, il); + + sumf.s8 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 8*nb*QK4_0/2, d + ib + 8*nb, sumy, yl, il); + sumf.s9 += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 9*nb*QK4_0/2, d + ib + 9*nb, sumy, yl, il); + sumf.sa += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 10*nb*QK4_0/2, d + ib + 10*nb, sumy, yl, il); + sumf.sb += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 11*nb*QK4_0/2, d + ib + 11*nb, sumy, yl, il); + + sumf.sc += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 12*nb*QK4_0/2, d + ib + 12*nb, sumy, yl, il); + sumf.sd += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 13*nb*QK4_0/2, d + ib + 13*nb, sumy, yl, il); + sumf.se += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 14*nb*QK4_0/2, d + ib + 14*nb, sumy, yl, il); + sumf.sf += mm_block_q_4_0_dot_y_flat(x + ib*QK4_0/2 + 15*nb*QK4_0/2, d + ib + 15*nb, sumy, yl, il); + + yb += QK4_0 * (N_SIMDWIDTH/2); + } + + float16 tot = (float16)( + sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1), + sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3), + sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5), + sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7), + + sub_group_reduce_add(sumf.s8), sub_group_reduce_add(sumf.s9), + sub_group_reduce_add(sumf.sa), sub_group_reduce_add(sumf.sb), + sub_group_reduce_add(sumf.sc), sub_group_reduce_add(sumf.sd), + sub_group_reduce_add(sumf.se), sub_group_reduce_add(sumf.sf) + ); + + if (get_sub_group_local_id() == 0) { + if (first_row + 0 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; + } + if (first_row + 1 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; + } + if (first_row + 2 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; + } + if (first_row + 3 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; + } + + if (first_row + 4 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4; + } + if (first_row + 5 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5; + } + if (first_row + 6 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6; + } + if (first_row + 7 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7; + } + + if (first_row + 8 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 8] = tot.s8; + } + if (first_row + 9 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 9] = tot.s9; + } + if (first_row + 10 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 10] = tot.sa; + } + if (first_row + 11 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 11] = tot.sb; + } + + if (first_row + 12 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 12] = tot.sc; + } + if (first_row + 13 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 13] = tot.sd; + } + if (first_row + 14 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 14] = tot.se; + } + if (first_row + 15 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 15] = tot.sf; + } + } +} + +#ifdef INTEL_GPU +REQD_SUBGROUP_SIZE_16 +#elif defined (ADRENO_GPU) +REQD_SUBGROUP_SIZE_64 +#endif +kernel void kernel_mul_mat_q4_0_f32_1d_16x_flat( + global uchar * src0_q, + global half * src0_d, + global float * src1, + ulong offset1, + global float * dst, + ulong offsetd, + int ne00, + int ne01, + int ne02, + int ne10, + int ne12, + int ne0, + int ne1, + int r2, + int r3 +) { + src1 = (global float*)((global char*)src1 + offset1); + dst = (global float*)((global char*)dst + offsetd); + + 
mul_mat_q_n_f32_1d_16x_flat(src0_q, src0_d, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3);
+}
+
+//------------------------------------------------------------------------------
+// kernel_mul_mat_q4_0_f32_flat_v0
+//------------------------------------------------------------------------------
+inline float block_q_4_0_dot_y_flat_v2(
+    half x,
+    half d,
+    float sumy,
+    float4 yl
+) {
+    uchar2 q = as_uchar2(x);
+    float acc = 0.0f;
+
+    acc += (q.s0 & 0x0F) * yl.s0;
+    acc += (q.s1 & 0x0F) * yl.s1;
+
+    acc += (q.s0 & 0xF0) * yl.s2;
+    acc += (q.s1 & 0xF0) * yl.s3;
+
+    return d * (sumy * -8.f + acc);
+}
+
+inline float block_q_4_0_dot_y_flat_v4(
+    float x,
+    half d,
+    float sumy,
+    float8 yl
+) {
+    uchar4 q = as_uchar4(x);
+    float acc = 0.0f;
+
+    acc += (q.s0 & 0x0F) * yl.s0;
+    acc += (q.s1 & 0x0F) * yl.s1;
+    acc += (q.s2 & 0x0F) * yl.s2;
+    acc += (q.s3 & 0x0F) * yl.s3;
+
+    acc += (q.s0 & 0xF0) * yl.s4;
+    acc += (q.s1 & 0xF0) * yl.s5;
+    acc += (q.s2 & 0xF0) * yl.s6;
+    acc += (q.s3 & 0xF0) * yl.s7;
+
+    return d * (sumy * -8.f + acc);
+}
+
+inline float block_q_4_0_dot_y_flat_v8(
+    float2 x,
+    half d,
+    float sumy,
+    float16 yl
+) {
+    uchar8 q = as_uchar8(x);
+    float acc = 0.0f;
+
+    acc += (q.s0 & 0x0F) * yl.s0;
+    acc += (q.s1 & 0x0F) * yl.s1;
+    acc += (q.s2 & 0x0F) * yl.s2;
+    acc += (q.s3 & 0x0F) * yl.s3;
+    acc += (q.s4 & 0x0F) * yl.s4;
+    acc += (q.s5 & 0x0F) * yl.s5;
+    acc += (q.s6 & 0x0F) * yl.s6;
+    acc += (q.s7 & 0x0F) * yl.s7;
+
+    acc += (q.s0 & 0xF0) * yl.s8;
+    acc += (q.s1 & 0xF0) * yl.s9;
+    acc += (q.s2 & 0xF0) * yl.sa;
+    acc += (q.s3 & 0xF0) * yl.sb;
+    acc += (q.s4 & 0xF0) * yl.sc;
+    acc += (q.s5 & 0xF0) * yl.sd;
+    acc += (q.s6 & 0xF0) * yl.se;
+    acc += (q.s7 & 0xF0) * yl.sf;
+
+    return d * (sumy * -8.f + acc);
+}
+
+#undef N_DST
+#undef N_SIMDGROUP
+#undef N_SIMDWIDTH
+
+#ifdef INTEL_GPU
+#define THREADS_PER_BLK 4 // Number of threads per block, i.e. each thread processes 1/THREADS_PER_BLK of a block
+#define N_DST 4
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 16
+#elif defined (ADRENO_GPU)
+#define THREADS_PER_BLK 4
+#define N_DST 4
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+
+#if THREADS_PER_BLK == 2 // Each thread processes 1/2 block
+# define ACT_TY float16
+# define Q_BLK_LD_TY float2
+# define block_q_4_0_dot_y_flat block_q_4_0_dot_y_flat_v8
+#elif THREADS_PER_BLK == 4 // Each thread processes 1/4 block
+# define ACT_TY float8
+# define Q_BLK_LD_TY float
+# define block_q_4_0_dot_y_flat block_q_4_0_dot_y_flat_v4
+#elif THREADS_PER_BLK == 8 // Each thread processes 1/8 block
+# define ACT_TY float4
+# define Q_BLK_LD_TY half
+# define block_q_4_0_dot_y_flat block_q_4_0_dot_y_flat_v2
+#endif
+
+#define BTYES_PER_THREAD_IN_BLK (QK4_0/2/THREADS_PER_BLK)
+
+#if N_DST == 2
+# define SUM_TY float2
+#elif N_DST == 4
+# define SUM_TY float4
+#elif N_DST == 8
+# define SUM_TY float8
+#elif N_DST == 16
+# define SUM_TY float16
+#endif
+
+#ifdef INTEL_GPU
+REQD_SUBGROUP_SIZE_16
+#elif defined (ADRENO_GPU)
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_mul_mat_q4_0_f32_flat_v0(
+    global uchar * src0_q,
+    global half * src0_d,
+    global float * src1,
+    ulong offset1,
+    global float * dst,
+    ulong offsetd,
+    int ne00,
+    int ne01,
+    int ne02,
+    int ne10,
+    int ne12,
+    int ne0,
+    int ne1,
+    int r2,
+    int r3
+) {
+    src1 = (global float*)((global char*)src1 + offset1);
+    dst = (global float*)((global char*)dst + offsetd);
+
+    const int nb = ne00/QK4_0;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    int first_row = (r0 *
N_SIMDGROUP + get_sub_group_id()) * N_DST; + + int i12 = im%ne12; + int i13 = im/ne12; + + // The number of scales is the same as the number of blocks. + ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02); + // Each block contains QK4_0/2 uchars, hence offset for qs is as follows. + ulong offset0_q = (first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02)) * QK4_0/2; + + global uchar * x = (global uchar *) src0_q + offset0_q; + global half * d = (global half *) src0_d + offset0_d; + global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1; + + int ix = get_sub_group_local_id()/THREADS_PER_BLK; + int il = get_sub_group_local_id()%THREADS_PER_BLK; + + global float * yb = y + ix*QK4_0 + BTYES_PER_THREAD_IN_BLK*il; + + // Registers for caching activation + ACT_TY yl = 0.f; + + // Registers for caching quants + Q_BLK_LD_TY q_blk_0 = 0, q_blk_1 = 0; +#if N_DST == 4 || N_DST == 8 || N_DST == 16 + Q_BLK_LD_TY q_blk_2 = 0, q_blk_3 = 0; +#endif +#if N_DST == 8 || N_DST == 16 + Q_BLK_LD_TY q_blk_4 = 0, q_blk_5 = 0, q_blk_6 = 0, q_blk_7 = 0; +#endif + + // Partial sum + SUM_TY sumf = 0.f; + + for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/THREADS_PER_BLK) { + float sumy = 0.f; + + q_blk_0 = *(global Q_BLK_LD_TY*)(x + ib*QK4_0/2 + BTYES_PER_THREAD_IN_BLK*il + 0*nb*QK4_0/2); + q_blk_1 = *(global Q_BLK_LD_TY*)(x + ib*QK4_0/2 + BTYES_PER_THREAD_IN_BLK*il + 1*nb*QK4_0/2); +#if N_DST == 4 || N_DST == 8 || N_DST == 16 + q_blk_2 = *(global Q_BLK_LD_TY*)(x + ib*QK4_0/2 + BTYES_PER_THREAD_IN_BLK*il + 2*nb*QK4_0/2); + q_blk_3 = *(global Q_BLK_LD_TY*)(x + ib*QK4_0/2 + BTYES_PER_THREAD_IN_BLK*il + 3*nb*QK4_0/2); +#endif +#if N_DST == 8 || N_DST == 16 + q_blk_4 = (*(global Q_BLK_LD_TY*)(x + ib*QK4_0/2 + BTYES_PER_THREAD_IN_BLK*il + 4*nb*QK4_0/2)); + q_blk_5 = (*(global Q_BLK_LD_TY*)(x + ib*QK4_0/2 + BTYES_PER_THREAD_IN_BLK*il + 5*nb*QK4_0/2)); + q_blk_6 = (*(global Q_BLK_LD_TY*)(x + ib*QK4_0/2 + BTYES_PER_THREAD_IN_BLK*il + 6*nb*QK4_0/2)); + q_blk_7 = (*(global Q_BLK_LD_TY*)(x + ib*QK4_0/2 + BTYES_PER_THREAD_IN_BLK*il + 7*nb*QK4_0/2)); +#endif + + // Load activation +#if THREADS_PER_BLK == 2 // Each thread processes 1/2 block + yl.s01234567 = *(global float8 *)(yb); + yl.s89abcdef = *(global float8 *)(yb + 16); + + sumy += yl.s0; + sumy += yl.s1; + sumy += yl.s2; + sumy += yl.s3; + sumy += yl.s4; + sumy += yl.s5; + sumy += yl.s6; + sumy += yl.s7; + sumy += yl.s8; yl.s8 /= 16.f; + sumy += yl.s9; yl.s9 /= 16.f; + sumy += yl.sa; yl.sa /= 16.f; + sumy += yl.sb; yl.sb /= 16.f; + sumy += yl.sc; yl.sc /= 16.f; + sumy += yl.sd; yl.sd /= 16.f; + sumy += yl.se; yl.se /= 16.f; + sumy += yl.sf; yl.sf /= 16.f; +#elif THREADS_PER_BLK == 4 // Each thread processes 1/4 block + yl.s0123 = *(global float4 *)(yb); + yl.s4567 = *(global float4 *)(yb + 16); + + sumy += yl.s0; + sumy += yl.s1; + sumy += yl.s2; + sumy += yl.s3; + sumy += yl.s4; yl.s4 /= 16.f; + sumy += yl.s5; yl.s5 /= 16.f; + sumy += yl.s6; yl.s6 /= 16.f; + sumy += yl.s7; yl.s7 /= 16.f; +#elif THREADS_PER_BLK == 8 // Each thread processes 1/8 block + yl.s01 = *(global float2 *)(yb); + yl.s23 = *(global float2 *)(yb + 16); + + sumy += yl.s0; + sumy += yl.s1; + sumy += yl.s2; yl.s2 /= 16.f; + sumy += yl.s3; yl.s3 /= 16.f; +#endif + + sumf.s0 += block_q_4_0_dot_y_flat(q_blk_0, *(d + ib + 0*nb), sumy, yl); + sumf.s1 += block_q_4_0_dot_y_flat(q_blk_1, *(d + ib + 1*nb), sumy, yl); +#if N_DST == 4 || N_DST == 8 || N_DST == 16 + sumf.s2 += block_q_4_0_dot_y_flat(q_blk_2, *(d + ib + 2*nb), sumy, yl); + sumf.s3 += 
block_q_4_0_dot_y_flat(q_blk_3, *(d + ib + 3*nb), sumy, yl);
+#endif
+#if N_DST == 8 || N_DST == 16
+        sumf.s4 += block_q_4_0_dot_y_flat(q_blk_4, *(d + ib + 4*nb), sumy, yl);
+        sumf.s5 += block_q_4_0_dot_y_flat(q_blk_5, *(d + ib + 5*nb), sumy, yl);
+        sumf.s6 += block_q_4_0_dot_y_flat(q_blk_6, *(d + ib + 6*nb), sumy, yl);
+        sumf.s7 += block_q_4_0_dot_y_flat(q_blk_7, *(d + ib + 7*nb), sumy, yl);
+#endif
+
+        yb += QK4_0 * (N_SIMDWIDTH/THREADS_PER_BLK);
+    }
+
+    SUM_TY tot = (SUM_TY)(
+        sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1)
+#if N_DST == 4 || N_DST == 8 || N_DST == 16
+        , sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3)
+#endif
+#if N_DST == 8 || N_DST == 16
+        , sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5)
+        , sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7)
+#endif
+    );
+
+    if (get_sub_group_local_id() == 0) {
+        if (first_row + 0 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0;
+        }
+        if (first_row + 1 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1;
+        }
+#if N_DST == 4 || N_DST == 8 || N_DST == 16
+        if (first_row + 2 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2;
+        }
+        if (first_row + 3 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3;
+        }
+#endif
+#if N_DST == 8 || N_DST == 16
+        if (first_row + 4 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4;
+        }
+        if (first_row + 5 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5;
+        }
+        if (first_row + 6 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6;
+        }
+        if (first_row + 7 < ne01) {
+            dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7;
+        }
+#endif
+    }
+}
+
+//------------------------------------------------------------------------------
+// Using image1d_buffer_t
+
+#if defined(cl_qcom_subgroup_shuffle)
+#pragma OPENCL EXTENSION cl_qcom_subgroup_shuffle : enable
+float qcom_sub_group_reduce_add(float sum) {
+    sum += qcom_sub_group_shuffle_down(sum, 32, CLK_SUB_GROUP_SHUFFLE_WIDTH_WAVE_SIZE_QCOM, 0.f);
+    sum += qcom_sub_group_shuffle_down(sum, 16, CLK_SUB_GROUP_SHUFFLE_WIDTH_WAVE_SIZE_QCOM, 0.f);
+    sum += qcom_sub_group_shuffle_down(sum, 8, CLK_SUB_GROUP_SHUFFLE_WIDTH_WAVE_SIZE_QCOM, 0.f);
+    sum += qcom_sub_group_shuffle_down(sum, 4, CLK_SUB_GROUP_SHUFFLE_WIDTH_WAVE_SIZE_QCOM, 0.f);
+    sum += qcom_sub_group_shuffle_down(sum, 2, CLK_SUB_GROUP_SHUFFLE_WIDTH_WAVE_SIZE_QCOM, 0.f);
+    sum += qcom_sub_group_shuffle_down(sum, 1, CLK_SUB_GROUP_SHUFFLE_WIDTH_WAVE_SIZE_QCOM, 0.f);
+    return sum;
+}
+#define sub_group_reduce_add qcom_sub_group_reduce_add
+#else
+#define sub_group_reduce_add sub_group_reduce_add
+#endif
+
+#undef THREADS_PER_BLK
+#undef N_DST
+#undef N_SIMDGROUP
+#undef N_SIMDWIDTH
+
+#ifdef INTEL_GPU
+#define THREADS_PER_BLK 4 // Number of threads per block, i.e. each thread processes 1/THREADS_PER_BLK of a block
+#define N_DST 4
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 16
+#elif defined (ADRENO_GPU)
+#define THREADS_PER_BLK 4
+#define N_DST 4
+#define N_SIMDGROUP 1
+#define N_SIMDWIDTH 64
+#endif
+
+#if THREADS_PER_BLK == 2 // Each thread processes 1/2 block
+# define ACT_TY float16
+# define Q_BLK_LD_TY float2
+# define EXTRACT_BLK_DATA(tmp, part) *((float2*)&tmp + part)
+# define block_q_4_0_dot_y_flat block_q_4_0_dot_y_flat_v8
+#elif THREADS_PER_BLK == 4 // Each thread processes 1/4 block
+# define ACT_TY float8
+# define Q_BLK_LD_TY float
+# define EXTRACT_BLK_DATA(tmp, part) *((float*)&tmp + part)
+# define block_q_4_0_dot_y_flat block_q_4_0_dot_y_flat_v4
+#elif THREADS_PER_BLK == 8 // Each
thread processes 1/8 block
+# define ACT_TY float4
+# define Q_BLK_LD_TY half
+# define EXTRACT_BLK_DATA(tmp, part) *((half*)&tmp + part)
+# define block_q_4_0_dot_y_flat block_q_4_0_dot_y_flat_v2
+#endif
+
+#define BTYES_PER_THREAD_IN_BLK (QK4_0/2/THREADS_PER_BLK)
+
+#if N_DST == 2
+# define SUM_TY float2
+#elif N_DST == 4
+# define SUM_TY float4
+#elif N_DST == 8
+# define SUM_TY float8
+#elif N_DST == 16
+# define SUM_TY float16
+#endif
+
+#ifdef INTEL_GPU
+REQD_SUBGROUP_SIZE_16
+#elif defined (ADRENO_GPU)
+REQD_SUBGROUP_SIZE_64
+#endif
+kernel void kernel_mul_mat_q4_0_f32_flat_img_v0(
+    read_only image1d_buffer_t src0_q,
+    read_only image1d_buffer_t src0_d,
+    global float * src1,
+    ulong offset1,
+    global float * dst,
+    ulong offsetd,
+    int ne00,
+    int ne01,
+    int ne02,
+    int ne10,
+    int ne12,
+    int ne0,
+    int ne1,
+    int r2,
+    int r3
+) {
+    src1 = (global float*)((global char*)src1 + offset1);
+    dst = (global float*)((global char*)dst + offsetd);
+
+    const int nb = ne00/QK4_0;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    int first_row = (r0 * N_SIMDGROUP + get_sub_group_id()) * N_DST;
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    // The number of scales is the same as the number of blocks.
+    ulong offset0_d = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+    // Each block contains QK4_0/2 uchars, hence offset for qs is as follows.
+    ulong offset0_q = first_row * nb + (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+
+    global float * y = (global float *) src1 + r1*ne10 + im*ne00*ne1;
+
+    int ix = get_sub_group_local_id()/THREADS_PER_BLK;
+    int il = get_sub_group_local_id()%THREADS_PER_BLK;
+
+    global float * yb = y + ix*QK4_0 + BTYES_PER_THREAD_IN_BLK*il;
+
+    // Registers for caching activation
+    ACT_TY yl = 0.f;
+
+    // Registers for caching quants
+    Q_BLK_LD_TY q_blk_0 = 0, q_blk_1 = 0;
+#if N_DST == 4 || N_DST == 8 || N_DST == 16
+    Q_BLK_LD_TY q_blk_2 = 0, q_blk_3 = 0;
+#endif
+#if N_DST == 8 || N_DST == 16
+    Q_BLK_LD_TY q_blk_4 = 0, q_blk_5 = 0, q_blk_6 = 0, q_blk_7 = 0;
+#endif
+
+    // Partial sum
+    SUM_TY sumf = 0.f;
+
+    for (int ib = ix; ib < nb; ib += N_SIMDWIDTH/THREADS_PER_BLK) {
+        float sumy = 0.f;
+
+        float4 tmp;
+        tmp = read_imagef(src0_q, offset0_q + ib + 0*nb);
+        q_blk_0 = EXTRACT_BLK_DATA(tmp, il);
+        tmp = read_imagef(src0_q, offset0_q + ib + 1*nb);
+        q_blk_1 = EXTRACT_BLK_DATA(tmp, il);
+#if N_DST == 4 || N_DST == 8 || N_DST == 16
+        tmp = read_imagef(src0_q, offset0_q + ib + 2*nb);
+        q_blk_2 = EXTRACT_BLK_DATA(tmp, il);
+        tmp = read_imagef(src0_q, offset0_q + ib + 3*nb);
+        q_blk_3 = EXTRACT_BLK_DATA(tmp, il);
+#endif
+#if N_DST == 8 || N_DST == 16
+        tmp = read_imagef(src0_q, offset0_q + ib + 4*nb);
+        q_blk_4 = EXTRACT_BLK_DATA(tmp, il);
+        tmp = read_imagef(src0_q, offset0_q + ib + 5*nb);
+        q_blk_5 = EXTRACT_BLK_DATA(tmp, il);
+        tmp = read_imagef(src0_q, offset0_q + ib + 6*nb);
+        q_blk_6 = EXTRACT_BLK_DATA(tmp, il);
+        tmp = read_imagef(src0_q, offset0_q + ib + 7*nb);
+        q_blk_7 = EXTRACT_BLK_DATA(tmp, il);
+#endif
+
+        // Load activation
+#if THREADS_PER_BLK == 2 // Each thread processes 1/2 block
+        yl.s01234567 = *(global float8 *)(yb);
+        yl.s89abcdef = *(global float8 *)(yb + 16);
+
+        sumy += yl.s0;
+        sumy += yl.s1;
+        sumy += yl.s2;
+        sumy += yl.s3;
+        sumy += yl.s4;
+        sumy += yl.s5;
+        sumy += yl.s6;
+        sumy += yl.s7;
+        sumy += yl.s8; yl.s8 /= 16.f;
+        sumy += yl.s9; yl.s9 /= 16.f;
+        sumy += yl.sa; yl.sa /= 16.f;
+        sumy += yl.sb; yl.sb /= 16.f;
+        sumy += yl.sc; yl.sc /= 16.f;
+        sumy += yl.sd; yl.sd /= 16.f;
+        sumy
+= yl.se; yl.se /= 16.f; + sumy += yl.sf; yl.sf /= 16.f; +#elif THREADS_PER_BLK == 4 // Each thread processes 1/4 block + yl.s0123 = *(global float4 *)(yb); + yl.s4567 = *(global float4 *)(yb + 16); + + sumy += yl.s0; + sumy += yl.s1; + sumy += yl.s2; + sumy += yl.s3; + sumy += yl.s4; yl.s4 /= 16.f; + sumy += yl.s5; yl.s5 /= 16.f; + sumy += yl.s6; yl.s6 /= 16.f; + sumy += yl.s7; yl.s7 /= 16.f; +#elif THREADS_PER_BLK == 8 // Each thread processes 1/8 block + yl.s01 = *(global float2 *)(yb); + yl.s23 = *(global float2 *)(yb + 16); + + sumy += yl.s0; + sumy += yl.s1; + sumy += yl.s2; yl.s2 /= 16.f; + sumy += yl.s3; yl.s3 /= 16.f; +#endif + + sumf.s0 += block_q_4_0_dot_y_flat(q_blk_0, read_imageh(src0_d, offset0_d + ib + 0*nb).s0, sumy, yl); + sumf.s1 += block_q_4_0_dot_y_flat(q_blk_1, read_imageh(src0_d, offset0_d + ib + 1*nb).s0, sumy, yl); +#if N_DST == 4 || N_DST == 8 || N_DST == 16 + sumf.s2 += block_q_4_0_dot_y_flat(q_blk_2, read_imageh(src0_d, offset0_d + ib + 2*nb).s0, sumy, yl); + sumf.s3 += block_q_4_0_dot_y_flat(q_blk_3, read_imageh(src0_d, offset0_d + ib + 3*nb).s0, sumy, yl); +#endif +#if N_DST == 8 || N_DST == 16 + sumf.s4 += block_q_4_0_dot_y_flat(q_blk_4, read_imageh(src0_d, offset0_d + ib + 4*nb).s0, sumy, yl); + sumf.s5 += block_q_4_0_dot_y_flat(q_blk_5, read_imageh(src0_d, offset0_d + ib + 5*nb).s0, sumy, yl); + sumf.s6 += block_q_4_0_dot_y_flat(q_blk_6, read_imageh(src0_d, offset0_d + ib + 6*nb).s0, sumy, yl); + sumf.s7 += block_q_4_0_dot_y_flat(q_blk_7, read_imageh(src0_d, offset0_d + ib + 7*nb).s0, sumy, yl); +#endif + + yb += QK4_0 * (N_SIMDWIDTH/THREADS_PER_BLK); + } + + SUM_TY tot = (SUM_TY)( + sub_group_reduce_add(sumf.s0), sub_group_reduce_add(sumf.s1) +#if N_DST == 4 || N_DST == 8 || N_DST == 16 + , sub_group_reduce_add(sumf.s2), sub_group_reduce_add(sumf.s3) +#endif +#if N_DST == 8 || N_DST == 16 + , sub_group_reduce_add(sumf.s4), sub_group_reduce_add(sumf.s5) + , sub_group_reduce_add(sumf.s6), sub_group_reduce_add(sumf.s7) +#endif + ); + + if (get_sub_group_local_id() == 0) { + if (first_row + 0 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 0] = tot.s0; + } + if (first_row + 1 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 1] = tot.s1; + } +#if N_DST == 4 || N_DST == 8 || N_DST == 16 + if (first_row + 2 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 2] = tot.s2; + } + if (first_row + 3 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 3] = tot.s3; + } +#endif +#if N_DST == 8 || N_DST == 16 + if (first_row + 4 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 4] = tot.s4; + } + if (first_row + 5 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 5] = tot.s5; + } + if (first_row + 6 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 6] = tot.s6; + } + if (first_row + 7 < ne01) { + dst[r1*ne0 + im*ne0*ne1 + first_row + 7] = tot.s7; + } +#endif + } +} + +//------------------------------------------------------------------------------ +// kernel_mul_mv_q6_K_f32 +//------------------------------------------------------------------------------ + +#undef N_DST +#undef N_SIMDGROUP +#undef N_SIMDWIDTH + +#ifdef INTEL_GPU +#define N_DST 1 // number of rows each SIMD group works on +#define N_SIMDGROUP 2 // number of SIMD groups in a thread group +#define N_SIMDWIDTH 16 // SIMD group size +#elif defined (ADRENO_GPU) +#define N_DST 1 +#define N_SIMDGROUP 2 +#define N_SIMDWIDTH 64 +#endif + +#define BLOCK_STRIDE (N_SIMDWIDTH/16) // number of blocks each subgroup processes + +#ifdef INTEL_GPU +REQD_SUBGROUP_SIZE_16 +#elif defined (ADRENO_GPU) +REQD_SUBGROUP_SIZE_64 
+#endif
+kernel void kernel_mul_mv_q6_K_f32(
+    global void * src0,
+    ulong offset0,
+    global float * src1,
+    ulong offset1,
+    global float * dst,
+    ulong offsetd,
+    int ne00,
+    int ne01,
+    int ne02,
+    int ne10,
+    int ne12,
+    int ne0,
+    int ne1,
+    int r2,
+    int r3
+) {
+    src0 = (global void*)((global char*)src0 + offset0);
+    src1 = (global float*)((global char*)src1 + offset1);
+    dst = (global float*)((global char*)dst + offsetd);
+
+    uchar kmask1 = 0x03;
+    uchar kmask2 = 0x0C;
+    uchar kmask3 = 0x30;
+    uchar kmask4 = 0xC0;
+
+    int nb = ne00/QK_K;
+
+    int r0 = get_group_id(0);
+    int r1 = get_group_id(1);
+    int im = get_group_id(2);
+
+    int row = N_SIMDGROUP * r0 + get_sub_group_id();
+
+    int i12 = im%ne12;
+    int i13 = im/ne12;
+
+    ulong offset_src0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+
+    global block_q6_K * x = (global block_q6_K *) src0 + row*nb + offset_src0;
+    global float * yy = (global float *) src1 + r1*ne10 + im*ne00*ne1;
+
+    float sumf = 0;
+
+    // For Q6_K quantization, 16 values form a subblock, and 16 subblocks form a
+    // block. The values in a subblock share a scale that is quantized with 8 bits;
+    // the entire block shares a single floating point scale.
+    // For work distribution, each thread processes a subblock (16 weights), hence
+    // 16 threads process a (super) block -- a subgroup thus handles SIMDWIDTH/16
+    // (super) blocks -- this is the block stride.
+    // The 16 threads that process a (super) block are split into 2 portions, each
+    // with 8 threads; each portion works on 8 subblocks.
+    // For a subgroup of 16 threads, the entire subgroup works on a single (super) block
+    // before moving to the next (super) block. Thread0 - thread7 work on the
+    // first 8 subblocks; thread8 - thread15 work on the last 8 subblocks.
+    // Thread0 - thread3 work on subblocks 0, 2, 4, 6; thread4 - thread7 work on
+    // subblocks 1, 3, 5, 7. Each thread does not work on an entire subblock, but
+    // works on a total of 16 weight values.
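+    // Worked example (illustrative), for a subgroup of 16 threads (BLOCK_STRIDE == 1):
+    // thread 5  -> tid = 5,  ip = 0, il = 5, l0 = 20, is = 1, y_offset = 20,  q_offset_l = 20, q_offset_h = 20;
+    // thread 12 -> tid = 12, ip = 1, il = 4, l0 = 16, is = 9, y_offset = 144, q_offset_l = 80, q_offset_h = 48.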
+ int tid = get_sub_group_local_id()/BLOCK_STRIDE; // first block_stride groups have tid=0 + int ix = get_sub_group_local_id()%BLOCK_STRIDE; // first block is 0..block_stride-1 + int ip = tid/8; // first or second half of (super) block (0 or 1) + int il = tid%8; // each half has 8 parts, one per scale + int n = 4; // 4 scales at a time (and 4 sums) + int l0 = n*il; // offset into half-block, 0..28 + int is = 8*ip + l0/16; // 0, 1, 8, 9 + + int y_offset = 128*ip + l0; + int q_offset_l = 64*ip + l0; + int q_offset_h = 32*ip + l0; + + for (int i = ix; i < nb; i += BLOCK_STRIDE) { + + global uint8_t * q1 = x[i].ql + q_offset_l; + global uint8_t * q2 = q1 + QK_K/8; + global uint8_t * qh = x[i].qh + q_offset_h; + global int8_t * sc = x[i].scales + is; + + global float * y = yy + i * QK_K + y_offset; + + float dall = x[i].d; + + float4 sums = {0.f, 0.f, 0.f, 0.f}; + + sums.s0 += y[0+ 0] * ((float)((q1[0] & 0xF) | ((qh[0] & kmask1) << 4)) - 32.f); + sums.s1 += y[0+32] * ((float)((q2[0] & 0xF) | ((qh[0] & kmask2) << 2)) - 32.f); + sums.s2 += y[0+64] * ((float)((q1[0] >> 4) | ((qh[0] & kmask3) << 0)) - 32.f); + sums.s3 += y[0+96] * ((float)((q2[0] >> 4) | ((qh[0] & kmask4) >> 2)) - 32.f); + + sums.s0 += y[1+ 0] * ((float)((q1[1] & 0xF) | ((qh[1] & kmask1) << 4)) - 32.f); + sums.s1 += y[1+32] * ((float)((q2[1] & 0xF) | ((qh[1] & kmask2) << 2)) - 32.f); + sums.s2 += y[1+64] * ((float)((q1[1] >> 4) | ((qh[1] & kmask3) << 0)) - 32.f); + sums.s3 += y[1+96] * ((float)((q2[1] >> 4) | ((qh[1] & kmask4) >> 2)) - 32.f); + + sums.s0 += y[2+ 0] * ((float)((q1[2] & 0xF) | ((qh[2] & kmask1) << 4)) - 32.f); + sums.s1 += y[2+32] * ((float)((q2[2] & 0xF) | ((qh[2] & kmask2) << 2)) - 32.f); + sums.s2 += y[2+64] * ((float)((q1[2] >> 4) | ((qh[2] & kmask3) << 0)) - 32.f); + sums.s3 += y[2+96] * ((float)((q2[2] >> 4) | ((qh[2] & kmask4) >> 2)) - 32.f); + + sums.s0 += y[3+ 0] * ((float)((q1[3] & 0xF) | ((qh[3] & kmask1) << 4)) - 32.f); + sums.s1 += y[3+32] * ((float)((q2[3] & 0xF) | ((qh[3] & kmask2) << 2)) - 32.f); + sums.s2 += y[3+64] * ((float)((q1[3] >> 4) | ((qh[3] & kmask3) << 0)) - 32.f); + sums.s3 += y[3+96] * ((float)((q2[3] >> 4) | ((qh[3] & kmask4) >> 2)) - 32.f); + + sumf += dall * (sums.s0 * sc[0] + sums.s1 * sc[2] + sums.s2 * sc[4] + sums.s3 * sc[6]); + } + + float tot = sub_group_reduce_add(sumf); + if (get_sub_group_local_id() == 0) { + dst[r1*ne0 + im*ne0*ne1 + row] = tot; + } +} diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl new file mode 100644 index 0000000000000..57768c80334eb --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/ggml-opencl_mul_mat_Ab_Bi_8x4.cl @@ -0,0 +1,130 @@ +// src0_q, src0_d, src1 are transposed as a preprocessing step +// 4-bit weights are transposed in groups of 4 (unsigned short int) +// consider weights originally "next to each other", now "on top of each other" +// each fiber computes a 8x4 tile of output elements +// using unshuffled weights + +#pragma OPENCL EXTENSION cl_khr_fp16 : enable +#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable + +__attribute__((qcom_reqd_sub_group_size("full"))) +kernel void kernel_mul_mat_Ab_Bi_8x4( + global const ushort * src0_q, // quantized A + global const half * src0_d, // A scales + __read_only image1d_buffer_t src1, // B (1d image) + global float * dst, // C + int m, // M + int n, // N with padding + int k, // K + int n_no_padding // N without padding +) { + + int m_4 = m >> 2; + int n_4 = n >> 2; + + int gy = get_global_id(0); + int gx 
= get_global_id(1);
+    int gx_2 = gx << 2;
+
+    half8 c0 = 0, c1 = 0, c2 = 0, c3 = 0; // 8x4 output elements
+    half8 B; // registers for activations
+    half4 dequantized_weights; // registers for dequantized weights
+    __global const ushort* weight_ptr = src0_q + gx_2; // pointer for weights
+    __global const half* scale_ptr = src0_d + gx_2; // pointer for scales
+
+    for(int i=0; i<k; i+=4){
+        // j=0; the weight/scale indexing here assumes the transposed layout described
+        // above: consecutive k-groups of 4 weights are m ushorts apart, with one
+        // scale covering QK4_0 (32) weights of a row
+        B.s0123 = read_imageh(src1, gy*2 + (i)*(n_4));
+        B.s4567 = read_imageh(src1, gy*2 + (i)*(n_4)+1);
+        ushort4 bits4 = vload4(0, weight_ptr + i*m_4); // load 4 consecutive groups of 4 packed weights
+        half4 scale = vload4(0, scale_ptr + (i/32)*(m)); // load the 4 matching scales
+        dequantized_weights.s0 = ((bits4.s0 & (0x000F)) - 8) * scale.s0; // dequantize a row of the 16 weights
+        dequantized_weights.s1 = ((bits4.s1 & (0x000F)) - 8) * scale.s1;
+        dequantized_weights.s2 = ((bits4.s2 & (0x000F)) - 8) * scale.s2;
+        dequantized_weights.s3 = ((bits4.s3 & (0x000F)) - 8) * scale.s3;
+        c0 += B * dequantized_weights.s0; // vector-scalar multiplication to accumulate
+        c1 += B * dequantized_weights.s1;
+        c2 += B * dequantized_weights.s2;
+        c3 += B * dequantized_weights.s3;
+
+        // j=1
+        B.s0123 = read_imageh(src1, gy*2 + (i+1)*(n_4));
+        B.s4567 = read_imageh(src1, gy*2 + (i+1)*(n_4)+1);
+        dequantized_weights.s0 = (((bits4.s0 & (0x00F0)) >> 4) - 8) * scale.s0; // dequantize a row of the 16 weights
+        dequantized_weights.s1 = (((bits4.s1 & (0x00F0)) >> 4) - 8) * scale.s1;
+        dequantized_weights.s2 = (((bits4.s2 & (0x00F0)) >> 4) - 8) * scale.s2;
+        dequantized_weights.s3 = (((bits4.s3 & (0x00F0)) >> 4) - 8) * scale.s3;
+        c0 += B * dequantized_weights.s0; // vector-scalar multiplication to accumulate
+        c1 += B * dequantized_weights.s1;
+        c2 += B * dequantized_weights.s2;
+        c3 += B * dequantized_weights.s3;
+
+        // j=2
+        B.s0123 = read_imageh(src1, gy*2 + (i+2)*(n_4));
+        B.s4567 = read_imageh(src1, gy*2 + (i+2)*(n_4)+1);
+        dequantized_weights.s0 = (((bits4.s0 & (0x0F00)) >> 8) - 8) * scale.s0; // dequantize a row of the 16 weights
+        dequantized_weights.s1 = (((bits4.s1 & (0x0F00)) >> 8) - 8) * scale.s1;
+        dequantized_weights.s2 = (((bits4.s2 & (0x0F00)) >> 8) - 8) * scale.s2;
+        dequantized_weights.s3 = (((bits4.s3 & (0x0F00)) >> 8) - 8) * scale.s3;
+        c0 += B * dequantized_weights.s0; // vector-scalar multiplication to accumulate
+        c1 += B * dequantized_weights.s1;
+        c2 += B * dequantized_weights.s2;
+        c3 += B * dequantized_weights.s3;
+
+        // j=3
+        B.s0123 = read_imageh(src1, gy*2 + (i+3)*(n_4));
+        B.s4567 = read_imageh(src1, gy*2 + (i+3)*(n_4)+1);
+        dequantized_weights.s0 = (((bits4.s0 & (0xF000)) >> 12) - 8) * scale.s0; // dequantize a row of the 16 weights
+        dequantized_weights.s1 = (((bits4.s1 & (0xF000)) >> 12) - 8) * scale.s1;
+        dequantized_weights.s2 = (((bits4.s2 & (0xF000)) >> 12) - 8) * scale.s2;
+        dequantized_weights.s3 = (((bits4.s3 & (0xF000)) >> 12) - 8) * scale.s3;
+        c0 += B * dequantized_weights.s0; // vector-scalar multiplication to accumulate
+        c1 += B * dequantized_weights.s1;
+        c2 += B * dequantized_weights.s2;
+        c3 += B * dequantized_weights.s3;
+    }
+
+    int idx = (gy<<3)*m + (gx<<2); // vectorized store 16 elements
+
+    // conditional check if store is to a valid location.
Required when N is not a multiple of 8 + // if statements allow registers to be reused for each store + // provides a performance boost due to reduced register footprint, which increases number of concurrent waves + if(idx+3 < m*n_no_padding){ + vstore4((float4)(c0.s0, c1.s0, c2.s0, c3.s0), 0, dst + idx); + idx += m; + } + if(idx+3 < m*n_no_padding){ + vstore4((float4)(c0.s1, c1.s1, c2.s1, c3.s1), 0, dst + idx); + idx += m; + } + if(idx+3 < m*n_no_padding){ + vstore4((float4)(c0.s2, c1.s2, c2.s2, c3.s2), 0, dst + idx); + idx += m; + } + if(idx+3 < m*n_no_padding){ + vstore4((float4)(c0.s3, c1.s3, c2.s3, c3.s3), 0, dst + idx); + idx += m; + } + if(idx+3 < m*n_no_padding){ + vstore4((float4)(c0.s4, c1.s4, c2.s4, c3.s4), 0, dst + idx); + idx += m; + } + if(idx+3 < m*n_no_padding){ + vstore4((float4)(c0.s5, c1.s5, c2.s5, c3.s5), 0, dst + idx); + idx += m; + } + if(idx+3 < m*n_no_padding){ + vstore4((float4)(c0.s6, c1.s6, c2.s6, c3.s6), 0, dst + idx); + idx += m; + } + if(idx+3 < m*n_no_padding){ + vstore4((float4)(c0.s7, c1.s7, c2.s7, c3.s7), 0, dst + idx); + } +} diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_16.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_16.cl new file mode 100644 index 0000000000000..d59a0c05ddfd0 --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_16.cl @@ -0,0 +1,32 @@ +// 16-bit transpose, loading/storing an 8x8 tile of elements + +kernel void kernel_transpose_16( + __read_only image1d_buffer_t input, + __write_only image1d_buffer_t output, + const uint rows, + const uint cols +) { + + const int i = get_global_id(0); + const int j = get_global_id(1); + const int i_3 = i<<3; + const int j_3 = j<<3; + + ushort8 temp0 = as_ushort8(read_imagef(input, (j_3+0)*cols+i)); + ushort8 temp1 = as_ushort8(read_imagef(input, (j_3+1)*cols+i)); + ushort8 temp2 = as_ushort8(read_imagef(input, (j_3+2)*cols+i)); + ushort8 temp3 = as_ushort8(read_imagef(input, (j_3+3)*cols+i)); + ushort8 temp4 = as_ushort8(read_imagef(input, (j_3+4)*cols+i)); + ushort8 temp5 = as_ushort8(read_imagef(input, (j_3+5)*cols+i)); + ushort8 temp6 = as_ushort8(read_imagef(input, (j_3+6)*cols+i)); + ushort8 temp7 = as_ushort8(read_imagef(input, (j_3+7)*cols+i)); + + write_imagef(output, (i_3+0)*rows+j, as_float4((ushort8)(temp0.s0, temp1.s0, temp2.s0, temp3.s0, temp4.s0, temp5.s0, temp6.s0, temp7.s0))); + write_imagef(output, (i_3+1)*rows+j, as_float4((ushort8)(temp0.s1, temp1.s1, temp2.s1, temp3.s1, temp4.s1, temp5.s1, temp6.s1, temp7.s1))); + write_imagef(output, (i_3+2)*rows+j, as_float4((ushort8)(temp0.s2, temp1.s2, temp2.s2, temp3.s2, temp4.s2, temp5.s2, temp6.s2, temp7.s2))); + write_imagef(output, (i_3+3)*rows+j, as_float4((ushort8)(temp0.s3, temp1.s3, temp2.s3, temp3.s3, temp4.s3, temp5.s3, temp6.s3, temp7.s3))); + write_imagef(output, (i_3+4)*rows+j, as_float4((ushort8)(temp0.s4, temp1.s4, temp2.s4, temp3.s4, temp4.s4, temp5.s4, temp6.s4, temp7.s4))); + write_imagef(output, (i_3+5)*rows+j, as_float4((ushort8)(temp0.s5, temp1.s5, temp2.s5, temp3.s5, temp4.s5, temp5.s5, temp6.s5, temp7.s5))); + write_imagef(output, (i_3+6)*rows+j, as_float4((ushort8)(temp0.s6, temp1.s6, temp2.s6, temp3.s6, temp4.s6, temp5.s6, temp6.s6, temp7.s6))); + write_imagef(output, (i_3+7)*rows+j, as_float4((ushort8)(temp0.s7, temp1.s7, temp2.s7, temp3.s7, temp4.s7, temp5.s7, temp6.s7, temp7.s7))); +} diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_32.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_32.cl new file mode 100644 index 0000000000000..914ec0193e7e9 --- 
/dev/null +++ b/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_32.cl @@ -0,0 +1,25 @@ +// 32-bit transpose, loading/storing a 4x4 tile of elements + +kernel void kernel_transpose_32( + __read_only image1d_buffer_t input, + __write_only image1d_buffer_t output, + const uint rows, + const uint cols +) { + + const int i = get_global_id(0); + const int j = get_global_id(1); + const int i_2 = i<<2; + const int j_2 = j<<2; + + float4 temp0 = read_imagef(input, (j_2+0)*cols+i); + float4 temp1 = read_imagef(input, (j_2+1)*cols+i); + float4 temp2 = read_imagef(input, (j_2+2)*cols+i); + float4 temp3 = read_imagef(input, (j_2+3)*cols+i); + + write_imagef(output, (i_2+0)*rows+j, (float4)(temp0.s0, temp1.s0, temp2.s0, temp3.s0)); + write_imagef(output, (i_2+1)*rows+j, (float4)(temp0.s1, temp1.s1, temp2.s1, temp3.s1)); + write_imagef(output, (i_2+2)*rows+j, (float4)(temp0.s2, temp1.s2, temp2.s2, temp3.s2)); + write_imagef(output, (i_2+3)*rows+j, (float4)(temp0.s3, temp1.s3, temp2.s3, temp3.s3)); + +} diff --git a/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_32_16.cl b/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_32_16.cl new file mode 100644 index 0000000000000..d3bd1fabb76c3 --- /dev/null +++ b/ggml/src/ggml-opencl/kernels/ggml-opencl_transpose_32_16.cl @@ -0,0 +1,35 @@ +// 32-bit transpose, loading/storing a 4x4 tile of elements +// Only used for activations +// converts to FP16 +// also adds zero padding for non multiple of 8 prompt lengths +#pragma OPENCL EXTENSION cl_khr_fp16 : enable + +kernel void kernel_transpose_32_16(__read_only image1d_buffer_t input, __write_only image1d_buffer_t output, const uint rows, const uint cols, const uint padded_rows) { + + const int i = get_global_id(0); + const int j = get_global_id(1); + const int i_2 = i<<2; + const int j_2 = j<<2; + half4 temp0 = {0,0,0,0}; // initialize outputs to 0 + half4 temp1 = {0,0,0,0}; + half4 temp2 = {0,0,0,0}; + half4 temp3 = {0,0,0,0}; + + if((j_2+0)*cols+i*4+3 < rows*cols*16){ // only load from a valid location. 
Otherwise keep register data as 0 + temp0 = read_imageh(input, (j_2+0)*cols+i); + } + if((j_2+1)*cols+i*4+3 < rows*cols*16){ + temp1 = read_imageh(input, (j_2+1)*cols+i); + } + if((j_2+2)*cols+i*4+3 < rows*cols*16){ + temp2 = read_imageh(input, (j_2+2)*cols+i); + } + if((j_2+3)*cols+i*4+3 < rows*cols*16){ + temp3 = read_imageh(input, (j_2+3)*cols+i); + } + + write_imageh(output, (i_2+0)*padded_rows+j, (half4)(temp0.s0, temp1.s0, temp2.s0, temp3.s0)); // no conditionals for output, includes zero padding + write_imageh(output, (i_2+1)*padded_rows+j, (half4)(temp0.s1, temp1.s1, temp2.s1, temp3.s1)); + write_imageh(output, (i_2+2)*padded_rows+j, (half4)(temp0.s2, temp1.s2, temp2.s2, temp3.s2)); + write_imageh(output, (i_2+3)*padded_rows+j, (half4)(temp0.s3, temp1.s3, temp2.s3, temp3.s3)); +} diff --git a/ggml/src/ggml-opt.cpp b/ggml/src/ggml-opt.cpp index 040205a31bab0..7c3e24103a250 100644 --- a/ggml/src/ggml-opt.cpp +++ b/ggml/src/ggml-opt.cpp @@ -14,51 +14,51 @@ #include struct ggml_opt_dataset { - struct ggml_context * ctx; - ggml_backend_buffer_t buf; - struct ggml_tensor * data; - struct ggml_tensor * labels; + struct ggml_context * ctx = nullptr; + ggml_backend_buffer_t buf = nullptr; + struct ggml_tensor * data = nullptr; + struct ggml_tensor * labels = nullptr; - int64_t ndata; - int64_t ndata_shard; - size_t nbs_data; - size_t nbs_labels; + int64_t ndata = -1; + int64_t ndata_shard = -1; + size_t nbs_data = -1; + size_t nbs_labels = -1; std::vector permutation; }; struct ggml_opt_context { - ggml_backend_sched_t backend_sched; - ggml_cgraph * allocated_graph; - ggml_cgraph * allocated_graph_copy; - struct ggml_context * ctx_static; - struct ggml_context * ctx_static_cpu; - struct ggml_context * ctx_compute; - struct ggml_context * ctx_copy; - ggml_backend_buffer_t buf_static; - ggml_backend_buffer_t buf_static_cpu; + ggml_backend_sched_t backend_sched = nullptr; + ggml_cgraph * allocated_graph = nullptr; + ggml_cgraph * allocated_graph_copy = nullptr; + struct ggml_context * ctx_static = nullptr; + struct ggml_context * ctx_static_cpu = nullptr; + struct ggml_context * ctx_compute = nullptr; + struct ggml_context * ctx_copy = nullptr; + ggml_backend_buffer_t buf_static = nullptr; + ggml_backend_buffer_t buf_static_cpu = nullptr; std::mt19937 rng; - struct ggml_tensor * inputs; - struct ggml_tensor * outputs; - struct ggml_tensor * labels; + struct ggml_tensor * inputs = nullptr; + struct ggml_tensor * outputs = nullptr; + struct ggml_tensor * labels = nullptr; - struct ggml_tensor * loss; - struct ggml_tensor * pred; - struct ggml_tensor * ncorrect; + struct ggml_tensor * loss = nullptr; + struct ggml_tensor * pred = nullptr; + struct ggml_tensor * ncorrect = nullptr; - struct ggml_cgraph * gf; - struct ggml_cgraph * gb_grad; - struct ggml_cgraph * gb_opt; + struct ggml_cgraph * gf = nullptr; + struct ggml_cgraph * gb_grad = nullptr; + struct ggml_cgraph * gb_opt = nullptr; - int64_t iter; - int32_t opt_period; - int32_t opt_i; - bool loss_per_datapoint; + int64_t iter = 1; + int32_t opt_period = 1; + int32_t opt_i = 0; + bool loss_per_datapoint = false; - ggml_opt_get_optimizer_params get_opt_pars; - void * get_opt_pars_ud; - struct ggml_tensor * adamw_params; + ggml_opt_get_optimizer_params get_opt_pars = nullptr; + void * get_opt_pars_ud = nullptr; + struct ggml_tensor * adamw_params = nullptr; }; struct ggml_opt_result { @@ -67,8 +67,8 @@ struct ggml_opt_result { std::vector pred; int64_t ncorrect = 0; - bool loss_per_datapoint = false; - int64_t opt_period = -1; + int64_t 
opt_period = -1; + bool loss_per_datapoint = false; }; // ====== Dataset ====== @@ -188,11 +188,11 @@ struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * us } struct ggml_opt_params ggml_opt_default_params( - ggml_backend_sched_t backend_sched, - struct ggml_context * ctx_compute, - struct ggml_tensor * inputs, - struct ggml_tensor * outputs, - enum ggml_opt_loss_type loss_type) { + ggml_backend_sched_t backend_sched, + struct ggml_context * ctx_compute, + struct ggml_tensor * inputs, + struct ggml_tensor * outputs, + enum ggml_opt_loss_type loss_type) { return { /*backend_sched =*/ backend_sched, /*ctx_compute =*/ ctx_compute, @@ -237,25 +237,33 @@ static ggml_tensor * map_tensor(std::map & tensor_ return new_tensor; } -static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * graph) { +static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * src) { std::map tensor_map; - ggml_cgraph * new_graph = ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); + ggml_cgraph * dst = ggml_new_graph_custom(ctx, src->size, /*grads =*/ true); - for (int i = 0; i < graph->n_leafs; i++) { - ggml_build_forward_expand(new_graph, map_tensor(tensor_map, ctx, graph->leafs[i])); + for (int i = 0; i < src->n_leafs; i++) { + ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->leafs[i])); } - for (int i = 0; i < graph->n_nodes; i++) { - ggml_build_forward_expand(new_graph, map_tensor(tensor_map, ctx, graph->nodes[i])); + GGML_ASSERT(dst->n_leafs == src->n_leafs); + for (int i = 0; i < src->n_nodes; i++) { + ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->nodes[i])); } - for (int i = 0; i < graph->n_nodes; ++i) { - const size_t igrad_src = ggml_hash_find(&graph->visited_hash_set, graph->nodes[i]); - const size_t igrad_dst = ggml_hash_find(&new_graph->visited_hash_set, new_graph->nodes[i]); - graph->grads[igrad_dst] = new_graph->grads[igrad_src]; - graph->grad_accs[igrad_dst] = new_graph->grad_accs[igrad_src]; + GGML_ASSERT(dst->n_nodes == src->n_nodes); + for (int i = 0; i < src->n_nodes; ++i) { + const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]); + const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]); + + GGML_ASSERT(igrad_src != GGML_HASHSET_FULL); + GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src)); + GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL); + GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst)); + + dst->grads[igrad_dst] = src->grads[igrad_src]; + dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src]; } - return new_graph; + return dst; } static void ggml_opt_alloc_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph) { @@ -284,18 +292,13 @@ static void ggml_opt_alloc_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) { ggml_opt_context_t result = new struct ggml_opt_context; - result->backend_sched = params.backend_sched; - result->allocated_graph = nullptr; - result->allocated_graph_copy = nullptr; - result->ctx_compute = params.ctx_compute; - result->ctx_copy = nullptr; - result->inputs = params.inputs; - result->outputs = params.outputs; - result->iter = 1; - result->opt_period = params.opt_period; - result->opt_i = 0; - result->get_opt_pars = params.get_opt_pars; - result->get_opt_pars_ud = params.get_opt_pars_ud; + result->backend_sched = params.backend_sched; + result->ctx_compute = params.ctx_compute; + result->inputs = params.inputs; + result->outputs = 
params.outputs; + result->opt_period = params.opt_period; + result->get_opt_pars = params.get_opt_pars; + result->get_opt_pars_ud = params.get_opt_pars_ud; GGML_ASSERT(result->inputs->data && "the inputs must be allocated statically"); GGML_ASSERT(result->opt_period >= 1); @@ -348,7 +351,6 @@ ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) { switch (params.loss_type) { case GGML_OPT_LOSS_TYPE_MEAN: { - result->labels = nullptr; result->loss = ggml_sum(result->ctx_static, result->outputs); ggml_set_name(result->loss, "loss_sum"); const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs)); @@ -358,7 +360,6 @@ ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) { break; } case GGML_OPT_LOSS_TYPE_SUM: { - result->labels = nullptr; result->loss = ggml_sum(result->ctx_static, result->outputs); ggml_set_name(result->loss, "loss_sum"); result->loss_per_datapoint = false; @@ -413,14 +414,7 @@ ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) { } if (params.build_type == GGML_OPT_BUILD_TYPE_FORWARD) { - result->gb_grad = nullptr; - result->gb_opt = nullptr; - result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0)); - result->buf_static_cpu = nullptr; - - ggml_opt_alloc_graph(result, result->gf); - return result; } @@ -429,14 +423,8 @@ ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) { ggml_build_backward_expand(result->ctx_static, result->ctx_compute, result->gb_grad, accumulate); if (params.build_type == GGML_OPT_BUILD_TYPE_GRAD) { - result->gb_opt = nullptr; - result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0)); - result->buf_static_cpu = nullptr; - - ggml_opt_alloc_graph(result, result->gb_grad); ggml_graph_reset(result->gb_grad); - return result; } @@ -466,7 +454,6 @@ ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) { result->buf_static_cpu = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx_static_cpu, ggml_backend_cpu_buffer_type()); - ggml_opt_alloc_graph(result, result->gb_opt); ggml_graph_reset(result->gb_opt); return result; diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c index 7301a9c6caab8..7918388ae9f2e 100644 --- a/ggml/src/ggml-quants.c +++ b/ggml/src/ggml-quants.c @@ -5220,15 +5220,6 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte { VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb); } break; - case GGML_TYPE_Q4_0_4_4: - case GGML_TYPE_Q4_0_4_8: - { - VALIDATE_ROW_DATA_DVEC_F16_IMPL(block_q4_0x4, data, nbytes / sizeof(block_q4_0x4), 4); - } break; - case GGML_TYPE_Q4_0_8_8: - { - VALIDATE_ROW_DATA_DVEC_F16_IMPL(block_q4_0x8, data, nbytes / sizeof(block_q4_0x8), 8); - } break; case GGML_TYPE_I8: case GGML_TYPE_I16: diff --git a/ggml/src/ggml-rpc/CMakeLists.txt b/ggml/src/ggml-rpc/CMakeLists.txt index a2d6770eb053f..f5acb8ec2cb28 100644 --- a/ggml/src/ggml-rpc/CMakeLists.txt +++ b/ggml/src/ggml-rpc/CMakeLists.txt @@ -1,10 +1,8 @@ message(STATUS "Using RPC backend") -add_library(ggml-rpc - ggml-rpc.cpp) - -target_link_libraries(ggml-rpc PRIVATE ggml-base) -target_include_directories(ggml-rpc PRIVATE . ..) 
+ggml_add_backend_library(ggml-rpc + ggml-rpc.cpp + ) if (WIN32) target_link_libraries(ggml-rpc PRIVATE ws2_32) diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp index 47357daabdf54..43108242639a3 100644 --- a/ggml/src/ggml-rpc/ggml-rpc.cpp +++ b/ggml/src/ggml-rpc/ggml-rpc.cpp @@ -1369,8 +1369,9 @@ static const struct ggml_backend_reg_i ggml_backend_rpc_reg_i = { ggml_backend_reg_t ggml_backend_rpc_reg(void) { static struct ggml_backend_reg ggml_backend_rpc_reg = { - /* .iface = */ ggml_backend_rpc_reg_i, - /* .context = */ NULL, + /* .api_version = */ GGML_BACKEND_API_VERSION, + /* .iface = */ ggml_backend_rpc_reg_i, + /* .context = */ NULL, }; return &ggml_backend_rpc_reg; @@ -1401,3 +1402,5 @@ ggml_backend_dev_t ggml_backend_rpc_add_device(const char * endpoint) { return dev; } + +GGML_BACKEND_DL_IMPL(ggml_backend_rpc_reg) diff --git a/ggml/src/ggml-sycl/CMakeLists.txt b/ggml/src/ggml-sycl/CMakeLists.txt index 03bf3cb929b32..3579a311aac07 100644 --- a/ggml/src/ggml-sycl/CMakeLists.txt +++ b/ggml/src/ggml-sycl/CMakeLists.txt @@ -16,12 +16,10 @@ endif() message(STATUS "SYCL found") #todo: AOT -add_library(ggml-sycl - ggml-sycl.cpp - ../../include/ggml-sycl.h) - -target_link_libraries(ggml-sycl PRIVATE ggml-base) -target_include_directories(ggml-sycl PRIVATE . ..) +ggml_add_backend_library(ggml-sycl + ggml-sycl.cpp + ../../include/ggml-sycl.h + ) if (GGML_SYCL_F16) if (GGML_SYCL_TARGET STREQUAL "AMD") @@ -70,12 +68,17 @@ else() target_link_libraries(ggml-sycl PRIVATE sycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread) elseif (GGML_SYCL_TARGET STREQUAL "NVIDIA") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl-targets=nvptx64-nvidia-cuda") - target_link_libraries(ggml-sycl PRIVATE sycl pthread m dl onemkl) + add_compile_definitions(GGML_SYCL_NVIDIA) + target_link_libraries(ggml-sycl PRIVATE sycl pthread m dl onemkl_blas_cublas) elseif (GGML_SYCL_TARGET STREQUAL "AMD") - if (GGML_SYCL_HIP_TARGET STREQUAL "") - message(ERROR "Can't enable SYCL hip backend, GGML_SYCL_HIP_TARGET has not been set.") + if (NOT GGML_SYCL_DEVICE_ARCH) + message(ERROR "Can't enable SYCL hip backend, GGML_SYCL_DEVICE_ARCH has not been set.") endif() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl-targets=amdgcn-amd-amdhsa -Xsycl-target-backend --offload-arch=${GGML_SYCL_HIP_TARGET}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl-targets=amdgcn-amd-amdhsa") target_link_libraries(ggml-sycl PRIVATE sycl pthread m dl onemkl) endif() + + if (GGML_SYCL_DEVICE_ARCH) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xsycl-target-backend --offload-arch=${GGML_SYCL_DEVICE_ARCH}") + endif() endif() diff --git a/ggml/src/ggml-sycl/common.cpp b/ggml/src/ggml-sycl/common.cpp index 0bd827eb1c2d4..60d96e9d87c83 100644 --- a/ggml/src/ggml-sycl/common.cpp +++ b/ggml/src/ggml-sycl/common.cpp @@ -11,6 +11,7 @@ // #include "common.hpp" +#include "ggml-impl.h" int get_current_device_id() { return dpct::dev_mgr::instance().current_device_id(); @@ -32,11 +33,7 @@ void* ggml_sycl_host_malloc(size_t size) try { // printf("zjy ggml_sycl_host_malloc ptr=%p queue=%p size=%lu \n", ptr,q, size); if (err != 0) { // clear the error - fprintf( - stderr, - "WARNING: failed to allocate %.2f MB of pinned memory: %s\n", - size / 1024.0 / 1024.0, - "syclGetErrorString is not supported"); + GGML_LOG_ERROR("WARNING: failed to allocate %.2f MB of pinned memory: %s\n", size / 1024.0 / 1024.0, "syclGetErrorString is not supported"); return nullptr; } @@ -88,7 +85,7 @@ void print_device_detail_part1(int id, 
sycl::device &device, std::string device_ auto global_mem_size = prop.get_global_mem_size()/1000000; - fprintf(stderr, "|%2d|%19s|%4s|%39s|%14luM|\n", id, device_type.c_str(), version.c_str(), + fprintf(stderr, "|%2d|%19s|%5s|%39s|%14luM|\n", id, device_type.c_str(), version.c_str(), name.c_str(), global_mem_size); } @@ -110,8 +107,8 @@ void ggml_backend_sycl_print_sycl_devices() { std::map DeviceNums; fprintf(stderr, "found %d SYCL devices:\n", device_count); fprintf(stderr, "Part1:\n"); - fprintf(stderr, "|ID| Device Type| Ver| Name|Global mem size|\n"); - fprintf(stderr, "|--|-------------------|----|---------------------------------------|---------------|\n"); + fprintf(stderr, "|ID| Device Type| Ver| Name|Global mem size|\n"); + fprintf(stderr, "|--|-------------------|-----|---------------------------------------|---------------|\n"); for (int id = 0; id < device_count; ++id) { sycl::device device = dpct::dev_mgr::instance().get_device(id); sycl::backend backend = device.get_backend(); @@ -212,18 +209,12 @@ int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, const ggml_sycl_op_flatten_t op) try { - const int64_t nrows0 = ggml_nrows(src0); const bool use_src1 = src1 != nullptr; - const int64_t nrows1 = use_src1 ? ggml_nrows(src1) : 1; GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_TYPE_GPU_SPLIT); GGML_ASSERT( dst->backend != GGML_BACKEND_TYPE_GPU_SPLIT); - ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; - ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr; - ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra; - // dd = data device float * src0_ddf = (float *) src0->data; float * src1_ddf = use_src1 ? 
(float *) src1->data : nullptr; diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp index 104c3223636d6..dfd53628b0336 100644 --- a/ggml/src/ggml-sycl/common.hpp +++ b/ggml/src/ggml-sycl/common.hpp @@ -618,6 +618,7 @@ struct bin_bcast_sycl { }); } } + GGML_UNUSED(ctx); } }; diff --git a/ggml/src/ggml-sycl/concat.cpp b/ggml/src/ggml-sycl/concat.cpp index c90c452d8783f..a240968ad2e48 100644 --- a/ggml/src/ggml-sycl/concat.cpp +++ b/ggml/src/ggml-sycl/concat.cpp @@ -47,7 +47,7 @@ static void concat_f32_dim1(const float *x, const float *y, float *dst, // operation int offset_dst = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); - if (item_ct1.get_group(1) < ne01) { // src0 + if (item_ct1.get_group(1) < (size_t) ne01) { // src0 int offset_src = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * ne01; dst[offset_dst] = x[offset_src]; @@ -70,7 +70,7 @@ static void concat_f32_dim2(const float *x, const float *y, float *dst, // operation int offset_dst = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); - if (item_ct1.get_group(0) < ne02) { // src0 + if (item_ct1.get_group(0) < (size_t) ne02) { // src0 int offset_src = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); dst[offset_dst] = x[offset_src]; diff --git a/ggml/src/ggml-sycl/convert.cpp b/ggml/src/ggml-sycl/convert.cpp index 326084a5048ba..fa66f1cc0fc63 100644 --- a/ggml/src/ggml-sycl/convert.cpp +++ b/ggml/src/ggml-sycl/convert.cpp @@ -424,7 +424,7 @@ static void convert_unary(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t global_id = item_ct1.get_local_id(2) + work_group_size * item_ct1.get_group(2); // make each work-item deal with more elements since sycl global range can not exceed max int - const src_t * x = (src_t *) vx; + const src_t * x = (const src_t *) vx; for (int64_t i = global_id; i < k; i += work_group_size * item_ct1.get_group_range(2)) { y[i] = x[i]; } diff --git a/ggml/src/ggml-sycl/dmmv.cpp b/ggml/src/ggml-sycl/dmmv.cpp index 179a1b03f973c..6270cc20bce0a 100644 --- a/ggml/src/ggml-sycl/dmmv.cpp +++ b/ggml/src/ggml-sycl/dmmv.cpp @@ -1066,9 +1066,9 @@ void ggml_sycl_op_dequantize_mul_mat_vec( break; } - (void) src1; - (void) dst; - (void) src1_ddq_i; - (void) src1_ncols; - (void) src1_padded_row_size; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_ddq_i); + GGML_UNUSED(src1_ncols); + GGML_UNUSED(src1_padded_row_size); } diff --git a/ggml/src/ggml-sycl/dpct.hpp b/ggml/src/ggml-sycl/dpct.hpp index 0b04ad99f1d8b..ab6ff350951e2 100644 --- a/ggml/src/ggml-sycl/dpct.hpp +++ b/ggml/src/ggml-sycl/dpct.hpp @@ -1247,7 +1247,7 @@ namespace dpct std::map::iterator get_map_iterator(const void *ptr) { - auto it = m_map.upper_bound((byte_t *)ptr); + auto it = m_map.upper_bound(const_cast(reinterpret_cast(ptr))); if (it == m_map.end()) { // Not a virtual pointer. 
@@ -1699,9 +1699,14 @@ namespace dpct
            auto data_a = get_memory<const Ta>(a);
            auto data_b = get_memory<const Tb>(b);
            auto data_c = get_memory<Tc>(c);
-            oneapi::mkl::blas::column_major::gemm(
-                q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
-                data_b, ldb, beta_value, data_c, ldc);
+#ifdef GGML_SYCL_NVIDIA
+            oneapi::mkl::blas::column_major::gemm(oneapi::mkl::backend_selector<oneapi::mkl::backend::cublas>{ q },
+                                                  a_trans, b_trans, m, n, k, alpha_value, data_a, lda, data_b, ldb,
+                                                  beta_value, data_c, ldc);
+#else
+            oneapi::mkl::blas::column_major::gemm(q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda, data_b, ldb,
+                                                  beta_value, data_c, ldc);
+#endif
        }
 
        template <class Ta, class Tb, class Tc, class Ts>
@@ -1764,14 +1769,22 @@
            matrix_info->ld_info[2] = ldc;
            matrix_info->groupsize_info = batch_size;
 
+#ifdef GGML_SYCL_NVIDIA
+        sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
+            oneapi::mkl::backend_selector<oneapi::mkl::backend::cublas>{ q }, matrix_info->transpose_info,
+            matrix_info->transpose_info + 1, matrix_info->size_info, matrix_info->size_info + 1,
+            matrix_info->size_info + 2, matrix_info->value_info, reinterpret_cast<const Ta **>(a),
+            matrix_info->ld_info, reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
+            matrix_info->value_info + 1, reinterpret_cast<Tc **>(c), matrix_info->ld_info + 2, 1,
+            &(matrix_info->groupsize_info));
+#else
        sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
-            q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
-            matrix_info->size_info, matrix_info->size_info + 1,
-            matrix_info->size_info + 2, matrix_info->value_info,
-            reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
-            reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
-            matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
+            q, matrix_info->transpose_info, matrix_info->transpose_info + 1, matrix_info->size_info,
+            matrix_info->size_info + 1, matrix_info->size_info + 2, matrix_info->value_info,
+            reinterpret_cast<const Ta **>(a), matrix_info->ld_info, reinterpret_cast<const Tb **>(b),
+            matrix_info->ld_info + 1, matrix_info->value_info + 1, reinterpret_cast<Tc **>(c),
            matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
+#endif
 
        q.submit([&](sycl::handler &cgh) {
@@ -1793,10 +1806,16 @@ namespace dpct
            auto data_a = get_memory<const Ta>(a);
            auto data_b = get_memory<const Tb>(b);
            auto data_c = get_memory<Tc>(c);
+#ifdef GGML_SYCL_NVIDIA
            oneapi::mkl::blas::column_major::gemm_batch(
-                q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
-                stride_a, data_b, ldb, stride_b, beta_value,
-                data_c, ldc, stride_c, batch_size);
+                oneapi::mkl::backend_selector<oneapi::mkl::backend::cublas>{ q }, a_trans, b_trans, m, n, k,
+                alpha_value, data_a, lda, stride_a, data_b, ldb, stride_b, beta_value, data_c, ldc, stride_c,
+                batch_size);
+#else
+            oneapi::mkl::blas::column_major::gemm_batch(q, a_trans, b_trans, m, n, k, alpha_value, data_a, lda,
+                                                        stride_a, data_b, ldb, stride_b, beta_value, data_c, ldc,
+                                                        stride_c, batch_size);
+#endif
        }
    } // namespace detail
diff --git a/ggml/src/ggml-sycl/element_wise.cpp b/ggml/src/ggml-sycl/element_wise.cpp
index e5cd736eba989..d05a51f807c20 100644
--- a/ggml/src/ggml-sycl/element_wise.cpp
+++ b/ggml/src/ggml-sycl/element_wise.cpp
@@ -237,7 +237,7 @@ void upscale_f32(const float *x, float *dst, const int nb00, const int nb01,
    int i02 = i12 / sf2;
    int i03 = i13 / sf3;
 
-    dst[index] = *(float *)((char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00);
+    dst[index] = *(const float *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00);
 }
 
 void pad_f32(const float *x, float *dst, const int ne0, const int ne00, const int ne01, const int ne02,
@@ -251,8 +251,7 @@ void pad_f32(const float *x, float *dst, const int ne0, const int ne00, const i
    // 
operation int offset_dst = nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1); - if (nidx < ne00 && item_ct1.get_group(1) < ne01 && - item_ct1.get_group(0) < ne02) { + if (nidx < ne00 && item_ct1.get_group(1) < (size_t) ne01 && item_ct1.get_group(0) < (size_t) ne02) { int offset_src = nidx + item_ct1.get_group(1) * ne00 + item_ct1.get_group(0) * ne00 * ne01; dst[offset_dst] = x[offset_src]; @@ -520,9 +519,10 @@ inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, const ggml_tensor silu_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, @@ -535,9 +535,10 @@ inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, const ggml_tensor gelu_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, @@ -550,9 +551,10 @@ inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, const ggml_ gelu_quick_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, @@ -564,9 +566,10 @@ inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, const ggml_tensor GGML_ASSERT( dst->type == GGML_TYPE_F32); tanh_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, @@ -579,9 +582,10 @@ inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, const ggml_tensor relu_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -595,9 +599,10 @@ inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, const ggml hardsigmoid_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -610,9 +615,10 @@ inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, const ggml_t hardswish_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -625,9 +631,10 @@ inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, const ggml_tensor exp_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - 
(void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -640,9 +647,10 @@ inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, const ggml_tensor log_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -655,9 +663,10 @@ inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, const ggml_ten sigmoid_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -670,9 +679,10 @@ inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, const ggml_tensor sqrt_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -685,9 +695,10 @@ inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, const ggml_tensor sin_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -700,9 +711,10 @@ inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, const ggml_tensor cos_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -715,9 +727,10 @@ inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, const ggml_tensor step_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -730,9 +743,10 @@ inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, const ggml_tensor neg_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, @@ -749,9 +763,10 @@ inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, const ggml_ leaky_relu_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), negative_slope, main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, @@ -764,9 +779,10 @@ inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, const ggml_tensor sqr_f32_sycl(src0_dd, dst_dd, ggml_nelements(src0), main_stream); - (void) 
src1;
-    (void) dst;
-    (void) src1_dd;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_dd);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
@@ -787,9 +803,10 @@ inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, const ggml_ten
                dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3,
                main_stream);
 
-    (void) src1;
-    (void) dst;
-    (void) src1_dd;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_dd);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
@@ -805,9 +822,10 @@ inline void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, const ggml_tensor
        src0->ne[0], src0->ne[1], src0->ne[2],
        dst->ne[0], dst->ne[1], dst->ne[2], main_stream);
 
-    (void) src1;
-    (void) dst;
-    (void) src1_dd;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_dd);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
@@ -827,7 +845,8 @@ inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, const ggml_tensor
    acc_f32_sycl(src0_dd, src1_dd, dst_dd, ggml_nelements(dst), src1->ne[0], src1->ne[1], src1->ne[2], nb1, nb2, offset, main_stream);
 
-    (void) dst;
+    GGML_UNUSED(dst);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_add(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
diff --git a/ggml/src/ggml-sycl/gemm.hpp b/ggml/src/ggml-sycl/gemm.hpp
index 2ad9b36f419ce..3f0f34ad603f5 100644
--- a/ggml/src/ggml-sycl/gemm.hpp
+++ b/ggml/src/ggml-sycl/gemm.hpp
@@ -51,8 +51,8 @@ class DnnlGemmWrapper {
        const auto a_in_md = dnnl::memory::desc(a_dims, at, a_trans ? tag::ba : tag::ab);
        const auto b_in_md = dnnl::memory::desc(b_dims, bt, b_trans ? tag::ba : tag::ab);
        const auto c_md = dnnl::memory::desc(c_dims, ct, tag::ab);
-        auto a_mem = dnnl::memory(a_in_md, eng, (void*)a);
-        auto b_mem = dnnl::memory(b_in_md, eng, (void*)b);
+        auto a_mem = dnnl::memory(a_in_md, eng, const_cast<void*>(a));
+        auto b_mem = dnnl::memory(b_in_md, eng, const_cast<void*>(b));
        auto matmul_pd = dnnl::matmul::primitive_desc(eng, a_in_md, b_in_md, c_md);
        auto c_mem = dnnl::memory(matmul_pd.dst_desc(), eng, c);
 
@@ -79,8 +79,8 @@ class DnnlGemmWrapper {
        const auto a_in_md = dnnl::memory::desc(a_dims, at, a_trans ? tag::ba : tag::ab);
        const auto b_in_md = dnnl::memory::desc(b_dims, bt, b_trans ? tag::ba : tag::ab);
        const auto c_md = dnnl::memory::desc(c_dims, ct, tag::ab);
-        auto a_mem = dnnl::memory(a_in_md, eng, (void*)a);
-        auto b_mem = dnnl::memory(b_in_md, eng, (void*)b);
+        auto a_mem = dnnl::memory(a_in_md, eng, const_cast<void*>(a));
+        auto b_mem = dnnl::memory(b_in_md, eng, const_cast<void*>(b));
        auto matmul_pd = dnnl::matmul::primitive_desc(eng, a_in_md, b_in_md, c_md);
        auto c_mem = dnnl::memory(matmul_pd.dst_desc(), eng, c);
diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp
index 6169dac32bcca..e793b70f82fdf 100644
--- a/ggml/src/ggml-sycl/ggml-sycl.cpp
+++ b/ggml/src/ggml-sycl/ggml-sycl.cpp
@@ -262,14 +262,12 @@ ggml_backend_sycl_buffer_cpy_tensor(ggml_backend_buffer_t buffer,
        return true;
    }
    return false;
-}
-catch (sycl::exception const &exc) {
-    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
-              << ", line:" << __LINE__ << std::endl;
-    std::exit(1);
+    GGML_UNUSED(buffer);
+} catch (const sycl::exception & exc) {
+    std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl;
+    std::exit(1);
 }
-
 static void ggml_backend_sycl_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) try {
    ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context;
@@ -927,10 +925,7 @@ struct ggml_sycl_pool_leg : public ggml_sycl_pool {
    ggml_sycl_buffer buffer_pool[MAX_SYCL_BUFFERS] = {};
    size_t pool_size = 0;
 
-    explicit ggml_sycl_pool_leg(queue_ptr qptr_, int device_) :
-        qptr(qptr_),
-        device_id(device_) {
-    }
+    explicit ggml_sycl_pool_leg(queue_ptr qptr_, int device_) : device_id(device_), qptr(qptr_) {}
 
    ~ggml_sycl_pool_leg() {
        for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) {
@@ -1072,7 +1067,7 @@ static void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy,
            zeros[i]  = 0.f;
            qzeros[i] = 0;
        }
-        const TC xi = ix < kx ? *(TC *)&x[iy * kx + ix] : zeros;
+        const TC xi = ix < kx ?
*(const TC *)&x[iy * kx + ix] : zeros;
        float sum = xi[0];
        float amax = sycl::fabs(xi[0]);
 #pragma unroll
@@ -1633,6 +1628,9 @@ static void pool2d_nchw_kernel(
        switch (op) {
            case GGML_OP_POOL_AVG: res = 0; break;
            case GGML_OP_POOL_MAX: res = -FLT_MAX; break;
+            default:
+                res = (To) sycl::nan(uint32_t(0));
+                break;
        }
 
        for (int i = bh; i < eh; i += 1) {
@@ -1651,6 +1649,9 @@ static void pool2d_nchw_kernel(
                switch (op) {
                    case GGML_OP_POOL_AVG: res += (cur / (kh * kw)); break;
                    case GGML_OP_POOL_MAX: res = sycl::max(res, (To)cur); break;
+                    default:
+                        res = (To) sycl::nan(uint32_t(0));
+                        break;
                }
            }
        }
@@ -1689,7 +1690,8 @@ static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *sr
                          s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
        });
 
-    (void) dst;
+    GGML_UNUSED(dst);
+    GGML_UNUSED(ctx);
 }
 
 template <typename src0_t>
@@ -1727,10 +1729,10 @@ static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tens
        });
    }
 
-    (void) dst;
+    GGML_UNUSED(dst);
+    GGML_UNUSED(ctx);
 }
-
 static void quantize_row_q8_1_sycl(const float *x, void *vy, const int kx,
                                   const int ky, const int kx_padded,
                                   queue_ptr stream) {
@@ -2325,8 +2327,8 @@ static void ggml_sycl_op_repeat(ggml_backend_sycl_context & ctx, const ggml_tens
    ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_repeat>>(ctx, dst, src0, dst, nullptr, src0_d, dst_d, main_stream);
 
-    (void) src1;
-    (void) src1_d;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(src1_d);
 }
 
@@ -2345,17 +2347,18 @@ inline void ggml_sycl_op_mul_mat_sycl(
    const int64_t ne00 = src0->ne[0];
    const int64_t ne10 = src1->ne[0];
 
-    const int64_t ne0 = dst->ne[0];
 
    const int64_t row_diff = row_high - row_low;
 
    int id;
    SYCL_CHECK(
        CHECK_TRY_ERROR(id = get_current_device_id()));
-
+#if !GGML_SYCL_DNNL
+    const int64_t ne0 = dst->ne[0];
    // the main device has a larger memory buffer to hold the results from all GPUs
    // ldc == nrows of the matrix that cuBLAS writes into
    int ldc = id == ctx.device ? ne0 : row_diff;
+#endif
 
 #ifdef GGML_SYCL_F16
    bool use_fp16 = true;  // TODO(Yu) SYCL capability check
@@ -2392,9 +2395,9 @@
                             : src1_as_f16.get();
        ggml_sycl_pool_alloc<sycl::half> dst_f16(ctx.pool(), row_diff * src1_ncols);
 
-        const sycl::half alpha_f16 = 1.0f;
-        const sycl::half beta_f16 = 0.0f;
 #if !GGML_SYCL_DNNL
+        const sycl::half alpha_f16 = 1.0f;
+        const sycl::half beta_f16 = 0.0f;
        SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm(
            *stream, oneapi::mkl::transpose::trans,
            oneapi::mkl::transpose::nontrans, row_diff, src1_ncols, ne10,
@@ -2431,24 +2434,29 @@
        const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get();
        const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? (const float *) src1_ddf_i : src1_ddq_as_f32.get();
 
-        const float alpha = 1.0f;
-        const float beta = 0.0f;
 #if !GGML_SYCL_DNNL
+        const float alpha = 1.0f;
+        const float beta = 0.0f;
+# ifdef GGML_SYCL_NVIDIA
        SYCL_CHECK(CHECK_TRY_ERROR(oneapi::mkl::blas::column_major::gemm(
-            *stream, oneapi::mkl::transpose::trans,
-            oneapi::mkl::transpose::nontrans, row_diff, src1_ncols, ne10,
-            dpct::get_value(&alpha, *stream), src0_ddf_i, ne00,
-            src1_ddf1_i, ne10, dpct::get_value(&beta, *stream),
+            oneapi::mkl::backend_selector<oneapi::mkl::backend::cublas>{ *stream }, oneapi::mkl::transpose::trans,
+            oneapi::mkl::transpose::nontrans, row_diff, src1_ncols, ne10, dpct::get_value(&alpha, *stream), src0_ddf_i,
+            ne00, src1_ddf1_i, ne10, dpct::get_value(&beta, *stream),
            dst_dd_i, ldc)));
+# else
+        SYCL_CHECK(CHECK_TRY_ERROR(oneapi::mkl::blas::column_major::gemm(
+            *stream, oneapi::mkl::transpose::trans, oneapi::mkl::transpose::nontrans, row_diff, src1_ncols, ne10,
+            dpct::get_value(&alpha, *stream), src0_ddf_i, ne00, src1_ddf1_i, ne10, dpct::get_value(&beta, *stream),
+            dst_dd_i, ldc)));
+# endif
 #else
        auto dnnl_stream = ctx.stream_dnnl(stream);
        DnnlGemmWrapper::row_gemm(dnnl_stream, false, true, src1_ncols, row_diff, ne10, src1_ddf1_i,
            DnnlGemmWrapper::to_dt<float>(), src0_ddf_i, DnnlGemmWrapper::to_dt<float>(), dst_dd_i,
            DnnlGemmWrapper::to_dt<float>());
 #endif
    }
 
-    (void) dst;
-    (void) src1_ddq_i;
-    (void) src1_padded_row_size;
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_ddq_i);
+    GGML_UNUSED(src1_padded_row_size);
 }
 catch (sycl::exception const &exc) {
    std::cerr << exc.what() << "Exception caught at file:" << __FILE__
@@ -2494,8 +2502,9 @@ static void ggml_sycl_op_pool2d(ggml_backend_sycl_context & ctx, const ggml_tens
                item_ct1);
        });
 
-    (void) src1;
-    (void) src1_dd;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(src1_dd);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_sum(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
@@ -2510,9 +2519,10 @@ inline void ggml_sycl_op_sum(ggml_backend_sycl_context & ctx, const ggml_tensor
    sum_rows_f32_sycl(src0_dd, dst_dd, ne, 1, main_stream);
 
-    (void) src1;
-    (void) dst;
-    (void) src1_dd;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_dd);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_sum_rows(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
@@ -2529,9 +2539,10 @@ inline void ggml_sycl_op_sum_rows(ggml_backend_sycl_context & ctx, const ggml_te
    sum_rows_f32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream);
 
-    (void) src1;
-    (void) dst;
-    (void) src1_dd;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_dd);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_argsort(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
@@ -2550,9 +2561,10 @@ inline void ggml_sycl_op_argsort(ggml_backend_sycl_context & ctx, const ggml_ten
    argsort_f32_i32_sycl(src0_dd, (int *)dst_dd, ncols, nrows, order, main_stream);
 
-    (void) src1;
-    (void) dst;
-    (void) src1_dd;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_dd);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_argmax(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
@@ -2569,9 +2581,10 @@ inline void ggml_sycl_op_argmax(ggml_backend_sycl_context & ctx, const ggml_tens
    argmax_f32_i32_sycl(src0_dd, (int *)dst_dd, ncols, nrows, main_stream);
 
-    (void) src1;
-    (void) dst;
-    (void) src1_dd;
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(src1_dd);
+    GGML_UNUSED(ctx);
 }
 
 inline void ggml_sycl_op_diag_mask_inf(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
@@ -2591,9 +2604,10 @@ inline void
ggml_sycl_op_diag_mask_inf(ggml_backend_sycl_context & ctx, const gg diag_mask_inf_f32_sycl(src0_dd, dst_dd, ne00, nrows0, ne01, n_past, main_stream); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } @@ -2616,9 +2630,10 @@ inline void ggml_sycl_op_scale(ggml_backend_sycl_context & ctx, const ggml_tenso */ SYCL_CHECK(0); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, @@ -2641,9 +2656,10 @@ inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, const ggml_tenso */ SYCL_CHECK(0); - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } static void ggml_sycl_set_peer_access(const int n_tokens, int main_device) { @@ -2720,7 +2736,6 @@ static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_ten ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra; ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra; - ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra; const bool src0_is_contiguous = ggml_is_contiguous(src0); const bool src1_is_contiguous = ggml_is_contiguous(src1); @@ -3152,7 +3167,6 @@ static void ggml_sycl_mul_mat_batched_sycl(ggml_backend_sycl_context & ctx, GGML_TENSOR_BINARY_OP_LOCALS - const int64_t ne_dst = ggml_nelements(dst); SYCL_CHECK(ggml_sycl_set_device(ctx.device)); queue_ptr main_stream = ctx.stream();; @@ -3260,6 +3274,7 @@ catch (sycl::exception const &exc) { inline bool ggml_sycl_supports_mmq(enum ggml_type type) { // TODO: accuracy issues in MMQ + GGML_UNUSED(type); return false; } @@ -3324,8 +3339,15 @@ static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor #endif // SYCL_USE_XMX if (!split && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) { - // KQ single-batch - ggml_sycl_mul_mat_vec_p021(ctx, src0, src1, dst); + // TODO: Refactor and cleanup of mul mat dispatching. + if (src0->ne[3] == 1 && src1->ne[3] == 1) { + // KQ single-batch + // mmv p021 was specific for these dimensions + ggml_sycl_mul_mat_vec_p021(ctx, src0, src1, dst); + } else { + // The kernel from the if path is faster for that specific case, but does not support all mul mats. + ggml_sycl_mul_mat_batched_sycl(ctx, src0, src1, dst); + } } else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { // KQV single-batch ggml_sycl_mul_mat_vec_nc(ctx, src0, src1, dst); @@ -3625,7 +3647,7 @@ static void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor *sr GGML_ABORT("fatal error"); } - (void) dst; + GGML_UNUSED(dst); } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ @@ -3636,7 +3658,7 @@ catch (sycl::exception const &exc) { static void ggml_sycl_dup(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { // TODO: why do we pass dst as src1 here? 
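    // (ggml_sycl_cpy copies src0 into its second tensor argument and never reads its trailing dst parameter, see GGML_UNUSED(dst) above, so dup passes its own dst as the copy target and nullptr for the unused slot)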
ggml_sycl_cpy(ctx, src0, dst, nullptr);
-    (void) src1;
+    GGML_UNUSED(src1);
 }
 
 static void ggml_sycl_diag_mask_inf(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
@@ -3681,13 +3703,14 @@ static void ggml_sycl_argmax(ggml_backend_sycl_context & ctx, const ggml_tensor
 }
 
 static void ggml_sycl_nop(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
-    (void) src0;
-    (void) src1;
-    (void) dst;
+    GGML_UNUSED(src0);
+    GGML_UNUSED(src1);
+    GGML_UNUSED(dst);
+    GGML_UNUSED(ctx);
 }
 
 void ggml_sycl_set_main_device(const int main_device_id) try {
-    if (dpct::get_current_device_id() == main_device_id) return;
+    if (dpct::get_current_device_id() == static_cast<unsigned int> (main_device_id)) return;
    check_allow_device_id(main_device_id);
    dpct::select_device(main_device_id);
@@ -4091,6 +4114,7 @@ try {
    ggml_backend_sycl_context *sycl_ctx = (ggml_backend_sycl_context *)backend->context;
+    sycl::event *sycl_event = static_cast<sycl::event *>(event->context);
 
    const queue_ptr &stream = sycl_ctx->stream(sycl_ctx->device, 0);
@@ -4105,7 +4129,7 @@ catch (sycl::exception const &exc)
 }
 
 static void ggml_backend_sycl_event_wait(ggml_backend_t backend, ggml_backend_event_t event) try {
-    ggml_backend_sycl_context* sycl_ctx = static_cast<ggml_backend_sycl_context*>(backend->context);
+    sycl::event* sycl_event = static_cast<sycl::event*>(event->context);
 
    if (ggml_backend_is_sycl(backend)) {
@@ -4369,7 +4393,16 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g
        case GGML_OP_SOFT_MAX:
            return true;
        case GGML_OP_ROPE:
-            return ggml_is_contiguous(op->src[0]);
+        {
+            const int mode = ((const int32_t *) op->op_params)[2];
+            if (mode & GGML_ROPE_TYPE_MROPE) {
+                return false;
+            }
+            if (mode & GGML_ROPE_TYPE_VISION) {
+                return false;
+            }
+            return ggml_is_contiguous(op->src[0]);
+        }
        case GGML_OP_IM2COL:
            // TODO: add support for the new F32 operations
            return op->src[0]->type == GGML_TYPE_F16;
@@ -4404,7 +4437,7 @@ static bool ggml_backend_sycl_device_supports_buft(ggml_backend_dev_t dev, ggml_
 static int64_t get_op_batch_size(const ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_GET_ROWS:
-            return op->ne[1]; // this will increse the speed of prefill in test
+            return 0;
        case GGML_OP_MUL_MAT:
            return op->ne[1];
        case GGML_OP_MUL_MAT_ID:
@@ -4518,13 +4551,14 @@ static void *ggml_backend_sycl_reg_get_proc_address(ggml_backend_reg_t reg, cons
    // SYCL doesn't support registering host memory, left here for reference
    // "ggml_backend_register_host_buffer"
    // "ggml_backend_unregister_host_buffer"
+    GGML_UNUSED(name);
    return nullptr;
 }
 
 static const ggml_backend_reg_i ggml_backend_sycl_reg_interface = {
    /* .get_name         = */ ggml_backend_sycl_reg_get_name,
    /* .get_device_count = */ ggml_backend_sycl_reg_get_device_count,
-    /* .get_device_get   = */ ggml_backend_sycl_reg_get_device,
+    /* .get_device       = */ ggml_backend_sycl_reg_get_device,
    /* .get_proc_address = */ ggml_backend_sycl_reg_get_proc_address,
 };
 
@@ -4555,16 +4589,17 @@ ggml_backend_reg_t ggml_backend_sycl_reg() {
            dev_ctx->description = prop.get_name();
 
            ggml_backend_dev_t dev = new ggml_backend_device {
-                /* .interface = */ ggml_backend_sycl_device_interface,
-                /* .reg       = */ &reg,
-                /* .context   = */ dev_ctx
+                /* .iface   = */ ggml_backend_sycl_device_interface,
+                /* .reg     = */ &reg,
+                /* .context = */ dev_ctx
            };
            ctx->devices.push_back(dev);
        }
 
        reg = ggml_backend_reg {
-            /* .interface = */ ggml_backend_sycl_reg_interface,
-            /* .context = */ ctx
+            /* .api_version = */ GGML_BACKEND_API_VERSION,
+            /* .iface       = */ 
ggml_backend_sycl_reg_interface, + /* .context = */ ctx }; } @@ -4599,3 +4634,4 @@ GGML_BACKEND_API void ggml_backend_sycl_set_single_device_mode(int main_gpu_id) fprintf(stderr, "ggml_backend_sycl_set_single_device: use single device: [%d]\n", main_gpu_id); ggml_sycl_info(main_gpu_id); } +GGML_BACKEND_DL_IMPL(ggml_backend_sycl_reg) diff --git a/ggml/src/ggml-sycl/im2col.cpp b/ggml/src/ggml-sycl/im2col.cpp index 6a0a0fcd08c68..6146a99edbe77 100644 --- a/ggml/src/ggml-sycl/im2col.cpp +++ b/ggml/src/ggml-sycl/im2col.cpp @@ -120,6 +120,7 @@ void ggml_sycl_op_im2col( im2col_sycl(src1_dd, (float *)dst_dd, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, main_stream); } - (void) src0; - (void) src0_dd; + GGML_UNUSED(src0); + GGML_UNUSED(src0_dd); + GGML_UNUSED(ctx); } diff --git a/ggml/src/ggml-sycl/mmq.cpp b/ggml/src/ggml-sycl/mmq.cpp index 9f2ee2c88b820..cc664069bb64b 100644 --- a/ggml/src/ggml-sycl/mmq.cpp +++ b/ggml/src/ggml-sycl/mmq.cpp @@ -813,7 +813,7 @@ load_tiles_q4_K(const void *__restrict__ vx, int *__restrict__ x_ql, x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx); } - const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256 + constexpr int blocks_per_tile_x_row = QI4_K > WARP_SIZE ? 1 : WARP_SIZE / QI4_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll @@ -961,7 +961,7 @@ load_tiles_q5_K(const void *__restrict__ vx, int *__restrict__ x_ql, x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1; } - const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256 + constexpr int blocks_per_tile_x_row = QI5_K > WARP_SIZE ? 1 : WARP_SIZE / QI5_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 #pragma unroll @@ -1109,7 +1109,7 @@ load_tiles_q6_K(const void *__restrict__ vx, int *__restrict__ x_ql, dpct::sub_sat()); } - const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 + constexpr int blocks_per_tile_x_row = QI6_K > WARP_SIZE ? 
1 : WARP_SIZE / QI6_K; // == 1 if QK_K == 256 const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256 float * x_dmf = (float *) x_dm; @@ -3020,9 +3020,9 @@ void ggml_sycl_op_mul_mat_q( break; } - (void) src1; - (void) dst; - (void) src1_ddf_i; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_ddf_i); } catch (sycl::exception const &exc) { std::cerr << exc.what() << "Exception caught at file:" << __FILE__ diff --git a/ggml/src/ggml-sycl/mmvq.cpp b/ggml/src/ggml-sycl/mmvq.cpp index 7b10cf688148e..221f65c21ea36 100644 --- a/ggml/src/ggml-sycl/mmvq.cpp +++ b/ggml/src/ggml-sycl/mmvq.cpp @@ -753,11 +753,7 @@ static void mul_mat_vec_iq2_xs_q8_1_sycl(const void *vx, const void *vy, const sycl::range<3> block_nums(1, 1, block_num_y); const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, QK_WARP_SIZE); { - - stream->submit([&](sycl::handler &cgh) { - auto iq2xs_grid_ptr_ct1 = &iq2xs_grid[0]; - auto ksigns64_ptr_ct1 = &ksigns64[0]; - + stream->submit([&](sycl::handler & cgh) { cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) @@ -780,9 +776,6 @@ static void mul_mat_vec_iq2_s_q8_1_sycl(const void *vx, const void *vy, { stream->submit([&](sycl::handler &cgh) { - auto iq2xs_grid_ptr_ct1 = &iq2xs_grid[0]; - auto ksigns64_ptr_ct1 = &ksigns64[0]; - cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) @@ -805,9 +798,6 @@ static void mul_mat_vec_iq3_xxs_q8_1_sycl(const void *vx, const void *vy, { stream->submit([&](sycl::handler &cgh) { - auto iq3xxs_grid_ptr_ct1 = &iq3xxs_grid[0]; - auto ksigns64_ptr_ct1 = &ksigns64[0]; - cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) @@ -830,8 +820,6 @@ static void mul_mat_vec_iq3_s_q8_1_sycl(const void *vx, const void *vy, { stream->submit([&](sycl::handler &cgh) { - auto iq3s_grid_ptr_ct1 = &iq3s_grid[0]; - cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) @@ -854,9 +842,6 @@ static void mul_mat_vec_iq1_s_q8_1_sycl(const void *vx, const void *vy, { stream->submit([&](sycl::handler &cgh) { - auto iq1s_grid_ptr_ct1 = &iq1s_grid_gpu[0]; - auto ksigns64_ptr_ct1 = &ksigns64[0]; - cgh.parallel_for( sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) @@ -954,7 +939,7 @@ void ggml_sycl_op_mul_mat_vec_q( const size_t q8_1_bs = QK8_1; // the main device has a larger memory buffer to hold the results from all GPUs // nrows_dst == nrows of the matrix that the kernel writes into - const int64_t nrows_dst = id == ctx.device ? 
ne00 : row_diff; + for (int i = 0; i < src1_ncols; i++) { const size_t src1_ddq_i_offset = i * src1_padded_col_size * q8_1_ts / q8_1_bs; @@ -1023,7 +1008,8 @@ void ggml_sycl_op_mul_mat_vec_q( break; } } - (void) src1; - (void) dst; - (void) src1_ddf_i; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_ddf_i); + GGML_UNUSED(ctx); } diff --git a/ggml/src/ggml-sycl/norm.cpp b/ggml/src/ggml-sycl/norm.cpp index 81da6e1c70f7d..eae0470eda674 100644 --- a/ggml/src/ggml-sycl/norm.cpp +++ b/ggml/src/ggml-sycl/norm.cpp @@ -32,7 +32,7 @@ static void norm_f32(const float* x, float* dst, const int ncols, const float ep */ item_ct1.barrier(sycl::access::fence_space::local_space); mean_var = 0.f; - int nreduce = nwarps / WARP_SIZE; + size_t nreduce = nwarps / WARP_SIZE; for (size_t i = 0; i < nreduce; i += 1) { mean_var += s_sum[lane_id + i * WARP_SIZE]; @@ -57,7 +57,7 @@ static void group_norm_f32(const float* x, float* dst, const int group_size, con const int nwarps = nthreads / WARP_SIZE; assert(nwarps % WARP_SIZE == 0); start += item_ct1.get_local_id(2); - int nreduce = nwarps / WARP_SIZE; + size_t nreduce = nwarps / WARP_SIZE; if (end >= ne_elements) { end = ne_elements; @@ -166,7 +166,7 @@ static void rms_norm_f32(const float* x, float* dst, const int ncols, const floa converged control flow. You may need to adjust the code. */ item_ct1.barrier(sycl::access::fence_space::local_space); - int nreduce = nwarps / WARP_SIZE; + size_t nreduce = nwarps / WARP_SIZE; tmp = 0.f; for (size_t i = 0; i < nreduce; i += 1) { @@ -358,6 +358,7 @@ void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* (void)src1; (void)dst; (void)src1_dd; + GGML_UNUSED(ctx); } void ggml_sycl_op_rms_norm(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, diff --git a/ggml/src/ggml-sycl/rope.cpp b/ggml/src/ggml-sycl/rope.cpp index 1f06f78fa3d91..1244b231af738 100644 --- a/ggml/src/ggml-sycl/rope.cpp +++ b/ggml/src/ggml-sycl/rope.cpp @@ -269,7 +269,8 @@ void ggml_sycl_op_rope( } } - (void) src1; - (void) dst; - (void) src1_dd; + GGML_UNUSED(src1); + GGML_UNUSED(dst); + GGML_UNUSED(src1_dd); + GGML_UNUSED(ctx); } diff --git a/ggml/src/ggml-sycl/softmax.cpp b/ggml/src/ggml-sycl/softmax.cpp index e9636ac5fdfa0..f074230964ec7 100644 --- a/ggml/src/ggml-sycl/softmax.cpp +++ b/ggml/src/ggml-sycl/softmax.cpp @@ -16,7 +16,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const const int lane_id = item_ct1.get_local_id(2) % WARP_SIZE; const int nthreads = block_size; const int nwarps = nthreads / WARP_SIZE; - int nreduce = nwarps / WARP_SIZE; + size_t nreduce = nwarps / WARP_SIZE; float slope = 1.0f; // ALiBi @@ -53,8 +53,9 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const if (block_size > WARP_SIZE) { if (warp_id == 0) { buf[lane_id] = -INFINITY; - for (size_t i = 1; i < nreduce; i += 1) + for (size_t i = 1; i < nreduce; i += 1) { buf[lane_id + i * WARP_SIZE] = -INFINITY; + } } item_ct1.barrier(sycl::access::fence_space::local_space); @@ -63,8 +64,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const } item_ct1.barrier(sycl::access::fence_space::local_space); max_val = buf[lane_id]; - for (size_t i = 1; i < nreduce; i += 1) - { + for (size_t i = 1; i < nreduce; i += 1) { max_val = std::max(max_val, buf[lane_id + i * WARP_SIZE]); } max_val = warp_reduce_max(max_val, item_ct1); @@ -89,8 +89,9 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const 
item_ct1.barrier(sycl::access::fence_space::local_space); if (warp_id == 0) { buf[lane_id] = 0.f; - for (size_t i = 1; i < nreduce; i += 1) + for (size_t i = 1; i < nreduce; i += 1) { buf[lane_id + i * WARP_SIZE] = 0.f; + } } item_ct1.barrier(sycl::access::fence_space::local_space); @@ -100,8 +101,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const item_ct1.barrier(sycl::access::fence_space::local_space); tmp = buf[lane_id]; - for (size_t i = 1; i < nreduce; i += 1) - { + for (size_t i = 1; i < nreduce; i += 1) { tmp += buf[lane_id + i * WARP_SIZE]; } tmp = warp_reduce_sum(tmp, item_ct1); diff --git a/ggml/src/ggml-sycl/tsembd.cpp b/ggml/src/ggml-sycl/tsembd.cpp index d5c227cd1abcd..2ffe3cca91725 100644 --- a/ggml/src/ggml-sycl/tsembd.cpp +++ b/ggml/src/ggml-sycl/tsembd.cpp @@ -68,4 +68,5 @@ void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, const ggml const int max_period = dst->op_params[1]; timestep_embedding_f32_sycl(src0_d, dst_d, src0->ne[0], dst->nb[1], dim, max_period, stream); + GGML_UNUSED(src1); } diff --git a/ggml/src/ggml-sycl/wkv6.cpp b/ggml/src/ggml-sycl/wkv6.cpp index 4c737f4bfce2f..75ddfb86ac0f7 100644 --- a/ggml/src/ggml-sycl/wkv6.cpp +++ b/ggml/src/ggml-sycl/wkv6.cpp @@ -59,7 +59,7 @@ static void rwkv_wkv_f32_kernel( float y = 0; // Process in chunks of 4 for better vectorization - sycl::float4 k4, r4, tf4, td4, s4, kv4; + sycl::float4 k4, r4, tf4, td4, s4; #pragma unroll for (int j = 0; j < head_size; j += 4) { // Load data in vec4 chunks @@ -135,4 +135,7 @@ void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, const ggml_tensor* s ); }); }); + + GGML_UNUSED(src0); + GGML_UNUSED(src1); } diff --git a/ggml/src/ggml-threading.h b/ggml/src/ggml-threading.h index ce975d880a70f..dec2c8840aa36 100644 --- a/ggml/src/ggml-threading.h +++ b/ggml/src/ggml-threading.h @@ -1,11 +1,13 @@ #pragma once +#include "ggml.h" + #ifdef __cplusplus extern "C" { #endif -void ggml_critical_section_start(void); -void ggml_critical_section_end(void); +GGML_API void ggml_critical_section_start(void); +GGML_API void ggml_critical_section_end(void); #ifdef __cplusplus } diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt index 1e85dd15b7ab1..6d46e5f24c169 100644 --- a/ggml/src/ggml-vulkan/CMakeLists.txt +++ b/ggml/src/ggml-vulkan/CMakeLists.txt @@ -3,13 +3,27 @@ find_package(Vulkan COMPONENTS glslc REQUIRED) if (Vulkan_FOUND) message(STATUS "Vulkan found") - add_library(ggml-vulkan - ggml-vulkan.cpp - ../../include/ggml-vulkan.h - ) + ggml_add_backend_library(ggml-vulkan + ggml-vulkan.cpp + ../../include/ggml-vulkan.h + ) + + # Compile a test shader to determine whether GL_NV_cooperative_matrix2 is supported. + # If it's not, there will be an error to stderr. + # If it's supported, set a define to indicate that we should compile those shaders + execute_process(COMMAND ${Vulkan_GLSLC_EXECUTABLE} -o - -fshader-stage=compute --target-env=vulkan1.3 "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_coopmat2_support.comp" + OUTPUT_VARIABLE glslc_output + ERROR_VARIABLE glslc_error) + + if (${glslc_error} MATCHES ".*extension not supported: GL_NV_cooperative_matrix2.*") + message(STATUS "GL_NV_cooperative_matrix2 not supported by glslc") + else() + message(STATUS "GL_NV_cooperative_matrix2 supported by glslc") + add_compile_definitions(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT) + endif() - target_link_libraries(ggml-vulkan PRIVATE ggml-base Vulkan::Vulkan) - target_include_directories(ggml-vulkan PRIVATE . .. 
${CMAKE_CURRENT_BINARY_DIR}) + target_link_libraries(ggml-vulkan PRIVATE Vulkan::Vulkan) + target_include_directories(ggml-vulkan PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) # Workaround to the "can't dereference invalidated vector iterator" bug in clang-cl debug build # Posssibly relevant: https://stackoverflow.com/questions/74748276/visual-studio-no-displays-the-correct-length-of-stdvector @@ -67,7 +81,7 @@ if (Vulkan_FOUND) --target-cpp ${_ggml_vk_source} --no-clean - DEPENDS ${_ggml_vk_shader_deps} + DEPENDS ${_ggml_vk_shader_deps} ${_ggml_vk_genshaders_cmd} COMMENT "Generate vulkan shaders" ) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 19951d9b6d60b..1696b6e27369d 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1,7 +1,8 @@ #include "ggml-vulkan.h" #include -#if defined(GGML_VULKAN_RUN_TESTS) || defined(GGML_VULKAN_PERF) +#if defined(GGML_VULKAN_RUN_TESTS) || defined(GGML_VULKAN_PERF) || defined(GGML_VULKAN_CHECK_RESULTS) #include +#include "ggml-cpu.h" #endif #include @@ -43,12 +44,6 @@ #define MAX_VK_BUFFERS 256 -#ifndef K_QUANTS_PER_ITERATION -#define K_QUANTS_PER_ITERATION 1 -#else -static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2"); -#endif - #define VK_CHECK(err, msg) \ do { \ vk::Result err_ = (err); \ @@ -158,29 +153,53 @@ struct vk_device_struct { std::string name; uint64_t max_memory_allocation_size; bool fp16; + bool pipeline_robustness; vk::Device device; uint32_t vendor_id; vk_queue compute_queue; vk_queue transfer_queue; bool single_queue; uint32_t subgroup_size; + uint32_t shader_core_count; bool uma; + bool float_controls_rte_fp16; + + bool subgroup_size_control; + uint32_t subgroup_min_size; + uint32_t subgroup_max_size; + bool subgroup_require_full_support; + + bool coopmat_support; + bool coopmat_acc_f32_support; + bool coopmat_acc_f16_support; + uint32_t coopmat_m; + uint32_t coopmat_n; + uint32_t coopmat_k; + bool coopmat2; size_t idx; + bool mul_mat_l; + bool mul_mat_m; + bool mul_mat_s; + bool mul_mat_id_l; + bool mul_mat_id_m; + bool mul_mat_id_s; + vk_matmul_pipeline pipeline_matmul_f32; vk_matmul_pipeline pipeline_matmul_f32_f16; vk_matmul_pipeline2 pipeline_matmul_f16; vk_matmul_pipeline2 pipeline_matmul_f16_f32; vk_pipeline pipeline_matmul_split_k_reduce; + vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_COUNT]; vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat[GGML_TYPE_COUNT]; vk_matmul_pipeline pipeline_matmul_id_f32; - vk_matmul_pipeline pipeline_matmul_id_f16; - vk_matmul_pipeline pipeline_matmul_id_f16_f32; + vk_matmul_pipeline2 pipeline_matmul_id_f16; + vk_matmul_pipeline2 pipeline_matmul_id_f16_f32; - vk_matmul_pipeline pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT]; + vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT]; vk_pipeline pipeline_dequant[GGML_TYPE_COUNT]; vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_COUNT]; @@ -218,6 +237,7 @@ struct vk_device_struct { vk_pipeline pipeline_tanh_f32; vk_pipeline pipeline_diag_mask_inf_f32; vk_pipeline pipeline_soft_max_f32, pipeline_soft_max_f32_f16; + vk_pipeline pipeline_soft_max_f32_wg512, pipeline_soft_max_f32_f16_wg512; vk_pipeline pipeline_rope_norm_f32, pipeline_rope_norm_f16; vk_pipeline pipeline_rope_neox_f32, pipeline_rope_neox_f16; vk_pipeline pipeline_argsort_f32; @@ -225,6 +245,15 @@ struct vk_device_struct { vk_pipeline pipeline_im2col_f32, pipeline_im2col_f32_f16; vk_pipeline 
pipeline_timestep_embedding_f32; vk_pipeline pipeline_pool2d_f32; + vk_pipeline pipeline_rwkv_wkv6_f32; + + // [2][2][2] is for {f16acc,f32acc}x{large,small_rows}x{unaligned, aligned} + vk_pipeline pipeline_flash_attn_f32_f16_D64[GGML_TYPE_COUNT][2][2][2]; + vk_pipeline pipeline_flash_attn_f32_f16_D80[GGML_TYPE_COUNT][2][2][2]; + vk_pipeline pipeline_flash_attn_f32_f16_D96[GGML_TYPE_COUNT][2][2][2]; + vk_pipeline pipeline_flash_attn_f32_f16_D112[GGML_TYPE_COUNT][2][2][2]; + vk_pipeline pipeline_flash_attn_f32_f16_D128[GGML_TYPE_COUNT][2][2][2]; + vk_pipeline pipeline_flash_attn_f32_f16_D256[GGML_TYPE_COUNT][2][2][2]; std::unordered_map pipelines; std::unordered_map pipeline_descriptor_set_requirements; @@ -337,6 +366,40 @@ struct vk_mat_vec_id_push_constants { uint32_t nei0; uint32_t ne11; }; +struct vk_flash_attn_push_constants { + uint32_t N; + uint32_t KV; + + uint32_t ne1; + uint32_t ne2; + uint32_t ne3; + + uint32_t neq2; + uint32_t neq3; + uint32_t nek2; + uint32_t nek3; + uint32_t nev2; + uint32_t nev3; + uint32_t nem1; + + uint32_t nb02; + uint32_t nb03; + uint32_t nb12; + uint32_t nb13; + uint32_t nb22; + uint32_t nb23; + uint32_t nb31; + + float scale; + float max_bias; + float logit_softcap; + + uint32_t mask; + uint32_t n_head_log2; + float m0; + float m1; +}; + struct vk_op_push_constants { uint32_t KX; uint32_t KY; @@ -350,7 +413,46 @@ struct vk_op_unary_push_constants { uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13; uint32_t d_offset; float param1; float param2; + uint32_t ne0_012mp; uint32_t ne0_012L; + uint32_t ne0_01mp; uint32_t ne0_01L; + uint32_t ne0_0mp; uint32_t ne0_0L; + uint32_t ne1_012mp; uint32_t ne1_012L; + uint32_t ne1_01mp; uint32_t ne1_01L; + uint32_t ne1_0mp; uint32_t ne1_0L; }; +static_assert(sizeof(vk_op_unary_push_constants) <= 128, "sizeof(vk_op_unary_push_constants) must be <= 128"); + +// See https://gmplib.org/~tege/divcnst-pldi94.pdf figure 4.1. +// Precompute mp (m' in the paper) and L such that division +// can be computed using a multiply (high 32b of 64b result) +// and a shift: +// +// n/d = (mulhi(n, mp) + n) >> L; +static void init_fastdiv_values(uint32_t d, uint32_t &mp, uint32_t &L) +{ + // compute L = ceil(log2(d)); + L = 0; + while (L < 32 && (uint32_t{1} << L) < d) { + L++; + } + + mp = (uint32_t)((uint64_t{1} << 32) * ((uint64_t{1} << L) - d) / d + 1); +} + +template void init_pushconst_fastdiv(T &p) { + GGML_UNUSED(p); + static_assert(!std::is_const::value, "unexpected type"); +} + +template <> void init_pushconst_fastdiv(vk_op_unary_push_constants &p) { + // Compute magic values to divide by these six numbers. 
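+    // Worked example (illustrative, not part of the patch): for d = 6 the loop gives L = 3,
+    // and mp = (2^32 * (8 - 6)) / 6 + 1 = 1431655766; then for n = 100,
+    // mulhi(100, mp) = 33 and (33 + 100) >> 3 = 16, which equals 100 / 6.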
+ init_fastdiv_values(p.ne02*p.ne01*p.ne00, p.ne0_012mp, p.ne0_012L); + init_fastdiv_values(p.ne01*p.ne00, p.ne0_01mp, p.ne0_01L); + init_fastdiv_values(p.ne00, p.ne0_0mp, p.ne0_0L); + init_fastdiv_values(p.ne12*p.ne11*p.ne10, p.ne1_012mp, p.ne1_012L); + init_fastdiv_values(p.ne11*p.ne10, p.ne1_01mp, p.ne1_01L); + init_fastdiv_values(p.ne10, p.ne1_0mp, p.ne1_0L); +} struct vk_op_binary_push_constants { uint32_t ne; @@ -388,6 +490,7 @@ struct vk_op_soft_max_push_constants { float m0; float m1; uint32_t n_head_log2; + uint32_t nrows_x; }; struct vk_op_argsort_push_constants { @@ -426,6 +529,13 @@ struct vk_op_pool2d_push_constants { int32_t p0; int32_t p1; }; +struct vk_op_rwkv_wkv6_push_constants { + uint32_t B; + uint32_t T; + uint32_t C; + uint32_t H; +}; + // Allow pre-recording command buffers struct vk_staging_memcpy { vk_staging_memcpy(void * _dst, const void * _src, size_t _n) : dst(_dst), src(_src), n(_n) {} @@ -652,8 +762,12 @@ static uint32_t compile_count = 0; static std::mutex compile_count_mutex; static std::condition_variable compile_count_cond; -static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipeline, const std::string name, size_t spv_size, const void* spv_data, const std::string entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, std::vector specialization_constants, uint32_t align) { - VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << name << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size << ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align << ")"); +static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipeline, const std::string name, size_t spv_size, const void* spv_data, const std::string entrypoint, + uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, std::vector specialization_constants, + uint32_t align, bool disable_robustness, bool require_full_subgroups, uint32_t required_subgroup_size) { + VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << name << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size << + ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align << + ", " << disable_robustness << ", " << require_full_subgroups << ", " << required_subgroup_size << ")"); GGML_ASSERT(parameter_count > 0); GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT @@ -712,16 +826,39 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin specialization_constants.data() ); + vk::PipelineShaderStageCreateFlags pipeline_shader_stage_create_flags{}; + + if (device->subgroup_require_full_support && require_full_subgroups) { + pipeline_shader_stage_create_flags |= vk::PipelineShaderStageCreateFlagBits::eRequireFullSubgroupsEXT; + } + vk::PipelineShaderStageCreateInfo pipeline_shader_create_info( - vk::PipelineShaderStageCreateFlags(), + pipeline_shader_stage_create_flags, vk::ShaderStageFlagBits::eCompute, pipeline->shader_module, entrypoint.c_str(), &specialization_info); + + vk::PipelineShaderStageRequiredSubgroupSizeCreateInfoEXT pipeline_shader_stage_required_subgroup_size_create_info; + pipeline_shader_stage_required_subgroup_size_create_info.requiredSubgroupSize = required_subgroup_size; + if (device->subgroup_size_control && required_subgroup_size > 0) { + GGML_ASSERT(device->subgroup_min_size <= 
required_subgroup_size && required_subgroup_size <= device->subgroup_max_size); + pipeline_shader_create_info.setPNext(&pipeline_shader_stage_required_subgroup_size_create_info); + } + vk::ComputePipelineCreateInfo compute_pipeline_create_info( - vk::PipelineCreateFlags(), + vk::PipelineCreateFlags{}, pipeline_shader_create_info, pipeline->layout); + + vk::PipelineRobustnessCreateInfoEXT rci; + + if (device->pipeline_robustness && disable_robustness) { + rci.storageBuffers = vk::PipelineRobustnessBufferBehaviorEXT::eDisabled; + rci.uniformBuffers = vk::PipelineRobustnessBufferBehaviorEXT::eDisabled; + compute_pipeline_create_info.setPNext(&rci); + } + pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value; { @@ -1214,52 +1351,186 @@ static void ggml_vk_wait_events(vk_context& ctx, std::vector&& events ); } +// number of rows/cols for flash attention shader +static constexpr uint32_t flash_attention_num_small_rows = 32; +static std::array fa_rows_cols(uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) { + GGML_UNUSED(clamp); + + // small rows, large cols + if (small_rows) { + return {flash_attention_num_small_rows, 128}; + } + // small cols to reduce register count + if (ggml_is_quantized(type) || D == 256) { + return {64, 32}; + } + return {64, 64}; +}; + +static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vector& warptile, bool mul_mat_id) { + // Needs to be kept up to date on shader changes + const uint32_t bank_conflict_offset = device->coopmat_support ? 8 : 1; + const uint32_t type_size = device->fp16 ? sizeof(ggml_fp16_t) : sizeof(float); + const uint32_t warps = warptile[0] / warptile[10]; + + const uint32_t load_bufs = (warptile[1] + warptile[2]) * (warptile[3] + bank_conflict_offset) * type_size; + const uint32_t mmid_row_ids = mul_mat_id ? 3072 * sizeof(uint32_t) : 0; + const uint32_t coopmat_stage = device->coopmat_support ? 
warptile[7] * warptile[8] / warps * sizeof(float) : 0; + + return (load_bufs + mmid_row_ids + coopmat_stage) <= device->properties.limits.maxComputeSharedMemorySize; +} + static void ggml_vk_load_shaders(vk_device& device) { VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")"); std::cerr << "ggml_vulkan: Compiling shaders"; - // mulmat - std::vector l_warptile, m_warptile, s_warptile, l_warptile_mmq, m_warptile_mmq, s_warptile_mmq; - std::array l_wg_denoms, m_wg_denoms, s_wg_denoms; - uint32_t l_align, m_align, s_align; - - l_warptile = { 128, 128, 128, 16, device->subgroup_size * 2, 64, 2, 4, 4, device->subgroup_size }; - m_warptile = { 128, 64, 64, 16, device->subgroup_size, 32, 2, 4, 2, device->subgroup_size }; - s_warptile = { std::max(device->subgroup_size, 16u), 32, 32, 16, 32, 32, 2, 2, 2, device->subgroup_size }; - - l_warptile_mmq = { 128, 128, 128, 32, device->subgroup_size * 2, 64, 2, 4, 4, device->subgroup_size }; - m_warptile_mmq = { 128, 64, 64, 32, device->subgroup_size, 32, 2, 4, 2, device->subgroup_size }; - s_warptile_mmq = { std::max(device->subgroup_size, 16u), 32, 32, 32, 32, 32, 2, 2, 2, device->subgroup_size }; + // some shaders have a minimum subgroup size + const uint32_t subgroup_size_16 = std::max(device->subgroup_size, 16u); + const uint32_t subgroup_size_32 = std::max(device->subgroup_size, 32u); - l_wg_denoms = {128, 128, 1 }; - m_wg_denoms = { 64, 64, 1 }; - s_wg_denoms = { 32, 32, 1 }; + // mulmat + std::vector l_warptile, m_warptile, s_warptile, + l_warptile_mmq, m_warptile_mmq, s_warptile_mmq, + l_warptile_mmq_k, m_warptile_mmq_k, s_warptile_mmq_k, + l_warptile_mmqid, m_warptile_mmqid, s_warptile_mmqid; + std::array l_wg_denoms, m_wg_denoms, s_wg_denoms, + l_mmq_wg_denoms, m_mmq_wg_denoms, s_mmq_wg_denoms, + l_mmq_wg_denoms_k, m_mmq_wg_denoms_k, s_mmq_wg_denoms_k, + l_mmqid_wg_denoms, m_mmqid_wg_denoms, s_mmqid_wg_denoms; - l_align = 128; - m_align = 64; - s_align = 32; + uint32_t l_align, m_align, s_align; + if (device->coopmat2) { + // spec constants and tile sizes for non-quant matmul/matmul_id + l_warptile = { 256, 128, 256, 64 }; + m_warptile = { 256, 128, 128, 64 }; + s_warptile = { 128, 32, 16, 64 }; + l_wg_denoms = {128, 256, 1 }; + m_wg_denoms = {128, 128, 1 }; + s_wg_denoms = { 32, 16, 1 }; + + // spec constants and tile sizes for quant matmul (non-Qi_K) + l_warptile_mmq = { 256, 128, 256, 64 }; + m_warptile_mmq = { 256, 128, 128, 64 }; + s_warptile_mmq = { 256, 128, 128, 64 }; + l_mmq_wg_denoms = { 128, 256, 1 }; + m_mmq_wg_denoms = { 128, 128, 1 }; + s_mmq_wg_denoms = { 128, 128, 1 }; + + // spec constants and tile sizes for quant matmul (Qi_K) + l_warptile_mmq_k = { 256, 128, 512, 16 }; + m_warptile_mmq_k = { 256, 128, 256, 16 }; + s_warptile_mmq_k = { 256, 32, 128, 64 }; + l_mmq_wg_denoms_k = { 128, 512, 1 }; + m_mmq_wg_denoms_k = { 128, 256, 1 }; + s_mmq_wg_denoms_k = { 32, 128, 1 }; + + // spec constants and tile sizes for quant matmul_id + l_warptile_mmqid = { 256, 128, 128, 16 }; + m_warptile_mmqid = { 256, 128, 64, 16 }; + s_warptile_mmqid = { 256, 64, 64, 16 }; + l_mmqid_wg_denoms = { 128, 128, 1 }; + m_mmqid_wg_denoms = { 128, 64, 1 }; + s_mmqid_wg_denoms = { 64, 64, 1 }; + + l_align = 128; + m_align = 64; + s_align = 32; + } else { + // Matrix cores require different warp group sizes + const uint32_t tm_l = device->coopmat_support ? device->coopmat_m : 4; + const uint32_t tm_m = device->coopmat_support ? device->coopmat_m : 4; + const uint32_t tm_s = device->coopmat_support ? 
device->coopmat_m : 2; + const uint32_t tn_l = device->coopmat_support ? device->coopmat_n : 4; + const uint32_t tn_m = device->coopmat_support ? device->coopmat_n : 2; + const uint32_t tn_s = device->coopmat_support ? device->coopmat_n : 2; + const uint32_t tk_l = device->coopmat_support ? device->coopmat_k : 1; + const uint32_t tk_m = device->coopmat_support ? device->coopmat_k : 1; + const uint32_t tk_s = device->coopmat_support ? device->coopmat_k : 1; + + l_warptile = { 128, 128, 128, 16, device->subgroup_size * 2, 64, 2, tm_l, tn_l, tk_l, device->subgroup_size }; + m_warptile = { 128, 64, 64, 16, device->subgroup_size, 32, 2, tm_m, tn_m, tk_m, device->subgroup_size }; + s_warptile = { subgroup_size_16, 32, 32, 16, 32, 32, 2, tm_s, tn_s, tk_s, device->subgroup_size }; + + l_warptile_mmq = { 128, 128, 128, 32, device->subgroup_size * 2, 64, 2, tm_l, tn_l, tk_l, device->subgroup_size }; + m_warptile_mmq = { 128, 64, 64, 32, device->subgroup_size, 32, 2, tm_m, tn_m, tk_m, device->subgroup_size }; + s_warptile_mmq = { subgroup_size_32, 32, 32, 32, 32, 32, 2, tm_s, tn_s, tk_s, device->subgroup_size }; + + l_mmq_wg_denoms = l_wg_denoms = {128, 128, 1 }; + m_mmq_wg_denoms = m_wg_denoms = { 64, 64, 1 }; + s_mmq_wg_denoms = s_wg_denoms = { 32, 32, 1 }; + l_align = 128; + m_align = 64; + s_align = 32; + + // Fallback to smaller sizes if there's not enough shared memory. Given the current shaders + // and tile sizes, this should handle 16KB, 32KB, and 48KB+. + // This logic doesn't explicitly account for the 12KB row_ids in the mul_mat_mat_id shaders. + // But the numbers happen to work out for 32KB shared memory size that when using the medium + // size there's enough room for everything, and we assert for this. + uint32_t shmem_needed = (l_warptile[1] + l_warptile[2]) * (l_warptile[3] + 1) * sizeof(float); + if (shmem_needed > device->properties.limits.maxComputeSharedMemorySize) { + l_warptile = m_warptile; + l_wg_denoms = m_wg_denoms; + shmem_needed = (l_warptile[1] + l_warptile[2]) * (l_warptile[3] + 1) * sizeof(float); + GGML_ASSERT(shmem_needed <= device->properties.limits.maxComputeSharedMemorySize); + } + if (device->properties.limits.maxComputeSharedMemorySize >= 32768) { + // assert mul_mat_mat_id shaders will fit. + GGML_ASSERT(shmem_needed + 3072*4 <= device->properties.limits.maxComputeSharedMemorySize); + } + + shmem_needed = (l_warptile_mmq[1] + l_warptile_mmq[2]) * (l_warptile_mmq[3] + 1) * sizeof(float); + if (shmem_needed > device->properties.limits.maxComputeSharedMemorySize) { + if (device->properties.limits.maxComputeSharedMemorySize == 32768) { + l_warptile_mmq = m_warptile_mmq; + l_mmq_wg_denoms = m_mmq_wg_denoms; + } else { + l_warptile_mmq = s_warptile_mmq; + l_mmq_wg_denoms = s_mmq_wg_denoms; + } + shmem_needed = (l_warptile_mmq[1] + l_warptile_mmq[2]) * (l_warptile_mmq[3] + 1) * sizeof(float); + GGML_ASSERT(shmem_needed <= device->properties.limits.maxComputeSharedMemorySize); + } + if (device->properties.limits.maxComputeSharedMemorySize >= 32768) { + // assert mul_mat_mat_id shaders will fit. 
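+            // (3072 row ids * 4 bytes each = the 12KB mul_mat_mat_id shared-memory overhead noted earlier)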
+        // Disable medium and large matrix multiplication if not enough shared memory is available
+        // Check mmq warptiles as the largest configuration
+        // Throw an error if there is not enough shared memory for any matrix multiplication at all
+        if (!ggml_vk_matmul_shmem_support(device, s_warptile_mmq, false)) {
+            std::cerr << "ggml_vulkan: Error: Shared memory size too small for matrix multiplication." << std::endl;
+            throw std::runtime_error("Shared memory size too small for matrix multiplication.");
+        } else if (!ggml_vk_matmul_shmem_support(device, m_warptile_mmq, false)) {
+            device->mul_mat_m = false;
+            device->mul_mat_l = false;
+        } else if (!ggml_vk_matmul_shmem_support(device, l_warptile_mmq, false)) {
+            device->mul_mat_l = false;
+        }
+
+        // Disable mul_mat_id if not enough shared memory is available
+        if (!ggml_vk_matmul_shmem_support(device, s_warptile_mmq, true)) {
+            device->mul_mat_id_s = false;
+            device->mul_mat_id_m = false;
+            device->mul_mat_id_l = false;
+        } else if (!ggml_vk_matmul_shmem_support(device, m_warptile_mmq, true)) {
+            device->mul_mat_id_m = false;
+            device->mul_mat_id_l = false;
+        } else if (!ggml_vk_matmul_shmem_support(device, l_warptile_mmq, true)) {
+            device->mul_mat_id_l = false;
+        }
+    }

     device->pipeline_matmul_f32 = std::make_shared<vk_matmul_pipeline_struct>();
     device->pipeline_matmul_f32_f16 = std::make_shared<vk_matmul_pipeline_struct>();
     device->pipeline_matmul_id_f32 = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_matmul_id_f16_f32 = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_matmul_id_f16 = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K] = std::make_shared<vk_matmul_pipeline_struct>();
-    device->pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL] = std::make_shared<vk_matmul_pipeline_struct>();

     std::vector<std::future<void>> compiles;
-    auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants, uint32_t align) {
+    auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint,
+                                              uint32_t parameter_count, uint32_t push_constant_size, std::array<uint32_t, 3> wg_denoms, const std::vector<uint32_t>& specialization_constants,
+                                              uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) {
         {
             // wait until fewer than N compiles are in progress
             uint32_t N = std::max(1u, std::thread::hardware_concurrency());
@@ -1269,144 +1540,368 @@ static void ggml_vk_load_shaders(vk_device& device) {
             }
             compile_count++;
         }
-        compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), name, spv_size, spv_data, entrypoint, parameter_count, push_constant_size, wg_denoms, specialization_constants, align));
+        compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), name, spv_size, spv_data, entrypoint,
+                                      parameter_count, push_constant_size, wg_denoms, specialization_constants, align, disable_robustness, require_full_subgroups, required_subgroup_size));
     };
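The guard body between the two hunks is elided here, but the visible pieces (`compile_count++`, the "wait until fewer than N compiles" comment, and `std::async`) outline a standard bounded-concurrency pattern. A minimal editorial sketch of that pattern, with hypothetical mutex/condvar names not taken from the patch:

    #include <algorithm>
    #include <condition_variable>
    #include <cstdint>
    #include <future>
    #include <mutex>
    #include <thread>

    static std::mutex              compile_mtx;        // hypothetical name
    static std::condition_variable compile_cv;         // hypothetical name
    static uint32_t                compile_count = 0;  // name appears in the patch

    template <typename F>
    static std::future<void> submit_compile(F&& f) {
        const uint32_t N = std::max(1u, std::thread::hardware_concurrency());
        {
            // wait until fewer than N compiles are in progress
            std::unique_lock<std::mutex> lock(compile_mtx);
            compile_cv.wait(lock, [] { return compile_count < N; });
            compile_count++;
        }
        return std::async(std::launch::async, [f = std::forward<F>(f)]() mutable {
            f();  // compile one pipeline
            {
                std::lock_guard<std::mutex> lock(compile_mtx);
                compile_count--;
            }
            compile_cv.notify_one();  // let one waiting producer proceed
        });
    }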
-    if (device->fp16) {
+#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+    if (device->coopmat2) {
+
+        auto const &fa_wg_denoms = [&](uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) -> std::array<uint32_t, 3> {
+            return {fa_rows_cols(D, clamp, type, small_rows)[0], 1, 1};
+        };
+
+        auto const &fa_spec_constants = [&](uint32_t D, uint32_t clamp, ggml_type type, bool small_rows) -> std::vector<uint32_t> {
+            // For large number of rows, 128 invocations seems to work best.
+            // For small number of rows (e.g. N==1), 256 works better. But matrix granularity for 256 is 32, so we
+            // can't use 256 for D==80.
+            uint32_t wg_size = (small_rows && (D % 32) == 0) ? 256 : 128;
+            auto rows_cols = fa_rows_cols(D, clamp, type, small_rows);
+            return {wg_size, rows_cols[0], rows_cols[1], (D), clamp};
+        };
+
+#define CREATE_FA2(TYPE, NAMELC, D) \
+        ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][0][0], "flash_attn_f32_f16_D" #D "_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,false), fa_spec_constants(D,1,TYPE,false), 1); \
+        ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][0][1], "flash_attn_f32_f16_D" #D "_aligned_f16acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,false), fa_spec_constants(D,0,TYPE,false), fa_rows_cols(D,0,TYPE,false)[1]); \
+        ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][0][0], "flash_attn_f32_f16_D" #D "_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,false), fa_spec_constants(D,1,TYPE,false), 1); \
+        ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][0][1], "flash_attn_f32_f16_D" #D "_aligned_f32acc" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,false), fa_spec_constants(D,0,TYPE,false), fa_rows_cols(D,0,TYPE,false)[1]); \
+        ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][1][0], "flash_attn_f32_f16_D" #D "_f16acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,true), fa_spec_constants(D,1,TYPE,true), 1); \
+        ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][0][1][1], "flash_attn_f32_f16_D" #D "_aligned_f16acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_len, flash_attn_f32_f16_ ## NAMELC ## _f16acc_cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,true), fa_spec_constants(D,0,TYPE,true), fa_rows_cols(D,0,TYPE,true)[1]); \
+        ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][1][0], "flash_attn_f32_f16_D" #D "_f32acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,1,TYPE,true), fa_spec_constants(D,1,TYPE,true), 1); \
+        ggml_vk_create_pipeline(device, device->pipeline_flash_attn_f32_f16_D ## D[TYPE][1][1][1], "flash_attn_f32_f16_D" #D "_aligned_f32acc_smallrows" #NAMELC, flash_attn_f32_f16_ ## NAMELC ## _cm2_len, flash_attn_f32_f16_ ## NAMELC ## _cm2_data, "main", 5, sizeof(vk_flash_attn_push_constants), fa_wg_denoms(D,0,TYPE,true), fa_spec_constants(D,0,TYPE,true), fa_rows_cols(D,0,TYPE,true)[1]); \
+
+#define CREATE_FA(TYPE, NAMELC) \
+        CREATE_FA2(TYPE, NAMELC, 64) \
+        CREATE_FA2(TYPE, NAMELC, 80) \
+        CREATE_FA2(TYPE, NAMELC, 96) \
+        CREATE_FA2(TYPE, NAMELC, 112) \
+        CREATE_FA2(TYPE, NAMELC, 128) \
+        CREATE_FA2(TYPE, NAMELC, 256)
+
+        CREATE_FA(GGML_TYPE_F16, f16)
+        CREATE_FA(GGML_TYPE_Q4_0, q4_0)
+        CREATE_FA(GGML_TYPE_Q4_1, q4_1)
+        CREATE_FA(GGML_TYPE_Q5_0, q5_0)
+        CREATE_FA(GGML_TYPE_Q5_1, q5_1)
+        CREATE_FA(GGML_TYPE_Q8_0, q8_0)
+        // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
+        //CREATE_FA(GGML_TYPE_Q2_K, q2_k)
+        //CREATE_FA(GGML_TYPE_Q3_K, q3_k)
+        //CREATE_FA(GGML_TYPE_Q4_K, q4_k)
+        //CREATE_FA(GGML_TYPE_Q5_K, q5_k)
+        //CREATE_FA(GGML_TYPE_Q6_K, q6_k)
+        CREATE_FA(GGML_TYPE_IQ4_NL, iq4_nl)
+#undef CREATE_FA
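A brief editorial note on the four trailing indices used above, inferred directly from the CREATE_FA2 bodies (the accessor below is hypothetical illustration, assuming the backend's vk_device/vk_pipeline handle types; D128 stands in for any of the six head sizes 64, 80, 96, 112, 128, 256):

    // index layout: [quant type][0 = f16 accumulator, 1 = f32]
    //                           [0 = large-row path, 1 = small-row path]
    //                           [0 = unaligned, 1 = aligned]
    static vk_pipeline fa_pipeline(vk_device & device, ggml_type type,
                                   bool f32acc, bool small_rows, bool aligned) {
        return device->pipeline_flash_attn_f32_f16_D128[type][f32acc][small_rows][aligned];
    }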
+
         // Create 6 variants, {s,m,l}x{unaligned,aligned}
 #define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \
-
-    CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_matmul_f16.f32acc, matmul_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_matmul_f16_f32.f32acc, matmul_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3);
-
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-
-    CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4);
-    CREATE_MM(pipeline_matmul_id_f16, matmul_id_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 4);
-    CREATE_MM(pipeline_matmul_id_f16_f32, matmul_id_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4);
-
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_q4_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_q4_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_q5_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_q5_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_q8_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_q2_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_q3_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_q4_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_q5_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_q6_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_iq4_nl_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
+    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
+    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
+    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _cm2_len, NAMELC ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
+    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
+    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
+    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _cm2_len, NAMELC ## _aligned ## F16ACC ## _cm2_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \
+
+    // Create 2 variants, {f16,f32} accumulator
+#define CREATE_MM2(PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
+    CREATE_MM(PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
+    CREATE_MM(PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
+
+        CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3)
+
+        CREATE_MM2(pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3)
+        CREATE_MM2(pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_0].f16acc, matmul_q5_0_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_1].f16acc, matmul_q5_1_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q8_0].f16acc, matmul_q8_0_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q2_K].f16acc, matmul_q2_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q3_K].f16acc, matmul_q3_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3)
+
+        CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM2(pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM2(pipeline_matmul_id_f16_f32, matmul_id_f16_f32, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4)
+
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f16acc, matmul_id_q5_0_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f16acc, matmul_id_q5_1_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f16acc, matmul_id_q8_0_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f16acc, matmul_id_q2_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f16acc, matmul_id_q3_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+        CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4)
+#undef CREATE_MM
+#undef CREATE_MM2
+    } else
+#endif // defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+    if (device->coopmat_support) {
+        // Create 6 variants, {s,m,l}x{unaligned,aligned}
+#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+        if (device->mul_mat ## ID ## _l) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _coopmat_len, NAMELC ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1, false, true); \
+        if (device->mul_mat ## ID ## _m) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _coopmat_len, NAMELC ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1, false, true); \
+        if (device->mul_mat ## ID ## _s) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _coopmat_len, NAMELC ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1, false, true); \
+        if (device->mul_mat ## ID ## _l) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _coopmat_len, NAMELC ## _aligned ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align, false, true); \
+        if (device->mul_mat ## ID ## _m) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _coopmat_len, NAMELC ## _aligned ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align, false, true); \
+        if (device->mul_mat ## ID ## _s) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _coopmat_len, NAMELC ## _aligned ## F16ACC ## _coopmat_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align, false, true); \
+
+        // Create 2 variants, {f16,f32} accumulator
+#define CREATE_MM2(PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+        if (device->coopmat_acc_f16_support) { \
+            CREATE_MM(PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+        } \
+        if (device->coopmat_acc_f32_support) { \
+            CREATE_MM(PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+        } \
+
+        CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM2(pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM2(pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+
+        if (device->coopmat_acc_f16_support) {
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f16acc, matmul_q5_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f16acc, matmul_q5_1_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f16acc, matmul_q8_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f16acc, matmul_q2_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f16acc, matmul_q3_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        } else {
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f16acc, matmul_q5_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f16acc, matmul_q5_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f16acc, matmul_q8_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f16acc, matmul_q2_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f16acc, matmul_q3_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+            CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        }
+
+        // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines.
+        if (device->mul_mat_id_s || device->mul_mat_id_m || device->mul_mat_id_l) {
+            CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+            CREATE_MM2(pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+            CREATE_MM2(pipeline_matmul_id_f16_f32, matmul_id_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+
+            if (device->coopmat_acc_f16_support) {
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f16acc, matmul_id_q5_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f16acc, matmul_id_q5_1_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f16acc, matmul_id_q8_0_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f16acc, matmul_id_q2_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f16acc, matmul_id_q3_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            } else {
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f16acc, matmul_id_q5_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f16acc, matmul_id_q5_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f16acc, matmul_id_q8_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f16acc, matmul_id_q2_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f16acc, matmul_id_q3_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+                CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            }
+        }
+#undef CREATE_MM2
+#undef CREATE_MM
+    } else if (device->fp16) {
+        // Create 6 variants, {s,m,l}x{unaligned,aligned}
+#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+        if (device->mul_mat ## ID ## _l) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
+        if (device->mul_mat ## ID ## _m) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
+        if (device->mul_mat ## ID ## _s) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _len, NAMELC ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
+        if (device->mul_mat ## ID ## _l) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
+        if (device->mul_mat ## ID ## _m) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
+        if (device->mul_mat ## ID ## _s) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _len, NAMELC ## _aligned ## F16ACC ## _data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \
+
+        // Create 2 variants, {f16,f32} accumulator
+#define CREATE_MM2(PIPELINE_NAME, NAMELC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+        CREATE_MM(PIPELINE_NAME . f16acc, NAMELC, _f16acc, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+        CREATE_MM(PIPELINE_NAME . f32acc, NAMELC, , WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+
+        CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM2(pipeline_matmul_f16, matmul_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM2(pipeline_matmul_f16_f32, matmul_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f16acc, matmul_q5_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f16acc, matmul_q5_1_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f16acc, matmul_q8_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f16acc, matmul_q2_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f16acc, matmul_q3_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+        // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines.
+        if (device->mul_mat_id_s || device->mul_mat_id_m || device->mul_mat_id_l) {
+            CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+            CREATE_MM2(pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+            CREATE_MM2(pipeline_matmul_id_f16_f32, matmul_id_f16_f32, wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f16acc, matmul_id_q5_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f16acc, matmul_id_q5_1_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f16acc, matmul_id_q8_0_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f16acc, matmul_id_q2_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f16acc, matmul_id_q3_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+        }
+#undef CREATE_MM2
 #undef CREATE_MM
     } else {
         // Create 6 variants, {s,m,l}x{unaligned,aligned}
-#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT) \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
-    ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \
-
-    CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_matmul_f16.f32acc, matmul_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_matmul_f16_f32.f32acc, matmul_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3);
-
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-    CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3);
-
-    CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4);
-    CREATE_MM(pipeline_matmul_id_f16, matmul_id_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 4);
-    CREATE_MM(pipeline_matmul_id_f16_f32, matmul_id_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4);
-
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0], matmul_id_q4_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1], matmul_id_q4_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0], matmul_id_q5_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1], matmul_id_q5_1_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0], matmul_id_q8_0_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K], matmul_id_q2_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K], matmul_id_q3_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K], matmul_id_q4_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K], matmul_id_q5_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K], matmul_id_q6_k_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
-    CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL], matmul_id_iq4_nl_f32, , wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4);
+#define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
+        if (device->mul_mat ## ID ## _l) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->l, #NAMELC #F16ACC "_l", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, 1); \
+        if (device->mul_mat ## ID ## _m) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->m, #NAMELC #F16ACC "_m", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, 1); \
+        if (device->mul_mat ## ID ## _s) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->s, #NAMELC #F16ACC "_s", NAMELC ## F16ACC ## _fp32_len, NAMELC ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, 1); \
+        if (device->mul_mat ## ID ## _l) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_l, #NAMELC #F16ACC "_aligned_l", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), l_ ## WG_DENOMS, l_ ## WARPTILE, l_align); \
+        if (device->mul_mat ## ID ## _m) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_m, #NAMELC #F16ACC "_aligned_m", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), m_ ## WG_DENOMS, m_ ## WARPTILE, m_align); \
+        if (device->mul_mat ## ID ## _s) \
+            ggml_vk_create_pipeline(device, device-> PIPELINE_NAME ->a_s, #NAMELC #F16ACC "_aligned_s", NAMELC ## _aligned ## F16ACC ## _fp32_len, NAMELC ## _aligned ## F16ACC ## _fp32_data, "main", PARAMCOUNT, sizeof(PUSHCONST), s_ ## WG_DENOMS, s_ ## WARPTILE, s_align); \
+
+        CREATE_MM(pipeline_matmul_f32, matmul_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_matmul_f32_f16, matmul_f32_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_matmul_f16.f32acc, matmul_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_matmul_f16_f32.f32acc, matmul_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 3, );
+
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f32acc, matmul_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f32acc, matmul_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_0].f32acc, matmul_q5_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_1].f32acc, matmul_q5_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q8_0].f32acc, matmul_q8_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q2_K].f32acc, matmul_q2_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q3_K].f32acc, matmul_q3_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+        CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, );
+
+        // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines.
+        if (device->mul_mat_id_s || device->mul_mat_id_m || device->mul_mat_id_l) {
+            CREATE_MM(pipeline_matmul_id_f32, matmul_id_f32_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+            CREATE_MM(pipeline_matmul_id_f16.f32acc, matmul_id_f16, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+            CREATE_MM(pipeline_matmul_id_f16_f32.f32acc, matmul_id_f16_f32, , wg_denoms, warptile, vk_mat_mat_push_constants, 4, _id);
+
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f32acc, matmul_id_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f32acc, matmul_id_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_0].f32acc, matmul_id_q5_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_1].f32acc, matmul_id_q5_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q8_0].f32acc, matmul_id_q8_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q2_K].f32acc, matmul_id_q2_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q3_K].f32acc, matmul_id_q3_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f32acc, matmul_id_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f32acc, matmul_id_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f32acc, matmul_id_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+            CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id);
+        }
 #undef CREATE_MM
     }

     // mul mat vec
-    // computing two rows per workgroup is a benefit for Q4_0 -> Q5_1, but not for Q8_0.
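Editorial note on the hunk that follows: the fixed two-rows-per-workgroup choice above is replaced by a per-vendor row multiplier rm. A sketch of how the multiplier affects the dispatch, assuming (inference, not stated in the patch) that the first wg_denoms entry is the number of output rows each workgroup covers:

    #include <cstdint>

    // rm = 2 on AMD GCN (subgroup min == max == 64) and Intel, else 1, as set below.
    // wg_denoms {2*rm, 1, 1} and spec constants {subgroup_size, 2*rm} then mean each
    // workgroup produces 2*rm rows, so nrows rows need ceil(nrows / (2*rm)) workgroups.
    static uint32_t mul_mat_vec_workgroups(uint32_t nrows, uint32_t rm) {
        const uint32_t rows_per_wg = 2 * rm;
        return (nrows + rows_per_wg - 1) / rows_per_wg;  // ceil division
    }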
+
+    // AMD GCN and Intel graphics cards perform best when the number of rows per shader is doubled
+    uint32_t rm = 1;
+    if ((device->vendor_id == VK_VENDOR_ID_AMD && device->subgroup_min_size == 64 && device->subgroup_max_size == 64) || device->vendor_id == VK_VENDOR_ID_INTEL)
+        rm = 2;
+
+    // computing additional rows per workgroup is a benefit for Q4_0 -> Q5_1, but not for Q8_0.
     ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f32_f32", mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f32_f32", mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size, 1}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f32_f32", mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm, 1, 1}, {device->subgroup_size, 1*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f32_f32", mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {subgroup_size_16, 2*rm}, 1, true);

     ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f16_f32", mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f16_f32", mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size, 1}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f16_f32", mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm, 1, 1}, {device->subgroup_size, 1*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f16_f32", mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm, 1, 1}, {subgroup_size_16, 2*rm}, 1, true);

     ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {device->subgroup_size, 1}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_0], "mul_mat_vec_id_q4_0_f32", mul_mat_vec_id_q4_0_f32_len, mul_mat_vec_id_q4_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true);
+    ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_1], "mul_mat_vec_id_q4_1_f32", mul_mat_vec_id_q4_1_f32_len, mul_mat_vec_id_q4_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1,
1}, {device->subgroup_size, 2*rm}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_0], "mul_mat_vec_id_q5_0_f32", mul_mat_vec_id_q5_0_f32_len, mul_mat_vec_id_q5_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_1], "mul_mat_vec_id_q5_1_f32", mul_mat_vec_id_q5_1_f32_len, mul_mat_vec_id_q5_1_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {device->subgroup_size, 2*rm}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q8_0], "mul_mat_vec_id_q8_0_f32", mul_mat_vec_id_q8_0_f32_len, mul_mat_vec_id_q8_0_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1*rm, 1, 1}, {device->subgroup_size, 1*rm}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q2_K], "mul_mat_vec_id_q2_k_f32", mul_mat_vec_id_q2_k_f32_len, mul_mat_vec_id_q2_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q3_K], "mul_mat_vec_id_q3_k_f32", mul_mat_vec_id_q3_k_f32_len, mul_mat_vec_id_q3_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {1, 1, 1}, {subgroup_size_16}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm, 1, 1}, {subgroup_size_16, 2*rm}, 1, true); // dequant shaders ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1); @@ -1441,7 +1936,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", 
split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256 * 4, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_p021_f16_f32, "mul_mat_vec_p021_f16_f32", mul_mat_vec_p021_f16_f32_len, mul_mat_vec_p021_f16_f32_data, "main", 3, 6 * sizeof(uint32_t), {1, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_mul_mat_vec_nc_f16_f32, "mul_mat_vec_nc_f16_f32", mul_mat_vec_nc_f16_f32_len, mul_mat_vec_nc_f16_f32_data, "main", 3, 7 * sizeof(uint32_t), {1, 1, 1}, {}, 1); @@ -1497,26 +1992,39 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {512, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1); + ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_wg512, "soft_max_f32_wg512", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1); + ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1); + ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16_wg512, "soft_max_f32_f16_wg512", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1); ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32, "rope_norm_f32", rope_norm_f32_len, rope_norm_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f32, "rope_neox_f32", rope_neox_f32_len, rope_neox_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, rope_neox_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1); + + if (device->float_controls_rte_fp16) { + ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_rte_len, rope_norm_f16_rte_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_rte_len, rope_neox_f16_rte_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1); + } else { + ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_rope_neox_f16, "rope_neox_f16", rope_neox_f16_len, 
rope_neox_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
+    }
 
     ggml_vk_create_pipeline(device, device->pipeline_argsort_f32, "argsort_f32", argsort_f32_len, argsort_f32_data, "main", 2, sizeof(vk_op_argsort_push_constants), {1024, 1, 1}, {}, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_sum_rows_f32, "sum_rows_f32", sum_rows_f32_len, sum_rows_f32_data, "main", 2, sizeof(vk_op_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_im2col_f32, "im2col_f32", im2col_f32_len, im2col_f32_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1);
+    if (device->float_controls_rte_fp16) {
+        ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_rte_len, im2col_f32_f16_rte_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1);
+    } else {
+        ggml_vk_create_pipeline(device, device->pipeline_im2col_f32_f16, "im2col_f32_f16", im2col_f32_f16_len, im2col_f32_f16_data, "main", 2, sizeof(vk_op_im2col_push_constants), {256, 1, 1}, {}, 1);
+    }
 
     ggml_vk_create_pipeline(device, device->pipeline_timestep_embedding_f32, "timestep_embedding_f32", timestep_embedding_f32_len, timestep_embedding_f32_data, "main", 2, sizeof(vk_op_timestep_embedding_push_constants), {256, 1, 1}, {}, 1);
 
     ggml_vk_create_pipeline(device, device->pipeline_pool2d_f32, "pool2d_f32", pool2d_f32_len, pool2d_f32_data, "main", 2, sizeof(vk_op_pool2d_push_constants), {512, 1, 1}, {}, 1);
 
+    ggml_vk_create_pipeline(device, device->pipeline_rwkv_wkv6_f32, "rwkv_wkv6_f32", rwkv_wkv6_f32_len, rwkv_wkv6_f32_data, "main", 7, sizeof(vk_op_rwkv_wkv6_push_constants), {1, 1, 1}, {device->subgroup_size}, 1);
+
     for (auto &c : compiles) {
         c.wait();
     }
@@ -1550,12 +2058,40 @@ static vk_device ggml_vk_get_device(size_t idx) {
     device->physical_device = physical_devices[dev_num];
     const std::vector<vk::ExtensionProperties> ext_props = device->physical_device.enumerateDeviceExtensionProperties();
 
+    bool fp16_storage = false;
+    bool fp16_compute = false;
     bool maintenance4_support = false;
+    bool sm_builtins = false;
+    bool amd_shader_core_properties2 = false;
+    bool pipeline_robustness = false;
+    bool coopmat2_support = false;
+    device->coopmat_support = false;
 
     // Check if maintenance4 is supported
     for (const auto& properties : ext_props) {
         if (strcmp("VK_KHR_maintenance4", properties.extensionName) == 0) {
             maintenance4_support = true;
+        } else if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
+            fp16_storage = true;
+        } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
+            fp16_compute = true;
+        } else if (strcmp("VK_NV_shader_sm_builtins", properties.extensionName) == 0) {
+            sm_builtins = true;
+        } else if (strcmp("VK_AMD_shader_core_properties2", properties.extensionName) == 0) {
+            amd_shader_core_properties2 = true;
+        } else if (strcmp("VK_EXT_pipeline_robustness", properties.extensionName) == 0) {
+            pipeline_robustness = true;
+        } else if (strcmp("VK_EXT_subgroup_size_control", properties.extensionName) == 0) {
+            device->subgroup_size_control = true;
+        } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
+                   !getenv("GGML_VK_DISABLE_COOPMAT")) {
+            device->coopmat_support = true;
+            device->coopmat_m = 0;
+            device->coopmat_n = 0;
+            device->coopmat_k = 0;
+        } else if
(strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 && + !getenv("GGML_VK_DISABLE_COOPMAT2")) { + coopmat2_support = true; } } @@ -1563,18 +2099,51 @@ static vk_device ggml_vk_get_device(size_t idx) { vk::PhysicalDeviceMaintenance3Properties props3; vk::PhysicalDeviceMaintenance4Properties props4; vk::PhysicalDeviceSubgroupProperties subgroup_props; + vk::PhysicalDeviceDriverProperties driver_props; + vk::PhysicalDeviceShaderSMBuiltinsPropertiesNV sm_props; + vk::PhysicalDeviceShaderCoreProperties2AMD amd_shader_core_properties2_props; + vk::PhysicalDeviceVulkan12Properties vk12_props; + vk::PhysicalDeviceSubgroupSizeControlPropertiesEXT subgroup_size_control_props; + props2.pNext = &props3; props3.pNext = &subgroup_props; + subgroup_props.pNext = &driver_props; + driver_props.pNext = &vk12_props; + + VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_props; + if (maintenance4_support) { - subgroup_props.pNext = &props4; + last_struct->pNext = (VkBaseOutStructure *)&props4; + last_struct = (VkBaseOutStructure *)&props4; + } + if (sm_builtins) { + last_struct->pNext = (VkBaseOutStructure *)&sm_props; + last_struct = (VkBaseOutStructure *)&sm_props; + } + if (amd_shader_core_properties2) { + last_struct->pNext = (VkBaseOutStructure *)&amd_shader_core_properties2_props; + last_struct = (VkBaseOutStructure *)&amd_shader_core_properties2_props; } + if (device->subgroup_size_control) { + last_struct->pNext = (VkBaseOutStructure *)&subgroup_size_control_props; + last_struct = (VkBaseOutStructure *)&subgroup_size_control_props; + } + +#if defined(VK_NV_cooperative_matrix2) + vk::PhysicalDeviceCooperativeMatrix2PropertiesNV coopmat2_props; + if (coopmat2_support) { + last_struct->pNext = (VkBaseOutStructure *)&coopmat2_props; + last_struct = (VkBaseOutStructure *)&coopmat2_props; + } +#endif + device->physical_device.getProperties2(&props2); device->properties = props2.properties; const char* GGML_VK_FORCE_MAX_ALLOCATION_SIZE = getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE"); if (GGML_VK_FORCE_MAX_ALLOCATION_SIZE != nullptr) { - device->max_memory_allocation_size = std::stoi(GGML_VK_FORCE_MAX_ALLOCATION_SIZE); + device->max_memory_allocation_size = std::stoul(GGML_VK_FORCE_MAX_ALLOCATION_SIZE); } else if (maintenance4_support) { device->max_memory_allocation_size = std::min(props3.maxMemoryAllocationSize, props4.maxBufferSize); } else { @@ -1584,23 +2153,25 @@ static vk_device ggml_vk_get_device(size_t idx) { device->vendor_id = device->properties.vendorID; device->subgroup_size = subgroup_props.subgroupSize; device->uma = device->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; - - bool fp16_storage = false; - bool fp16_compute = false; - - for (const auto& properties : ext_props) { - if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) { - fp16_storage = true; - } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) { - fp16_compute = true; - } + if (sm_builtins) { + device->shader_core_count = sm_props.shaderSMCount; + } else if (amd_shader_core_properties2) { + device->shader_core_count = amd_shader_core_properties2_props.activeComputeUnitCount; + } else { + device->shader_core_count = 0; } + device->float_controls_rte_fp16 = vk12_props.shaderRoundingModeRTEFloat16; - const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16"); - const bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr; + const bool force_disable_f16 = getenv("GGML_VK_DISABLE_F16") != nullptr; device->fp16 = !force_disable_f16 && 
fp16_storage && fp16_compute;
 
+    if (device->vendor_id == VK_VENDOR_ID_INTEL || (device->vendor_id == VK_VENDOR_ID_AMD && (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource))) {
+        // Intel drivers don't support coopmat properly yet
+        // Only RADV supports coopmat properly on AMD
+        device->coopmat_support = false;
+    }
+
     std::vector<vk::QueueFamilyProperties> queue_family_props = device->physical_device.getQueueFamilyProperties();
 
     // Try to find a non-graphics compute queue and transfer-focused queues
@@ -1638,10 +2209,149 @@
     vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
     vk11_features.pNext = &vk12_features;
 
+    last_struct = (VkBaseOutStructure *)&vk12_features;
+
+    VkPhysicalDevicePipelineRobustnessFeaturesEXT pl_robustness_features;
+    pl_robustness_features.pNext = nullptr;
+    pl_robustness_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_ROBUSTNESS_FEATURES_EXT;
+    pl_robustness_features.pipelineRobustness = VK_FALSE;
+
+    if (pipeline_robustness) {
+        last_struct->pNext = (VkBaseOutStructure *)&pl_robustness_features;
+        last_struct = (VkBaseOutStructure *)&pl_robustness_features;
+        device_extensions.push_back("VK_EXT_pipeline_robustness");
+    }
+
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroup_size_control_features;
+    subgroup_size_control_features.pNext = nullptr;
+    subgroup_size_control_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
+    subgroup_size_control_features.computeFullSubgroups = false;
+    subgroup_size_control_features.subgroupSizeControl = false;
+
+    if (device->subgroup_size_control) {
+        last_struct->pNext = (VkBaseOutStructure *)&subgroup_size_control_features;
+        last_struct = (VkBaseOutStructure *)&subgroup_size_control_features;
+    }
+
+    VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
+    coopmat_features.pNext = nullptr;
+    coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
+    coopmat_features.cooperativeMatrix = VK_FALSE;
+
+    if (device->coopmat_support) {
+        last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
+        last_struct = (VkBaseOutStructure *)&coopmat_features;
+    }
+
+#if defined(VK_NV_cooperative_matrix2)
+    VkPhysicalDeviceCooperativeMatrix2FeaturesNV coopmat2_features {};
+    coopmat2_features.pNext = nullptr;
+    coopmat2_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_2_FEATURES_NV;
+    if (coopmat2_support) {
+        last_struct->pNext = (VkBaseOutStructure *)&coopmat2_features;
+        last_struct = (VkBaseOutStructure *)&coopmat2_features;
+        device_extensions.push_back("VK_NV_cooperative_matrix2");
+    }
+#endif
+
     vkGetPhysicalDeviceFeatures2(device->physical_device, &device_features2);
 
     device->fp16 = device->fp16 && vk12_features.shaderFloat16;
 
+    device->pipeline_robustness = pl_robustness_features.pipelineRobustness;
+
+    if (device->subgroup_size_control) {
+        device->subgroup_min_size = subgroup_size_control_props.minSubgroupSize;
+        device->subgroup_max_size = subgroup_size_control_props.maxSubgroupSize;
+    }
+
+    device->subgroup_size_control = device->subgroup_size_control &&
+            (subgroup_size_control_props.requiredSubgroupSizeStages & vk::ShaderStageFlagBits::eCompute) &&
+            subgroup_size_control_features.subgroupSizeControl;
+
+    if (device->subgroup_size_control) {
+        device->subgroup_require_full_support = subgroup_size_control_features.computeFullSubgroups;
+        device_extensions.push_back("VK_EXT_subgroup_size_control");
+    }
+
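    // The feature chain above is assembled dynamically: last_struct always points
    // at the current tail, so each optional struct (pipeline robustness, subgroup
    // size control, coopmat, coopmat2) is linked in only when its extension was
    // detected, and the single vkGetPhysicalDeviceFeatures2 call fills exactly
    // the structs that were chained.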
+    device->coopmat_support = device->coopmat_support && coopmat_features.cooperativeMatrix;
+
+    if (coopmat2_support) {
+#if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+        if (coopmat2_features.cooperativeMatrixWorkgroupScope &&
+            coopmat2_features.cooperativeMatrixFlexibleDimensions &&
+            coopmat2_features.cooperativeMatrixReductions &&
+            coopmat2_features.cooperativeMatrixConversions &&
+            coopmat2_features.cooperativeMatrixPerElementOperations &&
+            coopmat2_features.cooperativeMatrixTensorAddressing &&
+            coopmat2_features.cooperativeMatrixBlockLoads &&
+            vk12_features.bufferDeviceAddress) {
+
+            std::vector<VkCooperativeMatrixFlexibleDimensionsPropertiesNV> flexible_dimensions;
+            uint32_t count = 0;
+
+            PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV
+                _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV =
+                    (PFN_vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV)
+                    vk_instance.instance.getProcAddr("vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV");
+
+            _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(device->physical_device, &count, nullptr);
+
+            VkCooperativeMatrixFlexibleDimensionsPropertiesNV empty_prop {};
+            empty_prop.sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_FLEXIBLE_DIMENSIONS_PROPERTIES_NV;
+            flexible_dimensions.resize(count, empty_prop);
+
+            _vkGetPhysicalDeviceCooperativeMatrixFlexibleDimensionsPropertiesNV(device->physical_device, &count, flexible_dimensions.data());
+
+            bool found_fp16_128 = false,
+                 found_fp16_256 = false,
+                 found_fp32_128 = false,
+                 found_fp32_256 = false;
+            // need to support fp16*fp16 with fp16/fp32 accumulator, for workgroupsize 128
+            // with 32x16x16 and 256 with 32x32x16.
+            for (auto &prop : flexible_dimensions) {
+                if (prop.saturatingAccumulation == VK_FALSE &&
+                    prop.scope == VK_SCOPE_WORKGROUP_KHR &&
+                    prop.AType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
+                    prop.BType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
+
+                    if (prop.workgroupInvocations == 128 &&
+                        prop.MGranularity <= 32 &&
+                        prop.NGranularity <= 16 &&
+                        prop.KGranularity <= 16) {
+                        if (prop.CType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
+                            prop.ResultType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
+                            found_fp16_128 = true;
+                        }
+                        if (prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
+                            prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR) {
+                            found_fp32_128 = true;
+                        }
+                    }
+                    if (prop.workgroupInvocations == 256 &&
+                        prop.MGranularity <= 32 &&
+                        prop.NGranularity <= 32 &&
+                        prop.KGranularity <= 16) {
+                        if (prop.CType == VK_COMPONENT_TYPE_FLOAT16_KHR &&
+                            prop.ResultType == VK_COMPONENT_TYPE_FLOAT16_KHR) {
+                            found_fp16_256 = true;
+                        }
+                        if (prop.CType == VK_COMPONENT_TYPE_FLOAT32_KHR &&
+                            prop.ResultType == VK_COMPONENT_TYPE_FLOAT32_KHR) {
+                            found_fp32_256 = true;
+                        }
+                    }
+                }
+            }
+            if (found_fp16_128 && found_fp16_256 &&
+                found_fp32_128 && found_fp32_256 &&
+                coopmat2_props.cooperativeMatrixFlexibleDimensionsMaxDimension >= 512) {
+                device->coopmat2 = true;
+            }
+        }
+#endif
+    }
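    // In short: NV_coopmat2 is only enabled when the driver exposes workgroup-scope
    // fp16 x fp16 matrices with both fp16 and fp32 accumulators, at granularities
    // covering 32x16x16 for 128 invocations and 32x32x16 for 256 invocations, plus
    // flexible dimensions of at least 512; otherwise device->coopmat2 stays false.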
+
     if (!vk11_features.storageBuffer16BitAccess) {
         std::cerr << "ggml_vulkan: device " << GGML_VK_NAME << idx << " does not support 16-bit storage." << std::endl;
         throw std::runtime_error("Unsupported device");
@@ -1656,6 +2366,74 @@ static vk_device ggml_vk_get_device(size_t idx) {
     if (device->fp16) {
         device_extensions.push_back("VK_KHR_shader_float16_int8");
     }
+
+    if (device->coopmat_support) {
+        // Query supported shapes
+        std::vector<VkCooperativeMatrixPropertiesKHR> cm_props;
+
+        PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR =
+            (PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR)vkGetInstanceProcAddr(vk_instance.instance, "vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR");
+
+        uint32_t cm_props_num;
+
+        pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(device->physical_device, &cm_props_num, nullptr);
+
+        cm_props.resize(cm_props_num);
+
+        for (auto& prop : cm_props) {
+            prop.sType = VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_KHR;
+        }
+
+        pfn_vkGetPhysicalDeviceCooperativeMatrixPropertiesKHR(device->physical_device, &cm_props_num, cm_props.data());
+
+        VK_LOG_DEBUG("ggml_vulkan: Cooperative Matrix Shapes: " << cm_props.size());
+
+        for (auto& prop : cm_props) {
+            VK_LOG_DEBUG("ggml_vulkan: M: " << prop.MSize << " N: " << prop.NSize << " K: " << prop.KSize << " A: " << vk::to_string((vk::ComponentTypeKHR)prop.AType) << " B: " << vk::to_string((vk::ComponentTypeKHR)prop.BType) << " C: " << vk::to_string((vk::ComponentTypeKHR)prop.CType) << " Result: " << vk::to_string((vk::ComponentTypeKHR)prop.ResultType) << " saturatingAccumulation: " << prop.saturatingAccumulation << " scope: " << vk::to_string((vk::ScopeKHR)prop.scope));
+
+            if ((vk::ComponentTypeKHR)prop.AType == vk::ComponentTypeKHR::eFloat16 &&
+                (vk::ComponentTypeKHR)prop.BType == vk::ComponentTypeKHR::eFloat16 &&
+                (vk::ScopeKHR)prop.scope == vk::ScopeKHR::eSubgroup
+            ) {
+                if ((vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eFloat32 &&
+                    (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eFloat32) {
+                    // coopmat sizes not set yet
+                    if (device->coopmat_m == 0) {
+                        device->coopmat_acc_f32_support = true;
+                        device->coopmat_m = prop.MSize;
+                        device->coopmat_n = prop.NSize;
+                        device->coopmat_k = prop.KSize;
+                    } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
+                        // Only enable if shape is identical
+                        device->coopmat_acc_f32_support = true;
+                    }
+                } else if ((vk::ComponentTypeKHR)prop.CType == vk::ComponentTypeKHR::eFloat16 &&
+                           (vk::ComponentTypeKHR)prop.ResultType == vk::ComponentTypeKHR::eFloat16) {
+                    // coopmat sizes not set yet
+                    if (device->coopmat_m == 0) {
+                        device->coopmat_acc_f16_support = true;
+                        device->coopmat_m = prop.MSize;
+                        device->coopmat_n = prop.NSize;
+                        device->coopmat_k = prop.KSize;
+                    } else if (device->coopmat_m == prop.MSize && device->coopmat_n == prop.NSize && device->coopmat_k == prop.KSize) {
+                        // Only enable if shape is identical
+                        device->coopmat_acc_f16_support = true;
+                    }
+                }
+            }
+        }
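        // Worked example (hypothetical device): a first subgroup-scope fp16 x fp16
        // entry of 16x16x16 with an fp32 accumulator locks coopmat_m/n/k to
        // 16/16/16; a later 16x16x16 fp16-accumulator entry then also enables
        // coopmat_acc_f16_support, while a 32x8x16 entry is ignored, since all
        // matmul shaders are compiled for a single shape.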
+
+        if (device->coopmat_m == 0 || !device->coopmat_acc_f32_support) {
+            // No suitable matmul mode found
+            GGML_LOG_DEBUG("ggml_vulkan: WARNING: No suitable matrix core mode found. Disabling matrix cores.\n");
+            device->coopmat_support = false;
+        }
+    }
+
+    if (device->coopmat_support) {
+        device_extensions.push_back("VK_KHR_cooperative_matrix");
+    }
+
     device->name = GGML_VK_NAME + std::to_string(idx);
 
     device_create_info = {
@@ -1671,6 +2449,37 @@ static vk_device ggml_vk_get_device(size_t idx) {
         ggml_vk_create_queue(device, device->compute_queue, compute_queue_family_index, 0, { vk::PipelineStageFlagBits::eComputeShader | vk::PipelineStageFlagBits::eTransfer }, false);
 
     // Shaders
+    // Disable matmul tile sizes early if performance low or not supported
+    switch (device->vendor_id) {
+#ifndef GGML_VULKAN_RUN_TESTS
+    case VK_VENDOR_ID_AMD:
+    case VK_VENDOR_ID_INTEL:
+        device->mul_mat_l = false;
+        device->mul_mat_m = true;
+        device->mul_mat_s = true;
+        device->mul_mat_id_l = false;
+        device->mul_mat_id_m = true;
+        device->mul_mat_id_s = true;
+        break;
+    case VK_VENDOR_ID_APPLE:
+        device->mul_mat_l = false;
+        device->mul_mat_m = true;
+        device->mul_mat_s = false;
+        device->mul_mat_id_l = false;
+        device->mul_mat_id_m = true;
+        device->mul_mat_id_s = false;
+        break;
+#endif
+    default:
+        device->mul_mat_l = true;
+        device->mul_mat_m = true;
+        device->mul_mat_s = true;
+        device->mul_mat_id_l = true;
+        device->mul_mat_id_m = true;
+        device->mul_mat_id_s = true;
+        break;
+    }
+
     ggml_vk_load_shaders(device);
 
     if (!device->single_queue) {
@@ -1728,15 +2537,31 @@ static void ggml_vk_print_gpu_info(size_t idx) {
     bool fp16_storage = false;
     bool fp16_compute = false;
+    bool coopmat_support = false;
+    bool coopmat2_support = false;
 
     for (auto properties : ext_props) {
         if (strcmp("VK_KHR_16bit_storage", properties.extensionName) == 0) {
             fp16_storage = true;
         } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
             fp16_compute = true;
+        } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
+                   !getenv("GGML_VK_DISABLE_COOPMAT")) {
+            coopmat_support = true;
+#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+        } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
+                   !getenv("GGML_VK_DISABLE_COOPMAT2")) {
+            coopmat2_support = true;
+#endif
         }
     }
 
+    if (props2.properties.vendorID == VK_VENDOR_ID_INTEL || (props2.properties.vendorID == VK_VENDOR_ID_AMD && (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource))) {
+        // Intel drivers don't support coopmat properly yet
+        // Only RADV supports coopmat properly on AMD
+        coopmat_support = false;
+    }
+
     const char* GGML_VK_DISABLE_F16 = getenv("GGML_VK_DISABLE_F16");
     bool force_disable_f16 = GGML_VK_DISABLE_F16 != nullptr;
@@ -1759,16 +2584,33 @@ static void ggml_vk_print_gpu_info(size_t idx) {
     vk12_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
     vk11_features.pNext = &vk12_features;
 
+    // Pointer to the last chain element
+    VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_features;
+
+    VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
+    coopmat_features.pNext = nullptr;
+    coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
+    coopmat_features.cooperativeMatrix = VK_FALSE;
+
+    if (coopmat_support) {
+        last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
+        last_struct = (VkBaseOutStructure *)&coopmat_features;
+    }
+
     vkGetPhysicalDeviceFeatures2(physical_device, &device_features2);
 
     fp16 = fp16 && vk12_features.shaderFloat16;
 
+    coopmat_support = coopmat_support && coopmat_features.cooperativeMatrix;
+
+    std::string matrix_cores = 
coopmat2_support ? "NV_coopmat2" : coopmat_support ? "KHR_coopmat" : "none"; + std::string device_name = props2.properties.deviceName.data(); - GGML_LOG_DEBUG("ggml_vulkan: %d = %s (%s) | uma: %d | fp16: %d | warp size: %d\n", - idx, device_name.c_str(), driver_props.driverName, uma, fp16, subgroup_size); + GGML_LOG_DEBUG("ggml_vulkan: %zu = %s (%s) | uma: %d | fp16: %d | warp size: %zu | matrix cores: %s\n", + idx, device_name.c_str(), driver_props.driverName.data(), uma, fp16, subgroup_size, matrix_cores.c_str()); if (props2.properties.deviceType == vk::PhysicalDeviceType::eCpu) { - std::cerr << "ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want." << std::endl; + GGML_LOG_DEBUG("ggml_vulkan: Warning: Device type is CPU. This is probably not the device you want.\n"); } } @@ -1937,8 +2779,7 @@ void ggml_vk_instance_init() { vk_instance.device_indices.push_back(0); } } - GGML_LOG_DEBUG("ggml_vulkan: Found %d Vulkan devices:\n", vk_instance.device_indices.size()); - + GGML_LOG_DEBUG("ggml_vulkan: Found %zu Vulkan devices:\n", vk_instance.device_indices.size()); for (size_t i = 0; i < vk_instance.device_indices.size(); i++) { ggml_vk_print_gpu_info(i); @@ -1994,7 +2835,7 @@ static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type return ctx->device->pipeline_dequant[type]; } -static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type) { +static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) { VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_pipeline(" << ggml_type_name(src0_type) << ", " << ggml_type_name(src1_type) << ")"); if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) { return ctx->device->pipeline_matmul_f32; @@ -2002,14 +2843,23 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_conte if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) { return ctx->device->pipeline_matmul_f32_f16; } - if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) { - return ctx->device->pipeline_matmul_f16_f32.f32acc; - } - if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) { - return ctx->device->pipeline_matmul_f16.f32acc; + if (prec == GGML_PREC_DEFAULT && ctx->device->fp16 && !(ctx->device->coopmat_support && !ctx->device->coopmat_acc_f16_support)) { + if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) { + return ctx->device->pipeline_matmul_f16_f32.f16acc; + } + if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) { + return ctx->device->pipeline_matmul_f16.f16acc; + } + } else { + if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) { + return ctx->device->pipeline_matmul_f16_f32.f32acc; + } + if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) { + return ctx->device->pipeline_matmul_f16.f32acc; + } } - if (src1_type != GGML_TYPE_F32) { + if (src1_type != GGML_TYPE_F32 && !ctx->device->coopmat2) { return nullptr; } @@ -2030,7 +2880,11 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_conte return nullptr; } - return ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc; + if (ctx->device->coopmat2) { + assert(src1_type == GGML_TYPE_F16); + return ctx->device->pipeline_dequant_mul_mat_mat_f16[src0_type].f16acc; + } + return ctx->device->fp16 ? 
ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc; } static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) { @@ -2059,16 +2913,25 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type]; } -static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type) { +static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) { VK_LOG_DEBUG("ggml_vk_get_mul_mat_mat_id_pipeline()"); if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) { return ctx->device->pipeline_matmul_id_f32; } - if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) { - return ctx->device->pipeline_matmul_id_f16_f32; - } - if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) { - return ctx->device->pipeline_matmul_id_f16; + if (prec == GGML_PREC_DEFAULT && ctx->device->fp16 && !(ctx->device->coopmat_support && !ctx->device->coopmat_acc_f16_support)) { + if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) { + return ctx->device->pipeline_matmul_id_f16_f32.f16acc; + } + if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) { + return ctx->device->pipeline_matmul_id_f16.f16acc; + } + } else { + if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) { + return ctx->device->pipeline_matmul_id_f16_f32.f32acc; + } + if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) { + return ctx->device->pipeline_matmul_id_f16.f32acc; + } } GGML_ASSERT(src1_type == GGML_TYPE_F32); @@ -2090,7 +2953,7 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_co return nullptr; } - return ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type]; + return ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat_id[src0_type].f32acc; } static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) { @@ -2659,55 +3522,44 @@ static void ggml_vk_buffer_memset(vk_buffer& dst, size_t offset, uint32_t c, siz dst->device->device.resetFences({ dst->device->fence }); } -static uint32_t ggml_vk_guess_split_k(int m, int n, int k) { +static uint32_t ggml_vk_guess_split_k(ggml_backend_vk_context * ctx, int m, int n, int k, const vk_pipeline& pipeline) { VK_LOG_DEBUG("ggml_vk_guess_split_k(" << m << ", " << n << ", " << k << ")"); - // if (k > 128 && (m < 128 || n < 128) && m > 2 && n > 2) { - // return 4; - // } - return 1; - - GGML_UNUSED(m); GGML_UNUSED(n); GGML_UNUSED(k); -} - -static vk_pipeline ggml_vk_guess_matmul_pipeline_amd(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) { - if (m <= 32 || n <= 32) { - return aligned ? mmp->a_s : mmp->s; + uint32_t split_k = 1; + if (ctx->device->shader_core_count != 0 && m >= (int)pipeline->wg_denoms[0] && n >= (int)pipeline->wg_denoms[1]) { + // If k is 'large' and the SMs will fill less than halfway, use split_k. 
+        uint32_t m_tiles = CEIL_DIV(m, pipeline->wg_denoms[0]);
+        uint32_t n_tiles = CEIL_DIV(n, pipeline->wg_denoms[1]);
+        if (k >= 2048 && m_tiles * n_tiles < ctx->device->shader_core_count / 2) {
+            split_k = ctx->device->shader_core_count / (m_tiles * n_tiles);
+            // Clamp to 2 or 4
+            split_k = std::min(split_k, 4u);
+            if (split_k == 3) {
+                split_k = 2;
+            }
+        }
     }
-    return aligned ? mmp->a_m : mmp->m;
-
-    GGML_UNUSED(ctx);
-}
-
-static vk_pipeline ggml_vk_guess_matmul_pipeline_apple(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, bool aligned) {
-    return aligned ? mmp->a_m : mmp->m;
-
-    GGML_UNUSED(ctx);
-}
-
-static vk_pipeline ggml_vk_guess_matmul_pipeline_intel(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, bool aligned) {
-    return aligned ? mmp->a_s : mmp->s;
-
-    GGML_UNUSED(ctx);
+    return split_k;
 }
 
 static vk_pipeline ggml_vk_guess_matmul_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
     VK_LOG_DEBUG("ggml_vk_guess_matmul_pipeline(" << m << ", " << n << ", " << aligned << ")");
-    switch (ctx->device->vendor_id) {
-    case VK_VENDOR_ID_AMD:
-        return ggml_vk_guess_matmul_pipeline_amd(ctx, mmp, m, n, aligned);
-    case VK_VENDOR_ID_APPLE:
-        return ggml_vk_guess_matmul_pipeline_apple(ctx, mmp, aligned);
-    case VK_VENDOR_ID_INTEL:
-        return ggml_vk_guess_matmul_pipeline_intel(ctx, mmp, aligned);
-    default:
-        break;
+
+    if (ctx->device->coopmat2) {
+        if ((ctx->device->mul_mat_l && (m % mmp->l->wg_denoms[0]) == 0 && (n % mmp->l->wg_denoms[1]) == 0) || (!ctx->device->mul_mat_m && !ctx->device->mul_mat_s)) {
+            return aligned ? mmp->a_l : mmp->l;
+        }
+        if ((ctx->device->mul_mat_m && (m % mmp->m->wg_denoms[0]) == 0 && (n % mmp->m->wg_denoms[1]) == 0) || !ctx->device->mul_mat_s) {
+            return aligned ? mmp->a_m : mmp->m;
+        }
+        return aligned ? mmp->a_s : mmp->s;
     }
 
-    if (m <= 32 || n <= 32) {
+    if ((ctx->device->mul_mat_s && (m <= 32 || n <= 32)) || (!ctx->device->mul_mat_m && !ctx->device->mul_mat_l)) {
         return aligned ? mmp->a_s : mmp->s;
     }
-    if (m <= 64 || n <= 64) {
+    if ((ctx->device->mul_mat_m && (m <= 64 || n <= 64)) || !ctx->device->mul_mat_l) {
         return aligned ? mmp->a_m : mmp->m;
     }
     return aligned ? mmp->a_l : mmp->l;
@@ -2742,6 +3594,33 @@ static void ggml_vk_matmul(
         ggml_vk_dispatch_pipeline(ctx, subctx, ctx->device->pipeline_matmul_split_k_reduce, { split_k_buffer, d }, pc2.size() * sizeof(uint32_t), pc2.data(), { m * n * batch, 1, 1 });
 }
 
+static vk_pipeline ggml_vk_guess_matmul_id_pipeline(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n, bool aligned) {
+    VK_LOG_DEBUG("ggml_vk_guess_matmul_id_pipeline(" << m << ", " << n << ", " << aligned << ")");
+
+    if (ctx->device->coopmat2) {
+        if ((ctx->device->mul_mat_id_l && (m % mmp->l->wg_denoms[0]) == 0 && (n % mmp->l->wg_denoms[1]) == 0) || (!ctx->device->mul_mat_id_m && !ctx->device->mul_mat_id_s)) {
+            return aligned ? mmp->a_l : mmp->l;
+        }
+        if ((ctx->device->mul_mat_id_m && (m % mmp->m->wg_denoms[0]) == 0 && (n % mmp->m->wg_denoms[1]) == 0) || !ctx->device->mul_mat_id_s) {
+            return aligned ? mmp->a_m : mmp->m;
+        }
+        return aligned ? mmp->a_s : mmp->s;
+    }
+
+    if ((ctx->device->mul_mat_id_s && (m <= 32 || n <= 32)) || (!ctx->device->mul_mat_id_m && !ctx->device->mul_mat_id_l)) {
+        return aligned ? mmp->a_s : mmp->s;
+    }
+    if ((ctx->device->mul_mat_id_m && (m <= 64 || n <= 64)) || !ctx->device->mul_mat_id_l) {
+        return aligned ? mmp->a_m : mmp->m;
+    }
+    return aligned ? mmp->a_l : mmp->l;
+}
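Worked through with concrete numbers, the split_k heuristic above behaves as follows; this standalone sketch restates it under assumed values (a 32-core GPU and 64x64 workgroup tiles, neither taken from a real device):

    #include <algorithm>
    #include <cstdint>

    // Same rounding macro as ggml-vulkan.cpp.
    #define CEIL_DIV(M, N) (((M) + (N) - 1) / (N))

    // Standalone restatement of ggml_vk_guess_split_k; core_count, wg_m and wg_n
    // are example parameters here, not queried from a device.
    static uint32_t guess_split_k(uint32_t m, uint32_t n, uint32_t k,
                                  uint32_t core_count, uint32_t wg_m, uint32_t wg_n) {
        uint32_t split_k = 1;
        if (core_count != 0 && m >= wg_m && n >= wg_n) {
            const uint32_t tiles = CEIL_DIV(m, wg_m) * CEIL_DIV(n, wg_n);
            if (k >= 2048 && tiles < core_count / 2) {
                split_k = std::min(core_count / tiles, 4u);  // clamp to 2 or 4
                if (split_k == 3) {
                    split_k = 2;
                }
            }
        }
        return split_k;
    }

    // A 128x128x4096 matmul on a 32-core GPU with 64x64 tiles fills only 4 of 32
    // cores, so k is split four ways: guess_split_k(128, 128, 4096, 32, 64, 64) == 4.
    // The partial results are then summed by the split_k_reduce pipeline.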
+
+static uint32_t ggml_vk_guess_matmul_id_pipeline_align(ggml_backend_vk_context * ctx, vk_matmul_pipeline& mmp, int m, int n) {
+    VK_LOG_DEBUG("ggml_vk_guess_matmul_id_pipeline_align(" << m << ", " << n << ")");
+    return ggml_vk_guess_matmul_id_pipeline(ctx, mmp, m, n, true)->align;
+}
+
 static void ggml_vk_matmul_id(
         ggml_backend_vk_context * ctx, vk_context& subctx, vk_pipeline& pipeline,
         vk_subbuffer&& a, vk_subbuffer&& b, vk_subbuffer&& d, vk_subbuffer&& ids,
@@ -2812,13 +3691,15 @@ static void ggml_vk_cpy_to_contiguous(ggml_backend_vk_context * ctx, vk_context&
         elements = { ne, 1, 1 };
     }
 
-    const vk_op_unary_push_constants pc = {
+    vk_op_unary_push_constants pc = {
         (uint32_t)ne,
         (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], (uint32_t)tensor->nb[0] / tensor_type_size, (uint32_t)tensor->nb[1] / tensor_type_size, (uint32_t)tensor->nb[2] / tensor_type_size, (uint32_t)tensor->nb[3] / tensor_type_size,
         (uint32_t)tensor->ne[0], (uint32_t)tensor->ne[1], (uint32_t)tensor->ne[2], (uint32_t)tensor->ne[3], 1, (uint32_t)tensor->ne[0], (uint32_t)(tensor->ne[0] * tensor->ne[1]), (uint32_t)(tensor->ne[0] * tensor->ne[1] * tensor->ne[2]),
         0,
         0.0f, 0.0f,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     };
+    init_pushconst_fastdiv(pc);
     ggml_vk_sync_buffers(subctx);
     ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, { in, out }, sizeof(vk_op_unary_push_constants), &pc, elements);
 }
@@ -2867,18 +3748,20 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub
     }
 
     const bool x_non_contig = !ggml_vk_dim01_contiguous(src0);
-    const bool y_non_contig = !ggml_vk_dim01_contiguous(src1);
+    // Reformat and convert to fp16 if src1 is non-contiguous, or for coopmat2 for better perf
+    const bool y_non_contig = (ctx->device->coopmat2 && src1->type == GGML_TYPE_F32) ||
+                              !ggml_vk_dim01_contiguous(src1);
 
     const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
 
-    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type);
+    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type, (ggml_prec)dst->op_params[0]);
 
     const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
     const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;
 
     if (qx_needs_dequant) {
         // Fall back to dequant + f16 mulmat
-        mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, GGML_TYPE_F16, y_f32_kernel ? GGML_TYPE_F32 : GGML_TYPE_F16);
+        mmp = ggml_vk_get_mul_mat_mat_pipeline(ctx, GGML_TYPE_F16, y_f32_kernel ? GGML_TYPE_F32 : GGML_TYPE_F16, (ggml_prec)dst->op_params[0]);
     }
 
     // Not implemented
@@ -2891,10 +3774,10 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub
     const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, ne11));
     const bool aligned = ne10 == kpad && ne01 > 8 && ne11 > 8;
 
-    const uint32_t split_k = ggml_vk_guess_split_k(ne01, ne11, ne10);
     vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, ne11, aligned);
 
+    const uint32_t split_k = ggml_vk_guess_split_k(ctx, ne01, ne11, ne10, pipeline);
+
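    // Illustration of the alignment test above (align value hypothetical): with
    // ne10 = 4096 and a pipeline alignment of 32, kpad == ne10, so the "aligned"
    // matmul variants can be used; ne10 = 4100 would round kpad up to 4128 != ne10
    // and fall back to the generic variant, which bounds-checks the K tail.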
     const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
     const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
     const uint64_t x_sz = !qx_needs_dequant ? qx_sz : sizeof(ggml_fp16_t) * x_ne;
@@ -2920,7 +3803,7 @@ static void ggml_vk_mul_mat_q_f16(ggml_backend_vk_context * ctx, vk_context& sub
     if (dryrun) {
         const uint64_t x_sz_upd = x_sz * ne02 * ne03;
         const uint64_t y_sz_upd = y_sz * ne12 * ne13;
-        const uint64_t split_k_size = split_k > 1 ? d_sz * ne12 * ne13 * 4 : 0;
+        const uint64_t split_k_size = split_k > 1 ? d_sz * ne12 * ne13 * split_k : 0;
         if (
                 (qx_needs_dequant && x_sz_upd > ctx->device->max_memory_allocation_size) ||
                 (qy_needs_dequant && y_sz_upd > ctx->device->max_memory_allocation_size) ||
@@ -3187,7 +4070,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context&
 
     if (ne01 > max_groups_x) {
         groups_z = 64;
-        groups_x /= groups_z;
+        groups_x = CEIL_DIV(groups_x, groups_z);
     }
 
     // compute
@@ -3442,7 +4325,7 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context&
 
     const bool y_f32_kernel = src1->type == GGML_TYPE_F32 && !y_non_contig;
 
-    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type);
+    vk_matmul_pipeline mmp = ggml_vk_get_mul_mat_mat_id_pipeline(ctx, src0->type, y_non_contig ? GGML_TYPE_F16 : src1->type, (ggml_prec)dst->op_params[0]);
 
     const bool qx_needs_dequant = mmp == nullptr || x_non_contig;
     const bool qy_needs_dequant = (src1->type != GGML_TYPE_F16 && !y_f32_kernel) || y_non_contig;
@@ -3458,10 +4341,10 @@ static void ggml_vk_mul_mat_id_q_f16(ggml_backend_vk_context * ctx, vk_context&
     const uint64_t y_ne = ne11 * ne10;
     const uint64_t d_ne = ne21 * ne20;
 
-    const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_pipeline_align(ctx, mmp, ne01, nei1));
+    const uint32_t kpad = ggml_vk_align_size(ne10, ggml_vk_guess_matmul_id_pipeline_align(ctx, mmp, ne01, nei1));
     const bool aligned = ne10 == kpad && ne01 > 8 && nei1 > 8;
 
-    vk_pipeline pipeline = ggml_vk_guess_matmul_pipeline(ctx, mmp, ne01, nei1, aligned);
+    vk_pipeline pipeline = ggml_vk_guess_matmul_id_pipeline(ctx, mmp, ne01, nei1, aligned);
 
     const uint64_t qx_sz = ggml_type_size(src0->type) * x_ne / ggml_blck_size(src0->type);
     const uint64_t qy_sz = ggml_type_size(src1->type) * y_ne / ggml_blck_size(src1->type);
@@ -3764,7 +4647,7 @@ static void ggml_vk_mul_mat_vec_id_q_f16(ggml_backend_vk_context * ctx, vk_conte
 
     if (ne01 > max_groups_x) {
         groups_z = 64;
-        groups_x /= groups_z;
+        groups_x = CEIL_DIV(groups_x, groups_z);
     }
 
     // compute
@@ -3789,6 +4672,167 @@ static void ggml_vk_mul_mat_id(ggml_backend_vk_context * ctx, vk_context& subctx
     }
 }
 
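Both `groups_x = CEIL_DIV(groups_x, groups_z)` fixes in this hunk swap a truncating division for a rounding-up one, so no trailing rows are left undispatched; a minimal check (the 130-row count is arbitrary):

    #include <cstdint>

    // Same macro as in ggml-vulkan.cpp.
    #define CEIL_DIV(M, N) (((M) + (N) - 1) / (N))

    static_assert(130 / 64 == 2,          "truncation: 2 * 64 = 128 groups, 2 of 130 rows never dispatched");
    static_assert(CEIL_DIV(130, 64) == 3, "rounding up: 3 * 64 = 192 groups, the shader skips the excess");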
+static void ggml_vk_flash_attn(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * q, const ggml_tensor * k, const ggml_tensor * v, const ggml_tensor * mask, ggml_tensor * dst, bool dryrun = false) {
+    VK_LOG_DEBUG("ggml_vk_flash_attn((" << q << ", name=" << q->name << ", type=" << q->type << ", ne0=" << q->ne[0] << ", ne1=" << q->ne[1] << ", ne2=" << q->ne[2] << ", ne3=" << q->ne[3] << ", nb0=" << q->nb[0] << ", nb1=" << q->nb[1] << ", nb2=" << q->nb[2] << ", nb3=" << q->nb[3];
+    std::cerr << "), (" << k << ", name=" << k->name << ", type=" << k->type << ", ne0=" << k->ne[0] << ", ne1=" << k->ne[1] << ", ne2=" << k->ne[2] << ", ne3=" << k->ne[3] << ", nb0=" << k->nb[0] << ", nb1=" << k->nb[1] << ", nb2=" << k->nb[2] << ", nb3=" << k->nb[3];
+    std::cerr << "), (" << v << ", name=" << v->name << ", type=" << v->type << ", ne0=" << v->ne[0] << ", ne1=" << v->ne[1] << ", ne2=" << v->ne[2] << ", ne3=" << v->ne[3] << ", nb0=" << v->nb[0] << ", nb1=" << v->nb[1] << ", nb2=" << v->nb[2] << ", nb3=" << v->nb[3];
+    std::cerr << "), (" << dst << ", name=" << dst->name << ", type=" << dst->type << ", ne0=" << dst->ne[0] << ", ne1=" << dst->ne[1] << ", ne2=" << dst->ne[2] << ", ne3=" << dst->ne[3] << ", nb0=" << dst->nb[0] << ", nb1=" << dst->nb[1] << ", nb2=" << dst->nb[2] << ", nb3=" << dst->nb[3];
+    std::cerr << "), " << (dryrun ? "dryrun" : "") << ")");
+
+    GGML_TENSOR_LOCALS(int64_t, neq, q,   ne)
+    GGML_TENSOR_LOCALS(size_t,  nbq, q,   nb)
+    GGML_TENSOR_LOCALS(int64_t, nek, k,   ne)
+    GGML_TENSOR_LOCALS(size_t,  nbk, k,   nb)
+    GGML_TENSOR_LOCALS(int64_t, nev, v,   ne)
+    GGML_TENSOR_LOCALS(size_t,  nbv, v,   nb)
+    GGML_TENSOR_LOCALS(int64_t, ne,  dst, ne)
+    GGML_TENSOR_LOCALS(size_t,  nb,  dst, nb)
+
+    const uint32_t nem1 = mask ? mask->ne[1] : 0;
+    const uint32_t nbm1 = mask ? mask->nb[1] : 0;
+
+    const uint32_t D = neq0;
+    const uint32_t N = neq1;
+    const uint32_t KV = nek1;
+
+    GGML_ASSERT(ne0 == D);
+    GGML_ASSERT(ne2 == N);
+
+    // input tensor rows must be contiguous
+    GGML_ASSERT(nbq0 == ggml_type_size(q->type));
+    GGML_ASSERT(nbk0 == ggml_type_size(k->type));
+    GGML_ASSERT(nbv0 == ggml_type_size(v->type));
+
+    GGML_ASSERT(neq0 == D);
+    GGML_ASSERT(nek0 == D);
+    GGML_ASSERT(nev0 == D);
+
+    GGML_ASSERT(neq1 == N);
+    GGML_ASSERT(nev0 == D);
+
+    GGML_ASSERT(nev1 == nek1);
+
+    // dst cannot be transposed or permuted
+    GGML_ASSERT(nb0 == sizeof(float));
+    GGML_ASSERT(nb0 <= nb1);
+    GGML_ASSERT(nb1 <= nb2);
+    GGML_ASSERT(nb2 <= nb3);
+
+    assert(dst->type == GGML_TYPE_F32);
+    assert(q->type == GGML_TYPE_F32);
+    assert(k->type == v->type);
+
+    vk_pipeline *pipelines;
+    // XXX TODO other backends may be changing accumulator precision to default to f32 soon
+    bool f32acc = dst->op_params[3] == GGML_PREC_F32;
+    bool small_rows = N <= flash_attention_num_small_rows;
+    switch (D) {
+    case 64:  pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D64[k->type][f32acc][small_rows][0]; break;
+    case 80:  pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D80[k->type][f32acc][small_rows][0]; break;
+    case 96:  pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D96[k->type][f32acc][small_rows][0]; break;
+    case 112: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D112[k->type][f32acc][small_rows][0]; break;
+    case 128: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D128[k->type][f32acc][small_rows][0]; break;
+    case 256: pipelines = &ctx->device->pipeline_flash_attn_f32_f16_D256[k->type][f32acc][small_rows][0]; break;
+    default:
+        assert(!"unsupported D value");
+        return;
+    }
+    assert(pipelines);
+
+    bool aligned = (KV % pipelines[1]->align) == 0;
+    vk_pipeline pipeline = pipelines[aligned];
+    assert(pipeline);
+
+    if (dryrun) {
+        // Request descriptor sets
+        ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
+        return;
+    }
+
+    float scale         = 1.0f;
+    float max_bias      = 0.0f;
+    float logit_softcap = 0.0f;
+
+    memcpy(&scale,         (const float *) dst->op_params + 0, sizeof(float));
+    memcpy(&max_bias,      (const float *) dst->op_params + 1, sizeof(float));
+    memcpy(&logit_softcap, (const float *) dst->op_params + 2, sizeof(float));
+
+    if (logit_softcap != 0) {
+        scale /= logit_softcap;
+    }
+
+    const uint32_t n_head_kv   = neq2;
+    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
+    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
+    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
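    // ALiBi slope bases, worked through (numbers illustrative): with n_head_kv = 32
    // and max_bias = 8.0, n_head_log2 = 32, so m0 = 2^(-8/32) ~ 0.841 and
    // m1 = 2^(-4/32) ~ 0.917. The shader derives each head's slope from these two
    // constants: m0^(h+1) for the first n_head_log2 heads, m1^(2(h-n_head_log2)+1)
    // for the rest.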
+    ggml_vk_sync_buffers(subctx);
+
+    vk_buffer d_Q, d_K, d_V, d_D, d_M;
+    uint64_t q_buf_offset, k_buf_offset, v_buf_offset, d_buf_offset, m_buf_offset;
+
+    bool Q_uma = false, K_uma = false, V_uma = false, D_uma = false, M_uma = false;
+
+    if (ctx->device->uma) {
+        ggml_vk_host_get(ctx->device, q->data,   d_Q, q_buf_offset);
+        ggml_vk_host_get(ctx->device, k->data,   d_K, k_buf_offset);
+        ggml_vk_host_get(ctx->device, v->data,   d_V, v_buf_offset);
+        ggml_vk_host_get(ctx->device, dst->data, d_D, d_buf_offset);
+        Q_uma = d_Q != nullptr;
+        K_uma = d_K != nullptr;
+        V_uma = d_V != nullptr;
+        D_uma = d_D != nullptr;
+        if (mask) {
+            ggml_vk_host_get(ctx->device, mask->data, d_M, m_buf_offset);
+            M_uma = d_M != nullptr;
+        }
+    }
+
+    ggml_backend_vk_buffer_context * d_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+    ggml_backend_vk_buffer_context * q_buf_ctx = (ggml_backend_vk_buffer_context *)q->buffer->context;
+    ggml_backend_vk_buffer_context * k_buf_ctx = (ggml_backend_vk_buffer_context *)k->buffer->context;
+    ggml_backend_vk_buffer_context * v_buf_ctx = (ggml_backend_vk_buffer_context *)v->buffer->context;
+
+    if (!Q_uma) {
+        d_Q = q_buf_ctx->dev_buffer;
+        q_buf_offset = vk_tensor_offset(q) + q->view_offs;
+    }
+    if (!K_uma) {
+        d_K = k_buf_ctx->dev_buffer;
+        k_buf_offset = vk_tensor_offset(k) + k->view_offs;
+    }
+    if (!V_uma) {
+        d_V = v_buf_ctx->dev_buffer;
+        v_buf_offset = vk_tensor_offset(v) + v->view_offs;
+    }
+    if (!D_uma) {
+        d_D = d_buf_ctx->dev_buffer;
+        d_buf_offset = vk_tensor_offset(dst) + dst->view_offs;
+    }
+
+    if (!M_uma) {
+        d_M = d_Q;
+        m_buf_offset = q_buf_offset;
+        if (mask) {
+            ggml_backend_vk_buffer_context * m_buf_ctx = (ggml_backend_vk_buffer_context*)mask->buffer->context;
+            d_M = m_buf_ctx->dev_buffer;
+            m_buf_offset = vk_tensor_offset(mask) + mask->view_offs;
+        }
+    }
+
+    const vk_flash_attn_push_constants pc = { N, KV, (uint32_t)ne1, (uint32_t)ne2, (uint32_t)ne3, (uint32_t)neq2, (uint32_t)neq3, (uint32_t)nek2, (uint32_t)nek3, (uint32_t)nev2, (uint32_t)nev3, nem1, (uint32_t)nbq2, (uint32_t)nbq3, (uint32_t)nbk2, (uint32_t)nbk3, (uint32_t)nbv2, (uint32_t)nbv3, nbm1, scale, max_bias, logit_softcap, mask != nullptr, n_head_log2, m0, m1 };
+    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline,
+        {
+            vk_subbuffer{d_Q, q_buf_offset, VK_WHOLE_SIZE},
+            vk_subbuffer{d_K, k_buf_offset, VK_WHOLE_SIZE},
+            vk_subbuffer{d_V, v_buf_offset, VK_WHOLE_SIZE},
+            vk_subbuffer{d_M, m_buf_offset, VK_WHOLE_SIZE},
+            vk_subbuffer{d_D, d_buf_offset, VK_WHOLE_SIZE},
+        },
+        sizeof(vk_flash_attn_push_constants), &pc, { (uint32_t)neq1, (uint32_t)neq2, (uint32_t)neq3 });
+}
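The `scale /= logit_softcap` line above pre-folds the cap into the Q*K scale so the shader needs only one extra tanh; a scalar sketch of the combined math, assuming an example cap of 30.0f (a value some models use, not taken from this patch):

    #include <cmath>

    static float attn_logit(float qk_dot, float scale, float logit_softcap) {
        if (logit_softcap == 0.0f) {
            return qk_dot * scale;               // plain scaled dot product
        }
        const float s = scale / logit_softcap;   // matches "scale /= logit_softcap" above
        return logit_softcap * tanhf(qk_dot * s);
    }

    // attn_logit(x, 0.125f, 30.0f) saturates smoothly toward +/-30 for large |x|,
    // which is exactly what the capped flash attention shader computes per logit.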
@@ -3933,10 +4977,10 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
         GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);
 
         if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
-            return ctx->device->pipeline_soft_max_f32;
+            return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_wg512 : ctx->device->pipeline_soft_max_f32;
         }
         if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
-            return ctx->device->pipeline_soft_max_f32_f16;
+            return src0->ne[0] > 1024 ? ctx->device->pipeline_soft_max_f32_f16_wg512 : ctx->device->pipeline_soft_max_f32_f16;
         }
         return nullptr;
     case GGML_OP_ROPE:
@@ -3989,6 +5033,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
             return ctx->device->pipeline_pool2d_f32;
         }
         return nullptr;
+    case GGML_OP_RWKV_WKV6:
+        if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+            return ctx->device->pipeline_rwkv_wkv6_f32;
+        }
+        return nullptr;
     case GGML_OP_LEAKY_RELU:
         if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
             return ctx->device->pipeline_leaky_relu_f32;
@@ -4023,7 +5072,7 @@ static bool ggml_vk_op_supports_incontiguous(ggml_op op) {
 }
 
 template <typename PC>
-static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op, const PC&& pc, bool dryrun = false) {
+static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, ggml_op op, PC&& pc, bool dryrun = false) {
     VK_LOG_DEBUG("ggml_vk_op_f32((" << src0 << ", name=" << src0->name << ", type=" << src0->type << ", ne0=" << src0->ne[0] << ", ne1=" << src0->ne[1] << ", ne2=" << src0->ne[2] << ", ne3=" << src0->ne[3] << ", nb0=" << src0->nb[0] << ", nb1=" << src0->nb[1] << ", nb2=" << src0->nb[2] << ", nb3=" << src0->nb[3];
     if (src1 != nullptr) {
         std::cerr << "), (" << src1 << ", name=" << src1->name << ", type=" << src1->type << ", ne0=" << src1->ne[0] << ", ne1=" << src1->ne[1] << ", ne2=" << src1->ne[2] << ", ne3=" << src1->ne[3] << ", nb0=" << src1->nb[0] << ", nb1=" << src1->nb[1] << ", nb2=" << src1->nb[2] << ", nb3=" << src1->nb[3];
@@ -4063,6 +5112,8 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
     const uint64_t ned3 = dst->ne[3];
     const uint64_t ned = ned0 * ned1;
 
+    init_pushconst_fastdiv(pc);
+
     vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, dst, op);
 
     if (pipeline == nullptr) {
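`init_pushconst_fastdiv` (defined earlier in the patch) precomputes magic-multiply constants so the element-wise shaders can replace integer division by runtime-constant tensor strides with a multiply and a shift. A sketch of the classic Granlund–Montgomery construction such helpers use; the names here are illustrative, not the patch's actual API:

```cpp
#include <cstdint>

// Precompute (mp, L) so that for all 32-bit n and a fixed divisor d >= 1:
//   n / d == ((uint64_t(n) * mp >> 32) + n) >> L
// (round-up variant of "Division by Invariant Integers using Multiplication").
void init_fastdiv_values(uint32_t d, uint32_t & mp, uint32_t & L) {
    // L = ceil(log2(d))
    L = 0;
    while (L < 32 && (uint32_t{1} << L) < d) {
        ++L;
    }
    mp = uint32_t(((uint64_t{1} << 32) * ((uint64_t{1} << L) - d)) / d + 1);
}

uint32_t fastdiv(uint32_t n, uint32_t mp, uint32_t L) {
    uint32_t hi = uint32_t((uint64_t(n) * mp) >> 32);  // high half of the magic multiply
    return uint32_t((uint64_t(hi) + n) >> L);          // 64-bit add avoids overflow
}
```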
@@ -4389,6 +5440,134 @@ static void ggml_vk_div(ggml_backend_vk_context * ctx, vk_context& subctx, const
     }, dryrun);
 }
 
+static void ggml_vk_op_f32_rwkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, const vk_op_rwkv_wkv6_push_constants&& pc, bool dryrun = false) {
+    const ggml_tensor * k = dst->src[0];
+    const ggml_tensor * v = dst->src[1];
+    const ggml_tensor * r = dst->src[2];
+    const ggml_tensor * tf = dst->src[3];
+    const ggml_tensor * td = dst->src[4];
+    const ggml_tensor * state = dst->src[5];
+
+    GGML_ASSERT(!ggml_is_quantized(k->type));
+    GGML_ASSERT(!ggml_is_quantized(v->type));
+    GGML_ASSERT(!ggml_is_quantized(r->type));
+    GGML_ASSERT(!ggml_is_quantized(tf->type));
+    GGML_ASSERT(!ggml_is_quantized(td->type));
+    GGML_ASSERT(!ggml_is_quantized(state->type));
+    GGML_ASSERT(dst->buffer != nullptr);
+
+    vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, k, v, r, dst, GGML_OP_RWKV_WKV6);
+    GGML_ASSERT(pipeline != nullptr);
+
+    if (dryrun) {
+        ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1);
+        return;
+    }
+
+    ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context;
+    ggml_backend_vk_buffer_context * k_buf_ctx = (ggml_backend_vk_buffer_context *)k->buffer->context;
+    ggml_backend_vk_buffer_context * v_buf_ctx = (ggml_backend_vk_buffer_context *)v->buffer->context;
+    ggml_backend_vk_buffer_context * r_buf_ctx = (ggml_backend_vk_buffer_context *)r->buffer->context;
+    ggml_backend_vk_buffer_context * tf_buf_ctx = (ggml_backend_vk_buffer_context *)tf->buffer->context;
+    ggml_backend_vk_buffer_context * td_buf_ctx = (ggml_backend_vk_buffer_context *)td->buffer->context;
+    ggml_backend_vk_buffer_context * state_buf_ctx = (ggml_backend_vk_buffer_context *)state->buffer->context;
+
+    ggml_vk_sync_buffers(subctx);
+
+    vk_buffer d_D, d_K, d_V, d_R, d_TF, d_TD, d_State;
+    uint64_t k_offset, v_offset, r_offset, tf_offset, td_offset, state_offset, dst_offset;
+    bool K_uma = false, V_uma = false, R_uma = false, TF_uma = false, TD_uma = false, STATE_uma = false, DST_uma = false;
+
+    if (ctx->device->uma) {
+        ggml_vk_host_get(ctx->device, k->data, d_K, k_offset);
+        ggml_vk_host_get(ctx->device, v->data, d_V, v_offset);
+        ggml_vk_host_get(ctx->device, r->data, d_R, r_offset);
+        ggml_vk_host_get(ctx->device, tf->data, d_TF, tf_offset);
+        ggml_vk_host_get(ctx->device, td->data, d_TD, td_offset);
+        ggml_vk_host_get(ctx->device, state->data, d_State, state_offset);
+        ggml_vk_host_get(ctx->device, dst->data, d_D, dst_offset);
+
+        K_uma = d_K != nullptr;
+        V_uma = d_V != nullptr;
+        R_uma = d_R != nullptr;
+        TF_uma = d_TF != nullptr;
+        TD_uma = d_TD != nullptr;
+        STATE_uma = d_State != nullptr;
+        DST_uma = d_D != nullptr;
+    }
+
+    if (!K_uma) {
+        d_K = k_buf_ctx->dev_buffer;
+        k_offset = vk_tensor_offset(k) + k->view_offs;
+    }
+    if (!V_uma) {
+        d_V = v_buf_ctx->dev_buffer;
+        v_offset = vk_tensor_offset(v) + v->view_offs;
+    }
+    if (!R_uma) {
+        d_R = r_buf_ctx->dev_buffer;
+        r_offset = vk_tensor_offset(r) + r->view_offs;
+    }
+    if (!TF_uma) {
+        d_TF = tf_buf_ctx->dev_buffer;
+        tf_offset = vk_tensor_offset(tf) + tf->view_offs;
+    }
+    if (!TD_uma) {
+        d_TD = td_buf_ctx->dev_buffer;
+        td_offset = vk_tensor_offset(td) + td->view_offs;
+    }
+    if (!STATE_uma) {
+        d_State = state_buf_ctx->dev_buffer;
+        state_offset = vk_tensor_offset(state) + state->view_offs;
+    }
+    if (!DST_uma) {
+        d_D = dst_buf_ctx->dev_buffer;
+        dst_offset = vk_tensor_offset(dst) + dst->view_offs;
+    }
+
+    const uint64_t k_size = ggml_nbytes(k);
+    const uint64_t v_size = ggml_nbytes(v);
+    const uint64_t r_size = ggml_nbytes(r);
+    const uint64_t tf_size = ggml_nbytes(tf);
+    const uint64_t td_size = ggml_nbytes(td);
+    const uint64_t state_size = ggml_nbytes(state);
+    const uint64_t dst_size = ggml_nbytes(dst);
+
+    std::array<uint32_t, 3> elements = {
+        (uint32_t)(pc.B * pc.H),
+        1,
+        1
+    };
+
+    ggml_vk_dispatch_pipeline(ctx, subctx, pipeline, {
+        vk_subbuffer{ d_K, k_offset, k_size },
+        vk_subbuffer{ d_V, v_offset, v_size },
+        vk_subbuffer{ d_R, r_offset, r_size },
+        vk_subbuffer{ d_TF, tf_offset, tf_size },
+        vk_subbuffer{ d_TD, td_offset, td_size },
+        vk_subbuffer{ d_State, state_offset, state_size },
+        vk_subbuffer{ d_D, dst_offset, dst_size }
+    }, sizeof(vk_op_rwkv_wkv6_push_constants), &pc, elements);
+}
+
+static void ggml_vk_rwkv_wkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) {
+    const size_t seq_length = dst->src[0]->ne[3];
+    const size_t n_embed = dst->ne[0];
+    const size_t n_heads = dst->src[0]->ne[2];
+    const size_t n_seqs = dst->src[5]->ne[1];
+
+    ggml_vk_op_f32_rwkv6(
+        ctx, subctx, dst,
+        {
+            (uint32_t)n_seqs,
+            (uint32_t)seq_length,
+            (uint32_t)n_embed,
+            (uint32_t)n_heads,
+        },
+        dryrun
+    );
+}
+
 static void ggml_vk_concat(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
     int * op_params = (int *)dst->op_params;
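For orientation, the WKV6 kernel dispatched above implements RWKV-6's linear-attention recurrence with one workgroup per (sequence, head) pair. A scalar sketch of the per-channel update, as the recurrence is commonly written for RWKV-6 (illustrative names; ggml's CPU implementation is the authoritative reference):

```cpp
// Per head: S is a head_dim x head_dim state matrix carried across time steps.
// For each time step and output channel i:
//   y[i]    = sum_j r[j] * (tf[j] * k[j] * v[i] + S[j][i])
//   S[j][i] = S[j][i] * td[j] + k[j] * v[i]
void wkv6_step(int head_dim, const float * r, const float * k, const float * v,
               const float * tf, const float * td,
               float * S /* head_dim * head_dim */, float * y /* head_dim */) {
    for (int i = 0; i < head_dim; ++i) {
        float acc = 0.0f;
        for (int j = 0; j < head_dim; ++j) {
            const float kv = k[j] * v[i];                  // rank-1 update k v^T
            acc += r[j] * (tf[j] * kv + S[j * head_dim + i]);
            S[j * head_dim + i] = S[j * head_dim + i] * td[j] + kv;  // decayed state
        }
        y[i] = acc;
    }
}
```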
@@ -4432,7 +5611,8 @@ static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context& subctx, con
         (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         0,
-        op_params[0], 0.0f
+        op_params[0], 0.0f,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
 
@@ -4446,6 +5626,7 @@ static void ggml_vk_sqr(ggml_backend_vk_context * ctx, vk_context& subctx, const
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         0,
         0.0f, 0.0f,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
 
@@ -4459,6 +5640,7 @@ static void ggml_vk_sin(ggml_backend_vk_context * ctx, vk_context& subctx, const
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         0,
         0.0f, 0.0f,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
 
@@ -4472,6 +5654,7 @@ static void ggml_vk_cos(ggml_backend_vk_context * ctx, vk_context& subctx, const
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         0,
         0.0f, 0.0f,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
 
@@ -4486,6 +5669,7 @@ static void ggml_vk_clamp(ggml_backend_vk_context * ctx, vk_context& subctx, con
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         0,
         op_params[0], op_params[1],
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
 
@@ -4499,6 +5683,7 @@ static void ggml_vk_pad(ggml_backend_vk_context * ctx, vk_context& subctx, const
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         0,
         0.0f, 0.0f,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
 
@@ -4512,6 +5697,7 @@ static void ggml_vk_repeat(ggml_backend_vk_context * ctx, vk_context& subctx, co
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         0,
         0.0f, 0.0f,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
 
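The trailing `0, 0, ...` literals added to each of these initializers are the new fastdiv slots in the push-constant struct: they are left zero at the call sites and filled in by `init_pushconst_fastdiv(pc)` just before upload, so the shaders can decompose a flat element index without hardware division. A hypothetical host-side illustration of the decomposition (field and helper names are made up for the sketch):

```cpp
#include <cstdint>

// Magic pair for dividing by a fixed ne0, as produced by an init_fastdiv_values-style helper.
struct FastDiv { uint32_t mp, L; };

uint32_t fastdiv(uint32_t n, FastDiv f) {
    uint32_t hi = uint32_t((uint64_t(n) * f.mp) >> 32);
    return uint32_t((uint64_t(hi) + n) >> f.L);
}

// Split a flat index into (i0, i1) for a ne0 x ne1 tensor without '/' or '%'.
void decompose(uint32_t idx, uint32_t ne0, FastDiv ne0_fd, uint32_t & i0, uint32_t & i1) {
    i1 = fastdiv(idx, ne0_fd);  // idx / ne0
    i0 = idx - i1 * ne0;        // idx % ne0
}
```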
@@ -4526,6 +5712,7 @@ static void ggml_vk_cpy(ggml_backend_vk_context * ctx, vk_context& subctx, const
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         d_offset,
         0.0f, 0.0f,
+        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
 
@@ -4582,6 +5769,7 @@ static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx,
         scale, max_bias,
         m0, m1,
         n_head_log2,
+        nrows_x,
     }, dryrun);
 }
 
@@ -4878,19 +6066,27 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t
     for (size_t i = 0; i < x_ne; i++) {
         if (std::is_same<float, X_TYPE>()) {
             x[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
+            // x[i] = 1.0f;
+            // x[i] = i + 1;
+            // x[i] = (i % k == i / k) ? 1.0f : 0.0f;
         } else if (std::is_same<ggml_fp16_t, X_TYPE>()) {
             x[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
+            // x[i] = ggml_fp32_to_fp16(1.0f);
+            // x[i] = ggml_fp32_to_fp16(i + 1);
+            // x[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
         } else {
             GGML_ABORT("fatal error");
         }
     }
     for (size_t i = 0; i < y_ne; i++) {
         if (std::is_same<float, Y_TYPE>()) {
-            // y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
-            y[i] = (i % k == i / k) ? 1.0f : 0.0f;
+            y[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
+            // y[i] = (i % k == i / k) ? 1.0f : 0.0f;
+            // y[i] = i + 1;
         } else if (std::is_same<ggml_fp16_t, Y_TYPE>()) {
-            // y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
-            y[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
+            y[i] = ggml_fp32_to_fp16((rand() / (float)RAND_MAX) * 2.0f - 1.0f);
+            // y[i] = ggml_fp32_to_fp16((i % k == i / k) ? 1.0f : 0.0f);
+            // y[i] = ggml_fp32_to_fp16(i + 1);
         } else {
             GGML_ABORT("fatal error");
         }
@@ -4900,16 +6096,16 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t
     ggml_vk_buffer_write(d_Y, 0, y, sizeof(Y_TYPE) * k * n * batch);
 
     vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue);
+    ggml_vk_ctx_begin(ctx->device, subctx);
     for (size_t i = 0; i < num_it; i++) {
-        ggml_vk_ctx_begin(ctx->device, subctx);
         ggml_vk_matmul(
             ctx, subctx, p, ggml_vk_subbuffer(d_X), ggml_vk_subbuffer(d_Y), ggml_vk_subbuffer(d_D), ggml_vk_subbuffer(ctx->prealloc_split_k),
             m, n, k,
             k, k, m, k*m, k*n, m*n,
             split_k, batch, batch, batch, 1, 1
         );
-        ggml_vk_ctx_end(subctx);
     }
+    ggml_vk_ctx_end(subctx);
 
     auto begin = std::chrono::high_resolution_clock::now();
     ggml_vk_submit(subctx, ctx->fence);
@@ -4974,7 +6170,7 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t
             double err = std::fabs(d[i] - d_chk[i]);
             avg_err += err;
 
-            if (err > 0.05f && first_err_n == -1) {
+            if ((err > 0.05f || std::isnan(err)) && first_err_n == -1) {
                 first_err_b = i / (m * n);
                 first_err_n = (i % (m * n)) / m;
                 first_err_m = (i % (m * n)) % m;
@@ -4987,12 +6183,10 @@ static void ggml_vk_test_matmul(ggml_backend_vk_context * ctx, size_t m, size_t
 
     std::cerr << "TEST " << shname << " m=" << m << " n=" << n << " k=" << k << " batch=" << batch << " split_k=" << split_k << " matmul " << time / num_it << "ms " << tflops << " TFLOPS avg_err=" << avg_err << std::endl;
 
-    if (avg_err > 0.1) {
+    if (avg_err > 0.1 || std::isnan(avg_err)) {
         std::cerr << "m = " << first_err_m << " n = " << first_err_n << " b = " << first_err_b << std::endl;
         std::cerr << "Actual result: " << std::endl << std::endl;
         ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
-        std::cerr << std::endl;
-        ggml_vk_print_matrix_area(d, GGML_TYPE_F32, m, n, first_err_m, first_err_n + 15, first_err_b);
         std::cerr << "Expected result: " << std::endl << std::endl;
         ggml_vk_print_matrix_area(d_chk, GGML_TYPE_F32, m, n, first_err_m, first_err_n, first_err_b);
@@ -5175,13 +6369,13 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m,
vk_pipeline p; std::string shname; if (shader_size == 0) { - p = ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_s; + p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->a_s : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_s; shname = std::string(ggml_type_name(quant)) + "_ALIGNED_S"; } else if (shader_size == 1) { - p = ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_m; + p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->a_m : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_m; shname = std::string(ggml_type_name(quant)) + "_ALIGNED_M"; } else if (shader_size == 2) { - p = ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_l; + p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->a_l : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->a_l; shname = std::string(ggml_type_name(quant)) + "_ALIGNED_L"; } else { GGML_ASSERT(0); @@ -5191,13 +6385,13 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, if (k != kpad) { if (shader_size == 0) { - p = ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->s; + p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->s : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->s; shname = std::string(ggml_type_name(quant)) + "_S"; } else if (shader_size == 1) { - p = ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->m; + p = ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->m : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->m; shname = std::string(ggml_type_name(quant)) + "_M"; } else if (shader_size == 2) { - p = ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->l; + p = ctx->device->fp16 ? 
ctx->device->pipeline_dequant_mul_mat_mat[quant].f16acc->l : ctx->device->pipeline_dequant_mul_mat_mat[quant].f32acc->l; shname = std::string(ggml_type_name(quant)) + "_L"; } else { GGML_ASSERT(0); @@ -5247,16 +6441,16 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, ggml_vk_buffer_write(y_buf, 0, y, y_sz); vk_context subctx = ggml_vk_create_context(ctx, ctx->device->compute_queue); + ggml_vk_ctx_begin(ctx->device, subctx); for (size_t i = 0; i < num_it; i++) { - ggml_vk_ctx_begin(ctx->device, subctx); ggml_vk_matmul( ctx, subctx, p, ggml_vk_subbuffer(qx_buf), ggml_vk_subbuffer(y_buf), ggml_vk_subbuffer(d_buf), ggml_vk_subbuffer(ctx->prealloc_split_k), m, n, k, k, k, m, k*m, k*n, m*n, split_k, batch, batch, batch, 1, 1 ); - ggml_vk_ctx_end(subctx); } + ggml_vk_ctx_end(subctx); auto begin = std::chrono::high_resolution_clock::now(); @@ -5356,105 +6550,13 @@ static void ggml_vk_test_dequant_matmul(ggml_backend_vk_context * ctx, size_t m, static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) { #if defined(GGML_VULKAN_RUN_TESTS) - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_F32); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_0); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_1); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_0); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_1); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q8_0); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q2_K); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q3_K); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q4_K); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q5_K); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_Q6_K); - ggml_vk_test_dequant(ctx, 7680, GGML_TYPE_IQ4_NL); - - ggml_vk_test_matmul(ctx, 512, 512, 100, 32, 100, 1, 2); - - ggml_vk_test_matmul(ctx, 128, 512, 512, 2, 100, 1, 0); - ggml_vk_test_matmul(ctx, 128, 512, 512, 2, 100, 1, 1); - ggml_vk_test_matmul(ctx, 128, 512, 512, 2, 100, 1, 2); - // ggml_vk_test_matmul(ctx, 128, 512, 512, 2, 100, 4, 0); - // ggml_vk_test_matmul(ctx, 128, 512, 512, 2, 100, 4, 1); - // ggml_vk_test_matmul(ctx, 128, 512, 512, 2, 100, 4, 2); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_0); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_0); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_0); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_0); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_0); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_0); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_1); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_1); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_1); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_1); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_1); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_1); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_0); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_0); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_0); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_0); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_0); - // 
ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_0); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_1); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_1); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_1); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_1); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_1); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_1); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q8_0); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q8_0); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q8_0); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q8_0); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q8_0); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q8_0); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q2_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q2_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q2_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q2_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q2_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q2_K); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q3_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q3_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q3_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q3_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q3_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q3_K); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q4_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q4_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q4_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q4_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q4_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q4_K); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q5_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q5_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q5_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q5_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q5_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, GGML_TYPE_Q5_K); - - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_Q6_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_Q6_K); - ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_Q6_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 0, GGML_TYPE_Q6_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 1, GGML_TYPE_Q6_K); - // ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 4, 2, 
GGML_TYPE_Q6_K);
-
-    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 0, GGML_TYPE_IQ4_NL);
-    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 1, GGML_TYPE_IQ4_NL);
-    ggml_vk_test_dequant_matmul(ctx, 128, 512, 512, 2, 100, 1, 2, GGML_TYPE_IQ4_NL);
-
-    std::cerr << std::endl;
-
     const std::vector<size_t> vals {
+        512, 512, 128,
+        128, 512, 512,
+        4096, 512, 4096,
+        11008, 512, 4096,
+        4096, 512, 11008,
+        32000, 512, 4096,
         8, 8, 8,
         100, 46, 576,
         623, 111, 128,
@@ -5467,25 +6569,52 @@ static void ggml_vk_preallocate_buffers(ggml_backend_vk_context * ctx) {
         49, 49, 128,
         128, 49, 49,
         4096, 49, 4096,
-        11008, 49, 4096,
-        4096, 49, 11008,
-        32000, 49, 4096,
-        512, 512, 128,
-        128, 512, 512,
-        4096, 512, 4096,
-        11008, 512, 4096,
-        4096, 512, 11008,
-        32000, 512, 4096,
     };
-    const size_t num_it = 1;
+    const size_t num_it = 100;
+
     for (size_t i = 0; i < vals.size(); i += 3) {
         ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0);
         ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1);
         ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2);
-        // ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
-        // ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
-        // ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
-        std::cerr << std::endl;
+        std::cerr << '\n';
+        ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0);
+        ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1);
+        ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2);
+        std::cerr << '\n';
+        ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0);
+        ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1);
+        ggml_vk_test_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2);
+        std::cerr << '\n' << std::endl;
+
+        if (vals[i + 2] % 32 == 0) {
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0, GGML_TYPE_Q4_0);
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1, GGML_TYPE_Q4_0);
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2, GGML_TYPE_Q4_0);
+            std::cerr << '\n';
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 0, GGML_TYPE_Q4_0);
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 1, GGML_TYPE_Q4_0);
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2, GGML_TYPE_Q4_0);
+            std::cerr << '\n';
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0, GGML_TYPE_Q4_0);
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1, GGML_TYPE_Q4_0);
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2, GGML_TYPE_Q4_0);
+            std::cerr << '\n' << std::endl;
+        }
+
+        if (vals[i + 2] % 256 == 0) {
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 0, GGML_TYPE_Q4_K);
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 1, GGML_TYPE_Q4_K);
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 1, 2, GGML_TYPE_Q4_K);
+            std::cerr << '\n';
+            ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2,
num_it, 2, 1, GGML_TYPE_Q4_K); + ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 2, 2, GGML_TYPE_Q4_K); + std::cerr << '\n'; + ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 0, GGML_TYPE_Q4_K); + ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 1, GGML_TYPE_Q4_K); + ggml_vk_test_dequant_matmul(ctx, vals[i], vals[i + 1], vals[i + 2], 2, num_it, 4, 2, GGML_TYPE_Q4_K); + std::cerr << '\n' << std::endl; + } } GGML_ABORT("fatal error"); @@ -5532,6 +6661,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod const ggml_tensor * src0 = node->src[0]; const ggml_tensor * src1 = node->src[1]; const ggml_tensor * src2 = node->src[2]; + const ggml_tensor * src3 = node->src[3]; switch (node->op) { // Return on empty ops to avoid generating a compute_ctx and setting exit_tensor @@ -5583,7 +6713,9 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod case GGML_OP_IM2COL: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_POOL_2D: + case GGML_OP_RWKV_WKV6: case GGML_OP_LEAKY_RELU: + case GGML_OP_FLASH_ATTN_EXT: break; default: std::cerr << "ggml_vulkan: Error: Missing op: " << ggml_op_name(node->op) << std::endl; @@ -5601,6 +6733,48 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod } else { compute_ctx = ctx->compute_ctx.lock(); } + } else { + switch (node->op) { + case GGML_OP_REPEAT: + case GGML_OP_ACC: + case GGML_OP_GET_ROWS: + case GGML_OP_ADD: + case GGML_OP_MUL: + case GGML_OP_DIV: + case GGML_OP_CONCAT: + case GGML_OP_UPSCALE: + case GGML_OP_SCALE: + case GGML_OP_SQR: + case GGML_OP_SIN: + case GGML_OP_COS: + case GGML_OP_CLAMP: + case GGML_OP_PAD: + case GGML_OP_CPY: + case GGML_OP_CONT: + case GGML_OP_DUP: + case GGML_OP_NORM: + case GGML_OP_GROUP_NORM: + case GGML_OP_RMS_NORM: + case GGML_OP_UNARY: + case GGML_OP_DIAG_MASK_INF: + case GGML_OP_SOFT_MAX: + case GGML_OP_ROPE: + case GGML_OP_ARGSORT: + case GGML_OP_SUM_ROWS: + case GGML_OP_IM2COL: + case GGML_OP_TIMESTEP_EMBEDDING: + case GGML_OP_POOL_2D: + case GGML_OP_LEAKY_RELU: + { + // These operations all go through ggml_vk_op_f32, so short-circuit and + // do the only thing needed for the dryrun. 
+ vk_pipeline pipeline = ggml_vk_op_get_pipeline(ctx, src0, src1, src2, node, node->op); + ggml_pipeline_request_descriptor_sets(ctx->device, pipeline, 1); + return false; + } + default: + break; + } } switch (node->op) { @@ -5734,6 +6908,16 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_tensor * nod case GGML_OP_MUL_MAT_ID: ggml_vk_mul_mat_id(ctx, compute_ctx, src0, src1, src2, node, dryrun); + break; + + case GGML_OP_FLASH_ATTN_EXT: + ggml_vk_flash_attn(ctx, compute_ctx, src0, src1, src2, src3, node, dryrun); + + break; + + case GGML_OP_RWKV_WKV6: + ggml_vk_rwkv_wkv6(ctx, compute_ctx, node, dryrun); + break; default: return false; @@ -5814,6 +6998,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * case GGML_OP_IM2COL: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_POOL_2D: + case GGML_OP_RWKV_WKV6: case GGML_OP_LEAKY_RELU: case GGML_OP_REPEAT: buf = tensor->buffer; @@ -5834,6 +7019,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_tensor * break; case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: + case GGML_OP_FLASH_ATTN_EXT: buf = tensor->buffer; break; @@ -6330,16 +7516,17 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg bool first_node_in_batch = true; // true if next node will be first node in a batch int submit_node_idx = 0; // index to first node in a batch - // submit work every submit_count node to overlap CPU cmdbuffer generation with GPU execution - constexpr int submit_count = 100; + // Submit work every nodes_per_submit nodes to overlap CPU cmdbuffer generation with GPU execution. + // Start with a smaller count to get work submitted right away, and increase it after each submit. + int nodes_per_submit = 20; int submitted_nodes = 0; + int submit_count = 0; for (int i = 0; i < cgraph->n_nodes; i++) { if (first_node_in_batch) { submit_node_idx = i; } - bool submit = (submitted_nodes >= submit_count) || (i == last_node); - + bool submit = (submitted_nodes >= nodes_per_submit) || (i == last_node); bool enqueued = ggml_vk_build_graph(ctx, cgraph->nodes[i], i, cgraph->nodes[submit_node_idx], submit_node_idx, false, i == last_node, submit); @@ -6356,6 +7543,15 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg if (submit) { first_node_in_batch = true; submitted_nodes = 0; + switch (submit_count) { + case 0: + nodes_per_submit = 50; + break; + default: + nodes_per_submit = 100; + break; + } + submit_count++; } } @@ -6512,6 +7708,12 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_OP_MUL_MAT: case GGML_OP_MUL_MAT_ID: { + ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context; + const vk_device& device = ggml_vk_get_device(ctx->device); + if (op->op == GGML_OP_MUL_MAT_ID && !device->mul_mat_id_s && !device->mul_mat_id_m && !device->mul_mat_id_l) { + // If there's not enough shared memory for row_ids and the result tile, fallback to CPU + return false; + } switch (op->src[0]->type) { case GGML_TYPE_F32: case GGML_TYPE_F16: @@ -6549,6 +7751,57 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm return true; } break; + case GGML_OP_FLASH_ATTN_EXT: + { + ggml_backend_vk_device_context * ctx = (ggml_backend_vk_device_context *)dev->context; + if (!ggml_vk_get_device(ctx->device)->coopmat2) { + return false; + } + switch (op->src[0]->ne[0]) { + case 64: + case 80: + case 96: + case 112: + case 128: + case 256: + break; + default: + return 
false;
+            }
+            if (op->src[0]->type != GGML_TYPE_F32) {
+                return false;
+            }
+            if (op->type != GGML_TYPE_F32) {
+                return false;
+            }
+            if (op->src[3] && op->src[3]->type != GGML_TYPE_F16) {
+                return false;
+            }
+            // It's straightforward to support different K/V dequant, but would
+            // significantly increase the number of pipelines
+            if (op->src[1]->type != op->src[2]->type) {
+                return false;
+            }
+            switch (op->src[1]->type) {
+            case GGML_TYPE_F16:
+            case GGML_TYPE_Q4_0:
+            case GGML_TYPE_Q4_1:
+            case GGML_TYPE_Q5_0:
+            case GGML_TYPE_Q5_1:
+            case GGML_TYPE_Q8_0:
+            // K dequants currently disabled because D dimension is rounded up to 256 and runs inefficiently
+            //case GGML_TYPE_Q2_K:
+            //case GGML_TYPE_Q3_K:
+            //case GGML_TYPE_Q4_K:
+            //case GGML_TYPE_Q5_K:
+            //case GGML_TYPE_Q6_K:
+            case GGML_TYPE_IQ4_NL:
+                break;
+            default:
+                return false;
+            }
+            return true;
+        }
        case GGML_OP_GET_ROWS:
            {
                switch (op->src[0]->type) {
@@ -6585,7 +7838,16 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_OP_REPEAT:
             return ggml_type_size(op->type) == sizeof(float) && ggml_type_size(op->src[0]->type) == sizeof(float);
         case GGML_OP_ROPE:
-            return ggml_is_contiguous(op->src[0]);
+            {
+                const int mode = ((const int32_t *) op->op_params)[2];
+                if (mode & GGML_ROPE_TYPE_MROPE) {
+                    return false;
+                }
+                if (mode & GGML_ROPE_TYPE_VISION) {
+                    return false;
+                }
+                return ggml_is_contiguous(op->src[0]);
+            }
         case GGML_OP_NONE:
         case GGML_OP_RESHAPE:
         case GGML_OP_VIEW:
@@ -6613,6 +7875,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_OP_IM2COL:
         case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_POOL_2D:
+        case GGML_OP_RWKV_WKV6:
         case GGML_OP_LEAKY_RELU:
             return true;
         default:
@@ -6709,8 +7972,9 @@ static const struct ggml_backend_reg_i ggml_backend_vk_reg_i = {
 
 ggml_backend_reg_t ggml_backend_vk_reg() {
     static ggml_backend_reg reg = {
-        /* .iface   = */ ggml_backend_vk_reg_i,
-        /* .context = */ nullptr,
+        /* .api_version = */ GGML_BACKEND_API_VERSION,
+        /* .iface       = */ ggml_backend_vk_reg_i,
+        /* .context     = */ nullptr,
     };
     return &reg;
@@ -6862,6 +8126,7 @@ static void ggml_vk_check_results_0(ggml_tensor * tensor) {
     ggml_tensor * src0 = tensor->src[0];
     ggml_tensor * src1 = tensor->src[1];
     ggml_tensor * src2 = tensor->src[2];
+    ggml_tensor * src3 = tensor->src[3];
 
     struct ggml_init_params iparams = {
         /*.mem_size   =*/ 2ul*1024ul*1024ul*1024ul,
@@ -6874,15 +8139,18 @@ static void ggml_vk_check_results_0(ggml_tensor * tensor) {
     struct ggml_tensor * src0_clone = nullptr;
     struct ggml_tensor * src1_clone = nullptr;
     struct ggml_tensor * src2_clone = nullptr;
+    struct ggml_tensor * src3_clone = nullptr;
     struct ggml_tensor * tensor_clone = nullptr;
 
     size_t src0_size;
     size_t src1_size;
     size_t src2_size;
+    size_t src3_size;
 
     void * src0_buffer = nullptr;
     void * src1_buffer = nullptr;
     void * src2_buffer = nullptr;
+    void * src3_buffer = nullptr;
 
     if (src0 != nullptr) {
         src0_clone = ggml_dup_tensor(ggml_ctx, src0);
@@ -7010,8 +8278,53 @@ static void ggml_vk_check_results_0(ggml_tensor * tensor) {
             ggml_vk_print_tensor(src2, "src2");
         }
     }
+    if (src3 != nullptr) {
+        src3_clone = ggml_dup_tensor(ggml_ctx, src3);
 
-    if (tensor->op == GGML_OP_MUL_MAT) {
+        src3_size = ggml_nbytes(src3);
+
+        src3_buffer = malloc(src3_size);
+        src3_clone->data = src3_buffer;
+        if (ggml_backend_buffer_is_host(src3->buffer)) {
+            memcpy(src3_clone->data, src3->data, src3_size);
+            memcpy(src3_clone->nb, src3->nb, sizeof(size_t) * GGML_MAX_DIMS);
+        } else if (ggml_backend_buffer_is_vk(src3->buffer)) {
+            ggml_backend_vk_buffer_context * buf_ctx = (ggml_backend_vk_buffer_context *)src3->buffer->context;
+            vk_buffer& buffer_gpu = buf_ctx->dev_buffer;
+            uint64_t offset = vk_tensor_offset(src3) + src3->view_offs;
+            if (!ggml_is_contiguous(src3) && ggml_vk_dim01_contiguous(src3)) {
+                for (int i3 = 0; i3 < src3->ne[3]; i3++) {
+                    for (int i2 = 0; i2 < src3->ne[2]; i2++) {
+                        const int idx = i3*src3->ne[2] + i2;
+                        ggml_vk_buffer_read(buffer_gpu, offset + idx * src3->nb[2], ((char *)src3_clone->data + idx * src3_clone->nb[2]), src3->ne[1] * src3->nb[1]);
+                    }
+                }
+
+                src3_clone->nb[0] = src3->nb[0];
+                src3_clone->nb[1] = src3->nb[1];
+                for (int i = 2; i < GGML_MAX_DIMS; i++) {
+                    src3_clone->nb[i] = src3_clone->nb[i - 1]*src3_clone->ne[i - 1];
+                }
+            } else {
+                if (offset + src3_size >= buffer_gpu->size) {
+                    src3_size = buffer_gpu->size - offset;
+                }
+                ggml_vk_buffer_read(buffer_gpu, offset, src3_clone->data, src3_size);
+                memcpy(src3_clone->nb, src3->nb, sizeof(size_t) * GGML_MAX_DIMS);
+            }
+        } else {
+            GGML_ABORT("fatal error");
+        }
+
+        if (vk_output_tensor > 0 && vk_output_tensor == check_counter) {
+            ggml_vk_print_tensor(src3, "src3");
+        }
+    }
+
+    if (tensor->op == GGML_OP_FLASH_ATTN_EXT) {
+        const float *params = (const float *)tensor->op_params;
+        tensor_clone = ggml_flash_attn_ext(ggml_ctx, src0_clone, src1_clone, src2_clone, src3_clone, params[0], params[1], params[2]);
+    } else if (tensor->op == GGML_OP_MUL_MAT) {
         tensor_clone = ggml_mul_mat(ggml_ctx, src0_clone, src1_clone);
     } else if (tensor->op == GGML_OP_MUL_MAT_ID) {
         tensor_clone = ggml_mul_mat_id(ggml_ctx, src0_clone, src1_clone, src2_clone);
@@ -7127,7 +8440,7 @@ static void ggml_vk_check_results_0(ggml_tensor * tensor) {
         const int32_t max_period = tensor->op_params[1];
         tensor_clone = ggml_timestep_embedding(ggml_ctx, src0_clone, dim, max_period);
     } else if (tensor->op == GGML_OP_POOL_2D) {
-        enum ggml_op_pool op = static_cast<ggml_op_pool>(dst->op_params[0]);
+        enum ggml_op_pool op = static_cast<ggml_op_pool>(tensor->op_params[0]);
         const int32_t k0 = tensor->op_params[1];
         const int32_t k1 = tensor->op_params[2];
         const int32_t s0 = tensor->op_params[3];
@@ -7139,7 +8452,11 @@ static void ggml_vk_check_results_0(ggml_tensor * tensor) {
     } else if (tensor->op == GGML_OP_LEAKY_RELU) {
         const float * op_params = (const float *)tensor->op_params;
         tensor_clone = ggml_leaky_relu(ggml_ctx, src0_clone, op_params[0], false);
-    } else {
+    } else if (tensor->op == GGML_OP_RWKV_WKV6) {
+        tensor_clone = ggml_rwkv_wkv6(ggml_ctx, tensor->src[0], tensor->src[1], tensor->src[2], tensor->src[3],
+                                      tensor->src[4], tensor->src[5]);
+    }
+    else {
         std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
         GGML_ABORT("fatal error");
     }
@@ -7336,3 +8653,5 @@ static void ggml_vk_check_results_1(ggml_tensor * tensor) {
     VK_LOG_DEBUG("END ggml_vk_check_results_1(" << tensor->name << ")");
 }
 #endif
+
+GGML_BACKEND_DL_IMPL(ggml_backend_vk_reg)
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt
index 10075db337737..bd0c74cb1c770 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/CMakeLists.txt
@@ -1,7 +1,9 @@
 find_package (Threads REQUIRED)
+find_package(Vulkan COMPONENTS glslc REQUIRED)
 
 set(TARGET vulkan-shaders-gen)
 add_executable(${TARGET} vulkan-shaders-gen.cpp)
 install(TARGETS ${TARGET} RUNTIME)
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
 target_link_libraries(vulkan-shaders-gen
PUBLIC Threads::Threads) +target_link_libraries(vulkan-shaders-gen PRIVATE Vulkan::Vulkan) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp index d5b989735bc0b..91bb8f8db610e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp @@ -2,6 +2,15 @@ #extension GL_EXT_shader_explicit_arithmetic_types_int8 : require #endif +#include "types.comp" + +#if defined(A_TYPE_PACKED16) +layout (binding = 0) readonly buffer A_PACKED16 {A_TYPE_PACKED16 data_a_packed16[];}; +#endif +#if defined(A_TYPE_PACKED32) +layout (binding = 0) readonly buffer A_PACKED32 {A_TYPE_PACKED32 data_a_packed32[];}; +#endif + #if defined(DATA_A_F32) vec2 dequantize(uint ib, uint iqs, uint a_offset) { return vec2(data_a[a_offset + ib], data_a[a_offset + ib + 1]); @@ -16,53 +25,94 @@ vec2 dequantize(uint ib, uint iqs, uint a_offset) { #if defined(DATA_A_Q4_0) vec2 dequantize(uint ib, uint iqs, uint a_offset) { - const float d = float(data_a[a_offset + ib].d); const uint vui = uint(data_a[a_offset + ib].qs[iqs]); - return (vec2(vui & 0xF, vui >> 4) - 8.0f) * d; + return (vec2(vui & 0xF, vui >> 4) - 8.0f); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]); + return (vec4(vui & 0xF, (vui >> 4) & 0xF, (vui >> 8) & 0xF, vui >> 12) - 8.0f); } #endif #if defined(DATA_A_Q4_1) vec2 dequantize(uint ib, uint iqs, uint a_offset) { - const float d = float(data_a[a_offset + ib].d); - const float m = float(data_a[a_offset + ib].m); const uint vui = uint(data_a[a_offset + ib].qs[iqs]); - return vec2(vui & 0xF, vui >> 4) * d + m; + return vec2(vui & 0xF, vui >> 4); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]); + return vec4(vui & 0xF, (vui >> 4) & 0xF, (vui >> 8) & 0xF, vui >> 12); } #endif #if defined(DATA_A_Q5_0) vec2 dequantize(uint ib, uint iqs, uint a_offset) { - const float d = float(data_a[a_offset + ib].d); const uint uint_qh = uint(data_a[a_offset + ib].qh[1]) << 16 | data_a[a_offset + ib].qh[0]; const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10); const uint vui = uint(data_a[a_offset + ib].qs[iqs]); - return (vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) - 16.0f) * d; + return (vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) - 16.0f); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint uint_qh = uint(data_a_packed16[a_offset + ib].qh[1]) << 16 | data_a_packed16[a_offset + ib].qh[0]; + const ivec2 qh0 = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10); + const ivec2 qh1 = ivec2(((uint_qh >> (iqs + 1)) << 4) & 0x10, (uint_qh >> (iqs + 13)) & 0x10); + const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]); + return (vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y) - 16.0f); } #endif #if defined(DATA_A_Q5_1) vec2 dequantize(uint ib, uint iqs, uint a_offset) { - const float d = float(data_a[a_offset + ib].d); - const float m = float(data_a[a_offset + ib].m); const uint uint_qh = data_a[a_offset + ib].qh; const ivec2 qh = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10); const uint vui = uint(data_a[a_offset + ib].qs[iqs]); - return vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y) * d + m; + return vec2((vui & 0xF) | qh.x, (vui >> 4) | qh.y); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + 
const uint uint_qh = data_a_packed16[a_offset + ib].qh; + const ivec2 qh0 = ivec2(((uint_qh >> iqs) << 4) & 0x10, (uint_qh >> (iqs + 12)) & 0x10); + const ivec2 qh1 = ivec2(((uint_qh >> (iqs + 1)) << 4) & 0x10, (uint_qh >> (iqs + 13)) & 0x10); + const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]); + return vec4((vui & 0xF) | qh0.x, ((vui >> 4) & 0xF) | qh0.y, ((vui >> 8) & 0xF) | qh1.x, (vui >> 12) | qh1.y); } #endif #if defined(DATA_A_Q8_0) vec2 dequantize(uint ib, uint iqs, uint a_offset) { - const float d = float(data_a[a_offset + ib].d); - return vec2(int(data_a[a_offset + ib].qs[iqs]), int(data_a[a_offset + ib].qs[iqs + 1])) * d; + return vec2(int(data_a[a_offset + ib].qs[iqs]), int(data_a[a_offset + ib].qs[iqs + 1])); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + uint32_t v0 = data_a_packed16[a_offset + ib].qs[iqs/2]; + uint32_t v1 = data_a_packed16[a_offset + ib].qs[iqs/2 + 1]; + return vec4(int8_t(v0 & 0xFF), int8_t(v0 >> 8), int8_t(v1 & 0xFF), int8_t(v1 >> 8)); } #endif #if defined(DATA_A_IQ4_NL) vec2 dequantize(uint ib, uint iqs, uint a_offset) { - const float d = float(data_a[a_offset + ib].d); const uint vui = uint(data_a[a_offset + ib].qs[iqs]); - return vec2(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[vui >> 4]) * d; + return vec2(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[vui >> 4]); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint vui = uint(data_a_packed16[a_offset + ib].qs[iqs/2]); + return vec4(kvalues_iq4nl[vui & 0xF], kvalues_iq4nl[(vui >> 4) & 0xF], kvalues_iq4nl[(vui >> 8) & 0xF], kvalues_iq4nl[vui >> 12]); +} +#endif + +#if defined(DATA_A_F32) || defined(DATA_A_F16) +vec2 get_dm(uint ib, uint a_offset) { + return vec2(0, 0); +} +#endif + +#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ4_NL) +vec2 get_dm(uint ib, uint a_offset) { + return vec2(float(data_a[a_offset + ib].d), 0); +} +#endif + +#if defined(DATA_A_Q4_1) || defined(DATA_A_Q5_1) +vec2 get_dm(uint ib, uint a_offset) { + return vec2(float(data_a[a_offset + ib].d), float(data_a[a_offset + ib].m)); } #endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp new file mode 100644 index 0000000000000..a8707b621e7e2 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp @@ -0,0 +1,305 @@ + +#include "types.comp" + +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ4_0 { + block_q4_0_packed16 block; +}; + +float16_t dequantFuncQ4_0(const in decodeBufQ4_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const float16_t d = bl.block.d; + const uint idx = coordInBlock[1]; + const uint shift = (idx & 0x10) >> 2; + uint32_t qs = unpack8(uint32_t(bl.block.qs[(idx & 0xE) >> 1]))[idx & 1]; + qs >>= shift; + qs &= 0xF; + float16_t ret = (float16_t(qs) - float16_t(8)) * d; + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ4_1 { + block_q4_1 block; +}; + +float16_t dequantFuncQ4_1(const in decodeBufQ4_1 bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const float16_t d = bl.block.d; + const float16_t m = bl.block.m; + const uint idx = coordInBlock[1]; + const uint iqs = idx & 0xF; + const uint shift = (idx & 0x10) >> 2; + uint32_t qs = bl.block.qs[iqs]; + qs >>= shift; + qs &= 0xF; + float16_t ret = float16_t(qs) * d + m; + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ5_0 
{ + block_q5_0 block; +}; + +float16_t dequantFuncQ5_0(const in decodeBufQ5_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const float16_t d = bl.block.d; + const uint idx = coordInBlock[1]; + const uint iqs = idx & 0xF; + + const uint uint_qh = uint(bl.block.qh[1]) << 16 | bl.block.qh[0]; + const uint qh = ((uint_qh >> idx) << 4) & 0x10; + + const uint shift = (idx & 0x10) >> 2; + uint32_t qs = bl.block.qs[iqs]; + qs >>= shift; + qs &= 0xF; + + float16_t ret = (float16_t(qs | qh) - float16_t(16)) * d; + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 8) buffer decodeBufQ5_1 { + block_q5_1 block; +}; + +float16_t dequantFuncQ5_1(const in decodeBufQ5_1 bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const float16_t d = bl.block.d; + const float16_t m = bl.block.m; + const uint idx = coordInBlock[1]; + const uint iqs = idx & 0xF; + + const uint uint_qh = bl.block.qh; + const uint qh = ((uint_qh >> idx) << 4) & 0x10; + + const uint shift = (idx & 0x10) >> 2; + uint32_t qs = bl.block.qs[iqs]; + qs >>= shift; + qs &= 0xF; + + float16_t ret = float16_t(qs | qh) * d + m; + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ8_0 { + block_q8_0_packed16 block; +}; + +float16_t dequantFuncQ8_0(const in decodeBufQ8_0 bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const float16_t d = bl.block.d; + const uint idx = coordInBlock[1]; + const uint iqs = idx; + + // Load 16b and select the byte for this element + int32_t qs = unpack8(int32_t(bl.block.qs[(iqs & 0x1E) >> 1]))[iqs & 1]; + float16_t ret = float16_t(qs) * d; + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 4) buffer decodeBufQ2_K { + block_q2_K block; +}; + +float16_t dequantFuncQ2_K(const in decodeBufQ2_K bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const f16vec2 d = bl.block.d; + const uint idx = coordInBlock[1]; + const uint iqs = idx; + + const uint qsi = (iqs / 128) * 32 + (iqs % 32); // 0..31 + const uint scalesi = iqs / 16; // 0..15 + const uint qsshift = ((iqs % 128) / 32) * 2; // 0,2,4,6 + + uint32_t qs = bl.block.qs[qsi]; + const uint scales = bl.block.scales[scalesi]; + float16_t ret = d.x * float16_t(scales & 0xF) * float16_t((qs >> qsshift) & 3) - d.y * float16_t(scales >> 4); + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ3_K { + block_q3_K block; +}; + +float16_t dequantFuncQ3_K(const in decodeBufQ3_K bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const uint idx = coordInBlock[1]; + const uint iqs = idx; + + const uint n = iqs / 128; // 0,1 + const uint qsi = n * 32 + (iqs % 32); // 0..63 + const uint hmi = (iqs % 32); // 0..31 + const uint j = (iqs % 128) / 8; // 0..15 + const uint is = iqs / 16; // 0..15 + const uint halfsplit = ((iqs % 128) / 32); // 0,1,2,3 + const uint qsshift = halfsplit * 2; // 0,2,4,6 + const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128 + + uint32_t scaleidx0 = (is < 8) ? is : (is-8); + uint32_t scaleidx0shift = (is < 8) ? 0 : 4; + uint32_t scaleidx1 = is + 8 - (is/4)*4; + uint32_t scaleidx1shift = (is/4)*2; + + const int8_t us = int8_t(((bl.block.scales[scaleidx0] >> scaleidx0shift) & 0xF) | (((bl.block.scales[scaleidx1] >> scaleidx1shift) & 3) << 4)); + + const float16_t dl = bl.block.d * float16_t(us - 32); + + float16_t ret = dl * float16_t(int8_t((bl.block.qs[qsi ] >> qsshift) & 3) - (((bl.block.hmask[hmi ] & m) != 0) ? 
0 : 4)); + + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ4_K { + block_q4_K block; +}; + +float16_t dequantFuncQ4_K(const in decodeBufQ4_K bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const uint idx = coordInBlock[1]; + const uint iqs = idx; + + const uint n = iqs / 64; // 0,1,2,3 + const uint b = (iqs % 64) / 32; // 0,1 + const uint is = (idx & 0xE0) >> 5; // 0..7 + const uint qsi = n * 32 + (iqs % 32); // 0..127 + + const f16vec2 loadd = bl.block.d; + + uint32_t sc; + uint32_t mbyte; + + uint32_t scidx0 = (is < 4) ? is : (is + 4); + uint32_t scidx1 = (is < 4) ? is : (is - 4); + uint32_t scidxmask1 = (is < 4) ? 0x30 : 0xC0; + uint32_t scidxshift1 = (is < 4) ? 0 : 2; + uint32_t mbidx0 = is + 4; + uint32_t mbidx1 = (is < 4) ? is + 4 : is; + uint32_t mbidxmask0 = (is < 4) ? 0xF : 0xF0; + uint32_t mbidxshift0 = (is < 4) ? 0 : 4; + uint32_t mbidxmask1 = (is < 4) ? 0x30 : 0xC0; + uint32_t mbidxshift1 = (is < 4) ? 0 : 2; + + sc = uint8_t((bl.block.scales[scidx0] & 0xF) | ((bl.block.scales[scidx1] & scidxmask1) >> scidxshift1)); + mbyte = uint8_t(((bl.block.scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((bl.block.scales[mbidx1] & mbidxmask1) >> mbidxshift1)); + + const float16_t d = loadd.x * float16_t(sc); + const float16_t m = loadd.y * float16_t(mbyte); + + uint32_t dmask = 0xF << (b * 4); + + float16_t ret = d * float16_t((bl.block.qs[qsi ] & dmask) >> (b * 4)) - m; + + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 16) buffer decodeBufQ5_K { + block_q5_K block; +}; + +float16_t dequantFuncQ5_K(const in decodeBufQ5_K bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const uint idx = coordInBlock[1]; + const uint iqs = idx; + + const uint n = iqs / 64; // 0,1,2,3 + const uint b = (iqs % 64) / 32; // 0,1 + const uint is = (idx & 0xE0) >> 5; // 0..7 + const uint qsi = n * 32 + (iqs % 32); // 0..127 + const uint qhi = (iqs % 32); // 0..31 + + const uint8_t hm = uint8_t(1 << (iqs / 32)); + + const f16vec2 loadd = bl.block.d; + + uint32_t sc; + uint32_t mbyte; + + uint32_t scidx0 = (is < 4) ? is : (is + 4); + uint32_t scidx1 = (is < 4) ? is : (is - 4); + uint32_t scidxmask1 = (is < 4) ? 0x30 : 0xC0; + uint32_t scidxshift1 = (is < 4) ? 0 : 2; + uint32_t mbidx0 = is + 4; + uint32_t mbidx1 = (is < 4) ? is + 4 : is; + uint32_t mbidxmask0 = (is < 4) ? 0xF : 0xF0; + uint32_t mbidxshift0 = (is < 4) ? 0 : 4; + uint32_t mbidxmask1 = (is < 4) ? 0x30 : 0xC0; + uint32_t mbidxshift1 = (is < 4) ? 0 : 2; + + sc = uint8_t((bl.block.scales[scidx0] & 0xF) | ((bl.block.scales[scidx1] & scidxmask1) >> scidxshift1)); + mbyte = uint8_t(((bl.block.scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((bl.block.scales[mbidx1] & mbidxmask1) >> mbidxshift1)); + + const float16_t d = loadd.x * float16_t(sc); + const float16_t m = loadd.y * float16_t(mbyte); + + uint32_t dmask = 0xF << (b * 4); + + float16_t ret = d * (float16_t((bl.block.qs[qsi ] & dmask) >> (b * 4)) + float16_t((bl.block.qh[qhi ] & hm) != 0 ? 
16 : 0)) - m; + + return ret; +} + +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufQ6_K { + block_q6_K block; +}; + +float16_t dequantFuncQ6_K(const in decodeBufQ6_K bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const uint idx = coordInBlock[1]; + const uint iqs = idx; + + const uint n = iqs / 128; // 0,1 + const uint b = (iqs % 128) / 64; // 0,1 + const uint is_b = (iqs % 32) / 16; // 0,1 + const uint qhshift = ((iqs % 128) / 32) * 2;// 0,2,4,6 + const uint is = 8 * n + qhshift + is_b; // 0..15 + const uint qsi = n * 64 + (iqs % 64); // 0..127 + const uint qhi = n * 32 + (iqs % 32); // 0..63 + + const float16_t dscale = bl.block.d * float16_t(bl.block.scales[is]); + + float16_t ret = dscale * float16_t(int8_t(((bl.block.ql[qsi ] >> (b * 4)) & 0xF) | (((bl.block.qh[qhi ] >> qhshift) & 3) << 4)) - 32); + + return ret; +} + +#if defined(DATA_A_IQ4_NL) +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_NL { + block_iq4_nl block; +}; + +float16_t dequantFuncIQ4_NL(const in decodeBufIQ4_NL bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const float16_t d = bl.block.d; + const uint idx = coordInBlock[1]; + const uint iqs = idx & 0xF; + const uint shift = (idx & 0x10) >> 2; + uint32_t qs = bl.block.qs[iqs]; + qs >>= shift; + qs &= 0xF; + float16_t ret = float16_t(kvalues_iq4nl[qs]) * d; + return ret; +} +#endif + +#if defined(DATA_A_Q4_0) +#define dequantFuncA dequantFuncQ4_0 +#elif defined(DATA_A_Q4_1) +#define dequantFuncA dequantFuncQ4_1 +#elif defined(DATA_A_Q5_0) +#define dequantFuncA dequantFuncQ5_0 +#elif defined(DATA_A_Q5_1) +#define dequantFuncA dequantFuncQ5_1 +#elif defined(DATA_A_Q8_0) +#define dequantFuncA dequantFuncQ8_0 +#elif defined(DATA_A_Q2_K) +#define dequantFuncA dequantFuncQ2_K +#elif defined(DATA_A_Q3_K) +#define dequantFuncA dequantFuncQ3_K +#elif defined(DATA_A_Q4_K) +#define dequantFuncA dequantFuncQ4_K +#elif defined(DATA_A_Q5_K) +#define dequantFuncA dequantFuncQ5_K +#elif defined(DATA_A_Q6_K) +#define dequantFuncA dequantFuncQ6_K +#elif defined(DATA_A_IQ4_NL) +#define dequantFuncA dequantFuncIQ4_NL +#endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp index 34ef3da30b82c..8de14fc03f102 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp @@ -10,6 +10,8 @@ layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; void main() { const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64; + init_iq4nl_shmem(); + const uint tid = gl_LocalInvocationID.x % 64; const uint il = tid/32; const uint ir = tid%32; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp index 92acb75406db3..987f113a35ad0 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q4_k.comp @@ -9,8 +9,8 @@ layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; void main() { [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) { - const uint i = gl_WorkGroupID.x * 256 + wgy; - if (i >= p.M * p.K / QUANT_K) { + const uint ib = gl_WorkGroupID.x * 256 + wgy; + if (ib >= p.M * p.K / QUANT_K) { return; } @@ -20,37 +20,49 @@ void main() { const uint is = 2 * il; const uint n = 4; - const FLOAT_TYPE dall = FLOAT_TYPE(data_a[i].d.x); - const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[i].d.y); + const FLOAT_TYPE dall = 
FLOAT_TYPE(data_a[ib].d.x); + const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib].d.y); - const uint y_idx = i * QUANT_K + 64 * il + n * ir; + const uint y_idx = ib * QUANT_K + 64 * il + n * ir; const uint qs_idx = 32*il + n * ir; - uint8_t sc; - uint8_t m; - if (is < 4) { - sc = uint8_t(data_a[i].scales[is] & 63); - m = uint8_t(data_a[i].scales[is + 4] & 63); - } else { - sc = uint8_t((data_a[i].scales[is + 4] & 0xF) | ((data_a[i].scales[is - 4] >> 6) << 4)); - m = uint8_t((data_a[i].scales[is + 4] >> 4) | ((data_a[i].scales[is ] >> 6) << 4)); - } + uint scidx0 = (is < 4) ? is : (is + 4); + uint scidx1 = (is < 4) ? is : (is - 4); + uint scidxmask1 = (is < 4) ? 0x30 : 0xC0; + uint scidxshift1 = (is < 4) ? 0 : 2; + uint mbidx0 = is + 4; + uint mbidx1 = (is < 4) ? is + 4 : is; + uint mbidxmask0 = (is < 4) ? 0xF : 0xF0; + uint mbidxshift0 = (is < 4) ? 0 : 4; + uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0; + uint mbidxshift1 = (is < 4) ? 0 : 2; + + uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1)); + uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1)); + const FLOAT_TYPE d1 = dall * sc; - const FLOAT_TYPE m1 = dmin * m; - - if (is < 4) { - sc = uint8_t(data_a[i].scales[is + 1] & 63); - m = uint8_t(data_a[i].scales[is + 5] & 63); - } else { - sc = uint8_t((data_a[i].scales[is + 5] & 0xF) | ((data_a[i].scales[is - 3] >> 6) << 4)); - m = uint8_t((data_a[i].scales[is + 5] >> 4) | ((data_a[i].scales[is + 1] >> 6) << 4)); - } + const FLOAT_TYPE m1 = dmin * mbyte; + + scidx0 = (is < 4) ? is + 1 : (is + 5); + scidx1 = (is < 4) ? is + 1 : (is - 3); + scidxmask1 = (is < 4) ? 0x30 : 0xC0; + scidxshift1 = (is < 4) ? 0 : 2; + mbidx0 = is + 5; + mbidx1 = (is < 4) ? is + 5 : is + 1; + mbidxmask0 = (is < 4) ? 0xF : 0xF0; + mbidxshift0 = (is < 4) ? 0 : 4; + mbidxmask1 = (is < 4) ? 0x30 : 0xC0; + mbidxshift1 = (is < 4) ? 
0 : 2; + + sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1)); + mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1)); + const FLOAT_TYPE d2 = dall * sc; - const FLOAT_TYPE m2 = dmin * m; + const FLOAT_TYPE m2 = dmin * mbyte; [[unroll]] for (uint l = 0; l < n; ++l) { - data_b[y_idx + l ] = D_TYPE(d1 * FLOAT_TYPE(data_a[i].qs[qs_idx + l] & 0xF) - m1); - data_b[y_idx + l + 32] = D_TYPE(d2 * FLOAT_TYPE(data_a[i].qs[qs_idx + l] >> 4) - m2); + data_b[y_idx + l ] = D_TYPE(d1 * FLOAT_TYPE(data_a[ib].qs[qs_idx + l] & 0xF) - m1); + data_b[y_idx + l + 32] = D_TYPE(d2 * FLOAT_TYPE(data_a[ib].qs[qs_idx + l] >> 4) - m2); } } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp index f314a76d105c6..6db5403b6613e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_q5_k.comp @@ -9,8 +9,8 @@ layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; void main() { [[unroll]] for (uint wgy = 0; wgy < 256; wgy++) { - const uint i = gl_WorkGroupID.x * 256 + wgy; - if (i >= p.M * p.K / QUANT_K) { + const uint ib = gl_WorkGroupID.x * 256 + wgy; + if (ib >= p.M * p.K / QUANT_K) { return; } @@ -19,40 +19,52 @@ void main() { const uint ir = tid % 16; const uint is = 2 * il; - const FLOAT_TYPE dall = FLOAT_TYPE(data_a[i].d.x); - const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[i].d.y); + const FLOAT_TYPE dall = FLOAT_TYPE(data_a[ib].d.x); + const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib].d.y); - const uint y_idx = i * QUANT_K + 64 * il + 2 * ir; + const uint y_idx = ib * QUANT_K + 64 * il + 2 * ir; const uint qs_idx = 32*il + 2 * ir; const uint qh_idx = 2 * ir; - uint8_t sc; - uint8_t m; - if (is < 4) { - sc = uint8_t(data_a[i].scales[is] & 63); - m = uint8_t(data_a[i].scales[is + 4] & 63); - } else { - sc = uint8_t((data_a[i].scales[is + 4] & 0xF) | ((data_a[i].scales[is - 4] >> 6) << 4)); - m = uint8_t((data_a[i].scales[is + 4] >> 4) | ((data_a[i].scales[is ] >> 6) << 4)); - } + uint scidx0 = (is < 4) ? is : (is + 4); + uint scidx1 = (is < 4) ? is : (is - 4); + uint scidxmask1 = (is < 4) ? 0x30 : 0xC0; + uint scidxshift1 = (is < 4) ? 0 : 2; + uint mbidx0 = is + 4; + uint mbidx1 = (is < 4) ? is + 4 : is; + uint mbidxmask0 = (is < 4) ? 0xF : 0xF0; + uint mbidxshift0 = (is < 4) ? 0 : 4; + uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0; + uint mbidxshift1 = (is < 4) ? 0 : 2; + + uint8_t sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1)); + uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1)); + const FLOAT_TYPE d1 = dall * sc; - const FLOAT_TYPE m1 = dmin * m; - - if (is < 4) { - sc = uint8_t(data_a[i].scales[is + 1] & 63); - m = uint8_t(data_a[i].scales[is + 5] & 63); - } else { - sc = uint8_t((data_a[i].scales[is + 5] & 0xF) | ((data_a[i].scales[is - 3] >> 6) << 4)); - m = uint8_t((data_a[i].scales[is + 5] >> 4) | ((data_a[i].scales[is + 1] >> 6) << 4)); - } + const FLOAT_TYPE m1 = dmin * mbyte; + + scidx0 = (is < 4) ? is + 1 : (is + 5); + scidx1 = (is < 4) ? is + 1 : (is - 3); + scidxmask1 = (is < 4) ? 0x30 : 0xC0; + scidxshift1 = (is < 4) ? 0 : 2; + mbidx0 = is + 5; + mbidx1 = (is < 4) ? is + 5 : is + 1; + mbidxmask0 = (is < 4) ? 0xF : 0xF0; + mbidxshift0 = (is < 4) ? 0 : 4; + mbidxmask1 = (is < 4) ? 
0x30 : 0xC0; + mbidxshift1 = (is < 4) ? 0 : 2; + + sc = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1)); + mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1)); + const FLOAT_TYPE d2 = dall * sc; - const FLOAT_TYPE m2 = dmin * m; + const FLOAT_TYPE m2 = dmin * mbyte; const uint8_t hm1 = uint8_t(1 << (2 * il )); const uint8_t hm2 = uint8_t(1 << (2 * il + 1)); - data_b[y_idx ] = D_TYPE(d1 * FLOAT_TYPE((data_a[i].qs[qs_idx ] & 0xF) + (((data_a[i].qh[qh_idx ] & hm1) != 0) ? 16 : 0)) - m1); - data_b[y_idx + 1] = D_TYPE(d1 * FLOAT_TYPE((data_a[i].qs[qs_idx + 1] & 0xF) + (((data_a[i].qh[qh_idx + 1] & hm1) != 0) ? 16 : 0)) - m1); - data_b[y_idx + 32] = D_TYPE(d2 * FLOAT_TYPE((data_a[i].qs[qs_idx ] >> 4) + (((data_a[i].qh[qh_idx ] & hm2) != 0) ? 16 : 0)) - m2); - data_b[y_idx + 33] = D_TYPE(d2 * FLOAT_TYPE((data_a[i].qs[qs_idx + 1] >> 4) + (((data_a[i].qh[qh_idx + 1] & hm2) != 0) ? 16 : 0)) - m2); + data_b[y_idx ] = D_TYPE(d1 * FLOAT_TYPE((data_a[ib].qs[qs_idx ] & 0xF) + (((data_a[ib].qh[qh_idx ] & hm1) != 0) ? 16 : 0)) - m1); + data_b[y_idx + 1] = D_TYPE(d1 * FLOAT_TYPE((data_a[ib].qs[qs_idx + 1] & 0xF) + (((data_a[ib].qh[qh_idx + 1] & hm1) != 0) ? 16 : 0)) - m1); + data_b[y_idx + 32] = D_TYPE(d2 * FLOAT_TYPE((data_a[ib].qs[qs_idx ] >> 4) + (((data_a[ib].qh[qh_idx ] & hm2) != 0) ? 16 : 0)) - m2); + data_b[y_idx + 33] = D_TYPE(d2 * FLOAT_TYPE((data_a[ib].qs[qs_idx + 1] >> 4) + (((data_a[ib].qh[qh_idx + 1] & hm2) != 0) ? 16 : 0)) - m2); } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp new file mode 100644 index 0000000000000..c5be8131b30db --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -0,0 +1,289 @@ +#version 450 + +#extension GL_EXT_control_flow_attributes : enable +#extension GL_EXT_shader_16bit_storage : require + +#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require +#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require +#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require +#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require + +#extension GL_KHR_memory_scope_semantics : enable +#extension GL_KHR_cooperative_matrix : enable +#extension GL_NV_cooperative_matrix2 : enable +#extension GL_EXT_buffer_reference : enable +#extension GL_KHR_shader_subgroup_ballot : enable +#extension GL_KHR_shader_subgroup_vote : enable +#extension GL_EXT_null_initializer : enable + +#include "types.comp" +#include "dequant_funcs_cm2.comp" + +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; + +layout (constant_id = 1) const uint32_t Br = 32; +layout (constant_id = 2) const uint32_t Bc = 32; +layout (constant_id = 3) const uint32_t D = 32; +layout (constant_id = 4) const uint32_t Clamp = gl_CooperativeMatrixClampModeConstantNV; + +layout (push_constant) uniform parameter { + uint32_t N; + uint32_t KV; + + uint32_t ne1; + uint32_t ne2; + uint32_t ne3; + + uint32_t neq2; + uint32_t neq3; + uint32_t nek2; + uint32_t nek3; + uint32_t nev2; + uint32_t nev3; + uint32_t nem1; + + uint32_t nb02; + uint32_t nb03; + uint32_t nb12; + uint32_t nb13; + uint32_t nb22; + uint32_t nb23; + uint32_t nb31; + + float scale; + float max_bias; + float logit_softcap; + + uint32_t mask; + uint32_t n_head_log2; + float m0; + float m1; +} p; + +layout (binding = 0) readonly buffer Q {uint8_t data_q[];}; +layout (binding = 1) 
readonly buffer K {uint8_t data_k[];}; +layout (binding = 2) readonly buffer V {uint8_t data_v[];}; +layout (binding = 3) readonly buffer M {uint8_t data_m[];}; +layout (binding = 4) writeonly buffer O {D_TYPE data_o[];}; + +#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b)) + +ACC_TYPE maxReduce(const in ACC_TYPE x, const in ACC_TYPE y) { + return max(x, y); +} + +ACC_TYPE smearReduce(const in ACC_TYPE x, const in ACC_TYPE y) { + return x; +} + +// Replace matrix elements >= numRows or numCols with 'replace' +ACC_TYPE replacePadding(const in uint32_t row, const in uint32_t col, const in ACC_TYPE elem, const in ACC_TYPE replace, const in uint32_t numRows, const in uint32_t numCols) { + if (row >= numRows || col >= numCols) { + return replace; + } + return elem; +} + +ACC_TYPE Exp(const in uint32_t row, const in uint32_t col, const in ACC_TYPE elem) +{ + return exp(elem); +} + +ACC_TYPE Max(const in uint32_t row, const in uint32_t col, const in ACC_TYPE elem0, const in ACC_TYPE elem1) +{ + return max(elem0, elem1); +} + +#if defined(BLOCK_SIZE) +#define DECODEFUNC , DEQUANTFUNC +#else +#define DECODEFUNC +#endif + +void main() { +#if defined(DATA_A_IQ4_NL) + init_iq4nl_shmem(); +#endif + + const uint32_t N = p.N; + const uint32_t KV = p.KV; + + const uint32_t Tr = CEIL_DIV(N, Br); + const uint32_t Tc = CEIL_DIV(KV, Bc); + + const uint32_t i = gl_WorkGroupID.x; + + const uint32_t iq2 = gl_WorkGroupID.y; + const uint32_t iq3 = gl_WorkGroupID.z; + + // broadcast factors + const uint32_t rk2 = p.neq2/p.nek2; + const uint32_t rk3 = p.neq3/p.nek3; + + const uint32_t rv2 = p.neq2/p.nev2; + const uint32_t rv3 = p.neq3/p.nev3; + + // k indices + const uint32_t ik3 = iq3 / rk3; + const uint32_t ik2 = iq2 / rk2; + + // v indices + const uint32_t iv3 = iq3 / rv3; + const uint32_t iv2 = iq2 / rv2; + + tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutQ = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV); + tensorLayoutNV<2, Clamp> tensorLayoutK = createTensorLayoutNV(2, Clamp); + tensorLayoutNV<2, Clamp> tensorLayoutV = createTensorLayoutNV(2, Clamp); + + tensorViewNV<2, false, 1, 0> tensorViewTranspose = createTensorViewNV(2, false, 1, 0); + +#if defined(BLOCK_SIZE) + tensorLayoutK = setTensorLayoutBlockSizeNV(tensorLayoutK, 1, BLOCK_SIZE); + tensorLayoutV = setTensorLayoutBlockSizeNV(tensorLayoutV, 1, BLOCK_SIZE); +#endif + + tensorLayoutQ = setTensorLayoutDimensionNV(tensorLayoutQ, N, D); + tensorLayoutK = setTensorLayoutDimensionNV(tensorLayoutK, KV, D); + tensorLayoutV = setTensorLayoutDimensionNV(tensorLayoutV, KV, D); + + coopmat Q; + coopmat Qf16; + + uint32_t q_offset = iq2*p.nb02+iq3*p.nb03; + coopMatLoadTensorNV(Q, data_q, q_offset, sliceTensorLayoutNV(tensorLayoutQ, i * Br, Br, 0, D)); + + Qf16 = coopmat(Q); + Qf16 *= float16_t(p.scale); + + coopmat O = coopmat(0); + + coopmat L, M; + + L = coopmat(0); + M = coopmat(-1.0/0.0); + + ACC_TYPE slope = ACC_TYPE(1.0); + + // ALiBi + if (p.max_bias > 0.0f) { + const uint32_t h = iq2; + + const ACC_TYPE base = ACC_TYPE(h < p.n_head_log2 ? p.m0 : p.m1); + const int exph = int(h < p.n_head_log2 ? 
h + 1 : 2*(h - p.n_head_log2) + 1); + + slope = pow(base, ACC_TYPE(exph)); + } + + [[dont_unroll]] + for (uint32_t j = 0; j < Tc; ++j) { + + coopmat S = coopmat(0); + + coopmat K_T; + + uint32_t k_offset = ik2*p.nb12 + ik3*p.nb13; + coopMatLoadTensorNV(K_T, data_k, k_offset, sliceTensorLayoutNV(tensorLayoutK, j * Bc, Bc, 0, D), tensorViewTranspose DECODEFUNC); + S = coopMatMulAdd(Qf16, K_T, S); + + if (p.logit_softcap != 0.0f) { + [[unroll]] + for (int k = 0; k < S.length(); ++k) { + S[k] = ACC_TYPE(p.logit_softcap)*tanh(S[k]); + } + } + + if (p.mask != 0) { + tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutM = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV); + tensorLayoutM = setTensorLayoutDimensionNV(tensorLayoutM, p.nem1, KV); + + coopmat mv; + + coopMatLoadTensorNV(mv, data_m, 0, sliceTensorLayoutNV(tensorLayoutM, i * Br, Br, j * Bc, Bc)); + + S += slope*coopmat(mv); + } + + // Clear padding elements to -inf, so they don't contribute to rowmax + if (Clamp != 0 && + ((j + 1) * Bc > KV || + (i + 1) * Br > N)) { + + uint R = ((i + 1) * Br > N) ? (N % Br) : Br; + uint C = ((j + 1) * Bc > KV) ? (KV % Bc) : Bc; + + coopMatPerElementNV(S, S, replacePadding, ACC_TYPE(-1.0/0.0), R, C); + } + + coopmat rowmax, P, rowsum, eM; + + coopMatReduceNV(rowmax, S, gl_CooperativeMatrixReduceRowNV, maxReduce); + + coopmat Mold = M; + + // M = max(rowmax, Mold) + // P = e^(S - M) + // eM = e^(Mold - M) + coopMatPerElementNV(M, rowmax, Max, Mold); + coopMatPerElementNV(P, S - M, Exp); + coopMatPerElementNV(eM, Mold - M, Exp); + + // Clear padding elements to 0, so they don't contribute to rowsum + if (Clamp != 0 && + ((j + 1) * Bc > KV || + (i + 1) * Br > N)) { + + uint R = ((i + 1) * Br > N) ? (N % Br) : Br; + uint C = ((j + 1) * Bc > KV) ? (KV % Bc) : Bc; + + coopMatPerElementNV(P, P, replacePadding, ACC_TYPE(0.0), R, C); + } + + coopmat P_A = coopmat(P); + + // compute rowsum by multiplying by matrix of all ones. 
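+        // For P (the Br x Bc tile of e^(S - M), "A" use) multiplied by an
+        // all-ones Bc x Bc "B"-use matrix, every element of the result is
+        //   rowsum[r][c] = sum_k P[r][k]
+        // i.e. the per-row softmax denominator contribution of this KV tile,
+        // replicated across the columns. Keeping it replicated matches the
+        // smeared representation used for eM and L, so the online update
+        // L = eM*L + rowsum below stays a purely componentwise expression.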
+ coopmat One = coopmat(1.0); + + rowsum = coopmat(0.0); + rowsum = coopMatMulAdd(P_A, One, rowsum); + + coopmat V; + uint32_t v_offset = iv2*p.nb22 + iv3*p.nb23; + coopMatLoadTensorNV(V, data_v, v_offset, sliceTensorLayoutNV(tensorLayoutV, j * Bc, Bc, 0, D) DECODEFUNC); + + L = eM*L + rowsum; + + // This is the "diagonal" matrix in the paper, but since we do componentwise + // multiply rather than matrix multiply it has the diagonal element smeared + // across the row + coopmat eMdiag; + + // resize eM by using smear/reduce + coopMatReduceNV(eMdiag, eM, gl_CooperativeMatrixReduceRowNV, smearReduce); + + O = eMdiag * O; + + O = coopMatMulAdd(P_A, V, O); + } + + coopmat Ldiag; + + // resize L by using smear/reduce + coopMatReduceNV(Ldiag, L, gl_CooperativeMatrixReduceRowNV, smearReduce); + + [[unroll]] + for (int k = 0; k < Ldiag.length(); ++k) { + Ldiag[k] = ACC_TYPE(1.0) / Ldiag[k]; + } + + O = Ldiag*O; + + tensorLayoutNV<3, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutD = createTensorLayoutNV(3, gl_CooperativeMatrixClampModeConstantNV); + tensorLayoutD = setTensorLayoutDimensionNV(tensorLayoutD, p.ne2, p.ne1, D); + + // permute dimensions + tensorViewNV<3, false, 1, 0, 2> tensorViewPermute = createTensorViewNV(3, false, 1, 0, 2); + uint32_t o_offset = iq3*p.ne2*p.ne1; + + coopmat O_D = coopmat(O); + coopMatStoreTensorNV(O_D, data_o, o_offset, sliceTensorLayoutNV(tensorLayoutD, i * Br, Br, iq2, 1, 0, D), tensorViewPermute); +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp b/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp index 4e1fa3af3ad62..ab7c9d7eb1b09 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/generic_unary_head.comp @@ -8,6 +8,13 @@ layout (push_constant) uniform parameter uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13; uint d_offset; float param1; float param2; + + uint ne0_012mp; uint ne0_012L; + uint ne0_01mp; uint ne0_01L; + uint ne0_0mp; uint ne0_0L; + uint ne1_012mp; uint ne1_012L; + uint ne1_01mp; uint ne1_01L; + uint ne1_0mp; uint ne1_0L; } p; layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; @@ -17,22 +24,30 @@ uint get_idx() { return gl_GlobalInvocationID.z * 262144 + gl_GlobalInvocationID.y * 512 + gl_GlobalInvocationID.x; } +// see init_fastdiv_values in ggml-vulkan.cpp +uint fastdiv(uint n, uint mp, uint L) { + uint msbs, lsbs; + // msbs = mulhi(n, mp) + umulExtended(n, mp, msbs, lsbs); + return (msbs + n) >> L; +} + uint src0_idx(uint idx) { - const uint i03 = idx / (p.ne02*p.ne01*p.ne00); + const uint i03 = fastdiv(idx, p.ne0_012mp, p.ne0_012L); const uint i03_offset = i03 * p.ne02*p.ne01*p.ne00; - const uint i02 = (idx - i03_offset) / (p.ne01*p.ne00); + const uint i02 = fastdiv(idx - i03_offset, p.ne0_01mp, p.ne0_01L); const uint i02_offset = i02*p.ne01*p.ne00; - const uint i01 = (idx - i03_offset - i02_offset) / p.ne00; + const uint i01 = fastdiv(idx - i03_offset - i02_offset, p.ne0_0mp, p.ne0_0L); const uint i00 = idx - i03_offset - i02_offset - i01*p.ne00; return i03*p.nb03 + i02*p.nb02 + i01*p.nb01 + i00*p.nb00; } uint dst_idx(uint idx) { - const uint i13 = idx / (p.ne12*p.ne11*p.ne10); + const uint i13 = fastdiv(idx, p.ne1_012mp, p.ne1_012L); const uint i13_offset = i13 * p.ne12*p.ne11*p.ne10; - const uint i12 = (idx - i13_offset) / (p.ne11*p.ne10); + const uint i12 = fastdiv(idx - i13_offset, p.ne1_01mp, p.ne1_01L); const uint i12_offset = i12*p.ne11*p.ne10; - const uint i11 = (idx - i13_offset - 
i12_offset) / p.ne10; + const uint i11 = fastdiv(idx - i13_offset - i12_offset, p.ne1_0mp, p.ne1_0L); const uint i10 = idx - i13_offset - i12_offset - i11*p.ne10; return i13*p.nb13 + i12*p.nb12 + i11*p.nb11 + i10*p.nb10; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp b/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp index 8d30b63c165b2..1426fde6597b6 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp @@ -12,6 +12,10 @@ void main() { const uint i11 = (gl_GlobalInvocationID.z)/p.ne12; const uint i12 = (gl_GlobalInvocationID.z)%p.ne12; +#if defined(DATA_A_IQ4_NL) + init_iq4nl_shmem(); +#endif + if (i00 >= p.ne00) { return; } @@ -27,6 +31,8 @@ void main() { const uint y_offset = QUANT_R == 1 ? 1 : QUANT_K/2; vec2 v = dequantize(ib, iqs, 0); + const vec2 dm = get_dm(ib, 0); + v = v * dm.x + dm.y; data_d[d_offset + iybs + iqs ] = D_TYPE(v.x); data_d[d_offset + iybs + iqs + y_offset] = D_TYPE(v.y); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp index 5ad9b28daffaa..b6a0d56454951 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/group_norm.comp @@ -19,7 +19,7 @@ void main() { const uint tid = gl_LocalInvocationID.x; const uint start = gl_WorkGroupID.x * group_size + tid; - const uint end = start + group_size; + const uint end = (gl_WorkGroupID.x + 1) * group_size; tmp[tid] = 0.0f; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp index 4d48610a3adcb..966fedf8fa7a0 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/im2col.comp @@ -1,6 +1,11 @@ #version 450 #extension GL_EXT_shader_16bit_storage : require +#extension GL_EXT_spirv_intrinsics: enable + +#if RTE16 +spirv_execution_mode(capabilities = [4467], 4462, 16); // RoundingModeRTE, 16 bits +#endif layout (push_constant) uniform parameter { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp index 825b91031f569..4c64fd47af718 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_split_k_reduce.comp @@ -5,7 +5,9 @@ layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in; layout (binding = 0) readonly buffer A {float data_a[];}; +layout (binding = 0) readonly buffer A4 {vec4 data_a4[];}; layout (binding = 1) writeonly buffer D {float data_d[];}; +layout (binding = 1) writeonly buffer D4 {vec4 data_d4[];}; layout (push_constant) uniform parameter { uint ne; @@ -13,17 +15,34 @@ layout (push_constant) uniform parameter { } p; void main() { - const uint idx = gl_GlobalInvocationID.x; + // Each invocation handles four consecutive components + const uint idx = gl_GlobalInvocationID.x * 4; if (idx >= p.ne) { return; } - float result = 0.0f; + // Check if all four components are in bounds and aligned, + // then use vector loads + if (idx + 3 < p.ne && (p.ne % 4) == 0) { + vec4 result = vec4(0.0f); - [[unroll]] for (uint i = 0; i < p.k_num; i++) { - result += data_a[i * p.ne + idx]; - } + [[unroll]] for (uint i = 0; i < p.k_num; i++) { + result += data_a4[(i * p.ne + idx) / 4]; + } + + data_d4[idx / 4] = result; + } else { + [[unroll]] for (uint j = 0; j < 4; ++j) { + if (idx + j < p.ne) { + float result = 0.0f; - data_d[idx] = result; + 
[[unroll]] for (uint i = 0; i < p.k_num; i++) { + result += data_a[i * p.ne + idx + j]; + } + + data_d[idx + j] = result; + } + } + } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp index ddbac5d2c9334..187c31916d16f 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp @@ -3,9 +3,7 @@ #ifdef FLOAT16 #extension GL_EXT_shader_explicit_arithmetic_types_float16 : require #endif -#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require - -#extension GL_EXT_null_initializer : enable +#extension GL_EXT_shader_explicit_arithmetic_types : require #include "mul_mat_vec_base.comp" @@ -14,16 +12,34 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; layout (constant_id = 0) const uint BLOCK_SIZE = 32; layout (constant_id = 1) const uint NUM_ROWS = 1; +#if !defined(DATA_A_F32) && !defined(DATA_A_F16) +#define K_PER_ITER 8 +#else +#define K_PER_ITER 2 +#endif + + uint a_offset, b_offset, d_offset, y_offset; shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; void iter(inout FLOAT_TYPE temp[NUM_ROWS], const uint first_row, const uint num_rows, const uint tid, const uint i, bool lastiter) { - const uint col = i*BLOCK_SIZE + 2*tid; + const uint col = i*BLOCK_SIZE + K_PER_ITER*tid; const uint iqs = (col%QUANT_K)/QUANT_R; // quant index const uint iybs = col - col%QUANT_K; // y block start index +#if K_PER_ITER == 8 +#if QUANT_R == 2 + const B_TYPE_VEC4 bv02 = data_b_v4[(b_offset + iybs + iqs) / 4]; + const B_TYPE_VEC4 bv13 = data_b_v4[(b_offset + iybs + iqs + y_offset) / 4]; + const vec4 bv0 = vec4(bv02.x, bv13.x, bv02.y, bv13.y); + const vec4 bv1 = vec4(bv02.z, bv13.z, bv02.w, bv13.w); +#else + const vec4 bv0 = vec4(data_b_v4[(b_offset + iybs + iqs) / 4]); + const vec4 bv1 = vec4(data_b_v4[(b_offset + iybs + iqs) / 4 + 1]); +#endif +#else // Check if the second of the pair of elements is OOB, and don't fetch B or // accumulate it. We still fetch a pair of elements for A, which is fine for // quantized formats since they'll be within the same block. We should @@ -36,9 +52,31 @@ void iter(inout FLOAT_TYPE temp[NUM_ROWS], const uint first_row, const uint num_ if (!OOB) { b1 = FLOAT_TYPE(data_b[b_offset + iybs + iqs + y_offset]); } +#endif + uint ibi = first_row*p.ncols; [[unroll]] for (uint n = 0; n < num_rows; ++n) { - const uint ib = ((first_row + n)*p.ncols + col)/QUANT_K; // block index + const uint ib = (ibi + col)/QUANT_K; // block index + ibi += p.ncols; + +#if K_PER_ITER == 8 + vec4 v = dequantize4(ib, iqs, a_offset); + vec4 v2 = dequantize4(ib, iqs+(4/QUANT_R), a_offset); + + const vec2 dm = get_dm(ib, a_offset); + if (dm.y != 0) { // quant has min component + v = v * dm.x + dm.y; + v2 = v2 * dm.x + dm.y; + } + + // matrix multiplication + FLOAT_TYPE rowtmp = dot(bv0, v); + rowtmp += dot(bv1, v2); + + if (dm.y == 0) + rowtmp *= dm.x; + temp[n] += rowtmp; +#else const vec2 v = dequantize(ib, iqs, a_offset); // matrix multiplication @@ -46,6 +84,7 @@ void iter(inout FLOAT_TYPE temp[NUM_ROWS], const uint first_row, const uint num_ if (!OOB) { temp[n] = fma(FLOAT_TYPE(v.y), b1, temp[n]); } +#endif } } @@ -57,24 +96,39 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { y_offset = QUANT_R == 1 ? 
1 : QUANT_K/2; - FLOAT_TYPE temp[NUM_ROWS] = {}; + FLOAT_TYPE temp[NUM_ROWS]; - const int unroll_count = 8; + for (uint i = 0; i < NUM_ROWS; ++i) { + temp[i] = FLOAT_TYPE(0); + } - const uint num_iters = (p.ncols >= 2*tid) ? ((p.ncols - 2*tid + BLOCK_SIZE - 1) / BLOCK_SIZE) : 0; - const uint unrolled_iters = num_iters & ~(2*unroll_count - 1); + uint num_iters = p.ncols / (K_PER_ITER * BLOCK_SIZE); + if (num_iters * K_PER_ITER * BLOCK_SIZE + K_PER_ITER*tid < p.ncols) { + num_iters++; + } + int unroll_count = 4; + uint unrolled_iters = num_iters & ~(unroll_count - 1); uint i = 0; while (i < unrolled_iters) { // Manually partially unroll the loop [[unroll]] for (uint k = 0; k < unroll_count; ++k) { - iter(temp, first_row, num_rows, tid, i, false); - i += 2; + iter(temp, first_row, num_rows, tid, i*K_PER_ITER, false); + i++; + } + } + unroll_count = 2; + unrolled_iters = num_iters & ~(unroll_count - 1); + while (i < unrolled_iters) { + // Manually partially unroll the loop + [[unroll]] for (uint k = 0; k < unroll_count; ++k) { + iter(temp, first_row, num_rows, tid, i*K_PER_ITER, false); + i++; } } while (i < num_iters) { - iter(temp, first_row, num_rows, tid, i, true); - i += 2; + iter(temp, first_row, num_rows, tid, i*K_PER_ITER, true); + i++; } // sum up partial sums and write back result @@ -100,10 +154,17 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { void main() { const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z); +#if defined(DATA_A_IQ4_NL) + init_iq4nl_shmem(); +#endif + // do NUM_ROWS at a time, unless there aren't enough remaining rows if (first_row + NUM_ROWS <= p.stride_d) { compute_outputs(first_row, NUM_ROWS); } else { + if (first_row >= p.stride_d) { + return; + } compute_outputs(first_row, p.stride_d - first_row); } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp index 5920bc93641c8..3894fca8260a5 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp @@ -2,8 +2,6 @@ #extension GL_EXT_shader_16bit_storage : require #extension GL_EXT_shader_8bit_storage : require -#define K_QUANTS_PER_ITERATION 2 - #ifdef MUL_MAT_ID #define EXPERT_COUNT 8 #endif @@ -12,6 +10,9 @@ layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; layout (binding = 1) readonly buffer B {B_TYPE data_b[];}; +layout (binding = 1) readonly buffer BV2 {B_TYPE_VEC2 data_b_v2[];}; +layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];}; + layout (binding = 2) writeonly buffer D {D_TYPE data_d[];}; #ifdef MUL_MAT_ID layout (binding = 3) readonly buffer IDS {int data_ids[];}; @@ -49,13 +50,16 @@ void get_offsets(out uint a_offset, out uint b_offset, out uint d_offset) { #endif #ifndef MUL_MAT_ID - const uint i13 = batch_idx / p.ne12; - const uint i12 = batch_idx % p.ne12; + uint batch_idx_a = 0; + if (batch_idx != 0) { + const uint i13 = batch_idx / p.ne12; + const uint i12 = batch_idx % p.ne12; - const uint i03 = i13 / p.broadcast3; - const uint i02 = i12 / p.broadcast2; + const uint i03 = i13 / p.broadcast3; + const uint i02 = i12 / p.broadcast2; - const uint batch_idx_a = i03 * p.ne02 + i02; + batch_idx_a = i03 * p.ne02 + i02; + } #else const uint expert_id = data_ids[expert_idx]; #endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp index ec8eadcd5828a..1a5350d99ea14 100644 --- 
a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp @@ -1,68 +1,107 @@ #version 450 +#extension GL_EXT_shader_explicit_arithmetic_types : require #include "mul_mat_vec_base.comp" -layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in; +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -shared FLOAT_TYPE tmp[32]; +layout (constant_id = 0) const uint BLOCK_SIZE = 32; + +shared FLOAT_TYPE tmp[BLOCK_SIZE]; void main() { const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; + if (row >= p.stride_d) { + return; + } + uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; - const uint tid = gl_LocalInvocationID.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 - const uint ix = gl_LocalInvocationID.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1 + // 16 threads are used to process each block + const uint it_size = gl_WorkGroupSize.x/16; + const uint tid = gl_LocalInvocationID.x; + const uint itid = tid%16; // 0...16 + const uint ix = tid/16; - const uint step = 16/K_QUANTS_PER_ITERATION; // 16 or 8 + const uint step = 8; - const uint v_im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... - const uint v_in = tid - step*v_im; // 0...15 or 0...7 + const uint v_im = itid/step; // 0 or 1. 0 computes 0..., 1 computes 128... + const uint v_in = itid - step*v_im; // 0...15 or 0...7 - const uint l0 = K_QUANTS_PER_ITERATION*v_in; // 0...15 + const uint l0 = 2*v_in; // 0...15 const uint q_offset = 32*v_im + l0; const uint s_offset = 8*v_im; const uint y_offset = 128*v_im + l0; - tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp + FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp - [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { + [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - const FLOAT_TYPE dall = FLOAT_TYPE(data_a[ib0 + i].d.x); - const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib0 + i].d.y); + f16vec2 d = data_a[ib0 + i].d; + const FLOAT_TYPE dall = d.x; + const FLOAT_TYPE dmin = d.y; + + B_TYPE_VEC2 b0 = data_b_v2[(b_offset + y_idx) / 2 + 0]; + B_TYPE_VEC2 b16 = data_b_v2[(b_offset + y_idx) / 2 + 8]; + B_TYPE_VEC2 b32 = data_b_v2[(b_offset + y_idx) / 2 + 16]; + B_TYPE_VEC2 b48 = data_b_v2[(b_offset + y_idx) / 2 + 24]; + B_TYPE_VEC2 b64 = data_b_v2[(b_offset + y_idx) / 2 + 32]; + B_TYPE_VEC2 b80 = data_b_v2[(b_offset + y_idx) / 2 + 40]; + B_TYPE_VEC2 b96 = data_b_v2[(b_offset + y_idx) / 2 + 48]; + B_TYPE_VEC2 b112 = data_b_v2[(b_offset + y_idx) / 2 + 56]; + + uint32_t s0_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 0]; + uint32_t s4_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 1]; + + uint32_t s0_lo4_u32 = s0_u32 & 0x0F0F0F0F; + uint32_t s0_hi4_u32 = (s0_u32 >> 4) & 0x0F0F0F0F; + uint32_t s4_lo4_u32 = s4_u32 & 0x0F0F0F0F; + uint32_t s4_hi4_u32 = (s4_u32 >> 4) & 0x0F0F0F0F; + + uvec4 s0_lo4 = uvec4(unpack8(s0_lo4_u32)); + uvec4 s4_lo4 = uvec4(unpack8(s4_lo4_u32)); + uvec4 s0_hi4 = uvec4(unpack8(s0_hi4_u32)); + uvec4 s4_hi4 = uvec4(unpack8(s4_hi4_u32)); + + uint16_t qs0_u16 = data_a_packed16[ib0 + i].qs[q_offset / 2 + 0]; + uint16_t qs16_u16 = data_a_packed16[ib0 + i].qs[q_offset / 2 + 8]; + uvec2 qs0 = uvec2(unpack8(qs0_u16)); + uvec2 qs16 = uvec2(unpack8(qs16_u16)); FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); 
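+        // q2_K stores, per 16-weight group, a 4-bit scale (low nibble) and a
+        // 4-bit min (high nibble) in each scales byte. The masks above are a
+        // SWAR unpack: s & 0x0F0F0F0F pulls four low nibbles out of one 32-bit
+        // load, (s >> 4) & 0x0F0F0F0F the four high nibbles, and unpack8
+        // splits them into vector lanes. sum1 accumulates b * (scale * q) and
+        // sum2 accumulates b * min, so the block result can be folded in as
+        //   temp += dall * sum1 - dmin * sum2
+        // with two fused multiply-adds instead of per-weight dequantization.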
FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); - for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { - sum1 = fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 0]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 0) & 3), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 1] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 0) & 3), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 2) & 3), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 3] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 2) & 3), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 4) & 3), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 5] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 4) & 3), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l + 0] >> 6) & 3), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +112]), FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 7] & 0xF) * FLOAT_TYPE((data_a[ib0 + i].qs[q_offset + l +16] >> 6) & 3), sum1)))))))); - sum2 = fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 0]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 0] >> 4) & 0xF), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 1] >> 4) & 0xF), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 2] >> 4) & 0xF), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 3] >> 4) & 0xF), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 4] >> 4) & 0xF), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 5] >> 4) & 0xF), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 6] >> 4) & 0xF), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +112]), FLOAT_TYPE((data_a[ib0 + i].scales[s_offset + 7] >> 4) & 0xF), sum2)))))))); + [[unroll]] for (int l = 0; l < 2; ++l) { + sum1 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_lo4[0]) * FLOAT_TYPE((qs0[l] >> 0) & 3), + fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_lo4[1]) * FLOAT_TYPE((qs16[l] >> 0) & 3), + fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_lo4[2]) * FLOAT_TYPE((qs0[l] >> 2) & 3), + fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_lo4[3]) * FLOAT_TYPE((qs16[l] >> 2) & 3), + fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_lo4[0]) * FLOAT_TYPE((qs0[l] >> 4) & 3), + fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_lo4[1]) * FLOAT_TYPE((qs16[l] >> 4) & 3), + fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_lo4[2]) * FLOAT_TYPE((qs0[l] >> 6) & 3), + fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_lo4[3]) * FLOAT_TYPE((qs16[l] >> 6) & 3), sum1)))))))); + sum2 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_hi4[0]), + fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_hi4[1]), + fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_hi4[2]), + fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_hi4[3]), + fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_hi4[0]), + fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_hi4[1]), + fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_hi4[2]), + fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_hi4[3]), sum2)))))))); } 
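+        // Accumulate into a per-thread register; the shared tmp[] array is
+        // written only once, just before the cross-thread reduction below,
+        // so the hot loop avoids a shared-memory round trip per block.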
- const uint tmp_idx = 16 * ix + tid; - tmp[tmp_idx] = fma(dall, sum1, fma(-dmin, sum2, tmp[tmp_idx])); + temp = fma(dall, sum1, fma(-dmin, sum2, temp)); } + tmp[gl_LocalInvocationID.x] = temp; + // sum up partial sums and write back result barrier(); - [[unroll]] for (uint s = 16; s > 0; s >>= 1) { + [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { if (tid < s) { tmp[tid] += tmp[tid + s]; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp index 3ca4ad85a5ca0..b19c3811136bb 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp @@ -1,61 +1,94 @@ #version 450 +#extension GL_EXT_shader_explicit_arithmetic_types : require #include "mul_mat_vec_base.comp" -layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in; +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -shared FLOAT_TYPE tmp[32]; +layout (constant_id = 0) const uint BLOCK_SIZE = 32; + +shared FLOAT_TYPE tmp[BLOCK_SIZE]; void main() { const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; + if (row >= p.stride_d) { + return; + } + uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; - const uint tid = gl_LocalInvocationID.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 - const uint ix = gl_LocalInvocationID.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1 + // 16 threads are used to process each block + const uint it_size = gl_WorkGroupSize.x/16; + const uint tid = gl_LocalInvocationID.x; + const uint itid = tid%16; // 0...16 + const uint ix = tid/16; - const uint step = 16/K_QUANTS_PER_ITERATION; // 16 or 8 + const uint step = 8; - const uint v_im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... - const uint v_in = tid - step*v_im; // 0...15 or 0...7 + const uint v_im = itid/step; // 0 or 1. 0 computes 0..., 1 computes 128... 
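+    // Thread mapping: the workgroup is split into groups of 16 lanes. itid
+    // (0..15) is the lane within a group and addresses data inside one
+    // 256-weight super-block; ix selects the super-block, so each group
+    // strides through the row it_size = gl_WorkGroupSize.x/16 blocks at a
+    // time. v_im picks the 128-weight half of the block, v_in the starting
+    // pair of weights within that half.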
+ const uint v_in = itid - step*v_im; // 0...15 or 0...7 const uint8_t m = uint8_t(1 << (4 * v_im)); - const uint l0 = K_QUANTS_PER_ITERATION*v_in; // 0...15 + const uint l0 = 2*v_in; // 0...15 const uint q_offset = 32*v_im + l0; const uint y_offset = 128*v_im + l0; - tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp + FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp const uint s_shift = 4 * v_im; - [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { + [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); + B_TYPE_VEC2 b0 = data_b_v2[(b_offset + y_idx) / 2 + 0]; + B_TYPE_VEC2 b16 = data_b_v2[(b_offset + y_idx) / 2 + 8]; + B_TYPE_VEC2 b32 = data_b_v2[(b_offset + y_idx) / 2 + 16]; + B_TYPE_VEC2 b48 = data_b_v2[(b_offset + y_idx) / 2 + 24]; + B_TYPE_VEC2 b64 = data_b_v2[(b_offset + y_idx) / 2 + 32]; + B_TYPE_VEC2 b80 = data_b_v2[(b_offset + y_idx) / 2 + 40]; + B_TYPE_VEC2 b96 = data_b_v2[(b_offset + y_idx) / 2 + 48]; + B_TYPE_VEC2 b112 = data_b_v2[(b_offset + y_idx) / 2 + 56]; + + uint16_t s0_16 = data_a_packed16[ib0 + i].scales[0]; + uint16_t s2_16 = data_a_packed16[ib0 + i].scales[1]; + uint16_t s4_16 = data_a_packed16[ib0 + i].scales[2]; + uint16_t s6_16 = data_a_packed16[ib0 + i].scales[3]; + uint16_t s8_16 = data_a_packed16[ib0 + i].scales[4]; + uint16_t s10_16 = data_a_packed16[ib0 + i].scales[5]; + u8vec2 s0 = unpack8(s0_16); + u8vec2 s2 = unpack8(s2_16); + u8vec2 s4 = unpack8(s4_16); + u8vec2 s6 = unpack8(s6_16); + u8vec2 s8 = unpack8(s8_16); + u8vec2 s10 = unpack8(s10_16); + FLOAT_TYPE sum = FLOAT_TYPE(0.0); - for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { - sum = fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 0]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[0] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 8] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 32]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[2] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[10] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 64]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[4] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 8] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 96]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[6] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[10] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 16]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[1] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 9] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 
0 : 4)), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 48]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[3] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[11] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l + 80]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[5] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[ 9] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(data_b[b_offset + y_idx + l +112]) * FLOAT_TYPE(int8_t(((data_a[ib0 + i].scales[7] >> s_shift) & 0xF) | ((data_a[ib0 + i].scales[11] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4)), sum)))))))); + [[unroll]] for (int l = 0; l < 2; ++l) { + sum = fma(FLOAT_TYPE(b0[l]) * FLOAT_TYPE(int8_t(((s0[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b32[l]) * FLOAT_TYPE(int8_t(((s2[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b64[l]) * FLOAT_TYPE(int8_t(((s4[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b96[l]) * FLOAT_TYPE(int8_t(((s6[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b16[l]) * FLOAT_TYPE(int8_t(((s0[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b48[l]) * FLOAT_TYPE(int8_t(((s2[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b80[l]) * FLOAT_TYPE(int8_t(((s4[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b112[l]) * FLOAT_TYPE(int8_t(((s6[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 
0 : 4)), sum)))))))); } - const uint tmp_idx = 16 * ix + tid; - tmp[tmp_idx] = fma(d, sum, tmp[tmp_idx]); + temp = fma(d, sum, temp); } + tmp[gl_LocalInvocationID.x] = temp; + // sum up partial sums and write back result barrier(); - [[unroll]] for (uint s = 16; s > 0; s >>= 1) { + [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { if (tid < s) { tmp[tid] += tmp[tid + s]; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp index b7c9b722d126b..b86d28589c64c 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp @@ -4,48 +4,36 @@ #include "mul_mat_vec_base.comp" -layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in; - -shared FLOAT_TYPE tmp[32]; - -// Declare aliased versions of A and B bindings that can use 16b/32b loads for -// the quantized values, and vec4 loads for B. -struct block_q4_K_u32 -{ - f16vec2 d; - uint32_t scales[3*QUANT_K/64/4]; - uint32_t qs[QUANT_K/2/4]; -}; - -struct block_q4_K_u16 -{ - f16vec2 d; - uint16_t scales[3*QUANT_K/64/2]; - uint16_t qs[QUANT_K/2/2]; -}; - -layout (binding = 0) readonly buffer A_u32 {block_q4_K_u32 data_a_u32[];}; -layout (binding = 0) readonly buffer A_u16 {block_q4_K_u16 data_a_u16[];}; -layout (binding = 1) readonly buffer BV4 {B_TYPE_VEC4 data_b_v4[];}; - -// This shader assumes K_QUANTS_PER_ITERATION == 2 for alignment of loads +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; + +layout (constant_id = 0) const uint BLOCK_SIZE = 32; + +shared FLOAT_TYPE tmp[BLOCK_SIZE]; + void main() { const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; + if (row >= p.stride_d) { + return; + } + uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; - const uint tid = gl_LocalInvocationID.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 - const uint ix = gl_LocalInvocationID.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1 + // 16 threads are used to process each block + const uint it_size = gl_WorkGroupSize.x/16; + const uint tid = gl_LocalInvocationID.x; + const uint itid = tid%16; // 0...16 + const uint ix = tid/16; - const uint step = 8/K_QUANTS_PER_ITERATION; // 8 or 4 + const uint step = 4; - const uint il = tid/step; // 0...3 - const uint ir = tid - step*il; // 0...7 or 0...3 - const uint n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4 + const uint il = itid/step; // 0...3 + const uint ir = itid - step*il; // 0...7 or 0...3 + const uint n = 4; const uint v_im = il / 2; // 0 or 1. 
0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const uint v_in = il % 2; @@ -56,7 +44,7 @@ void main() { FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp - [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { + [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y1_idx = i * QUANT_K + y_offset; const uint y2_idx = y1_idx + 128; @@ -64,9 +52,9 @@ void main() { const FLOAT_TYPE dall = FLOAT_TYPE(d.x); const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); - uint32_t scale0_u32 = data_a_u16[ib0 + i].scales[v_im ]; - uint32_t scale4_u32 = data_a_u16[ib0 + i].scales[v_im + 2]; - uint32_t scale8_u32 = data_a_u16[ib0 + i].scales[v_im + 4]; + uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ]; + uint32_t scale4_u32 = data_a_packed16[ib0 + i].scales[v_im + 2]; + uint32_t scale8_u32 = data_a_packed16[ib0 + i].scales[v_im + 4]; uvec4 scale0 = uvec4(unpack8(scale0_u32)); uvec4 scale4 = uvec4(unpack8(scale4_u32)); uvec4 scale8 = uvec4(unpack8(scale8_u32)); @@ -80,8 +68,8 @@ void main() { const uint32_t sc6 = (((scale8.x >> 4) & 0x0f) | ((scale4.x & 0xc0) >> 2)); const uint32_t sc7 = (((scale8.y >> 4) & 0x0f) | ((scale4.y & 0xc0) >> 2)); - uint32_t qs0_u32 = data_a_u32[ib0 + i].qs[q_offset / 4]; - uint32_t qs64_u32 = data_a_u32[ib0 + i].qs[q_offset / 4 + 16]; + uint32_t qs0_u32 = data_a_packed32[ib0 + i].qs[q_offset / 4]; + uint32_t qs64_u32 = data_a_packed32[ib0 + i].qs[q_offset / 4 + 16]; uint32_t qs0_u32_lo4 = qs0_u32 & 0x0F0F0F0F; uint32_t qs0_u32_hi4 = (qs0_u32 >> 4) & 0x0F0F0F0F; @@ -131,7 +119,7 @@ void main() { // sum up partial sums and write back result barrier(); - [[unroll]] for (uint s = 16; s > 0; s >>= 1) { + [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { if (tid < s) { tmp[tid] += tmp[tid + s]; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp index 2306785af4226..fd243cf9161fc 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp @@ -1,25 +1,36 @@ #version 450 +#extension GL_EXT_shader_explicit_arithmetic_types : require + #include "mul_mat_vec_base.comp" -layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in; +layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; + +layout (constant_id = 0) const uint BLOCK_SIZE = 32; -shared FLOAT_TYPE tmp[32]; +shared FLOAT_TYPE tmp[BLOCK_SIZE]; void main() { const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z; + if (row >= p.stride_d) { + return; + } + uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); const uint num_blocks_per_row = p.ncols / QUANT_K; const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row; - const uint tid = gl_LocalInvocationID.x/2; // 0...31 or 0...16 - const uint ix = gl_LocalInvocationID.x%2; // 0 or 0, 1 + // 16 threads are used to process each block + const uint it_size = gl_WorkGroupSize.x/16; + const uint tid = gl_LocalInvocationID.x; + const uint itid = tid%16; // 0...16 + const uint ix = tid/16; - const uint il = tid/4; // 0...3 - const uint ir = tid - 4*il; // 0...7 or 0...3 + const uint il = itid/4; // 0...3 + const uint ir = itid - 4*il; // 0...7 or 0...3 const uint v_im = il / 2; // 0 or 1. 
0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const uint v_in = il % 2; @@ -28,76 +39,116 @@ void main() { const uint q_offset = 32*v_im + l0; const uint y_offset = 64*v_im + l0; - const uint8_t hm1 = uint8_t(1 << (2*v_im)); - const uint8_t hm2 = uint8_t(hm1 << 4); - - tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp + FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp - [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += 2) { + [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y1_idx = i * QUANT_K + y_offset; const uint y2_idx = y1_idx + 128; - const FLOAT_TYPE dall = FLOAT_TYPE(data_a[ib0 + i].d.x); - const FLOAT_TYPE dmin = FLOAT_TYPE(data_a[ib0 + i].d.y); - - const uint8_t sc0 = uint8_t( data_a[ib0 + i].scales[v_im * 2 ] & 0x3f); - const uint8_t sc1 = uint8_t( data_a[ib0 + i].scales[v_im * 2 + 1] & 0x3f); - const uint8_t sc2 = uint8_t( data_a[ib0 + i].scales[v_im * 2 + 4] & 0x3f); - const uint8_t sc3 = uint8_t( data_a[ib0 + i].scales[v_im * 2 + 5] & 0x3f); - const uint8_t sc4 = uint8_t(( data_a[ib0 + i].scales[v_im * 2 + 8] & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 ] & 0xc0) >> 2)); - const uint8_t sc5 = uint8_t(( data_a[ib0 + i].scales[v_im * 2 + 9] & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 1] & 0xc0) >> 2)); - const uint8_t sc6 = uint8_t(((data_a[ib0 + i].scales[v_im * 2 + 8] >> 4) & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 4] & 0xc0) >> 2)); - const uint8_t sc7 = uint8_t(((data_a[ib0 + i].scales[v_im * 2 + 9] >> 4) & 0x0f) | ((data_a[ib0 + i].scales[v_im * 2 + 5] & 0xc0) >> 2)); - - const uint8_t q4_0 = uint8_t(data_a[ib0 + i].qs[q_offset ] & 0xf); - const uint8_t q4_1 = uint8_t(data_a[ib0 + i].qs[q_offset + 1] & 0xf); - const uint8_t q4_2 = uint8_t(data_a[ib0 + i].qs[q_offset + 16] & 0xf); - const uint8_t q4_3 = uint8_t(data_a[ib0 + i].qs[q_offset + 17] & 0xf); - const uint8_t q4_4 = uint8_t(data_a[ib0 + i].qs[q_offset ] >> 4); - const uint8_t q4_5 = uint8_t(data_a[ib0 + i].qs[q_offset + 1] >> 4); - const uint8_t q4_6 = uint8_t(data_a[ib0 + i].qs[q_offset + 16] >> 4); - const uint8_t q4_7 = uint8_t(data_a[ib0 + i].qs[q_offset + 17] >> 4); - const uint8_t q4_8 = uint8_t(data_a[ib0 + i].qs[q_offset + 64] & 0xf); - const uint8_t q4_9 = uint8_t(data_a[ib0 + i].qs[q_offset + 65] & 0xf); - const uint8_t q4_10 = uint8_t(data_a[ib0 + i].qs[q_offset + 80] & 0xf); - const uint8_t q4_11 = uint8_t(data_a[ib0 + i].qs[q_offset + 81] & 0xf); - const uint8_t q4_12 = uint8_t(data_a[ib0 + i].qs[q_offset + 64] >> 4); - const uint8_t q4_13 = uint8_t(data_a[ib0 + i].qs[q_offset + 65] >> 4); - const uint8_t q4_14 = uint8_t(data_a[ib0 + i].qs[q_offset + 80] >> 4); - const uint8_t q4_15 = uint8_t(data_a[ib0 + i].qs[q_offset + 81] >> 4); + f16vec2 d = data_a[ib0 + i].d; + const FLOAT_TYPE dall = FLOAT_TYPE(d.x); + const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); + + uint32_t scale0_u32 = data_a_packed16[ib0 + i].scales[v_im ]; + uint32_t scale4_u32 = data_a_packed16[ib0 + i].scales[v_im + 2]; + uint32_t scale8_u32 = data_a_packed16[ib0 + i].scales[v_im + 4]; + uvec4 scale0 = uvec4(unpack8(scale0_u32)); + uvec4 scale4 = uvec4(unpack8(scale4_u32)); + uvec4 scale8 = uvec4(unpack8(scale8_u32)); + + const uint32_t sc0 = ( scale0.x & 0x3f); + const uint32_t sc1 = ( scale0.y & 0x3f); + const uint32_t sc2 = ( scale4.x & 0x3f); + const uint32_t sc3 = ( scale4.y & 0x3f); + const uint32_t sc4 = (( scale8.x & 0x0f) | ((scale0.x & 0xc0) >> 2)); + const uint32_t sc5 = (( scale8.y & 0x0f) | ((scale0.y & 0xc0) >> 2)); + const uint32_t 
sc6 = (((scale8.x >> 4) & 0x0f) | ((scale4.x & 0xc0) >> 2)); + const uint32_t sc7 = (((scale8.y >> 4) & 0x0f) | ((scale4.y & 0xc0) >> 2)); + + uint32_t qs0_16_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 8]) << 16); + uint32_t qs64_80_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 32]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 40]) << 16); + + uint32_t qs0_16_u32_lo4 = qs0_16_u32 & 0x0F0F0F0F; + uint32_t qs0_16_u32_hi4 = (qs0_16_u32 >> 4) & 0x0F0F0F0F; + uint32_t qs64_80_u32_lo4 = qs64_80_u32 & 0x0F0F0F0F; + uint32_t qs64_80_u32_hi4 = (qs64_80_u32 >> 4) & 0x0F0F0F0F; + + uint32_t qh = pack32(u16vec2(data_a_packed16[ib0 + i].qh[l0 / 2], data_a_packed16[ib0 + i].qh[l0 / 2 + 8])); + + uint32_t qs0_16_lo4_offset16 = ((qh >> (2*v_im)) & 0x01010101) << 4; + uint32_t qs0_16_hi4_offset16 = ((qh >> (2*v_im)) & 0x02020202) << 3; + uint32_t qs64_80_lo4_offset16 = ((qh >> (2*v_im)) & 0x10101010) << 0; + uint32_t qs64_80_hi4_offset16 = ((qh >> (2*v_im)) & 0x20202020) >> 1; + + qs0_16_u32_lo4 += qs0_16_lo4_offset16; + qs0_16_u32_hi4 += qs0_16_hi4_offset16; + qs64_80_u32_lo4 += qs64_80_lo4_offset16; + qs64_80_u32_hi4 += qs64_80_hi4_offset16; + + uvec4 qs0_16_lo4 = uvec4(unpack8(qs0_16_u32_lo4)); + uvec4 qs64_80_lo4 = uvec4(unpack8(qs64_80_u32_lo4)); + uvec4 qs0_16_hi4 = uvec4(unpack8(qs0_16_u32_hi4)); + uvec4 qs64_80_hi4 = uvec4(unpack8(qs64_80_u32_hi4)); + + const uint32_t q4_0 = qs0_16_lo4.x; + const uint32_t q4_1 = qs0_16_lo4.y; + const uint32_t q4_2 = qs0_16_lo4.z; + const uint32_t q4_3 = qs0_16_lo4.w; + const uint32_t q4_4 = qs0_16_hi4.x; + const uint32_t q4_5 = qs0_16_hi4.y; + const uint32_t q4_6 = qs0_16_hi4.z; + const uint32_t q4_7 = qs0_16_hi4.w; + const uint32_t q4_8 = qs64_80_lo4.x; + const uint32_t q4_9 = qs64_80_lo4.y; + const uint32_t q4_10 = qs64_80_lo4.z; + const uint32_t q4_11 = qs64_80_lo4.w; + const uint32_t q4_12 = qs64_80_hi4.x; + const uint32_t q4_13 = qs64_80_hi4.y; + const uint32_t q4_14 = qs64_80_hi4.z; + const uint32_t q4_15 = qs64_80_hi4.w; + + B_TYPE_VEC2 by10 = data_b_v2[(b_offset + y1_idx) / 2]; + B_TYPE_VEC2 by116 = data_b_v2[(b_offset + y1_idx) / 2 + 8]; + B_TYPE_VEC2 by132 = data_b_v2[(b_offset + y1_idx) / 2 + 16]; + B_TYPE_VEC2 by148 = data_b_v2[(b_offset + y1_idx) / 2 + 24]; + B_TYPE_VEC2 by20 = data_b_v2[(b_offset + y2_idx) / 2]; + B_TYPE_VEC2 by216 = data_b_v2[(b_offset + y2_idx) / 2 + 8]; + B_TYPE_VEC2 by232 = data_b_v2[(b_offset + y2_idx) / 2 + 16]; + B_TYPE_VEC2 by248 = data_b_v2[(b_offset + y2_idx) / 2 + 24]; const FLOAT_TYPE sx = - fma(FLOAT_TYPE(data_b[b_offset + y1_idx ]), (q4_0 + (((data_a[ib0 + i].qh[l0 ] & hm1) != 0) ? 16 : 0)), - fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 1]), (q4_1 + (((data_a[ib0 + i].qh[l0 + 1] & hm1) != 0) ? 16 : 0)), - fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 16]), (q4_2 + (((data_a[ib0 + i].qh[l0 + 16] & hm1) != 0) ? 16 : 0)), - FLOAT_TYPE(data_b[b_offset + y1_idx + 17]) * (q4_3 + (((data_a[ib0 + i].qh[l0 + 17] & hm1) != 0) ? 16 : 0))))); + fma(FLOAT_TYPE(by10.x), q4_0, + fma(FLOAT_TYPE(by10.y), q4_1, + fma(FLOAT_TYPE(by116.x), q4_2, + FLOAT_TYPE(by116.y) * q4_3))); const FLOAT_TYPE sy = - fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]), (q4_4 + (((data_a[ib0 + i].qh[l0 ] & (hm1 << 1)) != 0) ? 16 : 0)), - fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 33]), (q4_5 + (((data_a[ib0 + i].qh[l0 + 1] & (hm1 << 1)) != 0) ? 16 : 0)), - fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 48]), (q4_6 + (((data_a[ib0 + i].qh[l0 + 16] & (hm1 << 1)) != 0) ? 
16 : 0)), - FLOAT_TYPE(data_b[b_offset + y1_idx + 49]) * (q4_7 + (((data_a[ib0 + i].qh[l0 + 17] & (hm1 << 1)) != 0) ? 16 : 0))))); + fma(FLOAT_TYPE(by132.x), q4_4, + fma(FLOAT_TYPE(by132.y), q4_5, + fma(FLOAT_TYPE(by148.x), q4_6, + FLOAT_TYPE(by148.y) * q4_7))); const FLOAT_TYPE sz = - fma(FLOAT_TYPE(data_b[b_offset + y2_idx ]), (q4_8 + (((data_a[ib0 + i].qh[l0 ] & hm2) != 0) ? 16 : 0)), - fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 1]), (q4_9 + (((data_a[ib0 + i].qh[l0 + 1] & hm2) != 0) ? 16 : 0)), - fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 16]), (q4_10 + (((data_a[ib0 + i].qh[l0 + 16] & hm2) != 0) ? 16 : 0)), - FLOAT_TYPE(data_b[b_offset + y2_idx + 17]) * (q4_11 + (((data_a[ib0 + i].qh[l0 + 17] & hm2) != 0) ? 16 : 0))))); + fma(FLOAT_TYPE(by20.x), q4_8, + fma(FLOAT_TYPE(by20.y), q4_9, + fma(FLOAT_TYPE(by216.x), q4_10, + FLOAT_TYPE(by216.y) * q4_11))); const FLOAT_TYPE sw = - fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 32]), (q4_12 + (((data_a[ib0 + i].qh[l0 ] & (hm2 << 1)) != 0) ? 16 : 0)), - fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 33]), (q4_13 + (((data_a[ib0 + i].qh[l0 + 1] & (hm2 << 1)) != 0) ? 16 : 0)), - fma(FLOAT_TYPE(data_b[b_offset + y2_idx + 48]), (q4_14 + (((data_a[ib0 + i].qh[l0 + 16] & (hm2 << 1)) != 0) ? 16 : 0)), - FLOAT_TYPE(data_b[b_offset + y2_idx + 49]) * (q4_15 + (((data_a[ib0 + i].qh[l0 + 17] & (hm2 << 1)) != 0) ? 16 : 0))))); + fma(FLOAT_TYPE(by232.x), q4_12, + fma(FLOAT_TYPE(by232.y), q4_13, + fma(FLOAT_TYPE(by248.x), q4_14, + FLOAT_TYPE(by248.y) * q4_15))); const FLOAT_TYPE smin = - fma(FLOAT_TYPE(data_b[b_offset + y1_idx ]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 1 ]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 16]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 17]), sc2, - fma(FLOAT_TYPE(data_b[b_offset + y1_idx + 32]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 33]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 48]) + FLOAT_TYPE(data_b[b_offset + y1_idx + 49]), sc3, - fma(FLOAT_TYPE(data_b[b_offset + y2_idx ]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 1 ]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 16]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 17]), sc6, - (FLOAT_TYPE(data_b[b_offset + y2_idx + 32]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 33]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 48]) + FLOAT_TYPE(data_b[b_offset + y2_idx + 49])) * sc7))); - const uint tmp_idx = 16 * ix + tid; - tmp[tmp_idx] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, tmp[tmp_idx])); + fma(FLOAT_TYPE(by10.x) + FLOAT_TYPE(by10.y) + FLOAT_TYPE(by116.x) + FLOAT_TYPE(by116.y), sc2, + fma(FLOAT_TYPE(by132.x) + FLOAT_TYPE(by132.y) + FLOAT_TYPE(by148.x) + FLOAT_TYPE(by148.y), sc3, + fma(FLOAT_TYPE(by20.x) + FLOAT_TYPE(by20.y) + FLOAT_TYPE(by216.x) + FLOAT_TYPE(by216.y), sc6, + (FLOAT_TYPE(by232.x) + FLOAT_TYPE(by232.y) + FLOAT_TYPE(by248.x) + FLOAT_TYPE(by248.y)) * sc7))); + temp = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp)); } + tmp[gl_LocalInvocationID.x] = temp; + // sum up partial sums and write back result barrier(); - [[unroll]] for (uint s = 16; s > 0; s >>= 1) { + [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) { if (tid < s) { tmp[tid] += tmp[tid + s]; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp index 95c286eeb17e1..760aff85499f4 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp @@ -1,76 +1,107 @@ #version 450 +#extension 
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
index 95c286eeb17e1..760aff85499f4 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp
@@ -1,76 +1,107 @@
 #version 450
 
+#extension GL_EXT_shader_explicit_arithmetic_types : require
+
 #include "mul_mat_vec_base.comp"
 
-layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in;
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (constant_id = 0) const uint BLOCK_SIZE = 32;
 
-shared FLOAT_TYPE tmp[32];
+shared FLOAT_TYPE tmp[BLOCK_SIZE];
 
 void main() {
     const uint row = gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z;
 
+    if (row >= p.stride_d) {
+        return;
+    }
+
     uint a_offset, b_offset, d_offset;
     get_offsets(a_offset, b_offset, d_offset);
 
     const uint num_blocks_per_row = p.ncols / QUANT_K;
     const uint ib0 = a_offset / QUANT_K + row*num_blocks_per_row;
 
-    const uint tid = gl_LocalInvocationID.x/K_QUANTS_PER_ITERATION;  // 0...31 or 0...16
-    const uint ix  = gl_LocalInvocationID.x%K_QUANTS_PER_ITERATION;  // 0 or 0, 1
+    // 16 threads are used to process each block
+    const uint it_size = gl_WorkGroupSize.x/16;
+    const uint tid = gl_LocalInvocationID.x;
+    const uint itid = tid%16;  // 0...15
+    const uint ix = tid/16;
 
-    const uint step = 16/K_QUANTS_PER_ITERATION;            // 16 or 8
+    const uint step = 8;
 
-    const uint v_im = tid/step;                             // 0 or 1. 0 computes 0..., 1 computes 128...
-    const uint v_in = tid - step*v_im;                      // 0...15 or 0...7
+    const uint v_im = itid/step;                            // 0 or 1. 0 computes 0..., 1 computes 128...
+    const uint v_in = itid - step*v_im;                     // 0...15 or 0...7
 
-#if K_QUANTS_PER_ITERATION == 1
-    const uint l0 = v_in;                                   // 0...15
-    const uint is = 0;
-#else
     const uint l0 = 4 * v_in;                               // 0, 4, 8, ..., 28
     const uint is = v_in / 4;
-#endif
 
     const uint ql_offset = 64*v_im + l0;
     const uint qh_offset = 32*v_im + l0;
     const uint s_offset  =  8*v_im + is;
     const uint y_offset = 128*v_im + l0;
 
-    tmp[16 * ix + tid] = FLOAT_TYPE(0.0); // partial sum for thread in warp
+    FLOAT_TYPE temp = FLOAT_TYPE(0.0); // partial sum for thread in warp
 
-    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
+    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) {
         const uint y_idx = i * QUANT_K + y_offset;
 
         const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d);
 
-#if K_QUANTS_PER_ITERATION == 1
-        const uint tmp_idx = 16 * ix + tid;
-        tmp[tmp_idx] = fma(FLOAT_TYPE(data_b[b_offset + y_idx +  0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset +  0] & 0xF) | ((data_a[ib0 + i].qh[qh_offset +  0] & 0x03) << 4)) - 32),
-                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 16]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 1]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 16] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x03) << 4)) - 32),
-                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 32] & 0xF) | ((data_a[ib0 + i].qh[qh_offset +  0] & 0x0c) << 2)) - 32),
-                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 48]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 3]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 48] & 0xF) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x0c) << 2)) - 32),
-                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset +  0] >> 4) | ((data_a[ib0 + i].qh[qh_offset +  0] & 0x30) >> 0)) - 32),
-                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 80]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 5]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 16] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0x30) >> 0)) - 32),
-                       fma(FLOAT_TYPE(data_b[b_offset + y_idx + 96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 32] >> 4) | ((data_a[ib0 + i].qh[qh_offset +  0] & 0xc0) >> 2)) - 32),
-                       fma(FLOAT_TYPE(data_b[b_offset + y_idx +112]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 7]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + 48] >> 4) | ((data_a[ib0 + i].qh[qh_offset + 16] & 0xc0) >> 2)) - 32), tmp[tmp_idx]))))))));
-#else
+        FLOAT_TYPE scales[4];
+        scales[0] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]);
+        scales[1] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]);
+        scales[2] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]);
+        scales[3] = FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]);
+
+        uint32_t ql0_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 1]) << 16);
+        uint32_t ql32_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 16]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 17]) << 16);
+
+        uint32_t ql0_u32_lo4 = ql0_u32 & 0x0F0F0F0F;
+        uint32_t ql0_u32_hi4 = (ql0_u32 >> 4) & 0x0F0F0F0F;
+        uint32_t ql32_u32_lo4 = ql32_u32 & 0x0F0F0F0F;
+        uint32_t ql32_u32_hi4 = (ql32_u32 >> 4) & 0x0F0F0F0F;
+
+        uint32_t qh_u32 = uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2 + 1]) << 16);
+        uint32_t qh0_u32 = (qh_u32 & 0x03030303) << 4;
+        uint32_t qh2_u32 = (qh_u32 & 0x0C0C0C0C) << 2;
+        uint32_t qh4_u32 = (qh_u32 & 0x30303030) << 0;
+        uint32_t qh6_u32 = (qh_u32 & 0xC0C0C0C0) >> 2;
+
+        uint32_t q0_u32 = ql0_u32_lo4 | qh0_u32;
+        uint32_t q1_u32 = ql32_u32_lo4 | qh2_u32;
+        uint32_t q2_u32 = ql0_u32_hi4 | qh4_u32;
+        uint32_t q3_u32 = ql32_u32_hi4 | qh6_u32;
+
+        uvec4 q0 = uvec4(unpack8(q0_u32));
+        uvec4 q1 = uvec4(unpack8(q1_u32));
+        uvec4 q2 = uvec4(unpack8(q2_u32));
+        uvec4 q3 = uvec4(unpack8(q3_u32));
+
+        B_TYPE_VEC4 by0 = data_b_v4[(b_offset + y_idx) / 4];
+        B_TYPE_VEC4 by32 = data_b_v4[(b_offset + y_idx) / 4 + 8];
+        B_TYPE_VEC4 by64 = data_b_v4[(b_offset + y_idx) / 4 + 16];
+        B_TYPE_VEC4 by96 = data_b_v4[(b_offset + y_idx) / 4 + 24];
+
         FLOAT_TYPE sum = FLOAT_TYPE(0.0);
         [[unroll]] for (int l = 0; l < 4; ++l) {
-            sum = fma(FLOAT_TYPE(data_b[b_offset + y_idx + l+ 0]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 0]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+ 0] & 0xF) | (((data_a[ib0 + i].qh[qh_offset + l] >> 0) & 3) << 4)) - 32),
-                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l+32]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 2]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+32] & 0xF) | (((data_a[ib0 + i].qh[qh_offset + l] >> 2) & 3) << 4)) - 32),
-                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l+64]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 4]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+ 0] >> 4) | (((data_a[ib0 + i].qh[qh_offset + l] >> 4) & 3) << 4)) - 32),
-                  fma(FLOAT_TYPE(data_b[b_offset + y_idx + l+96]) * FLOAT_TYPE(data_a[ib0 + i].scales[s_offset + 6]) * d, FLOAT_TYPE(int8_t((data_a[ib0 + i].ql[ql_offset + l+32] >> 4) | (((data_a[ib0 + i].qh[qh_offset + l] >> 6) & 3) << 4)) - 32), sum))));
+            sum = fma(FLOAT_TYPE(by0[l]) * scales[0], FLOAT_TYPE(int8_t(q0[l]) - 32),
+                  fma(FLOAT_TYPE(by32[l]) * scales[1], FLOAT_TYPE(int8_t(q1[l]) - 32),
+                  fma(FLOAT_TYPE(by64[l]) * scales[2], FLOAT_TYPE(int8_t(q2[l]) - 32),
+                  fma(FLOAT_TYPE(by96[l]) * scales[3], FLOAT_TYPE(int8_t(q3[l]) - 32), sum))));
         }
-        tmp[16 * ix + tid] += sum;
-#endif
+        temp += sum * d;
     }
+
+    tmp[gl_LocalInvocationID.x] = temp;
 
     // sum up partial sums and write back result
     barrier();
-    [[unroll]] for (uint s = 16; s > 0; s >>= 1) {
+    [[unroll]] for (uint s = gl_WorkGroupSize.x/2; s > 0; s >>= 1) {
         if (tid < s) {
             tmp[tid] += tmp[tid + s];
-        }
+        }
         barrier();
     }
     if (tid == 0) {
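q6_k gets the same treatment: each 6-bit weight is a nibble of ql combined with a 2-bit field from qh shifted into bits 4..5, then recentred by subtracting 32. A one-element C++ sketch (hypothetical helper; the shader reconstructs four weights per uint32 with the masks above):

    #include <cstdint>

    // ql_nibble supplies the low 4 bits, qh supplies 2 more bits starting
    // at `shift`; mirrors q0_u32 = ql0_u32_lo4 | qh0_u32, then int8_t(q) - 32.
    int dequant_q6(uint8_t ql_nibble, uint8_t qh, int shift) {
        uint32_t hi2 = (qh >> shift) & 0x03;                 // 2 high bits
        return int((ql_nibble & 0x0F) | (hi2 << 4)) - 32;    // 6-bit value, recentred
    }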
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp
index fffdd18189d55..48122cbef906e 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp
@@ -7,6 +7,12 @@
 #extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
 #endif
 
+#ifdef COOPMAT
+#extension GL_KHR_cooperative_matrix : enable
+#extension GL_KHR_memory_scope_semantics : enable
+#extension GL_KHR_shader_subgroup_basic : enable
+#endif
+
 #ifdef MUL_MAT_ID
 #extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
 #endif
@@ -57,6 +63,7 @@ layout (push_constant) uniform parameter
 #endif
 } p;
 
+layout (constant_id = 0) const uint BLOCK_SIZE = 64;
 layout (constant_id = 1) const uint BM = 64;
 layout (constant_id = 2) const uint BN = 64;
 layout (constant_id = 3) const uint BK = 16;  // Assumed to be 32 if working with a quant
@@ -65,16 +72,33 @@ layout (constant_id = 5) const uint WN = 32;
 layout (constant_id = 6) const uint WMITER = 2;
 layout (constant_id = 7) const uint TM = 4;
 layout (constant_id = 8) const uint TN = 2;
-layout (constant_id = 9) const uint WARP = 32;
+layout (constant_id = 9) const uint TK = 1;  // Only needed for coopmat
+layout (constant_id = 10) const uint WARP = 32;
+
+#ifdef COOPMAT
+#define SHMEM_STRIDE (BK + 8)
+#else
+#define SHMEM_STRIDE (BK + 1)
+#endif
 
-shared FLOAT_TYPE buf_a[BM * (BK+1)];
-shared FLOAT_TYPE buf_b[BN * (BK+1)];
+shared FLOAT_TYPE buf_a[BM * SHMEM_STRIDE];
+shared FLOAT_TYPE buf_b[BN * SHMEM_STRIDE];
 
 #ifdef MUL_MAT_ID
 shared u16vec2 row_ids[3072];
+#endif // MUL_MAT_ID
+
+#define NUM_WARPS (BLOCK_SIZE / WARP)
+
+#ifdef COOPMAT
+shared ACC_TYPE coopmat_stage[TM * TN * NUM_WARPS];
 #endif
 
 void main() {
+#if defined(DATA_A_IQ4_NL)
+    init_iq4nl_shmem();
+#endif
+
 #ifdef MUL_MAT_ID
     const uint expert_idx = gl_GlobalInvocationID.z;
 #else
@@ -94,17 +118,32 @@ void main() {
     const uint ik = gl_WorkGroupID.x / blocks_m;
     const uint ic = gl_WorkGroupID.y;
 
-    const uint warp_i = gl_LocalInvocationID.x / WARP;
-    const uint warp_r = warp_i % (BM / WM);
-    const uint warp_c = warp_i / (BM / WM);
-
     const uint WNITER = (WM * WN) / (WARP * TM * TN * WMITER);
     const uint WSUBM = WM / WMITER;
     const uint WSUBN = WN / WNITER;
 
+#ifdef COOPMAT
+    const uint warp_i = gl_SubgroupID;
+
+    const uint tiw = gl_SubgroupInvocationID;
+
+    const uint cms_per_row = WM / TM;
+    const uint cms_per_col = WN / TN;
+
+    const uint storestride = WARP / TM;
+    const uint store_r = tiw % TM;
+    const uint store_c = tiw / TM;
+#else
+    const uint warp_i = gl_LocalInvocationID.x / WARP;
+
+    const uint tiw = gl_LocalInvocationID.x % WARP;
+
     const uint tiwr = tiw % (WSUBM / TM);
     const uint tiwc = tiw / (WSUBM / TM);
+#endif
+
+    const uint warp_r = warp_i % (BM / WM);
+    const uint warp_c = warp_i / (BM / WM);
 
     const uint loadr_a = gl_LocalInvocationID.x % (BK / LOAD_VEC_A);
     const uint loadc_a = gl_LocalInvocationID.x / (BK / LOAD_VEC_A);
@@ -152,21 +191,31 @@ void main() {
     uint pos_b = (batch_idx * p.batch_stride_b + ic * BN * p.stride_b + start_k) / LOAD_VEC_B;
 #endif
 
-    float sums[WMITER * TM * WNITER * TN];
+#ifdef COOPMAT
+    coopmat<FLOAT_TYPE, gl_ScopeSubgroup, TM, TK, gl_MatrixUseA> cache_a;
+    coopmat<FLOAT_TYPE, gl_ScopeSubgroup, TK, TN, gl_MatrixUseB> cache_b;
+    coopmat<ACC_TYPE, gl_ScopeSubgroup, TM, TN, gl_MatrixUseAccumulator> sums[cms_per_row * cms_per_col];
+
+    [[unroll]] for (uint i = 0; i < cms_per_row * cms_per_col; i++) {
+        sums[i] = coopmat<ACC_TYPE, gl_ScopeSubgroup, TM, TN, gl_MatrixUseAccumulator>(0.0f);
+    }
+#else
+    ACC_TYPE sums[WMITER * TM * WNITER * TN];
     FLOAT_TYPE cache_a[WMITER * TM];
     FLOAT_TYPE cache_b[WNITER * TN];
 
     [[unroll]] for (uint i = 0; i < WMITER*TM*WNITER*TN; i++) {
-        sums[i] = 0.0f;
+        sums[i] = ACC_TYPE(0.0f);
     }
+#endif
 
-    [[unroll]] for (uint block = start_k; block < end_k; block += BK) {
+    for (uint block = start_k; block < end_k; block += BK) {
         [[unroll]] for (uint l = 0; l < BM; l += loadstride_a) {
 #if defined(DATA_A_F32) || defined(DATA_A_F16)
 #if LOAD_VEC_A == 8
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
             buf_a[buf_idx    ] = FLOAT_TYPE(data_a[idx][0].x);
             buf_a[buf_idx + 1] = FLOAT_TYPE(data_a[idx][0].y);
             buf_a[buf_idx + 2] = FLOAT_TYPE(data_a[idx][0].z);
@@ -177,21 +226,21 @@ void main() {
             buf_a[buf_idx + 7] = FLOAT_TYPE(data_a[idx][1].w);
 #elif LOAD_VEC_A == 4
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
             buf_a[buf_idx    ] = FLOAT_TYPE(data_a[idx].x);
             buf_a[buf_idx + 1] = FLOAT_TYPE(data_a[idx].y);
             buf_a[buf_idx + 2] = FLOAT_TYPE(data_a[idx].z);
             buf_a[buf_idx + 3] = FLOAT_TYPE(data_a[idx].w);
 #else
             if (ir * BM + loadc_a + l < p.M && block + loadr_a < end_k) {
-                buf_a[(loadc_a + l) * (BK+1) + loadr_a] = FLOAT_TYPE(data_a[pos_a + (loadc_a + l) * p.stride_a + loadr_a]);
+                buf_a[(loadc_a + l) * SHMEM_STRIDE + loadr_a] = FLOAT_TYPE(data_a[pos_a + (loadc_a + l) * p.stride_a + loadr_a]);
             } else {
-                buf_a[(loadc_a + l) * (BK+1) + loadr_a] = FLOAT_TYPE(0.0f);
+                buf_a[(loadc_a + l) * SHMEM_STRIDE + loadr_a] = FLOAT_TYPE(0.0f);
             }
 #endif
 #elif defined(DATA_A_Q4_0)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
 
             const uint ib = idx / 16;
             const uint iqs = idx & 0xF;
@@ -204,7 +253,7 @@ void main() {
             buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
 #elif defined(DATA_A_Q4_1)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
 
             const uint ib = idx / 16;
             const uint iqs = idx & 0xF;
@@ -218,7 +267,7 @@ void main() {
             buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
 #elif defined(DATA_A_Q5_0)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
 
             const uint ib = idx / 16;
             const uint iqs = idx & 0xF;
@@ -233,7 +282,7 @@ void main() {
             buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
 #elif defined(DATA_A_Q5_1)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
 
             const uint ib = idx / 16;
             const uint iqs = idx & 0xF;
@@ -249,7 +298,7 @@ void main() {
             buf_a[buf_idx + 16] = FLOAT_TYPE(v.y);
 #elif defined(DATA_A_Q8_0)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
 
             const uint ib = idx / 16;
             const uint iqs = (idx & 0xF) * 2;
@@ -261,7 +310,7 @@ void main() {
             buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
 #elif defined(DATA_A_Q2_K)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
 
             const uint ib = idx / 128;                         // 2 values per idx
             const uint iqs = idx % 128;                        // 0..127
@@ -280,7 +329,7 @@ void main() {
             buf_a[buf_idx + 1] = FLOAT_TYPE(v.y);
 #elif defined(DATA_A_Q3_K)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
 
             const uint ib = idx / 128;                         // 2 values per idx
             const uint iqs = idx % 128;                        // 0..127
@@ -304,7 +353,7 @@ void main() {
             buf_a[buf_idx + 1] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi + 1] >> qsshift) & 3) - (((data_a[ib].hmask[hmi + 1] & m) != 0) ? 0 : 4)));
 #elif defined(DATA_A_Q4_K)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
 
             const uint ib = idx / 128;                         // 2 values per idx
             const uint iqs = idx % 128;                        // 0..127
@@ -316,15 +365,20 @@ void main() {
 
             const vec2 loadd = vec2(data_a[ib].d);
 
-            uint8_t sc;
-            uint8_t mbyte;
-            if (is < 4) {
-                sc    = uint8_t(data_a[ib].scales[is    ] & 63);
-                mbyte = uint8_t(data_a[ib].scales[is + 4] & 63);
-            } else {
-                sc    = uint8_t((data_a[ib].scales[is + 4] & 0xF) | ((data_a[ib].scales[is - 4] >> 6) << 4));
-                mbyte = uint8_t((data_a[ib].scales[is + 4] >>  4) | ((data_a[ib].scales[is    ] >> 6) << 4));
-            }
+            const uint scidx0 = (is < 4) ? is : (is + 4);
+            const uint scidx1 = (is < 4) ? is : (is - 4);
+            const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+            const uint scidxshift1 = (is < 4) ? 0 : 2;
+            const uint mbidx0 = is + 4;
+            const uint mbidx1 = (is < 4) ? is + 4 : is;
+            const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+            const uint mbidxshift0 = (is < 4) ? 0 : 4;
+            const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+            const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+            const uint8_t sc    = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+            const uint8_t mbyte = uint8_t((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0 | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
             const float d = loadd.x * sc;
             const float m = -loadd.y * mbyte;
 
@@ -332,7 +386,7 @@ void main() {
             buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF), m));
 #elif defined(DATA_A_Q5_K)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
 
             const uint ib = idx / 128;                         // 2 values per idx
             const uint iqs = idx % 128;                        // 0..127
@@ -347,15 +401,20 @@ void main() {
 
             const vec2 loadd = vec2(data_a[ib].d);
 
-            uint8_t sc;
-            uint8_t mbyte;
-            if (is < 4) {
-                sc    = uint8_t(data_a[ib].scales[is    ] & 63);
-                mbyte = uint8_t(data_a[ib].scales[is + 4] & 63);
-            } else {
-                sc    = uint8_t((data_a[ib].scales[is + 4] & 0xF) | ((data_a[ib].scales[is - 4] >> 6) << 4));
-                mbyte = uint8_t((data_a[ib].scales[is + 4] >>  4) | ((data_a[ib].scales[is    ] >> 6) << 4));
-            }
+            const uint scidx0 = (is < 4) ? is : (is + 4);
+            const uint scidx1 = (is < 4) ? is : (is - 4);
+            const uint scidxmask1 = (is < 4) ? 0x30 : 0xC0;
+            const uint scidxshift1 = (is < 4) ? 0 : 2;
+            const uint mbidx0 = is + 4;
+            const uint mbidx1 = (is < 4) ? is + 4 : is;
+            const uint mbidxmask0 = (is < 4) ? 0xF : 0xF0;
+            const uint mbidxshift0 = (is < 4) ? 0 : 4;
+            const uint mbidxmask1 = (is < 4) ? 0x30 : 0xC0;
+            const uint mbidxshift1 = (is < 4) ? 0 : 2;
+
+            const uint8_t sc    = uint8_t((data_a[ib].scales[scidx0] & 0xF) | ((data_a[ib].scales[scidx1] & scidxmask1) >> scidxshift1));
+            const uint8_t mbyte = uint8_t(((data_a[ib].scales[mbidx0] & mbidxmask0) >> mbidxshift0) | ((data_a[ib].scales[mbidx1] & mbidxmask1) >> mbidxshift1));
+
             const float d = loadd.x * sc;
             const float m = -loadd.y * mbyte;
 
@@ -363,7 +422,7 @@ void main() {
             buf_a[buf_idx + 1] = FLOAT_TYPE(fma(d, float((data_a[ib].qs[qsi + 1] >> (b * 4)) & 0xF) + float((data_a[ib].qh[qhi + 1] & hm) != 0 ? 16 : 0), m));
 #elif defined(DATA_A_Q6_K)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a * LOAD_VEC_A;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A;
 
             const uint ib = idx / 128;                         // 2 values per idx
             const uint iqs = idx % 128;                        // 0..127
@@ -382,7 +441,7 @@ void main() {
             buf_a[buf_idx + 1] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32));
 #elif defined(DATA_A_IQ4_NL)
             const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a;
-            const uint buf_idx = (loadc_a + l) * (BK+1) + loadr_a;
+            const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a;
 
             const uint ib = idx / 16;
             const uint iqs = idx & 0xF;
@@ -403,7 +462,7 @@ void main() {
 #else
             const uint idx = pos_b + (loadc_b + l) * p.stride_b / LOAD_VEC_B + loadr_b;
 #endif
-            const uint buf_idx = (loadc_b + l) * (BK+1) + loadr_b * LOAD_VEC_B;
+            const uint buf_idx = (loadc_b + l) * SHMEM_STRIDE + loadr_b * LOAD_VEC_B;
             buf_b[buf_idx + 0] = FLOAT_TYPE(data_b[idx][0].x);
             buf_b[buf_idx + 1] = FLOAT_TYPE(data_b[idx][0].y);
             buf_b[buf_idx + 2] = FLOAT_TYPE(data_b[idx][0].z);
@@ -419,24 +478,24 @@ void main() {
 #else
             const uint idx = pos_b + (loadc_b + l) * p.stride_b / LOAD_VEC_B + loadr_b;
 #endif
-            const uint buf_idx = (loadc_b + l) * (BK+1) + loadr_b * LOAD_VEC_B;
+            const uint buf_idx = (loadc_b + l) * SHMEM_STRIDE + loadr_b * LOAD_VEC_B;
             buf_b[buf_idx + 0] = FLOAT_TYPE(data_b[idx].x);
             buf_b[buf_idx + 1] = FLOAT_TYPE(data_b[idx].y);
             buf_b[buf_idx + 2] = FLOAT_TYPE(data_b[idx].z);
             buf_b[buf_idx + 3] = FLOAT_TYPE(data_b[idx].w);
 #elif !MUL_MAT_ID
             if (ic * BN + loadc_b + l < p.N && block + loadr_b < end_k) {
-                buf_b[(loadc_b + l) * (BK+1) + loadr_b] = FLOAT_TYPE(data_b[pos_b + (loadc_b + l) * p.stride_b + loadr_b]);
+                buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(data_b[pos_b + (loadc_b + l) * p.stride_b + loadr_b]);
             } else {
-                buf_b[(loadc_b + l) * (BK+1) + loadr_b] = FLOAT_TYPE(0.0f);
+                buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(0.0f);
             }
 #else
             const uint row_i = ic * BN + loadc_b + l;
             if (row_i < _ne1) {
                 const u16vec2 row_idx = row_ids[row_i];
-                buf_b[(loadc_b + l) * (BK+1) + loadr_b] = FLOAT_TYPE(data_b[pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + loadr_b]);
+                buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(data_b[pos_b + row_idx.y * p.batch_stride_b + (row_idx.x % p.ne11) * p.stride_b + loadr_b]);
             } else {
-                buf_b[(loadc_b + l) * (BK+1) + loadr_b] = FLOAT_TYPE(0.0f);
+                buf_b[(loadc_b + l) * SHMEM_STRIDE + loadr_b] = FLOAT_TYPE(0.0f);
             }
 #endif
         }
@@ -446,16 +505,30 @@ void main() {
         pos_a += BK / LOAD_VEC_A;
         pos_b += BK / LOAD_VEC_B;
 
-        for (uint i = 0; i < BK; i++) {
+#ifdef COOPMAT
+        [[unroll]] for (uint i = 0; i < BK; i += TK) {
+            [[unroll]] for (uint cm_row = 0; cm_row < cms_per_row; cm_row++) {
+                // Load from shared into cache
+                coopMatLoad(cache_a, buf_a, (warp_r * WM + cm_row * TM) * SHMEM_STRIDE + i, SHMEM_STRIDE, gl_CooperativeMatrixLayoutRowMajor);
+
+                [[unroll]] for (uint cm_col = 0; cm_col < cms_per_col; cm_col++) {
+                    coopMatLoad(cache_b, buf_b, (warp_c * WN + cm_col * TN) * SHMEM_STRIDE + i, SHMEM_STRIDE, gl_CooperativeMatrixLayoutColumnMajor);
+
+                    sums[cm_col * cms_per_row + cm_row] = coopMatMulAdd(cache_a, cache_b, sums[cm_col * cms_per_row + cm_row]);
+                }
+            }
+        }
+#else
+        [[unroll]] for (uint i = 0; i < BK; i++) {
             // Load from shared into cache
             [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {
                 [[unroll]] for (uint j = 0; j < TM; j++) {
-                    cache_a[wsir * TM + j] = buf_a[(warp_r * WM + wsir * WSUBM + tiwr * TM + j) * (BK+1) + i];
+                    cache_a[wsir * TM + j] = buf_a[(warp_r * WM + wsir * WSUBM + tiwr * TM + j) * SHMEM_STRIDE + i];
                 }
             }
             [[unroll]] for (uint wsic = 0; wsic < WNITER; wsic++) {
                 [[unroll]] for (uint j = 0; j < TN; j++) {
-                    cache_b[wsic * TN + j] = buf_b[(warp_c * WN + wsic * WSUBN + tiwc * TN + j) * (BK+1) + i];
+                    cache_b[wsic * TN + j] = buf_b[(warp_c * WN + wsic * WSUBN + tiwc * TN + j) * SHMEM_STRIDE + i];
                 }
             }
 
@@ -464,12 +537,13 @@ void main() {
                     [[unroll]] for (uint cc = 0; cc < TN; cc++) {
                         [[unroll]] for (uint cr = 0; cr < TM; cr++) {
                             const uint sums_idx = (wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr;
-                            sums[sums_idx] = fma(float(cache_a[wsir * TM + cr]), float(cache_b[wsic * TN + cc]), sums[sums_idx]);
+                            sums[sums_idx] = fma(ACC_TYPE(cache_a[wsir * TM + cr]), ACC_TYPE(cache_b[wsic * TN + cc]), sums[sums_idx]);
                         }
                    }
                }
            }
        }
+#endif
 
        barrier();
    }
 
@@ -481,6 +555,54 @@ void main() {
     const uint offsets = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z;
 #endif
 
+#ifdef COOPMAT
+#ifdef MUL_MAT_ID
+    [[unroll]] for (uint cm_row = 0; cm_row < cms_per_row; cm_row++) {
+        [[unroll]] for (uint cm_col = 0; cm_col < cms_per_col; cm_col++) {
+            coopMatStore(sums[cm_col * cms_per_row + cm_row], coopmat_stage, warp_i * TM * TN, TM, gl_CooperativeMatrixLayoutColumnMajor);
+
+            [[unroll]] for (uint col = 0; col < BN; col += storestride) {
+                const uint row_i = dc + cm_col * TN + col + store_c;
+                if (row_i >= _ne1) break;
+
+                const u16vec2 row_idx = row_ids[row_i];
+
+                data_d[row_idx.y * p.batch_stride_d + row_idx.x * p.stride_d + dr + cm_row * TM + store_r] = D_TYPE(coopmat_stage[warp_i * TM * TN + (col + store_c) * TM + store_r]);
+            }
+        }
+    }
+#else
+    const bool is_aligned = p.stride_d % 4 == 0;  // Assumption: D_TYPE == float
+
+    [[unroll]] for (uint cm_row = 0; cm_row < cms_per_row; cm_row++) {
+        [[unroll]] for (uint cm_col = 0; cm_col < cms_per_col; cm_col++) {
+            const bool is_in_bounds = dr + (cm_row + 1) * TM <= p.M && dc + (cm_col + 1) * TN <= p.N;
+
+            if (is_aligned && is_in_bounds) {
+                // Full coopMat is within bounds and stride_d is aligned with 16B
+                coopmat<D_TYPE, gl_ScopeSubgroup, TM, TN, gl_MatrixUseAccumulator> cm_dtype = coopmat<D_TYPE, gl_ScopeSubgroup, TM, TN, gl_MatrixUseAccumulator>(sums[cm_col * cms_per_row + cm_row]);
+                coopMatStore(cm_dtype, data_d, offsets + (dc + cm_col * TN) * p.stride_d + dr + cm_row * TM, p.stride_d, gl_CooperativeMatrixLayoutColumnMajor);
+            } else if (is_in_bounds) {
+                // Full coopMat is within bounds, but stride_d is not aligned
+                coopMatStore(sums[cm_col * cms_per_row + cm_row], coopmat_stage, warp_i * TM * TN, TM, gl_CooperativeMatrixLayoutColumnMajor);
+
+                [[unroll]] for (uint col = 0; col < TN; col += storestride) {
+                    data_d[offsets + (dc + cm_col * TN + col + store_c) * p.stride_d + dr + cm_row * TM + store_r] = D_TYPE(coopmat_stage[warp_i * TM * TN + (col + store_c) * TM + store_r]);
+                }
+            } else if (dr + cm_row * TM < p.M && dc + cm_col * TN < p.N) {
+                // Partial coopMat is within bounds
+                coopMatStore(sums[cm_col * cms_per_row + cm_row], coopmat_stage, warp_i * TM * TN, TM, gl_CooperativeMatrixLayoutColumnMajor);
+
+                [[unroll]] for (uint col = 0; col < TN; col += storestride) {
+                    if (dr + cm_row * TM + store_r < p.M && dc + cm_col * TN + col + store_c < p.N) {
+                        data_d[offsets + (dc + cm_col * TN + col + store_c) * p.stride_d + dr + cm_row * TM + store_r] = D_TYPE(coopmat_stage[warp_i * TM * TN + (col + store_c) * TM + store_r]);
+                    }
+                }
+            }
+        }
+    }
+#endif // MUL_MAT_ID
+#else
     [[unroll]] for (uint wsic = 0; wsic < WNITER; wsic++) {
         [[unroll]] for (uint wsir = 0; wsir < WMITER; wsir++) {
@@ -492,7 +614,7 @@ void main() {
                     if (row_i >= _ne1) break;
 
                     const u16vec2 row_idx = row_ids[row_i];
-#endif
+#endif // MUL_MAT_ID
                     [[unroll]] for (uint cr = 0; cr < TM; cr++) {
 #ifdef MUL_MAT_ID
                         data_d[row_idx.y * p.batch_stride_d + row_idx.x * p.stride_d + dr_warp + cr] = D_TYPE(sums[(wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr]);
@@ -500,9 +622,10 @@ void main() {
                         if (dr_warp + cr < p.M && dc_warp + cc < p.N) {
                             data_d[offsets + (dc_warp + cc) * p.stride_d + dr_warp + cr] = D_TYPE(sums[(wsic * TN + cc) * (WMITER * TM) + wsir * TM + cr]);
                         }
-#endif
+#endif // MUL_MAT_ID
                    }
                }
            }
        }
+#endif // COOPMAT
 }
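Worth noting in the hunk above: the hard-coded (BK+1) row stride becomes SHMEM_STRIDE, which is BK+8 under COOPMAT and BK+1 otherwise. A padded stride is the usual device against shared-memory bank conflicts on column walks, and the wider pad keeps each row 16-byte aligned for coopMatLoad. A C++ sketch of the indexing (the concrete BK value here is an assumption; in the shader it is a specialization constant):

    #include <cstddef>

    // Row-major BM x BK tile stored with a padded row stride: element (r, c).
    // Padding by 1 makes column accesses land in different banks; padding by 8
    // additionally keeps every row 16B-aligned for cooperative-matrix loads.
    constexpr size_t BK = 16;
    constexpr size_t SHMEM_STRIDE = BK + 8;

    inline size_t tile_index(size_t r, size_t c) {
        return r * SHMEM_STRIDE + c;
    }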
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp
new file mode 100644
index 0000000000000..cbfa5dce1b091
--- /dev/null
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp
@@ -0,0 +1,328 @@
+#version 450
+
+#extension GL_EXT_control_flow_attributes : enable
+#extension GL_EXT_shader_16bit_storage : require
+
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
+#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
+
+#extension GL_KHR_memory_scope_semantics : enable
+#extension GL_KHR_cooperative_matrix : enable
+#extension GL_NV_cooperative_matrix2 : enable
+#extension GL_EXT_buffer_reference : enable
+#extension GL_KHR_shader_subgroup_ballot : enable
+#extension GL_KHR_shader_subgroup_vote : enable
+
+#include "types.comp"
+
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
+
+layout (constant_id = 1) const uint BM = 64;
+layout (constant_id = 2) const uint BN = 64;
+layout (constant_id = 3) const uint BK = 16;  // Assumed to be 32 if working with a quant
+
+layout (push_constant) uniform parameter
+{
+    uint M;
+    uint N;
+    uint K;
+    uint stride_a;
+    uint stride_b;
+    uint stride_d;
+
+    uint batch_stride_a;
+    uint batch_stride_b;
+    uint batch_stride_d;
+
+#ifdef MUL_MAT_ID
+    uint nei0;
+    uint nei1;
+    uint nbi1;
+    uint ne11;
+#else
+    uint k_split;
+    uint ne02;
+    uint ne12;
+    uint broadcast2;
+    uint broadcast3;
+#endif
+} p;
+
+
+layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
+layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
+layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};
+
+#if QUANT_K > 1
+#define DECODEFUNCA , dequantFuncA
+#define MAT_A_TYPE float16_t
+
+#include "dequant_funcs_cm2.comp"
+
+#else
+#define DECODEFUNCA
+#define MAT_A_TYPE A_TYPE
+#endif
+
+#define MAT_B_TYPE B_TYPE
+
+#ifdef MUL_MAT_ID
+layout (binding = 3) readonly buffer IDS {int data_ids[];};
+
+shared u16vec4 row_ids[3072];
+
+layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufB {
+    B_TYPE b[];
+};
+
+uint _ne1;
+shared uint _ne1_sh;
+
+B_TYPE decodeFuncB(const in decodeBufB bl, const in uint blockCoords[2], const in uint coordInBlock[2])
+{
+    const uint row_i = blockCoords[0];
+
+    if (row_i >= _ne1) {
+        return B_TYPE(0.0);
+    }
+
+    const u16vec4 row_idx = row_ids[row_i];
+    B_TYPE ret = data_b[row_idx.y * p.batch_stride_b + row_idx.x * p.stride_b + blockCoords[1]];
+
+    return ret;
+}
+
+D_TYPE perElemOpD(const in uint32_t r, const in uint32_t c, const in D_TYPE elem, const in uint32_t ir, const in uint32_t ic)
+{
+    uint dr = ir * BM + r;
+    uint dc = ic * BN + c;
+
+    if (dr < p.M && dc < _ne1) {
+        uint row_i = dc;
+        const u16vec4 row_idx = row_ids[row_i];
+        data_d[row_idx.y * p.batch_stride_d + row_idx.z * p.stride_d + dr] = elem;
+    }
+    return elem;
+}
+
+#endif
+
+void main() {
+#if defined(DATA_A_IQ4_NL)
+    init_iq4nl_shmem();
+#endif
+
+#ifdef MUL_MAT_ID
+    const uint expert_idx = gl_GlobalInvocationID.z;
+#else
+    const uint batch_idx = gl_GlobalInvocationID.z;
+
+    const uint i13 = batch_idx / p.ne12;
+    const uint i12 = batch_idx % p.ne12;
+
+    const uint i03 = i13 / p.broadcast3;
+    const uint i02 = i12 / p.broadcast2;
+
+    const uint batch_idx_a = i03 * p.ne02 + i02;
+#endif
+
+    const uint blocks_m = (p.M + BM - 1) / BM;
+    const uint ir = gl_WorkGroupID.x % blocks_m;
+    const uint ik = gl_WorkGroupID.x / blocks_m;
+    const uint ic = gl_WorkGroupID.y;
+
+#ifdef MUL_MAT_ID
+    // Spread the search across all elements in the first subgroup
+    if (gl_SubgroupID == 0) {
+        _ne1 = 0;
+        uint num_elements = p.nei1 * p.nei0;
+
+        for (uint i = gl_SubgroupInvocationID; subgroupAny(i < num_elements); i += gl_SubgroupSize) {
+            bool in_range = i < num_elements;
+            uint ii0 = i % p.nei0;
+            uint ii1 = i / p.nei0;
+            uint id = in_range ? data_ids[ii1*p.nbi1 + ii0] : 0;
+            uvec4 ballot = subgroupBallot(in_range && id == expert_idx);
+            uint idx = subgroupBallotExclusiveBitCount(ballot);
+            if (in_range && id == expert_idx) {
+                row_ids[_ne1 + idx] = u16vec4(ii0 % p.ne11, ii1, ii0, 0);
+            }
+            _ne1 += subgroupBallotBitCount(ballot);
+        }
+        _ne1_sh = _ne1;
+    }
+
+    barrier();
+
+    _ne1 = _ne1_sh;
+
+    // Workgroup has no work
+    if (ic * BN >= _ne1) return;
+#endif
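The MUL_MAT_ID block above compacts matching rows with subgroup ballots: each lane tests its element, subgroupBallotExclusiveBitCount gives the lane's slot among the matches, and subgroupBallotBitCount advances _ne1. A serial C++ model of that compaction (illustrative only; the GPU version performs each loop body across all lanes in parallel):

    #include <cstdint>
    #include <vector>

    // Every element whose expert id matches expert_idx is appended to row_ids;
    // the append position plays the role of _ne1 + exclusive-bit-count.
    std::vector<uint32_t> gather_rows(const std::vector<uint32_t>& ids, uint32_t expert_idx) {
        std::vector<uint32_t> row_ids;
        for (uint32_t i = 0; i < ids.size(); ++i) {
            if (ids[i] == expert_idx) {
                row_ids.push_back(i);  // slot = count of earlier matches
            }
        }
        return row_ids;
    }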
+#ifdef MUL_MAT_ID
+    uint start_k = 0;
+    const uint end_k = p.K;
+#else
+    uint start_k = ik * p.k_split;
+    const uint end_k = min(p.K, (ik + 1) * p.k_split);
+#endif
+
+    coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator> sum;
+    sum = coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator>(0.0);
+
+#ifdef MUL_MAT_ID
+    uint pos_a = (expert_idx * p.batch_stride_a) / QUANT_K;
+    uint pos_b = 0;
+#else
+    uint pos_a = (batch_idx_a * p.batch_stride_a) / QUANT_K;
+    uint pos_b = batch_idx * p.batch_stride_b;
+#endif
+
+    uint stride_a = p.stride_a / QUANT_K;
+    uint stride_b = p.stride_b;
+
+    // Hint to the compiler that values are aligned (want 16B alignment).
+    // Quants are always block-aligned, no alignment needed.
+#if ALIGNED
+#if QUANT_K == 1
+    stride_a &= ~7;
+#endif
+    stride_b &= ~7;
+#endif
+
+    // Create layouts for both clamped and unclamped accesses
+    tensorLayoutNV<2> tensorLayoutA = createTensorLayoutNV(2);
+    tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutAClamp = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
+    tensorLayoutNV<2> tensorLayoutB = createTensorLayoutNV(2);
+    tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutBClamp = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
+    tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutD = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
+
+#if QUANT_K > 1
+    tensorLayoutA = setTensorLayoutBlockSizeNV(tensorLayoutA, 1, QUANT_K);
+    tensorLayoutAClamp = setTensorLayoutBlockSizeNV(tensorLayoutAClamp, 1, QUANT_K);
+#endif
+
+    // Use end_k rather than p.K as the dimension because that's what
+    // we need to bound check against when using split_k
+    tensorLayoutA = setTensorLayoutDimensionNV(tensorLayoutA, p.M, end_k);
+    tensorLayoutB = setTensorLayoutDimensionNV(tensorLayoutB, p.N, end_k);
+    tensorLayoutD = setTensorLayoutDimensionNV(tensorLayoutD, p.N, p.M);
+    tensorLayoutAClamp = setTensorLayoutDimensionNV(tensorLayoutAClamp, p.M, end_k);
+    tensorLayoutBClamp = setTensorLayoutDimensionNV(tensorLayoutBClamp, p.N, end_k);
+
+    tensorViewNV<2, false, 1, 0> tensorViewTranspose = createTensorViewNV(2, false, 1, 0);
+
+#if !defined(MUL_MAT_ID)
+    // Detect a fast path where all loads are entirely in bounds and no clamping is required
+    if ((ir + 1) * BM <= p.M && (ic + 1) * BN <= p.N && (start_k % BK) == 0 && (end_k % BK) == 0 &&
+#if QUANT_K == 1
+        (stride_a % 8) == 0 &&
+#endif
+        (stride_b % 8) == 0 && (start_k % 8) == 0) {
+        // Hint to the compiler that values are aligned (want 16B alignment)
+        start_k &= ~7;
+        stride_b &= ~7;
+#if QUANT_K == 1
+        stride_a &= ~7;
+#endif
+
+        tensorLayoutA = setTensorLayoutStrideNV(tensorLayoutA, stride_a, 1);
+        tensorLayoutB = setTensorLayoutStrideNV(tensorLayoutB, stride_b, 1);
+
+        uint k_iters = (end_k - start_k + BK - 1) / BK;
+
+        for (uint block_k = start_k, i = 0; i < k_iters; block_k += BK, ++i) {
+
+            coopmat<MAT_A_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
+            coopmat<MAT_B_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b;
+
+            coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA);
+            coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+
+            coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose);
+            coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+
+            sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+        }
+    } else
+#endif // !defined(MUL_MAT_ID)
+    {
+        tensorLayoutA = setTensorLayoutStrideNV(tensorLayoutA, stride_a, 1);
+
+        tensorLayoutAClamp = setTensorLayoutStrideNV(tensorLayoutAClamp, stride_a, 1);
+
+        tensorLayoutB = setTensorLayoutStrideNV(tensorLayoutB, stride_b, 1);
+
+        tensorLayoutBClamp = setTensorLayoutStrideNV(tensorLayoutBClamp, stride_b, 1);
+
+        [[dont_unroll]]
+        for (uint block_k = start_k; block_k < end_k; block_k += BK) {
+
+            coopmat<MAT_A_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
+            coopmat<MAT_B_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b;
+            coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a_ft;
+            coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b_ft;
+
+            // Clamping is expensive, so detect different code paths for each combination
+            // of A and B needing clamping.
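The & ~7 masks above deserve a note: the fast path has already verified stride % 8 == 0, so the masks change nothing at runtime; they only make the alignment statically provable to the compiler (8 fp16 elements is 16 bytes). A sketch of the idiom:

    #include <cstdint>

    // No-op when stride is already a multiple of 8 elements, but the compiler
    // can now assume 16B alignment for float16_t data along each row.
    uint32_t hint_aligned(uint32_t stride) {
        return stride & ~7u;
    }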
+            bool unclampedA = (ir + 1) * BM <= p.M && block_k + BK <= end_k && (block_k % 8) == 0;
+#ifdef MUL_MAT_ID
+            bool unclampedB = true;
+#else
+            bool unclampedB = (ic + 1) * BN <= p.N && block_k + BK <= end_k && (block_k % 8) == 0;
+#endif
+            if (unclampedA && unclampedB) {
+                coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, (block_k & ~7), BK) DECODEFUNCA);
+#ifdef MUL_MAT_ID
+                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose, decodeFuncB);
+#else
+                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, (block_k & ~7), BK), tensorViewTranspose);
+#endif
+                mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+                mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+                sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+            } else if (unclampedA && !unclampedB) {
+                coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, (block_k & ~7), BK) DECODEFUNCA);
+                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutBClamp, ic * BN, BN, block_k, BK), tensorViewTranspose);
+
+                mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+                mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+                sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+            } else if (!unclampedA && unclampedB) {
+                coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutAClamp, ir * BM, BM, block_k, BK) DECODEFUNCA);
+#ifdef MUL_MAT_ID
+                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose, decodeFuncB);
+#else
+                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, (block_k & ~7), BK), tensorViewTranspose);
+#endif
+                mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+                mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+                sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+            } else if (!unclampedA && !unclampedB) {
+                coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutAClamp, ir * BM, BM, block_k, BK) DECODEFUNCA);
+                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutBClamp, ic * BN, BN, block_k, BK), tensorViewTranspose);
+
+                mat_a_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA>(mat_a);
+                mat_b_ft = coopmat<FLOAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB>(mat_b);
+                sum = coopMatMulAdd(mat_a_ft, mat_b_ft, sum);
+            }
+        }
+    }
+
+    // Convert from ACC_TYPE to D_TYPE
+    coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator> mat_d;
+    mat_d = coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator>(sum);
+
+#ifdef MUL_MAT_ID
+    // Call callback to store each element, remapping row through shared memory
+    coopMatPerElementNV(mat_d, mat_d, perElemOpD, ir, ic);
+#else
+    tensorLayoutD = setTensorLayoutStrideNV(tensorLayoutD, p.stride_d, 1);
+
+    uint pos_d = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z;
+    coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BN, ir * BM, BM), tensorViewTranspose);
+#endif
+}
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp b/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp
index ea89542261cc4..574b51ca55389 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/rope_head.comp
@@ -1,6 +1,11 @@
 #include "types.comp"
 
 #extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_spirv_intrinsics: enable
+
+#if RTE16
+spirv_execution_mode(capabilities = [4467], 4462, 16); // RoundingModeRTE, 16 bits
+#endif
 
 layout(local_size_x = 1, local_size_y = 256, local_size_z = 1) in;
 
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp b/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp
index 0bd51ecab5870..a25808e16568a 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp
@@ -1,6 +1,7 @@
 #version 450
 
-#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#extension GL_EXT_control_flow_attributes : enable
 
 layout (push_constant) uniform parameter
 {
@@ -11,14 +12,13 @@ layout (push_constant) uniform parameter
     float m0;
     float m1;
     uint n_head_log2;
+    uint nrows_x;
 } p;
 
 #include "types.comp"
 
-#extension GL_EXT_control_flow_attributes : enable
-#define BLOCK_SIZE 512
-
-layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+layout(constant_id = 0) const uint BLOCK_SIZE = 32;
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;
 
 layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
 layout (binding = 1) readonly buffer Y {B_TYPE data_b[];};
@@ -26,10 +26,17 @@ layout (binding = 2) buffer D {D_TYPE data_d[];};
 
 shared FLOAT_TYPE vals[BLOCK_SIZE];
 
-void main() {
+// num_iters is the number of BLOCK_SIZE loop iterations we need to iterate
+// over all the columns. The main function tries to pass a constant here,
+// as if it were a template function, to allow unrolling.
+void soft_max(uint num_iters) {
     const uint tid = gl_LocalInvocationID.x;
     const uint rowx = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x;
-    const uint rowy = rowx % p.KY;
+    const uint rowy = (p.KY > 0) ? (rowx % p.KY) : 0;
+
+    if (rowx >= p.nrows_x) {
+        return;
+    }
 
     float slope = 1.0f;
 
@@ -46,19 +53,39 @@ void main() {
     // Find max
     FLOAT_TYPE max_val = uintBitsToFloat(0xFF800000);
 
-    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
+    // Cache values while we compute the max, so we don't need to read them
+    // again when we're ready to compute exp(x-max).
+    const uint DATA_CACHE_SIZE = 16;
+    FLOAT_TYPE data_cache[DATA_CACHE_SIZE];
+
+    [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
         const uint col = col0 + tid;
 
-        if (col >= p.KX) {
-            break;
+        FLOAT_TYPE a = FLOAT_TYPE(0);
+        if (col < p.KX) {
+            a = data_a[rowx * p.KX + col];
+        }
+
+        FLOAT_TYPE b = FLOAT_TYPE(0);
+        if (p.KY > 0 && col < p.KX) {
+            b = data_b[rowy * p.KX + col];
         }
 
-        max_val = max(max_val, FLOAT_TYPE(data_a[rowx * p.KX + col]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)));
+        FLOAT_TYPE v = a * p.scale + slope * b;
+
+        if (col < p.KX) {
+            max_val = max(max_val, v);
+        }
+
+        if (idx < DATA_CACHE_SIZE) {
+            data_cache[idx] = v;
+        }
     }
 
-    vals[tid] = max_val;
+    // reduce across the workgroup
+    vals[tid] = max_val;
     barrier();
-    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+    [[unroll]] for (uint s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
         if (tid < s) {
             vals[tid] = max(vals[tid], vals[tid + s]);
         }
@@ -68,39 +95,80 @@ void main() {
     max_val = vals[0];
     barrier();
 
-    // Sum up values
-    vals[tid] = FLOAT_TYPE(0.0f);
+    FLOAT_TYPE sum = FLOAT_TYPE(0.0f);
 
-    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
+    // Compute sum{exp(x - max)}
+    [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
         const uint col = col0 + tid;
 
         if (col >= p.KX) {
             break;
         }
 
+        // compute exp(a*scale+b*slope), add it to sum, and cache the new value
+        // in data_cache if possible.
         const uint i = rowx * p.KX + col;
-        const FLOAT_TYPE val = exp(FLOAT_TYPE(data_a[i]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)) - max_val);
-        vals[tid] += val;
-        data_d[i] = D_TYPE(val);
+        FLOAT_TYPE val;
+        if (idx < DATA_CACHE_SIZE) {
+            val = exp(data_cache[idx] - max_val);
+        } else {
+            val = exp(FLOAT_TYPE(data_a[i]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)) - max_val);
+        }
+        sum += val;
+        if (idx < DATA_CACHE_SIZE) {
+            data_cache[idx] = val;
+        } else {
+            data_d[i] = D_TYPE(val);
+        }
     }
 
+    // reduce across the workgroup
+    vals[tid] = sum;
     barrier();
-    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+    [[unroll]] for (uint s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
         if (tid < s) {
             vals[tid] += vals[tid + s];
         }
         barrier();
     }
+    sum = vals[0];
 
-    const D_TYPE divisor = D_TYPE(vals[0]);
+    FLOAT_TYPE rcpdivisor = 1.0/sum;
 
-    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
+    [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
         const uint col = col0 + tid;
 
         if (col >= p.KX) {
-            break;
+            continue;
+        }
+
+        if (idx < DATA_CACHE_SIZE) {
+            data_d[rowx*p.KX + col] = D_TYPE(data_cache[idx] * rcpdivisor);
+        } else {
+            data_d[rowx*p.KX + col] *= D_TYPE(rcpdivisor);
         }
+    }
+}
 
-        data_d[rowx*p.KX + col] /= divisor;
+void main() {
+    // instantiate the soft_max function for several different
+    // dimensions, to allow loop unrolling
+    uint num_blocks = (p.KX + BLOCK_SIZE - 1) / BLOCK_SIZE;
+    if (num_blocks > 32) {
+        soft_max(num_blocks);
+    } else if (num_blocks > 16) {
+        soft_max(32);
+    } else if (num_blocks > 8) {
+        soft_max(16);
+    } else if (num_blocks > 4) {
+        soft_max(8);
+    } else if (num_blocks == 4) {
+        soft_max(4);
+    } else if (num_blocks == 3) {
+        soft_max(3);
+    } else if (num_blocks == 2) {
+        soft_max(2);
+    } else if (num_blocks == 1) {
+        soft_max(1);
    }
 }
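soft_max.comp is the classic numerically stable three-pass softmax, now with a per-thread cache of the first 16 values so the later passes can skip re-reading global memory. The scalar shape of the algorithm, as a C++ sketch (caching and the workgroup reductions omitted):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Pass 1: find the max. Pass 2: sum exp(x - max); subtracting the max
    // keeps exp() from overflowing. Pass 3: scale by 1/sum, one divide total.
    std::vector<float> softmax(const std::vector<float>& x) {
        float max_val = -INFINITY;
        for (float v : x) max_val = std::max(max_val, v);

        std::vector<float> y(x.size());
        float sum = 0.0f;
        for (size_t i = 0; i < x.size(); ++i) {
            y[i] = std::exp(x[i] - max_val);
            sum += y[i];
        }

        const float rcp = 1.0f / sum;  // mirrors rcpdivisor in the shader
        for (float& v : y) v *= rcp;
        return y;
    }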
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp b/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp
index 74630dc7fef12..495f966bdc6e5 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/tanh.comp
@@ -16,6 +16,5 @@ void main() {
     if (i >= p.KX) {
         return;
     }
-
-    data_d[i] = D_TYPE(tanh(data_a[i]));
+    data_d[i] = D_TYPE(1. - 2. / (exp(2.*data_a[i]) + 1.));
}
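The tanh rewrite rests on a standard identity, with the last form being what the shader computes:

    \tanh x = \frac{e^{2x} - 1}{e^{2x} + 1} = 1 - \frac{2}{e^{2x} + 1}

The rearranged form stays well-behaved as exp(2x) grows: the fraction simply goes to zero and the result approaches 1, instead of producing an inf/inf-style intermediate.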
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp b/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp
new file mode 100644
index 0000000000000..28eb24e11f871
--- /dev/null
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat2_support.comp
@@ -0,0 +1,7 @@
+#version 460
+
+#extension GL_NV_cooperative_matrix2 : require
+
+void main()
+{
+}
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp
index 21dce72fc7dfb..eecc47f3a9764 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp
@@ -1,6 +1,8 @@
-#if !defined(DATA_A_F32) && !defined(DATA_A_F16)
-#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
-#endif
+
+#if !defined(GGML_TYPES_COMP)
+#define GGML_TYPES_COMP
+
+#extension GL_EXT_shader_explicit_arithmetic_types : require
 
 #if defined(DATA_A_F32)
 #define QUANT_K 1
@@ -28,24 +30,29 @@
 #endif
 #endif
 
-#if defined(DATA_A_Q4_0)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 32
-#define QUANT_R 2
+#define QUANT_K_Q4_0 32
+#define QUANT_R_Q4_0 2
 
 struct block_q4_0
 {
     float16_t d;
     uint8_t qs[16];
 };
 
+struct block_q4_0_packed16
+{
+    float16_t d;
+    uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q4_0)
+#define QUANT_K QUANT_K_Q4_0
+#define QUANT_R QUANT_R_Q4_0
 #define A_TYPE block_q4_0
+#define A_TYPE_PACKED16 block_q4_0_packed16
 #endif
 
-#if defined(DATA_A_Q4_1)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 32
-#define QUANT_R 2
+#define QUANT_K_Q4_1 32
+#define QUANT_R_Q4_1 2
 
 struct block_q4_1
 {
@@ -54,14 +61,22 @@ struct block_q4_1
     uint8_t qs[16];
 };
 
+struct block_q4_1_packed16
+{
+    float16_t d;
+    float16_t m;
+    uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q4_1)
+#define QUANT_K QUANT_K_Q4_1
+#define QUANT_R QUANT_R_Q4_1
 #define A_TYPE block_q4_1
+#define A_TYPE_PACKED16 block_q4_1_packed16
 #endif
 
-#if defined(DATA_A_Q5_0)
-#extension GL_EXT_shader_16bit_storage : require
-#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
-#define QUANT_K 32
-#define QUANT_R 2
+#define QUANT_K_Q5_0 32
+#define QUANT_R_Q5_0 2
 
 struct block_q5_0
 {
@@ -70,14 +85,22 @@ struct block_q5_0
     uint8_t qs[16];
 };
 
+struct block_q5_0_packed16
+{
+    float16_t d;
+    uint16_t qh[2];
+    uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q5_0)
+#define QUANT_K QUANT_K_Q5_0
+#define QUANT_R QUANT_R_Q5_0
 #define A_TYPE block_q5_0
+#define A_TYPE_PACKED16 block_q5_0_packed16
 #endif
 
-#if defined(DATA_A_Q5_1)
-#extension GL_EXT_shader_16bit_storage : require
-#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
-#define QUANT_K 32
-#define QUANT_R 2
+#define QUANT_K_Q5_1 32
+#define QUANT_R_Q5_1 2
 
 struct block_q5_1
 {
@@ -87,114 +110,214 @@ struct block_q5_1
     uint8_t qs[16];
 };
 
+struct block_q5_1_packed16
+{
+    float16_t d;
+    float16_t m;
+    uint qh;
+    uint16_t qs[16/2];
+};
+
+#if defined(DATA_A_Q5_1)
+#define QUANT_K QUANT_K_Q5_1
+#define QUANT_R QUANT_R_Q5_1
 #define A_TYPE block_q5_1
+#define A_TYPE_PACKED16 block_q5_1_packed16
 #endif
 
-#if defined(DATA_A_Q8_0)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 32
-#define QUANT_R 1
+#define QUANT_K_Q8_0 32
+#define QUANT_R_Q8_0 1
 
 struct block_q8_0
 {
     float16_t d;
     int8_t qs[32];
 };
 
+struct block_q8_0_packed16
+{
+    float16_t d;
+    uint16_t qs[32/2];
+};
+
+#if defined(DATA_A_Q8_0)
+#define QUANT_K QUANT_K_Q8_0
+#define QUANT_R QUANT_R_Q8_0
 #define A_TYPE block_q8_0
+#define A_TYPE_PACKED16 block_q8_0_packed16
 #endif
 
 // K-quants
-#if defined(DATA_A_Q2_K)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 256
+#define QUANT_K_Q2_K 256
 
 struct block_q2_K
 {
-    uint8_t scales[QUANT_K/16];
-    uint8_t qs[QUANT_K/4];
+    uint8_t scales[QUANT_K_Q2_K/16];
+    uint8_t qs[QUANT_K_Q2_K/4];
+    f16vec2 d;
+};
+
+struct block_q2_K_packed16
+{
+    uint16_t scales[QUANT_K_Q2_K/16/2];
+    uint16_t qs[QUANT_K_Q2_K/4/2];
+    f16vec2 d;
+};
+
+struct block_q2_K_packed32
+{
+    uint32_t scales[QUANT_K_Q2_K/16/4];
+    uint32_t qs[QUANT_K_Q2_K/4/4];
     f16vec2 d;
 };
 
+#if defined(DATA_A_Q2_K)
+#define QUANT_K QUANT_K_Q2_K
 #define A_TYPE block_q2_K
+#define A_TYPE_PACKED16 block_q2_K_packed16
+#define A_TYPE_PACKED32 block_q2_K_packed32
 #endif
 
-#if defined(DATA_A_Q3_K)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 256
+#define QUANT_K_Q3_K 256
 
 struct block_q3_K
 {
-    uint8_t hmask[QUANT_K/8];
-    uint8_t qs[QUANT_K/4];
+    uint8_t hmask[QUANT_K_Q3_K/8];
+    uint8_t qs[QUANT_K_Q3_K/4];
     uint8_t scales[12];
     float16_t d;
 };
 
+struct block_q3_K_packed16
+{
+    uint16_t hmask[QUANT_K_Q3_K/8/2];
+    uint16_t qs[QUANT_K_Q3_K/4/2];
+    uint16_t scales[12/2];
+    float16_t d;
+};
+
+#if defined(DATA_A_Q3_K)
+#define QUANT_K QUANT_K_Q3_K
 #define A_TYPE block_q3_K
+#define A_TYPE_PACKED16 block_q3_K_packed16
 #endif
 
-#if defined(DATA_A_Q4_K)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 256
+#define QUANT_K_Q4_K 256
 
 struct block_q4_K
 {
     f16vec2 d;
-    uint8_t scales[3*QUANT_K/64];
-    uint8_t qs[QUANT_K/2];
+    uint8_t scales[3*QUANT_K_Q4_K/64];
+    uint8_t qs[QUANT_K_Q4_K/2];
+};
+
+struct block_q4_K_packed16
+{
+    f16vec2 d;
+    uint16_t scales[3*QUANT_K_Q4_K/64/2];
+    uint16_t qs[QUANT_K_Q4_K/2/2];
 };
 
+struct block_q4_K_packed32
+{
+    f16vec2 d;
+    uint32_t scales[3*QUANT_K_Q4_K/64/4];
+    uint32_t qs[QUANT_K_Q4_K/2/4];
+};
+
+#if defined(DATA_A_Q4_K)
+#define QUANT_K QUANT_K_Q4_K
 #define A_TYPE block_q4_K
+#define A_TYPE_PACKED16 block_q4_K_packed16
+#define A_TYPE_PACKED32 block_q4_K_packed32
 #endif
 
-#if defined(DATA_A_Q5_K)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 256
+#define QUANT_K_Q5_K 256
 
 struct block_q5_K
 {
     f16vec2 d;
     uint8_t scales[12];
-    uint8_t qh[QUANT_K/8];
-    uint8_t qs[QUANT_K/2];
+    uint8_t qh[QUANT_K_Q5_K/8];
+    uint8_t qs[QUANT_K_Q5_K/2];
 };
 
+struct block_q5_K_packed16
+{
+    f16vec2 d;
+    uint16_t scales[12/2];
+    uint16_t qh[QUANT_K_Q5_K/8/2];
+    uint16_t qs[QUANT_K_Q5_K/2/2];
+};
+
+#if defined(DATA_A_Q5_K)
+#define QUANT_K QUANT_K_Q5_K
 #define A_TYPE block_q5_K
+#define A_TYPE_PACKED16 block_q5_K_packed16
 #endif
 
-#if defined(DATA_A_Q6_K)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 256
+#define QUANT_K_Q6_K 256
 
 struct block_q6_K
 {
-    uint8_t ql[QUANT_K/2];
-    uint8_t qh[QUANT_K/4];
-    int8_t scales[QUANT_K/16];
+    uint8_t ql[QUANT_K_Q6_K/2];
+    uint8_t qh[QUANT_K_Q6_K/4];
+    int8_t scales[QUANT_K_Q6_K/16];
     float16_t d;
 };
 
+struct block_q6_K_packed16
+{
+    uint16_t ql[QUANT_K_Q6_K/2/2];
+    uint16_t qh[QUANT_K_Q6_K/4/2];
+    int8_t scales[QUANT_K_Q6_K/16];
+    float16_t d;
+};
+
+#if defined(DATA_A_Q6_K)
+#define QUANT_K QUANT_K_Q6_K
 #define A_TYPE block_q6_K
+#define A_TYPE_PACKED16 block_q6_K_packed16
 #endif
 
 // IQuants
 
-#if defined(DATA_A_IQ4_NL)
-#extension GL_EXT_shader_16bit_storage : require
-#define QUANT_K 32
-#define QUANT_R 2
+#define QUANT_K_IQ4_NL 32
+#define QUANT_R_IQ4_NL 2
 
 struct block_iq4_nl
 {
     float16_t d;
-    uint8_t qs[QUANT_K/2];
+    uint8_t qs[QUANT_K_IQ4_NL/2];
 };
 
-#define A_TYPE block_iq4_nl
+struct block_iq4_nl_packed16
+{
+    float16_t d;
+    uint16_t qs[QUANT_K_IQ4_NL/2/2];
+};
+
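Why the *_packed16 (and *_packed32) mirrors exist: reinterpreting the quant byte arrays as 16- or 32-bit words halves or quarters the number of loads and feeds the SWAR paths directly, exactly as mul_mat_vec_q6_k.comp does with data_a_packed16. A C++ stand-in for the assembly step (hypothetical helper; the GLSL builds the word inline):

    #include <cstdint>

    // Assemble a 32-bit word from two adjacent 16-bit halves of a packed
    // quant array, mirroring qs[i] | (qs[j] << 16) in the shaders.
    uint32_t load_u32_from_u16(const uint16_t* qs, int i, int j) {
        return uint32_t(qs[i]) | (uint32_t(qs[j]) << 16);
    }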
+#if defined(DATA_A_IQ4_NL)
 
-const int8_t kvalues_iq4nl[16] = {
+const int8_t kvalues_iq4nl_const[16] = {
     int8_t(-127), int8_t(-104), int8_t(-83),  int8_t(-65),
     int8_t(-49),  int8_t(-35),  int8_t(-22),  int8_t(-10),
     int8_t(1),    int8_t(13),   int8_t(25),   int8_t(38),
     int8_t(53),   int8_t(69),   int8_t(89),   int8_t(113)
 };
+
+shared FLOAT_TYPE kvalues_iq4nl[16];
+
+void init_iq4nl_shmem()
+{
+    // copy the table into shared memory and sync
+    if (gl_LocalInvocationIndex.x < 16) {
+        kvalues_iq4nl[gl_LocalInvocationIndex.x] = FLOAT_TYPE(kvalues_iq4nl_const[gl_LocalInvocationIndex.x]);
+    }
+    barrier();
+}
+
+#define QUANT_K QUANT_K_IQ4_NL
+#define QUANT_R QUANT_R_IQ4_NL
+#define A_TYPE block_iq4_nl
+#define A_TYPE_PACKED16 block_iq4_nl_packed16
 #endif
+
+#endif // !defined(GGML_TYPES_COMP)
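iq4_nl is a non-linear 4-bit format: each nibble indexes the 16-entry table above rather than being scaled directly, and init_iq4nl_shmem stages that table in shared memory once per workgroup before any thread reads it. A C++ sketch of the lookup itself (the per-block scale handling follows the usual block-quant pattern; treat this as illustrative):

    #include <cstdint>

    // Table values from kvalues_iq4nl_const in the patch.
    static const int8_t kvalues_iq4nl[16] = {
        -127, -104, -83, -65, -49, -35, -22, -10,
           1,   13,  25,  38,  53,  69,  89, 113,
    };

    // One weight: pick a nibble of qs, look it up, scale by the block scale d.
    float dequant_iq4_nl(uint8_t qs, float d, bool high_nibble) {
        const uint8_t idx = high_nibble ? (qs >> 4) : (qs & 0x0F);
        return d * float(kvalues_iq4nl[idx]);
    }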
@@ -265,15 +273,15 @@ void string_to_spv(const std::string& _name, const std::string& in_fname, const } compile_count++; } - compiles.push_back(std::async(string_to_spv_func, _name, in_fname, defines, fp16)); + compiles.push_back(std::async(string_to_spv_func, _name, in_fname, defines, fp16, coopmat, coopmat2, f16acc)); } -void matmul_shaders(bool fp16, bool matmul_id) { - std::string load_vec = fp16 ? "8" : "4"; - std::string aligned_b_type_f32 = fp16 ? "mat2x4" : "vec4"; - std::string aligned_b_type_f16 = fp16 ? "f16mat2x4" : "f16vec4"; +void matmul_shaders(bool fp16, bool matmul_id, bool coopmat, bool coopmat2, bool f16acc) { + std::string load_vec = coopmat2 ? "1" : fp16 ? "8" : "4"; + std::string aligned_b_type_f32 = coopmat2 ? "float" : fp16 ? "mat2x4" : "vec4"; + std::string aligned_b_type_f16 = coopmat2 ? "float16_t" : fp16 ? "f16mat2x4" : "f16vec4"; - std::map base_dict = {{"FLOAT_TYPE", fp16 ? "float16_t" : "float"}}; + std::map base_dict = {{"FLOAT_TYPE", (coopmat2 || fp16) ? "float16_t" : "float"}}; std::string shader_name = "matmul"; if (matmul_id) { @@ -285,21 +293,37 @@ void matmul_shaders(bool fp16, bool matmul_id) { base_dict["FLOAT16"] = "1"; } + base_dict["ACC_TYPE"] = f16acc ? "float16_t" : "float"; + + if (coopmat) { + base_dict["COOPMAT"] = "1"; + } + + base_dict["ACC_TYPE"] = f16acc ? "float16_t" : "float"; + + std::string source_name = coopmat2 ? "mul_mm_cm2.comp" : "mul_mm.comp"; + // Shaders with f16 B_TYPE - string_to_spv(shader_name + "_f32_f16", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16); - string_to_spv(shader_name + "_f32_f16_aligned", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}}), fp16); + string_to_spv(shader_name + "_f32_f16", source_name, merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}, }), fp16, coopmat, coopmat2, f16acc); + string_to_spv(shader_name + "_f32_f16_aligned", source_name, merge_maps(base_dict, {{"DATA_A_F32", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc); - string_to_spv(shader_name + "_f16", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16); - string_to_spv(shader_name + "_f16_aligned", "mul_mm.comp", merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}}), fp16); + string_to_spv(shader_name + "_f16_aligned", source_name, merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"LOAD_VEC_A", load_vec}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc); + string_to_spv(shader_name + "_f16", source_name, merge_maps(base_dict, {{"DATA_A_F16", "1"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}), fp16, coopmat, coopmat2, f16acc); for (const auto& tname : type_names) { std::string data_a_key = "DATA_A_" + to_uppercase(tname); // For unaligned, load one at a time for f32/f16, or two at a time for quants - std::string load_vec_a_unaligned = (tname == "f32" || tname == "f16") ? "1" : "2"; + std::string load_vec_a_unaligned = (coopmat2 || tname == "f32" || tname == "f16") ? "1" : "2"; // For aligned matmul loads - std::string load_vec_a = (tname == "f32" || tname == "f16") ? 
load_vec : "2"; - string_to_spv(shader_name + "_" + tname + "_f32", "mul_mm.comp", merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}), fp16); - string_to_spv(shader_name + "_" + tname + "_f32_aligned", "mul_mm.comp", merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f32}, {"D_TYPE", "float"}}), fp16); + std::string load_vec_a = (coopmat2 || tname == "f32" || tname == "f16") ? load_vec : "2"; + + string_to_spv(shader_name + "_" + tname + "_f32", source_name, merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float"}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}}), fp16, coopmat, coopmat2, f16acc); + string_to_spv(shader_name + "_" + tname + "_f32_aligned", source_name, merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f32}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc); + + if (tname != "f16" && tname != "f32") { + string_to_spv(shader_name + "_" + tname + "_f16", source_name, merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a_unaligned}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}}), fp16, coopmat, coopmat2, f16acc); + string_to_spv(shader_name + "_" + tname + "_f16_aligned", source_name, merge_maps(base_dict, {{data_a_key, "1"}, {"LOAD_VEC_A", load_vec_a}, {"LOAD_VEC_B", load_vec}, {"B_TYPE", aligned_b_type_f16}, {"D_TYPE", "float"}, {"B_IS_FLOAT", "1"}, {"ALIGNED", "1"}}), fp16, coopmat, coopmat2, f16acc); + } } } @@ -307,20 +331,58 @@ void process_shaders() { std::cout << "ggml_vulkan: Generating and compiling shaders to SPIR-V" << std::endl; std::map base_dict = {{"FLOAT_TYPE", "float"}}; - for (const auto& fp16 : {false, true}) { - matmul_shaders(fp16, false); - matmul_shaders(fp16, true); + // matmul + for (const auto& matmul_id : {false, true}) { + // No coopmats + // fp32 + matmul_shaders(false, matmul_id, false, false, false); + + // fp16, fp32acc and fp16acc + matmul_shaders(true, matmul_id, false, false, false); + matmul_shaders(true, matmul_id, false, false, true); + + // Coopmat, fp32acc and fp16acc + matmul_shaders(true, matmul_id, true, false, false); + matmul_shaders(true, matmul_id, true, false, true); + +#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT) + // Coopmat2, fp32acc and fp16acc + matmul_shaders(true, matmul_id, false, true, false); + matmul_shaders(true, matmul_id, false, true, true); +#endif } +#if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT) + // flash attention + for (const auto& f16acc : {false, true}) { + std::string acctype = f16acc ? 
"float16_t" : "float"; + + for (const auto& tname : type_names) { + if (tname == "f32") { + continue; + } + + if (tname == "f16") { + string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn_cm2.comp", + merge_maps(base_dict, {{"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}}), true, false, true, f16acc); + } else { + std::string data_a_key = "DATA_A_" + to_uppercase(tname); + string_to_spv("flash_attn_f32_f16_" + tname, "flash_attn_cm2.comp", + merge_maps(base_dict, {{data_a_key, "1"}, {"Q_TYPE", "float"}, {"D_TYPE", "float"}, {"ACC_TYPE", acctype}, {"DEQUANTFUNC", "dequantFunc"+to_uppercase(tname) }, {"BLOCK_SIZE", "QUANT_K_"+to_uppercase(tname) }}), true, false, true, f16acc); + } + } + } +#endif + for (const auto& tname : type_names) { // mul mat vec std::string data_a_key = "DATA_A_" + to_uppercase(tname); std::string shader = (string_ends_with(tname, "_k")) ? "mul_mat_vec_" + tname + ".comp" : "mul_mat_vec.comp"; - string_to_spv("mul_mat_vec_" + tname + "_f32_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}})); - string_to_spv("mul_mat_vec_" + tname + "_f16_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float16_t"}, {"B_TYPE_VEC4", "f16vec4"}, {"D_TYPE", "float"}})); + string_to_spv("mul_mat_vec_" + tname + "_f32_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float"}, {"B_TYPE_VEC2", "vec2"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}})); + string_to_spv("mul_mat_vec_" + tname + "_f16_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "float16_t"}, {"B_TYPE_VEC2", "f16vec2"}, {"B_TYPE_VEC4", "f16vec4"}, {"D_TYPE", "float"}})); - string_to_spv("mul_mat_vec_id_" + tname + "_f32", shader, merge_maps(base_dict, {{"MUL_MAT_ID", "1"}, {data_a_key, "1"}, {"B_TYPE", "float"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}})); + string_to_spv("mul_mat_vec_id_" + tname + "_f32", shader, merge_maps(base_dict, {{"MUL_MAT_ID", "1"}, {data_a_key, "1"}, {"B_TYPE", "float"}, {"B_TYPE_VEC2", "vec2"}, {"B_TYPE_VEC4", "vec4"}, {"D_TYPE", "float"}})); // Dequant shaders if (tname != "f16") { @@ -331,11 +393,11 @@ void process_shaders() { shader = (tname == "f32" || tname == "f16") ? 
"get_rows.comp" : "get_rows_quant.comp"; if (tname == "f16") { - string_to_spv("get_rows_" + tname, shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}}); + string_to_spv("get_rows_" + tname, shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}, {"OPTIMIZATION_ERROR_WORKAROUND", "1"}})); } else { - string_to_spv("get_rows_" + tname, shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}}); + string_to_spv("get_rows_" + tname, shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float16_t"}})); } - string_to_spv("get_rows_" + tname + "_f32", shader, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float"}}); + string_to_spv("get_rows_" + tname + "_f32", shader, merge_maps(base_dict, {{data_a_key, "1"}, {"B_TYPE", "int"}, {"D_TYPE", "float"}})); } } @@ -399,9 +461,11 @@ void process_shaders() { string_to_spv("rope_norm_f32", "rope_norm.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); string_to_spv("rope_norm_f16", "rope_norm.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); + string_to_spv("rope_norm_f16_rte", "rope_norm.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}}); string_to_spv("rope_neox_f32", "rope_neox.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}}); string_to_spv("rope_neox_f16", "rope_neox.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}}); + string_to_spv("rope_neox_f16_rte", "rope_neox.comp", {{"A_TYPE", "float16_t"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}}); string_to_spv("argsort_f32", "argsort.comp", {{"A_TYPE", "float"}}); @@ -409,11 +473,14 @@ void process_shaders() { string_to_spv("im2col_f32", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); string_to_spv("im2col_f32_f16", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}})); + string_to_spv("im2col_f32_f16_rte", "im2col.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float16_t"}, {"RTE16", "1"}})); string_to_spv("timestep_embedding_f32", "timestep_embedding.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); string_to_spv("pool2d_f32", "pool2d.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"D_TYPE", "float"}})); + string_to_spv("rwkv_wkv6_f32", "wkv6.comp", merge_maps(base_dict, {{"A_TYPE", "float"}})); + for (auto &c : compiles) { c.wait(); } @@ -471,12 +538,19 @@ void write_output_files() { fclose(hdr); fclose(src); } +} int main(int argc, char** argv) { std::map args; - for (int i = 1; i < argc; i += 2) { - if (i + 1 < argc) { - args[argv[i]] = argv[i + 1]; + for (int i = 1; i < argc; ++i) { + std::string arg = argv[i]; + if (arg.rfind("--", 0) == 0) { + if (i + 1 < argc && argv[i + 1][0] != '-') { + args[arg] = argv[i + 1]; + ++i; + } else { + args[arg] = ""; + } } } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp b/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp new file mode 100644 index 0000000000000..35cc6c45f90a5 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/wkv6.comp @@ -0,0 +1,87 @@ +#version 450 + +#extension GL_EXT_control_flow_attributes : require + +#define BLOCK_SIZE 64 +layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in; + +layout(push_constant) uniform Parameters { + uint B; + uint T; + uint C; + uint H; +}; + +layout(binding = 0) readonly buffer KBuf { A_TYPE k[]; }; +layout(binding = 1) readonly buffer VBuf { A_TYPE v[]; }; +layout(binding = 2) readonly buffer 
+layout(binding = 3) readonly buffer TimeFBuf { A_TYPE tf[]; }; +layout(binding = 4) readonly buffer TimeDBuf { A_TYPE td[]; }; +layout(binding = 5) readonly buffer StateBuf { A_TYPE state_in[]; }; +layout(binding = 6) buffer DstBuf { A_TYPE dst[]; }; + +shared A_TYPE _k[BLOCK_SIZE], _r[BLOCK_SIZE], _tf[BLOCK_SIZE], _td[BLOCK_SIZE]; + +void main() { + const uint head_size = BLOCK_SIZE; + const uint batch_id = gl_WorkGroupID.x / H; + const uint head_id = gl_WorkGroupID.x % H; + const uint tid = gl_LocalInvocationID.x; + + const uint state_size = C * head_size; + const uint n_seq_tokens = T / B; + + if (batch_id >= B || head_id >= H) { + return; + } + + A_TYPE state[BLOCK_SIZE]; + [[unroll]] for (uint i = 0; i < head_size; i++) { + state[i] = state_in[batch_id * state_size + head_id * head_size * head_size + + i * head_size + tid]; + } + + barrier(); + _tf[tid] = tf[head_id * head_size + tid]; + barrier(); + + const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid; + const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid; + + for (uint t = start_t; t < end_t; t += C) { + barrier(); + _k[tid] = k[t]; + _r[tid] = r[t]; + _td[tid] = td[t]; + barrier(); + + const A_TYPE v_val = v[t]; + A_TYPE y = 0.0; + + [[unroll]] for (uint j = 0; j < head_size; j += 4) { + vec4 k_vec = vec4(_k[j], _k[j+1], _k[j+2], _k[j+3]); + vec4 r_vec = vec4(_r[j], _r[j+1], _r[j+2], _r[j+3]); + vec4 tf_vec = vec4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]); + vec4 td_vec = vec4(_td[j], _td[j+1], _td[j+2], _td[j+3]); + vec4 s_vec = vec4(state[j], state[j+1], state[j+2], state[j+3]); + + vec4 kv = k_vec * v_val; + + vec4 temp = tf_vec * kv + s_vec; + y += dot(r_vec, temp); + + s_vec = s_vec * td_vec + kv; + state[j] = s_vec.x; + state[j+1] = s_vec.y; + state[j+2] = s_vec.z; + state[j+3] = s_vec.w; + } + + dst[t] = y; + } + + [[unroll]] for (uint i = 0; i < head_size; i++) { + dst[T * C + batch_id * state_size + head_id * head_size * head_size + + i * head_size + tid] = state[i]; + } +} diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index c797172abe3d5..4b16fef2f1f51 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -8,7 +8,10 @@ // FIXME: required here for quantization functions #include "ggml-quants.h" -#include "ggml-aarch64.h" + +#ifdef GGML_USE_CPU_HBM +#include <hbwmalloc.h> +#endif #if defined(_MSC_VER) || defined(__MINGW32__) #include <malloc.h> // using malloc.h with MSC/MINGW @@ -788,32 +791,23 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = { .to_float = (ggml_to_float_t) ggml_bf16_to_fp32_row, .from_float_ref = (ggml_from_float_t) ggml_fp32_to_bf16_row_ref, }, - [GGML_TYPE_Q4_0_4_4] = { - .type_name = "q4_0_4x4", - .blck_size = QK4_0, - .blck_size_interleave = 4, - .type_size = sizeof(block_q4_0), - .is_quantized = true, - .to_float = NULL, - .from_float_ref = NULL, + [31] = { // GGML_TYPE_Q4_0_4_4 + .type_name = "TYPE_Q4_0_4_4 REMOVED, use Q4_0 with runtime repacking", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, }, - [GGML_TYPE_Q4_0_4_8] = { - .type_name = "q4_0_4x8", - .blck_size = QK4_0, - .blck_size_interleave = 8, - .type_size = sizeof(block_q4_0), - .is_quantized = true, - .to_float = NULL, - .from_float_ref = NULL, + [32] = { // GGML_TYPE_Q4_0_4_8 + .type_name = "TYPE_Q4_0_4_8 REMOVED, use Q4_0 with runtime repacking", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, }, - [GGML_TYPE_Q4_0_8_8] = { - .type_name = "q4_0_8x8", - .blck_size = QK4_0, - .blck_size_interleave = 8, - .type_size = sizeof(block_q4_0), -
.is_quantized = true, - .to_float = NULL, - .from_float_ref = NULL, + [33] = { // GGML_TYPE_Q4_0_8_8 + .type_name = "TYPE_Q4_0_8_8 REMOVED, use Q4_0 with runtime repacking", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, }, [GGML_TYPE_TQ1_0] = { .type_name = "tq1_0", @@ -831,6 +825,24 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = { .to_float = (ggml_to_float_t) dequantize_row_tq2_0, .from_float_ref = (ggml_from_float_t) quantize_row_tq2_0_ref, }, + [36] = { // GGML_TYPE_IQ4_NL_4_4 + .type_name = "TYPE_IQ4_NL_4_4 REMOVED, use IQ4_NL with runtime repacking", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, + }, + [37] = { // GGML_TYPE_IQ4_NL_4_8 + .type_name = "TYPE_IQ4_NL_4_8 REMOVED, use IQ4_NL with runtime repacking", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, + }, + [38] = { // GGML_TYPE_IQ4_NL_8_8 + .type_name = "TYPE_IQ4_NL_8_8 REMOVED, use IQ4_NL with runtime repacking", + .blck_size = 0, + .type_size = 0, + .is_quantized = false, + }, }; const struct ggml_type_traits * ggml_get_type_traits(enum ggml_type type) { @@ -941,6 +953,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "POOL_2D_BACK", "UPSCALE", "PAD", + "PAD_REFLECT_1D", "ARANGE", "TIMESTEP_EMBEDDING", "ARGSORT", @@ -974,7 +987,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "OPT_STEP_ADAMW", }; -static_assert(GGML_OP_COUNT == 81, "GGML_OP_COUNT != 81"); +static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1036,6 +1049,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "pool_2d_back(x)", "upscale(x)", "pad(x)", + "pad_reflect_1d(x)", "arange(start, stop, step)", "timestep_embedding(timesteps, dim, max_period)", "argsort(x)", @@ -1069,7 +1083,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "adamw(x)", }; -static_assert(GGML_OP_COUNT == 81, "GGML_OP_COUNT != 81"); +static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -1259,9 +1273,6 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) { case GGML_FTYPE_MOSTLY_IQ4_XS: wtype = GGML_TYPE_IQ4_XS; break; case GGML_FTYPE_MOSTLY_IQ3_S: wtype = GGML_TYPE_IQ3_S; break; case GGML_FTYPE_MOSTLY_IQ2_S: wtype = GGML_TYPE_IQ2_S; break; - case GGML_FTYPE_MOSTLY_Q4_0_4_4: wtype = GGML_TYPE_Q4_0_4_4; break; - case GGML_FTYPE_MOSTLY_Q4_0_4_8: wtype = GGML_TYPE_Q4_0_4_8; break; - case GGML_FTYPE_MOSTLY_Q4_0_8_8: wtype = GGML_TYPE_Q4_0_8_8; break; case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break; case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break; } @@ -2255,6 +2266,7 @@ struct ggml_tensor * ggml_argmax( struct ggml_context * ctx, struct ggml_tensor * a) { GGML_ASSERT(ggml_is_matrix(a)); + GGML_ASSERT(a->ne[0] <= INT32_MAX); struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, a->ne[1]); @@ -3505,15 +3517,18 @@ static struct ggml_tensor * ggml_rope_impl( GGML_ASSERT(c->ne[0] >= n_dims / 2); } + int sections[4] = {0, 0, 0, 0}; + struct ggml_tensor * result = inplace ? 
ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a); - int32_t params[11] = { /*n_past*/ 0, n_dims, mode, /*n_ctx*/ 0, n_ctx_orig }; + int32_t params[15] = { /*n_past*/ 0, n_dims, mode, /*n_ctx*/ 0, n_ctx_orig }; memcpy(params + 5, &freq_base, sizeof(float)); memcpy(params + 6, &freq_scale, sizeof(float)); memcpy(params + 7, &ext_factor, sizeof(float)); memcpy(params + 8, &attn_factor, sizeof(float)); memcpy(params + 9, &beta_fast, sizeof(float)); memcpy(params + 10, &beta_slow, sizeof(float)); + memcpy(params + 11, &sections, sizeof(int)*4); ggml_set_op_params(result, params, sizeof(params)); result->op = GGML_OP_ROPE; @@ -3535,6 +3550,53 @@ struct ggml_tensor * ggml_rope( ); } +struct ggml_tensor * ggml_rope_multi( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + struct ggml_tensor * c, + int n_dims, + int sections[4], + int mode, + int n_ctx_orig, + float freq_base, + float freq_scale, + float ext_factor, + float attn_factor, + float beta_fast, + float beta_slow) { + // Multimodal Rotary Position Embedding + GGML_ASSERT((mode & 1) == 0 && "mode & 1 == 1 is no longer supported"); + + GGML_ASSERT(ggml_is_vector(b)); + GGML_ASSERT(b->type == GGML_TYPE_I32); + GGML_ASSERT(a->ne[2] * 4 == b->ne[0]); // mrope expecting 4 position ids per token + + if (c) { + GGML_ASSERT(c->type == GGML_TYPE_F32); + GGML_ASSERT(c->ne[0] >= n_dims / 2); + } + + struct ggml_tensor * result = ggml_dup_tensor(ctx, a); + + int32_t params[11 + 4] = { /*n_past*/ 0, n_dims, mode, /*n_ctx*/ 0, n_ctx_orig }; + memcpy(params + 5, &freq_base, sizeof(float)); + memcpy(params + 6, &freq_scale, sizeof(float)); + memcpy(params + 7, &ext_factor, sizeof(float)); + memcpy(params + 8, &attn_factor, sizeof(float)); + memcpy(params + 9, &beta_fast, sizeof(float)); + memcpy(params + 10, &beta_slow, sizeof(float)); + memcpy(&params[11], sections, sizeof(int)*4); + ggml_set_op_params(result, params, sizeof(params)); + + result->op = GGML_OP_ROPE; + result->src[0] = a; + result->src[1] = b; + result->src[2] = c; + + return result; +} + struct ggml_tensor * ggml_rope_inplace( struct ggml_context * ctx, struct ggml_tensor * a, @@ -3698,13 +3760,84 @@ struct ggml_tensor * ggml_clamp( return result; } -// ggml_conv_1d - static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p, int d) { return (ins + 2 * p - d * (ks - 1) - 1) / s + 1; } -GGML_API struct ggml_tensor * ggml_conv_1d( +// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] +// a: [OC,IC, KH, KW] +// b: [N, IC, IH, IW] +// result: [N, OH, OW, IC*KH*KW] +struct ggml_tensor * ggml_im2col( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1, + bool is_2D, + enum ggml_type dst_type) { + if (is_2D) { + GGML_ASSERT(a->ne[2] == b->ne[2]); + } else { + //GGML_ASSERT(b->ne[1] % a->ne[1] == 0); + GGML_ASSERT(b->ne[1] == a->ne[1]); + GGML_ASSERT(b->ne[3] == 1); + } + + const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0; + const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); + + GGML_ASSERT((!is_2D || OH > 0) && "b too small compared to a"); + GGML_ASSERT((OW > 0) && "b too small compared to a"); + + const int64_t ne[4] = { + is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0], + OW, + is_2D ? OH : b->ne[2], + is_2D ? b->ne[3] : 1, + }; + + struct ggml_tensor * result = ggml_new_tensor(ctx, dst_type, 4, ne); + int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ?
1 : 0) }; + ggml_set_op_params(result, params, sizeof(params)); + + result->op = GGML_OP_IM2COL; + result->src[0] = a; + result->src[1] = b; + + return result; +} + +struct ggml_tensor * ggml_im2col_back( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int64_t * ne, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1, + bool is_2D) { + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) }; + ggml_set_op_params(result, params, sizeof(params)); + + result->op = GGML_OP_IM2COL_BACK; + result->src[0] = a; + result->src[1] = b; + + return result; +} + +// ggml_conv_1d + +struct ggml_tensor * ggml_conv_1d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, @@ -3734,137 +3867,75 @@ struct ggml_tensor* ggml_conv_1d_ph( return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d); } -// ggml_conv_transpose_1d - -static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) { - return (ins - 1) * s - 2 * p + d * (ks - 1) + 1; -} +// ggml_conv_1d_dw -GGML_API struct ggml_tensor * ggml_conv_transpose_1d( +struct ggml_tensor * ggml_conv_1d_dw( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, int p0, int d0) { - GGML_ASSERT(ggml_is_matrix(b)); - GGML_ASSERT(a->ne[2] == b->ne[1]); - GGML_ASSERT(a->ne[3] == 1); + struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], 1, a->ne[1], a->ne[2]); + struct ggml_tensor * new_b = ggml_reshape_4d(ctx, b, b->ne[0], 1, b->ne[1], b->ne[2]); - GGML_ASSERT(p0 == 0); - GGML_ASSERT(d0 == 1); + struct ggml_tensor * im2col = ggml_im2col(ctx, new_a, new_b, s0, 0, p0, 0, d0, 0, false, GGML_TYPE_F16); - const int64_t ne[4] = { - ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/), - a->ne[1], b->ne[2], 1, - }; - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + struct ggml_tensor * result = ggml_mul_mat(ctx, im2col, a); - int32_t params[] = { s0, p0, d0 }; - ggml_set_op_params(result, params, sizeof(params)); - - result->op = GGML_OP_CONV_TRANSPOSE_1D; - result->src[0] = a; - result->src[1] = b; + result = ggml_reshape_3d(ctx, result, b->ne[0], b->ne[1], 1); return result; } -// ggml_conv_depthwise +// ggml_conv_1d_dw_ph -struct ggml_tensor * ggml_conv_depthwise_2d( +struct ggml_tensor * ggml_conv_1d_dw_ph( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, - int s1, - int p0, - int p1, - int d0, - int d1) { - struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], a->ne[1], 1, a->ne[2] * a->ne[3]); - struct ggml_tensor * im2col = ggml_im2col(ctx, new_a, - ggml_reshape_4d(ctx, b, b->ne[0], b->ne[1], 1, b->ne[2] * b->ne[3]), - s0, s1, p0, p1, d0, d1, true, GGML_TYPE_F16); // [N * IC, OH, OW, KH * KW] - struct ggml_tensor * new_b = ggml_reshape_4d(ctx, im2col, im2col->ne[0], im2col->ne[2] * im2col->ne[1], b->ne[2], b->ne[3]); // [N * IC, OH, OW, KH * KW] => [N, IC, OH * OW, KH * KW] + int d0) { + return ggml_conv_1d_dw(ctx, a, b, s0, a->ne[0] / 2, d0); +} - new_a = ggml_reshape_4d(ctx, new_a, (new_a->ne[0] * new_a->ne[1]), new_a->ne[2], new_a->ne[3], 1); // [OC,1, KH, KW] => [1, OC, 1, KH * KW] - struct ggml_tensor * result = ggml_mul_mat(ctx, new_a, new_b); - result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], b->ne[2], b->ne[3]); // [N, OC, OH, OW] +// ggml_conv_transpose_1d - return result; +static int64_t 
ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) { + return (ins - 1) * s - 2 * p + d * (ks - 1) + 1; } -// ggml_conv_2d -// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] -// a: [OC,IC, KH, KW] -// b: [N, IC, IH, IW] -// result: [N, OH, OW, IC*KH*KW] -struct ggml_tensor * ggml_im2col( +GGML_API struct ggml_tensor * ggml_conv_transpose_1d( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int s0, - int s1, int p0, - int p1, - int d0, - int d1, - bool is_2D, - enum ggml_type dst_type) { - if(is_2D) { - GGML_ASSERT(a->ne[2] == b->ne[2]); - } else { - GGML_ASSERT(a->ne[1] == b->ne[1]); - GGML_ASSERT(b->ne[3] == 1); - } - - const int64_t OH = is_2D ? ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1) : 0; - const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0); + int d0) { + GGML_ASSERT(ggml_is_matrix(b)); + GGML_ASSERT(a->ne[2] == b->ne[1]); + GGML_ASSERT(a->ne[3] == 1); - GGML_ASSERT((!is_2D || OH > 0) && "b too small compared to a"); - GGML_ASSERT((OW > 0) && "b too small compared to a"); + GGML_ASSERT(p0 == 0); + GGML_ASSERT(d0 == 1); const int64_t ne[4] = { - is_2D ? (a->ne[2] * a->ne[1] * a->ne[0]) : a->ne[1] * a->ne[0], - OW, - is_2D ? OH : b->ne[2], - is_2D ? b->ne[3] : 1, + ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/), + a->ne[1], b->ne[2], 1, }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); - struct ggml_tensor * result = ggml_new_tensor(ctx, dst_type, 4, ne); - int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 1 : 0) }; + int32_t params[] = { s0, p0, d0 }; ggml_set_op_params(result, params, sizeof(params)); - result->op = GGML_OP_IM2COL; + result->op = GGML_OP_CONV_TRANSPOSE_1D; result->src[0] = a; result->src[1] = b; return result; } -struct ggml_tensor * ggml_im2col_back( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int64_t * ne, - int s0, - int s1, - int p0, - int p1, - int d0, - int d1, - bool is_2D) { - struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); - int32_t params[] = { s0, s1, p0, p1, d0, d1, (is_2D ? 
1 : 0) }; - ggml_set_op_params(result, params, sizeof(params)); - - result->op = GGML_OP_IM2COL_BACK; - result->src[0] = a; - result->src[1] = b; - - return result; -} +// ggml_conv_2d // a: [OC,IC, KH, KW] // b: [N, IC, IH, IW] @@ -3911,6 +3982,31 @@ struct ggml_tensor * ggml_conv_2d_s1_ph( return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1); } +// ggml_conv_2d_dw + +struct ggml_tensor * ggml_conv_2d_dw( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1) { + struct ggml_tensor * new_a = ggml_reshape_4d(ctx, a, a->ne[0], a->ne[1], 1, a->ne[2] * a->ne[3]); + struct ggml_tensor * im2col = ggml_im2col(ctx, new_a, + ggml_reshape_4d(ctx, b, b->ne[0], b->ne[1], 1, b->ne[2] * b->ne[3]), + s0, s1, p0, p1, d0, d1, true, GGML_TYPE_F16); // [N * IC, OH, OW, KH * KW] + struct ggml_tensor * new_b = ggml_reshape_4d(ctx, im2col, im2col->ne[0], im2col->ne[2] * im2col->ne[1], b->ne[2], b->ne[3]); // [N * IC, OH, OW, KH * KW] => [N, IC, OH * OW, KH * KW] + + new_a = ggml_reshape_4d(ctx, new_a, (new_a->ne[0] * new_a->ne[1]), new_a->ne[2], new_a->ne[3], 1); // [OC,1, KH, KW] => [1, OC, 1, KH * KW] + struct ggml_tensor * result = ggml_mul_mat(ctx, new_a, new_b); + result = ggml_reshape_4d(ctx, result, im2col->ne[1], im2col->ne[2], b->ne[2], b->ne[3]); // [N, OC, OH, OW] + + return result; +} + // ggml_conv_transpose_2d_p0 static int64_t ggml_calc_conv_transpose_output_size(int64_t ins, int64_t ks, int s, int p) { @@ -4087,6 +4183,37 @@ struct ggml_tensor * ggml_pad( return result; } +// ggml_pad_reflect_1d + +struct ggml_tensor * ggml_pad_reflect_1d( + struct ggml_context * ctx, + struct ggml_tensor * a, + int p0, + int p1) { + GGML_ASSERT(p0 >= 0); + GGML_ASSERT(p1 >= 0); + + GGML_ASSERT(p0 < a->ne[0]); // padding length on each side must be less than the + GGML_ASSERT(p1 < a->ne[0]); // existing length of the dimension being padded + + GGML_ASSERT(ggml_is_contiguous(a)); + GGML_ASSERT(a->type == GGML_TYPE_F32); + + struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, + a->ne[0] + p0 + p1, + a->ne[1], + a->ne[2], + a->ne[3]); + + int32_t params[] = { p0, p1 }; + ggml_set_op_params(result, params, sizeof(params)); + + result->op = GGML_OP_PAD_REFLECT_1D; + result->src[0] = a; + + return result; +} + // ggml_arange struct ggml_tensor * ggml_arange( @@ -4138,6 +4265,7 @@ struct ggml_tensor * ggml_argsort( struct ggml_context * ctx, struct ggml_tensor * a, enum ggml_sort_order order) { + GGML_ASSERT(a->ne[0] <= INT32_MAX); struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_I32, GGML_MAX_DIMS, a->ne); ggml_set_op_params_i32(result, 0, (int32_t) order); @@ -5019,8 +5147,10 @@ static void ggml_hash_map_free(struct hash_map * map) { } // utility functions to change gradients - // if a is in acc_table, modify gradients in-place and mark result as gradient accumulator - // else if a is in zero_table, replace a + // isrc is the index of tensor in cgraph->visited_hash_set.keys + // the corresponding gradients (accumulators) are also at position isrc + // if tensor has a gradient accumulator, modify that accumulator in-place + // else if there is no gradient for tensor, set the corresponding value // else, just add/subtract/etc.
the gradients static void ggml_add_or_set( @@ -5028,11 +5158,14 @@ static void ggml_add_or_set( struct ggml_cgraph * cgraph, size_t isrc, struct ggml_tensor * tensor) { + struct ggml_tensor * src = cgraph->visited_hash_set.keys[isrc]; + GGML_ASSERT(src); if (cgraph->grads[isrc]) { - cgraph->grads[isrc] = ggml_add_impl(ctx, cgraph->grads[isrc], tensor, cgraph->grad_accs[isrc]); + cgraph->grads[isrc] = ggml_add_impl(ctx, cgraph->grads[isrc], tensor, /*inplace =*/ cgraph->grad_accs[isrc]); } else { cgraph->grads[isrc] = tensor; } + ggml_format_name(cgraph->grads[isrc], "grad for %s", src->name); ggml_build_forward_expand(cgraph, cgraph->grads[isrc]); } @@ -5040,18 +5173,20 @@ static void ggml_acc_or_set( struct ggml_context * ctx, struct ggml_cgraph * cgraph, size_t isrc, - struct ggml_tensor * src, struct ggml_tensor * tensor, const size_t nb1, const size_t nb2, const size_t nb3, const size_t offset) { + struct ggml_tensor * src = cgraph->visited_hash_set.keys[isrc]; + GGML_ASSERT(src); if (cgraph->grads[isrc]) { cgraph->grads[isrc] = ggml_acc_impl(ctx, cgraph->grads[isrc], tensor, nb1, nb2, nb3, offset, cgraph->grad_accs[isrc]); } else { struct ggml_tensor * a_zero = ggml_scale(ctx, src, 0.0f); // FIXME this is going to produce NaN if a contains inf/NaN cgraph->grads[isrc] = ggml_acc_impl(ctx, a_zero, tensor, nb1, nb2, nb3, offset, false); } + ggml_format_name(cgraph->grads[isrc], "grad for %s", cgraph->visited_hash_set.keys[isrc]->name); ggml_build_forward_expand(cgraph, cgraph->grads[isrc]); } @@ -5059,13 +5194,15 @@ static void ggml_add1_or_set( struct ggml_context * ctx, struct ggml_cgraph * cgraph, size_t isrc, - struct ggml_tensor * src, struct ggml_tensor * tensor) { + struct ggml_tensor * src = cgraph->visited_hash_set.keys[isrc]; + GGML_ASSERT(src); if (cgraph->grads[isrc]) { cgraph->grads[isrc] = ggml_add1_impl(ctx, cgraph->grads[isrc], tensor, cgraph->grad_accs[isrc]); } else { cgraph->grads[isrc] = ggml_repeat(ctx, tensor, src); } + ggml_format_name(cgraph->grads[isrc], "grad for %s", src->name); ggml_build_forward_expand(cgraph, cgraph->grads[isrc]); } @@ -5074,11 +5211,14 @@ static void ggml_sub_or_set( struct ggml_cgraph * cgraph, size_t isrc, struct ggml_tensor * tensor) { + struct ggml_tensor * src = cgraph->visited_hash_set.keys[isrc]; + GGML_ASSERT(src); if (cgraph->grads[isrc]) { cgraph->grads[isrc] = ggml_sub_impl(ctx, cgraph->grads[isrc], tensor, cgraph->grad_accs[isrc]); } else { cgraph->grads[isrc] = ggml_neg(ctx, tensor); } + ggml_format_name(cgraph->grads[isrc], "grad for %s", src->name); ggml_build_forward_expand(cgraph, cgraph->grads[isrc]); } @@ -5095,12 +5235,12 @@ static void ggml_compute_backward( struct ggml_tensor * src1 = tensor->src[1]; struct ggml_tensor * src2 = tensor->src[2]; struct ggml_hash_set * hash_set = &cgraph->visited_hash_set; - const size_t isrc0 = ggml_hash_find(hash_set, src0); - const size_t isrc1 = ggml_hash_find(hash_set, src1); - const size_t isrc2 = ggml_hash_find(hash_set, src2); - const bool src0_needs_grads = isrc0 != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, isrc0) && grads_needed[isrc0]; - const bool src1_needs_grads = isrc1 != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, isrc1) && grads_needed[isrc1]; - const bool src2_needs_grads = isrc2 != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, isrc2) && grads_needed[isrc2]; + const size_t isrc0 = src0 ? ggml_hash_find(hash_set, src0) : (size_t) -1; + const size_t isrc1 = src1 ? ggml_hash_find(hash_set, src1) : (size_t) -1; + const size_t isrc2 = src2 ? 
ggml_hash_find(hash_set, src2) : (size_t) -1; + const bool src0_needs_grads = src0 && isrc0 != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, isrc0) && grads_needed[isrc0]; + const bool src1_needs_grads = src1 && isrc1 != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, isrc1) && grads_needed[isrc1]; + const bool src2_needs_grads = src2 && isrc2 != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, isrc2) && grads_needed[isrc2]; switch (tensor->op) { case GGML_OP_DUP: { @@ -5200,7 +5340,7 @@ static void ggml_compute_backward( } break; case GGML_OP_SUM: { if (src0_needs_grads) { - ggml_add1_or_set(ctx, cgraph, isrc0, src0, grad); + ggml_add1_or_set(ctx, cgraph, isrc0, grad); } } break; case GGML_OP_SUM_ROWS: { @@ -5210,7 +5350,7 @@ static void ggml_compute_backward( } break; case GGML_OP_MEAN: { if (src0_needs_grads) { - ggml_add1_or_set(ctx, cgraph, isrc0, src0, ggml_scale_impl(ctx, grad, 1.0f/src0->ne[0], false)); + ggml_add1_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, 1.0f/src0->ne[0], false)); } } break; case GGML_OP_REPEAT: { @@ -5363,7 +5503,7 @@ static void ggml_compute_backward( nb3 = (nb3 / n0) * ng; } - ggml_acc_or_set(ctx, cgraph, isrc0, src0, grad, nb1, nb2, nb3, offset); + ggml_acc_or_set(ctx, cgraph, isrc0, grad, nb1, nb2, nb3, offset); } } break; case GGML_OP_PERMUTE: { @@ -5597,10 +5737,9 @@ void ggml_build_backward_expand( const int n_nodes_f = cgraph->n_nodes; - const size_t hash_size = ggml_hash_size(2*cgraph->size); - memset(cgraph->grads, 0, hash_size*sizeof(struct ggml_tensor *)); - memset(cgraph->grad_accs, 0, hash_size*sizeof(struct ggml_tensor *)); - bool * grads_needed = calloc(hash_size, sizeof(bool)); + memset(cgraph->grads, 0, cgraph->visited_hash_set.size*sizeof(struct ggml_tensor *)); + memset(cgraph->grad_accs, 0, cgraph->visited_hash_set.size*sizeof(struct ggml_tensor *)); + bool * grads_needed = calloc(cgraph->visited_hash_set.size, sizeof(bool)); { bool any_params = false; @@ -5621,7 +5760,7 @@ void ggml_build_backward_expand( continue; } - bool node_needs_grad = node->flags & GGML_TENSOR_FLAG_PARAM; + bool node_needs_grad = (node->flags & GGML_TENSOR_FLAG_PARAM) || (node->flags & GGML_TENSOR_FLAG_LOSS); bool ignore_src[GGML_MAX_SRC] = {false}; switch (node->op) { // gradients in node->src[0] for one reason or another have no effect on output gradients @@ -5638,7 +5777,7 @@ void ggml_build_backward_expand( } break; // gradients in node->src[1] for one reason or another have no effect on output gradients - case GGML_OP_CPY: // gradients in CPY target are irrelevant + case GGML_OP_CPY: // gradients in CPY target are irrelevant case GGML_OP_GET_ROWS: // row indices not differentiable case GGML_OP_GET_ROWS_BACK: // same as for GET_ROWS case GGML_OP_ROPE: // positions not differentiable @@ -5665,9 +5804,12 @@ void ggml_build_backward_expand( node->op == GGML_OP_RESHAPE || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_TRANSPOSE); const size_t igrad = ggml_hash_find(&cgraph->visited_hash_set, node); + GGML_ASSERT(igrad != GGML_HASHSET_FULL); + GGML_ASSERT(ggml_bitset_get(cgraph->visited_hash_set.used, igrad)); if ((accumulate && (node->flags & GGML_TENSOR_FLAG_PARAM)) || (node->flags & GGML_TENSOR_FLAG_LOSS)) { - cgraph->grads[igrad] = ggml_dup_tensor(ctx_static, node); - cgraph->grad_accs[igrad] = cgraph->grads[igrad]; + cgraph->grad_accs[igrad] = ggml_dup_tensor(ctx_static, node); + cgraph->grads[igrad] = cgraph->grad_accs[igrad]; + ggml_format_name(cgraph->grad_accs[igrad], "grad acc for %s", node->name); } grads_needed[igrad] = 
true; } @@ -5761,15 +5903,15 @@ struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) { struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph0, int i0, int i1) { struct ggml_cgraph cgraph = { - /*.size =*/ 0, - /*.n_nodes =*/ i1 - i0, - /*.n_leafs =*/ 0, - /*.nodes =*/ cgraph0->nodes + i0, - /*.grads =*/ cgraph0->grads ? cgraph0->grads + i0 : NULL, - /*.grad_accs =*/ cgraph0->grad_accs ? cgraph0->grad_accs + i0 : NULL, - /*.leafs =*/ NULL, - /*.hash_table =*/ { 0, NULL, NULL }, - /*.order =*/ cgraph0->order, + /*.size =*/ 0, + /*.n_nodes =*/ i1 - i0, + /*.n_leafs =*/ 0, + /*.nodes =*/ cgraph0->nodes + i0, + /*.grads =*/ NULL, // gradients would need visited_hash_set + /*.grad_accs =*/ NULL, + /*.leafs =*/ NULL, + /*.visited_hash_set =*/ { 0, NULL, NULL }, + /*.order =*/ cgraph0->order, }; return cgraph; @@ -5799,12 +5941,22 @@ void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) { } } + if (dst->grads) { + memset(dst->grads, 0, dst->visited_hash_set.size*sizeof(struct ggml_tensor *)); + memset(dst->grad_accs, 0, dst->visited_hash_set.size*sizeof(struct ggml_tensor *)); + } if (src->grads) { GGML_ASSERT(dst->grads != NULL); GGML_ASSERT(dst->grad_accs != NULL); for (int i = 0; i < src->n_nodes; ++i) { const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]); const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]); + + GGML_ASSERT(igrad_src != GGML_HASHSET_FULL); + GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src)); + GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL); + GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst)); + dst->grads[igrad_dst] = src->grads[igrad_src]; dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src]; } @@ -5839,12 +5991,8 @@ void ggml_graph_reset(struct ggml_cgraph * cgraph) { if (node->op == GGML_OP_OPT_STEP_ADAMW) { // clear momenta - if (node->src[2]->data) { - ggml_set_zero(node->src[2]); - } - if (node->src[3]->data) { - ggml_set_zero(node->src[3]); - } + ggml_set_zero(node->src[2]); + ggml_set_zero(node->src[3]); } // initial gradients of loss should be 1, 0 otherwise @@ -5923,12 +6071,12 @@ struct ggml_tensor * ggml_graph_get_tensor(const struct ggml_cgraph * cgraph, co struct ggml_tensor * ggml_graph_get_grad(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { const size_t igrad = ggml_hash_find(&cgraph->visited_hash_set, node); - return igrad != GGML_HASHSET_FULL && ggml_bitset_get(cgraph->visited_hash_set.used, igrad) ? cgraph->grads[igrad] : NULL; + return igrad != GGML_HASHSET_FULL && ggml_bitset_get(cgraph->visited_hash_set.used, igrad) && cgraph->grads ? cgraph->grads[igrad] : NULL; } struct ggml_tensor * ggml_graph_get_grad_acc(const struct ggml_cgraph * cgraph, const struct ggml_tensor * node) { const size_t igrad = ggml_hash_find(&cgraph->visited_hash_set, node); - return igrad != GGML_HASHSET_FULL && ggml_bitset_get(cgraph->visited_hash_set.used, igrad) ? cgraph->grad_accs[igrad] : NULL; + return igrad != GGML_HASHSET_FULL && ggml_bitset_get(cgraph->visited_hash_set.used, igrad) && cgraph->grad_accs ? 
cgraph->grad_accs[igrad] : NULL; } void ggml_graph_print(const struct ggml_cgraph * cgraph) { @@ -6240,9 +6388,6 @@ size_t ggml_quantize_chunk( case GGML_TYPE_IQ1_M: result = quantize_iq1_m (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_IQ4_NL: result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_IQ4_XS: result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; - case GGML_TYPE_Q4_0_4_4: result = quantize_q4_0_4x4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; - case GGML_TYPE_Q4_0_4_8: result = quantize_q4_0_4x8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; - case GGML_TYPE_Q4_0_8_8: result = quantize_q4_0_8x8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_F16: { size_t elemsize = sizeof(ggml_fp16_t); @@ -6378,7 +6523,7 @@ struct gguf_context { void * data; }; -static size_t gguf_type_size(enum gguf_type type) { +size_t gguf_type_size(enum gguf_type type) { GGML_ASSERT(0 <= type && type < GGUF_TYPE_COUNT); return GGUF_TYPE_SIZE[type]; } @@ -6506,13 +6651,7 @@ struct gguf_context * gguf_init_empty(void) { return ctx; } -struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) { - FILE * file = ggml_fopen(fname, "rb"); - if (!file) { - fprintf(stderr, "%s: failed to open '%s': '%s'\n", __func__, fname, strerror(errno)); - return NULL; - } - +struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params) { // offset from start of file size_t offset = 0; @@ -6525,7 +6664,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p for (uint32_t i = 0; i < sizeof(magic); i++) { if (magic[i] != GGUF_MAGIC[i]) { fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]); - fclose(file); return NULL; } } @@ -6536,7 +6674,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p struct gguf_context * ctx = calloc(1, sizeof(struct gguf_context)); if (!ctx) { fprintf(stderr, "%s: failed to allocate memory for context\n", __func__); - fclose(file); return NULL; } @@ -6554,7 +6691,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (ctx->header.version == 1) { fprintf(stderr, "%s: GGUFv1 is no longer supported. 
please use a more up-to-date version\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6567,7 +6703,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read header\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6577,12 +6712,13 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p { const uint64_t n_kv = ctx->header.n_kv; - ctx->kv = calloc(n_kv, sizeof(struct gguf_kv)); - if (!ctx->kv) { - fprintf(stderr, "%s: failed to allocate memory for kv pairs\n", __func__); - fclose(file); - gguf_free(ctx); - return NULL; + if (n_kv > 0) { + ctx->kv = calloc(n_kv, sizeof(struct gguf_kv)); + if (!ctx->kv) { + fprintf(stderr, "%s: failed to allocate memory for kv pairs\n", __func__); + gguf_free(ctx); + return NULL; + } } for (uint64_t i = 0; i < n_kv; ++i) { @@ -6629,7 +6765,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // prevent from integer overflow in the malloc below if (kv->value.arr.n >= SIZE_MAX/gguf_type_size(kv->value.arr.type)) { fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n); - fclose(file); gguf_free(ctx); return NULL; } @@ -6637,7 +6772,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p kv->value.arr.data = calloc(kv->value.arr.n, gguf_type_size(kv->value.arr.type)); if (!kv->value.arr.data) { fprintf(stderr, "%s: failed to allocate memory for array\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6649,7 +6783,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // prevent from integer overflow in the malloc below if (kv->value.arr.n >= SIZE_MAX/sizeof(struct gguf_str)) { fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n); - fclose(file); gguf_free(ctx); return NULL; } @@ -6657,7 +6790,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p kv->value.arr.data = calloc(kv->value.arr.n, sizeof(struct gguf_str)); if (!kv->value.arr.data) { fprintf(stderr, "%s: failed to allocate memory for array\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6688,7 +6820,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read key-value pairs\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6699,7 +6830,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p ctx->infos = calloc(ctx->header.n_tensors, sizeof(struct gguf_tensor_info)); if (!ctx->infos) { fprintf(stderr, "%s: failed to allocate memory for tensor infos\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6735,7 +6865,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read tensor info\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6774,10 +6903,17 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p (int64_t) info->ne[2] * (int64_t) info->ne[3]; - if (ggml_blck_size(info->type) == 0 || ne % ggml_blck_size(info->type) != 0) { + if (ggml_blck_size(info->type) == 0) { + // support for this tensor type has been removed: + fprintf(stderr, "%s: tensor '%s' of type %d: %s\n", + __func__, info->name.data, (int) info->type, ggml_type_name(info->type)); + gguf_free(ctx); + return NULL; + } + + if (ne %
ggml_blck_size(info->type) != 0) { fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%" PRId64 ")\n", __func__, info->name.data, (int) info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type)); - fclose(file); gguf_free(ctx); return NULL; } @@ -6809,7 +6945,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p *params.ctx = ggml_init(pdata); if (*params.ctx == NULL) { fprintf(stderr, "%s: failed to initialize context\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6828,7 +6963,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read tensor data\n", __func__); - fclose(file); ggml_free(ctx_data); gguf_free(ctx); return NULL; @@ -6867,7 +7001,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read the tensor data\n", __func__); - fclose(file); ggml_free(ctx_data); gguf_free(ctx); return NULL; @@ -6876,11 +7009,21 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p ggml_set_no_alloc(ctx_data, params.no_alloc); } - fclose(file); - return ctx; } +struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) { + FILE * file = ggml_fopen(fname, "rb"); + if (!file) { + fprintf(stderr, "%s: failed to open '%s': '%s'\n", __func__, fname, strerror(errno)); + return NULL; + } + + struct gguf_context * result = gguf_init_from_file_impl(file, params); + fclose(file); + return result; +} + void gguf_free(struct gguf_context * ctx) { if (ctx == NULL) { return; @@ -7340,13 +7483,7 @@ void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const vo // fwrite(val, sizeof(char), size, file); //} -struct gguf_buf { - void * data; - size_t size; - size_t offset; -}; - -static struct gguf_buf gguf_buf_init(size_t size) { +struct gguf_buf gguf_buf_init(size_t size) { struct gguf_buf buf = { /*buf.data =*/ size == 0 ? NULL : GGML_CALLOC(1, size), /*buf.size =*/ size, @@ -7356,7 +7493,7 @@ static struct gguf_buf gguf_buf_init(size_t size) { return buf; } -static void gguf_buf_free(struct gguf_buf buf) { +void gguf_buf_free(struct gguf_buf buf) { if (buf.data) { GGML_FREE(buf.data); } @@ -7394,7 +7531,7 @@ static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_si buf->offset += el_size; } -static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) { +void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) { // write header gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic)); gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version)); @@ -7549,3 +7686,26 @@ void ggml_log_set(ggml_log_callback log_callback, void * user_data) { g_logger_state.log_callback = log_callback ? 
log_callback : ggml_log_callback_default; g_logger_state.log_callback_user_data = user_data; } + +void ggml_threadpool_params_init(struct ggml_threadpool_params * p, int n_threads) { + p->n_threads = n_threads; + p->prio = 0; // default priority (usually means normal or inherited) + p->poll = 50; // hybrid-polling enabled + p->strict_cpu = false; // no strict placement (all threads share same cpumask) + p->paused = false; // threads are ready to go + memset(p->cpumask, 0, GGML_MAX_N_THREADS); // all-zero means use the default affinity (usually inherited) +} + +struct ggml_threadpool_params ggml_threadpool_params_default(int n_threads) { + struct ggml_threadpool_params p; + ggml_threadpool_params_init(&p, n_threads); + return p; +} + +bool ggml_threadpool_params_match(const struct ggml_threadpool_params * p0, const struct ggml_threadpool_params * p1) { + if (p0->n_threads != p1->n_threads ) return false; + if (p0->prio != p1->prio ) return false; + if (p0->poll != p1->poll ) return false; + if (p0->strict_cpu != p1->strict_cpu ) return false; + return memcmp(p0->cpumask, p1->cpumask, GGML_MAX_N_THREADS) == 0; +} diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index bc2b649d1a83d..a40df974d1fca 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -90,6 +90,7 @@ class LLM: VOCAB_SIZE = "{arch}.vocab_size" CONTEXT_LENGTH = "{arch}.context_length" EMBEDDING_LENGTH = "{arch}.embedding_length" + FEATURES_LENGTH = "{arch}.features_length" BLOCK_COUNT = "{arch}.block_count" LEADING_DENSE_BLOCK_COUNT = "{arch}.leading_dense_block_count" FEED_FORWARD_LENGTH = "{arch}.feed_forward_length" @@ -122,6 +123,8 @@ class Attention: VALUE_LENGTH = "{arch}.attention.value_length" LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon" LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" + GROUPNORM_EPS = "{arch}.attention.group_norm_epsilon" + GROUPNORM_GROUPS = "{arch}.attention.group_norm_groups" CAUSAL = "{arch}.attention.causal" Q_LORA_RANK = "{arch}.attention.q_lora_rank" KV_LORA_RANK = "{arch}.attention.kv_lora_rank" @@ -131,6 +134,7 @@ class Attention: class Rope: DIMENSION_COUNT = "{arch}.rope.dimension_count" + DIMENSION_SECTIONS = "{arch}.rope.dimension_sections" FREQ_BASE = "{arch}.rope.freq_base" SCALING_TYPE = "{arch}.rope.scaling.type" SCALING_FACTOR = "{arch}.rope.scaling.factor" @@ -154,6 +158,14 @@ class SSM: class WKV: HEAD_SIZE = "{arch}.wkv.head_size" + class PosNet: + EMBEDDING_LENGTH = "{arch}.posnet.embedding_length" + BLOCK_COUNT = "{arch}.posnet.block_count" + + class ConvNext: + EMBEDDING_LENGTH = "{arch}.convnext.embedding_length" + BLOCK_COUNT = "{arch}.convnext.block_count" + class Tokenizer: MODEL = "tokenizer.ggml.model" PRE = "tokenizer.ggml.pre" @@ -208,55 +220,59 @@ class GGUFType: class MODEL_ARCH(IntEnum): - LLAMA = auto() - FALCON = auto() - BAICHUAN = auto() - GROK = auto() - GPT2 = auto() - GPTJ = auto() - GPTNEOX = auto() - MPT = auto() - STARCODER = auto() - REFACT = auto() - BERT = auto() - NOMIC_BERT = auto() - JINA_BERT_V2 = auto() - BLOOM = auto() - STABLELM = auto() - QWEN = auto() - QWEN2 = auto() - QWEN2MOE = auto() - PHI2 = auto() - PHI3 = auto() - PLAMO = auto() - CODESHELL = auto() - ORION = auto() - INTERNLM2 = auto() - MINICPM = auto() - MINICPM3 = auto() - GEMMA = auto() - GEMMA2 = auto() - STARCODER2 = auto() - RWKV6 = auto() - MAMBA = auto() - XVERSE = auto() - COMMAND_R = auto() - DBRX = auto() - OLMO = auto() - OLMOE = auto() - OPENELM = auto() - ARCTIC = auto() - DEEPSEEK2 = auto() - CHATGLM = auto() - 
BITNET = auto() - T5 = auto() - T5ENCODER = auto() - JAIS = auto() - NEMOTRON = auto() - EXAONE = auto() - GRANITE = auto() - GRANITE_MOE = auto() - CHAMELEON = auto() + LLAMA = auto() + FALCON = auto() + BAICHUAN = auto() + GROK = auto() + GPT2 = auto() + GPTJ = auto() + GPTNEOX = auto() + MPT = auto() + STARCODER = auto() + REFACT = auto() + BERT = auto() + NOMIC_BERT = auto() + JINA_BERT_V2 = auto() + BLOOM = auto() + STABLELM = auto() + QWEN = auto() + QWEN2 = auto() + QWEN2MOE = auto() + QWEN2VL = auto() + PHI2 = auto() + PHI3 = auto() + PLAMO = auto() + CODESHELL = auto() + ORION = auto() + INTERNLM2 = auto() + MINICPM = auto() + MINICPM3 = auto() + GEMMA = auto() + GEMMA2 = auto() + STARCODER2 = auto() + RWKV6 = auto() + MAMBA = auto() + XVERSE = auto() + COMMAND_R = auto() + DBRX = auto() + OLMO = auto() + OLMO2 = auto() + OLMOE = auto() + OPENELM = auto() + ARCTIC = auto() + DEEPSEEK = auto() + DEEPSEEK2 = auto() + CHATGLM = auto() + BITNET = auto() + T5 = auto() + T5ENCODER = auto() + JAIS = auto() + NEMOTRON = auto() + EXAONE = auto() + GRANITE = auto() + GRANITE_MOE = auto() + CHAMELEON = auto() + WAVTOKENIZER_DEC = auto() class MODEL_TENSOR(IntEnum): @@ -366,58 +382,78 @@ class MODEL_TENSOR(IntEnum): ENC_OUTPUT_NORM = auto() CLS = auto() # classifier CLS_OUT = auto() # classifier output projection + CONV1D = auto() + CONVNEXT_DW = auto() + CONVNEXT_NORM = auto() + CONVNEXT_PW1 = auto() + CONVNEXT_PW2 = auto() + CONVNEXT_GAMMA = auto() + POSNET_CONV1 = auto() + POSNET_CONV2 = auto() + POSNET_NORM = auto() + POSNET_NORM1 = auto() + POSNET_NORM2 = auto() + POSNET_ATTN_NORM = auto() + POSNET_ATTN_Q = auto() + POSNET_ATTN_K = auto() + POSNET_ATTN_V = auto() + POSNET_ATTN_OUT = auto() MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { - MODEL_ARCH.LLAMA: "llama", - MODEL_ARCH.FALCON: "falcon", - MODEL_ARCH.BAICHUAN: "baichuan", - MODEL_ARCH.GROK: "grok", - MODEL_ARCH.GPT2: "gpt2", - MODEL_ARCH.GPTJ: "gptj", - MODEL_ARCH.GPTNEOX: "gptneox", - MODEL_ARCH.MPT: "mpt", - MODEL_ARCH.STARCODER: "starcoder", - MODEL_ARCH.REFACT: "refact", - MODEL_ARCH.BERT: "bert", - MODEL_ARCH.NOMIC_BERT: "nomic-bert", - MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2", - MODEL_ARCH.BLOOM: "bloom", - MODEL_ARCH.STABLELM: "stablelm", - MODEL_ARCH.QWEN: "qwen", - MODEL_ARCH.QWEN2: "qwen2", - MODEL_ARCH.QWEN2MOE: "qwen2moe", - MODEL_ARCH.PHI2: "phi2", - MODEL_ARCH.PHI3: "phi3", - MODEL_ARCH.PLAMO: "plamo", - MODEL_ARCH.CODESHELL: "codeshell", - MODEL_ARCH.ORION: "orion", - MODEL_ARCH.INTERNLM2: "internlm2", - MODEL_ARCH.MINICPM: "minicpm", - MODEL_ARCH.MINICPM3: "minicpm3", - MODEL_ARCH.GEMMA: "gemma", - MODEL_ARCH.GEMMA2: "gemma2", - MODEL_ARCH.STARCODER2: "starcoder2", - MODEL_ARCH.RWKV6: "rwkv6", - MODEL_ARCH.MAMBA: "mamba", - MODEL_ARCH.XVERSE: "xverse", - MODEL_ARCH.COMMAND_R: "command-r", - MODEL_ARCH.DBRX: "dbrx", - MODEL_ARCH.OLMO: "olmo", - MODEL_ARCH.OLMOE: "olmoe", - MODEL_ARCH.OPENELM: "openelm", - MODEL_ARCH.ARCTIC: "arctic", - MODEL_ARCH.DEEPSEEK2: "deepseek2", - MODEL_ARCH.CHATGLM: "chatglm", - MODEL_ARCH.BITNET: "bitnet", - MODEL_ARCH.T5: "t5", - MODEL_ARCH.T5ENCODER: "t5encoder", - MODEL_ARCH.JAIS: "jais", - MODEL_ARCH.NEMOTRON: "nemotron", - MODEL_ARCH.EXAONE: "exaone", - MODEL_ARCH.GRANITE: "granite", - MODEL_ARCH.GRANITE_MOE: "granitemoe", - MODEL_ARCH.CHAMELEON: "chameleon", + MODEL_ARCH.LLAMA: "llama", + MODEL_ARCH.FALCON: "falcon", + MODEL_ARCH.BAICHUAN: "baichuan", + MODEL_ARCH.GROK: "grok", + MODEL_ARCH.GPT2: "gpt2", + MODEL_ARCH.GPTJ: "gptj", + MODEL_ARCH.GPTNEOX: "gptneox", + MODEL_ARCH.MPT: "mpt", 
+ MODEL_ARCH.STARCODER: "starcoder", + MODEL_ARCH.REFACT: "refact", + MODEL_ARCH.BERT: "bert", + MODEL_ARCH.NOMIC_BERT: "nomic-bert", + MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2", + MODEL_ARCH.BLOOM: "bloom", + MODEL_ARCH.STABLELM: "stablelm", + MODEL_ARCH.QWEN: "qwen", + MODEL_ARCH.QWEN2: "qwen2", + MODEL_ARCH.QWEN2MOE: "qwen2moe", + MODEL_ARCH.QWEN2VL: "qwen2vl", + MODEL_ARCH.PHI2: "phi2", + MODEL_ARCH.PHI3: "phi3", + MODEL_ARCH.PLAMO: "plamo", + MODEL_ARCH.CODESHELL: "codeshell", + MODEL_ARCH.ORION: "orion", + MODEL_ARCH.INTERNLM2: "internlm2", + MODEL_ARCH.MINICPM: "minicpm", + MODEL_ARCH.MINICPM3: "minicpm3", + MODEL_ARCH.GEMMA: "gemma", + MODEL_ARCH.GEMMA2: "gemma2", + MODEL_ARCH.STARCODER2: "starcoder2", + MODEL_ARCH.RWKV6: "rwkv6", + MODEL_ARCH.MAMBA: "mamba", + MODEL_ARCH.XVERSE: "xverse", + MODEL_ARCH.COMMAND_R: "command-r", + MODEL_ARCH.DBRX: "dbrx", + MODEL_ARCH.OLMO: "olmo", + MODEL_ARCH.OLMO2: "olmo2", + MODEL_ARCH.OLMOE: "olmoe", + MODEL_ARCH.OPENELM: "openelm", + MODEL_ARCH.ARCTIC: "arctic", + MODEL_ARCH.DEEPSEEK: "deepseek", + MODEL_ARCH.DEEPSEEK2: "deepseek2", + MODEL_ARCH.CHATGLM: "chatglm", + MODEL_ARCH.BITNET: "bitnet", + MODEL_ARCH.T5: "t5", + MODEL_ARCH.T5ENCODER: "t5encoder", + MODEL_ARCH.JAIS: "jais", + MODEL_ARCH.NEMOTRON: "nemotron", + MODEL_ARCH.EXAONE: "exaone", + MODEL_ARCH.GRANITE: "granite", + MODEL_ARCH.GRANITE_MOE: "granitemoe", + MODEL_ARCH.CHAMELEON: "chameleon", + MODEL_ARCH.WAVTOKENIZER_DEC: "wavtokenizer-dec", } TENSOR_NAMES: dict[MODEL_TENSOR, str] = { @@ -527,6 +563,22 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.ENC_OUTPUT_NORM: "enc.output_norm", MODEL_TENSOR.CLS: "cls", MODEL_TENSOR.CLS_OUT: "cls.output", + MODEL_TENSOR.CONV1D: "conv1d", + MODEL_TENSOR.CONVNEXT_DW: "convnext.{bid}.dw", + MODEL_TENSOR.CONVNEXT_NORM: "convnext.{bid}.norm", + MODEL_TENSOR.CONVNEXT_PW1: "convnext.{bid}.pw1", + MODEL_TENSOR.CONVNEXT_PW2: "convnext.{bid}.pw2", + MODEL_TENSOR.CONVNEXT_GAMMA: "convnext.{bid}.gamma", + MODEL_TENSOR.POSNET_CONV1: "posnet.{bid}.conv1", + MODEL_TENSOR.POSNET_CONV2: "posnet.{bid}.conv2", + MODEL_TENSOR.POSNET_NORM: "posnet.{bid}.norm", + MODEL_TENSOR.POSNET_NORM1: "posnet.{bid}.norm1", + MODEL_TENSOR.POSNET_NORM2: "posnet.{bid}.norm2", + MODEL_TENSOR.POSNET_ATTN_NORM: "posnet.{bid}.attn_norm", + MODEL_TENSOR.POSNET_ATTN_Q: "posnet.{bid}.attn_q", + MODEL_TENSOR.POSNET_ATTN_K: "posnet.{bid}.attn_k", + MODEL_TENSOR.POSNET_ATTN_V: "posnet.{bid}.attn_v", + MODEL_TENSOR.POSNET_ATTN_OUT: "posnet.{bid}.attn_output", } MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { @@ -756,6 +808,21 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_UP, ], MODEL_ARCH.QWEN2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], + MODEL_ARCH.QWEN2VL: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, MODEL_TENSOR.OUTPUT, @@ -894,6 +961,8 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.OUTPUT, MODEL_TENSOR.OUTPUT_NORM, MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ROPE_FACTORS_LONG, + MODEL_TENSOR.ROPE_FACTORS_SHORT, MODEL_TENSOR.ATTN_NORM, MODEL_TENSOR.ATTN_Q, MODEL_TENSOR.ATTN_K, @@ -1069,6 +1138,22 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, ], + MODEL_ARCH.OLMO2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_Q, + 
MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_POST_NORM, + MODEL_TENSOR.ATTN_Q_NORM, + MODEL_TENSOR.ATTN_K_NORM, + MODEL_TENSOR.FFN_POST_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], MODEL_ARCH.OLMOE: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, @@ -1120,6 +1205,29 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_DOWN_EXP, MODEL_TENSOR.FFN_UP_EXP, ], + MODEL_ARCH.DEEPSEEK: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.ATTN_ROT_EMBD, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + MODEL_TENSOR.FFN_GATE_SHEXP, + MODEL_TENSOR.FFN_DOWN_SHEXP, + MODEL_TENSOR.FFN_UP_SHEXP, + ], MODEL_ARCH.DEEPSEEK2: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, @@ -1309,6 +1417,28 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, ], + MODEL_ARCH.WAVTOKENIZER_DEC: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.TOKEN_EMBD_NORM, + MODEL_TENSOR.CONV1D, + MODEL_TENSOR.CONVNEXT_DW, + MODEL_TENSOR.CONVNEXT_NORM, + MODEL_TENSOR.CONVNEXT_PW1, + MODEL_TENSOR.CONVNEXT_PW2, + MODEL_TENSOR.CONVNEXT_GAMMA, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.POSNET_CONV1, + MODEL_TENSOR.POSNET_CONV2, + MODEL_TENSOR.POSNET_NORM, + MODEL_TENSOR.POSNET_NORM1, + MODEL_TENSOR.POSNET_NORM2, + MODEL_TENSOR.POSNET_ATTN_NORM, + MODEL_TENSOR.POSNET_ATTN_Q, + MODEL_TENSOR.POSNET_ATTN_K, + MODEL_TENSOR.POSNET_ATTN_V, + MODEL_TENSOR.POSNET_ATTN_OUT, + ], # TODO } @@ -1342,6 +1472,10 @@ class MODEL_TENSOR(IntEnum): MODEL_TENSOR.ROPE_FREQS, MODEL_TENSOR.ATTN_ROT_EMBD, ], + MODEL_ARCH.DEEPSEEK: [ + MODEL_TENSOR.ROPE_FREQS, + MODEL_TENSOR.ATTN_ROT_EMBD, + ], MODEL_ARCH.DEEPSEEK2: [ MODEL_TENSOR.ROPE_FREQS, MODEL_TENSOR.ATTN_ROT_EMBD, @@ -1370,9 +1504,10 @@ class TokenType(IntEnum): class RopeScalingType(Enum): - NONE = 'none' - LINEAR = 'linear' - YARN = 'yarn' + NONE = 'none' + LINEAR = 'linear' + YARN = 'yarn' + LONGROPE = 'longrope' class PoolingType(IntEnum): @@ -1411,9 +1546,6 @@ class GGMLQuantizationType(IntEnum): F64 = 28 IQ1_M = 29 BF16 = 30 - Q4_0_4_4 = 31 - Q4_0_4_8 = 32 - Q4_0_8_8 = 33 TQ1_0 = 34 TQ2_0 = 35 @@ -1457,9 +1589,9 @@ class LlamaFileType(IntEnum): MOSTLY_IQ4_XS = 30 # except 1d tensors MOSTLY_IQ1_M = 31 # except 1d tensors MOSTLY_BF16 = 32 # except 1d tensors - MOSTLY_Q4_0_4_4 = 33 # except 1d tensors - MOSTLY_Q4_0_4_8 = 34 # except 1d tensors - MOSTLY_Q4_0_8_8 = 35 # except 1d tensors + # MOSTLY_Q4_0_4_4 = 33 # removed from gguf files, use Q4_0 and runtime repack + # MOSTLY_Q4_0_4_8 = 34 # removed from gguf files, use Q4_0 and runtime repack + # MOSTLY_Q4_0_8_8 = 35 # removed from gguf files, use Q4_0 and runtime repack MOSTLY_TQ1_0 = 36 # except 1d tensors MOSTLY_TQ2_0 = 37 # except 1d tensors @@ -1535,9 +1667,6 @@ def get_type(val: Any) -> GGUFValueType: GGMLQuantizationType.F64: (1, 8), GGMLQuantizationType.IQ1_M: (256, QK_K // 8 + QK_K // 16 + QK_K // 32), GGMLQuantizationType.BF16: (1, 2), - GGMLQuantizationType.Q4_0_4_4:(32, 2 + 16), - GGMLQuantizationType.Q4_0_4_8:(32, 2 + 16), - GGMLQuantizationType.Q4_0_8_8:(32, 2 + 16), GGMLQuantizationType.TQ1_0: (256, 2 + 4 * 13), GGMLQuantizationType.TQ2_0: (256, 2 + 64), } diff --git 
a/gguf-py/gguf/gguf_reader.py b/gguf-py/gguf/gguf_reader.py index e8e61abf86ae4..e17a4e83147d5 100644 --- a/gguf-py/gguf/gguf_reader.py +++ b/gguf-py/gguf/gguf_reader.py @@ -145,11 +145,10 @@ def _get( count = int(count) itemsize = int(np.empty([], dtype = dtype).itemsize) end_offs = offset + itemsize * count - return ( - self.data[offset:end_offs] - .view(dtype = dtype)[:count] - .newbyteorder(override_order or self.byte_order) - ) + arr = self.data[offset:end_offs].view(dtype=dtype)[:count] + if override_order is None: + return arr + return arr.view(arr.dtype.newbyteorder(override_order)) def _push_field(self, field: ReaderField, skip_sum: bool = False) -> int: if field.name in self.fields: diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 7a55d12965362..3023b539ae82b 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -631,6 +631,21 @@ def add_context_length(self, length: int) -> None: def add_embedding_length(self, length: int) -> None: self.add_uint32(Keys.LLM.EMBEDDING_LENGTH.format(arch=self.arch), length) + def add_features_length(self, length: int) -> None: + self.add_uint32(Keys.LLM.FEATURES_LENGTH.format(arch=self.arch), length) + + def add_posnet_embedding_length(self, length: int) -> None: + self.add_uint32(Keys.PosNet.EMBEDDING_LENGTH.format(arch=self.arch), length) + + def add_posnet_block_count(self, length: int) -> None: + self.add_uint32(Keys.PosNet.BLOCK_COUNT.format(arch=self.arch), length) + + def add_convnext_embedding_length(self, length: int) -> None: + self.add_uint32(Keys.ConvNext.EMBEDDING_LENGTH.format(arch=self.arch), length) + + def add_convnext_block_count(self, length: int) -> None: + self.add_uint32(Keys.ConvNext.BLOCK_COUNT.format(arch=self.arch), length) + def add_block_count(self, length: int) -> None: self.add_uint32(Keys.LLM.BLOCK_COUNT.format(arch=self.arch), length) @@ -727,6 +742,12 @@ def add_layer_norm_eps(self, value: float) -> None: def add_layer_norm_rms_eps(self, value: float) -> None: self.add_float32(Keys.Attention.LAYERNORM_RMS_EPS.format(arch=self.arch), value) + def add_group_norm_eps(self, value: float) -> None: + self.add_float32(Keys.Attention.GROUPNORM_EPS.format(arch=self.arch), value) + + def add_group_norm_groups(self, value: int) -> None: + self.add_uint32(Keys.Attention.GROUPNORM_GROUPS.format(arch=self.arch), value) + def add_causal_attention(self, value: bool) -> None: self.add_bool(Keys.Attention.CAUSAL.format(arch=self.arch), value) @@ -751,6 +772,9 @@ def add_pooling_type(self, value: PoolingType) -> None: def add_rope_dimension_count(self, count: int) -> None: self.add_uint32(Keys.Rope.DIMENSION_COUNT.format(arch=self.arch), count) + def add_rope_dimension_sections(self, dims: Sequence[int]) -> None: + self.add_array(Keys.Rope.DIMENSION_SECTIONS.format(arch=self.arch), dims) + def add_rope_freq_base(self, value: float) -> None: self.add_float32(Keys.Rope.FREQ_BASE.format(arch=self.arch), value) diff --git a/gguf-py/gguf/metadata.py b/gguf-py/gguf/metadata.py index 321cbcd4c5507..962c27b204464 100644 --- a/gguf-py/gguf/metadata.py +++ b/gguf-py/gguf/metadata.py @@ -545,7 +545,10 @@ def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter): gguf_writer.add_size_label(self.size_label) if self.license is not None: - gguf_writer.add_license(self.license) + if isinstance(self.license, list): + gguf_writer.add_license(",".join(self.license)) + else: + gguf_writer.add_license(self.license) if self.license_name is not None: gguf_writer.add_license_name(self.license_name) if 
self.license_link is not None: diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index f4a787c56993a..82cdb121a1f26 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -13,7 +13,7 @@ class TensorNameMap: "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais exaone "transformer.word_embeddings", # falcon "word_embeddings", # bloom - "model.embed_tokens", # llama-hf nemotron olmoe + "model.embed_tokens", # llama-hf nemotron olmoe olmo2 "tok_embeddings", # llama-pth "embeddings.word_embeddings", # bert nomic-bert "language_model.embedding.word_embeddings", # persimmon @@ -42,6 +42,7 @@ class TensorNameMap: "emb_ln", # nomic-bert "transformer.norm", # openelm "rwkv.blocks.0.pre_ln", # rwkv + "backbone.norm", # wavtokenizer ), # Position embeddings @@ -54,19 +55,20 @@ class TensorNameMap: # Output MODEL_TENSOR.OUTPUT: ( "embed_out", # gptneox - "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe + "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 "output", # llama-pth bloom internlm2 "word_embeddings_for_head", # persimmon "lm_head.linear", # phi2 "output_layer", # chatglm "head", # rwkv + "head.out", # wavtokenizer ), # Output norm MODEL_TENSOR.OUTPUT_NORM: ( "gpt_neox.final_layer_norm", # gptneox "transformer.ln_f", # gpt2 gpt-j falcon jais exaone - "model.norm", # llama-hf baichuan internlm2 olmoe + "model.norm", # llama-hf baichuan internlm2 olmoe olmo2 "norm", # llama-pth "transformer.norm_f", # mpt dbrx "ln_f", # refact bloom qwen gpt2 @@ -80,6 +82,7 @@ class TensorNameMap: "transformer.norm", # openelm "model.norm", # nemotron "rwkv.ln_out", # rwkv + "backbone.final_layer_norm", # wavtokenizer ), # Rope frequencies @@ -90,6 +93,10 @@ class TensorNameMap: MODEL_TENSOR.ROPE_FACTORS_LONG: (), MODEL_TENSOR.ROPE_FACTORS_SHORT: (), + + MODEL_TENSOR.CONV1D: ( + "backbone.embed", # wavtokenizer + ), } block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = { @@ -145,7 +152,8 @@ class TensorNameMap: # Attention query MODEL_TENSOR.ATTN_Q: ( - "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron olmoe + "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.q_proj_no_perm", # llama-custom "layers.{bid}.attention.wq", # llama-pth "encoder.layer.{bid}.attention.self.query", # bert "transformer.h.{bid}.attn.q_proj", # gpt-j @@ -157,7 +165,8 @@ class TensorNameMap: # Attention key MODEL_TENSOR.ATTN_K: ( - "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron olmoe + "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.k_proj_no_perm", # llama-custom "layers.{bid}.attention.wk", # llama-pth "encoder.layer.{bid}.attention.self.key", # bert "transformer.h.{bid}.attn.k_proj", # gpt-j @@ -170,7 +179,7 @@ class TensorNameMap: # Attention value MODEL_TENSOR.ATTN_V: ( - "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron olmoe + "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron olmoe olmo2 "layers.{bid}.attention.wv", # llama-pth "encoder.layer.{bid}.attention.self.value", # bert "transformer.h.{bid}.attn.v_proj", # gpt-j @@ -188,7 +197,7 @@ class TensorNameMap: "transformer.blocks.{bid}.attn.out_proj", # mpt "transformer.h.{bid}.self_attention.dense", # falcon "h.{bid}.self_attention.dense", # bloom - "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron olmoe + "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron olmoe olmo2 
"layers.{bid}.attention.wo", # llama-pth "encoder.layer.{bid}.attention.output.dense", # bert "transformer.h.{bid}.attn.out_proj", # gpt-j @@ -215,7 +224,7 @@ class TensorNameMap: ), MODEL_TENSOR.ATTN_POST_NORM: ( - "model.layers.{bid}.post_attention_layernorm", # gemma2 + "model.layers.{bid}.post_attention_layernorm", # gemma2 olmo2 ), # Rotary embeddings @@ -250,7 +259,7 @@ class TensorNameMap: # Post feed-forward norm MODEL_TENSOR.FFN_POST_NORM: ( - "model.layers.{bid}.post_feedforward_layernorm", # gemma2 + "model.layers.{bid}.post_feedforward_layernorm", # gemma2 olmo2 ), MODEL_TENSOR.FFN_GATE_INP: ( @@ -273,7 +282,7 @@ class TensorNameMap: "transformer.blocks.{bid}.ffn.up_proj", # mpt "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon "h.{bid}.mlp.dense_h_to_4h", # bloom - "model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron + "model.layers.{bid}.mlp.up_proj", # llama-hf refact nemotron olmo2 "layers.{bid}.feed_forward.w3", # llama-pth "encoder.layer.{bid}.intermediate.dense", # bert "transformer.h.{bid}.mlp.fc_in", # gpt-j @@ -304,7 +313,7 @@ class TensorNameMap: MODEL_TENSOR.FFN_UP_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe - "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek2 + "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek deepseek2 ), # AWQ-activation gate @@ -314,7 +323,7 @@ class TensorNameMap: # Feed-forward gate MODEL_TENSOR.FFN_GATE: ( - "model.layers.{bid}.mlp.gate_proj", # llama-hf refact + "model.layers.{bid}.mlp.gate_proj", # llama-hf refact olmo2 "layers.{bid}.feed_forward.w1", # llama-pth "transformer.h.{bid}.mlp.w2", # qwen "transformer.h.{bid}.mlp.c_fc2", # jais @@ -336,7 +345,7 @@ class TensorNameMap: MODEL_TENSOR.FFN_GATE_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe - "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek2 + "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek deepseek2 ), # Feed-forward down @@ -346,7 +355,7 @@ class TensorNameMap: "transformer.blocks.{bid}.ffn.down_proj", # mpt "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon "h.{bid}.mlp.dense_4h_to_h", # bloom - "model.layers.{bid}.mlp.down_proj", # llama-hf nemotron + "model.layers.{bid}.mlp.down_proj", # llama-hf nemotron olmo2 "layers.{bid}.feed_forward.w2", # llama-pth "encoder.layer.{bid}.output.dense", # bert "transformer.h.{bid}.mlp.fc_out", # gpt-j @@ -377,13 +386,13 @@ class TensorNameMap: MODEL_TENSOR.FFN_DOWN_SHEXP: ( "model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe - "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek2 + "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek deepseek2 ), MODEL_TENSOR.ATTN_Q_NORM: ( "language_model.encoder.layers.{bid}.self_attention.q_layernorm", "model.layers.{bid}.self_attn.q_layernorm", # persimmon - "model.layers.{bid}.self_attn.q_norm", # cohere olmoe chameleon + "model.layers.{bid}.self_attn.q_norm", # cohere olmoe chameleon olmo2 "transformer.blocks.{bid}.attn.q_ln", # sea-lion "encoder.layer.{bid}.attention.self.layer_norm_q", # jina-bert-v2 "transformer.layers.{bid}.attn.q_norm", # openelm @@ -392,7 +401,7 @@ class TensorNameMap: MODEL_TENSOR.ATTN_K_NORM: ( "language_model.encoder.layers.{bid}.self_attention.k_layernorm", "model.layers.{bid}.self_attn.k_layernorm", # persimmon - "model.layers.{bid}.self_attn.k_norm", # cohere olmoe chameleon + "model.layers.{bid}.self_attn.k_norm", # cohere olmoe chameleon olmo2 "transformer.blocks.{bid}.attn.k_ln", # sea-lion "encoder.layer.{bid}.attention.self.layer_norm_k", # 
jina-bert-v2 "transformer.layers.{bid}.attn.k_norm", # openelm @@ -679,6 +688,8 @@ class TensorNameMap: "encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5 ), + ############################################################################ + # TODO: these do not belong to block_mappings_cfg - move them to mappings_cfg MODEL_TENSOR.ENC_OUTPUT_NORM: ( "encoder.final_layer_norm", # t5 ), @@ -691,6 +702,67 @@ class TensorNameMap: MODEL_TENSOR.CLS_OUT: ( "classifier.out_proj", # roberta ), + ############################################################################# + + MODEL_TENSOR.CONVNEXT_DW: ( + "backbone.convnext.{bid}.dwconv", # wavtokenizer + ), + + MODEL_TENSOR.CONVNEXT_NORM: ( + "backbone.convnext.{bid}.norm", # wavtokenizer + ), + + MODEL_TENSOR.CONVNEXT_PW1: ( + "backbone.convnext.{bid}.pwconv1", # wavtokenizer + ), + + MODEL_TENSOR.CONVNEXT_PW2: ( + "backbone.convnext.{bid}.pwconv2", # wavtokenizer + ), + + MODEL_TENSOR.CONVNEXT_GAMMA: ( + "backbone.convnext.{bid}.gamma", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_CONV1: ( + "backbone.posnet.{bid}.conv1", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_CONV2: ( + "backbone.posnet.{bid}.conv2", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_NORM: ( + "backbone.posnet.{bid}.norm", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_NORM1: ( + "backbone.posnet.{bid}.norm1", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_NORM2: ( + "backbone.posnet.{bid}.norm2", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_ATTN_NORM: ( + "backbone.posnet.{bid}.norm", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_ATTN_Q: ( + "backbone.posnet.{bid}.q", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_ATTN_K: ( + "backbone.posnet.{bid}.k", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_ATTN_V: ( + "backbone.posnet.{bid}.v", # wavtokenizer + ), + + MODEL_TENSOR.POSNET_ATTN_OUT: ( + "backbone.posnet.{bid}.proj_out", # wavtokenizer + ), } # architecture-specific block mappings diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index 33cfe26b7fe30..9c39562560c9e 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gguf" -version = "0.10.0" +version = "0.13.0" description = "Read and write ML models in GGUF for GGML" authors = ["GGML <ggml@ggml.ai>"] packages = [ diff --git a/gguf-py/tests/test_quants.py b/gguf-py/tests/test_quants.py index 762067814224e..f04d5acce2793 100755 --- a/gguf-py/tests/test_quants.py +++ b/gguf-py/tests/test_quants.py @@ -136,7 +136,7 @@ def compare_tensors(t1: np.ndarray, t2: np.ndarray, qtype: GGMLQuantizationType) logger.debug(f"Sample bad block ({diff_bits[bad_block_id]} differing bits):\n{t1[bad_block_id]}\nReference:\n{t2[bad_block_id]}") sum_diff_bits = np.sum(diff_bits) - logger.debug(f"{sum_diff_bits} bits differ ({100 * sum_diff_bits/(x.size * 8):.6f}%)") + logger.debug(f"{sum_diff_bits} bits differ ({100 * sum_diff_bits / (x.size * 8):.6f}%)") return False diff --git a/grammars/README.md b/grammars/README.md index 4e57bca5f300b..9769540919f98 100644 --- a/grammars/README.md +++ b/grammars/README.md @@ -46,7 +46,7 @@ Terminals support the full range of Unicode. 
Unicode characters can be specified Character ranges can be negated with `^`: ``` -single-line ::= [^\n]+ "\n"` +single-line ::= [^\n]+ "\n" ``` ## Sequences and Alternatives diff --git a/grammars/english.gbnf b/grammars/english.gbnf new file mode 100644 index 0000000000000..2e53686c82151 --- /dev/null +++ b/grammars/english.gbnf @@ -0,0 +1,6 @@ +# note: this might be incomplete, mostly an example +root ::= en-char+ ([ \t\n] en-char+)* +en-char ::= letter | digit | punctuation +letter ::= [a-zA-Z] +digit ::= [0-9] +punctuation ::= [!"#$%&'()*+,-./:;<=>?@[\\\]^_`{|}~] diff --git a/include/llama-cpp.h b/include/llama-cpp.h new file mode 100644 index 0000000000000..daa04d4d84ac2 --- /dev/null +++ b/include/llama-cpp.h @@ -0,0 +1,25 @@ +#pragma once + +#ifndef __cplusplus +#error "This header is for C++ only" +#endif + +#include <memory> + +#include "llama.h" + +struct llama_model_deleter { + void operator()(llama_model * model) { llama_free_model(model); } +}; + +struct llama_context_deleter { + void operator()(llama_context * context) { llama_free(context); } +}; + +struct llama_sampler_deleter { + void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); } +}; + +typedef std::unique_ptr<llama_model, llama_model_deleter> llama_model_ptr; +typedef std::unique_ptr<llama_context, llama_context_deleter> llama_context_ptr; +typedef std::unique_ptr<llama_sampler, llama_sampler_deleter> llama_sampler_ptr; diff --git a/include/llama.h b/include/llama.h index bc268e7996baa..a4abf395bcd93 100644 --- a/include/llama.h +++ b/include/llama.h @@ -104,12 +104,15 @@ extern "C" { LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24, LLAMA_VOCAB_PRE_TYPE_EXAONE = 25, LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, + LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, }; enum llama_rope_type { - LLAMA_ROPE_TYPE_NONE = -1, - LLAMA_ROPE_TYPE_NORM = 0, - LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX, + LLAMA_ROPE_TYPE_NONE = -1, + LLAMA_ROPE_TYPE_NORM = 0, + LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX, + LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE, + LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION, }; enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file @@ -171,9 +174,9 @@ extern "C" { LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors + //LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // removed from gguf files, use Q4_0 and runtime repack + //LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // removed from gguf files, use Q4_0 and runtime repack + //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors @@ -185,7 +188,8 @@ extern "C" { LLAMA_ROPE_SCALING_TYPE_NONE = 0, LLAMA_ROPE_SCALING_TYPE_LINEAR = 1, LLAMA_ROPE_SCALING_TYPE_YARN = 2, - LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN, + LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3, + LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE, }; enum llama_pooling_type { @@ -272,6 +276,9 @@ extern "C" { }; struct llama_model_params { + // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) + ggml_backend_dev_t * devices; + int32_t n_gpu_layers; // number of layers to store in VRAM enum llama_split_mode split_mode; // how to split the model across multiple GPUs @@ -451,6 +458,7 @@ extern "C" { // Functions to 
access the model's GGUF metadata scalar values // - The functions return the length of the string on success, or -1 on failure // - The output string is always null-terminated and cleared on failure + // - When retrieving a string, an extra byte must be allocated to account for the null terminator // - GGUF array values are not supported by these functions // Get metadata value as a string by key name @@ -474,9 +482,6 @@ extern "C" { // Returns the total number of parameters in the model LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); - // Get a llama model tensor - LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name); - // Returns true if the model contains an encoder that requires llama_encode() call LLAMA_API bool llama_model_has_encoder(const struct llama_model * model); @@ -667,6 +672,9 @@ extern "C" { // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); + // Check if the context supports KV cache shifting + LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); + // // State / sessions // @@ -984,6 +992,9 @@ extern "C" { char * buf, int32_t length); + // Get list of built-in chat templates + LLAMA_API int32_t llama_chat_builtin_templates(const char ** output, size_t len); + // // Sampling API // @@ -1125,16 +1136,12 @@ extern "C" { const char * grammar_str, const char * grammar_root); + /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. LLAMA_API struct llama_sampler * llama_sampler_init_penalties( - int32_t n_vocab, // llama_n_vocab() - llama_token special_eos_id, // llama_token_eos() - llama_token linefeed_id, // llama_token_nl() - int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) - float penalty_repeat, // 1.0 = disabled - float penalty_freq, // 0.0 = disabled - float penalty_present, // 0.0 = disabled - bool penalize_nl, // consider newlines as a repeatable token - bool ignore_eos); // ignore the end-of-sequence token + int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) + float penalty_repeat, // 1.0 = disabled + float penalty_freq, // 0.0 = disabled + float penalty_present); // 0.0 = disabled /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 LLAMA_API struct llama_sampler * llama_sampler_init_dry( diff --git a/models/ggml-vocab-roberta-bpe.gguf.inp b/models/ggml-vocab-roberta-bpe.gguf.inp new file mode 100644 index 0000000000000..9baf7d77ae6b5 --- /dev/null +++ b/models/ggml-vocab-roberta-bpe.gguf.inp @@ -0,0 +1,112 @@ +ied 4 ½ months +__ggml_vocab_test__ +Führer +__ggml_vocab_test__ + +__ggml_vocab_test__ + +__ggml_vocab_test__ + +__ggml_vocab_test__ + +__ggml_vocab_test__ + +__ggml_vocab_test__ + + +__ggml_vocab_test__ + + + +__ggml_vocab_test__ + + + + +__ggml_vocab_test__ + + +__ggml_vocab_test__ +Hello world +__ggml_vocab_test__ + Hello world +__ggml_vocab_test__ +Hello World +__ggml_vocab_test__ + Hello World +__ggml_vocab_test__ + Hello World! +__ggml_vocab_test__ +Hello, world! +__ggml_vocab_test__ + Hello, world! 
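To illustrate the two API changes above, namely the new llama-cpp.h smart-pointer typedefs and the simplified llama_sampler_init_penalties(), here is a minimal sketch; it is not part of the patch itself and assumes the long-standing llama.h entry points (llama_load_model_from_file, llama_new_context_with_model, their *_default_params helpers, the llama_sampler_chain_* functions and llama_sampler_init_greedy):

#include "llama-cpp.h"

// handles wrapped in the new smart-pointer typedefs are released automatically
// on scope exit, so early returns cannot leak the model or the context
static bool can_create_context(const char * path) {
    llama_model_ptr model(llama_load_model_from_file(path, llama_model_default_params()));
    if (!model) {
        return false;  // nothing to free - the unique_ptr is empty
    }

    llama_context_ptr ctx(llama_new_context_with_model(model.get(), llama_context_default_params()));

    return ctx != nullptr;  // ctx, then model, are freed here in reverse order
}

// the simplified penalties sampler no longer takes n_vocab, the special token
// ids or the penalize_nl/ignore_eos flags - only the four penalty parameters
static struct llama_sampler * make_sampler_chain(void) {
    struct llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

    llama_sampler_chain_add(chain, llama_sampler_init_penalties(
        /* penalty_last_n  = */ 64,     // 0 = disable penalty, -1 = context size
        /* penalty_repeat  = */ 1.1f,   // 1.0 = disabled
        /* penalty_freq    = */ 0.0f,   // 0.0 = disabled
        /* penalty_present = */ 0.0f)); // 0.0 = disabled

    llama_sampler_chain_add(chain, llama_sampler_init_greedy());

    return chain;  // free with llama_sampler_free(), or own it via llama_sampler_ptr
}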
+__ggml_vocab_test__ + this is 🦙.cpp +__ggml_vocab_test__ +w048 7tuijk dsdfhu +__ggml_vocab_test__ +нещо на Български +__ggml_vocab_test__ +កាន់តែពិសេសអាចខលចេញ +__ggml_vocab_test__ +🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token) +__ggml_vocab_test__ +Hello +__ggml_vocab_test__ + Hello +__ggml_vocab_test__ + Hello +__ggml_vocab_test__ + Hello +__ggml_vocab_test__ + Hello +__ggml_vocab_test__ + Hello + Hello +__ggml_vocab_test__ + ( +__ggml_vocab_test__ + + = +__ggml_vocab_test__ +' era +__ggml_vocab_test__ +Hello, y'all! How are you 😁 ?我想在apple工作1314151天～ +__ggml_vocab_test__ +!!!!!! +__ggml_vocab_test__ +3 +__ggml_vocab_test__ +33 +__ggml_vocab_test__ +333 +__ggml_vocab_test__ +3333 +__ggml_vocab_test__ +33333 +__ggml_vocab_test__ +333333 +__ggml_vocab_test__ +3333333 +__ggml_vocab_test__ +33333333 +__ggml_vocab_test__ +333333333 +__ggml_vocab_test__ +Cửa Việt +__ggml_vocab_test__ + discards +__ggml_vocab_test__ + + + + + + + + + + + +🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天～ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL +__ggml_vocab_test__ diff --git a/models/ggml-vocab-roberta-bpe.gguf.out b/models/ggml-vocab-roberta-bpe.gguf.out new file mode 100644 index 0000000000000..f181ac3dcc5bc --- /dev/null +++ b/models/ggml-vocab-roberta-bpe.gguf.out @@ -0,0 +1,46 @@ + 2550 204 18430 377 + 597 2768 298 8564 + + 1437 + 1437 1437 + 1437 1437 1437 + 50117 + 50118 + 50140 + 50140 50118 + 50117 50118 + 31414 232 + 20920 232 + 31414 623 + 20920 623 + 20920 623 328 + 31414 6 232 328 + 20920 6 232 328 + 42 16 8103 18164 27 4 49317 + 605 40976 262 10109 18474 385 29 36807 6455 + 36765 25482 22063 23171 34251 18697 10809 26161 18697 3602 22063 27969 40966 25417 15264 26161 24269 36709 41171 35328 + 1376 17772 7471 1376 17772 19002 1376 17772 9085 1376 4333 13859 1376 17772 9357 1376 4333 9264 1376 17772 25448 1376 17772 18400 1376 17772 4333 1376 4333 10172 1376 17772 4333 1376 17772 7258 1376 17772 19002 1376 17772 5782 1376 17772 10172 1376 17772 3726 1376 17772 5782 1376 4333 10172 1376 17772 23171 + 6569 15113 7471 36 21113 43 17841 19002 17 8384 6569 14285 4958 12605 36 34654 2841 4203 354 10146 26511 1070 43 36174 5782 36 8338 21554 14 34 63 308 19233 43 + 31414 + 20920 + 1437 20920 + 1437 1437 20920 + 1437 1437 1437 20920 + 1437 1437 1437 20920 50118 1437 1437 1437 20920 + 36 + 50118 5457 + 108 3567 + 31414 6 1423 108 1250 328 1336 32 47 17841 10172 17487 47876 3602 48617 15264 46537 11423 27326 48494 8210 49233 1558 1570 27761 49429 43251 10809 17772 + 32376 12846 + 246 + 3103 + 25631 + 46152 + 3103 25631 + 46152 3103 + 46152 25631 + 46152 46152 + 46152 3103 25631 + 347 1376 2023 12410 102 16376 1376 2023 6382 90 + 9553 5954 + 50118 1437 50140 1437 50140 50118 1437 50117 1437 50117 50117 1437 50117 50118 1437 1437 50118 1437 1437 1437 50118 1437 1437 1437 1437 50118 1437 1437 1437 1437 1437 50118 6569 15113 7471 36 21113 43 17841 19002 17 8384 6569 14285 4958 12605 36 34654 2841 4203 354 10146 26511 1070 43 36174 5782 8103 18164 27 6569 18164 27 155 2357 30242 155 25631 30242 3103 30242 25631 30242 46152 30242 3103 25631 155 4 246 155 7586 246 155 734 246 25974 17772 7471 1376 17772 19002 
1376 17772 9085 1376 4333 13859 1376 17772 9357 1376 4333 9264 1376 17772 25448 1376 17772 18400 1376 17772 4333 1376 4333 10172 1376 17772 4333 1376 17772 7258 1376 17772 19002 1376 17772 5782 18636 10172 17487 47876 3602 48617 15264 46537 11423 27326 48494 8210 49233 1558 1570 27761 49429 43251 10809 17772 36738 48332 47463 18697 10809 25482 22063 23171 34251 18697 10809 26161 18697 3602 22063 27969 40966 25417 15264 26161 24269 36709 41171 35328 128 49690 108 49972 49519 12905 48149 48149 43796 32376 12846 27282 28749 38 348 57 128 41042 37 18 89 6 128 4629 47 686 116 128 448 45 686 38 581 146 24 6 128 495 47 101 103 6845 116 166 108 30660 10 108 462 574 diff --git a/pocs/CMakeLists.txt b/pocs/CMakeLists.txt index 03e1d2c04be65..d49d14dee4351 100644 --- a/pocs/CMakeLists.txt +++ b/pocs/CMakeLists.txt @@ -8,5 +8,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR}) if (EMSCRIPTEN) else() - add_subdirectory(vdot) + if (NOT GGML_BACKEND_DL) + add_subdirectory(vdot) + endif() endif() diff --git a/pocs/vdot/CMakeLists.txt b/pocs/vdot/CMakeLists.txt index d5405ad2991d1..6235aec1fdade 100644 --- a/pocs/vdot/CMakeLists.txt +++ b/pocs/vdot/CMakeLists.txt @@ -1,9 +1,9 @@ set(TARGET llama-vdot) add_executable(${TARGET} vdot.cpp) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) set(TARGET llama-q8dot) add_executable(${TARGET} q8dot.cpp) target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_11) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/scripts/compare-commits.sh b/scripts/compare-commits.sh index 8b9b1ad39f384..e40d1cc6d988f 100755 --- a/scripts/compare-commits.sh +++ b/scripts/compare-commits.sh @@ -16,15 +16,23 @@ bench_args="${@:3}" rm -f llama-bench.sqlite > /dev/null # to test a backend, call the script with the corresponding environment variable (e.g. GGML_CUDA=1 ./scripts/compare-commits.sh ...) +if [ -n "$GGML_CUDA" ]; then + cmake_opts="-DGGML_CUDA=ON" +fi + +dir="build-bench" + +function run { + rm -fr ${dir} > /dev/null + cmake -B ${dir} -S . $cmake_opts > /dev/null + cmake --build ${dir} -t llama-bench > /dev/null + ${dir}/bin/llama-bench -o sql -oe md $bench_args | sqlite3 llama-bench.sqlite +} git checkout $1 > /dev/null -make clean > /dev/null -make -j$(nproc) $make_opts llama-bench > /dev/null -./llama-bench -o sql -oe md $bench_args | sqlite3 llama-bench.sqlite +run git checkout $2 > /dev/null -make clean > /dev/null -make -j$(nproc) $make_opts llama-bench > /dev/null -./llama-bench -o sql -oe md $bench_args | sqlite3 llama-bench.sqlite +run ./scripts/compare-llama-bench.py -b $1 -c $2 diff --git a/scripts/pod-llama.sh b/scripts/pod-llama.sh deleted file mode 100644 index 6e56e1ed0908c..0000000000000 --- a/scripts/pod-llama.sh +++ /dev/null @@ -1,212 +0,0 @@ -#!/bin/bash -# -# Use this script only on fresh pods (runpod.io)! -# Otherwise, it can break your environment! -# - -if [ -z "$1" ]; then - echo "Usage: $0 " - echo " 0: no models" - echo " 1: tinyllama-1b" - echo " 2: codellama-7b" - echo " 3: codellama-13b" - echo " 4: codellama-34b" - echo " 5: codellama-7b-instruct" - echo " 6: codellama-13b-instruct" - echo " 7: codellama-34b-instruct" - - exit 1 -fi - -set -x - -# setup deps -apt-get update -apt-get install -y git-lfs cmake cmake-curses-gui vim ruby -git-lfs install - -if [ ! 
-d "/workspace" ]; then - ln -sfn $(pwd) /workspace -fi - -# download data -cd /workspace - -# this is useful to git clone repos without doubling the disk size due to .git -git clone https://github.com/iboB/git-lfs-download -ln -sfn /workspace/git-lfs-download/git-lfs-download /usr/local/bin/git-lfs-download - -# llama.cpp -cd /workspace -git clone https://github.com/ggerganov/llama.cpp - -cd llama.cpp - -GGML_CUDA=1 make -j - -ln -sfn /workspace/TinyLlama-1.1B-Chat-v0.3 ./models/tinyllama-1b -ln -sfn /workspace/CodeLlama-7b-hf ./models/codellama-7b -ln -sfn /workspace/CodeLlama-13b-hf ./models/codellama-13b -ln -sfn /workspace/CodeLlama-34b-hf ./models/codellama-34b -ln -sfn /workspace/CodeLlama-7b-Instruct-hf ./models/codellama-7b-instruct -ln -sfn /workspace/CodeLlama-13b-Instruct-hf ./models/codellama-13b-instruct -ln -sfn /workspace/CodeLlama-34b-Instruct-hf ./models/codellama-34b-instruct - -pip install -r requirements.txt - -# cmake -cd /workspace/llama.cpp - -mkdir build-cublas -cd build-cublas - -cmake -DGGML_CUDA=1 ../ -make -j - -if [ "$1" -eq "0" ]; then - exit 0 -fi - -# more models -if [ "$1" -eq "1" ]; then - cd /workspace - - git-lfs-download https://huggingface.co/PY007/TinyLlama-1.1B-Chat-v0.3 - - cd /workspace/llama.cpp - - python3 examples/convert_legacy_llama.py ./models/tinyllama-1b --outfile ./models/tinyllama-1b/ggml-model-f16.gguf --outtype f16 - - ./llama-quantize ./models/tinyllama-1b/ggml-model-f16.gguf ./models/tinyllama-1b/ggml-model-q4_0.gguf q4_0 - ./llama-quantize ./models/tinyllama-1b/ggml-model-f16.gguf ./models/tinyllama-1b/ggml-model-q4_k.gguf q4_k - ./llama-quantize ./models/tinyllama-1b/ggml-model-f16.gguf ./models/tinyllama-1b/ggml-model-q8_0.gguf q8_0 -fi - -if [ "$1" -eq "2" ]; then - cd /workspace - - git-lfs-download https://huggingface.co/codellama/CodeLlama-7b-hf --without *safetensors* - rm -v ./CodeLlama-7b-hf/*safetensors* - - cd /workspace/llama.cpp - - python3 examples/convert_legacy_llama.py ./models/codellama-7b --outfile ./models/codellama-7b/ggml-model-f16.gguf --outtype f16 - - ./llama-quantize ./models/codellama-7b/ggml-model-f16.gguf ./models/codellama-7b/ggml-model-q4_0.gguf q4_0 - ./llama-quantize ./models/codellama-7b/ggml-model-f16.gguf ./models/codellama-7b/ggml-model-q4_k.gguf q4_k - ./llama-quantize ./models/codellama-7b/ggml-model-f16.gguf ./models/codellama-7b/ggml-model-q8_0.gguf q8_0 -fi - -if [ "$1" -eq "3" ]; then - cd /workspace - - git-lfs-download https://huggingface.co/codellama/CodeLlama-13b-hf --without *safetensors* - rm -v ./CodeLlama-13b-hf/*safetensors* - - cd /workspace/llama.cpp - - python3 examples/convert_legacy_llama.py ./models/codellama-13b --outfile ./models/codellama-13b/ggml-model-f16.gguf --outtype f16 - - ./llama-quantize ./models/codellama-13b/ggml-model-f16.gguf ./models/codellama-13b/ggml-model-q4_0.gguf q4_0 - ./llama-quantize ./models/codellama-13b/ggml-model-f16.gguf ./models/codellama-13b/ggml-model-q4_k.gguf q4_k - ./llama-quantize ./models/codellama-13b/ggml-model-f16.gguf ./models/codellama-13b/ggml-model-q8_0.gguf q8_0 -fi - -if [ "$1" -eq "4" ]; then - cd /workspace - - git-lfs-download https://huggingface.co/codellama/CodeLlama-34b-hf --without *safetensors* - rm -v ./CodeLlama-34b-hf/*safetensors* - - cd /workspace/llama.cpp - - python3 examples/convert_legacy_llama.py ./models/codellama-34b --outfile ./models/codellama-34b/ggml-model-f16.gguf --outtype f16 - - ./llama-quantize ./models/codellama-34b/ggml-model-f16.gguf ./models/codellama-34b/ggml-model-q4_0.gguf q4_0 - 
./llama-quantize ./models/codellama-34b/ggml-model-f16.gguf ./models/codellama-34b/ggml-model-q4_k.gguf q4_k - ./llama-quantize ./models/codellama-34b/ggml-model-f16.gguf ./models/codellama-34b/ggml-model-q8_0.gguf q8_0 -fi - -if [ "$1" -eq "5" ]; then - cd /workspace - - git-lfs-download https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf --without *safetensors* - rm -v ./CodeLlama-7b-Instruct-hf/*safetensors* - - cd /workspace/llama.cpp - - python3 examples/convert_legacy_llama.py ./models/codellama-7b-instruct --outfile ./models/codellama-7b-instruct/ggml-model-f16.gguf --outtype f16 - - ./llama-quantize ./models/codellama-7b-instruct/ggml-model-f16.gguf ./models/codellama-7b-instruct/ggml-model-q4_0.gguf q4_0 - ./llama-quantize ./models/codellama-7b-instruct/ggml-model-f16.gguf ./models/codellama-7b-instruct/ggml-model-q4_k.gguf q4_k - ./llama-quantize ./models/codellama-7b-instruct/ggml-model-f16.gguf ./models/codellama-7b-instruct/ggml-model-q8_0.gguf q8_0 -fi - -if [ "$1" -eq "6" ]; then - cd /workspace - - git-lfs-download https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf --without *safetensors* - rm -v ./CodeLlama-13b-Instruct-hf/*safetensors* - - cd /workspace/llama.cpp - - python3 examples/convert_legacy_llama.py ./models/codellama-13b-instruct --outfile ./models/codellama-13b-instruct/ggml-model-f16.gguf --outtype f16 - - ./llama-quantize ./models/codellama-13b-instruct/ggml-model-f16.gguf ./models/codellama-13b-instruct/ggml-model-q4_0.gguf q4_0 - ./llama-quantize ./models/codellama-13b-instruct/ggml-model-f16.gguf ./models/codellama-13b-instruct/ggml-model-q4_k.gguf q4_k - ./llama-quantize ./models/codellama-13b-instruct/ggml-model-f16.gguf ./models/codellama-13b-instruct/ggml-model-q8_0.gguf q8_0 -fi - -if [ "$1" -eq "7" ]; then - cd /workspace - - git-lfs-download https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf --without *safetensors* - rm -v ./CodeLlama-34b-Instruct-hf/*safetensors* - - cd /workspace/llama.cpp - - python3 examples/convert_legacy_llama.py ./models/codellama-34b-instruct --outfile ./models/codellama-34b-instruct/ggml-model-f16.gguf --outtype f16 - - ./llama-quantize ./models/codellama-34b-instruct/ggml-model-f16.gguf ./models/codellama-34b-instruct/ggml-model-q4_0.gguf q4_0 - ./llama-quantize ./models/codellama-34b-instruct/ggml-model-f16.gguf ./models/codellama-34b-instruct/ggml-model-q4_k.gguf q4_k - ./llama-quantize ./models/codellama-34b-instruct/ggml-model-f16.gguf ./models/codellama-34b-instruct/ggml-model-q8_0.gguf q8_0 -fi - -if [ "$1" -eq "1" ]; then - # perf + perplexity - cd /workspace/llama.cpp/build-cublas - - make -j && ../scripts/run-all-perf.sh tinyllama-1b "f16" "-ngl 99 -t 1 -p 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,32,64,128,256,512,1024,2048 -n 128" - - ../scripts/get-wikitext-2.sh - unzip wikitext-2-raw-v1.zip - - make -j && ./bin/llama-perplexity -m ../models/tinyllama-1b/ggml-model-f16.gguf -f ./wikitext-2-raw/wiki.test.raw -ngl 100 --chunks 32 - - # batched - cd /workspace/llama.cpp - - GGML_CUDA=1 make -j && ./llama-batched ./models/tinyllama-1b/ggml-model-f16.gguf "Hello, my name is" 8 128 999 - - # batched-bench - cd /workspace/llama.cpp - - GGML_CUDA=1 make -j && ./llama-batched-bench ./models/tinyllama-1b/ggml-model-f16.gguf 4608 1 99 0 512 128 1,2,3,4,5,6,7,8,16,32 - - # parallel - cd /workspace/llama.cpp - - GGML_CUDA=1 make -j && ./llama-parallel -m ./models/tinyllama-1b/ggml-model-f16.gguf -t 1 -ngl 100 -c 4096 -b 512 -s 1 -np 8 -ns 128 -n 100 -cb - -fi - -# speculative -#if [ "$1" -eq "7" ]; then -# 
cd /workspace/llama.cpp -# -# GGML_CUDA=1 make -j && ./llama-speculative -m ./models/codellama-34b-instruct/ggml-model-f16.gguf -md ./models/codellama-7b-instruct/ggml-model-q4_0.gguf -p "# Dijkstra's shortest path algorithm in Python (4 spaces indentation) + complexity analysis:\n\n" -e -ngl 999 -ngld 999 -t 4 -n 512 -c 4096 -s 21 --draft 16 -np 1 --temp 0.0 -#fi - -# more benches -#GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-7b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1 -#GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-13b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1 diff --git a/scripts/server-llm.sh b/scripts/server-llm.sh deleted file mode 100644 index 802592a3e0d3b..0000000000000 --- a/scripts/server-llm.sh +++ /dev/null @@ -1,418 +0,0 @@ -#!/bin/bash -# -# Helper script for deploying llama.cpp server with a single Bash command -# -# - Works on Linux and macOS -# - Supports: CPU, CUDA, Metal -# - Can run all GGUF models from HuggingFace -# - Can serve requests in parallel -# - Always builds latest llama.cpp from GitHub -# -# Limitations -# -# - Chat templates are poorly supported (base models recommended) -# - Might be unstable! -# -# Usage: -# ./server-llm.sh [--port] [--repo] [--wtype] [--backend] [--gpu-id] [--n-parallel] [--n-kv] [--verbose] [-non-interactive] -# -# --port: port number, default is 8888 -# --repo: path to a repo containing GGUF model files -# --wtype: weights type (f16, q8_0, q4_0, q4_1), default is user-input -# --backend: cpu, cuda, metal, depends on the OS -# --gpu-id: gpu id, default is 0 -# --n-parallel: number of parallel requests, default is 8 -# --n-kv: KV cache size, default is 4096 -# --verbose: verbose output -# --non-interactive: run without asking a permission to run -# -# Example: -# -# bash -c "$(curl -s https://ggml.ai/server-llm.sh)" -# - -set -e - -# required utils: curl, git, make -if ! command -v curl &> /dev/null; then - printf "[-] curl not found\n" - exit 1 -fi -if ! command -v git &> /dev/null; then - printf "[-] git not found\n" - exit 1 -fi -if ! 
command -v make &> /dev/null; then - printf "[-] make not found\n" - exit 1 -fi - -# parse arguments -is_interactive=1 -port=8888 -repo="" -wtype="" -backend="cpu" - -# if macOS, use metal backend by default -if [[ "$OSTYPE" == "darwin"* ]]; then - backend="metal" -elif command -v nvcc &> /dev/null; then - backend="cuda" -fi - -gpu_id=0 -n_parallel=8 -n_kv=4096 -verbose=0 - -function print_usage { - printf "Usage:\n" - printf " ./server-llm.sh [--port] [--repo] [--wtype] [--backend] [--gpu-id] [--n-parallel] [--n-kv] [--verbose] [-non-interactive]\n\n" - printf " --port: port number, default is 8888\n" - printf " --repo: path to a repo containing GGUF model files\n" - printf " --wtype: weights type (f16, q8_0, q4_0, q4_1), default is user-input\n" - printf " --backend: cpu, cuda, metal, depends on the OS\n" - printf " --gpu-id: gpu id, default is 0\n" - printf " --n-parallel: number of parallel requests, default is 8\n" - printf " --n-kv: KV cache size, default is 4096\n" - printf " --verbose: verbose output\n\n" - printf " --non-interactive: run without asking a permission to run\n" - printf "Example:\n\n" - printf ' bash -c "$(curl -s https://ggml.ai/server-llm.sh)"\n\n' -} - -while [[ $# -gt 0 ]]; do - key="$1" - case $key in - --non-interactive) - is_interactive=0 - shift - ;; - --port) - port="$2" - shift - shift - ;; - --repo) - repo="$2" - shift - shift - ;; - --wtype) - wtype="$2" - shift - shift - ;; - --backend) - backend="$2" - shift - shift - ;; - --gpu-id) - gpu_id="$2" - shift - shift - ;; - --n-parallel) - n_parallel="$2" - shift - shift - ;; - --n-kv) - n_kv="$2" - shift - shift - ;; - --verbose) - verbose=1 - shift - ;; - --help) - print_usage - exit 0 - ;; - *) - echo "Unknown argument: $key" - print_usage - exit 1 - ;; - esac -done - -# available weights types -wtypes=("F16" "Q8_0" "Q4_0" "Q4_1" "Q5_0" "Q5_1" "Q6_K" "Q5_K_M" "Q5_K_S" "Q4_K_M" "Q4_K_S" "Q3_K_L" "Q3_K_M" "Q3_K_S" "Q2_K") - -wfiles=() -for wt in "${wtypes[@]}"; do - wfiles+=("") -done - -# map wtype input to index -if [[ ! -z "$wtype" ]]; then - iw=-1 - is=0 - for wt in "${wtypes[@]}"; do - # uppercase - uwt=$(echo "$wt" | tr '[:lower:]' '[:upper:]') - if [[ "$uwt" == "$wtype" ]]; then - iw=$is - break - fi - is=$((is+1)) - done - - if [[ $iw -eq -1 ]]; then - printf "[-] Invalid weight type: %s\n" "$wtype" - exit 1 - fi - - wtype="$iw" -fi - -# sample repos -repos=( - "https://huggingface.co/TheBloke/Llama-2-7B-GGUF" - "https://huggingface.co/TheBloke/Llama-2-13B-GGUF" - "https://huggingface.co/TheBloke/Llama-2-70B-GGUF" - "https://huggingface.co/TheBloke/CodeLlama-7B-GGUF" - "https://huggingface.co/TheBloke/CodeLlama-13B-GGUF" - "https://huggingface.co/TheBloke/CodeLlama-34B-GGUF" - "https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF" - "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF" - "https://huggingface.co/TheBloke/OpenHermes-2-Mistral-7B-GGUF" - "https://huggingface.co/TheBloke/CausalLM-7B-GGUF" -) -if [ $is_interactive -eq 1 ]; then - printf "\n" - printf "[I] This is a helper script for deploying llama.cpp's server on this machine.\n\n" - printf " Based on the options that follow, the script might download a model file\n" - printf " from the internet, which can be a few GBs in size. 
The script will also\n" - printf " build the latest llama.cpp source code from GitHub, which can be unstable.\n" - printf "\n" - printf " Upon success, an HTTP server will be started and it will serve the selected\n" - printf " model using llama.cpp for demonstration purposes.\n" - printf "\n" - printf " Please note:\n" - printf "\n" - printf " - All new data will be stored in the current folder\n" - printf " - The server will be listening on all network interfaces\n" - printf " - The server will run with default settings which are not always optimal\n" - printf " - Do not judge the quality of a model based on the results from this script\n" - printf " - Do not use this script to benchmark llama.cpp\n" - printf " - Do not use this script in production\n" - printf " - This script is only for demonstration purposes\n" - printf "\n" - printf " If you don't know what you are doing, please press Ctrl-C to abort now\n" - printf "\n" - printf " Press Enter to continue ...\n\n" - - read -fi - -if [[ -z "$repo" ]]; then - printf "[+] No repo provided from the command line\n" - printf " Please select a number from the list below or enter an URL:\n\n" - - is=0 - for r in "${repos[@]}"; do - printf " %2d) %s\n" $is "$r" - is=$((is+1)) - done - - # ask for repo until index of sample repo is provided or an URL - while [[ -z "$repo" ]]; do - printf "\n Or choose one from: https://huggingface.co/models?sort=trending&search=gguf\n\n" - read -p "[+] Select repo: " repo - - # check if the input is a number - if [[ "$repo" =~ ^[0-9]+$ ]]; then - if [[ "$repo" -ge 0 && "$repo" -lt ${#repos[@]} ]]; then - repo="${repos[$repo]}" - else - printf "[-] Invalid repo index: %s\n" "$repo" - repo="" - fi - elif [[ "$repo" =~ ^https?:// ]]; then - repo="$repo" - else - printf "[-] Invalid repo URL: %s\n" "$repo" - repo="" - fi - done -fi - -# remove suffix -repo=$(echo "$repo" | sed -E 's/\/tree\/main$//g') - -printf "[+] Checking for GGUF model files in %s\n" "$repo" - -# find GGUF files in the source -# TODO: better logic -model_tree="${repo%/}/tree/main" -model_files=$(curl -s "$model_tree" | grep -i "\\.gguf" | sed -E 's/.*(.*)<\/span><\/a>/\1/g') - -# list all files in the provided git repo -printf "[+] Model files:\n\n" -for file in $model_files; do - # determine iw by grepping the filename with wtypes - iw=-1 - is=0 - for wt in "${wtypes[@]}"; do - # uppercase - ufile=$(echo "$file" | tr '[:lower:]' '[:upper:]') - if [[ "$ufile" =~ "$wt" ]]; then - iw=$is - break - fi - is=$((is+1)) - done - - if [[ $iw -eq -1 ]]; then - continue - fi - - wfiles[$iw]="$file" - - have=" " - if [[ -f "$file" ]]; then - have="*" - fi - - printf " %2d) %s %s\n" $iw "$have" "$file" -done - -wfile="${wfiles[$wtype]}" - -# ask for weights type until provided and available -while [[ -z "$wfile" ]]; do - printf "\n" - read -p "[+] Select weight type: " wtype - wfile="${wfiles[$wtype]}" - - if [[ -z "$wfile" ]]; then - printf "[-] Invalid weight type: %s\n" "$wtype" - wtype="" - fi -done - -printf "[+] Selected weight type: %s (%s)\n" "$wtype" "$wfile" - -url="${repo%/}/resolve/main/$wfile" - -# check file if the model has been downloaded before -chk="$wfile.chk" - -# check if we should download the file -# - if $wfile does not exist -# - if $wfile exists but $chk does not exist -# - if $wfile exists and $chk exists but $wfile is newer than $chk -# TODO: better logic using git lfs info - -do_download=0 - -if [[ ! -f "$wfile" ]]; then - do_download=1 -elif [[ ! 
-f "$chk" ]]; then - do_download=1 -elif [[ "$wfile" -nt "$chk" ]]; then - do_download=1 -fi - -if [[ $do_download -eq 1 ]]; then - printf "[+] Downloading weights from %s\n" "$url" - - # download the weights file - curl -o "$wfile" -# -L "$url" - - # create a check file if successful - if [[ $? -eq 0 ]]; then - printf "[+] Creating check file %s\n" "$chk" - touch "$chk" - fi -else - printf "[+] Using cached weights %s\n" "$wfile" -fi - -# get latest llama.cpp and build - -printf "[+] Downloading latest llama.cpp\n" - -llama_cpp_dir="__llama_cpp_port_${port}__" - -if [[ -d "$llama_cpp_dir" && ! -f "$llama_cpp_dir/__ggml_script__" ]]; then - # if the dir exists and there isn't a file "__ggml_script__" in it, abort - printf "[-] Directory %s already exists\n" "$llama_cpp_dir" - printf "[-] Please remove it and try again\n" - exit 1 -elif [[ -d "$llama_cpp_dir" ]]; then - printf "[+] Directory %s already exists\n" "$llama_cpp_dir" - printf "[+] Using cached llama.cpp\n" - - cd "$llama_cpp_dir" - git reset --hard - git fetch - git checkout origin/master - - cd .. -else - printf "[+] Cloning llama.cpp\n" - - git clone https://github.com/ggerganov/llama.cpp "$llama_cpp_dir" -fi - -# mark that that the directory is made by this script -touch "$llama_cpp_dir/__ggml_script__" - -if [[ $verbose -eq 1 ]]; then - set -x -fi - -# build -cd "$llama_cpp_dir" - -make clean - -log="--silent" -if [[ $verbose -eq 1 ]]; then - log="" -fi - -if [[ "$backend" == "cuda" ]]; then - printf "[+] Building with CUDA backend\n" - GGML_CUDA=1 make -j llama-server $log -elif [[ "$backend" == "cpu" ]]; then - printf "[+] Building with CPU backend\n" - make -j llama-server $log -elif [[ "$backend" == "metal" ]]; then - printf "[+] Building with Metal backend\n" - make -j llama-server $log -else - printf "[-] Unknown backend: %s\n" "$backend" - exit 1 -fi - -# run the server - -printf "[+] Running server\n" - -args="" -if [[ "$backend" == "cuda" ]]; then - export CUDA_VISIBLE_DEVICES=$gpu_id - args="-ngl 999" -elif [[ "$backend" == "cpu" ]]; then - args="-ngl 0" -elif [[ "$backend" == "metal" ]]; then - args="-ngl 999" -else - printf "[-] Unknown backend: %s\n" "$backend" - exit 1 -fi - -if [[ $verbose -eq 1 ]]; then - args="$args --verbose" -fi - -./llama-server -m "../$wfile" --host 0.0.0.0 --port "$port" -c $n_kv -np "$n_parallel" $args - -exit 0 diff --git a/scripts/sync-ggml-am.sh b/scripts/sync-ggml-am.sh index d0815cf89e2f5..8cf25b77f98ea 100755 --- a/scripts/sync-ggml-am.sh +++ b/scripts/sync-ggml-am.sh @@ -73,7 +73,6 @@ while read c; do src/ggml*.h \ src/ggml*.c \ src/ggml*.cpp \ - src/ggml-amx/* \ src/ggml-blas/* \ src/ggml-cann/* \ src/ggml-cpu/* \ @@ -124,7 +123,6 @@ if [ -f $SRC_LLAMA/ggml-src.patch ]; then # src/ggml*.c -> ggml/src/ggml*.c # src/ggml*.cpp -> ggml/src/ggml*.cpp # src/ggml*.h -> ggml/src/ggml*.h - # src/ggml-amx/* -> ggml/src/ggml-amx/* # src/ggml-blas/* -> ggml/src/ggml-blas/* # src/ggml-cann/* -> ggml/src/ggml-cann/* # src/ggml-cpu/* -> ggml/src/ggml-cpu/* @@ -151,7 +149,6 @@ if [ -f $SRC_LLAMA/ggml-src.patch ]; then -e 's/([[:space:]]|[ab]\/)src\/ggml(.*)\.c/\1ggml\/src\/ggml\2.c/g' \ -e 's/([[:space:]]|[ab]\/)src\/ggml(.*)\.cpp/\1ggml\/src\/ggml\2.cpp/g' \ -e 's/([[:space:]]|[ab]\/)src\/ggml(.*)\.h/\1ggml\/src\/ggml\2.h/g' \ - -e 's/([[:space:]]|[ab]\/)src\/ggml-amx\//\1ggml\/src\/ggml-amx\//g' \ -e 's/([[:space:]]|[ab]\/)src\/ggml-blas\//\1ggml\/src\/ggml-blas\//g' \ -e 's/([[:space:]]|[ab]\/)src\/ggml-cann\//\1ggml\/src\/ggml-cann\//g' \ -e 
's/([[:space:]]|[ab]\/)src\/ggml-cpu\//\1ggml\/src\/ggml-cpu\//g' \ diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index 6ddb71ab133bf..b4ac38bbf4669 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -9d0708e863f3aa2fc1eb0b75d433303c30bd0dbc +e6d93f40dffe8733d5d72f1d8fa6b3ca27ae899f diff --git a/scripts/sync-ggml.sh b/scripts/sync-ggml.sh index 000270afbfd1e..f81615bb6b76d 100755 --- a/scripts/sync-ggml.sh +++ b/scripts/sync-ggml.sh @@ -7,7 +7,6 @@ cp -rpv ../ggml/cmake/FindSIMD.cmake ./ggml/cmake/FindSIMD.cmake cp -rpv ../ggml/src/ggml*.c ./ggml/src/ cp -rpv ../ggml/src/ggml*.cpp ./ggml/src/ cp -rpv ../ggml/src/ggml*.h ./ggml/src/ -cp -rpv ../ggml/src/ggml-amx/* ./ggml/src/ggml-amx/ cp -rpv ../ggml/src/ggml-blas/* ./ggml/src/ggml-blas/ cp -rpv ../ggml/src/ggml-cann/* ./ggml/src/ggml-cann/ cp -rpv ../ggml/src/ggml-cpu/* ./ggml/src/ggml-cpu/ diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a86624750305e..2d3ea09945790 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,9 +1,4 @@ -# TODO: should not use this -if (WIN32) - if (BUILD_SHARED_LIBS) - set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON) - endif() -endif() +llama_add_compile_flags() # # libraries @@ -23,7 +18,7 @@ add_library(llama ) target_include_directories(llama PUBLIC . ../include) -target_compile_features (llama PUBLIC cxx_std_11) # don't bump +target_compile_features (llama PUBLIC cxx_std_17) # don't bump target_link_libraries(llama PUBLIC ggml) diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp index 74e9f64b393b2..76d0cb3a2ff78 100644 --- a/src/llama-grammar.cpp +++ b/src/llama-grammar.cpp @@ -822,15 +822,11 @@ llama_grammar_stacks & llama_grammar_get_stacks(struct llama_grammar * grammar) return grammar->stacks; } -void llama_grammar_accept( - const llama_grammar_rules & rules, - const llama_grammar_stacks & stacks, - const uint32_t chr, - llama_grammar_stacks & stacks_new) { - stacks_new.clear(); - stacks_new.reserve(stacks.size()); +void llama_grammar_accept(struct llama_grammar * grammar, uint32_t chr) { + llama_grammar_stacks stacks_new; + stacks_new.reserve(grammar->stacks.size()); - for (const auto & stack : stacks) { + for (const auto & stack : grammar->stacks) { if (stack.empty()) { continue; } @@ -844,9 +840,11 @@ void llama_grammar_accept( if (!llama_grammar_is_end_of_sequence(pos)) { new_stack.push_back(pos); } - llama_grammar_advance_stack(rules, new_stack, stacks_new); + llama_grammar_advance_stack(grammar->rules, new_stack, stacks_new); } } + + grammar->stacks = std::move(stacks_new); } llama_grammar_candidates llama_grammar_reject_candidates_for_stack( @@ -1051,7 +1049,12 @@ void llama_grammar_free_impl(struct llama_grammar * grammar) { } struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & grammar) { - llama_grammar * result = new llama_grammar { grammar.vocab, grammar.rules, grammar.stacks, grammar.partial_utf8, }; + llama_grammar * result = new llama_grammar { + grammar.vocab, + grammar.rules, + grammar.stacks, + grammar.partial_utf8, + }; // redirect elements in stacks to point to new rules for (size_t is = 0; is < result->stacks.size(); is++) { @@ -1059,7 +1062,7 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra for (size_t ir0 = 0; ir0 < grammar.rules.size(); ir0++) { for (size_t ir1 = 0; ir1 < grammar.rules[ir0].size(); ir1++) { if (grammar.stacks[is][ie] == &grammar.rules[ir0][ir1]) { - result->stacks[is][ie] = &result->rules[ir0][ir1]; + result->stacks[is][ie] = 
&result->rules[ir0][ir1]; } } } @@ -1126,11 +1129,8 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token const auto decoded = decode_utf8(piece, grammar.partial_utf8); const auto & code_points = decoded.first; - llama_grammar_stacks stacks_new; - for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) { - llama_grammar_accept(grammar.rules, grammar.stacks, *it, stacks_new); - grammar.stacks = std::move(stacks_new); + llama_grammar_accept(&grammar, *it); } grammar.partial_utf8 = decoded.second; diff --git a/src/llama-grammar.h b/src/llama-grammar.h index f529ce351e416..13e940fb52e24 100644 --- a/src/llama-grammar.h +++ b/src/llama-grammar.h @@ -58,6 +58,7 @@ using llama_grammar_rules = std::vector<llama_grammar_rule>; using llama_grammar_stacks = std::vector<llama_grammar_stack>; using llama_grammar_candidates = std::vector<llama_grammar_candidate>; +// TODO: remove, needed for tests atm const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar * grammar); llama_grammar_stacks & llama_grammar_get_stacks( struct llama_grammar * grammar); @@ -65,11 +66,7 @@ const llama_grammar_rules & llama_grammar_get_rules (const struct llama_grammar // be positioned at a character range (see `llama_grammar_advance_stack`), and // produces the N possible stacks if the given char is accepted at those // positions -void llama_grammar_accept( - const llama_grammar_rules & rules, - const llama_grammar_stacks & stacks, - uint32_t chr, - llama_grammar_stacks & stacks_new); +void llama_grammar_accept(struct llama_grammar * grammar, uint32_t chr); std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack( const llama_grammar_rules & rules, diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp index fd8ca8a9edfac..bebff77cfa09d 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -1396,19 +1396,15 @@ struct llama_sampler * llama_sampler_init_grammar_impl(const struct llama_vocab // penalties struct llama_sampler_penalties { - const int32_t n_vocab; - const llama_token special_eos_id; - const llama_token linefeed_id; - const int32_t penalty_last_n; const float penalty_repeat; const float penalty_freq; const float penalty_present; - const bool penalize_nl; - const bool ignore_eos; - ring_buffer<llama_token> prev; + + // a frequency map to count token occurrences + std::unordered_map<llama_token, int> token_count; }; static const char * llama_sampler_penalties_name(const struct llama_sampler * /*smpl*/) { @@ -1421,76 +1417,50 @@ static void llama_sampler_penalties_accept(struct llama_sampler * smpl, llama_to return; } - ctx->prev.push_back(token); -} - -static void llama_sampler_penalties_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) { - auto * ctx = (llama_sampler_penalties *) smpl->ctx; + ctx->token_count[token]++; - if (ctx->ignore_eos) { - assert(ctx->special_eos_id >= 0); + // if the ring buffer is full, remove the oldest token + if (ctx->prev.size() >= (size_t) ctx->penalty_last_n) { + const auto old = ctx->prev.front(); - // optimistically check if the candidates are not yet sorted/shuffled/truncated - if (cur_p->size > (size_t) ctx->special_eos_id && cur_p->data[ctx->special_eos_id].id == ctx->special_eos_id) { - cur_p->data[ctx->special_eos_id].logit = -INFINITY; - } else { - // else, search for the special EOS token - for (size_t i = 0; i < cur_p->size; ++i) { - if (cur_p->data[i].id == ctx->special_eos_id) { - cur_p->data[i].logit = -INFINITY; - break; - } - } + ctx->token_count[old]--; + if (ctx->token_count[old] == 0) { + ctx->token_count.erase(old); } } - if ((ctx->penalty_last_n == 
0) ||
-        (ctx->penalty_repeat == 1.0f && ctx->penalty_freq == 0.0f && ctx->penalty_present == 0.0f)) {
-        return;
-    }
-
-    bool nl_found = false;
-    size_t nl_idx = 0;
-    float nl_logit = -INFINITY;
-    if (!ctx->penalize_nl) {
-        assert(ctx->linefeed_id >= 0);
+    ctx->prev.push_back(token);

-        // optimistically check if the candidates are not yet sorted/shuffled/truncated
-        if (cur_p->size > (size_t) ctx->linefeed_id && cur_p->data[ctx->linefeed_id].id == ctx->linefeed_id) {
-            nl_found = true;
-            nl_idx = ctx->linefeed_id;
-            nl_logit = cur_p->data[ctx->linefeed_id].logit;
-        } else {
-            // else, search for the linefeed token
-            for (size_t i = 0; i < cur_p->size; ++i) {
-                if (cur_p->data[i].id == ctx->linefeed_id) {
-                    nl_found = true;
-                    nl_idx = i;
-                    nl_logit = cur_p->data[i].logit;
-                    break;
-                }
-            }
-        }
+#if 0
+    // sanity check
+    std::unordered_map tmp;
+    for (int i = 0; i < std::min(ctx->penalty_last_n, ctx->prev.size()); ++i) {
+        tmp[ctx->prev.rat(i)]++;
     }
-    // Create a frequency map to count occurrences of each token in last_tokens
-    // TODO: optimize this by maintaining the token count in the sampler context
-    using llama_token_cnt = std::unordered_map;
-    llama_token_cnt token_count;
+    assert(ctx->token_count == tmp);
+#endif
+}
+
+static void llama_sampler_penalties_apply(struct llama_sampler * smpl, llama_token_data_array * cur_p) {
+    auto * ctx = (llama_sampler_penalties *) smpl->ctx;

-    for (int i = 0; i < std::min(ctx->penalty_last_n, ctx->prev.size()); ++i) {
-        token_count[ctx->prev.rat(i)]++;
+    if ((ctx->penalty_last_n == 0) ||
+        (ctx->penalty_repeat == 1.0f && ctx->penalty_freq == 0.0f && ctx->penalty_present == 0.0f)) {
+        return;
     }

     // Apply frequency and presence penalties to the cur_p
     for (size_t i = 0; i < cur_p->size; ++i) {
-        const auto token_iter = token_count.find(cur_p->data[i].id);
-        if (token_iter == token_count.end()) {
+        const auto token_iter = ctx->token_count.find(cur_p->data[i].id);
+        if (token_iter == ctx->token_count.end()) {
             continue;
         }

         const int count = token_iter->second;

+        assert(count > 0 && count <= ctx->penalty_last_n);
+
         // The academic publication that described this technique only divided, but that would cause tokens with negative logits to become more likely, which is obviously wrong.
         // The common fix for this problem is to multiply by the penalty instead of dividing.
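         // As a quick sanity check of the fix described above (illustrative numbers only,
         // assuming penalty_repeat = 1.5; this sketch is not part of the upstream patch):
         //   logit = -2.0f  ->  multiply: -2.0f * 1.5f = -3.0f   (less likely, as intended)
         //                      divide:   -2.0f / 1.5f = -1.33f  (more likely - the original bug)
         //   logit =  2.0f  ->  divide:    2.0f / 1.5f =  1.33f  (damped, as intended)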
if (cur_p->data[i].logit <= 0) { @@ -1503,30 +1473,21 @@ static void llama_sampler_penalties_apply(struct llama_sampler * smpl, llama_tok } cur_p->sorted = false; - - if (!ctx->penalize_nl && nl_found) { - // restore the logit of the newline token if it was penalized - cur_p->data[nl_idx].logit = nl_logit; - } } static void llama_sampler_penalties_reset(struct llama_sampler * smpl) { auto * ctx = (llama_sampler_penalties *) smpl->ctx; ctx->prev.clear(); + ctx->token_count.clear(); } static struct llama_sampler * llama_sampler_penalties_clone(const struct llama_sampler * smpl) { const auto * ctx = (const llama_sampler_penalties *) smpl->ctx; auto * result = llama_sampler_init_penalties( - ctx->n_vocab, - ctx->special_eos_id, - ctx->linefeed_id, ctx->penalty_last_n, ctx->penalty_repeat, ctx->penalty_freq, - ctx->penalty_present, - ctx->penalize_nl, - ctx->ignore_eos); + ctx->penalty_present); // copy the state { @@ -1552,38 +1513,21 @@ static struct llama_sampler_i llama_sampler_penalties_i = { }; struct llama_sampler * llama_sampler_init_penalties( - int32_t n_vocab, - llama_token special_eos_id, - llama_token linefeed_id, int32_t penalty_last_n, float penalty_repeat, float penalty_freq, - float penalty_present, - bool penalize_nl, - bool ignore_eos) { - if (linefeed_id == LLAMA_TOKEN_NULL) { - penalize_nl = true; - } - - if (special_eos_id == LLAMA_TOKEN_NULL) { - ignore_eos = false; - } - + float penalty_present) { penalty_last_n = std::max(penalty_last_n, 0); return new llama_sampler { /* .iface = */ &llama_sampler_penalties_i, /* .ctx = */ new llama_sampler_penalties { - /* .n_vocab = */ n_vocab, - /* .special_eos_id = */ special_eos_id, - /* .linefeed_id = */ linefeed_id, /* .penalty_last_n = */ penalty_last_n, /* .penalty_repeat = */ penalty_repeat, /* .penalty_freq = */ penalty_freq, /* .penalty_present = */ penalty_present, - /* .penalize_nl = */ penalize_nl, - /* .ignore_eos = */ ignore_eos, /* .prev = */ ring_buffer(penalty_last_n), + /* .token_count = */ {}, }, }; } @@ -1611,7 +1555,8 @@ static void get_overlapping_token_sequences(const llama_vocab & vocab, const std if (word.find(str) != std::string::npos) { token_sequences.emplace(token_id, std::vector()); } else { - size_t word_len = word.size(), str_len = str.size(); + size_t word_len = word.size(); + size_t str_len = str.size(); size_t pos = -1; while ((pos = word.find(str[0], pos + 1)) != std::string::npos) { bool match = true; diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index d1dc96276c2a2..7f2725f94be13 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -418,6 +418,7 @@ struct llm_tokenizer_bpe : llm_tokenizer { case LLAMA_VOCAB_PRE_TYPE_SMOLLM: case LLAMA_VOCAB_PRE_TYPE_CODESHELL: case LLAMA_VOCAB_PRE_TYPE_EXAONE: + case LLAMA_VOCAB_PRE_TYPE_MINERVA: regex_exprs = { "\\p{N}", "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)", @@ -737,7 +738,7 @@ struct llm_tokenizer_wpm_session { std::vector words(1, ""); for (const uint32_t cpt : cpts_nfd) { - const auto flags = unicode_cpt_flags(cpt); + const auto flags = unicode_cpt_flags_from_cpt(cpt); if (flags.is_whitespace) { if (words.back().size()) { // finish previous word if any @@ -1866,6 +1867,10 @@ int32_t llama_detokenize_impl( int32_t text_len_max, bool remove_special, bool unparse_special) { + if (vocab.type == LLAMA_VOCAB_TYPE_NONE) { + return 0; + } + GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. 
Call llama_vocab::init_tokenizer() first."); int32_t avail = text_len_max; diff --git a/src/llama.cpp b/src/llama.cpp index 25d5caeb3f80e..3e64640432f7a 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -167,6 +167,7 @@ enum llm_arch { LLM_ARCH_QWEN, LLM_ARCH_QWEN2, LLM_ARCH_QWEN2MOE, + LLM_ARCH_QWEN2VL, LLM_ARCH_PHI2, LLM_ARCH_PHI3, LLM_ARCH_PLAMO, @@ -183,9 +184,11 @@ enum llm_arch { LLM_ARCH_COMMAND_R, LLM_ARCH_DBRX, LLM_ARCH_OLMO, + LLM_ARCH_OLMO2, LLM_ARCH_OLMOE, LLM_ARCH_OPENELM, LLM_ARCH_ARCTIC, + LLM_ARCH_DEEPSEEK, LLM_ARCH_DEEPSEEK2, LLM_ARCH_CHATGLM, LLM_ARCH_BITNET, @@ -198,60 +201,65 @@ enum llm_arch { LLM_ARCH_GRANITE, LLM_ARCH_GRANITE_MOE, LLM_ARCH_CHAMELEON, + LLM_ARCH_WAVTOKENIZER_DEC, LLM_ARCH_UNKNOWN, }; static const std::map LLM_ARCH_NAMES = { - { LLM_ARCH_LLAMA, "llama" }, - { LLM_ARCH_FALCON, "falcon" }, - { LLM_ARCH_GROK, "grok" }, - { LLM_ARCH_GPT2, "gpt2" }, - { LLM_ARCH_GPTJ, "gptj" }, - { LLM_ARCH_GPTNEOX, "gptneox" }, - { LLM_ARCH_MPT, "mpt" }, - { LLM_ARCH_BAICHUAN, "baichuan" }, - { LLM_ARCH_STARCODER, "starcoder" }, - { LLM_ARCH_REFACT, "refact" }, - { LLM_ARCH_BERT, "bert" }, - { LLM_ARCH_NOMIC_BERT, "nomic-bert" }, - { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" }, - { LLM_ARCH_BLOOM, "bloom" }, - { LLM_ARCH_STABLELM, "stablelm" }, - { LLM_ARCH_QWEN, "qwen" }, - { LLM_ARCH_QWEN2, "qwen2" }, - { LLM_ARCH_QWEN2MOE, "qwen2moe" }, - { LLM_ARCH_PHI2, "phi2" }, - { LLM_ARCH_PHI3, "phi3" }, - { LLM_ARCH_PLAMO, "plamo" }, - { LLM_ARCH_CODESHELL, "codeshell" }, - { LLM_ARCH_ORION, "orion" }, - { LLM_ARCH_INTERNLM2, "internlm2" }, - { LLM_ARCH_MINICPM, "minicpm" }, - { LLM_ARCH_MINICPM3, "minicpm3" }, - { LLM_ARCH_GEMMA, "gemma" }, - { LLM_ARCH_GEMMA2, "gemma2" }, - { LLM_ARCH_STARCODER2, "starcoder2" }, - { LLM_ARCH_MAMBA, "mamba" }, - { LLM_ARCH_XVERSE, "xverse" }, - { LLM_ARCH_COMMAND_R, "command-r" }, - { LLM_ARCH_DBRX, "dbrx" }, - { LLM_ARCH_OLMO, "olmo" }, - { LLM_ARCH_OLMOE, "olmoe" }, - { LLM_ARCH_OPENELM, "openelm" }, - { LLM_ARCH_ARCTIC, "arctic" }, - { LLM_ARCH_DEEPSEEK2, "deepseek2" }, - { LLM_ARCH_CHATGLM, "chatglm" }, - { LLM_ARCH_BITNET, "bitnet" }, - { LLM_ARCH_T5, "t5" }, - { LLM_ARCH_T5ENCODER, "t5encoder" }, - { LLM_ARCH_JAIS, "jais" }, - { LLM_ARCH_NEMOTRON, "nemotron" }, - { LLM_ARCH_EXAONE, "exaone" }, - { LLM_ARCH_RWKV6, "rwkv6" }, - { LLM_ARCH_GRANITE, "granite" }, - { LLM_ARCH_GRANITE_MOE, "granitemoe" }, - { LLM_ARCH_CHAMELEON, "chameleon" }, - { LLM_ARCH_UNKNOWN, "(unknown)" }, + { LLM_ARCH_LLAMA, "llama" }, + { LLM_ARCH_FALCON, "falcon" }, + { LLM_ARCH_GROK, "grok" }, + { LLM_ARCH_GPT2, "gpt2" }, + { LLM_ARCH_GPTJ, "gptj" }, + { LLM_ARCH_GPTNEOX, "gptneox" }, + { LLM_ARCH_MPT, "mpt" }, + { LLM_ARCH_BAICHUAN, "baichuan" }, + { LLM_ARCH_STARCODER, "starcoder" }, + { LLM_ARCH_REFACT, "refact" }, + { LLM_ARCH_BERT, "bert" }, + { LLM_ARCH_NOMIC_BERT, "nomic-bert" }, + { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" }, + { LLM_ARCH_BLOOM, "bloom" }, + { LLM_ARCH_STABLELM, "stablelm" }, + { LLM_ARCH_QWEN, "qwen" }, + { LLM_ARCH_QWEN2, "qwen2" }, + { LLM_ARCH_QWEN2MOE, "qwen2moe" }, + { LLM_ARCH_QWEN2VL, "qwen2vl" }, + { LLM_ARCH_PHI2, "phi2" }, + { LLM_ARCH_PHI3, "phi3" }, + { LLM_ARCH_PLAMO, "plamo" }, + { LLM_ARCH_CODESHELL, "codeshell" }, + { LLM_ARCH_ORION, "orion" }, + { LLM_ARCH_INTERNLM2, "internlm2" }, + { LLM_ARCH_MINICPM, "minicpm" }, + { LLM_ARCH_MINICPM3, "minicpm3" }, + { LLM_ARCH_GEMMA, "gemma" }, + { LLM_ARCH_GEMMA2, "gemma2" }, + { LLM_ARCH_STARCODER2, "starcoder2" }, + { LLM_ARCH_MAMBA, "mamba" }, + { LLM_ARCH_XVERSE, "xverse" }, + { 
LLM_ARCH_COMMAND_R, "command-r" }, + { LLM_ARCH_DBRX, "dbrx" }, + { LLM_ARCH_OLMO, "olmo" }, + { LLM_ARCH_OLMO2, "olmo2" }, + { LLM_ARCH_OLMOE, "olmoe" }, + { LLM_ARCH_OPENELM, "openelm" }, + { LLM_ARCH_ARCTIC, "arctic" }, + { LLM_ARCH_DEEPSEEK, "deepseek" }, + { LLM_ARCH_DEEPSEEK2, "deepseek2" }, + { LLM_ARCH_CHATGLM, "chatglm" }, + { LLM_ARCH_BITNET, "bitnet" }, + { LLM_ARCH_T5, "t5" }, + { LLM_ARCH_T5ENCODER, "t5encoder" }, + { LLM_ARCH_JAIS, "jais" }, + { LLM_ARCH_NEMOTRON, "nemotron" }, + { LLM_ARCH_EXAONE, "exaone" }, + { LLM_ARCH_RWKV6, "rwkv6" }, + { LLM_ARCH_GRANITE, "granite" }, + { LLM_ARCH_GRANITE_MOE, "granitemoe" }, + { LLM_ARCH_CHAMELEON, "chameleon" }, + { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" }, + { LLM_ARCH_UNKNOWN, "(unknown)" }, }; enum llm_kv { @@ -271,6 +279,7 @@ enum llm_kv { LLM_KV_VOCAB_SIZE, LLM_KV_CONTEXT_LENGTH, LLM_KV_EMBEDDING_LENGTH, + LLM_KV_FEATURES_LENGTH, LLM_KV_BLOCK_COUNT, LLM_KV_LEADING_DENSE_BLOCK_COUNT, LLM_KV_FEED_FORWARD_LENGTH, @@ -302,6 +311,8 @@ enum llm_kv { LLM_KV_ATTENTION_VALUE_LENGTH, LLM_KV_ATTENTION_LAYERNORM_EPS, LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, + LLM_KV_ATTENTION_GROUPNORM_EPS, + LLM_KV_ATTENTION_GROUPNORM_GROUPS, LLM_KV_ATTENTION_CAUSAL, LLM_KV_ATTENTION_Q_LORA_RANK, LLM_KV_ATTENTION_KV_LORA_RANK, @@ -310,6 +321,7 @@ enum llm_kv { LLM_KV_ATTENTION_SCALE, LLM_KV_ROPE_DIMENSION_COUNT, + LLM_KV_ROPE_DIMENSION_SECTIONS, LLM_KV_ROPE_FREQ_BASE, LLM_KV_ROPE_SCALE_LINEAR, LLM_KV_ROPE_SCALING_TYPE, @@ -364,6 +376,12 @@ enum llm_kv { LLM_KV_ADAPTER_TYPE, LLM_KV_ADAPTER_LORA_ALPHA, + LLM_KV_POSNET_EMBEDDING_LENGTH, + LLM_KV_POSNET_BLOCK_COUNT, + + LLM_KV_CONVNEXT_EMBEDDING_LENGTH, + LLM_KV_CONVNEXT_BLOCK_COUNT, + // deprecated: LLM_KV_TOKENIZER_PREFIX_ID, LLM_KV_TOKENIZER_SUFFIX_ID, @@ -387,6 +405,7 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_VOCAB_SIZE, "%s.vocab_size" }, { LLM_KV_CONTEXT_LENGTH, "%s.context_length" }, { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" }, + { LLM_KV_FEATURES_LENGTH, "%s.features_length" }, { LLM_KV_BLOCK_COUNT, "%s.block_count" }, { LLM_KV_LEADING_DENSE_BLOCK_COUNT, "%s.leading_dense_block_count" }, { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" }, @@ -418,6 +437,8 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" }, { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" }, { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" }, + { LLM_KV_ATTENTION_GROUPNORM_EPS, "%s.attention.group_norm_epsilon" }, + { LLM_KV_ATTENTION_GROUPNORM_GROUPS, "%s.attention.group_norm_groups" }, { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" }, { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" }, { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" }, @@ -426,6 +447,7 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" }, { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, + { LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" }, { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" }, { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" }, { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" }, @@ -447,6 +469,12 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" }, + { LLM_KV_POSNET_EMBEDDING_LENGTH, "%s.posnet.embedding_length" }, + { LLM_KV_POSNET_BLOCK_COUNT, "%s.posnet.block_count" }, + + { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" }, + { LLM_KV_CONVNEXT_BLOCK_COUNT, "%s.convnext.block_count" }, + { 
LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" }, { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" }, { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" }, @@ -605,6 +633,22 @@ enum llm_tensor { LLM_TENSOR_ENC_OUTPUT_NORM, LLM_TENSOR_CLS, LLM_TENSOR_CLS_OUT, + LLM_TENSOR_CONV1D, + LLM_TENSOR_CONVNEXT_DW, + LLM_TENSOR_CONVNEXT_NORM, + LLM_TENSOR_CONVNEXT_PW1, + LLM_TENSOR_CONVNEXT_PW2, + LLM_TENSOR_CONVNEXT_GAMMA, + LLM_TENSOR_POS_NET_CONV1, + LLM_TENSOR_POS_NET_CONV2, + LLM_TENSOR_POS_NET_NORM, + LLM_TENSOR_POS_NET_NORM1, + LLM_TENSOR_POS_NET_NORM2, + LLM_TENSOR_POS_NET_ATTN_NORM, + LLM_TENSOR_POS_NET_ATTN_Q, + LLM_TENSOR_POS_NET_ATTN_K, + LLM_TENSOR_POS_NET_ATTN_V, + LLM_TENSOR_POS_NET_ATTN_OUT, }; static const std::map> LLM_TENSOR_NAMES = { @@ -900,6 +944,23 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_QWEN2VL, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_QWEN2MOE, { @@ -1038,6 +1099,8 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, { LLM_TENSOR_OUTPUT, "output" }, { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, @@ -1211,6 +1274,25 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_OLMO2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_OLMOE, { @@ -1269,6 +1351,33 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, }, }, + { + LLM_ARCH_DEEPSEEK, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { 
LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + }, + }, { LLM_ARCH_DEEPSEEK2, { @@ -1524,6 +1633,31 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, }, }, + { + LLM_ARCH_WAVTOKENIZER_DEC, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_CONV1D, "conv1d" }, + { LLM_TENSOR_CONVNEXT_DW, "convnext.%d.dw" }, + { LLM_TENSOR_CONVNEXT_NORM, "convnext.%d.norm" }, + { LLM_TENSOR_CONVNEXT_PW1, "convnext.%d.pw1" }, + { LLM_TENSOR_CONVNEXT_PW2, "convnext.%d.pw2" }, + { LLM_TENSOR_CONVNEXT_GAMMA, "convnext.%d.gamma" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_POS_NET_CONV1, "posnet.%d.conv1" }, + { LLM_TENSOR_POS_NET_CONV2, "posnet.%d.conv2" }, + { LLM_TENSOR_POS_NET_NORM, "posnet.%d.norm" }, + { LLM_TENSOR_POS_NET_NORM1, "posnet.%d.norm1" }, + { LLM_TENSOR_POS_NET_NORM2, "posnet.%d.norm2" }, + { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" }, + { LLM_TENSOR_POS_NET_ATTN_Q, "posnet.%d.attn_q" }, + { LLM_TENSOR_POS_NET_ATTN_K, "posnet.%d.attn_k" }, + { LLM_TENSOR_POS_NET_ATTN_V, "posnet.%d.attn_v" }, + { LLM_TENSOR_POS_NET_ATTN_OUT, "posnet.%d.attn_output" }, + }, + }, { LLM_ARCH_UNKNOWN, { @@ -1532,6 +1666,69 @@ static const std::map> LLM_TENSOR_N }, }; +enum llm_chat_template { + LLM_CHAT_TEMPLATE_CHATML, + LLM_CHAT_TEMPLATE_LLAMA_2, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP, + LLM_CHAT_TEMPLATE_MISTRAL_V1, + LLM_CHAT_TEMPLATE_MISTRAL_V3, + LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN, + LLM_CHAT_TEMPLATE_MISTRAL_V7, + LLM_CHAT_TEMPLATE_PHI_3, + LLM_CHAT_TEMPLATE_ZEPHYR, + LLM_CHAT_TEMPLATE_MONARCH, + LLM_CHAT_TEMPLATE_GEMMA, + LLM_CHAT_TEMPLATE_ORION, + LLM_CHAT_TEMPLATE_OPENCHAT, + LLM_CHAT_TEMPLATE_VICUNA, + LLM_CHAT_TEMPLATE_VICUNA_ORCA, + LLM_CHAT_TEMPLATE_DEEPSEEK, + LLM_CHAT_TEMPLATE_DEEPSEEK_2, + LLM_CHAT_TEMPLATE_COMMAND_R, + LLM_CHAT_TEMPLATE_LLAMA_3, + LLM_CHAT_TEMPLATE_CHATGML_3, + LLM_CHAT_TEMPLATE_CHATGML_4, + LLM_CHAT_TEMPLATE_MINICPM, + LLM_CHAT_TEMPLATE_EXAONE_3, + LLM_CHAT_TEMPLATE_RWKV_WORLD, + LLM_CHAT_TEMPLATE_GRANITE, + LLM_CHAT_TEMPLATE_GIGACHAT, + LLM_CHAT_TEMPLATE_UNKNOWN, +}; + +static const std::map LLM_CHAT_TEMPLATES = { + { "chatml", LLM_CHAT_TEMPLATE_CHATML }, + { "llama2", LLM_CHAT_TEMPLATE_LLAMA_2 }, + { "llama2-sys", LLM_CHAT_TEMPLATE_LLAMA_2_SYS }, + { "llama2-sys-bos", LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS }, + { "llama2-sys-strip", LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP }, + { "mistral-v1", LLM_CHAT_TEMPLATE_MISTRAL_V1 }, + { "mistral-v3", LLM_CHAT_TEMPLATE_MISTRAL_V3 }, + { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN }, + { "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 }, + { "phi3", LLM_CHAT_TEMPLATE_PHI_3 }, + { "zephyr", LLM_CHAT_TEMPLATE_ZEPHYR }, + { "monarch", LLM_CHAT_TEMPLATE_MONARCH }, + { "gemma", LLM_CHAT_TEMPLATE_GEMMA }, + { "orion", LLM_CHAT_TEMPLATE_ORION }, + { "openchat", LLM_CHAT_TEMPLATE_OPENCHAT }, + { "vicuna", LLM_CHAT_TEMPLATE_VICUNA }, + { "vicuna-orca", LLM_CHAT_TEMPLATE_VICUNA_ORCA }, + { "deepseek", LLM_CHAT_TEMPLATE_DEEPSEEK }, + { "deepseek2", LLM_CHAT_TEMPLATE_DEEPSEEK_2 }, + { "command-r", LLM_CHAT_TEMPLATE_COMMAND_R }, + { 
"llama3", LLM_CHAT_TEMPLATE_LLAMA_3 }, + { "chatglm3", LLM_CHAT_TEMPLATE_CHATGML_3 }, + { "chatglm4", LLM_CHAT_TEMPLATE_CHATGML_4 }, + { "minicpm", LLM_CHAT_TEMPLATE_MINICPM }, + { "exaone3", LLM_CHAT_TEMPLATE_EXAONE_3 }, + { "rwkv-world", LLM_CHAT_TEMPLATE_RWKV_WORLD }, + { "granite", LLM_CHAT_TEMPLATE_GRANITE }, + { "gigachat", LLM_CHAT_TEMPLATE_GIGACHAT }, +}; + static llm_arch llm_arch_from_string(const std::string & name) { for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT if (kv.second == name) { @@ -1605,9 +1802,10 @@ struct LLM_TN { // static const std::map LLAMA_ROPE_SCALING_TYPES = { - { LLAMA_ROPE_SCALING_TYPE_NONE, "none" }, - { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" }, - { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" }, + { LLAMA_ROPE_SCALING_TYPE_NONE, "none" }, + { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" }, + { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" }, + { LLAMA_ROPE_SCALING_TYPE_LONGROPE, "longrope" }, }; static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) { @@ -1713,7 +1911,7 @@ struct llama_file { DWORD bufLen = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&lpMsgBuf, 0, NULL); if (!bufLen) { - ret = format("Win32 error code: %s", error_code); + ret = format("Win32 error code: %lx", error_code); } else { ret = lpMsgBuf; LocalFree(lpMsgBuf); @@ -2051,7 +2249,7 @@ struct llama_mmap { HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll"); // may fail on pre-Windows 8 systems - pPrefetchVirtualMemory = reinterpret_cast (GetProcAddress(hKernel32, "PrefetchVirtualMemory")); + pPrefetchVirtualMemory = (decltype(pPrefetchVirtualMemory))(void *) GetProcAddress(hKernel32, "PrefetchVirtualMemory"); if (pPrefetchVirtualMemory) { // advise the kernel to preload the mapped memory @@ -2324,6 +2522,7 @@ enum e_model { MODEL_16B, MODEL_20B, MODEL_30B, + MODEL_32B, MODEL_34B, MODEL_35B, MODEL_40B, @@ -2349,15 +2548,26 @@ static const size_t kiB = 1024; static const size_t MiB = 1024*kiB; static const size_t GiB = 1024*MiB; +struct llama_hparams_posnet { + uint32_t n_embd; + uint32_t n_layer; +}; + +struct llama_hparams_convnext { + uint32_t n_embd; + uint32_t n_layer; +}; + struct llama_hparams { bool vocab_only; bool rope_finetuned; bool use_par_res; bool swin_norm; - uint32_t n_vocab; + uint32_t n_vocab = 0; uint32_t n_ctx_train; // context size the model was trained on uint32_t n_embd; + uint32_t n_embd_features = 0; uint32_t n_layer; uint32_t n_rot; uint32_t n_swa = 0; // sliding window attention (SWA) @@ -2368,6 +2578,10 @@ struct llama_hparams { uint32_t n_vocab_type = 0; // for BERT-style token types uint32_t n_rel_attn_bkts = 0; + // for WavTokenizer + struct llama_hparams_posnet posnet; + struct llama_hparams_convnext convnext; + std::array n_head_arr; std::array n_head_kv_arr; std::array n_ff_arr; @@ -2382,6 +2596,9 @@ struct llama_hparams { float f_norm_eps; float f_norm_rms_eps; + float f_norm_group_eps; + + uint32_t n_norm_groups; float f_attn_logit_softcapping = 50.0f; float f_final_logit_softcapping = 30.0f; @@ -2392,11 +2609,12 @@ struct llama_hparams { uint32_t time_decay_extra_dim = 0; uint32_t wkv_head_size = 0; - float rope_attn_factor = 1.0f; - float rope_freq_base_train; - float rope_freq_scale_train; - uint32_t n_ctx_orig_yarn; - float rope_yarn_log_mul; + float rope_attn_factor = 1.0f; + float rope_freq_base_train; + float rope_freq_scale_train; + uint32_t n_ctx_orig_yarn; + float rope_yarn_log_mul; + 
int rope_sections[4]; // for State Space Models uint32_t ssm_d_conv = 0; @@ -2426,63 +2644,6 @@ struct llama_hparams { enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE; enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE; - bool operator!=(const llama_hparams & other) const { - if (this->vocab_only != other.vocab_only) return true; - if (this->n_vocab != other.n_vocab) return true; - if (this->n_ctx_train != other.n_ctx_train) return true; - if (this->n_embd != other.n_embd) return true; - if (this->n_layer != other.n_layer) return true; - if (this->n_rot != other.n_rot) return true; - if (this->n_swa != other.n_swa) return true; - if (this->n_embd_head_k != other.n_embd_head_k) return true; - if (this->n_embd_head_v != other.n_embd_head_v) return true; - if (this->n_expert != other.n_expert) return true; - if (this->n_expert_used != other.n_expert_used) return true; - - if (this->n_head_arr != other.n_head_arr) return true; - if (this->n_head_kv_arr != other.n_head_kv_arr) return true; - if (this->n_ff_arr != other.n_ff_arr) return true; - - if (this->n_rel_attn_bkts != other.n_rel_attn_bkts) return true; - if (this->n_layer_dense_lead != other.n_layer_dense_lead) return true; - if (this->n_lora_q != other.n_lora_q) return true; - if (this->n_lora_kv != other.n_lora_kv) return true; - if (this->n_ff_exp != other.n_ff_exp) return true; - if (this->n_ff_shexp != other.n_ff_shexp) return true; - if (this->n_expert_shared != other.n_expert_shared) return true; - - if (this->rope_finetuned != other.rope_finetuned) return true; - if (this->n_ctx_orig_yarn != other.n_ctx_orig_yarn) return true; - - if (this->ssm_d_conv != other.ssm_d_conv) return true; - if (this->ssm_d_inner != other.ssm_d_inner) return true; - if (this->ssm_d_state != other.ssm_d_state) return true; - if (this->ssm_dt_rank != other.ssm_dt_rank) return true; - if (this->ssm_dt_b_c_rms != other.ssm_dt_b_c_rms) return true; - - if (this->rescale_every_n_layers != other.rescale_every_n_layers) return true; - if (this->time_mix_extra_dim != other.time_mix_extra_dim) return true; - if (this->time_decay_extra_dim != other.time_decay_extra_dim) return true; - if (this->wkv_head_size != other.wkv_head_size) return true; - - if (this->dec_start_token_id != other.dec_start_token_id) return true; - - const float EPSILON = 1e-9f; - - if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true; - if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true; - if (!is_float_close(this->rope_attn_factor, other.rope_attn_factor, EPSILON)) return true; - if (!is_float_close(this->rope_freq_base_train, other.rope_freq_base_train, EPSILON)) return true; - if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true; - if (!is_float_close(this->expert_weights_scale, other.expert_weights_scale, EPSILON)) return true; - if (!is_float_close(this->rope_yarn_log_mul, other.rope_yarn_log_mul, EPSILON)) return true; - if (!is_float_close(this->f_residual_scale, other.f_residual_scale, EPSILON)) return true; - if (!is_float_close(this->f_embedding_scale, other.f_embedding_scale, EPSILON)) return true; - if (!is_float_close(this->f_attention_scale, other.f_attention_scale, EPSILON)) return true; - - return false; - } - uint32_t n_head(uint32_t il = 0) const { if (il < n_layer) { return n_head_arr[il]; @@ -2535,21 +2696,21 @@ struct llama_hparams { if (wkv_head_size != 0) { // for RWKV models return 2 * n_embd; - } else { - // TODO: maybe 
support other convolution strides than 1 - // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed - return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner; } + + // TODO: maybe support other convolution strides than 1 + // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed + return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner; } uint32_t n_embd_v_s() const { // dimension of the recurrent state embeddings if (wkv_head_size != 0) { // corresponds to RWKV's wkv_states size return n_embd * wkv_head_size; - } else { - // corresponds to Mamba's ssm_states size - return ssm_d_state * ssm_d_inner; } + + // corresponds to Mamba's ssm_states size + return ssm_d_state * ssm_d_inner; } }; @@ -2587,142 +2748,187 @@ struct llama_cparams { void * cb_eval_user_data; }; -// TODO: separate into "llama_layer_enc" and "llama_layer_dec" -struct llama_layer { - llama_layer() { - // initialize all pointers to NULL - std::memset(this, 0, sizeof(*this)); - } +struct llama_layer_posnet { + // resnet + struct ggml_tensor * norm1 = nullptr; + struct ggml_tensor * norm1_b = nullptr; + + struct ggml_tensor * conv1 = nullptr; + struct ggml_tensor * conv1_b = nullptr; + + struct ggml_tensor * norm2 = nullptr; + struct ggml_tensor * norm2_b = nullptr; + + struct ggml_tensor * conv2 = nullptr; + struct ggml_tensor * conv2_b = nullptr; + // attention + struct ggml_tensor * attn_norm = nullptr; + struct ggml_tensor * attn_norm_b = nullptr; + + struct ggml_tensor * attn_q = nullptr; + struct ggml_tensor * attn_q_b = nullptr; + + struct ggml_tensor * attn_k = nullptr; + struct ggml_tensor * attn_k_b = nullptr; + + struct ggml_tensor * attn_v = nullptr; + struct ggml_tensor * attn_v_b = nullptr; + + struct ggml_tensor * attn_o = nullptr; + struct ggml_tensor * attn_o_b = nullptr; + + // normalize + struct ggml_tensor * norm = nullptr; + struct ggml_tensor * norm_b = nullptr; +}; + +struct llama_layer_convnext { + struct ggml_tensor * dw = nullptr; + struct ggml_tensor * dw_b = nullptr; + + struct ggml_tensor * norm = nullptr; + struct ggml_tensor * norm_b = nullptr; + + struct ggml_tensor * pw1 = nullptr; + struct ggml_tensor * pw1_b = nullptr; + + struct ggml_tensor * pw2 = nullptr; + struct ggml_tensor * pw2_b = nullptr; + + struct ggml_tensor * gamma = nullptr; +}; + +struct llama_layer { // normalization - struct ggml_tensor * attn_norm; - struct ggml_tensor * attn_norm_b; - struct ggml_tensor * attn_norm_2; - struct ggml_tensor * attn_norm_2_b; - struct ggml_tensor * attn_q_norm; - struct ggml_tensor * attn_q_norm_b; - struct ggml_tensor * attn_k_norm; - struct ggml_tensor * attn_k_norm_b; - struct ggml_tensor * attn_out_norm; - struct ggml_tensor * attn_out_norm_b; - struct ggml_tensor * attn_q_a_norm; - struct ggml_tensor * attn_kv_a_norm; - struct ggml_tensor * attn_sub_norm; - struct ggml_tensor * attn_post_norm; - struct ggml_tensor * ffn_sub_norm; - struct ggml_tensor * attn_norm_cross; - struct ggml_tensor * attn_norm_enc; + struct ggml_tensor * attn_norm = nullptr; + struct ggml_tensor * attn_norm_b = nullptr; + struct ggml_tensor * attn_norm_2 = nullptr; + struct ggml_tensor * attn_norm_2_b = nullptr; + struct ggml_tensor * attn_q_norm = nullptr; + struct ggml_tensor * attn_q_norm_b = nullptr; + struct ggml_tensor * attn_k_norm = nullptr; + struct ggml_tensor * attn_k_norm_b = nullptr; + struct ggml_tensor * attn_out_norm = nullptr; + struct ggml_tensor * attn_out_norm_b = nullptr; + struct ggml_tensor * 
attn_q_a_norm = nullptr; + struct ggml_tensor * attn_kv_a_norm = nullptr; + struct ggml_tensor * attn_sub_norm = nullptr; + struct ggml_tensor * attn_post_norm = nullptr; + struct ggml_tensor * ffn_sub_norm = nullptr; + struct ggml_tensor * attn_norm_cross = nullptr; + struct ggml_tensor * attn_norm_enc = nullptr; // attention - struct ggml_tensor * wq; - struct ggml_tensor * wk; - struct ggml_tensor * wv; - struct ggml_tensor * wo; - struct ggml_tensor * wqkv; - struct ggml_tensor * wq_a; - struct ggml_tensor * wq_b; - struct ggml_tensor * wkv_a_mqa; - struct ggml_tensor * wkv_b; - struct ggml_tensor * wq_cross; - struct ggml_tensor * wk_cross; - struct ggml_tensor * wv_cross; - struct ggml_tensor * wo_cross; - struct ggml_tensor * wq_enc; - struct ggml_tensor * wk_enc; - struct ggml_tensor * wv_enc; - struct ggml_tensor * wo_enc; + struct ggml_tensor * wq = nullptr; + struct ggml_tensor * wk = nullptr; + struct ggml_tensor * wv = nullptr; + struct ggml_tensor * wo = nullptr; + struct ggml_tensor * wqkv = nullptr; + struct ggml_tensor * wq_a = nullptr; + struct ggml_tensor * wq_b = nullptr; + struct ggml_tensor * wkv_a_mqa = nullptr; + struct ggml_tensor * wkv_b = nullptr; + struct ggml_tensor * wq_cross = nullptr; + struct ggml_tensor * wk_cross = nullptr; + struct ggml_tensor * wv_cross = nullptr; + struct ggml_tensor * wo_cross = nullptr; + struct ggml_tensor * wq_enc = nullptr; + struct ggml_tensor * wk_enc = nullptr; + struct ggml_tensor * wv_enc = nullptr; + struct ggml_tensor * wo_enc = nullptr; // attention bias - struct ggml_tensor * bq; - struct ggml_tensor * bk; - struct ggml_tensor * bv; - struct ggml_tensor * bo; - struct ggml_tensor * bqkv; + struct ggml_tensor * bq = nullptr; + struct ggml_tensor * bk = nullptr; + struct ggml_tensor * bv = nullptr; + struct ggml_tensor * bo = nullptr; + struct ggml_tensor * bqkv = nullptr; // relative position bias - struct ggml_tensor * attn_rel_b; - struct ggml_tensor * attn_rel_b_enc; - struct ggml_tensor * attn_rel_b_cross; + struct ggml_tensor * attn_rel_b = nullptr; + struct ggml_tensor * attn_rel_b_enc = nullptr; + struct ggml_tensor * attn_rel_b_cross = nullptr; // normalization - struct ggml_tensor * ffn_norm; - struct ggml_tensor * ffn_norm_b; - struct ggml_tensor * ffn_post_norm; - struct ggml_tensor * layer_out_norm; - struct ggml_tensor * layer_out_norm_b; - struct ggml_tensor * ffn_norm_exps; - struct ggml_tensor * ffn_norm_enc; + struct ggml_tensor * ffn_norm = nullptr; + struct ggml_tensor * ffn_norm_b = nullptr; + struct ggml_tensor * ffn_post_norm = nullptr; + struct ggml_tensor * layer_out_norm = nullptr; + struct ggml_tensor * layer_out_norm_b = nullptr; + struct ggml_tensor * ffn_norm_exps = nullptr; + struct ggml_tensor * ffn_norm_enc = nullptr; // ff - struct ggml_tensor * ffn_gate; // w1 - struct ggml_tensor * ffn_down; // w2 - struct ggml_tensor * ffn_up; // w3 - struct ggml_tensor * ffn_gate_enc; - struct ggml_tensor * ffn_down_enc; - struct ggml_tensor * ffn_up_enc; + struct ggml_tensor * ffn_gate = nullptr; // w1 + struct ggml_tensor * ffn_down = nullptr; // w2 + struct ggml_tensor * ffn_up = nullptr; // w3 + struct ggml_tensor * ffn_gate_enc = nullptr; + struct ggml_tensor * ffn_down_enc = nullptr; + struct ggml_tensor * ffn_up_enc = nullptr; // ff MoE - struct ggml_tensor * ffn_gate_inp; - struct ggml_tensor * ffn_gate_exps; - struct ggml_tensor * ffn_down_exps; - struct ggml_tensor * ffn_up_exps ; + struct ggml_tensor * ffn_gate_inp = nullptr; + struct ggml_tensor * ffn_gate_exps = nullptr; + struct 
ggml_tensor * ffn_down_exps = nullptr; + struct ggml_tensor * ffn_up_exps = nullptr; // ff shared expert (shexp) - struct ggml_tensor * ffn_gate_inp_shexp; - struct ggml_tensor * ffn_gate_shexp; - struct ggml_tensor * ffn_down_shexp; - struct ggml_tensor * ffn_up_shexp; + struct ggml_tensor * ffn_gate_inp_shexp = nullptr; + struct ggml_tensor * ffn_gate_shexp = nullptr; + struct ggml_tensor * ffn_down_shexp = nullptr; + struct ggml_tensor * ffn_up_shexp = nullptr; // ff bias - struct ggml_tensor * ffn_gate_b; - struct ggml_tensor * ffn_down_b; // b2 - struct ggml_tensor * ffn_up_b; // b3 - struct ggml_tensor * ffn_act; + struct ggml_tensor * ffn_gate_b = nullptr; + struct ggml_tensor * ffn_down_b = nullptr; // b2 + struct ggml_tensor * ffn_up_b = nullptr; // b3 + struct ggml_tensor * ffn_act = nullptr; // mamba proj - struct ggml_tensor * ssm_in; - struct ggml_tensor * ssm_x; - struct ggml_tensor * ssm_dt; - struct ggml_tensor * ssm_out; + struct ggml_tensor * ssm_in = nullptr; + struct ggml_tensor * ssm_x = nullptr; + struct ggml_tensor * ssm_dt = nullptr; + struct ggml_tensor * ssm_out = nullptr; // mamba - struct ggml_tensor * ssm_conv1d; - struct ggml_tensor * ssm_a; - struct ggml_tensor * ssm_d; + struct ggml_tensor * ssm_conv1d = nullptr; + struct ggml_tensor * ssm_a = nullptr; + struct ggml_tensor * ssm_d = nullptr; // mamba bias - struct ggml_tensor * ssm_conv1d_b; - struct ggml_tensor * ssm_dt_b; + struct ggml_tensor * ssm_conv1d_b = nullptr; + struct ggml_tensor * ssm_dt_b = nullptr; // rwkv - struct ggml_tensor * time_mix_w1; - struct ggml_tensor * time_mix_w2; - struct ggml_tensor * time_mix_lerp_x; - struct ggml_tensor * time_mix_lerp_w; - struct ggml_tensor * time_mix_lerp_k; - struct ggml_tensor * time_mix_lerp_v; - struct ggml_tensor * time_mix_lerp_r; - struct ggml_tensor * time_mix_lerp_g; - - struct ggml_tensor * time_mix_first; - struct ggml_tensor * time_mix_decay; - struct ggml_tensor * time_mix_decay_w1; - struct ggml_tensor * time_mix_decay_w2; - struct ggml_tensor * time_mix_key; - struct ggml_tensor * time_mix_value; - struct ggml_tensor * time_mix_receptance; - struct ggml_tensor * time_mix_gate; - - struct ggml_tensor * time_mix_ln; - struct ggml_tensor * time_mix_ln_b; - struct ggml_tensor * time_mix_output; - - struct ggml_tensor * channel_mix_lerp_k; - struct ggml_tensor * channel_mix_lerp_r; - - struct ggml_tensor * channel_mix_key; - struct ggml_tensor * channel_mix_receptance; - struct ggml_tensor * channel_mix_value; + struct ggml_tensor * time_mix_w1 = nullptr; + struct ggml_tensor * time_mix_w2 = nullptr; + struct ggml_tensor * time_mix_lerp_x = nullptr; + struct ggml_tensor * time_mix_lerp_w = nullptr; + struct ggml_tensor * time_mix_lerp_k = nullptr; + struct ggml_tensor * time_mix_lerp_v = nullptr; + struct ggml_tensor * time_mix_lerp_r = nullptr; + struct ggml_tensor * time_mix_lerp_g = nullptr; + + struct ggml_tensor * time_mix_first = nullptr; + struct ggml_tensor * time_mix_decay = nullptr; + struct ggml_tensor * time_mix_decay_w1 = nullptr; + struct ggml_tensor * time_mix_decay_w2 = nullptr; + struct ggml_tensor * time_mix_key = nullptr; + struct ggml_tensor * time_mix_value = nullptr; + struct ggml_tensor * time_mix_receptance = nullptr; + struct ggml_tensor * time_mix_gate = nullptr; + + struct ggml_tensor * time_mix_ln = nullptr; + struct ggml_tensor * time_mix_ln_b = nullptr; + struct ggml_tensor * time_mix_output = nullptr; + + struct ggml_tensor * channel_mix_lerp_k = nullptr; + struct ggml_tensor * channel_mix_lerp_r = nullptr; + + struct 
ggml_tensor * channel_mix_key = nullptr;
+    struct ggml_tensor * channel_mix_receptance = nullptr;
+    struct ggml_tensor * channel_mix_value = nullptr;

     // long rope factors
     struct ggml_tensor * rope_long = nullptr;
@@ -2730,13 +2936,17 @@ struct llama_layer {
     struct ggml_tensor * rope_freqs = nullptr;

     // bitnet scale
-    struct ggml_tensor * wq_scale;
-    struct ggml_tensor * wk_scale;
-    struct ggml_tensor * wv_scale;
-    struct ggml_tensor * wo_scale;
-    struct ggml_tensor * ffn_gate_scale;
-    struct ggml_tensor * ffn_up_scale;
-    struct ggml_tensor * ffn_down_scale;
+    struct ggml_tensor * wq_scale = nullptr;
+    struct ggml_tensor * wk_scale = nullptr;
+    struct ggml_tensor * wv_scale = nullptr;
+    struct ggml_tensor * wo_scale = nullptr;
+    struct ggml_tensor * ffn_gate_scale = nullptr;
+    struct ggml_tensor * ffn_up_scale = nullptr;
+    struct ggml_tensor * ffn_down_scale = nullptr;
+
+    struct llama_layer_posnet posnet;
+
+    struct llama_layer_convnext convnext;
 };

 // very similar to llama_batch,
@@ -2867,6 +3077,9 @@ struct llama_model {
     struct ggml_tensor * cls_out   = nullptr;
     struct ggml_tensor * cls_out_b = nullptr;

+    struct ggml_tensor * conv1d   = nullptr;
+    struct ggml_tensor * conv1d_b = nullptr;
+
     std::vector layers;

     // gguf metadata
@@ -2951,6 +3164,7 @@ struct llama_sbatch {
     // batch indices of the output
     std::vector out_ids;
     std::vector seq;
+    const llama_batch * batch = nullptr;

     // buffers for the ubatch
@@ -3296,6 +3510,11 @@ struct llama_context {
     // whether we are computing encoder output or decoder output
     bool is_encoding = false;

+    // TODO: find a better way to accommodate multi-dimensional position encoding methods
+    // number of position ids each token gets; 1 per token in most cases
+    // when using m-rope, each token gets 3 position ids, representing a 3-dimensional coordinate
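    // illustrative sketch (values assumed, not taken from this patch): a plain text token at
    // sequence position 5 would carry the triple (5, 5, 5), while an image patch token would
    // carry (t, row, col), e.g.:
    //     llama_pos pos_text[3]  = { 5, 5, 5 };   // regular token: all components equal
    //     llama_pos pos_patch[3] = { 5, 2, 7 };   // patch at temporal pos 5, grid row 2, col 7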
+ int n_pos_per_token = 1; + // output of the encoder part of the encoder-decoder models std::vector embd_enc; std::vector> seq_ids_enc; @@ -3366,6 +3585,17 @@ static int llama_get_device_count(const llama_model & model) { return (int) model.devices.size(); } +static struct ggml_tensor * llama_get_model_tensor(const struct llama_model * model, const char * name) { + auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(), + [name](const std::pair & it) { + return it.first == name; + }); + if (it == model->tensors_by_name.end()) { + return nullptr; + } + return it->second; +} + template static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) { ggml_init_params params = { @@ -3419,7 +3649,9 @@ static bool llama_kv_cache_init( const struct llama_hparams & hparams = model.hparams; - const int64_t n_layer = hparams.n_layer; + const int32_t n_layer = hparams.n_layer; + + LLAMA_LOG_INFO("%s: kv_size = %d, offload = %d, type_k = '%s', type_v = '%s', n_layer = %d\n", __func__, kv_size, offload, ggml_type_name(type_k), ggml_type_name(type_v), n_layer); cache.has_shift = false; @@ -3460,10 +3692,12 @@ static bool llama_kv_cache_init( cache.k_l.reserve(n_layer); cache.v_l.reserve(n_layer); - for (int i = 0; i < (int) n_layer; i++) { + for (int i = 0; i < n_layer; i++) { const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(); const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(); + LLAMA_LOG_DEBUG("%s: layer %d: n_embd_k_gqa = %d, n_embd_v_gqa = %d\n", __func__, i, n_embd_k_gqa, n_embd_v_gqa); + ggml_backend_buffer_type_t buft; if (offload) { auto * dev = model.dev_layer.at(i).dev; @@ -4496,9 +4730,6 @@ struct llama_model_loader { case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break; case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break; case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break; - case GGML_TYPE_Q4_0_4_4: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_4; break; - case GGML_TYPE_Q4_0_4_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_8; break; - case GGML_TYPE_Q4_0_8_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_8_8; break; default: { LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max)); @@ -4849,7 +5080,9 @@ struct llama_model_loader { mappings.reserve(files.size()); mmaps_used.reserve(files.size()); for (const auto & file : files) { - std::unique_ptr mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, ggml_is_numa())); + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU)); + auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa"); + std::unique_ptr mapping(new llama_mmap(file.get(), prefetch ? 
-1 : 0, is_numa_fn())); mmaps_used.emplace_back(mapping->size, 0); if (mlock_mmaps) { std::unique_ptr mlock_mmap(new llama_mlock()); @@ -5260,9 +5493,6 @@ static std::string llama_model_ftype_name(llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw"; case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw"; case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw"; - case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: return "Q4_0_4_4"; - case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: return "Q4_0_4_8"; - case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: return "Q4_0_8_8"; default: return "unknown, may not work"; } @@ -5311,6 +5541,7 @@ static const char * llama_model_type_name(e_model type) { case MODEL_16B: return "16B"; case MODEL_20B: return "20B"; case MODEL_30B: return "30B"; + case MODEL_32B: return "32B"; case MODEL_34B: return "34B"; case MODEL_35B: return "35B"; case MODEL_40B: return "40B"; @@ -5379,7 +5610,7 @@ static void llm_load_hparams( ml.get_key(LLM_KV_GENERAL_NAME, model.name, false); // get hparams kv - ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab); + ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab, false); // everything past this point is not vocab-related if (hparams.vocab_only) { @@ -5392,6 +5623,16 @@ static void llm_load_hparams( ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false); ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false); + if (model.arch == LLM_ARCH_WAVTOKENIZER_DEC) { + ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features); + + ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd); + ml.get_key(LLM_KV_POSNET_BLOCK_COUNT, hparams.posnet.n_layer); + + ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd); + ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT, hparams.convnext.n_layer); + } + GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS); GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert); if (hparams.n_expert > 0) { @@ -5400,13 +5641,13 @@ static void llm_load_hparams( GGML_ASSERT(hparams.n_expert_used == 0); } - // zero-out the per-layer hparams + // zero-out the array hparams std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0); std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0); std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0); - ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer); - ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer); + ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer, false); + ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false); // n_head_kv is optional, default to n_head hparams.n_head_kv_arr = hparams.n_head_arr; @@ -5498,8 +5739,12 @@ static void llm_load_hparams( case LLM_ARCH_MINICPM: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); + ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); switch (hparams.n_layer) { + case 52: model.type = e_model::MODEL_1B; break; case 40: model.type = e_model::MODEL_2B; break; default: model.type = e_model::MODEL_UNKNOWN; } @@ -5664,6 +5909,13 @@ static void llm_load_hparams( default: model.type = e_model::MODEL_UNKNOWN; } } break; + case LLM_ARCH_QWEN2VL: + { + std::array section_dims; + 
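                    // A hedged illustration of what these four entries hold (numbers assumed,
                    // roughly Qwen2-VL-like, not read from this patch): they partition the rotary
                    // dimensions among the m-rope components, e.g.
                    //     std::array<int, 4> section_dims = { 16, 24, 24, 0 };  // temporal, height, width, spare
                    // which sums to 64 rotary dims, i.e. half of a 128-wide attention head.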
ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, section_dims, 4, true); + std::copy(section_dims.begin(), section_dims.begin() + 4, std::begin(hparams.rope_sections)); + } + // fall through case LLM_ARCH_QWEN2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -5671,7 +5923,10 @@ static void llm_load_hparams( case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break; case 28: model.type = hparams.n_embd == 1536 ? e_model::MODEL_1_5B : e_model::MODEL_7B; break; case 32: model.type = e_model::MODEL_7B; break; + case 36: model.type = e_model::MODEL_3B; break; case 40: model.type = hparams.n_head() == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break; + case 48: model.type = e_model::MODEL_14B; break; + case 64: model.type = e_model::MODEL_32B; break; case 80: model.type = e_model::MODEL_70B; break; default: model.type = e_model::MODEL_UNKNOWN; } @@ -5881,6 +6136,17 @@ static void llm_load_hparams( default: model.type = e_model::MODEL_UNKNOWN; } } break; + case LLM_ARCH_OLMO2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; case LLM_ARCH_OLMOE: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -5960,6 +6226,19 @@ static void llm_load_hparams( model.type = e_model::MODEL_UNKNOWN; } } break; + case LLM_ARCH_DEEPSEEK: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + + switch (hparams.n_layer) { + case 28: model.type = e_model::MODEL_20B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; case LLM_ARCH_DEEPSEEK2: { bool is_lite = (hparams.n_layer == 27); @@ -6113,6 +6392,13 @@ static void llm_load_hparams( default: model.type = e_model::MODEL_UNKNOWN; } } break; + case LLM_ARCH_WAVTOKENIZER_DEC: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS, hparams.f_norm_group_eps); + ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + } break; default: (void)0; } @@ -6142,7 +6428,7 @@ static void llm_load_vocab( ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model); ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false); - if (tokenizer_model == "no_vocab") { + if (tokenizer_model == "no_vocab" || tokenizer_model == "none") { vocab.type = LLAMA_VOCAB_TYPE_NONE; // default special tokens @@ -6306,10 +6592,12 @@ static void llm_load_vocab( tokenizer_pre == "phi-2" || tokenizer_pre == "jina-es" || tokenizer_pre == "jina-de" || + tokenizer_pre == "gigachat" || tokenizer_pre == "jina-v1-en" || tokenizer_pre == "jina-v2-es" || tokenizer_pre == "jina-v2-de" || - tokenizer_pre == "jina-v2-code") { + tokenizer_pre == "jina-v2-code" || + tokenizer_pre == "roberta-bpe") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2; } else if ( tokenizer_pre == "refact") { @@ -6376,6 +6664,9 @@ static void llm_load_vocab( vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON; vocab.tokenizer_add_bos = true; 
vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "minerva-7b") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA; } else { throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); } @@ -6954,6 +7245,13 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) { LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, vocab.max_token_len); + if (model.arch == LLM_ARCH_DEEPSEEK) { + LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead); + LLAMA_LOG_INFO("%s: n_ff_exp = %d\n", __func__, hparams.n_ff_exp); + LLAMA_LOG_INFO("%s: n_expert_shared = %d\n", __func__, hparams.n_expert_shared); + LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n", __func__, hparams.expert_weights_scale); + } + if (model.arch == LLM_ARCH_DEEPSEEK2) { LLAMA_LOG_INFO("%s: n_layer_dense_lead = %d\n", __func__, hparams.n_layer_dense_lead); LLAMA_LOG_INFO("%s: n_lora_q = %d\n", __func__, hparams.n_lora_q); @@ -6969,7 +7267,7 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) { LLAMA_LOG_INFO("%s: n_ff_shexp = %d\n", __func__, hparams.n_ff_shexp); } - if (model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) { + if (model.arch == LLM_ARCH_MINICPM || model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) { LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale); LLAMA_LOG_INFO("%s: f_residual_scale = %f\n", __func__, hparams.f_residual_scale); LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale); @@ -7110,6 +7408,22 @@ static const std::map llm_tensor_info_mapping = { {LLM_TENSOR_FFN_UP_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, // this tensor is loaded for T5, but never used {LLM_TENSOR_DEC_CROSS_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}}, + {LLM_TENSOR_CONV1D, {LLM_TENSOR_LAYER_INPUT, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_NORM1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_NORM2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_CONV1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_CONV2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_DW, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}}, + {LLM_TENSOR_CONVNEXT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_CONVNEXT_PW1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_PW2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_GAMMA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, }; // checks if the weight tensor can be used with the specified buffer type and device @@ -7153,12 +7467,12 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w } break; case GGML_OP_ADD: { - ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, w->ne[0], 512); + ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]); op_tensor = ggml_add(ctx, a, w); } break; case GGML_OP_MUL: { - 
ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, w->ne[0], 512); + ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]); op_tensor = ggml_mul(ctx, a, w); } break; case GGML_OP_DIV: @@ -7214,6 +7528,12 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w ggml_tensor * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H); op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state); } break; + case GGML_OP_IM2COL: + { + const int n_embd = hparams.n_embd; + ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd, w->ne[1], 1, 1); + op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16); + } break; default: GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name); } @@ -7344,7 +7664,8 @@ static bool llm_load_tensors( model.main_gpu = main_gpu; model.n_gpu_layers = n_gpu_layers; - const int n_layer = hparams.n_layer; + const int n_layer = hparams.n_layer; + bool use_mmap_buffer = true; // build a list of buffer types for the CPU and GPU devices @@ -7599,7 +7920,13 @@ static bool llm_load_tensors( layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); - layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); + if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) { + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); + layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); + } + else { + layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? 
llama_model_loader::TENSOR_DUPLICATED : 0)); + } if (n_expert == 0) { layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); @@ -8066,6 +8393,7 @@ static bool llm_load_tensors( } } break; case LLM_ARCH_QWEN2: + case LLM_ARCH_QWEN2VL: { model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); @@ -8568,6 +8896,31 @@ static bool llm_load_tensors( layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); } } break; + case LLM_ARCH_OLMO2: + { + model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = model.layers[i]; + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0); + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0); + layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0); + } + } break; case LLM_ARCH_OLMOE: { model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); @@ -8701,6 +9054,55 @@ static bool llm_load_tensors( layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); } } break; + case LLM_ARCH_DEEPSEEK: + { + + const int64_t n_ff_exp = hparams.n_ff_exp; + const int64_t n_expert_shared = hparams.n_expert_shared; + + model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = model.layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + if (i < (int) hparams.n_layer_dense_lead) { + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } else { + 
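                        // For orientation, a sketch of the shapes loaded below with assumed
                        // DeepSeek-MoE-16B-like values (n_embd = 2048, n_ff_exp = 1408,
                        // n_expert = 64, n_expert_shared = 2; illustrative only):
                        //     ffn_gate_exps : [2048, 1408, 64]  // all routed experts fused into one 3-D tensor
                        //     ffn_up_shexp  : [2048, 2816]      // shared experts widened to n_ff_exp * n_expert_shared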
layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + + if (n_expert == 0) { + throw std::runtime_error("n_expert must be > 0"); + } + if (n_expert_used == 0) { + throw std::runtime_error("n_expert_used must be > 0"); + } + + // MoE branch + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0); + + // Shared expert branch + layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); + layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), { n_ff_exp * n_expert_shared, n_embd}, 0); + layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0); + } + } + } break; case LLM_ARCH_DEEPSEEK2: { const bool is_lite = (hparams.n_layer == 27); @@ -9071,9 +9473,9 @@ static bool llm_load_tensors( } break; case LLM_ARCH_CHAMELEON: { - model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); - // output + // output model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED); // if output is NULL, init from the input tok embed @@ -9102,13 +9504,116 @@ static bool llm_load_tensors( layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); } } break; - default: - throw std::runtime_error("unknown architecture"); - } + case LLM_ARCH_WAVTOKENIZER_DEC: + { + model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hparams.n_embd_features, n_vocab}, 0); - if (n_moved_tensors > 0) { - LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %d others) cannot be used with preferred buffer type %s, using %s instead\n", - __func__, first_moved_tensor->name, ggml_type_name(first_moved_tensor->type), n_moved_tensors - 1, + model.conv1d = create_tensor(tn(LLM_TENSOR_CONV1D, "weight"), {7, hparams.n_embd_features, hparams.posnet.n_embd}, 0); + model.conv1d_b = create_tensor(tn(LLM_TENSOR_CONV1D, "bias"), {1, hparams.posnet.n_embd}, 0); + + // posnet + { + const int64_t n_embd = hparams.posnet.n_embd; + + for (uint32_t i = 0; i < hparams.posnet.n_layer; ++i) { + auto & layer = model.layers[i].posnet; + + // posnet: + // + // - resnet + // - resnet + // - attn + // - resnet + // - resnet + // - norm + // + switch (i) { + case 0: + case 1: + case 3: + case 4: + { + layer.norm1 = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "weight", i), {1, n_embd}, 0); + layer.norm1_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "bias", i), {1, n_embd}, 0); + + layer.conv1 = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "weight", i), {3, n_embd, n_embd}, 0); + layer.conv1_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "bias", i), {1, n_embd}, 0); + + layer.norm2 = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "weight", i), {1, n_embd}, 0); + layer.norm2_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "bias", i), {1, n_embd}, 0); + + layer.conv2 = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "weight", i), {3, n_embd, n_embd}, 0); + layer.conv2_b = 
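
// A sketch of how the shared-expert tensors created above are combined at graph-build
// time (see build_deepseek() later in this patch): the routed top-k mixture and the
// always-on shared expert run on the same input and their outputs are summed. Plain
// vectors stand in for ggml tensors; ffn() is a hypothetical stand-in for the
// up/gate/down projection chain.
#include <cstdio>
#include <vector>

static std::vector<float> ffn(const std::vector<float> & x, float w) {
    std::vector<float> y(x.size());
    for (size_t i = 0; i < x.size(); ++i) { y[i] = w * x[i]; }  // stand-in for up/gate/down
    return y;
}

int main() {
    std::vector<float> cur = { 1.0f, 2.0f, 3.0f };

    std::vector<float> moe_out   = ffn(cur, 0.5f);  // routed top-k expert mixture
    std::vector<float> ffn_shexp = ffn(cur, 0.1f);  // shared expert, applied to every token

    for (size_t i = 0; i < cur.size(); ++i) {
        cur[i] = moe_out[i] + ffn_shexp[i];         // cur = ggml_add(ctx0, moe_out, ffn_shexp)
    }
    std::printf("%.2f %.2f %.2f\n", cur[0], cur[1], cur[2]);
    return 0;
}
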
create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "bias", i), {1, n_embd}, 0); + } break; + case 2: + { + layer.attn_norm = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias", i), {1, n_embd}, 0); + + layer.attn_q = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q, "weight", i), {1, n_embd, n_embd}, 0); + layer.attn_q_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q, "bias", i), {1, n_embd}, 0); + + layer.attn_k = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K, "weight", i), {1, n_embd, n_embd}, 0); + layer.attn_k_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K, "bias", i), {1, n_embd}, 0); + + layer.attn_v = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V, "weight", i), {1, n_embd, n_embd}, 0); + layer.attn_v_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V, "bias", i), {1, n_embd}, 0); + + layer.attn_o = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT, "weight", i), {1, n_embd, n_embd}, 0); + layer.attn_o_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT, "bias", i), {1, n_embd}, 0); + } break; + case 5: + { + layer.norm = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0); + layer.norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias", i), {1, n_embd}, 0); + } break; + default: GGML_ABORT("unknown posnet layer"); + }; + } + } + + GGML_ASSERT(hparams.posnet.n_embd == hparams.convnext.n_embd); + + model.tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {hparams.posnet.n_embd}, 0); + model.tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {hparams.posnet.n_embd}, 0); + + // convnext + { + const int64_t n_embd = hparams.convnext.n_embd; + + for (uint32_t i = 0; i < hparams.convnext.n_layer; ++i) { + auto & layer = model.layers[i].convnext; + + layer.dw = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW, "weight", i), {7, 1, n_embd}, 0); + layer.dw_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW, "bias", i), {1, n_embd}, 0); + + layer.norm = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM, "weight", i), {n_embd}, 0); + layer.norm_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM, "bias", i), {n_embd}, 0); + + layer.pw1 = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1, "weight", i), {n_embd, n_ff}, 0); + layer.pw1_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1, "bias", i), {n_ff}, 0); + + layer.pw2 = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2, "weight", i), {n_ff, n_embd}, 0); + layer.pw2_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2, "bias", i), {n_embd}, 0); + + layer.gamma = create_tensor(tn(LLM_TENSOR_CONVNEXT_GAMMA, "weight", i), {n_embd}, 0); + } + + // output + model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + } + + model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hparams.convnext.n_embd, n_embd}, 0); + model.output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), {n_embd}, 0); + } break; + default: + throw std::runtime_error("unknown architecture"); + } + + if (n_moved_tensors > 0) { + LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %d others) cannot be used with preferred buffer type %s, using %s instead\n", + __func__, first_moved_tensor->name, ggml_type_name(first_moved_tensor->type), n_moved_tensors - 1, ggml_backend_buft_name(first_moved_from_buft), ggml_backend_buft_name(first_moved_to_buft)); } } @@ -9142,7 +9647,7 @@ static bool llm_load_tensors( ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft); if (!dev) { // FIXME: workaround 
for CPU backend buft having a NULL device - dev = ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0); + dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); } ggml_backend_dev_props props; ggml_backend_dev_get_props(dev, &props); @@ -9321,6 +9826,7 @@ enum llm_ffn_gate_type { enum llm_norm_type { LLM_NORM, LLM_NORM_RMS, + LLM_NORM_GROUP, }; static struct ggml_tensor * llm_build_inp_embd( @@ -9341,7 +9847,7 @@ static struct ggml_tensor * llm_build_inp_embd( inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens); } else { - lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens); + lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens); inpL = lctx.inp_embd; ggml_set_input(lctx.inp_embd); } @@ -9462,8 +9968,14 @@ static struct ggml_tensor * llm_build_norm( const llm_build_cb & cb, int il) { switch (type) { - case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break; - case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break; + case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break; + case LLM_NORM_RMS: cur = ggml_rms_norm (ctx, cur, hparams.f_norm_rms_eps); break; + case LLM_NORM_GROUP: + { + cur = ggml_reshape_3d(ctx, cur, cur->ne[0], 1, cur->ne[1]); + cur = ggml_group_norm(ctx, cur, hparams.n_norm_groups, hparams.f_norm_group_eps); + cur = ggml_reshape_2d(ctx, cur, cur->ne[0], cur->ne[2]); + } break; } if (mw || mb) { @@ -12430,6 +12942,124 @@ struct llm_build_context { return gf; } + struct ggml_cgraph * build_qwen2vl() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); + const int64_t n_embd_head = hparams.n_embd_head_v; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); + + // inp_pos - contains the positions + lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens * 4); + cb(lctx.inp_pos, "inp_pos", -1); + ggml_set_input(lctx.inp_pos); + struct ggml_tensor * inp_pos = lctx.inp_pos; + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); + int sections[4]; + std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections); + + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; + + // norm + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // compute Q and K and RoPE them + struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + + struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + + Qcur = ggml_rope_multi( + ctx0, + ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, + n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_multi( + ctx0, + 
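
// The new LLM_NORM_GROUP case above reshapes the 2-D activations to 3-D, applies
// ggml_group_norm, and reshapes back. A plain-loop sketch of the same computation,
// assuming hypothetical n_norm_groups / f_norm_group_eps values:
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const int   n_embd   = 8;
    const int   n_groups = 2;       // hparams.n_norm_groups (hypothetical value)
    const int   per_grp  = n_embd / n_groups;
    const float eps      = 1e-6f;   // hparams.f_norm_group_eps (hypothetical value)

    std::vector<float> x = { 1, 2, 3, 4, 10, 20, 30, 40 };

    for (int g = 0; g < n_groups; ++g) {
        float mean = 0.0f, var = 0.0f;
        for (int i = 0; i < per_grp; ++i) { mean += x[g*per_grp + i]; }
        mean /= per_grp;
        for (int i = 0; i < per_grp; ++i) {
            const float d = x[g*per_grp + i] - mean;
            var += d*d;
        }
        var /= per_grp;
        for (int i = 0; i < per_grp; ++i) {
            x[g*per_grp + i] = (x[g*per_grp + i] - mean) / std::sqrt(var + eps);
        }
    }
    for (float v : x) { std::printf("%.3f ", v); }
    std::printf("\n");
    return 0;
}
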
ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, + n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = llm_build_kv(ctx0, lctx, kv_self, gf, + model.layers[il].wo, model.layers[il].bo, + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, lctx, cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cur = lctx.cvec.apply_to(ctx0, cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + cb(cur, "result_output", -1); + + ggml_build_forward_expand(gf, cur); + + return gf; + } + struct ggml_cgraph * build_qwen2moe() { struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); @@ -12713,7 +13343,13 @@ struct llm_build_context { struct ggml_tensor * inp_pos = build_inp_pos(); // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa(); + struct ggml_tensor * KQ_mask = nullptr; + if (hparams.n_swa == 0) { + // Phi-4 doesn't use sliding window attention + KQ_mask = build_inp_KQ_mask(); + } else { + KQ_mask = build_inp_KQ_mask_swa(); + } for (int il = 0; il < n_layer; ++il) { auto residual = inpL; @@ -12771,7 +13407,7 @@ struct llm_build_context { cur = llm_build_kv(ctx0, lctx, kv_self, gf, model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il); + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); } if (il == n_layer - 1) { @@ -13381,21 +14017,18 @@ struct llm_build_context { return gf; } - // ref: https://arxiv.org/abs/2203.03466 - // https://github.com/ggerganov/llama.cpp/issues/5276#issuecomment-1925774738 - // based on the original build_llama() function - struct ggml_cgraph * build_minicpm() { + struct ggml_cgraph * build_minicpm3() { struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); - const int64_t n_embd_head = hparams.n_embd_head_v; - GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); - GGML_ASSERT(n_embd_head == hparams.n_rot); - - const int64_t n_embd = hparams.n_embd; //TODO: if the model varies, these parameters need to be read from the model const int64_t n_embd_base = 256; const float scale_embd = 12.0f; const float scale_depth = 1.4f; + const float kq_scale = 1.0f / sqrtf(float(hparams.n_embd_head_k)); + + const uint32_t n_embd_head_qk_rope = hparams.n_rot; + const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot; + const uint32_t kv_lora_rank = 
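
// build_qwen2vl() above allocates inp_pos with n_tokens * 4 entries and splits the
// rotary dimensions with hparams.rope_sections via ggml_rope_multi(). A sketch of one
// plausible reading of that layout — four position streams (temporal, height, width,
// extra) — with section sizes chosen here arbitrarily, not taken from a real config:
#include <cstdio>

int main() {
    const int    n_rot       = 32;               // rotary dims => n_rot/2 = 16 pairs
    const int    sections[4] = { 8, 4, 4, 0 };   // hypothetical pairs per stream: t, h, w, extra
    const char * name[4]     = { "time", "height", "width", "extra" };

    int sec_end[4];
    int acc = 0;
    for (int s = 0; s < 4; ++s) { acc += sections[s]; sec_end[s] = acc; }

    for (int pair = 0; pair < n_rot/2; ++pair) {
        int s = 0;
        while (s < 3 && pair >= sec_end[s]) { ++s; }
        std::printf("rotary pair %2d -> %s positions\n", pair, name[s]);
    }
    return 0;
}
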
hparams.n_lora_kv; struct ggml_tensor * cur; struct ggml_tensor * inpL; @@ -13415,209 +14048,65 @@ struct llm_build_context { for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * inpSA = inpL; + struct ggml_tensor * rope_factors = build_rope_factors(il); // norm cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, cb, il); cb(cur, "attn_norm", il); - // self-attention + // self_attention { - // compute Q and K and RoPE them - struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); - cb(Qcur, "Qcur", il); - if (model.layers[il].bq) { - Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); - cb(Qcur, "Qcur", il); - } - - struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); - cb(Kcur, "Kcur", il); - if (model.layers[il].bk) { - Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); - cb(Kcur, "Kcur", il); - } + struct ggml_tensor * q = NULL; + // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens} + q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur); + cb(q, "q", il); - struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); - cb(Vcur, "Vcur", il); - if (model.layers[il].bv) { - Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); - cb(Vcur, "Vcur", il); - } + q = llm_build_norm(ctx0, q, hparams, + model.layers[il].attn_q_a_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(q, "q", il); - Qcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Qcur, "Qcur", il); + // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens} + q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q); + cb(q, "q", il); - Kcur = ggml_rope_ext( - ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr, - n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, - ext_factor, attn_factor, beta_fast, beta_slow - ); - cb(Kcur, "Kcur", il); + // split into {n_head * n_embd_head_qk_nope, n_tokens} + struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, + ggml_row_size(q->type, hparams.n_embd_head_k), + ggml_row_size(q->type, hparams.n_embd_head_k * n_head), + 0); + cb(q_nope, "q_nope", il); - cur = llm_build_kv(ctx0, lctx, kv_self, gf, - model.layers[il].wo, model.layers[il].bo, - Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); - } + // and {n_head * n_embd_head_qk_rope, n_tokens} + struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, + ggml_row_size(q->type, hparams.n_embd_head_k), + ggml_row_size(q->type, hparams.n_embd_head_k * n_head), + ggml_row_size(q->type, n_embd_head_qk_nope)); + cb(q_pe, "q_pe", il); - if (il == n_layer - 1) { - // skip computing output for unused tokens - struct ggml_tensor * inp_out_ids = build_inp_out_ids(); - cur = ggml_get_rows(ctx0, cur, inp_out_ids); - inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); - } + // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens} + struct ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur); + cb(kv_pe_compresseed, "kv_pe_compresseed", il); - // scale_res - scale the hidden states for residual connection - const float scale_res = scale_depth/sqrtf(float(n_layer)); - cur = 
ggml_scale(ctx0, cur, scale_res); - cb(cur, "hidden_scaled", -1); + // split into {kv_lora_rank, n_tokens} + struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens, + kv_pe_compresseed->nb[1], + 0); + cb(kv_compressed, "kv_compressed", il); - struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); - cb(ffn_inp, "ffn_inp", il); + // and {n_embd_head_qk_rope, n_tokens} + struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens, + kv_pe_compresseed->nb[1], + kv_pe_compresseed->nb[1], + ggml_row_size(kv_pe_compresseed->type, kv_lora_rank)); + cb(k_pe, "k_pe", il); - // feed-forward network - { - cur = llm_build_norm(ctx0, ffn_inp, hparams, - model.layers[il].ffn_norm, NULL, + kv_compressed = ggml_cont(ctx0, kv_compressed); // TODO: the CUDA backend does not support non-contiguous norm + kv_compressed = llm_build_norm(ctx0, kv_compressed, hparams, + model.layers[il].attn_kv_a_norm, NULL, LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm", il); - - cur = llm_build_ffn(ctx0, lctx, cur, - model.layers[il].ffn_up, NULL, NULL, - model.layers[il].ffn_gate, NULL, NULL, - model.layers[il].ffn_down, NULL, NULL, - NULL, - LLM_FFN_SILU, LLM_FFN_PAR, cb, il); - cb(cur, "ffn_out", il); - } - - // scale the hidden states for residual connection - cur = ggml_scale(ctx0, cur, scale_res); - cb(cur, "hidden_scaled_ffn", -1); - - cur = ggml_add(ctx0, cur, ffn_inp); - cur = lctx.cvec.apply_to(ctx0, cur, il); - cb(cur, "l_out", il); - - // input for next layer - inpL = cur; - } - - cur = inpL; - - cur = llm_build_norm(ctx0, cur, hparams, - model.output_norm, NULL, - LLM_NORM_RMS, cb, -1); - cb(cur, "result_norm", -1); - - // lm_head scaling - const float scale_lmhead = float(n_embd_base)/float(n_embd); - cur = ggml_scale(ctx0, cur, scale_lmhead); - cb(cur, "lmhead_scaling", -1); - - // lm_head - cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); - cb(cur, "result_output", -1); - - ggml_build_forward_expand(gf, cur); - - return gf; - } - - struct ggml_cgraph * build_minicpm3() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); - - //TODO: if the model varies, these parameters need to be read from the model - const int64_t n_embd_base = 256; - const float scale_embd = 12.0f; - const float scale_depth = 1.4f; - const float kq_scale = 1.0f / sqrtf(float(hparams.n_embd_head_k)); - - const uint32_t n_embd_head_qk_rope = hparams.n_rot; - const uint32_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot; - const uint32_t kv_lora_rank = hparams.n_lora_kv; - - struct ggml_tensor * cur; - struct ggml_tensor * inpL; - - inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); - - // scale the input embeddings - inpL = ggml_scale(ctx0, inpL, scale_embd); - cb(inpL, "inp_scaled", -1); - - // inp_pos - contains the positions - struct ggml_tensor * inp_pos = build_inp_pos(); - - // KQ_mask (mask for 1 head, it will be broadcasted to all heads) - struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); - - for (int il = 0; il < n_layer; ++il) { - struct ggml_tensor * inpSA = inpL; - - struct ggml_tensor * rope_factors = build_rope_factors(il); - // norm - cur = llm_build_norm(ctx0, inpL, hparams, - model.layers[il].attn_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(cur, "attn_norm", il); - - // self_attention - { - struct ggml_tensor * q = NULL; - // {n_embd, q_lora_rank} * {n_embd, n_tokens} -> {q_lora_rank, n_tokens} - q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur); - cb(q, "q", 
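
// The MiniCPM3/MLA code above never copies q when splitting it into "nope" and "rope"
// halves: the two ggml_view_3d() calls only adjust byte offsets and strides over the
// packed [n_embd_head_k * n_head, n_tokens] buffer. The same indexing with a flat
// array and toy sizes:
#include <cstdio>

int main() {
    const int n_head              = 2;
    const int n_embd_head_qk_nope = 4;
    const int n_embd_head_qk_rope = 2;
    const int n_embd_head_k       = n_embd_head_qk_nope + n_embd_head_qk_rope;

    float q[n_embd_head_k * n_head];  // one token
    for (int i = 0; i < n_embd_head_k * n_head; ++i) { q[i] = (float) i; }

    for (int h = 0; h < n_head; ++h) {
        const float * q_nope = q + h*n_embd_head_k;                        // offset 0 within the head
        const float * q_pe   = q + h*n_embd_head_k + n_embd_head_qk_nope;  // offset = row_size(nope part)
        std::printf("head %d: nope starts at %g, rope starts at %g\n", h, q_nope[0], q_pe[0]);
    }
    return 0;
}
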
il); - - q = llm_build_norm(ctx0, q, hparams, - model.layers[il].attn_q_a_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(q, "q", il); - - // {q_lora_rank, n_head * hparams.n_embd_head_k} * {q_lora_rank, n_tokens} -> {n_head * hparams.n_embd_head_k, n_tokens} - q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q); - cb(q, "q", il); - - // split into {n_head * n_embd_head_qk_nope, n_tokens} - struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, - ggml_row_size(q->type, hparams.n_embd_head_k), - ggml_row_size(q->type, hparams.n_embd_head_k * n_head), - 0); - cb(q_nope, "q_nope", il); - - // and {n_head * n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, - ggml_row_size(q->type, hparams.n_embd_head_k), - ggml_row_size(q->type, hparams.n_embd_head_k * n_head), - ggml_row_size(q->type, n_embd_head_qk_nope)); - cb(q_pe, "q_pe", il); - - // {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur); - cb(kv_pe_compresseed, "kv_pe_compresseed", il); - - // split into {kv_lora_rank, n_tokens} - struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens, - kv_pe_compresseed->nb[1], - 0); - cb(kv_compressed, "kv_compressed", il); - - // and {n_embd_head_qk_rope, n_tokens} - struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens, - kv_pe_compresseed->nb[1], - kv_pe_compresseed->nb[1], - ggml_row_size(kv_pe_compresseed->type, kv_lora_rank)); - cb(k_pe, "k_pe", il); - - kv_compressed = ggml_cont(ctx0, kv_compressed); // TODO: the CUDA backend does not support non-contiguous norm - kv_compressed = llm_build_norm(ctx0, kv_compressed, hparams, - model.layers[il].attn_kv_a_norm, NULL, - LLM_NORM_RMS, cb, il); - cb(kv_compressed, "kv_compressed", il); + cb(kv_compressed, "kv_compressed", il); // {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens} struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed); @@ -14433,6 +14922,130 @@ struct llm_build_context { return gf; } + struct ggml_cgraph * build_olmo2() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); + + // mutable variable, needed during the last layer of the computation to skip unused tokens + int32_t n_tokens = this->n_tokens; + + const int64_t n_embd_head = hparams.n_embd_head_v; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = build_inp_pos(); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); + + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; + + cur = inpL; + + // self_attention + { + // compute Q and K and RoPE them + struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + + struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + + struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, 
ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + + Qcur = llm_build_norm(ctx0, Qcur, hparams, model.layers[il].attn_q_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(Qcur, "Qcur_normed", il); + + Kcur = llm_build_norm(ctx0, Kcur, hparams, model.layers[il].attn_k_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(Kcur, "Kcur_normed", il); + + Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens); + Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens); + + Qcur = ggml_rope_ext( + ctx0, Qcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur_rope", il); + + Kcur = ggml_rope_ext( + ctx0, Kcur, inp_pos, nullptr, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur_rope", il); + + cur = llm_build_kv(ctx0, lctx, kv_self, gf, + model.layers[il].wo, NULL, + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il); + } + + cur = llm_build_norm(ctx0, cur, hparams, + model.layers[il].attn_post_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_post_norm", il); + + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + n_tokens = n_outputs; + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = llm_build_ffn(ctx0, lctx, ffn_inp, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + + cur = llm_build_norm(ctx0, cur, hparams, + model.layers[il].ffn_post_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "ffn_post_norm", -1); + + cur = ggml_add(ctx0, cur, ffn_inp); + cb(cur, "ffn_out", il); + + cur = lctx.cvec.apply_to(ctx0, cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + cb(cur, "result_output", -1); + + ggml_build_forward_expand(gf, cur); + + return gf; + } + // based on the build_qwen2moe() function, changes: // * removed shared experts // * removed bias @@ -14914,29 +15527,183 @@ struct llm_build_context { LLM_FFN_SILU, LLM_FFN_PAR, cb, il); cb(cur, "ffn_out", il); - struct ggml_tensor * ffn_out = ggml_add(ctx0, cur, ffn_inp); - cb(ffn_out, "ffn_out", il); + struct ggml_tensor * ffn_out = ggml_add(ctx0, cur, ffn_inp); + cb(ffn_out, "ffn_out", il); + + // MoE + cur = llm_build_norm(ctx0, inpSA, hparams, + model.layers[il].ffn_norm_exps, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm_exps", il); + + cur = llm_build_moe_ffn(ctx0, lctx, cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + cb, il); + cb(cur, "ffn_moe_out", il); + + cur = ggml_add(ctx0, cur, ffn_out); + cb(cur, "ffn_out", il); + + cur = lctx.cvec.apply_to(ctx0, cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, NULL, + LLM_NORM_RMS, 
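
// build_olmo2() above is a post-norm variant: there is no attn_norm/ffn_norm in front
// of the sub-blocks; instead attn_post_norm and ffn_post_norm are applied to the
// sub-block outputs before the residual add. A one-float sketch of that ordering,
// with attn()/ffn() as hypothetical stand-ins:
#include <cmath>
#include <cstdio>

static float rms_norm(float x) { return x / std::sqrt(x*x + 1e-6f); }  // 1-element RMSNorm
static float attn(float x) { return 0.5f * x; }                        // stand-in
static float ffn (float x) { return 2.0f * x; }                        // stand-in

int main() {
    float inpSA = 1.0f;

    float cur = attn(inpSA);      // note: no pre-norm before attention
    cur = rms_norm(cur);          // attn_post_norm
    float ffn_inp = cur + inpSA;  // residual

    cur = ffn(ffn_inp);
    cur = rms_norm(cur);          // ffn_post_norm
    cur = cur + ffn_inp;          // residual

    std::printf("block output: %f\n", cur);
    return 0;
}
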
cb, -1); + cb(cur, "result_norm", -1); + + // lm_head + cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + cb(cur, "result_output", -1); + + ggml_build_forward_expand(gf, cur); + + return gf; + } + + struct ggml_cgraph * build_deepseek() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); + + // mutable variable, needed during the last layer of the computation to skip unused tokens + int32_t n_tokens = this->n_tokens; + + const int64_t n_embd_head = hparams.n_embd_head_v; + GGML_ASSERT(n_embd_head == hparams.n_embd_head_k); + GGML_ASSERT(n_embd_head == hparams.n_rot); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); + + // inp_pos - contains the positions + struct ggml_tensor * inp_pos = build_inp_pos(); + + // KQ_mask (mask for 1 head, it will be broadcasted to all heads) + struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); + const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale; + for (int il = 0; il < n_layer; ++il) { + struct ggml_tensor * inpSA = inpL; + + // norm + cur = llm_build_norm(ctx0, inpL, hparams, + model.layers[il].attn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "attn_norm", il); + + // self-attention + { + // rope freq factors for llama3; may return nullptr for llama2 and other models + struct ggml_tensor * rope_factors = build_rope_factors(il); + + // compute Q and K and RoPE them + struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); + cb(Qcur, "Qcur", il); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + cb(Qcur, "Qcur", il); + } + + struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); + cb(Kcur, "Kcur", il); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + cb(Kcur, "Kcur", il); + } + + struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); + cb(Vcur, "Vcur", il); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + cb(Vcur, "Vcur", il); + } + + Qcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Qcur, "Qcur", il); + + Kcur = ggml_rope_ext( + ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors, + n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, + ext_factor, attn_factor, beta_fast, beta_slow + ); + cb(Kcur, "Kcur", il); + + cur = llm_build_kv(ctx0, lctx, kv_self, gf, + model.layers[il].wo, model.layers[il].bo, + Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il); + } + + if (il == n_layer - 1) { + // skip computing output for unused tokens + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + n_tokens = n_outputs; + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids); + } + - // MoE - cur = llm_build_norm(ctx0, inpSA, hparams, - model.layers[il].ffn_norm_exps, NULL, + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA); + cb(ffn_inp, "ffn_inp", il); + + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, cb, il); - cb(cur, "ffn_norm_exps", il); + cb(cur, "ffn_norm", il); - cur = llm_build_moe_ffn(ctx0, lctx, cur, - model.layers[il].ffn_gate_inp, - 
model.layers[il].ffn_up_exps, - model.layers[il].ffn_gate_exps, - model.layers[il].ffn_down_exps, - n_expert, n_expert_used, - LLM_FFN_SILU, true, - false, 0.0, - cb, il); - cb(cur, "ffn_moe_out", il); + if ((uint32_t) il < hparams.n_layer_dense_lead) { + cur = llm_build_ffn(ctx0, lctx, cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + } else { + // MoE branch + ggml_tensor * moe_out = + llm_build_moe_ffn(ctx0, lctx, cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + n_expert, n_expert_used, + LLM_FFN_SILU, false, + false, hparams.expert_weights_scale, + cb, il); + cb(moe_out, "ffn_moe_out", il); - cur = ggml_add(ctx0, cur, ffn_out); - cb(cur, "ffn_out", il); + // FFN shared expert + { + ggml_tensor * ffn_shexp = llm_build_ffn(ctx0, lctx, cur, + model.layers[il].ffn_up_shexp, NULL, NULL, + model.layers[il].ffn_gate_shexp, NULL, NULL, + model.layers[il].ffn_down_shexp, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(ffn_shexp, "ffn_shexp", il); + + cur = ggml_add(ctx0, moe_out, ffn_shexp); + cb(cur, "ffn_out", il); + } + } + cur = ggml_add(ctx0, cur, ffn_inp); cur = lctx.cvec.apply_to(ctx0, cur, il); cb(cur, "l_out", il); @@ -14953,6 +15720,7 @@ struct llm_build_context { // lm_head cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + cb(cur, "result_output", -1); ggml_build_forward_expand(gf, cur); @@ -15339,7 +16107,7 @@ struct llm_build_context { return gf; } - struct ggml_cgraph * build_t5_encoder() { + struct ggml_cgraph * build_t5_enc() { struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); // mutable variable, needed during the last layer of the computation to skip unused tokens @@ -15471,7 +16239,7 @@ struct llm_build_context { return gf; } - struct ggml_cgraph * build_t5_decoder() { + struct ggml_cgraph * build_t5_dec() { struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); // mutable variable, needed during the last layer of the computation to skip unused tokens @@ -16420,6 +17188,158 @@ struct llm_build_context { return gf; } + + struct ggml_cgraph * build_wavtokenizer_dec() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + + inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, inpL)); + + cur = ggml_conv_1d_ph(ctx0, model.conv1d, cur, 1, 1); + cur = ggml_add(ctx0, cur, model.conv1d_b); + + // posnet + for (uint32_t il = 0; il < hparams.posnet.n_layer; ++il) { + const auto & layer = model.layers[il].posnet; + + inpL = cur; + + switch (il) { + case 0: + case 1: + case 3: + case 4: + { + cur = llm_build_norm(ctx0, cur, hparams, + layer.norm1, + layer.norm1_b, + LLM_NORM_GROUP, cb, 0); + + cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur); + + cur = ggml_conv_1d_ph(ctx0, layer.conv1, cur, 1, 1); + cur = ggml_add(ctx0, cur, layer.conv1_b); + + cur = llm_build_norm(ctx0, cur, hparams, + layer.norm2, + layer.norm2_b, + LLM_NORM_GROUP, cb, 0); + + cur = ggml_mul(ctx0, ggml_sigmoid(ctx0, cur), cur); + + cur = ggml_conv_1d_ph(ctx0, layer.conv2, cur, 1, 1); + cur = ggml_add(ctx0, cur, layer.conv2_b); + + cur = ggml_add(ctx0, cur, inpL); + } break; + case 2: + { + cur = 
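
// The llm_build_moe_ffn() call above passes norm_w = false and
// hparams.expert_weights_scale for DEEPSEEK: gate probabilities come from a softmax
// over all experts, the top-k are kept without renormalization, and each kept weight
// is multiplied by the scale. A standalone sketch with made-up numbers:
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> logits = { 2.0f, 1.0f, 0.5f, -1.0f };  // n_expert = 4
    const int   n_expert_used        = 2;
    const float expert_weights_scale = 2.0f;                  // hypothetical

    // softmax over all experts
    float mx = *std::max_element(logits.begin(), logits.end());
    std::vector<float> p(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) { p[i] = std::exp(logits[i] - mx); sum += p[i]; }
    for (float & v : p) { v /= sum; }

    // top-k by probability, kept un-normalized, then scaled
    std::vector<int> idx(p.size());
    for (size_t i = 0; i < idx.size(); ++i) { idx[i] = (int) i; }
    std::partial_sort(idx.begin(), idx.begin() + n_expert_used, idx.end(),
                      [&](int a, int b) { return p[a] > p[b]; });

    for (int k = 0; k < n_expert_used; ++k) {
        std::printf("expert %d weight %.4f\n", idx[k], p[idx[k]] * expert_weights_scale);
    }
    return 0;
}
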
llm_build_norm(ctx0, cur, hparams, + layer.attn_norm, + layer.attn_norm_b, + LLM_NORM_GROUP, cb, 0); + + struct ggml_tensor * q; + struct ggml_tensor * k; + struct ggml_tensor * v; + + q = ggml_conv_1d_ph(ctx0, layer.attn_q, cur, 1, 1); + k = ggml_conv_1d_ph(ctx0, layer.attn_k, cur, 1, 1); + v = ggml_conv_1d_ph(ctx0, layer.attn_v, cur, 1, 1); + + q = ggml_add(ctx0, q, layer.attn_q_b); + k = ggml_add(ctx0, k, layer.attn_k_b); + v = ggml_add(ctx0, v, layer.attn_v_b); + + q = ggml_cont(ctx0, ggml_transpose(ctx0, q)); + k = ggml_cont(ctx0, ggml_transpose(ctx0, k)); + + struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q); + + kq = ggml_soft_max_ext(ctx0, kq, nullptr, 1.0f/sqrtf(float(hparams.posnet.n_embd)), 0.0f); + + cur = ggml_mul_mat(ctx0, kq, v); + + cur = ggml_conv_1d_ph(ctx0, layer.attn_o, cur, 1, 1); + cur = ggml_add(ctx0, cur, layer.attn_o_b); + + cur = ggml_add(ctx0, cur, inpL); + } break; + case 5: + { + cur = llm_build_norm(ctx0, cur, hparams, + layer.norm, + layer.norm_b, + LLM_NORM_GROUP, cb, 0); + } break; + default: GGML_ABORT("unknown posnet layer"); + }; + } + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + cur = llm_build_norm(ctx0, cur, hparams, + model.tok_norm, + model.tok_norm_b, + LLM_NORM, cb, -1); + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + inpL = cur; + + // convnext + for (uint32_t il = 0; il < hparams.convnext.n_layer; ++il) { + const auto & layer = model.layers[il].convnext; + + cur = inpL; + + cur = ggml_conv_1d_dw_ph(ctx0, layer.dw, cur, 1, 1); + cur = ggml_add(ctx0, cur, layer.dw_b); + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + cur = llm_build_norm(ctx0, cur, hparams, + layer.norm, + layer.norm_b, + LLM_NORM, cb, -1); + + cur = llm_build_ffn(ctx0, lctx, cur, + layer.pw1, layer.pw1_b, NULL, + NULL, NULL, NULL, + layer.pw2, layer.pw2_b, NULL, + NULL, + LLM_FFN_GELU, LLM_FFN_SEQ, cb, il); + + cur = ggml_mul(ctx0, cur, layer.gamma); + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + inpL = ggml_add(ctx0, cur, inpL); + } + + cur = inpL; + + cur = ggml_cont(ctx0, ggml_transpose(ctx0, cur)); + + cur = llm_build_norm(ctx0, cur, hparams, + model.output_norm, + model.output_norm_b, + LLM_NORM, cb, -1); + + // lm_head + cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + + cur = ggml_add(ctx0, cur, model.output_b); + cb(cur, "result_embd", -1); + + ggml_build_forward_expand(gf, cur); + + return gf; + } }; static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) { @@ -16502,6 +17422,7 @@ static struct ggml_cgraph * llama_build_graph( switch (model.arch) { case LLM_ARCH_LLAMA: + case LLM_ARCH_MINICPM: case LLM_ARCH_GRANITE: case LLM_ARCH_GRANITE_MOE: { @@ -16553,6 +17474,11 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_qwen2(); } break; + case LLM_ARCH_QWEN2VL: + { + lctx.n_pos_per_token = 4; + result = llm.build_qwen2vl(); + } break; case LLM_ARCH_QWEN2MOE: { result = llm.build_qwen2moe(); } break; @@ -16585,10 +17511,6 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_internlm2(); } break; - case LLM_ARCH_MINICPM: - { - result = llm.build_minicpm(); - } break; case LLM_ARCH_MINICPM3: { result = llm.build_minicpm3(); } break; @@ -16625,6 +17547,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_olmo(); } break; + case LLM_ARCH_OLMO2: + { + result = llm.build_olmo2(); + } break; case LLM_ARCH_OLMOE: { result = llm.build_olmoe(); } break; @@ -16641,6 +17567,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_arctic(); } 
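
// The posnet attention block above (case 2) builds q/k/v with 1-D convolutions and
// runs a single unmasked attention head, scaling the scores by 1/sqrt(n_embd). The
// same math with plain loops and toy sizes:
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    const int T = 3, D = 2;  // sequence length, channels
    float q[T][D] = {{1,0},{0,1},{1,1}};
    float k[T][D] = {{1,0},{0,1},{1,1}};
    float v[T][D] = {{1,2},{3,4},{5,6}};
    const float scale = 1.0f / std::sqrt((float) D);

    for (int i = 0; i < T; ++i) {
        float w[T], mx = -1e30f, sum = 0.0f;
        for (int j = 0; j < T; ++j) {
            w[j] = 0.0f;
            for (int d = 0; d < D; ++d) { w[j] += q[i][d] * k[j][d]; }
            w[j] *= scale;
            mx = std::max(mx, w[j]);
        }
        for (int j = 0; j < T; ++j) { w[j] = std::exp(w[j] - mx); sum += w[j]; }
        float out[D] = { 0, 0 };
        for (int j = 0; j < T; ++j) {
            for (int d = 0; d < D; ++d) { out[d] += (w[j]/sum) * v[j][d]; }
        }
        std::printf("t=%d: %.3f %.3f\n", i, out[0], out[1]);
    }
    return 0;
}
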
break; + case LLM_ARCH_DEEPSEEK: + { + result = llm.build_deepseek(); + } break; case LLM_ARCH_DEEPSEEK2: { result = llm.build_deepseek2(); @@ -16656,14 +17586,14 @@ static struct ggml_cgraph * llama_build_graph( case LLM_ARCH_T5: { if (lctx.is_encoding) { - result = llm.build_t5_encoder(); + result = llm.build_t5_enc(); } else { - result = llm.build_t5_decoder(); + result = llm.build_t5_dec(); } } break; case LLM_ARCH_T5ENCODER: { - result = llm.build_t5_encoder(); + result = llm.build_t5_enc(); } break; case LLM_ARCH_JAIS: { @@ -16685,6 +17615,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_chameleon(); } break; + case LLM_ARCH_WAVTOKENIZER_DEC: + { + result = llm.build_wavtokenizer_dec(); + } break; default: GGML_ABORT("fatal error"); } @@ -16771,35 +17705,40 @@ static void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch) if (ubatch.pos && lctx.inp_pos) { const int64_t n_tokens = ubatch.n_tokens; - - ggml_backend_tensor_set(lctx.inp_pos, ubatch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos)); + auto n_pos = lctx.n_pos_per_token; + ggml_backend_tensor_set(lctx.inp_pos, ubatch.pos, 0, n_tokens*n_pos*ggml_element_size(lctx.inp_pos)); } if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) { - GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs"); - const int64_t n_tokens = ubatch.n_tokens; + //GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs"); + + if (!lctx.inp_out_ids) { + LLAMA_LOG_WARN("%s: 'lctx.inp_out_ids' is not created\n", __func__); + } else { + const int64_t n_tokens = ubatch.n_tokens; - GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer)); - int32_t * data = (int32_t *) lctx.inp_out_ids->data; + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer)); + int32_t * data = (int32_t *) lctx.inp_out_ids->data; - if (lctx.n_outputs == n_tokens) { - for (int i = 0; i < n_tokens; ++i) { - data[i] = i; - } - } else if (ubatch.output) { - int32_t n_outputs = 0; - for (int i = 0; i < n_tokens; ++i) { - if (ubatch.output[i]) { - data[n_outputs++] = i; + if (lctx.n_outputs == n_tokens) { + for (int i = 0; i < n_tokens; ++i) { + data[i] = i; } + } else if (ubatch.output) { + int32_t n_outputs = 0; + for (int i = 0; i < n_tokens; ++i) { + if (ubatch.output[i]) { + data[n_outputs++] = i; + } + } + // the graph needs to have been passed the correct number of outputs + GGML_ASSERT(lctx.n_outputs == n_outputs); + } else if (lctx.n_outputs == 1) { + // only keep last output + data[0] = n_tokens - 1; + } else { + GGML_ASSERT(lctx.n_outputs == 0); } - // the graph needs to have been passed the correct number of outputs - GGML_ASSERT(lctx.n_outputs == n_outputs); - } else if (lctx.n_outputs == 1) { - // only keep last output - data[0] = n_tokens - 1; - } else { - GGML_ASSERT(lctx.n_outputs == 0); } } @@ -17267,8 +18206,9 @@ static enum ggml_status llama_graph_compute( int n_threads, ggml_threadpool * threadpool) { if (lctx.backend_cpu != nullptr) { - ggml_backend_cpu_set_threadpool(lctx.backend_cpu, threadpool); - ggml_backend_cpu_set_abort_callback(lctx.backend_cpu, lctx.abort_callback, lctx.abort_callback_data); + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(lctx.backend_cpu)); + auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool"); + set_threadpool_fn(lctx.backend_cpu, threadpool); } // set the number of threads for all the backends @@ 
-17469,6 +18409,7 @@ static int llama_decode_internal( embd = nullptr; // do not extract embeddings when not needed GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor"); } + // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); ggml_backend_sched_alloc_graph(lctx.sched.get(), gf); @@ -18035,13 +18976,13 @@ static void llama_kv_cache_defrag_internal(struct llama_context & lctx) { static void llama_kv_cache_update_internal(struct llama_context & lctx) { bool need_reserve = false; - // apply K-shift if needed - if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE && lctx.kv_self.has_shift) { - if (lctx.model.arch == LLM_ARCH_DEEPSEEK2) { // not supported due to MLA - GGML_ABORT("Deepseek2 does not support K-shift"); + if (lctx.kv_self.has_shift) { + if (!llama_kv_cache_can_shift(&lctx)) { + GGML_ABORT("The current context does not support K-shift"); } - { + // apply K-shift if needed + if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE) { ggml_backend_sched_reset(lctx.sched.get()); ggml_cgraph * gf = llama_build_graph_k_shift(lctx); @@ -18256,10 +19197,6 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) { new_type = GGML_TYPE_IQ3_S; } - else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 || - new_type == GGML_TYPE_Q4_0_8_8) { - new_type = GGML_TYPE_Q4_0; - } else if (ftype == LLAMA_FTYPE_MOSTLY_TQ1_0 || ftype == LLAMA_FTYPE_MOSTLY_TQ2_0) { new_type = GGML_TYPE_Q4_K; } @@ -18582,9 +19519,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s case LLAMA_FTYPE_MOSTLY_IQ4_XS: default_type = GGML_TYPE_IQ4_XS; break; case LLAMA_FTYPE_MOSTLY_IQ3_S: default_type = GGML_TYPE_IQ3_S; break; case LLAMA_FTYPE_MOSTLY_IQ3_M: default_type = GGML_TYPE_IQ3_S; break; - case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: default_type = GGML_TYPE_Q4_0_4_4; break; - case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: default_type = GGML_TYPE_Q4_0_4_8; break; - case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: default_type = GGML_TYPE_Q4_0_8_8; break; default: throw std::runtime_error(format("invalid output file type %d\n", ftype)); } @@ -18923,14 +19857,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s f32_data = (float *) f32_conv_buf.data(); } - int chunk_size_multiplier = 1; - if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 || new_type == GGML_TYPE_Q4_0_8_8) { - if ((new_type == GGML_TYPE_Q4_0_8_8) && (tensor->ne[1] % 8 != 0)) new_type = GGML_TYPE_Q4_0; - else if (tensor->ne[1] % 4 != 0) new_type = GGML_TYPE_Q4_0; - if (new_type == GGML_TYPE_Q4_0_8_8) chunk_size_multiplier = 8; - else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8) chunk_size_multiplier = 4; - } - LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type)); fflush(stdout); @@ -18943,8 +19869,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s const int64_t nrows = tensor->ne[1]; static const int64_t min_chunk_size = 32 * 512; - const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row)) * - chunk_size_multiplier; + const int64_t chunk_size = (n_per_row >= min_chunk_size ? 
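
// The reworked guard above funnels all K-shift eligibility through
// llama_kv_cache_can_shift(): recurrent caches and DeepSeek2's MLA cache cannot be
// shifted, and rope-less models simply have nothing to rotate. A sketch of the
// decision with hypothetical flag values:
#include <cstdio>

enum rope_type { ROPE_NONE, ROPE_NORM };

int main() {
    const bool recurrent    = false;  // hypothetical: ctx->kv_self.recurrent
    const bool is_deepseek2 = false;  // hypothetical: model.arch == LLM_ARCH_DEEPSEEK2
    const bool has_shift    = true;   // pending K-shift?
    const rope_type rope    = ROPE_NORM;

    const bool can_shift = !recurrent && !is_deepseek2;

    if (has_shift) {
        if (!can_shift) {
            std::printf("abort: the current context does not support K-shift\n");
        } else if (rope != ROPE_NONE) {
            std::printf("apply K-shift graph\n");
        } else {
            std::printf("nothing to rotate (no RoPE)\n");
        }
    }
    return 0;
}
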
n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row)); const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1]; const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size; @@ -19185,6 +20110,7 @@ void llama_lora_adapter_free(struct llama_lora_adapter * adapter) { // struct llama_model_params llama_model_default_params() { struct llama_model_params result = { + /*.devices =*/ nullptr, /*.n_gpu_layers =*/ 0, /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER, /*.main_gpu =*/ 0, @@ -19302,7 +20228,11 @@ void llama_backend_init(void) { void llama_numa_init(enum ggml_numa_strategy numa) { if (numa != GGML_NUMA_STRATEGY_DISABLED) { - ggml_numa_init(numa); + auto * dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + GGML_ASSERT(dev && "CPU backend is not loaded"); + auto * reg = ggml_backend_dev_backend_reg(dev); + auto * numa_init_fn = (decltype(ggml_numa_init) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_numa_init"); + numa_init_fn(numa); } } @@ -19393,19 +20323,24 @@ struct llama_model * llama_load_model_from_file( } // create list of devices to use with this model - // currently, we use all available devices - // TODO: rework API to give user more control over device selection - for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { - ggml_backend_dev_t dev = ggml_backend_dev_get(i); - switch (ggml_backend_dev_type(dev)) { - case GGML_BACKEND_DEVICE_TYPE_CPU: - case GGML_BACKEND_DEVICE_TYPE_ACCEL: - // skip CPU backends since they are handled separately - break; + if (params.devices) { + for (ggml_backend_dev_t * dev = params.devices; *dev; ++dev) { + model->devices.push_back(*dev); + } + } else { + // use all available devices + for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { + ggml_backend_dev_t dev = ggml_backend_dev_get(i); + switch (ggml_backend_dev_type(dev)) { + case GGML_BACKEND_DEVICE_TYPE_CPU: + case GGML_BACKEND_DEVICE_TYPE_ACCEL: + // skip CPU backends since they are handled separately + break; - case GGML_BACKEND_DEVICE_TYPE_GPU: - model->devices.push_back(dev); - break; + case GGML_BACKEND_DEVICE_TYPE_GPU: + model->devices.push_back(dev); + break; + } } } @@ -19576,9 +20511,6 @@ struct llama_context * llama_new_context_with_model( __func__, n_ctx_per_seq, hparams.n_ctx_train); } - ctx->abort_callback = params.abort_callback; - ctx->abort_callback_data = params.abort_callback_data; - ctx->logits_all = params.logits_all; // build worst-case graph for encoder if a model contains encoder @@ -19627,7 +20559,7 @@ struct llama_context * llama_new_context_with_model( } // add CPU backend - ctx->backend_cpu = ggml_backend_cpu_init(); + ctx->backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr); if (ctx->backend_cpu == nullptr) { LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__); llama_free(ctx); @@ -19647,6 +20579,8 @@ struct llama_context * llama_new_context_with_model( } } + llama_set_abort_callback(ctx, params.abort_callback, params.abort_callback_data); + if (!llama_kv_cache_init(ctx->kv_self, ctx, type_k, type_v, kv_size, cparams.offload_kqv)) { LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__); llama_free(ctx);
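
// With the Q4_0_x_y chunk-size multiplier removed above, the quantization chunk size
// reduces to whole rows, rounded up so each chunk holds at least min_chunk_size
// elements. A sketch of the rule for a hypothetical row width:
#include <cstdio>

int main() {
    const long long min_chunk_size = 32 * 512;
    const long long n_per_row      = 4096;  // hypothetical row width

    const long long chunk_size = n_per_row >= min_chunk_size
        ? n_per_row
        : n_per_row * ((min_chunk_size + n_per_row - 1) / n_per_row);

    std::printf("chunk_size = %lld elements (%lld rows)\n", chunk_size, chunk_size / n_per_row);
    return 0;
}
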
@@ -19692,7 +20626,8 @@ struct llama_context * llama_new_context_with_model( std::vector<ggml_backend_t> backend_ptrs; for (auto & backend : ctx->backends) { auto * buft = ggml_backend_get_default_buffer_type(backend.get()); - if (ggml_backend_is_cpu(backend.get()) && !model->devices.empty()) { + auto backend_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get())); + if (backend_type == GGML_BACKEND_DEVICE_TYPE_CPU && !model->devices.empty()) { // use the host buffer of the first device CPU for faster transfer of the intermediate state auto * dev = model->devices[0]; auto * host_buft = ggml_backend_dev_host_buffer_type(dev); @@ -19720,7 +20655,8 @@ struct llama_context * llama_new_context_with_model( // pipeline parallelism requires support for async compute and events in all devices if (pipeline_parallel) { for (auto & backend : ctx->backends) { - if (ggml_backend_is_cpu(backend.get())) { + auto dev_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get())); + if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU) { // ignore CPU backend continue; } @@ -19862,6 +20798,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_T5ENCODER: case LLM_ARCH_JAIS: case LLM_ARCH_RWKV6: + case LLM_ARCH_WAVTOKENIZER_DEC: return LLAMA_ROPE_TYPE_NONE; // use what we call a normal RoPE, operating on pairs of consecutive head values @@ -19876,6 +20813,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_COMMAND_R: case LLM_ARCH_OLMO: case LLM_ARCH_ARCTIC: + case LLM_ARCH_DEEPSEEK: case LLM_ARCH_DEEPSEEK2: case LLM_ARCH_CHATGLM: case LLM_ARCH_GRANITE: @@ -19894,6 +20832,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_QWEN: case LLM_ARCH_QWEN2: case LLM_ARCH_QWEN2MOE: + case LLM_ARCH_OLMO2: case LLM_ARCH_OLMOE: case LLM_ARCH_PHI2: case LLM_ARCH_PHI3: @@ -19908,6 +20847,9 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_MINICPM3: return LLAMA_ROPE_TYPE_NEOX; + case LLM_ARCH_QWEN2VL: + return LLAMA_ROPE_TYPE_MROPE; + // all model arches should be listed explicitly here case LLM_ARCH_UNKNOWN: GGML_ABORT("unknown architecture"); @@ -19974,17 +20916,6 @@ uint64_t llama_model_n_params(const struct llama_model * model) { return model->n_elements; } -struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) { - auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(), - [name](const std::pair<std::string, struct ggml_tensor *> & it) { - return it.first == name; - }); - if (it == model->tensors_by_name.end()) { - return nullptr; - } - return it->second; -} - bool llama_model_has_encoder(const struct llama_model * model) { switch (model->arch) { case LLM_ARCH_T5: return true; @@ -20285,6 +21216,10 @@ void llama_kv_cache_update(struct llama_context * ctx) { llama_kv_cache_update_internal(*ctx); } +bool llama_kv_cache_can_shift(struct llama_context * ctx) { + return !ctx->kv_self.recurrent && ctx->model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA +} + // deprecated size_t llama_get_state_size(struct llama_context * ctx) { return llama_state_get_size(ctx); @@ -21269,6 +22204,14 @@ int32_t llama_n_threads_batch(struct llama_context * ctx) { void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) { ctx->abort_callback = abort_callback; ctx->abort_callback_data = abort_callback_data; + + for (auto & backend : ctx->backends) { + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get())); + auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback"); + if (set_abort_callback_fn) { + set_abort_callback_fn(backend.get(), ctx->abort_callback, 
ctx->abort_callback_data); + } + } void llama_set_embeddings(struct llama_context * ctx, bool embeddings) { @@ -21464,7 +22407,7 @@ float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) { throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs)); } } else if ((size_t) i >= ctx->output_ids.size()) { - throw std::runtime_error(format("out of range [0, %lu)", ctx->output_ids.size())); + throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size())); } else { j = ctx->output_ids[i]; } @@ -21635,18 +22578,111 @@ int32_t llama_detokenize( // chat templates // +static llm_chat_template llama_chat_detect_template(const std::string & tmpl) { + if (LLM_CHAT_TEMPLATES.find(tmpl) != LLM_CHAT_TEMPLATES.end()) { + return LLM_CHAT_TEMPLATES.at(tmpl); + } + auto tmpl_contains = [&tmpl](const char * haystack) -> bool { + return tmpl.find(haystack) != std::string::npos; + }; + if (tmpl_contains("<|im_start|>")) { + return LLM_CHAT_TEMPLATE_CHATML; + } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) { + if (tmpl_contains("[SYSTEM_PROMPT]")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V7; + } else if ( + // catches official 'v1' template + tmpl_contains("' [INST] ' + system_message") + // catches official 'v3' and 'v3-tekken' templates + || tmpl_contains("[AVAILABLE_TOOLS]") + ) { + // Official mistral 'v1', 'v3' and 'v3-tekken' templates + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md + if (tmpl_contains(" [INST]")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V1; + } else if (tmpl_contains("\"[INST]\"")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN; + } + return LLM_CHAT_TEMPLATE_MISTRAL_V3; + } else { + // llama2 template and its variants + // [variant] support system message + // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2 + bool support_system_message = tmpl_contains("<<SYS>>"); + bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]"); + bool strip_message = tmpl_contains("content.strip()"); + if (strip_message) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP; + } else if (add_bos_inside_history) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS; + } else if (support_system_message) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS; + } else { + return LLM_CHAT_TEMPLATE_LLAMA_2; + } + } + } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { + return LLM_CHAT_TEMPLATE_PHI_3; + } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) { + return LLM_CHAT_TEMPLATE_ZEPHYR; + } else if (tmpl_contains("bos_token + message['role']")) { + return LLM_CHAT_TEMPLATE_MONARCH; + } else if (tmpl_contains("<start_of_turn>")) { + return LLM_CHAT_TEMPLATE_GEMMA; + } else if (tmpl_contains("'\\n\\nAssistant: ' + eos_token")) { + // OrionStarAI/Orion-14B-Chat + return LLM_CHAT_TEMPLATE_ORION; + } else if (tmpl_contains("GPT4 Correct ")) { + // openchat/openchat-3.5-0106 + return LLM_CHAT_TEMPLATE_OPENCHAT; + } else if (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: ")) { + // eachadea/vicuna-13b-1.1 (and Orca variant) + if (tmpl_contains("SYSTEM: ")) { + return LLM_CHAT_TEMPLATE_VICUNA_ORCA; + } + return LLM_CHAT_TEMPLATE_VICUNA; + } else if (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>")) { + // deepseek-ai/deepseek-coder-33b-instruct + return LLM_CHAT_TEMPLATE_DEEPSEEK; + } else if (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>")) { + // CohereForAI/c4ai-command-r-plus + return LLM_CHAT_TEMPLATE_COMMAND_R; + } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) { + return LLM_CHAT_TEMPLATE_LLAMA_3; + } else if (tmpl_contains("[gMASK]sop")) { + // chatglm3-6b + return LLM_CHAT_TEMPLATE_CHATGML_3; + } else if (tmpl_contains("[gMASK]")) { + return LLM_CHAT_TEMPLATE_CHATGML_4; + } else if (tmpl_contains(LU8("<用户>"))) { + // MiniCPM-3B-OpenHermes-2.5-v2-GGUF + return LLM_CHAT_TEMPLATE_MINICPM; + } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) { + return LLM_CHAT_TEMPLATE_DEEPSEEK_2; + } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) { + // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb + // EXAONE-3.0-7.8B-Instruct + return LLM_CHAT_TEMPLATE_EXAONE_3; + } else if (tmpl_contains("rwkv-world")) { + return LLM_CHAT_TEMPLATE_RWKV_WORLD; + } else if (tmpl_contains("<|start_of_role|>")) { + return LLM_CHAT_TEMPLATE_GRANITE; + } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) { + return LLM_CHAT_TEMPLATE_GIGACHAT; + } + return LLM_CHAT_TEMPLATE_UNKNOWN; +} + 
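
// llama_chat_detect_template() above maps a Jinja template to an enum once, by exact
// name or by marker substrings, so the apply function below can switch on the enum
// instead of re-scanning strings. A minimal two-marker sketch of the same idea (the
// real function checks many more markers):
#include <cstdio>
#include <string>

enum tmpl_kind { TMPL_UNKNOWN, TMPL_CHATML, TMPL_LLAMA_3 };

static tmpl_kind detect(const std::string & tmpl) {
    auto contains = [&](const char * s) { return tmpl.find(s) != std::string::npos; };
    if (contains("<|im_start|>"))        { return TMPL_CHATML;  }
    if (contains("<|start_header_id|>")) { return TMPL_LLAMA_3; }
    return TMPL_UNKNOWN;
}

int main() {
    std::printf("%d\n", detect("{{ '<|im_start|>' + role }}"));  // prints 1 (TMPL_CHATML)
    return 0;
}
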
tmpl_contains("<|USER_TOKEN|>")) { + // CohereForAI/c4ai-command-r-plus + return LLM_CHAT_TEMPLATE_COMMAND_R; + } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) { + return LLM_CHAT_TEMPLATE_LLAMA_3; + } else if (tmpl_contains("[gMASK]sop")) { + // chatglm3-6b + return LLM_CHAT_TEMPLATE_CHATGML_3; + } else if (tmpl_contains("[gMASK]")) { + return LLM_CHAT_TEMPLATE_CHATGML_4; + } else if (tmpl_contains(LU8("<ē”Øęˆ·>"))) { + // MiniCPM-3B-OpenHermes-2.5-v2-GGUF + return LLM_CHAT_TEMPLATE_MINICPM; + } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) { + return LLM_CHAT_TEMPLATE_DEEPSEEK_2; + } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) { + // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb + // EXAONE-3.0-7.8B-Instruct + return LLM_CHAT_TEMPLATE_EXAONE_3; + } else if (tmpl_contains("rwkv-world")) { + return LLM_CHAT_TEMPLATE_RWKV_WORLD; + } else if (tmpl_contains("<|start_of_role|>")) { + return LLM_CHAT_TEMPLATE_GRANITE; + } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) { + return LLM_CHAT_TEMPLATE_GIGACHAT; + } + return LLM_CHAT_TEMPLATE_UNKNOWN; +} + // Simple version of "llama_apply_chat_template" that only works with strings // This function uses heuristic checks to determine commonly used template. It is not a jinja parser. static int32_t llama_chat_apply_template_internal( - const std::string & tmpl, + const llm_chat_template tmpl, const std::vector & chat, std::string & dest, bool add_ass) { // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527 std::stringstream ss; - auto tmpl_contains = [&tmpl](std::string haystack) -> bool { - return tmpl.find(haystack) != std::string::npos; - }; - if (tmpl == "chatml" || tmpl_contains("<|im_start|>")) { + if (tmpl == LLM_CHAT_TEMPLATE_CHATML) { // chatml template for (auto message : chat) { ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n"; @@ -21654,16 +22690,59 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|im_start|>assistant\n"; } - } else if (tmpl == "llama2" || tmpl == "mistral" || tmpl_contains("[INST]")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7) { + // Official mistral 'v7' template + // See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7 + for (auto message : chat) { + std::string role(message->role); + std::string content(message->content); + if (role == "system") { + ss << "[SYSTEM_PROMPT] " << content << "[/SYSTEM_PROMPT]"; + } else if (role == "user") { + ss << "[INST] " << content << "[/INST]"; + } + else { + ss << " " << content << ""; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 + || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3 + || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN) { + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md + std::string leading_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 ? " " : ""; + std::string trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN ? 
"" : " "; + bool trim_assistant_message = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3; + bool is_inside_turn = false; + for (auto message : chat) { + if (!is_inside_turn) { + ss << leading_space << "[INST]" << trailing_space; + is_inside_turn = true; + } + std::string role(message->role); + std::string content(message->content); + if (role == "system") { + ss << content << "\n\n"; + } else if (role == "user") { + ss << content << leading_space << "[/INST]"; + } else { + ss << trailing_space << (trim_assistant_message ? trim(content) : content) << ""; + is_inside_turn = false; + } + } + } else if ( + tmpl == LLM_CHAT_TEMPLATE_LLAMA_2 + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP) { // llama2 template and its variants // [variant] support system message - bool support_system_message = tmpl_contains("<>") || tmpl == "mistral"; - // [variant] space before + after response - bool space_around_response = tmpl_contains("' ' + eos_token"); + // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2 + bool support_system_message = tmpl != LLM_CHAT_TEMPLATE_LLAMA_2; // [variant] add BOS inside history - bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]"); + bool add_bos_inside_history = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS; // [variant] trim spaces from the input message - bool strip_message = tmpl_contains("content.strip()"); + bool strip_message = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP; // construct the prompt bool is_inside_turn = true; // skip BOS at the beginning ss << "[INST] "; @@ -21684,12 +22763,11 @@ static int32_t llama_chat_apply_template_internal( } else if (role == "user") { ss << content << " [/INST]"; } else { - ss << (space_around_response ? " " : "") << content << (space_around_response ? " " : "") << ""; + ss << content << ""; is_inside_turn = false; } } - // llama2 templates seem to not care about "add_generation_prompt" - } else if (tmpl == "phi3" || (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>"))) { + } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_3) { // Phi 3 for (auto message : chat) { std::string role(message->role); @@ -21698,7 +22776,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|assistant|>\n"; } - } else if (tmpl == "zephyr" || tmpl_contains("<|user|>")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) { // zephyr template for (auto message : chat) { ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n"; @@ -21706,7 +22784,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|assistant|>\n"; } - } else if (tmpl == "monarch" || tmpl_contains("bos_token + message['role']")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_MONARCH) { // mlabonne/AlphaMonarch-7B template (the is included inside history) for (auto message : chat) { std::string bos = (message == chat.front()) ? 
"" : ""; // skip BOS for first message @@ -21715,7 +22793,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "assistant\n"; } - } else if (tmpl == "gemma" || tmpl == "gemma2" || tmpl_contains("")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_GEMMA) { // google/gemma-7b-it std::string system_prompt = ""; for (auto message : chat) { @@ -21737,7 +22815,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "model\n"; } - } else if (tmpl == "orion" || tmpl_contains("'\\n\\nAssistant: ' + eos_token")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_ORION) { // OrionStarAI/Orion-14B-Chat std::string system_prompt = ""; for (auto message : chat) { @@ -21757,7 +22835,7 @@ static int32_t llama_chat_apply_template_internal( ss << message->content << ""; } } - } else if (tmpl == "openchat" || tmpl_contains("GPT4 Correct ")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_OPENCHAT) { // openchat/openchat-3.5-0106, for (auto message : chat) { std::string role(message->role); @@ -21771,13 +22849,13 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "GPT4 Correct Assistant:"; } - } else if (tmpl == "vicuna" || tmpl == "vicuna-orca" || (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: "))) { + } else if (tmpl == LLM_CHAT_TEMPLATE_VICUNA || tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) { // eachadea/vicuna-13b-1.1 (and Orca variant) for (auto message : chat) { std::string role(message->role); if (role == "system") { // Orca-Vicuna variant uses a system prefix - if (tmpl == "vicuna-orca" || tmpl_contains("SYSTEM: ")) { + if (tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) { ss << "SYSTEM: " << message->content << "\n"; } else { ss << message->content << "\n\n"; @@ -21791,7 +22869,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "ASSISTANT:"; } - } else if (tmpl == "deepseek" || (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>"))) { + } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK) { // deepseek-ai/deepseek-coder-33b-instruct for (auto message : chat) { std::string role(message->role); @@ -21806,7 +22884,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "### Response:\n"; } - } else if (tmpl == "command-r" || (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>"))) { + } else if (tmpl == LLM_CHAT_TEMPLATE_COMMAND_R) { // CohereForAI/c4ai-command-r-plus for (auto message : chat) { std::string role(message->role); @@ -21821,7 +22899,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"; } - } else if (tmpl == "llama3" || (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>"))) { + } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA_3) { // Llama 3 for (auto message : chat) { std::string role(message->role); @@ -21830,7 +22908,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|start_header_id|>assistant<|end_header_id|>\n\n"; } - } else if (tmpl == "chatglm3" || tmpl_contains("[gMASK]sop")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_3) { // chatglm3-6b ss << "[gMASK]" << "sop"; for (auto message : chat) { @@ -21840,7 +22918,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|assistant|>"; } - } else if (tmpl == "chatglm4" || tmpl_contains("[gMASK]")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) { ss << "[gMASK]" << ""; for (auto message : chat) { std::string role(message->role); @@ -21849,7 +22927,7 @@ static int32_t 
llama_chat_apply_template_internal( if (add_ass) { ss << "<|assistant|>"; } - } else if (tmpl == "minicpm" || tmpl_contains(LU8("<用户>"))) { + } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) { // MiniCPM-3B-OpenHermes-2.5-v2-GGUF for (auto message : chat) { std::string role(message->role); @@ -21861,7 +22939,7 @@ static int32_t llama_chat_apply_template_internal( ss << trim(message->content); } } - } else if (tmpl == "deepseek2" || tmpl_contains("'Assistant: ' + message['content'] + eos_token")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_2) { // DeepSeek-V2 for (auto message : chat) { std::string role(message->role); @@ -21876,7 +22954,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "Assistant:"; } - } else if (tmpl == "exaone3" || (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]"))) { + } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) { // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb // EXAONE-3.0-7.8B-Instruct for (auto message : chat) { @@ -21892,7 +22970,7 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "[|assistant|]"; } - } else if (tmpl == "rwkv-world" || tmpl_contains("rwkv-world")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) { // this template requires the model to have "\n\n" as EOT token for (auto message : chat) { std::string role(message->role); @@ -21902,7 +22980,7 @@ static int32_t llama_chat_apply_template_internal( ss << message->content << "\n\n"; } } - } else if (tmpl == "granite" || tmpl_contains("<|start_of_role|>")) { + } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) { // IBM Granite template for (const auto & message : chat) { std::string role(message->role); @@ -21915,6 +22993,32 @@ static int32_t llama_chat_apply_template_internal( if (add_ass) { ss << "<|start_of_role|>assistant<|end_of_role|>\n"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_GIGACHAT) { + // GigaChat template + bool has_system = !chat.empty() && std::string(chat[0]->role) == "system"; + + // Handle system message if present + if (has_system) { + ss << "<s>" << chat[0]->content << "<|message_sep|>"; + } else { + ss << "<s>"; + } + + // Process remaining messages + for (size_t i = has_system ? 1 : 0; i < chat.size(); i++) { + std::string role(chat[i]->role); + if (role == "user") { + ss << "user<|role_sep|>" << chat[i]->content << "<|message_sep|>" + << "available functions<|role_sep|>[]<|message_sep|>"; + } else if (role == "assistant") { + ss << "assistant<|role_sep|>" << chat[i]->content << "<|message_sep|>"; + } + } + + // Add generation prompt if needed + if (add_ass) { + ss << "assistant<|role_sep|>"; + } } else { // template not supported return -1;
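(Annotation, not part of the diff.) With detection folded into the wrapper below, callers keep using the public llama.h entry point unchanged; the return value is the required output length, snprintf-style, so a too-small buffer can be resized and the call retried. A minimal usage sketch, assuming llama.h is included:

    #include <string>
    #include <vector>
    static std::string format_chat_sketch() {
        llama_chat_message msgs[] = {
            { "system", "You are a helpful assistant" },
            { "user",   "Hello"                      },
        };
        std::vector<char> buf(1024);
        int32_t n = llama_chat_apply_template(nullptr, "chatml", msgs, 2, true, buf.data(), buf.size());
        if (n > (int32_t) buf.size()) {
            buf.resize(n); // first call reported the required length
            n = llama_chat_apply_template(nullptr, "chatml", msgs, 2, true, buf.data(), buf.size());
        }
        return n < 0 ? std::string() : std::string(buf.data(), n); // n < 0 means the template was not recognized
    }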
"" : tmpl); if (tmpl == nullptr) { GGML_ASSERT(model != nullptr); - // load template from model - std::vector model_template(2048, 0); // longest known template is about 1200 bytes - std::string template_key = "tokenizer.chat_template"; - int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size()); - if (res < 0) { + + // load template from model, if available + const auto & it = model->gguf_kv.find("tokenizer.chat_template"); + if (it != model->gguf_kv.end() && it->second.size() > 0) { + curr_tmpl = it->second; + } + else { // worst case: there is no information about template, we will use chatml by default - curr_tmpl = "chatml"; // see llama_chat_apply_template_internal - } else { - curr_tmpl = std::string(model_template.data(), model_template.size()); + curr_tmpl = "chatml"; // see llama_chat_apply_template_internal } } @@ -21954,7 +23058,11 @@ int32_t llama_chat_apply_template( } std::string formatted_chat; - int32_t res = llama_chat_apply_template_internal(curr_tmpl, chat_vec, formatted_chat, add_ass); + llm_chat_template detected_tmpl = llama_chat_detect_template(curr_tmpl); + if (detected_tmpl == LLM_CHAT_TEMPLATE_UNKNOWN) { + return -1; + } + int32_t res = llama_chat_apply_template_internal(detected_tmpl, chat_vec, formatted_chat, add_ass); if (res < 0) { return res; } @@ -21964,6 +23072,15 @@ int32_t llama_chat_apply_template( return res; } +int32_t llama_chat_builtin_templates(const char ** output, size_t len) { + auto it = LLM_CHAT_TEMPLATES.begin(); + for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) { + output[i] = it->first.c_str(); + std::advance(it, 1); + } + return (int32_t) LLM_CHAT_TEMPLATES.size(); +} + // // sampling // @@ -22010,32 +23127,23 @@ int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int } const char * llama_print_system_info(void) { - ggml_cpu_init(); // some ARM features are detected at runtime - static std::string s; - s = ""; - s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | "; - s += "AVX_VNNI = " + std::to_string(ggml_cpu_has_avx_vnni()) + " | "; - s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | "; - s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | "; - s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | "; - s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | "; - s += "AVX512_BF16 = " + std::to_string(ggml_cpu_has_avx512_bf16()) + " | "; - s += "AMX_INT8 = " + std::to_string(ggml_cpu_has_amx_int8()) + " | "; - s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | "; - s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | "; - s += "SVE = " + std::to_string(ggml_cpu_has_sve()) + " | "; - s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | "; - s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | "; - s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | "; - s += "RISCV_VECT = " + std::to_string(ggml_cpu_has_riscv_v()) + " | "; - s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | "; - s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | "; - s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | "; - s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | "; - s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | "; - s += "LLAMAFILE = " + std::to_string(ggml_cpu_has_llamafile()) + " | "; + for (size_t i = 0; i < ggml_backend_reg_count(); i++) { + auto * reg = ggml_backend_reg_get(i); + auto 
 + // // sampling // @@ -22010,32 +23127,23 @@ int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int } const char * llama_print_system_info(void) { - ggml_cpu_init(); // some ARM features are detected at runtime static std::string s; - s = ""; - s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | "; - s += "AVX_VNNI = " + std::to_string(ggml_cpu_has_avx_vnni()) + " | "; - s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | "; - s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | "; - s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | "; - s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | "; - s += "AVX512_BF16 = " + std::to_string(ggml_cpu_has_avx512_bf16()) + " | "; - s += "AMX_INT8 = " + std::to_string(ggml_cpu_has_amx_int8()) + " | "; - s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | "; - s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | "; - s += "SVE = " + std::to_string(ggml_cpu_has_sve()) + " | "; - s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | "; - s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | "; - s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | "; - s += "RISCV_VECT = " + std::to_string(ggml_cpu_has_riscv_v()) + " | "; - s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | "; - s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | "; - s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | "; - s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | "; - s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | "; - s += "LLAMAFILE = " + std::to_string(ggml_cpu_has_llamafile()) + " | "; + for (size_t i = 0; i < ggml_backend_reg_count(); i++) { + auto * reg = ggml_backend_reg_get(i); + auto * get_features_fn = (ggml_backend_get_features_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_get_features"); + if (get_features_fn) { + ggml_backend_feature * features = get_features_fn(reg); + s += ggml_backend_reg_name(reg); + s += " : "; + for (; features->name; features++) { + s += features->name; + s += " = "; + s += features->value; + s += " | "; + } + } + } return s.c_str(); } diff --git a/src/unicode.cpp b/src/unicode.cpp index 50b35bbbc918c..8ed6b1a51c251 100644 --- a/src/unicode.cpp +++ b/src/unicode.cpp @@ -71,15 +71,15 @@ uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) { throw std::invalid_argument("failed to convert utf8 to codepoint"); } -//static std::vector<uint16_t> unicode_cpt_to_utf16(uint32_t cp) { +//static std::vector<uint16_t> unicode_cpt_to_utf16(uint32_t cpt) { // std::vector<uint16_t> result; -// if (/* 0x0000 <= cp && */ cp <= 0xffff) { -// result.emplace_back(cp); +// if (/* 0x0000 <= cpt && */ cpt <= 0xffff) { +// result.emplace_back(cpt); // return result; // } -// if (0x10000 <= cp && cp <= 0x10ffff) { -// result.emplace_back(0xd800 | ((cp - 0x10000) >> 10)); -// result.emplace_back(0xdc00 | ((cp - 0x10000) & 0x03ff)); +// if (0x10000 <= cpt && cpt <= 0x10ffff) { +// result.emplace_back(0xd800 | ((cpt - 0x10000) >> 10)); +// result.emplace_back(0xdc00 | ((cpt - 0x10000) & 0x03ff)); +// return result; // } // throw std::invalid_argument("failed to convert codepoint to utf16"); @@ -120,8 +120,8 @@ uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) { // return result; //} -static std::vector<codepoint_flags> unicode_cpt_flags_array() { - std::vector<codepoint_flags> cpt_flags(MAX_CODEPOINTS, codepoint_flags::UNDEFINED); +static std::vector<unicode_cpt_flags> unicode_cpt_flags_array() { + std::vector<unicode_cpt_flags> cpt_flags(MAX_CODEPOINTS, unicode_cpt_flags::UNDEFINED); assert (unicode_ranges_flags.begin()[0].first == 0); assert (unicode_ranges_flags.begin()[unicode_ranges_flags.size()-1].first == MAX_CODEPOINTS); @@ -201,7 +201,18 @@ static std::unordered_map<std::string, uint8_t> unicode_utf8_to_byte_map() { } static inline std::wstring unicode_wstring_from_utf8(const std::string & s) { +#if defined(__clang__) + // disable C++17 deprecation warning for std::codecvt_utf8 +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + std::wstring_convert<std::codecvt_utf8<wchar_t>> conv; + +#if defined(__clang__) +# pragma clang diagnostic pop +#endif + return conv.from_bytes(s); } @@ -242,8 +253,8 @@ static std::vector<size_t> unicode_regex_split_custom_gpt2(const std::string & t return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : OUT_OF_RANGE; }; - auto _get_flags = [&] (const size_t pos) -> codepoint_flags { - return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : codepoint_flags{}; + auto _get_flags = [&] (const size_t pos) -> unicode_cpt_flags { + return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags_from_cpt(cpts[pos]) : unicode_cpt_flags{}; }; size_t _prev_end = offset_ini; @@ -360,8 +371,8 @@ static std::vector<size_t> unicode_regex_split_custom_llama3(const std::string & return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : OUT_OF_RANGE; }; - auto _get_flags = [&] (const size_t pos) -> codepoint_flags { - return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_flags(cpts[pos]) : codepoint_flags{}; + auto _get_flags = [&] (const size_t pos) -> unicode_cpt_flags { + return (offset_ini <= pos && pos < offset_end) ? 
unicode_cpt_flags_from_cpt(cpts[pos]) : unicode_cpt_flags{}; }; size_t _prev_end = offset_ini; @@ -561,29 +572,29 @@ static std::vector<size_t> unicode_regex_split_custom(const std::string & text, // interface // -std::string unicode_cpt_to_utf8(uint32_t cp) { +std::string unicode_cpt_to_utf8(uint32_t cpt) { std::string result; - if (/* 0x00 <= cp && */ cp <= 0x7f) { - result.push_back(cp); + if (/* 0x00 <= cpt && */ cpt <= 0x7f) { + result.push_back(cpt); return result; } - if (0x80 <= cp && cp <= 0x7ff) { - result.push_back(0xc0 | ((cp >> 6) & 0x1f)); - result.push_back(0x80 | (cp & 0x3f)); + if (0x80 <= cpt && cpt <= 0x7ff) { + result.push_back(0xc0 | ((cpt >> 6) & 0x1f)); + result.push_back(0x80 | (cpt & 0x3f)); return result; } - if (0x800 <= cp && cp <= 0xffff) { - result.push_back(0xe0 | ((cp >> 12) & 0x0f)); - result.push_back(0x80 | ((cp >> 6) & 0x3f)); - result.push_back(0x80 | (cp & 0x3f)); + if (0x800 <= cpt && cpt <= 0xffff) { + result.push_back(0xe0 | ((cpt >> 12) & 0x0f)); + result.push_back(0x80 | ((cpt >> 6) & 0x3f)); + result.push_back(0x80 | (cpt & 0x3f)); return result; } - if (0x10000 <= cp && cp <= 0x10ffff) { - result.push_back(0xf0 | ((cp >> 18) & 0x07)); - result.push_back(0x80 | ((cp >> 12) & 0x3f)); - result.push_back(0x80 | ((cp >> 6) & 0x3f)); - result.push_back(0x80 | (cp & 0x3f)); + if (0x10000 <= cpt && cpt <= 0x10ffff) { + result.push_back(0xf0 | ((cpt >> 18) & 0x07)); + result.push_back(0x80 | ((cpt >> 12) & 0x3f)); + result.push_back(0x80 | ((cpt >> 6) & 0x3f)); + result.push_back(0x80 | (cpt & 0x3f)); return result; }
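(Annotation, not part of the diff.) unicode_cpt_to_utf8 above and unicode_cpt_from_utf8 are exact inverses over the valid codepoint range; a small round-trip sketch, assuming unicode.h is included:

    #include <cassert>
    #include <string>
    static void utf8_roundtrip_sketch() {
        const uint32_t cpt = 0x1F600; // in the 4-byte UTF-8 range (0x10000..0x10ffff)
        const std::string utf8 = unicode_cpt_to_utf8(cpt);
        assert(utf8.size() == 4);
        size_t offset = 0;
        assert(unicode_cpt_from_utf8(utf8, offset) == cpt); // decodes the same codepoint
        assert(offset == utf8.size());                      // offset advances past the sequence
    }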
@@ -613,19 +624,19 @@ std::vector<uint32_t> unicode_cpts_from_utf8(const std::string & utf8) { return result; } -codepoint_flags unicode_cpt_flags(const uint32_t cp) { - static const codepoint_flags undef(codepoint_flags::UNDEFINED); +unicode_cpt_flags unicode_cpt_flags_from_cpt(const uint32_t cpt) { + static const unicode_cpt_flags undef(unicode_cpt_flags::UNDEFINED); static const auto cpt_flags = unicode_cpt_flags_array(); - return cp < cpt_flags.size() ? cpt_flags[cp] : undef; + return cpt < cpt_flags.size() ? cpt_flags[cpt] : undef; } -codepoint_flags unicode_cpt_flags(const std::string & utf8) { - static const codepoint_flags undef(codepoint_flags::UNDEFINED); +unicode_cpt_flags unicode_cpt_flags_from_utf8(const std::string & utf8) { + static const unicode_cpt_flags undef(unicode_cpt_flags::UNDEFINED); if (utf8.empty()) { return undef; // undefined } size_t offset = 0; - return unicode_cpt_flags(unicode_cpt_from_utf8(utf8, offset)); + return unicode_cpt_flags_from_cpt(unicode_cpt_from_utf8(utf8, offset)); } std::string unicode_byte_to_utf8(uint8_t byte) { @@ -638,41 +649,41 @@ uint8_t unicode_utf8_to_byte(const std::string & utf8) { return map.at(utf8); } -uint32_t unicode_tolower(uint32_t cp) { +uint32_t unicode_tolower(uint32_t cpt) { // binary search - auto it = std::lower_bound(unicode_map_lowercase.begin(), unicode_map_lowercase.end(), cp, + auto it = std::lower_bound(unicode_map_lowercase.begin(), unicode_map_lowercase.end(), cpt, [](const std::pair<uint32_t, uint32_t> & pair, uint32_t value) { return pair.first < value; }); - if (it != unicode_map_lowercase.end() && it->first == cp) { + if (it != unicode_map_lowercase.end() && it->first == cpt) { return it->second; } - return cp; // Return the original code point if no lowercase mapping is found + return cpt; // Return the original code point if no lowercase mapping is found } std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs) { // unicode categories static const std::map<std::string, int> k_ucat_enum = { - { "\\p{N}", codepoint_flags::NUMBER }, - { "\\p{L}", codepoint_flags::LETTER }, - { "\\p{P}", codepoint_flags::PUNCTUATION }, + { "\\p{N}", unicode_cpt_flags::NUMBER }, + { "\\p{L}", unicode_cpt_flags::LETTER }, + { "\\p{P}", unicode_cpt_flags::PUNCTUATION }, }; static const std::map<int, int> k_ucat_cpt = { - { codepoint_flags::NUMBER, 0xD1 }, - { codepoint_flags::LETTER, 0xD2 }, - { codepoint_flags::PUNCTUATION, 0xD3 }, + { unicode_cpt_flags::NUMBER, 0xD1 }, + { unicode_cpt_flags::LETTER, 0xD2 }, + { unicode_cpt_flags::PUNCTUATION, 0xD3 }, }; static const std::map<int, std::string> k_ucat_map = { - { codepoint_flags::NUMBER, "\x30-\x39" }, // 0-9 - { codepoint_flags::LETTER, "\x41-\x5A\x61-\x7A" }, // A-Za-z - { codepoint_flags::PUNCTUATION, "\x21-\x23\x25-\x2A\x2C-\x2F\x3A-\x3B\x3F-\x40\\\x5B-\\\x5D\x5F\\\x7B\\\x7D" }, // !-#%-*,-/:-;?-@\[-\]_\{\} + { unicode_cpt_flags::NUMBER, "\x30-\x39" }, // 0-9 + { unicode_cpt_flags::LETTER, "\x41-\x5A\x61-\x7A" }, // A-Za-z + { unicode_cpt_flags::PUNCTUATION, "\x21-\x23\x25-\x2A\x2C-\x2F\x3A-\x3B\x3F-\x40\\\x5B-\\\x5D\x5F\\\x7B\\\x7D" }, // !-#%-*,-/:-;?-@\[-\]_\{\} };
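(Annotation, not part of the diff.) These three maps drive the workaround for std::regex lacking \p{N}/\p{L}/\p{P}: codepoints carrying those flags are collapsed to the single stand-in codepoints 0xD1/0xD2/0xD3 before matching, and the \p{...} escape in the pattern is rewritten accordingly. A simplified per-codepoint sketch of the collapse, illustration only (not the exact implementation):

    static uint32_t collapse_cpt_sketch(uint32_t cpt) {
        const unicode_cpt_flags flags = unicode_cpt_flags_from_cpt(cpt);
        if (flags.is_number)      return 0xD1; // stand-in for \p{N}
        if (flags.is_letter)      return 0xD2; // stand-in for \p{L}
        if (flags.is_punctuation) return 0xD3; // stand-in for \p{P}
        return cpt; // everything else passes through unchanged
    }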
// compute collapsed codepoints only if needed by at least one regex bool need_collapse = false; - for (auto & regex_expr : regex_exprs) { + for (const auto & regex_expr : regex_exprs) { // search for unicode categories for (const auto & ucat : k_ucat_enum) { if (std::string::npos != regex_expr.find(ucat.first)) { @@ -698,7 +709,7 @@ std::vector<std::string> unicode_regex_split(const std::string & text, const std continue; } - const auto flags = unicode_cpt_flags(cpts[i]); + const auto flags = unicode_cpt_flags_from_cpt(cpts[i]); if (flags.is_whitespace) { //NOTE: C++ std::regex \s does not match 0x85, Rust and Python regex does. @@ -714,7 +725,7 @@ std::vector<std::string> unicode_regex_split(const std::string & text, const std std::vector<size_t> bpe_offsets = { cpts.size() }; - for (auto & regex_expr : regex_exprs) { + for (const auto & regex_expr : regex_exprs) { // first, see if we have an efficient custom regex implementation auto tmp = unicode_regex_split_custom(text, regex_expr, bpe_offsets); @@ -728,7 +739,7 @@ std::vector<std::string> unicode_regex_split(const std::string & text, const std // if a unicode category is used in the regex, we use the collapsed text and replace the unicode category // with the corresponding collapsed representation bool use_collapsed = false; - for (auto & ucat : k_ucat_enum) { + for (const auto & ucat : k_ucat_enum) { if (std::string::npos != regex_expr.find(ucat.first)) { use_collapsed = true; break; @@ -794,7 +805,7 @@ std::vector<std::string> unicode_regex_split(const std::string & text, const std // std::wregex \s does not match non-ASCII whitespaces, using 0x0B as fallback std::wstring wtext(cpts.begin(), cpts.end()); for (size_t i = 0; i < wtext.size(); ++i) { - if (wtext[i] > 0x7F && unicode_cpt_flags(wtext[i]).is_whitespace) { + if (wtext[i] > 0x7F && unicode_cpt_flags_from_cpt(wtext[i]).is_whitespace) { wtext[i] = 0x0B; } } diff --git a/src/unicode.h b/src/unicode.h index 008532a242ab8..c27098df7d4be 100644 --- a/src/unicode.h +++ b/src/unicode.h @@ -4,9 +4,7 @@ #include <string> #include <vector> -// TODO: prefix all symbols with "llama_" - -struct codepoint_flags { +struct unicode_cpt_flags { enum { UNDEFINED = 0x0001, NUMBER = 0x0002, // regex: \p{N} @@ -35,7 +33,7 @@ struct codepoint_flags { uint16_t is_nfd : 1; // decode from uint16 - inline codepoint_flags(const uint16_t flags=0) { + inline unicode_cpt_flags(const uint16_t flags = 0) { *reinterpret_cast<uint16_t *>(this) = flags; } @@ -50,18 +48,19 @@ struct codepoint_flags { size_t unicode_len_utf8(char src); -std::string unicode_cpt_to_utf8(uint32_t cp); -uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset); +std::string unicode_cpt_to_utf8 (uint32_t cpt); +uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset); + std::vector<uint32_t> unicode_cpts_from_utf8(const std::string & utf8); std::vector<uint32_t> unicode_cpts_normalize_nfd(const std::vector<uint32_t> & cpts); -codepoint_flags unicode_cpt_flags(const uint32_t cp); -codepoint_flags unicode_cpt_flags(const std::string & utf8); +unicode_cpt_flags unicode_cpt_flags_from_cpt (uint32_t cpt); +unicode_cpt_flags unicode_cpt_flags_from_utf8(const std::string & utf8); std::string unicode_byte_to_utf8(uint8_t byte); -uint8_t unicode_utf8_to_byte(const std::string & utf8); +uint8_t unicode_utf8_to_byte(const std::string & utf8); -uint32_t unicode_tolower(uint32_t cp); +uint32_t unicode_tolower(uint32_t cpt); std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index b06f122e89873..2b5e5fd4abe95 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -84,55 +84,66 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf) -# build test-tokenizer-1-bpe target once and add many tests -add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp) -target_link_libraries(test-tokenizer-1-bpe PRIVATE common) -install(TARGETS test-tokenizer-1-bpe RUNTIME) - -# TODO: 
disabled due to slowness -#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf) -#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf) -#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf) -#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf) -#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf --ignore-merges) -#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf) -#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) -#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf) - -# build test-tokenizer-1-spm target once and add many tests -add_executable(test-tokenizer-1-spm test-tokenizer-1-spm.cpp) -target_link_libraries(test-tokenizer-1-spm PRIVATE common) -install(TARGETS test-tokenizer-1-spm RUNTIME) - -llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf) -#llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf) - -# llama_target_and_test(test-double-float.cpp) # SLOW + +if (NOT WIN32) + # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API + llama_target_and_test(test-sampling.cpp) + llama_target_and_test(test-grammar-parser.cpp) + llama_target_and_test(test-grammar-integration.cpp) + llama_target_and_test(test-llama-grammar.cpp) + # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8 + if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + llama_target_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..) 
+ target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../examples/server) + endif() + + + # build test-tokenizer-1-bpe target once and add many tests + add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp) + target_link_libraries(test-tokenizer-1-bpe PRIVATE common) + install(TARGETS test-tokenizer-1-bpe RUNTIME) + + # TODO: disabled due to slowness + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf --ignore-merges) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) + #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf) + + # build test-tokenizer-1-spm target once and add many tests + add_executable(test-tokenizer-1-spm test-tokenizer-1-spm.cpp) + target_link_libraries(test-tokenizer-1-spm PRIVATE common) + install(TARGETS test-tokenizer-1-spm RUNTIME) + + llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf) + #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf) + + # llama_target_and_test(test-double-float.cpp) # SLOW +endif() + llama_target_and_test(test-log.cpp) llama_target_and_test(test-arg-parser.cpp) -llama_target_and_test(test-quantize-fns.cpp) -llama_target_and_test(test-quantize-perf.cpp) -llama_target_and_test(test-sampling.cpp) llama_target_and_test(test-chat-template.cpp) -llama_target_and_test(test-grammar-parser.cpp) -llama_target_and_test(test-grammar-integration.cpp) -llama_target_and_test(test-llama-grammar.cpp) -llama_target_and_test(test-barrier.cpp) # llama_target_and_test(test-opt.cpp) # SLOW +llama_target_and_test(test-gguf.cpp) llama_target_and_test(test-backend-ops.cpp) -llama_target_and_test(test-rope.cpp) - llama_target_and_test(test-model-load-cancel.cpp LABEL "model") llama_target_and_test(test-autorelease.cpp LABEL "model") -# TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8 -if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") - llama_target_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..) 
- target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../examples/server) +if (NOT GGML_BACKEND_DL) + # these tests use the backends directly and cannot be built with dynamic loading + llama_target_and_test(test-barrier.cpp) + llama_target_and_test(test-quantize-fns.cpp) + llama_target_and_test(test-quantize-perf.cpp) + llama_target_and_test(test-rope.cpp) endif() + # dummy executable - not installed get_filename_component(TEST_TARGET test-c.c NAME_WE) add_executable(${TEST_TARGET} test-c.c) diff --git a/tests/test-arg-parser.cpp b/tests/test-arg-parser.cpp index 3665238b5a2d8..69604b87ceec4 100644 --- a/tests/test-arg-parser.cpp +++ b/tests/test-arg-parser.cpp @@ -70,7 +70,7 @@ int main(void) { // non-existence arg in specific example (--draft cannot be used outside llama-speculative) argv = {"binary_name", "--draft", "123"}; - assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_SERVER)); + assert(false == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_EMBEDDING)); printf("test-arg-parser: test valid usage\n\n"); @@ -96,7 +96,7 @@ int main(void) { // --draft cannot be used outside llama-speculative argv = {"binary_name", "--draft", "123"}; assert(true == common_params_parse(argv.size(), list_str_to_char(argv).data(), params, LLAMA_EXAMPLE_SPECULATIVE)); - assert(params.n_draft == 123); + assert(params.speculative.n_max == 123); // skip this part on windows, because setenv is not supported #ifdef _WIN32 diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index f8a59b6df175f..ccdd3fb57a504 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -16,7 +16,6 @@ #include -#include #include #include @@ -26,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -639,19 +637,20 @@ struct test_case { // determine number of runs int n_runs; + bool is_cpu = ggml_backend_dev_type(ggml_backend_get_device(backend)) == GGML_BACKEND_DEVICE_TYPE_CPU; if (op_flops(out) > 0) { // based on flops const uint64_t GFLOP = 1000 * 1000 * 1000; const uint64_t target_flops_cpu = 8ULL * GFLOP; const uint64_t target_flops_gpu = 100ULL * GFLOP; - uint64_t target_flops = ggml_backend_is_cpu(backend) ? target_flops_cpu : target_flops_gpu; + uint64_t target_flops = is_cpu ? target_flops_cpu : target_flops_gpu; n_runs = std::min(ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_flops / op_flops(out)) + 1; } else { // based on memory size const size_t GB = 1ULL << 30; const size_t target_size_cpu = 8 * GB; const size_t target_size_gpu = 32 * GB; - size_t target_size = ggml_backend_is_cpu(backend) ? target_size_cpu : target_size_gpu; + size_t target_size = is_cpu ? 
target_size_cpu : target_size_gpu; n_runs = std::min(ggml_graph_size(gf) - ggml_graph_n_nodes(gf), target_size / op_size(out)) + 1; } @@ -819,7 +818,6 @@ struct test_case { } } - // TODO: refactor so that this check is only needed once for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { if (!ggml_backend_supports_op(backend, t)) { printf("not supported [%s] ", ggml_backend_name(backend)); @@ -1155,6 +1153,26 @@ struct test_argmax : public test_case { return out; } + void initialize_tensors(ggml_context * ctx) override { + std::random_device rd; + std::default_random_engine rng(rd()); + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + if (t->type == GGML_TYPE_F32) { + // initialize with unique values to avoid ties + for (int64_t r = 0; r < ggml_nrows(t); r++) { + std::vector<float> data(t->ne[0]); + for (int i = 0; i < t->ne[0]; i++) { + data[i] = i; + } + std::shuffle(data.begin(), data.end(), rng); + ggml_backend_tensor_set(t, data.data(), r * t->nb[1], t->ne[0] * sizeof(float)); + } + } else { + init_tensor_uniform(t); + } + } + } + double max_nmse_err() override { return 0.0; } @@ -2183,7 +2201,15 @@ struct test_rope : public test_case { ggml_set_name(a, "a"); } - ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2]); + const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE; + const bool is_vision = mode == GGML_ROPE_TYPE_VISION; + + ggml_tensor * pos; + if (is_mrope || is_vision) { + pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2] * 4); + } else { + pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ne_a[2]); + } ggml_set_name(pos, "pos"); ggml_tensor * freq = nullptr; @@ -2192,7 +2218,20 @@ struct test_rope : public test_case { ggml_set_name(freq, "freq"); } - ggml_tensor * out = ggml_rope_ext(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); + ggml_tensor * out; + if (is_mrope) { + if (is_vision) { + GGML_ASSERT(n_dims/4 > 0); + int rope_sections[4] = {n_dims/4, n_dims/4, 0, 0}; // Vision-RoPE only uses the first two sections for the image (x, y) coordinates + out = ggml_rope_multi(ctx, a, pos, freq, n_dims/2, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); + } else { + GGML_ASSERT(n_dims/3 > 0); + int rope_sections[4] = {n_dims/3, n_dims/3, n_dims/3, 0}; + out = ggml_rope_multi(ctx, a, pos, freq, n_dims, rope_sections, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); + } + } else { + out = ggml_rope_ext(ctx, a, pos, freq, n_dims, mode, 0, 10000.0f, fs, ef, af, 1.0f, 1.0f); + } ggml_set_name(out, "out"); return out; @@ -2202,11 +2241,12 @@ struct test_rope : public test_case { for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { if (t->type == GGML_TYPE_I32) { // pos - std::vector<int> data(ne_a[2]); - for (int i = 0; i < ne_a[2]; i++) { + const int num_pos_ids = (mode & GGML_ROPE_TYPE_MROPE) ? 
ne_a[2] * 4 : ne_a[2]; + std::vector<int> data(num_pos_ids); + for (int i = 0; i < num_pos_ids; i++) { data[i] = rand() % n_ctx; } - ggml_backend_tensor_set(t, data.data(), 0, ne_a[2] * sizeof(int)); + ggml_backend_tensor_set(t, data.data(), 0, num_pos_ids * sizeof(int)); } else { if (t->ne[0] == n_dims/2) { // frequency factors in the range [0.9f, 1.1f] @@ -2679,6 +2719,33 @@ struct test_pad : public test_case { } }; +// GGML_OP_PAD_REFLECT_1D +struct test_pad_reflect_1d : public test_case { + const ggml_type type; + const std::array<int64_t, 4> ne_a; + const int pad_0; + const int pad_1; + + std::string vars() override { + return VARS_TO_STR4(type, ne_a, pad_0, pad_1); + } + + test_pad_reflect_1d(ggml_type type = GGML_TYPE_F32, + std::array<int64_t, 4> ne_a = {512, 34, 2, 1}, + int pad_0 = 10, int pad_1 = 9) + : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor(ctx, type, 2, ne_a.data()); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_pad_reflect_1d(ctx, a, pad_0, pad_1); + ggml_set_name(out, "out"); + + return out; + } +}; + // GGML_OP_ARANGE struct test_arange : public test_case { const ggml_type type; @@ -3316,7 +3383,9 @@ static const ggml_type all_types[] = { static const ggml_type base_types[] = { GGML_TYPE_F32, GGML_TYPE_F16, + GGML_TYPE_Q8_0, // for I8MM tests GGML_TYPE_Q4_0, + GGML_TYPE_Q4_1, // for I8MM tests GGML_TYPE_Q4_K, GGML_TYPE_IQ2_XXS }; @@ -3440,9 +3509,15 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() { test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,1,2,1}, 1, 0, 1)); test_cases.emplace_back(new test_conv_transpose_1d({2,1,1,1}, {3,1,1,1}, 1, 0, 1)); - test_cases.emplace_back(new test_argmax()); test_cases.emplace_back(new test_count_equal()); + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32, 1, 1, 1})); + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {100, 10, 1, 1})); + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1})); + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 12, 1, 1})); + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {2000, 10, 1, 1})); + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {5438, 3, 1, 1})); + for (int ne3 : {1, 3}) { // CUDA backward pass only supports ne3 == 1 test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {1, 1, 1, 1})); test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 5, 4, ne3}, {2, 1, 1, 1})); @@ -3468,10 +3543,14 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() { test_cases.emplace_back(new test_set(GGML_TYPE_F32, GGML_TYPE_F32, {6, 5, 4, 3}, dim)); } + for (int dim = 1; dim < GGML_MAX_DIMS; ++dim) { + test_cases.emplace_back(new test_set(GGML_TYPE_I32, GGML_TYPE_I32, {6, 5, 4, 3}, dim)); + } + for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) { for (ggml_type type_dst : all_types) { - test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4})); - test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows + test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4})); + test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows } } for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) { @@ -3547,6 +3626,19 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() { test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 32, 4)); test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 128, 4)); + for 
(int i = 1; i < 9; ++i) { + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q4_0, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q4_1, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q5_0, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q5_1, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q8_0, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q4_K, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q5_K, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q6_K, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(GGML_TYPE_IQ4_NL, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); + } + #if 1 for (ggml_type type_a : base_types) { for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) { @@ -3743,6 +3835,12 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() { test_cases.emplace_back(new test_rope(type, { 80, 32, 2, 1}, 32, 2, 512, fs, ef, af, ff, v)); // neox (phi-2) } + if (all) { + test_cases.emplace_back(new test_rope(type, {128, 12, 2, 1}, 128, GGML_ROPE_TYPE_MROPE, 512, fs, ef, af, ff, v)); // rope_multi,m-rope (qwen2vl 2B) + test_cases.emplace_back(new test_rope(type, {128, 28, 2, 1}, 128, GGML_ROPE_TYPE_MROPE, 512, fs, ef, af, ff, v)); // rope_multi,m-rope (qwen2vl 7B) + test_cases.emplace_back(new test_rope(type, { 80, 16, 2, 1}, 80, GGML_ROPE_TYPE_VISION, 512, fs, ef, af, ff, v)); // rope_multi,m-rope (qwen2vl ViT) + } + test_cases.emplace_back(new test_rope(type, { 64, 128, 2, 1}, 64, 2, 512, fs, ef, af, ff, v)); // neox (falcon 40B) } } @@ -3773,9 +3871,11 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() { test_cases.emplace_back(new test_upscale()); test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 1 }, 2, true)); test_cases.emplace_back(new test_upscale_ext()); - test_cases.emplace_back(new test_group_norm()); + test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, {64, 64, 320, 1})); + test_cases.emplace_back(new test_group_norm(GGML_TYPE_F32, {9, 9, 1280, 1})); test_cases.emplace_back(new test_acc()); test_cases.emplace_back(new test_pad()); + test_cases.emplace_back(new test_pad_reflect_1d()); test_cases.emplace_back(new test_arange()); test_cases.emplace_back(new test_timestep_embedding()); test_cases.emplace_back(new test_leaky_relu()); @@ -3822,6 +3922,20 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() { test_cases.emplace_back(new test_bin_bcast(ggml_add, GGML_TYPE_F32, {4096, 1, 1, 1}, {1, 512, 1, 1})); test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F16, {512, 3072, 1, 1})); + test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {8192, 512, 2, 1}, {0, 2, 1, 3})); + test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {3072, 512, 2, 1}, {0, 2, 1, 3})); + + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096, 4096, 5, 1}, false, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 4096, 5, 1}, false, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024, 1024, 10, 1}, false, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 1024, 10, 1}, false, 
1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256, 256, 20, 1}, false, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64, 64, 20, 1}, false, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 64, 20, 1}, false, 1.0f, 0.0f)); + + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32, 10, 1, 1})); + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1})); + test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32000, 512, 1, 1})); for (int bs : {1, 512}) { for (ggml_type type_a : all_types) { @@ -3837,7 +3951,11 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) { if (mode == MODE_TEST) { auto test_cases = make_test_cases_eval(); - ggml_backend_t backend_cpu = ggml_backend_cpu_init(); + ggml_backend_t backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, NULL); + if (backend_cpu == NULL) { + printf(" Failed to initialize CPU backend\n"); + return false; + } size_t n_ok = 0; for (auto & test : test_cases) { @@ -3917,7 +4035,9 @@ int main(int argc, char ** argv) { } } - // enumerate backends + // load and enumerate backends + ggml_backend_load_all(); + printf("Testing %zu devices\n\n", ggml_backend_dev_count()); size_t n_ok = 0; @@ -3933,16 +4053,15 @@ int main(int argc, char ** argv) { continue; } - ggml_backend_t backend = ggml_backend_dev_init(dev, NULL); - GGML_ASSERT(backend != NULL); - - if (backend_filter == NULL && ggml_backend_is_cpu(backend) && mode != MODE_GRAD) { + if (backend_filter == NULL && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU && mode != MODE_GRAD) { printf(" Skipping CPU backend\n"); - ggml_backend_free(backend); n_ok++; continue; } + ggml_backend_t backend = ggml_backend_dev_init(dev, NULL); + GGML_ASSERT(backend != NULL); + ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev); auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads"); if (ggml_backend_set_n_threads_fn) { diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp index 03e897e66dca4..30a910ad5c55d 100644 --- a/tests/test-chat-template.cpp +++ b/tests/test-chat-template.cpp @@ -21,7 +21,7 @@ int main(void) { std::vector<std::string> templates = { // teknium/OpenHermes-2.5-Mistral-7B "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\\n' }}{% endif %}", - // mistralai/Mistral-7B-Instruct-v0.2 + // mistralai/Mistral-7B-Instruct-v0.2 (NOTE: Old pre-v1 without a system prompt) "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}", // TheBloke/FusionNet_34Bx2_MoE-AWQ "{%- for idx in range(0, messages|length) -%}\\n{%- if messages[idx]['role'] == 'user' -%}\\n{%- if idx > 1 -%}\\n{{- bos_token + '[INST] ' + messages[idx]['content'] + ' [/INST]' -}}\\n{%- else -%}\\n{{- messages[idx]['content'] + ' [/INST]' -}}\\n{%- endif -%}\\n{% elif messages[idx]['role'] == 
'system' %}\\n{{- '[INST] <<SYS>>\\\\n' + messages[idx]['content'] + '\\\\n<</SYS>>\\\\n\\\\n' -}}\\n{%- elif messages[idx]['role'] == 'assistant' -%}\\n{{- ' ' + messages[idx]['content'] + ' ' + eos_token -}}\\n{% endif %}\\n{% endfor %}", @@ -67,16 +67,26 @@ int main(void) { "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}", // ibm-granite/granite-3.0-8b-instruct "{%- if tools %}\n {{- '<|start_of_role|>available_tools<|end_of_role|>\n' }}\n {%- for tool in tools %}\n {{- tool | tojson(indent=4) }}\n {%- if not loop.last %}\n {{- '\n\n' }}\n {%- endif %}\n {%- endfor %}\n {{- '<|end_of_text|>\n' }}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{- '<|start_of_role|>system<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- elif message['role'] == 'user' %}\n {{- '<|start_of_role|>user<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- elif message['role'] == 'assistant' %}\n {{- '<|start_of_role|>assistant<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- elif message['role'] == 'assistant_tool_call' %}\n {{- '<|start_of_role|>assistant<|end_of_role|><|tool_call|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- elif message['role'] == 'tool_response' %}\n {{- '<|start_of_role|>tool_response<|end_of_role|>' + message['content'] + '<|end_of_text|>\n' }}\n {%- endif %}\n {%- if loop.last and add_generation_prompt %}\n {{- '<|start_of_role|>assistant<|end_of_role|>' }}\n {%- endif %}\n{%- endfor %}", + // mistralai/Mistral-7B-Instruct-v0.2 (mistralai 'v1' template with a system prompt) + "{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content'] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{- raise_exception('After the optional system message, conversation roles must alternate user/assistant/user/assistant/...') }}\n {%- endif %}\n {%- if message['role'] == 'user' %}\n {%- if loop.first and system_message is defined %}\n {{- ' [INST] ' + system_message + '\\n\\n' + message['content'] + ' [/INST]' }}\n {%- else %}\n {{- ' [INST] ' + message['content'] + ' [/INST]' }}\n {%- endif %}\n {%- elif message['role'] == 'assistant' %}\n {{- ' ' + message['content'] + eos_token}}\n {%- else %}\n {{- raise_exception('Only user and assistant roles are supported, with the exception of an initial optional system message!') }}\n {%- endif %}\n{%- endfor %}\n", + // Mistral-Large-Instruct-2407 (mistralai 'v3' template) + "{%- if messages[0][\"role\"] == \"system\" %}\n {%- set system_message = messages[0][\"content\"] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n{%- set user_messages = loop_messages | selectattr(\"role\", \"equalto\", \"user\") | list %}\n\n{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}\n{%- 
set ns = namespace() %}\n{%- set ns.index = 0 %}\n{%- for message in loop_messages %}\n {%- if not (message.role == \"tool\" or message.role == \"tool_results\" or (message.tool_calls is defined and message.tool_calls is not none)) %}\n {%- if (message[\"role\"] == \"user\") != (ns.index % 2 == 0) %}\n {{- raise_exception(\"After the optional system message, conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif %}\n {%- set ns.index = ns.index + 1 %}\n {%- endif %}\n{%- endfor %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n {%- if message[\"role\"] == \"user\" %}\n {%- if tools is not none and (message == user_messages[-1]) %}\n {{- \"[AVAILABLE_TOOLS] [\" }}\n {%- for tool in tools %}\n {%- set tool = tool.function %}\n {{- '{\"type\": \"function\", \"function\": {' }}\n {%- for key, val in tool.items() if key != \"return\" %}\n {%- if val is string %}\n {{- '\"' + key + '\": \"' + val + '\"' }}\n {%- else %}\n {{- '\"' + key + '\": ' + val|tojson }}\n {%- endif %}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \"}}\" }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- else %}\n {{- \"]\" }}\n {%- endif %}\n {%- endfor %}\n {{- \"[/AVAILABLE_TOOLS]\" }}\n {%- endif %}\n {%- if loop.last and system_message is defined %}\n {{- \"[INST] \" + system_message + \"\\n\\n\" + message[\"content\"] + \"[/INST]\" }}\n {%- else %}\n {{- \"[INST] \" + message[\"content\"] + \"[/INST]\" }}\n {%- endif %}\n {%- elif message.tool_calls is defined and message.tool_calls is not none %}\n {{- \"[TOOL_CALLS] [\" }}\n {%- for tool_call in message.tool_calls %}\n {%- set out = tool_call.function|tojson %}\n {{- out[:-1] }}\n {%- if not tool_call.id is defined or tool_call.id|length != 9 %}\n {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n {%- endif %}\n {{- ', \"id\": \"' + tool_call.id + '\"}' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- else %}\n {{- \"]\" + eos_token }}\n {%- endif %}\n {%- endfor %}\n {%- elif message[\"role\"] == \"assistant\" %}\n {{- \" \" + message[\"content\"]|trim + eos_token}}\n {%- elif message[\"role\"] == \"tool_results\" or message[\"role\"] == \"tool\" %}\n {%- if message.content is defined and message.content.content is defined %}\n {%- set content = message.content.content %}\n {%- else %}\n {%- set content = message.content %}\n {%- endif %}\n {{- '[TOOL_RESULTS] {\"content\": ' + content|string + \", \" }}\n {%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}\n {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n {%- endif %}\n {{- '\"call_id\": \"' + message.tool_call_id + '\"}[/TOOL_RESULTS]' }}\n {%- else %}\n {{- raise_exception(\"Only user and assistant roles are supported, with the exception of an initial optional system message!\") }}\n {%- endif %}\n{%- endfor %}\n", + // Mistral-Nemo-Instruct-2407 (mistralai 'v3-tekken' template) + "{%- if messages[0][\"role\"] == \"system\" %}\n {%- set system_message = messages[0][\"content\"] %}\n {%- set loop_messages = messages[1:] %}\n{%- else %}\n {%- set loop_messages = messages %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n{%- set user_messages = loop_messages | selectattr(\"role\", \"equalto\", \"user\") | list %}\n\n{#- This block checks for alternating user/assistant messages, skipping tool calling messages #}\n{%- set ns = namespace() %}\n{%- set ns.index = 0 %}\n{%- for message in 
loop_messages %}\n {%- if not (message.role == \"tool\" or message.role == \"tool_results\" or (message.tool_calls is defined and message.tool_calls is not none)) %}\n {%- if (message[\"role\"] == \"user\") != (ns.index % 2 == 0) %}\n {{- raise_exception(\"After the optional system message, conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif %}\n {%- set ns.index = ns.index + 1 %}\n {%- endif %}\n{%- endfor %}\n\n{{- bos_token }}\n{%- for message in loop_messages %}\n {%- if message[\"role\"] == \"user\" %}\n {%- if tools is not none and (message == user_messages[-1]) %}\n {{- \"[AVAILABLE_TOOLS][\" }}\n {%- for tool in tools %}\n {%- set tool = tool.function %}\n {{- '{\"type\": \"function\", \"function\": {' }}\n {%- for key, val in tool.items() if key != \"return\" %}\n {%- if val is string %}\n {{- '\"' + key + '\": \"' + val + '\"' }}\n {%- else %}\n {{- '\"' + key + '\": ' + val|tojson }}\n {%- endif %}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \"}}\" }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- else %}\n {{- \"]\" }}\n {%- endif %}\n {%- endfor %}\n {{- \"[/AVAILABLE_TOOLS]\" }}\n {%- endif %}\n {%- if loop.last and system_message is defined %}\n {{- \"[INST]\" + system_message + \"\\n\\n\" + message[\"content\"] + \"[/INST]\" }}\n {%- else %}\n {{- \"[INST]\" + message[\"content\"] + \"[/INST]\" }}\n {%- endif %}\n {%- elif (message.tool_calls is defined and message.tool_calls is not none) %}\n {{- \"[TOOL_CALLS][\" }}\n {%- for tool_call in message.tool_calls %}\n {%- set out = tool_call.function|tojson %}\n {{- out[:-1] }}\n {%- if not tool_call.id is defined or tool_call.id|length != 9 %}\n {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n {%- endif %}\n {{- ', \"id\": \"' + tool_call.id + '\"}' }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- else %}\n {{- \"]\" + eos_token }}\n {%- endif %}\n {%- endfor %}\n {%- elif message[\"role\"] == \"assistant\" %}\n {{- message[\"content\"] + eos_token}}\n {%- elif message[\"role\"] == \"tool_results\" or message[\"role\"] == \"tool\" %}\n {%- if message.content is defined and message.content.content is defined %}\n {%- set content = message.content.content %}\n {%- else %}\n {%- set content = message.content %}\n {%- endif %}\n {{- '[TOOL_RESULTS]{\"content\": ' + content|string + \", \" }}\n {%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %}\n {{- raise_exception(\"Tool call IDs should be alphanumeric strings with length 9!\") }}\n {%- endif %}\n {{- '\"call_id\": \"' + message.tool_call_id + '\"}[/TOOL_RESULTS]' }}\n {%- else %}\n {{- raise_exception(\"Only user and assistant roles are supported, with the exception of an initial optional system message!\") }}\n {%- endif %}\n{%- endfor %}\n", + // mistralai/Mistral-Large-Instruct-2411 (mistralai 'v7' template) + "{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + '[/INST]' }}{% elif message['role'] == 'system' %}{{ '[SYSTEM_PROMPT] ' + message['content'] + '[/SYSTEM_PROMPT]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + message['content'] + eos_token }}{% else %}{{ raise_exception('Only user, system and assistant roles are supported!') }}{% endif %}{% endfor %}", + // ai-sage/GigaChat-20B-A3B-instruct + "{% if messages[0]['role'] == 'system' -%}\n {%- set loop_messages = messages[1:] -%}\n {%- set system_message = bos_token + messages[0]['content'] + 
additional_special_tokens[1] -%}\n{%- else -%}\n {%- set loop_messages = messages -%}\n {%- set system_message = bos_token + '' -%}\n{%- endif -%}\n{%- for message in loop_messages %}\n {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {% endif %}\n \n {%- if loop.index0 == 0 -%}\n {{ system_message -}}\n {%- endif -%}\n {%- if message['role'] == 'user' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {{ 'available functions' + additional_special_tokens[0] + additional_special_tokens[2] + additional_special_tokens[3] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if message['role'] == 'assistant' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if loop.last and add_generation_prompt -%}\n {{ 'assistant' + additional_special_tokens[0] -}}\n {%- endif -%}\n{%- endfor %}",
     };
     std::vector<std::string> expected_output = {
         // teknium/OpenHermes-2.5-Mistral-7B
         "<|im_start|>system\nYou are a helpful assistant<|im_end|>\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHi there<|im_end|>\n<|im_start|>user\nWho are you<|im_end|>\n<|im_start|>assistant\n I am an assistant <|im_end|>\n<|im_start|>user\nAnother question<|im_end|>\n<|im_start|>assistant\n",
-        // mistralai/Mistral-7B-Instruct-v0.2
+        // mistralai/Mistral-7B-Instruct-v0.2 (NOTE: Old pre-v1 without a system prompt)
         "[INST] You are a helpful assistant\nHello [/INST]Hi there[INST] Who are you [/INST] I am an assistant [INST] Another question [/INST]",
         // TheBloke/FusionNet_34Bx2_MoE-AWQ
-        "[INST] <<SYS>>\nYou are a helpful assistant\n<</SYS>>\n\nHello [/INST] Hi there [INST] Who are you [/INST] I am an assistant [INST] Another question [/INST]",
+        "[INST] <<SYS>>\nYou are a helpful assistant\n<</SYS>>\n\nHello [/INST]Hi there[INST] Who are you [/INST] I am an assistant [INST] Another question [/INST]",
         // bofenghuang/vigogne-2-70b-chat
-        "[INST] <<SYS>>\nYou are a helpful assistant\n<</SYS>>\n\nHello [/INST] Hi there [INST] Who are you [/INST] I am an assistant [INST] Another question [/INST]",
+        "[INST] <<SYS>>\nYou are a helpful assistant\n<</SYS>>\n\nHello [/INST]Hi there[INST] Who are you [/INST]I am an assistant[INST] Another question [/INST]",
         // mlabonne/AlphaMonarch-7B
         "system\nYou are a helpful assistant</s>\n<s>user\nHello</s>\n<s>assistant\nHi there</s>\n<s>user\nWho are you</s>\n<s>assistant\n I am an assistant </s>\n<s>user\nAnother question</s>\n<s>assistant\n",
         // google/gemma-7b-it
@@ -113,10 +123,31 @@ int main(void) {
         u8"You are a helpful assistant\n\nUser: Hello\n\nAssistant: Hi there<|end▁of▁sentence|>User: Who are you\n\nAssistant: I am an assistant <|end▁of▁sentence|>User: Another question\n\nAssistant:",
         // ibm-granite/granite-3.0-8b-instruct
         "<|start_of_role|>system<|end_of_role|>You are a helpful assistant<|end_of_text|>\n<|start_of_role|>user<|end_of_role|>Hello<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>Hi there<|end_of_text|>\n<|start_of_role|>user<|end_of_role|>Who are you<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|> I am an assistant <|end_of_text|>\n<|start_of_role|>user<|end_of_role|>Another question<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>\n",
+        // mistralai/Mistral-7B-Instruct-v0.2 (mistralai 'v1' template with a system prompt)
+        " [INST] You are a helpful assistant\n\nHello [/INST] Hi there [INST] Who are you [/INST] I am an assistant [INST] Another question [/INST]",
+        // Mistral-Large-Instruct-2407 (mistralai 'v3' template; modified to have system prompt at start)
+        "[INST] You are a helpful assistant\n\nHello[/INST] Hi there[INST] Who are you[/INST] I am an assistant[INST] Another question[/INST]",
+        // Mistral-Nemo-Instruct-2407 (mistralai 'v3-tekken' template; modified to have system prompt at start)
+        "[INST]You are a helpful assistant\n\nHello[/INST]Hi there[INST]Who are you[/INST] I am an assistant [INST]Another question[/INST]",
+        // mistralai/Mistral-Large-Instruct-2411 (mistralai 'v7' template)
+        "[SYSTEM_PROMPT] You are a helpful assistant[/SYSTEM_PROMPT][INST] Hello[/INST] Hi there[INST] Who are you[/INST] I am an assistant [INST] Another question[/INST]",
+        // ai-sage/GigaChat-20B-A3B-instruct
+        "You are a helpful assistant<|message_sep|>user<|role_sep|>Hello<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>Hi there<|message_sep|>user<|role_sep|>Who are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|> I am an assistant <|message_sep|>user<|role_sep|>Another question<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>",
     };
     std::vector<char> formatted_chat(1024);
     int32_t res;
+
+    // list all supported templates
+    std::vector<const char *> supported_tmpl;
+    res = llama_chat_builtin_templates(nullptr, 0);
+    assert(res > 0);
+    supported_tmpl.resize(res);
+    res = llama_chat_builtin_templates(supported_tmpl.data(), supported_tmpl.size());
+    printf("Built-in chat templates:\n");
+    for (auto tmpl : supported_tmpl) {
+        printf(" %s\n", tmpl);
+    }
+
     // test invalid chat template
     res = llama_chat_apply_template(nullptr, "INVALID TEMPLATE", conversation, message_count, true, formatted_chat.data(), formatted_chat.size());
     assert(res < 0);
@@ -154,9 +185,16 @@ int main(void) {
         return output;
     };
     assert(fmt_sys("chatml") == "<|im_start|>system\nYou are a helpful assistant<|im_end|>\n");
+    assert(fmt_sys("mistral-v1") == " [INST] You are a helpful assistant\n\n");
+    assert(fmt_sys("mistral-v3") == "[INST] You are a helpful assistant\n\n");
+    assert(fmt_sys("mistral-v3-tekken") == "[INST]You are a helpful assistant\n\n");
+    assert(fmt_sys("mistral-v7") == "[SYSTEM_PROMPT] You are a helpful assistant[/SYSTEM_PROMPT]");
     assert(fmt_sys("llama2") == "[INST] You are a helpful assistant\n");
+    assert(fmt_sys("llama2-sys") == "[INST] <<SYS>>\nYou are a helpful assistant\n<</SYS>>\n\n");
+    assert(fmt_sys("mistral") == "[INST] You are a helpful assistant\n"); // for old pre-v1 templates
     assert(fmt_sys("gemma") == ""); // for gemma, system message is merged with user message
     assert(fmt_sys("llama3") == "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|>");
+    assert(fmt_sys("gigachat") == "You are a helpful assistant<|message_sep|>");
 
     // test llama_chat_format_single for user message
@@ -173,9 +211,17 @@ int main(void) {
         return output;
     };
     assert(fmt_single("chatml") == "\n<|im_start|>user\nHow are you<|im_end|>\n<|im_start|>assistant\n");
+    assert(fmt_single("mistral-v1") == " [INST] How are you [/INST]");
+    assert(fmt_single("mistral-v3") == "[INST] How are you[/INST]");
+    assert(fmt_single("mistral-v3-tekken") == "[INST]How are you[/INST]");
+    assert(fmt_single("mistral-v7") == "[INST] How are you[/INST]");
     assert(fmt_single("llama2") == "[INST] How are you [/INST]");
+    assert(fmt_single("mistral") == "[INST] How are you [/INST]"); // for old pre-v1 templates
     assert(fmt_single("gemma") == "\nuser\nHow are you\nmodel\n");
     assert(fmt_single("llama3") == "<|start_header_id|>user<|end_header_id|>\n\nHow are you<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n");
+    assert(fmt_single("gigachat") == "user<|role_sep|>How are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>");
+
+    printf("Test chat templates: OK\n");
 
     return 0;
 }
diff --git a/tests/test-gguf.cpp b/tests/test-gguf.cpp
new file mode 100644
index 0000000000000..1bb5fb47c4317
--- /dev/null
+++ b/tests/test-gguf.cpp
@@ -0,0 +1,1303 @@
+#include "ggml.h"
+#include "ggml-backend.h"
+#include "../ggml/src/ggml-impl.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <random>
+#include <string>
+#include <vector>
+
+constexpr int offset_has_kv      = 1000;
+constexpr int offset_has_tensors = 2000;
+constexpr int offset_has_data    = 3000;
+
+enum handcrafted_file_type {
+    HANDCRAFTED_HEADER_BAD_MAGIC          =  10,
+    HANDCRAFTED_HEADER_BAD_VERSION_1      =  20,
+    HANDCRAFTED_HEADER_BAD_VERSION_FUTURE =  30,
+    HANDCRAFTED_HEADER_BAD_N_TENSORS      =  40,
+    HANDCRAFTED_HEADER_BAD_N_KV           =  50,
+    HANDCRAFTED_HEADER_EMPTY              = 800,
+
+    HANDCRAFTED_KV_BAD_KEY_SIZE           =  10 + offset_has_kv,
+    HANDCRAFTED_KV_BAD_TYPE               =  20 + offset_has_kv,
+    HANDCRAFTED_KV_BAD_VALUE_SIZE         =  30 + offset_has_kv,
+    HANDCRAFTED_KV_DUPLICATE_KEY          =  40 + offset_has_kv,
+    HANDCRAFTED_KV_SUCCESS                = 800 + offset_has_kv,
+
+    HANDCRAFTED_TENSORS_BAD_NAME_SIZE     =  10 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_N_DIMS        =  20 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_SHAPE         =  30 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_NE_TOO_BIG        =  40 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_TYPE          =  50 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_OFFSET        =  60 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_DUPLICATE_NAME    =  70 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_ALIGNMENT     =  80 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_SUCCESS           = 800 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_CUSTOM_ALIGN      = 810 + offset_has_tensors,
+
+    HANDCRAFTED_DATA_NOT_ENOUGH_DATA      =  10 + offset_has_data,
+    HANDCRAFTED_DATA_BAD_ALIGNMENT        =  20 + offset_has_data,
+    HANDCRAFTED_DATA_SUCCESS              = 800 + offset_has_data,
+    HANDCRAFTED_DATA_CUSTOM_ALIGN         = 810 + offset_has_data,
+};
+
+std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
+    switch (hft) {
+        case HANDCRAFTED_HEADER_BAD_MAGIC:          return "HEADER_BAD_MAGIC";
+        case HANDCRAFTED_HEADER_BAD_VERSION_1:      return "HEADER_BAD_VERSION_1";
+        case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE: return "HEADER_BAD_VERSION_FUTURE";
+        case HANDCRAFTED_HEADER_BAD_N_KV:           return "HEADER_BAD_N_KV";
+        case HANDCRAFTED_HEADER_BAD_N_TENSORS:      return "HEADER_BAD_N_TENSORS";
+        case HANDCRAFTED_HEADER_EMPTY:              return "HEADER_EMPTY";
+
+        case HANDCRAFTED_KV_BAD_KEY_SIZE:           return "KV_BAD_KEY_SIZE";
+        case HANDCRAFTED_KV_BAD_TYPE:               return "KV_BAD_TYPE";
+        case HANDCRAFTED_KV_BAD_VALUE_SIZE:         return "KV_BAD_VALUE_SIZE";
+        case HANDCRAFTED_KV_DUPLICATE_KEY:          return "KV_DUPLICATE_KEY";
+        case HANDCRAFTED_KV_SUCCESS:                return "KV_SUCCESS";
+
+        case HANDCRAFTED_TENSORS_BAD_NAME_SIZE:     return "TENSORS_BAD_NAME_SIZE";
+        case HANDCRAFTED_TENSORS_BAD_N_DIMS:        return "TENSORS_BAD_N_DIMS";
+        case HANDCRAFTED_TENSORS_BAD_SHAPE:         return "TENSORS_BAD_SHAPE";
+        case HANDCRAFTED_TENSORS_NE_TOO_BIG:        return "TENSORS_NE_TOO_BIG";
+        case HANDCRAFTED_TENSORS_BAD_TYPE:          return "TENSORS_BAD_TYPE";
+        case HANDCRAFTED_TENSORS_BAD_OFFSET:        return "TENSORS_BAD_OFFSET";
+        case HANDCRAFTED_TENSORS_DUPLICATE_NAME:    return "TENSORS_DUPLICATE_NAME";
+        case HANDCRAFTED_TENSORS_BAD_ALIGNMENT:     return "TENSORS_BAD_ALIGNMENT";
+        case HANDCRAFTED_TENSORS_SUCCESS:           return "TENSORS_SUCCESS";
+        case HANDCRAFTED_TENSORS_CUSTOM_ALIGN:      return "TENSORS_CUSTOM_ALIGN";
+
+        case HANDCRAFTED_DATA_NOT_ENOUGH_DATA:      return "DATA_NOT_ENOUGH_DATA";
+        case HANDCRAFTED_DATA_BAD_ALIGNMENT:        return "DATA_BAD_ALIGNMENT";
+        case HANDCRAFTED_DATA_SUCCESS:              return "DATA_SUCCESS";
+        case HANDCRAFTED_DATA_CUSTOM_ALIGN:         return "DATA_CUSTOM_ALIGN";
+    }
+    GGML_ABORT("fatal error");
+}
+
+static bool expect_context_not_null(const enum handcrafted_file_type hft) {
+    if (hft < offset_has_kv) {
+        return hft >= HANDCRAFTED_HEADER_EMPTY;
+    }
+    if (hft < offset_has_tensors) {
+        return hft >= HANDCRAFTED_KV_SUCCESS;
+    }
+    if (hft < offset_has_data) {
+        return hft >= HANDCRAFTED_TENSORS_SUCCESS;
+    }
+    return hft >= HANDCRAFTED_DATA_SUCCESS;
+}
+
+typedef std::pair<enum ggml_type, std::array<int64_t, GGML_MAX_DIMS>> tensor_config_t;
+
+std::vector<tensor_config_t> get_tensor_configs(std::mt19937 & rng) {
+    std::vector<tensor_config_t> tensor_configs;
+    tensor_configs.reserve(100);
+
+    for (int i = 0; i < 100; ++i) {
+        const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT);
+        if (ggml_type_size(type) == 0) {
+            continue;
+        }
+
+        std::array<int64_t, GGML_MAX_DIMS> shape = {1, 1, 1, 1};
+        shape[0] = (1 + rng() % 10) * ggml_blck_size(type);
+        const int n_dims = 1 + rng() % GGML_MAX_DIMS;
+        for (int i = 1; i < n_dims; ++i) {
+            shape[i] = 1 + rng() % 10;
+        }
+
+        tensor_configs.push_back(std::make_pair(type, shape));
+    }
+
+    return tensor_configs;
+}
+
+std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937 rng) {
+    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
+    kv_types.reserve(100);
+
+    for (int i = 0; i < 100; ++i) {
+        const gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);
+
+        if (type == GGUF_TYPE_ARRAY) {
+            const gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT);
+            if (type_arr == GGUF_TYPE_ARRAY) {
+                continue;
+            }
+            kv_types.push_back(std::make_pair(type, type_arr));
+            continue;
+        }
+
+        kv_types.push_back(std::make_pair(type, gguf_type(-1)));
+    }
+    std::shuffle(kv_types.begin(), kv_types.end(), rng);
+
+    return kv_types;
+}
+
+static void helper_write(const void * data, const size_t nbytes, FILE * file) {
+    GGML_ASSERT(fwrite(data, 1, nbytes, file) == nbytes);
+}
+
+static FILE * get_handcrafted_file(const unsigned int seed, const enum handcrafted_file_type hft, const int extra_bytes = 0) {
+    FILE * file = tmpfile();
+
+    std::mt19937 rng(seed);
+
+    if (hft == HANDCRAFTED_HEADER_BAD_MAGIC) {
+        const char bad_magic[4] = {'F', 'U', 'G', 'G'};
+        helper_write(bad_magic, sizeof(bad_magic), file);
+    } else {
+        helper_write(GGUF_MAGIC, 4, file);
+    }
+
+    if (hft == HANDCRAFTED_HEADER_BAD_VERSION_1) {
+        const uint32_t version = 1;
+        helper_write(&version, sizeof(version), file);
+    } else if (hft == HANDCRAFTED_HEADER_BAD_VERSION_FUTURE) {
+        const uint32_t version = GGUF_VERSION + 1;
+        helper_write(&version, sizeof(version), file);
+    } else {
+        const uint32_t version = GGUF_VERSION;
+        helper_write(&version, sizeof(version), file);
+    }
+
+    std::vector<tensor_config_t> tensor_configs;
+    if (hft >= offset_has_tensors) {
+        tensor_configs = get_tensor_configs(rng);
+    }
+
+    if (hft == HANDCRAFTED_HEADER_BAD_N_TENSORS) {
+        const uint64_t n_tensors = -1;
+        helper_write(&n_tensors, sizeof(n_tensors), file);
+    } else {
+        const uint64_t n_tensors = tensor_configs.size();
+        helper_write(&n_tensors, sizeof(n_tensors), file);
+    }
+
+    std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types;
+    if (hft >= offset_has_kv) {
+        kv_types = get_kv_types(rng);
+    }
+    {
+        uint64_t n_kv = kv_types.size();
+        if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+            n_kv += 1;
+        } else if (hft == HANDCRAFTED_HEADER_BAD_N_KV) {
+            n_kv = -1;
+        }
+        helper_write(&n_kv, sizeof(n_kv),
file); + } + + if (hft < offset_has_kv) { + for (int i = 0; i < extra_bytes; ++i) { + const char tmp = 0; + helper_write(&tmp, sizeof(tmp), file); + } + rewind(file); + return file; + } + + for (int i = 0; i < int(kv_types.size()); ++i) { + const enum gguf_type type = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].first); + const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].second); + + const std::string key = "my_key_" + std::to_string((hft == HANDCRAFTED_KV_DUPLICATE_KEY ? i/2 : i)); + + if (hft == HANDCRAFTED_KV_BAD_KEY_SIZE) { + const uint64_t n = -1; + helper_write(&n, sizeof(n), file); + } else { + const uint64_t n = key.length(); + helper_write(&n, sizeof(n), file); + } + helper_write(key.data(), key.length(), file); + + { + const int32_t type32 = int32_t(type); + helper_write(&type32, sizeof(type32), file); + } + + uint32_t data[16]; + for (int j = 0; j < 16; ++j) { + data[j] = rng(); + if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) { + data[j] |= 0x01010101; // avoid random null-termination of string + } + } + + if (type == GGUF_TYPE_STRING) { + const uint64_t n = rng() % sizeof(data); + helper_write(&n, sizeof(n), file); + helper_write(data, n, file); + continue; + } + + if (type == GGUF_TYPE_ARRAY) { + { + const int32_t type32 = int32_t(type_arr); + helper_write(&type32, sizeof(type32), file); + } + if (type_arr == GGUF_TYPE_STRING) { + const uint64_t nstr = rng() % (16 + 1); + helper_write(&nstr, sizeof(nstr), file); + for (uint64_t istr = 0; istr < nstr; ++istr) { + const uint64_t n = rng() % (sizeof(uint32_t) + 1); + helper_write(&n, sizeof(n), file); + helper_write(&data[istr], n, file); + } + continue; + } + const size_t type_size = gguf_type_size(type_arr); + const uint64_t n = (rng() % sizeof(data)) / type_size; + helper_write(&n, sizeof(n), file); + helper_write(&data, n*type_size, file); + continue; + } + + size_t type_size = hft == HANDCRAFTED_KV_BAD_TYPE ? 
1 : gguf_type_size(type); + if (hft == HANDCRAFTED_KV_BAD_VALUE_SIZE) { + type_size += rng() % 3; + } + helper_write(data, type_size, file); + } + + if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) { + const std::string key = "general.alignment"; + { + const uint64_t n = key.length(); + helper_write(&n, sizeof(n), file); + } + helper_write(key.data(), key.length(), file); + + const int32_t type = gguf_type(GGUF_TYPE_UINT32); + helper_write(&type, sizeof(type), file); + + const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT + 1; + helper_write(&alignment, sizeof(alignment), file); + } + + if (hft < offset_has_tensors) { + for (int i = 0; i < extra_bytes; ++i) { + const char tmp = 0; + helper_write(&tmp, sizeof(tmp), file); + } + rewind(file); + return file; + } + + uint32_t alignment = GGUF_DEFAULT_ALIGNMENT; + if (hft == HANDCRAFTED_TENSORS_BAD_ALIGNMENT || hft == HANDCRAFTED_DATA_BAD_ALIGNMENT) { + alignment -= 1; + } else if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) { + alignment += 1; + } + + uint64_t offset = 0; + for (int i = 0; i < int(tensor_configs.size()); ++i) { + const ggml_type type = tensor_configs[i].first; + const std::array shape = tensor_configs[i].second; + + std::string name = "my_tensor"; + if (hft != HANDCRAFTED_TENSORS_DUPLICATE_NAME) { + name += "_" + std::to_string(i); + } + if (hft == HANDCRAFTED_TENSORS_BAD_NAME_SIZE) { + name += "_with_a_very_long_name_which_is_longer_than_what_is_allowed_for_ggml_tensors"; + GGML_ASSERT(name.length() >= GGML_MAX_NAME); + } + { + const uint64_t n = name.length(); + helper_write(&n, sizeof(n), file); + } + helper_write(name.data(), name.length(), file); + + uint32_t n_dims = hft == HANDCRAFTED_TENSORS_NE_TOO_BIG ? 2 : 1; + for (int i = GGML_MAX_DIMS-1; i >= 1; --i) { + if (shape[i] != 1) { + n_dims = i + 1; + break; + } + } + if (hft == HANDCRAFTED_TENSORS_BAD_N_DIMS) { + const uint32_t n_dims_bad = GGML_MAX_DIMS + 1; + helper_write(&n_dims_bad, sizeof(n_dims_bad), file); + } else { + helper_write(&n_dims, sizeof(n_dims), file); + } + + if (hft == HANDCRAFTED_TENSORS_BAD_SHAPE) { + for (uint32_t j = 0; j < n_dims; ++j) { + const int64_t bad_dim = -1; + helper_write(&bad_dim, sizeof(bad_dim), file); + } + } else if (hft == HANDCRAFTED_TENSORS_NE_TOO_BIG){ + for (uint32_t j = 0; j < n_dims; ++j) { + const int64_t big_dim = 4*int64_t(INT32_MAX); + helper_write(&big_dim, sizeof(big_dim), file); + } + } else { + helper_write(shape.data(), n_dims*sizeof(int64_t), file); + } + + { + const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? 
-1 : int32_t(type); + helper_write(&type32, sizeof(type32), file); + } + + if (hft == HANDCRAFTED_TENSORS_BAD_OFFSET) { + const uint64_t bad_offset = -1; + helper_write(&bad_offset, sizeof(bad_offset), file); + } else { + helper_write(&offset, sizeof(offset), file); + } + + int64_t ne = shape[0]; + for (uint32_t i = 1; i < n_dims; ++i) { + ne *= shape[i]; + } + offset += GGML_PAD(ggml_row_size(type, ne), alignment); + } + + const uint32_t alignment_overshoot = ftell(file) % alignment; + if (alignment_overshoot != 0) { + for (size_t i = alignment_overshoot; i < alignment; ++i) { + const char pad = 0; + helper_write(&pad, sizeof(pad), file); + } + } + + if (hft >= offset_has_data) { + rng.seed(seed + 1); + uint64_t nbytes = offset; + if (hft == HANDCRAFTED_DATA_NOT_ENOUGH_DATA) { + nbytes -= 1; + } + for (uint64_t i = 0; i < nbytes; ++i) { + const uint8_t random_byte = i % 256; + helper_write(&random_byte, sizeof(random_byte), file); + } + } + + for (int i = 0; i < extra_bytes; ++i) { + const char tmp = 0; + helper_write(&tmp, sizeof(tmp), file); + } + rewind(file); + return file; +} + +static bool handcrafted_check_header(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_kv, const bool has_tensors, const bool alignment_defined) { + if (!gguf_ctx) { + return false; + } + + std::mt19937 rng(seed); + + std::vector tensor_configs; + if (has_tensors) { + tensor_configs = get_tensor_configs(rng); + } + std::vector> kv_types; + if (has_kv) { + kv_types = get_kv_types(rng); + } + + bool ok = true; + + if (gguf_get_version(gguf_ctx) != GGUF_VERSION) { + ok = false; + } + if (gguf_get_n_tensors(gguf_ctx) != int(tensor_configs.size())) { + ok = false; + } + if (gguf_get_n_kv(gguf_ctx) != int(alignment_defined ? kv_types.size() + 1 : kv_types.size())) { + ok = false; + } + + return ok; +} + +static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_tensors, const bool alignment_defined) { + if (!gguf_ctx) { + return false; + } + + std::mt19937 rng(seed); + + std::vector tensor_configs; + if (has_tensors) { + tensor_configs = get_tensor_configs(rng); + } + + std::vector> kv_types = get_kv_types(rng); + + bool ok = true; + + for (int i = 0; i < int(kv_types.size()); ++i) { + const enum gguf_type type = gguf_type(kv_types[i].first); + const enum gguf_type type_arr = gguf_type(kv_types[i].second); + + const std::string key = "my_key_" + std::to_string(i); + + uint32_t data[16]; + for (int j = 0; j < 16; ++j) { + data[j] = rng(); + if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) { + data[j] |= 0x01010101; // avoid random null-termination of string + } + } + + const char * data8 = reinterpret_cast(data); + const int id = gguf_find_key(gguf_ctx, key.c_str()); + + if (type == GGUF_TYPE_STRING) { + const char * str = gguf_get_val_str(gguf_ctx, id); + const uint64_t n = strlen(str); + const uint64_t n_expected = rng() % sizeof(data); + if (n != n_expected) { + ok = false; + continue; + } + if (!std::equal(str, str + n, data8)) { + ok = false; + } + continue; + } + + if (type == GGUF_TYPE_ARRAY) { + const size_t type_size = gguf_type_size(type_arr); + const uint64_t arr_n = gguf_get_arr_n(gguf_ctx, id); + + if (type_arr == GGUF_TYPE_STRING) { + const uint64_t nstr_expected = rng() % (16 + 1); + if (arr_n != nstr_expected) { + ok = false; + continue; + } + for (uint64_t istr = 0; istr < nstr_expected; ++istr) { + const char * str = gguf_get_arr_str(gguf_ctx, id, istr); + const uint64_t n = strlen(str); + const uint64_t n_expected = 
rng() % (sizeof(uint32_t) + 1); + + if (n != n_expected) { + ok = false; + continue; + } + const char * str_expected = reinterpret_cast(&data[istr]); + if (strncmp(str, str_expected, n) != 0) { + ok = false; + continue; + } + } + continue; + } + + const uint64_t arr_n_expected = (rng() % sizeof(data)) / type_size; + if (arr_n != arr_n_expected) { + ok = false; + continue; + } + + const char * data_gguf = reinterpret_cast(gguf_get_arr_data(gguf_ctx, id)); + if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) { + ok = false; + } + continue; + } + + const char * data_gguf = reinterpret_cast(gguf_get_val_data(gguf_ctx, id)); + if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) { + ok = false; + } + } + + const uint32_t expected_alignment = alignment_defined ? GGUF_DEFAULT_ALIGNMENT + 1 : GGUF_DEFAULT_ALIGNMENT; + if (gguf_get_alignment(gguf_ctx) != expected_alignment) { + ok = false; + } + + return ok; +} + +static bool handcrafted_check_tensors(const gguf_context * gguf_ctx, const unsigned int seed) { + if (!gguf_ctx) { + return false; + } + + std::mt19937 rng(seed); + + std::vector tensor_configs = get_tensor_configs(rng); + + // Call get_kv_types to get the same RNG state: + get_kv_types(rng); + + bool ok = true; + + const int id_alignment = gguf_find_key(gguf_ctx, "general.alignment"); + const uint32_t alignment = id_alignment >= 0 ? gguf_get_val_u32(gguf_ctx, id_alignment) : GGUF_DEFAULT_ALIGNMENT; + + uint64_t expected_offset = 0; + for (int i = 0; i < int(tensor_configs.size()); ++i) { + const ggml_type type = tensor_configs[i].first; + const std::array shape = tensor_configs[i].second; + + const std::string name = "my_tensor_" + std::to_string(i); + const int id = gguf_find_tensor(gguf_ctx, name.c_str()); + + if (id >= 0) { + if (std::string(gguf_get_tensor_name(gguf_ctx, id)) != name) { + ok = false; + } + + if (gguf_get_tensor_type(gguf_ctx, id) != type) { + ok = false; + } + } else { + ok = false; + continue; + } + + const size_t offset = gguf_get_tensor_offset(gguf_ctx, id); + + if (offset != expected_offset) { + ok = false; + } + + int64_t ne = shape[0]; + for (size_t j = 1; j < GGML_MAX_DIMS; ++j) { + ne *= shape[j]; + } + expected_offset += GGML_PAD(ggml_row_size(type, ne), alignment); + } + + return ok; +} + +static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const unsigned int seed, FILE * file) { + if (!gguf_ctx) { + return false; + } + + std::mt19937 rng(seed); + + std::vector tensor_configs = get_tensor_configs(rng); + + bool ok = true; + + const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT; + + for (int i = 0; i < int(tensor_configs.size()); ++i) { + const ggml_type type = tensor_configs[i].first; + const std::array shape = tensor_configs[i].second; + + int64_t ne = shape[0]; + for (size_t j = 1; j < GGML_MAX_DIMS; ++j) { + ne *= shape[j]; + } + const size_t size = ggml_row_size(type, ne); + + const std::string name = "my_tensor_" + std::to_string(i); + const size_t offset = gguf_get_tensor_offset(gguf_ctx, gguf_find_tensor(gguf_ctx, name.c_str())); + + std::vector data(size); + GGML_ASSERT(fseek(file, gguf_get_data_offset(gguf_ctx) + offset, SEEK_SET) == 0); + GGML_ASSERT(fread(data.data(), 1, size, file) == size); + + for (size_t j = 0; j < size; ++j) { + const uint8_t expected_byte = (j + offset) % 256; + if (data[j] != expected_byte) { + ok = false; + } + } + } + + return ok; +} + +static std::pair test_handcrafted_file(const unsigned int seed) { + int npass = 0; + int ntest = 0; + + const std::vector hfts = { + 
+        HANDCRAFTED_HEADER_BAD_MAGIC,
+        HANDCRAFTED_HEADER_BAD_VERSION_1,
+        // HANDCRAFTED_HEADER_BAD_VERSION_FUTURE, // FIXME
+        HANDCRAFTED_HEADER_BAD_N_KV,
+        HANDCRAFTED_HEADER_BAD_N_TENSORS,
+        HANDCRAFTED_HEADER_EMPTY,
+
+        HANDCRAFTED_KV_BAD_KEY_SIZE,
+        HANDCRAFTED_KV_BAD_TYPE,
+        // HANDCRAFTED_KV_BAD_VALUE_SIZE, // FIXME sanitizer limit
+        // HANDCRAFTED_KV_DUPLICATE_KEY, // FIXME
+        HANDCRAFTED_KV_SUCCESS,
+
+        HANDCRAFTED_TENSORS_BAD_NAME_SIZE,
+        HANDCRAFTED_TENSORS_BAD_N_DIMS,
+        HANDCRAFTED_TENSORS_BAD_SHAPE,
+        HANDCRAFTED_TENSORS_NE_TOO_BIG,
+        HANDCRAFTED_TENSORS_BAD_TYPE,
+        // HANDCRAFTED_TENSORS_BAD_OFFSET, // FIXME
+        HANDCRAFTED_TENSORS_DUPLICATE_NAME,
+        // HANDCRAFTED_TENSORS_BAD_ALIGNMENT, // FIXME
+        HANDCRAFTED_TENSORS_SUCCESS,
+        HANDCRAFTED_TENSORS_CUSTOM_ALIGN,
+
+        HANDCRAFTED_DATA_NOT_ENOUGH_DATA,
+        // HANDCRAFTED_DATA_BAD_ALIGNMENT, // FIXME
+        HANDCRAFTED_DATA_SUCCESS,
+        HANDCRAFTED_DATA_CUSTOM_ALIGN,
+    };
+
+    for (enum handcrafted_file_type hft : hfts) {
+        printf("%s: handcrafted_file_type=%s\n", __func__, handcrafted_file_type_name(hft).c_str());
+        FILE * file = get_handcrafted_file(seed, hft);
+
+#ifdef _WIN32
+        if (!file) {
+            printf("%s: failed to create tmpfile(), needs elevated privileges on Windows\n", __func__);
+            printf("%s: skipping tests\n", __func__);
+            continue;
+        }
+#else
+        GGML_ASSERT(file);
+#endif // _WIN32
+
+        struct ggml_context * ctx = nullptr;
+        struct gguf_init_params gguf_params = {
+            /*no_alloc =*/ false,
+            /*ctx      =*/ hft >= offset_has_data ? &ctx : nullptr,
+        };
+        struct gguf_context * gguf_ctx = gguf_init_from_file_impl(file, gguf_params);
+
+        if (expect_context_not_null(hft)) {
+            printf("%s: - context_not_null: ", __func__);
+        } else {
+            printf("%s: - context_null: ", __func__);
+        }
+        if (bool(gguf_ctx) == expect_context_not_null(hft)) {
+            printf("\033[1;32mOK\033[0m\n");
+            npass++;
+        } else {
+            printf("\033[1;31mFAIL\033[0m\n");
+        }
+        ntest++;
+
+        if (false && hft >= offset_has_data && !expect_context_not_null(hft)) { // FIXME
+            printf("%s: - no_dangling_ggml_context_pointer: ", __func__);
+            if (ctx) {
+                printf("\033[1;31mFAIL\033[0m\n");
+            } else {
+                printf("\033[1;32mOK\033[0m\n");
+                npass++;
+            }
+            ntest++;
+        }
+
+        if (false && expect_context_not_null(hft)) { // FIXME
+            FILE * file_eb = get_handcrafted_file(seed, hft, /*extra_bytes =*/ 1);
+            struct gguf_context * gguf_ctx_eb = gguf_init_from_file_impl(file_eb, gguf_params);
+
+            printf("%s: - context_null_with_extra_bytes: ", __func__);
+            if (gguf_ctx_eb) {
+                printf("\033[1;31mFAIL\033[0m\n");
+            } else {
+                printf("\033[1;32mOK\033[0m\n");
+                npass++;
+            }
+            ntest++;
+
+            gguf_free(gguf_ctx_eb);
+            fclose(file_eb);
+        }
+
+        const bool alignment_defined = hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN;
+
+        if (expect_context_not_null(hft)) {
+            printf("%s: - check_header: ", __func__);
+            if (handcrafted_check_header(gguf_ctx, seed, hft >= offset_has_kv, hft >= offset_has_tensors, alignment_defined)) {
+                printf("\033[1;32mOK\033[0m\n");
+                npass++;
+            } else {
+                printf("\033[1;31mFAIL\033[0m\n");
+            }
+            ntest++;
+        }
+
+        if (expect_context_not_null(hft) && hft >= offset_has_kv) {
+            printf("%s: - check_kv: ", __func__);
+            if (handcrafted_check_kv(gguf_ctx, seed, hft >= offset_has_tensors, alignment_defined)) {
+                printf("\033[1;32mOK\033[0m\n");
+                npass++;
+            } else {
+                printf("\033[1;31mFAIL\033[0m\n");
+            }
+            ntest++;
+        }
+
+        if (expect_context_not_null(hft) && hft >= offset_has_tensors) {
+            printf("%s: - check_tensors: ", __func__);
+            if (handcrafted_check_tensors(gguf_ctx, seed)) {
printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + } + + if (expect_context_not_null(hft) && hft >= offset_has_data) { + printf("%s: - check_tensor_data: ", __func__); + if (handcrafted_check_tensor_data(gguf_ctx, seed, file)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + } + + if (gguf_ctx) { + ggml_free(ctx); + gguf_free(gguf_ctx); + } + fclose(file); + printf("\n"); + } + + return std::make_pair(npass, ntest); +} + +struct random_gguf_context_result { + struct gguf_context * gguf_ctx; + struct ggml_context * ctx; + ggml_backend_buffer_t buffer; +}; + +static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t backend, const unsigned int seed) { + std::mt19937 rng(seed); + + struct gguf_context * gguf_ctx = gguf_init_empty(); + + for (int i = 0; i < 256; ++i) { + const std::string key = "my_key_" + std::to_string(rng() % 1024); + const enum gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT); + + if (type == GGUF_TYPE_STRING || type == GGUF_TYPE_ARRAY) { + continue; // FIXME memory leak + } + + switch (type) { + case GGUF_TYPE_UINT8: gguf_set_val_u8 (gguf_ctx, key.c_str(), rng() % (1 << 7)); break; + case GGUF_TYPE_INT8: gguf_set_val_i8 (gguf_ctx, key.c_str(), rng() % (1 << 7) - (1 << 6)); break; + case GGUF_TYPE_UINT16: gguf_set_val_u16 (gguf_ctx, key.c_str(), rng() % (1 << 15)); break; + case GGUF_TYPE_INT16: gguf_set_val_i16 (gguf_ctx, key.c_str(), rng() % (1 << 15) - (1 << 14)); break; + case GGUF_TYPE_UINT32: gguf_set_val_u32 (gguf_ctx, key.c_str(), rng()); break; + case GGUF_TYPE_INT32: gguf_set_val_i32 (gguf_ctx, key.c_str(), rng() - (1 << 30)); break; + case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (gguf_ctx, key.c_str(), rng() % 1024 - 512); break; + case GGUF_TYPE_BOOL: gguf_set_val_bool(gguf_ctx, key.c_str(), rng() % 2 == 0); break; + case GGUF_TYPE_STRING: gguf_set_val_str (gguf_ctx, key.c_str(), std::to_string(rng()).c_str()); break; + case GGUF_TYPE_UINT64: gguf_set_val_u64 (gguf_ctx, key.c_str(), rng()); break; + case GGUF_TYPE_INT64: gguf_set_val_i64 (gguf_ctx, key.c_str(), rng() - (1 << 30)); break; + case GGUF_TYPE_FLOAT64: gguf_set_val_f32 (gguf_ctx, key.c_str(), rng() % 1024 - 512); break; + case GGUF_TYPE_ARRAY: { + const enum gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT); + const uint64_t ne = rng() % 1024; + + switch (type_arr) { + case GGUF_TYPE_UINT8: + case GGUF_TYPE_INT8: + case GGUF_TYPE_UINT16: + case GGUF_TYPE_INT16: + case GGUF_TYPE_UINT32: + case GGUF_TYPE_INT32: + case GGUF_TYPE_FLOAT32: + case GGUF_TYPE_BOOL: + case GGUF_TYPE_UINT64: + case GGUF_TYPE_INT64: + case GGUF_TYPE_FLOAT64: { + const size_t nbytes = ne*gguf_type_size(type_arr); + std::vector random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t)); + for (size_t j = 0; j < random_data.size(); ++j) { + random_data[j] = rng(); + } + gguf_set_arr_data(gguf_ctx, key.c_str(), type_arr, random_data.data(), ne); + } break; + case GGUF_TYPE_STRING: { + std::vector data_cpp(ne); + std::vector data_c(ne); + for (size_t j = 0; j < data_cpp.size(); ++j) { + data_cpp[j] = std::to_string(rng()); + data_c[j] = data_cpp[j].c_str(); + } + gguf_set_arr_str(gguf_ctx, key.c_str(), data_c.data(), ne); + } break; + case GGUF_TYPE_ARRAY: { + break; // not supported + } + case GGUF_TYPE_COUNT: + default: { + GGML_ABORT("fatal error"); + } break; + } + } break; + case GGUF_TYPE_COUNT: + default: { + GGML_ABORT("fatal error"); + } break; + } + } + + struct 
ggml_init_params ggml_params = { + /*.mem_size =*/ 256*ggml_tensor_overhead(), + /*.mem_buffer =*/ nullptr, + /*.no_alloc =*/ true, + }; + struct ggml_context * ctx = ggml_init(ggml_params); + + for (int i = 0; i < 256; ++i) { + const std::string name = "my_tensor_" + std::to_string(i); + const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT); + const size_t type_size = ggml_type_size(type); + + if (type_size == 0) { + continue; + } + + const int n_dims = 1 + rng() % GGML_MAX_DIMS; + int64_t ne[GGML_MAX_DIMS]; + ne[0] = (1 + rng() % 10) * ggml_blck_size(type); + for (int j = 1; j < n_dims; ++j) { + ne[j] = 1 + rng() % 10; + } + + struct ggml_tensor * tensor = ggml_new_tensor(ctx, type, n_dims, ne); + ggml_set_name(tensor, name.c_str()); + } + + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend); + for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) { + const size_t nbytes = ggml_nbytes(t); + std::vector random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t)); + for (size_t j = 0; j < random_data.size(); ++j) { + random_data[j] = rng(); + } + ggml_backend_tensor_set(t, random_data.data(), 0, nbytes); + + gguf_add_tensor(gguf_ctx, t); + } + + return {gguf_ctx, ctx, buf}; +} + +static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other) { + bool ok = true; + + const int n_kv = gguf_get_n_kv(ctx); + for (int id = 0; id < n_kv; ++id) { + const char * name = gguf_get_key(ctx, id); + + const int idx_other = gguf_find_key(other, name); + if (idx_other < 0) { + ok = false; + continue; + } + + const gguf_type type = gguf_get_kv_type(ctx, id); + if (type != gguf_get_kv_type(other, idx_other)) { + ok = false; + continue; + } + + if (type == GGUF_TYPE_ARRAY) { + const int arr_n = gguf_get_arr_n(ctx, id); + if (arr_n != gguf_get_arr_n(other, idx_other)) { + ok = false; + continue; + } + + const gguf_type type_arr = gguf_get_arr_type(ctx, id); + if (type_arr != gguf_get_arr_type(other, idx_other)) { + ok = false; + continue; + } + + if (type_arr == GGUF_TYPE_STRING) { + for (int arr_i = 0; arr_i < arr_n; ++arr_i) { + const std::string str = gguf_get_arr_str(ctx, id, arr_i); + const std::string str_other = gguf_get_arr_str(other, idx_other, arr_i); + if (str != str_other) { + ok = false; + } + } + continue; + } + + const char * data = reinterpret_cast(gguf_get_arr_data(ctx, id)); + const char * data_other = reinterpret_cast(gguf_get_arr_data(other, idx_other)); + if (!std::equal(data, data + arr_n*gguf_type_size(type_arr), data_other)) { + ok = false; + } + continue; + } + + if (type == GGUF_TYPE_STRING) { + const std::string str = gguf_get_val_str(ctx, id); + const std::string str_other = gguf_get_val_str(other, idx_other); + if (str != str_other) { + ok = false; + } + continue; + } + + const char * data = reinterpret_cast(gguf_get_val_data(ctx, id)); + const char * data_other = reinterpret_cast(gguf_get_val_data(other, idx_other)); + if (!std::equal(data, data + gguf_type_size(type), data_other)) { + ok = false; + } + } + + return ok; +} + +static bool all_tensors_in_other(const gguf_context * ctx, const gguf_context * other) { + bool ok = true; + + const int n_tensors = gguf_get_n_tensors(ctx); + for (int id = 0; id < n_tensors; ++id) { + const std::string name = gguf_get_tensor_name(ctx, id); + + const int idx_other = gguf_find_tensor(other, name.c_str()); + if (id != idx_other) { + ok = false; + if (idx_other < 0) { + continue; + } + } + + const ggml_type type = gguf_get_tensor_type(ctx, 
id);
+        if (type != gguf_get_tensor_type(other, id)) {
+            ok = false;
+        }
+
+        const size_t offset = gguf_get_tensor_offset(ctx, id);
+        if (offset != gguf_get_tensor_offset(other, id)) {
+            ok = false;
+        }
+    }
+
+    return ok;
+}
+
+static bool same_tensor_data(const struct ggml_context * orig, const struct ggml_context * read) {
+    bool ok = true;
+
+    struct ggml_tensor * t_orig = ggml_get_first_tensor(orig);
+    struct ggml_tensor * t_read = ggml_get_first_tensor(read);
+    while (t_orig) {
+        if (!t_read) {
+            ok = false;
+            break;
+        }
+
+        const size_t nbytes = ggml_nbytes(t_orig);
+        if (ggml_nbytes(t_read) != nbytes) {
+            ok = false;
+            break;
+        }
+        std::vector<uint8_t> data_orig(nbytes);
+        ggml_backend_tensor_get(t_orig, data_orig.data(), 0, nbytes);
+        if (!std::equal(data_orig.data(), data_orig.data() + nbytes, reinterpret_cast<const uint8_t *>(t_read->data))) {
+            ok = false;
+        }
+
+        t_orig = ggml_get_next_tensor(orig, t_orig);
+        t_read = ggml_get_next_tensor(read, t_read);
+    }
+    if (t_read) {
+        ok = false;
+    }
+
+    return ok;
+}
+
+static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) {
+    FILE * file = tmpfile();
+#ifdef _WIN32
+    if (!file) {
+        printf("%s: failed to create tmpfile(), needs elevated privileges on Windows\n", __func__);
+        printf("%s: skipping tests\n", __func__);
+        return std::make_pair(0, 0);
+    }
+#else
+    GGML_ASSERT(file);
+#endif // _WIN32
+
+    if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) {
+        return std::make_pair(0, 0); // FIXME
+    }
+
+    ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
+    printf("%s: device=%s, backend=%s, only_meta=%s\n",
+           __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend), only_meta ? "yes" : "no");
+
+    int npass = 0;
+    int ntest = 0;
+
+    struct gguf_context * gguf_ctx_0;
+    struct ggml_context * ctx_0;
+    ggml_backend_buffer_t bbuf;
+    {
+        struct random_gguf_context_result result = get_random_gguf_context(backend, seed);
+        gguf_ctx_0 = result.gguf_ctx;
+        ctx_0      = result.ctx;
+        bbuf       = result.buffer;
+    }
+
+    struct gguf_buf gbuf = gguf_buf_init(16 * 1024);
+    gguf_write_to_buf(gguf_ctx_0, &gbuf, only_meta);
+    helper_write(gbuf.data, gbuf.offset, file);
+    rewind(file);
+
+    struct ggml_context * ctx_1 = nullptr;
+    struct gguf_init_params gguf_params = {
+        /*no_alloc =*/ false,
+        /*ctx      =*/ only_meta ?
nullptr : &ctx_1, + }; + struct gguf_context * gguf_ctx_1 = gguf_init_from_file_impl(file, gguf_params); + + printf("%s: same_version: ", __func__); + if (gguf_get_version(gguf_ctx_0) == gguf_get_version(gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: same_n_kv: ", __func__); + if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: same_n_tensors: ", __func__); + if (gguf_get_n_tensors(gguf_ctx_0) == gguf_get_n_tensors(gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_orig_kv_in_read: ", __func__); + if (all_kv_in_other(gguf_ctx_0, gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_read_kv_in_orig: ", __func__); + if (all_kv_in_other(gguf_ctx_1, gguf_ctx_0)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_orig_tensors_in_read: ", __func__); + if (all_tensors_in_other(gguf_ctx_0, gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_read_tensors_in_orig: ", __func__); + if (all_tensors_in_other(gguf_ctx_1, gguf_ctx_0)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + if (!only_meta) { + printf("%s: same_tensor_data: ", __func__); + if (same_tensor_data(ctx_0, ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + } + + ggml_backend_buffer_free(bbuf); + ggml_free(ctx_0); + ggml_free(ctx_1); + gguf_free(gguf_ctx_0); + gguf_free(gguf_ctx_1); + gguf_buf_free(gbuf); + ggml_backend_free(backend); + GGML_ASSERT(fclose(file) == 0); + + printf("\n"); + return std::make_pair(npass, ntest); +} + +static std::pair test_gguf_set_kv(ggml_backend_dev_t dev, const unsigned int seed) { + ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); + printf("%s: device=%s, backend=%s\n", __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend)); + + int npass = 0; + int ntest = 0; + + struct gguf_context * gguf_ctx_0; + struct ggml_context * ctx_0; + ggml_backend_buffer_t bbuf_0; + { + struct random_gguf_context_result result = get_random_gguf_context(backend, seed); + gguf_ctx_0 = result.gguf_ctx; + ctx_0 = result.ctx; + bbuf_0 = result.buffer; + } + + struct gguf_context * gguf_ctx_1; + struct ggml_context * ctx_1; + ggml_backend_buffer_t bbuf_1; + { + struct random_gguf_context_result result = get_random_gguf_context(backend, seed + 1); + gguf_ctx_1 = result.gguf_ctx; + ctx_1 = result.ctx; + bbuf_1 = result.buffer; + } + + struct gguf_context * gguf_ctx_2 = gguf_init_empty(); + + gguf_set_kv(gguf_ctx_1, gguf_ctx_0); + gguf_set_kv(gguf_ctx_2, gguf_ctx_0); + + printf("%s: same_n_kv: ", __func__); + if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_2)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_kv_0_in_1: ", __func__); + if (all_kv_in_other(gguf_ctx_0, gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: 
all_kv_0_in_2: ", __func__);
+    if (all_kv_in_other(gguf_ctx_0, gguf_ctx_2)) {
+        printf("\033[1;32mOK\033[0m\n");
+        npass++;
+    } else {
+        printf("\033[1;31mFAIL\033[0m\n");
+    }
+    ntest++;
+
+    gguf_set_kv(gguf_ctx_0, gguf_ctx_1);
+
+    printf("%s: same_n_kv_after_double_copy: ", __func__);
+    if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1)) {
+        printf("\033[1;32mOK\033[0m\n");
+        npass++;
+    } else {
+        printf("\033[1;31mFAIL\033[0m\n");
+    }
+    ntest++;
+
+    printf("%s: all_kv_1_in_0_after_double_copy: ", __func__);
+    if (all_kv_in_other(gguf_ctx_1, gguf_ctx_0)) {
+        printf("\033[1;32mOK\033[0m\n");
+        npass++;
+    } else {
+        printf("\033[1;31mFAIL\033[0m\n");
+    }
+    ntest++;
+
+    ggml_backend_buffer_free(bbuf_0);
+    ggml_backend_buffer_free(bbuf_1);
+    ggml_free(ctx_0);
+    ggml_free(ctx_1);
+    gguf_free(gguf_ctx_0);
+    gguf_free(gguf_ctx_1);
+    gguf_free(gguf_ctx_2);
+    ggml_backend_free(backend);
+
+    printf("\n");
+    return std::make_pair(npass, ntest);
+}
+
+static void print_usage() {
+    printf("usage: test-gguf [seed]\n");
+    printf("  if no seed is specified then a random seed is used\n");
+}
+
+int main(int argc, char ** argv) {
+    if (argc > 2) {
+        print_usage();
+        return 1;
+    }
+
+    std::random_device rd;
+    const unsigned int seed = argc < 2 ? rd() : std::stoi(argv[1]);
+
+    // Initialize ggml backends early so the prints aren't interleaved with the test results:
+    ggml_backend_dev_count();
+    fprintf(stderr, "\n");
+
+    int npass = 0;
+    int ntest = 0;
+    {
+        std::pair<int, int> result = test_handcrafted_file(seed);
+        npass += result.first;
+        ntest += result.second;
+    }
+
+    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
+        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
+
+        for (bool only_meta : {true, false}) {
+            std::pair<int, int> result = test_roundtrip(dev, seed, only_meta);
+            npass += result.first;
+            ntest += result.second;
+        }
+
+        {
+            std::pair<int, int> result = test_gguf_set_kv(dev, seed);
+            npass += result.first;
+            ntest += result.second;
+        }
+    }
+
+    printf("%d/%d tests passed\n", npass, ntest);
+    if (npass != ntest) {
+        printf("\033[1;31mFAIL\033[0m\n");
+        return 1;
+    }
+    printf("\033[1;32mOK\033[0m\n");
+    return 0;
+}
diff --git a/tests/test-grammar-integration.cpp b/tests/test-grammar-integration.cpp
index 5cc0cdb04751f..e1bdbb9250fca 100644
--- a/tests/test-grammar-integration.cpp
+++ b/tests/test-grammar-integration.cpp
@@ -32,13 +32,10 @@ static bool test_build_grammar_fails(const std::string & grammar_str) {
 static bool match_string(const std::string & input, llama_grammar * grammar) {
     const auto cpts = unicode_cpts_from_utf8(input);
 
-    const llama_grammar_rules  & rules      = llama_grammar_get_rules (grammar);
-          llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar);
+    auto & stacks_cur = llama_grammar_get_stacks(grammar);
 
     for (const auto & cpt : cpts) {
-        const llama_grammar_stacks stacks_prev = llama_grammar_get_stacks(grammar); // copy
-
-        llama_grammar_accept(rules, stacks_prev, cpt, stacks_cur);
+        llama_grammar_accept(grammar, cpt);
 
         if (stacks_cur.empty()) {
             // no stacks means that the grammar failed to match at this point
@@ -63,7 +60,7 @@ static void test(const std::string & test_desc, const std::string & grammar_str,
     auto * grammar = build_grammar(grammar_str);
 
     // Save the original grammar stacks so that we can reset after every new string we want to test
-    const llama_grammar_stacks stacks_org = llama_grammar_get_stacks(grammar);
+    const llama_grammar_stacks stacks_org = llama_grammar_get_stacks(grammar); // copy
 
     llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar);
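For context on the grammar hunk above: llama_grammar_accept() now takes only the grammar and one code point and advances the grammar's internal stacks in place, instead of threading rules and a copied stack set through every call. A minimal sketch of how a caller can reuse one compiled grammar across several test strings under that API (hypothetical helper; it assumes, as the hunk does, that llama_grammar_get_stacks() returns a mutable reference and that an emptied stack signals a completed match):

    // Sketch: match one string against a compiled grammar, restoring the
    // freshly-built stacks afterwards so the grammar can be reused.
    static bool matches(llama_grammar * grammar, const std::string & input,
                        const llama_grammar_stacks & stacks_org) {
        llama_grammar_stacks & stacks_cur = llama_grammar_get_stacks(grammar);
        stacks_cur = stacks_org; // reset to the state right after build_grammar()
        for (const uint32_t cpt : unicode_cpts_from_utf8(input)) {
            llama_grammar_accept(grammar, cpt); // advances stacks_cur in place
            if (stacks_cur.empty()) {
                return false; // no surviving parse stack: the input cannot match
            }
        }
        for (const auto & stack : stacks_cur) {
            if (stack.empty()) {
                return true; // a stack consumed all its symbols: full match
            }
        }
        return false; // the input is only a prefix of a match
    }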
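Also worth spelling out from test-gguf.cpp above: expected tensor offsets are accumulated as offset += GGML_PAD(ggml_row_size(type, ne), alignment), so each tensor's data starts at the next alignment boundary after the previous one. A small standalone sketch of that arithmetic (the macro follows ggml.h's round-up-by-division definition; the sizes are made up):

    #include <cstdint>
    #include <cstdio>

    // round x up to the next multiple of n (round-up-by-division, as in ggml.h)
    #define GGML_PAD(x, n) (((x) + (n) - 1) / (n) * (n))

    int main() {
        const uint64_t alignment = 32;            // GGUF_DEFAULT_ALIGNMENT
        const uint64_t sizes[3]  = {100, 64, 7};  // hypothetical tensor byte sizes
        uint64_t offset = 0;
        for (const uint64_t sz : sizes) {
            printf("tensor of %3llu bytes at offset %llu\n",
                   (unsigned long long) sz, (unsigned long long) offset);
            offset += GGML_PAD(sz, alignment);    // prints offsets 0, 128, 192
        }
        return 0;
    }

This is also why the *_CUSTOM_ALIGN cases can write a general.alignment value of GGUF_DEFAULT_ALIGNMENT + 1: a non-power-of-two alignment only round-trips because the padding rounds by division rather than by bit masking.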
diff --git a/tests/test-llama-grammar.cpp b/tests/test-llama-grammar.cpp index 6f1374ca8ed58..e2129206be156 100644 --- a/tests/test-llama-grammar.cpp +++ b/tests/test-llama-grammar.cpp @@ -113,12 +113,10 @@ int main() } } - llama_grammar * grammar = NULL; std::vector grammar_rules(parsed_grammar.c_rules()); - grammar = llama_grammar_init_impl(nullptr, grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root")); - if (grammar == nullptr) - { + llama_grammar * grammar = llama_grammar_init_impl(nullptr, grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root")); + if (grammar == nullptr) { throw std::runtime_error("Failed to initialize llama_grammar"); } diff --git a/tests/test-lora-conversion-inference.sh b/tests/test-lora-conversion-inference.sh index fe90ce0d1b801..fb308a9ffb76f 100755 --- a/tests/test-lora-conversion-inference.sh +++ b/tests/test-lora-conversion-inference.sh @@ -10,11 +10,16 @@ declare -a params=( MODELS_REPO=lora-tests MODELS_REPO_URL=https://huggingface.co/ggml-org/$MODELS_REPO +COMMIT=c26d5fb85b4070a9e9c4e65d132c783b98086890 # Clone the Hugging Face repository if the directory does not exist if [ ! -d "$MODELS_REPO" ]; then echo "Cloning the Hugging Face repository..." git clone $MODELS_REPO_URL --depth 1 + cd $MODELS_REPO + git fetch --depth=1 origin $COMMIT + git reset --hard $COMMIT + cd - else echo "Repository already exists. Skipping clone." fi diff --git a/tests/test-quantize-fns.cpp b/tests/test-quantize-fns.cpp index 8d0bf0470fe4e..c77c8ed1388d7 100644 --- a/tests/test-quantize-fns.cpp +++ b/tests/test-quantize-fns.cpp @@ -79,9 +79,9 @@ static float dot_product(const float * a1, const float * a2, size_t test_size) { } // Total dot product error -static float dot_product_error( - const ggml_type_traits * qfns, const ggml_type_traits_cpu * qfns_cpu, size_t test_size, const float * test_data1, const float *test_data2 -) { +static float dot_product_error(const ggml_type_traits * qfns, const ggml_type_traits_cpu * qfns_cpu, size_t test_size, const float * test_data1, const float * test_data2) { + GGML_UNUSED(qfns); + std::vector tmp_q1(2*test_size); std::vector tmp_q2(2*test_size); diff --git a/tests/test-rope.cpp b/tests/test-rope.cpp index 4656b30f09cbd..322b8bb99ec6c 100644 --- a/tests/test-rope.cpp +++ b/tests/test-rope.cpp @@ -138,7 +138,7 @@ int main(int /*argc*/, const char ** /*argv*/) { struct ggml_tensor * x; // rope f32 - for (int m = 0; m < 3; ++m) { + for (int m = 0; m < 5; ++m) { const int ndims = 4; const int64_t n_rot = 128; @@ -147,28 +147,69 @@ int main(int /*argc*/, const char ** /*argv*/) { const int n_past_0 = 100; const int n_past_2 = 33; - struct ggml_tensor * p0 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); - struct ggml_tensor * p1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); - struct ggml_tensor * p2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); - - for (int i = 0; i < ne[2]; ++i) { - ((int32_t *) p0->data)[i] = n_past_0 + i; - ((int32_t *) p1->data)[i] = n_past_2 - n_past_0; - ((int32_t *) p2->data)[i] = n_past_2 + i; - } - - // test mode 0, 2, 4 (standard, GPT-NeoX, GLM) - const int mode = m == 0 ? 0 : m == 1 ? 2 : 4; - + struct ggml_tensor * r0; + struct ggml_tensor * r1; + struct ggml_tensor * r2; x = get_random_tensor_f32(ctx0, ndims, ne, -1.0f, 1.0f); + int mode = -1; - // 100, 101, 102, ..., 172 - struct ggml_tensor * r0 = ggml_rope(ctx0, x, p0, n_rot, mode); - // -67, -67, -67, ..., -67 - struct ggml_tensor * r1 = ggml_rope(ctx0, r0, p1, n_rot, mode); // "context swap", i.e. 
forget n_past_0 - n_past_2 tokens + if (m < 3) { + struct ggml_tensor * p0 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); + struct ggml_tensor * p1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); + struct ggml_tensor * p2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2]); - // 33, 34, 35, ..., 105 - struct ggml_tensor * r2 = ggml_rope(ctx0, x, p2, n_rot, mode); + for (int i = 0; i < ne[2]; ++i) { + ((int32_t *) p0->data)[i] = n_past_0 + i; + ((int32_t *) p1->data)[i] = n_past_2 - n_past_0; + ((int32_t *) p2->data)[i] = n_past_2 + i; + } + // test mode 0, 2, 4 (standard, GPT-NeoX, GLM) + mode = m == 0 ? 0 : m == 1 ? 2 : 4; + + // 100, 101, 102, ..., 172 + r0 = ggml_rope(ctx0, x, p0, n_rot, mode); + // -67, -67, -67, ..., -67 + r1 = ggml_rope(ctx0, r0, p1, n_rot, mode); // "context swap", i.e. forget n_past_0 - n_past_2 tokens + + // 33, 34, 35, ..., 105 + r2 = ggml_rope(ctx0, x, p2, n_rot, mode); + } else { + // testing multi-dimension rope position embedding mode + struct ggml_tensor * p0 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2] * 4); + struct ggml_tensor * p1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2] * 4); + struct ggml_tensor * p2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, ne[2] * 4); + + int sections[4] = {16, 24, 24, 0}; + mode = (m == 3) ? GGML_ROPE_TYPE_MROPE : GGML_ROPE_TYPE_VISION; + + for (int i = 0; i < ne[2]; ++i) { + for (int j = 0; j < 4; ++j) { + ((int32_t *) p0->data)[i + ne[2] * j] = n_past_0 + i + j; + ((int32_t *) p1->data)[i + ne[2] * j] = n_past_2 - n_past_0; + ((int32_t *) p2->data)[i + ne[2] * j] = n_past_2 + i + j; + } + } + + // [[100, 101, 102, ..., 172], + // [101, 102, 103, ..., 173], + // [102, 103, 104, ..., 174]] + r0 = ggml_rope_multi( + ctx0, x, p0, nullptr, + n_rot, sections, mode, 32768, 1000000, 1, 0, 1, 32, 1); + // [[-67, -67, -67, ..., -67] + // [-67, -67, -67, ..., -67] + // [-67, -67, -67, ..., -67]] + r1 = ggml_rope_multi( + ctx0, r0, p1, nullptr, + n_rot, sections, mode, 32768, 1000000, 1, 0, 1, 32, 1); + + // [[33, 34, 35, ..., 105] + // [34, 35, 36, ..., 106] + // [35, 36, 37, ..., 107]] + r2 = ggml_rope_multi( + ctx0, x, p2, nullptr, + n_rot, sections, mode, 32768, 1000000, 1, 0, 1, 32, 1); + } ggml_cgraph * gf = ggml_new_graph(ctx0); diff --git a/tests/test-sampling.cpp b/tests/test-sampling.cpp index be370044d6f58..c0dcb4848b403 100644 --- a/tests/test-sampling.cpp +++ b/tests/test-sampling.cpp @@ -145,7 +145,7 @@ static void test_penalties( sampler_tester tester(probs, probs_expected); const size_t n_vocab = probs.size(); - auto * sampler = llama_sampler_init_penalties(n_vocab, LLAMA_TOKEN_NULL, LLAMA_TOKEN_NULL, last_tokens.size(), repeat_penalty, alpha_frequency, alpha_presence, false, false); + auto * sampler = llama_sampler_init_penalties(last_tokens.size(), repeat_penalty, alpha_frequency, alpha_presence); for (size_t i = 0; i < last_tokens.size(); i++) { llama_sampler_accept(sampler, last_tokens[i]); @@ -284,7 +284,7 @@ static void test_perf() { data.reserve(n_vocab); for (int i = 0; i < n_vocab; i++) { - const float logit = 2.0f*((float)(rand())/RAND_MAX - 0.5f); + const float logit = 2.0f*((double)(rand())/RAND_MAX - 0.5); data.emplace_back(llama_token_data{i, logit, 0.0f}); }
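On the test-sampling.cpp hunk above: llama_sampler_init_penalties() dropped the vocab-size, special-token, and boolean-flag parameters, leaving only the window size and the three penalty strengths. A minimal usage sketch under that signature (the values are placeholders, not recommendations):

    // Sketch: build the penalties sampler, feed it some history, free it.
    llama_sampler * smpl = llama_sampler_init_penalties(
        /*penalty_last_n  =*/ 64,    // how many recent tokens are penalized
        /*penalty_repeat  =*/ 1.1f,  // multiplicative repeat penalty
        /*penalty_freq    =*/ 0.0f,  // additive frequency penalty
        /*penalty_present =*/ 0.0f); // additive presence penalty

    for (const llama_token tok : {1, 2, 3}) { // placeholder token history
        llama_sampler_accept(smpl, tok);
    }
    // llama_sampler_apply(smpl, &cur_p) would now adjust the candidate logits
    llama_sampler_free(smpl);

The other change in that hunk, computing the random logit in double before narrowing to float, avoids the precision loss of dividing by RAND_MAX in single precision, since RAND_MAX (typically 2^31 - 1) is not exactly representable as a float.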