From 7ba257e5dac58f8796a12260776f27fd989a1182 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 02:33:55 -0500 Subject: [PATCH 01/55] feat: Update llama.cpp --- llama_cpp/llama_cpp.py | 32 ++++++++++++++++++++++++++++++++ vendor/llama.cpp | 2 +- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index bb2ba1993..24663146c 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -221,6 +221,7 @@ # LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24, # LLAMA_VOCAB_PRE_TYPE_EXAONE = 25, # LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, +# LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, # }; LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0 LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1 @@ -249,6 +250,7 @@ LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH = 24 LLAMA_VOCAB_PRE_TYPE_EXAONE = 25 LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26 +LLAMA_VOCAB_PRE_TYPE_MINERVA = 27 # // note: these values should be synchronized with ggml_rope @@ -392,12 +394,14 @@ # LLAMA_ROPE_SCALING_TYPE_NONE = 0, # LLAMA_ROPE_SCALING_TYPE_LINEAR = 1, # LLAMA_ROPE_SCALING_TYPE_YARN = 2, +# LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3, # LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN, # }; LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1 LLAMA_ROPE_SCALING_TYPE_NONE = 0 LLAMA_ROPE_SCALING_TYPE_LINEAR = 1 LLAMA_ROPE_SCALING_TYPE_YARN = 2 +LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3 LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN # enum llama_pooling_type { @@ -2933,6 +2937,34 @@ def llama_chat_apply_template( ... +# // Get list of built-in chat templates +# LLAMA_API int32_t llama_chat_builtin_templates(const char ** output, size_t len); +@ctypes_function( + "llama_chat_builtin_templates", + [ + ctypes.POINTER(ctypes.c_char_p), + ctypes.c_size_t, + ], + ctypes.c_int32, +) +def llama_chat_builtin_templates( + output: CtypesArray[bytes], + len: Union[ctypes.c_size_t, int], + /, +) -> int: + """Get list of built-in chat templates. + + Args: + output: Output buffer to store template names. + len: Length of the output buffer. + + Returns: + Number of templates available. + Returns a negative number on error. + """ + ... 
+ + # // # // Sampling API # // diff --git a/vendor/llama.cpp b/vendor/llama.cpp index dc2234408..7736837d6 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit dc22344088a7ee81a1e4f096459b03a72f24ccdc +Subproject commit 7736837d62efed1dbebfe579472fca041eda12d6 From 9d06e36c00aea381dd995abc800ce9727da13a82 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 02:49:20 -0500 Subject: [PATCH 02/55] fix(ci): Explicitly install arm64 python version --- .github/workflows/test.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 8f619cf1f..01b2f4c40 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -126,6 +126,7 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} + architecture: "arm64" cache: 'pip' - name: Restore model cache @@ -166,6 +167,7 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.9" + architecture: "arm64" - name: Restore model cache uses: actions/cache@v4 From fb0b8fefd41dc8c467b0e353638f7345254274b5 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 02:53:06 -0500 Subject: [PATCH 03/55] fix(ci): Explicitly set cmake osx architecture --- CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index c9f3c2b36..584e54d39 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -70,6 +70,8 @@ if (LLAMA_BUILD) set(GGML_AVX2 "Off" CACHE BOOL "ggml: enable AVX2" FORCE) set(GGML_FMA "Off" CACHE BOOL "gml: enable FMA" FORCE) set(GGML_F16C "Off" CACHE BOOL "gml: enable F16C" FORCE) + + set(CMAKE_OSX_ARCHITECTURES "arm64" CACHE STRING "Build architecture for OS X" FORCE) endif() if (APPLE) From 72ed7b8855a168a454026b98c7f86a452524a7d1 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 02:55:57 -0500 Subject: [PATCH 04/55] fix(ci): Explicitly test on arm64 macos runner --- .github/workflows/test.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 01b2f4c40..445554e6b 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -113,7 +113,7 @@ jobs: build-macos: needs: download-model - runs-on: macos-latest + runs-on: macos-14-arm64 strategy: matrix: python-version: ["3.9", "3.10", "3.11", "3.12"] @@ -157,7 +157,7 @@ jobs: build-macos-metal: needs: download-model - runs-on: macos-latest + runs-on: macos-14-arm64 steps: - uses: actions/checkout@v4 with: From 8988aaf7f64a4ffbe30fb1b94d786848ea1efd3c Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 03:02:04 -0500 Subject: [PATCH 05/55] fix(ci): Use macos-14 runner --- .github/workflows/test.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 445554e6b..f5a08f6cb 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -113,7 +113,7 @@ jobs: build-macos: needs: download-model - runs-on: macos-14-arm64 + runs-on: macos-14 strategy: matrix: python-version: ["3.9", "3.10", "3.11", "3.12"] @@ -157,7 +157,7 @@ jobs: build-macos-metal: needs: download-model - runs-on: macos-14-arm64 + runs-on: macos-14 steps: - uses: actions/checkout@v4 with: From f11a781d86c914ab12dc620edf64c049c6fb0ee7 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 03:04:48 -0500 Subject: [PATCH 06/55] fix(ci): Use macos-13 runner --- .github/workflows/test.yaml | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index f5a08f6cb..1631d63de 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -113,7 +113,7 @@ jobs: build-macos: needs: download-model - runs-on: macos-14 + runs-on: macos-13 strategy: matrix: python-version: ["3.9", "3.10", "3.11", "3.12"] @@ -157,7 +157,7 @@ jobs: build-macos-metal: needs: download-model - runs-on: macos-14 + runs-on: macos-13 steps: - uses: actions/checkout@v4 with: From 9a09fc78487b08423f4e2c9f02b7bc308dc0e2e7 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 03:19:36 -0500 Subject: [PATCH 07/55] fix(ci): Debug print python system architecture --- .github/workflows/test.yaml | 49 ++++++++++++------------------------- 1 file changed, 15 insertions(+), 34 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 1631d63de..ee8680a9b 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -51,19 +51,11 @@ jobs: path: ~/.cache/huggingface/hub key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} - name: Install dependencies (Linux/MacOS) - if: runner.os != 'Windows' run: | python -m pip install --upgrade pip python -m pip install uv python -m uv pip install -e .[all] --verbose shell: bash - - name: Install dependencies (Windows) - if: runner.os == 'Windows' - run: | - python -m pip install --upgrade pip - python -m pip install uv - python -m uv pip install -e .[all] --verbose - shell: cmd - name: Test with pytest run: | python -m pytest @@ -90,22 +82,13 @@ jobs: with: path: ~/.cache/huggingface/hub key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} - - - name: Install dependencies (Linux/MacOS) - if: runner.os != 'Windows' - run: | - python -m pip install --upgrade pip - python -m pip install uv - python -m uv pip install -e .[all] --verbose - shell: bash - name: Install dependencies (Windows) - if: runner.os == 'Windows' run: | python -m pip install --upgrade pip python -m pip install uv python -m uv pip install -e .[all] --verbose - shell: cmd + shell: cmd - name: Test with pytest run: | @@ -129,6 +112,12 @@ jobs: architecture: "arm64" cache: 'pip' + - name: System Info + run: | + uname -a + sysctl -n machdep.cpu.brand_string + python3 -c "import platform; print(platform.machine(), platform.architecture())" + - name: Restore model cache uses: actions/cache@v4 with: @@ -136,21 +125,13 @@ jobs: key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} - name: Install dependencies (Linux/MacOS) - if: runner.os != 'Windows' run: | python -m pip install --upgrade pip python -m pip install uv python -m uv pip install -e .[all] --verbose + CMAKE_ARGS="-DLLAMA_METAL=off" python -m uv pip install .[all] --verbose shell: bash - - name: Install dependencies (Windows) - if: runner.os == 'Windows' - run: | - python -m pip install --upgrade pip - python -m pip install uv - python -m uv pip install -e .[all] --verbose - shell: cmd - - name: Test with pytest run: | python -m pytest @@ -169,25 +150,25 @@ jobs: python-version: "3.9" architecture: "arm64" + - name: System Info + run: | + uname -a + sysctl -n machdep.cpu.brand_string + python3 -c "import platform; print(platform.machine(), platform.architecture())" + - name: Restore model cache uses: actions/cache@v4 with: path: ~/.cache/huggingface/hub key: ${{ runner.os }}-model-${{ env.REPO_ID }}-${{ env.MODEL_FILE }} - - name: Install dependencies (Linux/MacOS) - if: runner.os != 
'Windows' + - name: Install dependencies run: | python -m pip install --upgrade pip python -m pip install uv CMAKE_ARGS="-DLLAMA_METAL=on" python -m uv pip install .[all] --verbose shell: bash - - name: Install dependencies (Windows) - if: runner.os == 'Windows' - run: | - python -m pip install --upgrade pip - CMAKE_ARGS="-DGGML_METAL=on" python -m pip install .[all] --verbose - name: Test with pytest run: | python -m pytest From a412ba5539be6a5499acfcb0a2c234665fcbda11 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 03:38:12 -0500 Subject: [PATCH 08/55] fix(ci): Update config --- .github/workflows/test.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index ee8680a9b..f8b7893f8 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -109,7 +109,6 @@ jobs: uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - architecture: "arm64" cache: 'pip' - name: System Info @@ -138,7 +137,7 @@ jobs: build-macos-metal: needs: download-model - runs-on: macos-13 + runs-on: macos-14 steps: - uses: actions/checkout@v4 with: From df05096fc56347efe14ee550f065a1b15933c322 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 03:42:33 -0500 Subject: [PATCH 09/55] fix(ci): Install with regular pip --- .github/workflows/test.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index f8b7893f8..9332e0830 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -164,8 +164,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install uv - CMAKE_ARGS="-DLLAMA_METAL=on" python -m uv pip install .[all] --verbose + CMAKE_ARGS="-DLLAMA_METAL=on" python -m pip install .[all] --verbose shell: bash - name: Test with pytest From 1cd3f2cc6aceaa3167c943eb33b4076ae7a9ba53 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 03:52:54 -0500 Subject: [PATCH 10/55] fix(ci): gg --- .github/workflows/test.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 9332e0830..a4d56110a 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -110,6 +110,7 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: 'pip' + architecture: "arm64" - name: System Info run: | @@ -137,7 +138,7 @@ jobs: build-macos-metal: needs: download-model - runs-on: macos-14 + runs-on: macos-13 steps: - uses: actions/checkout@v4 with: @@ -163,10 +164,10 @@ jobs: - name: Install dependencies run: | - python -m pip install --upgrade pip - CMAKE_ARGS="-DLLAMA_METAL=on" python -m pip install .[all] --verbose + python3 -m pip install --upgrade pip + CMAKE_ARGS="-DLLAMA_METAL=on" python3 -m pip install .[all] --verbose shell: bash - name: Test with pytest run: | - python -m pytest + python3 -m pytest From b34f20046b9fc058ed8581291a724c6723b06fe4 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 03:59:51 -0500 Subject: [PATCH 11/55] fix(ci): Use python3 --- .github/workflows/test.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index a4d56110a..7f4774959 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -126,15 +126,15 @@ jobs: - name: Install dependencies (Linux/MacOS) run: | - python -m pip 
install --upgrade pip - python -m pip install uv - python -m uv pip install -e .[all] --verbose - CMAKE_ARGS="-DLLAMA_METAL=off" python -m uv pip install .[all] --verbose + python3 -m pip install --upgrade pip + python3 -m pip install uv + python3 -m uv pip install -e .[all] --verbose + CMAKE_ARGS="-DLLAMA_METAL=off" python3 -m uv pip install .[all] --verbose shell: bash - name: Test with pytest run: | - python -m pytest + python3 -m pytest build-macos-metal: needs: download-model From d8cc231fd00b67528e7e4563c2b4c514acbcd8f9 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 04:06:07 -0500 Subject: [PATCH 12/55] fix(ci): Use default architecture chosen by action --- .github/workflows/test.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 7f4774959..95f6e5a27 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -110,7 +110,6 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: 'pip' - architecture: "arm64" - name: System Info run: | @@ -148,7 +147,6 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.9" - architecture: "arm64" - name: System Info run: | From d5d50990d016333982ff350c9c151efe5732d76b Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 04:14:59 -0500 Subject: [PATCH 13/55] fix(ci): Update CMakeLists.txt for macos --- CMakeLists.txt | 43 +++++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 584e54d39..64a0304a1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -62,20 +62,35 @@ if (LLAMA_BUILD) # Enable building of the common library set(LLAMA_BUILD_COMMON ON CACHE BOOL "Build llama.cpp common library" FORCE) - # Building llama - if (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR MATCHES "arm64") - # Need to disable these llama.cpp flags on Apple x86_64, - # otherwise users may encounter invalid instruction errors - set(GGML_AVX "Off" CACHE BOOL "ggml: enable AVX" FORCE) - set(GGML_AVX2 "Off" CACHE BOOL "ggml: enable AVX2" FORCE) - set(GGML_FMA "Off" CACHE BOOL "gml: enable FMA" FORCE) - set(GGML_F16C "Off" CACHE BOOL "gml: enable F16C" FORCE) - - set(CMAKE_OSX_ARCHITECTURES "arm64" CACHE STRING "Build architecture for OS X" FORCE) - endif() - + # Architecture detection and settings for Apple platforms if (APPLE) - set(GGML_METAL_EMBED_LIBRARY "On" CACHE BOOL "llama: embed metal library" FORCE) + # Get the target architecture + execute_process( + COMMAND uname -m + OUTPUT_VARIABLE HOST_ARCH + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + + # If CMAKE_OSX_ARCHITECTURES is not set, use the host architecture + if(NOT CMAKE_OSX_ARCHITECTURES) + set(CMAKE_OSX_ARCHITECTURES ${HOST_ARCH} CACHE STRING "Build architecture for macOS" FORCE) + endif() + + message(STATUS "Host architecture: ${HOST_ARCH}") + message(STATUS "Target architecture: ${CMAKE_OSX_ARCHITECTURES}") + + # Configure based on target architecture + if(CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64") + # Intel Mac settings + set(GGML_AVX "OFF" CACHE BOOL "ggml: enable AVX" FORCE) + set(GGML_AVX2 "OFF" CACHE BOOL "ggml: enable AVX2" FORCE) + set(GGML_FMA "OFF" CACHE BOOL "ggml: enable FMA" FORCE) + set(GGML_F16C "OFF" CACHE BOOL "ggml: enable F16C" FORCE) + endif() + + # Metal settings (enable for both architectures) + set(GGML_METAL "ON" CACHE BOOL "ggml: enable Metal" FORCE) + set(GGML_METAL_EMBED_LIBRARY "ON" CACHE BOOL "ggml: embed metal library" FORCE) endif() add_subdirectory(vendor/llama.cpp) @@ 
-130,7 +145,7 @@ if (LLAMA_BUILD) # Building llava add_subdirectory(vendor/llama.cpp/examples/llava) set_target_properties(llava_shared PROPERTIES OUTPUT_NAME "llava") - # Set CUDA_ARCHITECTURES to OFF on Windows + if (WIN32) set_target_properties(llava_shared PROPERTIES CUDA_ARCHITECTURES OFF) endif() From 4f17ae5af073f3741be7235f86e2b183db62a260 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=CE=9BBL=C3=98=20=E1=84=83=CE=9E?= Date: Fri, 6 Dec 2024 04:31:01 -0500 Subject: [PATCH 14/55] fix(ci): Remove cuda version 12.5.0 incompatibility with VS (#1838) Co-authored-by: Andrei --- .github/workflows/build-wheels-cuda.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-wheels-cuda.yaml b/.github/workflows/build-wheels-cuda.yaml index 647b171e8..bde626828 100644 --- a/.github/workflows/build-wheels-cuda.yaml +++ b/.github/workflows/build-wheels-cuda.yaml @@ -22,7 +22,7 @@ jobs: $matrix = @{ 'os' = @('ubuntu-latest', 'windows-2019') 'pyver' = @("3.9", "3.10", "3.11", "3.12") - 'cuda' = @("12.1.1", "12.2.2", "12.3.2", "12.4.1", "12.5.0") + 'cuda' = @("12.1.1", "12.2.2", "12.3.2", "12.4.1") 'releasetag' = @("basic") } From 991d9cde1b81dfdda4d3117b508c16c6fe7dbdca Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 04:32:19 -0500 Subject: [PATCH 15/55] fix(ci): Remove CUDA 12.5 from index --- .github/workflows/generate-index-from-release.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/generate-index-from-release.yaml b/.github/workflows/generate-index-from-release.yaml index 8d80e7e47..51961088d 100644 --- a/.github/workflows/generate-index-from-release.yaml +++ b/.github/workflows/generate-index-from-release.yaml @@ -44,7 +44,6 @@ jobs: ./scripts/releases-to-pep-503.sh index/whl/cu122 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu122$' ./scripts/releases-to-pep-503.sh index/whl/cu123 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu123$' ./scripts/releases-to-pep-503.sh index/whl/cu124 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' - ./scripts/releases-to-pep-503.sh index/whl/cu125 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu125$' ./scripts/releases-to-pep-503.sh index/whl/metal '^[v]?[0-9]+\.[0-9]+\.[0-9]+-metal$' - name: Upload artifact uses: actions/upload-pages-artifact@v3 From 27953036b1bb1da9e4b71b875f66ba8f1aefbf6f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Dec 2024 04:32:37 -0500 Subject: [PATCH 16/55] chore(deps): bump pypa/cibuildwheel from 2.21.1 to 2.22.0 (#1844) Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.21.1 to 2.22.0. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/v2.21.1...v2.22.0) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-and-release.yaml | 4 ++-- .github/workflows/build-wheels-metal.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-and-release.yaml b/.github/workflows/build-and-release.yaml index bfa97decd..1d549ed68 100644 --- a/.github/workflows/build-and-release.yaml +++ b/.github/workflows/build-and-release.yaml @@ -42,7 +42,7 @@ jobs: shell: cmd - name: Build wheels - uses: pypa/cibuildwheel@v2.21.1 + uses: pypa/cibuildwheel@v2.22.0 env: # disable repair CIBW_REPAIR_WHEEL_COMMAND: "" @@ -69,7 +69,7 @@ jobs: platforms: linux/arm64 - name: Build wheels - uses: pypa/cibuildwheel@v2.21.1 + uses: pypa/cibuildwheel@v2.22.0 env: CIBW_SKIP: "*musllinux* pp*" CIBW_REPAIR_WHEEL_COMMAND: "" diff --git a/.github/workflows/build-wheels-metal.yaml b/.github/workflows/build-wheels-metal.yaml index 8e6fb5cb7..01da3cef5 100644 --- a/.github/workflows/build-wheels-metal.yaml +++ b/.github/workflows/build-wheels-metal.yaml @@ -43,7 +43,7 @@ jobs: shell: cmd - name: Build wheels - uses: pypa/cibuildwheel@v2.21.1 + uses: pypa/cibuildwheel@v2.22.0 env: # disable repair CIBW_REPAIR_WHEEL_COMMAND: "" From 2523472c3eccb9ab9277117cc4ff705212b6888a Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 04:36:22 -0500 Subject: [PATCH 17/55] fix: Fix pickling of Llama class by setting seed from _seed member. Closes #1769 --- llama_cpp/llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index d15a88b00..ce1a26649 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -2073,7 +2073,7 @@ def __getstate__(self): use_mlock=self.model_params.use_mlock, kv_overrides=self.kv_overrides, # Context Params - seed=self.context_params.seed, + seed=self._seed, n_ctx=self.context_params.n_ctx, n_batch=self.n_batch, n_ubatch=self.context_params.n_ubatch, From ddac04c78e504196207292aeca4c3adccc655a88 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 6 Dec 2024 04:39:27 -0500 Subject: [PATCH 18/55] chore(deps): bump conda-incubator/setup-miniconda from 3.0.4 to 3.1.0 (#1821) Bumps [conda-incubator/setup-miniconda](https://github.com/conda-incubator/setup-miniconda) from 3.0.4 to 3.1.0. - [Release notes](https://github.com/conda-incubator/setup-miniconda/releases) - [Changelog](https://github.com/conda-incubator/setup-miniconda/blob/main/CHANGELOG.md) - [Commits](https://github.com/conda-incubator/setup-miniconda/compare/v3.0.4...v3.1.0) --- updated-dependencies: - dependency-name: conda-incubator/setup-miniconda dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build-wheels-cuda.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-wheels-cuda.yaml b/.github/workflows/build-wheels-cuda.yaml index bde626828..3d9c5d9d4 100644 --- a/.github/workflows/build-wheels-cuda.yaml +++ b/.github/workflows/build-wheels-cuda.yaml @@ -59,7 +59,7 @@ jobs: cache: 'pip' - name: Setup Mamba - uses: conda-incubator/setup-miniconda@v3.0.4 + uses: conda-incubator/setup-miniconda@v3.1.0 with: activate-environment: "build" python-version: ${{ matrix.pyver }} From fa04cdc497e2c72888cccf6268836fe3291c3d42 Mon Sep 17 00:00:00 2001 From: ddh0 Date: Fri, 6 Dec 2024 03:40:47 -0600 Subject: [PATCH 19/55] fix logit-bias type hint (#1802) `Optional[Dict[str, float]]` --> `Optional[Dict[int, float]]` --- llama_cpp/llama.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index ce1a26649..2fd7ff193 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -1141,7 +1141,7 @@ def _create_completion( stopping_criteria: Optional[StoppingCriteriaList] = None, logits_processor: Optional[LogitsProcessorList] = None, grammar: Optional[LlamaGrammar] = None, - logit_bias: Optional[Dict[str, float]] = None, + logit_bias: Optional[Dict[int, float]] = None, ) -> Union[ Iterator[CreateCompletionResponse], Iterator[CreateCompletionStreamResponse] ]: @@ -1761,7 +1761,7 @@ def create_completion( stopping_criteria: Optional[StoppingCriteriaList] = None, logits_processor: Optional[LogitsProcessorList] = None, grammar: Optional[LlamaGrammar] = None, - logit_bias: Optional[Dict[str, float]] = None, + logit_bias: Optional[Dict[int, float]] = None, ) -> Union[CreateCompletionResponse, Iterator[CreateCompletionStreamResponse]]: """Generate text from a prompt. @@ -1858,7 +1858,7 @@ def __call__( stopping_criteria: Optional[StoppingCriteriaList] = None, logits_processor: Optional[LogitsProcessorList] = None, grammar: Optional[LlamaGrammar] = None, - logit_bias: Optional[Dict[str, float]] = None, + logit_bias: Optional[Dict[int, float]] = None, ) -> Union[CreateCompletionResponse, Iterator[CreateCompletionStreamResponse]]: """Generate text from a prompt. @@ -1951,7 +1951,7 @@ def create_chat_completion( model: Optional[str] = None, logits_processor: Optional[LogitsProcessorList] = None, grammar: Optional[LlamaGrammar] = None, - logit_bias: Optional[Dict[str, float]] = None, + logit_bias: Optional[Dict[int, float]] = None, logprobs: Optional[bool] = None, top_logprobs: Optional[int] = None, ) -> Union[ From 38fbd2968d3e937c9f4157a54372b02fa527233b Mon Sep 17 00:00:00 2001 From: Rich Dougherty Date: Fri, 6 Dec 2024 22:41:52 +1300 Subject: [PATCH 20/55] docs: Remove ref to llama_eval in llama_cpp.py docs (#1819) The llama_eval function was removed in 0.2.14 (0d37ce5) but the docs still reference it. Updated to match the llama.h docs from upstream. 
--- llama_cpp/llama_cpp.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 24663146c..cd59bb430 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -772,7 +772,7 @@ class llama_context_params(ctypes.Structure): cb_eval_user_data (ctypes.ctypes.c_void_p): user data for cb_eval type_k (int): data type for K cache type_v (int): data type for V cache - logits_all (bool): the llama_eval() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) + logits_all (bool): the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead) embeddings (bool): if true, extract embeddings (together with logits) offload_kqv (bool): whether to offload the KQV ops (including the KV cache) to GPU flash_attn (bool): whether to use flash attention @@ -2469,10 +2469,10 @@ def llama_synchronize(ctx: llama_context_p, /): "llama_get_logits", [llama_context_p_ctypes], ctypes.POINTER(ctypes.c_float) ) def llama_get_logits(ctx: llama_context_p, /) -> CtypesArray[ctypes.c_float]: - """Token logits obtained from the last call to llama_eval() - The logits for the last token are stored in the last row - Logits for which llama_batch.logits[i] == 0 are undefined - Rows: n_tokens provided with llama_batch + """Token logits obtained from the last call to llama_decode() + The logits for which llama_batch.logits[i] != 0 are stored contiguously + in the order they have appeared in the batch. + Rows: number of tokens for which llama_batch.logits[i] != 0 Cols: n_vocab Returns: From 4192210232ee20ceb028ebd0ee68a6b964f5ffc1 Mon Sep 17 00:00:00 2001 From: Philippe Martin Date: Fri, 6 Dec 2024 10:47:38 +0100 Subject: [PATCH 21/55] fix: make content not required in ChatCompletionRequestAssistantMessage (#1807) --- llama_cpp/llama_types.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llama_cpp/llama_types.py b/llama_cpp/llama_types.py index bbb58afc3..57836ee76 100644 --- a/llama_cpp/llama_types.py +++ b/llama_cpp/llama_types.py @@ -214,7 +214,7 @@ class ChatCompletionRequestAssistantMessageFunctionCall(TypedDict): class ChatCompletionRequestAssistantMessage(TypedDict): role: Literal["assistant"] - content: Optional[str] + content: NotRequired[str] tool_calls: NotRequired[ChatCompletionMessageToolCalls] function_call: NotRequired[ ChatCompletionRequestAssistantMessageFunctionCall From 77a12a39663bbe5a5d22a7cc3d5d4f1c591c2851 Mon Sep 17 00:00:00 2001 From: Olivier DEBAUCHE Date: Fri, 6 Dec 2024 11:01:17 +0100 Subject: [PATCH 22/55] fix: Re-add suport for CUDA 12.5, add CUDA 12.6 (#1775) Add support for Cuda 12.6.1 Update version of Cuda 12.5.0 to 12.5.1 Co-authored-by: Andrei --- .github/workflows/build-wheels-cuda.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-wheels-cuda.yaml b/.github/workflows/build-wheels-cuda.yaml index 3d9c5d9d4..8624ec6d9 100644 --- a/.github/workflows/build-wheels-cuda.yaml +++ b/.github/workflows/build-wheels-cuda.yaml @@ -22,7 +22,7 @@ jobs: $matrix = @{ 'os' = @('ubuntu-latest', 'windows-2019') 'pyver' = @("3.9", "3.10", "3.11", "3.12") - 'cuda' = @("12.1.1", "12.2.2", "12.3.2", "12.4.1") + 'cuda' = @("12.1.1", "12.2.2", "12.3.2", "12.4.1", "12.5.1", "12.6.1") 'releasetag' = @("basic") } From 073b7e421e419ec389d8c064bbdae26c482ec17c Mon Sep 17 00:00:00 2001 From: "Ignaz \"Ian\" Kraft" Date: Fri, 6 Dec 2024 11:02:34 +0100 Subject: [PATCH 23/55] fix: added missing 
exit_stack.close() to /v1/chat/completions (#1796) * fix: added missing exit_stack.close() to /v1/chat/completions * fix: added missing exit_stack.close() to /v1/completions --- llama_cpp/server/app.py | 23 ++++++++++++++++------- llama_cpp/server/errors.py | 6 ++++-- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py index cd3255176..9b8dce418 100644 --- a/llama_cpp/server/app.py +++ b/llama_cpp/server/app.py @@ -314,10 +314,14 @@ async def create_completion( else: kwargs["logits_processor"].extend(_min_tokens_logits_processor) - iterator_or_completion: Union[ - llama_cpp.CreateCompletionResponse, - Iterator[llama_cpp.CreateCompletionStreamResponse], - ] = await run_in_threadpool(llama, **kwargs) + try: + iterator_or_completion: Union[ + llama_cpp.CreateCompletionResponse, + Iterator[llama_cpp.CreateCompletionStreamResponse], + ] = await run_in_threadpool(llama, **kwargs) + except Exception as err: + exit_stack.close() + raise err if isinstance(iterator_or_completion, Iterator): # EAFP: It's easier to ask for forgiveness than permission @@ -344,6 +348,7 @@ def iterator() -> Iterator[llama_cpp.CreateCompletionStreamResponse]: ping_message_factory=_ping_message_factory, ) else: + exit_stack.close() return iterator_or_completion @@ -508,9 +513,13 @@ async def create_chat_completion( else: kwargs["logits_processor"].extend(_min_tokens_logits_processor) - iterator_or_completion: Union[ - llama_cpp.ChatCompletion, Iterator[llama_cpp.ChatCompletionChunk] - ] = await run_in_threadpool(llama.create_chat_completion, **kwargs) + try: + iterator_or_completion: Union[ + llama_cpp.ChatCompletion, Iterator[llama_cpp.ChatCompletionChunk] + ] = await run_in_threadpool(llama.create_chat_completion, **kwargs) + except Exception as err: + exit_stack.close() + raise err if isinstance(iterator_or_completion, Iterator): # EAFP: It's easier to ask for forgiveness than permission diff --git a/llama_cpp/server/errors.py b/llama_cpp/server/errors.py index fbf9fd80d..d0eda5664 100644 --- a/llama_cpp/server/errors.py +++ b/llama_cpp/server/errors.py @@ -134,8 +134,6 @@ def error_message_wrapper( ] = None, ) -> Tuple[int, ErrorResponse]: """Wraps error message in OpenAI style error response""" - print(f"Exception: {str(error)}", file=sys.stderr) - traceback.print_exc(file=sys.stderr) if body is not None and isinstance( body, ( @@ -149,6 +147,10 @@ def error_message_wrapper( if match is not None: return callback(body, match) + # Only print the trace on unexpected exceptions + print(f"Exception: {str(error)}", file=sys.stderr) + traceback.print_exc(file=sys.stderr) + # Wrap other errors as internal server error return 500, ErrorResponse( message=str(error), From 9bd0c95424aad6a80a7305761b465006af3dde0d Mon Sep 17 00:00:00 2001 From: Graeme Power Date: Fri, 6 Dec 2024 10:03:41 +0000 Subject: [PATCH 24/55] fix: Avoid thread starvation on many concurrent requests by making use of asyncio to lock llama_proxy context (#1798) * fix: make use of asyncio to lock llama_proxy context * fix: use aclose instead of close for AsyncExitStack * fix: don't call exit stack close in stream iterator as it will be called by finally from on_complete anyway * fix: use anyio.Lock instead of asyncio.Lock --------- Co-authored-by: Andrei --- llama_cpp/server/app.py | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py index 9b8dce418..f7c028475 100644 --- a/llama_cpp/server/app.py +++ 
b/llama_cpp/server/app.py @@ -5,7 +5,7 @@ import typing import contextlib -from threading import Lock +from anyio import Lock from functools import partial from typing import Iterator, List, Optional, Union, Dict @@ -70,14 +70,14 @@ def set_llama_proxy(model_settings: List[ModelSettings]): _llama_proxy = LlamaProxy(models=model_settings) -def get_llama_proxy(): +async def get_llama_proxy(): # NOTE: This double lock allows the currently streaming llama model to # check if any other requests are pending in the same thread and cancel # the stream if so. - llama_outer_lock.acquire() + await llama_outer_lock.acquire() release_outer_lock = True try: - llama_inner_lock.acquire() + await llama_inner_lock.acquire() try: llama_outer_lock.release() release_outer_lock = False @@ -159,7 +159,7 @@ async def get_event_publisher( request: Request, inner_send_chan: MemoryObjectSendStream[typing.Any], iterator: Iterator[typing.Any], - on_complete: typing.Optional[typing.Callable[[], None]] = None, + on_complete: typing.Optional[typing.Callable[[], typing.Awaitable[None]]] = None, ): server_settings = next(get_server_settings()) interrupt_requests = ( @@ -182,7 +182,7 @@ async def get_event_publisher( raise e finally: if on_complete: - on_complete() + await on_complete() def _logit_bias_tokens_to_input_ids( @@ -267,10 +267,8 @@ async def create_completion( request: Request, body: CreateCompletionRequest, ) -> llama_cpp.Completion: - exit_stack = contextlib.ExitStack() - llama_proxy = await run_in_threadpool( - lambda: exit_stack.enter_context(contextlib.contextmanager(get_llama_proxy)()) - ) + exit_stack = contextlib.AsyncExitStack() + llama_proxy = await exit_stack.enter_async_context(contextlib.asynccontextmanager(get_llama_proxy)()) if llama_proxy is None: raise HTTPException( status_code=status.HTTP_503_SERVICE_UNAVAILABLE, @@ -332,7 +330,6 @@ async def create_completion( def iterator() -> Iterator[llama_cpp.CreateCompletionStreamResponse]: yield first_response yield from iterator_or_completion - exit_stack.close() send_chan, recv_chan = anyio.create_memory_object_stream(10) return EventSourceResponse( @@ -342,13 +339,13 @@ def iterator() -> Iterator[llama_cpp.CreateCompletionStreamResponse]: request=request, inner_send_chan=send_chan, iterator=iterator(), - on_complete=exit_stack.close, + on_complete=exit_stack.aclose, ), sep="\n", ping_message_factory=_ping_message_factory, ) else: - exit_stack.close() + await exit_stack.aclose() return iterator_or_completion @@ -477,10 +474,8 @@ async def create_chat_completion( # where the dependency is cleaned up before a StreamingResponse # is complete. 
# https://github.com/tiangolo/fastapi/issues/11143 - exit_stack = contextlib.ExitStack() - llama_proxy = await run_in_threadpool( - lambda: exit_stack.enter_context(contextlib.contextmanager(get_llama_proxy)()) - ) + exit_stack = contextlib.AsyncExitStack() + llama_proxy = exit_stack.enter_async_context(contextlib.asynccontextmanager(get_llama_proxy)()) if llama_proxy is None: raise HTTPException( status_code=status.HTTP_503_SERVICE_UNAVAILABLE, @@ -530,7 +525,6 @@ async def create_chat_completion( def iterator() -> Iterator[llama_cpp.ChatCompletionChunk]: yield first_response yield from iterator_or_completion - exit_stack.close() send_chan, recv_chan = anyio.create_memory_object_stream(10) return EventSourceResponse( @@ -540,13 +534,13 @@ def iterator() -> Iterator[llama_cpp.ChatCompletionChunk]: request=request, inner_send_chan=send_chan, iterator=iterator(), - on_complete=exit_stack.close, + on_complete=exit_stack.aclose, ), sep="\n", ping_message_factory=_ping_message_factory, ) else: - exit_stack.close() + await exit_stack.aclose() return iterator_or_completion From 1ea615424b7647a56946ce4fc3ea72b6ebaa867c Mon Sep 17 00:00:00 2001 From: Florents Tselai Date: Fri, 6 Dec 2024 12:11:50 +0200 Subject: [PATCH 25/55] fix(docs): Update development instructions (#1833) --- README.md | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index dbaec5077..e00456580 100644 --- a/README.md +++ b/README.md @@ -752,15 +752,29 @@ pip install --upgrade pip pip install -e . # if you want to use the fastapi / openapi server -pip install -e .[server] +pip install -e '.[server]' # to install all optional dependencies -pip install -e .[all] +pip install -e '.[all]' # to clear the local build cache make clean ``` +Now try running the tests + +```bash +pytest +``` + +There's a `Makefile` available with useful targets. +A typical workflow would look like this: + +```bash +make build +make test +``` + You can also test out specific commits of `llama.cpp` by checking out the desired commit in the `vendor/llama.cpp` submodule and then running `make clean` and `pip install -e .` again. Any changes in the `llama.h` API will require changes to the `llama_cpp/llama_cpp.py` file to match the new API (additional changes may be required elsewhere). 
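Editor's aside on the locking change in [PATCH 24/55] above: the server now acquires `llama_proxy` through an `anyio.Lock`-guarded async generator that is wrapped with `contextlib.asynccontextmanager` and entered on an `AsyncExitStack`, which is closed either by the streaming `on_complete` callback or just before a non-streaming response is returned. The sketch below reproduces that pattern in isolation; `fake_llama_proxy` and `handler` are placeholder names for illustration only, not part of the server code.

```python
import contextlib
import anyio

lock = anyio.Lock()  # module-level lock, as in llama_cpp/server/app.py

async def fake_llama_proxy():
    # Async-generator dependency: hold the lock while the caller uses the
    # shared resource, release it when the exit stack is closed.
    await lock.acquire()
    try:
        yield "llama-proxy"  # placeholder for the real LlamaProxy instance
    finally:
        lock.release()

async def handler():
    exit_stack = contextlib.AsyncExitStack()
    proxy = await exit_stack.enter_async_context(
        contextlib.asynccontextmanager(fake_llama_proxy)()
    )
    try:
        print(f"serving request with {proxy}")
    finally:
        # In the server this runs in the stream's on_complete callback or
        # just before a non-streaming response is returned.
        await exit_stack.aclose()

anyio.run(handler)
```

Because the lock is awaited rather than blocking a worker thread, a long-running streaming request no longer starves other requests waiting for the proxy.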
From d610477a0659fd7f56c5712adccebde4135a250c Mon Sep 17 00:00:00 2001 From: Luke Stanley <306671+lukestanley@users.noreply.github.com> Date: Fri, 6 Dec 2024 10:22:26 +0000 Subject: [PATCH 26/55] fix(examples): Refactor Batching notebook to use new sampler chain API (#1793) - Replaced deprecated llama_sample_token with llama_sampler_sample - Updated llama_token_to_piece signature to include lstrip and special arguments - Changed llama_load_model_from_file to use positional argument for params (currently does not accept a keyword argument for it) --- examples/notebooks/Batching.ipynb | 557 ++++++++---------------------- 1 file changed, 146 insertions(+), 411 deletions(-) diff --git a/examples/notebooks/Batching.ipynb b/examples/notebooks/Batching.ipynb index 73b28c744..be7fe9b52 100644 --- a/examples/notebooks/Batching.ipynb +++ b/examples/notebooks/Batching.ipynb @@ -6,6 +6,7 @@ "metadata": {}, "outputs": [], "source": [ + "import ctypes\n", "import llama_cpp" ] }, @@ -13,18 +14,7 @@ "cell_type": "code", "execution_count": 2, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "ggml_init_cublas: GGML_CUDA_FORCE_MMQ: no\n", - "ggml_init_cublas: CUDA_USE_TENSOR_CORES: yes\n", - "ggml_init_cublas: found 1 CUDA devices:\n", - " Device 0: NVIDIA GeForce RTX 2060, compute capability 7.5\n" - ] - } - ], + "outputs": [], "source": [ "llama_cpp.llama_backend_init(numa=False)" ] @@ -38,354 +28,94 @@ "name": "stderr", "output_type": "stream", "text": [ - "llama_model_loader: loaded meta data with 16 key-value pairs and 291 tensors from ../../models/mistral-7b-v0.1-GGUF/ggml-model-Q4_K.gguf (version GGUF V2)\n", - "llama_model_loader: - tensor 0: token_embd.weight q4_K [ 4096, 32000, 1, 1 ]\n", - "llama_model_loader: - tensor 1: output_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 2: output.weight q6_K [ 4096, 32000, 1, 1 ]\n", - "llama_model_loader: - tensor 3: blk.0.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 4: blk.0.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 5: blk.0.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 6: blk.0.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 7: blk.0.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 8: blk.0.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 9: blk.0.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 10: blk.0.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 11: blk.0.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 12: blk.1.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 13: blk.1.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 14: blk.1.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 15: blk.1.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 16: blk.1.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 17: blk.1.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 18: blk.1.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 19: blk.1.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 20: blk.1.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 21: blk.2.attn_q.weight q4_K [ 4096, 4096, 1, 1 
]\n", - "llama_model_loader: - tensor 22: blk.2.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 23: blk.2.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 24: blk.2.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 25: blk.2.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 26: blk.2.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 27: blk.2.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 28: blk.2.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 29: blk.2.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 30: blk.3.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 31: blk.3.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 32: blk.3.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 33: blk.3.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 34: blk.3.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 35: blk.3.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 36: blk.3.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 37: blk.3.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 38: blk.3.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 39: blk.4.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 40: blk.4.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 41: blk.4.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 42: blk.4.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 43: blk.4.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 44: blk.4.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 45: blk.4.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 46: blk.4.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 47: blk.4.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 48: blk.5.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 49: blk.5.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 50: blk.5.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 51: blk.5.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 52: blk.5.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 53: blk.5.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 54: blk.5.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 55: blk.5.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 56: blk.5.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 57: blk.6.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 58: blk.6.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 59: blk.6.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 60: blk.6.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 61: blk.6.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 62: blk.6.ffn_down.weight q6_K [ 
14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 63: blk.6.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 64: blk.6.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 65: blk.6.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 66: blk.7.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 67: blk.7.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 68: blk.7.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 69: blk.7.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 70: blk.7.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 71: blk.7.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 72: blk.7.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 73: blk.7.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 74: blk.7.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 75: blk.8.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 76: blk.8.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 77: blk.8.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 78: blk.8.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 79: blk.8.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 80: blk.8.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 81: blk.8.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 82: blk.8.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 83: blk.8.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 84: blk.9.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 85: blk.9.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 86: blk.9.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 87: blk.9.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 88: blk.9.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 89: blk.9.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 90: blk.9.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 91: blk.9.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 92: blk.9.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 93: blk.10.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 94: blk.10.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 95: blk.10.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 96: blk.10.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 97: blk.10.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 98: blk.10.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 99: blk.10.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 100: blk.10.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 101: blk.10.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 102: blk.11.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 103: 
blk.11.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 104: blk.11.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 105: blk.11.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 106: blk.11.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 107: blk.11.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 108: blk.11.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 109: blk.11.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 110: blk.11.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 111: blk.12.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 112: blk.12.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 113: blk.12.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 114: blk.12.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 115: blk.12.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 116: blk.12.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 117: blk.12.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 118: blk.12.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 119: blk.12.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 120: blk.13.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 121: blk.13.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 122: blk.13.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 123: blk.13.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 124: blk.13.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 125: blk.13.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 126: blk.13.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 127: blk.13.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 128: blk.13.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 129: blk.14.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 130: blk.14.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 131: blk.14.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 132: blk.14.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 133: blk.14.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 134: blk.14.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 135: blk.14.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 136: blk.14.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 137: blk.14.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 138: blk.15.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 139: blk.15.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 140: blk.15.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 141: blk.15.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 142: blk.15.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - 
tensor 143: blk.15.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 144: blk.15.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 145: blk.15.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 146: blk.15.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 147: blk.16.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 148: blk.16.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 149: blk.16.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 150: blk.16.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 151: blk.16.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 152: blk.16.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 153: blk.16.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 154: blk.16.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 155: blk.16.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 156: blk.17.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 157: blk.17.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 158: blk.17.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 159: blk.17.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 160: blk.17.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 161: blk.17.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 162: blk.17.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 163: blk.17.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 164: blk.17.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 165: blk.18.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 166: blk.18.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 167: blk.18.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 168: blk.18.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 169: blk.18.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 170: blk.18.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 171: blk.18.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 172: blk.18.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 173: blk.18.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 174: blk.19.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 175: blk.19.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 176: blk.19.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 177: blk.19.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 178: blk.19.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 179: blk.19.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 180: blk.19.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 181: blk.19.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 182: blk.19.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: 
- tensor 183: blk.20.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 184: blk.20.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 185: blk.20.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 186: blk.20.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 187: blk.20.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 188: blk.20.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 189: blk.20.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 190: blk.20.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 191: blk.20.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 192: blk.21.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 193: blk.21.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 194: blk.21.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 195: blk.21.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 196: blk.21.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 197: blk.21.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 198: blk.21.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 199: blk.21.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 200: blk.21.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 201: blk.22.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 202: blk.22.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 203: blk.22.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 204: blk.22.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 205: blk.22.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 206: blk.22.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 207: blk.22.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 208: blk.22.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 209: blk.22.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 210: blk.23.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 211: blk.23.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 212: blk.23.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 213: blk.23.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 214: blk.23.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 215: blk.23.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 216: blk.23.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 217: blk.23.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 218: blk.23.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 219: blk.24.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 220: blk.24.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 221: blk.24.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 222: blk.24.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - 
"llama_model_loader: - tensor 223: blk.24.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 224: blk.24.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 225: blk.24.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 226: blk.24.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 227: blk.24.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 228: blk.25.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 229: blk.25.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 230: blk.25.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 231: blk.25.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 232: blk.25.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 233: blk.25.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 234: blk.25.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 235: blk.25.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 236: blk.25.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 237: blk.26.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 238: blk.26.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 239: blk.26.attn_v.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 240: blk.26.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 241: blk.26.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 242: blk.26.ffn_down.weight q4_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 243: blk.26.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 244: blk.26.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 245: blk.26.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 246: blk.27.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 247: blk.27.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 248: blk.27.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 249: blk.27.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 250: blk.27.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 251: blk.27.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 252: blk.27.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 253: blk.27.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 254: blk.27.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 255: blk.28.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 256: blk.28.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 257: blk.28.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 258: blk.28.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 259: blk.28.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 260: blk.28.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 261: blk.28.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 262: blk.28.attn_norm.weight f32 [ 4096, 1, 1, 1 
]\n", - "llama_model_loader: - tensor 263: blk.28.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 264: blk.29.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 265: blk.29.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 266: blk.29.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 267: blk.29.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 268: blk.29.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 269: blk.29.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 270: blk.29.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 271: blk.29.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 272: blk.29.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 273: blk.30.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 274: blk.30.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 275: blk.30.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 276: blk.30.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 277: blk.30.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 278: blk.30.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 279: blk.30.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 280: blk.30.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 281: blk.30.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 282: blk.31.attn_q.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 283: blk.31.attn_k.weight q4_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 284: blk.31.attn_v.weight q6_K [ 4096, 1024, 1, 1 ]\n", - "llama_model_loader: - tensor 285: blk.31.attn_output.weight q4_K [ 4096, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 286: blk.31.ffn_gate.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 287: blk.31.ffn_down.weight q6_K [ 14336, 4096, 1, 1 ]\n", - "llama_model_loader: - tensor 288: blk.31.ffn_up.weight q4_K [ 4096, 14336, 1, 1 ]\n", - "llama_model_loader: - tensor 289: blk.31.attn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - tensor 290: blk.31.ffn_norm.weight f32 [ 4096, 1, 1, 1 ]\n", - "llama_model_loader: - kv 0: general.architecture str \n", - "llama_model_loader: - kv 1: general.name str \n", - "llama_model_loader: - kv 2: llama.context_length u32 \n", - "llama_model_loader: - kv 3: llama.embedding_length u32 \n", - "llama_model_loader: - kv 4: llama.block_count u32 \n", - "llama_model_loader: - kv 5: llama.feed_forward_length u32 \n", - "llama_model_loader: - kv 6: llama.rope.dimension_count u32 \n", - "llama_model_loader: - kv 7: llama.attention.head_count u32 \n", - "llama_model_loader: - kv 8: llama.attention.head_count_kv u32 \n", - "llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 \n", - "llama_model_loader: - kv 10: general.file_type u32 \n", - "llama_model_loader: - kv 11: tokenizer.ggml.model str \n", - "llama_model_loader: - kv 12: tokenizer.ggml.tokens arr \n", - "llama_model_loader: - kv 13: tokenizer.ggml.scores arr \n", - "llama_model_loader: - kv 14: tokenizer.ggml.token_type arr \n", - "llama_model_loader: - kv 15: general.quantization_version u32 \n", + "llama_model_loader: loaded 
meta data with 20 key-value pairs and 291 tensors from /workspaces/llama-cpp-python/mistral-7b-v0.1.Q2_K.gguf (version GGUF V2)\n", + "llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", + "llama_model_loader: - kv 0: general.architecture str = llama\n", + "llama_model_loader: - kv 1: general.name str = mistralai_mistral-7b-v0.1\n", + "llama_model_loader: - kv 2: llama.context_length u32 = 32768\n", + "llama_model_loader: - kv 3: llama.embedding_length u32 = 4096\n", + "llama_model_loader: - kv 4: llama.block_count u32 = 32\n", + "llama_model_loader: - kv 5: llama.feed_forward_length u32 = 14336\n", + "llama_model_loader: - kv 6: llama.rope.dimension_count u32 = 128\n", + "llama_model_loader: - kv 7: llama.attention.head_count u32 = 32\n", + "llama_model_loader: - kv 8: llama.attention.head_count_kv u32 = 8\n", + "llama_model_loader: - kv 9: llama.attention.layer_norm_rms_epsilon f32 = 0.000010\n", + "llama_model_loader: - kv 10: llama.rope.freq_base f32 = 10000.000000\n", + "llama_model_loader: - kv 11: general.file_type u32 = 10\n", + "llama_model_loader: - kv 12: tokenizer.ggml.model str = llama\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "llama_model_loader: - kv 13: tokenizer.ggml.tokens arr[str,32000] = [\"\", \"\", \"\", \"<0x00>\", \"<...\n", + "llama_model_loader: - kv 14: tokenizer.ggml.scores arr[f32,32000] = [0.000000, 0.000000, 0.000000, 0.0000...\n", + "llama_model_loader: - kv 15: tokenizer.ggml.token_type arr[i32,32000] = [2, 3, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...\n", + "llama_model_loader: - kv 16: tokenizer.ggml.bos_token_id u32 = 1\n", + "llama_model_loader: - kv 17: tokenizer.ggml.eos_token_id u32 = 2\n", + "llama_model_loader: - kv 18: tokenizer.ggml.unknown_token_id u32 = 0\n", + "llama_model_loader: - kv 19: general.quantization_version u32 = 2\n", "llama_model_loader: - type f32: 65 tensors\n", - "llama_model_loader: - type q4_K: 193 tensors\n", - "llama_model_loader: - type q6_K: 33 tensors\n", - "llm_load_vocab: special tokens definition check successful ( 259/32000 ).\n", + "llama_model_loader: - type q2_K: 65 tensors\n", + "llama_model_loader: - type q3_K: 160 tensors\n", + "llama_model_loader: - type q6_K: 1 tensors\n", + "llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", + "llm_load_vocab: special tokens cache size = 3\n", + "llm_load_vocab: token to piece cache size = 0.1637 MB\n", "llm_load_print_meta: format = GGUF V2\n", "llm_load_print_meta: arch = llama\n", "llm_load_print_meta: vocab type = SPM\n", "llm_load_print_meta: n_vocab = 32000\n", "llm_load_print_meta: n_merges = 0\n", - "llm_load_print_meta: n_ctx_train = 4096\n", + "llm_load_print_meta: vocab_only = 0\n", + "llm_load_print_meta: n_ctx_train = 32768\n", "llm_load_print_meta: n_embd = 4096\n", + "llm_load_print_meta: n_layer = 32\n", "llm_load_print_meta: n_head = 32\n", "llm_load_print_meta: n_head_kv = 8\n", - "llm_load_print_meta: n_layer = 32\n", "llm_load_print_meta: n_rot = 128\n", + "llm_load_print_meta: n_swa = 0\n", + "llm_load_print_meta: n_embd_head_k = 128\n", + "llm_load_print_meta: n_embd_head_v = 128\n", "llm_load_print_meta: n_gqa = 4\n", + "llm_load_print_meta: n_embd_k_gqa = 1024\n", + "llm_load_print_meta: n_embd_v_gqa = 1024\n", "llm_load_print_meta: f_norm_eps = 0.0e+00\n", "llm_load_print_meta: f_norm_rms_eps = 1.0e-05\n", "llm_load_print_meta: f_clamp_kqv = 0.0e+00\n", "llm_load_print_meta: f_max_alibi_bias = 0.0e+00\n", + 
"llm_load_print_meta: f_logit_scale = 0.0e+00\n", "llm_load_print_meta: n_ff = 14336\n", + "llm_load_print_meta: n_expert = 0\n", + "llm_load_print_meta: n_expert_used = 0\n", + "llm_load_print_meta: causal attn = 1\n", + "llm_load_print_meta: pooling type = 0\n", + "llm_load_print_meta: rope type = 0\n", + "llm_load_print_meta: rope scaling = linear\n", "llm_load_print_meta: freq_base_train = 10000.0\n", "llm_load_print_meta: freq_scale_train = 1\n", + "llm_load_print_meta: n_ctx_orig_yarn = 32768\n", + "llm_load_print_meta: rope_finetuned = unknown\n", + "llm_load_print_meta: ssm_d_conv = 0\n", + "llm_load_print_meta: ssm_d_inner = 0\n", + "llm_load_print_meta: ssm_d_state = 0\n", + "llm_load_print_meta: ssm_dt_rank = 0\n", + "llm_load_print_meta: ssm_dt_b_c_rms = 0\n", "llm_load_print_meta: model type = 7B\n", - "llm_load_print_meta: model ftype = mostly Q4_K - Medium\n", + "llm_load_print_meta: model ftype = Q2_K - Medium\n", "llm_load_print_meta: model params = 7.24 B\n", - "llm_load_print_meta: model size = 4.07 GiB (4.83 BPW) \n", - "llm_load_print_meta: general.name = LLaMA v2\n", - "llm_load_print_meta: BOS token = 1 ''\n", - "llm_load_print_meta: EOS token = 2 ''\n", - "llm_load_print_meta: UNK token = 0 ''\n", - "llm_load_print_meta: LF token = 13 '<0x0A>'\n", - "llm_load_tensors: ggml ctx size = 0.10 MB\n", - "llm_load_tensors: using CUDA for GPU acceleration\n", - "llm_load_tensors: mem required = 70.41 MB\n", - "llm_load_tensors: offloading 32 repeating layers to GPU\n", - "llm_load_tensors: offloading non-repeating layers to GPU\n", - "llm_load_tensors: offloaded 35/35 layers to GPU\n", - "llm_load_tensors: VRAM used: 4095.05 MB\n", - ".................................................................................................\n" + "llm_load_print_meta: model size = 2.87 GiB (3.41 BPW) \n", + "llm_load_print_meta: general.name = mistralai_mistral-7b-v0.1\n", + "llm_load_print_meta: BOS token = 1 ''\n", + "llm_load_print_meta: EOS token = 2 ''\n", + "llm_load_print_meta: UNK token = 0 ''\n", + "llm_load_print_meta: LF token = 13 '<0x0A>'\n", + "llm_load_print_meta: EOG token = 2 ''\n", + "llm_load_print_meta: max token length = 48\n", + "llm_load_tensors: ggml ctx size = 0.14 MiB\n", + "llm_load_tensors: CPU buffer size = 2939.57 MiB\n", + "..................................................................................................\n" ] } ], @@ -393,7 +123,7 @@ "params = llama_cpp.llama_model_default_params()\n", "params.n_gpu_layers = 35\n", "model = llama_cpp.llama_load_model_from_file(\n", - " b\"../../models/mistral-7b-v0.1-GGUF/ggml-model-Q4_K.gguf\", params=params\n", + " b\"/workspaces/llama-cpp-python/mistral-7b-v0.1.Q2_K.gguf\", params\n", ") # Update this to whatever" ] }, @@ -406,7 +136,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "[1, 1014, 2936, 9060, 285, 1142]\n", + "[1, 415, 2936, 9060, 285, 1142]\n", "58\n" ] } @@ -436,17 +166,18 @@ "name": "stderr", "output_type": "stream", "text": [ - "llama_new_context_with_model: n_ctx = 58\n", + "llama_new_context_with_model: n_ctx = 64\n", + "llama_new_context_with_model: n_batch = 32\n", + "llama_new_context_with_model: n_ubatch = 32\n", + "llama_new_context_with_model: flash_attn = 0\n", "llama_new_context_with_model: freq_base = 10000.0\n", "llama_new_context_with_model: freq_scale = 1\n", - "llama_kv_cache_init: offloading v cache to GPU\n", - "llama_kv_cache_init: offloading k cache to GPU\n", - "llama_kv_cache_init: VRAM kv self = 7.25 MB\n", - "llama_new_context_with_model: kv self size 
= 7.25 MB\n", - "llama_build_graph: non-view tensors processed: 740/740\n", - "llama_new_context_with_model: compute buffer total size = 10.63 MB\n", - "llama_new_context_with_model: VRAM scratch buffer: 4.51 MB\n", - "llama_new_context_with_model: total VRAM used: 4106.81 MB (model: 4095.05 MB, context: 11.76 MB)\n" + "llama_kv_cache_init: CPU KV buffer size = 8.00 MiB\n", + "llama_new_context_with_model: KV self size = 8.00 MiB, K (f16): 4.00 MiB, V (f16): 4.00 MiB\n", + "llama_new_context_with_model: CPU output buffer size = 0.12 MiB\n", + "llama_new_context_with_model: CPU compute buffer size = 5.01 MiB\n", + "llama_new_context_with_model: graph nodes = 1030\n", + "llama_new_context_with_model: graph splits = 1\n" ] } ], @@ -476,7 +207,7 @@ "metadata": {}, "outputs": [], "source": [ - "import ctypes\n", + "\n", "\n", "batch.n_tokens = tokens_len\n", "for i in range(tokens_len):\n", @@ -502,10 +233,35 @@ " llama_cpp.llama_kv_cache_seq_cp(ctx, 0, i, 0, batch.n_tokens)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": 9, "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Initialize sampler chain with default parameters\n", + "sparams = llama_cpp.llama_sampler_chain_default_params()\n", + "sampler_chain = llama_cpp.llama_sampler_chain_init(sparams)\n", + "\n", + "# Add top_k, top_p, temperature, and final distribution-based sampler\n", + "llama_cpp.llama_sampler_chain_add(sampler_chain, llama_cpp.llama_sampler_init_top_k(40))\n", + "llama_cpp.llama_sampler_chain_add(sampler_chain, llama_cpp.llama_sampler_init_top_p(0.9, 1))\n", + "llama_cpp.llama_sampler_chain_add(sampler_chain, llama_cpp.llama_sampler_init_temp(0.4))\n", + "llama_cpp.llama_sampler_chain_add(sampler_chain, llama_cpp.llama_sampler_init_dist(1234)) # Final \"dist\" sampler" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, "outputs": [ { "name": "stdout", @@ -514,61 +270,59 @@ "7\n", "[' j', ' jumped']\n", "8\n", - "[' jumps', ' jumped over']\n", + "[' j over', ' jumped over']\n", "9\n", - "[' jumps over', ' jumped over the']\n", + "[' j over the', ' jumped over the']\n", "10\n", - "[' jumps over the', ' jumped over the lazy']\n", + "[' j over the lazy', ' jumped over the lazy']\n", "11\n", - "[' jumps over the lazy', ' jumped over the lazy dog']\n", + "[' j over the lazy dog', ' jumped over the lazy dog']\n", "12\n", - "[' jumps over the lazy dog', ' jumped over the lazy dog.']\n", + "[' j over the lazy dog.', ' jumped over the lazy dog\\n']\n", "13\n", - "[' jumps over the lazy dog.', ' jumped over the lazy dog.\\n']\n", + "[' j over the lazy dog. También', ' jumped over the lazy dog\\nGroupLayout']\n", "14\n", - "[' jumps over the lazy dog.\\n', ' jumped over the lazy dog.\\n\\n']\n", + "[' j over the lazy dog. También:', ' jumped over the lazy dog\\nGroupLayouting']\n", "15\n", - "[' jumps over the lazy dog.\\n\\n', ' jumped over the lazy dog.\\n\\nThe']\n", + "[' j over the lazy dog. También: is', ' jumped over the lazy dog\\nGroupLayouting is']\n", "16\n", - "[' jumps over the lazy dog.\\n\\nI', ' jumped over the lazy dog.\\n\\nThe quick']\n", + "[' j over the lazy dog. También: is a', ' jumped over the lazy dog\\nGroupLayouting is a']\n", "17\n", - "[' jumps over the lazy dog.\\n\\nI’', ' jumped over the lazy dog.\\n\\nThe quick brown']\n", + "[' j over the lazy dog. 
También: is a technique', ' jumped over the lazy dog\\nGroupLayouting is a common']\n", "18\n", - "[' jumps over the lazy dog.\\n\\nI’m', ' jumped over the lazy dog.\\n\\nThe quick brown f']\n", + "[' j over the lazy dog. También: is a technique practice', ' jumped over the lazy dog\\nGroupLayouting is a common practice']\n", "19\n", - "[' jumps over the lazy dog.\\n\\nI’m not', ' jumped over the lazy dog.\\n\\nThe quick brown fox']\n", + "[' j over the lazy dog. También: is a technique practice in', ' jumped over the lazy dog\\nGroupLayouting is a common practice in']\n", "20\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped']\n", + "[' j over the lazy dog. También: is a technique practice in the', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the']\n", "21\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over']\n", + "[' j over the lazy dog. También: is a technique practice in the real', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media']\n", "22\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the']\n", + "[' j over the lazy dog. También: is a technique practice in the real-', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry']\n", "23\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy']\n", + "[' j over the lazy dog. También: is a technique practice in the real-.', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry.']\n", "24\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog']\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However']\n", "25\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.']\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We,', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However,']\n", "26\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the most', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.\\n']\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there']\n", "27\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the most famous', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.\\n\\n']\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has']\n", "28\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the most famous sentence', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.\\n\\nThe']\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. 
However, there has been']\n", "29\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the most famous sentence in', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.\\n\\nThe quick']\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has been little']\n", "30\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the most famous sentence in the', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.\\n\\nThe quick brown']\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little research', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has been little emp']\n", "31\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the most famous sentence in the English', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.\\n\\nThe quick brown f']\n", + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little researchirical', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. However, there has been little empirical']\n", "32\n", - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the most famous sentence in the English language', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.\\n\\nThe quick brown fox']\n" + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little researchirical research', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. 
However, there has been little empirical research']\n" ] } ], "source": [ - "import ctypes\n", - "\n", "streams = [\"\"] * n_parallel\n", "i_batch = [batch.n_tokens - 1] * n_parallel\n", "\n", @@ -581,36 +335,17 @@ " if i_batch[i] < 0:\n", " continue\n", "\n", - " n_vocab = llama_cpp.llama_n_vocab(model)\n", - " logits = llama_cpp.llama_get_logits_ith(ctx, i_batch[i])\n", - "\n", - " candidates = (llama_cpp.llama_token_data * n_vocab)()\n", - "\n", - " for token_id in range(n_vocab):\n", - " candidates[token_id].id = token_id\n", - " candidates[token_id].logit = logits[token_id]\n", - " candidates[token_id].p = 0.0\n", - "\n", - " candidates_p = llama_cpp.llama_token_data_array(\n", - " candidates, len(candidates), False\n", - " )\n", - "\n", - " top_k = 40\n", - " top_p = 0.9\n", - " temp = 0.4\n", - "\n", - " llama_cpp.llama_sample_top_k(ctx, ctypes.byref(candidates_p), top_k, 1)\n", - " llama_cpp.llama_sample_top_p(ctx, ctypes.byref(candidates_p), top_p, 1)\n", - " llama_cpp.llama_sample_temp(ctx, ctypes.byref(candidates_p), temp)\n", - "\n", - " new_token_id = llama_cpp.llama_sample_token(ctx, ctypes.byref(candidates_p))\n", + " # Sample the next token using the sampler chain\n", + " new_token_id = llama_cpp.llama_sampler_sample(sampler_chain, ctx, -1)\n", "\n", " if new_token_id == llama_cpp.llama_token_eos(ctx) or n_cur == n_len:\n", " i_batch[i] = -1\n", " continue\n", "\n", " buf = (ctypes.c_char * 32)()\n", - " outlen = llama_cpp.llama_token_to_piece(model, new_token_id, buf, len(buf))\n", + " \n", + " # Convert token ID to text\n", + " outlen = llama_cpp.llama_token_to_piece(model, new_token_id, buf, len(buf), 0, False)\n", " streams[i] += bytes(buf[:outlen]).decode(\"utf-8\")\n", "\n", " batch.token[batch.n_tokens] = new_token_id\n", @@ -637,14 +372,14 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "[' jumps over the lazy dog.\\n\\nI’m not sure if that’s the most famous sentence in the English language', ' jumped over the lazy dog.\\n\\nThe quick brown fox jumped over the lazy dog.\\n\\nThe quick brown fox']\n" + "[' j over the lazy dog. También: is a technique practice in the real-. We, when is been little researchirical research', ' jumped over the lazy dog\\nGroupLayouting is a common practice in the media industry. 
However, there has been little empirical research']\n" ] } ], @@ -654,7 +389,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -663,7 +398,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -672,7 +407,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -681,7 +416,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -705,7 +440,7 @@ ], "metadata": { "kernelspec": { - "display_name": ".venv", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -719,7 +454,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5+" + "version": "3.12.1" }, "orig_nbformat": 4 }, From 4f0ec659f9519d3009873f6adf6aa8af5ab32d8b Mon Sep 17 00:00:00 2001 From: Adam Jones Date: Fri, 6 Dec 2024 12:35:46 +0000 Subject: [PATCH 27/55] fix: chat API logprobs format (#1788) * fix: chat API logprobs format * Fix optional properties --- Makefile | 2 +- llama_cpp/llama_chat_format.py | 53 ++++++++++++++++++++++++---------- llama_cpp/llama_types.py | 22 ++++++++++++-- 3 files changed, 59 insertions(+), 18 deletions(-) diff --git a/Makefile b/Makefile index be9b55a3e..52b17b61a 100644 --- a/Makefile +++ b/Makefile @@ -62,7 +62,7 @@ docker: docker build -t llama-cpp-python:latest -f docker/simple/Dockerfile . run-server: - uvicorn --factory llama.server:app --host ${HOST} --port ${PORT} + python llama_cpp/server --model ${MODEL} clean: - cd vendor/llama.cpp && make clean diff --git a/llama_cpp/llama_chat_format.py b/llama_cpp/llama_chat_format.py index dfb0af65e..17575c700 100644 --- a/llama_cpp/llama_chat_format.py +++ b/llama_cpp/llama_chat_format.py @@ -259,6 +259,31 @@ def to_chat_handler(self) -> LlamaChatCompletionHandler: return chat_formatter_to_chat_completion_handler(self) +def _convert_text_completion_logprobs_to_chat( + logprobs: Optional[llama_types.CompletionLogprobs], +) -> llama_types.ChatCompletionLogprobs: + if logprobs is None: + return None + + return { + "content": [ + { + "token": token, + "bytes": None, + "logprob": logprob, + "top_logprobs": [ + { + "token": top_token, + "logprob": top_logprob, + "bytes": None, + } + for top_token, top_logprob in top_logprobs.items() + ], + } for (token, logprob, top_logprobs) in zip(logprobs["tokens"], logprobs["token_logprobs"], logprobs["top_logprobs"]) + ], + "refusal": None, + } + def _convert_text_completion_to_chat( completion: llama_types.Completion, ) -> llama_types.ChatCompletion: @@ -275,7 +300,7 @@ def _convert_text_completion_to_chat( "role": "assistant", "content": completion["choices"][0]["text"], }, - "logprobs": completion["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), "finish_reason": completion["choices"][0]["finish_reason"], } ], @@ -319,7 +344,7 @@ def _convert_text_completion_chunks_to_chat( if chunk["choices"][0]["finish_reason"] is None else {} ), - "logprobs": chunk["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), "finish_reason": chunk["choices"][0]["finish_reason"], } ], @@ -382,7 +407,7 @@ def _convert_completion_to_chat_function( } ], }, - "logprobs": completion["choices"][0]["logprobs"], + "logprobs": 
_convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), "finish_reason": "tool_calls", } ], @@ -435,7 +460,7 @@ def _stream_response_to_function_stream( { "index": 0, "finish_reason": None, - "logprobs": chunk["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), "delta": { "role": None, "content": None, @@ -472,7 +497,7 @@ def _stream_response_to_function_stream( { "index": 0, "finish_reason": None, - "logprobs": chunk["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), "delta": { "role": None, "content": None, @@ -1716,7 +1741,7 @@ def message_to_str(msg: llama_types.ChatCompletionRequestMessage): } ], }, - "logprobs": completion["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), "finish_reason": "tool_calls", } ], @@ -2128,7 +2153,7 @@ def generate_streaming(tools, functions, function_call, prompt): choices=[ { "index": 0, - "logprobs": chunk["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), "delta": { "role": None, "content": None, @@ -2230,7 +2255,7 @@ def generate_streaming(tools, functions, function_call, prompt): choices=[ { "index": 0, - "logprobs": chunk["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), "delta": { "role": "assistant", "content": None, @@ -2268,9 +2293,7 @@ def generate_streaming(tools, functions, function_call, prompt): choices=[ { "index": 0, - "logprobs": chunk["choices"][0][ - "logprobs" - ], + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), "delta": { "role": "assistant", "content": buffer.pop(0), @@ -2293,7 +2316,7 @@ def generate_streaming(tools, functions, function_call, prompt): choices=[ { "index": 0, - "logprobs": chunk["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), "delta": { "role": "assistant", "content": ( @@ -2379,7 +2402,7 @@ def generate_streaming(tools, functions, function_call, prompt): choices=[ { "index": 0, - "logprobs": chunk["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(chunk["choices"][0]["logprobs"]), "delta": { "role": None, "content": None, @@ -2613,7 +2636,7 @@ def generate_streaming(tools, functions, function_call, prompt): choices=[ { "index": 0, - "logprobs": completion["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), "message": { "role": "assistant", "content": None if content == "" else content, @@ -3745,7 +3768,7 @@ def chatml_function_calling( { "finish_reason": "tool_calls", "index": 0, - "logprobs": completion["choices"][0]["logprobs"], + "logprobs": _convert_text_completion_logprobs_to_chat(completion["choices"][0]["logprobs"]), "message": { "role": "assistant", "content": None, diff --git a/llama_cpp/llama_types.py b/llama_cpp/llama_types.py index 57836ee76..f647822ff 100644 --- a/llama_cpp/llama_types.py +++ b/llama_cpp/llama_types.py @@ -82,10 +82,28 @@ class ChatCompletionFunction(TypedDict): parameters: Dict[str, JsonType] # TODO: make this more specific +class ChatCompletionTopLogprobToken(TypedDict): + token: str + logprob: float + bytes: Optional[List[int]] + + +class ChatCompletionLogprobToken(ChatCompletionTopLogprobToken): + token: str + logprob: 
float + bytes: Optional[List[int]] + top_logprobs: List[ChatCompletionTopLogprobToken] + + +class ChatCompletionLogprobs(TypedDict): + content: Optional[List[ChatCompletionLogprobToken]] + refusal: Optional[List[ChatCompletionLogprobToken]] + + class ChatCompletionResponseChoice(TypedDict): index: int message: "ChatCompletionResponseMessage" - logprobs: Optional[CompletionLogprobs] + logprobs: Optional[ChatCompletionLogprobs] finish_reason: Optional[str] @@ -134,7 +152,7 @@ class ChatCompletionStreamResponseChoice(TypedDict): ChatCompletionStreamResponseDelta, ChatCompletionStreamResponseDeltaEmpty ] finish_reason: Optional[Literal["stop", "length", "tool_calls", "function_call"]] - logprobs: NotRequired[Optional[CompletionLogprobs]] + logprobs: NotRequired[Optional[ChatCompletionLogprobs]] class CreateChatCompletionStreamResponse(TypedDict): From df136cb3c7d3a7a9a4e30ef2d16c5e42e0e36e43 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 07:36:47 -0500 Subject: [PATCH 28/55] misc: Update development Makefile --- Makefile | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index be9b55a3e..37ef9b1d6 100644 --- a/Makefile +++ b/Makefile @@ -21,6 +21,15 @@ build.debug: --config-settings=cmake.args="-DCMAKE_BUILD_TYPE=Debug;-DCMAKE_C_FLAGS='-ggdb -O0';-DCMAKE_CXX_FLAGS='-ggdb -O0'" \ --editable . +build.debug.extra: + python3 -m pip install \ + --verbose \ + --config-settings=cmake.verbose=true \ + --config-settings=logging.level=INFO \ + --config-settings=install.strip=false \ + --config-settings=cmake.args="-DCMAKE_BUILD_TYPE=Debug;-DCMAKE_C_FLAGS='-fsanitize=address -ggdb -O0';-DCMAKE_CXX_FLAGS='-fsanitize=address -ggdb -O0'" \ + --editable . + build.cuda: CMAKE_ARGS="-DGGML_CUDA=on" python3 -m pip install --verbose -e . @@ -46,7 +55,7 @@ build.rpc: CMAKE_ARGS="-DGGML_RPC=on" python3 -m pip install --verbose -e . build.sdist: - python3 -m build --sdist + python3 -m build --sdist --verbose deploy.pypi: python3 -m twine upload dist/* @@ -56,7 +65,7 @@ deploy.gh-docs: mkdocs gh-deploy test: - python3 -m pytest + python3 -m pytest --full-trace -v docker: docker build -t llama-cpp-python:latest -f docker/simple/Dockerfile . @@ -68,11 +77,11 @@ clean: - cd vendor/llama.cpp && make clean - cd vendor/llama.cpp && rm libllama.so - rm -rf _skbuild - - rm llama_cpp/*.so - - rm llama_cpp/*.dylib - - rm llama_cpp/*.metal - - rm llama_cpp/*.dll - - rm llama_cpp/*.lib + - rm llama_cpp/lib/*.so + - rm llama_cpp/lib/*.dylib + - rm llama_cpp/lib/*.metal + - rm llama_cpp/lib/*.dll + - rm llama_cpp/lib/*.lib .PHONY: \ update \ From b9b50e50b9d29b2034c50e1b0ee09dc7c47bbc63 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 6 Dec 2024 07:37:53 -0500 Subject: [PATCH 29/55] misc: Update run server command --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 78b11cf11..26ddf2c7a 100644 --- a/Makefile +++ b/Makefile @@ -71,7 +71,7 @@ docker: docker build -t llama-cpp-python:latest -f docker/simple/Dockerfile . 
run-server: - python llama_cpp/server --model ${MODEL} + python3 -m llama_cpp.server --model ${MODEL} clean: - cd vendor/llama.cpp && make clean From 5585f8afe1aea74dca7364f18a76f0426381001d Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 8 Dec 2024 22:40:19 -0500 Subject: [PATCH 30/55] feat: Update llama.cpp --- llama_cpp/llama_cpp.py | 12 ++++++------ vendor/llama.cpp | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index cd59bb430..44a1461db 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -344,9 +344,9 @@ # LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors # LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors # LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors +# //LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // removed from gguf files, use Q4_0 and runtime repack +# //LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // removed from gguf files, use Q4_0 and runtime repack +# //LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // removed from gguf files, use Q4_0 and runtime repack # LLAMA_FTYPE_MOSTLY_TQ1_0 = 36, // except 1d tensors # LLAMA_FTYPE_MOSTLY_TQ2_0 = 37, // except 1d tensors # @@ -382,9 +382,9 @@ LLAMA_FTYPE_MOSTLY_IQ4_XS = 30 LLAMA_FTYPE_MOSTLY_IQ1_M = 31 LLAMA_FTYPE_MOSTLY_BF16 = 32 -LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33 -LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34 -LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35 +# LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33 +# LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34 +# LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35 LLAMA_FTYPE_MOSTLY_TQ1_0 = 36 LLAMA_FTYPE_MOSTLY_TQ2_0 = 37 LLAMA_FTYPE_GUESSED = 1024 diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 7736837d6..ce8784bdb 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 7736837d62efed1dbebfe579472fca041eda12d6 +Subproject commit ce8784bdb153ff7794dde5a50b0ebfa51baa6171 From 61508c265a4e295d69aaee7533bde5b51278e574 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 8 Dec 2024 22:55:03 -0500 Subject: [PATCH 31/55] Add CUDA 12.5 and 12.6 to generated output wheels --- .github/workflows/generate-index-from-release.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/generate-index-from-release.yaml b/.github/workflows/generate-index-from-release.yaml index 51961088d..5c7171bc1 100644 --- a/.github/workflows/generate-index-from-release.yaml +++ b/.github/workflows/generate-index-from-release.yaml @@ -44,6 +44,8 @@ jobs: ./scripts/releases-to-pep-503.sh index/whl/cu122 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu122$' ./scripts/releases-to-pep-503.sh index/whl/cu123 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu123$' ./scripts/releases-to-pep-503.sh index/whl/cu124 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' + ./scripts/releases-to-pep-503.sh index/whl/cu125 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' + ./scripts/releases-to-pep-503.sh index/whl/cu126 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' ./scripts/releases-to-pep-503.sh index/whl/metal '^[v]?[0-9]+\.[0-9]+\.[0-9]+-metal$' - name: Upload artifact uses: actions/upload-pages-artifact@v3 From a9fe0f872527ffc1bf64e125b7996ec4dfa598eb Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 8 Dec 2024 22:56:45 -0500 Subject: [PATCH 32/55] chore: Bump version --- CHANGELOG.md | 14 ++++++++++++++ llama_cpp/__init__.py | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a194fece..331161a0c 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.3] + +- feat: Update llama.cpp to ggerganov/llama.cpp@ce8784bdb153ff7794dde5a50b0ebfa51baa6171 +- fix: chat API logprobs format by @domdomegg in #1788 +- feat: Add support for CUDA 12.6, fix CUDA 12.5 by @Smartappli in #1775 +- fix: Make content not required in ChatCompletionRequestAssistantMessage by @feloy in #1807 +- fix: Fix pickling of Llama class by setting seed from _seed member by @abetlen in 2523472c3eccb9ab9277117cc4ff705212b6888a +- fix: Fix logit-bias type hint by @ddh0 in #1802 +- fix(server): Avoid thread starvation on many concurrent requests by making use of asyncio to lock llama_proxy context by @gjpower in #1798 +- fix(server): Added missing exit_stack.close() to /v1/chat/completions by @Ian321 in #1796 +- fix(examples): Refactor Batching notebook to use new sampler chain API by @lukestanley in #1793 +- fix(docs): Update development instructions by @Florents-Tselai in #1833 +- fix(docs): Remove ref to llama_eval in llama_cpp.py docs by @richdougherty in #1819 + ## [0.3.2] - feat: Update llama.cpp to ggerganov/llama.cpp@74d73dc85cc2057446bf63cc37ff649ae7cebd80 diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py index cda38a841..36c0eafc0 100644 --- a/llama_cpp/__init__.py +++ b/llama_cpp/__init__.py @@ -1,4 +1,4 @@ from .llama_cpp import * from .llama import * -__version__ = "0.3.2" +__version__ = "0.3.3" From ca808028bd16b8327bd84128d48015a4b1304690 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 8 Dec 2024 23:51:49 -0500 Subject: [PATCH 33/55] fix(ci): hotfix for wheels --- .github/workflows/build-wheels-cuda.yaml | 2 +- .github/workflows/build-wheels-metal.yaml | 2 +- .github/workflows/generate-index-from-release.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build-wheels-cuda.yaml b/.github/workflows/build-wheels-cuda.yaml index 8624ec6d9..3d410148f 100644 --- a/.github/workflows/build-wheels-cuda.yaml +++ b/.github/workflows/build-wheels-cuda.yaml @@ -22,7 +22,7 @@ jobs: $matrix = @{ 'os' = @('ubuntu-latest', 'windows-2019') 'pyver' = @("3.9", "3.10", "3.11", "3.12") - 'cuda' = @("12.1.1", "12.2.2", "12.3.2", "12.4.1", "12.5.1", "12.6.1") + 'cuda' = @("12.1.1", "12.2.2", "12.3.2", "12.4.1") #, "12.5.1", "12.6.1") 'releasetag' = @("basic") } diff --git a/.github/workflows/build-wheels-metal.yaml b/.github/workflows/build-wheels-metal.yaml index 01da3cef5..9b97bf2f5 100644 --- a/.github/workflows/build-wheels-metal.yaml +++ b/.github/workflows/build-wheels-metal.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [macos-12, macos-13, macos-14] + os: [macos-13, macos-14, macos-15] steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/generate-index-from-release.yaml b/.github/workflows/generate-index-from-release.yaml index 5c7171bc1..255ee67d6 100644 --- a/.github/workflows/generate-index-from-release.yaml +++ b/.github/workflows/generate-index-from-release.yaml @@ -44,8 +44,8 @@ jobs: ./scripts/releases-to-pep-503.sh index/whl/cu122 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu122$' ./scripts/releases-to-pep-503.sh index/whl/cu123 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu123$' ./scripts/releases-to-pep-503.sh index/whl/cu124 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' - ./scripts/releases-to-pep-503.sh index/whl/cu125 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' - ./scripts/releases-to-pep-503.sh index/whl/cu126 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' + # 
./scripts/releases-to-pep-503.sh index/whl/cu125 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' + # ./scripts/releases-to-pep-503.sh index/whl/cu126 '^[v]?[0-9]+\.[0-9]+\.[0-9]+-cu124$' ./scripts/releases-to-pep-503.sh index/whl/metal '^[v]?[0-9]+\.[0-9]+\.[0-9]+-metal$' - name: Upload artifact uses: actions/upload-pages-artifact@v3 From 002f58300fdaf865cf1d305b3721c5d96003c440 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Sun, 8 Dec 2024 23:53:30 -0500 Subject: [PATCH 34/55] chore: Bump version --- CHANGELOG.md | 4 ++++ llama_cpp/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 331161a0c..2a1d3225f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.4] + +- fix(ci): Build wheels for macos 13-15, cuda 12.1-12.4 by @abetlen in ca808028bd16b8327bd84128d48015a4b1304690 + ## [0.3.3] - feat: Update llama.cpp to ggerganov/llama.cpp@ce8784bdb153ff7794dde5a50b0ebfa51baa6171 diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py index 36c0eafc0..dd8f1fc58 100644 --- a/llama_cpp/__init__.py +++ b/llama_cpp/__init__.py @@ -1,4 +1,4 @@ from .llama_cpp import * from .llama import * -__version__ = "0.3.3" +__version__ = "0.3.4" From ea4d86ac25e0f39370ff3f54550387a02467ab77 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Mon, 9 Dec 2024 00:42:15 -0500 Subject: [PATCH 35/55] fix(ci): update macos runner image to non-deprecated version --- .github/workflows/build-and-release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-and-release.yaml b/.github/workflows/build-and-release.yaml index 1d549ed68..7307c85ab 100644 --- a/.github/workflows/build-and-release.yaml +++ b/.github/workflows/build-and-release.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-20.04, windows-2019, macos-12] + os: [ubuntu-20.04, windows-2019, macos-13] steps: - uses: actions/checkout@v4 From afedfc888462f9a6e809dc9455eb3b663764cc3f Mon Sep 17 00:00:00 2001 From: Graeme Power Date: Mon, 9 Dec 2024 23:47:18 +0000 Subject: [PATCH 36/55] fix: add missing await statements for async exit_stack handling (#1858) --- llama_cpp/server/app.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py index f7c028475..b6db453b8 100644 --- a/llama_cpp/server/app.py +++ b/llama_cpp/server/app.py @@ -318,7 +318,7 @@ async def create_completion( Iterator[llama_cpp.CreateCompletionStreamResponse], ] = await run_in_threadpool(llama, **kwargs) except Exception as err: - exit_stack.close() + await exit_stack.aclose() raise err if isinstance(iterator_or_completion, Iterator): @@ -475,7 +475,7 @@ async def create_chat_completion( # is complete. 
# https://github.com/tiangolo/fastapi/issues/11143 exit_stack = contextlib.AsyncExitStack() - llama_proxy = exit_stack.enter_async_context(contextlib.asynccontextmanager(get_llama_proxy)()) + llama_proxy = await exit_stack.enter_async_context(contextlib.asynccontextmanager(get_llama_proxy)()) if llama_proxy is None: raise HTTPException( status_code=status.HTTP_503_SERVICE_UNAVAILABLE, @@ -513,7 +513,7 @@ async def create_chat_completion( llama_cpp.ChatCompletion, Iterator[llama_cpp.ChatCompletionChunk] ] = await run_in_threadpool(llama.create_chat_completion, **kwargs) except Exception as err: - exit_stack.close() + await exit_stack.aclose() raise err if isinstance(iterator_or_completion, Iterator): From 801a73a621c6ba8f9a8ffe110766c9e721b09520 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Mon, 9 Dec 2024 18:49:27 -0500 Subject: [PATCH 37/55] feat: Update llama.cpp --- vendor/llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/llama.cpp b/vendor/llama.cpp index ce8784bdb..26a8406ba 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit ce8784bdb153ff7794dde5a50b0ebfa51baa6171 +Subproject commit 26a8406ba9198eb6fdd8329fa717555b4f77f05f From 803924b02cc8ca7531b70e6497d1ff7b83f75737 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Mon, 9 Dec 2024 18:59:22 -0500 Subject: [PATCH 38/55] chore: Bump version --- CHANGELOG.md | 6 ++++++ llama_cpp/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a1d3225f..6f6355893 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.5] + +- feat: Update llama.cpp to ggerganov/llama.cpp@26a8406ba9198eb6fdd8329fa717555b4f77f05f +- fix(ci): Fix release by updating macos runner image to non-deprecated version by @abetlen in afedfc888462f9a6e809dc9455eb3b663764cc3f +- fix(server): add missing await statements for async exit_stack handling by @gjpower in #1858 + ## [0.3.4] - fix(ci): Build wheels for macos 13-15, cuda 12.1-12.4 by @abetlen in ca808028bd16b8327bd84128d48015a4b1304690 diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py index dd8f1fc58..d7d9be881 100644 --- a/llama_cpp/__init__.py +++ b/llama_cpp/__init__.py @@ -1,4 +1,4 @@ from .llama_cpp import * from .llama import * -__version__ = "0.3.4" +__version__ = "0.3.5" From 2bc1d97c9672320828e70dc8293d5f8754682109 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 19 Dec 2024 01:55:12 -0500 Subject: [PATCH 39/55] feat: Update llama.cpp --- llama_cpp/_internals.py | 5 ---- llama_cpp/llama_cpp.py | 54 +++++++++++------------------------------ vendor/llama.cpp | 2 +- 3 files changed, 15 insertions(+), 46 deletions(-) diff --git a/llama_cpp/_internals.py b/llama_cpp/_internals.py index 994d5f149..8fa2b447f 100644 --- a/llama_cpp/_internals.py +++ b/llama_cpp/_internals.py @@ -805,15 +805,10 @@ def add_penalties( ignore_eos: bool, ): sampler = llama_cpp.llama_sampler_init_penalties( - n_vocab, - special_eos_id, - linefeed_id, penalty_last_n, penalty_repeat, penalty_freq, penalty_present, - penalize_nl, - ignore_eos, ) self._add_sampler(sampler) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 44a1461db..0481cdbcd 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -256,13 +256,17 @@ # // note: these values should be synchronized with ggml_rope # // TODO: maybe move this enum to ggml.h (ggml_rope_type) # enum llama_rope_type { -# 
LLAMA_ROPE_TYPE_NONE = -1, -# LLAMA_ROPE_TYPE_NORM = 0, -# LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX, +# LLAMA_ROPE_TYPE_NONE = -1, +# LLAMA_ROPE_TYPE_NORM = 0, +# LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX, +# LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE, +# LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION, # }; LLAMA_ROPE_TYPE_NONE = -1 LLAMA_ROPE_TYPE_NORM = 0 LLAMA_ROPE_TYPE_NEOX = GGML_ROPE_TYPE_NEOX = 2 +LLAMA_ROPE_TYPE_MROPE = GGML_ROPE_TYPE_MROPE = 8 +LLAMA_ROPE_TYPE_VISION = GGML_ROPE_TYPE_VISION = 24 # enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file @@ -1265,6 +1269,7 @@ def llama_rope_freq_scale_train(model: llama_model_p, /) -> float: # // Functions to access the model's GGUF metadata scalar values # // - The functions return the length of the string on success, or -1 on failure # // - The output string is always null-terminated and cleared on failure +# // - When retrieving a string, an extra byte must be allocated to account for the null terminator # // - GGUF array values are not supported by these functions @@ -1378,18 +1383,6 @@ def llama_model_n_params(model: llama_model_p, /) -> int: ... -# // Get a llama model tensor -# LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name); -@ctypes_function( - "llama_get_model_tensor", [llama_model_p_ctypes, ctypes.c_char_p], ctypes.c_void_p -) -def llama_get_model_tensor( - model: llama_model_p, name: Union[ctypes.c_char_p, bytes], / -) -> ctypes.c_void_p: - """Get a llama model tensor""" - ... - - # // Returns true if the model contains an encoder that requires llama_encode() call # LLAMA_API bool llama_model_has_encoder(const struct llama_model * model); @ctypes_function("llama_model_has_encoder", [llama_model_p_ctypes], ctypes.c_bool) @@ -3336,41 +3329,22 @@ def llama_sampler_init_grammar( ... +# /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. # LLAMA_API struct llama_sampler * llama_sampler_init_penalties( -# int32_t n_vocab, // llama_n_vocab() -# llama_token special_eos_id, // llama_token_eos() -# llama_token linefeed_id, // llama_token_nl() -# int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) -# float penalty_repeat, // 1.0 = disabled -# float penalty_freq, // 0.0 = disabled -# float penalty_present, // 0.0 = disabled -# bool penalize_nl, // consider newlines as a repeatable token -# bool ignore_eos); // ignore the end-of-sequence token +# int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) +# float penalty_repeat, // 1.0 = disabled +# float penalty_freq, // 0.0 = disabled +# float penalty_present); // 0.0 = disabled @ctypes_function( "llama_sampler_init_penalties", - [ - ctypes.c_int32, - llama_token, - llama_token, - ctypes.c_int32, - ctypes.c_float, - ctypes.c_float, - ctypes.c_float, - ctypes.c_bool, - ctypes.c_bool, - ], + [ctypes.c_int32, ctypes.c_float, ctypes.c_float, ctypes.c_float], llama_sampler_p_ctypes, ) def llama_sampler_init_penalties( - n_vocab: int, - special_eos_id: int, - linefeed_id: int, penalty_last_n: int, penalty_repeat: float, penalty_freq: float, penalty_present: float, - penalize_nl: bool, - ignore_eos: bool, /, ) -> llama_sampler_p: ... 
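For context, a minimal sketch of how the reworked penalties sampler might be attached to a sampler chain under the new four-argument signature. The chain helpers are the same ones used in the batching notebook above; the concrete penalty values are illustrative, and the top-k stage is included only because the upstream note recommends narrowing the candidate set before applying penalties:

import llama_cpp

# Build a sampler chain with default parameters and add samplers in order.
sparams = llama_cpp.llama_sampler_chain_default_params()
chain = llama_cpp.llama_sampler_chain_init(sparams)

# Narrow candidates first (per the upstream note), then apply the new
# 4-argument penalties sampler: (penalty_last_n, penalty_repeat,
# penalty_freq, penalty_present). The values below are illustrative.
llama_cpp.llama_sampler_chain_add(chain, llama_cpp.llama_sampler_init_top_k(40))
llama_cpp.llama_sampler_chain_add(
    chain, llama_cpp.llama_sampler_init_penalties(64, 1.1, 0.0, 0.0)
)
llama_cpp.llama_sampler_chain_add(chain, llama_cpp.llama_sampler_init_dist(1234))
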
diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 26a8406ba..7909e8588 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 26a8406ba9198eb6fdd8329fa717555b4f77f05f +Subproject commit 7909e8588ddf70820adf1f325490eb3f67b32875 From c9dfad4b6fdebc85f8f7914e8e46c6d9299ff4b4 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Mon, 30 Dec 2024 13:43:46 -0500 Subject: [PATCH 40/55] feat: Update llama.cpp --- vendor/llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 7909e8588..716bd6dec 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 7909e8588ddf70820adf1f325490eb3f67b32875 +Subproject commit 716bd6dec3e044e5c325386b5b0483392b24cefe From 1d5f5340e49d90bc097c1f75605a4dd7408c2378 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Wed, 8 Jan 2025 16:33:53 -0500 Subject: [PATCH 41/55] feat: Update llama.cpp --- llama_cpp/llama_cpp.py | 31 +++++++++++++++++++++++++++++-- vendor/llama.cpp | 2 +- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 0481cdbcd..16c6b9d73 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -222,6 +222,7 @@ # LLAMA_VOCAB_PRE_TYPE_EXAONE = 25, # LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, # LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, +# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28, # }; LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0 LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1 @@ -251,6 +252,7 @@ LLAMA_VOCAB_PRE_TYPE_EXAONE = 25 LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26 LLAMA_VOCAB_PRE_TYPE_MINERVA = 27 +LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28 # // note: these values should be synchronized with ggml_rope @@ -1090,9 +1092,10 @@ def llama_backend_free(): ... -# LLAMA_API struct llama_model * llama_load_model_from_file( +# DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file( # const char * path_model, -# struct llama_model_params params); +# struct llama_model_params params), +# "use llama_model_load_from_file instead"); @ctypes_function( "llama_load_model_from_file", [ctypes.c_char_p, llama_model_params], @@ -1104,6 +1107,20 @@ def llama_load_model_from_file( ... +# LLAMA_API struct llama_model * llama_model_load_from_file( +# const char * path_model, +# struct llama_model_params params); +@ctypes_function( + "llama_model_load_from_file", + [ctypes.c_char_p, llama_model_params], + llama_model_p_ctypes, +) +def llama_model_load_from_file( + path_model: bytes, params: llama_model_params, / +) -> Optional[llama_model_p]: + ... + + # LLAMA_API void llama_free_model(struct llama_model * model); @ctypes_function( "llama_free_model", @@ -1114,6 +1131,16 @@ def llama_free_model(model: llama_model_p, /): ... +# LLAMA_API void llama_model_free(struct llama_model * model); +@ctypes_function( + "llama_model_free", + [llama_model_p_ctypes], + None, +) +def llama_model_free(model: llama_model_p, /): + ... 
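As a usage note, a minimal sketch of loading and releasing a model through the newly bound, non-deprecated entry points. The model path is a placeholder and error handling is kept to the bare minimum:

import llama_cpp

params = llama_cpp.llama_model_default_params()
# llama_model_load_from_file supersedes the deprecated llama_load_model_from_file.
model = llama_cpp.llama_model_load_from_file(b"/path/to/model.gguf", params)  # placeholder path
if model is None:
    raise RuntimeError("failed to load model")
# ... create a context, run inference ...
# llama_model_free supersedes the deprecated llama_free_model.
llama_cpp.llama_model_free(model)
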
+ + # LLAMA_API struct llama_context * llama_new_context_with_model( # struct llama_model * model, # struct llama_context_params params); diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 716bd6dec..f7cd13301 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 716bd6dec3e044e5c325386b5b0483392b24cefe +Subproject commit f7cd13301c2a88f97073fd119072b4cc92c08df1 From e8f14ceff9b3279c67368d85f9457bb27168d126 Mon Sep 17 00:00:00 2001 From: Graeme Power Date: Wed, 8 Jan 2025 21:46:43 +0000 Subject: [PATCH 42/55] fix: streaming resource lock (#1879) * fix: correct issue with handling lock during streaming move locking for streaming into get_event_publisher call so it is locked and unlocked in the correct task for the streaming reponse * fix: simplify exit stack management for create_chat_completion and create_completion * fix: correct missing `async with` and format code * fix: remove unnecessary explicit use of AsyncExitStack fix: correct type hints for body_model --------- Co-authored-by: Andrei --- llama_cpp/server/app.py | 224 ++++++++++++++++++---------------------- 1 file changed, 103 insertions(+), 121 deletions(-) diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py index b6db453b8..5120f2416 100644 --- a/llama_cpp/server/app.py +++ b/llama_cpp/server/app.py @@ -7,7 +7,7 @@ from anyio import Lock from functools import partial -from typing import Iterator, List, Optional, Union, Dict +from typing import List, Optional, Union, Dict import llama_cpp @@ -155,34 +155,71 @@ def create_app( return app +def prepare_request_resources( + body: CreateCompletionRequest | CreateChatCompletionRequest, + llama_proxy: LlamaProxy, + body_model: str | None, + kwargs, +) -> llama_cpp.Llama: + if llama_proxy is None: + raise HTTPException( + status_code=status.HTTP_503_SERVICE_UNAVAILABLE, + detail="Service is not available", + ) + llama = llama_proxy(body_model) + if body.logit_bias is not None: + kwargs["logit_bias"] = ( + _logit_bias_tokens_to_input_ids(llama, body.logit_bias) + if body.logit_bias_type == "tokens" + else body.logit_bias + ) + + if body.grammar is not None: + kwargs["grammar"] = llama_cpp.LlamaGrammar.from_string(body.grammar) + + if body.min_tokens > 0: + _min_tokens_logits_processor = llama_cpp.LogitsProcessorList( + [llama_cpp.MinTokensLogitsProcessor(body.min_tokens, llama.token_eos())] + ) + if "logits_processor" not in kwargs: + kwargs["logits_processor"] = _min_tokens_logits_processor + else: + kwargs["logits_processor"].extend(_min_tokens_logits_processor) + return llama + + async def get_event_publisher( request: Request, inner_send_chan: MemoryObjectSendStream[typing.Any], - iterator: Iterator[typing.Any], - on_complete: typing.Optional[typing.Callable[[], typing.Awaitable[None]]] = None, + body: CreateCompletionRequest | CreateChatCompletionRequest, + body_model: str | None, + llama_call, + kwargs, ): server_settings = next(get_server_settings()) interrupt_requests = ( server_settings.interrupt_requests if server_settings else False ) - async with inner_send_chan: - try: - async for chunk in iterate_in_threadpool(iterator): - await inner_send_chan.send(dict(data=json.dumps(chunk))) - if await request.is_disconnected(): - raise anyio.get_cancelled_exc_class()() - if interrupt_requests and llama_outer_lock.locked(): - await inner_send_chan.send(dict(data="[DONE]")) - raise anyio.get_cancelled_exc_class()() - await inner_send_chan.send(dict(data="[DONE]")) - except anyio.get_cancelled_exc_class() as e: - print("disconnected") - with 
anyio.move_on_after(1, shield=True): - print(f"Disconnected from client (via refresh/close) {request.client}") - raise e - finally: - if on_complete: - await on_complete() + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + async with inner_send_chan: + try: + iterator = await run_in_threadpool(llama_call, llama, **kwargs) + async for chunk in iterate_in_threadpool(iterator): + await inner_send_chan.send(dict(data=json.dumps(chunk))) + if await request.is_disconnected(): + raise anyio.get_cancelled_exc_class()() + if interrupt_requests and llama_outer_lock.locked(): + await inner_send_chan.send(dict(data="[DONE]")) + raise anyio.get_cancelled_exc_class()() + await inner_send_chan.send(dict(data="[DONE]")) + except anyio.get_cancelled_exc_class() as e: + print("disconnected") + with anyio.move_on_after(1, shield=True): + print( + f"Disconnected from client (via refresh/close) {request.client}" + ) + raise e def _logit_bias_tokens_to_input_ids( @@ -267,18 +304,11 @@ async def create_completion( request: Request, body: CreateCompletionRequest, ) -> llama_cpp.Completion: - exit_stack = contextlib.AsyncExitStack() - llama_proxy = await exit_stack.enter_async_context(contextlib.asynccontextmanager(get_llama_proxy)()) - if llama_proxy is None: - raise HTTPException( - status_code=status.HTTP_503_SERVICE_UNAVAILABLE, - detail="Service is not available", - ) if isinstance(body.prompt, list): assert len(body.prompt) <= 1 body.prompt = body.prompt[0] if len(body.prompt) > 0 else "" - llama = llama_proxy( + body_model = ( body.model if request.url.path != "/v1/engines/copilot-codex/completions" else "copilot-codex" @@ -293,44 +323,8 @@ async def create_completion( } kwargs = body.model_dump(exclude=exclude) - if body.logit_bias is not None: - kwargs["logit_bias"] = ( - _logit_bias_tokens_to_input_ids(llama, body.logit_bias) - if body.logit_bias_type == "tokens" - else body.logit_bias - ) - - if body.grammar is not None: - kwargs["grammar"] = llama_cpp.LlamaGrammar.from_string(body.grammar) - - if body.min_tokens > 0: - _min_tokens_logits_processor = llama_cpp.LogitsProcessorList( - [llama_cpp.MinTokensLogitsProcessor(body.min_tokens, llama.token_eos())] - ) - if "logits_processor" not in kwargs: - kwargs["logits_processor"] = _min_tokens_logits_processor - else: - kwargs["logits_processor"].extend(_min_tokens_logits_processor) - - try: - iterator_or_completion: Union[ - llama_cpp.CreateCompletionResponse, - Iterator[llama_cpp.CreateCompletionStreamResponse], - ] = await run_in_threadpool(llama, **kwargs) - except Exception as err: - await exit_stack.aclose() - raise err - - if isinstance(iterator_or_completion, Iterator): - # EAFP: It's easier to ask for forgiveness than permission - first_response = await run_in_threadpool(next, iterator_or_completion) - - # If no exception was raised from first_response, we can assume that - # the iterator is valid and we can use it to stream the response. 
- def iterator() -> Iterator[llama_cpp.CreateCompletionStreamResponse]: - yield first_response - yield from iterator_or_completion - + # handle streaming request + if kwargs.get("stream", False): send_chan, recv_chan = anyio.create_memory_object_stream(10) return EventSourceResponse( recv_chan, @@ -338,15 +332,29 @@ def iterator() -> Iterator[llama_cpp.CreateCompletionStreamResponse]: get_event_publisher, request=request, inner_send_chan=send_chan, - iterator=iterator(), - on_complete=exit_stack.aclose, + body=body, + body_model=body_model, + llama_call=llama_cpp.Llama.__call__, + kwargs=kwargs, ), sep="\n", ping_message_factory=_ping_message_factory, ) - else: - await exit_stack.aclose() - return iterator_or_completion + + # handle regular request + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + + if await request.is_disconnected(): + print( + f"Disconnected from client (via refresh/close) before llm invoked {request.client}" + ) + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Client closed request", + ) + + return await run_in_threadpool(llama, **kwargs) @router.post( @@ -474,13 +482,8 @@ async def create_chat_completion( # where the dependency is cleaned up before a StreamingResponse # is complete. # https://github.com/tiangolo/fastapi/issues/11143 - exit_stack = contextlib.AsyncExitStack() - llama_proxy = await exit_stack.enter_async_context(contextlib.asynccontextmanager(get_llama_proxy)()) - if llama_proxy is None: - raise HTTPException( - status_code=status.HTTP_503_SERVICE_UNAVAILABLE, - detail="Service is not available", - ) + + body_model = body.model exclude = { "n", "logit_bias_type", @@ -488,44 +491,9 @@ async def create_chat_completion( "min_tokens", } kwargs = body.model_dump(exclude=exclude) - llama = llama_proxy(body.model) - if body.logit_bias is not None: - kwargs["logit_bias"] = ( - _logit_bias_tokens_to_input_ids(llama, body.logit_bias) - if body.logit_bias_type == "tokens" - else body.logit_bias - ) - - if body.grammar is not None: - kwargs["grammar"] = llama_cpp.LlamaGrammar.from_string(body.grammar) - - if body.min_tokens > 0: - _min_tokens_logits_processor = llama_cpp.LogitsProcessorList( - [llama_cpp.MinTokensLogitsProcessor(body.min_tokens, llama.token_eos())] - ) - if "logits_processor" not in kwargs: - kwargs["logits_processor"] = _min_tokens_logits_processor - else: - kwargs["logits_processor"].extend(_min_tokens_logits_processor) - - try: - iterator_or_completion: Union[ - llama_cpp.ChatCompletion, Iterator[llama_cpp.ChatCompletionChunk] - ] = await run_in_threadpool(llama.create_chat_completion, **kwargs) - except Exception as err: - await exit_stack.aclose() - raise err - - if isinstance(iterator_or_completion, Iterator): - # EAFP: It's easier to ask for forgiveness than permission - first_response = await run_in_threadpool(next, iterator_or_completion) - - # If no exception was raised from first_response, we can assume that - # the iterator is valid and we can use it to stream the response. 
- def iterator() -> Iterator[llama_cpp.ChatCompletionChunk]: - yield first_response - yield from iterator_or_completion + # handle streaming request + if kwargs.get("stream", False): send_chan, recv_chan = anyio.create_memory_object_stream(10) return EventSourceResponse( recv_chan, @@ -533,15 +501,29 @@ def iterator() -> Iterator[llama_cpp.ChatCompletionChunk]: get_event_publisher, request=request, inner_send_chan=send_chan, - iterator=iterator(), - on_complete=exit_stack.aclose, + body=body, + body_model=body_model, + llama_call=llama_cpp.Llama.create_chat_completion, + kwargs=kwargs, ), sep="\n", ping_message_factory=_ping_message_factory, ) - else: - await exit_stack.aclose() - return iterator_or_completion + + # handle regular request + async with contextlib.asynccontextmanager(get_llama_proxy)() as llama_proxy: + llama = prepare_request_resources(body, llama_proxy, body_model, kwargs) + + if await request.is_disconnected(): + print( + f"Disconnected from client (via refresh/close) before llm invoked {request.client}" + ) + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Client closed request", + ) + + return await run_in_threadpool(llama.create_chat_completion, **kwargs) @router.get( From 0580cf273debf4a7f2efcdfd5ef092ff5cedf9b0 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Wed, 8 Jan 2025 16:53:54 -0500 Subject: [PATCH 43/55] chore: Bump version --- CHANGELOG.md | 5 +++++ llama_cpp/__init__.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f6355893..b4f49c904 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.6] + +- feat: Update llama.cpp to ggerganov/llama.cpp@f7cd13301c2a88f97073fd119072b4cc92c08df1 +- fix(server): streaming resource lock by @gjpower in #1879 + ## [0.3.5] - feat: Update llama.cpp to ggerganov/llama.cpp@26a8406ba9198eb6fdd8329fa717555b4f77f05f diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py index d7d9be881..8c6118fb4 100644 --- a/llama_cpp/__init__.py +++ b/llama_cpp/__init__.py @@ -1,4 +1,4 @@ from .llama_cpp import * from .llama import * -__version__ = "0.3.5" +__version__ = "0.3.6" From 80be68ac36b458e02f5e6af8bddd005a2f680e39 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Tue, 28 Jan 2025 21:38:05 -0500 Subject: [PATCH 44/55] feat: Update llama.cpp --- llama_cpp/_internals.py | 181 +++++----- llama_cpp/llama.py | 16 +- llama_cpp/llama_cpp.py | 707 +++++++++++++++++++++++++++++----------- vendor/llama.cpp | 2 +- 4 files changed, 626 insertions(+), 280 deletions(-) diff --git a/llama_cpp/_internals.py b/llama_cpp/_internals.py index 8fa2b447f..343581dce 100644 --- a/llama_cpp/_internals.py +++ b/llama_cpp/_internals.py @@ -55,7 +55,13 @@ def __init__( if model is None: raise ValueError(f"Failed to load model from file: {path_model}") + vocab = llama_cpp.llama_model_get_vocab(model) + + if vocab is None: + raise ValueError(f"Failed to get vocab from model: {path_model}") + self.model = model + self.vocab = vocab def free_model(): if self.model is None: @@ -75,7 +81,7 @@ def vocab_type(self) -> int: return llama_cpp.llama_vocab_type(self.model) def n_vocab(self) -> int: - return llama_cpp.llama_n_vocab(self.model) + return llama_cpp.llama_n_vocab(self.vocab) def n_ctx_train(self) -> int: return llama_cpp.llama_n_ctx_train(self.model) @@ -84,7 +90,7 @@ def n_embd(self) -> int: return llama_cpp.llama_n_embd(self.model) def rope_freq_scale_train(self) 
-> float: - return llama_cpp.llama_rope_freq_scale_train(self.model) + return llama_cpp.llama_model_rope_freq_scale_train(self.model) def desc(self) -> str: buf = ctypes.create_string_buffer(1024) @@ -98,53 +104,53 @@ def n_params(self) -> int: return llama_cpp.llama_model_n_params(self.model) def get_tensor(self, name: str) -> ctypes.c_void_p: - return llama_cpp.llama_get_model_tensor(self.model, name.encode("utf-8")) + raise NotImplementedError("get_tensor is not implemented in llama.cpp") # Vocab def token_get_text(self, token: int) -> str: - return llama_cpp.llama_token_get_text(self.model, token).decode("utf-8") + return llama_cpp.llama_token_get_text(self.vocab, token).decode("utf-8") def token_get_score(self, token: int) -> float: - return llama_cpp.llama_token_get_score(self.model, token) + return llama_cpp.llama_token_get_score(self.vocab, token) def token_get_attr(self, token: int) -> int: - return llama_cpp.llama_token_get_attr(self.model, token) + return llama_cpp.llama_token_get_attr(self.vocab, token) # Special tokens def token_bos(self) -> int: - return llama_cpp.llama_token_bos(self.model) + return llama_cpp.llama_token_bos(self.vocab) def token_eos(self) -> int: - return llama_cpp.llama_token_eos(self.model) + return llama_cpp.llama_token_eos(self.vocab) def token_cls(self) -> int: - return llama_cpp.llama_token_cls(self.model) + return llama_cpp.llama_token_cls(self.vocab) def token_sep(self) -> int: - return llama_cpp.llama_token_sep(self.model) + return llama_cpp.llama_token_sep(self.vocab) def token_nl(self) -> int: - return llama_cpp.llama_token_nl(self.model) + return llama_cpp.llama_token_nl(self.vocab) def token_prefix(self) -> int: - return llama_cpp.llama_token_prefix(self.model) + raise NotImplementedError("token_prefix is not implemented in llama.cpp") def token_middle(self) -> int: - return llama_cpp.llama_token_middle(self.model) + raise NotImplementedError("token_middle is not implemented in llama.cpp") def token_suffix(self) -> int: - return llama_cpp.llama_token_suffix(self.model) + raise NotImplementedError("token_suffix is not implemented in llama.cpp") def token_eot(self) -> int: - return llama_cpp.llama_token_eot(self.model) + return llama_cpp.llama_token_eot(self.vocab) def add_bos_token(self) -> bool: - return llama_cpp.llama_add_bos_token(self.model) + return llama_cpp.llama_add_bos_token(self.vocab) def add_eos_token(self) -> bool: - return llama_cpp.llama_add_eos_token(self.model) + return llama_cpp.llama_add_eos_token(self.vocab) # Tokenization @@ -152,13 +158,13 @@ def tokenize(self, text: bytes, add_bos: bool, special: bool): n_ctx = self.n_ctx_train() tokens = (llama_cpp.llama_token * n_ctx)() n_tokens = llama_cpp.llama_tokenize( - self.model, text, len(text), tokens, n_ctx, add_bos, special + self.vocab, text, len(text), tokens, n_ctx, add_bos, special ) if n_tokens < 0: n_tokens = abs(n_tokens) tokens = (llama_cpp.llama_token * n_tokens)() n_tokens = llama_cpp.llama_tokenize( - self.model, text, len(text), tokens, n_tokens, add_bos, special + self.vocab, text, len(text), tokens, n_tokens, add_bos, special ) if n_tokens < 0: raise RuntimeError( @@ -168,7 +174,7 @@ def tokenize(self, text: bytes, add_bos: bool, special: bool): def token_to_piece(self, token: int, special: bool = False) -> bytes: buf = ctypes.create_string_buffer(32) - llama_cpp.llama_token_to_piece(self.model, token, buf, 32, 0, special) + llama_cpp.llama_token_to_piece(self.vocab, token, buf, 32, 0, special) return bytes(buf) def detokenize(self, tokens: List[int], special: 
bool = False) -> bytes: @@ -177,7 +183,7 @@ def detokenize(self, tokens: List[int], special: bool = False) -> bytes: buffer = (ctypes.c_char * size)() for token in tokens: n = llama_cpp.llama_token_to_piece( - self.model, llama_cpp.llama_token(token), buffer, size, 0, special + self.vocab, llama_cpp.llama_token(token), buffer, size, 0, special ) assert n <= size output += bytes(buffer[:n]) @@ -320,7 +326,8 @@ def get_embeddings(self): def set_rng_seed(self, seed: int): # TODO: Fix - llama_cpp.llama_set_rng_seed(self.ctx, seed) + # llama_cpp.llama_set_rng_seed(self.ctx, seed) + raise NotImplementedError("set_rng_seed is not implemented in llama.cpp") def sample_repetition_penalties( self, @@ -331,55 +338,63 @@ def sample_repetition_penalties( penalty_freq: float, penalty_present: float, ): - llama_cpp.llama_sample_repetition_penalties( - self.ctx, - llama_cpp.byref(candidates.candidates), - last_tokens_data, - penalty_last_n, - penalty_repeat, - penalty_freq, - penalty_present, - ) + # llama_cpp.llama_sample_repetition_penalties( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # last_tokens_data, + # penalty_last_n, + # penalty_repeat, + # penalty_freq, + # penalty_present, + # ) + raise NotImplementedError("sample_repetition_penalties is not implemented in llama.cpp") def sample_softmax(self, candidates: "_LlamaTokenDataArray"): - llama_cpp.llama_sample_softmax( - self.ctx, - llama_cpp.byref(candidates.candidates), - ) + # llama_cpp.llama_sample_softmax( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) + raise NotImplementedError("sample_softmax is not implemented in llama.cpp") def sample_top_k(self, candidates: "_LlamaTokenDataArray", k: int, min_keep: int): - llama_cpp.llama_sample_top_k( - self.ctx, llama_cpp.byref(candidates.candidates), k, min_keep - ) + # llama_cpp.llama_sample_top_k( + # self.ctx, llama_cpp.byref(candidates.candidates), k, min_keep + # ) + raise NotImplementedError("sample_top_k is not implemented in llama.cpp") def sample_top_p(self, candidates: "_LlamaTokenDataArray", p: float, min_keep: int): - llama_cpp.llama_sample_top_p( - self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep - ) + # llama_cpp.llama_sample_top_p( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_top_p is not implemented in llama.cpp") def sample_min_p(self, candidates: "_LlamaTokenDataArray", p: float, min_keep: int): - llama_cpp.llama_sample_min_p( - self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep - ) + # llama_cpp.llama_sample_min_p( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_min_p is not implemented in llama.cpp") def sample_typical( self, candidates: "_LlamaTokenDataArray", p: float, min_keep: int ): - llama_cpp.llama_sample_typical( - self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep - ) + # llama_cpp.llama_sample_typical( + # self.ctx, llama_cpp.byref(candidates.candidates), p, min_keep + # ) + raise NotImplementedError("sample_typical is not implemented in llama.cpp") def sample_temp(self, candidates: "_LlamaTokenDataArray", temp: float): - llama_cpp.llama_sample_temp( - self.ctx, llama_cpp.byref(candidates.candidates), temp - ) + # llama_cpp.llama_sample_temp( + # self.ctx, llama_cpp.byref(candidates.candidates), temp + # ) + raise NotImplementedError("sample_temp is not implemented in llama.cpp") def sample_grammar(self, candidates: "_LlamaTokenDataArray", grammar: LlamaGrammar): - 
llama_cpp.llama_sample_grammar( - self.ctx, - llama_cpp.byref(candidates.candidates), - grammar.grammar, - ) + # llama_cpp.llama_sample_grammar( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # grammar.grammar, + # ) + raise NotImplementedError("sample_grammar is not implemented in llama.cpp") def sample_token_mirostat( self, @@ -389,14 +404,15 @@ def sample_token_mirostat( m: int, mu: llama_cpp.CtypesPointerOrRef[ctypes.c_float], ) -> int: - return llama_cpp.llama_sample_token_mirostat( - self.ctx, - llama_cpp.byref(candidates.candidates), - tau, - eta, - m, - mu, - ) + raise NotImplementedError("sample_token_mirostat is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_mirostat( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # tau, + # eta, + # m, + # mu, + # ) def sample_token_mirostat_v2( self, @@ -405,29 +421,33 @@ def sample_token_mirostat_v2( eta: float, mu: llama_cpp.CtypesPointerOrRef[ctypes.c_float], ) -> int: - return llama_cpp.llama_sample_token_mirostat_v2( - self.ctx, - llama_cpp.byref(candidates.candidates), - tau, - eta, - mu, - ) + raise NotImplementedError("sample_token_mirostat_v2 is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_mirostat_v2( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # tau, + # eta, + # mu, + # ) def sample_token_greedy(self, candidates: "_LlamaTokenDataArray") -> int: - return llama_cpp.llama_sample_token_greedy( - self.ctx, - llama_cpp.byref(candidates.candidates), - ) + raise NotImplementedError("sample_token_greedy is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token_greedy( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) def sample_token(self, candidates: "_LlamaTokenDataArray") -> int: - return llama_cpp.llama_sample_token( - self.ctx, - llama_cpp.byref(candidates.candidates), - ) + raise NotImplementedError("sample_token is not implemented in llama.cpp") + # return llama_cpp.llama_sample_token( + # self.ctx, + # llama_cpp.byref(candidates.candidates), + # ) # Grammar def grammar_accept_token(self, grammar: LlamaGrammar, token: int): - llama_cpp.llama_grammar_accept_token(grammar.grammar, self.ctx, token) + raise NotImplementedError("grammar_accept_token is not implemented in llama.cpp") + # llama_cpp.llama_grammar_accept_token(grammar.grammar, self.ctx, token) def reset_timings(self): llama_cpp.llama_perf_context_reset(self.ctx) @@ -788,7 +808,7 @@ def add_mirostat_v2(self, seed: int, tau: float, eta: float): def add_grammar(self, model: LlamaModel, grammar: LlamaGrammar): sampler = llama_cpp.llama_sampler_init_grammar( - model.model, grammar._grammar.encode("utf-8"), grammar._root.encode("utf-8") + model.vocab, grammar._grammar.encode("utf-8"), grammar._root.encode("utf-8") ) self._add_sampler(sampler) @@ -842,6 +862,7 @@ def get_seed(self) -> int: def sample(self, ctx: LlamaContext, idx: int) -> int: assert self.sampler is not None + assert ctx.ctx is not None return llama_cpp.llama_sampler_sample(self.sampler, ctx.ctx, idx) def close(self): diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index 2fd7ff193..11fc744a0 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -406,10 +406,10 @@ def __init__( ) ) - self._lora_adapter: Optional[llama_cpp.llama_lora_adapter_p] = None + self._lora_adapter: Optional[llama_cpp.llama_adapter_lora_p] = None if self.lora_path: - self._lora_adapter = llama_cpp.llama_lora_adapter_init( + self._lora_adapter = llama_cpp.llama_adapter_lora_init( self._model.model, 
self.lora_path.encode("utf-8"), ) @@ -421,12 +421,12 @@ def __init__( def free_lora_adapter(): if self._lora_adapter is None: return - llama_cpp.llama_lora_adapter_free(self._lora_adapter) + llama_cpp.llama_adapter_lora_free(self._lora_adapter) self._lora_adapter = None self._stack.callback(free_lora_adapter) - if llama_cpp.llama_lora_adapter_set( + if llama_cpp.llama_set_adapter_lora( self._ctx.ctx, self._lora_adapter, self.lora_scale ): raise RuntimeError( @@ -1152,9 +1152,9 @@ def _create_completion( bos_token_id: int = self.token_bos() cls_token_id: int = self._model.token_cls() sep_token_id: int = self._model.token_sep() - prefix_token_id: int = self._model.token_prefix() - middle_token_id: int = self._model.token_middle() - suffix_token_id: int = self._model.token_suffix() + prefix_token_id: int = 0 # self._model.token_prefix() # TODO: Fix + middle_token_id: int = 0 # self._model.token_middle() # TODO: Fix + suffix_token_id: int = 0 # self._model.token_suffix() # TODO: Fix add_space_prefix: bool = ( self.metadata.get("tokenizer.ggml.add_space_prefix", "true") == "true" ) @@ -1332,7 +1332,7 @@ def logit_bias_processor( logits_processor=logits_processor, grammar=grammar, ): - if llama_cpp.llama_token_is_eog(self._model.model, token): + if llama_cpp.llama_token_is_eog(self._model.vocab, token): text = self.detokenize(completion_tokens, prev_tokens=prompt_tokens) finish_reason = "stop" break diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 16c6b9d73..7845f0d1c 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -149,6 +149,10 @@ # define LLAMA_STATE_SEQ_VERSION 2 LLAMA_STATE_SEQ_VERSION = 2 +# struct llama_vocab; +llama_vocab_p = NewType("llama_vocab_p", int) +llama_vocab_p_ctypes = ctypes.c_void_p + # struct llama_model; llama_model_p = NewType("llama_model_p", int) llama_model_p_ctypes = ctypes.c_void_p @@ -640,9 +644,6 @@ class llama_model_kv_override(ctypes.Structure): # // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() # const float * tensor_split; -# // comma separated list of RPC servers to use for offloading -# const char * rpc_servers; - # // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. # // If the provided progress_callback returns true, model loading continues. # // If it returns false, model loading is immediately aborted. @@ -669,7 +670,6 @@ class llama_model_params(ctypes.Structure): split_mode (int): how to split the model across multiple GPUs main_gpu (int): the GPU that is used for the entire model. main_gpu interpretation depends on split_mode: LLAMA_SPLIT_NONE: the GPU that is used for the entire model LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results LLAMA_SPLIT_LAYER: ignored tensor_split (ctypes.Array[ctypes.ctypes.c_float]): proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() - rpc_servers (ctypes.c_char_p): comma separated list of RPC servers to use for offloading progress_callback (llama_progress_callback): called with a progress value between 0.0 and 1.0. Pass NULL to disable. If the provided progress_callback returns true, model loading continues. If it returns false, model loading is immediately aborted. 
progress_callback_user_data (ctypes.ctypes.c_void_p): context pointer passed to the progress callback kv_overrides (ctypes.Array[llama_model_kv_override]): override key-value pairs of the model meta data @@ -683,7 +683,6 @@ class llama_model_params(ctypes.Structure): split_mode: int main_gpu: int tensor_split: CtypesArray[ctypes.c_float] - rpc_servers: ctypes.c_char_p progress_callback: Callable[[float, ctypes.c_void_p], bool] progress_callback_user_data: ctypes.c_void_p kv_overrides: CtypesArray[llama_model_kv_override] @@ -698,7 +697,6 @@ class llama_model_params(ctypes.Structure): ("split_mode", ctypes.c_int), ("main_gpu", ctypes.c_int32), ("tensor_split", ctypes.POINTER(ctypes.c_float)), - ("rpc_servers", ctypes.c_char_p), ("progress_callback", llama_progress_callback), ("progress_callback_user_data", ctypes.c_void_p), ("kv_overrides", ctypes.POINTER(llama_model_kv_override)), @@ -975,9 +973,9 @@ class llama_chat_message(ctypes.Structure): # // lora adapter -# struct llama_lora_adapter; -llama_lora_adapter_p = ctypes.c_void_p -llama_lora_adapter_p_ctypes = ctypes.POINTER(ctypes.c_void_p) +# struct llama_adapter_lora; +llama_adapter_lora_p = ctypes.c_void_p +llama_adapter_lora_p_ctypes = ctypes.POINTER(ctypes.c_void_p) # // Helpers for getting default parameters @@ -1059,6 +1057,18 @@ def llama_backend_init(): GGML_NUMA_STRATEGY_COUNT = 5 +# // Call once at the end of the program - currently only used for MPI +# LLAMA_API void llama_backend_free(void); +@ctypes_function( + "llama_backend_free", + [], + None, +) +def llama_backend_free(): + """Call once at the end of the program - currently only used for MPI""" + ... + + # //optional: # LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa); @ctypes_function( @@ -1072,24 +1082,14 @@ def llama_numa_init(numa: int, /): # // Optional: an auto threadpool gets created in ggml if not passed explicitly # LLAMA_API void llama_attach_threadpool( -# struct llama_context * ctx, -# ggml_threadpool_t threadpool, -# ggml_threadpool_t threadpool_batch); +# struct llama_context * ctx, +# ggml_threadpool_t threadpool, +# ggml_threadpool_t threadpool_batch); +# TODO: Add llama_attach_threadpool # LLAMA_API void llama_detach_threadpool(struct llama_context * ctx); - - -# // Call once at the end of the program - currently only used for MPI -# LLAMA_API void llama_backend_free(void); -@ctypes_function( - "llama_backend_free", - [], - None, -) -def llama_backend_free(): - """Call once at the end of the program - currently only used for MPI""" - ... +# TODO: Add llama_detach_threadpool # DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file( @@ -1107,6 +1107,9 @@ def llama_load_model_from_file( ... +# // Load the model from a file +# // If the file is split into multiple parts, the file name must follow this pattern: -%05d-of-%05d.gguf +# // If the split file name does not follow this pattern, use llama_model_load_from_splits # LLAMA_API struct llama_model * llama_model_load_from_file( # const char * path_model, # struct llama_model_params params); @@ -1118,6 +1121,31 @@ def llama_load_model_from_file( def llama_model_load_from_file( path_model: bytes, params: llama_model_params, / ) -> Optional[llama_model_p]: + """Load the model from a file + + If the file is split into multiple parts, the file name must follow this pattern: -%05d-of-%05d.gguf + + If the split file name does not follow this pattern, use llama_model_load_from_splits""" + ... 
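As a small illustration of the shard-naming rule described in the docstring above (not taken from the patch; the prefix is hypothetical), the expected file names can be built like this:

def split_path(prefix: str, i: int, n: int) -> bytes:
    # follows the "-%05d-of-%05d.gguf" convention, e.g.
    # split_path("my-model", 1, 3) -> b"my-model-00001-of-00003.gguf"
    return f"{prefix}-{i:05d}-of-{n:05d}.gguf".encode("utf-8")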
+ + +# // Load the model from multiple splits (support custom naming scheme) +# // The paths must be in the correct order +# LLAMA_API struct llama_model * llama_model_load_from_splits( +# const char ** paths, +# size_t n_paths, +# struct llama_model_params params); +@ctypes_function( + "llama_model_load_from_splits", + [ctypes.POINTER(ctypes.c_char_p), ctypes.c_size_t, llama_model_params], + llama_model_p_ctypes, +) +def llama_model_load_from_splits( + paths: List[bytes], n_paths: int, params: llama_model_params, / +) -> Optional[llama_model_p]: + """Load the model from multiple splits (support custom naming scheme) + + The paths must be in the correct order""" ... @@ -1141,9 +1169,24 @@ def llama_model_free(model: llama_model_p, /): ... -# LLAMA_API struct llama_context * llama_new_context_with_model( +# LLAMA_API struct llama_context * llama_init_from_model( # struct llama_model * model, # struct llama_context_params params); +@ctypes_function( + "llama_init_from_model", + [llama_model_p_ctypes, llama_context_params], + llama_context_p_ctypes, +) +def llama_init_from_model( + model: llama_model_p, params: llama_context_params, / +) -> Optional[llama_context_p]: + ... + + +# DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model( +# struct llama_model * model, +# struct llama_context_params params), +# "use llama_init_from_model instead"); @ctypes_function( "llama_new_context_with_model", [llama_model_p_ctypes, llama_context_params], @@ -1231,65 +1274,102 @@ def llama_n_seq_max(ctx: llama_context_p, /) -> int: ... -# LLAMA_API int32_t llama_n_vocab (const struct llama_model * model); -@ctypes_function("llama_n_vocab", [llama_model_p_ctypes], ctypes.c_int32) -def llama_n_vocab(model: llama_model_p, /) -> int: - ... -# LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model); +# DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead"); @ctypes_function("llama_n_ctx_train", [llama_model_p_ctypes], ctypes.c_int32) def llama_n_ctx_train(model: llama_model_p, /) -> int: ... -# LLAMA_API int32_t llama_n_embd (const struct llama_model * model); +# DEPRECATED(LLAMA_API int32_t llama_n_embd (const struct llama_model * model), "use llama_model_n_embd instead"); @ctypes_function("llama_n_embd", [llama_model_p_ctypes], ctypes.c_int32) def llama_n_embd(model: llama_model_p, /) -> int: ... -# LLAMA_API int32_t llama_n_layer (const struct llama_model * model); +# DEPRECATED(LLAMA_API int32_t llama_n_layer (const struct llama_model * model), "use llama_model_n_layer instead"); @ctypes_function("llama_n_layer", [llama_model_p_ctypes], ctypes.c_int32) def llama_n_layer(model: llama_model_p, /) -> int: ... -# LLAMA_API int32_t llama_n_head (const struct llama_model * model); +# DEPRECATED(LLAMA_API int32_t llama_n_head (const struct llama_model * model), "use llama_model_n_head instead"); @ctypes_function("llama_n_head", [llama_model_p_ctypes], ctypes.c_int32) def llama_n_head(model: llama_model_p, /) -> int: ... -# LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx); +# DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead"); +@ctypes_function("llama_n_vocab", [llama_vocab_p_ctypes], ctypes.c_int32) +def llama_n_vocab(model: llama_vocab_p, /) -> int: + ... 
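Complementing llama_model_load_from_splits introduced earlier in this hunk, a hedged sketch of calling it through ctypes; the binding expects a char** plus a count, so the Python list of byte paths (placeholder names here) is packed into a ctypes array first:

import ctypes
import llama_cpp

paths = [b"./shard-custom-a.gguf", b"./shard-custom-b.gguf"]   # placeholder names, in order
c_paths = (ctypes.c_char_p * len(paths))(*paths)               # char ** for the binding
params = llama_cpp.llama_model_default_params()
model = llama_cpp.llama_model_load_from_splits(c_paths, len(paths), params)
if model is None:
    raise RuntimeError("failed to load split model")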
+ + +# LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx); @ctypes_function("llama_get_model", [llama_context_p_ctypes], llama_model_p_ctypes) def llama_get_model(ctx: llama_context_p, /) -> Optional[llama_model_p]: ... -# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); +# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); @ctypes_function("llama_pooling_type", [llama_context_p_ctypes], ctypes.c_int) def llama_pooling_type(ctx: llama_context_p, /) -> int: ... -# LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); -@ctypes_function("llama_vocab_type", [llama_model_p_ctypes], ctypes.c_int) -def llama_vocab_type(model: llama_model_p, /) -> int: +# LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model); +@ctypes_function("llama_model_get_vocab", [llama_model_p_ctypes], llama_vocab_p_ctypes) +def llama_model_get_vocab(model: llama_model_p, /) -> Optional[llama_vocab_p]: + ... + + +# LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model); +@ctypes_function("llama_model_rope_type", [llama_model_p_ctypes], ctypes.c_int) +def llama_model_rope_type(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model); +@ctypes_function("llama_model_n_ctx_train", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_ctx_train(model: llama_model_p, /) -> int: ... -# LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model); -@ctypes_function("llama_rope_type", [llama_model_p_ctypes], ctypes.c_int) -def llama_rope_type(model: llama_model_p, /) -> int: +# LLAMA_API int32_t llama_model_n_embd (const struct llama_model * model); +@ctypes_function("llama_model_n_embd", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_embd(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model); +@ctypes_function("llama_model_n_layer", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_layer(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_model_n_head (const struct llama_model * model); +@ctypes_function("llama_model_n_head", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_head(model: llama_model_p, /) -> int: ... # // Get the model's RoPE frequency scaling factor -# LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model); -@ctypes_function("llama_rope_freq_scale_train", [llama_model_p_ctypes], ctypes.c_float) -def llama_rope_freq_scale_train(model: llama_model_p, /) -> float: - """Get the model's RoPE frequency scaling factor""" +# LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model); +@ctypes_function("llama_model_rope_freq_scale_train", [llama_model_p_ctypes], ctypes.c_float) +def llama_model_rope_freq_scale_train(model: llama_model_p, /) -> float: + ... + + +# LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); +@ctypes_function("llama_vocab_type", [llama_model_p_ctypes], ctypes.c_int) +def llama_vocab_type(model: llama_model_p, /) -> int: + ... + + +# LLAMA_API int32_t llama_vocab_n_tokens(const struct llama_vocab * vocab); +@ctypes_function("llama_vocab_n_tokens", [llama_vocab_p_ctypes], ctypes.c_int32) +def llama_vocab_n_tokens(vocab: llama_vocab_p, /) -> int: ... 
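A short migration sketch for the getters in this hunk, showing the vocab-based replacements next to the deprecated model-based spellings:

import llama_cpp

def describe_model(model: llama_cpp.llama_model_p) -> dict:
    vocab = llama_cpp.llama_model_get_vocab(model)
    return {
        "n_vocab": llama_cpp.llama_vocab_n_tokens(vocab),          # was llama_n_vocab(model)
        "n_ctx_train": llama_cpp.llama_model_n_ctx_train(model),   # was llama_n_ctx_train(model)
        "n_embd": llama_cpp.llama_model_n_embd(model),             # was llama_n_embd(model)
        "n_layer": llama_cpp.llama_model_n_layer(model),           # was llama_n_layer(model)
    }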
@@ -1402,6 +1482,14 @@ def llama_model_size(model: llama_model_p, /) -> int: ... +# // Get the default chat template. Returns nullptr if not available +# LLAMA_API const char * llama_model_chat_template(const struct llama_model * model); +@ctypes_function("llama_model_chat_template", [llama_model_p_ctypes], ctypes.c_char_p) +def llama_model_chat_template(model: llama_model_p, /) -> Optional[bytes]: + """Get the default chat template. Returns None if not available""" + ... + + # // Returns the total number of parameters in the model # LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); @ctypes_function("llama_model_n_params", [llama_model_p_ctypes], ctypes.c_uint64) @@ -1472,37 +1560,48 @@ def llama_model_quantize( # // Load a LoRA adapter from file -# // The loaded adapter will be associated to the given model, and will be free when the model is deleted -# LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init( +# LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init( # struct llama_model * model, # const char * path_lora); @ctypes_function( - "llama_lora_adapter_init", + "llama_adapter_lora_init", [llama_model_p_ctypes, ctypes.c_char_p], - llama_lora_adapter_p_ctypes, + llama_adapter_lora_p_ctypes, ) -def llama_lora_adapter_init( +def llama_adapter_lora_init( model: llama_model_p, path_lora: bytes, / -) -> Optional[llama_lora_adapter_p]: - """Load a LoRA adapter from file - The loaded adapter will be associated to the given model, and will be free when the model is deleted - """ +) -> Optional[llama_adapter_lora_p]: + ... + + +# // Manually free a LoRA adapter +# // Note: loaded adapters will be free when the associated model is deleted +# LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter); +@ctypes_function( + "llama_adapter_lora_free", + [llama_adapter_lora_p_ctypes], + None, +) +def llama_adapter_lora_free(adapter: llama_adapter_lora_p, /): ... +# // The following functions operate on a llama_context, hence the naming: llama_verb_... 
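A rough sketch of the renamed LoRA adapter lifecycle, combining llama_adapter_lora_init / llama_adapter_lora_free from this hunk with llama_set_adapter_lora renamed just below; model and ctx are assumed to come from the loading calls shown earlier, and the adapter path is a placeholder:

import llama_cpp

def attach_lora(ctx, model, path: bytes, scale: float = 1.0):
    adapter = llama_cpp.llama_adapter_lora_init(model, path)        # was llama_lora_adapter_init
    if adapter is None:
        raise RuntimeError("failed to load LoRA adapter")
    if llama_cpp.llama_set_adapter_lora(ctx, adapter, scale) != 0:  # was llama_lora_adapter_set
        llama_cpp.llama_adapter_lora_free(adapter)
        raise RuntimeError("failed to attach LoRA adapter")
    return adapter   # free explicitly via llama_adapter_lora_free, or let model teardown free it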
+ + # // Add a loaded LoRA adapter to given context # // This will not modify model's weight -# LLAMA_API int32_t llama_lora_adapter_set( +# LLAMA_API int32_t llama_set_adapter_lora( # struct llama_context * ctx, -# struct llama_lora_adapter * adapter, +# struct llama_adapter_lora * adapter, # float scale); @ctypes_function( - "llama_lora_adapter_set", - [llama_context_p_ctypes, llama_lora_adapter_p_ctypes, ctypes.c_float], + "llama_set_adapter_lora", + [llama_context_p_ctypes, llama_adapter_lora_p_ctypes, ctypes.c_float], ctypes.c_int32, ) -def llama_lora_adapter_set( - ctx: llama_context_p, adapter: llama_lora_adapter_p, scale: float, / +def llama_set_adapter_lora( + ctx: llama_context_p, adapter: llama_adapter_lora_p, scale: float, / ) -> int: """Add a loaded LoRA adapter to given context This will not modify model's weight""" @@ -1511,64 +1610,49 @@ def llama_lora_adapter_set( # // Remove a specific LoRA adapter from given context # // Return -1 if the adapter is not present in the context -# LLAMA_API int32_t llama_lora_adapter_remove( +# LLAMA_API int32_t llama_rm_adapter_lora( # struct llama_context * ctx, -# struct llama_lora_adapter * adapter); +# struct llama_adapter_lora * adapter); @ctypes_function( - "llama_lora_adapter_remove", - [llama_context_p_ctypes, llama_lora_adapter_p_ctypes], + "llama_rm_adapter_lora", + [llama_context_p_ctypes, llama_adapter_lora_p_ctypes], ctypes.c_int32, ) -def llama_lora_adapter_remove( - ctx: llama_context_p, adapter: llama_lora_adapter_p, / +def llama_rm_adapter_lora( + ctx: llama_context_p, adapter: llama_adapter_lora_p, / ) -> int: - """Remove a LoRA adapter from given context + """Remove a specific LoRA adapter from given context Return -1 if the adapter is not present in the context""" ... # // Remove all LoRA adapters from given context -# LLAMA_API void llama_lora_adapter_clear( -# struct llama_context * ctx); +# LLAMA_API void llama_clear_adapter_lora(struct llama_context * ctx); @ctypes_function( - "llama_lora_adapter_clear", + "llama_clear_adapter_lora", [llama_context_p_ctypes], None, ) -def llama_lora_adapter_clear(ctx: llama_context_p, /): +def llama_clear_adapter_lora(ctx: llama_context_p, /): """Remove all LoRA adapters from given context""" ... -# // Manually free a LoRA adapter -# // Note: loaded adapters will be free when the associated model is deleted -# LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter); -@ctypes_function( - "llama_lora_adapter_free", - [llama_lora_adapter_p_ctypes], - None, -) -def llama_lora_adapter_free(adapter: llama_lora_adapter_p, /): - """Manually free a LoRA adapter - Note: loaded adapters will be free when the associated model is deleted""" - ... - - # // Apply a loaded control vector to a llama_context, or if data is NULL, clear # // the currently loaded vector. # // n_embd should be the size of a single layer's control, and data should point # // to an n_embd x n_layers buffer starting from layer 1. # // il_start and il_end are the layer range the vector should apply to (both inclusive) # // See llama_control_vector_load in common to load a control vector. 
-# LLAMA_API int32_t llama_control_vector_apply( -# struct llama_context * lctx, +# LLAMA_API int32_t llama_apply_adapter_cvec( +# struct llama_context * ctx, # const float * data, # size_t len, # int32_t n_embd, # int32_t il_start, # int32_t il_end); @ctypes_function( - "llama_control_vector_apply", + "llama_apply_adapter_cvec", [ llama_context_p_ctypes, ctypes.POINTER(ctypes.c_float), @@ -1579,8 +1663,8 @@ def llama_lora_adapter_free(adapter: llama_lora_adapter_p, /): ], ctypes.c_int32, ) -def llama_control_vector_apply( - lctx: llama_context_p, +def llama_apply_adapter_cvec( + ctx: llama_context_p, data: CtypesPointerOrRef[ctypes.c_float], len: int, n_embd: int, @@ -2577,53 +2661,53 @@ def llama_get_embeddings_seq( # // -# LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token); +# LLAMA_API const char * llama_vocab_get_text(const struct llama_vocab * vocab, llama_token token); @ctypes_function( - "llama_token_get_text", [llama_model_p_ctypes, llama_token], ctypes.c_char_p + "llama_vocab_get_text", [llama_vocab_p_ctypes, llama_token], ctypes.c_char_p ) -def llama_token_get_text( - model: llama_model_p, token: Union[llama_token, int], / +def llama_vocab_get_text( + vocab: llama_vocab_p, token: Union[llama_token, int], / ) -> bytes: ... -# LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token); +# LLAMA_API float llama_vocab_get_score(const struct llama_vocab * vocab, llama_token token); @ctypes_function( - "llama_token_get_score", [llama_model_p_ctypes, llama_token], ctypes.c_float + "llama_vocab_get_score", [llama_vocab_p_ctypes, llama_token], ctypes.c_float ) -def llama_token_get_score( - model: llama_model_p, token: Union[llama_token, int], / +def llama_vocab_get_score( + vocab: llama_vocab_p, token: Union[llama_token, int], / ) -> float: ... -# LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token); +# LLAMA_API enum llama_token_attr llama_vocab_get_attr(const struct llama_vocab * vocab, llama_token token); @ctypes_function( - "llama_token_get_attr", [llama_model_p_ctypes, llama_token], ctypes.c_int + "llama_vocab_get_attr", [llama_vocab_p_ctypes, llama_token], ctypes.c_int ) -def llama_token_get_attr( - model: llama_model_p, token: Union[llama_token, int], / +def llama_vocab_get_attr( + vocab: llama_vocab_p, token: Union[llama_token, int], / ) -> int: ... # // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.) -# LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token); +# LLAMA_API bool llama_vocab_is_eog(const struct llama_vocab * vocab, llama_token token); @ctypes_function( - "llama_token_is_eog", [llama_model_p_ctypes, llama_token], ctypes.c_bool + "llama_vocab_is_eog", [llama_vocab_p_ctypes, llama_token], ctypes.c_bool ) -def llama_token_is_eog(model: llama_model_p, token: Union[llama_token, int], /) -> bool: +def llama_vocab_is_eog(vocab: llama_vocab_p, token: Union[llama_token, int], /) -> bool: """Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)""" ... 
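A tiny sketch of the vocab-based end-of-generation check that a hand-rolled decode loop would use in place of the old llama_token_is_eog(model, token) spelling:

import llama_cpp

def is_stop_token(model: llama_cpp.llama_model_p, token: int) -> bool:
    vocab = llama_cpp.llama_model_get_vocab(model)
    # True for EOS, EOT and other end-of-generation tokens
    return llama_cpp.llama_vocab_is_eog(vocab, token)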
# // Identify if Token Id is a control token or a render-able token -# LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token); +# LLAMA_API bool llama_vocab_is_control(const struct llama_vocab * vocab, llama_token token); @ctypes_function( - "llama_token_is_control", [llama_model_p_ctypes, llama_token], ctypes.c_bool + "llama_vocab_is_control", [llama_vocab_p_ctypes, llama_token], ctypes.c_bool ) -def llama_token_is_control( - model: llama_model_p, token: Union[llama_token, int], / +def llama_vocab_is_control( + vocab: llama_vocab_p, token: Union[llama_token, int], / ) -> bool: """Identify if Token Id is a control token or a render-able token""" ... @@ -2632,110 +2716,335 @@ def llama_token_is_control( # // Special tokens -# LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence -@ctypes_function("llama_token_bos", [llama_model_p_ctypes], llama_token) -def llama_token_bos(model: llama_model_p, /) -> int: +# LLAMA_API llama_token llama_vocab_bos(const struct llama_vocab * vocab); // beginning-of-sentence +@ctypes_function("llama_vocab_bos", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_bos(vocab: llama_vocab_p, /) -> llama_token: """beginning-of-sentence""" ... -# LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence -@ctypes_function("llama_token_eos", [llama_model_p_ctypes], llama_token) -def llama_token_eos(model: llama_model_p, /) -> int: +# LLAMA_API llama_token llama_vocab_eos(const struct llama_vocab * vocab); // end-of-sentence +@ctypes_function("llama_vocab_eos", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_eos(vocab: llama_vocab_p, /) -> llama_token: """end-of-sentence""" ... -# LLAMA_API llama_token llama_token_eot(const struct llama_model * model); // end-of-turn -@ctypes_function("llama_token_eot", [llama_model_p_ctypes], llama_token) -def llama_token_eot(model: llama_model_p, /) -> int: +# LLAMA_API llama_token llama_vocab_eot(const struct llama_vocab * vocab); // end-of-turn +@ctypes_function("llama_vocab_eot", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_eot(vocab: llama_vocab_p, /) -> llama_token: """end-of-turn""" ... -# LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification -@ctypes_function("llama_token_cls", [llama_model_p_ctypes], llama_token) -def llama_token_cls(model: llama_model_p, /) -> int: - """classification""" +# LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator +@ctypes_function("llama_vocab_sep", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_sep(vocab: llama_vocab_p, /) -> llama_token: + """sentence separator""" ... -# LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator -@ctypes_function("llama_token_sep", [llama_model_p_ctypes], llama_token) -def llama_token_sep(model: llama_model_p, /) -> int: - """sentence separator""" +# LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line +@ctypes_function("llama_vocab_nl", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_nl(vocab: llama_vocab_p, /) -> llama_token: + """next-line""" ... 
-# LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line -@ctypes_function("llama_token_nl", [llama_model_p_ctypes], llama_token) -def llama_token_nl(model: llama_model_p, /) -> int: - """next-line""" +# LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding +@ctypes_function("llama_vocab_pad", [llama_vocab_p_ctypes], llama_token) +def llama_vocab_pad(vocab: llama_vocab_p, /) -> llama_token: + """padding""" + ... + +# LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_get_add_bos", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_vocab_get_add_bos(vocab: llama_vocab_p, /) -> bool: ... -# LLAMA_API bool llama_add_bos_token(const struct llama_model * model); -@ctypes_function("llama_add_bos_token", [llama_model_p_ctypes], ctypes.c_bool) -def llama_add_bos_token(model: llama_model_p, /) -> bool: +# LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_get_add_eos", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_vocab_get_add_eos(vocab: llama_vocab_p, /) -> bool: ... -# LLAMA_API bool llama_add_eos_token(const struct llama_model * model); -@ctypes_function("llama_add_eos_token", [llama_model_p_ctypes], ctypes.c_bool) -def llama_add_eos_token(model: llama_model_p, /) -> bool: +# LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_pre", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_pre(vocab: llama_vocab_p, /) -> llama_token: ... -# // Codellama infill tokens -# DEPRECATED(LLAMA_API llama_token llama_token_prefix(const struct llama_model * model), "use llama_token_fim_pre instead"); -@ctypes_function("llama_token_prefix", [llama_model_p_ctypes], llama_token) -def llama_token_prefix(model: llama_model_p) -> int: - """codellama infill tokens""" +# LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_suf", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_suf(vocab: llama_vocab_p, /) -> llama_token: ... -# DEPRECATED(LLAMA_API llama_token llama_token_middle(const struct llama_model * model), "use llama_token_fim_mid instead"); -@ctypes_function("llama_token_middle", [llama_model_p_ctypes], llama_token) -def llama_token_middle(model: llama_model_p, /) -> int: +# LLAMA_API llama_token llama_vocab_fim_mid(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_mid", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_mid(vocab: llama_vocab_p, /) -> llama_token: ... -# DEPRECATED(LLAMA_API llama_token llama_token_suffix(const struct llama_model * model), "use llama_token_fim_suf instead"); -@ctypes_function("llama_token_suffix", [llama_model_p_ctypes], llama_token) -def llama_token_suffix(model: llama_model_p, /) -> int: +# LLAMA_API llama_token llama_vocab_fim_pad(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_pad(vocab: llama_vocab_p, /) -> llama_token: ... 
-# LLAMA_API llama_token llama_token_fim_pre(const struct llama_model * model); -@ctypes_function("llama_token_fim_pre", [llama_model_p_ctypes], llama_token) -def llama_token_fim_pre(model: llama_model_p, /) -> int: +# LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_rep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_rep(vocab: llama_vocab_p, /) -> llama_token: ... -# LLAMA_API llama_token llama_token_fim_suf(const struct llama_model * model); -@ctypes_function("llama_token_fim_suf", [llama_model_p_ctypes], llama_token) -def llama_token_fim_suf(model: llama_model_p, /) -> int: + +# LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab); +@ctypes_function( + "llama_vocab_fim_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_fim_sep(vocab: llama_vocab_p, /) -> llama_token: ... -# LLAMA_API llama_token llama_token_fim_mid(const struct llama_model * model); -@ctypes_function("llama_token_fim_mid", [llama_model_p_ctypes], llama_token) -def llama_token_fim_mid(model: llama_model_p, /) -> int: + + +# DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_text instead"); +@ctypes_function( + "llama_token_get_text", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_char_p, +) +def llama_token_get_text( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bytes: + ... + + +# DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead"); +@ctypes_function( + "llama_token_get_score", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_float, +) +def llama_token_get_score( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> float: + ... + +# DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead"); +@ctypes_function( + "llama_token_get_attr", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_int, +) +def llama_token_get_attr( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> int: + ... + +# DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead"); +@ctypes_function( + "llama_token_is_eog", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_bool, +) +def llama_token_is_eog( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bool: + ... + +# DEPRECATED(LLAMA_API bool llama_token_is_control(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_control instead"); +@ctypes_function( + "llama_token_is_control", + [llama_vocab_p_ctypes, llama_token], + ctypes.c_bool, +) +def llama_token_is_control( + vocab: llama_vocab_p, token: Union[llama_token, int], / +) -> bool: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_bos(const struct llama_vocab * vocab), "use llama_vocab_bos instead"); +@ctypes_function( + "llama_token_bos", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_bos(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_eos(const struct llama_vocab * vocab), "use llama_vocab_eos instead"); +@ctypes_function( + "llama_token_eos", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_eos(vocab: llama_vocab_p, /) -> int: + ... 
+ +# DEPRECATED(LLAMA_API llama_token llama_token_eot(const struct llama_vocab * vocab), "use llama_vocab_eot instead"); +@ctypes_function( + "llama_token_eot", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_eot(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_cls(const struct llama_vocab * vocab), "use llama_vocab_cls instead"); +@ctypes_function( + "llama_token_cls", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_cls(vocab: llama_vocab_p, /) -> int: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_sep(const struct llama_vocab * vocab), "use llama_vocab_sep instead"); +@ctypes_function( + "llama_token_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_sep(vocab: llama_vocab_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API llama_token llama_token_nl (const struct llama_vocab * vocab), "use llama_vocab_nl instead"); +@ctypes_function( + "llama_token_nl", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_nl(vocab: llama_vocab_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API llama_token llama_token_pad(const struct llama_vocab * vocab), "use llama_vocab_pad instead"); +@ctypes_function( + "llama_token_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_pad(vocab: llama_vocab_p, /) -> int: + ... + + +# DEPRECATED(LLAMA_API bool llama_add_bos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_bos instead"); +@ctypes_function( + "llama_add_bos_token", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_add_bos_token(vocab: llama_vocab_p, /) -> bool: + ... + +# DEPRECATED(LLAMA_API bool llama_add_eos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_eos instead"); +@ctypes_function( + "llama_add_eos_token", + [llama_vocab_p_ctypes], + ctypes.c_bool, +) +def llama_add_eos_token(vocab: llama_vocab_p, /) -> bool: + ... + + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_pre(const struct llama_vocab * vocab), "use llama_vocab_fim_pre instead"); +@ctypes_function( + "llama_token_fim_pre", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_pre(vocab: llama_vocab_p, /) -> llama_token: ... -# LLAMA_API llama_token llama_token_fim_pad(const struct llama_model * model); -@ctypes_function("llama_token_fim_pad", [llama_model_p_ctypes], llama_token) -def llama_token_fim_pad(model: llama_model_p, /) -> int: +# DEPRECATED(LLAMA_API llama_token llama_token_fim_suf(const struct llama_vocab * vocab), "use llama_vocab_fim_suf instead"); +@ctypes_function( + "llama_token_fim_suf", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_suf(vocab: llama_vocab_p, /) -> llama_token: ... -# LLAMA_API llama_token llama_token_fim_rep(const struct llama_model * model); -@ctypes_function("llama_token_fim_rep", [llama_model_p_ctypes], llama_token) -def llama_token_fim_rep(model: llama_model_p, /) -> int: +# DEPRECATED(LLAMA_API llama_token llama_token_fim_mid(const struct llama_vocab * vocab), "use llama_vocab_fim_mid instead"); +@ctypes_function( + "llama_token_fim_mid", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_mid(vocab: llama_vocab_p, /) -> llama_token: ... 
-# LLAMA_API llama_token llama_token_fim_sep(const struct llama_model * model); -@ctypes_function("llama_token_fim_sep", [llama_model_p_ctypes], llama_token) -def llama_token_fim_sep(model: llama_model_p, /) -> int: +# DEPRECATED(LLAMA_API llama_token llama_token_fim_pad(const struct llama_vocab * vocab), "use llama_vocab_fim_pad instead"); +@ctypes_function( + "llama_token_fim_pad", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_pad(vocab: llama_vocab_p, /) -> llama_token: ... +# DEPRECATED(LLAMA_API llama_token llama_token_fim_rep(const struct llama_vocab * vocab), "use llama_vocab_fim_rep instead"); +@ctypes_function( + "llama_token_fim_rep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_rep(vocab: llama_vocab_p, /) -> llama_token: + ... + +# DEPRECATED(LLAMA_API llama_token llama_token_fim_sep(const struct llama_vocab * vocab), "use llama_vocab_fim_sep instead"); +@ctypes_function( + "llama_token_fim_sep", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_token_fim_sep(vocab: llama_vocab_p, /) -> llama_token: + ... + +# // CLS is equivalent to BOS +# DEPRECATED(LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab), // classification +# "use llama_vocab_bos instead"); +@ctypes_function( + "llama_vocab_cls", + [llama_vocab_p_ctypes], + llama_token, +) +def llama_vocab_cls(vocab: llama_vocab_p, /) -> llama_token: + ... + + # // # // Tokenization # // @@ -2751,7 +3060,7 @@ def llama_token_fim_sep(model: llama_model_p, /) -> int: # /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated # /// as plaintext. Does not insert a leading space. # LLAMA_API int32_t llama_tokenize( -# const struct llama_model * model, +# const struct llama_vocab * vocab, # const char * text, # int32_t text_len, # llama_token * tokens, @@ -2761,7 +3070,7 @@ def llama_token_fim_sep(model: llama_model_p, /) -> int: @ctypes_function( "llama_tokenize", [ - llama_model_p_ctypes, + llama_vocab_p_ctypes, ctypes.c_char_p, ctypes.c_int32, llama_token_p, @@ -2772,7 +3081,7 @@ def llama_token_fim_sep(model: llama_model_p, /) -> int: ctypes.c_int32, ) def llama_tokenize( - model: llama_model_p, + vocab: llama_vocab_p, text: bytes, text_len: Union[ctypes.c_int, int], tokens: CtypesArray[llama_token], @@ -2784,7 +3093,7 @@ def llama_tokenize( """Convert the provided text into tokens. Args: - model: The model to use for tokenization. + vocab: The vocabulary to use for tokenization. text: The text to tokenize. text_len: The length of the text. tokens: The tokens pointer must be large enough to hold the resulting tokens. @@ -2805,7 +3114,7 @@ def llama_tokenize( # // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix') # // @param special If true, special tokens are rendered in the output. 
# LLAMA_API int32_t llama_token_to_piece( -# const struct llama_model * model, +# const struct llama_vocab * vocab, # llama_token token, # char * buf, # int32_t length, @@ -2814,7 +3123,7 @@ def llama_tokenize( @ctypes_function( "llama_token_to_piece", [ - llama_model_p_ctypes, + llama_vocab_p_ctypes, llama_token, ctypes.c_char_p, ctypes.c_int32, @@ -2824,7 +3133,7 @@ def llama_tokenize( ctypes.c_int32, ) def llama_token_to_piece( - model: llama_model_p, + vocab: llama_vocab_p, token: Union[llama_token, int], buf: Union[ctypes.c_char_p, bytes, CtypesArray[ctypes.c_char]], length: Union[ctypes.c_int, int], @@ -2838,7 +3147,7 @@ def llama_token_to_piece( User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens. Args: - model: The model to use for tokenization. + vocab: The vocabulary to use for tokenization. token: The token to convert. buf: The buffer to write the token to. length: The length of the buffer. @@ -2930,7 +3239,6 @@ def llama_detokenize( # /// @param length The size of the allocated buffer # /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template. # LLAMA_API int32_t llama_chat_apply_template( -# const struct llama_model * model, # const char * tmpl, # const struct llama_chat_message * chat, # size_t n_msg, @@ -2940,20 +3248,37 @@ def llama_detokenize( @ctypes_function( "llama_chat_apply_template", [ - ctypes.c_void_p, - ctypes.c_char_p, - ctypes.POINTER(llama_chat_message), - ctypes.c_size_t, + ctypes.c_char_p, # tmpl + ctypes.POINTER(llama_chat_message), # chat + ctypes.c_size_t, # n_msg + ctypes.c_bool, # add_ass (added) + ctypes.c_char_p, # buf + ctypes.c_int32, # length ], ctypes.c_int32, ) def llama_chat_apply_template( - model: llama_model_p, tmpl: bytes, chat: CtypesArray[llama_chat_message], n_msg: int, + add_ass: bool, # Added parameter + buf: bytes, + length: int, /, ) -> int: + """Apply chat template. + + Args: + tmpl: Template to use. If None, uses model's default + chat: Array of chat messages + n_msg: Number of messages + add_ass: Whether to end prompt with assistant token + buf: Output buffer + length: Buffer length + + Returns: + Number of bytes written, or needed if buffer too small + """ ... @@ -3022,7 +3347,6 @@ def llama_chat_builtin_templates( # // llama_sampler_free(smpl); # // # // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU). -# // TODO: in the future, the entire sampling API that uses llama_model should start using llama_vocab # // # typedef void * llama_sampler_context_t; @@ -3342,16 +3666,16 @@ def llama_sampler_init_mirostat_v2( # LLAMA_API struct llama_sampler * llama_sampler_init_grammar( -# const struct llama_model * model, +# const struct llama_vocab * vocab, # const char * grammar_str, # const char * grammar_root); @ctypes_function( "llama_sampler_init_grammar", - [llama_model_p_ctypes, ctypes.c_char_p, ctypes.c_char_p], + [llama_vocab_p_ctypes, ctypes.c_char_p, ctypes.c_char_p], llama_sampler_p_ctypes, ) def llama_sampler_init_grammar( - model: llama_model_p, grammar_str: bytes, grammar_root: bytes, / + vocab: llama_vocab_p, grammar_str: bytes, grammar_root: bytes, / ) -> llama_sampler_p: ... 
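With this change the grammar sampler is keyed to a llama_vocab rather than a llama_model. An illustrative caller-side sketch, assuming the long-standing llama_load_model_from_file/llama_free_model bindings and the llama_model_get_vocab accessor introduced by the same refactor; the model path and the GBNF rules are placeholders:

import llama_cpp

llama_cpp.llama_backend_init()
model = llama_cpp.llama_load_model_from_file(
    b"/path/to/model.gguf", llama_cpp.llama_model_default_params()
)
vocab = llama_cpp.llama_model_get_vocab(model)  # vocab accessor (assumed available in this refactor)
smpl = llama_cpp.llama_sampler_init_grammar(vocab, b'root ::= "yes" | "no"', b"root")  # previously took `model`
# ... add smpl to a sampler chain and use it while decoding ...
llama_cpp.llama_sampler_free(smpl)
llama_cpp.llama_free_model(model)
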
@@ -3379,7 +3703,8 @@ def llama_sampler_init_penalties( # /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 # LLAMA_API struct llama_sampler * llama_sampler_init_dry( -# const struct llama_model * model, +# const struct llama_vocab * vocab, +# int32_t n_ctx_train, # float dry_multiplier, # float dry_base, # int32_t dry_allowed_length, @@ -3389,7 +3714,8 @@ def llama_sampler_init_penalties( @ctypes_function( "llama_sampler_init_dry", [ - llama_model_p_ctypes, + llama_vocab_p_ctypes, + ctypes.c_int32, ctypes.c_float, ctypes.c_float, ctypes.c_int32, @@ -3400,7 +3726,8 @@ def llama_sampler_init_penalties( llama_sampler_p_ctypes, ) def llama_sampler_init_dry( - model: llama_model_p, + vocab: llama_vocab_p, + n_ctx_train: int, dry_multiplier: float, dry_base: float, dry_allowed_length: int, @@ -3448,15 +3775,13 @@ def llama_sampler_init_logit_bias( # // 3. discard non-EOG tokens with low prob # // 4. if no tokens are left -> pick EOT # // -# LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_model * model); +# LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab); @ctypes_function( "llama_sampler_init_infill", - [llama_model_p_ctypes], + [llama_vocab_p_ctypes], llama_sampler_p_ctypes, ) -def llama_sampler_init_infill(model: llama_model_p, /) -> llama_sampler_p: - """This sampler is meant to be used for fill-in-the-middle infilling. - """ +def llama_sampler_init_infill(vocab: llama_vocab_p, /) -> llama_sampler_p: ... diff --git a/vendor/llama.cpp b/vendor/llama.cpp index f7cd13301..ef6dada60 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit f7cd13301c2a88f97073fd119072b4cc92c08df1 +Subproject commit ef6dada60ca710f4edfbb6fd9e1258685d8ea49d From 0b89fe48ad26ffcff76451bd87642d916a1a3385 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Tue, 28 Jan 2025 21:40:53 -0500 Subject: [PATCH 45/55] feat: Update llama.cpp --- llama_cpp/llama_cpp.py | 10 ++++++---- vendor/llama.cpp | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 7845f0d1c..2b9edbb99 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -1483,10 +1483,12 @@ def llama_model_size(model: llama_model_p, /) -> int: # // Get the default chat template. Returns nullptr if not available -# LLAMA_API const char * llama_model_chat_template(const struct llama_model * model); -@ctypes_function("llama_model_chat_template", [llama_model_p_ctypes], ctypes.c_char_p) -def llama_model_chat_template(model: llama_model_p, /) -> Optional[bytes]: - """Get the default chat template. Returns None if not available""" +# // If name is NULL, returns the default chat template +# LLAMA_API const char * llama_model_chat_template(const struct llama_model * model, const char * name); +@ctypes_function("llama_model_chat_template", [llama_model_p_ctypes, ctypes.c_char_p], ctypes.c_char_p) +def llama_model_chat_template(model: llama_model_p, name: Optional[bytes], /) -> Optional[bytes]: + """Get the default chat template. Returns None if not available + If name is None, returns the default chat template""" ... 
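A hedged sketch of the updated llama_model_chat_template call, assuming `model` is an already-loaded llama_model_p (see the loading sketch above); passing None for `name` selects the model's default template:

# model: llama_model_p obtained from llama_load_model_from_file(...)
tmpl = llama_cpp.llama_model_chat_template(model, None)  # None -> default chat template
print(tmpl.decode("utf-8") if tmpl is not None else "model ships no chat template")
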
diff --git a/vendor/llama.cpp b/vendor/llama.cpp index ef6dada60..794fe23f2 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit ef6dada60ca710f4edfbb6fd9e1258685d8ea49d +Subproject commit 794fe23f29fb40104975c91fe19f23798f7c726e From 14879c7819eb023a6a272e0f18aca887ae605c0f Mon Sep 17 00:00:00 2001 From: oobabooga Date: Tue, 28 Jan 2025 23:43:29 -0300 Subject: [PATCH 46/55] fix(ci): Fix the CUDA workflow (#1894) --- .github/workflows/build-wheels-cuda.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/build-wheels-cuda.yaml b/.github/workflows/build-wheels-cuda.yaml index 3d410148f..745b2e602 100644 --- a/.github/workflows/build-wheels-cuda.yaml +++ b/.github/workflows/build-wheels-cuda.yaml @@ -61,11 +61,9 @@ jobs: - name: Setup Mamba uses: conda-incubator/setup-miniconda@v3.1.0 with: - activate-environment: "build" + activate-environment: "llamacpp" python-version: ${{ matrix.pyver }} - miniforge-variant: Mambaforge miniforge-version: latest - use-mamba: true add-pip-as-python-dependency: true auto-activate-base: false From 4442ff8e0a09feafcf7c91045ebbe0ba6f026feb Mon Sep 17 00:00:00 2001 From: Shaka Huang Date: Wed, 29 Jan 2025 11:57:37 +0800 Subject: [PATCH 47/55] fix: error showing time spent in llama perf context print (#1898) * feat: Sync with llama.cpp Add `no_perf` field to `llama_context_params` to optionally disable performance timing measurements. * fix: Display performance metrics by default --------- Co-authored-by: Andrei --- llama_cpp/llama.py | 4 ++++ llama_cpp/llama_cpp.py | 3 +++ 2 files changed, 7 insertions(+) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index 11fc744a0..7e9a6af23 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -94,6 +94,7 @@ def __init__( offload_kqv: bool = True, flash_attn: bool = False, # Sampling Params + no_perf: bool = False, last_n_tokens_size: int = 64, # LoRA Params lora_base: Optional[str] = None, @@ -173,6 +174,7 @@ def __init__( embedding: Embedding mode only. offload_kqv: Offload K, Q, V to GPU. flash_attn: Use flash attention. + no_perf: Measure performance timings. last_n_tokens_size: Maximum number of tokens to keep in the last_n_tokens deque. lora_base: Optional path to base model, useful if using a quantized base model and you want to apply LoRA to an f16 model. lora_path: Path to a LoRA file to apply to the model. 
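A short usage sketch for the new constructor flag at the high-level API; the model path is a placeholder. Setting no_perf=True turns the performance-timing measurements off, while the default False keeps them, which is the behaviour restored by this fix:

from llama_cpp import Llama

llm = Llama(model_path="/path/to/model.gguf", no_perf=True)  # disable perf timing collection
out = llm("Q: What is the capital of France? A:", max_tokens=16)
print(out["choices"][0]["text"])
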
@@ -351,6 +353,7 @@ def __init__( if type_v is not None: self.context_params.type_v = type_v # Sampling Params + self.context_params.no_perf = no_perf self.last_n_tokens_size = last_n_tokens_size self.cache: Optional[BaseLlamaCache] = None @@ -2093,6 +2096,7 @@ def __getstate__(self): offload_kqv=self.context_params.offload_kqv, flash_attn=self.context_params.flash_attn, # Sampling Params + no_perf=self.context_params.no_perf, last_n_tokens_size=self.last_n_tokens_size, # LoRA Params lora_base=self.lora_base, diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 2b9edbb99..205d89a0b 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -780,6 +780,7 @@ class llama_context_params(ctypes.Structure): embeddings (bool): if true, extract embeddings (together with logits) offload_kqv (bool): whether to offload the KQV ops (including the KV cache) to GPU flash_attn (bool): whether to use flash attention + no_perf (bool): whether to measure performance timings abort_callback (ggml_abort_callback): abort callback if it returns true, execution of llama_decode() will be aborted abort_callback_data (ctypes.ctypes.c_void_p): data for abort_callback """ @@ -810,6 +811,7 @@ class llama_context_params(ctypes.Structure): embeddings: bool offload_kqv: bool flash_attn: bool + no_perf: bool abort_callback: Callable[[ctypes.c_void_p], bool] abort_callback_data: ctypes.c_void_p @@ -839,6 +841,7 @@ class llama_context_params(ctypes.Structure): ("embeddings", ctypes.c_bool), ("offload_kqv", ctypes.c_bool), ("flash_attn", ctypes.c_bool), + ("no_perf", ctypes.c_bool), ("abort_callback", ggml_abort_callback), ("abort_callback_data", ctypes.c_void_p), ] From 710e19a81284e5af0d5db93cef7a9063b3e8534f Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Tue, 28 Jan 2025 23:19:04 -0500 Subject: [PATCH 48/55] chore: Bump version --- CHANGELOG.md | 6 ++++++ llama_cpp/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b4f49c904..53365e368 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.7] + +- feat: Update llama.cpp to ggerganov/llama.cpp@794fe23f29fb40104975c91fe19f23798f7c726e +- fix(ci): Fix the CUDA workflow by @oobabooga in #1894 +- fix: error showing time spent in llama perf context print, adds `no_perf` flag to `Llama` class by @shakalaca in #1898 + ## [0.3.6] - feat: Update llama.cpp to ggerganov/llama.cpp@f7cd13301c2a88f97073fd119072b4cc92c08df1 diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py index 8c6118fb4..fc1fcbcf6 100644 --- a/llama_cpp/__init__.py +++ b/llama_cpp/__init__.py @@ -1,4 +1,4 @@ from .llama_cpp import * from .llama import * -__version__ = "0.3.6" +__version__ = "0.3.7" From 344c106a6961b50f734b95f084fbf02057d4b475 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Wed, 12 Mar 2025 04:11:42 -0400 Subject: [PATCH 49/55] feat: Update llama.cpp --- llama_cpp/llama_cpp.py | 74 ++++++++++++++++++++++++++++++++++++++++-- vendor/llama.cpp | 2 +- 2 files changed, 72 insertions(+), 4 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 205d89a0b..f3985ad2f 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -227,6 +227,7 @@ # LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26, # LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, # LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28, +# LLAMA_VOCAB_PRE_TYPE_GPT4O = 29, # }; LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0 LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1 @@ 
-257,6 +258,7 @@ LLAMA_VOCAB_PRE_TYPE_CHAMELEON = 26 LLAMA_VOCAB_PRE_TYPE_MINERVA = 27 LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28 +LLAMA_VOCAB_PRE_TYPE_GPT4O = 29 # // note: these values should be synchronized with ggml_rope @@ -1357,6 +1359,12 @@ def llama_model_n_head(model: llama_model_p, /) -> int: ... +# LLAMA_API int32_t llama_model_n_head_kv (const struct llama_model * model); +@ctypes_function("llama_model_n_head_kv", [llama_model_p_ctypes], ctypes.c_int32) +def llama_model_n_head_kv(model: llama_model_p, /) -> int: + ... + + # // Get the model's RoPE frequency scaling factor # LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model); @ctypes_function("llama_model_rope_freq_scale_train", [llama_model_p_ctypes], ctypes.c_float) @@ -3375,8 +3383,8 @@ class llama_sampler_i(ctypes.Structure): # struct llama_sampler { -# struct llama_sampler_i * iface; -# llama_sampler_context_t ctx; +# const struct llama_sampler_i * iface; +# llama_sampler_context_t ctx; # }; class llama_sampler(ctypes.Structure): _fields_ = [ @@ -3410,6 +3418,18 @@ class llama_sampler(ctypes.Structure): # // mirror of llama_sampler_i: +# LLAMA_API struct llama_sampler * llama_sampler_init (const struct llama_sampler_i * iface, llama_sampler_context_t ctx); +@ctypes_function( + "llama_sampler_init", + [ctypes.POINTER(llama_sampler_i), llama_sampler_context_t], + llama_sampler_p_ctypes, +) +def llama_sampler_init( + iface: ctypes.POINTER(llama_sampler_i), ctx: llama_sampler_context_t, / +) -> llama_sampler_p: + ... + + # LLAMA_API const char * llama_sampler_name (const struct llama_sampler * smpl); @ctypes_function( "llama_sampler_name", @@ -3627,6 +3647,17 @@ def llama_sampler_init_xtc( ... +# /// @details Top n sigma sampling as described in academic paper "Top-nσ: Not All Logits Are You Need" https://arxiv.org/pdf/2411.07641 +# LLAMA_API struct llama_sampler * llama_sampler_init_top_n_sigma(float n); +@ctypes_function( + "llama_sampler_init_top_n_sigma", + [ctypes.c_float], + llama_sampler_p_ctypes, +) +def llama_sampler_init_top_n_sigma(n: float, /) -> llama_sampler_p: + ... + + # /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. # /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. # /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. @@ -3685,6 +3716,43 @@ def llama_sampler_init_grammar( ... +# /// @details Lazy grammar sampler, introduced in https://github.com/ggml-org/llama.cpp/pull/9639 +# /// @param trigger_patterns A list of patterns that will trigger the grammar sampler. Pattern will be matched from the start of the generation output, and grammar sampler will be fed content starting from its first match group. +# /// @param trigger_tokens A list of tokens that will trigger the grammar sampler. Grammar sampler will be fed content starting from the trigger token included. 
+# LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy_patterns( +# const struct llama_vocab * vocab, +# const char * grammar_str, +# const char * grammar_root, +# const char ** trigger_patterns, +# size_t num_trigger_patterns, +# const llama_token * trigger_tokens, +# size_t num_trigger_tokens); +@ctypes_function( + "llama_sampler_init_grammar_lazy_patterns", + [ + llama_vocab_p_ctypes, + ctypes.c_char_p, + ctypes.c_char_p, + ctypes.POINTER(ctypes.c_char_p), + ctypes.c_size_t, + ctypes.POINTER(llama_token), + ctypes.c_size_t, + ], + llama_sampler_p_ctypes, +) +def llama_sampler_init_grammar_lazy_patterns( + vocab: llama_vocab_p, + grammar_str: bytes, + grammar_root: bytes, + trigger_patterns: CtypesArray[bytes], + num_trigger_patterns: int, + trigger_tokens: CtypesArray[llama_token], + num_trigger_tokens: int, + /, +) -> llama_sampler_p: + ... + + # /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. # LLAMA_API struct llama_sampler * llama_sampler_init_penalties( # int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) @@ -3737,7 +3805,7 @@ def llama_sampler_init_dry( dry_base: float, dry_allowed_length: int, dry_penalty_last_n: int, - seq_breakers: CtypesArray[bytes], + seq_breakers, num_breakers: int, /, ) -> llama_sampler_p: diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 794fe23f2..2c9f833d1 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 794fe23f29fb40104975c91fe19f23798f7c726e +Subproject commit 2c9f833d17bb5b8ea89dec663b072b5420fc5438 From e232fae1a35c0bdaabf230a5d71780c0178e5d8f Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Wed, 12 Mar 2025 04:44:18 -0400 Subject: [PATCH 50/55] feat: Update llama.cpp --- vendor/llama.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 2c9f833d1..7841fc723 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 2c9f833d17bb5b8ea89dec663b072b5420fc5438 +Subproject commit 7841fc723e059d1fd9640e5c0ef19050fcc7c698 From 37eb5f0a4c2a8706b89ead1406b1577c4602cdec Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Wed, 12 Mar 2025 05:30:21 -0400 Subject: [PATCH 51/55] chore: Bump version --- CHANGELOG.md | 4 ++++ llama_cpp/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 53365e368..605370e7d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.8] + +- feat: Update llama.cpp to ggerganov/llama.cpp@7841fc723e059d1fd9640e5c0ef19050fcc7c698 + ## [0.3.7] - feat: Update llama.cpp to ggerganov/llama.cpp@794fe23f29fb40104975c91fe19f23798f7c726e diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py index fc1fcbcf6..b1a8b9baa 100644 --- a/llama_cpp/__init__.py +++ b/llama_cpp/__init__.py @@ -1,4 +1,4 @@ from .llama_cpp import * from .llama import * -__version__ = "0.3.7" +__version__ = "0.3.8" From 99f2ebfde18912adeb7f714b49c1ddb624df3087 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 11 Apr 2025 03:28:20 -0400 Subject: [PATCH 52/55] feat: Update llama.cpp --- llama_cpp/llama_cpp.py | 268 ++++++++++++++++++++++++++++++++++++++--- vendor/llama.cpp | 2 +- 2 files changed, 255 insertions(+), 15 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index f3985ad2f..710bd83c8 100644 
--- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -165,6 +165,10 @@ # llama_sampler_p = NewType("llama_sampler_p", int) # llama_sampler_p_ctypes = ctypes.c_void_p +# struct llama_kv_cache; +llama_kv_cache_p = NewType("llama_kv_cache_p", int) +llama_kv_cache_p_ctypes = ctypes.c_void_p + # typedef int32_t llama_pos; llama_pos = ctypes.c_int32 # typedef int32_t llama_token; @@ -228,6 +232,9 @@ # LLAMA_VOCAB_PRE_TYPE_MINERVA = 27, # LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28, # LLAMA_VOCAB_PRE_TYPE_GPT4O = 29, +# LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30, +# LLAMA_VOCAB_PRE_TYPE_TRILLION = 31, +# LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32, # }; LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0 LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1 @@ -259,6 +266,9 @@ LLAMA_VOCAB_PRE_TYPE_MINERVA = 27 LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28 LLAMA_VOCAB_PRE_TYPE_GPT4O = 29 +LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30 +LLAMA_VOCAB_PRE_TYPE_TRILLION = 31 +LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32 # // note: these values should be synchronized with ggml_rope @@ -630,10 +640,19 @@ class llama_model_kv_override(ctypes.Structure): value: Union[int, float, bool, bytes] +# struct llama_model_tensor_buft_override { +# const char * pattern; +# ggml_backend_buffer_type_t buft; +# }; + + # struct llama_model_params { # // NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) # ggml_backend_dev_t * devices; +# // NULL-terminated list of buffer types to use for tensors that match a pattern +# const struct llama_model_tensor_buft_override * tensor_buft_overrides; + # int32_t n_gpu_layers; // number of layers to store in VRAM # enum llama_split_mode split_mode; // how to split the model across multiple GPUs @@ -668,6 +687,8 @@ class llama_model_params(ctypes.Structure): """Parameters for llama_model Attributes: + devices (ctypes.Array[ggml_backend_dev_t]): NULL-terminated list of devices to use for offloading (if NULL, all available devices are used) + tensor_buft_overrides (ctypes.Array[llama_model_tensor_buft_override]): NULL-terminated list of buffer types to use for tensors that match a pattern n_gpu_layers (int): number of layers to store in VRAM split_mode (int): how to split the model across multiple GPUs main_gpu (int): the GPU that is used for the entire model. main_gpu interpretation depends on split_mode: LLAMA_SPLIT_NONE: the GPU that is used for the entire model LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results LLAMA_SPLIT_LAYER: ignored @@ -681,6 +702,8 @@ class llama_model_params(ctypes.Structure): check_tensors (bool): validate model tensor data""" if TYPE_CHECKING: + devices: CtypesArray[ctypes.c_void_p] # NOTE: unused + tensor_buft_overrides: CtypesArray[llama_model_tensor_buft_override] # NOTE: unused n_gpu_layers: int split_mode: int main_gpu: int @@ -695,6 +718,7 @@ class llama_model_params(ctypes.Structure): _fields_ = [ ("devices", ctypes.c_void_p), # NOTE: unnused + ("tensor_buft_overrides", ctypes.c_void_p), # NOTE: unused ("n_gpu_layers", ctypes.c_int32), ("split_mode", ctypes.c_int), ("main_gpu", ctypes.c_int32), @@ -1317,7 +1341,18 @@ def llama_get_model(ctx: llama_context_p, /) -> Optional[llama_model_p]: ... 
-# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); +# LLAMA_API struct llama_kv_cache * llama_get_kv_self ( struct llama_context * ctx); +@ctypes_function( + "llama_get_kv_self", + [llama_context_p_ctypes], + llama_kv_cache_p_ctypes, +) +def llama_get_kv_self(ctx: llama_context_p, /) -> Optional[llama_kv_cache_p]: + """Get the KV cache for self-attention""" + ... + + +# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); @ctypes_function("llama_pooling_type", [llama_context_p_ctypes], ctypes.c_int) def llama_pooling_type(ctx: llama_context_p, /) -> int: ... @@ -1810,7 +1845,19 @@ def llama_kv_cache_view_update(ctx: llama_context_p, view: CtypesPointerOrRef[ll # // Returns the number of tokens in the KV cache (slow, use only for debug) # // If a KV cell has multiple sequences assigned to it, it will be counted multiple times -# LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx); +# LLAMA_API int32_t llama_kv_self_n_tokens(const struct llama_context * ctx); +@ctypes_function( + "llama_kv_self_n_tokens", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_kv_self_n_tokens(ctx: llama_context_p, /) -> int: + """Returns the number of tokens in the KV cache (slow, use only for debug) + If a KV cell has multiple sequences assigned to it, it will be counted multiple times + """ + ... + + +# DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx), +# "use llama_kv_self_n_tokens instead"); @ctypes_function( "llama_get_kv_cache_token_count", [llama_context_p_ctypes], ctypes.c_int32 ) @@ -1822,7 +1869,17 @@ def llama_get_kv_cache_token_count(ctx: llama_context_p, /) -> int: # // Returns the number of used KV cells (i.e. have at least one sequence assigned to them) -# LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx); +# LLAMA_API int32_t llama_kv_self_used_cells(const struct llama_context * ctx); +@ctypes_function( + "llama_kv_self_used_cells", [llama_context_p_ctypes], ctypes.c_int32 +) +def llama_kv_self_used_cells(ctx: llama_context_p, /) -> int: + """Returns the number of used KV cells (i.e. have at least one sequence assigned to them)""" + ... + + +# DEPRECATED(LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx), +# "use llama_kv_self_used_cells instead"); @ctypes_function( "llama_get_kv_cache_used_cells", [llama_context_p_ctypes], ctypes.c_int32 ) @@ -1832,9 +1889,17 @@ def llama_get_kv_cache_used_cells(ctx: llama_context_p, /) -> int: # // Clear the KV cache - both cell info is erased and KV data is zeroed -# LLAMA_API void llama_kv_cache_clear( +# LLAMA_API void llama_kv_self_clear( # struct llama_context * ctx); -@ctypes_function("llama_kv_cache_clear", [llama_context_p_ctypes], None) +@ctypes_function( + "llama_kv_self_clear", [llama_context_p_ctypes], None +) +def llama_kv_self_clear(ctx: llama_context_p, /): + """Clear the KV cache - both cell info is erased and KV data is zeroed""" + ... + +# NOTE: Deprecated +@ctypes_function("llama_kv_self_clear", [llama_context_p_ctypes], None) def llama_kv_cache_clear(ctx: llama_context_p, /): """Clear the KV cache""" ... 
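To make the rename concrete, a brief sketch of the new llama_kv_self_* entry points, assuming `ctx` is an existing llama_context_p created with llama_new_context_with_model; the llama_kv_cache_* wrappers above stay available as deprecated aliases bound to the same symbols:

n_tokens = llama_cpp.llama_kv_self_n_tokens(ctx)    # slow, debug-only count of tokens in the KV cache
n_cells = llama_cpp.llama_kv_self_used_cells(ctx)   # cells with at least one sequence assigned
llama_cpp.llama_kv_self_clear(ctx)                  # erase cell info and zero the KV data
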
@@ -1881,14 +1946,41 @@ def llama_kv_cache_seq_rm( # // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence # // p0 < 0 : [0, p1] # // p1 < 0 : [p0, inf) -# LLAMA_API void llama_kv_cache_seq_cp( +# LLAMA_API void llama_kv_self_seq_cp( # struct llama_context * ctx, # llama_seq_id seq_id_src, # llama_seq_id seq_id_dst, # llama_pos p0, # llama_pos p1); @ctypes_function( - "llama_kv_cache_seq_cp", + "llama_kv_self_seq_cp", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_seq_id, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_self_seq_cp( + ctx: llama_context_p, + seq_id_src: Union[llama_seq_id, int], + seq_id_dst: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + /, +): + """Copy all tokens that belong to the specified sequence to another sequence + Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# NOTE: Deprecated +@ctypes_function( + "llama_kv_self_seq_cp", [ llama_context_p_ctypes, llama_seq_id, @@ -1914,17 +2006,27 @@ def llama_kv_cache_seq_cp( # // Removes all tokens that do not belong to the specified sequence -# LLAMA_API void llama_kv_cache_seq_keep( +# LLAMA_API void llama_kv_self_seq_keep( # struct llama_context * ctx, # llama_seq_id seq_id); @ctypes_function( - "llama_kv_cache_seq_keep", [llama_context_p_ctypes, llama_seq_id], None + "llama_kv_self_seq_keep", [llama_context_p_ctypes, llama_seq_id], None +) +def llama_kv_self_seq_keep(ctx: llama_context_p, seq_id: Union[llama_seq_id, int], /): + """Removes all tokens that do not belong to the specified sequence""" + ... + + +# NOTE: Deprecated +@ctypes_function( + "llama_kv_self_seq_keep", [llama_context_p_ctypes, llama_seq_id], None ) def llama_kv_cache_seq_keep(ctx: llama_context_p, seq_id: Union[llama_seq_id, int], /): """Removes all tokens that do not belong to the specified sequence""" ... + # // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) # // If the KV cache is RoPEd, the KV data is updated accordingly: # // - lazily on next llama_decode() @@ -1938,7 +2040,48 @@ def llama_kv_cache_seq_keep(ctx: llama_context_p, seq_id: Union[llama_seq_id, in # llama_pos p1, # llama_pos delta); @ctypes_function( - "llama_kv_cache_seq_add", + "llama_kv_self_seq_add", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + llama_pos, + ], + None, +) +def llama_kv_self_seq_add( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + delta: Union[llama_pos, int], + /, +): + """Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) + If the KV cache is RoPEd, the KV data is updated accordingly: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update() + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... 
+ + +# // NOTE: Deprecated +# // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1) +# // If the KV cache is RoPEd, the KV data is updated accordingly: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_cache_update() +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_add( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# llama_pos delta); +@ctypes_function( + "llama_kv_self_seq_add", [ llama_context_p_ctypes, llama_seq_id, @@ -1976,7 +2119,44 @@ def llama_kv_cache_seq_add( # llama_pos p1, # int d); @ctypes_function( - "llama_kv_cache_seq_div", + "llama_kv_self_seq_div", + [ + llama_context_p_ctypes, + llama_seq_id, + llama_pos, + llama_pos, + ctypes.c_int, + ], + None, +) +def llama_kv_self_seq_div( + ctx: llama_context_p, + seq_id: Union[llama_seq_id, int], + p0: Union[llama_pos, int], + p1: Union[llama_pos, int], + d: Union[ctypes.c_int, int], + /, +): + """Integer division of the positions by factor of `d > 1` + If the KV cache is RoPEd, the KV data is updated accordingly + p0 < 0 : [0, p1] + p1 < 0 : [p0, inf)""" + ... + + +# // NOTE: Deprecated +# // Integer division of the positions by factor of `d > 1` +# // If the KV cache is RoPEd, the KV data is updated accordingly +# // p0 < 0 : [0, p1] +# // p1 < 0 : [p0, inf) +# LLAMA_API void llama_kv_cache_seq_div( +# struct llama_context * ctx, +# llama_seq_id seq_id, +# llama_pos p0, +# llama_pos p1, +# int d); +@ctypes_function( + "llama_kv_self_seq_div", [ llama_context_p_ctypes, llama_seq_id, @@ -2001,10 +2181,39 @@ def llama_kv_cache_seq_div( ... +# // Returns the largest position present in the KV cache for the specified sequence +# LLAMA_API llama_pos llama_kv_self_seq_pos_max( +# struct llama_context * ctx, +# llama_seq_id seq_id); +@ctypes_function( + "llama_kv_self_seq_pos_max", [llama_context_p_ctypes, llama_seq_id], llama_pos +) +def llama_kv_self_seq_pos_max( + ctx: llama_context_p, seq_id: Union[llama_seq_id, int], / +) -> int: + """Returns the largest position present in the KV cache for the specified sequence""" + ... + + # // Defragment the KV cache # // This will be applied: # // - lazily on next llama_decode() -# // - explicitly with llama_kv_cache_update() +# // - explicitly with llama_kv_self_update() +# LLAMA_API void llama_kv_self_defrag(struct llama_context * ctx); +@ctypes_function("llama_kv_self_defrag", [llama_context_p_ctypes], None) +def llama_kv_self_defrag(ctx: llama_context_p, /): + """Defragment the KV cache + This will be applied: + - lazily on next llama_decode() + - explicitly with llama_kv_cache_update()""" + ... + + +# NOTE: Deprecated +# // Defragment the KV cache +# // This will be applied: +# // - lazily on next llama_decode() +# // - explicitly with llama_kv_self_update() # LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx); @ctypes_function("llama_kv_cache_defrag", [llama_context_p_ctypes], None) def llama_kv_cache_defrag(ctx: llama_context_p, /): @@ -2017,7 +2226,15 @@ def llama_kv_cache_defrag(ctx: llama_context_p, /): # // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) 
# LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); -@ctypes_function("llama_kv_cache_update", [llama_context_p_ctypes], None) +@ctypes_function("llama_kv_self_update", [llama_context_p_ctypes], None) +def llama_kv_self_update(ctx: llama_context_p, /): + """Apply the KV cache updates (such as K-shifts, defragmentation, etc.)""" + ... + +# // NOTE: Deprecated +# // Apply the KV cache updates (such as K-shifts, defragmentation, etc.) +# LLAMA_API void llama_kv_cache_update(struct llama_context * ctx); +@ctypes_function("llama_kv_self_update", [llama_context_p_ctypes], None) def llama_kv_cache_update(ctx: llama_context_p, /): """Apply the KV cache updates (such as K-shifts, defragmentation, etc.)""" ... @@ -2025,7 +2242,16 @@ def llama_kv_cache_update(ctx: llama_context_p, /): # // Check if the context supports KV cache shifting # LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); -@ctypes_function("llama_kv_cache_can_shift", [llama_context_p_ctypes], ctypes.c_bool) +@ctypes_function("llama_kv_self_can_shift", [llama_context_p_ctypes], ctypes.c_bool) +def llama_kv_self_can_shift(ctx: llama_context_p, /) -> bool: + """Check if the context supports KV cache shifting""" + ... + + +# // NOTE: Deprecated +# // Check if the context supports KV cache shifting +# LLAMA_API bool llama_kv_cache_can_shift(struct llama_context * ctx); +@ctypes_function("llama_kv_self_can_shift", [llama_context_p_ctypes], ctypes.c_bool) def llama_kv_cache_can_shift(ctx: llama_context_p, /) -> bool: """Check if the context supports KV cache shifting""" ... @@ -2547,6 +2773,16 @@ def llama_set_causal_attn(ctx: llama_context_p, causal_attn: bool, /): ... +# // Set whether the model is in warmup mode or not +# // If true, all model tensors are activated during llama_decode() to load and cache their weights. +# LLAMA_API void llama_set_warmup(struct llama_context * ctx, bool warmup); +@ctypes_function("llama_set_warmup", [llama_context_p_ctypes, ctypes.c_bool], None) +def llama_set_warmup(ctx: llama_context_p, warmup: bool, /): + """Set whether the model is in warmup mode or not + If true, all model tensors are activated during llama_decode() to load and cache their weights.""" + ... + + # // Set abort callback # LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data); @ctypes_function( @@ -3701,6 +3937,10 @@ def llama_sampler_init_mirostat_v2( ... +# /// @details Intializes a GBNF grammar, see grammars/README.md for details. +# /// @param vocab The vocabulary that this grammar will be used with. +# /// @param grammar_str The production rules for the grammar, encoded as a string. Returns an empty grammar if empty. Returns NULL if parsing of grammar_str fails. +# /// @param grammar_root The name of the start symbol for the grammar. 
# LLAMA_API struct llama_sampler * llama_sampler_init_grammar( # const struct llama_vocab * vocab, # const char * grammar_str, diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 7841fc723..6bf28f011 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 7841fc723e059d1fd9640e5c0ef19050fcc7c698 +Subproject commit 6bf28f0111ff9f21b3c1b1eace20c590281e7ba6 From 4c6514d365cb4f00910424c2feb8e45e68a870a6 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 8 May 2025 06:52:52 -0400 Subject: [PATCH 53/55] feat: Update llama.cpp --- CMakeLists.txt | 2 +- llama_cpp/llama_cpp.py | 33 +++++++++++++++++++++------------ vendor/llama.cpp | 2 +- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 64a0304a1..443d1ee53 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -143,7 +143,7 @@ if (LLAMA_BUILD) endif() # Building llava - add_subdirectory(vendor/llama.cpp/examples/llava) + add_subdirectory(vendor/llama.cpp/tools/mtmd) set_target_properties(llava_shared PROPERTIES OUTPUT_NAME "llava") if (WIN32) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 710bd83c8..63de3a93a 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -235,6 +235,8 @@ # LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30, # LLAMA_VOCAB_PRE_TYPE_TRILLION = 31, # LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32, +# LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33, +# LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34, # }; LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0 LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1 @@ -252,7 +254,7 @@ LLAMA_VOCAB_PRE_TYPE_DBRX = 13 LLAMA_VOCAB_PRE_TYPE_SMAUG = 14 LLAMA_VOCAB_PRE_TYPE_PORO = 15 -LLAMA_VOCAV_PRE_TYPE_CHATGLM3 = 16 +LLAMA_VOCAB_PRE_TYPE_CHATGLM3 = 16 LLAMA_VOCAB_PRE_TYPE_CHATGLM4 = 17 LLAMA_VOCAB_PRE_TYPE_VIKING = 18 LLAMA_VOCAB_PRE_TYPE_JAIS = 19 @@ -269,6 +271,8 @@ LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30 LLAMA_VOCAB_PRE_TYPE_TRILLION = 31 LLAMA_VOCAB_PRE_TYPE_BAILINGMOE = 32 +LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33 +LLAMA_VOCAB_PRE_TYPE_PIXTRAL = 34 # // note: these values should be synchronized with ggml_rope @@ -891,17 +895,18 @@ class llama_context_params(ctypes.Structure): # // model quantization parameters # typedef struct llama_model_quantize_params { -# int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() -# enum llama_ftype ftype; // quantize to this llama_ftype -# enum ggml_type output_tensor_type; // output tensor type -# enum ggml_type token_embedding_type; // token embeddings tensor type -# bool allow_requantize; // allow quantizing non-f32/f16 tensors -# bool quantize_output_tensor; // quantize output.weight -# bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored -# bool pure; // quantize all tensors to the default type -# bool keep_split; // quantize to the same number of shards -# void * imatrix; // pointer to importance matrix data -# void * kv_overrides; // pointer to vector containing overrides +# int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() +# enum llama_ftype ftype; // quantize to this llama_ftype +# enum ggml_type output_tensor_type; // output tensor type +# enum ggml_type token_embedding_type; // token embeddings tensor type +# bool allow_requantize; // allow quantizing non-f32/f16 tensors +# bool quantize_output_tensor; // quantize output.weight +# bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored +# bool pure; // quantize 
all tensors to the default type +# bool keep_split; // quantize to the same number of shards +# void * imatrix; // pointer to importance matrix data +# void * kv_overrides; // pointer to vector containing overrides +# void * tensor_types; // pointer to vector containing tensor types # } llama_model_quantize_params; class llama_model_quantize_params(ctypes.Structure): """Parameters for llama_model_quantize @@ -918,6 +923,7 @@ class llama_model_quantize_params(ctypes.Structure): keep_split (bool): quantize to the same number of shards imatrix (ctypes.c_void_p): pointer to importance matrix data kv_overrides (ctypes.c_void_p): pointer to vector containing overrides + tensor_types (ctypes.c_void_p): pointer to vector containing tensor types """ if TYPE_CHECKING: @@ -932,6 +938,7 @@ class llama_model_quantize_params(ctypes.Structure): keep_split: bool imatrix: ctypes.c_void_p kv_overrides: ctypes.c_void_p + tensor_types: ctypes.c_void_p _fields_ = [ ("nthread", ctypes.c_int32), @@ -945,6 +952,7 @@ class llama_model_quantize_params(ctypes.Structure): ("keep_split", ctypes.c_bool), ("imatrix", ctypes.c_void_p), ("kv_overrides", ctypes.c_void_p), + ("tensor_types", ctypes.c_void_p), ] @@ -3812,6 +3820,7 @@ def llama_sampler_init_softmax() -> llama_sampler_p: # /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 +# /// Setting k <= 0 makes this a noop # LLAMA_API struct llama_sampler * llama_sampler_init_top_k (int32_t k); @ctypes_function("llama_sampler_init_top_k", [ctypes.c_int32], llama_sampler_p_ctypes) def llama_sampler_init_top_k(k: int) -> llama_sampler_p: diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 6bf28f011..8733e0cf6 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 6bf28f0111ff9f21b3c1b1eace20c590281e7ba6 +Subproject commit 8733e0cf6eefc7c7752297cc22d0836706f4222c From cb2edb9c9fef90fe680e528fadc86806c6f385e5 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 8 May 2025 06:59:55 -0400 Subject: [PATCH 54/55] chore: Bump version --- CHANGELOG.md | 4 ++++ llama_cpp/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 605370e7d..affbd5db7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.9] + +- feat: Update llama.cpp to ggerganov/llama.cpp@8733e0cf6eefc7c7752297cc22d0836706f4222c + ## [0.3.8] - feat: Update llama.cpp to ggerganov/llama.cpp@7841fc723e059d1fd9640e5c0ef19050fcc7c698 diff --git a/llama_cpp/__init__.py b/llama_cpp/__init__.py index b1a8b9baa..2c9c527cd 100644 --- a/llama_cpp/__init__.py +++ b/llama_cpp/__init__.py @@ -1,4 +1,4 @@ from .llama_cpp import * from .llama import * -__version__ = "0.3.8" +__version__ = "0.3.9" From b1d23df0bbd327b774083b5cf88e67ca0dd52b92 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 8 May 2025 07:02:20 -0400 Subject: [PATCH 55/55] hotfix: Disable curl support --- CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 443d1ee53..b9178e856 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -62,6 +62,9 @@ if (LLAMA_BUILD) # Enable building of the common library set(LLAMA_BUILD_COMMON ON CACHE BOOL "Build llama.cpp common library" FORCE) + # Disable building curl support + set(LLAMA_CURL OFF CACHE BOOL "llama.cpp: enable curl" FORCE) + # Architecture detection and settings for Apple 
platforms if (APPLE) # Get the target architecture