[bug#76999,2/2] gnu: llama-cpp: Update to 0.0.0-b4882.
* gnu/packages/machine-learning.scm (llama-cpp): Update to 0.0.0-b4882.
[inputs]: Add curl, glslang, and python-gguf-llama-cpp.
[native-inputs]: Replace bash with bash-minimal.
[source, homepage]: Update URLs.
[arguments]<#:configure-flags>: Pass GGML_BUILD_NUMBER; enable LLAMA_CURL.
[arguments]<#:phases>: Rename 'disable-unrunable-tests' to 'fix-tests' and
fix an additional test; run the installed Python scripts as a sanity check;
remove the 'wrap-python-scripts' phase.
* gnu/packages/patches/llama-cpp-vulkan-optional.patch: Delete.
* gnu/local.mk: Unregister patch.
Change-Id: Ic297534cd142cb83e3964eae21b4eb807b74e9bc
---
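Note: the llama-cpp-vulkan-optional patch (deleted in full at the end of
this message) added a runtime fallback for hosts without a working Vulkan
driver.  The pull request referenced in its header appears to have been
merged upstream between b4549 and b4882, which is presumably why the
origin can drop:

    (patches
     (search-patches "llama-cpp-vulkan-optional.patch"))
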
gnu/local.mk | 1 -
gnu/packages/machine-learning.scm | 41 +++++++++++--------
.../patches/llama-cpp-vulkan-optional.patch | 38 -----------------
3 files changed, 25 insertions(+), 55 deletions(-)
delete mode 100644 gnu/packages/patches/llama-cpp-vulkan-optional.patch
diff --git a/gnu/local.mk b/gnu/local.mk
--- a/gnu/local.mk
+++ b/gnu/local.mk
@@ -1841,7 +1841,6 @@ dist_patch_DATA = \
%D%/packages/patches/libmemcached-build-with-gcc7.patch \
%D%/packages/patches/libmhash-hmac-fix-uaf.patch \
- %D%/packages/patches/llama-cpp-vulkan-optional.patch \
%D%/packages/patches/llhttp-ponyfill-object-fromentries.patch \
%D%/packages/patches/lvm2-no-systemd.patch \
%D%/packages/patches/maturin-no-cross-compile.patch \
%D%/packages/patches/mcrypt-CVE-2012-4527.patch \
diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -77,6 +77,7 @@ (define-module (gnu packages machine-learning)
#:use-module (gnu packages cmake)
#:use-module (gnu packages cpp)
#:use-module (gnu packages cran)
+ #:use-module (gnu packages curl)
#:use-module (gnu packages databases)
#:use-module (gnu packages dejagnu)
#:use-module (gnu packages documentation)
@@ -585,7 +586,7 @@ (define-public guile-aiscm-next
(deprecated-package "guile-aiscm-next" guile-aiscm))

(define-public llama-cpp
- (let ((tag "b4549"))
+ (let ((tag "b4882"))
(package
(name "llama-cpp")
(version (string-append "0.0.0-" tag))
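
Note: upstream publishes only rolling build tags (b4549, b4882, ...), so
the synthetic "0.0.0-" prefix keeps Guix version comparisons sensible.  A
minimal sketch, runnable in `guix repl':

    (use-modules (guix utils))
    (version>? "0.0.0-b4882" "0.0.0-b4549") ; => #t: upgrades stay ordered
    (version>? "0.1.0" "0.0.0-b4882")       ; => #t: a real release wins
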
@@ -593,19 +594,19 @@ (define-public llama-cpp
(origin
(method git-fetch)
(uri (git-reference
- (url "https://github.com/ggerganov/llama.cpp")
+ (url "https://github.com/ggml-org/llama.cpp")
(commit tag)))
(file-name (git-file-name name tag))
(sha256
- (base32 "1xf2579q0r8nv06kj8padi6w9cv30w58vdys65nq8yzm3dy452a1"))
- (patches
- (search-patches "llama-cpp-vulkan-optional.patch"))))
+ (base32 "1mhh4293lgvyvyq58hpphqk18n5g2zadafpdf9icf7xlj0cf7bqc"))))
(build-system cmake-build-system)
(arguments
(list
#:configure-flags
- #~(list "-DBUILD_SHARED_LIBS=ON"
+ #~(list #$(string-append "-DGGML_BUILD_NUMBER=" tag)
+ "-DBUILD_SHARED_LIBS=ON"
"-DGGML_VULKAN=ON"
+ "-DLLAMA_CURL=ON"
"-DGGML_BLAS=ON"
"-DGGML_BLAS_VENDOR=OpenBLAS"
(string-append "-DBLAS_INCLUDE_DIRS="
@@ -635,13 +636,16 @@ (define-public llama-cpp
(substitute* "ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp"
(("\"/bin/sh\"")
(string-append "\"" (search-input-file inputs "/bin/sh") "\"")))))
- (add-after 'unpack 'disable-unrunable-tests
+ (add-after 'unpack 'fix-tests
(lambda _
;; test-eval-callback downloads ML model from network, cannot
;; run in Guix build environment
(substitute* '("examples/eval-callback/CMakeLists.txt")
(("COMMAND llama-eval-callback")
- "COMMAND true llama-eval-callback"))))
+ "COMMAND true llama-eval-callback"))
+ ;; Help it find the test files it needs
+ (substitute* "tests/test-chat.cpp"
+ (("\"\\.\\./\"") "\"../source/\""))))
(add-before 'install 'install-python-scripts
(lambda _
(let ((bin (string-append #$output "/bin/")))
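
Note on the test-chat substitution: cmake-build-system builds out of tree,
with the unpacked source in a sibling "source" directory, so the test's
hard-coded "../" prefix points at the build tree's parent instead of the
source tree.  A hypothetical alternative (fixture path assumed, not taken
from this patch) would copy the files next to the test binary instead:

    ;; Assumes the templates the test loads live under models/templates
    ;; in the source tree.
    (add-before 'check 'copy-test-fixtures
      (lambda _
        (copy-recursively "../source/models/templates"
                          "models/templates")))
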
@@ -657,23 +661,28 @@ (define-public llama-cpp
(get-string-all input))))))
(chmod (string-append bin script) #o555)))
(mkdir-p bin)
- (make-script "convert_hf_to_gguf")
- (make-script "convert_llama_ggml_to_gguf")
- (make-script "convert_hf_to_gguf_update.py"))))
- (add-after 'install-python-scripts 'wrap-python-scripts
- (assoc-ref python:%standard-phases 'wrap))
+ (for-each
+ (lambda (file)
+ (make-script file)
+ ;; Run each script as a sanity check
+ (invoke (string-append bin file) "-h"))
+ '(;; Skipped: packaging python-transformers looks involved.
+ ;; "convert_hf_to_gguf_update.py"
+ "convert_hf_to_gguf"
+ "convert_llama_ggml_to_gguf")))))
(add-after 'install 'remove-tests
(lambda* (#:key outputs #:allow-other-keys)
(for-each delete-file (find-files
(string-append (assoc-ref outputs "out")
"/bin")
"^test-")))))))
- (inputs (list python vulkan-headers vulkan-loader))
- (native-inputs (list pkg-config shaderc bash))
+ (inputs (list curl glslang python python-gguf-llama-cpp
+ vulkan-headers vulkan-loader))
+ (native-inputs (list pkg-config shaderc bash-minimal))
(propagated-inputs
(list python-numpy python-pytorch python-sentencepiece openblas))
(properties '((tunable? . #true))) ;use AVX512, FMA, etc. when available
- (home-page "https://github.com/ggerganov/llama.cpp")
+ (home-page "https://github.com/ggml-org/llama.cpp")
(synopsis "Port of Facebook's LLaMA model in C/C++")
(description "This package provides a port to Facebook's LLaMA collection
of foundation language models.  It requires model parameters to be downloaded
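
Note on the sanity check: "-h" is a useful probe because Python executes
all module-level imports before argparse prints help and exits 0, so
`invoke' fails the build exactly when a script's dependencies (gguf,
numpy, torch) fail to resolve:

    ;; Here BIN is the output's bin/ directory bound in the phase above;
    ;; `invoke' raises &invoke-error on any non-zero exit status.
    (invoke (string-append bin "convert_hf_to_gguf") "-h")
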
diff --git a/gnu/packages/patches/llama-cpp-vulkan-optional.patch b/gnu/packages/patches/llama-cpp-vulkan-optional.patch
deleted file mode 100644
--- a/gnu/packages/patches/llama-cpp-vulkan-optional.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-Author: Danny Milosavljevic <dannym@friendly-machines.com>
-Date: 2025-01-29
-License: Expat
-Subject: Make Vulkan optional
-
-See also: <https://github.com/ggerganov/llama.cpp/pull/11494>
-
-diff -ru orig/llama.cpp/ggml/include/ggml-vulkan.h llama.cpp/ggml/include/ggml-vulkan.h
---- orig/llama.cpp/ggml/include/ggml-vulkan.h 2025-01-29 10:24:10.894476682 +0100
-+++ llama.cpp/ggml/include/ggml-vulkan.h 2025-02-07 18:28:34.509509638 +0100
-@@ -10,8 +10,6 @@
- #define GGML_VK_NAME "Vulkan"
- #define GGML_VK_MAX_DEVICES 16
-
--GGML_BACKEND_API void ggml_vk_instance_init(void);
--
- // backend API
- GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
-
-diff -ru orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp
---- orig/llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 10:24:10.922476480 +0100
-+++ llama.cpp/ggml/src/ggml-vulkan/ggml-vulkan.cpp 2025-01-29 22:33:19.955087552 +0100
-@@ -8174,8 +8174,13 @@
- /* .iface = */ ggml_backend_vk_reg_i,
- /* .context = */ nullptr,
- };
--
-- return &reg;
-+ try {
-+ ggml_vk_instance_init();
-+ return &reg;
-+ } catch (const vk::SystemError& e) {
-+ VK_LOG_DEBUG("ggml_vk_get_device_count() -> Error: System error: " << e.what());
-+ return nullptr;
-+ }
- }
-
- // Extension availability