diff mbox series

[bug#68455,v2] gnu: llama-cpp: Update to 1873.

Message ID 20240126122110.10991-1-david@pflug.io
State New
Headers show
Series [bug#68455,v2] gnu: llama-cpp: Update to 1873. | expand

Commit Message

David Pflug Jan. 26, 2024, 12:20 p.m. UTC
* gnu/packages/machine-learning.scm (llama-cpp): Update to 1873.

The python-gguf package (a new dependency) was added by #68735.

Change-Id: I091cd20192743c87b497ea3c5fd18a75ada75d9d
---
 gnu/packages/machine-learning.scm | 110 +++++++++++++++---------------
 1 file changed, 55 insertions(+), 55 deletions(-)


base-commit: c5453fbfeb0dbd19cb402199fe1e5ad51a051e56
diff mbox series

Patch

diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 0e88f7265b..1d590d1c1b 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -519,63 +519,63 @@  (define-public guile-aiscm-next
   (deprecated-package "guile-aiscm-next" guile-aiscm))
 
 (define-public llama-cpp
-  (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
-        (revision "0"))
-    (package
-      (name "llama-cpp")
-      (version (git-version "0.0.0" revision commit))
-      (source
-       (origin
-         (method git-fetch)
-         (uri (git-reference
-               (url "https://github.com/ggerganov/llama.cpp")
-               (commit (string-append "master-" (string-take commit 7)))))
-         (file-name (git-file-name name version))
-         (sha256
-          (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
-      (build-system cmake-build-system)
-      (arguments
-       (list
-        #:modules '((ice-9 textual-ports)
-                    (guix build utils)
-                    ((guix build python-build-system) #:prefix python:)
-                    (guix build cmake-build-system))
-        #:imported-modules `(,@%cmake-build-system-modules
-                             (guix build python-build-system))
-        #:phases
-        #~(modify-phases %standard-phases
-            (add-before 'install 'install-python-scripts
-              (lambda _
-                (let ((bin (string-append #$output "/bin/")))
-                  (define (make-script script)
-                    (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
-                      (call-with-input-file
-                          (string-append "../source/" script suffix)
-                        (lambda (input)
-                          (call-with-output-file (string-append bin script)
-                            (lambda (output)
-                              (format output "#!~a/bin/python3\n~a"
-                                      #$(this-package-input "python")
-                                      (get-string-all input))))))
-                      (chmod (string-append bin script) #o555)))
-                  (mkdir-p bin)
-                  (make-script "convert-pth-to-ggml")
-                  (make-script "convert-lora-to-ggml")
-                  (make-script "convert"))))
-            (add-after 'install-python-scripts 'wrap-python-scripts
-              (assoc-ref python:%standard-phases 'wrap))
-            (replace 'install
-              (lambda _
-                (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
-      (inputs (list python))
-      (propagated-inputs
-       (list python-numpy python-pytorch python-sentencepiece))
-      (home-page "https://github.com/ggerganov/llama.cpp")
-      (synopsis "Port of Facebook's LLaMA model in C/C++")
-      (description "This package provides a port to Facebook's LLaMA collection
+  (package
+    (name "llama-cpp")
+    (version "1873")
+    (source
+     (origin
+       (method git-fetch)
+       (uri (git-reference
+             (url "https://github.com/ggerganov/llama.cpp")
+             (commit (string-append "b" version))))
+       (file-name (git-file-name name version))
+       (sha256
+        (base32 "11may9gkafg5bfma5incijvkypjgx9778gmygxp3x2dz1140809d"))))
+    (build-system cmake-build-system)
+    (arguments
+     (list
+      #:modules '((ice-9 textual-ports)
+                  (guix build utils)
+                  ((guix build python-build-system) #:prefix python:)
+                  (guix build cmake-build-system))
+      #:imported-modules `(,@%cmake-build-system-modules
+                           (guix build python-build-system))
+      #:phases
+      #~(modify-phases %standard-phases
+          (add-before 'install 'install-python-scripts
+            (lambda _
+              (let ((bin (string-append #$output "/bin/")))
+                (define (make-script script)
+                  (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
+                    (call-with-input-file
+                        (string-append "../source/" script suffix)
+                      (lambda (input)
+                        (call-with-output-file (string-append bin script)
+                          (lambda (output)
+                            (format output "#!~a/bin/python3\n~a"
+                                    #$(this-package-input "python")
+                                    (get-string-all input))))))
+                    (chmod (string-append bin script) #o555)))
+                (mkdir-p bin)
+                (make-script "convert-hf-to-gguf")
+                (make-script "convert-llama-ggml-to-gguf")
+                (make-script "convert-lora-to-ggml")
+                (make-script "convert-persimmon-to-gguf")
+                (make-script "convert"))))
+          (add-after 'install-python-scripts 'wrap-python-scripts
+            (assoc-ref python:%standard-phases 'wrap))
+          (replace 'install
+            (lambda _
+              (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
+    (inputs (list python))
+    (propagated-inputs
+     (list python-numpy python-pytorch python-sentencepiece python-gguf))
+    (home-page "https://github.com/ggerganov/llama.cpp")
+    (synopsis "Port of Facebook's LLaMA model in C/C++")
+    (description "This package provides a port to Facebook's LLaMA collection
 of foundation language models.  It requires models parameters to be downloaded
 independently to be able to run a LLaMA model.")
-      (license license:expat))))
+    (license license:expat)))
 
 (define-public mcl
   (package