[bug#68455] gnu: llama-cpp: Update to 1873.

Message ID 20240114203255.26500-1-david@pflug.io
State New
Series [bug#68455] gnu: llama-cpp: Update to 1873.

Commit Message

David Pflug Jan. 14, 2024, 8:32 p.m. UTC
* gnu/packages/machine-learning.scm (llama-cpp): Update to 1873.

Change-Id: I091cd20192743c87b497ea3c5fd18a75ada75d9d
---
 gnu/packages/machine-learning.scm | 133 ++++++++++++++++++------------
 1 file changed, 78 insertions(+), 55 deletions(-)


base-commit: 18393fcdddf5c3d834fa89ebf5f3925fc5b166ed

Comments

Mathieu Othacehe Jan. 17, 2024, 5:29 p.m. UTC | #1
Hello David,

> +(define-public python-gguf
> +  (package
> +    (name "python-gguf")
> +    (version "0.6.0")
> +    (source
> +     (origin
> +       (method url-fetch)
> +       (uri (pypi-uri "gguf" version))
> +       (sha256
> +        (base32 "0rbyc2h3kpqnrvbyjvv8a69l577jv55a31l12jnw21m1lamjxqmj"))))
> +    (build-system pyproject-build-system)
> +    (arguments
> +      `(#:phases
> +        (modify-phases %standard-phases
> +                       (delete 'check))))
> +    (inputs (list poetry python-pytest))
> +    (propagated-inputs (list python-numpy))
> +    (home-page "https://ggml.ai")
> +    (synopsis "Read and write ML models in GGUF for GGML")
> +    (description "Read and write ML models in GGUF for GGML")
> +    (license license:expat)))

This should be part of a separate patch. Can you send a v2?
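
Something along these lines might work for that separate patch: skip the check
phase with #:tests? #f (the PyPI sdist does not appear to ship the test suite),
move the build backend to native-inputs, and expand the description into a full
sentence. This is only an untested sketch, and python-poetry-core as the native
input is a guess to be verified against the tree:

(define-public python-gguf
  (package
    (name "python-gguf")
    (version "0.6.0")
    (source
     (origin
       (method url-fetch)
       (uri (pypi-uri "gguf" version))
       (sha256
        (base32 "0rbyc2h3kpqnrvbyjvv8a69l577jv55a31l12jnw21m1lamjxqmj"))))
    (build-system pyproject-build-system)
    ;; The PyPI sdist does not appear to include the test suite, so skip
    ;; the check phase rather than deleting it.
    (arguments (list #:tests? #f))
    ;; Assumption: poetry-core is the pyproject build backend used by gguf.
    (native-inputs (list python-poetry-core))
    (propagated-inputs (list python-numpy))
    (home-page "https://ggml.ai")
    (synopsis "Read and write ML models in the GGUF format")
    (description
     "This package provides a Python library for reading and writing ML
models stored in the GGUF format used by GGML and llama.cpp.")
    (license license:expat)))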

Thanks,

Mathieu

Patch

diff --git a/gnu/packages/machine-learning.scm b/gnu/packages/machine-learning.scm
index 1616738399..0cdfe7bb08 100644
--- a/gnu/packages/machine-learning.scm
+++ b/gnu/packages/machine-learning.scm
@@ -22,6 +22,7 @@ 
 ;;; Copyright © 2023 Navid Afkhami <navid.afkhami@mdc-berlin.de>
 ;;; Copyright © 2023 Zheng Junjie <873216071@qq.com>
 ;;; Copyright © 2023 Troy Figiel <troy@troyfigiel.com>
+;;; Copyright © 2023 David Pflug <david@pflug.io>
 ;;;
 ;;; This file is part of GNU Guix.
 ;;;
@@ -517,63 +518,63 @@  (define-public guile-aiscm-next
   (deprecated-package "guile-aiscm-next" guile-aiscm))
 
 (define-public llama-cpp
-  (let ((commit "f31b5397143009d682db90fd2a6cde83f1ef00eb")
-        (revision "0"))
-    (package
-      (name "llama-cpp")
-      (version (git-version "0.0.0" revision commit))
-      (source
-       (origin
-         (method git-fetch)
-         (uri (git-reference
-               (url "https://github.com/ggerganov/llama.cpp")
-               (commit (string-append "master-" (string-take commit 7)))))
-         (file-name (git-file-name name version))
-         (sha256
-          (base32 "0ys6n53n032zq1ll9f3vgxk8sw0qq7x3fi7awsyy13adzp3hn08p"))))
-      (build-system cmake-build-system)
-      (arguments
-       (list
-        #:modules '((ice-9 textual-ports)
-                    (guix build utils)
-                    ((guix build python-build-system) #:prefix python:)
-                    (guix build cmake-build-system))
-        #:imported-modules `(,@%cmake-build-system-modules
-                             (guix build python-build-system))
-        #:phases
-        #~(modify-phases %standard-phases
-            (add-before 'install 'install-python-scripts
-              (lambda _
-                (let ((bin (string-append #$output "/bin/")))
-                  (define (make-script script)
-                    (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
-                      (call-with-input-file
-                          (string-append "../source/" script suffix)
-                        (lambda (input)
-                          (call-with-output-file (string-append bin script)
-                            (lambda (output)
-                              (format output "#!~a/bin/python3\n~a"
-                                      #$(this-package-input "python")
-                                      (get-string-all input))))))
-                      (chmod (string-append bin script) #o555)))
-                  (mkdir-p bin)
-                  (make-script "convert-pth-to-ggml")
-                  (make-script "convert-lora-to-ggml")
-                  (make-script "convert"))))
-            (add-after 'install-python-scripts 'wrap-python-scripts
-              (assoc-ref python:%standard-phases 'wrap))
-            (replace 'install
-              (lambda _
-                (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
-      (inputs (list python))
-      (propagated-inputs
-       (list python-numpy python-pytorch python-sentencepiece))
-      (home-page "https://github.com/ggerganov/llama.cpp")
-      (synopsis "Port of Facebook's LLaMA model in C/C++")
-      (description "This package provides a port to Facebook's LLaMA collection
+  (package
+    (name "llama-cpp")
+    (version "1873")
+    (source
+     (origin
+       (method git-fetch)
+       (uri (git-reference
+             (url "https://github.com/ggerganov/llama.cpp")
+             (commit (string-append "b" version))))
+       (file-name (git-file-name name version))
+       (sha256
+        (base32 "11may9gkafg5bfma5incijvkypjgx9778gmygxp3x2dz1140809d"))))
+    (build-system cmake-build-system)
+    (arguments
+     (list
+      #:modules '((ice-9 textual-ports)
+                  (guix build utils)
+                  ((guix build python-build-system) #:prefix python:)
+                  (guix build cmake-build-system))
+      #:imported-modules `(,@%cmake-build-system-modules
+                           (guix build python-build-system))
+      #:phases
+      #~(modify-phases %standard-phases
+          (add-before 'install 'install-python-scripts
+            (lambda _
+              (let ((bin (string-append #$output "/bin/")))
+                (define (make-script script)
+                  (let ((suffix (if (string-suffix? ".py" script) "" ".py")))
+                    (call-with-input-file
+                        (string-append "../source/" script suffix)
+                      (lambda (input)
+                        (call-with-output-file (string-append bin script)
+                          (lambda (output)
+                            (format output "#!~a/bin/python3\n~a"
+                                    #$(this-package-input "python")
+                                    (get-string-all input))))))
+                    (chmod (string-append bin script) #o555)))
+                (mkdir-p bin)
+                (make-script "convert-hf-to-gguf")
+                (make-script "convert-llama-ggml-to-gguf")
+                (make-script "convert-lora-to-ggml")
+                (make-script "convert-persimmon-to-gguf")
+                (make-script "convert"))))
+          (add-after 'install-python-scripts 'wrap-python-scripts
+            (assoc-ref python:%standard-phases 'wrap))
+          (replace 'install
+            (lambda _
+              (copy-file "bin/main" (string-append #$output "/bin/llama")))))))
+    (inputs (list python))
+    (propagated-inputs
+     (list python-numpy python-pytorch python-sentencepiece python-gguf))
+    (home-page "https://github.com/ggerganov/llama.cpp")
+    (synopsis "Port of Facebook's LLaMA model in C/C++")
+    (description "This package provides a port to Facebook's LLaMA collection
 of foundation language models.  It requires models parameters to be downloaded
 independently to be able to run a LLaMA model.")
-      (license license:expat))))
+    (license license:expat)))
 
 (define-public mcl
   (package
@@ -5257,3 +5258,25 @@  (define-public oneapi-dnnl
      "OneAPI Deep Neural Network Library (oneDNN) is a cross-platform
 performance library of basic building blocks for deep learning applications.")
     (license license:asl2.0)))
+
+(define-public python-gguf
+  (package
+    (name "python-gguf")
+    (version "0.6.0")
+    (source
+     (origin
+       (method url-fetch)
+       (uri (pypi-uri "gguf" version))
+       (sha256
+        (base32 "0rbyc2h3kpqnrvbyjvv8a69l577jv55a31l12jnw21m1lamjxqmj"))))
+    (build-system pyproject-build-system)
+    (arguments
+      `(#:phases
+        (modify-phases %standard-phases
+                       (delete 'check))))
+    (inputs (list poetry python-pytest))
+    (propagated-inputs (list python-numpy))
+    (home-page "https://ggml.ai")
+    (synopsis "Read and write ML models in GGUF for GGML")
+    (description "Read and write ML models in GGUF for GGML")
+    (license license:expat)))