misc/llama.cpp: update to b7086; devel/libggml update to 20251120
ports@,
I'd like to update devel/libggml to commit
781baf2a14d9e0aaee542b2e1bb918bfc4132199, which is in sync with
misc/llama.cpp b7086.
Tested on -current/amd64; no regressions in my use case. I also ran
make test on whisper.cpp, and it works the same as before the update.
check_sym reports a new symbol, so I've bumped the minor version of
both libraries.
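Since the change is purely additive (no symbols removed or changed), a
minor bump is enough; removals would have required a major bump. For
anyone who wants a rough manual cross-check without check_sym, a sketch
(paths and the nm -D dynamic-table flag are illustrative assumptions;
check_sym does this properly):

  # dynamic symbols of the installed lib vs. the freshly built one
  $ nm -D /usr/local/lib/libggml.so.3.0 | sort > old.sym
  $ nm -D $(make show=WRKINST)/usr/local/lib/libggml.so.3.1 | sort > new.sym
  # lines only in new.sym are the added symbols
  $ comm -13 old.sym new.sym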
Ok?
Index: devel/libggml/Makefile
===================================================================
RCS file: /home/cvs/ports/devel/libggml/Makefile,v
diff -u -p -r1.4 Makefile
--- devel/libggml/Makefile 5 Nov 2025 08:59:44 -0000 1.4
+++ devel/libggml/Makefile 21 Nov 2025 22:00:22 -0000
@@ -2,13 +2,12 @@ COMMENT= tensor library for machine lea
GH_ACCOUNT= ggml-org
GH_PROJECT= ggml
-GH_COMMIT= 09aa758381718f7731c148238574a7e169001f13
-REVISION= 0
-DISTNAME= ggml-0.9.4pl20251101
+GH_COMMIT= 781baf2a14d9e0aaee542b2e1bb918bfc4132199
+DISTNAME= ggml-0.9.4pl20251120
PKGNAME= lib${DISTNAME}
-SHARED_LIBS += ggml 3.0
-SHARED_LIBS += ggml-base 3.0
+SHARED_LIBS += ggml 3.1
+SHARED_LIBS += ggml-base 3.1
CATEGORIES= devel
Index: devel/libggml/distinfo
===================================================================
RCS file: /home/cvs/ports/devel/libggml/distinfo,v
diff -u -p -r1.2 distinfo
--- devel/libggml/distinfo 4 Nov 2025 15:05:00 -0000 1.2
+++ devel/libggml/distinfo 21 Nov 2025 21:29:46 -0000
@@ -1,2 +1,2 @@
-SHA256 (ggml-0.9.4pl20251101-09aa7583.tar.gz) = fx+ZI4GhV5KlZlGM3QDWERyj1xXY8t5vh5ELtkwK15Y=
-SIZE (ggml-0.9.4pl20251101-09aa7583.tar.gz) = 2330931
+SHA256 (ggml-0.9.4pl20251120-781baf2a.tar.gz) = qu5WT9HUZ12982U0x4SWNbBDMTameA7DvFXccUbldis=
+SIZE (ggml-0.9.4pl20251120-781baf2a.tar.gz) = 2344462
Index: devel/libggml/patches/patch-src_ggml-cpu_repack_cpp
===================================================================
RCS file: devel/libggml/patches/patch-src_ggml-cpu_repack_cpp
diff -N devel/libggml/patches/patch-src_ggml-cpu_repack_cpp
--- devel/libggml/patches/patch-src_ggml-cpu_repack_cpp 4 Nov 2025 15:05:00 -0000 1.1
+++ /dev/null 1 Jan 1970 00:00:00 -0000
@@ -1,60 +0,0 @@
-Fix garbled output with REPACK at high thread counts
-https://github.com/ggml-org/llama.cpp/commit/1f5accb8d0056e6099cd5b772b1cb787dd590a13
-
-Index: src/ggml-cpu/repack.cpp
---- src/ggml-cpu/repack.cpp.orig
-+++ src/ggml-cpu/repack.cpp
-@@ -1678,10 +1678,24 @@ template <typename BLOC_TYPE, int64_t INTER_SIZE, int6
- int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled;
- int64_t nchunk = (nr + chunk_size - 1) / chunk_size;
-
-+ // Ensure minimum chunk size to avoid alignment issues with high thread counts
-+ // Minimum chunk size should be at least NB_COLS to prevent overlapping chunks after alignment
-+ const int64_t min_chunk_size = NB_COLS;
-+ if (nchunk > 0 && (nr / nchunk) < min_chunk_size && nr >= min_chunk_size) {
-+ nchunk = (nr + min_chunk_size - 1) / min_chunk_size;
-+ }
-+
- if (nth == 1 || nchunk < nth || disable_chunking) {
- nchunk = nth;
- }
-
-+ // Ensure nchunk doesn't exceed the number of rows divided by minimum chunk size
-+ // This prevents creating too many tiny chunks that could overlap after alignment
-+ const int64_t max_nchunk = (nr + min_chunk_size - 1) / min_chunk_size;
-+ if (nchunk > max_nchunk) {
-+ nchunk = max_nchunk;
-+ }
-+
- if (ith == 0) {
- // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start.
- ggml_threadpool_chunk_set(params->threadpool, nth);
-@@ -1695,8 +1709,15 @@ template <typename BLOC_TYPE, int64_t INTER_SIZE, int6
- while (current_chunk < nchunk) {
- int64_t src0_start = (current_chunk * ne01) / nchunk;
- int64_t src0_end = ((current_chunk + 1) * ne01) / nchunk;
-+
-+ // Align boundaries to NB_COLS - round up to ensure all data is included
-+ // The chunk size limiting above ensures chunks are large enough to prevent overlaps
- src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start;
- src0_end = (src0_end % NB_COLS) ? src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end;
-+ if (src0_end > ne01) {
-+ src0_end = ne01;
-+ }
-+
- if (src0_start >= src0_end) {
- break;
- }
-@@ -1808,8 +1829,12 @@ template <typename BLOC_TYPE, int64_t INTER_SIZE, int6
- int64_t src0_cur_start = (ith * ne01) / nth;
- int64_t src0_cur_end = ((ith + 1) * ne01) / nth;
-
-+ // Align boundaries to NB_COLS - round up to ensure all data is included
- src0_cur_start = (src0_cur_start % NB_COLS) ? src0_cur_start + NB_COLS - (src0_cur_start % NB_COLS) : src0_cur_start;
- src0_cur_end = (src0_cur_end % NB_COLS) ? src0_cur_end + NB_COLS - (src0_cur_end % NB_COLS) : src0_cur_end;
-+ if (src0_cur_end > ne01) {
-+ src0_cur_end = ne01;
-+ }
-
- if (src0_cur_start >= src0_cur_end) {
- return;
Index: misc/llama.cpp/Makefile
===================================================================
RCS file: /home/cvs/ports/misc/llama.cpp/Makefile,v
diff -u -p -r1.12 Makefile
--- misc/llama.cpp/Makefile 12 Nov 2025 09:33:37 -0000 1.12
+++ misc/llama.cpp/Makefile 21 Nov 2025 23:31:00 -0000
@@ -2,8 +2,7 @@ COMMENT = LLM inference system
GH_ACCOUNT = ggml-org
GH_PROJECT = llama.cpp
-GH_TAGNAME = b6934
-REVISION = 0
+GH_TAGNAME = b7086
PKGNAME = llama.cpp-0.0.${GH_TAGNAME:S/b//}
SHARED_LIBS += llama 3.0
Index: misc/llama.cpp/distinfo
===================================================================
RCS file: /home/cvs/ports/misc/llama.cpp/distinfo,v
diff -u -p -r1.5 distinfo
--- misc/llama.cpp/distinfo 4 Nov 2025 15:05:41 -0000 1.5
+++ misc/llama.cpp/distinfo 21 Nov 2025 23:31:15 -0000
@@ -1,2 +1,2 @@
-SHA256 (llama.cpp-b6934.tar.gz) = qsr4P+8j/z/nK8k8/Iv1/QTdSFhqIshFYwCI7L5/a7k=
-SIZE (llama.cpp-b6934.tar.gz) = 26417348
+SHA256 (llama.cpp-b7086.tar.gz) = FmxxNbpcxDsrZQtbfUkFl3h1f/CbXcEjWJEKpXEszwA=
+SIZE (llama.cpp-b7086.tar.gz) = 27243237
--
wbr, Kirill