Skip to content
Snippets Groups Projects
Commit 7826eb33 authored by Yuri Victorovich's avatar Yuri Victorovich
Browse files

misc/ollama: Fix Vulkan compatibility

parent ed6082e5
No related branches found
No related tags found
No related merge requests found
PORTNAME= ollama
DISTVERSIONPREFIX= v
DISTVERSION= 0.3.4
PORTREVISION= 2
PORTREVISION= 3
CATEGORIES= misc # machine-learning
MAINTAINER= yuri@FreeBSD.org
......@@ -16,6 +16,7 @@ ONLY_FOR_ARCHS_REASON= bundled patched llama-cpp is placed into the arch-specifi
BUILD_DEPENDS= bash:shells/bash \
cmake:devel/cmake-core \
glslc:graphics/shaderc \
vulkan-headers>0:graphics/vulkan-headers
LIB_DEPENDS= libvulkan.so:graphics/vulkan-loader
......@@ -27,12 +28,14 @@ GO_TARGET= .
USE_GITHUB= nodefault
GH_TUPLE= ggerganov:llama.cpp:6eeaeba:llama_cpp/llm/llama.cpp
MAKE_ENV= PATH=${PATH}:${WRKSRC}/llm/build/bsd/x86_64_static/bin # workaround to find vulkan-shaders-gen
PLIST_FILES= bin/${PORTNAME}
post-patch: # workaround for https://github.com/ollama/ollama/issues/6259 (use of external libllama.so)
@${REINPLACE_CMD} \
-e '\
s| llama | llama omp |; \
s| llama | llama ${LOCALBASE}/lib/libvulkan.so omp pthread |; \
s| llama | ${WRKSRC}/llm/build/bsd/x86_64_static/src/libllama.a |; \
s| ggml | ${WRKSRC}/llm/build/bsd/x86_64_static/ggml/src/libggml.a |; \
' \
......
......@@ -143,38 +143,38 @@ new file mode 100644
+COMMON_BSD_DEFS="-DCMAKE_SYSTEM_NAME=$(uname -s)"
+CMAKE_TARGETS="--target llama --target ggml"
+
+COMMON_CPU_DEFS="${COMMON_BSD_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH}"
+COMMON_CPU_DEFS="${COMMON_BSD_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DBUILD_SHARED_LIBS=off"
+
+# Static build for linking into the Go binary
+init_vars
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_VULKAN=on -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}_static"
+echo "Building static library"
+build
+
+init_vars
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}/cpu"
+echo "Building LCD CPU"
+build
+compress
+
+init_vars
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}/cpu_avx"
+echo "Building AVX CPU"
+build
+compress
+
+init_vars
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}"
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}/cpu_avx2"
+echo "Building AVX2 CPU"
+build
+compress
+
+init_vars
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_VULKAN=on ${CMAKE_DEFS}"
+CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_VULKAN=on ${CMAKE_DEFS}"
+BUILD_DIR="../build/bsd/${ARCH}/vulkan"
+echo "Building Vulkan GPU"
+build
......
--- llm/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt.orig 2024-08-08 21:55:59 UTC
+++ llm/llama.cpp/ggml/src/vulkan-shaders/CMakeLists.txt
@@ -1,5 +1,6 @@ add_executable(${TARGET} vulkan-shaders-gen.cpp)
set(TARGET vulkan-shaders-gen)
add_executable(${TARGET} vulkan-shaders-gen.cpp)
+target_link_libraries(${TARGET} PRIVATE pthread)
install(TARGETS ${TARGET} RUNTIME)
target_compile_features(${TARGET} PRIVATE cxx_std_11)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment