Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

llama-cpp: Add new b4570 version #26477

Open
wants to merge 12 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions recipes/llama-cpp/all/conandata.yml
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
sources:
"b4570":
AbrilRBS marked this conversation as resolved.
Show resolved Hide resolved
url: "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b4570.tar.gz"
sha256: "35bfe07807fd0cf30710023765b9a7ab6c1003f27ef907ce9cea2c5464411430"
"b4079":
url: "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b4079.tar.gz"
sha256: "79093413dcdbd30f83b800aeb958c87369fdfdaf4e5603b094185898ff404a32"
"b3542":
url: "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3542.tar.gz"
sha256: "6f8b23d930400fce5708d2c85022ef33f1083af8f6ac395abefadacee0942e78"
"b3040":
url: "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3040.tar.gz"
sha256: "020e040139660eb40113503bb1057d5387677d249b990e008e04821532f7cd62"
patches:
"b4570":
- patch_file: "patches/b4570-001-curl-patch-targets.patch"
"b4079":
- patch_file: "patches/b4079-001-curl-patch-targets.patch"
"b3542":
- patch_file: "patches/b3542-001-curl-patch-targets.patch"
"b3040":
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This version had slightly different logic: because it did not have the ggml component, the recipe was a bit cleaner after removing this, and it is also quite outdated.

- patch_file: "patches/b3040-001-curl-patch-targets.patch"
121 changes: 84 additions & 37 deletions recipes/llama-cpp/all/conanfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,7 @@
from conan.tools.files import copy, get, rmdir, apply_conandata_patches, export_conandata_patches
from conan.tools.scm import Version


required_conan_version = ">=1.53.0"
required_conan_version = ">=2.0.9"


class LlamaCppConan(ConanFile):
Expand Down Expand Up @@ -37,36 +36,23 @@ class LlamaCppConan(ConanFile):
"with_curl": False,
}

@property
def _min_cppstd(self):
return "11"
implements = ["auto_shared_fpic"]

@property
def _compilers_minimum_version(self):
return {
"gcc": "8"
}
def _is_new_llama(self):
# Structure of llama.cpp libraries was changed after b4079
return Version(self.version) >= "b4570"

def export_sources(self):
export_conandata_patches(self)
copy(self, "cmake/*", dst=self.export_sources_folder, src=self.recipe_folder)

def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC

def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")

def validate(self):
if self.settings.compiler.cppstd:
check_min_cppstd(self, self._min_cppstd)
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and Version(self.settings.get_safe("compiler.version")) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires {str(self.settings.compiler)}>={minimum_version}."
)
check_min_cppstd(self, 17 if self._is_new_llama else 11)

def validate_build(self):
if Version(self.version) >= "b4570" and self.settings.compiler == "msvc" and "arm" in self.settings.arch:
raise ConanInvalidConfiguration("llama-cpp does not support ARM architecture on msvc, it recommends to use clang instead")

def layout(self):
cmake_layout(self, src_folder="src")
Expand All @@ -83,14 +69,20 @@ def generate(self):
deps.generate()

tc = CMakeToolchain(self)
tc.variables["BUILD_SHARED_LIBS"] = bool(self.options.shared)
tc.variables["LLAMA_STANDALONE"] = False
tc.variables["LLAMA_BUILD_TESTS"] = False
tc.variables["LLAMA_BUILD_EXAMPLES"] = self.options.get_safe("with_examples")
tc.variables["LLAMA_CURL"] = self.options.get_safe("with_curl")
tc.variables["BUILD_SHARED_LIBS"] = bool(self.options.shared)
tc.variables["GGML_CUDA"] = self.options.get_safe("with_cuda")
if hasattr(self, "settings_build") and cross_building(self):
if cross_building(self):
tc.variables["LLAMA_NATIVE"] = False
tc.variables["GGML_NATIVE_DEFAULT"] = False

tc.variables["GGML_BUILD_TESTS"] = False
# Follow with_examples when newer versions can compile examples,
# right now it tries to add_subdirectory to a non-existent folder
tc.variables["GGML_BUILD_EXAMPLES"] = False
tc.variables["GGML_CUDA"] = self.options.get_safe("with_cuda")
tc.generate()

def build(self):
Expand All @@ -114,23 +106,78 @@ def package(self):
copy(self, "*common*.a", src=self.build_folder, dst=os.path.join(self.package_folder, "lib"), keep_path=False)
copy(self, "*.cmake", src=os.path.join(self.export_sources_folder, "cmake"), dst=os.path.join(self.package_folder, "lib", "cmake"))

def _get_backends(self):
results = ["cpu"]
if is_apple_os(self):
results.append("blas")
results.append("metal")
if self.options.with_cuda:
results.append("cuda")
return results

def package_info(self):
self.cpp_info.components["common"].includedirs = [os.path.join("include", "common")]
self.cpp_info.components["common"].libs = ["common"]
self.cpp_info.components["common"].libdirs = ["lib"]
if self.version >= Version("b3240"):
self.cpp_info.components["common"].libs.append("ggml")
self.cpp_info.components["ggml"].libs = ["ggml"]
self.cpp_info.components["ggml"].resdirs = ["res"]
self.cpp_info.components["ggml"].set_property("cmake_target_name", "ggml::all")
if self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.components["ggml"].system_libs.append("dl")

self.cpp_info.components["llama"].libs = ["llama"]
self.cpp_info.components["llama"].resdirs = ["res"]
self.cpp_info.components["llama"].libdirs = ["lib"]
self.cpp_info.components["llama"].requires.append("ggml")
self.cpp_info.components["llama"].set_property("cmake_target_name", "llama")
self.cpp_info.components["llama"].set_property("cmake_target_aliases", ["llama-cpp::llama"])

if self.options.with_cuda and not self.options.shared:
self.cpp_info.builddirs.append(os.path.join("lib", "cmake"))
module_path = os.path.join("lib", "cmake", "llama-cpp-cuda-static.cmake")
self.cpp_info.set_property("cmake_build_modules", [module_path])
self.cpp_info.components["common"].includedirs = [os.path.join("include", "common")]
self.cpp_info.components["common"].libs = ["common"]
self.cpp_info.components["common"].requires = ["llama"]
self.cpp_info.components["common"].set_property("cmake_target_name", "common")
self.cpp_info.components["common"].set_property("cmake_target_aliases", ["llama-cpp::common"])

if self.options.with_curl:
self.cpp_info.components["common"].requires.append("libcurl::libcurl")
self.cpp_info.components["common"].defines.append("LLAMA_USE_CURL")

if is_apple_os(self):
self.cpp_info.components["common"].frameworks.extend(["Foundation", "Accelerate", "Metal"])
elif self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.components["common"].system_libs.extend(["dl", "m", "pthread", "gomp"])

if self.options.with_cuda and not self.options.shared:
self.cpp_info.builddirs.append(os.path.join("lib", "cmake"))
module_path = os.path.join("lib", "cmake", "llama-cpp-cuda-static.cmake")
self.cpp_info.set_property("cmake_build_modules", [module_path])

if self._is_new_llama:
self.cpp_info.components["ggml-base"].libs = ["ggml-base"]
self.cpp_info.components["ggml-base"].resdirs = ["res"]
self.cpp_info.components["ggml-base"].set_property("cmake_target_name", "ggml-base")

self.cpp_info.components["ggml"].requires = ["ggml-base"]
if self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.components["ggml-base"].system_libs.extend(["dl", "m", "pthread"])


if self.options.shared:
self.cpp_info.components["llama"].defines.append("LLAMA_SHARED")
self.cpp_info.components["ggml-base"].defines.append("GGML_SHARED")
self.cpp_info.components["ggml"].defines.append("GGML_SHARED")

backends = self._get_backends()
for backend in backends:
self.cpp_info.components[f"ggml-{backend}"].libs = [f"ggml-{backend}"]
self.cpp_info.components[f"ggml-{backend}"].resdirs = ["res"]
self.cpp_info.components[f"ggml-{backend}"].set_property("cmake_target_name", f"ggml-{backend}")
if self.options.shared:
self.cpp_info.components[f"ggml-{backend}"].defines.append("GGML_BACKEND_SHARED")
self.cpp_info.components["ggml"].defines.append(f"GGML_USE_{backend.upper()}")
self.cpp_info.components["ggml"].requires.append(f"ggml-{backend}")

if is_apple_os(self):
if "blas" in backends:
self.cpp_info.components["ggml-blas"].frameworks.append("Accelerate")
if "metal" in backends:
self.cpp_info.components["ggml-metal"].frameworks.extend(["Metal", "MetalKit", "Foundation", "CoreFoundation"])
if "cuda" in backends:
# TODO: Add CUDA information
pass
15 changes: 15 additions & 0 deletions recipes/llama-cpp/all/patches/b4570-001-curl-patch-targets.patch
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index 24b7f87..e332bec 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -84,9 +84,7 @@ set(LLAMA_COMMON_EXTRA_LIBS build_info)
if (LLAMA_CURL)
find_package(CURL REQUIRED)
target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_CURL)
- include_directories(${CURL_INCLUDE_DIRS})
- find_library(CURL_LIBRARY curl REQUIRED)
- set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} ${CURL_LIBRARY})
+ list(APPEND LLAMA_COMMON_EXTRA_LIBS CURL::libcurl)
endif ()

target_include_directories(${TARGET} PUBLIC .)
2 changes: 1 addition & 1 deletion recipes/llama-cpp/all/test_package/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,5 +5,5 @@ project(test_package CXX)
find_package(llama-cpp REQUIRED CONFIG)

add_executable(${PROJECT_NAME} test_package.cpp)
target_link_libraries(${PROJECT_NAME} PRIVATE llama-cpp::llama llama-cpp::common)
target_link_libraries(${PROJECT_NAME} PRIVATE llama common)
set_property(TARGET ${PROJECT_NAME} PROPERTY CXX_STANDARD 11)
4 changes: 2 additions & 2 deletions recipes/llama-cpp/config.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
versions:
"b4570":
folder: "all"
"b4079":
folder: "all"
"b3542":
folder: "all"
"b3040":
folder: "all"