llama-cpp: Add new b4570 version #26477

Open — wants to merge 12 commits into master (changes shown from 6 commits)
5 changes: 5 additions & 0 deletions recipes/llama-cpp/all/conandata.yml
@@ -1,4 +1,7 @@
sources:
"b4570":
url: "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b4570.tar.gz"
sha256: "35bfe07807fd0cf30710023765b9a7ab6c1003f27ef907ce9cea2c5464411430"
"b4079":
url: "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b4079.tar.gz"
sha256: "79093413dcdbd30f83b800aeb958c87369fdfdaf4e5603b094185898ff404a32"
@@ -9,6 +12,8 @@ sources:
url: "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b3040.tar.gz"
sha256: "020e040139660eb40113503bb1057d5387677d249b990e008e04821532f7cd62"
patches:
"b4570":
- patch_file: "patches/b4570-001-curl-patch-targets.patch"
"b4079":
- patch_file: "patches/b4079-001-curl-patch-targets.patch"
"b3542":
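As an aside, a minimal standalone sketch (plain Python, not part of the recipe or this diff) to reproduce the pinned sha256 before trusting a new source entry:

import hashlib
import urllib.request

url = "https://github.com/ggerganov/llama.cpp/archive/refs/tags/b4570.tar.gz"
expected = "35bfe07807fd0cf30710023765b9a7ab6c1003f27ef907ce9cea2c5464411430"

# Download the release tarball and hash it exactly as Conan would.
with urllib.request.urlopen(url) as response:
    digest = hashlib.sha256(response.read()).hexdigest()

assert digest == expected, f"sha256 mismatch: got {digest}"
print("sha256 verified for b4570")
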
138 changes: 90 additions & 48 deletions recipes/llama-cpp/all/conanfile.py
@@ -8,8 +8,7 @@
from conan.tools.files import copy, get, rmdir, apply_conandata_patches, export_conandata_patches
from conan.tools.scm import Version


required_conan_version = ">=1.53.0"
required_conan_version = ">=2.0.9"


class LlamaCppConan(ConanFile):
@@ -37,36 +36,18 @@ class LlamaCppConan(ConanFile):
"with_curl": False,
}

@property
def _min_cppstd(self):
return "11"

@property
def _compilers_minimum_version(self):
return {
"gcc": "8"
}
implements = ["auto_shared_fpic"]

def export_sources(self):
export_conandata_patches(self)
copy(self, "cmake/*", dst=self.export_sources_folder, src=self.recipe_folder)

def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC

def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")

def validate(self):
if self.settings.compiler.cppstd:
check_min_cppstd(self, self._min_cppstd)
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and Version(self.settings.get_safe("compiler.version")) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires {str(self.settings.compiler)}>={minimum_version}."
)
check_min_cppstd(self, 11)

def validate_build(self):
if Version(self.version) >= "b4570" and self.settings.compiler == "msvc" and "arm" in self.settings.arch:
raise ConanInvalidConfiguration("llama-cpp does not support ARM architecture on msvc, it recommends to use clang instead")

def layout(self):
cmake_layout(self, src_folder="src")
@@ -83,14 +64,18 @@ def generate(self):
deps.generate()

tc = CMakeToolchain(self)
tc.variables["BUILD_SHARED_LIBS"] = bool(self.options.shared)
tc.variables["LLAMA_STANDALONE"] = False
tc.variables["LLAMA_BUILD_TESTS"] = False
tc.variables["LLAMA_BUILD_EXAMPLES"] = self.options.get_safe("with_examples")
tc.variables["LLAMA_CURL"] = self.options.get_safe("with_curl")
tc.variables["BUILD_SHARED_LIBS"] = bool(self.options.shared)
tc.variables["GGML_CUDA"] = self.options.get_safe("with_cuda")
if hasattr(self, "settings_build") and cross_building(self):
if cross_building(self):
tc.variables["LLAMA_NATIVE"] = False
tc.variables["GGML_NATIVE_DEFAULT"] = False

tc.variables["GGML_BUILD_TESTS"] = False
#tc.variables["GGML_BUILD_EXAMPLES"] = self.options.get_safe("with_examples")
tc.variables["GGML_CUDA"] = self.options.get_safe("with_cuda")
tc.generate()

def build(self):
@@ -114,23 +99,80 @@ def package(self):
copy(self, "*common*.a", src=self.build_folder, dst=os.path.join(self.package_folder, "lib"), keep_path=False)
copy(self, "*.cmake", src=os.path.join(self.export_sources_folder, "cmake"), dst=os.path.join(self.package_folder, "lib", "cmake"))

def package_info(self):
self.cpp_info.components["common"].includedirs = [os.path.join("include", "common")]
self.cpp_info.components["common"].libs = ["common"]
self.cpp_info.components["common"].libdirs = ["lib"]
if self.version >= Version("b3240"):
self.cpp_info.components["common"].libs.append("ggml")

self.cpp_info.components["llama"].libs = ["llama"]
self.cpp_info.components["llama"].resdirs = ["res"]
self.cpp_info.components["llama"].libdirs = ["lib"]

if self.options.with_cuda and not self.options.shared:
self.cpp_info.builddirs.append(os.path.join("lib", "cmake"))
module_path = os.path.join("lib", "cmake", "llama-cpp-cuda-static.cmake")
self.cpp_info.set_property("cmake_build_modules", [module_path])

    if is_apple_os(self):
        self.cpp_info.components["common"].frameworks.extend(["Foundation", "Accelerate", "Metal"])
    elif self.settings.os in ("Linux", "FreeBSD"):
        self.cpp_info.components["common"].system_libs.extend(["dl", "m", "pthread", "gomp"])

def _get_default_backends(self):
    # default backends: CPU everywhere, plus BLAS (Accelerate) and Metal on Apple platforms
    results = ["cpu"]
    if is_apple_os(self):
        results.append("blas")
        results.append("metal")
    return results

def package_info(self):
# New structure in llama
if Version(self.version) >= "b4570":
self.cpp_info.components["common"].includedirs = [os.path.join("include", "common")]
self.cpp_info.components["common"].libs = ["common"]
Contributor: We can move this out of the if Version check and use it for all versions.
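
For illustration only, a rough sketch of that hoisting (untested; attribute names as in this recipe, with the version-specific wiring elided):

def package_info(self):
    # attributes that hold for every version
    self.cpp_info.components["common"].includedirs = [os.path.join("include", "common")]
    self.cpp_info.components["common"].libs = ["common"]
    self.cpp_info.components["llama"].libs = ["llama"]
    self.cpp_info.components["llama"].resdirs = ["res"]
    if Version(self.version) >= "b4570":
        ...  # new ggml/ggml-base component layout
    else:
        ...  # legacy single-library layout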

self.cpp_info.components["common"].requires = ["llama"]
Contributor: Is this true? I'm not sure that common requires llama; this requires was not defined for previous versions.

Member Author: target_link_libraries(${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads), with TARGET being common here too.

Member Author: It seems like it was missing for the old versions as well, since this same line is present in the previous version too.

if self.options.with_curl:
self.cpp_info.components["common"].requires.append("libcurl::libcurl")
Contributor: Shouldn't this be defined for both versions?

Member Author: No, because if no requires is explicitly defined, all components get all the library requirements. I can be more explicit to avoid overlinking in the old versions.
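
For context, a hedged sketch of what being more explicit could look like in the old-version branch (an illustrative fragment, not part of this diff):

# hypothetical: pin the requires so "common" stops inheriting every
# dependency of the package by default
self.cpp_info.components["common"].requires = ["llama"]
if self.options.with_curl:
    self.cpp_info.components["common"].requires.append("libcurl::libcurl")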

self.cpp_info.components["common"].defines.append("LLAMA_USE_CURL")
Contributor (@czoido, Jan 29, 2025): I think we don't need to define this in the package_info?

Member Author: Upstream's CMakeLists contains target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_CURL), with TARGET being common.

Contributor: And is this new in this version, or should we add it to the old versions too?

if is_apple_os(self):
self.cpp_info.components["common"].frameworks.extend(["Foundation", "Accelerate", "Metal"])
elif self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.components["common"].system_libs.extend(["dl", "m", "pthread", "gomp"])
Contributor: I think this code is common for all versions?


self.cpp_info.components["llama"].libs = ["llama"]
self.cpp_info.components["llama"].resdirs = ["res"]
self.cpp_info.components["llama"].requires = ["ggml", "ggml-base"]

self.cpp_info.components["ggml-base"].libs = ["ggml-base"]
self.cpp_info.components["ggml-base"].resdirs = ["res"]
if self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.components["ggml-base"].system_libs.extend(["dl", "m", "pthread"])

self.cpp_info.components["ggml"].libs = ["ggml"]
self.cpp_info.components["ggml"].resdirs = ["res"]
self.cpp_info.components["ggml"].requires = ["ggml-base"]
if self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.components["ggml"].system_libs.append("dl")

if self.options.shared:
self.cpp_info.components["llama"].defines.append("LLAMA_SHARED")
self.cpp_info.components["ggml-base"].defines.append("GGML_SHARED")
self.cpp_info.components["ggml"].defines.append("GGML_SHARED")

backends = self._get_default_backends()
for backend in backends:
self.cpp_info.components[f"ggml-{backend}"].libs = [f"ggml-{backend}"]
self.cpp_info.components[f"ggml-{backend}"].resdirs = ["res"]
if self.options.shared:
self.cpp_info.components[f"ggml-{backend}"].defines.append("GGML_BACKEND_SHARED")
self.cpp_info.components["ggml"].defines.append(f"GGML_USE_{backend.upper()}")
self.cpp_info.components["ggml"].requires.append(f"ggml-{backend}")

if is_apple_os(self):
self.cpp_info.components["ggml-blas"].frameworks.append("Accelerate")

if self.options.with_cuda and not self.options.shared:
self.cpp_info.builddirs.append(os.path.join("lib", "cmake"))
module_path = os.path.join("lib", "cmake", "llama-cpp-cuda-static.cmake")
self.cpp_info.set_property("cmake_build_modules", [module_path])
Contributor: This can also be shared between both versions.

else:
self.cpp_info.components["common"].includedirs = [os.path.join("include", "common")]
self.cpp_info.components["common"].libs = ["common"]
self.cpp_info.components["common"].libdirs = ["lib"]
if self.version >= Version("b3240"):
self.cpp_info.components["common"].libs.append("ggml")

self.cpp_info.components["llama"].libs = ["llama"]
self.cpp_info.components["llama"].resdirs = ["res"]
self.cpp_info.components["llama"].libdirs = ["lib"]

if self.options.with_cuda and not self.options.shared:
self.cpp_info.builddirs.append(os.path.join("lib", "cmake"))
module_path = os.path.join("lib", "cmake", "llama-cpp-cuda-static.cmake")
self.cpp_info.set_property("cmake_build_modules", [module_path])

if is_apple_os(self):
self.cpp_info.components["common"].frameworks.extend(["Foundation", "Accelerate", "Metal"])
elif self.settings.os in ("Linux", "FreeBSD"):
self.cpp_info.components["common"].system_libs.extend(["dl", "m", "pthread", "gomp"])
15 changes: 15 additions & 0 deletions recipes/llama-cpp/all/patches/b4570-001-curl-patch-targets.patch
@@ -0,0 +1,15 @@
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index 24b7f87..e332bec 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -84,9 +84,7 @@ set(LLAMA_COMMON_EXTRA_LIBS build_info)
if (LLAMA_CURL)
find_package(CURL REQUIRED)
target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_CURL)
- include_directories(${CURL_INCLUDE_DIRS})
- find_library(CURL_LIBRARY curl REQUIRED)
- set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} ${CURL_LIBRARY})
+ list(APPEND LLAMA_COMMON_EXTRA_LIBS CURL::libcurl)
endif ()

target_include_directories(${TARGET} PUBLIC .)