diff --git a/common/arg.cpp b/common/arg.cpp
index 0d0daa3610105..74559d28a4474 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -891,51 +891,62 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
     };
 
     for (int i = 1; i < argc; i++) {
-        const std::string arg_prefix = "--";
+        const std::string arg_prefix = "--";
 
-        std::string arg = argv[i];
-        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
-            std::replace(arg.begin(), arg.end(), '_', '-');
-        }
-        if (arg_to_options.find(arg) == arg_to_options.end()) {
-            throw std::invalid_argument(string_format("error: invalid argument: %s", arg.c_str()));
-        }
-        auto opt = *arg_to_options[arg];
-        if (opt.has_value_from_env()) {
-            fprintf(stderr, "warn: %s environment variable is set, but will be overwritten by command line argument %s\n", opt.env, arg.c_str());
+        std::string arg = argv[i];
+        if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
+            std::replace(arg.begin(), arg.end(), '_', '-');
+        }
+
+        // Skip --parse-layer and its value(s)
+        if (arg == "--parse-layer") {
+            // Assuming --parse-layer takes exactly 1 argument
+            if (i + 1 < argc) {
+                i++; // skip the next value as well
             }
-        try {
-            if (opt.handler_void) {
-                opt.handler_void(params);
-                continue;
-            }
+            continue;
+        }
 
-            // arg with single value
-            check_arg(i);
-            std::string val = argv[++i];
-            if (opt.handler_int) {
-                opt.handler_int(params, std::stoi(val));
-                continue;
-            }
-            if (opt.handler_string) {
-                opt.handler_string(params, val);
-                continue;
-            }
+        if (arg_to_options.find(arg) == arg_to_options.end()) {
+            throw std::invalid_argument(string_format("error: invalid argument: %s", arg.c_str()));
+        }
 
-            // arg with 2 values
-            check_arg(i);
-            std::string val2 = argv[++i];
-            if (opt.handler_str_str) {
-                opt.handler_str_str(params, val, val2);
-                continue;
-            }
-        } catch (std::exception & e) {
-            throw std::invalid_argument(string_format(
-                "error while handling argument \"%s\": %s\n\n"
-                "usage:\n%s\n\nto show complete usage, run with -h",
-                arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
-        }
+        auto opt = *arg_to_options[arg];
+        if (opt.has_value_from_env()) {
+            fprintf(stderr, "warn: %s environment variable is set, but will be overwritten by command line argument %s\n", opt.env, arg.c_str());
+        }
+        try {
+            if (opt.handler_void) {
+                opt.handler_void(params);
+                continue;
+            }
+
+            // arg with single value
+            check_arg(i);
+            std::string val = argv[++i];
+            if (opt.handler_int) {
+                opt.handler_int(params, std::stoi(val));
+                continue;
+            }
+            if (opt.handler_string) {
+                opt.handler_string(params, val);
+                continue;
+            }
+
+            // arg with 2 values
+            check_arg(i);
+            std::string val2 = argv[++i];
+            if (opt.handler_str_str) {
+                opt.handler_str_str(params, val, val2);
+                continue;
+            }
+        } catch (std::exception & e) {
+            throw std::invalid_argument(string_format(
+                "error while handling argument \"%s\": %s\n\n"
+                "usage:\n%s\n\nto show complete usage, run with -h",
+                arg.c_str(), e.what(), arg_to_options[arg]->to_string().c_str()));
         }
+}
 
     postprocess_cpu_params(params.cpuparams, nullptr);
     postprocess_cpu_params(params.cpuparams_batch, &params.cpuparams);
diff --git a/examples/eval-callback/README.md b/examples/eval-callback/README.md
index 63a57ad6b68e5..f9065cc368d3d 100644
--- a/examples/eval-callback/README.md
+++ b/examples/eval-callback/README.md
@@ -6,13 +6,16 @@ It simply prints to the console all operations and tensor data.
 
 Usage:
 
 ```shell
-llama-eval-callback \
-  --hf-repo ggml-org/models \
-  --hf-file phi-2/ggml-model-q4_0.gguf \
-  --model phi-2-q4_0.gguf \
-  --prompt hello \
-  --seed 42 \
-  -ngl 33
+llama-eval-callback \
+  --model path/to/model.gguf \
+  --parse-layer l_out-31 \
+  --n-predict 200 \
+  --prompt "What is the capital of France?" \
+  --prompt "Explain black holes" \
+  --prompt "Give me a joke" \
+  --seed 42 \
+  -ngl 33 > output.txt
+
 ```
 
 Will print:
diff --git a/examples/eval-callback/eval-callback.cpp b/examples/eval-callback/eval-callback.cpp
index fb188f5a9e132..6c805aef217e7 100644
--- a/examples/eval-callback/eval-callback.cpp
+++ b/examples/eval-callback/eval-callback.cpp
@@ -3,17 +3,24 @@
 #include "log.h"
 #include "llama.h"
 #include "ggml.h"
+#include "sampling.h"
+#include <fstream>
 
 #include <cstdio>
 #include <string>
 #include <vector>
+#include <algorithm>
+
+#include <iostream>
+
+std::ofstream prompt_output_file;
+std::ofstream tensor_output_file;
+
-/**
- * This the arbitrary data which will be passed to each callback.
- * Later on we can for example add operation or tensor name filter from the CLI arg, or a file descriptor to dump the tensor.
- */
 struct callback_data {
     std::vector<uint8_t> data;
+    std::string parse_layer_name;
+    int current_token_index = -1;
 };
 
 static std::string ggml_ne_string(const ggml_tensor * t) {
@@ -27,89 +34,45 @@ static std::string ggml_ne_string(const ggml_tensor * t) {
     return str;
 }
 
-static void ggml_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne, const size_t * nb, int64_t n) {
-    GGML_ASSERT(n > 0);
-    float sum = 0;
-    for (int64_t i3 = 0; i3 < ne[3]; i3++) {
-        LOG(" [\n");
-        for (int64_t i2 = 0; i2 < ne[2]; i2++) {
-            if (i2 == n && ne[2] > 2*n) {
-                LOG(" ..., \n");
-                i2 = ne[2] - n;
-            }
-            LOG(" [\n");
-            for (int64_t i1 = 0; i1 < ne[1]; i1++) {
-                if (i1 == n && ne[1] > 2*n) {
-                    LOG(" ..., \n");
-                    i1 = ne[1] - n;
-                }
-                LOG(" [");
-                for (int64_t i0 = 0; i0 < ne[0]; i0++) {
-                    if (i0 == n && ne[0] > 2*n) {
-                        LOG("..., ");
-                        i0 = ne[0] - n;
-                    }
-                    size_t i = i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
-                    float v;
-                    if (type == GGML_TYPE_F16) {
-                        v = ggml_fp16_to_fp32(*(ggml_fp16_t *) &data[i]);
-                    } else if (type == GGML_TYPE_F32) {
-                        v = *(float *) &data[i];
-                    } else if (type == GGML_TYPE_I32) {
-                        v = (float) *(int32_t *) &data[i];
-                    } else if (type == GGML_TYPE_I16) {
-                        v = (float) *(int16_t *) &data[i];
-                    } else if (type == GGML_TYPE_I8) {
-                        v = (float) *(int8_t *) &data[i];
-                    } else {
-                        GGML_ABORT("fatal error");
-                    }
-                    LOG("%12.4f", v);
-                    sum += v;
-                    if (i0 < ne[0] - 1) LOG(", ");
-                }
-                LOG("],\n");
-            }
-            LOG(" ],\n");
+static void ggml_print_tensor_block(const std::string& tensor_name, uint8_t * data, ggml_type type, const int64_t * ne, const size_t * nb, int64_t token_idx) {
+    const int64_t dim = ne[0];
+    tensor_output_file << "=== TOKEN " << token_idx << " ===\n";
+    tensor_output_file << "--- TENSOR: " << tensor_name << " ---\n";
+    tensor_output_file << "SHAPE: [" << dim << "]\n";
+    tensor_output_file << "DATA:\n";
+
+    for (int64_t i = 0; i < dim; ++i) {
+        size_t offset = i * nb[0];
+        float v;
+
+        switch (type) {
+            case GGML_TYPE_F16: v = ggml_fp16_to_fp32(*(ggml_fp16_t *) &data[offset]); break;
+            case GGML_TYPE_F32: v = *(float *) &data[offset]; break;
+            default: GGML_ABORT("Unsupported tensor type");
         }
-        LOG(" ]\n");
-        LOG(" sum = %f\n", sum);
+
+        tensor_output_file << v;
+        if (i < dim - 1) tensor_output_file << ", ";
     }
+
+    tensor_output_file << "\n\n";
 }
 
-/**
- * GGML operations callback during the graph execution.
- *
- * @param t current tensor
- * @param ask when ask is true, the scheduler wants to know if we are interested in data from this tensor
- *            if we return true, a follow-up call will be made with ask=false in which we can do the actual collection.
- *            see ggml_backend_sched_eval_callback
- * @param user_data user data to pass at each call back
- * @return true to receive data or continue the graph, false otherwise
- */
 static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
     auto * cb_data = (callback_data *) user_data;
 
-    const struct ggml_tensor * src0 = t->src[0];
-    const struct ggml_tensor * src1 = t->src[1];
-
     if (ask) {
-        return true; // Always retrieve data
+        if (cb_data->parse_layer_name == "__LIST__") {
+            tensor_output_file << t->name << "\n";
+            return false;
+        }
+        return std::string(t->name) == cb_data->parse_layer_name;
     }
 
-    char src1_str[128] = {0};
-    if (src1) {
-        snprintf(src1_str, sizeof(src1_str), "%s{%s}", src1->name, ggml_ne_string(src1).c_str());
+    if (std::string(t->name) != cb_data->parse_layer_name) {
+        return false;
     }
 
-    LOG("%s: %24s = (%s) %10s(%s{%s}, %s}) = {%s}\n", __func__,
-           t->name, ggml_type_name(t->type), ggml_op_desc(t),
-           src0->name, ggml_ne_string(src0).c_str(),
-           src1 ? src1_str : "",
-           ggml_ne_string(t).c_str());
-
-
-    // copy the data from the GPU memory if needed
     const bool is_host = ggml_backend_buffer_is_host(t->buffer);
 
     if (!is_host) {
@@ -120,51 +83,151 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
 
     if (!ggml_is_quantized(t->type)) {
         uint8_t * data = is_host ? (uint8_t *) t->data : cb_data->data.data();
-        ggml_print_tensor(data, t->type, t->ne, t->nb, 3);
+        ggml_print_tensor_block(t->name, data, t->type, t->ne, t->nb, cb_data->current_token_index);
     }
 
     return true;
 }
 
-static bool run(llama_context * ctx, const common_params & params) {
+static bool run(llama_context * ctx, const common_params & params, callback_data & cb_data) {
     const llama_model * model = llama_get_model(ctx);
     const llama_vocab * vocab = llama_model_get_vocab(model);
-
     const bool add_bos = llama_vocab_get_add_bos(vocab);
 
     std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);
 
-    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
-        LOG_ERR("%s : failed to eval\n", __func__);
+    auto sparams = llama_sampler_chain_default_params();
+    sparams.no_perf = false;
+    llama_sampler * sampler = llama_sampler_chain_init(sparams);
+    llama_sampler_chain_add(sampler, llama_sampler_init_greedy());
+
+    llama_batch batch = llama_batch_get_one(tokens.data(), tokens.size());
+    cb_data.current_token_index = -1;
+    if (llama_decode(ctx, batch)) {
+        LOG_ERR("Failed to evaluate prompt\n");
+        llama_sampler_free(sampler);
         return false;
     }
 
+    std::string result;
+    llama_token token;
+
+    for (int i = 0; i < params.n_predict; ++i) {
+        token = llama_sampler_sample(sampler, ctx, -1);
+        if (llama_vocab_is_eog(vocab, token)) {
+            break;
+        }
+
+        char buf[128];
+        int n = llama_token_to_piece(vocab, token, buf, sizeof(buf), 0, true);
+        if (n < 0) {
+            LOG_ERR("Failed to convert token to string\n");
+            llama_sampler_free(sampler);
+            return false;
+        }
+        result += std::string(buf, n); // <-- store instead of printing
+
+        llama_batch new_batch = llama_batch_get_one(&token, 1);
+        cb_data.current_token_index = i;
+        if (llama_decode(ctx, new_batch)) {
+            LOG_ERR("Failed to decode sampled token\n");
+            llama_sampler_free(sampler);
+            return false;
+        }
+    }
+
+    llama_sampler_free(sampler);
+
+    // Output final result
+    prompt_output_file << "\n\nFull output:\n" << result << "\n";
+
     return true;
 }
 
-int main(int argc, char ** argv) {
-    callback_data cb_data;
+int main(int argc, char **argv) {
+    prompt_output_file.open("prompt_output.txt");
+    tensor_output_file.open("tensor_output.txt");
+
+    if (!prompt_output_file || !tensor_output_file) {
+        std::cerr << "❌ Failed to open output files.\n";
+        return 1;
+    }
+
+    callback_data cb_data;
     common_params params;
 
+    bool list_layers = false;
+    std::string list_layers_filter = "";
+    std::string parse_layer_value;
+    std::vector<char *> filtered_argv;
+    std::vector<std::string> prompts;
 
-    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
+    filtered_argv.push_back(argv[0]);
+    params.n_gpu_layers = 20;
+
+
+    for (int i = 1; i < argc; i++) {
+        std::string arg = argv[i];
+        if (arg.compare(0, 2, "--") == 0) {
+            std::replace(arg.begin(), arg.end(), '_', '-');
+        }
+
+        if (arg == "--parse-layer") {
+            if (i + 1 < argc) {
+                parse_layer_value = argv[++i];
+            } else {
+                fprintf(stderr, "error: --parse-layer requires an argument\n");
+                return 1;
+            }
+            continue;
+        } else if (arg == "--prompt") {
+            if (i + 1 < argc) {
+                prompts.emplace_back(argv[++i]);
+            } else {
+                fprintf(stderr, "error: --prompt requires an argument\n");
+                return 1;
+            }
+            continue;
+        } else if (arg == "--n-gpu-layers") {
+            if (i + 1 < argc) {
+                params.n_gpu_layers = std::stoi(argv[++i]); // override default
+            } else {
+                fprintf(stderr, "error: --n-gpu-layers requires an integer argument\n");
+                return 1;
+            }
+            continue;
+        }
+        else if (arg == "--list-layers") {
+            list_layers = true;
+            if (i + 1 < argc && argv[i + 1][0] != '-') {
+                list_layers_filter = argv[++i]; // take optional argument
+            }
+            continue;
+        }
+
+        filtered_argv.push_back(argv[i]);
+    }
+
+
+    if (!common_params_parse((int)filtered_argv.size(), filtered_argv.data(), params, LLAMA_EXAMPLE_COMMON)) {
         return 1;
     }
 
-    common_init();
+    if (!parse_layer_value.empty()) {
+        LOG_INF("Parse layer argument value: %s\n", parse_layer_value.c_str());
+    }
+    cb_data.parse_layer_name = parse_layer_value;
 
+    common_init();
     llama_backend_init();
     llama_numa_init(params.numa);
 
-    // pass the callback to the backend scheduler
-    // it will be executed for each node during the graph computation
     params.cb_eval = ggml_debug;
     params.cb_eval_user_data = &cb_data;
     params.warmup = false;
 
-    // init
-    common_init_result llama_init = common_init_from_params(params);
+    common_init_result llama_init = common_init_from_params(params);
 
     llama_model * model = llama_init.model.get();
     llama_context * ctx = llama_init.context.get();
@@ -173,22 +236,45 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    // print system information
-    {
-        LOG_INF("\n");
-        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
-        LOG_INF("\n");
+    LOG_INF("\n");
+    LOG_INF("%s\n", common_params_get_system_info(params).c_str());
+    LOG_INF("\n");
+
+    if (prompts.empty()) {
+        prompts.emplace_back("What is the capital of France?"); // Fallback default
     }
 
-    bool OK = run(ctx, params);
-    if (!OK) {
-        return 1;
+    if (list_layers) {
+        cb_data.parse_layer_name = "__LIST__";
+        params.n_predict = 1;
+        params.prompt = "dummy"; // any valid prompt to trigger eval
+
+        if (!run(ctx, params, cb_data)) {
+            LOG_ERR("Failed during layer listing run\n");
+            return 1;
+        }
+        prompt_output_file.close();
+        tensor_output_file.close();
+
+        return 0;
+
+    }
+
+    for (const auto& prompt : prompts) {
+        prompt_output_file << "Running prompt: " << prompt << "\n";
+        params.prompt = prompt;
+        if (!run(ctx, params, cb_data)) {
+            LOG_ERR("Failed on prompt: %s\n", prompt.c_str());
+            return 1;
+        }
     }
 
     LOG("\n");
     llama_perf_context_print(ctx);
 
     llama_backend_free();
 
+    prompt_output_file.close();
+    tensor_output_file.close();
     return 0;
 }
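
For readers who want to consume the dump offline, here is a minimal sketch (not part of the patch) of a standalone reader for the `tensor_output.txt` format written by `ggml_print_tensor_block()`. The file name and the block layout (`=== TOKEN n ===`, `--- TENSOR: name ---`, `SHAPE`, `DATA`) come from the code above; the helper name `read_tensor_dump` and the choice of a `std::map` keyed by token index are illustrative assumptions only.

```cpp
// Hypothetical post-processing helper: parses the blocks written to
// tensor_output.txt by ggml_print_tensor_block(), i.e.
//   === TOKEN <idx> ===
//   --- TENSOR: <name> ---
//   SHAPE: [<dim>]
//   DATA:
//   v0, v1, ..., v{dim-1}
// and collects one float vector per logged token index.
#include <cstdio>
#include <fstream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

static std::map<int, std::vector<float>> read_tensor_dump(const std::string & path) {
    std::map<int, std::vector<float>> per_token;
    std::ifstream in(path);
    std::string line;
    int token = -1; // the prompt pass is logged with index -1
    while (std::getline(in, line)) {
        if (line.rfind("=== TOKEN ", 0) == 0) {
            token = std::stoi(line.substr(10));
        } else if (line == "DATA:") {
            std::getline(in, line); // single comma-separated row of values
            std::stringstream ss(line);
            std::string cell;
            while (std::getline(ss, cell, ',')) {
                per_token[token].push_back(std::stof(cell));
            }
        }
    }
    return per_token;
}

int main() {
    const auto activations = read_tensor_dump("tensor_output.txt");
    for (const auto & [token, values] : activations) {
        printf("token %3d: %zu values\n", token, values.size());
    }
    return 0;
}
```

Note that if several prompts (or several matching tensors) are dumped into the same file, blocks sharing a token index are simply concatenated by this sketch.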