
Commit 0581602

common : YAYF (yet another YARN fix) (ggml-org#3925)
ggml-ci
1 parent 3fdbe6b commit 0581602

2 files changed (+27, -27)


common/common.h (+22, -22)
@@ -43,29 +43,29 @@ extern char const *LLAMA_BUILD_TARGET;
 int32_t get_num_physical_cores();
 
 struct gpt_params {
-    uint32_t seed = -1; // RNG seed
+    uint32_t seed = -1; // RNG seed
     int32_t n_threads = get_num_physical_cores();
-    int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
-    int32_t n_predict = -1; // new tokens to predict
-    int32_t n_ctx = 512; // context size
-    int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_keep = 0; // number of tokens to keep from initial prompt
-    int32_t n_draft = 16; // number of tokens to draft during speculative decoding
-    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
-    int32_t n_parallel = 1; // number of parallel sequences to decode
-    int32_t n_sequences = 1; // number of sequences to decode
-    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
-    int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
-    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
-    float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
-    int32_t n_beams = 0; // if non-zero then use beam search of given width.
-    float rope_freq_base = 0.0f; // RoPE base frequency
-    float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
-    float yarn_ext_factor = NAN; // YaRN extrapolation mix factor
-    float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor
-    float yarn_beta_fast = 32.0f;// YaRN low correction dim
-    float yarn_beta_slow = 1.0f; // YaRN high correction dim
-    int32_t yarn_orig_ctx = 0; // YaRN original context length
+    int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
+    int32_t n_predict = -1; // new tokens to predict
+    int32_t n_ctx = 512; // context size
+    int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_keep = 0; // number of tokens to keep from initial prompt
+    int32_t n_draft = 16; // number of tokens to draft during speculative decoding
+    int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
+    int32_t n_parallel = 1; // number of parallel sequences to decode
+    int32_t n_sequences = 1; // number of sequences to decode
+    int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default)
+    int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
+    int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
+    float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
+    int32_t n_beams = 0; // if non-zero then use beam search of given width.
+    float rope_freq_base = 0.0f; // RoPE base frequency
+    float rope_freq_scale = 0.0f; // RoPE frequency scaling factor
+    float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor
+    float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor
+    float yarn_beta_fast = 32.0f; // YaRN low correction dim
+    float yarn_beta_slow = 1.0f; // YaRN high correction dim
+    int32_t yarn_orig_ctx = 0; // YaRN original context length
     int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED;
 
     // // sampling parameters
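The substantive change in this hunk is the default for yarn_ext_factor, which moves from NAN to -1.0f; the remaining removed/added pairs appear to differ only in whitespace alignment of the comments. A negative sentinel is easier to test than NAN, since NAN compares unequal to everything (including itself) and must be detected with std::isnan(). As a minimal sketch, assuming the loader falls back to a YaRN-appropriate default whenever the value is negative (the helper name and the fallback values below are illustrative, not taken from the commit):

// Hypothetical helper: resolve an "unspecified" yarn_ext_factor.
// A negative sentinel (-1.0f) is caught by an ordinary comparison, whereas a
// NAN sentinel would need std::isnan(), because NAN == NAN evaluates to false.
static float resolve_yarn_ext_factor(float requested, bool rope_scaling_is_yarn) {
    if (requested < 0.0f) {
        // not set by the user: assumed fallback, 1.0f when YaRN scaling is active
        return rope_scaling_is_yarn ? 1.0f : 0.0f;
    }
    return requested;
}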

llama.h (+5, -5)
@@ -175,11 +175,11 @@ extern "C" {
     };
 
     struct llama_context_params {
-        uint32_t seed; // RNG seed, -1 for random
-        uint32_t n_ctx; // text context, 0 = from model
-        uint32_t n_batch; // prompt processing maximum batch size
-        uint32_t n_threads; // number of threads to use for generation
-        uint32_t n_threads_batch; // number of threads to use for batch processing
+        uint32_t seed; // RNG seed, -1 for random
+        uint32_t n_ctx; // text context, 0 = from model
+        uint32_t n_batch; // prompt processing maximum batch size
+        uint32_t n_threads; // number of threads to use for generation
+        uint32_t n_threads_batch; // number of threads to use for batch processing
         int8_t rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
 
         // ref: https://github.com/ggerganov/llama.cpp/pull/2054
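The llama.h hunk appears to only realign these field declarations, but the comments document the main knobs of llama_context_params. As a rough usage sketch, assuming the llama_context_default_params() and llama_new_context_with_model() entry points from the same header, a context could be configured like this (the concrete values are arbitrary):

#include "llama.h"

// Hypothetical usage sketch (not part of this commit): fill the
// llama_context_params fields shown above before creating a context.
static llama_context * make_context(llama_model * model) {
    llama_context_params cparams = llama_context_default_params();
    cparams.seed            = 1234; // fixed seed; -1 means random, per the comment above
    cparams.n_ctx           = 4096; // 0 = take the text context size from the model
    cparams.n_batch         = 512;  // maximum prompt-processing batch size
    cparams.n_threads       = 8;    // threads used for generation
    cparams.n_threads_batch = 8;    // threads used for batch (prompt) processing
    return llama_new_context_with_model(model, cparams);
}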
