@@ -43,29 +43,29 @@ extern char const *LLAMA_BUILD_TARGET;
 int32_t get_num_physical_cores();
 
 struct gpt_params {
     uint32_t seed                           = -1;    // RNG seed
     int32_t n_threads                       = get_num_physical_cores();
     int32_t n_threads_batch                 = -1;    // number of threads to use for batch processing (-1 = use n_threads)
     int32_t n_predict                       = -1;    // new tokens to predict
     int32_t n_ctx                           = 512;   // context size
     int32_t n_batch                         = 512;   // batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_keep                          = 0;     // number of tokens to keep from initial prompt
     int32_t n_draft                         = 16;    // number of tokens to draft during speculative decoding
     int32_t n_chunks                        = -1;    // max number of chunks to process (-1 = unlimited)
     int32_t n_parallel                      = 1;     // number of parallel sequences to decode
     int32_t n_sequences                     = 1;     // number of sequences to decode
     int32_t n_gpu_layers                    = -1;    // number of layers to store in VRAM (-1 - use default)
     int32_t n_gpu_layers_draft              = -1;    // number of layers to store in VRAM for the draft model (-1 - use default)
     int32_t main_gpu                        = 0;     // the GPU that is used for scratch and small tensors
     float   tensor_split[LLAMA_MAX_DEVICES] = {0};   // how split tensors should be distributed across GPUs
     int32_t n_beams                         = 0;     // if non-zero then use beam search of given width.
     float   rope_freq_base                  = 0.0f;  // RoPE base frequency
     float   rope_freq_scale                 = 0.0f;  // RoPE frequency scaling factor
-    float   yarn_ext_factor                 = NAN;   // YaRN extrapolation mix factor
+    float   yarn_ext_factor                 = -1.0f; // YaRN extrapolation mix factor
     float   yarn_attn_factor                = 1.0f;  // YaRN magnitude scaling factor
-    float   yarn_beta_fast                  = 32.0f;// YaRN low correction dim
+    float   yarn_beta_fast                  = 32.0f; // YaRN low correction dim
     float   yarn_beta_slow                  = 1.0f;  // YaRN high correction dim
     int32_t yarn_orig_ctx                   = 0;     // YaRN original context length
     int8_t  rope_scaling_type               = LLAMA_ROPE_SCALING_UNSPECIFIED;
 
     // sampling parameters
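Side note on the `yarn_ext_factor` change: `NAN` is an awkward "unset" sentinel because it silently propagates through arithmetic and compares unequal even to itself, whereas `-1.0f` can be detected with a plain sign check. A minimal sketch of how such a sentinel might be resolved to a concrete default at setup time (the enum values and the `resolve_yarn_ext_factor` helper are hypothetical illustrations, not part of this diff):

```cpp
#include <cstdint>

// Hypothetical stand-ins for the RoPE scaling modes; not the real llama.cpp enum.
enum rope_scaling_sketch : int8_t {
    ROPE_SCALING_SKETCH_NONE   = 0,
    ROPE_SCALING_SKETCH_LINEAR = 1,
    ROPE_SCALING_SKETCH_YARN   = 2,
};

// Resolve the -1.0f "unset" sentinel to a usable value: keep any explicit
// user setting, otherwise enable full extrapolation mixing only when YaRN
// scaling is actually selected.
static float resolve_yarn_ext_factor(float ext_factor, int8_t scaling_type) {
    if (ext_factor < 0.0f) {
        return scaling_type == ROPE_SCALING_SKETCH_YARN ? 1.0f : 0.0f;
    }
    return ext_factor;
}
```

With `NAN`, the same guard would have needed `std::isnan(ext_factor)`, and any code path that skipped the check would poison every downstream RoPE computation rather than failing visibly.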