Skip to content

Commit 8a68656

Browse files
author
Bodhi Hu
committed
remove the LLaMA-MoE model scale factor processing for this PR
1 parent cb0f488 commit 8a68656

File tree

1 file changed

+0
-8
lines changed

1 file changed

+0
-8
lines changed

convert_hf_to_gguf.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -253,14 +253,6 @@ def set_gguf_parameters(self):
253253
self.gguf_writer.add_expert_used_count(n_experts_used)
254254
logger.info(f"gguf: experts used count = {n_experts_used}")
255255

256-
if (sliding_window := self.hparams.get("sliding_window")) is not None:
257-
self.gguf_writer.add_sliding_window(sliding_window)
258-
logger.info(f"gguf: sliding_window = {sliding_window}")
259-
260-
if (expert_weights_scale := self.find_hparam(["routed_scaling_factor", "scale_factor"], optional=True)) is not None:
261-
self.gguf_writer.add_expert_weights_scale(expert_weights_scale)
262-
logger.info(f"gguf: expert_weights_scale = {expert_weights_scale}")
263-
264256
if (head_dim := self.hparams.get("head_dim")) is not None:
265257
self.gguf_writer.add_key_length(head_dim)
266258
self.gguf_writer.add_value_length(head_dim)

0 commit comments

Comments
 (0)