Commit ed1d3ff

optimize convert-hf-to-gguf.py for chatglm model
Signed-off-by: XingXing Qiao <[email protected]>
1 parent: 83b313a


convert-hf-to-gguf.py

Lines changed: 8 additions & 10 deletions
@@ -2416,13 +2416,15 @@ def set_vocab(self):
 
             text = piece.encode("utf-8")
             score = 0.0
-            if len(piece) != 0 and token_id < 64789:
+            # Referencing the tokenizer Python implementation (https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
+            # the score is only valid if token_id is less than tokenizer.tokenizer.sp_model.vocab_size()
+            if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                 score = tokenizer.tokenizer.sp_model.get_score(token_id)
 
             if len(piece) == 0:
                 text = f"[PAD{token_id}]".encode("utf-8")
 
-            if token_id >= 64789:
+            if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                 toktype = SentencePieceTokenTypes.UNKNOWN
             tokens.append(text)
             scores.append(score)
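
Note on this hunk: the magic number 64789 was chatglm3-6b's SentencePiece vocab size; token IDs at or above it are the extra special tokens the ChatGLM tokenizer adds on top of the SentencePiece model, which have no score and are emitted as UNKNOWN. A minimal sketch of the boundary logic the hunk generalizes, assuming the sentencepiece package and a local tokenizer.model (the path and the helper name are illustrative, not part of the commit):

import sentencepiece as spm

# Load the SentencePiece model that backs the ChatGLM tokenizer.
sp_model = spm.SentencePieceProcessor(model_file="tokenizer.model")  # hypothetical path

def piece_score_and_type(token_id: int, piece: str) -> tuple[float, str]:
    # IDs below vocab_size() are real SentencePiece pieces with scores;
    # IDs at or above it are added special tokens with no score.
    if piece and token_id < sp_model.vocab_size():
        return sp_model.get_score(token_id), "NORMAL"
    return 0.0, "UNKNOWN"

Reading the boundary from vocab_size() instead of hard-coding 64789 keeps the same logic correct for any checkpoint whose SentencePiece vocab size differs.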
@@ -2452,7 +2454,7 @@ def set_vocab(self):
         special_vocab.add_to_gguf(self.gguf_writer)
 
     def set_gguf_parameters(self):
-        self.gguf_writer.add_name("ChatGLM-6b-chat")
+        self.gguf_writer.add_name(self.dir_model.name)
         n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
         n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
         n_head_kv = self.hparams.get("multi_query_group_num", n_head)
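
Note on this hunk: in convert-hf-to-gguf.py, self.dir_model is a pathlib.Path pointing at the model directory being converted, so .name picks up the directory's own name rather than the hard-coded "ChatGLM-6b-chat" string. A quick illustration (the path is hypothetical):

from pathlib import Path

dir_model = Path("models/chatglm3-6b")  # hypothetical local checkout
print(dir_model.name)  # prints "chatglm3-6b", which becomes the GGUF model name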
@@ -2468,16 +2470,12 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_add_bos_token(False)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        if name.endswith(".rotary_pos_emb.inv_freq"):
-            return []
-
         del bid  # unused
 
-        name = re.sub(r'transformer\.', '', name)
-
-        if name == "word_embeddings.weight":
-            assert self.tensor_names is not None
+        if name.endswith(".rotary_pos_emb.inv_freq"):
+            return []
 
+        name = name.removeprefix("transformer.")
         return [(self.map_tensor_name(name), data_torch)]
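
Note on this hunk: str.removeprefix (Python 3.9+) is not just shorter than re.sub(r'transformer\.', '', name), it is stricter: re.sub deletes every occurrence of "transformer." anywhere in the name, while removeprefix strips a single leading one. A small sketch with tensor names chosen for illustration (the second name is hypothetical, constructed to show the difference):

import re

name = "transformer.encoder.layers.0.self_attention.query_key_value.weight"
# Both approaches agree when "transformer." appears only as a prefix:
assert name.removeprefix("transformer.") == re.sub(r'transformer\.', '', name)

tricky = "transformer.output_layer.transformer.weight"  # hypothetical
print(re.sub(r'transformer\.', '', tricky))  # "output_layer.weight" -- both occurrences dropped
print(tricky.removeprefix("transformer."))   # "output_layer.transformer.weight" -- prefix only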