diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 4f2339a02a13c..aed595e259ed5 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -936,7 +936,11 @@ def _create_vocab_sentencepiece(self):
         scores: list[float] = [-10000.0] * vocab_size
         toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size
 
-        for token_id in range(vocab_size):
+        for token_id in range(tokenizer.vocab_size()):
+            if token_id >= vocab_size:
+                logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}')
+                break
+
             piece = tokenizer.IdToPiece(token_id)
             text = piece.encode("utf-8")
             score = tokenizer.GetScore(token_id)
@@ -951,10 +955,6 @@ def _create_vocab_sentencepiece(self):
             elif tokenizer.IsByte(token_id):
                 toktype = SentencePieceTokenTypes.BYTE
 
-            if token_id >= vocab_size:
-                logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}')
-                break
-
             tokens[token_id] = text
             scores[token_id] = score
             toktypes[token_id] = toktype
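
For context, a minimal standalone sketch of the guarded loop this patch introduces: iterate over the SentencePiece tokenizer's own vocabulary and break with a warning before any per-token lookup once the id no longer fits the preallocated GGUF vocab arrays. Only the tokenizer calls (`vocab_size`, `IdToPiece`) mirror the patch; `load_pieces`, `model_path`, and the surrounding wrapper are illustrative assumptions, not part of `convert_hf_to_gguf.py`.

```python
# Sketch only; assumes the sentencepiece Python package is installed.
import logging

from sentencepiece import SentencePieceProcessor

logger = logging.getLogger(__name__)


def load_pieces(model_path: str, vocab_size: int) -> list[bytes]:
    # Hypothetical helper; the real script does this inside
    # _create_vocab_sentencepiece with an already-loaded tokenizer.
    tokenizer = SentencePieceProcessor()
    tokenizer.LoadFromFile(model_path)

    # Preallocate to the model's declared vocab size; unused slots stay as PAD.
    tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]

    # Walk the tokenizer's own vocabulary rather than vocab_size, and stop
    # before touching IdToPiece once an id would overflow the arrays.
    for token_id in range(tokenizer.vocab_size()):
        if token_id >= vocab_size:
            logger.warning(f'ignore tokens from {token_id}: id is out of range, max={vocab_size - 1}')
            break

        tokens[token_id] = tokenizer.IdToPiece(token_id).encode("utf-8")

    return tokens
```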