
Commit 95dabb1

convert-hf-to-gguf.py: fix flake8 warnings. Removed unused variable
1 parent 1088626 commit 95dabb1


convert-hf-to-gguf.py (1 file changed: 5 additions, 7 deletions)
@@ -2293,8 +2293,6 @@ def set_vocab(self):
         vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
         assert max(tokenizer.get_vocab().values()) < vocab_size
 
-        reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.get_vocab().items()}
-
         for token_id in range(vocab_size):
             piece = tokenizer._convert_id_to_token(token_id)
             if token_id == 0:
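The deletion above is the "removed unused variable" part of the commit message: flake8 reports this pattern as F841 ("local variable is assigned to but never used"). A minimal standalone reproduction, with a plain dict standing in for tokenizer.get_vocab() purely for illustration:

    def set_vocab_demo():
        vocab = {"<pad>": 0, "hello": 1}  # stand-in for tokenizer.get_vocab()
        # flake8 F841: local variable 'reverse_vocab' is assigned to but never used
        reverse_vocab = {id_: tok for tok, id_ in vocab.items()}
        return len(vocab)

Deleting the assignment, as the hunk above does, silences the warning without changing behavior, since reverse_vocab is never read.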
@@ -2395,7 +2393,7 @@ def write_tensors(self):
                 ),
                 axis=0,
             )
-            print("re-format attention.linear_qkv.weight")
+            logger.debug("re-format attention.linear_qkv.weight")
         elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name):
             qkv_bias = data.reshape((n_head, 3, n_embed // n_head))
             data = np.concatenate(
@@ -2406,12 +2404,12 @@ def write_tensors(self):
                 ),
                 axis=0,
             )
-            print("re-format attention.linear_qkv.bias")
+            logger.debug("re-format attention.linear_qkv.bias")
 
             # map tensor names
             new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
             if new_name is None:
-                print(f"Can not map tensor {name!r}")
+                logger.error(f"Can not map tensor {name!r}")
                 sys.exit()
 
             n_dims = len(data.shape)
@@ -2429,13 +2427,13 @@ def write_tensors(self):
             if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
                 data = data.astype(np.float16)
 
-            print(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+            logger.debug(f"=> {new_name}, shape = {data.shape}, {old_dtype} --> {data.dtype}")
 
             self.gguf_writer.add_tensor(new_name, data)
 
             if not has_lm_head and name == "word_embeddings.weight":
                 self.gguf_writer.add_tensor("output.weight", data)
-                print(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
+                logger.debug(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}")
 
 
 ###### CONVERSION LOGIC ######
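The logger calls introduced by this commit assume a module-level logger defined near the top of the script. A minimal sketch of that setup; the logger name and the example tensor name are assumptions for illustration, not taken from this diff:

    import logging

    # Assumed module-level logger; the name is illustrative, not from this commit.
    logger = logging.getLogger("hf-to-gguf")

    if __name__ == "__main__":
        # Debug records are dropped unless a handler and level are configured once.
        logging.basicConfig(level=logging.DEBUG)
        logger.debug("re-format attention.linear_qkv.weight")
        logger.error("Can not map tensor %r", "h.0.unknown")  # lazy %-style args

One behavioral difference from print: extra positional arguments to a logging call are lazy %-format arguments for the first string, not values printed side by side, so a call of the form logger.debug(name, f"...") only formats cleanly if name contains a matching % placeholder.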
