
Commit 1794ec4

Author: daixu
Commit message: add llama config; add llama2 70 support; update readme
1 parent b3a6477 · commit 1794ec4

6 files changed: +363, -0 lines

configs/llama2_70b_config.json (+26 lines)
@@ -0,0 +1,26 @@
{
    "source": "https://huggingface.co/meta-llama/Llama-2-70b-hf/blob/main/config.json",
    "_name_or_path": "meta-llama/Llama-2-70b-hf",
    "architectures": [
        "LlamaForCausalLM"
    ],
    "bos_token_id": 1,
    "eos_token_id": 2,
    "hidden_act": "silu",
    "hidden_size": 8192,
    "initializer_range": 0.02,
    "intermediate_size": 28672,
    "max_position_embeddings": 4096,
    "model_type": "llama",
    "num_attention_heads": 64,
    "num_hidden_layers": 80,
    "num_key_value_heads": 8,
    "pretraining_tp": 1,
    "rms_norm_eps": 1e-05,
    "rope_scaling": null,
    "tie_word_embeddings": false,
    "torch_dtype": "float16",
    "transformers_version": "4.32.0.dev0",
    "use_cache": true,
    "vocab_size": 32000
}
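The 70B config uses grouped-query attention: 64 query heads share 8 key/value heads. A minimal, illustrative sketch of the projection shapes these values imply (the standard Llama projection names are shown only for orientation; nothing below is part of the repo):

```python
# Illustrative only: derive attention projection shapes from the 70B config values.
hidden_size = 8192
num_attention_heads = 64
num_key_value_heads = 8

head_dim = hidden_size // num_attention_heads   # 128
kv_dim = num_key_value_heads * head_dim         # 1024

# Per layer, the attention weight matrices then have these shapes:
#   q_proj: (hidden_size, hidden_size) -> (8192, 8192)
#   k_proj: (kv_dim, hidden_size)      -> (1024, 8192)
#   v_proj: (kv_dim, hidden_size)      -> (1024, 8192)
#   o_proj: (hidden_size, hidden_size) -> (8192, 8192)
print(head_dim, kv_dim)
```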

configs/llama2_7b_config.json (+26 lines)
@@ -0,0 +1,26 @@
{
    "source": "https://huggingface.co/meta-llama/Llama-2-7b-hf/blob/main/config.json",
    "_name_or_path": "meta-llama/Llama-2-7b-hf",
    "architectures": [
        "LlamaForCausalLM"
    ],
    "bos_token_id": 1,
    "eos_token_id": 2,
    "hidden_act": "silu",
    "hidden_size": 4096,
    "initializer_range": 0.02,
    "intermediate_size": 11008,
    "max_position_embeddings": 4096,
    "model_type": "llama",
    "num_attention_heads": 32,
    "num_hidden_layers": 32,
    "num_key_value_heads": 32,
    "pretraining_tp": 1,
    "rms_norm_eps": 1e-05,
    "rope_scaling": null,
    "tie_word_embeddings": false,
    "torch_dtype": "float16",
    "transformers_version": "4.31.0.dev0",
    "use_cache": true,
    "vocab_size": 32000
}
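Comparing the two configs, the headline model sizes fall out of simple arithmetic. A rough, illustrative sketch (weights only, no biases, gate/up/down MLP as in the standard Llama architecture; embeddings and lm_head counted separately because tie_word_embeddings is false):

```python
# Illustrative only: estimate total parameter count from config values.
def llama_param_count(hidden, intermediate, layers, vocab, n_heads, n_kv_heads):
    head_dim = hidden // n_heads
    kv_dim = n_kv_heads * head_dim
    attn = 2 * hidden * hidden + 2 * hidden * kv_dim   # q/o plus k/v projections
    mlp = 3 * hidden * intermediate                    # gate, up, down
    norms = 2 * hidden                                 # input + post-attention RMSNorm
    embed = vocab * hidden                             # token embedding (lm_head is the same size)
    return layers * (attn + mlp + norms) + 2 * embed + hidden  # + final RMSNorm

print(llama_param_count(4096, 11008, 32, 32000, 32, 32))  # ~6.7e9  (LLaMA-2 7B)
print(llama_param_count(8192, 28672, 80, 32000, 64, 8))   # ~69e9   (LLaMA-2 70B)
```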

configuration_llama.py (+178 lines)
@@ -0,0 +1,178 @@
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" LLaMA model configuration"""


LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig():
    r"""
    This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the LLaMA-7B.

    This class mirrors the arguments of the Hugging Face `LlamaConfig` (which inherits from [`PretrainedConfig`]),
    but is a standalone reimplementation that does not inherit from it; read the [`PretrainedConfig`] documentation
    for more information on the shared arguments.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`LlamaModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
            Llama 2 up to 4096, CodeLlama up to 16384.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
            necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        weights_dir (`str`, *optional*, defaults to `'weights/llama2_7b'`):
            Directory containing the per-parameter .npy weight files that this repo's layers load.


    ```python
    from configuration_llama import LlamaConfig

    # Use the default configuration
    configuration = LlamaConfig()

    # Initialize a LlamaConfig object from a config file
    import json
    with open('configs/llama2_7b_config.json', 'r') as f:
        config_data = json.load(f)
    configuration = LlamaConfig(**config_data)

    ```
    """
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        weights_dir='weights/llama2_7b',
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.attention_bias = attention_bias
        self.weights_dir = weights_dir

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")

convert_hf_to_pkl.py (+5 lines)
@@ -12,6 +12,11 @@
         'hf_model': 'meta-llama/Llama-2-7b-hf',
         'tokenizer': 'meta-llama/Llama-2-7b-hf',
         'weights_dir': 'weights/llama2_7b/'
+    },
+    'llama2_70b': {
+        'hf_model': 'meta-llama/Llama-2-70b-hf',
+        'tokenizer': 'meta-llama/Llama-2-70b-hf',
+        'weights_dir': 'weights/llama2_70b/'
     }
 }
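Only the model table of convert_hf_to_pkl.py changes in this commit; the conversion body itself is not shown. For orientation only, a rough, hypothetical sketch of the kind of per-parameter dump the rest of the repo consumes (the .npy file names are taken from what naked_llama2.py and utils.py load; this is not the repo's actual script):

```python
# Hypothetical sketch, not the repo's conversion code: save each HF parameter as
# <weights_dir>/<parameter_name>.npy so that utils.npy_to_tensor() can load it later.
import os
import numpy as np
from transformers import LlamaForCausalLM

def dump_weights(hf_model: str, weights_dir: str) -> None:
    model = LlamaForCausalLM.from_pretrained(hf_model)
    os.makedirs(weights_dir, exist_ok=True)
    for name, param in model.state_dict().items():
        # e.g. 'model.embed_tokens.weight' -> weights/llama2_7b/model.embed_tokens.weight.npy
        np.save(os.path.join(weights_dir, name + '.npy'), param.detach().cpu().numpy())

# dump_weights('meta-llama/Llama-2-7b-hf', 'weights/llama2_7b/')
```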

naked_llama2.py (+101 lines)
@@ -0,0 +1,101 @@
import os.path as osp
import torch
import argparse
from transformers import AutoTokenizer, LlamaForCausalLM
from utils import npy_to_tensor, load_llama_config, get_attentioin_mask
from configuration_llama import LlamaConfig
from layers.norm import RMSNorm
from layers.rope import init_rope_embeddings
from layers.embedding import embedding_lookup
from layers.matmul import LlamaMLP, lm_head
from layers.transformer_block import llama2_transformer_block


def llama2(token_ids: torch.Tensor, config: LlamaConfig):
    """
    Hand-rolled inference for llama2 7B/13B/70B.

    Args:
        token_ids: tensor of token ids with shape [batch_size, seq_length]
    """
    bsz, seq_length = token_ids.shape
    # embedding
    embedding_weights = npy_to_tensor(osp.join(config.weights_dir, 'model.embed_tokens.weight.npy'))
    input_embeds = embedding_lookup(token_ids, embedding_weights)
    hidden_states = input_embeds  # shape [batch_size, seq_length, hidden_size] (4096 for 7B, 8192 for 70B)

    # mask
    mask = get_attentioin_mask(start_pos=0, seq_length=seq_length, ref_tensor=hidden_states)

    # run llama2_transformer_block 32 times (7B) / 80 times (70B)
    for layer_id in range(config.num_hidden_layers):
        print(f'Naked llama2: Computing Layer {layer_id}')
        output = llama2_transformer_block(hidden_states, config, layer_id=layer_id, attention_mask=mask)
        hidden_states = output[0]

    # final RMSNorm, then the lm head output
    norm_weight = npy_to_tensor(osp.join(config.weights_dir, 'model.norm.weight.npy'))
    hidden_states = RMSNorm(hidden_states, norm_weight, eps=config.rms_norm_eps)

    lm_head_weight = npy_to_tensor(osp.join(config.weights_dir, 'lm_head.weight.npy'))
    logits = lm_head(hidden_states, lm_head_weight)
    return logits


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='naked llama2')
    parser.add_argument('--model_size', type=str,
                        help='parameter size of the llama2 model to use',
                        default='7b',
                        choices=['7b', '70b']
                        )
    args = parser.parse_args()

    # initialize rope embeddings
    init_rope_embeddings(dim=128)
    prompt = "Hey, are you conscious? Can you talk to me?"
    model_dict = {
        "llama2_7b": {
            'tokenizer': 'meta-llama/Llama-2-7b-hf',
            'config_path': 'configs/llama2_7b_config.json',
            'weights_dir': 'weights/llama2_7b/'
        },
        "llama2_70b": {
            'tokenizer': 'meta-llama/Llama-2-70b-hf',
            'config_path': 'configs/llama2_70b_config.json',
            'weights_dir': 'weights/llama2_70b/'
        }
    }
    if args.model_size == '7b':
        model_name = "llama2_7b"
    elif args.model_size == '70b':
        model_name = "llama2_70b"

    print('Model:', model_name)

    # tokenization
    tokenizer = AutoTokenizer.from_pretrained(model_dict[model_name]['tokenizer'])
    inputs = tokenizer(prompt, return_tensors="pt")
    token_ids = inputs.input_ids

    # random input
    # token_ids = torch.randint(0, 32000, (1, 512))  # (1, 512) shape

    config = load_llama_config(model_dict[model_name]['config_path'])
    config.weights_dir = model_dict[model_name]['weights_dir']
    logits = llama2(token_ids, config)

    print('Naked llama result:')
    print(logits)

    # check the result against the matching Hugging Face checkpoint
    model = LlamaForCausalLM.from_pretrained(model_dict[model_name]['tokenizer'])
    model.eval()
    with torch.inference_mode():
        hf_res = model(input_ids=token_ids)
        print('Hugging face llama result:')
        print(hf_res.logits)
        error = torch.abs(hf_res.logits - logits)
        print(f"Compare error sum: {torch.sum(error)}")
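The script is run as, e.g., `python naked_llama2.py --model_size 70b` and prints raw logits. A minimal, illustrative follow-up (reusing the `logits` and `tokenizer` defined above; not part of the commit) for turning those logits into a greedy next-token prediction:

```python
# Illustrative only: pick the most likely next token from the returned logits.
# Assumes `logits` has shape [batch_size, seq_length, vocab_size].
next_token_id = torch.argmax(logits[:, -1, :], dim=-1)   # [batch_size]
print('Greedy next token:', tokenizer.decode(next_token_id))
```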

utils.py (+27 lines)
@@ -1,5 +1,7 @@
 import numpy as np
 import torch
+import json
+from configuration_llama import LlamaConfig
 
 
 def npy_to_tensor(npy_name):
@@ -8,3 +10,28 @@ def npy_to_tensor(npy_name):
     loaded_tensor = torch.from_numpy(loaded_numpy_array)
     loaded_tensor = loaded_tensor.to(torch.float32)  # convert the tensor to float32
     return loaded_tensor
+
+
+def load_llama_config(config_file):
+    with open(config_file, "r") as f:
+        config_data = json.load(f)
+    configuration = LlamaConfig(**config_data)
+    return configuration
+
+
+def get_attentioin_mask(start_pos, seq_length, ref_tensor):
+    if seq_length > 1:
+        mask = torch.full((seq_length, seq_length), float("-inf"), device=ref_tensor.device)
+
+        mask = torch.triu(mask, diagonal=1)
+        # When performing key-value caching, we compute the attention scores
+        # only for the new sequence. Thus, the matrix of scores is of size
+        # (seqlen, cache_len + seqlen), and the only masked entries are (i, j) for
+        # j > cache_len + i, since row i corresponds to token cache_len + i.
+        mask = torch.hstack([
+            torch.zeros((seq_length, start_pos), device=ref_tensor.device),
+            mask
+        ]).type_as(ref_tensor)
+    else:
+        mask = None
+    return mask
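For intuition, the causal mask that `get_attentioin_mask` builds for a four-token prompt with no cache (start_pos=0) looks like this. A quick, illustrative check:

```python
import torch
from utils import get_attentioin_mask

ref = torch.zeros(1, 4, 8)  # any tensor with the desired device/dtype works as ref_tensor
mask = get_attentioin_mask(start_pos=0, seq_length=4, ref_tensor=ref)
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0.,   0., -inf, -inf],
#         [0.,   0.,   0., -inf],
#         [0.,   0.,   0.,   0.]])
```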
