diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py new file mode 100644 index 0000000..31a8ae3 --- /dev/null +++ b/notebooks/embedding_wrapper.py @@ -0,0 +1,151 @@ +from transformers import AutoTokenizer, AutoModelForCausalLM +from typing import List +import transformers +from tokenizers import AddedToken +import torch +from torch import nn + +class EmbeddingWrapperMask(nn.Module): + def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True): + super().__init__() + self.old_embedding = nn.Embedding(old_embedding.num_embeddings, old_embedding.embedding_dim) + self.embedding_dim = old_embedding.embedding_dim + self.old_embedding.weight.data = old_embedding.weight.data.clone() + self.new_embedding = nn.Embedding(num_embeddings - old_embedding.num_embeddings, self.embedding_dim) + + if freeze_old: + for param in self.old_embedding.parameters(): + param.requires_grad = False + + self.num_old_embeddings = old_embedding.num_embeddings + + def forward(self, x): + old_x_mask = x < self.num_old_embeddings + new_x_mask = x >= self.num_old_embeddings + + old_x = x[old_x_mask] + new_x = x[new_x_mask] - self.num_old_embeddings + + output = torch.zeros((len(x), self.old_embedding.embedding_dim), dtype=self.old_embedding.weight.dtype, device=self.old_embedding.weight.device) + output[old_x_mask] = self.old_embedding(old_x) + output[new_x_mask] = self.new_embedding(new_x) + + return output + +class EmbeddingWrapperHook(nn.Module): + def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True): + super().__init__() + + self.num_old_embeddings = old_embedding.num_embeddings + self.embedding_dim = old_embedding.embedding_dim + self.new_embedding = nn.Embedding(num_embeddings, self.embedding_dim) + + self.new_embedding.weight.data[:self.num_old_embeddings] = old_embedding.weight.data.clone() + + if freeze_old: + self.new_embedding.weight.register_hook(self._hook) + + def _hook(self, grad): + 
grad[:self.num_old_embeddings] = 0 + return grad + + def forward(self, x): + return self.new_embedding(x) + + +class PartiallyFrozenEmbedding(torch.autograd.Function): + @staticmethod + def forward(ctx, old_embedding_weight, new_embedding_weight, num_old_embeddings, embedding_dim, x): + old_x_mask = x < num_old_embeddings + new_x_mask = x >= num_old_embeddings + + old_x = x[old_x_mask] + new_x = x[new_x_mask] - num_old_embeddings + + # set up an output vector, same length as x and same embedding dimension + num_outputs = x.shape[0] + output = torch.empty((num_outputs, embedding_dim), dtype=old_embedding_weight.dtype, device=old_embedding_weight.device) + + output[old_x_mask] = torch.nn.functional.embedding(old_x, old_embedding_weight) + output[new_x_mask] = torch.nn.functional.embedding(new_x, new_embedding_weight) + + ctx.save_for_backward(old_embedding_weight, new_embedding_weight, old_x, new_x, old_x_mask, new_x_mask) + ctx.num_old_embeddings = num_old_embeddings + + return output + + @staticmethod + def backward(ctx, grad_output): + old_embedding_weight, new_embedding_weight, old_x, new_x, old_x_mask, new_x_mask = ctx.saved_tensors + + grad_old_embedding = torch.zeros_like(old_embedding_weight) + grad_new_embedding = torch.zeros_like(new_embedding_weight) + + grad_old_embedding.index_add_(0, old_x, grad_output[old_x_mask]) + grad_new_embedding.index_add_(0, new_x, grad_output[new_x_mask]) + + return None, grad_new_embedding, None, None, None, None + +class EmbeddingWrapperFunction(nn.Module): + def __init__(self, old_embedding: nn.Embedding, num_embeddings: int): + super().__init__() + self.old_embedding = nn.Embedding(old_embedding.num_embeddings, old_embedding.embedding_dim) + self.embedding_dim = old_embedding.embedding_dim + self.old_embedding.weight.data = old_embedding.weight.data.clone() + self.new_embedding = nn.Embedding(num_embeddings - old_embedding.num_embeddings, self.embedding_dim) + self.num_old_embeddings = old_embedding.num_embeddings + + 
self.old_embedding.weight.requires_grad = True + self.new_embedding.weight.requires_grad = True + + def forward(self, x): + return PartiallyFrozenEmbedding.apply(self.old_embedding.weight, self.new_embedding.weight, self.num_old_embeddings, self.embedding_dim, x) + +if __name__=="__main__": + import torch + from torch import nn + from torch.optim import SGD + + # Step 1: Create an instance of nn.Embedding as the old embedding and initialize its weights randomly. + old_embedding = nn.Embedding(10, 32) + old_embedding.weight.data.normal_() + + # Step 2: Create an instance of EmbeddingWrapperFunction, passing the old embedding and the desired number of embeddings to its constructor. + num_embeddings = 15 + embedding_wrapper = EmbeddingWrapperFunction(old_embedding, num_embeddings) + + # Step 3: Create a linear layer on top of the EmbeddingWrapperFunction. + linear_layer = nn.Linear(embedding_wrapper.embedding_dim, 1) + + # Step 4: Create some synthetic training data. + x = torch.randint(num_embeddings, (100,)) # 100 random integers in [0, num_embeddings) + y = torch.randint(2, (100,)).float() # 100 random 0s and 1s + + # Step 5: Train the model using a simple training loop.
+ optimizer = SGD(list(embedding_wrapper.parameters()) + list(linear_layer.parameters()), lr=0.5) + criterion = nn.BCEWithLogitsLoss() + + # store the old embedding weights + old_embedding_weights = old_embedding.weight.data.clone() + new_embedding_weights = embedding_wrapper.new_embedding.weight.data.clone() + + for epoch in range(10): + optimizer.zero_grad() + embeddings = embedding_wrapper(x) + logits = linear_layer(embeddings).squeeze() + loss = criterion(logits, y) + loss.backward() + optimizer.step() + print(f"Epoch {epoch + 1}, Loss: {loss.item()}") + + # check that the old embedding weights are the same + print("test passed: ", torch.allclose(old_embedding_weights, embedding_wrapper.old_embedding.weight.data)) + + # for hook embedding wrapper + #print("test passed: ", torch.allclose(old_embedding_weights, embedding_wrapper.new_embedding.weight.data[:embedding_wrapper.num_old_embeddings])) + + # check that the new embedding weights are different + print("test passed: ", not torch.allclose(new_embedding_weights, embedding_wrapper.new_embedding.weight.data)) + + print(old_embedding_weights[0,0], embedding_wrapper.old_embedding.weight.data[0,0]) + print(new_embedding_weights[0,0], embedding_wrapper.new_embedding.weight.data[0,0]) \ No newline at end of file diff --git a/notebooks/surgery_experiments.ipynb b/notebooks/surgery_experiments.ipynb new file mode 100644 index 0000000..340fbc5 --- /dev/null +++ b/notebooks/surgery_experiments.ipynb @@ -0,0 +1,3442 @@ +{ + "cells": [ + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2024-04-15T09:24:22.730568Z", + "start_time": "2024-04-15T09:24:21.837708Z" + } + }, + "source": [ + "from transformers import AutoTokenizer, AutoModelForCausalLM\n", + "from typing import List\n", + "import transformers\n", + "from tokenizers import AddedToken\n", + "import torch\n", + "from torch import nn" + ], + "outputs": [], + "execution_count": 1 + }, + { + "cell_type": "code", + "metadata": { + 
"ExecuteTime": { + "end_time": "2024-04-15T09:24:22.741368Z", + "start_time": "2024-04-15T09:24:22.731652Z" + } + }, + "source": [ + "class LinearWrapper(nn.Module):\n", + " def __init__(self, layer: nn.Linear, num_embeddings: int, freeze_old=True):\n", + " super().__init__()\n", + " self.layer = layer\n", + " self.num_embeddings = num_embeddings\n", + " self.n_new_tokens = num_embeddings - layer.out_features\n", + " self.new_embeddings = nn.Linear(layer.in_features, self.n_new_tokens, bias=False)\n", + " self.new_embeddings.to(layer.weight.device).to(layer.weight.dtype)\n", + " if freeze_old:\n", + " for param in self.layer.parameters():\n", + " param.requires_grad = False\n", + " \n", + " def forward(self, x):\n", + " z1 = self.layer(x)\n", + " z2 = self.new_embeddings(x)\n", + " return torch.cat([z1, z2], dim=-1)\n", + "\n", + "class EmbeddingWrapper(nn.Module):\n", + " def __init__(self, embedding: nn.Embedding, num_embeddings: int, freeze_old=True):\n", + " super().__init__()\n", + " self.embedding_dim = embedding.embedding_dim\n", + " self.num_embeddings = num_embeddings\n", + " self.n_new_tokens = num_embeddings - embedding.num_embeddings\n", + "\n", + " # inspired from here \n", + " # https://github.com/huggingface/transformers/blob/185463784e0a0b4cd7974ce5bded7a52ae170f6d/src/transformers/modeling_utils.py#L2026\n", + " self.old_embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim)\n", + " self.old_embeddings.weight.data = torch.ones_like(self.old_embeddings.weight.data)*0#1e-7\n", + " self.old_embeddings.weight.data[:embedding.num_embeddings] = embedding.weight.data\n", + " self.old_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)\n", + " self.new_embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim)\n", + " self.new_embeddings.weight.data[:embedding.num_embeddings] = torch.ones_like(embedding.weight.data)*0#1e-7\n", + " self.new_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)\n", + " if 
freeze_old:\n", + "            for param in self.old_embeddings.parameters():\n", + "                param.requires_grad = False\n", + "\n", + "    \n", + "    def forward(self, x):\n", + "        return self.old_embeddings(x) + self.new_embeddings(x)\n", + "\n", + "class EmbeddingWrapper2(nn.Module):\n", + "    def __init__(self, embedding: nn.Embedding, num_embeddings: int, freeze_old=True):\n", + "        super().__init__()\n", + "        self.old_embeddings = embedding\n", + "        self.num_embeddings = num_embeddings\n", + "        self.embedding_dim = embedding.embedding_dim\n", + "        self.n_new_tokens = num_embeddings - embedding.num_embeddings\n", + "        self.new_embeddings = nn.Embedding(self.n_new_tokens, self.embedding_dim)\n", + "        self.new_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)\n", + "        if freeze_old:\n", + "            for param in self.old_embeddings.parameters():\n", + "                param.requires_grad = False\n", + "\n", + "    def forward(self, x):\n", + "        with torch.amp.autocast(\"cuda\"):\n", + "            mask_small = x < self.old_embeddings.num_embeddings\n", + "            mask_large = x >= self.old_embeddings.num_embeddings\n", + "            small_ids = x[mask_small]\n", + "            large_ids = x[mask_large]\n", + "            small_embs = self.old_embeddings(small_ids)\n", + "            large_embs = self.new_embeddings(large_ids % self.old_embeddings.num_embeddings)\n", + "            # assuming batch x seq x emb\n", + "            y = torch.empty((x.shape[0], x.shape[1], small_embs.shape[-1]), dtype=large_embs.dtype, device=large_embs.device)\n", + "            y[mask_small] = small_embs\n", + "            y[mask_large] = large_embs\n", + "            return y \n", + "    \n", + "class EmbeddingWrapper3(nn.Module):\n", + "    def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True):\n", + "        super().__init__()\n", + "        self.embedding = nn.Embedding(num_embeddings, old_embedding.embedding_dim)\n", + "        self.embedding.to(old_embedding.weight.device).to(old_embedding.weight.dtype)\n", + "        self.embedding.weight.data[:old_embedding.num_embeddings] = old_embedding.weight.data\n", + "        \n", + "        # ToDo: I don't think
this has any effect\n", + " if freeze_old:\n", + " for param in old_embedding.parameters():\n", + " param.requires_grad = False\n", + " \n", + " # register hook to update gradients\n", + " self.num_old_embeddings = old_embedding.num_embeddings\n", + " self.embedding.register_backward_hook(self._hook)\n", + " \n", + " def _hook(self, grad):\n", + " # zero out the gradient for the embedding weights coming from the old embedding\n", + " grad[:self.num_old_embeddings].zero_()\n", + " \n", + " def forward(self, x):\n", + " return self.embedding(x)\n", + "\n", + "class Llama2EmbeddingSurgeon():\n", + " def __init__(self, llama, extended_tokenizer):\n", + " self.llama = llama \n", + " self.extended_tokenizer = extended_tokenizer\n", + " self.extended_embedding = EmbeddingWrapper2(llama.model.embed_tokens, len(extended_tokenizer))\n", + " self.extended_unembedding = LinearWrapper(llama.lm_head, len(extended_tokenizer))\n", + " \n", + " def get_surgeried_model(self):\n", + " self.backup_embed_tokens = self.llama.model.embed_tokens\n", + " self.backup_lm_head = self.llama.lm_head\n", + " self.llama.model.embed_tokens = self.extended_embedding\n", + " self.llama.lm_head = self.extended_unembedding\n", + " self.llama.config.vocab_size = len(self.extended_tokenizer)\n", + " return self.llama\n", + " \n", + " def save(self, llama, path):\n", + " # check if llama is surgeried\n", + " assert llama.model.embed_tokens == self.extended_embedding\n", + " assert llama.lm_head == self.extended_unembedding\n", + " backup_embed_tokens = self.llama.model.embed_tokens\n", + " backup_lm_head = self.llama.lm_head\n", + " self.llama.model.embed_tokens = self.backup_embed_tokens\n", + " self.llama.lm_head = self.backup_lm_head\n", + " self.llama.save_pretrained(path)\n", + " self.llama.model.embed_tokens = backup_embed_tokens\n", + " self.llama.lm_head = backup_lm_head\n", + " self.extended_tokenizer.save_pretrained(path)\n", + " torch.save(self.extended_embedding.state_dict(), 
f\"{path}/extended_embedding.pt\")\n", + " torch.save(self.extended_unembedding.state_dict(), f\"{path}/extended_unembedding.pt\") \n", + " \n", + " @classmethod\n", + " def load(cls, path):\n", + " extended_embedding_dict = torch.load(f\"{path}/extended_embedding.pt\")\n", + " extended_unembedding_dict = torch.load(f\"{path}/extended_unembedding.pt\")\n", + " llama = AutoModelForCausalLM.from_pretrained(path)\n", + " tokenizer = AutoTokenizer.from_pretrained(path)\n", + " surgeon = cls(llama, tokenizer)\n", + " surgeon.extended_embedding.load_state_dict(extended_embedding_dict)\n", + " surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict)\n", + " return surgeon\n", + "\n", + "class PeftModelEmbeddingSurgeon():\n", + " def __init__(self, peft_model, extended_tokenizer):\n", + " try:\n", + " self.llama = peft_model.base_model.model\n", + " except AttributeError:\n", + " self.llama = peft_model\n", + " self.peft_model = peft_model\n", + " self.extended_tokenizer = extended_tokenizer\n", + " self.extended_embedding = EmbeddingWrapper2(self.llama.model.embed_tokens, len(extended_tokenizer))\n", + " self.extended_unembedding = LinearWrapper(self.llama.lm_head, len(extended_tokenizer))\n", + " \n", + " def get_surgeried_model(self):\n", + " self.backup_embed_tokens = self.llama.model.embed_tokens\n", + " self.backup_lm_head = self.llama.lm_head\n", + " self.llama.model.embed_tokens = self.extended_embedding\n", + " self.llama.lm_head = self.extended_unembedding\n", + " self.llama.config.vocab_size = len(self.extended_tokenizer)\n", + " return self.peft_model\n", + "\n", + " def save(self, peft_model, path): \n", + " self.llama.model.embed_tokens = self.backup_embed_tokens\n", + " self.llama.lm_head = self.backup_lm_head\n", + " self.peft_model.save_pretrained(path)\n", + " self.extended_tokenizer.save_pretrained(path)\n", + " torch.save(self.extended_embedding.state_dict(), f\"{path}/extended_embedding.pt\")\n", + " 
torch.save(self.extended_unembedding.state_dict(), f\"{path}/extended_unembedding.pt\") \n", + " \n", + " @classmethod\n", + " def load(cls, path, **kwargs):\n", + " extended_embedding_dict = torch.load(f\"{path}/extended_embedding.pt\")\n", + " extended_unembedding_dict = torch.load(f\"{path}/extended_unembedding.pt\")\n", + " peft_model = AutoModelForCausalLM.from_pretrained(path, **kwargs)\n", + " tokenizer = AutoTokenizer.from_pretrained(path)\n", + " surgeon = cls(peft_model, tokenizer)\n", + " surgeon.extended_embedding.load_state_dict(extended_embedding_dict)\n", + " surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict)\n", + " return surgeon" + ], + "outputs": [], + "execution_count": 2 + }, + { + "cell_type": "code", + "metadata": { + "jupyter": { + "is_executing": true + }, + "ExecuteTime": { + "start_time": "2024-04-15T09:37:15.076202Z" + } + }, + "source": [ + "#model_name_or_path = 'gpt2'\n", + "model_name_or_path = \"microsoft/phi-1_5\"\n", + "# load model in torch.bfloat16\n", + "model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map='auto', torch_dtype=torch.float16)\n", + "tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)" + ], + "outputs": [ + { + "data": { + "text/plain": [ + "pytorch_model.bin: 0%| | 0.00/2.84G [00:00\", \n", + " single_word=False, \n", + " lstrip=True, \n", + " rstrip=True)\n", + " #special=True, \n", + " #normalized=False)\n", + "test_token1 = AddedToken(\">>24\", \n", + " single_word=False, \n", + " lstrip=True, \n", + " rstrip=True)\n", + "test_token2 = AddedToken(\">>72\", \n", + " single_word=False, \n", + " lstrip=True, \n", + " rstrip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LlamaTokenizerFast(name_or_path='/dlabscratch1/public/llm_weights/llama2_hf/Llama-2-7b-hf/', vocab_size=32000, model_max_length=1000000000000000019884624838656, is_fast=True, 
padding_side='right', truncation_side='right', special_tokens={'bos_token': '', 'eos_token': '', 'unk_token': ''}, clean_up_tokenization_spaces=False), added_tokens_decoder={\n", + "\t0: AddedToken(\"\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n", + "\t1: AddedToken(\"\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n", + "\t2: AddedToken(\"\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n", + "\t32000: AddedToken(\"<|pause|>\", rstrip=True, lstrip=True, single_word=False, normalized=False, special=True),\n", + "\t32001: AddedToken(\">>24\", rstrip=True, lstrip=True, single_word=False, normalized=False, special=True),\n", + "\t32002: AddedToken(\">>72\", rstrip=True, lstrip=True, single_word=False, normalized=False, special=True),\n", + "}\n", + "32000\n" + ] + } + ], + "source": [ + "tokenizer.add_tokens([pause_token, test_token1, test_token2], special_tokens=True)\n", + "print(tokenizer)\n", + "# get idx of pause otken\n", + "pause_token_id = tokenizer.convert_tokens_to_ids(\"<|pause|>\")\n", + "print(pause_token_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PeftModelForCausalLM(\n", + " (base_model): LoraModel(\n", + " (model): LlamaForCausalLM(\n", + " (model): LlamaModel(\n", + " (embed_tokens): EmbeddingWrapper2(\n", + " (old_embeddings): Embedding(32000, 4096)\n", + " (new_embeddings): Embedding(3, 4096)\n", + " )\n", + " (layers): ModuleList(\n", + " (0-31): 32 x LlamaDecoderLayer(\n", + " (self_attn): LlamaSdpaAttention(\n", + " (q_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): 
ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (k_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (v_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (o_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (rotary_emb): LlamaRotaryEmbedding()\n", + " )\n", + " (mlp): LlamaMLP(\n", + " (gate_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " 
(lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (up_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (down_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=11008, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (act_fn): SiLU()\n", + " )\n", + " (input_layernorm): LlamaRMSNorm()\n", + " (post_attention_layernorm): LlamaRMSNorm()\n", + " )\n", + " )\n", + " (norm): LlamaRMSNorm()\n", + " )\n", + " (lm_head): LinearWrapper(\n", + " (layer): Linear(in_features=4096, out_features=32000, bias=False)\n", + " (new_embeddings): Linear(in_features=4096, out_features=3, bias=False)\n", + " )\n", + " )\n", + " )\n", + ")\n" + ] + } + ], + "source": [ + "## conventionally you'd do this like this:\n", + "#model.resize_token_embeddings(len(tokenizer))\n", + " \n", + "## ours\n", + "surgeon = PeftModelEmbeddingSurgeon(model, tokenizer)\n", + "model = 
surgeon.get_surgeried_model()\n", + "print(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "base_model.model.model.embed_tokens.new_embeddings.weight torch.Size([3, 4096])\n", + "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.0.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.0.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.0.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.0.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.0.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.0.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.0.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.1.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.1.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.1.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.1.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.2.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.2.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.2.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.2.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.3.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.3.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.3.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.4.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.4.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.4.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.4.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", 
+ "base_model.model.model.layers.5.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.5.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.5.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.5.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.6.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.6.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + 
"base_model.model.model.layers.6.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.7.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.7.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.7.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.8.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.8.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.8.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.8.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.9.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.9.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.9.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.9.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.10.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.10.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.10.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.self_attn.k_proj.lora_A.default.weight torch.Size([64, 
4096])\n", + "base_model.model.model.layers.11.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.11.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.11.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.11.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.12.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.12.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.12.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.12.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.13.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.13.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.13.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", 
+ "base_model.model.model.layers.14.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.14.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.14.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.14.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.self_attn.v_proj.lora_B.default.weight 
torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.15.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.15.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.15.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.16.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.16.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.16.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.16.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.17.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.17.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.17.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.self_attn.k_proj.lora_A.default.weight torch.Size([64, 
4096])\n", + "base_model.model.model.layers.18.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.18.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.18.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.18.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.19.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.19.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.19.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.19.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.20.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.20.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.20.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", 
+ "base_model.model.model.layers.21.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.21.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.21.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.21.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.self_attn.v_proj.lora_B.default.weight 
torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.22.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.22.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.22.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.23.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.23.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.23.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.23.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.24.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.24.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.24.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.self_attn.k_proj.lora_A.default.weight torch.Size([64, 
4096])\n", + "base_model.model.model.layers.25.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.25.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.25.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.25.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.26.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.26.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.26.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.26.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.27.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.27.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.27.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", 
+ "base_model.model.model.layers.28.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.28.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.28.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.28.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.self_attn.v_proj.lora_B.default.weight 
torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.29.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.29.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.29.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.30.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.30.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.30.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.30.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.31.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.31.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.31.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.lm_head.new_embeddings.weight torch.Size([3, 4096])\n" + ] + } + ], + "source": [ + "for k, p in model.named_parameters():\n", + " if p.requires_grad:\n", + " print(k, p.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { 
+ "name": "stdout", + "output_type": "stream", + "text": [ + "base_model.model.model.embed_tokens.old_embeddings.weight cuda:0\n", + "base_model.model.model.embed_tokens.new_embeddings.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.input_layernorm.weight cuda:0\n", + 
"base_model.model.model.layers.0.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.1.post_attention_layernorm.weight cuda:0\n", + 
"base_model.model.model.layers.2.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.2.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.q_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.3.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.3.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.q_proj.lora_A.default.weight cuda:0\n", + 
"base_model.model.model.layers.4.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.4.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.q_proj.lora_B.default.weight cuda:0\n", + 
"base_model.model.model.layers.5.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.5.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.k_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.6.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.6.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.k_proj.lora_A.default.weight cuda:0\n", + 
"base_model.model.model.layers.7.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.7.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.k_proj.lora_B.default.weight cuda:0\n", + 
"base_model.model.model.layers.8.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.8.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.v_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.9.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.9.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.v_proj.lora_A.default.weight cuda:0\n", + 
"base_model.model.model.layers.10.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.10.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.v_proj.lora_B.default.weight cuda:0\n", + 
"base_model.model.model.layers.11.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.11.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.o_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.12.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.12.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.o_proj.lora_A.default.weight cuda:0\n", + 
"base_model.model.model.layers.13.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.13.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.o_proj.lora_B.default.weight cuda:0\n", + 
"base_model.model.model.layers.14.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.14.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.gate_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.15.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.15.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.16.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.gate_proj.lora_A.default.weight cuda:1\n", + 
"base_model.model.model.layers.16.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.16.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.gate_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.17.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.17.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.up_proj.base_layer.weight cuda:1\n", + 
"base_model.model.model.layers.18.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.18.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.up_proj.lora_A.default.weight cuda:1\n", + 
"base_model.model.model.layers.19.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.19.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.up_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.20.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.20.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.down_proj.base_layer.weight cuda:1\n", + 
"base_model.model.model.layers.21.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.21.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.down_proj.lora_A.default.weight cuda:1\n", + 
"base_model.model.model.layers.22.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.22.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.down_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.23.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.23.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.input_layernorm.weight cuda:1\n", + 
"base_model.model.model.layers.24.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.25.post_attention_layernorm.weight cuda:1\n", + 
"base_model.model.model.layers.26.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.26.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.q_proj.base_layer.weight cuda:1\n", + 
"base_model.model.model.layers.27.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.27.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.q_proj.lora_A.default.weight cuda:1\n", + 
"base_model.model.model.layers.28.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.28.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.q_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.29.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.29.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.k_proj.base_layer.weight cuda:1\n", + 
"base_model.model.model.layers.30.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.30.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.k_proj.lora_A.default.weight cuda:1\n", + 
"base_model.model.model.layers.31.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.31.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.norm.weight cuda:1\n", + "base_model.model.lm_head.layer.weight cuda:1\n", + "base_model.model.lm_head.new_embeddings.weight cuda:1\n" + ] + } + ], + "source": [ + "for name, param in model.named_parameters():\n", + " print(name, param.device)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "w/o 1 w 1 \n", + "w/o 450 The w 450 The\n", + "skipping pause token...\n", + "w/o 4996 quick w 
4996 quick\n", + "skipping pause token...\n", + "w/o 17354 brown w 17354 brown\n", + "skipping pause token...\n", + "w/o 1701 fo w 1701 fo\n", + "w/o 29916 x w 29916 x\n", + "w/o 432 j w 432 j\n", + "w/o 17204 umps w 17204 umps\n", + "w/o 975 over w 975 over\n", + "w/o 278 the w 278 the\n", + "w/o 17366 lazy w 17366 lazy\n", + "w/o 11203 dog w 11203 dog\n" + ] + } + ], + "source": [ + "# test\n", + "toks1 = tokenizer.encode('The<|pause|> quick<|pause|> brown <|pause|> fox jumps over the lazy dog', return_tensors='pt')\n", + "toks2 = tokenizer.encode('The quick brown fox jumps over the lazy dog', return_tensors='pt')\n", + "idx2 = 0\n", + "for idx1 in range(len(toks1[0])):\n", + " if toks1[0, idx1].item() != pause_token_id:\n", + " print('w/o', toks2[0, idx2].item(), tokenizer.decode([toks2[0, idx2]]), 'w', toks1[0, idx1].item(), tokenizer.decode([toks1[0, idx1]]))\n", + " assert toks2[0, idx2] == toks1[0, idx1]\n", + " idx2 += 1\n", + " else:\n", + " print('skipping pause token...')" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/dlabdata1/wendler/.rlllm/lib/python3.11/site-packages/transformers/generation/utils.py:1460: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. 
Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`.\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "out = model.generate(tokenizer.encode('The<|pause|> quick<|pause|> brown <|pause|> fox jumps over the lazy dog', return_tensors='pt'), max_length=50, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'The quick brown fox jumps over the lazy dog only one\\nThe lazy fox jumps over the lazy dog only one The lazy fox jumps over the lazy dog only one The lazy fox jumps over the'" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tokenizer.decode(out[0], skip_special_tokens=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "' The<|pause|> quick<|pause|> brown<|pause|> fox jumps over the lazy dog only one\\nThe lazy fox jumps over the lazy dog only one The lazy fox jumps over the lazy dog only one The lazy fox jumps over the'" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tokenizer.decode(out[0], skip_special_tokens=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test training" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "from datasets import load_dataset\n", + "dataset = load_dataset(\"gsm8k\", \"main\", split = \"train\")\n", + "alpaca_prompt = \"\"\"Below is an instruction that describes a task, paired with an input that provides further context. 
Write a response that appropriately completes the request.\n", + "\n", + "### Instruction:\n", + "{}\n", + "\n", + "### Input:\n", + "{}\n", + "\n", + "### Response:\n", + "{}\"\"\"\n", + "\n", + "EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN\n", + "def formatting_prompts_func(examples):\n", + " instructions = len(examples[\"question\"])*[\"Solve the math problem using a eval tool. The command eval[[expr]] allows you to evaluate an expression.\"]\n", + " inputs = examples[\"question\"]\n", + " outputs = examples[\"answer\"]\n", + " texts = []\n", + " for instruction, input, output in zip(instructions, inputs, outputs):\n", + " # Must add EOS_TOKEN, otherwise your generation will go on forever!\n", + " text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN\n", + " texts.append(text)\n", + " #print(texts)\n", + " return { \"text\" : texts, }\n", + "pass\n", + "\n", + "dataset = dataset.map(formatting_prompts_func, batched = True)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "PeftModelForCausalLM(\n", + " (base_model): LoraModel(\n", + " (model): LlamaForCausalLM(\n", + " (model): LlamaModel(\n", + " (embed_tokens): EmbeddingWrapper2(\n", + " (old_embeddings): Embedding(32000, 4096)\n", + " (new_embeddings): Embedding(3, 4096)\n", + " )\n", + " (layers): ModuleList(\n", + " (0-31): 32 x LlamaDecoderLayer(\n", + " (self_attn): LlamaSdpaAttention(\n", + " (q_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (k_proj): 
lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (v_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (o_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (rotary_emb): LlamaRotaryEmbedding()\n", + " )\n", + " (mlp): LlamaMLP(\n", + " (gate_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", 
+ " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (up_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (down_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=11008, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (act_fn): SiLU()\n", + " )\n", + " (input_layernorm): LlamaRMSNorm()\n", + " (post_attention_layernorm): LlamaRMSNorm()\n", + " )\n", + " )\n", + " (norm): LlamaRMSNorm()\n", + " )\n", + " (lm_head): LinearWrapper(\n", + " (layer): Linear(in_features=4096, out_features=32000, bias=False)\n", + " (new_embeddings): Linear(in_features=4096, out_features=3, bias=False)\n", + " )\n", + " )\n", + " )\n", + ")" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "w_before = model.base_model.model.model.embed_tokens.new_embeddings.weight.detach().cpu().clone()" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + 
"output_type": "stream", + "text": [ + "/dlabdata1/wendler/.rlllm/lib/python3.11/site-packages/accelerate/accelerator.py:436: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead: \n", + "dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)\n", + " warnings.warn(\n", + "Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n", + "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n", + "\u001B[34m\u001B[1mwandb\u001B[0m: Currently logged in as: \u001B[33mchrisxx\u001B[0m. Use \u001B[1m`wandb login --relogin`\u001B[0m to force relogin\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.16.6" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /dlabdata1/wendler/code/PauseToken/notebooks/wandb/run-20240412_112353-srjkuann" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run copper-sea-36 to Weights & Biases (docs)
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/chrisxx/huggingface" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/chrisxx/huggingface/runs/srjkuann" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [10/10 00:31, Epoch 0/1]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
StepTraining Loss
11.517500
21.428600
31.369000
41.747600
51.629700
61.517800
71.584500
82.080300
91.817600
101.349400

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from trl import SFTTrainer\n", + "from transformers import TrainingArguments\n", + "import os\n", + "\n", + "tokenizer.pad_token = tokenizer.eos_token\n", + "\n", + "# prepare model for training... \n", + "dtype = model.base_model.model.model.embed_tokens.parameters().__next__().dtype\n", + "model.base_model.model.model.embed_tokens.to(torch.float32)\n", + "model.base_model.model.lm_head.to(torch.float32)\n", + "for param in model.parameters():\n", + " if param.requires_grad:\n", + " param.data = param.data.to(torch.float32)\n", + "\n", + "\n", + "trainer = SFTTrainer(\n", + " model = model,\n", + " tokenizer = tokenizer,\n", + " train_dataset = dataset,\n", + " eval_dataset = None,\n", + " dataset_text_field = \"text\",\n", + " max_seq_length = 1024,\n", + " dataset_num_proc = 2,\n", + " packing = False, # Can make training 5x faster for short sequences.\n", + " args = TrainingArguments(\n", + " gradient_checkpointing=False,\n", + " per_device_train_batch_size = 1,\n", + " gradient_accumulation_steps = 1,\n", + " warmup_steps = 0,\n", + " max_steps = 10,\n", + " #num_train_epochs = 1,\n", + " learning_rate = 2e-3,\n", + " fp16 = not torch.cuda.is_bf16_supported(),\n", + " bf16 = torch.cuda.is_bf16_supported(),\n", + " logging_steps = 1,\n", + " optim = \"adamw_8bit\",\n", + " weight_decay = 0.01,\n", + " #lr_scheduler_type = \"linear\",\n", + " seed = 3407,\n", + " output_dir = \"outputs\",\n", + " ),\n", + ")\n", + "\n", + "with torch.cuda.amp.autocast():\n", + " trainer_stats = trainer.train()\n", + "\n", + "# prepare model for inference \n", + "model.base_model.model.model.embed_tokens.to(dtype)\n", + "model.base_model.model.lm_head.to(dtype)\n", + "\n", + "for param in model.parameters():\n", + " if param.requires_grad:\n", + " param.data = param.data.to(torch.float16)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, 
+ "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor(0.2426, dtype=torch.float16)\n" + ] + } + ], + "source": [ + "from matplotlib import pyplot as plt\n", + "w_after = model.base_model.model.model.embed_tokens.new_embeddings.weight.detach().cpu().clone()\n", + "print(((w_before - w_after)**2).sum())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test saving and loading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/dlabdata1/wendler/.rlllm/lib/python3.11/site-packages/peft/utils/save_and_load.py:154: UserWarning: Could not find a config file in /dlabscratch1/public/llm_weights/llama2_hf/Llama-2-7b-hf/ - will assume that the vocabulary was not modified.\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "surgeon.save(model, '/dlabscratch1/tmp/peft_test')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "PeftModelForCausalLM(\n", + " (base_model): LoraModel(\n", + " (model): LlamaForCausalLM(\n", + " (model): LlamaModel(\n", + " (embed_tokens): Embedding(32000, 4096)\n", + " (layers): ModuleList(\n", + " (0-31): 32 x LlamaDecoderLayer(\n", + " (self_attn): LlamaSdpaAttention(\n", + " (q_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (k_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " 
(lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (v_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (o_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (rotary_emb): LlamaRotaryEmbedding()\n", + " )\n", + " (mlp): LlamaMLP(\n", + " (gate_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + 
" )\n", + " (up_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (down_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=11008, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (act_fn): SiLU()\n", + " )\n", + " (input_layernorm): LlamaRMSNorm()\n", + " (post_attention_layernorm): LlamaRMSNorm()\n", + " )\n", + " )\n", + " (norm): LlamaRMSNorm()\n", + " )\n", + " (lm_head): Linear(in_features=4096, out_features=32000, bias=False)\n", + " )\n", + " )\n", + ")" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import gc\n", + "model.cpu()\n", + "gc.collect()\n", + "for i in range(torch.cuda.device_count()):\n", + " torch.cuda.set_device(i) \n", + " torch.cuda.empty_cache() " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "96dc671298af4c36b29b86cc0219d4d1", + "version_major": 2, + "version_minor": 0 + }, + 
"text/plain": [ + "Loading checkpoint shards: 0%| | 0/2 [00:00