From c4f53210003288a331749a76288dccaed1fd859d Mon Sep 17 00:00:00 2001
From: lklein
Date: Mon, 15 Apr 2024 11:55:48 +0200
Subject: [PATCH 1/9] adding a small experiment script to play with different forms of weight-tying / grad-zeroing / etc

---
 notebooks/embedding_wrapper.py | 160 +++++++++++++++++++++++++++++++++
 1 file changed, 160 insertions(+)
 create mode 100644 notebooks/embedding_wrapper.py

diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py
new file mode 100644
index 0000000..6e71951
--- /dev/null
+++ b/notebooks/embedding_wrapper.py
@@ -0,0 +1,160 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from typing import List
+import transformers
+from tokenizers import AddedToken
+import torch
+from torch import nn
+
+
+# torch.nn.Modules that can be used as wrappers around huggingface models to change the embedding layer
+# and the unembedding layer
+class LinearWrapper(nn.Module):
+    def __init__(self, layer: nn.Linear, num_embeddings: int, freeze_old=True):
+        super().__init__()
+        self.layer = layer
+        self.num_embeddings = num_embeddings
+        self.n_new_tokens = num_embeddings - layer.out_features
+        self.new_embeddings = nn.Linear(layer.in_features, self.n_new_tokens, bias=False)
+        self.new_embeddings.to(layer.weight.device).to(layer.weight.dtype)
+        if freeze_old:
+            for param in self.layer.parameters():
+                param.requires_grad = False
+
+    def forward(self, x):
+        z1 = self.layer(x)
+        z2 = self.new_embeddings(x)
+        return torch.cat([z1, z2], dim=-1)
+
+
+class EmbeddingWrapper(nn.Module):
+    def __init__(self, embedding: nn.Embedding, num_embeddings: int, freeze_old=True):
+        super().__init__()
+        self.embedding_dim = embedding.embedding_dim
+        self.num_embeddings = num_embeddings
+        self.n_new_tokens = num_embeddings - embedding.num_embeddings
+
+        # inspired by
+        # https://github.com/huggingface/transformers/blob/185463784e0a0b4cd7974ce5bded7a52ae170f6d/src/transformers/modeling_utils.py#L2026
+        self.old_embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim)
+        self.old_embeddings.weight.data = torch.ones_like(self.old_embeddings.weight.data) * 0  # 1e-7
+        self.old_embeddings.weight.data[:embedding.num_embeddings] = embedding.weight.data
+        self.old_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)
+        self.new_embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim)
+        self.new_embeddings.weight.data[:embedding.num_embeddings] = torch.ones_like(embedding.weight.data) * 0  # 1e-7
+        self.new_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)
+        if freeze_old:
+            for param in self.old_embeddings.parameters():
+                param.requires_grad = False
+
+    def forward(self, x):
+        return self.old_embeddings(x) + self.new_embeddings(x)
+
+
+class EmbeddingWrapper2(nn.Module):
+    def __init__(self, embedding: nn.Embedding, num_embeddings: int, freeze_old=True):
+        super().__init__()
+        self.old_embeddings = embedding
+        self.num_embeddings = num_embeddings
+        self.embedding_dim = embedding.embedding_dim
+        self.n_new_tokens = num_embeddings - embedding.num_embeddings
+        self.new_embeddings = nn.Embedding(self.n_new_tokens, self.embedding_dim)
+        self.new_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)
+        if freeze_old:
+            for param in self.old_embeddings.parameters():
+                param.requires_grad = False
+
+    def forward(self, x):
+        with torch.amp.autocast("cuda"):
+            mask_small = x < self.old_embeddings.num_embeddings
+            mask_large = x >= self.old_embeddings.num_embeddings
+            small_ids = x[mask_small]
+            large_ids = 
x[mask_large] + small_embs = self.old_embeddings(small_ids) + large_embs = self.new_embeddings(large_ids % self.old_embeddings.num_embeddings) + # assuming batch x seq x emb + y = torch.empty((x.shape[0], x.shape[1], small_embs.shape[-1]), dtype=large_embs.dtype, + device=large_embs.device) + y[mask_small] = small_embs + y[mask_large] = large_embs + return y + + +class Llama2EmbeddingSurgeon(): + def __init__(self, llama, extended_tokenizer): + self.llama = llama + self.extended_tokenizer = extended_tokenizer + self.extended_embedding = EmbeddingWrapper2(llama.model.embed_tokens, len(extended_tokenizer)) + self.extended_unembedding = LinearWrapper(llama.lm_head, len(extended_tokenizer)) + + def get_surgeried_model(self): + self.backup_embed_tokens = self.llama.model.embed_tokens + self.backup_lm_head = self.llama.lm_head + self.llama.model.embed_tokens = self.extended_embedding + self.llama.lm_head = self.extended_unembedding + self.llama.config.vocab_size = len(self.extended_tokenizer) + return self.llama + + def save(self, llama, path): + # check if llama is surgeried + assert llama.model.embed_tokens == self.extended_embedding + assert llama.lm_head == self.extended_unembedding + backup_embed_tokens = self.llama.model.embed_tokens + backup_lm_head = self.llama.lm_head + self.llama.model.embed_tokens = self.backup_embed_tokens + self.llama.lm_head = self.backup_lm_head + self.llama.save_pretrained(path) + self.llama.model.embed_tokens = backup_embed_tokens + self.llama.lm_head = backup_lm_head + self.extended_tokenizer.save_pretrained(path) + torch.save(self.extended_embedding.state_dict(), f"{path}/extended_embedding.pt") + torch.save(self.extended_unembedding.state_dict(), f"{path}/extended_unembedding.pt") + + @classmethod + def load(cls, path): + extended_embedding_dict = torch.load(f"{path}/extended_embedding.pt") + extended_unembedding_dict = torch.load(f"{path}/extended_unembedding.pt") + llama = AutoModelForCausalLM.from_pretrained(path) + tokenizer = AutoTokenizer.from_pretrained(path) + surgeon = cls(llama, tokenizer) + surgeon.extended_embedding.load_state_dict(extended_embedding_dict) + surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict) + return surgeon + + +class PeftModelEmbeddingSurgeon(): + def __init__(self, peft_model, extended_tokenizer): + try: + self.llama = peft_model.base_model.model + except AttributeError: + self.llama = peft_model + self.peft_model = peft_model + self.extended_tokenizer = extended_tokenizer + self.extended_embedding = EmbeddingWrapper2(self.llama.model.embed_tokens, len(extended_tokenizer)) + self.extended_unembedding = LinearWrapper(self.llama.lm_head, len(extended_tokenizer)) + + def get_surgeried_model(self): + self.backup_embed_tokens = self.llama.model.embed_tokens + self.backup_lm_head = self.llama.lm_head + self.llama.model.embed_tokens = self.extended_embedding + self.llama.lm_head = self.extended_unembedding + self.llama.config.vocab_size = len(self.extended_tokenizer) + return self.peft_model + + def save(self, peft_model, path): + self.llama.model.embed_tokens = self.backup_embed_tokens + self.llama.lm_head = self.backup_lm_head + self.peft_model.save_pretrained(path) + self.extended_tokenizer.save_pretrained(path) + torch.save(self.extended_embedding.state_dict(), f"{path}/extended_embedding.pt") + torch.save(self.extended_unembedding.state_dict(), f"{path}/extended_unembedding.pt") + + @classmethod + def load(cls, path, **kwargs): + extended_embedding_dict = torch.load(f"{path}/extended_embedding.pt") + 
extended_unembedding_dict = torch.load(f"{path}/extended_unembedding.pt") + peft_model = AutoModelForCausalLM.from_pretrained(path, **kwargs) + tokenizer = AutoTokenizer.from_pretrained(path) + surgeon = cls(peft_model, tokenizer) + surgeon.extended_embedding.load_state_dict(extended_embedding_dict) + surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict) + return surgeon \ No newline at end of file From dbe88c7eaf9a5ec0502ac0cc0a0c04e81719c3b7 Mon Sep 17 00:00:00 2001 From: lklein Date: Mon, 15 Apr 2024 11:58:14 +0200 Subject: [PATCH 2/9] wip: test notebook --- notebooks/surgery_experiments.ipynb | 3442 +++++++++++++++++++++++++++ 1 file changed, 3442 insertions(+) create mode 100644 notebooks/surgery_experiments.ipynb diff --git a/notebooks/surgery_experiments.ipynb b/notebooks/surgery_experiments.ipynb new file mode 100644 index 0000000..340fbc5 --- /dev/null +++ b/notebooks/surgery_experiments.ipynb @@ -0,0 +1,3442 @@ +{ + "cells": [ + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2024-04-15T09:24:22.730568Z", + "start_time": "2024-04-15T09:24:21.837708Z" + } + }, + "source": [ + "from transformers import AutoTokenizer, AutoModelForCausalLM\n", + "from typing import List\n", + "import transformers\n", + "from tokenizers import AddedToken\n", + "import torch\n", + "from torch import nn" + ], + "outputs": [], + "execution_count": 1 + }, + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2024-04-15T09:24:22.741368Z", + "start_time": "2024-04-15T09:24:22.731652Z" + } + }, + "source": [ + "class LinearWrapper(nn.Module):\n", + " def __init__(self, layer: nn.Linear, num_embeddings: int, freeze_old=True):\n", + " super().__init__()\n", + " self.layer = layer\n", + " self.num_embeddings = num_embeddings\n", + " self.n_new_tokens = num_embeddings - layer.out_features\n", + " self.new_embeddings = nn.Linear(layer.in_features, self.n_new_tokens, bias=False)\n", + " self.new_embeddings.to(layer.weight.device).to(layer.weight.dtype)\n", + " if freeze_old:\n", + " for param in self.layer.parameters():\n", + " param.requires_grad = False\n", + " \n", + " def forward(self, x):\n", + " z1 = self.layer(x)\n", + " z2 = self.new_embeddings(x)\n", + " return torch.cat([z1, z2], dim=-1)\n", + "\n", + "class EmbeddingWrapper(nn.Module):\n", + " def __init__(self, embedding: nn.Embedding, num_embeddings: int, freeze_old=True):\n", + " super().__init__()\n", + " self.embedding_dim = embedding.embedding_dim\n", + " self.num_embeddings = num_embeddings\n", + " self.n_new_tokens = num_embeddings - embedding.num_embeddings\n", + "\n", + " # inspired from here \n", + " # https://github.com/huggingface/transformers/blob/185463784e0a0b4cd7974ce5bded7a52ae170f6d/src/transformers/modeling_utils.py#L2026\n", + " self.old_embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim)\n", + " self.old_embeddings.weight.data = torch.ones_like(self.old_embeddings.weight.data)*0#1e-7\n", + " self.old_embeddings.weight.data[:embedding.num_embeddings] = embedding.weight.data\n", + " self.old_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)\n", + " self.new_embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim)\n", + " self.new_embeddings.weight.data[:embedding.num_embeddings] = torch.ones_like(embedding.weight.data)*0#1e-7\n", + " self.new_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)\n", + " if freeze_old:\n", + " for param in self.old_embeddings.parameters():\n", + " param.requires_grad = False\n", + 
"\n", + " \n", + " def forward(self, x):\n", + " self.old_embeddings(x) + self.new_embeddings(x)\n", + "\n", + "class EmbeddingWrapper2(nn.Module):\n", + " def __init__(self, embedding: nn.Embedding, num_embeddings: int, freeze_old=True):\n", + " super().__init__()\n", + " self.old_embeddings = embedding\n", + " self.num_embeddings = num_embeddings\n", + " self.embedding_dim = embedding.embedding_dim\n", + " self.n_new_tokens = num_embeddings - embedding.num_embeddings\n", + " self.new_embeddings = nn.Embedding(self.n_new_tokens, self.embedding_dim)\n", + " self.new_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)\n", + " if freeze_old:\n", + " for param in self.old_embeddings.parameters():\n", + " param.requires_grad = False\n", + "\n", + " def forward(self, x):\n", + " with torch.amp.autocast(\"cuda\"):\n", + " mask_small = x < self.old_embeddings.num_embeddings\n", + " mask_large = x >= self.old_embeddings.num_embeddings\n", + " small_ids = x[mask_small]\n", + " large_ids = x[mask_large]\n", + " small_embs = self.old_embeddings(small_ids)\n", + " large_embs = self.new_embeddings(large_ids % self.old_embeddings.num_embeddings)\n", + " # assuming batch x seq x emb\n", + " y = torch.empty((x.shape[0], x.shape[1], small_embs.shape[-1]), dtype=large_embs.dtype, device=large_embs.device)\n", + " y[mask_small] = small_embs\n", + " y[mask_large] = large_embs\n", + " return y \n", + " \n", + "class EmbeddingWrapper3(nn.Module):\n", + " def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True):\n", + " super().__init__()\n", + " self.embedding = nn.Embedding(num_embeddings, old_embedding.embedding_dim)\n", + " self.embedding.to(old_embedding.weight.device).to(old_embedding.weight.dtype)\n", + " self.embedding.weight.data[:old_embedding.num_embeddings] = old_embedding.weight.data\n", + " \n", + " # ToDo: I don't think this has any effect\n", + " if freeze_old:\n", + " for param in old_embedding.parameters():\n", + " param.requires_grad = False\n", + " \n", + " # register hook to update gradients\n", + " self.num_old_embeddings = old_embedding.num_embeddings\n", + " self.embedding.register_backward_hook(self._hook)\n", + " \n", + " def _hook(self, grad):\n", + " # zero out the gradient for the embedding weights coming from the old embedding\n", + " grad[:self.num_old_embeddings].zero_()\n", + " \n", + " def forward(self, x):\n", + " return self.embedding(x)\n", + "\n", + "class Llama2EmbeddingSurgeon():\n", + " def __init__(self, llama, extended_tokenizer):\n", + " self.llama = llama \n", + " self.extended_tokenizer = extended_tokenizer\n", + " self.extended_embedding = EmbeddingWrapper2(llama.model.embed_tokens, len(extended_tokenizer))\n", + " self.extended_unembedding = LinearWrapper(llama.lm_head, len(extended_tokenizer))\n", + " \n", + " def get_surgeried_model(self):\n", + " self.backup_embed_tokens = self.llama.model.embed_tokens\n", + " self.backup_lm_head = self.llama.lm_head\n", + " self.llama.model.embed_tokens = self.extended_embedding\n", + " self.llama.lm_head = self.extended_unembedding\n", + " self.llama.config.vocab_size = len(self.extended_tokenizer)\n", + " return self.llama\n", + " \n", + " def save(self, llama, path):\n", + " # check if llama is surgeried\n", + " assert llama.model.embed_tokens == self.extended_embedding\n", + " assert llama.lm_head == self.extended_unembedding\n", + " backup_embed_tokens = self.llama.model.embed_tokens\n", + " backup_lm_head = self.llama.lm_head\n", + " self.llama.model.embed_tokens = 
self.backup_embed_tokens\n", + " self.llama.lm_head = self.backup_lm_head\n", + " self.llama.save_pretrained(path)\n", + " self.llama.model.embed_tokens = backup_embed_tokens\n", + " self.llama.lm_head = backup_lm_head\n", + " self.extended_tokenizer.save_pretrained(path)\n", + " torch.save(self.extended_embedding.state_dict(), f\"{path}/extended_embedding.pt\")\n", + " torch.save(self.extended_unembedding.state_dict(), f\"{path}/extended_unembedding.pt\") \n", + " \n", + " @classmethod\n", + " def load(cls, path):\n", + " extended_embedding_dict = torch.load(f\"{path}/extended_embedding.pt\")\n", + " extended_unembedding_dict = torch.load(f\"{path}/extended_unembedding.pt\")\n", + " llama = AutoModelForCausalLM.from_pretrained(path)\n", + " tokenizer = AutoTokenizer.from_pretrained(path)\n", + " surgeon = cls(llama, tokenizer)\n", + " surgeon.extended_embedding.load_state_dict(extended_embedding_dict)\n", + " surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict)\n", + " return surgeon\n", + "\n", + "class PeftModelEmbeddingSurgeon():\n", + " def __init__(self, peft_model, extended_tokenizer):\n", + " try:\n", + " self.llama = peft_model.base_model.model\n", + " except AttributeError:\n", + " self.llama = peft_model\n", + " self.peft_model = peft_model\n", + " self.extended_tokenizer = extended_tokenizer\n", + " self.extended_embedding = EmbeddingWrapper2(self.llama.model.embed_tokens, len(extended_tokenizer))\n", + " self.extended_unembedding = LinearWrapper(self.llama.lm_head, len(extended_tokenizer))\n", + " \n", + " def get_surgeried_model(self):\n", + " self.backup_embed_tokens = self.llama.model.embed_tokens\n", + " self.backup_lm_head = self.llama.lm_head\n", + " self.llama.model.embed_tokens = self.extended_embedding\n", + " self.llama.lm_head = self.extended_unembedding\n", + " self.llama.config.vocab_size = len(self.extended_tokenizer)\n", + " return self.peft_model\n", + "\n", + " def save(self, peft_model, path): \n", + " self.llama.model.embed_tokens = self.backup_embed_tokens\n", + " self.llama.lm_head = self.backup_lm_head\n", + " self.peft_model.save_pretrained(path)\n", + " self.extended_tokenizer.save_pretrained(path)\n", + " torch.save(self.extended_embedding.state_dict(), f\"{path}/extended_embedding.pt\")\n", + " torch.save(self.extended_unembedding.state_dict(), f\"{path}/extended_unembedding.pt\") \n", + " \n", + " @classmethod\n", + " def load(cls, path, **kwargs):\n", + " extended_embedding_dict = torch.load(f\"{path}/extended_embedding.pt\")\n", + " extended_unembedding_dict = torch.load(f\"{path}/extended_unembedding.pt\")\n", + " peft_model = AutoModelForCausalLM.from_pretrained(path, **kwargs)\n", + " tokenizer = AutoTokenizer.from_pretrained(path)\n", + " surgeon = cls(peft_model, tokenizer)\n", + " surgeon.extended_embedding.load_state_dict(extended_embedding_dict)\n", + " surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict)\n", + " return surgeon" + ], + "outputs": [], + "execution_count": 2 + }, + { + "cell_type": "code", + "metadata": { + "jupyter": { + "is_executing": true + }, + "ExecuteTime": { + "start_time": "2024-04-15T09:37:15.076202Z" + } + }, + "source": [ + "#model_name_or_path = 'gpt2'\n", + "model_name_or_path = \"microsoft/phi-1_5\"\n", + "# load model in torch.bfloat16\n", + "model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map='auto', torch_dtype=torch.float16)\n", + "tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)" + ], + "outputs": [ + { + "data": { + "text/plain": [ 
+ "pytorch_model.bin: 0%| | 0.00/2.84G [00:00\", \n", + " single_word=False, \n", + " lstrip=True, \n", + " rstrip=True)\n", + " #special=True, \n", + " #normalized=False)\n", + "test_token1 = AddedToken(\">>24\", \n", + " single_word=False, \n", + " lstrip=True, \n", + " rstrip=True)\n", + "test_token2 = AddedToken(\">>72\", \n", + " single_word=False, \n", + " lstrip=True, \n", + " rstrip=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LlamaTokenizerFast(name_or_path='/dlabscratch1/public/llm_weights/llama2_hf/Llama-2-7b-hf/', vocab_size=32000, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '', 'eos_token': '', 'unk_token': ''}, clean_up_tokenization_spaces=False), added_tokens_decoder={\n", + "\t0: AddedToken(\"\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n", + "\t1: AddedToken(\"\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n", + "\t2: AddedToken(\"\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n", + "\t32000: AddedToken(\"<|pause|>\", rstrip=True, lstrip=True, single_word=False, normalized=False, special=True),\n", + "\t32001: AddedToken(\">>24\", rstrip=True, lstrip=True, single_word=False, normalized=False, special=True),\n", + "\t32002: AddedToken(\">>72\", rstrip=True, lstrip=True, single_word=False, normalized=False, special=True),\n", + "}\n", + "32000\n" + ] + } + ], + "source": [ + "tokenizer.add_tokens([pause_token, test_token1, test_token2], special_tokens=True)\n", + "print(tokenizer)\n", + "# get idx of pause otken\n", + "pause_token_id = tokenizer.convert_tokens_to_ids(\"<|pause|>\")\n", + "print(pause_token_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PeftModelForCausalLM(\n", + " (base_model): LoraModel(\n", + " (model): LlamaForCausalLM(\n", + " (model): LlamaModel(\n", + " (embed_tokens): EmbeddingWrapper2(\n", + " (old_embeddings): Embedding(32000, 4096)\n", + " (new_embeddings): Embedding(3, 4096)\n", + " )\n", + " (layers): ModuleList(\n", + " (0-31): 32 x LlamaDecoderLayer(\n", + " (self_attn): LlamaSdpaAttention(\n", + " (q_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (k_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (v_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " 
(lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (o_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (rotary_emb): LlamaRotaryEmbedding()\n", + " )\n", + " (mlp): LlamaMLP(\n", + " (gate_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (up_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (down_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=11008, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (act_fn): SiLU()\n", + " )\n", + " (input_layernorm): LlamaRMSNorm()\n", + " (post_attention_layernorm): LlamaRMSNorm()\n", + " )\n", + " )\n", + " (norm): LlamaRMSNorm()\n", + " )\n", + " (lm_head): LinearWrapper(\n", + " (layer): Linear(in_features=4096, out_features=32000, bias=False)\n", + " (new_embeddings): Linear(in_features=4096, out_features=3, bias=False)\n", + " )\n", + " )\n", + " )\n", + ")\n" + ] + } + ], + "source": [ + "## conventionally you'd do this like this:\n", + "#model.resize_token_embeddings(len(tokenizer))\n", + " \n", + "## ours\n", + "surgeon = PeftModelEmbeddingSurgeon(model, tokenizer)\n", + "model = surgeon.get_surgeried_model()\n", + "print(model)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "base_model.model.model.embed_tokens.new_embeddings.weight torch.Size([3, 4096])\n", + "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight torch.Size([64, 
4096])\n", + "base_model.model.model.layers.0.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.0.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.0.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.0.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.0.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.0.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.0.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.0.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.0.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.1.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.1.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.1.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.1.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.1.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.2.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.2.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.2.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.2.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.2.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.2.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.3.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.3.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.3.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.3.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.3.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.4.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.4.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.4.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.4.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.4.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.self_attn.q_proj.lora_B.default.weight 
torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.5.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.5.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.5.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.5.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.5.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.6.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.6.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.6.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.6.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.6.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.7.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.7.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.7.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.7.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.7.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.7.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.8.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.8.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.8.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.8.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.8.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.9.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.9.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.9.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.9.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.9.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.10.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.10.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.10.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.10.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.10.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.10.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.11.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.11.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.11.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.11.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.11.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.12.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.12.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.12.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.12.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.12.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.12.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.13.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.13.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.13.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.13.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.13.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.14.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.14.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.14.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.14.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.14.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.15.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.15.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.15.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.15.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.15.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.15.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.16.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.16.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.16.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.16.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.16.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.17.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.17.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.17.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.17.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.17.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.17.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.18.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.18.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.18.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.18.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.18.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.19.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.19.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.19.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.19.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.19.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.20.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.20.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.20.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.20.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.20.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.20.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.21.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.21.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.21.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.21.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.21.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.22.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.22.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.22.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.22.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.22.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.22.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.23.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.23.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.23.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.23.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.23.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.24.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.24.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.24.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.24.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.24.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.25.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.25.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.25.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.25.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.25.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.25.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.26.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.26.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.26.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.26.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.26.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.27.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + 
"base_model.model.model.layers.27.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.27.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.27.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.27.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.27.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.28.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.28.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.28.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.28.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.28.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.29.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.29.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.29.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.29.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.29.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + 
"base_model.model.model.layers.30.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.30.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.30.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.30.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.30.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.30.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.self_attn.q_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.self_attn.q_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.self_attn.k_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.self_attn.k_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.self_attn.v_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.self_attn.v_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.self_attn.o_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.self_attn.o_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.model.layers.31.mlp.gate_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.mlp.gate_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.31.mlp.up_proj.lora_A.default.weight torch.Size([64, 4096])\n", + "base_model.model.model.layers.31.mlp.up_proj.lora_B.default.weight torch.Size([11008, 64])\n", + "base_model.model.model.layers.31.mlp.down_proj.lora_A.default.weight torch.Size([64, 11008])\n", + "base_model.model.model.layers.31.mlp.down_proj.lora_B.default.weight torch.Size([4096, 64])\n", + "base_model.model.lm_head.new_embeddings.weight torch.Size([3, 4096])\n" + ] + } + ], + "source": [ + "for k, p in model.named_parameters():\n", + " if p.requires_grad:\n", + " print(k, p.shape)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "base_model.model.model.embed_tokens.old_embeddings.weight cuda:0\n", + "base_model.model.model.embed_tokens.new_embeddings.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.k_proj.lora_A.default.weight cuda:0\n", + 
"base_model.model.model.layers.0.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.0.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.0.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.0.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.1.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.1.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.1.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.q_proj.lora_A.default.weight cuda:0\n", + 
"base_model.model.model.layers.2.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.2.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.2.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.2.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.3.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.3.input_layernorm.weight cuda:0\n", + 
"base_model.model.model.layers.3.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.4.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.4.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.4.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.down_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.5.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.5.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.5.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.5.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.6.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.6.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.6.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.up_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.7.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.7.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.7.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.7.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.8.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.8.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.8.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.gate_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.9.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.9.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.9.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.9.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.10.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.10.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.10.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.o_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.11.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.11.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.11.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.11.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.12.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.12.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.12.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.v_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.13.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.13.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.13.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.13.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.k_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.14.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.14.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.14.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.q_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.q_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.q_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.k_proj.base_layer.weight cuda:0\n", + 
"base_model.model.model.layers.15.self_attn.k_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.k_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.v_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.v_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.v_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.o_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.o_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.self_attn.o_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.gate_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.gate_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.gate_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.up_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.up_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.up_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.down_proj.base_layer.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.down_proj.lora_A.default.weight cuda:0\n", + "base_model.model.model.layers.15.mlp.down_proj.lora_B.default.weight cuda:0\n", + "base_model.model.model.layers.15.input_layernorm.weight cuda:0\n", + "base_model.model.model.layers.15.post_attention_layernorm.weight cuda:0\n", + "base_model.model.model.layers.16.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.16.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.16.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.16.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.q_proj.base_layer.weight cuda:1\n", + 
"base_model.model.model.layers.17.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.17.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.17.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.17.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.18.mlp.down_proj.lora_B.default.weight cuda:1\n", 
+ "base_model.model.model.layers.18.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.18.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.19.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.19.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.19.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.up_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.20.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.20.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.20.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.20.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.21.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.21.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.21.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.gate_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.22.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.22.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.22.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.22.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.23.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.23.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.23.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.self_attn.o_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.24.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.24.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.24.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.24.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.25.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.25.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.25.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.v_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.26.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.26.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.26.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.26.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.27.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.27.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.27.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.k_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.28.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.28.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.28.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.28.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.29.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.29.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.29.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.q_proj.lora_B.default.weight cuda:1\n", + 
"base_model.model.model.layers.30.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.30.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.30.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.30.post_attention_layernorm.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.q_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.q_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.q_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.k_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.k_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.k_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.v_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.v_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.v_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.o_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.o_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.self_attn.o_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.gate_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.gate_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.gate_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.up_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.up_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.up_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.down_proj.base_layer.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.down_proj.lora_A.default.weight cuda:1\n", + "base_model.model.model.layers.31.mlp.down_proj.lora_B.default.weight cuda:1\n", + "base_model.model.model.layers.31.input_layernorm.weight cuda:1\n", + "base_model.model.model.layers.31.post_attention_layernorm.weight cuda:1\n", + 
"base_model.model.model.norm.weight cuda:1\n", + "base_model.model.lm_head.layer.weight cuda:1\n", + "base_model.model.lm_head.new_embeddings.weight cuda:1\n" + ] + } + ], + "source": [ + "for name, param in model.named_parameters():\n", + " print(name, param.device)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "w/o 1 w 1 \n", + "w/o 450 The w 450 The\n", + "skipping pause token...\n", + "w/o 4996 quick w 4996 quick\n", + "skipping pause token...\n", + "w/o 17354 brown w 17354 brown\n", + "skipping pause token...\n", + "w/o 1701 fo w 1701 fo\n", + "w/o 29916 x w 29916 x\n", + "w/o 432 j w 432 j\n", + "w/o 17204 umps w 17204 umps\n", + "w/o 975 over w 975 over\n", + "w/o 278 the w 278 the\n", + "w/o 17366 lazy w 17366 lazy\n", + "w/o 11203 dog w 11203 dog\n" + ] + } + ], + "source": [ + "# test\n", + "toks1 = tokenizer.encode('The<|pause|> quick<|pause|> brown <|pause|> fox jumps over the lazy dog', return_tensors='pt')\n", + "toks2 = tokenizer.encode('The quick brown fox jumps over the lazy dog', return_tensors='pt')\n", + "idx2 = 0\n", + "for idx1 in range(len(toks1[0])):\n", + " if toks1[0, idx1].item() != pause_token_id:\n", + " print('w/o', toks2[0, idx2].item(), tokenizer.decode([toks2[0, idx2]]), 'w', toks1[0, idx1].item(), tokenizer.decode([toks1[0, idx1]]))\n", + " assert toks2[0, idx2] == toks1[0, idx1]\n", + " idx2 += 1\n", + " else:\n", + " print('skipping pause token...')" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/dlabdata1/wendler/.rlllm/lib/python3.11/site-packages/transformers/generation/utils.py:1460: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. 
Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`.\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "out = model.generate(tokenizer.encode('The<|pause|> quick<|pause|> brown <|pause|> fox jumps over the lazy dog', return_tensors='pt'), max_length=50, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'The quick brown fox jumps over the lazy dog only one\\nThe lazy fox jumps over the lazy dog only one The lazy fox jumps over the lazy dog only one The lazy fox jumps over the'" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tokenizer.decode(out[0], skip_special_tokens=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "' The<|pause|> quick<|pause|> brown<|pause|> fox jumps over the lazy dog only one\\nThe lazy fox jumps over the lazy dog only one The lazy fox jumps over the lazy dog only one The lazy fox jumps over the'" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tokenizer.decode(out[0], skip_special_tokens=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test training" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "from datasets import load_dataset\n", + "dataset = load_dataset(\"gsm8k\", \"main\", split = \"train\")\n", + "alpaca_prompt = \"\"\"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n", + "\n", + "### Instruction:\n", + "{}\n", + "\n", + "### Input:\n", + "{}\n", + "\n", + "### Response:\n", + "{}\"\"\"\n", + "\n", + "EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN\n", + "def formatting_prompts_func(examples):\n", + " instructions = len(examples[\"question\"])*[\"Solve the math problem using a eval tool. 
The command eval[[expr]] allows you to evaluate an expression.\"]\n", + " inputs = examples[\"question\"]\n", + " outputs = examples[\"answer\"]\n", + " texts = []\n", + " for instruction, input, output in zip(instructions, inputs, outputs):\n", + " # Must add EOS_TOKEN, otherwise your generation will go on forever!\n", + " text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN\n", + " texts.append(text)\n", + " #print(texts)\n", + " return { \"text\" : texts, }\n", + "pass\n", + "\n", + "dataset = dataset.map(formatting_prompts_func, batched = True)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "PeftModelForCausalLM(\n", + " (base_model): LoraModel(\n", + " (model): LlamaForCausalLM(\n", + " (model): LlamaModel(\n", + " (embed_tokens): EmbeddingWrapper2(\n", + " (old_embeddings): Embedding(32000, 4096)\n", + " (new_embeddings): Embedding(3, 4096)\n", + " )\n", + " (layers): ModuleList(\n", + " (0-31): 32 x LlamaDecoderLayer(\n", + " (self_attn): LlamaSdpaAttention(\n", + " (q_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (k_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (v_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (o_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (rotary_emb): LlamaRotaryEmbedding()\n", + " )\n", + " (mlp): LlamaMLP(\n", + " (gate_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): 
Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (up_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (down_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=11008, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (act_fn): SiLU()\n", + " )\n", + " (input_layernorm): LlamaRMSNorm()\n", + " (post_attention_layernorm): LlamaRMSNorm()\n", + " )\n", + " )\n", + " (norm): LlamaRMSNorm()\n", + " )\n", + " (lm_head): LinearWrapper(\n", + " (layer): Linear(in_features=4096, out_features=32000, bias=False)\n", + " (new_embeddings): Linear(in_features=4096, out_features=3, bias=False)\n", + " )\n", + " )\n", + " )\n", + ")" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "w_before = model.base_model.model.model.embed_tokens.new_embeddings.weight.detach().cpu().clone()" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/dlabdata1/wendler/.rlllm/lib/python3.11/site-packages/accelerate/accelerator.py:436: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches', 'even_batches', 'use_seedable_sampler']). Please pass an `accelerate.DataLoaderConfiguration` instead: \n", + "dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False, even_batches=True, use_seedable_sampler=True)\n", + " warnings.warn(\n", + "Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.\n", + "Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.\n", + "\u001B[34m\u001B[1mwandb\u001B[0m: Currently logged in as: \u001B[33mchrisxx\u001B[0m. 
Use \u001B[1m`wandb login --relogin`\u001B[0m to force relogin\n" + ] + }, + { + "data": { + "text/html": [ + "Tracking run with wandb version 0.16.6" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Run data is saved locally in /dlabdata1/wendler/code/PauseToken/notebooks/wandb/run-20240412_112353-srjkuann" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "Syncing run copper-sea-36 to Weights & Biases (docs)
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View project at https://wandb.ai/chrisxx/huggingface" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + " View run at https://wandb.ai/chrisxx/huggingface/runs/srjkuann" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " [10/10 00:31, Epoch 0/1]\n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
StepTraining Loss
11.517500
21.428600
31.369000
41.747600
51.629700
61.517800
71.584500
82.080300
91.817600
101.349400

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from trl import SFTTrainer\n", + "from transformers import TrainingArguments\n", + "import os\n", + "\n", + "tokenizer.pad_token = tokenizer.eos_token\n", + "\n", + "# prepare model for training... \n", + "dtype = model.base_model.model.model.embed_tokens.parameters().__next__().dtype\n", + "model.base_model.model.model.embed_tokens.to(torch.float32)\n", + "model.base_model.model.lm_head.to(torch.float32)\n", + "for param in model.parameters():\n", + " if param.requires_grad:\n", + " param.data = param.data.to(torch.float32)\n", + "\n", + "\n", + "trainer = SFTTrainer(\n", + " model = model,\n", + " tokenizer = tokenizer,\n", + " train_dataset = dataset,\n", + " eval_dataset = None,\n", + " dataset_text_field = \"text\",\n", + " max_seq_length = 1024,\n", + " dataset_num_proc = 2,\n", + " packing = False, # Can make training 5x faster for short sequences.\n", + " args = TrainingArguments(\n", + " gradient_checkpointing=False,\n", + " per_device_train_batch_size = 1,\n", + " gradient_accumulation_steps = 1,\n", + " warmup_steps = 0,\n", + " max_steps = 10,\n", + " #num_train_epochs = 1,\n", + " learning_rate = 2e-3,\n", + " fp16 = not torch.cuda.is_bf16_supported(),\n", + " bf16 = torch.cuda.is_bf16_supported(),\n", + " logging_steps = 1,\n", + " optim = \"adamw_8bit\",\n", + " weight_decay = 0.01,\n", + " #lr_scheduler_type = \"linear\",\n", + " seed = 3407,\n", + " output_dir = \"outputs\",\n", + " ),\n", + ")\n", + "\n", + "with torch.cuda.amp.autocast():\n", + " trainer_stats = trainer.train()\n", + "\n", + "# prepare model for inference \n", + "model.base_model.model.model.embed_tokens.to(dtype)\n", + "model.base_model.model.lm_head.to(dtype)\n", + "\n", + "for param in model.parameters():\n", + " if param.requires_grad:\n", + " param.data = param.data.to(torch.float16)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor(0.2426, dtype=torch.float16)\n" + ] + } + ], + "source": [ + "from matplotlib import pyplot as plt\n", + "w_after = model.base_model.model.model.embed_tokens.new_embeddings.weight.detach().cpu().clone()\n", + "print(((w_before - w_after)**2).sum())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test saving and loading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/dlabdata1/wendler/.rlllm/lib/python3.11/site-packages/peft/utils/save_and_load.py:154: UserWarning: Could not find a config file in /dlabscratch1/public/llm_weights/llama2_hf/Llama-2-7b-hf/ - will assume that the vocabulary was not modified.\n", + " warnings.warn(\n" + ] + } + ], + "source": [ + "surgeon.save(model, '/dlabscratch1/tmp/peft_test')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "PeftModelForCausalLM(\n", + " (base_model): LoraModel(\n", + " (model): LlamaForCausalLM(\n", + " (model): LlamaModel(\n", + " (embed_tokens): Embedding(32000, 4096)\n", + " (layers): ModuleList(\n", + " (0-31): 32 x LlamaDecoderLayer(\n", + " (self_attn): LlamaSdpaAttention(\n", + " (q_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + 
" (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (k_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (v_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (o_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (rotary_emb): LlamaRotaryEmbedding()\n", + " )\n", + " (mlp): LlamaMLP(\n", + " (gate_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (up_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=4096, out_features=11008, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=4096, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=11008, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (down_proj): lora.Linear(\n", + " (base_layer): Linear(in_features=11008, out_features=4096, bias=False)\n", + " (lora_dropout): ModuleDict(\n", + " (default): Identity()\n", + " )\n", + " (lora_A): ModuleDict(\n", + " (default): Linear(in_features=11008, out_features=64, bias=False)\n", + " )\n", + " (lora_B): ModuleDict(\n", + " (default): Linear(in_features=64, out_features=4096, bias=False)\n", + " )\n", + " (lora_embedding_A): ParameterDict()\n", + " (lora_embedding_B): ParameterDict()\n", + " )\n", + " (act_fn): SiLU()\n", + " )\n", + " (input_layernorm): LlamaRMSNorm()\n", + " 
(post_attention_layernorm): LlamaRMSNorm()\n", + " )\n", + " )\n", + " (norm): LlamaRMSNorm()\n", + " )\n", + " (lm_head): Linear(in_features=4096, out_features=32000, bias=False)\n", + " )\n", + " )\n", + ")" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import gc\n", + "model.cpu()\n", + "gc.collect()\n", + "for i in range(torch.cuda.device_count()):\n", + " torch.cuda.set_device(i) \n", + " torch.cuda.empty_cache() " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "96dc671298af4c36b29b86cc0219d4d1", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading checkpoint shards: 0%| | 0/2 [00:00 Date: Mon, 15 Apr 2024 12:43:06 +0200 Subject: [PATCH 3/9] wip: adding small training setup for testing --- notebooks/embedding_wrapper.py | 118 ++++++++++++++++++++++++++++++++- 1 file changed, 117 insertions(+), 1 deletion(-) diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py index 6e71951..8ba91e2 100644 --- a/notebooks/embedding_wrapper.py +++ b/notebooks/embedding_wrapper.py @@ -79,6 +79,29 @@ def forward(self, x): return y +class EmbeddingWrapper3(nn.Module): + def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True): + super().__init__() + self.embedding = nn.Embedding(num_embeddings, old_embedding.embedding_dim) + self.embedding.to(old_embedding.weight.device).to(old_embedding.weight.dtype) + self.embedding.weight.data[:old_embedding.num_embeddings] = old_embedding.weight.data + + # ToDo: I don't think this has any effect + if freeze_old: + for param in old_embedding.parameters(): + param.requires_grad = False + + # register hook to update gradients + self.num_old_embeddings = old_embedding.num_embeddings + self.embedding.register_backward_hook(self._hook) + + def _hook(self, grad): + # zero out the gradient for the embedding weights coming from the old embedding + grad[:self.num_old_embeddings].zero_() + + def forward(self, x): + return self.embedding(x) + class Llama2EmbeddingSurgeon(): def __init__(self, llama, extended_tokenizer): self.llama = llama @@ -157,4 +180,97 @@ def load(cls, path, **kwargs): surgeon = cls(peft_model, tokenizer) surgeon.extended_embedding.load_state_dict(extended_embedding_dict) surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict) - return surgeon \ No newline at end of file + return surgeon + + +if __name__=="__main__": + # set up a simple model and run a few training steps, ensure that the old embeddings are not changed + # and the new embeddings are changed + + # set up a simple model + model = AutoModelForCausalLM.from_pretrained("gpt2") + tokenizer = AutoTokenizer.from_pretrained("gpt2") + extended_tokenizer = AutoTokenizer.from_pretrained("gpt2") + extended_tokenizer.add_special_tokens({"additional_special_tokens": ["<|NEW_TOKEN|>"]}) + + # set up the surgeon + extended_embedding = EmbeddingWrapper3(model.base_model.wte, len(extended_tokenizer)) + extended_unembedding = LinearWrapper(model.lm_head, len(extended_tokenizer)) + + model.base_model.wte = extended_embedding + model.lm_head = extended_unembedding + model.config.vocab_size = len(extended_tokenizer) + + from datasets import load_dataset + + dataset = load_dataset("gsm8k", "main", split="train") + 
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. + + ### Instruction: + {} + + ### Input: + {} + + ### Response: + {}""" + + EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN + + + def formatting_prompts_func(examples): + instructions = len(examples["question"]) * [ + "Solve the math problem using a eval tool. The command eval[[expr]] allows you to evaluate an expression."] + inputs = examples["question"] + outputs = examples["answer"] + texts = [] + for instruction, input, output in zip(instructions, inputs, outputs): + # Must add EOS_TOKEN, otherwise your generation will go on forever! + text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN + texts.append(text) + # print(texts) + return {"text": texts, } + + dataset = dataset.map(formatting_prompts_func, batched=True) + + w_before = model.base_model.wte.embedding.weight.detach().cpu().clone() + + from trl import SFTTrainer + from transformers import TrainingArguments + import os + + tokenizer.pad_token = tokenizer.eos_token + + trainer = SFTTrainer( + model=model, + tokenizer=tokenizer, + train_dataset=dataset, + eval_dataset=None, + dataset_text_field="text", + max_seq_length=1024, + dataset_num_proc=2, + packing=False, # Can make training 5x faster for short sequences. + args=TrainingArguments( + gradient_checkpointing=False, + per_device_train_batch_size=1, + gradient_accumulation_steps=1, + warmup_steps=0, + max_steps=2, + # num_train_epochs = 1, + learning_rate=2e-3, + logging_steps=1, + optim="adamw_torch", + weight_decay=0.01, + # lr_scheduler_type = "linear", + seed=3407, + output_dir="outputs", + ), + ) + + trainer_stats = trainer.train() + + # compare w_before and w_after + w_after = model.base_model.wte.embedding.weight.detach().cpu().clone() + print(w_before.shape, w_after.shape) + print(w_before, w_after) + assert torch.allclose(w_before, w_after[:w_before.shape[0]]) \ No newline at end of file From f3b3e25309b4cd45087127a4c1c26f6dde2691e0 Mon Sep 17 00:00:00 2001 From: lklein Date: Mon, 15 Apr 2024 12:51:36 +0200 Subject: [PATCH 4/9] wip: debugging --- notebooks/embedding_wrapper.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py index 8ba91e2..a30b370 100644 --- a/notebooks/embedding_wrapper.py +++ b/notebooks/embedding_wrapper.py @@ -93,11 +93,19 @@ def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old= # register hook to update gradients self.num_old_embeddings = old_embedding.num_embeddings - self.embedding.register_backward_hook(self._hook) + self.embedding.register_full_backward_hook(self._hook) - def _hook(self, grad): - # zero out the gradient for the embedding weights coming from the old embedding - grad[:self.num_old_embeddings].zero_() + def _hook(self, module, grad_input, grad_output): + # grad_input is a tuple (grad_wrt_output, grad_wrt_weight, grad_wrt_bias) + # We only want to modify grad_wrt_weight + grad_wrt_weight = grad_input[1] + + # Zero out gradients for original embeddings + if grad_wrt_weight is not None: + grad_wrt_weight[:self.num_old_embeddings].zero_() + + # Return modified grad_input and unchanged grad_output + return (grad_input[0], grad_wrt_weight, grad_input[2]), grad_output def forward(self, x): return self.embedding(x) From 5330a34bd795eb7d10d9fa5f3fed5de964c9c701 Mon Sep 17 00:00:00 2001 From: lklein Date: Mon, 15 Apr 2024 
12:55:48 +0200 Subject: [PATCH 5/9] wip: debugging --- notebooks/embedding_wrapper.py | 39 ++++++++++++---------------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py index a30b370..c7ffbff 100644 --- a/notebooks/embedding_wrapper.py +++ b/notebooks/embedding_wrapper.py @@ -82,33 +82,20 @@ def forward(self, x): class EmbeddingWrapper3(nn.Module): def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True): super().__init__() - self.embedding = nn.Embedding(num_embeddings, old_embedding.embedding_dim) - self.embedding.to(old_embedding.weight.device).to(old_embedding.weight.dtype) - self.embedding.weight.data[:old_embedding.num_embeddings] = old_embedding.weight.data + self.old_embedding = nn.Embedding(old_embedding.num_embeddings, old_embedding.embedding_dim) + self.old_embedding.weight.data = old_embedding.weight.data.clone() + self.new_embedding = nn.Embedding(num_embeddings - old_embedding.num_embeddings, old_embedding.embedding_dim) - # ToDo: I don't think this has any effect if freeze_old: - for param in old_embedding.parameters(): + for param in self.old_embedding.parameters(): param.requires_grad = False - # register hook to update gradients self.num_old_embeddings = old_embedding.num_embeddings - self.embedding.register_full_backward_hook(self._hook) - - def _hook(self, module, grad_input, grad_output): - # grad_input is a tuple (grad_wrt_output, grad_wrt_weight, grad_wrt_bias) - # We only want to modify grad_wrt_weight - grad_wrt_weight = grad_input[1] - - # Zero out gradients for original embeddings - if grad_wrt_weight is not None: - grad_wrt_weight[:self.num_old_embeddings].zero_() - - # Return modified grad_input and unchanged grad_output - return (grad_input[0], grad_wrt_weight, grad_input[2]), grad_output def forward(self, x): - return self.embedding(x) + old_x = x[x < self.num_old_embeddings] + new_x = x[x >= self.num_old_embeddings] - self.num_old_embeddings + return torch.cat([self.old_embedding(old_x), self.new_embedding(new_x)], dim=0) class Llama2EmbeddingSurgeon(): def __init__(self, llama, extended_tokenizer): @@ -241,7 +228,8 @@ def formatting_prompts_func(examples): dataset = dataset.map(formatting_prompts_func, batched=True) - w_before = model.base_model.wte.embedding.weight.detach().cpu().clone() + w_old_before = model.base_model.wte.old_embedding.weight.detach().cpu().clone() + w_new_before = model.base_model.wte.new_embedding.weight.detach().cpu().clone() from trl import SFTTrainer from transformers import TrainingArguments @@ -278,7 +266,8 @@ def formatting_prompts_func(examples): trainer_stats = trainer.train() # compare w_before and w_after - w_after = model.base_model.wte.embedding.weight.detach().cpu().clone() - print(w_before.shape, w_after.shape) - print(w_before, w_after) - assert torch.allclose(w_before, w_after[:w_before.shape[0]]) \ No newline at end of file + w_old_after = model.base_model.wte.old_embedding.weight.detach().cpu().clone() + w_new_after = model.base_model.wte.new_embedding.weight.detach().cpu().clone() + + print(torch.allclose(w_old_before, w_old_after)) + print(torch.allclose(w_new_before, w_new_after)) \ No newline at end of file From 5ac48fa7881f182ef98ab7314e2883aaf47f8755 Mon Sep 17 00:00:00 2001 From: lklein Date: Mon, 15 Apr 2024 13:00:38 +0200 Subject: [PATCH 6/9] setting up new testbed, getting rid of backward_hook --- notebooks/embedding_wrapper.py | 216 +++++++-------------------------- 1 file changed, 43 
insertions(+), 173 deletions(-) diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py index c7ffbff..3c5065d 100644 --- a/notebooks/embedding_wrapper.py +++ b/notebooks/embedding_wrapper.py @@ -83,8 +83,9 @@ class EmbeddingWrapper3(nn.Module): def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True): super().__init__() self.old_embedding = nn.Embedding(old_embedding.num_embeddings, old_embedding.embedding_dim) + self.embedding_dim = old_embedding.embedding_dim self.old_embedding.weight.data = old_embedding.weight.data.clone() - self.new_embedding = nn.Embedding(num_embeddings - old_embedding.num_embeddings, old_embedding.embedding_dim) + self.new_embedding = nn.Embedding(num_embeddings - old_embedding.num_embeddings, self.embedding_dim) if freeze_old: for param in self.old_embedding.parameters(): @@ -97,177 +98,46 @@ def forward(self, x): new_x = x[x >= self.num_old_embeddings] - self.num_old_embeddings return torch.cat([self.old_embedding(old_x), self.new_embedding(new_x)], dim=0) -class Llama2EmbeddingSurgeon(): - def __init__(self, llama, extended_tokenizer): - self.llama = llama - self.extended_tokenizer = extended_tokenizer - self.extended_embedding = EmbeddingWrapper2(llama.model.embed_tokens, len(extended_tokenizer)) - self.extended_unembedding = LinearWrapper(llama.lm_head, len(extended_tokenizer)) - - def get_surgeried_model(self): - self.backup_embed_tokens = self.llama.model.embed_tokens - self.backup_lm_head = self.llama.lm_head - self.llama.model.embed_tokens = self.extended_embedding - self.llama.lm_head = self.extended_unembedding - self.llama.config.vocab_size = len(self.extended_tokenizer) - return self.llama - - def save(self, llama, path): - # check if llama is surgeried - assert llama.model.embed_tokens == self.extended_embedding - assert llama.lm_head == self.extended_unembedding - backup_embed_tokens = self.llama.model.embed_tokens - backup_lm_head = self.llama.lm_head - self.llama.model.embed_tokens = self.backup_embed_tokens - self.llama.lm_head = self.backup_lm_head - self.llama.save_pretrained(path) - self.llama.model.embed_tokens = backup_embed_tokens - self.llama.lm_head = backup_lm_head - self.extended_tokenizer.save_pretrained(path) - torch.save(self.extended_embedding.state_dict(), f"{path}/extended_embedding.pt") - torch.save(self.extended_unembedding.state_dict(), f"{path}/extended_unembedding.pt") - - @classmethod - def load(cls, path): - extended_embedding_dict = torch.load(f"{path}/extended_embedding.pt") - extended_unembedding_dict = torch.load(f"{path}/extended_unembedding.pt") - llama = AutoModelForCausalLM.from_pretrained(path) - tokenizer = AutoTokenizer.from_pretrained(path) - surgeon = cls(llama, tokenizer) - surgeon.extended_embedding.load_state_dict(extended_embedding_dict) - surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict) - return surgeon - - -class PeftModelEmbeddingSurgeon(): - def __init__(self, peft_model, extended_tokenizer): - try: - self.llama = peft_model.base_model.model - except AttributeError: - self.llama = peft_model - self.peft_model = peft_model - self.extended_tokenizer = extended_tokenizer - self.extended_embedding = EmbeddingWrapper2(self.llama.model.embed_tokens, len(extended_tokenizer)) - self.extended_unembedding = LinearWrapper(self.llama.lm_head, len(extended_tokenizer)) - - def get_surgeried_model(self): - self.backup_embed_tokens = self.llama.model.embed_tokens - self.backup_lm_head = self.llama.lm_head - self.llama.model.embed_tokens = 
self.extended_embedding - self.llama.lm_head = self.extended_unembedding - self.llama.config.vocab_size = len(self.extended_tokenizer) - return self.peft_model - - def save(self, peft_model, path): - self.llama.model.embed_tokens = self.backup_embed_tokens - self.llama.lm_head = self.backup_lm_head - self.peft_model.save_pretrained(path) - self.extended_tokenizer.save_pretrained(path) - torch.save(self.extended_embedding.state_dict(), f"{path}/extended_embedding.pt") - torch.save(self.extended_unembedding.state_dict(), f"{path}/extended_unembedding.pt") - - @classmethod - def load(cls, path, **kwargs): - extended_embedding_dict = torch.load(f"{path}/extended_embedding.pt") - extended_unembedding_dict = torch.load(f"{path}/extended_unembedding.pt") - peft_model = AutoModelForCausalLM.from_pretrained(path, **kwargs) - tokenizer = AutoTokenizer.from_pretrained(path) - surgeon = cls(peft_model, tokenizer) - surgeon.extended_embedding.load_state_dict(extended_embedding_dict) - surgeon.extended_unembedding.load_state_dict(extended_unembedding_dict) - return surgeon - if __name__=="__main__": - # set up a simple model and run a few training steps, ensure that the old embeddings are not changed - # and the new embeddings are changed - - # set up a simple model - model = AutoModelForCausalLM.from_pretrained("gpt2") - tokenizer = AutoTokenizer.from_pretrained("gpt2") - extended_tokenizer = AutoTokenizer.from_pretrained("gpt2") - extended_tokenizer.add_special_tokens({"additional_special_tokens": ["<|NEW_TOKEN|>"]}) - - # set up the surgeon - extended_embedding = EmbeddingWrapper3(model.base_model.wte, len(extended_tokenizer)) - extended_unembedding = LinearWrapper(model.lm_head, len(extended_tokenizer)) - - model.base_model.wte = extended_embedding - model.lm_head = extended_unembedding - model.config.vocab_size = len(extended_tokenizer) - - from datasets import load_dataset - - dataset = load_dataset("gsm8k", "main", split="train") - alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. - - ### Instruction: - {} - - ### Input: - {} - - ### Response: - {}""" - - EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN - - - def formatting_prompts_func(examples): - instructions = len(examples["question"]) * [ - "Solve the math problem using a eval tool. The command eval[[expr]] allows you to evaluate an expression."] - inputs = examples["question"] - outputs = examples["answer"] - texts = [] - for instruction, input, output in zip(instructions, inputs, outputs): - # Must add EOS_TOKEN, otherwise your generation will go on forever! - text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN - texts.append(text) - # print(texts) - return {"text": texts, } - - dataset = dataset.map(formatting_prompts_func, batched=True) - - w_old_before = model.base_model.wte.old_embedding.weight.detach().cpu().clone() - w_new_before = model.base_model.wte.new_embedding.weight.detach().cpu().clone() - - from trl import SFTTrainer - from transformers import TrainingArguments - import os - - tokenizer.pad_token = tokenizer.eos_token - - trainer = SFTTrainer( - model=model, - tokenizer=tokenizer, - train_dataset=dataset, - eval_dataset=None, - dataset_text_field="text", - max_seq_length=1024, - dataset_num_proc=2, - packing=False, # Can make training 5x faster for short sequences. 
- args=TrainingArguments( - gradient_checkpointing=False, - per_device_train_batch_size=1, - gradient_accumulation_steps=1, - warmup_steps=0, - max_steps=2, - # num_train_epochs = 1, - learning_rate=2e-3, - logging_steps=1, - optim="adamw_torch", - weight_decay=0.01, - # lr_scheduler_type = "linear", - seed=3407, - output_dir="outputs", - ), - ) - - trainer_stats = trainer.train() - - # compare w_before and w_after - w_old_after = model.base_model.wte.old_embedding.weight.detach().cpu().clone() - w_new_after = model.base_model.wte.new_embedding.weight.detach().cpu().clone() - - print(torch.allclose(w_old_before, w_old_after)) - print(torch.allclose(w_new_before, w_new_after)) \ No newline at end of file + import torch + from torch import nn + from torch.optim import SGD + + # Step 1: Create an instance of nn.Embedding as the old embedding and initialize its weights randomly. + old_embedding = nn.Embedding(10, 32) + old_embedding.weight.data.normal_() + + # Step 2: Create an instance of EmbeddingWrapper3, passing the old embedding and the desired number of embeddings to its constructor. + num_embeddings = 15 + embedding_wrapper = EmbeddingWrapper3(old_embedding, num_embeddings) + + # Step 3: Create a linear layer on top of the EmbeddingWrapper3. + linear_layer = nn.Linear(embedding_wrapper.embedding_dim, 1) + + # Step 4: Create some synthetic training data. + x = torch.randint(num_embeddings, (100,)) # 100 random integers between 0 and num_embeddings + y = torch.randint(2, (100,)).float() # 100 random 0s and 1s + + # Step 5: Train the model using a simple training loop. + optimizer = SGD(list(embedding_wrapper.parameters()) + list(linear_layer.parameters()), lr=0.01) + criterion = nn.BCEWithLogitsLoss() + + # store the old embedding weights + old_embedding_weights = old_embedding.weight.data.clone() + new_embedding_weights = embedding_wrapper.new_embedding.weight.data.clone() + + for epoch in range(10): + optimizer.zero_grad() + embeddings = embedding_wrapper(x) + logits = linear_layer(embeddings).squeeze() + loss = criterion(logits, y) + loss.backward() + optimizer.step() + print(f"Epoch {epoch + 1}, Loss: {loss.item()}") + + # check that the old embedding weights are the same + print("test passed: ", torch.allclose(old_embedding_weights, old_embedding.weight.data)) + + # check that the new embedding weights are different + print("test passed: ", not torch.allclose(new_embedding_weights, embedding_wrapper.new_embedding.weight.data)) \ No newline at end of file From 038376e505eace526d66d4b75be68faeb6d9277c Mon Sep 17 00:00:00 2001 From: lklein Date: Mon, 15 Apr 2024 14:05:04 +0200 Subject: [PATCH 7/9] adding implementation based on autograd function --- notebooks/embedding_wrapper.py | 80 +++++++++++++++++++++++++++++++--- 1 file changed, 75 insertions(+), 5 deletions(-) diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py index 3c5065d..f9c7543 100644 --- a/notebooks/embedding_wrapper.py +++ b/notebooks/embedding_wrapper.py @@ -79,7 +79,7 @@ def forward(self, x): return y -class EmbeddingWrapper3(nn.Module): +class EmbeddingWrapperMask(nn.Module): def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True): super().__init__() self.old_embedding = nn.Embedding(old_embedding.num_embeddings, old_embedding.embedding_dim) @@ -98,7 +98,74 @@ def forward(self, x): new_x = x[x >= self.num_old_embeddings] - self.num_old_embeddings return torch.cat([self.old_embedding(old_x), self.new_embedding(new_x)], dim=0) +class EmbeddingWrapperHook(nn.Module): 
+ def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True): + super().__init__() + self.old_embedding = nn.Embedding(old_embedding.num_embeddings, old_embedding.embedding_dim) + self.embedding_dim = old_embedding.embedding_dim + self.old_embedding.weight.data = old_embedding.weight.data.clone() + self.new_embedding = nn.Embedding(num_embeddings - old_embedding.num_embeddings, self.embedding_dim) + + self.num_old_embeddings = old_embedding.num_embeddings + + if freeze_old: + self.old_embedding.weight.register_hook(lambda grad: grad * 0) + + def forward(self, x): + old_x = x[x < self.num_old_embeddings] + new_x = x[x >= self.num_old_embeddings] - self.num_old_embeddings + return torch.cat([self.old_embedding(old_x), self.new_embedding(new_x)], dim=0) + + +class PartiallyFrozenEmbedding(torch.autograd.Function): + @staticmethod + def forward(ctx, old_embedding_weight, new_embedding_weight, num_old_embeddings, embedding_dim, x): + old_x_mask = x < num_old_embeddings + new_x_mask = x >= num_old_embeddings + + old_x = x[old_x_mask] + new_x = x[new_x_mask] - num_old_embeddings + + # set up an output vector, same length as x and same embedding dimension + num_outputs = x.shape[0] + output = torch.empty((num_outputs, embedding_dim), dtype=old_embedding_weight.dtype, device=old_embedding_weight.device) + output[old_x_mask] = torch.nn.functional.embedding(old_x, old_embedding_weight) + output[new_x_mask] = torch.nn.functional.embedding(new_x, new_embedding_weight) + + ctx.save_for_backward(old_embedding_weight, new_embedding_weight, old_x, new_x, old_x_mask, new_x_mask) + ctx.num_old_embeddings = num_old_embeddings + + return output + + @staticmethod + def backward(ctx, grad_output): + old_embedding_weight, new_embedding_weight, old_x, new_x, old_x_mask, new_x_mask = ctx.saved_tensors + + grad_old_embedding = torch.zeros_like(old_embedding_weight) + grad_new_embedding = torch.zeros_like(new_embedding_weight) + + grad_old_embedding.index_add_(0, old_x, grad_output[old_x_mask]) + grad_new_embedding.index_add_(0, new_x, grad_output[new_x_mask]) + + return None, grad_new_embedding, None, None, None, None + + + +class EmbeddingWrapperFunction(nn.Module): + def __init__(self, old_embedding: nn.Embedding, num_embeddings: int): + super().__init__() + self.old_embedding = nn.Embedding(old_embedding.num_embeddings, old_embedding.embedding_dim) + self.embedding_dim = old_embedding.embedding_dim + self.old_embedding.weight.data = old_embedding.weight.data.clone() + self.new_embedding = nn.Embedding(num_embeddings - old_embedding.num_embeddings, self.embedding_dim) + self.num_old_embeddings = old_embedding.num_embeddings + + self.old_embedding.weight.requires_grad = True + self.new_embedding.weight.requires_grad = True + + def forward(self, x): + return PartiallyFrozenEmbedding.apply(self.old_embedding.weight, self.new_embedding.weight, self.num_old_embeddings, self.embedding_dim, x) if __name__=="__main__": import torch from torch import nn @@ -110,7 +177,7 @@ def forward(self, x): # Step 2: Create an instance of EmbeddingWrapper3, passing the old embedding and the desired number of embeddings to its constructor. num_embeddings = 15 - embedding_wrapper = EmbeddingWrapper3(old_embedding, num_embeddings) + embedding_wrapper = EmbeddingWrapperFunction(old_embedding, num_embeddings) # Step 3: Create a linear layer on top of the EmbeddingWrapper3. 
linear_layer = nn.Linear(embedding_wrapper.embedding_dim, 1) @@ -120,7 +187,7 @@ def forward(self, x): y = torch.randint(2, (100,)).float() # 100 random 0s and 1s # Step 5: Train the model using a simple training loop. - optimizer = SGD(list(embedding_wrapper.parameters()) + list(linear_layer.parameters()), lr=0.01) + optimizer = SGD(list(embedding_wrapper.parameters()) + list(linear_layer.parameters()), lr=0.5) criterion = nn.BCEWithLogitsLoss() # store the old embedding weights @@ -137,7 +204,10 @@ def forward(self, x): print(f"Epoch {epoch + 1}, Loss: {loss.item()}") # check that the old embedding weights are the same - print("test passed: ", torch.allclose(old_embedding_weights, old_embedding.weight.data)) + print("test passed: ", torch.allclose(old_embedding_weights, embedding_wrapper.old_embedding.weight.data)) # check that the new embedding weights are different - print("test passed: ", not torch.allclose(new_embedding_weights, embedding_wrapper.new_embedding.weight.data)) \ No newline at end of file + print("test passed: ", not torch.allclose(new_embedding_weights, embedding_wrapper.new_embedding.weight.data)) + + print(old_embedding_weights[0,0], embedding_wrapper.old_embedding.weight.data[0,0]) + print(new_embedding_weights[0,0], embedding_wrapper.new_embedding.weight.data[0,0]) \ No newline at end of file From 0806645dc49218f627d406935d4da7a66b7aad4a Mon Sep 17 00:00:00 2001 From: lklein Date: Mon, 15 Apr 2024 14:09:56 +0200 Subject: [PATCH 8/9] wip: debugging --- notebooks/embedding_wrapper.py | 93 +++++----------------------------- 1 file changed, 13 insertions(+), 80 deletions(-) diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py index f9c7543..1548d8c 100644 --- a/notebooks/embedding_wrapper.py +++ b/notebooks/embedding_wrapper.py @@ -5,80 +5,6 @@ import torch from torch import nn - -# write torch.nn.Modules that can be used as a wrapper around huggingface models and change the embedding layer -# and the unembedding layer -class LinearWrapper(nn.Module): - def __init__(self, layer: nn.Linear, num_embeddings: int, freeze_old=True): - super().__init__() - self.layer = layer - self.num_embeddings = num_embeddings - self.n_new_tokens = num_embeddings - layer.out_features - self.new_embeddings = nn.Linear(layer.in_features, self.n_new_tokens, bias=False) - self.new_embeddings.to(layer.weight.device).to(layer.weight.dtype) - if freeze_old: - for param in self.layer.parameters(): - param.requires_grad = False - - def forward(self, x): - z1 = self.layer(x) - z2 = self.new_embeddings(x) - return torch.cat([z1, z2], dim=-1) - - -class EmbeddingWrapper(nn.Module): - def __init__(self, embedding: nn.Embedding, num_embeddings: int, freeze_old=True): - super().__init__() - self.embedding_dim = embedding.embedding_dim - self.num_embeddings = num_embeddings - self.n_new_tokens = num_embeddings - embedding.num_embeddings - - # inspired from here - # https://github.com/huggingface/transformers/blob/185463784e0a0b4cd7974ce5bded7a52ae170f6d/src/transformers/modeling_utils.py#L2026 - self.old_embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim) - self.old_embeddings.weight.data = torch.ones_like(self.old_embeddings.weight.data) * 0 # 1e-7 - self.old_embeddings.weight.data[:embedding.num_embeddings] = embedding.weight.data - self.old_embeddings.to(embedding.weight.device).to(embedding.weight.dtype) - self.new_embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim) - self.new_embeddings.weight.data[:embedding.num_embeddings] = 
-        self.new_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)
-        if freeze_old:
-            for param in self.old_embeddings.parameters():
-                param.requires_grad = False
-
-    def forward(self, x):
-        self.old_embeddings(x) + self.new_embeddings(x)
-
-
-class EmbeddingWrapper2(nn.Module):
-    def __init__(self, embedding: nn.Embedding, num_embeddings: int, freeze_old=True):
-        super().__init__()
-        self.old_embeddings = embedding
-        self.num_embeddings = num_embeddings
-        self.embedding_dim = embedding.embedding_dim
-        self.n_new_tokens = num_embeddings - embedding.num_embeddings
-        self.new_embeddings = nn.Embedding(self.n_new_tokens, self.embedding_dim)
-        self.new_embeddings.to(embedding.weight.device).to(embedding.weight.dtype)
-        if freeze_old:
-            for param in self.old_embeddings.parameters():
-                param.requires_grad = False
-
-    def forward(self, x):
-        with torch.amp.autocast("cuda"):
-            mask_small = x < self.old_embeddings.num_embeddings
-            mask_large = x >= self.old_embeddings.num_embeddings
-            small_ids = x[mask_small]
-            large_ids = x[mask_large]
-            small_embs = self.old_embeddings(small_ids)
-            large_embs = self.new_embeddings(large_ids % self.old_embeddings.num_embeddings)
-            # assuming batch x seq x emb
-            y = torch.empty((x.shape[0], x.shape[1], small_embs.shape[-1]), dtype=large_embs.dtype,
-                            device=large_embs.device)
-            y[mask_small] = small_embs
-            y[mask_large] = large_embs
-            return y
-
-
 class EmbeddingWrapperMask(nn.Module):
     def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True):
         super().__init__()
@@ -94,9 +20,17 @@ def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=
         self.num_old_embeddings = old_embedding.num_embeddings
 
     def forward(self, x):
-        old_x = x[x < self.num_old_embeddings]
-        new_x = x[x >= self.num_old_embeddings] - self.num_old_embeddings
-        return torch.cat([self.old_embedding(old_x), self.new_embedding(new_x)], dim=0)
+        old_x_mask = x < self.num_old_embeddings
+        new_x_mask = x >= self.num_old_embeddings
+
+        old_x = x[old_x_mask]
+        new_x = x[new_x_mask] - self.num_old_embeddings
+
+        output = torch.zeros((len(x), self.old_embedding.embedding_dim), dtype=self.old_embedding.weight.dtype, device=self.old_embedding.weight.device)
+        output[old_x_mask] = self.old_embedding(old_x)
+        output[new_x_mask] = self.new_embedding(new_x)
+
+        return output
 
 class EmbeddingWrapperHook(nn.Module):
     def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True):
@@ -150,8 +84,6 @@ def backward(ctx, grad_output):
         return None, grad_new_embedding, None, None, None
-
-
 class EmbeddingWrapperFunction(nn.Module):
     def __init__(self, old_embedding: nn.Embedding, num_embeddings: int):
@@ -166,6 +98,7 @@ def __init__(self, old_embedding: nn.Embedding, num_embeddings: int):
     def forward(self, x):
         return PartiallyFrozenEmbedding.apply(self.old_embedding.weight, self.new_embedding.weight, self.num_old_embeddings, self.embedding_dim, x)
+
 if __name__=="__main__":
     import torch
     from torch import nn
@@ -177,7 +110,7 @@ def forward(self, x):
     # Step 2: Create an instance of EmbeddingWrapper3, passing the old embedding and the desired number of embeddings to its constructor.
     num_embeddings = 15
-    embedding_wrapper = EmbeddingWrapperFunction(old_embedding, num_embeddings)
+    embedding_wrapper = EmbeddingWrapperHook(old_embedding, num_embeddings)
 
     # Step 3: Create a linear layer on top of the EmbeddingWrapper3.
     linear_layer = nn.Linear(embedding_wrapper.embedding_dim, 1)
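
One way to check the mask-based forward rewritten in this patch (again just a sketch under the same import assumption, and assuming the wrapper keeps the old_embedding / new_embedding submodules shown in the hunks and takes 1-D id tensors): its output should match a plain F.embedding lookup into the old and new weight matrices stacked along dim 0.

    import torch
    from torch import nn
    import torch.nn.functional as F
    from embedding_wrapper import EmbeddingWrapperMask

    old = nn.Embedding(10, 5)
    wrapper = EmbeddingWrapperMask(old, num_embeddings=15)

    ids = torch.randint(0, 15, (64,))
    # rows 0..9 come from the old table, rows 10..14 from the new one
    stacked = torch.cat([wrapper.old_embedding.weight, wrapper.new_embedding.weight], dim=0)
    print(torch.allclose(wrapper(ids), F.embedding(ids, stacked)))  # expected: True
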
From af9b61eaf025f3af842e57984a2a72df95f35bb9 Mon Sep 17 00:00:00 2001
From: lklein
Date: Mon, 15 Apr 2024 14:14:02 +0200
Subject: [PATCH 9/9] probably done, adding 3 different weight freezing approaches. Based on a mask, a hook and a custom autograd function

---
 notebooks/embedding_wrapper.py | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/notebooks/embedding_wrapper.py b/notebooks/embedding_wrapper.py
index 1548d8c..31a8ae3 100644
--- a/notebooks/embedding_wrapper.py
+++ b/notebooks/embedding_wrapper.py
@@ -35,20 +35,22 @@ def forward(self, x):
 class EmbeddingWrapperHook(nn.Module):
     def __init__(self, old_embedding: nn.Embedding, num_embeddings: int, freeze_old=True):
         super().__init__()
-        self.old_embedding = nn.Embedding(old_embedding.num_embeddings, old_embedding.embedding_dim)
-        self.embedding_dim = old_embedding.embedding_dim
-        self.old_embedding.weight.data = old_embedding.weight.data.clone()
-        self.new_embedding = nn.Embedding(num_embeddings - old_embedding.num_embeddings, self.embedding_dim)
         self.num_old_embeddings = old_embedding.num_embeddings
+        self.embedding_dim = old_embedding.embedding_dim
+        self.new_embedding = nn.Embedding(num_embeddings, self.embedding_dim)
+
+        self.new_embedding.weight.data[:self.num_old_embeddings] = old_embedding.weight.data.clone()
 
         if freeze_old:
-            self.old_embedding.weight.register_hook(lambda grad: grad * 0)
+            self.new_embedding.weight.register_hook(self._hook)
+
+    def _hook(self, grad):
+        grad[:self.num_old_embeddings] = 0
+        return grad
 
     def forward(self, x):
-        old_x = x[x < self.num_old_embeddings]
-        new_x = x[x >= self.num_old_embeddings] - self.num_old_embeddings
-        return torch.cat([self.old_embedding(old_x), self.new_embedding(new_x)], dim=0)
+        return self.new_embedding(x)
 
 
 class PartiallyFrozenEmbedding(torch.autograd.Function):
@@ -110,7 +112,7 @@ def forward(self, x):
     # Step 2: Create an instance of EmbeddingWrapper3, passing the old embedding and the desired number of embeddings to its constructor.
     num_embeddings = 15
-    embedding_wrapper = EmbeddingWrapperHook(old_embedding, num_embeddings)
+    embedding_wrapper = EmbeddingWrapperFunction(old_embedding, num_embeddings)
 
     # Step 3: Create a linear layer on top of the EmbeddingWrapper3.
     linear_layer = nn.Linear(embedding_wrapper.embedding_dim, 1)
@@ -139,6 +141,9 @@ def forward(self, x):
 
     # check that the old embedding weights are the same
     print("test passed: ", torch.allclose(old_embedding_weights, embedding_wrapper.old_embedding.weight.data))
+    # for hook embedding wrapper
+    #print("test passed: ", torch.allclose(old_embedding_weights, embedding_wrapper.new_embedding.weight.data[:embedding_wrapper.num_old_embeddings]))
+
     # check that the new embedding weights are different
     print("test passed: ", not torch.allclose(new_embedding_weights, embedding_wrapper.new_embedding.weight.data))
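
A matching smoke test for the hook-based variant from this last patch (a sketch under the same import assumption): after one SGD step, the rows copied from the old embedding should be untouched, while rows for new tokens that appear in the batch should move. One caveat worth flagging: zeroing gradients in a hook only blocks the gradient term, so an optimizer configured with weight decay would still move the "frozen" rows.

    import torch
    from torch import nn
    from torch.optim import SGD
    from embedding_wrapper import EmbeddingWrapperHook

    old = nn.Embedding(10, 5)
    wrapper = EmbeddingWrapperHook(old, num_embeddings=15)
    before = wrapper.new_embedding.weight.data.clone()

    optimizer = SGD(wrapper.parameters(), lr=0.5)
    ids = torch.randint(0, 15, (64,))
    wrapper(ids).sum().backward()   # the hook zeroes the gradient of the first 10 rows
    optimizer.step()

    after = wrapper.new_embedding.weight.data
    print("old rows unchanged:", torch.allclose(before[:10], after[:10]))     # expected: True
    print("new rows updated:", not torch.allclose(before[10:], after[10:]))   # expected: True if any id >= 10 occurred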