fix: training #66

Merged · 2 commits · Jan 31, 2024
8 changes: 0 additions & 8 deletions protein_lm/configs/train/toy_hf.yaml
@@ -9,14 +9,6 @@ dataset:
sequence_column_name: "sequence"
max_sequence_length: 10
do_curriculum_learning: false
- curriculum_learning_strategy:
-   - 'sequence_length'
-   - 'ppl'
-   - 'plddt'
- curriculum_learning_column_name:
-   - 'sequence_length'
-   - 'ppl'
-   - 'plddt'

# corresponds to HuggingFace's TrainingArguments
training_arguments:
8 changes: 0 additions & 8 deletions protein_lm/configs/train/toy_localcsv.yaml
@@ -9,14 +9,6 @@ dataset:
sequence_column_name: "sequence"
max_sequence_length: 10
do_curriculum_learning: false
- curriculum_learning_strategy:
-   - 'sequence_length'
-   - 'ppl'
-   - 'plddt'
- curriculum_learning_column_name:
-   - 'sequence_length'
-   - 'ppl'
-   - 'plddt'

# corresponds to HuggingFace's TrainingArguments
training_arguments:
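Both toy configs drop the curriculum-learning defaults because they run with `do_curriculum_learning: false`; the removed values were also YAML lists, while `DatasetConfig` (below) types the corresponding fields as strings, so they would not be expected to validate against that schema anyway. A minimal sketch, assuming only PyYAML and the keys shown in these hunks, of how the trimmed `dataset` block now parses:

```python
# Hypothetical snippet, not from the repo: the trimmed dataset block simply
# omits the curriculum keys, which fall back to None downstream.
import yaml

trimmed_block = """
dataset:
  sequence_column_name: "sequence"
  max_sequence_length: 10
  do_curriculum_learning: false
"""

dataset_cfg = yaml.safe_load(trimmed_block)["dataset"]
assert dataset_cfg.get("curriculum_learning_strategy") is None
assert dataset_cfg.get("curriculum_learning_column_name") is None
```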
4 changes: 2 additions & 2 deletions protein_lm/modeling/getters/dataset.py
@@ -30,8 +30,8 @@ class DatasetConfig(BaseModel):

max_sequence_length: int
do_curriculum_learning: bool
- curriculum_learning_strategy: str
- curriculum_learning_column_name: str
+ curriculum_learning_strategy: Optional[str] = None
+ curriculum_learning_column_name: Optional[str] = None


def set_input_ids(
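The two curriculum fields switch from required to `Optional[str]` with a `None` default, so configs that omit them (like the trimmed toy configs above) still pass validation. A minimal sketch of the before/after behaviour, assuming `BaseModel` here is pydantic's and using a reduced stand-in rather than the full `DatasetConfig`:

```python
from typing import Optional
from pydantic import BaseModel, ValidationError

# Reduced stand-ins keeping only the fields touched in this hunk (hypothetical).
class NewSchema(BaseModel):
    do_curriculum_learning: bool
    curriculum_learning_strategy: Optional[str] = None
    curriculum_learning_column_name: Optional[str] = None

class OldSchema(BaseModel):
    do_curriculum_learning: bool
    curriculum_learning_strategy: str
    curriculum_learning_column_name: str

trimmed = {"do_curriculum_learning": False}  # curriculum keys omitted, as in the toy configs

assert NewSchema(**trimmed).curriculum_learning_strategy is None  # now accepted

try:
    OldSchema(**trimmed)                      # previously required, so this raised
except ValidationError as err:
    print(f"old schema rejects the trimmed config ({len(err.errors())} missing fields)")
```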
6 changes: 4 additions & 2 deletions protein_lm/modeling/models/apt/config.py
@@ -1,5 +1,5 @@
from transformers import GPT2Config

+ from typing import Literal

class APTConfig(GPT2Config):
"""
@@ -8,14 +8,16 @@ class APTConfig(GPT2Config):

def __init__(
self,
- position_embedding="learned",
+ position_embedding: Literal["alibi", "learned", "rope", "rerope", "linear_rope_scaling", "dynamic_rope_scaling"]="learned",
tokenizer=None,
+ max_sequence_length = 1024,
attn_type="standard",
**kwargs
):
super().__init__(**kwargs)
self.nn_model_type = "APT"
self.position_embedding = position_embedding
self.tokenizer = tokenizer
+ self.max_sequence_length = max_sequence_length
self.attn_type = attn_type
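`position_embedding` gains a `Literal` annotation enumerating the supported schemes, and `max_sequence_length` becomes a constructor argument stored on the config. Note that `Literal` only constrains the value for static type checkers and documentation; it does not validate at runtime. A small illustration with a reduced, hypothetical stand-in (not the real `APTConfig`):

```python
from typing import Literal

PositionEmbedding = Literal[
    "alibi", "learned", "rope", "rerope",
    "linear_rope_scaling", "dynamic_rope_scaling",
]

# Hypothetical stand-in mirroring the new keyword arguments.
def make_config(position_embedding: PositionEmbedding = "learned",
                max_sequence_length: int = 1024) -> dict:
    return {"position_embedding": position_embedding,
            "max_sequence_length": max_sequence_length}

make_config(position_embedding="rope")    # accepted by type checkers and at runtime
make_config(position_embedding="ropes")   # still runs, but mypy/pyright flag the typo
```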

24 changes: 13 additions & 11 deletions protein_lm/modeling/models/apt/model_pytorch.py
@@ -30,8 +30,7 @@ def __init__(self, config, is_cross_attention=False, layer_idx=None):
)
self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
self.position_embedding = config.position_embedding
- self.rope_scaling_factor=config.rope_scaling_factor
- self.rope_theta=config.rope_theta

self.max_sequence_length = config.max_sequence_length
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
@@ -72,15 +71,18 @@ def __init__(self, config, is_cross_attention=False, layer_idx=None):

self.pruned_heads = set()

- self.rot_emb=None
- if self.position_embedding == "rope":
-     self.rot_emb=RotaryEmbedding(dim=self.head_dim)
- elif self.position_embedding == "rerope":
-     self.rot_emb = RectifiedRotaryEmbedding(dim=self.head_dim,max_position_embeddings = self.max_positions)
- elif self.position_embedding=="linear_rope_scaling":
-     self.rot_emb=LlamaLinearScalingRotaryEmbedding(dim=self.head_dim,max_position_embeddings=self.max_positions,scaling_factor=self.rope_scaling_factor,base=self.rope_theta)
- elif self.position_embedding=="dynamic_rope_scaling":
-     self.rot_emb=LlamaDynamicNTKScalingRotaryEmbedding(dim=self.head_dim,max_position_embeddings=self.max_positions,scaling_factor=self.rope_scaling_factor,base=self.rope_theta)
+ self.rot_emb = None
+ if self.position_embedding in ["rope", "rerope", "linear_rope_scaling", "dynamic_rope_scaling"]:
+     self.rope_scaling_factor = config.rope_scaling_factor
+     self.rope_theta = config.rope_theta
+     if self.position_embedding == "rope":
+         self.rot_emb=RotaryEmbedding(dim=self.head_dim)
+     elif self.position_embedding == "rerope":
+         self.rot_emb = RectifiedRotaryEmbedding(dim=self.head_dim,max_position_embeddings = self.max_positions)
+     elif self.position_embedding=="linear_rope_scaling":
+         self.rot_emb=LlamaLinearScalingRotaryEmbedding(dim=self.head_dim,max_position_embeddings=self.max_positions,scaling_factor=self.rope_scaling_factor,base=self.rope_theta)
+     elif self.position_embedding=="dynamic_rope_scaling":
+         self.rot_emb=LlamaDynamicNTKScalingRotaryEmbedding(dim=self.head_dim,max_position_embeddings=self.max_positions,scaling_factor=self.rope_scaling_factor,base=self.rope_theta)



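With this restructuring, `rope_scaling_factor` and `rope_theta` are read from the config only when a RoPE-style position embedding is selected, so configs using `learned` or `alibi` embeddings no longer need to define those attributes. A minimal sketch of the guarded pattern, using `SimpleNamespace` objects as hypothetical stand-ins for the real config:

```python
from types import SimpleNamespace

ROPE_VARIANTS = ("rope", "rerope", "linear_rope_scaling", "dynamic_rope_scaling")

def read_rope_settings(config):
    """Mirror of the guard above: rope_* attributes are only touched for RoPE variants."""
    if config.position_embedding in ROPE_VARIANTS:
        return config.rope_scaling_factor, config.rope_theta
    return None, None

# Hypothetical configs (not real APTConfig instances).
learned_cfg = SimpleNamespace(position_embedding="learned")  # defines no rope_* fields
rope_cfg = SimpleNamespace(position_embedding="rope",
                           rope_scaling_factor=1.0, rope_theta=10000.0)

print(read_rope_settings(learned_cfg))  # (None, None): previously these reads were unconditional
print(read_rope_settings(rope_cfg))     # (1.0, 10000.0)
```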