Commit 8270f9b

Merge pull request #223 from togethercomputer/artem/change_lora_ft_default

Enable LoRA fine-tuning by default

2 parents: 759add7 + 953359b
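This commit flips the default of the `lora` flag from False to True in the CLI option (`--lora/--no-lora`) and in both the synchronous and asynchronous `create` entry points of the Python SDK, and bumps the package version from 1.3.8 to 1.3.9. Fine-tuning jobs now train LoRA adapters unless the caller explicitly opts back into full fine-tuning.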

3 files changed: +4 -4 lines changed


pyproject.toml (+1 -1)

@@ -12,7 +12,7 @@ build-backend = "poetry.masonry.api"
 
 [tool.poetry]
 name = "together"
-version = "1.3.8"
+version = "1.3.9"
 authors = [
     "Together AI <[email protected]>"
 ]

src/together/cli/api/finetune.py (+1 -1)

@@ -92,7 +92,7 @@ def fine_tuning(ctx: click.Context) -> None:
 @click.option(
     "--lora/--no-lora",
     type=bool,
-    default=False,
+    default=True,
     help="Whether to use LoRA adapters for fine-tuning",
 )
 @click.option("--lora-r", type=int, default=8, help="LoRA adapters' rank")

src/together/resources/finetune.py (+2 -2)

@@ -149,7 +149,7 @@ def create(
     warmup_ratio: float = 0.0,
     max_grad_norm: float = 1.0,
     weight_decay: float = 0.0,
-    lora: bool = False,
+    lora: bool = True,
     lora_r: int | None = None,
     lora_dropout: float | None = 0,
     lora_alpha: float | None = None,

@@ -490,7 +490,7 @@ async def create(
     warmup_ratio: float = 0.0,
     max_grad_norm: float = 1.0,
     weight_decay: float = 0.0,
-    lora: bool = False,
+    lora: bool = True,
     lora_r: int | None = None,
     lora_dropout: float | None = 0,
     lora_alpha: float | None = None,
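On the SDK side, `lora=True` is now the default for both the sync and async `create` methods, so omitting the argument trains LoRA adapters. A minimal sketch of the default and opt-out paths, assuming TOGETHER_API_KEY is set in the environment and using placeholder file and model IDs:

from together import Together

client = Together()  # picks up TOGETHER_API_KEY from the environment

# Default behavior after this commit: trains LoRA adapters.
lora_job = client.fine_tuning.create(
    training_file="file-abc123",           # placeholder uploaded-file ID
    model="example-org/example-model-8b",  # placeholder base model name
)

# Passing lora=False explicitly restores the previous full fine-tuning behavior.
full_job = client.fine_tuning.create(
    training_file="file-abc123",
    model="example-org/example-model-8b",
    lora=False,
)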
