
Commit de88c70

Prepare for PEFT release of v0.14.0 (#2258)
- Bump versions
- Remove deprecated `convert_pissa_to_lora` argument
- Remove a pytest skip for older transformers versions
- Adjust some comments, docstrings
1 parent: 860f783 · commit: de88c70

8 files changed (+12 −60 lines)


examples/pissa_finetuning/README.md (+1 −1)

@@ -71,7 +71,7 @@ The main advantage of PiSSA is concentrated during the training phase. For a tra
 peft_model.save_pretrained(output_dir)
 # Given the matrices $A_0$ and $B_0$, initialized by PiSSA and untrained, and the trained matrices $A$ and $B$,
 # we can convert these to LoRA by setting $\Delta W = A \times B - A_0 \times B_0 = [A \mid A_0] \times [B \mid -B_0]^T = A'B'$.
-peft_model.save_pretrained(output_dir, convert_pissa_to_lora="pissa_init")
+peft_model.save_pretrained(output_dir, path_initial_model_for_weight_conversion="pissa_init")

 ```
 This conversion enables the loading of LoRA on top of a standard base model:
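For context, the loading step the README refers to looks roughly like the sketch below; the base model name and adapter path are placeholders, not part of this diff:

```python
# Minimal sketch: load the converted LoRA adapter onto an unmodified base model.
# "meta-llama/Llama-2-7b-hf" and "pissa_lora" are illustrative placeholders.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
peft_model = PeftModel.from_pretrained(base_model, "pissa_lora")
```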

examples/pissa_finetuning/pissa_finetuning.py (+1 −1)

@@ -136,7 +136,7 @@ class ScriptArguments(SFTConfig):
     if script_args.convert_pissa_to_lora:
         peft_model.save_pretrained(
             os.path.join(script_args.output_dir, "pissa_lora"),
-            convert_pissa_to_lora=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
+            path_initial_model_for_weight_conversion=os.path.join(script_args.residual_model_name_or_path, "pissa_init"),
         )
     else:
         peft_model.save_pretrained(

setup.py (+1 −1)

@@ -15,7 +15,7 @@
 from setuptools import find_packages, setup


-VERSION = "0.13.3.dev0"
+VERSION = "0.14.0"

 extras = {}
 extras["quality"] = [

src/peft/__init__.py (+1 −1)

@@ -17,7 +17,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-__version__ = "0.13.3.dev0"
+__version__ = "0.14.0"

 from .auto import (
     AutoPeftModel,

src/peft/peft_model.py (−10)

@@ -229,7 +229,6 @@ def save_pretrained(
         selected_adapters: Optional[list[str]] = None,
         save_embedding_layers: Union[str, bool] = "auto",
         is_main_process: bool = True,
-        convert_pissa_to_lora: Optional[str] = None,
         path_initial_model_for_weight_conversion: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
@@ -253,8 +252,6 @@ def save_pretrained(
             is_main_process (`bool`, *optional*):
                 Whether the process calling this is the main process or not. Will default to `True`. Will not save the
                 checkpoint if not on the main process, which is important for multi device setups (e.g. DDP).
-            convert_pissa_to_lora (`str, *optional*`):
-                Deprecated. Use `path_initial_model_for_weight_conversion` instead.
             path_initial_model_for_weight_conversion (`str, *optional*`):
                 The path to the initialized adapter, which is obtained after initializing the model with PiSSA or OLoRA
                 and before performing any training. When `path_initial_model_for_weight_conversion` is not None, the
@@ -281,13 +278,6 @@ def save_pretrained(
                     f"You passed an invalid `selected_adapters` arguments, current supported adapter names are"
                     f" {list(self.peft_config.keys())} - got {selected_adapters}."
                 )
-        # TODO: remove deprecated parameter in PEFT v0.14.0
-        if convert_pissa_to_lora is not None:
-            warnings.warn(
-                "`convert_pissa_to_lora` is deprecated and will be removed in a future version. "
-                "Use `path_initial_model_for_weight_conversion` instead."
-            )
-            path_initial_model_for_weight_conversion = convert_pissa_to_lora

         def save_mutated_as_lora(peft_config, path_initial_model_for_weight_conversion, output_state_dict, kwargs):
             if peft_config.use_rslora and (peft_config.rank_pattern or peft_config.alpha_pattern):
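With the deprecated keyword gone, callers must pass the new argument directly. A minimal sketch of the intended flow, reconstructed from the README and the removed test; the base model name, adapter paths, and rank are illustrative placeholders:

```python
# Sketch of the PiSSA-to-LoRA conversion flow using the surviving argument.
# Model name, paths, and rank are placeholders, not part of this commit.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
config = LoraConfig(init_lora_weights="pissa", r=8)
peft_model = get_peft_model(base, config)

# Save the untrained, PiSSA-initialized adapter; the conversion needs it later.
peft_model.peft_config["default"].init_lora_weights = True
peft_model.save_pretrained("pissa_init")
peft_model.peft_config["default"].init_lora_weights = "pissa"

# ... train peft_model ...

# After training, save a LoRA-compatible adapter by pointing at the initial adapter.
# `convert_pissa_to_lora=...` is no longer supported as of v0.14.0.
peft_model.save_pretrained(
    "pissa_lora", path_initial_model_for_weight_conversion="pissa_init"
)
```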

tests/test_custom_models.py (+7 −1)

@@ -912,7 +912,13 @@ def from_pretrained(cls, model_id, torch_dtype=None):


 class PeftCustomModelTester(unittest.TestCase, PeftCommonTester):
-    """TODO"""
+    """
+    Implements the tests for custom models.
+
+    Most tests should just call the parent class, e.g. test_save_pretrained calls self._test_save_pretrained. Override
+    this if custom models don't work with the parent test method.
+
+    """

     transformers_class = MockTransformerWrapper
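To illustrate the pattern the new docstring describes, a hypothetical test method on this class might delegate like so; the parametrization and argument names are assumptions for illustration, not copied from the test suite:

```python
# Hypothetical sketch of the delegation pattern described in the docstring above.
# The argument names and the parent helper's signature are assumed, not verbatim.
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
    # Most of the work happens in the shared helper from PeftCommonTester.
    self._test_save_pretrained(model_id, config_cls, config_kwargs)
```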

tests/test_initialization.py (−38)

@@ -583,44 +583,6 @@ def test_pissa_alpha_pattern_and_rslora_raises(self, tmp_path):
                 tmp_path / "pissa-model", path_initial_model_for_weight_conversion=tmp_path / "init-model"
             )

-    # TODO: remove test for deprecated arg in PEFT v0.14.0
-    def test_lora_pissa_conversion_same_output_after_loading_with_deprecated_arg(self, data, tmp_path):
-        model = self.get_model()
-        config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
-        peft_model = get_peft_model(deepcopy(model), config)
-        peft_model.peft_config["default"].init_lora_weights = True
-        peft_model.save_pretrained(tmp_path / "init-model")
-        peft_model.peft_config["default"].init_lora_weights = "pissa"
-
-        tol = 1e-06
-        peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
-        output_pissa = peft_model(data)[0]
-
-        peft_model.save_pretrained(tmp_path / "pissa-model-converted", convert_pissa_to_lora=tmp_path / "init-model")
-        model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
-        output_converted = model_converted(data)[0]
-
-        assert torch.allclose(output_pissa, output_converted, atol=tol, rtol=tol)
-        assert model_converted.peft_config["default"].r == 16
-        assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
-        assert torch.allclose(
-            model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
-        )
-
-    # TODO: remove test for deprecated warning in PEFT v0.14.0
-    def test_lora_pissa_conversion_deprecated_warning(self, data, tmp_path):
-        model = self.get_model()
-        config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
-        peft_model = get_peft_model(deepcopy(model), config)
-        peft_model.peft_config["default"].init_lora_weights = True
-        peft_model.save_pretrained(tmp_path / "init-model")
-        warning_message = "`convert_pissa_to_lora` is deprecated and will be removed in a future version. Use `path_initial_model_for_weight_conversion` instead."
-        # Test the warning
-        with pytest.warns(UserWarning, match=warning_message):
-            peft_model.save_pretrained(
-                tmp_path / "pissa-model-converted", convert_pissa_to_lora=tmp_path / "init-model"
-            )
-
     def test_olora_conversion_same_output_after_loading(self, data, tmp_path):
         model = self.get_model()
         output_base = model(data)[0]

tests/test_xlora.py (+1 −7)

@@ -15,10 +15,8 @@
 import os

 import huggingface_hub
-import packaging
 import pytest
 import torch
-import transformers
 from safetensors.torch import load_file
 from transformers import AutoModelForCausalLM, AutoTokenizer

@@ -27,9 +25,6 @@
 from peft.utils import infer_device


-uses_transformers_4_45 = packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.45.0")
-
-
 class TestXlora:
     torch_device = infer_device()

@@ -133,8 +128,7 @@ def test_functional(self, tokenizer, model):
         )
         assert torch.isfinite(outputs[: inputs.shape[1] :]).all()

-    # TODO: remove the skip when 4.45 is released!
-    @pytest.mark.skipif(not uses_transformers_4_45, reason="Requires transformers >= 4.45")
+    # TODO: fix the xfailing test
     @pytest.mark.xfail
     def test_scalings_logging_methods(self, tokenizer, model):
         model.enable_scalings_logging()
