From 25832a508834a8886f38209489847358900e0d13 Mon Sep 17 00:00:00 2001
From: Chime Ogbuji
Date: Sat, 22 Feb 2025 14:07:40 -0500
Subject: [PATCH] Fix logic and update changelog

---
 CHANGELOG.md                    | 2 ++
 src/mlx_tuning_fork/training.py | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 088a74a..068833e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,9 +11,11 @@ Major synchronization to changes in MLX
 - Configuration for DoRA fine tuning
 - Summary of how to perform mlx CLI fine tuning using generated parameters
 - Composable configurations
+- axolotl-like configuration parameters for automatically determining values for the mlx_lm fine-tuning parameters
 
 ### Removed
 
 - Schedule configiration (use MLXes)
 - Colorize option
+-
 
diff --git a/src/mlx_tuning_fork/training.py b/src/mlx_tuning_fork/training.py
index 8c00d55..a309588 100755
--- a/src/mlx_tuning_fork/training.py
+++ b/src/mlx_tuning_fork/training.py
@@ -116,7 +116,7 @@ def composably_train(args, config, config_file, model, summary, tokenizer, train
     scaled_steps_per_eval = int(num_iterations * args.validation_interval_proportion)
     scaled_val_batches = int(args.validations_per_train_item * args.validation_interval_proportion * num_iterations)
     scaled_val_batches = max(1, scaled_val_batches)
-    scaled_steps_per_report = int(args.reporting_interval_proportion * num_iterations)
+    scaled_steps_per_report = max(1, int(args.reporting_interval_proportion * num_iterations))
     if args.saves_per_epoch:
         scaled_save_every = int(epoch_num_steps / args.saves_per_epoch)
     else:
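
Reviewer note, not part of the patch: the one-line change guards against scaled_steps_per_report rounding down to zero when the reporting proportion times the iteration count is below 1, mirroring the clamp already applied to scaled_val_batches. Below is a minimal standalone sketch of the same proportion-to-steps scaling; the helper name scale_intervals and the example values are hypothetical and only illustrate the arithmetic in composably_train.

    # Hedged sketch (assumption: helper name and values are illustrative only).
    def scale_intervals(num_iterations, epoch_num_steps,
                        reporting_interval_proportion,
                        validation_interval_proportion,
                        validations_per_train_item,
                        saves_per_epoch=None):
        # Evaluation and validation intervals scale with the total iteration count.
        steps_per_eval = int(num_iterations * validation_interval_proportion)
        val_batches = max(1, int(validations_per_train_item
                                 * validation_interval_proportion
                                 * num_iterations))
        # Without the max(1, ...) guard the patch adds, a short run (e.g. 50
        # iterations with a 0.01 reporting proportion) yields int(0.5) == 0
        # and training would never report progress.
        steps_per_report = max(1, int(reporting_interval_proportion * num_iterations))
        save_every = int(epoch_num_steps / saves_per_epoch) if saves_per_epoch else None
        return steps_per_eval, val_batches, steps_per_report, save_every

    print(scale_intervals(50, 50, 0.01, 0.1, 0.5))  # -> (5, 2, 1, None)

With those hypothetical values the unclamped expression would return 0 for the reporting interval; clamping to at least one step keeps the behavior consistent with the existing val_batches clamp two lines above the fix.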