Merge pull request #424 from bmaltais/dev
v2.3.0
bmaltais committed Mar 22, 2023
2 parents 1ac6892 + e78c12f commit 838478b
Showing 39 changed files with 6,218 additions and 3,930 deletions.
25 changes: 25 additions & 0 deletions README.md
@@ -41,6 +41,9 @@ If you run on Linux and would like to use the GUI, there is now a port of it as

## Installation

### Runpod
Follow the instructions found in this discussion: https://github.com/bmaltais/kohya_ss/discussions/379

### Ubuntu
In the terminal, run

@@ -189,6 +192,28 @@ This will store a backup file with your current locally installed pip packages

## Change History

* 2023/03/22 (v21.3.0)
- Add a function to load training config with `.toml` to each training script. Thanks to Linaqruf for this great contribution!
- Specify `.toml` file with `--config_file`. `.toml` file has `key=value` entries. Keys are same as command line options. See [#241](https://github.com/kohya-ss/sd-scripts/pull/241) for details.
    - All sub-sections are combined into a single dictionary (the section names are ignored).
    - Omitted arguments fall back to the command line defaults.
    - Command line args override the arguments in `.toml`.
    - With the `--output_config` option, you can write the current command line options to the `.toml` specified with `--config_file`. Use this as a template; a minimal sketch of the merge order appears after this change history.
- Add `--lr_scheduler_type` and `--lr_scheduler_args` arguments for custom LR scheduler to each training script. Thanks to Isotr0py! [#271](https://github.com/kohya-ss/sd-scripts/pull/271)
    - These are specified in the same way as the optimizer arguments.
  - Support prompt weighting and remove the length limit in sample image generation. Thanks to mio2333! [#288](https://github.com/kohya-ss/sd-scripts/pull/288)
    - `( )`, `(xxxx:1.2)` and `[ ]` can be used.
  - Fix an exception when training a model in diffusers format with `train_network.py`. Thanks to orenwang! [#290](https://github.com/kohya-ss/sd-scripts/pull/290)
  - Add a warning if you are about to overwrite an existing model: https://github.com/bmaltais/kohya_ss/issues/404
- Add `--vae_batch_size` for faster latents caching to each training script. This batches VAE calls.
    - Please start with `2` or `4` depending on the amount of VRAM; a batching sketch appears after this change history.
  - Fix the calculation of the number of training steps when `--gradient_accumulation_steps` and `--max_train_epochs` are used. Thanks to tsukimiya!
  - Extract argument parser setup into external scripts. Thanks to robertsmieja!
  - Fix an issue when training with `--full_path` and without `.npz` files.
  - Support upper-case image file extensions on non-Windows environments.
- Fix `resize_lora.py` to work with LoRA with dynamic rank (including `conv_dim != network_dim`). Thanks to toshiaki!
- Fix issue: https://github.com/bmaltais/kohya_ss/issues/406
- Add device support to LoRA extract.
* 2023/03/19 (v21.2.5):
- Fix basic captioning logic
    - Add the option to skip text encoder training in DreamBooth by setting `Step text encoder training` to -1.
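
The following is a minimal, illustrative Python sketch of the `.toml` config merge order described for v21.3.0 above: sub-sections are flattened into one dictionary, command line arguments override the file, and omitted arguments fall back to the parser defaults. It is not the sd-scripts implementation; the third-party `toml` package, the option names, and the `flatten_sections` helper are assumptions made purely for the example.

```python
# Sketch only: approximate merge order of --config_file values and CLI options.
import argparse
import toml  # assumption: the third-party `toml` package is installed


def flatten_sections(config: dict) -> dict:
    """Merge all [section] tables into a single flat dict; section names are ignored."""
    flat = {}
    for key, value in config.items():
        if isinstance(value, dict):
            flat.update(value)   # keys inside a [section] table
        else:
            flat[key] = value    # top-level key = value entries
    return flat


parser = argparse.ArgumentParser()
parser.add_argument('--config_file', type=str, default=None)
parser.add_argument('--learning_rate', type=float, default=1e-6)   # example option
parser.add_argument('--max_train_steps', type=int, default=1600)   # example option

# First pass only to discover --config_file, then re-parse with the .toml values
# installed as defaults, so anything given explicitly on the command line still wins.
pre_args, _ = parser.parse_known_args()
if pre_args.config_file:
    parser.set_defaults(**flatten_sections(toml.load(pre_args.config_file)))
args = parser.parse_args()
print(args)
```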
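Similarly, the `--vae_batch_size` entry says latents caching now batches VAE calls instead of encoding one image at a time. A hedged sketch of that idea, assuming `vae` is a diffusers `AutoencoderKL` and `images` is a list of preprocessed image tensors, could look like this (again, not the actual training-script code):

```python
# Sketch only: cache latents with batched VAE calls rather than one call per image.
import os

import numpy as np
import torch


def cache_latents(vae, images, vae_batch_size=2, device='cuda'):
    """`images` is assumed to be a list of (path, CHW float tensor in [-1, 1]) pairs."""
    vae.to(device).eval()
    with torch.no_grad():
        for start in range(0, len(images), vae_batch_size):
            batch = images[start:start + vae_batch_size]
            pixels = torch.stack([tensor for _, tensor in batch]).to(device)
            # One VAE call covers the whole batch.
            latents = vae.encode(pixels).latent_dist.sample() * 0.18215  # usual SD scaling
            for (path, _), latent in zip(batch, latents):
                np.savez(os.path.splitext(path)[0] + '.npz', latents=latent.cpu().numpy())
```

Larger `vae_batch_size` values trade VRAM for fewer encode calls, which is why the changelog suggests starting at 2 or 4.
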
65 changes: 46 additions & 19 deletions dreambooth_gui.py
@@ -26,6 +26,7 @@
gradio_source_model,
# set_legacy_8bitadam,
update_my_data,
check_if_model_exist,
)
from library.tensorboard_gui import (
gradio_tensorboard,
@@ -104,7 +105,9 @@ def save_configuration(
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
sample_prompts,additional_parameters,
sample_prompts,
additional_parameters,
vae_batch_size,
):
# Get list of function parameters and values
parameters = list(locals().items())
@@ -210,15 +213,17 @@ def open_configuration(
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
sample_prompts,additional_parameters,
sample_prompts,
additional_parameters,
vae_batch_size,
):
# Get list of function parameters and values
parameters = list(locals().items())

ask_for_file = True if ask_for_file.get('label') == 'True' else False

original_file_path = file_path

if ask_for_file:
file_path = get_file_path(file_path)

@@ -298,7 +303,9 @@ def train_model(
sample_every_n_steps,
sample_every_n_epochs,
sample_sampler,
sample_prompts,additional_parameters,
sample_prompts,
additional_parameters,
vae_batch_size,
):
if pretrained_model_name_or_path == '':
msgbox('Source model information is missing')
@@ -321,19 +328,31 @@
msgbox('Output folder path is missing')
return

# Get a list of all subfolders in train_data_dir
if check_if_model_exist(output_name, output_dir, save_model_as):
return

# Get a list of all subfolders in train_data_dir, excluding hidden folders
subfolders = [
f
for f in os.listdir(train_data_dir)
if os.path.isdir(os.path.join(train_data_dir, f))
if os.path.isdir(os.path.join(train_data_dir, f)) and not f.startswith('.')
]

# Check if subfolders are present. If not let the user know and return
if not subfolders:
        print('\033[33mNo subfolders were found in', train_data_dir, ' can\'t train...\033[0m')
return

total_steps = 0

# Loop through each subfolder and extract the number of repeats
for folder in subfolders:
# Extract the number of repeats from the folder name
repeats = int(folder.split('_')[0])
try:
repeats = int(folder.split('_')[0])
except ValueError:
print('\033[33mSubfolder', folder, 'does not have a proper repeat value, please correct the name or remove it... can\'t train...\033[0m')
continue

# Count the number of images in the folder
num_images = len(
@@ -346,23 +365,28 @@
or f.endswith('.webp')
]
)

# Calculate the total number of steps for this folder
steps = repeats * num_images
total_steps += steps

# Print the result
print(f'Folder {folder}: {steps} steps')

if num_images == 0:
            print(f'{folder} folder contains no images, skipping...')
else:
# Calculate the total number of steps for this folder
steps = repeats * num_images
total_steps += steps

# Print the result
print('\033[33mFolder', folder, ':', steps, 'steps\033[0m')

if total_steps == 0:
print('\033[33mNo images were found in folder', train_data_dir, '... please rectify!\033[0m')
return

# Print the result
# print(f"{total_steps} total steps")

if reg_data_dir == '':
reg_factor = 1
else:
print(
'Regularisation images are used... Will double the number of steps required...'
)
print('\033[94mRegularisation images are used... Will double the number of steps required...\033[0m')
reg_factor = 2

# calculate max_train_steps
@@ -473,6 +497,7 @@ def train_model(
caption_dropout_rate=caption_dropout_rate,
noise_offset=noise_offset,
additional_parameters=additional_parameters,
vae_batch_size=vae_batch_size,
)

run_cmd += run_cmd_sample(
@@ -679,6 +704,7 @@ def dreambooth_tab(
caption_dropout_rate,
noise_offset,
additional_parameters,
vae_batch_size,
) = gradio_advanced_training()
color_aug.change(
color_aug_changed,
@@ -779,6 +805,7 @@ def dreambooth_tab(
sample_sampler,
sample_prompts,
additional_parameters,
vae_batch_size,
]

button_open_config.click(
@@ -787,7 +814,7 @@
outputs=[config_file_name] + settings_list,
show_progress=False,
)

button_load_config.click(
open_configuration,
inputs=[dummy_db_false, config_file_name] + settings_list,