#!/bin/bash
#
# launch_training.sh — launch a LoRA training run with kohya-ss
# sd-scripts' train_network.py via HuggingFace `accelerate`.
#
# Usage:
#   launch_training.sh OUTPUT_NAME
#
# Arguments:
#   OUTPUT_NAME  basename for the produced .safetensors LoRA file
#
# Environment:
#   WORKSPACE    base directory holding the checkpoint and the
#                images/train/{img,model,log} tree (default: /workspace)

set -euo pipefail

# Fail early with a readable usage message instead of nounset's
# cryptic "unbound variable" error when the argument is missing.
if [[ $# -lt 1 ]]; then
  printf 'Usage: %s OUTPUT_NAME\n' "${0##*/}" >&2
  exit 2
fi

output_name="$1"
workspace="${WORKSPACE:-/workspace}"

# NOTE: learning_rate / text_encoder_lr / unet_lr of 1.0 are intentional:
# the DAdaptAdam optimizer (D-Adaptation) derives the effective learning
# rate itself, so 1.0 acts as a neutral multiplier — confirm against the
# sd-scripts docs before changing.
accelerate launch \
  --num_cpu_threads_per_process=10 "train_network.py" \
  --enable_bucket \
  --pretrained_model_name_or_path="${workspace}/AnyLoRA_noVae_fp16-pruned.ckpt" \
  --train_data_dir="${workspace}/images/train/img" \
  --resolution=512 \
  --output_dir="${workspace}/images/train/model" \
  --logging_dir="${workspace}/images/train/log" \
  --network_alpha="16" \
  --save_model_as=safetensors \
  --network_module=networks.lora \
  --text_encoder_lr=1.0 \
  --unet_lr=1.0 \
  --network_dim=32 \
  --output_name="${output_name}" \
  --lr_scheduler_num_cycles="300" \
  --learning_rate="1.0" \
  --lr_scheduler="cosine" \
  --lr_warmup_steps="258" \
  --train_batch_size="10" \
  --max_train_steps="2580" \
  --save_every_n_epochs="10" \
  --mixed_precision="fp16" \
  --save_precision="fp16" \
  --seed="1234" \
  --caption_extension=".txt" \
  --cache_latents \
  --optimizer_type="DAdaptAdam" \
  --optimizer_args decouple=True weight_decay=0.4 \
  --max_data_loader_n_workers="0" \
  --max_token_length=225 \
  --clip_skip=2 \
  --keep_tokens="1" \
  --bucket_reso_steps=64 \
  --shuffle_caption \
  --xformers \
  --bucket_no_upscale