Command to launch LoRA training
Some bits need to be ironed out.
parent a38fdb1457
commit 2ffec5d89c
1 changed file with 44 additions and 0 deletions
stable-diffusion/launch_training.sh (new file, 44 additions)
@@ -0,0 +1,44 @@
#!/bin/bash

set -e
set -o nounset
set -o pipefail

output_name="$1"

accelerate launch \
  --num_cpu_threads_per_process=10 "train_network.py" \
  --enable_bucket --pretrained_model_name_or_path="/workspace/AnyLoRA_noVae_fp16-pruned.ckpt" \
  --train_data_dir="/workspace/images/train/img" \
  --resolution=512 \
  --output_dir="/workspace/images/train/model" \
  --logging_dir="/workspace/images/train/log" \
  --network_alpha="16" \
  --save_model_as=safetensors \
  --network_module=networks.lora \
  --text_encoder_lr=1.0 \
  --unet_lr=1.0 \
  --network_dim=32 \
  --output_name="${output_name}" \
  --lr_scheduler_num_cycles="300" \
  --learning_rate="1.0" \
  --lr_scheduler="cosine" \
  --lr_warmup_steps="258" \
  --train_batch_size="10" \
  --max_train_steps="2580" \
  --save_every_n_epochs="10" \
  --mixed_precision="fp16" \
  --save_precision="fp16" \
  --seed="1234" \
  --caption_extension=".txt" \
  --cache_latents \
  --optimizer_type="DAdaptAdam" \
  --optimizer_args decouple=True weight_decay=0.4 \
  --max_data_loader_n_workers="0" \
  --max_token_length=225 \
  --clip_skip=2 \
  --keep_tokens="1" \
  --bucket_reso_steps=64 \
  --shuffle_caption \
  --xformers \
  --bucket_no_upscale
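For context, a minimal usage sketch. The script invokes "train_network.py" by a relative path, so it has to be run from a kohya-ss sd-scripts checkout; the checkout location and the LoRA name "my_lora" below are hypothetical placeholders, not part of the commit:

    # Hypothetical invocation; both paths are assumptions.
    cd /workspace/sd-scripts          # assumed location of train_network.py
    bash /workspace/stable-diffusion/launch_training.sh my_lora

With set -o nounset in effect, omitting the output-name argument aborts the script with an unbound-variable error instead of training under an empty name. The 1.0 values for learning_rate, unet_lr, and text_encoder_lr are deliberate with DAdaptAdam: D-Adaptation optimizers estimate the step size themselves, so the base rates act as multipliers, and decouple=True selects AdamW-style decoupled weight decay.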