1
0
Fork 0

Command to launch LoRA training

Some bits need to be ironed out.
This commit is contained in:
Luca Beltrame 2023-06-04 15:15:45 +02:00
parent a38fdb1457
commit 2ffec5d89c
Signed by: einar
GPG key ID: 4707F46E9EC72DEC

View file

@@ -0,0 +1,44 @@
#!/bin/bash
#
# Launch a LoRA training run with kohya-ss' train_network.py via `accelerate`.
#
# Usage:   ./train_lora.sh <output_name>
#   output_name - base name of the resulting model file (saved as .safetensors
#                 under /workspace/images/train/model).
#
# Expects the base checkpoint and the train/log/model directories to exist
# under /workspace (see the hard-coded paths below).
set -e
set -o nounset
set -o pipefail

# Fail with a usage message instead of nounset's bare "unbound variable"
# error when the required argument is missing.
output_name="${1:?Usage: ${0##*/} <output_name>}"

accelerate launch \
    --num_cpu_threads_per_process=10 "train_network.py" \
    --enable_bucket --pretrained_model_name_or_path="/workspace/AnyLoRA_noVae_fp16-pruned.ckpt" \
    --train_data_dir="/workspace/images/train/img" \
    --resolution=512 \
    --output_dir="/workspace/images/train/model" \
    --logging_dir="/workspace/images/train/log" \
    --network_alpha="16" \
    --save_model_as=safetensors \
    --network_module=networks.lora \
    --text_encoder_lr=1.0 \
    --unet_lr=1.0 \
    --network_dim=32 \
    --output_name="${output_name}" \
    --lr_scheduler_num_cycles="300" \
    --learning_rate="1.0" \
    --lr_scheduler="cosine" \
    --lr_warmup_steps="258" \
    --train_batch_size="10" \
    --max_train_steps="2580" \
    --save_every_n_epochs="10" \
    --mixed_precision="fp16" \
    --save_precision="fp16" \
    --seed="1234" \
    --caption_extension=".txt" \
    --cache_latents \
    --optimizer_type="DAdaptAdam" \
    --optimizer_args decouple=True weight_decay=0.4 \
    --max_data_loader_n_workers="0" \
    --max_token_length=225 \
    --clip_skip=2 \
    --keep_tokens="1" \
    --bucket_reso_steps=64 \
    --shuffle_caption \
    --xformers \
    --bucket_no_upscale
# NOTE(review): learning rates of 1.0 are intentional here — DAdaptAdam
# adapts the step size itself, and kohya-ss docs recommend LR=1.0 with
# D-Adaptation optimizers. lr_warmup_steps=258 is ~10% of max_train_steps.