End of training
- README.md +72 -0
- checkpoint-500/optimizer.bin +3 -0
- checkpoint-500/pytorch_lora_weights.safetensors +3 -0
- checkpoint-500/random_states_0.pkl +3 -0
- checkpoint-500/scheduler.bin +3 -0
- poop_emoji_flux_lora_500_style.sh +68 -0
- prompt.txt +3 -0
- pytorch_lora_weights.safetensors +3 -0
README.md
ADDED
@@ -0,0 +1,72 @@
+---
+base_model: black-forest-labs/FLUX.1-dev
+library_name: diffusers
+license: other
+instance_prompt: a photo of sks poop emoji
+widget: []
+tags:
+- text-to-image
+- diffusers-training
+- diffusers
+- lora
+- flux
+- flux-diffusers
+- template:sd-lora
+---
+
+<!-- This model card has been generated automatically according to the information the training script had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+
+# Flux DreamBooth LoRA - cst7/poop_emoji_flux_lora_500_style
+
+<Gallery />
+
+## Model description
+
+These are cst7/poop_emoji_flux_lora_500_style DreamBooth LoRA weights for black-forest-labs/FLUX.1-dev.
+
+The weights were trained using [DreamBooth](https://dreambooth.github.io/) with the [Flux diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/README_flux.md).
+
+Was LoRA for the text encoder enabled? True.
+
+## Trigger words
+
+You should use `a photo of sks poop emoji` to trigger the image generation.
+
+## Download model
+
+[Download the *.safetensors LoRA](cst7/poop_emoji_flux_lora_500_style/tree/main) in the Files & versions tab.
+
+## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
+
+```py
+from diffusers import AutoPipelineForText2Image
+import torch
+pipeline = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to('cuda')
+pipeline.load_lora_weights('cst7/poop_emoji_flux_lora_500_style', weight_name='pytorch_lora_weights.safetensors')
+image = pipeline('a photo of sks poop emoji').images[0]
+```
+
+For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters).
+
+## License
+
+Please adhere to the licensing terms as described [here](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md).
+
+
+## Intended uses & limitations
+
+#### How to use
+
+```python
+# TODO: add an example code snippet for running this diffusion pipeline
+```
+
+#### Limitations and bias
+
+[TODO: provide examples of latent issues and potential remediations]
+
+## Training details
+
+[TODO: describe the data used to train the model]
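The card above defers to the diffusers documentation for weighting, merging and fusing LoRAs. As a supplement (not part of the committed README), here is a minimal sketch of how that could look for this checkpoint, assuming the current `fuse_lora`/`unfuse_lora` API on Flux pipelines and an arbitrary example scale of 0.8:

```py
# Hedged sketch: load this LoRA, fuse it into the base weights at a reduced
# strength, generate an image, then unfuse to restore the original weights.
from diffusers import AutoPipelineForText2Image
import torch

pipeline = AutoPipelineForText2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")
pipeline.load_lora_weights(
    "cst7/poop_emoji_flux_lora_500_style",
    weight_name="pytorch_lora_weights.safetensors",
)

pipeline.fuse_lora(lora_scale=0.8)  # 0.8 is an illustrative value, not from the card
image = pipeline("a photo of sks poop emoji").images[0]
pipeline.unfuse_lora()
image.save("poop_emoji.png")
```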
checkpoint-500/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2d46c72b8e557cee7159caf1864c840a9a53c9dd1400493e747933c0d892cc3
+size 81411010
checkpoint-500/pytorch_lora_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:598529761a6136276738a8739fa8f020239ecde0f171285aad19cea9d6b9993d
+size 23107256
checkpoint-500/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aad16113d1bf0b6367a3fbe08890aa8557e5b1cae71e4a571208b701c4261148
+size 14344
checkpoint-500/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72ef7361eb3ca738d82f6b0ce999ee77ee2bbe2c79a64c85a993f2a2eea063c5
+size 1064
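The four checkpoint-500/* files above form an intermediate checkpoint written at step 500, and its pytorch_lora_weights.safetensors can be loaded on its own. A hedged sketch (not part of the commit), assuming `hf_hub_download` and `safetensors` for fetching and reading the file:

```py
# Hedged sketch: load the LoRA weights stored under checkpoint-500/ instead of
# the final pytorch_lora_weights.safetensors at the repo root, e.g. to compare them.
import torch
from diffusers import AutoPipelineForText2Image
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

pipeline = AutoPipelineForText2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Download only the checkpoint file and pass its state dict to load_lora_weights.
ckpt_path = hf_hub_download(
    "cst7/poop_emoji_flux_lora_500_style",
    "checkpoint-500/pytorch_lora_weights.safetensors",
)
pipeline.load_lora_weights(load_file(ckpt_path))
image = pipeline("a photo of sks poop emoji").images[0]
```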
poop_emoji_flux_lora_500_style.sh
ADDED
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+MODEL_NAME="black-forest-labs/FLUX.1-dev"
+DATASET_BASE="/disks/sata5/jiacheng/dataset/dreambooth_dataset"
+ACCELERATOR_ARGS="--num_processes=1 --num_machines=1 --gpu_ids=2"
+
+# Training function
+train() {
+    local dataset_name=$1
+
+    # Build the instance_prompt:
+    # 1. Strip trailing digits from the dataset name
+    processed_name=$(echo "$dataset_name" | sed 's/[0-9]*$//')
+    # 2. Replace underscores with spaces
+    processed_name=$(echo "$processed_name" | sed 's/_/ /g')
+    instance_prompt="a photo of a sks ${processed_name}"  # final prompt
+
+    # Print the processed dataset name
+    echo "Original dataset_name: $dataset_name"
+    echo "Processed processed_name: $processed_name"
+    echo "a photo of a sks ${processed_name}"
+
+    OUTPUT_NAME="${dataset_name}_flux_lora_500_style"
+    OUTPUT_DIR="output/dreambooth_lora_flux/${dataset_name}/$OUTPUT_NAME"
+
+    # Before each run, create the output folder and save a copy of this script
+    mkdir -p $OUTPUT_DIR
+    SCRIPT_BACKUP="$OUTPUT_DIR/$OUTPUT_NAME.sh"
+    cp "$0" "$SCRIPT_BACKUP"
+    echo "Current script saved as a config backup: $SCRIPT_BACKUP"
+
+    # Save the instance_prompt to a txt file
+    PROMPT_FILE="$OUTPUT_DIR/prompt.txt"
+    echo "Original dataset_name: $dataset_name" > "$PROMPT_FILE"
+    echo "Processed processed_name: $processed_name" >> "$PROMPT_FILE"
+    echo "$instance_prompt" >> "$PROMPT_FILE"
+    echo "instance_prompt saved to: $PROMPT_FILE"
+
+    export NCCL_P2P_DISABLE=1
+    export NCCL_IB_DISABLE=1
+
+    # Run the training command
+    accelerate launch $ACCELERATOR_ARGS examples/dreambooth/train_dreambooth_lora_flux.py \
+        --pretrained_model_name_or_path=$MODEL_NAME \
+        --instance_data_dir="$DATASET_BASE/$dataset_name" \
+        --output_dir=$OUTPUT_DIR \
+        --mixed_precision="bf16" \
+        --train_text_encoder \
+        --instance_prompt="a photo of sks ${processed_name}" \
+        --resolution=512 \
+        --train_batch_size=1 \
+        --guidance_scale=1 \
+        --gradient_accumulation_steps=4 \
+        --optimizer="prodigy" \
+        --learning_rate=1. \
+        --report_to="wandb" \
+        --lr_scheduler="constant" \
+        --lr_warmup_steps=0 \
+        --max_train_steps=500 \
+        --seed="0" \
+        --push_to_hub
+}
+
+# Loop over every dataset under the dreambooth_dataset directory
+for dataset in $(ls $DATASET_BASE); do
+    echo "Starting training on dataset: $dataset"
+    train "$dataset" || true
+done
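For reference, the name-to-prompt processing the script performs with sed (strip trailing digits, replace underscores with spaces) can be expressed compactly in Python. This is only an illustrative sketch; the helper name `derive_instance_prompt` is hypothetical and not part of the committed files:

```py
# Illustrative Python equivalent of the script's sed-based prompt derivation.
import re

def derive_instance_prompt(dataset_name: str) -> str:
    # 1. Strip trailing digits from the dataset folder name.
    processed = re.sub(r"[0-9]*$", "", dataset_name)
    # 2. Replace underscores with spaces ("poop_emoji" -> "poop emoji").
    processed = processed.replace("_", " ")
    return f"a photo of a sks {processed}"

print(derive_instance_prompt("poop_emoji"))  # -> "a photo of a sks poop emoji"
```

Note that the `instance_prompt` variable (and therefore prompt.txt) reads `a photo of a sks ...`, while the `--instance_prompt` actually passed to the trainer, and the trigger phrase in the README, omit the article: `a photo of sks ...`.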
prompt.txt
ADDED
@@ -0,0 +1,3 @@
+Original dataset_name: poop_emoji
+Processed processed_name: poop emoji
+a photo of a sks poop emoji
pytorch_lora_weights.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1499be1226bfe53efaaffee9c546646edf84ae8e4d9dff1c62eafafdbdf38b20
+size 23697096