# NOTE: removed non-YAML web-view artifacts (file-size banner, commit hash,
# gutter line numbers) that preceded the document and broke YAML parsing.
---
job: extension
config:
  name: "flux_lora_Adam"
  process:
    - type: 'sd_trainer'
      training_folder: "output/flux_lora_adam"
      performance_log_every: 1000
      # quoted defensively: plain scalars containing ':' are fragile in some YAML tooling
      device: "cuda:0"
      # substituted for [trigger] in the sample prompts below
      trigger_word: "4d4m luc3k"
      network:
        type: "lora"
        linear: 32
        linear_alpha: 32
      save:
        dtype: float16
        save_every: 200
        max_step_saves_to_keep: 4
        push_to_hub: true
        # quoted: keep repo ids as explicit strings so tooling never re-types them
        hf_repo_id: "AdamLucek/flux_lora_adam"
        hf_private: true
      datasets:
        - folder_path: "./lora_me"
          caption_ext: "txt"
          caption_dropout_rate: 0.05
          shuffle_tokens: true
          cache_latents_to_disk: true
          resolution: [ 512, 768, 1024 ]
      train:
        batch_size: 1
        steps: 2000
        gradient_accumulation_steps: 1
        train_unet: true
        train_text_encoder: false
        gradient_checkpointing: true
        noise_scheduler: "flowmatch" # for training only
        optimizer: "adamw8bit"
        # 4.0e-4 (== 4e-4): PyYAML's YAML 1.1 float resolver requires a decimal
        # point before the exponent, so a bare 4e-4 loads as the STRING "4e-4"
        lr: 4.0e-4
        skip_first_sample: true
        ema_config:
          use_ema: true
          ema_decay: 0.99

        # will probably need this if gpu supports it for flux, other dtypes may not work correctly
        dtype: bf16
      model:
        # huggingface model name or path
        name_or_path: "black-forest-labs/FLUX.1-dev"
        is_flux: true
        quantize: true  # run 8bit mixed precision
#        low_vram: true  # uncomment this if the GPU is connected to your monitors. It will use less vram to quantize, but is slower.
      sample:
        sampler: "flowmatch" # must match train.noise_scheduler
        sample_every: 200 # sample every this many steps
        width: 1024
        height: 1024
        prompts:
          # you can add [trigger] to the prompts here and it will be replaced with the trigger word
          - "[trigger] holding a sign that says 'I LOVE PROMPTS!'"
          - "[trigger] with red hair, playing chess at the park, bomb going off in the background"
          - "[trigger] holding a coffee cup, in a beanie, sitting at a cafe"
          - "[trigger] is a DJ at a night club, fish eye lens, smoke machine, lazer lights, holding a martini"
          - "[trigger] showing off his cool new t shirt at the beach, a shark is jumping out of the water in the background"
          - "[trigger] building a log cabin in the snow covered mountains"
          - "[trigger] playing the guitar, on stage, singing a song, laser lights, punk rocker"
          - "[trigger] with a beard, building a chair, in a wood shop"
          - "photo of [trigger], white background, medium shot, modeling clothing, studio lighting, white backdrop"
          - "[trigger] holding a sign that says, 'this is a sign'"
          - "[trigger], in a post apocalyptic world, with a shotgun, in a leather jacket, in a desert, with a motorcycle"
        neg: ""  # not used on flux
        seed: 42
        walk_seed: true
        guidance_scale: 4
        sample_steps: 20
# you can add any additional meta info here. [name] is replaced with config name at top
meta:
  name: "[name]"
  version: '1.0'