diff --git a/.gitattributes b/.gitattributes
index 886ac0c..54b765c 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,38 +1,38 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*.tfevents* filter=lfs diff=lfs merge=lfs -text
-*.db* filter=lfs diff=lfs merge=lfs -text
-*.ark* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
-**/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.gguf* filter=lfs diff=lfs merge=lfs -text
-*.ggml filter=lfs diff=lfs merge=lfs -text
-*.llamafile* filter=lfs diff=lfs merge=lfs -text
-*.pt2 filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+T2I.png filter=lfs diff=lfs merge=lfs -text
+inpaint.png filter=lfs diff=lfs merge=lfs -text
+diffusion_pytorch_model.safetensors filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
index bef6db2..a183724 100644
--- a/README.md
+++ b/README.md
@@ -1,47 +1,82 @@
---
-license: Apache License 2.0
-
-#model-type:
-##如 gpt、phi、llama、chatglm、baichuan 等
-#- gpt
-
-#domain:
-##如 nlp、cv、audio、multi-modal
-#- nlp
-
-#language:
-##语言代码列表 https://help.aliyun.com/document_detail/215387.html?spm=a2c4g.11186623.0.0.9f8d7467kni6Aa
-#- cn
-
-#metrics:
-##如 CIDEr、Blue、ROUGE 等
-#- CIDEr
-
-#tags:
-##各种自定义,包括 pretrained、fine-tuned、instruction-tuned、RL-tuned 等训练方法和其他
-#- pretrained
-
-#tools:
-##如 vllm、fastchat、llamacpp、AdaSeq 等
-#- vllm
+license: other
+license_name: flux-1-dev-non-commercial-license
+license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
+language:
+- en
+base_model: black-forest-labs/FLUX.1-dev
+library_name: diffusers
+tags:
+- Text-to-Image
+- FLUX
+- Stable Diffusion
+pipeline_tag: text-to-image
---
-### 当前模型的贡献者未提供更加详细的模型介绍。模型文件和权重,可浏览“模型文件”页面获取。
-#### 您可以通过如下git clone命令,或者ModelScope SDK来下载模型
-SDK下载
-```bash
-#安装ModelScope
-pip install modelscope
-```
-```python
-#SDK模型下载
-from modelscope import snapshot_download
-model_dir = snapshot_download('alimama-creative/FLUX.1-Turbo-Alpha')
-```
-Git下载
-```
-#Git模型下载
-git clone https://www.modelscope.cn/alimama-creative/FLUX.1-Turbo-Alpha.git
+
+

+

+
+
+[中文版Readme](./README_ZH.md)
+
+This repository provides an 8-step distilled LoRA for the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) model, released by the AlimamaCreative Team.
+
+# Description
+This checkpoint is an 8-step distilled LoRA, trained on the FLUX.1-dev model. We use a multi-head discriminator to improve the distillation quality. Our model can be used for T2I, the inpainting controlnet and other FLUX-related models. The recommended settings are guidance_scale=3.5 and lora_scale=1. A lower-step version will be released later.
+
+- Text-to-Image.
+
+
+
+- With [alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta](https://huggingface.co/alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta). Our distilled lora can be well adapted to the Inpainting controlnet, and the accelerated generated effect can follow the original output well.
+
+
+
+# How to use
+## diffusers
+This model can be used directly with diffusers
+
+```python
+import torch
+from diffusers.pipelines import FluxPipeline
+
+model_id = "black-forest-labs/FLUX.1-dev"
+adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
+
+pipe = FluxPipeline.from_pretrained(
+ model_id,
+ torch_dtype=torch.bfloat16
+)
+pipe.to("cuda")
+
+pipe.load_lora_weights(adapter_id)
+pipe.fuse_lora()
+
+prompt = "A DSLR photo of a shiny VW van that has a cityscape painted on it. A smiling sloth stands on grass in front of the van and is wearing a leather jacket, a cowboy hat, a kilt and a bowtie. The sloth is holding a quarterstaff and a big book."
+image = pipe(
+ prompt=prompt,
+ guidance_scale=3.5,
+ height=1024,
+ width=1024,
+ num_inference_steps=8,
+ max_sequence_length=512).images[0]
```
-如果您是本模型的贡献者,我们邀请您根据模型贡献文档,及时完善模型卡片内容。
\ No newline at end of file
+## comfyui
+
+- T2I turbo workflow: [click here](./workflows/t2I_flux_turbo.json)
+- Inpainting controlnet turbo workflow: [click here](./workflows/alimama_flux_inpainting_turbo_8step.json)
+
+
+# Training Details
+
+The model is trained on 1M images from open-source and internal sources, with an aesthetic score of 6.3+ and a resolution greater than 800. We use adversarial training to improve the quality. Our method fixes the original FLUX.1-dev transformer as the discriminator backbone, and adds multiple heads to every transformer layer. We fix the guidance scale at 3.5 during training, and use a time shift of 3.
+
+Mixed precision: bf16
+
+Learning rate: 2e-5
+
+Batch size: 64
+
+Image size: 1024x1024
\ No newline at end of file
diff --git a/README_ZH.md b/README_ZH.md
new file mode 100644
index 0000000..fdb97e7
--- /dev/null
+++ b/README_ZH.md
@@ -0,0 +1,81 @@
+---
+license: other
+license_name: flux-1-dev-non-commercial-license
+license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
+language:
+- en
+base_model: black-forest-labs/FLUX.1-dev
+library_name: diffusers
+tags:
+- Text-to-Image
+- FLUX
+- Stable Diffusion
+pipeline_tag: text-to-image
+---
+
+
+

+

+
+
+本仓库包含了由阿里妈妈创意团队开发的基于[FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)模型的8步蒸馏版。
+
+# 介绍
+
+该模型是基于FLUX.1-dev模型的8步蒸馏版lora。我们使用特殊设计的判别器来提高蒸馏质量。该模型可以用于T2I、Inpainting controlnet和其他FLUX相关模型。建议guidance_scale=3.5和lora_scale=1。我们的更低步数的版本将在后续发布。
+
+- Text-to-Image.
+
+
+
+- 配合[alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta](https://huggingface.co/alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta)。我们模型可以很好地适配Inpainting controlnet,并与原始输出保持相似的结果。
+
+
+
+# 使用指南
+## diffusers
+该模型可以直接与diffusers一起使用
+
+```python
+import torch
+from diffusers.pipelines import FluxPipeline
+
+model_id = "black-forest-labs/FLUX.1-dev"
+adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
+
+pipe = FluxPipeline.from_pretrained(
+ model_id,
+ torch_dtype=torch.bfloat16
+)
+pipe.to("cuda")
+
+pipe.load_lora_weights(adapter_id)
+pipe.fuse_lora()
+
+prompt = "A DSLR photo of a shiny VW van that has a cityscape painted on it. A smiling sloth stands on grass in front of the van and is wearing a leather jacket, a cowboy hat, a kilt and a bowtie. The sloth is holding a quarterstaff and a big book."
+image = pipe(
+ prompt=prompt,
+ guidance_scale=3.5,
+ height=1024,
+ width=1024,
+ num_inference_steps=8,
+ max_sequence_length=512).images[0]
+```
+
+## comfyui
+
+- 文生图加速链路: [点击这里](./workflows/t2I_flux_turbo.json)
+- Inpainting controlnet 加速链路: [点击这里](./workflows/alimama_flux_inpainting_turbo_8step.json)
+
+
+# 训练细节
+
+该模型在1M公开数据集和内部源图片上进行训练,这些数据美学评分6.3+而且分辨率大于800。我们使用对抗训练来提高质量,我们的方法将原始FLUX.1-dev transformer固定为判别器的特征提取器,并在每个transformer层中添加判别头网络。在训练期间,我们将guidance scale固定为3.5,并使用时间偏移量3。
+
+混合精度: bf16
+
+学习率: 2e-5
+
+批大小: 64
+
+训练分辨率: 1024x1024
\ No newline at end of file
diff --git a/configuration.json b/configuration.json
new file mode 100644
index 0000000..b5f6ce7
--- /dev/null
+++ b/configuration.json
@@ -0,0 +1 @@
+{"framework": "pytorch", "task": "text-to-image", "allow_remote": true}
\ No newline at end of file
diff --git a/diffusion_pytorch_model.safetensors b/diffusion_pytorch_model.safetensors
new file mode 100644
index 0000000..902d354
--- /dev/null
+++ b/diffusion_pytorch_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77f7523a5e9c3da6cfc730c6b07461129fa52997ea06168e9ed5312228aa0bff
+size 694082424
diff --git a/images/T2I.png b/images/T2I.png
new file mode 100644
index 0000000..6a42971
--- /dev/null
+++ b/images/T2I.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b519e6578c674441c24edf6cea01203d5acae8a151135269cc6636135a55203e
+size 9955906
diff --git a/images/images_alibaba.png b/images/images_alibaba.png
new file mode 100644
index 0000000..244e9cc
Binary files /dev/null and b/images/images_alibaba.png differ
diff --git a/images/images_alimama.png b/images/images_alimama.png
new file mode 100644
index 0000000..99a2c98
Binary files /dev/null and b/images/images_alimama.png differ
diff --git a/images/inpaint.png b/images/inpaint.png
new file mode 100644
index 0000000..9425706
--- /dev/null
+++ b/images/inpaint.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfdacf1ada0c208a2bc3c536de58fe7d111f118cde16a6bad9409eacaee02124
+size 7228772
diff --git a/workflows/alimama_flux_inpainting_turbo_8step.json b/workflows/alimama_flux_inpainting_turbo_8step.json
new file mode 100644
index 0000000..c7322bf
--- /dev/null
+++ b/workflows/alimama_flux_inpainting_turbo_8step.json
@@ -0,0 +1,1666 @@
+{
+ "last_node_id": 146,
+ "last_link_id": 238,
+ "nodes": [
+ {
+ "id": 88,
+ "type": "Reroute",
+ "pos": {
+ "0": 156.76731872558594,
+ "1": -115.1278305053711
+ },
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 9,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 223,
+ "label": ""
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "CLIP",
+ "links": [
+ 147,
+ 210
+ ],
+ "slot_index": 0,
+ "label": ""
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 89,
+ "type": "Reroute",
+ "pos": {
+ "0": 155.76731872558594,
+ "1": -64.12783813476562
+ },
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 10,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 150,
+ "label": ""
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "VAE",
+ "links": [
+ 151,
+ 207
+ ],
+ "slot_index": 0,
+ "label": ""
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 72,
+ "type": "CLIPTextEncodeFlux",
+ "pos": {
+ "0": 635.7671508789062,
+ "1": -120.12781524658203
+ },
+ "size": {
+ "0": 219.3687286376953,
+ "1": 108
+ },
+ "flags": {},
+ "order": 17,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 147,
+ "label": "clip"
+ },
+ {
+ "name": "t5xxl",
+ "type": "STRING",
+ "link": 199,
+ "widget": {
+ "name": "t5xxl"
+ },
+ "label": "t5xxl"
+ },
+ {
+ "name": "clip_l",
+ "type": "STRING",
+ "link": 200,
+ "widget": {
+ "name": "clip_l"
+ },
+ "label": "clip_l"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 208
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "CONDITIONING"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CLIPTextEncodeFlux"
+ },
+ "widgets_values": [
+ "",
+ "",
+ 3.5
+ ]
+ },
+ {
+ "id": 131,
+ "type": "CLIPTextEncodeFlux",
+ "pos": {
+ "0": 897.7671508789062,
+ "1": -151.12783813476562
+ },
+ "size": {
+ "0": 222.8309326171875,
+ "1": 160
+ },
+ "flags": {},
+ "order": 14,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 210,
+ "label": "clip"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 209
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "CONDITIONING"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CLIPTextEncodeFlux"
+ },
+ "widgets_values": [
+ "",
+ "bad,ugly,deformed",
+ 3.5
+ ]
+ },
+ {
+ "id": 13,
+ "type": "SamplerCustomAdvanced",
+ "pos": {
+ "0": 1215.767333984375,
+ "1": 133.8721466064453
+ },
+ "size": {
+ "0": 266.6016845703125,
+ "1": 106
+ },
+ "flags": {},
+ "order": 23,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "noise",
+ "type": "NOISE",
+ "link": 174,
+ "slot_index": 0,
+ "label": "noise"
+ },
+ {
+ "name": "guider",
+ "type": "GUIDER",
+ "link": 228,
+ "slot_index": 1,
+ "label": "guider"
+ },
+ {
+ "name": "sampler",
+ "type": "SAMPLER",
+ "link": 172,
+ "slot_index": 2,
+ "label": "sampler"
+ },
+ {
+ "name": "sigmas",
+ "type": "SIGMAS",
+ "link": 20,
+ "slot_index": 3,
+ "label": "sigmas"
+ },
+ {
+ "name": "latent_image",
+ "type": "LATENT",
+ "link": 222,
+ "slot_index": 4,
+ "label": "latent_image"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "output",
+ "type": "LATENT",
+ "links": [
+ 24
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "output"
+ },
+ {
+ "name": "denoised_output",
+ "type": "LATENT",
+ "links": null,
+ "shape": 3,
+ "label": "denoised_output"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "SamplerCustomAdvanced"
+ }
+ },
+ {
+ "id": 87,
+ "type": "Reroute",
+ "pos": {
+ "0": 159.76731872558594,
+ "1": -170.12783813476562
+ },
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 18,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 238,
+ "label": ""
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "MODEL",
+ "links": [
+ 148,
+ 227
+ ],
+ "slot_index": 0,
+ "label": ""
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 8,
+ "type": "VAEDecode",
+ "pos": {
+ "0": 1513.767333984375,
+ "1": 140.87216186523438
+ },
+ "size": {
+ "0": 210,
+ "1": 46
+ },
+ "flags": {},
+ "order": 24,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "samples",
+ "type": "LATENT",
+ "link": 24,
+ "label": "samples"
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 151,
+ "label": "vae"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 127,
+ 225,
+ 232
+ ],
+ "slot_index": 0,
+ "label": "IMAGE"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "VAEDecode"
+ }
+ },
+ {
+ "id": 134,
+ "type": "MaskToImage",
+ "pos": {
+ "0": 440,
+ "1": -815
+ },
+ "size": {
+ "0": 176.39999389648438,
+ "1": 29.597196578979492
+ },
+ "flags": {},
+ "order": 15,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "mask",
+ "type": "MASK",
+ "link": 219,
+ "label": "mask"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 215
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "IMAGE"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "MaskToImage"
+ }
+ },
+ {
+ "id": 136,
+ "type": "PreviewImage",
+ "pos": {
+ "0": 668,
+ "1": -886
+ },
+ "size": {
+ "0": 408.4986267089844,
+ "1": 246
+ },
+ "flags": {},
+ "order": 19,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 215,
+ "label": "images"
+ }
+ ],
+ "outputs": [],
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ },
+ {
+ "id": 144,
+ "type": "ImageCompositeMasked",
+ "pos": {
+ "0": 1607,
+ "1": -460
+ },
+ "size": {
+ "0": 315,
+ "1": 146
+ },
+ "flags": {},
+ "order": 27,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "destination",
+ "type": "IMAGE",
+ "link": 234
+ },
+ {
+ "name": "source",
+ "type": "IMAGE",
+ "link": 232
+ },
+ {
+ "name": "mask",
+ "type": "MASK",
+ "link": 236
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 235
+ ],
+ "slot_index": 0,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ImageCompositeMasked"
+ },
+ "widgets_values": [
+ 0,
+ 0,
+ true
+ ]
+ },
+ {
+ "id": 11,
+ "type": "DualCLIPLoader",
+ "pos": {
+ "0": -564.7698974609375,
+ "1": -22.488271713256836
+ },
+ "size": {
+ "0": 315,
+ "1": 106
+ },
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "CLIP",
+ "type": "CLIP",
+ "links": [
+ 223
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "CLIP"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "DualCLIPLoader"
+ },
+ "widgets_values": [
+ "t5xxl_fp16.safetensors",
+ "clip_l.safetensors",
+ "flux"
+ ]
+ },
+ {
+ "id": 10,
+ "type": "VAELoader",
+ "pos": {
+ "0": -557.7698974609375,
+ "1": 134.51173400878906
+ },
+ "size": {
+ "0": 305.9473876953125,
+ "1": 68.47245788574219
+ },
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "VAE",
+ "type": "VAE",
+ "links": [
+ 150
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "VAE"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "VAELoader"
+ },
+ "widgets_values": [
+ "ae.safetensors"
+ ]
+ },
+ {
+ "id": 25,
+ "type": "RandomNoise",
+ "pos": {
+ "0": 825.7671508789062,
+ "1": 134.8721466064453
+ },
+ "size": {
+ "0": 290.9759216308594,
+ "1": 82
+ },
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "NOISE",
+ "type": "NOISE",
+ "links": [
+ 174
+ ],
+ "shape": 3,
+ "label": "NOISE"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "RandomNoise"
+ },
+ "widgets_values": [
+ 24,
+ "fixed"
+ ]
+ },
+ {
+ "id": 76,
+ "type": "SaveImage",
+ "pos": {
+ "0": 1232,
+ "1": 312
+ },
+ "size": {
+ "0": 367.42144775390625,
+ "1": 406.4285888671875
+ },
+ "flags": {},
+ "order": 25,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 127,
+ "label": "images"
+ }
+ ],
+ "outputs": [],
+ "title": "Model Output",
+ "properties": {
+ "Node name for S&R": "SaveImage"
+ },
+ "widgets_values": [
+ "pl"
+ ]
+ },
+ {
+ "id": 130,
+ "type": "LoadImage",
+ "pos": {
+ "0": 240,
+ "1": -611
+ },
+ "size": {
+ "0": 417.38385009765625,
+ "1": 314
+ },
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 205,
+ 226,
+ 234
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "IMAGE"
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": [
+ 217
+ ],
+ "slot_index": 1,
+ "shape": 3,
+ "label": "MASK"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "clipspace/clipspace-mask-506435.7999997139.png [input]",
+ "image"
+ ]
+ },
+ {
+ "id": 138,
+ "type": "GrowMaskWithBlur",
+ "pos": {
+ "0": 715.314453125,
+ "1": -571.8499755859375
+ },
+ "size": {
+ "0": 315,
+ "1": 246
+ },
+ "flags": {},
+ "order": 11,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "mask",
+ "type": "MASK",
+ "link": 217,
+ "label": "mask"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "mask",
+ "type": "MASK",
+ "links": [
+ 218,
+ 219,
+ 236
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "mask"
+ },
+ {
+ "name": "mask_inverted",
+ "type": "MASK",
+ "links": null,
+ "shape": 3,
+ "label": "mask_inverted"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "GrowMaskWithBlur"
+ },
+ "widgets_values": [
+ 0,
+ 0,
+ false,
+ false,
+ 0,
+ 1,
+ 1,
+ false
+ ]
+ },
+ {
+ "id": 16,
+ "type": "KSamplerSelect",
+ "pos": {
+ "0": 823.7671508789062,
+ "1": 283.8721923828125
+ },
+ "size": {
+ "0": 256.2611999511719,
+ "1": 58
+ },
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "SAMPLER",
+ "type": "SAMPLER",
+ "links": [
+ 172
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "SAMPLER"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "KSamplerSelect"
+ },
+ "widgets_values": [
+ "euler"
+ ]
+ },
+ {
+ "id": 111,
+ "type": "ShowText|pysssss",
+ "pos": {
+ "0": 251,
+ "1": 518
+ },
+ "size": {
+ "0": 407.2326354980469,
+ "1": 219.7727508544922
+ },
+ "flags": {},
+ "order": 16,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "text",
+ "type": "STRING",
+ "link": 180,
+ "widget": {
+ "name": "text"
+ },
+ "label": "text"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "STRING",
+ "type": "STRING",
+ "links": [],
+ "slot_index": 0,
+ "shape": 6,
+ "label": "STRING"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ShowText|pysssss"
+ },
+ "widgets_values": [
+ "",
+ "Albert Einstein"
+ ]
+ },
+ {
+ "id": 139,
+ "type": "EmptySD3LatentImage",
+ "pos": {
+ "0": 821.7671508789062,
+ "1": 573.8721313476562
+ },
+ "size": {
+ "0": 315,
+ "1": 106
+ },
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 222
+ ],
+ "slot_index": 0,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "EmptySD3LatentImage"
+ },
+ "widgets_values": [
+ 1024,
+ 1024,
+ 1
+ ]
+ },
+ {
+ "id": 124,
+ "type": "Text Concatenate (JPS)",
+ "pos": {
+ "0": 312,
+ "1": -154
+ },
+ "size": {
+ "0": 219.5895233154297,
+ "1": 138
+ },
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "text1",
+ "type": "STRING",
+ "link": null,
+ "widget": {
+ "name": "text1"
+ },
+ "label": "text1"
+ },
+ {
+ "name": "text2",
+ "type": "STRING",
+ "link": null,
+ "widget": {
+ "name": "text2"
+ },
+ "label": "text2"
+ },
+ {
+ "name": "text3",
+ "type": "STRING",
+ "link": null,
+ "widget": {
+ "name": "text3"
+ },
+ "label": "text3"
+ },
+ {
+ "name": "text4",
+ "type": "STRING",
+ "link": null,
+ "widget": {
+ "name": "text4"
+ },
+ "label": "text4"
+ },
+ {
+ "name": "text5",
+ "type": "STRING",
+ "link": null,
+ "widget": {
+ "name": "text5"
+ },
+ "label": "text5"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "text",
+ "type": "STRING",
+ "links": [
+ 198
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "text"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "Text Concatenate (JPS)"
+ },
+ "widgets_values": [
+ "comma",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ {
+ "id": 140,
+ "type": "Image Comparer (rgthree)",
+ "pos": {
+ "0": 1613,
+ "1": 321
+ },
+ "size": {
+ "0": 357.58453369140625,
+ "1": 424.3191223144531
+ },
+ "flags": {},
+ "order": 26,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "image_a",
+ "type": "IMAGE",
+ "link": 225,
+ "dir": 3
+ },
+ {
+ "name": "image_b",
+ "type": "IMAGE",
+ "link": 226,
+ "dir": 3
+ }
+ ],
+ "outputs": [],
+ "properties": {
+ "comparer_mode": "Slide"
+ },
+ "widgets_values": [
+ [
+ {
+ "name": "A",
+ "selected": true,
+ "url": "/api/view?filename=rgthree.compare._temp_dmora_02633_.png&type=temp&subfolder=&rand=0.39816085595232487"
+ },
+ {
+ "name": "B",
+ "selected": true,
+ "url": "/api/view?filename=rgthree.compare._temp_dmora_02634_.png&type=temp&subfolder=&rand=0.9050946330845266"
+ }
+ ]
+ ]
+ },
+ {
+ "id": 133,
+ "type": "ControlNetLoader",
+ "pos": {
+ "0": 1109.0059814453125,
+ "1": -691.749755859375
+ },
+ "size": {
+ "0": 435.60980224609375,
+ "1": 100.55636596679688
+ },
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "CONTROL_NET",
+ "type": "CONTROL_NET",
+ "links": [
+ 212
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "CONTROL_NET"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ControlNetLoader"
+ },
+ "widgets_values": [
+ "0930_inpaint_cn.safetensors"
+ ]
+ },
+ {
+ "id": 145,
+ "type": "PreviewImage",
+ "pos": {
+ "0": 2014,
+ "1": 317
+ },
+ "size": {
+ "0": 434.4592590332031,
+ "1": 423.6662292480469
+ },
+ "flags": {},
+ "order": 28,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 235,
+ "label": "images"
+ }
+ ],
+ "outputs": [],
+ "title": "Compose",
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ },
+ {
+ "id": 35,
+ "type": "StringFunction|pysssss",
+ "pos": {
+ "0": 234,
+ "1": 72
+ },
+ "size": {
+ "0": 438.01171875,
+ "1": 381.4057922363281
+ },
+ "flags": {},
+ "order": 12,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "text_c",
+ "type": "STRING",
+ "link": 198,
+ "widget": {
+ "name": "text_c"
+ },
+ "label": "text_c"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "STRING",
+ "type": "STRING",
+ "links": [
+ 180,
+ 199,
+ 200
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "STRING"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "StringFunction|pysssss"
+ },
+ "widgets_values": [
+ "append",
+ "no",
+ "Albert Einstein",
+ "",
+ "",
+ "Albert Einstein"
+ ]
+ },
+ {
+ "id": 141,
+ "type": "CFGGuider",
+ "pos": {
+ "0": 1295,
+ "1": -166
+ },
+ "size": {
+ "0": 315,
+ "1": 98
+ },
+ "flags": {},
+ "order": 22,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 227
+ },
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "link": 230
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "link": 229
+ }
+ ],
+ "outputs": [
+ {
+ "name": "GUIDER",
+ "type": "GUIDER",
+ "links": [
+ 228
+ ],
+ "slot_index": 0,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CFGGuider"
+ },
+ "widgets_values": [
+ 1
+ ]
+ },
+ {
+ "id": 12,
+ "type": "UNETLoader",
+ "pos": {
+ "0": -558.7698974609375,
+ "1": -155.48825073242188
+ },
+ "size": {
+ "0": 308.9964904785156,
+ "1": 83.4256591796875
+ },
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 237
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "MODEL"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "UNETLoader"
+ },
+ "widgets_values": [
+ "flux1-dev-fp8.safetensors",
+ "fp8_e4m3fn"
+ ]
+ },
+ {
+ "id": 17,
+ "type": "BasicScheduler",
+ "pos": {
+ "0": 826,
+ "1": 410
+ },
+ "size": {
+ "0": 315,
+ "1": 106
+ },
+ "flags": {},
+ "order": 21,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 148,
+ "slot_index": 0,
+ "label": "model"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "SIGMAS",
+ "type": "SIGMAS",
+ "links": [
+ 20
+ ],
+ "shape": 3,
+ "label": "SIGMAS"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "BasicScheduler"
+ },
+ "widgets_values": [
+ "simple",
+ 8,
+ 1
+ ]
+ },
+ {
+ "id": 128,
+ "type": "ControlNetInpaintingAliMamaApply",
+ "pos": {
+ "0": 1080,
+ "1": -538
+ },
+ "size": {
+ "0": 403.1999816894531,
+ "1": 206
+ },
+ "flags": {},
+ "order": 20,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "link": 208,
+ "label": "positive"
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "link": 209,
+ "label": "negative"
+ },
+ {
+ "name": "control_net",
+ "type": "CONTROL_NET",
+ "link": 212,
+ "label": "control_net"
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 207,
+ "label": "vae"
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "link": 205,
+ "label": "image"
+ },
+ {
+ "name": "mask",
+ "type": "MASK",
+ "link": 218,
+ "label": "mask"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "links": [
+ 230
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "positive"
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "links": [
+ 229
+ ],
+ "slot_index": 1,
+ "shape": 3,
+ "label": "negative"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ControlNetInpaintingAliMamaApply"
+ },
+ "widgets_values": [
+ 1,
+ 0,
+ 1
+ ]
+ },
+ {
+ "id": 146,
+ "type": "LoraLoaderModelOnly",
+ "pos": {
+ "0": -196,
+ "1": -201
+ },
+ "size": {
+ "0": 315,
+ "1": 82
+ },
+ "flags": {},
+ "order": 13,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 237
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 238
+ ],
+ "slot_index": 0,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoraLoaderModelOnly"
+ },
+ "widgets_values": [
+ "flux_turbo_v1_1.safetensors",
+ 1
+ ]
+ }
+ ],
+ "links": [
+ [
+ 20,
+ 17,
+ 0,
+ 13,
+ 3,
+ "SIGMAS"
+ ],
+ [
+ 24,
+ 13,
+ 0,
+ 8,
+ 0,
+ "LATENT"
+ ],
+ [
+ 127,
+ 8,
+ 0,
+ 76,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 147,
+ 88,
+ 0,
+ 72,
+ 0,
+ "CLIP"
+ ],
+ [
+ 148,
+ 87,
+ 0,
+ 17,
+ 0,
+ "MODEL"
+ ],
+ [
+ 150,
+ 10,
+ 0,
+ 89,
+ 0,
+ "*"
+ ],
+ [
+ 151,
+ 89,
+ 0,
+ 8,
+ 1,
+ "VAE"
+ ],
+ [
+ 172,
+ 16,
+ 0,
+ 13,
+ 2,
+ "SAMPLER"
+ ],
+ [
+ 174,
+ 25,
+ 0,
+ 13,
+ 0,
+ "NOISE"
+ ],
+ [
+ 180,
+ 35,
+ 0,
+ 111,
+ 0,
+ "STRING"
+ ],
+ [
+ 198,
+ 124,
+ 0,
+ 35,
+ 0,
+ "STRING"
+ ],
+ [
+ 199,
+ 35,
+ 0,
+ 72,
+ 1,
+ "STRING"
+ ],
+ [
+ 200,
+ 35,
+ 0,
+ 72,
+ 2,
+ "STRING"
+ ],
+ [
+ 205,
+ 130,
+ 0,
+ 128,
+ 4,
+ "IMAGE"
+ ],
+ [
+ 207,
+ 89,
+ 0,
+ 128,
+ 3,
+ "VAE"
+ ],
+ [
+ 208,
+ 72,
+ 0,
+ 128,
+ 0,
+ "CONDITIONING"
+ ],
+ [
+ 209,
+ 131,
+ 0,
+ 128,
+ 1,
+ "CONDITIONING"
+ ],
+ [
+ 210,
+ 88,
+ 0,
+ 131,
+ 0,
+ "CLIP"
+ ],
+ [
+ 212,
+ 133,
+ 0,
+ 128,
+ 2,
+ "CONTROL_NET"
+ ],
+ [
+ 215,
+ 134,
+ 0,
+ 136,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 217,
+ 130,
+ 1,
+ 138,
+ 0,
+ "MASK"
+ ],
+ [
+ 218,
+ 138,
+ 0,
+ 128,
+ 5,
+ "MASK"
+ ],
+ [
+ 219,
+ 138,
+ 0,
+ 134,
+ 0,
+ "MASK"
+ ],
+ [
+ 222,
+ 139,
+ 0,
+ 13,
+ 4,
+ "LATENT"
+ ],
+ [
+ 223,
+ 11,
+ 0,
+ 88,
+ 0,
+ "*"
+ ],
+ [
+ 225,
+ 8,
+ 0,
+ 140,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 226,
+ 130,
+ 0,
+ 140,
+ 1,
+ "IMAGE"
+ ],
+ [
+ 227,
+ 87,
+ 0,
+ 141,
+ 0,
+ "MODEL"
+ ],
+ [
+ 228,
+ 141,
+ 0,
+ 13,
+ 1,
+ "GUIDER"
+ ],
+ [
+ 229,
+ 128,
+ 1,
+ 141,
+ 2,
+ "CONDITIONING"
+ ],
+ [
+ 230,
+ 128,
+ 0,
+ 141,
+ 1,
+ "CONDITIONING"
+ ],
+ [
+ 232,
+ 8,
+ 0,
+ 144,
+ 1,
+ "IMAGE"
+ ],
+ [
+ 234,
+ 130,
+ 0,
+ 144,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 235,
+ 144,
+ 0,
+ 145,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 236,
+ 138,
+ 0,
+ 144,
+ 2,
+ "MASK"
+ ],
+ [
+ 237,
+ 12,
+ 0,
+ 146,
+ 0,
+ "MODEL"
+ ],
+ [
+ 238,
+ 146,
+ 0,
+ 87,
+ 0,
+ "*"
+ ]
+ ],
+ "groups": [
+ {
+ "title": "AliMama Inpainting",
+ "bounding": [
+ 214,
+ -963,
+ 1341,
+ 687
+ ],
+ "color": "#3f789e",
+ "font_size": 24,
+ "flags": {}
+ },
+ {
+ "title": "FLUX-Text2Image",
+ "bounding": [
+ 146,
+ -241,
+ 1747,
+ 989
+ ],
+ "color": "#3f789e",
+ "font_size": 24,
+ "flags": {}
+ },
+ {
+ "title": "Load Model",
+ "bounding": [
+ -585,
+ -250,
+ 365,
+ 491
+ ],
+ "color": "#3f789e",
+ "font_size": 24,
+ "flags": {}
+ }
+ ],
+ "config": {},
+ "extra": {
+ "ds": {
+ "scale": 0.6115909044841532,
+ "offset": [
+ 876.5983955698489,
+ 1007.0029507778453
+ ]
+ },
+ "workspace_info": {
+ "id": "sSr80zkRsolLQHBh3oFSe",
+ "saveLock": false,
+ "cloudID": null,
+ "coverMediaPath": null
+ },
+ "0246.VERSION": [
+ 0,
+ 0,
+ 4
+ ]
+ },
+ "version": 0.4
+}
\ No newline at end of file
diff --git a/workflows/t2I_flux_turbo.json b/workflows/t2I_flux_turbo.json
new file mode 100644
index 0000000..47983a4
--- /dev/null
+++ b/workflows/t2I_flux_turbo.json
@@ -0,0 +1,528 @@
+{
+ "last_node_id": 106,
+ "last_link_id": 196,
+ "nodes": [
+ {
+ "id": 4,
+ "type": "DualCLIPLoader",
+ "pos": {
+ "0": -182.46112060546875,
+ "1": 35.274688720703125
+ },
+ "size": {
+ "0": 315,
+ "1": 106
+ },
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "CLIP",
+ "type": "CLIP",
+ "links": [
+ 2,
+ 27
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "CLIP"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "DualCLIPLoader"
+ },
+ "widgets_values": [
+ "clip_l.safetensors",
+ "t5xxl_fp16.safetensors",
+ "flux"
+ ]
+ },
+ {
+ "id": 7,
+ "type": "VAEDecode",
+ "pos": {
+ "0": 1028,
+ "1": -107
+ },
+ "size": {
+ "0": 210,
+ "1": 46
+ },
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "samples",
+ "type": "LATENT",
+ "link": 6,
+ "slot_index": 0,
+ "label": "samples"
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 7,
+ "label": "vae"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 79
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "IMAGE"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "VAEDecode"
+ }
+ },
+ {
+ "id": 6,
+ "type": "EmptyLatentImage",
+ "pos": {
+ "0": 665,
+ "1": -145
+ },
+ "size": {
+ "0": 315,
+ "1": 106
+ },
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 177
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "LATENT"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "EmptyLatentImage"
+ },
+ "widgets_values": [
+ 832,
+ 1248,
+ 1
+ ]
+ },
+ {
+ "id": 19,
+ "type": "CLIPTextEncodeFlux",
+ "pos": {
+ "0": 206,
+ "1": 116
+ },
+ "size": {
+ "0": 400,
+ "1": 200
+ },
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 27,
+ "slot_index": 0,
+ "label": "clip"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 26
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "CONDITIONING"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CLIPTextEncodeFlux"
+ },
+ "widgets_values": [
+ "",
+ "(bad hand,bad finger),logo,Backlight,nsfw,(worst quality,low resolution,bad hands),distorted,twisted,watermark,",
+ 3.5
+ ]
+ },
+ {
+ "id": 5,
+ "type": "CLIPTextEncodeFlux",
+ "pos": {
+ "0": 202,
+ "1": -146
+ },
+ "size": {
+ "0": 400,
+ "1": 200
+ },
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 2,
+ "slot_index": 0,
+ "label": "clip"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 18
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "CONDITIONING"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CLIPTextEncodeFlux"
+ },
+ "widgets_values": [
+ "((Asian Face)), baby girl, age 2, , , , , , , , , , (falling white curtain as background, minimalist, white tone, very soft, bright), photography, masterpiece, best quality, 8K, HDR, highres, front to camera",
+ "((Asian Face)), baby girl, age 2, , , , , , , , , , (falling white curtain as background, minimalist, white tone, very soft, bright), photography, masterpiece, best quality, 8K, HDR, highres, front to camera",
+ 3.5
+ ]
+ },
+ {
+ "id": 55,
+ "type": "UNETLoader",
+ "pos": {
+ "0": -177,
+ "1": 204
+ },
+ "size": {
+ "0": 308.9964904785156,
+ "1": 83.4256591796875
+ },
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 195
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "MODEL"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "UNETLoader"
+ },
+ "widgets_values": [
+ "flux1-dev-fp8.safetensors",
+ "fp8_e4m3fn"
+ ]
+ },
+ {
+ "id": 106,
+ "type": "LoraLoaderModelOnly",
+ "pos": {
+ "0": -184,
+ "1": 375
+ },
+ "size": {
+ "0": 315,
+ "1": 82
+ },
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 195
+ }
+ ],
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 196
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoraLoaderModelOnly"
+ },
+ "widgets_values": [
+ "flux_turbo_v1_1.safetensors",
+ 1
+ ]
+ },
+ {
+ "id": 8,
+ "type": "VAELoader",
+ "pos": {
+ "0": -179.46112060546875,
+ "1": -70.72531127929688
+ },
+ "size": {
+ "0": 315,
+ "1": 58
+ },
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "inputs": [],
+ "outputs": [
+ {
+ "name": "VAE",
+ "type": "VAE",
+ "links": [
+ 7
+ ],
+ "slot_index": 0,
+ "shape": 3,
+ "label": "VAE"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "VAELoader"
+ },
+ "widgets_values": [
+ "ae.safetensors"
+ ]
+ },
+ {
+ "id": 3,
+ "type": "XlabsSampler",
+ "pos": {
+ "0": 654,
+ "1": 12
+ },
+ "size": {
+ "0": 342.5999755859375,
+ "1": 282
+ },
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 196,
+ "slot_index": 0,
+ "label": "model"
+ },
+ {
+ "name": "conditioning",
+ "type": "CONDITIONING",
+ "link": 18,
+ "label": "conditioning"
+ },
+ {
+ "name": "neg_conditioning",
+ "type": "CONDITIONING",
+ "link": 26,
+ "label": "neg_conditioning"
+ },
+ {
+ "name": "latent_image",
+ "type": "LATENT",
+ "link": 177,
+ "label": "latent_image"
+ },
+ {
+ "name": "controlnet_condition",
+ "type": "ControlNetCondition",
+ "link": null,
+ "label": "controlnet_condition"
+ }
+ ],
+ "outputs": [
+ {
+ "name": "latent",
+ "type": "LATENT",
+ "links": [
+ 6
+ ],
+ "shape": 3,
+ "label": "latent"
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "XlabsSampler"
+ },
+ "widgets_values": [
+ 24,
+ "fixed",
+ 8,
+ 1,
+ 2,
+ 0,
+ 1
+ ]
+ },
+ {
+ "id": 21,
+ "type": "PreviewImage",
+ "pos": {
+ "0": 1026,
+ "1": 19
+ },
+ "size": {
+ "0": 210,
+ "1": 318
+ },
+ "flags": {},
+ "order": 9,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 79,
+ "slot_index": 0,
+ "label": "images"
+ }
+ ],
+ "outputs": [],
+ "title": "t2i output",
+ "properties": {
+ "Node name for S&R": "PreviewImage"
+ }
+ }
+ ],
+ "links": [
+ [
+ 2,
+ 4,
+ 0,
+ 5,
+ 0,
+ "CLIP"
+ ],
+ [
+ 6,
+ 3,
+ 0,
+ 7,
+ 0,
+ "LATENT"
+ ],
+ [
+ 7,
+ 8,
+ 0,
+ 7,
+ 1,
+ "VAE"
+ ],
+ [
+ 18,
+ 5,
+ 0,
+ 3,
+ 1,
+ "CONDITIONING"
+ ],
+ [
+ 26,
+ 19,
+ 0,
+ 3,
+ 2,
+ "CONDITIONING"
+ ],
+ [
+ 27,
+ 4,
+ 0,
+ 19,
+ 0,
+ "CLIP"
+ ],
+ [
+ 79,
+ 7,
+ 0,
+ 21,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 177,
+ 6,
+ 0,
+ 3,
+ 3,
+ "LATENT"
+ ],
+ [
+ 195,
+ 55,
+ 0,
+ 106,
+ 0,
+ "MODEL"
+ ],
+ [
+ 196,
+ 106,
+ 0,
+ 3,
+ 0,
+ "MODEL"
+ ]
+ ],
+ "groups": [
+ {
+ "title": "Load Model",
+ "bounding": [
+ -210,
+ -187,
+ 371,
+ 700
+ ],
+ "color": "#3f789e",
+ "font_size": 24,
+ "flags": {}
+ }
+ ],
+ "config": {},
+ "extra": {
+ "ds": {
+ "scale": 1.1918176537727374,
+ "offset": [
+ 438.12831553640723,
+ 376.2590792694179
+ ]
+ }
+ },
+ "version": 0.4
+}
\ No newline at end of file