Skip to content

Commit 8faafb6

Browse files
author
Kavya Mali
authored
Merge branch 'dev' into maxwell
2 parents 589cf50 + 6685e53 commit 8faafb6

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

55 files changed

+698
-239
lines changed

.eslintrc.js

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -88,6 +88,7 @@ module.exports = {
8888
// imageviewer.js
8989
modalPrevImage: "readonly",
9090
modalNextImage: "readonly",
91+
updateModalImageIfVisible: "readonly",
9192
// localStorage.js
9293
localSet: "readonly",
9394
localGet: "readonly",

.github/workflows/on_pull_request.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ jobs:
2222
- name: Install Ruff
2323
run: pip install ruff==0.3.3
2424
- name: Run Ruff
25-
run: ruff .
25+
run: ruff check .
2626
lint-js:
2727
name: eslint
2828
runs-on: ubuntu-latest

CODEOWNERS

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1 @@
1-
* @AUTOMATIC1111
2-
3-
# if you were managing a localization and were removed from this file, this is because
4-
# the intended way to do localizations now is via extensions. See:
5-
# https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions
6-
# Make a repo with your localization and since you are still listed as a collaborator
7-
# you can add it to the wiki page yourself. This change is because some people complained
8-
# the git commit log is cluttered with things unrelated to almost everyone and
9-
# because I believe this is the best overall for the project to handle localizations almost
10-
# entirely without my oversight.
11-
12-
1+
* @AUTOMATIC1111 @w-e-w @catboxanon

configs/sd_xl_v.yaml

Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
model:
2+
target: sgm.models.diffusion.DiffusionEngine
3+
params:
4+
scale_factor: 0.13025
5+
disable_first_stage_autocast: True
6+
7+
denoiser_config:
8+
target: sgm.modules.diffusionmodules.denoiser.DiscreteDenoiser
9+
params:
10+
num_idx: 1000
11+
12+
weighting_config:
13+
target: sgm.modules.diffusionmodules.denoiser_weighting.VWeighting
14+
scaling_config:
15+
target: sgm.modules.diffusionmodules.denoiser_scaling.VScaling
16+
discretization_config:
17+
target: sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization
18+
19+
network_config:
20+
target: sgm.modules.diffusionmodules.openaimodel.UNetModel
21+
params:
22+
adm_in_channels: 2816
23+
num_classes: sequential
24+
use_checkpoint: False
25+
in_channels: 4
26+
out_channels: 4
27+
model_channels: 320
28+
attention_resolutions: [4, 2]
29+
num_res_blocks: 2
30+
channel_mult: [1, 2, 4]
31+
num_head_channels: 64
32+
use_spatial_transformer: True
33+
use_linear_in_transformer: True
34+
transformer_depth: [1, 2, 10] # note: the first is unused (due to attn_res starting at 2) 32, 16, 8 --> 64, 32, 16
35+
context_dim: 2048
36+
spatial_transformer_attn_type: softmax-xformers
37+
legacy: False
38+
39+
conditioner_config:
40+
target: sgm.modules.GeneralConditioner
41+
params:
42+
emb_models:
43+
# crossattn cond
44+
- is_trainable: False
45+
input_key: txt
46+
target: sgm.modules.encoders.modules.FrozenCLIPEmbedder
47+
params:
48+
layer: hidden
49+
layer_idx: 11
50+
# crossattn and vector cond
51+
- is_trainable: False
52+
input_key: txt
53+
target: sgm.modules.encoders.modules.FrozenOpenCLIPEmbedder2
54+
params:
55+
arch: ViT-bigG-14
56+
version: laion2b_s39b_b160k
57+
freeze: True
58+
layer: penultimate
59+
always_return_pooled: True
60+
legacy: False
61+
# vector cond
62+
- is_trainable: False
63+
input_key: original_size_as_tuple
64+
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
65+
params:
66+
outdim: 256 # multiplied by two
67+
# vector cond
68+
- is_trainable: False
69+
input_key: crop_coords_top_left
70+
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
71+
params:
72+
outdim: 256 # multiplied by two
73+
# vector cond
74+
- is_trainable: False
75+
input_key: target_size_as_tuple
76+
target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
77+
params:
78+
outdim: 256 # multiplied by two
79+
80+
first_stage_config:
81+
target: sgm.models.autoencoder.AutoencoderKLInferenceWrapper
82+
params:
83+
embed_dim: 4
84+
monitor: val/rec_loss
85+
ddconfig:
86+
attn_type: vanilla-xformers
87+
double_z: true
88+
z_channels: 4
89+
resolution: 256
90+
in_channels: 3
91+
out_ch: 3
92+
ch: 128
93+
ch_mult: [1, 2, 4, 4]
94+
num_res_blocks: 2
95+
attn_resolutions: []
96+
dropout: 0.0
97+
lossconfig:
98+
target: torch.nn.Identity

extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -816,7 +816,7 @@ onUiLoaded(async() => {
816816
// Increase or decrease brush size based on scroll direction
817817
adjustBrushSize(elemId, e.deltaY);
818818
}
819-
});
819+
}, {passive: false});
820820

821821
// Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
822822
function handleMoveKeyDown(e) {

extensions-builtin/hypertile/hypertile.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
"""
22
Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE
33
Warn: The patch works well only if the input image has a width and height that are multiples of 128
4-
Original author: @tfernd Github: https://github.com/tfernd/HyperTile
4+
Original author: @tfernd GitHub: https://github.com/tfernd/HyperTile
55
"""
66

77
from __future__ import annotations

extensions-builtin/postprocessing-for-training/scripts/postprocessing_autosized_crop.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,14 +34,14 @@ def ui(self):
3434
with ui_components.InputAccordion(False, label="Auto-sized crop") as enable:
3535
gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
3636
with gr.Row():
37-
mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="postprocess_multicrop_mindim")
38-
maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="postprocess_multicrop_maxdim")
37+
mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id=self.elem_id_suffix("postprocess_multicrop_mindim"))
38+
maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id=self.elem_id_suffix("postprocess_multicrop_maxdim"))
3939
with gr.Row():
40-
minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id="postprocess_multicrop_minarea")
41-
maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id="postprocess_multicrop_maxarea")
40+
minarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area lower bound", value=64 * 64, elem_id=self.elem_id_suffix("postprocess_multicrop_minarea"))
41+
maxarea = gr.Slider(minimum=64 * 64, maximum=2048 * 2048, step=1, label="Area upper bound", value=640 * 640, elem_id=self.elem_id_suffix("postprocess_multicrop_maxarea"))
4242
with gr.Row():
43-
objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="postprocess_multicrop_objective")
44-
threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="postprocess_multicrop_threshold")
43+
objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id=self.elem_id_suffix("postprocess_multicrop_objective"))
44+
threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id=self.elem_id_suffix("postprocess_multicrop_threshold"))
4545

4646
return {
4747
"enable": enable,

extensions-builtin/postprocessing-for-training/scripts/postprocessing_focal_crop.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,10 @@ class ScriptPostprocessingFocalCrop(scripts_postprocessing.ScriptPostprocessing)
1111

1212
def ui(self):
1313
with ui_components.InputAccordion(False, label="Auto focal point crop") as enable:
14-
face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_face_weight")
15-
entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_entropy_weight")
16-
edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_focal_crop_edges_weight")
17-
debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
14+
face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_face_weight"))
15+
entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_entropy_weight"))
16+
edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_focal_crop_edges_weight"))
17+
debug = gr.Checkbox(label='Create debug image', elem_id=self.elem_id_suffix("train_process_focal_crop_debug"))
1818

1919
return {
2020
"enable": enable,

extensions-builtin/postprocessing-for-training/scripts/postprocessing_split_oversized.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,8 +35,8 @@ class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostproces
3535
def ui(self):
3636
with ui_components.InputAccordion(False, label="Split oversized images") as enable:
3737
with gr.Row():
38-
split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="postprocess_split_threshold")
39-
overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="postprocess_overlap_ratio")
38+
split_threshold = gr.Slider(label='Threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id=self.elem_id_suffix("postprocess_split_threshold"))
39+
overlap_ratio = gr.Slider(label='Overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id=self.elem_id_suffix("postprocess_overlap_ratio"))
4040

4141
return {
4242
"enable": enable,

extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js

Lines changed: 57 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1,36 +1,69 @@
1-
// Stable Diffusion WebUI - Bracket checker
2-
// By Hingashi no Florin/Bwin4L & @akx
1+
// Stable Diffusion WebUI - Bracket Checker
2+
// By @Bwin4L, @akx, @w-e-w, @Haoming02
33
// Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs.
4-
// If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.
5-
6-
function checkBrackets(textArea, counterElt) {
7-
var counts = {};
8-
(textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => {
9-
counts[bracket] = (counts[bracket] || 0) + 1;
10-
});
11-
var errors = [];
12-
13-
function checkPair(open, close, kind) {
14-
if (counts[open] !== counts[close]) {
15-
errors.push(
16-
`${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`
17-
);
4+
// If there's a mismatch, the keyword counter turns red, and if you hover on it, a tooltip tells you what's wrong.
5+
6+
function checkBrackets(textArea, counterElem) {
7+
const pairs = [
8+
['(', ')', 'round brackets'],
9+
['[', ']', 'square brackets'],
10+
['{', '}', 'curly brackets']
11+
];
12+
13+
const counts = {};
14+
const errors = new Set();
15+
let i = 0;
16+
17+
while (i < textArea.value.length) {
18+
let char = textArea.value[i];
19+
let escaped = false;
20+
while (char === '\\' && i + 1 < textArea.value.length) {
21+
escaped = !escaped;
22+
i++;
23+
char = textArea.value[i];
24+
}
25+
26+
if (escaped) {
27+
i++;
28+
continue;
29+
}
30+
31+
for (const [open, close, label] of pairs) {
32+
if (char === open) {
33+
counts[label] = (counts[label] || 0) + 1;
34+
} else if (char === close) {
35+
counts[label] = (counts[label] || 0) - 1;
36+
if (counts[label] < 0) {
37+
errors.add(`Incorrect order of ${label}.`);
38+
}
39+
}
40+
}
41+
42+
i++;
43+
}
44+
45+
for (const [open, close, label] of pairs) {
46+
if (counts[label] == undefined) {
47+
continue;
48+
}
49+
50+
if (counts[label] > 0) {
51+
errors.add(`${open} ... ${close} - Detected ${counts[label]} more opening than closing ${label}.`);
52+
} else if (counts[label] < 0) {
53+
errors.add(`${open} ... ${close} - Detected ${-counts[label]} more closing than opening ${label}.`);
1854
}
1955
}
2056

21-
checkPair('(', ')', 'round brackets');
22-
checkPair('[', ']', 'square brackets');
23-
checkPair('{', '}', 'curly brackets');
24-
counterElt.title = errors.join('\n');
25-
counterElt.classList.toggle('error', errors.length !== 0);
57+
counterElem.title = [...errors].join('\n');
58+
counterElem.classList.toggle('error', errors.size !== 0);
2659
}
2760

2861
function setupBracketChecking(id_prompt, id_counter) {
29-
var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
30-
var counter = gradioApp().getElementById(id_counter);
62+
const textarea = gradioApp().querySelector(`#${id_prompt} > label > textarea`);
63+
const counter = gradioApp().getElementById(id_counter);
3164

3265
if (textarea && counter) {
33-
textarea.addEventListener("input", () => checkBrackets(textarea, counter));
66+
onEdit(`${id_prompt}_BracketChecking`, textarea, 400, () => checkBrackets(textarea, counter));
3467
}
3568
}
3669

0 commit comments

Comments
 (0)