
Commit 1896c6f

Merge remote-tracking branch 'origin/main' into release/3.4
2 parents 1a37827 + 47f3515 commit 1896c6f

27 files changed: 399 additions, 225 deletions

docs/nodes/communityNodes.md

Lines changed: 8 additions & 0 deletions
@@ -32,6 +32,7 @@ To use a community workflow, download the the `.json` node graph file and load i
 + [Size Stepper Nodes](#size-stepper-nodes)
 + [Text font to Image](#text-font-to-image)
 + [Thresholding](#thresholding)
++ [Unsharp Mask](#unsharp-mask)
 + [XY Image to Grid and Images to Grids nodes](#xy-image-to-grid-and-images-to-grids-nodes)
 - [Example Node Template](#example-node-template)
 - [Disclaimer](#disclaimer)

@@ -316,6 +317,13 @@ Highlights/Midtones/Shadows (with LUT blur enabled):
 <img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0a440e43-697f-4d17-82ee-f287467df0a5" width="300" />
 <img src="https://github.com/invoke-ai/InvokeAI/assets/34005131/0701fd0f-2ca7-4fe2-8613-2b52547bafce" width="300" />

+--------------------------------
+### Unsharp Mask
+
+**Description:** Applies an unsharp mask filter to an image, preserving its alpha channel in the process.
+
+**Node Link:** https://github.com/JPPhoto/unsharp-mask-node
+
 --------------------------------
 ### XY Image to Grid and Images to Grids nodes
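For reference, a minimal sketch of the kind of alpha-preserving sharpening described in the new entry, using Pillow's ImageFilter.UnsharpMask. This is an illustration only, not the linked node's implementation, and the default parameter values are assumptions:

from PIL import Image, ImageFilter


def unsharp_mask_keep_alpha(
    image: Image.Image, radius: float = 2.0, percent: int = 150, threshold: int = 3
) -> Image.Image:
    """Sharpen the RGB channels of an image while leaving its alpha channel untouched."""
    rgba = image.convert("RGBA")
    r, g, b, a = rgba.split()
    # Sharpen only the color channels; sharpening alpha would erode soft edges.
    sharpened_rgb = Image.merge("RGB", (r, g, b)).filter(
        ImageFilter.UnsharpMask(radius=radius, percent=percent, threshold=threshold)
    )
    result = sharpened_rgb.convert("RGBA")
    result.putalpha(a)
    return result


# Hypothetical usage:
# sharpened = unsharp_mask_keep_alpha(Image.open("input.png"))
# sharpened.save("output.png")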

invokeai/app/invocations/image.py

Lines changed: 33 additions & 1 deletion
@@ -8,7 +8,7 @@
 from PIL import Image, ImageChops, ImageFilter, ImageOps

 from invokeai.app.invocations.primitives import BoardField, ColorField, ImageField, ImageOutput
-from invokeai.app.services.image_records.image_records_common import ImageCategory, ResourceOrigin
+from invokeai.app.services.image_records.image_records_common import ImageCategory, ImageRecordChanges, ResourceOrigin
 from invokeai.app.shared.fields import FieldDescriptions
 from invokeai.backend.image_util.invisible_watermark import InvisibleWatermark
 from invokeai.backend.image_util.safety_checker import SafetyChecker
@@ -1017,3 +1017,35 @@ def invoke(self, context: InvocationContext) -> ImageOutput:
             width=image_dto.width,
             height=image_dto.height,
         )
+
+
+@invocation(
+    "linear_ui_output",
+    title="Linear UI Image Output",
+    tags=["primitives", "image"],
+    category="primitives",
+    version="1.0.1",
+    use_cache=False,
+)
+class LinearUIOutputInvocation(BaseInvocation, WithWorkflow, WithMetadata):
+    """Handles Linear UI Image Outputting tasks."""
+
+    image: ImageField = InputField(description=FieldDescriptions.image)
+    board: Optional[BoardField] = InputField(default=None, description=FieldDescriptions.board, input=Input.Direct)
+
+    def invoke(self, context: InvocationContext) -> ImageOutput:
+        image_dto = context.services.images.get_dto(self.image.image_name)
+
+        if self.board:
+            context.services.board_images.add_image_to_board(self.board.board_id, self.image.image_name)
+
+        if image_dto.is_intermediate != self.is_intermediate:
+            context.services.images.update(
+                self.image.image_name, changes=ImageRecordChanges(is_intermediate=self.is_intermediate)
+            )
+
+        return ImageOutput(
+            image=ImageField(image_name=self.image.image_name),
+            width=image_dto.width,
+            height=image_dto.height,
+        )
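For orientation, a hedged sketch of how a graph submitted from the linear UI might terminate in this new node, based on the fields set by the graph builders further down in this commit (the node and edge ids here are illustrative, not the actual constant values):

# Hypothetical fragment of a graph payload that ends in the new node.
graph_fragment = {
    "nodes": {
        "linear_ui_output": {
            "id": "linear_ui_output",
            "type": "linear_ui_output",
            "is_intermediate": False,
            "use_cache": False,
            "board": {"board_id": "some-board-id"},  # optional; omitted when no auto-add board is set
        },
    },
    "edges": [
        {
            "source": {"node_id": "latents_to_image", "field": "image"},  # illustrative upstream node id
            "destination": {"node_id": "linear_ui_output", "field": "image"},
        },
    ],
}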

invokeai/app/invocations/metadata.py

Lines changed: 2 additions & 2 deletions
@@ -112,7 +112,7 @@ def invoke(self, context: InvocationContext) -> MetadataOutput:
         ]


-@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.0.0")
+@invocation("core_metadata", title="Core Metadata", tags=["metadata"], category="metadata", version="1.0.1")
 class CoreMetadataInvocation(BaseInvocation):
     """Collects core generation metadata into a MetadataField"""

@@ -160,7 +160,7 @@ class CoreMetadataInvocation(BaseInvocation):
     )

     # High resolution fix metadata.
-    hrf_enabled: Optional[float] = InputField(
+    hrf_enabled: Optional[bool] = InputField(
         default=None,
         description="Whether or not high resolution fix was enabled.",
     )
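To make the effect of this type fix concrete, a hedged sketch of the HRF-related entries a core metadata record might now carry; the hrf_enabled and hrf_method keys appear in the graph builder change below, while the values and the hrf_strength key are illustrative assumptions:

# Illustrative metadata fragment after the fix: hrf_enabled is a bool, not a float.
hrf_metadata_fragment = {
    "hrf_enabled": True,
    "hrf_method": "ESRGAN",  # assumed example value
    "hrf_strength": 0.45,    # assumed key and value
}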

invokeai/frontend/web/public/locales/zh_CN.json

Lines changed: 15 additions & 1 deletion
@@ -1222,7 +1222,8 @@
     "seamless": "无缝",
     "fit": "图生图匹配",
     "recallParameters": "召回参数",
-    "noRecallParameters": "未找到要召回的参数"
+    "noRecallParameters": "未找到要召回的参数",
+    "vae": "VAE"
   },
   "models": {
     "noMatchingModels": "无相匹配的模型",
@@ -1501,5 +1502,18 @@
     "clear": "清除",
     "maxCacheSize": "最大缓存大小",
     "cacheSize": "缓存大小"
+  },
+  "hrf": {
+    "enableHrf": "启用高分辨率修复",
+    "upscaleMethod": "放大方法",
+    "enableHrfTooltip": "使用较低的分辨率进行初始生成,放大到基础分辨率后进行图生图。",
+    "metadata": {
+      "strength": "高分辨率修复强度",
+      "enabled": "高分辨率修复已启用",
+      "method": "高分辨率修复方法"
+    },
+    "hrf": "高分辨率修复",
+    "hrfStrength": "高分辨率修复强度",
+    "strengthTooltip": "值越低细节越少,但可以减少部分潜在的伪影。"
   }
 }

invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/controlNetImageProcessed.ts

Lines changed: 3 additions & 20 deletions
@@ -8,7 +8,6 @@ import {
   selectControlAdapterById,
 } from 'features/controlAdapters/store/controlAdaptersSlice';
 import { isControlNetOrT2IAdapter } from 'features/controlAdapters/store/types';
-import { SAVE_IMAGE } from 'features/nodes/util/graphBuilders/constants';
 import { addToast } from 'features/system/store/systemSlice';
 import { t } from 'i18next';
 import { imagesApi } from 'services/api/endpoints/images';
@@ -38,6 +37,7 @@ export const addControlNetImageProcessedListener = () => {
      // ControlNet one-off procressing graph is just the processor node, no edges.
      // Also we need to grab the image.

+     const nodeId = ca.processorNode.id;
      const enqueueBatchArg: BatchConfig = {
        prepend: true,
        batch: {
@@ -46,27 +46,10 @@ export const addControlNetImageProcessedListener = () => {
            [ca.processorNode.id]: {
              ...ca.processorNode,
              is_intermediate: true,
-             image: { image_name: ca.controlImage },
-           },
-           [SAVE_IMAGE]: {
-             id: SAVE_IMAGE,
-             type: 'save_image',
-             is_intermediate: true,
              use_cache: false,
+             image: { image_name: ca.controlImage },
            },
          },
-         edges: [
-           {
-             source: {
-               node_id: ca.processorNode.id,
-               field: 'image',
-             },
-             destination: {
-               node_id: SAVE_IMAGE,
-               field: 'image',
-             },
-           },
-         ],
        },
        runs: 1,
      },
@@ -90,7 +73,7 @@ export const addControlNetImageProcessedListener = () => {
        socketInvocationComplete.match(action) &&
          action.payload.data.queue_batch_id ===
            enqueueResult.batch.batch_id &&
-         action.payload.data.source_node_id === SAVE_IMAGE
+         action.payload.data.source_node_id === nodeId
      );

      // We still have to check the output type

invokeai/frontend/web/src/app/store/middleware/listenerMiddleware/listeners/socketio/socketInvocationComplete.ts

Lines changed: 20 additions & 8 deletions
@@ -7,7 +7,10 @@ import {
   imageSelected,
 } from 'features/gallery/store/gallerySlice';
 import { IMAGE_CATEGORIES } from 'features/gallery/store/types';
-import { CANVAS_OUTPUT } from 'features/nodes/util/graphBuilders/constants';
+import {
+  LINEAR_UI_OUTPUT,
+  nodeIDDenyList,
+} from 'features/nodes/util/graphBuilders/constants';
 import { boardsApi } from 'services/api/endpoints/boards';
 import { imagesApi } from 'services/api/endpoints/images';
 import { isImageOutput } from 'services/api/guards';
@@ -19,7 +22,7 @@ import {
 import { startAppListening } from '../..';

 // These nodes output an image, but do not actually *save* an image, so we don't want to handle the gallery logic on them
-const nodeDenylist = ['load_image', 'image'];
+const nodeTypeDenylist = ['load_image', 'image'];

 export const addInvocationCompleteEventListener = () => {
   startAppListening({
@@ -32,22 +35,31 @@ export const addInvocationCompleteEventListener = () => {
        `Invocation complete (${action.payload.data.node.type})`
      );

-     const { result, node, queue_batch_id } = data;
+     const { result, node, queue_batch_id, source_node_id } = data;

      // This complete event has an associated image output
-     if (isImageOutput(result) && !nodeDenylist.includes(node.type)) {
+     if (
+       isImageOutput(result) &&
+       !nodeTypeDenylist.includes(node.type) &&
+       !nodeIDDenyList.includes(source_node_id)
+     ) {
        const { image_name } = result.image;
        const { canvas, gallery } = getState();

        // This populates the `getImageDTO` cache
-       const imageDTO = await dispatch(
-         imagesApi.endpoints.getImageDTO.initiate(image_name)
-       ).unwrap();
+       const imageDTORequest = dispatch(
+         imagesApi.endpoints.getImageDTO.initiate(image_name, {
+           forceRefetch: true,
+         })
+       );
+
+       const imageDTO = await imageDTORequest.unwrap();
+       imageDTORequest.unsubscribe();

        // Add canvas images to the staging area
        if (
          canvas.batchIds.includes(queue_batch_id) &&
-         [CANVAS_OUTPUT].includes(data.source_node_id)
+         [LINEAR_UI_OUTPUT].includes(data.source_node_id)
        ) {
          dispatch(addImageToStagingArea(imageDTO));
        }

invokeai/frontend/web/src/features/gallery/components/ImageMetadataViewer/ImageMetadataActions.tsx

Lines changed: 2 additions & 0 deletions
@@ -157,6 +157,8 @@ const ImageMetadataActions = (props: Props) => {
     return null;
   }

+  console.log(metadata);
+
   return (
     <>
       {metadata.created_by && (

invokeai/frontend/web/src/features/nodes/util/graphBuilders/addHrfToGraph.ts

Lines changed: 2 additions & 1 deletion
@@ -23,7 +23,7 @@ import {
   RESIZE_HRF,
   VAE_LOADER,
 } from './constants';
-import { upsertMetadata } from './metadata';
+import { setMetadataReceivingNode, upsertMetadata } from './metadata';

 // Copy certain connections from previous DENOISE_LATENTS to new DENOISE_LATENTS_HRF.
 function copyConnectionsToDenoiseLatentsHrf(graph: NonNullableGraph): void {
@@ -369,4 +369,5 @@ export const addHrfToGraph = (
     hrf_enabled: hrfEnabled,
     hrf_method: hrfMethod,
   });
+  setMetadataReceivingNode(graph, LATENTS_TO_IMAGE_HRF_HR);
 };

invokeai/frontend/web/src/features/nodes/util/graphBuilders/addSaveImageNode.ts renamed to invokeai/frontend/web/src/features/nodes/util/graphBuilders/addLinearUIOutputNode.ts

Lines changed: 8 additions & 8 deletions
@@ -1,20 +1,20 @@
 import { RootState } from 'app/store/store';
 import { NonNullableGraph } from 'features/nodes/types/types';
 import { activeTabNameSelector } from 'features/ui/store/uiSelectors';
-import { SaveImageInvocation } from 'services/api/types';
+import { LinearUIOutputInvocation } from 'services/api/types';
 import {
   CANVAS_OUTPUT,
   LATENTS_TO_IMAGE,
   LATENTS_TO_IMAGE_HRF_HR,
+  LINEAR_UI_OUTPUT,
   NSFW_CHECKER,
-  SAVE_IMAGE,
   WATERMARKER,
 } from './constants';

 /**
  * Set the `use_cache` field on the linear/canvas graph's final image output node to False.
  */
-export const addSaveImageNode = (
+export const addLinearUIOutputNode = (
   state: RootState,
   graph: NonNullableGraph
 ): void => {
@@ -23,18 +23,18 @@ export const addSaveImageNode = (
     activeTabName === 'unifiedCanvas' ? !state.canvas.shouldAutoSave : false;
   const { autoAddBoardId } = state.gallery;

-  const saveImageNode: SaveImageInvocation = {
-    id: SAVE_IMAGE,
-    type: 'save_image',
+  const linearUIOutputNode: LinearUIOutputInvocation = {
+    id: LINEAR_UI_OUTPUT,
+    type: 'linear_ui_output',
     is_intermediate,
     use_cache: false,
     board: autoAddBoardId === 'none' ? undefined : { board_id: autoAddBoardId },
   };

-  graph.nodes[SAVE_IMAGE] = saveImageNode;
+  graph.nodes[LINEAR_UI_OUTPUT] = linearUIOutputNode;

   const destination = {
-    node_id: SAVE_IMAGE,
+    node_id: LINEAR_UI_OUTPUT,
     field: 'image',
   };

invokeai/frontend/web/src/features/nodes/util/graphBuilders/buildAdHocUpscaleGraph.ts

Lines changed: 8 additions & 8 deletions
@@ -4,9 +4,9 @@ import { ESRGANModelName } from 'features/parameters/store/postprocessingSlice';
 import {
   ESRGANInvocation,
   Graph,
-  SaveImageInvocation,
+  LinearUIOutputInvocation,
 } from 'services/api/types';
-import { REALESRGAN as ESRGAN, SAVE_IMAGE } from './constants';
+import { ESRGAN, LINEAR_UI_OUTPUT } from './constants';
 import { addCoreMetadataNode, upsertMetadata } from './metadata';

 type Arg = {
@@ -28,9 +28,9 @@ export const buildAdHocUpscaleGraph = ({
     is_intermediate: true,
   };

-  const saveImageNode: SaveImageInvocation = {
-    id: SAVE_IMAGE,
-    type: 'save_image',
+  const linearUIOutputNode: LinearUIOutputInvocation = {
+    id: LINEAR_UI_OUTPUT,
+    type: 'linear_ui_output',
     use_cache: false,
     is_intermediate: false,
     board: autoAddBoardId === 'none' ? undefined : { board_id: autoAddBoardId },
@@ -40,7 +40,7 @@
     id: `adhoc-esrgan-graph`,
     nodes: {
       [ESRGAN]: realesrganNode,
-      [SAVE_IMAGE]: saveImageNode,
+      [LINEAR_UI_OUTPUT]: linearUIOutputNode,
     },
     edges: [
       {
@@ -49,14 +49,14 @@
         field: 'image',
       },
       destination: {
-        node_id: SAVE_IMAGE,
+        node_id: LINEAR_UI_OUTPUT,
         field: 'image',
      },
    },
  ],
  };

-  addCoreMetadataNode(graph, {});
+  addCoreMetadataNode(graph, {}, ESRGAN);
   upsertMetadata(graph, {
     esrgan_model: esrganModelName,
   });
