Skip to content

Commit 3abc0a9

Browse files
committed
add missing files
1 parent c771e9b commit 3abc0a9

File tree

2 files changed

+508
-0
lines changed

2 files changed

+508
-0
lines changed
Lines changed: 317 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,317 @@
/*
 * This module contains the sagas responsible for sending the contents of the save queue
 * to the back-end (thus, draining the queue).
 */
5+
6+
import { sendSaveRequestWithToken } from "admin/rest_api";
7+
import Date from "libs/date";
8+
import ErrorHandling from "libs/error_handling";
9+
import Toast from "libs/toast";
10+
import { sleep } from "libs/utils";
11+
import window, { alert, document, location } from "libs/window";
12+
import memoizeOne from "memoize-one";
13+
import messages from "messages";
14+
import { call, delay, put, race, take } from "typed-redux-saga";
15+
import { ControlModeEnum } from "viewer/constants";
16+
import { getMagInfo } from "viewer/model/accessors/dataset_accessor";
17+
import {
18+
setLastSaveTimestampAction,
19+
setSaveBusyAction,
20+
setVersionNumberAction,
21+
shiftSaveQueueAction,
22+
} from "viewer/model/actions/save_actions";
23+
import compactSaveQueue from "viewer/model/helpers/compaction/compact_save_queue";
24+
import { globalPositionToBucketPosition } from "viewer/model/helpers/position_converter";
25+
import type { Saga } from "viewer/model/sagas/effect-generators";
26+
import { select } from "viewer/model/sagas/effect-generators";
27+
import { ensureWkReady } from "viewer/model/sagas/ready_sagas";
28+
import {
29+
MAXIMUM_ACTION_COUNT_PER_SAVE,
30+
MAX_SAVE_RETRY_WAITING_TIME,
31+
PUSH_THROTTLE_TIME,
32+
SAVE_RETRY_WAITING_TIME,
33+
} from "viewer/model/sagas/saving/save_saga_constants";
34+
import { Model } from "viewer/singletons";
35+
import type { SaveQueueEntry } from "viewer/store";
36+
37+
// Used to stall "forever" after an unrecoverable 409 conflict (see sendSaveRequestToServer),
// so that callers do not trigger further save requests before the page is reloaded.
const ONE_YEAR_MS = 365 * 24 * 3600 * 1000;
38+
39+
/*
 * Endless saga that drains the save queue: it waits until items are in the
 * queue, throttles batching via PUSH_THROTTLE_TIME (or flushes immediately on
 * a SAVE_NOW action), and marks saving as busy while requests are in flight.
 */
export function* pushSaveQueueAsync(): Saga<never> {
  // Don't start pushing before WebKnossos is fully initialized.
  yield* call(ensureWkReady);

  yield* put(setLastSaveTimestampAction());
  let loopCounter = 0;

  while (true) {
    loopCounter++;
    let saveQueue;
    // Check whether the save queue is actually empty, the PUSH_SAVE_QUEUE_TRANSACTION action
    // could have been triggered during the call to sendSaveRequestToServer
    saveQueue = yield* select((state) => state.save.queue);

    if (saveQueue.length === 0) {
      if (loopCounter % 100 === 0) {
        // See https://github.com/scalableminds/webknossos/pull/6076 (or 82e16e1) for an explanation
        // of this delay call.
        yield* delay(0);
      }

      // Save queue is empty, wait for push event
      yield* take("PUSH_SAVE_QUEUE_TRANSACTION");
    }

    // Either wait out the throttle window or save right away when the user
    // explicitly requests it via SAVE_NOW.
    const { forcePush } = yield* race({
      timeout: delay(PUSH_THROTTLE_TIME),
      forcePush: take("SAVE_NOW"),
    });
    yield* put(setSaveBusyAction(true));

    // Send (parts of) the save queue to the server.
    // There are two main cases:
    // 1) forcePush is true
    //    The user explicitly requested to save an annotation.
    //    In this case, batches are sent to the server until the save
    //    queue is empty. Note that the save queue might be added to
    //    while saving is in progress. Still, the save queue will be
    //    drained until it is empty. If the user hits save and continuously
    //    annotates further, a high number of save-requests might be sent.
    // 2) forcePush is false
    //    The auto-save interval was reached at time T. The following code
    //    will determine how many items are in the save queue at this time T.
    //    Exactly that many items will be sent to the server.
    //    New items that might be added to the save queue during saving, will be
    //    ignored (they will be picked up in the next iteration of this loop).
    //    Otherwise, the risk of a high number of save-requests (see case 1)
    //    would be present here, too (note the risk would be greater, because the
    //    user didn't use the save button which is usually accompanied by a small pause).
    const itemCountToSave = forcePush
      ? Number.POSITIVE_INFINITY
      : yield* select((state) => state.save.queue.length);
    let savedItemCount = 0;
    while (savedItemCount < itemCountToSave) {
      saveQueue = yield* select((state) => state.save.queue);

      if (saveQueue.length > 0) {
        // sendSaveRequestToServer returns how many queue items it shipped.
        savedItemCount += yield* call(sendSaveRequestToServer);
      } else {
        break;
      }
    }
    yield* put(setSaveBusyAction(false));
  }
}
103+
104+
function getRetryWaitTime(retryCount: number) {
105+
// Exponential backoff up until MAX_SAVE_RETRY_WAITING_TIME
106+
return Math.min(2 ** retryCount * SAVE_RETRY_WAITING_TIME, MAX_SAVE_RETRY_WAITING_TIME);
107+
}
108+
109+
// Tracks whether the "failed simultaneous tracing" alert was already shown.
// The value for this boolean does not need to be restored to false
// at any time, because the browser page is reloaded after the message is shown, anyway.
let didShowFailedSimultaneousTracingError = false;
112+
113+
export function* sendSaveRequestToServer(): Saga<number> {
  /*
   * Saves a reasonably-sized part of the save queue to the server (plus retry-mechanism).
   * The saga returns the number of save queue items that were saved.
   */

  const fullSaveQueue = yield* select((state) => state.save.queue);
  // Cap the batch count so a single request does not exceed MAXIMUM_ACTION_COUNT_PER_SAVE actions.
  const saveQueue = sliceAppropriateBatchCount(fullSaveQueue);
  let compactedSaveQueue = compactSaveQueue(saveQueue);
  const version = yield* select((state) => state.annotation.version);
  const annotationId = yield* select((state) => state.annotation.annotationId);
  const tracingStoreUrl = yield* select((state) => state.annotation.tracingStore.url);
  let versionIncrement;
  [compactedSaveQueue, versionIncrement] = addVersionNumbers(compactedSaveQueue, version);
  let retryCount = 0;

  // This while-loop only exists for the purpose of a retry-mechanism
  while (true) {
    let exceptionDuringMarkBucketsAsNotDirty = false;

    try {
      const startTime = Date.now();
      yield* call(
        sendSaveRequestWithToken,
        `${tracingStoreUrl}/tracings/annotation/${annotationId}/update?token=`,
        {
          method: "POST",
          data: compactedSaveQueue,
          compress: process.env.NODE_ENV === "production",
          // Suppressing error toast, as the doWithToken retry with personal token functionality should not show an error.
          // Instead the error is logged and toggleErrorHighlighting should take care of showing an error to the user.
          showErrorToast: false,
        },
      );
      const endTime = Date.now();

      // Report unusually slow save requests to error tracking (request succeeded, though).
      if (endTime - startTime > PUSH_THROTTLE_TIME) {
        yield* call(
          [ErrorHandling, ErrorHandling.notify],
          new Error(
            `Warning: Save request took more than ${Math.ceil(PUSH_THROTTLE_TIME / 1000)} seconds.`,
          ),
        );
      }

      // The request succeeded: advance the local version, record the save time
      // and drop the saved items from the queue.
      yield* put(setVersionNumberAction(version + versionIncrement));
      yield* put(setLastSaveTimestampAction());
      yield* put(shiftSaveQueueAction(saveQueue.length));

      try {
        yield* call(markBucketsAsNotDirty, compactedSaveQueue);
      } catch (error) {
        // If markBucketsAsNotDirty fails for some reason, wk cannot recover from this error.
        console.warn("Error when marking buckets as clean. No retry possible. Error:", error);
        exceptionDuringMarkBucketsAsNotDirty = true;
        throw error;
      }

      yield* call(toggleErrorHighlighting, false);
      return saveQueue.length;
    } catch (error) {
      // Re-throw immediately: this failure happened after the server accepted
      // the update, so retrying the request would not help.
      if (exceptionDuringMarkBucketsAsNotDirty) {
        throw error;
      }

      console.warn("Error during saving. Will retry. Error:", error);
      const controlMode = yield* select((state) => state.temporaryConfiguration.controlMode);
      const isViewOrSandboxMode =
        controlMode === ControlModeEnum.VIEW || controlMode === ControlModeEnum.SANDBOX;

      if (!isViewOrSandboxMode) {
        // Notify user about error unless, view or sandbox mode is active. In that case,
        // we do not need to show the error as it is not so important and distracts the user.
        yield* call(toggleErrorHighlighting, true);
      }

      // Log the error to airbrake. Also compactedSaveQueue needs to be within an object
      // as otherwise the entries would be spread by the notify function.
      // @ts-ignore
      yield* call({ context: ErrorHandling, fn: ErrorHandling.notify }, error, {
        compactedSaveQueue,
        retryCount,
      });

      // @ts-ignore
      if (error.status === 409) {
        // HTTP Code 409 'conflict' for dirty state
        // @ts-ignore
        window.onbeforeunload = null;
        yield* call(
          [ErrorHandling, ErrorHandling.notify],
          new Error("Saving failed due to '409' status code"),
        );
        if (!didShowFailedSimultaneousTracingError) {
          // If the saving fails for one tracing (e.g., skeleton), it can also
          // fail for another tracing (e.g., volume). The message simply tells the
          // user that the saving in general failed. So, there is no sense in showing
          // the message multiple times.
          yield* call(alert, messages["save.failed_simultaneous_tracing"]);
          location.reload();
          didShowFailedSimultaneousTracingError = true;
        }

        // Wait "forever" to avoid that the caller initiates other save calls afterwards (e.g.,
        // can happen if the caller tries to force-flush the save queue).
        // The reason we don't throw an error immediately is that this would immediately
        // crash all sagas (including saving other tracings).
        yield* call(sleep, ONE_YEAR_MS);
        throw new Error("Saving failed due to conflict.");
      }

      // Non-conflict error: back off exponentially, but allow the user to
      // retry immediately via SAVE_NOW.
      yield* race({
        timeout: delay(getRetryWaitTime(retryCount)),
        forcePush: take("SAVE_NOW"),
      });
      retryCount++;
    }
  }
}
232+
233+
function* markBucketsAsNotDirty(saveQueue: Array<SaveQueueEntry>) {
234+
const getLayerAndMagInfoForTracingId = memoizeOne((tracingId: string) => {
235+
const segmentationLayer = Model.getSegmentationTracingLayer(tracingId);
236+
const segmentationMagInfo = getMagInfo(segmentationLayer.mags);
237+
return [segmentationLayer, segmentationMagInfo] as const;
238+
});
239+
for (const saveEntry of saveQueue) {
240+
for (const updateAction of saveEntry.actions) {
241+
if (updateAction.name === "updateBucket") {
242+
const { actionTracingId: tracingId } = updateAction.value;
243+
const [segmentationLayer, segmentationMagInfo] = getLayerAndMagInfoForTracingId(tracingId);
244+
245+
const { position, mag, additionalCoordinates } = updateAction.value;
246+
const magIndex = segmentationMagInfo.getIndexByMag(mag);
247+
const zoomedBucketAddress = globalPositionToBucketPosition(
248+
position,
249+
segmentationMagInfo.getDenseMags(),
250+
magIndex,
251+
additionalCoordinates,
252+
);
253+
const bucket = segmentationLayer.cube.getOrCreateBucket(zoomedBucketAddress);
254+
255+
if (bucket.type === "null") {
256+
continue;
257+
}
258+
259+
bucket.dirtyCount--;
260+
261+
if (bucket.dirtyCount === 0) {
262+
bucket.markAsPushed();
263+
}
264+
}
265+
}
266+
}
267+
}
268+
269+
export function toggleErrorHighlighting(state: boolean, permanentError: boolean = false): void {
270+
if (document.body != null) {
271+
document.body.classList.toggle("save-error", state);
272+
}
273+
274+
const message = permanentError ? messages["save.failed.permanent"] : messages["save.failed"];
275+
276+
if (state) {
277+
Toast.error(message, {
278+
sticky: true,
279+
});
280+
} else {
281+
Toast.close(message);
282+
}
283+
}
284+
285+
// This function returns the first n batches of the provided array, so that the count of
286+
// all actions in these n batches does not exceed MAXIMUM_ACTION_COUNT_PER_SAVE
287+
function sliceAppropriateBatchCount(batches: Array<SaveQueueEntry>): Array<SaveQueueEntry> {
288+
const slicedBatches = [];
289+
let actionCount = 0;
290+
291+
for (const batch of batches) {
292+
const newActionCount = actionCount + batch.actions.length;
293+
294+
if (newActionCount <= MAXIMUM_ACTION_COUNT_PER_SAVE) {
295+
actionCount = newActionCount;
296+
slicedBatches.push(batch);
297+
} else {
298+
break;
299+
}
300+
}
301+
302+
return slicedBatches;
303+
}
304+
305+
export function addVersionNumbers(
306+
updateActionsBatches: Array<SaveQueueEntry>,
307+
lastVersion: number,
308+
): [Array<SaveQueueEntry>, number] {
309+
let versionIncrement = 0;
310+
const batchesWithVersions = updateActionsBatches.map((batch) => {
311+
if (batch.transactionGroupIndex === 0) {
312+
versionIncrement++;
313+
}
314+
return { ...batch, version: lastVersion + versionIncrement };
315+
});
316+
return [batchesWithVersions, versionIncrement];
317+
}

0 commit comments

Comments
 (0)