-
Notifications
You must be signed in to change notification settings - Fork 46
:chore: Extract explanations from prediction queries #650
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
995d60a
ae3f9ab
272fa8c
ebc12f2
a838fc0
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change | ||||||||
---|---|---|---|---|---|---|---|---|---|---|
|
@@ -109,7 +109,7 @@ export const AnnotatorProvider = ({ children }: AnnotatorProviderProps): JSX.Ele | |||||||||
settings={userProjectSettings} | ||||||||||
userAnnotationScene={userAnnotationScene} | ||||||||||
initPredictions={initialPredictionAnnotations} | ||||||||||
explanations={selectedMediaItem?.predictions?.maps || EMPTY_EXPLANATION} | ||||||||||
explanations={selectedMediaItem?.explanations || EMPTY_EXPLANATION} | ||||||||||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Let's try to
Suggested change
where we can define the query inside of this function or possibly remove the argument and use this query directly inside of the prediction provider. |
||||||||||
> | ||||||||||
<SubmitAnnotationsProvider | ||||||||||
settings={userProjectSettings} | ||||||||||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -33,7 +33,8 @@ const geSelectedMediaItem = (predictions: Annotation[] = [], annotations: Annota | |
...getMockedImageMediaItem({}), | ||
image: getMockedImage(), | ||
annotations, | ||
predictions: { annotations: predictions, maps: [] }, | ||
predictions: { annotations: predictions }, | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I think on a different PR we can also simplify this interface. |
||
explanations: [], | ||
}); | ||
const getInitialPredictionConfig = (isEnabled = true) => ({ | ||
[FEATURES_KEYS.INITIAL_PREDICTION]: { | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -326,7 +326,7 @@ export const PredictionProvider = ({ | |
selectedInput, | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Regarding this file: I think we can refactor this provider so that we no longer need to keep track of the explanations inside of it. Instead it should be possible to retrieve the explanations directly from a query result. |
||
taskId: String(selectedTask?.id), | ||
enabled: isPredictionsQueryEnabled, | ||
onSuccess: runWhenNotDrawing(({ annotations: newRawPredictions, maps: newMaps }: PredictionResult) => { | ||
onSuccess: runWhenNotDrawing(({ annotations: newRawPredictions }: PredictionResult) => { | ||
jpggvilaca marked this conversation as resolved.
Show resolved
Hide resolved
|
||
const selectedPredictions = selectAnnotations(newRawPredictions, userSceneSelectedInputs); | ||
|
||
if (isEmpty(userAnnotations)) { | ||
|
@@ -348,7 +348,6 @@ export const PredictionProvider = ({ | |
canUpdatePrediction && userAnnotationScene.updateAnnotation(newPrediction); | ||
} | ||
|
||
setExplanations(newMaps); | ||
setRawPredictions(selectedPredictions); | ||
|
||
// Optionally update video timeline predictions | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -7,6 +7,7 @@ import QUERY_KEYS from '@geti/core/src/requests/query-keys'; | |
import { useQuery, UseQueryResult } from '@tanstack/react-query'; | ||
import { noop } from 'lodash-es'; | ||
|
||
import { ExplanationResult } from '../../../../core/annotations/services/inference-service.interface'; | ||
import { PredictionResult } from '../../../../core/annotations/services/prediction-service.interface'; | ||
import { SelectedMediaItemContext, SelectedMediaItemProps } from './selected-media-item-provider.component'; | ||
import { SelectedMediaItem } from './selected-media-item.interface'; | ||
|
@@ -40,6 +41,7 @@ export const DefaultSelectedMediaItemProvider = ({ | |
selectedMediaItemQuery, | ||
setSelectedMediaItem: noop, | ||
predictionsQuery: { isLoading: false } as UseQueryResult<PredictionResult>, | ||
explanationsQuery: { isLoading: false } as UseQueryResult<ExplanationResult>, | ||
jpggvilaca marked this conversation as resolved.
Show resolved
Hide resolved
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is it possible to completely remove this and instead use the query directly? |
||
}; | ||
|
||
return <SelectedMediaItemContext.Provider value={value}>{children}</SelectedMediaItemContext.Provider>; | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -12,6 +12,7 @@ import { AxiosError } from 'axios'; | |
import { isEmpty, isEqual } from 'lodash-es'; | ||
|
||
import { Annotation } from '../../../../core/annotations/annotation.interface'; | ||
import { ExplanationResult } from '../../../../core/annotations/services/inference-service.interface'; | ||
import { PredictionMode, PredictionResult } from '../../../../core/annotations/services/prediction-service.interface'; | ||
import { InferenceModel } from '../../../../core/annotations/services/visual-prompt-service'; | ||
import { MediaItem } from '../../../../core/media/media.interface'; | ||
|
@@ -31,6 +32,7 @@ import { hasEmptyLabels } from '../../utils'; | |
import { useTask } from '../task-provider/task-provider.component'; | ||
import { SelectedMediaItem } from './selected-media-item.interface'; | ||
import { useAnnotationsQuery } from './use-annotations-query.hook'; | ||
import { useExplanationsQuery } from './use-explanation-query.hook'; | ||
import { useLoadImageQuery } from './use-load-image-query.hook'; | ||
import { usePredictionsQuery } from './use-predictions-query.hook'; | ||
import { useSelectedInferenceModel } from './use-selected-inference-model'; | ||
|
@@ -43,6 +45,7 @@ import { | |
|
||
export interface SelectedMediaItemProps { | ||
predictionsQuery: UseQueryResult<PredictionResult>; | ||
explanationsQuery: UseQueryResult<ExplanationResult>; | ||
selectedMediaItem: SelectedMediaItem | undefined; | ||
selectedMediaItemQuery: UseQueryResult<SelectedMediaItem>; | ||
setSelectedMediaItem: (media: MediaItem | undefined) => void; | ||
|
@@ -87,7 +90,7 @@ const useIsSuggestPredictionEnabled = (projectIdentifier: ProjectIdentifier): bo | |
}; | ||
|
||
/** | ||
* Load either online or auto predicitons based on the annotator mode | ||
* Load either online or auto predictions based on the annotator mode | ||
* When the user switches between both modes we don't want to refetch predictions, | ||
* so in both cases we will keep the queries mounted | ||
*/ | ||
|
@@ -169,10 +172,20 @@ export const SelectedMediaItemProvider = ({ children }: SelectedMediaItemProvide | |
}); | ||
|
||
const predictionsQuery = usePredictionsQueryBasedOnAnnotatorMode(mediaItem); | ||
const explanationsQuery = useExplanationsQuery({ | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Similar question/comment as above: I'd prefer if we can fully remove it from this provider. |
||
datasetIdentifier, | ||
mediaItem, | ||
taskId: selectedTask?.id, | ||
}); | ||
|
||
const selectedMediaItemQueryKey = [ | ||
...QUERY_KEYS.SELECTED_MEDIA_ITEM.SELECTED(pendingMediaItem?.identifier, selectedTask?.id), | ||
[imageQuery.fetchStatus, annotationsQuery.fetchStatus, predictionsQuery.fetchStatus], | ||
[ | ||
imageQuery.fetchStatus, | ||
annotationsQuery.fetchStatus, | ||
predictionsQuery.fetchStatus, | ||
explanationsQuery.fetchStatus, | ||
], | ||
]; | ||
|
||
const isSelectedMediaItemQueryEnabled = mediaItem !== undefined; | ||
|
@@ -184,31 +197,32 @@ export const SelectedMediaItemProvider = ({ children }: SelectedMediaItemProvide | |
throw new Error("Can't fetch undefined media item"); | ||
} | ||
|
||
const [image, annotations, predictions] = await new Promise<[ImageData, Annotation[], PredictionResult]>( | ||
(resolve, reject) => { | ||
if (imageQuery.isError) { | ||
reject({ | ||
message: 'Failed loading media item. Please try refreshing or selecting a different item.', | ||
}); | ||
} | ||
const [image, annotations, predictions, explanations] = await new Promise< | ||
[ImageData, Annotation[], PredictionResult, ExplanationResult] | ||
>((resolve, reject) => { | ||
if (imageQuery.isError) { | ||
reject({ | ||
message: 'Failed loading media item. Please try refreshing or selecting a different item.', | ||
}); | ||
} | ||
|
||
if (imageQuery.data && annotationsQuery.data) { | ||
if (!predictionsQuery.data && predictionsQuery.isFetching) { | ||
// If we do not yet have predictions and the user has not yet made any annotations | ||
// for the selected task, then we will wait for predictions | ||
if (isNotAnnotatedForTask(annotationsQuery.data, selectedTask)) { | ||
return; | ||
} | ||
if (imageQuery.data && annotationsQuery.data) { | ||
if (!predictionsQuery.data && predictionsQuery.isFetching) { | ||
// If we do not yet have predictions and the user has not yet made any annotations | ||
// for the selected task, then we will wait for predictions | ||
if (isNotAnnotatedForTask(annotationsQuery.data, selectedTask)) { | ||
return; | ||
} | ||
} | ||
|
||
const predictionsData = predictionsQuery.data ?? { maps: [], annotations: [] }; | ||
const predictionsData = predictionsQuery.data ?? { annotations: [] }; | ||
const explanationsData = explanationsQuery.data ?? []; | ||
|
||
resolve([imageQuery.data, annotationsQuery.data, predictionsData]); | ||
} | ||
resolve([imageQuery.data, annotationsQuery.data, predictionsData, explanationsData]); | ||
} | ||
); | ||
}); | ||
|
||
const newlySelectedMediaItem = { ...mediaItem, image, annotations, predictions }; | ||
const newlySelectedMediaItem = { ...mediaItem, image, annotations, predictions, explanations }; | ||
|
||
if (isSingleDomainProject(isClassificationDomain)) { | ||
return { | ||
|
@@ -266,6 +280,7 @@ export const SelectedMediaItemProvider = ({ children }: SelectedMediaItemProvide | |
|
||
const value: SelectedMediaItemProps = { | ||
predictionsQuery, | ||
explanationsQuery, | ||
selectedMediaItem, | ||
selectedMediaItemQuery, | ||
setSelectedMediaItem: setPendingMediaItem, | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Should this also depend on the
roi
? Iirc when we are in task chain we request the explanation map based on a given ROI.