Skip to content
Draft
Show file tree
Hide file tree
Changes from 13 commits
Commits
Show all changes
49 commits
Select commit Hold shift + click to select a range
0134ed4
work in progress
picalarix Feb 14, 2024
28ab1b5
offload (not yet really) first try
picalarix Feb 21, 2024
42d2e01
linear solver update cusparse
picalarix Feb 25, 2024
8daaf5d
some more changes
picalarix Mar 6, 2024
0995127
cusparse convert - not done
picalarix Mar 12, 2024
38fd7ba
last changes to offload
picalarix Mar 14, 2024
fb9be7f
Commentary
picalarix Apr 23, 2024
b6c6d4c
after meeting
picalarix May 9, 2024
69508ab
Events
picalarix Jul 12, 2024
c120d5b
adding simple test for debugging
Olender Mar 18, 2025
bbd958f
duplicating to get around locking
Olender Mar 27, 2025
f6ed362
different fix for the locked vector
Olender Mar 27, 2025
a46028e
only install for now
Olender Mar 27, 2025
57b7a97
calling data to synchronize vector
Olender Mar 31, 2025
74e4cfb
adding first test
Olender Apr 1, 2025
65bceb0
adding kmv wave test
Olender Apr 2, 2025
c20a3e7
minor fix
Olender Apr 2, 2025
383529a
Merge remote-tracking branch 'origin/master' into olender/CUDA
Olender Apr 3, 2025
25ea718
minor changes
Olender Apr 3, 2025
3350bde
offload now subclass of assembledpc
Olender Apr 4, 2025
763fe6b
adding tests in CI
Olender Apr 4, 2025
fc13aa2
checking if run tests gets the tests with cuda marker
Olender Apr 9, 2025
af7e6ce
Update .github/workflows/build_cuda.yml
Olender Apr 10, 2025
c988926
adding env options
Olender Apr 10, 2025
d075453
trying to figure out whats wrong with petsc4py now
Olender Apr 10, 2025
1765f5b
Merge remote-tracking branch 'origin/master' into olender/CUDA
Olender Apr 10, 2025
af69f87
wip
Olender Apr 14, 2025
84f8851
updating PETSC
Olender Apr 14, 2025
b00c615
wip
Olender Apr 15, 2025
9e9fe08
wip
Olender Apr 15, 2025
7a3c5da
wip
Olender Apr 15, 2025
7f69823
adding slepc
Olender Apr 15, 2025
c23f37e
wip
Olender Apr 15, 2025
13498cf
Merge remote-tracking branch 'origin/master' into olender/CUDA
Olender Apr 15, 2025
58db56b
wip
Olender Apr 15, 2025
a380acf
wip
Olender Apr 15, 2025
18f4daa
wip
Olender Apr 15, 2025
319aa19
wip
Olender Apr 16, 2025
80b8e2b
Merge remote-tracking branch 'origin/master' into olender/CUDA
Olender Apr 17, 2025
bbf1825
wip
Olender Apr 17, 2025
07a2a20
wip
Olender Apr 22, 2025
97e41fa
wip
Olender Apr 23, 2025
25d356b
back to openmpi
Olender Apr 23, 2025
6ff1e91
wip
Olender Apr 23, 2025
86ae6a8
wip
Olender Apr 24, 2025
c96c15b
wip
Olender Apr 24, 2025
61f65b1
wip
Olender Apr 24, 2025
64f67dc
Merge remote-tracking branch 'origin/master' into olender/CUDA
Olender Apr 29, 2025
c57acf0
just to debug something
Olender May 6, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
90 changes: 90 additions & 0 deletions .github/workflows/build_cuda.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
name: Install and test Firedrake (CUDA)

on:
push:
branches:
- master
pull_request:

concurrency:
# Cancels jobs running if new commits are pushed
group: >
${{ github.workflow }}-
${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true

jobs:
test:
name: Install and test Firedrake (Linux)
strategy:
# We want to know all of the tests which fail, so don't kill real if
# complex fails and vice-versa
fail-fast: false
matrix:
arch: [default]
runs-on: [self-hosted, Linux]
container:
image: firedrakeproject/firedrake-env:latest
env:
FIREDRAKE_CI: 1
OMP_NUM_THREADS: 1
steps:
- name: Pre-run cleanup
# Make sure the current directory is empty
run: find . -delete

- uses: actions/checkout@v4
with:
path: firedrake-repo

- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get -y install \
$(python3 ./firedrake-repo/scripts/firedrake-configure --arch ${{ matrix.arch }} --show-system-packages)

- name: Install PETSc
run: |
git clone --depth 1 https://github.com/firedrakeproject/petsc.git
cd petsc
# TODO update configure file
./configure --with-make-np=12 --with-c2html=0 --with-debugging=0 --with-fortran-bindings=0 --with-shared-libraries=1 --with-strict-petscerrorcode PETSC_ARCH=arch-firedrake-default --COPTFLAGS=-O3 -march=native -mtune=native --CXXOPTFLAGS=-O3 -march=native -mtune=native --FOPTFLAGS=-O3 -march=native -mtune=native --download-bison --download-fftw --download-hdf5 --download-hwloc --download-metis --download-mumps --download-netcdf --download-pnetcdf --download-ptscotch --download-scalapack --download-suitesparse --download-superlu_dist --download-zlib --download-hypre --with-cuda --with-cuda-dir=/usr/local/cuda CUDAPPFLAGS=-Wno-deprecated-gpu-targets --download-openmpi
make
# TODO: This fails for some reason
# make check

- name: Install Firedrake
id: install
run: |
# TODO update configure file for the exports
# export $(python3 ./firedrake-repo/scripts/firedrake-configure --arch ${{ matrix.arch }} --show-env)
export PETSC_DIR=./petsc
export PETSC_ARCH=arch-firedrake-default
export MPI_HOME=$PETSC_DIR/$PETSC_ARCH
export CC=$PETSC_DIR/$PETSC_ARCH/bin/mpicc
export CXX=$PETSC_DIR/$PETSC_ARCH/bin/mpicxx
export MPICC=$CC
export MPI_HOME=$PETSC_DIR/$PETSC_ARCH
export PATH=$PETSC_DIR/$PETSC_ARCH/bin:$PATH
export HDF5_MPI=ON
python3 -m venv venv
. venv/bin/activate
: # Force a rebuild of petsc4py as the cached one will not link to the fresh
: # install of PETSc. A similar trick may be needed for compiled dependencies
: # like h5py or mpi4py if changing HDF5/MPI libraries.
pip cache remove petsc4py
pip install --verbose --no-binary h5py './firedrake-repo[ci]'
firedrake-clean
: # Extra test dependencies
pip install matplotlib vtk
pip list

- name: Run smoke tests
run: |
. venv/bin/activate
firedrake-check
timeout-minutes: 10

- name: Post-run cleanup
if: always()
run: find . -delete
57 changes: 57 additions & 0 deletions demos/helmholtz/helmholtz.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
Main Stage 366614
Main Stage;firedrake 44369
Main Stage;firedrake;firedrake.solving.solve 86
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve 196
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve 140
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESFunctionEval 736
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESFunctionEval;ParLoopExecute 212
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESFunctionEval;ParLoopExecute;Parloop_Cells_wrap_form0_cell_integral 112
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESFunctionEval;ParLoopExecute;Parloop_Cells_wrap_form0_cell_integral;pyop2.global_kernel.GlobalKernel.compile 415552
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESFunctionEval;firedrake.tsfc_interface.compile_form 42597
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESJacobianEval 866
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESJacobianEval;ParLoopExecute 149
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESJacobianEval;ParLoopExecute;Parloop_Cells_wrap_form00_cell_integral 136
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.solve;SNESSolve;SNESJacobianEval;ParLoopExecute;Parloop_Cells_wrap_form00_cell_integral;pyop2.global_kernel.GlobalKernel.compile 407506
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.__init__ 1771
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.__init__;firedrake.tsfc_interface.compile_form 56423
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.__init__;firedrake.tsfc_interface.compile_form;firedrake.formmanipulation.split_form 1907
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.NonlinearVariationalSolver.__init__;firedrake.solving_utils._SNESContext.__init__ 618
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.LinearVariationalProblem.__init__ 145
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.LinearVariationalProblem.__init__;firedrake.ufl_expr.action 4387
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.LinearVariationalProblem.__init__;firedrake.variational_solver.NonlinearVariationalProblem.__init__ 332
Main Stage;firedrake;firedrake.solving.solve;firedrake.variational_solver.LinearVariationalProblem.__init__;firedrake.variational_solver.NonlinearVariationalProblem.__init__;firedrake.ufl_expr.adjoint 2798
Main Stage;firedrake;firedrake.function.Function.interpolate 342
Main Stage;firedrake;firedrake.function.Function.interpolate;firedrake.assemble.assemble 5644
Main Stage;firedrake;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.SameMeshInterpolator._interpolate 29
Main Stage;firedrake;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.SameMeshInterpolator._interpolate;ParLoopExecute 298
Main Stage;firedrake;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.SameMeshInterpolator._interpolate;ParLoopExecute;Parloop_Cells_wrap_expression_kernel 204
Main Stage;firedrake;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.SameMeshInterpolator._interpolate;ParLoopExecute;Parloop_Cells_wrap_expression_kernel;pyop2.global_kernel.GlobalKernel.compile 682292
Main Stage;firedrake;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.make_interpolator 40658
Main Stage;firedrake;firedrake.output.vtk_output.VTKFile.write 2473
Main Stage;firedrake;firedrake.output.vtk_output.VTKFile.write;firedrake.function.Function.interpolate 303
Main Stage;firedrake;firedrake.output.vtk_output.VTKFile.write;firedrake.function.Function.interpolate;firedrake.assemble.assemble 1080
Main Stage;firedrake;firedrake.output.vtk_output.VTKFile.write;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.SameMeshInterpolator._interpolate 23
Main Stage;firedrake;firedrake.output.vtk_output.VTKFile.write;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.SameMeshInterpolator._interpolate;ParLoopExecute 328
Main Stage;firedrake;firedrake.output.vtk_output.VTKFile.write;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.SameMeshInterpolator._interpolate;ParLoopExecute;Parloop_Cells_wrap_expression_kernel 165
Main Stage;firedrake;firedrake.output.vtk_output.VTKFile.write;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.SameMeshInterpolator._interpolate;ParLoopExecute;Parloop_Cells_wrap_expression_kernel;pyop2.global_kernel.GlobalKernel.compile 663410
Main Stage;firedrake;firedrake.output.vtk_output.VTKFile.write;firedrake.function.Function.interpolate;firedrake.assemble.assemble;firedrake.interpolation.make_interpolator 55147
Main Stage;firedrake;firedrake.__init__ 495196
Main Stage;firedrake;firedrake.assemble.assemble 949
Main Stage;firedrake;firedrake.assemble.assemble;ParLoopExecute 310
Main Stage;firedrake;firedrake.assemble.assemble;ParLoopExecute;Parloop_Cells_wrap_form_cell_integral 95
Main Stage;firedrake;firedrake.assemble.assemble;ParLoopExecute;Parloop_Cells_wrap_form_cell_integral;pyop2.global_kernel.GlobalKernel.compile 355507
Main Stage;firedrake;firedrake.assemble.assemble;firedrake.tsfc_interface.compile_form 20219
Main Stage;firedrake;CreateFunctionSpace 919
Main Stage;firedrake;CreateFunctionSpace;CreateFunctionSpace 79
Main Stage;firedrake;CreateFunctionSpace;CreateFunctionSpace;firedrake.functionspaceimpl.FunctionSpace.__init__ 165
Main Stage;firedrake;CreateFunctionSpace;CreateFunctionSpace;firedrake.functionspaceimpl.FunctionSpace.__init__;firedrake.functionspacedata.get_shared_data 13
Main Stage;firedrake;CreateFunctionSpace;CreateFunctionSpace;firedrake.functionspaceimpl.FunctionSpace.__init__;firedrake.functionspacedata.get_shared_data;firedrake.functionspacedata.FunctionSpaceData.__init__ 825
Main Stage;firedrake;CreateFunctionSpace;CreateFunctionSpace;firedrake.functionspaceimpl.FunctionSpace.__init__;firedrake.functionspacedata.get_shared_data;firedrake.functionspacedata.FunctionSpaceData.__init__;FunctionSpaceData: CreateElement 1274
Main Stage;firedrake;CreateFunctionSpace;CreateFunctionSpace;firedrake.functionspaceimpl.FunctionSpace.__init__;firedrake.functionspacedata.get_shared_data;firedrake.functionspacedata.FunctionSpaceData.__init__;firedrake.mesh.MeshTopology._facets 789
Main Stage;firedrake;CreateFunctionSpace;CreateMesh 147
Main Stage;firedrake;CreateFunctionSpace;CreateMesh;Mesh: numbering 376
Main Stage;firedrake;firedrake.utility_meshes.UnitSquareMesh 12
Main Stage;firedrake;firedrake.utility_meshes.UnitSquareMesh;firedrake.utility_meshes.SquareMesh 11
Main Stage;firedrake;firedrake.utility_meshes.UnitSquareMesh;firedrake.utility_meshes.SquareMesh;firedrake.utility_meshes.RectangleMesh 834
Main Stage;firedrake;firedrake.utility_meshes.UnitSquareMesh;firedrake.utility_meshes.SquareMesh;firedrake.utility_meshes.RectangleMesh;CreateMesh 676
Main Stage;firedrake;firedrake.utility_meshes.UnitSquareMesh;firedrake.utility_meshes.SquareMesh;firedrake.utility_meshes.RectangleMesh;DMPlexInterp 382
14 changes: 14 additions & 0 deletions firedrake/linear_solver.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ def __init__(self, A, *, P=None, solver_parameters=None,
solver_parameters = solving_utils.set_defaults(solver_parameters,
A.arguments(),
ksp_defaults=self.DEFAULT_KSP_PARAMETERS)
# todo: add offload to solver parameters - how? prefix?

self.A = A
self.comm = A.comm
self._comm = internal_comm(self.comm, self)
Expand Down Expand Up @@ -181,6 +183,18 @@ def solve(self, x, b):
else:
acc = x.dat.vec_wo

# if "cu" in self.A.petscmat.type: # todo: cuda or cu?
# with self.inserted_options(), b.dat.vec_ro as rhs, acc as solution, dmhooks.add_hooks(self.ksp.dm, self):
# b_cu = PETSc.Vec()
# b_cu.createCUDAWithArrays(rhs)
# u = PETSc.Vec()
# u.createCUDAWithArrays(solution)
# self.ksp.solve(b_cu, u)
# u.getArray()

# else:
# instead: preconditioner

with self.inserted_options(), b.dat.vec_ro as rhs, acc as solution, dmhooks.add_hooks(self.ksp.dm, self):
self.ksp.solve(rhs, solution)

Expand Down
1 change: 1 addition & 0 deletions firedrake/preconditioners/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,3 +13,4 @@
from firedrake.preconditioners.hiptmair import * # noqa: F401
from firedrake.preconditioners.facet_split import * # noqa: F401
from firedrake.preconditioners.bddc import * # noqa: F401
from firedrake.preconditioners.offload import * # noqa: F401
118 changes: 118 additions & 0 deletions firedrake/preconditioners/offload.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,118 @@
from firedrake.preconditioners.base import PCBase
from firedrake.functionspace import FunctionSpace, MixedFunctionSpace
from firedrake.petsc import PETSc
from firedrake.ufl_expr import TestFunction, TrialFunction
import firedrake.dmhooks as dmhooks
from firedrake.dmhooks import get_function_space

__all__ = ("OffloadPC",)


class OffloadPC(PCBase):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could AssembledPC assume this functionality by providing -assembled_mat_type aijcusparse?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Possibly, but then you lose all flexibility w.r.t. using other matrix types.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We could have this one as a subclass of AssembledPC, there's substantial code duplication

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That seems like a good idea. @Olender

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good idea! I implemented this in the latest commit, but I'm still testing things out to ensure everything works as expected. Let me know if you have any more suggestions

"""Offload PC from CPU to GPU and back.

Internally this makes a PETSc PC object that can be controlled by
options using the extra options prefix ``offload_``.
"""

_prefix = "offload_"

def initialize(self, pc):
with PETSc.Log.Event("Event: initialize offload"): #
A, P = pc.getOperators()

outer_pc = pc
appctx = self.get_appctx(pc)
fcp = appctx.get("form_compiler_parameters")

V = get_function_space(pc.getDM())
if len(V) == 1:
V = FunctionSpace(V.mesh(), V.ufl_element())
else:
V = MixedFunctionSpace([V_ for V_ in V])
test = TestFunction(V)
trial = TrialFunction(V)

(a, bcs) = self.form(pc, test, trial)

if P.type == "assembled":
context = P.getPythonContext()
# It only makes sense to preconditioner/invert a diagonal
# block in general. That's all we're going to allow.
if not context.on_diag:
raise ValueError("Only makes sense to invert diagonal block")

prefix = pc.getOptionsPrefix()
options_prefix = prefix + self._prefix

mat_type = PETSc.Options().getString(options_prefix + "mat_type", "cusparse")

# Convert matrix to ajicusparse
with PETSc.Log.Event("Event: matrix offload"):
P_cu = P.convert(mat_type='aijcusparse') # todo

# Transfer nullspace
P_cu.setNullSpace(P.getNullSpace())
tnullsp = P.getTransposeNullSpace()
if tnullsp.handle != 0:
P_cu.setTransposeNullSpace(tnullsp)
P_cu.setNearNullSpace(P.getNearNullSpace())

# PC object set-up
pc = PETSc.PC().create(comm=outer_pc.comm)
pc.incrementTabLevel(1, parent=outer_pc)

# We set a DM and an appropriate SNESContext on the constructed PC
# so one can do e.g. multigrid or patch solves.
dm = outer_pc.getDM()
self._ctx_ref = self.new_snes_ctx(
outer_pc, a, bcs, mat_type,
fcp=fcp, options_prefix=options_prefix
)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You might not need to create a new _SNESContext here, but instead just grab it from the parent PC. I guess most of the symbolic stuff involving the form above wouldn't be required anymore, so this will look very different from AssembledPC


pc.setDM(dm)
pc.setOptionsPrefix(options_prefix)
pc.setOperators(A, P_cu)
self.pc = pc
with dmhooks.add_hooks(dm, self, appctx=self._ctx_ref, save=False):
pc.setFromOptions()

def update(self, pc):
_, P = pc.getOperators()
_, P_cu = self.pc.getOperators()
P.copy(P_cu)

def form(self, pc, test, trial):
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This method is already inherited from PCBase

_, P = pc.getOperators()
if P.getType() == "python":
context = P.getPythonContext()
return (context.a, context.row_bcs)
else:
context = dmhooks.get_appctx(pc.getDM())
return (context.Jp or context.J, context._problem.bcs)

# Convert vectors to CUDA, solve and get solution on CPU back
def apply(self, pc, x, y):
with PETSc.Log.Event("Event: apply offload"): #
dm = pc.getDM()
with dmhooks.add_hooks(dm, self, appctx=self._ctx_ref):
with PETSc.Log.Event("Event: vectors offload"):
y_cu = PETSc.Vec() # begin
y_cu.createCUDAWithArrays(y)
x_cu = PETSc.Vec()
# Passing a vec into another vec doesnt work because original is locked
x_cu.createCUDAWithArrays(x.array_r)
with PETSc.Log.Event("Event: solve"):
self.pc.apply(x_cu, y_cu) #
with PETSc.Log.Event("Event: vectors copy back"):
y.copy(y_cu) #

def applyTranspose(self, pc, X, Y):
raise NotImplementedError

def view(self, pc, viewer=None):
super().view(pc, viewer)
print("viewing PC")
if hasattr(self, "pc"):
viewer.printfASCII("PC to solve on GPU\n")
self.pc.view(viewer)
25 changes: 12 additions & 13 deletions firedrake/solving.py
Original file line number Diff line number Diff line change
Expand Up @@ -252,19 +252,18 @@ def _la_solve(A, x, b, **kwargs):
options_prefix=options_prefix)
if isinstance(x, firedrake.Vector):
x = x.function
# linear MG doesn't need RHS, supply zero.
L = 0
aP = None if P is None else P.a
lvp = vs.LinearVariationalProblem(A.a, L, x, bcs=A.bcs, aP=aP)
mat_type = A.mat_type
pmat_type = mat_type if P is None else P.mat_type
appctx = solver_parameters.get("appctx", {})
ctx = solving_utils._SNESContext(lvp,
mat_type=mat_type,
pmat_type=pmat_type,
appctx=appctx,
options_prefix=options_prefix,
pre_apply_bcs=pre_apply_bcs)
if not isinstance(A, firedrake.matrix.AssembledMatrix):
# linear MG doesn't need RHS, supply zero.
lvp = vs.LinearVariationalProblem(a=A.a, L=0, u=x, bcs=A.bcs)
mat_type = A.mat_type
appctx = solver_parameters.get("appctx", {})
ctx = solving_utils._SNESContext(lvp,
mat_type=mat_type,
pmat_type=mat_type,
appctx=appctx,
options_prefix=options_prefix)
else:
ctx = None
dm = solver.ksp.dm

with dmhooks.add_hooks(dm, solver, appctx=ctx):
Expand Down