diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml new file mode 100644 index 0000000000..7196b121ca --- /dev/null +++ b/.github/actionlint.yaml @@ -0,0 +1,4 @@ +self-hosted-runner: + labels: + # Custom label for GPU-enabled self-hosted runners + - gpu \ No newline at end of file diff --git a/.github/workflows/core.yml b/.github/workflows/core.yml index 764e1c90fa..32f0e02f4b 100644 --- a/.github/workflows/core.yml +++ b/.github/workflows/core.yml @@ -23,6 +23,10 @@ on: description: Whether to test using macOS type: boolean default: false + test_gpu: + description: Whether to test using CUDA-enabled PETSc + type: boolean + default: false deploy_website: description: Whether to deploy the website type: boolean @@ -54,6 +58,10 @@ on: description: Whether to test using macOS type: boolean default: false + test_gpu: + description: Whether to test using CUDA-enabled PETSc + type: boolean + default: false deploy_website: description: Whether to deploy the website type: boolean @@ -319,7 +327,12 @@ jobs: matrix.arch == 'default' run: | . venv/bin/activate - git clone --depth 1 https://github.com/firedrakeproject/gusto.git gusto-repo + if [ ${{ inputs.target_branch }} = 'release' ]; then + GUSTO_BRANCH='main' + else + GUSTO_BRANCH='future' + fi + git clone --depth 1 https://github.com/firedrakeproject/gusto.git gusto-repo --branch $GUSTO_BRANCH pip install --verbose ./gusto-repo python -m pytest -n 8 --verbose \ gusto-repo/integration-tests/balance/test_saturated_balance.py \ @@ -460,6 +473,137 @@ jobs: run: | find . 
-delete + test_gpu: + name: Build and test Firedrake (Linux CUDA) + runs-on: [self-hosted, Linux, gpu] + container: + image: ubuntu:latest + options: --gpus all + if: inputs.test_gpu + env: + OMPI_ALLOW_RUN_AS_ROOT: 1 + OMPI_ALLOW_RUN_AS_ROOT_CONFIRM: 1 + OMP_NUM_THREADS: 1 + OPENBLAS_NUM_THREADS: 1 + FIREDRAKE_CI: 1 + PYOP2_SPMD_STRICT: 1 + # Disable fast math as it exposes compiler bugs + PYOP2_CFLAGS: -fno-fast-math + # NOTE: One should occasionally update test_durations.json by running + # 'make test_durations' inside a 'firedrake:latest' Docker image. + EXTRA_PYTEST_ARGS: --splitting-algorithm least_duration --timeout=600 --timeout-method=thread -o faulthandler_timeout=660 --durations-path=./firedrake-repo/tests/test_durations.json --durations=50 + PYTEST_MPI_MAX_NPROCS: 8 + # Prevent PETSc from exiting with an error due to using non-GPU aware system MPI + PETSC_OPTIONS: -use_gpu_aware_mpi 0 + steps: + - name: Confirm Nvidia GPUs are enabled + # The presence of the nvidia-smi command indicates that the Nvidia drivers have + # successfully been imported into the container, there is no point continuing + # if nvidia-smi is not present + run: nvidia-smi + + - name: Fix HOME + # For unknown reasons GitHub actions overwrite HOME to /github/home + # which will break everything unless fixed + # (https://github.com/actions/runner/issues/863) + run: echo "HOME=/root" >> "$GITHUB_ENV" + + + # Git is needed for actions/checkout and Python for firedrake-configure + # curl needed for adding new deb repositories to ubuntu + - name: Install system dependencies (1) + run: | + apt-get update + apt-get -y install git python3 curl + + + - name: Pre-run cleanup + # Make sure the current directory is empty + run: find . 
-delete + + - uses: actions/checkout@v5 + with: + path: firedrake-repo + ref: ${{ inputs.source_ref }} + + - name: Add Nvidia CUDA deb repositories + run: | + deburl=$( python3 ./firedrake-repo/scripts/firedrake-configure --show-extra-repo-pkg-url --gpu-arch cuda ) + debfile=$( basename "${deburl}" ) + curl -fsSLO "${deburl}" + dpkg -i "${debfile}" + apt-get update + + - name: Install system dependencies (2) + run: | + apt-get -y install \ + $(python3 ./firedrake-repo/scripts/firedrake-configure --arch default --gpu-arch cuda --show-system-packages) + apt-get -y install python3-venv + : # Dependencies needed to run the test suite + apt-get -y install fonts-dejavu graphviz graphviz-dev parallel poppler-utils + + - name: Install PETSc + env: + EXTRA_OPTIONS: -use_gpu_aware_mpi 0 + run: | + if [ ${{ inputs.target_branch }} = 'release' ]; then + git clone --depth 1 \ + --branch $(python3 ./firedrake-repo/scripts/firedrake-configure --gpu-arch cuda --show-petsc-version) \ + https://gitlab.com/petsc/petsc.git + else + git clone --depth 1 https://gitlab.com/petsc/petsc.git + fi + cd petsc + python3 ../firedrake-repo/scripts/firedrake-configure \ + --arch default --gpu-arch cuda --show-petsc-configure-options | \ + xargs -L1 ./configure --with-make-np=4 + make + make check + { + echo "PETSC_DIR=/__w/firedrake/firedrake/petsc" + echo "PETSC_ARCH=arch-firedrake-default-cuda" + echo "SLEPC_DIR=/__w/firedrake/firedrake/petsc/arch-firedrake-default-cuda" + } >> "$GITHUB_ENV" + + - name: Install Firedrake + id: install + run: | + export $(python3 ./firedrake-repo/scripts/firedrake-configure --arch default --gpu-arch cuda --show-env) + python3 -m venv venv + . 
venv/bin/activate + + : # Empty the pip cache to ensure that everything is compiled from scratch + pip cache purge + + if [ ${{ inputs.target_branch }} = 'release' ]; then + EXTRA_PIP_FLAGS='' + else + : # Install build dependencies + pip install "$PETSC_DIR"/src/binding/petsc4py + pip install -r ./firedrake-repo/requirements-build.txt + + : # We have to pass '--no-build-isolation' to use a custom petsc4py + EXTRA_PIP_FLAGS='--no-build-isolation' + fi + + pip install --verbose $EXTRA_PIP_FLAGS \ + --no-binary h5py \ + './firedrake-repo[check]' + + firedrake-clean + pip list + + - name: Run smoke tests + run: | + . venv/bin/activate + firedrake-check + timeout-minutes: 10 + + - name: Post-run cleanup + if: always() + run: | + find . -delete + lint: name: Lint codebase runs-on: ubuntu-latest diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index a1430b57e7..6b63f97cae 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -12,4 +12,6 @@ jobs: target_branch: ${{ github.base_ref }} # Only run macOS tests if the PR is labelled 'macOS' test_macos: ${{ contains(github.event.pull_request.labels.*.name, 'macOS') }} + # Only run GPU tests if the PR is labelled 'gpu' + test_gpu: ${{ contains(github.event.pull_request.labels.*.name, 'gpu') }} secrets: inherit diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 87ce86f32c..6e17222d27 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -13,6 +13,7 @@ jobs: source_ref: ${{ github.ref_name }} target_branch: ${{ github.ref_name }} test_macos: true + test_gpu: true deploy_website: true secrets: inherit diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 545cd45965..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,5 +0,0 @@ -# This file contains all the non-standard files that need to be included -# in the source distribution. 
- -recursive-include firedrake/cython *.pyx *.pxi -recursive-exclude firedrake/cython *.c diff --git a/demos/adaptive_multigrid/adaptive_convergence.png b/demos/adaptive_multigrid/adaptive_convergence.png new file mode 100644 index 0000000000..295d8de024 Binary files /dev/null and b/demos/adaptive_multigrid/adaptive_convergence.png differ diff --git a/demos/adaptive_multigrid/adaptive_multigrid.py.rst b/demos/adaptive_multigrid/adaptive_multigrid.py.rst new file mode 100644 index 0000000000..44ff4eac0f --- /dev/null +++ b/demos/adaptive_multigrid/adaptive_multigrid.py.rst @@ -0,0 +1,255 @@ +Adaptive Multigrid Methods using AdaptiveMeshHierarchy +====================================================== + + +Contributed by Anurag Rao. + +The purpose of this demo is to show how to use Firedrake's multigrid solver on a hierarchy of adaptively refined Netgen meshes. +We will first have a look at how to use the :class:`~.AdaptiveMeshHierarchy` to construct the mesh hierarchy with Netgen meshes, then we will consider a solution to the Poisson problem on an L-shaped domain. +Finally, we will show how to use the :class:`~.AdaptiveMeshHierarchy` and :class:`~.AdaptiveTransferManager` to construct a scalable solver. The :class:`~.AdaptiveMeshHierarchy` contains information of the mesh hierarchy and the parent child relations between the meshes. +The :class:`~.AdaptiveTransferManager` deals with the transfer operator logic across any given levels in the hierarchy. +We begin by importing the necessary libraries :: + + from firedrake import * + from netgen.occ import * + import numpy + +Constructing the Mesh Hierarchy +------------------------------- +We first must construct the domain over which we will solve the problem. For a more comprehensive demo on how to use Open Cascade Technology (OCC) and Constructive Solid Geometry (CSG), +see `Netgen integration in Firedrake `_. 
+We begin with the L-shaped domain, which we build as the union of two rectangles: :: + + rect1 = WorkPlane(Axes((0,0,0), n=Z, h=X)).Rectangle(1,2).Face() + rect2 = WorkPlane(Axes((0,1,0), n=Z, h=X)).Rectangle(2,1).Face() + L = rect1 + rect2 + + geo = OCCGeometry(L, dim=2) + ngmsh = geo.GenerateMesh(maxh=0.5) + mesh = Mesh(ngmsh) + +It is important to convert the initial Netgen mesh into a Firedrake mesh before constructing the :class:`~.AdaptiveMeshHierarchy`. To call the constructor to the hierarchy, we must pass the initial mesh. Our initial mesh looks like this: + +.. figure:: initial_mesh.png + :align: center + :alt: Initial mesh. + +We will also initialize the :class:`~.AdaptiveTransferManager` here: :: + + amh = AdaptiveMeshHierarchy(mesh) + atm = AdaptiveTransferManager() + +Poisson Problem +--------------- +Now we can define a simple Poisson problem + +.. math:: + + - \nabla^2 u = f \text{ in } \Omega, \quad u = 0 \text{ on } \partial \Omega. + +Our approach strongly follows the similar problem in this `lecture course `_. We define the function ``solve_poisson``. The first lines correspond to finding a solution in the CG1 space. The right-hand side is set to be the constant function equal to 1. Since we want Dirichlet boundary conditions, we construct the :class:`~.DirichletBC` object and apply it to the entire boundary: :: + + def solve_poisson(mesh, params): + V = FunctionSpace(mesh, "CG", 1) + v = TestFunction(V) + u = TrialFunction(V) + uh = Function(V, name="solution") + bcs = [DirichletBC(V, 0, "on_boundary")] + f = Constant(1) + + a = inner(grad(u), grad(v))*dx + L = inner(f, v)*dx + + problem = LinearVariationalProblem(a, L, uh, bcs) + solver = LinearVariationalSolver(problem, solver_parameters=params) + + solver.set_transfer_manager(atm) + solver.solve() + + its = solver.snes.getLinearSolveIterations() + return uh, its + +Note the code after the construction of the :class:`~.LinearVariationalProblem`. 
To use the :class:`~.AdaptiveMeshHierarchy` with the existing Firedrake solver, we have to set the :class:`~.AdaptiveTransferManager` as the transfer manager of the multigrid solver. +Since we are using linear Lagrange elements, we will employ Jacobi as the multigrid relaxation, which we define with :: + + solver_params = { + "mat_type": "matfree", + "ksp_type": "cg", + "pc_type": "mg", + "mg_levels": { + "ksp_type": "chebyshev", + "ksp_max_it": 1, + "pc_type": "jacobi", + }, + "mg_coarse": { + "mat_type": "aij", + "pc_type": "lu", + }, + } + +Alternatively for high-order CG elements, it is recommended to use patch relaxation +to achieve degree-independent multigrid convergence. +For more information +see :doc:`Using patch relaxation for multigrid `. +The initial solution is shown below. + +.. figure:: solution_l1.png + :align: center + :alt: Initial Solution from multigrid with initial mesh. + + +Adaptive Mesh Refinement +------------------------ +In this section we will discuss how to adaptively refine select elements and add the newly refined mesh into the :class:`~.AdaptiveMeshHierarchy`. +For this problem, we will be using the Babuška-Rheinbolt a-posteriori estimate for an element: + +.. math:: + \eta_K^2 = h_K^2 \int_K \| f + \nabla^2 u_h \|^2 \mathrm{d}x + \frac{h_K}{2} \int_{\partial K \setminus \partial \Omega} \left[[ \nabla u_h \cdot \mathbf{n} \right]]^2 \mathrm{d}s, + +where :math:`K` is the element, :math:`h_K` is the diameter of the element, :math:`\mathbf{n}` is the outward-facing normal, and :math:`\left[[ \cdot \right]]` is the jump operator. The a-posteriori estimator is computed using the solution at the current level :math:`h`. Integrating over the domain and using the fact that the components of the estimator are piecewise constant on each cell, we can transform the above estimator into the variational problem + +.. 
math:: + \int_\Omega \eta_K^2 q \,\mathrm{d}x = \int_\Omega \sum_K h_K^2 \int_K (f + \text{div} (\text{grad} u_h) )^2 \,\mathrm{d}x q \,\mathrm{d}x + \int_\Omega \sum_K \frac{h_K}{2} \int_{\partial K \setminus \partial \Omega} \left[[ \nabla u_h \cdot \mathbf{n} \right]]^2 \,\mathrm{d}s q \,\mathrm{d}x \quad \forall\, q \in \mathrm{DG}_0 + +Our approach will be to compute the estimator over all elements and selectively choose to refine only those that contribute most to the error. To compute the error estimator, we use the function below to solve the variational formulation of the error estimator. Since our estimator is a constant per element, we use a DG0 function space. :: + + def estimate_error(mesh, uh): + Q = FunctionSpace(mesh, "DG", 0) + eta_sq = Function(Q) + p = TrialFunction(Q) + q = TestFunction(Q) + f = Constant(1) + residual = f + div(grad(uh)) + + # symbols for mesh quantities + h = CellDiameter(mesh) + n = FacetNormal(mesh) + vol = CellVolume(mesh) + + # compute cellwise error estimator + a = inner(p, q / vol) * dx + L = (inner(residual**2, q * h**2) * dx + + inner(jump(grad(uh), n)**2, avg(q * h)) * dS + ) + + sp = {"mat_type": "matfree", "ksp_type": "preonly", "pc_type": "jacobi"} + solve(a == L, eta_sq, solver_parameters=sp) + + # compute eta from eta^2 + eta = Function(Q).interpolate(sqrt(eta_sq)) + + # compute estimate for error in energy norm + with eta.dat.vec_ro as eta_: + error_est = eta_.norm() + return eta, error_est + +The next step is to choose which elements to refine. For this we use a simplified variant of Dörfler marking :cite:`Dorfler1996`: + +.. math:: + \eta_K \geq \theta \text{max}_L \eta_L + +The logic is to select an element :math:`K` to refine if the estimator is greater than some factor :math:`\theta` of the maximum error estimate of the mesh, where :math:`\theta` ranges from 0 to 1. In our code we choose :math:`\theta=0.5`. +With these helper functions complete, we can solve the system iteratively. 
Here ``refinements`` is the number of total levels we want to perform multigrid on. We will solve for 15 levels. At every level :math:`l`, we first compute the solution using multigrid up to level :math:`l`. We then use the current approximation of the solution to estimate the error across the mesh. Finally, we adaptively refine the mesh and repeat. :: + + theta = 0.5 + refinements = 15 + est_errors = [] + sqrt_dofs = [] + mg_iterations = [] + for level in range(refinements): + print(f"level {level}") + + mesh = amh[-1] + uh, its = solve_poisson(mesh, solver_params) + VTKFile(f"output/adaptive_loop_{level}.pvd").write(uh) + + (eta, error_est) = estimate_error(mesh, uh) + VTKFile(f"output/eta_{level}.pvd").write(eta) + + est_errors.append(error_est) + sqrt_dofs.append(uh.function_space().dim() ** 0.5) + mg_iterations.append(its) + + print(f" ||u - u_h|| <= C * {error_est}") + if len(est_errors) > 1: + rates = -numpy.diff(numpy.log(est_errors)) / numpy.diff(numpy.log(sqrt_dofs)) + print(f" rate = {rates[-1]}") + + if level != refinements - 1: + amh.adapt(eta, theta) + +To perform Dörfler marking, refine the current mesh, and add the mesh to the :class:`~.AdaptiveMeshHierarchy`, we use the ``amh.adapt(eta, theta)`` method. In this method the input is the recently computed error estimator ``eta`` and the Dörfler marking parameter ``theta``. The method always performs this on the current fine mesh in the hierarchy. There is another method for adding a mesh to the hierarchy: ``amh.add_mesh(mesh)``. In this method, refinement on the mesh is performed externally by some custom procedure and the resulting mesh directly gets added to the hierarchy. +The meshes now refine according to the error estimator. The error estimators at levels 3, 6, and 15 are shown below. Zooming into the vertex of the L-shape at level 15 shows the error indicator remains strongest there. Further refinements will focus on that area. 
+ ++-------------------------------+-------------------------------+-------------------------------+ +| .. figure:: eta_l3.png | .. figure:: eta_l6.png | .. figure:: eta_l15.png | +| :align: center | :align: center | :align: center | +| :height: 250px | :height: 250px | :height: 250px | +| :alt: Eta at level 3 | :alt: Eta at level 6 | :alt: Eta at level 15 | +| | | | +| *Level 3* | *Level 6* | *Level 15* | ++-------------------------------+-------------------------------+-------------------------------+ + +The solutions at level 4 and 15 are shown below. + ++------------------------------------+------------------------------------+ +| .. figure:: solution_l4.png | .. figure:: solution_l15.png | +| :align: center | :align: center | +| :height: 300px | :height: 300px | +| :alt: Solution, level 4 | :alt: Solution, level 15 | +| | | +| *MG solution at level 4* | *MG solution at level 15* | ++------------------------------------+------------------------------------+ + +The convergence follows the expected optimal behavior: :: + + from matplotlib import pyplot as plt + + dofs = numpy.array(sqrt_dofs) ** 2 + opt_errors = est_errors[0] * (sqrt_dofs[0] / numpy.array(sqrt_dofs)) + plt.loglog(dofs, est_errors, '-o', markersize = 3, label="Estimated error") + plt.loglog(dofs, opt_errors, '--', markersize = 3, label="Optimal convergence") + plt.ylabel("Error estimate of the energy norm") + plt.xlabel("Number of degrees of freedom") + plt.legend() + plt.savefig("output/adaptive_convergence.png") + +.. figure:: adaptive_convergence.png + :align: center + :alt: Convergence of the error estimator. + +Moreover, the multigrid iteration count is robust to the level of refinement :: + + print(" Level\t | Iterations") + print("---------------------") + for level, its in enumerate(mg_iterations): + print(f" {level}\t | {its}") + +.. 
+ +======== ================ + Level Iterations +======== ================ + 0 2 + 1 8 + 2 8 + 3 8 + 4 8 + 5 8 + 6 8 + 7 8 + 8 8 + 9 9 + 10 9 + 11 9 + 12 9 + 13 9 + 14 9 +======== ================ + +A runnable python version of this demo can be found :demo:`here`. + +.. rubric:: References + +.. bibliography:: demo_references.bib + :filter: docname in docnames diff --git a/demos/adaptive_multigrid/eta_l15.png b/demos/adaptive_multigrid/eta_l15.png new file mode 100644 index 0000000000..05efad50c3 Binary files /dev/null and b/demos/adaptive_multigrid/eta_l15.png differ diff --git a/demos/adaptive_multigrid/eta_l3.png b/demos/adaptive_multigrid/eta_l3.png new file mode 100644 index 0000000000..ccc80c30f4 Binary files /dev/null and b/demos/adaptive_multigrid/eta_l3.png differ diff --git a/demos/adaptive_multigrid/eta_l6.png b/demos/adaptive_multigrid/eta_l6.png new file mode 100644 index 0000000000..f71d2c5788 Binary files /dev/null and b/demos/adaptive_multigrid/eta_l6.png differ diff --git a/demos/adaptive_multigrid/initial_mesh.png b/demos/adaptive_multigrid/initial_mesh.png new file mode 100644 index 0000000000..7ea4bb85bc Binary files /dev/null and b/demos/adaptive_multigrid/initial_mesh.png differ diff --git a/demos/adaptive_multigrid/solution_l1.png b/demos/adaptive_multigrid/solution_l1.png new file mode 100644 index 0000000000..ff6e6ed997 Binary files /dev/null and b/demos/adaptive_multigrid/solution_l1.png differ diff --git a/demos/adaptive_multigrid/solution_l15.png b/demos/adaptive_multigrid/solution_l15.png new file mode 100644 index 0000000000..e2436d74c0 Binary files /dev/null and b/demos/adaptive_multigrid/solution_l15.png differ diff --git a/demos/adaptive_multigrid/solution_l4.png b/demos/adaptive_multigrid/solution_l4.png new file mode 100644 index 0000000000..d457e29979 Binary files /dev/null and b/demos/adaptive_multigrid/solution_l4.png differ diff --git a/demos/boussinesq/boussinesq.py.rst b/demos/boussinesq/boussinesq.py.rst index 
edfdf3c1a5..5cc6708cf0 100644 --- a/demos/boussinesq/boussinesq.py.rst +++ b/demos/boussinesq/boussinesq.py.rst @@ -184,7 +184,7 @@ implements a boundary condition that fixes a field at a single point. :: # Take the basis function with the largest abs value at bc_point v = TestFunction(V) - F = assemble(Interpolate(inner(v, v), Fvom)) + F = assemble(interpolate(inner(v, v), Fvom)) with F.dat.vec as Fvec: max_index, _ = Fvec.max() nodes = V.dof_dset.lgmap.applyInverse([max_index]) diff --git a/demos/demo_references.bib b/demos/demo_references.bib index 3c0d5e61a2..5792042258 100644 --- a/demos/demo_references.bib +++ b/demos/demo_references.bib @@ -373,6 +373,17 @@ @article{Brubeck2024 year = {2024} } +@article{Dorfler1996, + title={A convergent adaptive algorithm for Poisson’s equation}, + author={D{\"o}rfler, Willy}, + journal={SIAM Journal on Numerical Analysis}, + volume={33}, + number={3}, + pages={1106--1124}, + year={1996}, + publisher={SIAM} +} + @Article{Farrell2015, author = {Patrick E. Farrell and \'Asgeir Birkisson and Simon W. Funke}, title = {{Deflation techniques for finding distinct solutions of nonlinear partial differential equations}}, diff --git a/demos/extruded_shallow_water/extruded_shallow_water.py.rst b/demos/extruded_shallow_water/extruded_shallow_water.py.rst new file mode 100644 index 0000000000..d9f752d7d7 --- /dev/null +++ b/demos/extruded_shallow_water/extruded_shallow_water.py.rst @@ -0,0 +1,178 @@ +Linear Shallow Water Equations on an Extruded Mesh using a Strang timestepping scheme +===================================================================================== + +This demo solves the linear shallow water equations on an extruded mesh +using a Strang timestepping scheme. + +The equations are solved in a domain :math:`\Omega` utilizing a 2D base mesh +that is extruded vertically to form a 3D volume. 
+ +As usual, we start by importing Firedrake: :: + + from firedrake import * + +Mesh Generation +---------------- + +We use an *extruded* mesh, where the base mesh is a :math:`2^5 \times 2^5` unit square +with 5 evenly-spaced vertical layers. This results in a 3D volume composed of +prisms. :: + + power = 5 + m = UnitSquareMesh(2 ** power, 2 ** power) + layers = 5 + + mesh = ExtrudedMesh(m, layers, layer_height=0.25) + +Function Spaces +---------------- +For the velocity field, we use an :math:`H(\mathrm{div})`-conforming function space +constructed as the outer product of a 2D BDM space and a 1D DG space. This ensures +that the normal component of the velocity is continuous across element boundaries +in the horizontal directions, which is important for accurately capturing fluxes. :: + + horiz = FiniteElement("BDM", "triangle", 1) + vert = FiniteElement("DG", "interval", 0) + prod = HDiv(TensorProductElement(horiz, vert)) + W = FunctionSpace(mesh, prod) + +We also define a pressure space :math:`X` using piecewise constant discontinuous +Galerkin elements, and a plotting space :math:`X_{\text{plot}}` using continuous +Galerkin elements for better visualization. :: + + X = FunctionSpace(mesh, "DG", 0, vfamily="DG", vdegree=0) + Xplot = FunctionSpace(mesh, "CG", 1, vfamily="Lagrange", vdegree=1) + +Initial Conditions +------------------- + +We define our functions for velocity and pressure fields. The initial pressure field +is set to a prescribed sine function to create a wave-like disturbance. :: + + # Define starting field + u_0 = Function(W) + u_h = Function(W) + u_1 = Function(W) + p_0 = Function(X) + p_1 = Function(X) + p_plot = Function(Xplot) + x, y, z = SpatialCoordinate(mesh) + p_0.interpolate(sin(4*pi*x)*sin(2*pi*x)) + + T = 0.5 + t = 0 + dt = 0.0025 + + file = VTKFile("lsw3d.pvd") + +Before starting the time-stepping loop, we project the initial pressure field +into the plotting space for visualization. 
:: + + p_trial = TrialFunction(Xplot) + p_test = TestFunction(Xplot) + solve(p_trial * p_test * dx == p_0 * p_test * dx, p_plot) + file.write(p_plot, time=t) + + E_0 = assemble(0.5 * p_0 * p_0 * dx + 0.5 * dot(u_0, u_0) * dx) + +Time-Stepping Loop +-------------------- + +We evolve the system in time using a Strang splitting method. In each time step, +we perform a half-step update of the velocity field, a full-step update of the pressure field, +and then another half-step update of the velocity field. This approach helps to +maintain stability and accuracy. + +**Step 1: Velocity Half-Step** +First, we solve for an intermediate velocity :math:`u_h` using the pressure +from the start of the step :math:`p_0`. Mathematically, we find :math:`u_h \in W` such that: + +.. math:: + + \int_{\Omega} w \cdot u_h \, dx = \int_{\Omega} w \cdot u_0 \, dx + \frac{\Delta t}{2} \int_{\Omega} (\nabla \cdot w) p_0 \, dx \quad \forall w \in W + +.. code-block:: python + + a_1 = dot(w, u) * dx + L_1 = dot(w, u_0) * dx + 0.5 * dt * div(w) * p_0 * dx + solve(a_1 == L_1, u_h) + +**Step 2: Pressure Full-Step** +Next, we update the pressure to :math:`p_1` using the divergence of the +intermediate velocity :math:`u_h`. We find :math:`p_1 \in X` such that: + +.. math:: + + \int_{\Omega} \phi \, p_1 \, dx = \int_{\Omega} \phi \, p_0 \, dx - \Delta t \int_{\Omega} \phi (\nabla \cdot u_h) \, dx \quad \forall \phi \in X + +.. code-block:: python + + a_2 = phi * p * dx + L_2 = phi * p_0 * dx - dt * phi * div(u_h) * dx + solve(a_2 == L_2, p_1) + +**Step 3: Velocity Final Half-Step** +Finally, we compute the velocity at the end of the time step :math:`u_1` using +the updated pressure :math:`p_1`. We find :math:`u_1 \in W` such that: + +.. math:: + + \int_{\Omega} w \cdot u_1 \, dx = \int_{\Omega} w \cdot u_h \, dx + \frac{\Delta t}{2} \int_{\Omega} (\nabla \cdot w) p_1 \, dx \quad \forall w \in W + +.. 
code-block:: python + + a_3 = dot(w, u) * dx + L_3 = dot(w, u_h) * dx + 0.5 * dt * div(w) * p_1 * dx + solve(a_3 == L_3, u_1) + +Implementation in the Simulation Loop +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Here follows the complete implementation of the time-stepping loop, including the updates of the velocity and pressure fields, +as well as the projection of the pressure field for visualization at each time step. +We also print the current simulation time at each step for tracking progress. + +.. code-block:: python + + while t < T: + u = TrialFunction(W) + w = TestFunction(W) + a_1 = dot(w, u) * dx + L_1 = dot(w, u_0) * dx + 0.5 * dt * div(w) * p_0 * dx + solve(a_1 == L_1, u_h) + + p = TrialFunction(X) + phi = TestFunction(X) + a_2 = phi * p * dx + L_2 = phi * p_0 * dx - dt * phi * div(u_h) * dx + solve(a_2 == L_2, p_1) + + u = TrialFunction(W) + w = TestFunction(W) + a_3 = dot(w, u) * dx + L_3 = dot(w, u_h) * dx + 0.5 * dt * div(w) * p_1 * dx + solve(a_3 == L_3, u_1) + + u_0.assign(u_1) + p_0.assign(p_1) + t += dt + + # project into P1 x P1 for plotting + p_trial = TrialFunction(Xplot) + p_test = TestFunction(Xplot) + solve(p_trial * p_test * dx == p_0 * p_test * dx, p_plot) + file.write(p_plot, time=t) + print(t) + +Energy Calculation +------------------ + +Finally, we compute and print the total energy of the system at the end of the simulation +and compare it to the initial energy to assess conservation properties. :: + + E_1 = assemble(0.5 * p_0 * p_0 * dx + 0.5 * dot(u_0, u_0) * dx) + print('Initial energy', E_0) + print('Final energy', E_1) + +This demo can be found as a script in +:demo:`extruded_shallow_water.py `. 
\ No newline at end of file diff --git a/demos/extruded_shallow_water/test_extrusion_lsw.py b/demos/extruded_shallow_water/test_extrusion_lsw.py deleted file mode 100644 index be1719dbc1..0000000000 --- a/demos/extruded_shallow_water/test_extrusion_lsw.py +++ /dev/null @@ -1,80 +0,0 @@ -# FIXME: document properly -"""Demo of Linear Shallow Water, with Strang timestepping and silly BCs, but -a sin(x)*sin(y) solution that doesn't care about the silly BCs""" - -from firedrake import * - - -power = 5 -# Create mesh and define function space -m = UnitSquareMesh(2 ** power, 2 ** power) -layers = 5 - -# Populate the coordinates of the extruded mesh by providing the -# coordinates as a field. - -mesh = ExtrudedMesh(m, layers, layer_height=0.25) - -horiz = FiniteElement("BDM", "triangle", 1) -vert = FiniteElement("DG", "interval", 0) -prod = HDiv(OuterProductElement(horiz, vert)) -W = FunctionSpace(mesh, prod) - -X = FunctionSpace(mesh, "DG", 0, vfamily="DG", vdegree=0) -Xplot = FunctionSpace(mesh, "CG", 1, vfamily="Lagrange", vdegree=1) - -# Define starting field -u_0 = Function(W) -u_h = Function(W) -u_1 = Function(W) -p_0 = Function(X) -p_1 = Function(X) -p_plot = Function(Xplot) -x, y = SpatialCoordinate(m) -p_0.interpolate(sin(4*pi*x)*sin(2*pi*x)) - -T = 0.5 -t = 0 -dt = 0.0025 - -file = VTKFile("lsw3d.pvd") -p_trial = TrialFunction(Xplot) -p_test = TestFunction(Xplot) -solve(p_trial * p_test * dx == p_0 * p_test * dx, p_plot) -file << p_plot, t - -E_0 = assemble(0.5 * p_0 * p_0 * dx + 0.5 * dot(u_0, u_0) * dx) - -while t < T: - u = TrialFunction(W) - w = TestFunction(W) - a_1 = dot(w, u) * dx - L_1 = dot(w, u_0) * dx + 0.5 * dt * div(w) * p_0 * dx - solve(a_1 == L_1, u_h) - - p = TrialFunction(X) - phi = TestFunction(X) - a_2 = phi * p * dx - L_2 = phi * p_0 * dx - dt * phi * div(u_h) * dx - solve(a_2 == L_2, p_1) - - u = TrialFunction(W) - w = TestFunction(W) - a_3 = dot(w, u) * dx - L_3 = dot(w, u_h) * dx + 0.5 * dt * div(w) * p_1 * dx - solve(a_3 == L_3, u_1) - - 
u_0.assign(u_1) - p_0.assign(p_1) - t += dt - - # project into P1 x P1 for plotting - p_trial = TrialFunction(Xplot) - p_test = TestFunction(Xplot) - solve(p_trial * p_test * dx == p_0 * p_test * dx, p_plot) - file << p_plot, t - print(t) - -E_1 = assemble(0.5 * p_0 * p_0 * dx + 0.5 * dot(u_0, u_0) * dx) -print('Initial energy', E_0) -print('Final energy', E_1) diff --git a/demos/ma-demo/ma-demo.py.rst b/demos/ma-demo/ma-demo.py.rst index f8717d4d6e..432042a20d 100644 --- a/demos/ma-demo/ma-demo.py.rst +++ b/demos/ma-demo/ma-demo.py.rst @@ -111,7 +111,7 @@ a Function, w. :: n = FacetNormal(mesh) - I = Identity(mesh.geometric_dimension()) + I = Identity(mesh.geometric_dimension) L = inner(sigma, tau)*dx L += (inner(div(tau), grad(u))*dx diff --git a/demos/multicomponent/multicomponent.py.rst b/demos/multicomponent/multicomponent.py.rst index f041d028f2..c21ebbc211 100644 --- a/demos/multicomponent/multicomponent.py.rst +++ b/demos/multicomponent/multicomponent.py.rst @@ -533,7 +533,7 @@ mathematically valid to do this):: # Take the basis function with the largest abs value at bc_point v = TestFunction(V) - F = assemble(Interpolate(inner(v, v), Fvom)) + F = assemble(interpolate(inner(v, v), Fvom)) with F.dat.vec as Fvec: max_index, _ = Fvec.max() nodes = V.dof_dset.lgmap.applyInverse([max_index]) diff --git a/demos/netgen/netgen_mesh.py.rst b/demos/netgen/netgen_mesh.py.rst index 480220cbd0..6d0f488c83 100755 --- a/demos/netgen/netgen_mesh.py.rst +++ b/demos/netgen/netgen_mesh.py.rst @@ -112,7 +112,7 @@ In code this becomes: :: Now we are ready to assemble the stiffness matrix for the problem. Since we want to enforce Dirichlet boundary conditions we construct a ``DirichletBC`` object and we use the ``GetRegionNames`` method from the Netgen mesh in order to map the label we have given when describing the geometry to the PETSc ``DMPLEX`` IDs. 
In particular if we look for the IDs of boundary element labeled either "line" or "curve" we would get:: - labels = [i+1 for i, name in enumerate(ngmsh.GetRegionNames(codim=1)) if name in ["line","curve"]] + labels = [i+1 for i, name in enumerate(ngmsh.GetRegionNames(codim=1)) if name in ["line", "curve"]] bc = DirichletBC(V, 0, labels) print(labels) @@ -151,7 +151,7 @@ Then a SLEPc Eigenvalue Problem Solver (``EPS``) is initialised and set up to us u = TrialFunction(V) v = TestFunction(V) a = inner(grad(u), grad(v))*dx - m = (u*v)*dx + m = inner(u, v)*dx uh = Function(V) bc = DirichletBC(V, 0, labels) A = assemble(a, bcs=bc) @@ -217,7 +217,7 @@ In order to do so we begin by computing the value of the indicator using a piece sum_marked_eta += sum(eta[new_marked]) marked += new_marked frac -= delfrac - markedVec0.getArray()[:] = 1.0*marked[:] + markedVec0.getArray()[:] = marked[:] sct(markedVec0, markedVec, mode=PETSc.Scatter.Mode.REVERSE) return mark @@ -225,7 +225,7 @@ It is now time to define the solve, mark and refine loop that is at the heart of for i in range(max_iterations): - print("level {}".format(i)) + print(f"level {i}") lam, uh, V = Solve(msh, labels) mark = Mark(msh, uh, lam) msh = msh.refine_marked_elements(mark) @@ -332,8 +332,7 @@ As usual, we generate a mesh for the described geometry and use the Firedrake-Ne High-order Meshes ------------------ It is possible to construct high-order meshes for a geometry constructed in Netgen. -In order to do so we need to use the ``curve_field`` method of a Firedrake ``Mesh`` object generated from a Netgen mesh. -In particular, we need to pass the degree of the polynomial field we want to use to parametrise the coordinates of the domain to the ``curve_field`` method, which will return a ``Function`` constructed on a DG space for this purpose. :: +We can set the degree of the geometry via the ``netgen_flags`` keyword argument of the ``Mesh`` constructor. 
:: from netgen.occ import WorkPlane, OCCGeometry import netgen @@ -344,10 +343,10 @@ In particular, we need to pass the degree of the polynomial field we want to use for i in range(6): wp.Line(0.6).Arc(0.4, 60) shape = wp.Face() - ngmesh = OCCGeometry(shape,dim=2).GenerateMesh(maxh=1.) + ngmesh = OCCGeometry(shape, dim=2).GenerateMesh(maxh=1.) else: ngmesh = netgen.libngpy._meshing.Mesh(2) - mesh = Mesh(Mesh(ngmesh, comm=COMM_WORLD).curve_field(4)) + mesh = Mesh(ngmesh, comm=COMM_WORLD, netgen_flags={"degree": 4}) VTKFile("output/MeshExample5.pvd").write(mesh) .. figure:: Example5.png @@ -363,15 +362,16 @@ We will now show how to solve the Poisson problem on a high-order mesh, of order if COMM_WORLD.rank == 0: shape = Sphere(Pnt(0,0,0), 1) - ngmesh = OCCGeometry(shape,dim=3).GenerateMesh(maxh=1.) + ngmesh = OCCGeometry(shape, dim=3).GenerateMesh(maxh=1.) else: ngmesh = netgen.libngpy._meshing.Mesh(3) - mesh = Mesh(Mesh(ngmesh).curve_field(3)) + mesh = Mesh(ngmesh, netgen_flags={"degree": 3}) + # Solving the Poisson problem VTKFile("output/MeshExample6.pvd").write(mesh) x, y, z = SpatialCoordinate(mesh) V = FunctionSpace(mesh, "CG", 3) - f = Function(V).interpolate(1+0*x) + f = Constant(1) u = TrialFunction(V) v = TestFunction(V) a = inner(grad(u), grad(v)) * dx @@ -397,15 +397,15 @@ It is also possible to construct high-order meshes using the ``SplineGeometry``, from mpi4py import MPI if COMM_WORLD.rank == 0: - geo = CSG2d() - circle = Circle(center=(1,1), radius=0.1, bc="curve").Maxh(0.01) - rect = Rectangle(pmin=(0,1), pmax=(1,2), - bottom="b", left="l", top="t", right="r") - geo.Add(rect-circle) - ngmesh = geo.GenerateMesh(maxh=0.2) + geo = CSG2d() + circle = Circle(center=(1,1), radius=0.1, bc="curve").Maxh(0.01) + rect = Rectangle(pmin=(0,1), pmax=(1,2), + bottom="b", left="l", top="t", right="r") + geo.Add(rect-circle) + ngmesh = geo.GenerateMesh(maxh=0.2) else: ngmesh = netgen.libngpy._meshing.Mesh(2) - mesh = 
Mesh(Mesh(ngmesh,comm=COMM_WORLD).curve_field(2)) + mesh = Mesh(ngmesh, comm=COMM_WORLD, netgen_flags={"degree": 2}) VTKFile("output/MeshExample7.pvd").write(mesh) .. figure:: Example7.png diff --git a/demos/saddle_point_pc/saddle_point_systems.py.rst b/demos/saddle_point_pc/saddle_point_systems.py.rst index 540435e0be..cc9117a9fd 100644 --- a/demos/saddle_point_pc/saddle_point_systems.py.rst +++ b/demos/saddle_point_pc/saddle_point_systems.py.rst @@ -180,7 +180,7 @@ Finally, at each mesh size, we print out the number of cells in the mesh and the number of iterations the solver took to converge :: # - print(w.function_space().mesh().num_cells(), solver.snes.ksp.getIterationNumber()) + print(w.function_space().mesh().unique().num_cells(), solver.snes.ksp.getIterationNumber()) The resulting convergence is unimpressive: @@ -282,7 +282,7 @@ applying the action of blocks, so we can use a block matrix format. :: for n in range(8): solver, w = build_problem(n, parameters, block_matrix=True) solver.solve() - print(w.function_space().mesh().num_cells(), solver.snes.ksp.getIterationNumber()) + print(w.function_space().mesh().unique().num_cells(), solver.snes.ksp.getIterationNumber()) The resulting convergence is algorithmically good, however, the larger problems still take a long time. @@ -367,7 +367,7 @@ Let's see what happens. :: for n in range(8): solver, w = build_problem(n, parameters, block_matrix=True) solver.solve() - print(w.function_space().mesh().num_cells(), solver.snes.ksp.getIterationNumber()) + print(w.function_space().mesh().unique().num_cells(), solver.snes.ksp.getIterationNumber()) This is much better, the problem takes much less time to solve and when observing the iteration counts for inverting :math:`S` we can see @@ -422,7 +422,7 @@ and so we no longer need a flexible Krylov method. 
:: for n in range(8): solver, w = build_problem(n, parameters, block_matrix=True) solver.solve() - print(w.function_space().mesh().num_cells(), solver.snes.ksp.getIterationNumber()) + print(w.function_space().mesh().unique().num_cells(), solver.snes.ksp.getIterationNumber()) This results in the following GMRES iteration counts @@ -487,7 +487,7 @@ variable. We can provide it as an :class:`~.AuxiliaryOperatorPC` via a python pr for n in range(8): solver, w = build_problem(n, parameters, aP=None, block_matrix=False) solver.solve() - print(w.function_space().mesh().num_cells(), solver.snes.ksp.getIterationNumber()) + print(w.function_space().mesh().unique().num_cells(), solver.snes.ksp.getIterationNumber()) This actually results in slightly worse convergence than the diagonal approximation we used above. @@ -571,7 +571,7 @@ Let's see what the iteration count looks like now. :: for n in range(8): solver, w = build_problem(n, parameters, aP=riesz, block_matrix=True) solver.solve() - print(w.function_space().mesh().num_cells(), solver.snes.ksp.getIterationNumber()) + print(w.function_space().mesh().unique().num_cells(), solver.snes.ksp.getIterationNumber()) ============== ================== Mesh elements GMRES iterations diff --git a/docker/Dockerfile.vanilla b/docker/Dockerfile.vanilla index bdce0be94a..415bfa62ae 100644 --- a/docker/Dockerfile.vanilla +++ b/docker/Dockerfile.vanilla @@ -132,8 +132,7 @@ ENV SLEPC_DIR=$PETSC_DIR/$PETSC_ARCH ENV PATH="$PETSC_DIR/$PETSC_ARCH/bin:$PATH" ENV HDF5_MPI=ON -ENV CC=mpicc CXX=mpicxx -ENV MPICC=$CC +ENV MPICC=mpicc ENV CFLAGS="-O3 -mtune=generic" CPPFLAGS="-O3 -mtune=generic" # Install Firedrake diff --git a/docs/notebooks/03-elasticity.ipynb b/docs/notebooks/03-elasticity.ipynb index 0dab70b83e..1b68f81361 100644 --- a/docs/notebooks/03-elasticity.ipynb +++ b/docs/notebooks/03-elasticity.ipynb @@ -118,7 +118,7 @@ "f = as_vector([0, -rho*g])\n", "mu = Constant(1)\n", "lambda_ = Constant(0.25)\n", - "Id = 
Identity(mesh.geometric_dimension()) # 2x2 Identity tensor" + "Id = Identity(mesh.geometric_dimension) # 2x2 Identity tensor" ] }, { @@ -285,7 +285,7 @@ " f = as_vector([0, -rho*g])\n", " mu = Constant(1)\n", " lambda_ = Constant(0.25)\n", - " Id = Identity(mesh.geometric_dimension()) # 2x2 Identity tensor\n", + " Id = Identity(mesh.geometric_dimension) # 2x2 Identity tensor\n", " \n", " bc = DirichletBC(V, Constant([0, 0]), 1)\n", " u = TrialFunction(V)\n", @@ -328,7 +328,7 @@ "\u001b[36mFile \u001b[39m\u001b[32mpetsc4py/PETSc/Log.pyx:188\u001b[39m, in \u001b[36mpetsc4py.PETSc.Log.EventDecorator.decorator.wrapped_func\u001b[39m\u001b[34m()\u001b[39m\n", "\u001b[36mFile \u001b[39m\u001b[32mpetsc4py/PETSc/Log.pyx:189\u001b[39m, in \u001b[36mpetsc4py.PETSc.Log.EventDecorator.decorator.wrapped_func\u001b[39m\u001b[34m()\u001b[39m\n", "\u001b[36mFile \u001b[39m\u001b[32m~/src/firedrake-pyadjoint/firedrake/firedrake/adjoint_utils/variational_solver.py:108\u001b[39m, in \u001b[36mNonlinearVariationalSolverMixin._ad_annotate_solve..wrapper\u001b[39m\u001b[34m(self, **kwargs)\u001b[39m\n\u001b[32m 105\u001b[39m tape.add_block(block)\n\u001b[32m 107\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m stop_annotating():\n\u001b[32m--> \u001b[39m\u001b[32m108\u001b[39m out = \u001b[43msolve\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 110\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m annotate:\n\u001b[32m 111\u001b[39m block.add_output(\u001b[38;5;28mself\u001b[39m._ad_problem._ad_u.create_block_variable())\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/src/firedrake-pyadjoint/firedrake/firedrake/variational_solver.py:361\u001b[39m, in \u001b[36mNonlinearVariationalSolver.solve\u001b[39m\u001b[34m(self, bounds)\u001b[39m\n\u001b[32m 359\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m problem.restrict:\n\u001b[32m 360\u001b[39m 
problem.u.interpolate(problem.u_restrict)\n\u001b[32m--> \u001b[39m\u001b[32m361\u001b[39m \u001b[43msolving_utils\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcheck_snes_convergence\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43msnes\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 363\u001b[39m \u001b[38;5;66;03m# Grab the comm associated with the `_problem` and call PETSc's garbage cleanup routine\u001b[39;00m\n\u001b[32m 364\u001b[39m comm = \u001b[38;5;28mself\u001b[39m._problem.u_restrict.function_space().mesh()._comm\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/src/firedrake-pyadjoint/firedrake/firedrake/variational_solver.py:361\u001b[39m, in \u001b[36mNonlinearVariationalSolver.solve\u001b[39m\u001b[34m(self, bounds)\u001b[39m\n\u001b[32m 359\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m problem.restrict:\n\u001b[32m 360\u001b[39m problem.u.interpolate(problem.u_restrict)\n\u001b[32m--> \u001b[39m\u001b[32m361\u001b[39m \u001b[43msolving_utils\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcheck_snes_convergence\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43msnes\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 363\u001b[39m \u001b[38;5;66;03m# Grab the comm associated with the `_problem` and call PETSc's garbage cleanup routine\u001b[39;00m\n\u001b[32m 364\u001b[39m comm = \u001b[38;5;28mself\u001b[39m._problem.u_restrict.function_space().mesh().comm\n", "\u001b[36mFile \u001b[39m\u001b[32m~/src/firedrake-pyadjoint/firedrake/firedrake/solving_utils.py:128\u001b[39m, in \u001b[36mcheck_snes_convergence\u001b[39m\u001b[34m(snes)\u001b[39m\n\u001b[32m 126\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 127\u001b[39m msg = reason\n\u001b[32m--> \u001b[39m\u001b[32m128\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m ConvergenceError(\u001b[33mr\u001b[39m\u001b[33m\"\"\"\u001b[39m\u001b[33mNonlinear solve failed to converge after 
\u001b[39m\u001b[38;5;132;01m%d\u001b[39;00m\u001b[33m nonlinear iterations.\u001b[39m\n\u001b[32m 129\u001b[39m \u001b[33mReason:\u001b[39m\n\u001b[32m 130\u001b[39m \u001b[33m \u001b[39m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m\"\"\"\u001b[39m % (snes.getIterationNumber(), msg))\n", "\u001b[31mConvergenceError\u001b[39m: Nonlinear solve failed to converge after 0 nonlinear iterations.\nReason:\n DIVERGED_LINEAR_SOLVE" ] @@ -393,7 +393,7 @@ " f = as_vector([0, -rho*g])\n", " mu = Constant(1)\n", " lambda_ = Constant(0.25)\n", - " Id = Identity(mesh.geometric_dimension()) # 2x2 Identity tensor\n", + " Id = Identity(mesh.geometric_dimension) # 2x2 Identity tensor\n", " def epsilon(u):\n", " return 0.5*(grad(u) + grad(u).T)\n", "\n", diff --git a/docs/notebooks/08-composable-solvers.ipynb b/docs/notebooks/08-composable-solvers.ipynb index 78ba106b5f..d13d798bb0 100644 --- a/docs/notebooks/08-composable-solvers.ipynb +++ b/docs/notebooks/08-composable-solvers.ipynb @@ -114,7 +114,7 @@ "bc_value = as_vector([0.25 * x**2 * (2-x)**2 *y**2, 0])\n", "\n", "bcs = [DirichletBC(W.sub(0), bc_value, 4),\n", - " DirichletBC(W.sub(0), zero(mesh.geometric_dimension()), (1, 2, 3))]" + " DirichletBC(W.sub(0), zero(mesh.geometric_dimension), (1, 2, 3))]" ] }, { diff --git a/docs/source/advanced_tut.rst b/docs/source/advanced_tut.rst index 29d8d093a4..5007d12f4b 100644 --- a/docs/source/advanced_tut.rst +++ b/docs/source/advanced_tut.rst @@ -26,6 +26,7 @@ element systems. Full-waveform inversion: spatial and wave sources parallelism. 1D Vlasov-Poisson equation using vertical independent function spaces. Degree-independent multigrid convergence using patch relaxation. + Multigrid on adaptively-refined mesh hierarchies. Monolithic multigrid with Vanka relaxation for Stokes. Vertex/edge star multigrid relaxation for H(div). Auxiliary space patch relaxation multigrid for H(curl). 
diff --git a/docs/source/conf.py b/docs/source/conf.py index 1aa47360e9..b5e26a9f5f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -157,6 +157,8 @@ # Cofunction.ufl_domains references FormArgument but it isn't picked # up by Sphinx (see https://github.com/sphinx-doc/sphinx/issues/11225) ('py:class', 'FormArgument'), + # Some complex type hints confuse Sphinx (https://github.com/sphinx-doc/sphinx/issues/14159) + ("py:obj", r"typing\.Literal\[.*"), ] # Dodgy links diff --git a/docs/source/duals.rst b/docs/source/duals.rst index 156b92e49a..453af77406 100644 --- a/docs/source/duals.rst +++ b/docs/source/duals.rst @@ -1,3 +1,5 @@ +.. _duals: + Dual spaces ===================================== diff --git a/docs/source/install.rst b/docs/source/install.rst index e8c22cb51b..a589196516 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -226,7 +226,7 @@ install Firedrake. To do this perform the following steps: .. code-block:: text - CC=mpicc CXX=mpicxx PETSC_DIR=/path/to/petsc PETSC_ARCH=arch-firedrake-{default,complex} HDF5_MPI=ON + PETSC_DIR=/path/to/petsc PETSC_ARCH=arch-firedrake-{default,complex} HDF5_MPI=ON .. note:: This command will only work if you have the right starting directory. diff --git a/docs/source/interpolation.rst b/docs/source/interpolation.rst index ed80824912..50441902ff 100644 --- a/docs/source/interpolation.rst +++ b/docs/source/interpolation.rst @@ -11,7 +11,7 @@ Firedrake offers highly flexible capabilities for interpolating expressions (functions of space) into finite element :py:class:`~.Function`\s. Interpolation is often used to set up initial conditions and/or boundary conditions. 
Mathematically, if :math:`e(x)` is a function of space and -:math:`V` is a finite element functionspace then +:math:`V` is a finite element function space then :math:`\operatorname{interpolate}(e, V)` is the :py:class:`~.Function` :math:`v_i \phi_i\in V` such that: @@ -46,6 +46,9 @@ The basic syntax for interpolation is: :start-after: [test_interpolate_operator 1] :end-before: [test_interpolate_operator 2] +Here, the :py:func:`~.interpolate` function returned a **symbolic** UFL_ :py:class:`~ufl.Interpolate` +expression. To calculate a concrete numerical result, we need to call :py:func:`~.assemble` on this expression. + It is also possible to interpolate an expression directly into an existing :py:class:`~.Function`: @@ -89,8 +92,7 @@ Here is an example demonstrating some of these features: :start-after: [test_interpolate_operator 7] :end-before: [test_interpolate_operator 8] -This also works as expected when interpolating into a a space defined on the facets -of the mesh: +This also works when interpolating into a space defined on the facets of the mesh: .. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py :language: python3 @@ -105,20 +107,165 @@ of the mesh: interpolate into spaces defined by higher-continuity elements such as Argyris and Hermite. -Interpolation across meshes ---------------------------- -The interpolation API supports interpolation between meshes where the target -function space has finite elements (as given in the list of -:ref:`supported elements `) +Semantics of symbolic interpolation +----------------------------------- + +Let :math:`U` and :math:`V` be finite element spaces with DoFs :math:`\{\psi^{*}_{i}\}` and :math:`\{\phi^{*}_{i}\}` +and basis functions :math:`\{\psi_{i}\}` and :math:`\{\phi_{i}\}`, respectively. +The interpolation operator between :math:`U` and :math:`V` is defined + +.. math:: + + \mathcal{I}_{V} : U &\to V \\ \mathcal{I}_{V}(u)(x) &= \phi^{*}_{i}(u)\phi_{i}(x). 
+ +We define the following bilinear form + +.. math:: + + I : U \times V^{*} &\to \mathbb{R} \\ I(u, v^*) &= v^{*}(u) + +where :math:`v^{*}\in V^{*}` is a linear functional in the dual space to :math:`V`, extended so that +it can act on functions in :math:`U`. If we choose :math:`v^{*} = \phi^{*}_{i}` then +:math:`I(u, \phi^{*}_{i}) = \phi^{*}_{i}(u)` gives the coefficients of the interpolation of :math:`u` into :math:`V`. +This allows us to represent the interpolation as a form in UFL_. This is exactly the +:py:class:`~ufl.Interpolate` UFL_ object. Note that this differs from typical bilinear forms since one of the +arguments is in a dual space. For more information on dual spaces in Firedrake, +see :ref:`the relevant section of the manual `. + +Interpolation operators +~~~~~~~~~~~~~~~~~~~~~~~ + +2-forms are assembled into matrices, and we can do the same with the interpolation form. +If we let :math:`u` be a ``TrialFunction(U)`` (i.e. an argument in slot 1) and :math:`v^*` be a +``TestFunction(V.dual())`` (i.e. a :py:class:`~ufl.Coargument` in slot 0) then -* **Lagrange/CG** (also known a Continuous Galerkin or P elements), -* **Q** (i.e. Lagrange/CG on lines, quadrilaterals and hexahedra), -* **Discontinuous Lagrange/DG** (also known as Discontinuous Galerkin or DP elements) and -* **DQ** (i.e. Discontinuous Lagrange/DG on lines, quadrilaterals and hexahedra). +.. math:: + + I(u, v^*) = I(\psi_{j},\phi_{i}^*)=\phi_{i}^*(\psi_{j})=:A_{ij} + +The matrix :math:`A` is the interpolation matrix from :math:`U` to :math:`V`. In Firedrake, we can +assemble this matrix by doing + +.. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_interpolate_operator 11] + :end-before: [test_interpolate_operator 12] -Vector, tensor and mixed function spaces can also be interpolated into from -other meshes as long as they are constructed from these spaces. 
+Passing a :py:class:`~.FunctionSpace` into the dual slot of :py:func:`~.interpolate` is +syntactic sugar for ``TestFunction(V.dual())``. + +If :math:`g\in U` is a :py:class:`~.Function`, then we can write it as :math:`g = g_j \psi_j` for +some coefficients :math:`g_j`. Interpolating :math:`g` into :math:`V` gives + +.. math:: + + I(g, v^*) = \phi^{*}_{i}(g_j \psi_j)= A_{ij} g_j, + +so we can multiply the vector of coefficients of :math:`g` by the interpolation matrix to obtain the +coefficients of the interpolated function. In Firedrake, we can do this by + +.. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_interpolate_operator 12] + :end-before: [test_interpolate_operator 13] + +:math:`h` is a :py:class:`~.Function` in :math:`V` representing the interpolation of :math:`g` into :math:`V`. + +.. note:: + + When interpolating a :py:class:`~.Function` directly, for example + + .. code-block:: python3 + + assemble(interpolate(Function(U), V)) + + Firedrake does not explicitly assemble the interpolation matrix. Instead, the interpolation + is performed matrix-free. + +Adjoint interpolation +~~~~~~~~~~~~~~~~~~~~~ +The adjoint of the interpolation operator is defined as + +.. math:: + + \mathcal{I}_{V}^{*} : V^{*} \to U^{*}. + +This operator interpolates :py:class:`~.Cofunction`\s in the dual space :math:`V^{*}` into +the dual space :math:`U^{*}`. The associated form is + +.. math:: + + I^{*} : V^{*} \times U \to \mathbb{R}. + +So to obtain the adjoint interpolation operator, we swap the arguments of the :py:class:`~ufl.Interpolate` +form. In Firedrake, we can accomplish this in two ways. The first is to swap the argument numbers to the form: + +.. 
literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_interpolate_operator 14] + :end-before: [test_interpolate_operator 15] + +The second way is to use UFL_'s :py:func:`~ufl.adjoint` operator, which takes a form and returns its adjoint: + +.. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_interpolate_operator 15] + :end-before: [test_interpolate_operator 16] + +If :math:`g^*` is a :py:class:`~.Cofunction` in :math:`V^{*}` then we can interpolate it into :math:`U^{*}` by doing + +.. math:: + + I^{*}(g^*, u) = g^*_i \phi_i^*(\psi_j) = g^*_i A_{ij}. + +This is the product of the adjoint interpolation matrix :math:`A^{*}` and the coefficients of :math:`g^*`. +In Firedrake, we can do this by + +.. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_interpolate_operator 16] + :end-before: [test_interpolate_operator 17] + +Again, Firedrake does not explicitly assemble the adjoint interpolation matrix, but performs the +interpolation matrix-free. To perform the interpolation with the assembled adjoint interpolation operator, +we can take the :py:func:`~ufl.action` of the operator on the :py:class:`~.Cofunction`: + +.. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_interpolate_operator 17] + :end-before: [test_interpolate_operator 18] + +The final case is when we interpolate a :py:class:`~.Function` into :py:class:`~.Cofunction`: + +.. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_interpolate_operator 19] + :end-before: [test_interpolate_operator 20] + +This interpolation has zero arguments and hence is assembled into a number. Mathematically, we have + +.. 
math:: + + I^{*}(g^*, u) = g^*_i \phi_i^*(u_{j}\psi_j) = g^*_i A_{ij} u_j. + +which indeed contracts into a number. + +Interpolation across meshes +--------------------------- + +The interpolation API supports interpolation across meshes where the target +function space has any finite element which supports interpolation, as specified in the list of +:ref:`supported elements `. Vector, tensor, and mixed function +spaces can also be interpolated into from other meshes as long as they are +constructed from these spaces. .. note:: @@ -142,7 +289,7 @@ of the source mesh. Volume, surface and line integrals can therefore be calculated by interpolating onto the mesh or :ref:`immersed manifold ` which defines the volume, surface or line of interest in the domain. The integral itself is calculated -by calling :py:func:`~.assemble` on an approriate form over the target mesh +by calling :py:func:`~.assemble` on an appropriate form over the target mesh function space: .. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py @@ -178,7 +325,7 @@ interpolation will raise a :py:class:`~.DofNotDefinedError`. :start-after: [test_cross_mesh 3] :end-before: [test_cross_mesh 4] -This can be overriden with the optional ``allow_missing_dofs`` keyword +This can be overridden with the optional ``allow_missing_dofs`` keyword argument: .. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py @@ -248,7 +395,7 @@ Interpolation from external data Unfortunately, UFL interpolation is not applicable if some of the source data is not yet available as a Firedrake :py:class:`~.Function` -or UFL expression. Here we describe a recipe for moving external to +or UFL expression. Here we describe a recipe for moving external data to Firedrake fields. Let us assume that there is some function ``mydata(X)`` which takes as @@ -277,6 +424,63 @@ the next operation using ``f``. 
For interaction with external point data, see the :ref:`corresponding manual section `. +Interpolation between mixed function spaces +------------------------------------------- + +Assembly of interpolation operators between mixed function spaces is also supported. +Each component of the mixed space may be on different meshes. +For example, consider the following mixed finite element spaces: + +.. math:: + + W &= V_1 \times V_2 \\ + U &= V_3 \times V_4 + +where each :math:`V_i` is a finite element space defined on possibly different meshes. +We can assemble the interpolation matrix from :math:`U` to :math:`W` in Firedrake as follows: + +.. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_mixed_space_interpolation 1] + :end-before: [test_mixed_space_interpolation 2] + +We specified ``mat_type="nest"`` here to obtain a PETSc MatNest matrix, but Firedrake also +supports assembly of ``mat_type="aij"`` and ``mat_type="matfree"`` interpolation matrices +between mixed function spaces. In this example ``I`` is a block diagonal matrix, with +each block given by + +.. math:: + + \begin{pmatrix} + V_3 \rightarrow V_1 & 0 \\ + 0 & V_4 \rightarrow V_2 + \end{pmatrix} + +The off-diagonal blocks are zero since the dofs are applied component-wise. Firedrake's form +compiler recognises this and avoids assembling the zero blocks. + +We can assemble more general interpolation matrices between mixed function spaces by interpolating +vector expressions with arguments. For example, by doing + +.. literalinclude:: ../../tests/firedrake/regression/test_interpolation_manual.py + :language: python3 + :dedent: + :start-after: [test_mixed_space_interpolation 3] + :end-before: [test_mixed_space_interpolation 4] + +we can assemble the interpolation matrix with block structure + +.. 
math:: + + \begin{pmatrix} + V_3 \rightarrow V_1 & V_4 \rightarrow V_1 \\ + V_3 \rightarrow V_2 & V_4 \rightarrow V_2 + \end{pmatrix} + +Here we obtain non-zero off-diagonal blocks by including both components of the trial function +in each component of the expression. + Generating Functions with randomised values ------------------------------------------- diff --git a/docs/source/intro_tut.rst b/docs/source/intro_tut.rst index 6f31f420b1..93b3d3afeb 100644 --- a/docs/source/intro_tut.rst +++ b/docs/source/intro_tut.rst @@ -14,5 +14,6 @@ Introductory Tutorials A mixed formulation of the Poisson equation. A time-dependent DG advection equation using upwinding. An extruded mesh example, using a steady-state continuity equation. + A linear shallow water equations example using a Strang timestepping scheme. A linear wave equation with optional mass lumping. Creating Firedrake-compatible meshes in Gmsh. diff --git a/docs/source/mesh-coordinates.rst b/docs/source/mesh-coordinates.rst index 7ae34cd7bb..450ed8be12 100644 --- a/docs/source/mesh-coordinates.rst +++ b/docs/source/mesh-coordinates.rst @@ -118,7 +118,7 @@ functions over to the new mesh. To move `f` over to ``mesh``, use: .. code-block:: python3 - g = Function(functionspaceimpl.WithGeometry.create(f.function_space(), mesh), + g = Function(functionspaceimpl.WithGeometry(f.function_space(), mesh), val=f.topological) This creates a :py:class:`~.Function` `g` which shares data with `f`, diff --git a/docs/source/parallelism.rst b/docs/source/parallelism.rst index f3219c087c..bc2528eba4 100644 --- a/docs/source/parallelism.rst +++ b/docs/source/parallelism.rst @@ -96,9 +96,5 @@ different simulations on the two halves we would write. To access the communicator a mesh was created on, we can use the ``mesh.comm`` property, or the function ``mesh.mpi_comm``. -.. warning:: - Do not use the internal ``mesh._comm`` attribute for communication. - This communicator is for internal Firedrake MPI communication only. - .. 
_MPI: http://mpi-forum.org/ .. _STREAMS: http://www.cs.virginia.edu/stream/ diff --git a/docs/source/petsc-interface.rst b/docs/source/petsc-interface.rst index 45a874a51e..b5add34a8d 100644 --- a/docs/source/petsc-interface.rst +++ b/docs/source/petsc-interface.rst @@ -85,7 +85,7 @@ SciPy in the following way: import scipy.sparse as sp indptr, indices, data = petsc_mat.getValuesCSR() - scipy_mat = sp.csr_matrix((data, indices, indptr), shape=petsc_mat.getSize()) + scipy_mat = sp.csr_array((data, indices, indptr), shape=petsc_mat.getSize()) The sparsity pattern may then be straightforwardly plotted using matplotlib: @@ -375,8 +375,8 @@ Accordingly, set .. code-block:: python3 - dim = mesh.topological_dimension() - gdim = mesh.geometrical_dimension() + dim = mesh.topological_dimension + gdim = mesh.geometric_dimension entity_dofs = np.zeros(dim+1, dtype=np.int32) entity_dofs[0] = gdim entity_dofs[1] = gdim*(p-1) diff --git a/docs/source/quadrature.rst b/docs/source/quadrature.rst index 02f0bb7ec6..c65dcddf2a 100644 --- a/docs/source/quadrature.rst +++ b/docs/source/quadrature.rst @@ -15,9 +15,9 @@ this estimate might be quite large, and a warning like this one will be raised: tsfc:WARNING Estimated quadrature degree 13 more than tenfold greater than any argument/coefficient degree (max 1) -For integrals with very complicated nonlinearities, the estimated quadrature -degree might be in the hundreds or thousands, rendering the integration -prohibitively expensive, or leading to segfaults. +For integrals with very complicated or non-polynomial nonlinearities, the +estimated quadrature degree might be in the hundreds or thousands, rendering +the integration prohibitively expensive, or leading to segfaults. 
Specifying the quadrature rule in the variational formulation ------------------------------------------------------------- @@ -30,11 +30,28 @@ quadrature degree can be prescribed on each integral :py:class:`~ufl.measure.Mea inner(sin(u)**4, v) * dx(degree=4) Setting ``degree=4`` means that the quadrature rule will be exact only for integrands -of total polynomial degree up to 4. This, of course, will introduce a greater numerical error than the default. +of total polynomial degree up to 4. This, of course, will introduce a greater numerical +error than the default. -For integrals that do not specify a quadrature degree, the default may be keyed as -``"quadrature_degree"`` in the ``form_compiler_parameters`` dictionary passed on to -:py:func:`~.solve`, :py:func:`~.project`, or :py:class:`~.NonlinearVariationalProblem`. +Rather than enforcing a fixed quadrature degree, it is also possible to set +a maximum allowable degree. This value will only be used if UFL estimates a larger +degree. +The maximum allowable degree can be set for a particular integral by +adding the ``"max_quadrature_degree"`` entry to the ``metadata`` of the ``Measure``: + +.. code-block:: python3 + + inner(sin(u)**4, v) * dx(metadata={"max_quadrature_degree": 4}) + +The ``metadata`` can also be used to set a fixed quadrature degree by adding +a ``"quadrature_degree"`` entry to the dictionary. However, because setting a +fixed degree is quite common the ``degree`` keyword argument shown above is +provided as a convenient shorthand. + +For integrals that do not specify a fixed or maximum quadrature degree, a default value +may be keyed as the ``"quadrature_degree"`` or ``"max_quadrature_degree"`` entry +respectively in the ``form_compiler_parameters`` dictionary passed on to :py:func:`~.solve`, +:py:func:`~.project`, :py:class:`~.NonlinearVariationalProblem`, or :py:func:`~.assemble`. .. 
code-block:: python3 @@ -42,9 +59,18 @@ For integrals that do not specify a quadrature degree, the default may be keyed solve(F == 0, u, form_compiler_parameters={"quadrature_degree": 4}) -In the example above, only the integrals with unspecified quadrature degree -will be computed on a quadrature rule that exactly integrates polynomials of -the degree set in ``form_compiler_parameters``. + assemble(F, form_compiler_parameters={"max_quadrature_degree": 4}) + +In the example above: + +* In the ``solve`` call, the integrals with unspecified quadrature degree + will be computed on a quadrature rule that exactly integrates polynomials of + the degree set in ``form_compiler_parameters``. + +* In the ``assemble`` call, the integrals with unspecified quadrature degree will + be computed with the ``"max_quadrature_degree"`` set in the ``"form_compiler_parameters"`` + if the estimated quadrature degree is greater than 4, otherwise they will be computed + with the estimated quadrature degree. Another way to specify the quadrature rule is through the ``scheme`` keyword. This could be either a :py:class:`~finat.quadrature.QuadratureRule`, or a string. Supported string values diff --git a/firedrake/__init__.py b/firedrake/__init__.py index 70dec1d6ca..3cab34998a 100644 --- a/firedrake/__init__.py +++ b/firedrake/__init__.py @@ -3,7 +3,7 @@ # the specific version, here we are more permissive. This is to catch the # case where users don't update their PETSc for a really long time or # accidentally install a too-new release that isn't yet supported. 
-PETSC_SUPPORTED_VERSIONS = ">=3.23.0" +PETSC_SUPPORTED_VERSIONS = ">=3.25.0" def init_petsc(): @@ -45,8 +45,8 @@ def init_petsc(): del atexit del petsc -from ufl import * -from finat.ufl import * +from ufl import * # noqa: F401 +from finat.ufl import * # noqa: F401 from pyop2 import op2 # noqa: F401 from pyop2.mpi import COMM_WORLD, COMM_SELF # noqa: F401 @@ -56,54 +56,129 @@ def init_petsc(): petsctools.cite("FiredrakeUserManual") del petsctools -from firedrake.assemble import * -from firedrake.bcs import * -from firedrake.checkpointing import * -from firedrake.cofunction import * -from firedrake.constant import * -from firedrake.deflation import * -from firedrake.exceptions import * -from firedrake.function import * -from firedrake.functionspace import * -from firedrake.interpolation import * -from firedrake.linear_solver import * -from firedrake.preconditioners import * -from firedrake.mesh import * -from firedrake.mg.mesh import * -from firedrake.mg.interface import * -from firedrake.mg.embedded import * -from firedrake.mg.opencascade_mh import * -from firedrake.norms import * -from firedrake.nullspace import * -from firedrake.parameters import * -from firedrake.parloops import * -from firedrake.projection import * -from firedrake.slate import * -from firedrake.slope_limiter import * -from firedrake.solving import * -from firedrake.ufl_expr import * -from firedrake.utility_meshes import * -from firedrake.variational_solver import * -from firedrake.eigensolver import * -from firedrake.ensemble import * -from firedrake.randomfunctiongen import * -from firedrake.external_operators import * +from firedrake.petsc import PETSc # noqa: F401 +from firedrake.assemble import assemble # noqa: F401 +from firedrake.bcs import DirichletBC, homogenize, EquationBC # noqa: F401 +from firedrake.checkpointing import ( # noqa: F401 + DumbCheckpoint, HDF5File, FILE_READ, FILE_CREATE, + FILE_UPDATE, CheckpointFile +) +from firedrake.cofunction import Cofunction, RieszMap # noqa: 
F401 +from firedrake.constant import Constant # noqa: F401 +from firedrake.deflation import DeflatedSNES, Deflation # noqa: F401 +from firedrake.exceptions import ( # noqa: F401 + FiredrakeException, ConvergenceError, MismatchingDomainError, + VertexOnlyMeshMissingPointsError, DofNotDefinedError, DofTypeError, +) +from firedrake.function import ( # noqa: F401 + Function, PointNotInDomainError, + CoordinatelessFunction, PointEvaluator +) +from firedrake.functionspace import ( # noqa: F401 + MixedFunctionSpace, FunctionSpace, VectorFunctionSpace, + TensorFunctionSpace, RestrictedFunctionSpace +) +from firedrake.interpolation import ( # noqa: F401 + interpolate, Interpolate, get_interpolator +) +from firedrake.linear_solver import LinearSolver # noqa: F401 +from firedrake.preconditioners import ( # noqa: F401 + PCBase, SNESBase, PCSNESBase, ASMPatchPC, ASMStarPC, ASMVankaPC, + ASMLinesmoothPC, ASMExtrudedStarPC, AssembledPC, AuxiliaryOperatorPC, + MassInvPC, PCDPC, PatchPC, PlaneSmoother, PatchSNES, P1PC, P1SNES, + LORPC, GTMGPC, PMGPC, PMGSNES, HypreAMS, HypreADS, FDMPC, + PoissonFDMPC, TwoLevelPC, HiptmairPC, FacetSplitPC, BDDCPC, + CovariancePC +) +from firedrake.mesh import ( # noqa: F401 + Mesh, ExtrudedMesh, VertexOnlyMesh, RelabeledMesh, + SubDomainData, UNMARKED, DistributedMeshOverlapType, + DEFAULT_MESH_NAME, MeshGeometry, MeshTopology, + AbstractMeshTopology, ExtrudedMeshTopology, Submesh, + VertexOnlyMeshTopology, MeshSequenceGeometry, MeshSequenceTopology +) +from firedrake.mg import ( # noqa: F401 + HierarchyBase, MeshHierarchy, ExtrudedMeshHierarchy, + NonNestedHierarchy, SemiCoarsenedExtrudedHierarchy, + prolong, restrict, inject, TransferManager, + OpenCascadeMeshHierarchy, AdaptiveMeshHierarchy, + AdaptiveTransferManager +) +from firedrake.norms import errornorm, norm # noqa: F401 +from firedrake.nullspace import VectorSpaceBasis, MixedVectorSpaceBasis # noqa: F401 +from firedrake.output import VTKFile # noqa: F401 +from firedrake.parameters import ( 
# noqa: F401 + Parameters, parameters, disable_performance_optimisations +) +from firedrake.parloops import ( # noqa: F401 + par_loop, direct, READ, WRITE, RW, INC, MIN, MAX +) +from firedrake.projection import ( # noqa: F401 + project, Projector +) +from firedrake.slate import ( # noqa: F401 + AssembledVector, Block, Factorization, Tensor, Inverse, + Transpose, Negative, Add, Mul, Solve, BlockAssembledVector, + DiagonalTensor, Reciprocal, HybridizationPC, SchurComplementBuilder, + SCPC, TensorOp +) +from firedrake.slope_limiter import ( # noqa: F401 + Limiter, VertexBasedLimiter +) +from firedrake.solving import solve # noqa: F401 +from firedrake.ufl_expr import ( # noqa: F401 + Argument, Coargument, TestFunction, TrialFunction, + TestFunctions, TrialFunctions, derivative, adjoint, + action, CellSize, FacetNormal +) +from firedrake.utility_meshes import ( # noqa: F401 + IntervalMesh, UnitIntervalMesh, PeriodicIntervalMesh, + PeriodicUnitIntervalMesh, UnitTriangleMesh, RectangleMesh, + TensorRectangleMesh, SquareMesh, UnitSquareMesh, PeriodicRectangleMesh, + PeriodicSquareMesh, PeriodicUnitSquareMesh, CircleManifoldMesh, + UnitDiskMesh, UnitBallMesh, UnitTetrahedronMesh, TensorBoxMesh, + BoxMesh, CubeMesh, UnitCubeMesh, PeriodicBoxMesh, PeriodicUnitCubeMesh, + IcosahedralSphereMesh, UnitIcosahedralSphereMesh, OctahedralSphereMesh, + UnitOctahedralSphereMesh, CubedSphereMesh, UnitCubedSphereMesh, + TorusMesh, AnnulusMesh, SolidTorusMesh, CylinderMesh +) +from firedrake.variational_solver import ( # noqa: F401 + LinearVariationalProblem, LinearVariationalSolver, + NonlinearVariationalProblem, NonlinearVariationalSolver +) +from firedrake.eigensolver import ( # noqa: F401 + LinearEigenproblem, LinearEigensolver +) +from firedrake.ensemble import ( # noqa: F401 + Ensemble, EnsembleFunction, EnsembleCofunction, + EnsembleFunctionSpace, EnsembleDualSpace, EnsembleBJacobiPC, + EnsembleBlockDiagonalMat +) +from firedrake.randomfunctiongen import * # noqa: F401 +from 
firedrake.external_operators import ( # noqa: F401 + AbstractExternalOperator, assemble_method, + PointexprOperator, point_expr, MLOperator +) from firedrake.progress_bar import ProgressBar # noqa: F401 -from firedrake.logging import * +from firedrake.logging import ( # noqa: F401 + set_level, set_log_handlers, set_log_level, DEBUG, INFO, + WARNING, ERROR, CRITICAL, log, debug, info, warning, error, + critical, info_red, info_green, info_blue, RED, GREEN, BLUE +) +from firedrake.matrix import ( # noqa: F401 + MatrixBase, Matrix, ImplicitMatrix, AssembledMatrix +) + # Set default log level set_log_level(WARNING) set_log_handlers(comm=COMM_WORLD) # Moved functionality -from firedrake._deprecation import plot, File # noqa: F401 -# Once `File` is deprecated update the above line removing `File` and add -# from firedrake._deprecation import output -# sys.modules["firedrake.output"] = output -from firedrake.output import * +from firedrake._deprecation import plot # noqa: F401 import sys sys.modules["firedrake.plot"] = plot -from firedrake.plot import * +from firedrake.plot import * # noqa: F401 del sys diff --git a/firedrake/_deprecation.py b/firedrake/_deprecation.py index 6e0c6c6e5f..f5cad3d0fd 100644 --- a/firedrake/_deprecation.py +++ b/firedrake/_deprecation.py @@ -2,8 +2,6 @@ """ import importlib -from warnings import warn - class _fake_module: """ Object which behaves like a module @@ -47,28 +45,6 @@ def __call__(*args, **kwargs): return __call__ -# Deprecate output.File in the global namespace -output = _fake_module( - "firedrake.output", - ["File", ], - ["VTKFile", ] -) - - -# I hate it -def File(*args, **kwargs): - """Deprecated File constructor. - - Use `VTKFile` from `firedrake.output` instead - """ - from .output import VTKFile - warn( - "The use of `File` for output is deprecated, please update your " - "code to use `VTKFile` from `firedrake.output`." 
- ) - return VTKFile(*args, **kwargs) - - # Deprecate plotting in the global namespace plot = _fake_module( "firedrake.pyplot", diff --git a/firedrake/adjoint/__init__.py b/firedrake/adjoint/__init__.py index 071c190912..fcb6e55b54 100644 --- a/firedrake/adjoint/__init__.py +++ b/firedrake/adjoint/__init__.py @@ -10,41 +10,50 @@ import pyadjoint __version__ = pyadjoint.__version__ -from pyadjoint.tape import Tape, set_working_tape, get_working_tape, \ - pause_annotation, continue_annotation, \ - stop_annotating, annotate_tape # noqa F401 +from pyadjoint.tape import ( # noqa: F401 + Tape, set_working_tape, get_working_tape, pause_annotation, + continue_annotation, stop_annotating, annotate_tape +) from pyadjoint.reduced_functional import ReducedFunctional # noqa F401 from pyadjoint.checkpointing import disk_checkpointing_callback # noqa F401 -from firedrake.adjoint_utils.checkpointing import \ - enable_disk_checkpointing, pause_disk_checkpointing, \ - continue_disk_checkpointing, stop_disk_checkpointing, \ - checkpointable_mesh # noqa F401 +from firedrake.adjoint_utils.checkpointing import ( # noqa: F401 + enable_disk_checkpointing, pause_disk_checkpointing, + continue_disk_checkpointing, stop_disk_checkpointing, + checkpointable_mesh +) from firedrake.adjoint_utils import get_solve_blocks # noqa F401 - from pyadjoint.verification import taylor_test, taylor_to_dict # noqa F401 from pyadjoint.drivers import compute_gradient, compute_derivative, compute_hessian # noqa F401 from pyadjoint.adjfloat import AdjFloat # noqa F401 from pyadjoint.control import Control # noqa F401 -from pyadjoint import IPOPTSolver, ROLSolver, MinimizationProblem, \ - InequalityConstraint, minimize # noqa F401 - -from firedrake.adjoint.ufl_constraints import UFLInequalityConstraint, \ - UFLEqualityConstraint # noqa F401 +from pyadjoint import ( # noqa: F401 + IPOPTSolver, ROLSolver, MinimizationProblem, InequalityConstraint, minimize +) +from firedrake.adjoint.ufl_constraints import ( # noqa: 
F401 + UFLInequalityConstraint, UFLEqualityConstraint +) from firedrake.adjoint.ensemble_reduced_functional import EnsembleReducedFunctional # noqa F401 +from firedrake.adjoint.transformed_functional import L2RieszMap, L2TransformedFunctional # noqa: F401 +from firedrake.adjoint.covariance_operator import ( # noqa F401 + WhiteNoiseGenerator, AutoregressiveCovariance, CovarianceMat, + PyOP2NoiseBackend, PetscNoiseBackend, VOMNoiseBackend, MixedCovarianceOperator) import numpy_adjoint # noqa F401 import firedrake.ufl_expr import types import sys - # Work around the name clash of firedrake.adjoint vs ufl.adjoint. # This will eventually become cleaner once we can rely on users having # Python 3.12 (see PEP 713). + + class _AdjointModule(types.ModuleType): def __call__(self, form): return firedrake.ufl_expr.adjoint(form) sys.modules[__name__].__class__ = _AdjointModule +del sys +del types set_working_tape(Tape()) diff --git a/firedrake/adjoint/covariance_operator.py b/firedrake/adjoint/covariance_operator.py new file mode 100644 index 0000000000..3c1b58a479 --- /dev/null +++ b/firedrake/adjoint/covariance_operator.py @@ -0,0 +1,1164 @@ +import abc +from enum import Enum +from functools import cached_property +from typing import Iterable +from textwrap import dedent +from scipy.special import factorial +import petsctools +from loopy import generate_code_v2 +from pyop2 import op2 +from firedrake.tsfc_interface import compile_form +from firedrake.adjoint.transformed_functional import L2Cholesky +from firedrake.functionspaceimpl import WithGeometry +from firedrake.bcs import BCBase +from firedrake import ( + grad, inner, avg, action, outer, + assemble, CellSize, FacetNormal, + dx, dS, sqrt, Constant, + Function, Cofunction, RieszMap, + TrialFunction, TestFunction, + RandomGenerator, PCG64, + LinearVariationalProblem, + LinearVariationalSolver, + VertexOnlyMeshTopology, + PETSc +) + + +class NoiseBackendBase: + r""" + A base class for implementations of a mass matrix square 
root action + for generating white noise samples. + + Inheriting classes implement the method from [Croci et al 2018](https://epubs.siam.org/doi/10.1137/18M1175239) + + Generating the samples on the function space :math:`V` requires the following steps: + + 1. On each element generate a white noise sample :math:`z_{e}\sim\mathcal{N}(0, I)` + over all DoFs in the element. Equivalently, generate the sample on the + discontinuous superspace :math:`V_{d}^{*}` containing :math:`V^{*}`. + (i.e. ``Vd.ufl_element() = BrokenElement(V.ufl_element())``). + + 2. Apply the Cholesky factor :math:`C_{e}` of the element-wise mass matrix :math:`M_{e}` + to the element-wise sample (:math:`M_{e}=C_{e}C_{e}^{T}`). + + 3. Assemble the element-wise samples :math:`z_{e}\in V_{d}^{*}` into the global + sample vector :math:`z\in V^{*}`. If :math:`L` is the interpolation operator + then :math:`z=Lz_{e}=LC_{e}z_{e}`. + + 4. Optionally apply a Riesz map to :math:`z` to return a sample in :math:`V`. + + Parameters + ---------- + V : + The :func:`~.firedrake.functionspace.FunctionSpace` to generate the samples in. + rng : + The :mod:`RandomGenerator ` to generate the samples + on the discontinuous superspace. + seed : + Seed for the :mod:`RandomGenerator `. + Ignored if ``rng`` is given. + + See Also + -------- + PyOP2NoiseBackend + PetscNoiseBackend + VOMNoiseBackend + WhiteNoiseGenerator + """ + + def __init__(self, V: WithGeometry, rng=None, + seed: int | None = None): + self._V = V + self._Vb = V.broken_space() + self._rng = rng or RandomGenerator(PCG64(seed=seed, comm=V.comm)) + + @abc.abstractmethod + def sample(self, *, rng=None, + tensor: Function | Cofunction | None = None, + apply_riesz: bool = False): + """ + Generate a white noise sample. + + Parameters + ---------- + rng : + A :mod:`RandomGenerator ` to use for + sampling IID vectors. If ``None`` then ``self.rng`` is used. + + tensor : + Optional location to place the result into.
+ + apply_riesz : + Whether to apply the L2 Riesz map to return a sample in :math:`V`. + + Returns + ------- + Function | Cofunction : + The white noise sample in :math:`V` + """ + raise NotImplementedError + + @property + def broken_space(self): + """ + The discontinuous superspace containing :math:`V`, ``self.function_space``. + """ + return self._Vb + + @property + def function_space(self): + """The function space that the noise will be generated on. + """ + return self._V + + @property + def rng(self): + """The :mod:`RandomGenerator ` to generate the + IID sample on the broken function space. + """ + return self._rng + + @cached_property + def riesz_map(self): + """A :class:`~firedrake.cofunction.RieszMap` to cache the solver + for :meth:`~firedrake.cofunction.Cofunction.riesz_representation`. + """ + return RieszMap(self.function_space, "L2", constant_jacobian=True) + + +class PyOP2NoiseBackend(NoiseBackendBase): + """ + A PyOP2 based implementation of a mass matrix square root + for generating white noise. 
+ + See Also + -------- + NoiseBackendBase + WhiteNoiseGenerator + """ + def __init__(self, V: WithGeometry, rng=None, + seed: int | None = None): + super().__init__(V, rng=rng, seed=seed) + + u = TrialFunction(V) + v = TestFunction(V) + mass = inner(u, v)*dx + + # Create mass expression, assemble and extract kernel + mass_ker, *stuff = compile_form(mass, "mass") + mass_code = generate_code_v2(mass_ker.kinfo.kernel.code).device_code() + mass_code = mass_code.replace( + "void " + mass_ker.kinfo.kernel.name, + "static void " + mass_ker.kinfo.kernel.name) + + # Add custom code for doing Cholesky + # decomposition and applying to broken vector + name = mass_ker.kinfo.kernel.name + blocksize = mass_ker.kinfo.kernel.code[name].args[0].shape[0] + + cholesky_code = dedent( + f"""\ + extern void dpotrf_(char *UPLO, + int *N, + double *A, + int *LDA, + int *INFO); + + extern void dgemv_(char *TRANS, + int *M, + int *N, + double *ALPHA, + double *A, + int *LDA, + double *X, + int *INCX, + double *BETA, + double *Y, + int *INCY); + + {mass_code} + + void apply_cholesky(double *__restrict__ z, + double *__restrict__ b, + double const *__restrict__ coords) + {{ + char uplo[1]; + int32_t N = {blocksize}, LDA = {blocksize}, INFO = 0; + int32_t i=0, j=0; + uplo[0] = 'u'; + double H[{blocksize}*{blocksize}] = {{{{ 0.0 }}}}; + + char trans[1]; + int32_t stride = 1; + double scale = 1.0; + double zero = 0.0; + + {mass_ker.kinfo.kernel.name}(H, coords); + + uplo[0] = 'u'; + dpotrf_(uplo, &N, H, &LDA, &INFO); + for (int i = 0; i < N; i++) + for (int j = 0; j < N; j++) + if (j>i) + H[i*N + j] = 0.0; + + trans[0] = 'T'; + dgemv_(trans, &N, &N, &scale, H, &LDA, z, &stride, &zero, b, &stride); + }} + """ + ) + + # Get the BLAS and LAPACK compiler parameters to compile the kernel + comm = V.mesh().comm + if comm.rank == 0: + petsc_variables = petsctools.get_petscvariables() + BLASLAPACK_LIB = petsc_variables.get("BLASLAPACK_LIB", "") + BLASLAPACK_LIB = comm.bcast(BLASLAPACK_LIB, root=0) + 
BLASLAPACK_INCLUDE = petsc_variables.get("BLASLAPACK_INCLUDE", "") + BLASLAPACK_INCLUDE = comm.bcast(BLASLAPACK_INCLUDE, root=0) + else: + BLASLAPACK_LIB = comm.bcast(None, root=0) + BLASLAPACK_INCLUDE = comm.bcast(None, root=0) + + self.cholesky_kernel = op2.Kernel( + cholesky_code, "apply_cholesky", + include_dirs=BLASLAPACK_INCLUDE.split(), + ldargs=BLASLAPACK_LIB.split()) + + def sample(self, *, rng=None, + tensor: Function | Cofunction | None = None, + apply_riesz: bool = False): + rng = rng or self.rng + + z = rng.standard_normal(self.broken_space) + b = Cofunction(self.function_space.dual()) + + z_arg = z.dat(op2.READ, self.broken_space.cell_node_map()) + b_arg = b.dat(op2.INC, self.function_space.cell_node_map()) + + mesh = self.function_space.mesh() + coords = mesh.coordinates + c_arg = coords.dat(op2.READ, coords.cell_node_map()) + + op2.par_loop( + self.cholesky_kernel, + mesh.cell_set, + z_arg, b_arg, c_arg + ) + + if apply_riesz: + b = b.riesz_representation(self.riesz_map) + + if tensor: + tensor.assign(b) + else: + tensor = b + + return tensor + + +class PetscNoiseBackend(NoiseBackendBase): + """ + A PETSc based implementation of a mass matrix square root action + for generating white noise. 
+ + See Also + -------- + NoiseBackendBase + WhiteNoiseGenerator + """ + def __init__(self, V: WithGeometry, rng=None, + seed: int | None = None): + super().__init__(V, rng=rng, seed=seed) + self.cholesky = L2Cholesky(self.broken_space) + self._zb = Function(self.broken_space) + self.M = inner(self._zb, TestFunction(self.broken_space))*dx + + def sample(self, *, rng=None, + tensor: Function | Cofunction | None = None, + apply_riesz: bool = False): + V = self.function_space + rng = rng or self.rng + + # z + z = rng.standard_normal(self.broken_space) + # C z + self._zb.assign(self.cholesky.C_T_inv_action(z)) + Cz = assemble(self.M) + # L C z + b = Cofunction(V.dual()).interpolate(Cz) + + if apply_riesz: + b = b.riesz_representation(self.riesz_map) + + if tensor: + tensor.assign(b) + else: + tensor = b + + return tensor + + +class VOMNoiseBackend(NoiseBackendBase): + """ + A mass matrix square root for generating white noise + on a vertex only mesh. + + Notes + ----- + Computationally this is a no-op because the mass matrix + on a vertex only mesh is the identity, but we need a + consistent interface with other white noise backends. + + See Also + -------- + NoiseBackendBase + WhiteNoiseGenerator + """ + def sample(self, *, rng=None, + tensor: Function | Cofunction | None = None, + apply_riesz: bool = False): + rng = rng or self.rng + + b = rng.standard_normal(self.function_space) + + if not apply_riesz: + b = b.riesz_representation(self.riesz_map) + + if tensor: + tensor.assign(b) + else: + tensor = b + + return tensor + + +class WhiteNoiseGenerator: + r"""Generate white noise samples. + + Generates samples :math:`w\in V^{*}` with + :math:`w\sim\mathcal{N}(0, M)`, where :math:`M` is + the mass matrix, or its Riesz representer in :math:`V`. + + Parameters + ---------- + V : + The :class:`~firedrake.functionspace.FunctionSpace` to construct a + white noise sample on. + backend : + The backend to calculate and apply the mass matrix square root. 
+ rng : + Initialised random number generator to use for sampling IID vectors. + seed : + Seed for the :mod:`RandomGenerator `. + Ignored if ``rng`` is given. + + References + ---------- + Croci, M. and Giles, M. B and Rognes, M. E. and Farrell, P. E., 2018: + "Efficient White Noise Sampling and Coupling for Multilevel Monte Carlo + with Nonnested Meshes". SIAM/ASA J. Uncertainty Quantification, Vol. 6, + No. 4, pp. 1630--1655. + https://doi.org/10.1137/18M1175239 + + See Also + -------- + NoiseBackendBase + PyOP2NoiseBackend + PetscNoiseBackend + VOMNoiseBackend + CovarianceOperatorBase + """ + + def __init__(self, V: WithGeometry, + backend: NoiseBackendBase | None = None, + rng=None, seed: int | None = None): + + # Not all backends are valid for VOM. + if isinstance(V.mesh().topology, VertexOnlyMeshTopology): + backend = backend or VOMNoiseBackend(V, rng=rng, seed=seed) + if not isinstance(backend, VOMNoiseBackend): + raise ValueError( + f"Cannot use white noise backend {type(backend).__name__}" + " with a VertexOnlyMesh. Please use a VOMNoiseBackend.") + else: + backend = backend or PyOP2NoiseBackend(V, rng=rng, seed=seed) + + self.backend = backend + self.function_space = backend.function_space + self.rng = backend.rng + + petsctools.cite("Croci2018") + + def sample(self, *, rng=None, + tensor: Function | Cofunction | None = None, + apply_riesz: bool = False): + """ + Generate a white noise sample. + + Parameters + ---------- + rng : + A :mod:`RandomGenerator ` to use for + sampling IID vectors. If ``None`` then ``self.rng`` is used. + + tensor : + Optional location to place the result into. + + apply_riesz : + Whether to apply the L2 Riesz map to return a sample in :math:`V`. 
+ + Returns + ------- + Function | Cofunction : + The white noise sample + """ + return self.backend.sample( + rng=rng, tensor=tensor, apply_riesz=apply_riesz) + + +# Auto-regressive function parameters + +def lengthscale_m(Lar: float, m: int): + """Daley-equivalent lengthscale of m-th order autoregressive function. + + Parameters + ---------- + Lar : + Target Daley correlation lengthscale. + m : + Order of autoregressive function. + + Returns + ------- + L : float + Lengthscale parameter for autoregressive function. + """ + return Lar/sqrt(2*m - 3) + + +def lambda_m(Lar: float, m: int): + """Normalisation factor for autoregressive function. + + Parameters + ---------- + Lar : + Target Daley correlation lengthscale. + m : + Order of autoregressive function. + + Returns + ------- + lambda : float + Normalisation coefficient for autoregressive correlation operator. + """ + L = lengthscale_m(Lar, m) + num = (2**(2*m - 1))*factorial(m - 1)**2 + den = factorial(2*m - 2) + return L*num/den + + +def kappa_m(Lar: float, m: int): + """Diffusion coefficient for autoregressive function. + + Parameters + ---------- + Lar : + Target Daley correlation lengthscale. + m : + Order of autoregressive function. + + Returns + ------- + kappa : float + Diffusion coefficient for autoregressive covariance operator. + """ + return lengthscale_m(Lar, m)**2 + + +class CovarianceOperatorBase: + r""" + Abstract base class for a covariance operator B where + + .. math:: + + B: V^{*} \to V \quad \text{and} \quad B^{-1}: V \to V^{*} + + The covariance operators can be used to: + + - calculate weighted norms :math:`\|x\|_{B^{-1}} = x^{T}B^{-1}x` + to account for uncertainty in optimisation methods. + + - generate samples from the normal distribution :math:`\mathcal{N}(0, B)` + using :math:`w = B^{1/2}z` where :math:`z\sim\mathcal{N}(0, I)`. 
+ + Inheriting classes must implement the following methods: + + - ``rng`` + + - ``function_space`` + + Inheriting classes may implement the following methods (at least one + of this list must be implemented for this class to be useful): + + - ``sample`` + + - ``apply_inverse`` + + - ``apply_action`` + + They may optionally override ``norm`` to provide a more + efficient implementation. + + See Also + -------- + WhiteNoiseGenerator + AutoregressiveCovariance + CovarianceMatCtx + CovarianceMat + ~firedrake.preconditioners.covariance.CovariancePC + """ + + @abc.abstractmethod + def rng(self): + """:class:`~.WhiteNoiseGenerator` for generating samples. + """ + raise NotImplementedError + + @abc.abstractmethod + def function_space(self): + """The function space V that the covariance operator maps to. + """ + raise NotImplementedError + + def sample(self, *, rng: WhiteNoiseGenerator | None = None, + tensor: Function | None = None): + r""" + Sample from :math:`\mathcal{N}(0, B)` by correlating a + white noise sample: :math:`w = B^{1/2}z`. + + Parameters + ---------- + rng : + Generator for the white noise sample. + If not provided then ``self.rng`` will be used. + tensor : + Optional location to place the result into. + + Returns + ------- + firedrake.function.Function : + The sample. + """ + raise NotImplementedError( + "Need to implementation for sampling w~N(0, B), for" + " example by calculating w=B^{1/2}z with z~N(0, I)") + + def norm(self, x: Function): + r"""Return the weighted norm :math:`\|x\|_{B^{-1}} = x^{T}B^{-1}x`. + + Default implementation uses ``apply_inverse`` to first calculate + the :class:`~firedrake.cofunction.Cofunction` :math:`y = B^{-1}x`, + then returns :math:`y(x)`. + + Inheriting classes may provide more efficient specialisations. + + Parameters + ---------- + x : + The :class:`~firedrake.function.Function` to take the norm of. + + Returns + ------- + pyadjoint.AdjFloat : + The norm of ``x``. 
""" + return self.apply_inverse(x)(x) + + def apply_inverse(self, x: Function, *, + tensor: Cofunction | None = None): + r"""Return :math:`y = B^{-1}x` where B is the covariance operator. + :math:`B^{-1}: V \to V^{*}`. + + Parameters + ---------- + x : + The :class:`~firedrake.function.Function` to apply the inverse to. + tensor : + Optional location to place the result into. + + Returns + ------- + firedrake.cofunction.Cofunction : + The result of :math:`B^{-1}x` + """ + raise NotImplementedError( + "Inverse of B not implemented. You probably" + " also want to implement apply_action.") + + def apply_action(self, x: Cofunction, *, + tensor: Function | None = None): + r"""Return :math:`y = Bx` where B is the covariance operator. + :math:`B: V^{*} \to V`. + + Parameters + ---------- + x : + The :class:`~firedrake.cofunction.Cofunction` to apply + the action to. + tensor : + Optional location to place the result into. + + Returns + ------- + firedrake.function.Function : + The result of :math:`Bx` + """ + raise NotImplementedError( + "Action of B not implemented. You probably" + " also want to implement apply_inverse.") + + +class MixedCovarianceOperator(CovarianceOperatorBase): + """ + A block-diagonal covariance operator that acts component-wise on a mixed function space. + + The norm, sample, action, and inverse methods of this covariance operator will + apply the corresponding methods of each subcovariance operator to each component of the mixed space. + + Parameters + ---------- + W : + The MixedFunctionSpace that this covariance operator acts on. + subcovariances : + The covariance operators for each component of W.
 + + See Also + -------- + CovarianceOperatorBase + CovarianceMat + ~firedrake.preconditioners.covariance.CovariancePC + """ + def __init__(self, W: WithGeometry, subcovariances: Iterable[CovarianceOperatorBase]): + if len(subcovariances) != len(W.subspaces): + raise ValueError( + "Need one covariance operator per component of mixed space") + if not all(isinstance(cov, CovarianceOperatorBase) + for cov in subcovariances): + raise TypeError( + "All covariance operators must be a CovarianceOperatorBase") + if not all(cov.function_space() == Wsub + for cov, Wsub in zip(subcovariances, W.subspaces)): + raise ValueError( + "Covariance function spaces must match W subspaces") + + self._W = W + self._subcovariances = subcovariances + self._rngs = [cov.rng() for cov in self.subcovariances] + + def function_space(self): + return self._W + + @property + def subcovariances(self): + """The covariance operators for each component of the mixed space.""" + return self._subcovariances + + def rng(self): + return self._rngs + + def sample(self, rng=None, tensor=None): + tensor = tensor or Function(self.function_space()) + for cov, tsub, rsub in zip(self.subcovariances, + tensor.subfunctions, + rng or self.rng()): + cov.sample(rng=rsub, tensor=tsub) + return tensor + + def norm(self, x): + return sum(cov.norm(xsub) + for cov, xsub in zip(self.subcovariances, + x.subfunctions)) + + def apply_inverse(self, x, tensor=None): + tensor = tensor or Cofunction(self.function_space().dual()) + for cov, xsub, tsub in zip(self.subcovariances, + x.subfunctions, + tensor.subfunctions): + cov.apply_inverse(xsub, tensor=tsub) + return tensor + + def apply_action(self, x, tensor=None): + tensor = tensor or Function(self.function_space()) + for cov, xsub, tsub in zip(self.subcovariances, + x.subfunctions, + tensor.subfunctions): + cov.apply_action(xsub, tensor=tsub) + return tensor + + +class AutoregressiveCovariance(CovarianceOperatorBase): + r""" + An m-th order autoregressive covariance operator
using an implicit diffusion operator. + + Covariance operator B with a kernel that is the ``m``-th autoregressive + function can be calculated using ``m`` Backward Euler steps of a + diffusion operator, where the diffusion coefficient is specified by + the desired correlation lengthscale. + + If :math:`M` is the mass matrix, :math:`K` is the matrix for a single + Backward Euler step, and :math:`\lambda` is a normalisation factor, then the + m-th order correlation operator (unit variance) is: + + .. math:: + + B: V^{*} \to V = \lambda((K^{-1}M)^{m}M^{-1})\lambda + + B^{-1}: V \to V^{*} = (1/\lambda)M(M^{-1}K)^{m}(1/\lambda) + + This formulation leads to an efficient implementations for :math:`B^{1/2}` + by taking only m/2 steps of the diffusion operator. This can be used + to calculate weighted norms and sample from :math:`\mathcal{N}(0,B)`. + + .. math:: + + \|x\|_{B^{-1}} = \|(M^{-1}K)^{m/2}(1/\lambda)x\|_{M} + + w = B^{1/2}z = \lambda M^{-1}(MK^{-1})^{m/2}(M^{1/2}z) + + The white noise sample :math:`M^{1/2}z` is generated by a + :class:`.WhiteNoiseGenerator`. + + Parameters + ---------- + V : + The function space that the covariance operator maps into. + L : + The correlation lengthscale. + sigma : + The standard deviation. + m : + The number of diffusion operator steps. + Equal to the order of the autoregressive function kernel. + rng : + White noise generator to seed generating correlated samples. + seed : + Seed for the :mod:`RandomGenerator `. + Ignored if ``rng`` is given. + form : AutoregressiveCovariance.DiffusionForm | ufl.Form | None + The diffusion formulation or form. If a ``DiffusionForm`` then + :func:`.diffusion_form` will be used to generate the diffusion + form. Otherwise assumed to be a ufl.Form on ``V``. + Defaults to ``AutoregressiveCovariance.DiffusionForm.CG``. + weight : + Weighting to normalise the diffusion operator into a correlation operator. + Defaults to 1. Only used if ``form`` is a ``ufl.Form``. 
+ bcs : + Boundary conditions for the diffusion operator. + solver_parameters : + The PETSc options for the diffusion operator solver. + options_prefix : + The options prefix for the diffusion operator solver. + mass_parameters : + The PETSc options for the mass matrix solver. + mass_prefix : + The options prefix for the matrix matrix solver. + + References + ---------- + Mirouze, I. and Weaver, A. T., 2010: "Representation of correlation + functions in variational assimilation using an implicit diffusion + operator". Q. J. R. Meteorol. Soc. 136: 1421–1443, July 2010 Part B. + https://doi.org/10.1002/qj.643 + + See Also + -------- + WhiteNoiseGenerator + CovarianceOperatorBase + CovarianceMat + ~firedrake.preconditioners.covariance.CovariancePC + diffusion_form + """ + + class DiffusionForm(Enum): + """ + The diffusion operator formulation. + + See Also + -------- + diffusion_form + """ + CG = 'CG' + IP = 'IP' + + def __init__(self, V: WithGeometry, L: float | Constant, + sigma: float | Constant = 1., m: int = 2, + rng: WhiteNoiseGenerator | None = None, + seed: int | None = None, + form=None, weight: Constant | None = None, + bcs: BCBase | Iterable[BCBase] | None = None, + solver_parameters: dict | None = None, + options_prefix: str | None = None, + mass_parameters: dict | None = None, + mass_prefix: str | None = None): + + form = form or self.DiffusionForm.CG + if isinstance(form, str): + form = self.DiffusionForm(form) + + self._rng = rng or WhiteNoiseGenerator(V, seed=seed) + self._function_space = self.rng().function_space + + if L < 0: + raise ValueError("Correlation lengthscale must be positive.") + if m < 0: + raise ValueError("Number of iterations must be positive.") + if (m % 2) != 0: + raise ValueError("Number of iterations must be even.") + + self.stddev = sigma + self.lengthscale = L + self.iterations = m + + if self.iterations > 0: + # Calculate diffusion operator parameters + + # setup diffusion solver + u, v = TrialFunction(V), TestFunction(V) + if 
isinstance(form, self.DiffusionForm): + self.kappa = Constant(kappa_m(L, m)) + self.lambda_m = Constant(lambda_m(L, m)) + self._weight = Constant(sigma*sqrt(self.lambda_m)) + K = diffusion_form(u, v, self.kappa, formulation=form) + else: + K = form + self._weight = weight or Constant(1.0) + + M = inner(u, v)*dx + + self._u = Function(V) + self._urhs = Function(V) + + self._Mrhs = action(M, self._urhs) + self._Krhs = action(K, self._urhs) + + self.solver = LinearVariationalSolver( + LinearVariationalProblem(K, self._Mrhs, self._u, bcs=bcs, + constant_jacobian=True), + solver_parameters=solver_parameters, + options_prefix=options_prefix) + + self.mass_solver = LinearVariationalSolver( + LinearVariationalProblem(M, self._Krhs, self._u, bcs=bcs, + constant_jacobian=True), + solver_parameters=mass_parameters, + options_prefix=mass_prefix) + + def function_space(self): + return self._function_space + + def rng(self): + return self._rng + + def sample(self, *, rng: WhiteNoiseGenerator | None = None, + tensor: Function | None = None): + tensor = tensor or Function(self.function_space()) + rng = rng or self.rng() + + if self.iterations == 0: + w = rng.sample(apply_riesz=True) + return tensor.assign(self.stddev*w) + + w = rng.sample(apply_riesz=True) + self._u.assign(w) + + for i in range(self.iterations//2): + self._urhs.assign(self._u) + self.solver.solve() + + return tensor.assign(self._weight*self._u) + + def norm(self, x: Function): + if self.iterations == 0: + sigma_x = (1/self.stddev)*x + return assemble(inner(sigma_x, sigma_x)*dx) + + lamda1 = 1/self._weight + self._u.assign(lamda1*x) + + for i in range(self.iterations//2): + self._urhs.assign(self._u) + self.mass_solver.solve() + + return assemble(inner(self._u, self._u)*dx) + + def apply_inverse(self, x: Function, *, + tensor: Cofunction | None = None): + tensor = tensor or Cofunction(self.function_space().dual()) + + if self.iterations == 0: + riesz_map = self.rng().backend.riesz_map + Cx = 
x.riesz_representation(riesz_map)
+            variance1 = 1/(self.stddev*self.stddev)
+            return tensor.assign(variance1*Cx)
+
+        lamda1 = 1/self._weight
+        self._u.assign(lamda1*x)
+
+        for i in range(self.iterations):
+            self._urhs.assign(self._u)
+            if i != self.iterations - 1:
+                self.mass_solver.solve()
+        b = assemble(self._Krhs)
+
+        return tensor.assign(lamda1*b)
+
+    def apply_action(self, x: Cofunction, *,
+                     tensor: Function | None = None):
+        tensor = tensor or Function(self.function_space())
+
+        riesz_map = self.rng().backend.riesz_map
+        Cx = x.riesz_representation(riesz_map)
+
+        if self.iterations == 0:
+            variance = self.stddev*self.stddev
+            return tensor.assign(variance*Cx)
+
+        self._u.assign(self._weight*Cx)
+
+        for i in range(self.iterations):
+            self._urhs.assign(self._u)
+            self.solver.solve()
+
+        return tensor.assign(self._weight*self._u)
+
+
+def diffusion_form(u, v, kappa: Constant | Function,
+                   formulation: AutoregressiveCovariance.DiffusionForm,
+                   cell_size=None):
+    """
+    Convenience function for common diffusion forms.
+
+    Currently provides:
+
+    - Standard continuous Galerkin form.
+
+    - Interior penalty method for discontinuous spaces.
+
+
+    Parameters
+    ----------
+    u :
+        :func:`~firedrake.ufl_expr.TrialFunction` to construct diffusion form with.
+    v :
+        :func:`~firedrake.ufl_expr.TestFunction` to construct diffusion form with.
+    kappa :
+        The diffusion coefficient.
+    formulation :
+        The type of diffusion form.
+    cell_size :
+        The cell size used to calculate the interior penalty stabilisation.
+        Defaults to ``CellSize(mesh)``. Ignored if formulation is ``CG``.
+
+    Returns
+    -------
+    ufl.Form :
+        The diffusion form over u and v.
+
+    Raises
+    ------
+    ValueError
+        Unrecognised formulation.
+
+    See Also
+    --------
+    AutoregressiveCovariance
+    AutoregressiveCovariance.DiffusionForm
+    """
+    if formulation == AutoregressiveCovariance.DiffusionForm.CG:
+        return inner(u, v)*dx + inner(kappa*grad(u), grad(v))*dx
+
+    elif formulation == AutoregressiveCovariance.DiffusionForm.IP:
+        mesh = v.function_space().mesh()
+        n = FacetNormal(mesh)
+        h = cell_size or CellSize(mesh)
+        h_avg = 0.5*(h('+') + h('-'))
+        alpha_h = Constant(4.0)/h_avg
+        return (
+            inner(u, v)*dx + kappa*(
+                inner(grad(u), grad(v))*dx
+                - inner(avg(2*outer(u, n)), avg(grad(v)))*dS
+                - inner(avg(grad(u)), avg(2*outer(v, n)))*dS
+                + alpha_h*inner(avg(2*outer(u, n)), avg(2*outer(v, n)))*dS
+            )
+        )
+
+    else:
+        raise ValueError(f"Unknown AutoregressiveCovariance.DiffusionForm {formulation}")
+
+
+class CovarianceMatCtx:
+    r"""
+    A python Mat context for a covariance operator.
+    Can apply either the action or inverse of the covariance.
+
+    .. math::
+
+        B: V^{*} \to V
+
+        B^{-1}: V \to V^{*}
+
+    Parameters
+    ----------
+    covariance :
+        The covariance operator.
+    operation : CovarianceMatCtx.Operation
+        Whether the matrix applies the action or inverse of the covariance operator.
+        Defaults to ``Operation.ACTION``.
+
+    See Also
+    --------
+    CovarianceOperatorBase
+    AutoregressiveCovariance
+    CovarianceMat
+    ~firedrake.preconditioners.covariance.CovariancePC
+    """
+    class Operation(Enum):
+        """
+        The covariance operation to apply with this Mat.
+ + See Also + -------- + CovarianceOperatorBase + AutoregressiveCovariance + CovarianceMat + ~firedrake.preconditioners.covariance.CovariancePC + """ + ACTION = 'action' + INVERSE = 'inverse' + + def __init__(self, covariance: CovarianceOperatorBase, operation=None): + operation = self.Operation(operation or self.Operation.ACTION) + + V = covariance.function_space() + self.function_space = V + self.comm = V.mesh().comm + self.covariance = covariance + self.operation = operation + + primal = Function(V) + dual = Function(V.dual()) + + if operation == self.Operation.ACTION: + self.x = dual + self.y = primal + self._mult_op = covariance.apply_action + elif operation == self.Operation.INVERSE: + self.x = primal + self.y = dual + self._mult_op = covariance.apply_inverse + else: + raise ValueError( + f"Unrecognised CovarianceMat operation {operation}") + + def mult(self, mat, x, y): + """Apply the action or inverse of the covariance operator + to x, putting the result in y. + + y is not guaranteed to be zero on entry. + + Parameters + ---------- + A : PETSc.Mat + The PETSc matrix that self is the python context of. + x : PETSc.Vec + The vector acted on by the matrix. + y : PETSc.Vec + The result of the matrix action. + """ + with self.x.dat.vec_wo as v: + x.copy(result=v) + + self._mult_op(self.x, tensor=self.y) + + with self.y.dat.vec_ro as v: + v.copy(result=y) + + def view(self, mat, viewer=None): + """View object. Method usually called by PETSc with e.g. -ksp_view. 
+ """ + if viewer is None: + return + if viewer.getType() != PETSc.Viewer.Type.ASCII: + return + + viewer.printfASCII(f" firedrake covariance operator matrix: {type(self).__name__}\n") + viewer.printfASCII(f" Applying the {str(self.operation)} of the covariance operator {type(self.covariance).__name__}\n") + + if (type(self.covariance) is AutoregressiveCovariance) and (self.covariance.iterations > 0): + viewer.printfASCII(" Autoregressive covariance operator with:\n") + viewer.printfASCII(f" order: {self.covariance.iterations}\n") + viewer.printfASCII(f" correlation lengthscale: {self.covariance.lengthscale}\n") + viewer.printfASCII(f" standard deviation: {self.covariance.stddev}\n") + + if viewer.getFormat() == PETSc.Viewer.Format.ASCII_INFO_DETAIL: + if self.operation == self.Operation.ACTION: + viewer.printfASCII(" Information for the diffusion solver for applying the action:\n") + ksp = self.covariance.solver.snes.ksp + elif self.operation == self.Operation.INVERSE: + viewer.printfASCII(" Information for the mass solver for applying the inverse:\n") + ksp = self.covariance.mass_solver.snes.ksp + viewer.pushASCIITab() + ksp.view(viewer) + viewer.popASCIITab() + else: + prefix = mat.getOptionsPrefix() or "" + viewer.printfASCII(f" Use -{prefix}ksp_view ::ascii_info_detail to display information for diffusion or mass solver.\n") + + +def CovarianceMat(covariance: CovarianceOperatorBase, + operation: CovarianceMatCtx.Operation | None = None): + r""" + A Mat for a covariance operator. + Can apply either the action or inverse of the covariance. + This is a convenience function to create a PETSc.Mat with a :class:`.CovarianceMatCtx` Python context. + + .. math:: + + B: V^{*} \to V + + B^{-1}: V \to V^{*} + + Parameters + ---------- + covariance : + The covariance operator. + operation : CovarianceMatCtx.Operation + Whether the matrix applies the action or inverse of the covariance operator. 
+ + Returns + ------- + PETSc.Mat : + The python type Mat with a :class:`CovarianceMatCtx` context. + + See Also + -------- + CovarianceOperatorBase + AutoregressiveCovariance + CovarianceMatCtx + CovarianceMatCtx.Operation + ~firedrake.preconditioners.covariance.CovariancePC + """ + ctx = CovarianceMatCtx(covariance, operation=operation) + + sizes = covariance.function_space().dof_dset.layout_vec.getSizes() + + mat = PETSc.Mat().createPython( + (sizes, sizes), ctx, comm=ctx.comm) + mat.setUp() + mat.assemble() + return mat + + +CovarianceMat.Operation = CovarianceMatCtx.Operation diff --git a/firedrake/adjoint/transformed_functional.py b/firedrake/adjoint/transformed_functional.py new file mode 100644 index 0000000000..ce739edf4d --- /dev/null +++ b/firedrake/adjoint/transformed_functional.py @@ -0,0 +1,486 @@ +from collections.abc import Sequence +from contextlib import contextmanager +from numbers import Real +from operator import itemgetter +from typing import Optional, Union + +import firedrake as fd +from firedrake.adjoint import Control, ReducedFunctional, Tape +from firedrake.functionspaceimpl import WithGeometry +import finat +import pyadjoint +from pyadjoint import no_annotations +from pyadjoint.enlisting import Enlist +from pyadjoint.reduced_functional import AbstractReducedFunctional +import ufl + +__all__ = \ + [ + "L2RieszMap", + "L2TransformedFunctional" + ] + + +@contextmanager +def local_vector(u, *, readonly=False): + u_local = u.createLocalVector() + u.getLocalVector(u_local, readonly=readonly) + yield u_local + u.restoreLocalVector(u_local, readonly=readonly) + + +class L2Cholesky: + """Mass matrix Cholesky factorization for a (real) DG space. + + Parameters + ---------- + + space + DG space. + constant_jacobian + Whether the mass matrix is constant. 
+ """ + + def __init__(self, space: WithGeometry, *, constant_jacobian: Optional[bool] = True): + if fd.utils.complex_mode: + raise NotImplementedError("complex not supported") + + self._space = space + self._constant_jacobian = constant_jacobian + self._cached_pc = None + + @property + def space(self) -> fd.functionspaceimpl.WithGeometry: + """Function space. + """ + + return self._space + + def _pc(self): + import petsc4py.PETSc as PETSc + + if self._cached_pc is None: + M = fd.assemble(fd.inner(fd.TrialFunction(self.space), fd.TestFunction(self.space)) * fd.dx, + mat_type="aij") + M_local = M.petscmat.getDiagonalBlock() + + pc = PETSc.PC().create(M_local.comm) + pc.setType(PETSc.PC.Type.CHOLESKY) + pc.setFactorSolverType(PETSc.Mat.SolverType.PETSC) + pc.setOperators(M_local) + pc.setUp() + + if self._constant_jacobian: + self._cached_pc = M, M_local, pc + else: + _, _, pc = self._cached_pc + + return pc + + def C_inv_action(self, u: Union[fd.Function, fd.Cofunction]) -> fd.Cofunction: + r"""For the Cholesky factorization + + ... math : + + M = C C^T, + + compute the action of :math:`C^{-1}`. + + Parameters + ---------- + + u + Compute :math:`C^{-1} \tilde{u}` where :math:`\tilde{u}` is the + vector of degrees of freedom for :math:`u`. + + Returns + ------- + + firedrake.cofunction.Cofunction + Has vector of degrees of freedom :math:`C^{-1} \tilde{u}`. + """ + + pc = self._pc() + v = fd.Cofunction(self.space.dual()) + with u.dat.vec_ro as u_v, v.dat.vec_wo as v_v: + with local_vector(u_v, readonly=True) as u_v_s, local_vector(v_v) as v_v_s: + pc.applySymmetricLeft(u_v_s, v_v_s) + return v + + def C_T_inv_action(self, u: Union[fd.Function, fd.Cofunction]) -> fd.Function: + r"""For the Cholesky factorization + + ... math : + + M = C C^T, + + compute the action of :math:`C^{-T}`. + + Parameters + ---------- + + u + Compute :math:`C^{-T} \tilde{u}` where :math:`\tilde{u}` is the + vector of degrees of freedom for :math:`u`. 
+ + Returns + ------- + + firedrake.function.Function + Has vector of degrees of freedom :math:`C^{-T} \tilde{u}`. + """ + + pc = self._pc() + v = fd.Function(self.space) + with u.dat.vec_ro as u_v, v.dat.vec_wo as v_v: + with local_vector(u_v, readonly=True) as u_v_s, local_vector(v_v) as v_v_s: + pc.applySymmetricRight(u_v_s, v_v_s) + return v + + +class L2RieszMap(fd.RieszMap): + """An :math:`L^2` Riesz map. + + Parameters + ---------- + + target + Function space. + kwargs + Keyword arguments are passed to the base class constructor. + """ + + def __init__(self, target: WithGeometry, **kwargs): + if not isinstance(target, fd.functionspaceimpl.WithGeometry): + raise TypeError("Target must be a WithGeometry") + super().__init__(target, ufl.L2, **kwargs) + + +def is_dg_space(space: WithGeometry) -> bool: + """Return whether a function space is DG. + + Parameters + ---------- + + space + The function space. + + Returns + ------- + + bool + Whether the function space is DG. + """ + + e, _ = finat.element_factory.convert(space.ufl_element()) + return e.is_dg() + + +class L2TransformedFunctional(AbstractReducedFunctional): + r"""Represents the functional + + .. math:: + + J \circ \Pi \circ \Xi + + where + + - :math:`J` is the functional definining an optimization problem. + - :math:`\Pi` is the :math:`L^2` projection from a DG space containing + the control space as a subspace. + - :math:`\Xi` represents a change of basis from an :math:`L^2` + orthonormal basis to the finite element basis for the DG space. + + The optimization is therefore transformed into an optimization problem + using an :math:`L^2` orthonormal basis for a DG finite element space. + + The transformation is related to the factorization in section 4.1 of + https://doi.org/10.1137/18M1175239 -- specifically the factorization + in their equation (4.2) can be related to :math:`\Pi \circ \Xi`. + + Parameters + ---------- + + functional + Functional defining the optimization problem, :math:`J`. 
+ controls + Controls. + space_D + DG space containing the control space. + riesz_map + Used for projecting from the DG space onto the control space. Ignored + for DG controls. + alpha + Modifies the functional, equivalent to adding an extra term to + :math:`J \circ \Pi` + + .. math:: + + \frac{1}{2} \alpha \left\| m_D - \Pi ( m_D ) \right\|_{L^2}^2. + + e.g. in a minimization problem this adds a penalty term which can + be used to avoid ill-posedness due to the use of a larger DG space. + tape + Tape used in evaluations involving :math:`J`. + """ + + @no_annotations + def __init__(self, functional: pyadjoint.OverloadedType, controls: Union[Control, Sequence[Control]], *, + space_D: Optional[Union[None, WithGeometry, Sequence[Union[None, WithGeometry]]]] = None, + riesz_map: Optional[Union[L2RieszMap, Sequence[L2RieszMap]]] = None, + alpha: Optional[Real] = 0, + tape: Optional[Tape] = None): + if not all(isinstance(control.control, fd.Function) for control in Enlist(controls)): + raise TypeError("controls must be Function objects") + + super().__init__() + self._J = ReducedFunctional(functional, controls, tape=tape) + + self._space = tuple(control.control.function_space() + for control in self._J.controls) + if space_D is None: + space_D = tuple(None for _ in self._space) + self._space_D = Enlist(space_D) + if len(self._space_D) != len(self._space): + raise ValueError("Invalid length") + self._space_D = tuple((space if is_dg_space(space) else space.broken_space()) + if space_D is None else space_D + for space, space_D in zip(self._space, self._space_D)) + + self._controls = tuple(Control(fd.Function(space_D), riesz_map="l2") + for space_D in self._space_D) + self._controls = Enlist(Enlist(controls).delist(self._controls)) + + if riesz_map is None: + riesz_map = tuple(map(L2RieszMap, self._space)) + self._riesz_map = Enlist(riesz_map) + if len(self._riesz_map) != len(self._controls): + raise ValueError("Invalid length") + self._C = tuple(L2Cholesky(space_D, 
constant_jacobian=riesz_map.constant_jacobian) + for space_D, riesz_map in zip(self._space_D, self._riesz_map)) + + self._alpha = alpha + self._m_k = None + + # Map the initial guess + controls_t = self._dual_transform(tuple(control.control for control in self._J.controls), apply_riesz=False) + for control, control_t in zip(self._controls, controls_t): + control.control.assign(control_t) + + @property + def controls(self) -> Enlist[Control]: + return Enlist(self._controls.delist()) + + def _dual_transform(self, u, u_D=None, *, apply_riesz=False): + u = Enlist(u) + if len(u) != len(self.controls): + raise ValueError("Invalid length") + if u_D is None: + u_D = tuple(None for _ in u) + else: + u_D = Enlist(u_D) + if len(u_D) != len(self.controls): + raise ValueError("Invalid length") + + def transform(C, u, u_D, space, space_D, riesz_map): + if apply_riesz: + if space is space_D: + v = u + else: + v = fd.assemble(fd.inner(riesz_map(u), fd.TestFunction(space_D)) * fd.dx) + else: + v = fd.assemble(fd.inner(u, fd.TestFunction(space_D)) * fd.dx) + if u_D is not None: + v.dat.axpy(1, u_D.dat) + v = C.C_inv_action(v) + return v.riesz_representation("l2") + + v = tuple(map(transform, self._C, u, u_D, self._space, self._space_D, self._riesz_map)) + return u.delist(v) + + def _primal_transform(self, u): + u = Enlist(u) + if len(u) != len(self.controls): + raise ValueError("Invalid length") + + def transform(C, u, space, space_D, riesz_map): + if fd.utils.complex_mode: + # Would need to be adjoint + raise NotImplementedError("complex not supported") + v = C.C_T_inv_action(u) + if space is space_D: + w = v + else: + w = riesz_map(fd.assemble(fd.inner(v, fd.TestFunction(space)) * fd.dx)) + return v, w + + vw = tuple(map(transform, self._C, u, self._space, self._space_D, self._riesz_map)) + return u.delist(tuple(map(itemgetter(0), vw))), u.delist(tuple(map(itemgetter(1), vw))) + + @no_annotations + def map_result(self, m: Union[fd.Function, Sequence[fd.Function]]) -> 
Union[fd.Function, Sequence[fd.Function]]: + """Map the result of an optimization. + + Parameters + ---------- + + m + The result of the optimization. Represents an expansion in the + :math:`L^2` orthonormal basis for the DG space. + + Returns + ------- + + firedrake.function.Function or Sequence[firedrake.function.Function] + The mapped result in the original control space. + """ + + _, m_J = self._primal_transform(m) + return m_J + + @no_annotations + def __call__(self, values: Union[fd.Function, Sequence[fd.Function]]) -> pyadjoint.AdjFloat: + """Evaluate the functional. + + Parameters + --------- + + value + Control values. + + Returns + ------- + + pyadjoint.AdjFloat + The functional value. + """ + + values = Enlist(values) + m_D, m_J = self._primal_transform(values) + J = self._J(m_J) + if self._alpha != 0: + for space, space_D, m_D_i, m_J_i in zip(self._space, self._space_D, m_D, m_J): + if space is not space_D: + J += fd.assemble(0.5 * fd.Constant(self._alpha) * fd.inner(m_D_i - m_J_i, m_D_i - m_J_i) * fd.dx) + self._m_k = m_D, m_J + return J + + @no_annotations + def derivative(self, adj_input: Optional[Real] = 1.0, + apply_riesz: Optional[bool] = False) -> Union[fd.Function, fd.Cofunction, list[fd.Function, fd.Cofunction]]: + """Evaluate the derivative. + + Parameters + --------- + + adj_value + Not supported. + apply_riesz + Whether to apply the Riesz map to the result. + + Returns + ------- + + firedrake.function.Function, firedrake.cofunction.Cofunction, or list[firedrake.function.Function or firedrake.cofunction.Cofunction] + The derivative. 
+ """ + + if not isinstance(adj_input, Real) or adj_input != 1: + raise NotImplementedError("adj_input != 1 not supported") + + u = Enlist(self._J.derivative()) + + if self._alpha == 0: + v_alpha = None + else: + v_alpha = [] + for space, space_D, m_D, m_J in zip(self._space, self._space_D, *self._m_k): + if space is space_D: + v_alpha.append(None) + else: + if fd.utils.complex_mode: + raise RuntimeError("Not complex differentiable") + v_alpha.append(fd.assemble(fd.Constant(self._alpha) * fd.inner(m_D - m_J, fd.TestFunction(space_D)) * fd.dx)) + v = self._dual_transform(u, v_alpha, apply_riesz=True) + if apply_riesz: + v = tuple(v_i._ad_convert_riesz(v_i, riesz_map=control.riesz_map) + for v_i, control in zip(v, self.controls)) + return u.delist(v) + + @no_annotations + def hessian(self, m_dot: Union[fd.Function, Sequence[fd.Function]], + hessian_input: Optional[None] = None, evaluate_tlm: Optional[bool] = True, + apply_riesz: Optional[bool] = False) -> Union[fd.Function, fd.Cofunction, list[fd.Function, fd.Cofunction]]: + """Evaluate the Hessian action. + + Parameters + ---------- + + m_dot + Action direction. + hessian_input + Not supported. + evaluate_tlm + Whether to re-evaluate the tangent-linear. + apply_riesz + Whether to apply the Riesz map to the result. + + Returns + ------- + + firedrake.function.Function, firedrake.cofunction.Cofunction, or list[firedrake.function.Function or firedrake.cofunction.Cofunction] + The Hessian action. 
+ """ + + if hessian_input is not None: + raise NotImplementedError("hessian_input not None not supported") + + m_dot = Enlist(m_dot) + m_dot_D, m_dot_J = self._primal_transform(m_dot) + u = Enlist(self._J.hessian(m_dot.delist(m_dot_J), evaluate_tlm=evaluate_tlm)) + + if self._alpha == 0: + v_alpha = None + else: + v_alpha = [] + for space, space_D, m_dot_D_i, m_dot_J_i in zip(self._space, self._space_D, m_dot_D, m_dot_J): + if space is space_D: + v_alpha.append(None) + else: + if fd.utils.complex_mode: + raise RuntimeError("Not complex differentiable") + v_alpha.append(fd.assemble(fd.Constant(self._alpha) * fd.inner(m_dot_D_i - m_dot_J_i, fd.TestFunction(space_D)) * fd.dx)) + v = self._dual_transform(u, v_alpha, apply_riesz=True) + if apply_riesz: + v = tuple(v_i._ad_convert_riesz(v_i, riesz_map=control.riesz_map) + for v_i, control in zip(v, self.controls)) + return u.delist(v) + + @no_annotations + def tlm(self, m_dot: Union[fd.Function, Sequence[fd.Function]]) -> Union[fd.Function, list[fd.Function]]: + """Evaluate a Jacobian action. + + Parameters + ---------- + + m_dot + Action direction. + + Returns + ------- + + firedrake.function.Function or list[firedrake.function.Function] + The Jacobian action. + """ + + m_dot = Enlist(m_dot) + m_dot_D, m_dot_J = self._primal_transform(m_dot) + tau_J = self._J.tlm(m_dot.delist(m_dot_J)) + + if self._alpha != 0: + for space, space_D, m_dot_D_i, m_D, m_J in zip(self._space, self._space_D, m_dot_D, *self._m_k): + if space is not space_D: + if fd.utils.complex_mode: + raise RuntimeError("Not complex differentiable") + tau_J += fd.assemble(fd.Constant(self._alpha) * fd.inner(m_D - m_J, m_dot_D_i) * fd.dx) + return tau_J diff --git a/firedrake/adjoint_utils/__init__.py b/firedrake/adjoint_utils/__init__.py index fba6df5d74..e1a31c8f1b 100644 --- a/firedrake/adjoint_utils/__init__.py +++ b/firedrake/adjoint_utils/__init__.py @@ -5,11 +5,19 @@ :mod:`firedrake.adjoint`. 
""" -from firedrake.adjoint_utils.function import * # noqa: F401 -from firedrake.adjoint_utils.assembly import * # noqa: F401 -from firedrake.adjoint_utils.projection import * # noqa: F401 -from firedrake.adjoint_utils.variational_solver import * # noqa: F401 -from firedrake.adjoint_utils.solving import * # noqa: F401 -from firedrake.adjoint_utils.mesh import * # noqa: F401 -from firedrake.adjoint_utils.checkpointing import * # noqa: F401 -from firedrake.adjoint_utils.ensemble_function import * # noqa: F401 +from firedrake.adjoint_utils.function import ( # noqa F401 + FunctionMixin, CofunctionMixin +) +from firedrake.adjoint_utils.assembly import annotate_assemble # noqa F401 +from firedrake.adjoint_utils.projection import annotate_project # noqa F401 +from firedrake.adjoint_utils.variational_solver import ( # noqa F401 + NonlinearVariationalProblemMixin, NonlinearVariationalSolverMixin +) +from firedrake.adjoint_utils.solving import annotate_solve, get_solve_blocks # noqa F401 +from firedrake.adjoint_utils.mesh import MeshGeometryMixin # noqa F401 +from firedrake.adjoint_utils.checkpointing import ( # noqa F401 + enable_disk_checkpointing, disk_checkpointing, + pause_disk_checkpointing, continue_disk_checkpointing, + stop_disk_checkpointing, checkpointable_mesh +) +from firedrake.adjoint_utils.ensemble_function import EnsembleFunctionMixin # noqa F401 diff --git a/firedrake/adjoint_utils/blocks/__init__.py b/firedrake/adjoint_utils/blocks/__init__.py index bf83b896cc..3bbc926ce9 100644 --- a/firedrake/adjoint_utils/blocks/__init__.py +++ b/firedrake/adjoint_utils/blocks/__init__.py @@ -1,9 +1,12 @@ -from .assembly import AssembleBlock # NOQA F401 -from .solving import GenericSolveBlock, SolveLinearSystemBlock, \ - ProjectBlock, SupermeshProjectBlock, SolveVarFormBlock, \ - NonlinearVariationalSolveBlock # NOQA F401 -from .function import FunctionAssignBlock, FunctionMergeBlock, \ - SubfunctionBlock # NOQA F401 -from .dirichlet_bc import DirichletBCBlock # NOQA 
F401 -from .constant import ConstantAssignBlock # NOQA F401 -from .mesh import MeshInputBlock, MeshOutputBlock # NOQA F401 +from firedrake.adjoint_utils.blocks.assembly import AssembleBlock # noqa F401 +from firedrake.adjoint_utils.blocks.solving import ( # noqa F401 + GenericSolveBlock, SolveLinearSystemBlock, ProjectBlock, + SupermeshProjectBlock, SolveVarFormBlock, + NonlinearVariationalSolveBlock +) +from firedrake.adjoint_utils.blocks.function import ( # noqa F401 + FunctionAssignBlock, FunctionMergeBlock, SubfunctionBlock +) +from firedrake.adjoint_utils.blocks.dirichlet_bc import DirichletBCBlock # noqa F401 +from firedrake.adjoint_utils.blocks.constant import ConstantAssignBlock # noqa F401 +from firedrake.adjoint_utils.blocks.mesh import MeshInputBlock, MeshOutputBlock # noqa F401 diff --git a/firedrake/adjoint_utils/checkpointing.py b/firedrake/adjoint_utils/checkpointing.py index cd377afa49..a18eac5521 100644 --- a/firedrake/adjoint_utils/checkpointing.py +++ b/firedrake/adjoint_utils/checkpointing.py @@ -6,6 +6,7 @@ import os import shutil import atexit +import warnings from abc import ABC, abstractmethod from numbers import Number _enable_disk_checkpoint = False @@ -49,7 +50,8 @@ def __exit__(self, *args): _checkpoint_init_data = self._init -def enable_disk_checkpointing(dirname=None, comm=COMM_WORLD, cleanup=True): +def enable_disk_checkpointing(dirname=None, comm=COMM_WORLD, cleanup=True, + checkpoint_comm=None, checkpoint_dir=None): """Add a DiskCheckpointer to the current tape. Disk checkpointing is fully enabled by calling:: @@ -68,12 +70,18 @@ def enable_disk_checkpointing(dirname=None, comm=COMM_WORLD, cleanup=True): `checkpoint_schedules` provides other schedules for checkpointing to memory, disk, or a combination of both. 
+ For HPC systems with fast node-local storage, function data can be + checkpointed on a sub-communicator to avoid parallel HDF5 overhead:: + + enable_disk_checkpointing(checkpoint_comm=MPI.COMM_SELF, + checkpoint_dir="/local/scratch") + Parameters ---------- dirname : str - The directory in which the disk checkpoints should be stored. If not - specified then the current working directory is used. Checkpoints are - stored in a temporary subdirectory of this directory. + The directory in which the shared disk checkpoints should be stored. + If not specified then the current working directory is used. + Checkpoints are stored in a temporary subdirectory of this directory. comm : mpi4py.MPI.Intracomm The MPI communicator over which the computation to be disk checkpointed is defined. This will usually match the communicator on which the @@ -81,10 +89,29 @@ def enable_disk_checkpointing(dirname=None, comm=COMM_WORLD, cleanup=True): cleanup : bool If set to False, checkpoint files will not be deleted when no longer required. This is usually only useful for debugging. + checkpoint_comm : mpi4py.MPI.Intracomm or None + If specified, function data is checkpointed using PETSc Vec I/O on + this communicator instead of using Firedrake's CheckpointFile. This + bypasses parallel HDF5 and is ideal for node-local storage on HPC + systems. Passing ``MPI.COMM_SELF`` gives each rank its own file, + while a shared node communicator groups ranks that share storage. + The mesh checkpoint (via ``checkpointable_mesh``) always uses shared + storage. Requires the same communicator layout on restore. + checkpoint_dir : str or None + The directory in which checkpoint_comm files are stored. Only used + when ``checkpoint_comm`` is not None. Each group of ranks sharing + a checkpoint_comm creates a temporary subdirectory here. This + directory must be accessible from all ranks within each + checkpoint_comm group. 
For example, using a node-local path like + /tmp is safe when checkpoint_comm groups ranks on the same node, + but would fail if checkpoint_comm spans nodes whose filesystems + are not shared. """ tape = get_working_tape() if "firedrake" not in tape._package_data: - tape._package_data["firedrake"] = DiskCheckpointer(dirname, comm, cleanup) + tape._package_data["firedrake"] = DiskCheckpointer( + dirname, comm, cleanup, checkpoint_comm, checkpoint_dir + ) def disk_checkpointing(): @@ -120,14 +147,29 @@ def __exit__(self, *args): class CheckPointFileReference: """A filename which deletes the associated file when it is destroyed.""" - def __init__(self, name, comm, cleanup=False): + def __init__(self, name, comm, cleanup=False, checkpoint_comm=None): self.name = name self.comm = comm self.cleanup = cleanup + self.checkpoint_comm = checkpoint_comm def __del__(self): - if self.cleanup and self.comm.rank == 0 and os.path.exists(self.name): - os.remove(self.name) + if self.cleanup and os.path.exists(self.name): + if self.comm.rank == 0: + os.remove(self.name) + # Prune the index-tracking entry for this file from CheckpointFunction. + # This is safe for the following reasons: + # (1) CheckpointFunction holds self.file as a direct strong reference, + # so __del__ here can only fire after every CheckpointFunction that + # wrote to this filepath has already been garbage-collected. + # (2) restore() never reads _checkpoint_indices — it uses stored_name + # and stored_index baked into the CheckpointFunction at save time. + # (3) Under revolve schedules the tape checkpoint store holds the + # CheckPointFileReference alive until forward re-execution is done, + # so there is no risk of premature pruning. + # (4) pop is a no-op for init files where no CheckpointFunction ever + # wrote an entry (e.g. checkpointable_mesh files). 
+ CheckpointFunction._checkpoint_indices.pop(self.name, None) class DiskCheckpointer(TapePackageData): @@ -136,9 +178,9 @@ class DiskCheckpointer(TapePackageData): Parameters ---------- dirname : str - The directory in which the disk checkpoints should be stored. If not - specified then the current working directory is used. Checkpoints are - stored in a temporary subdirectory of this directory. + The directory in which the shared disk checkpoints should be stored. + If not specified then the current working directory is used. + Checkpoints are stored in a temporary subdirectory of this directory. comm : mpi4py.MPI.Intracomm The MPI communicator over which the computation to be disk checkpointed is defined. This will usually match the communicator on which the @@ -146,42 +188,123 @@ class DiskCheckpointer(TapePackageData): cleanup : bool If set to False, checkpoint files will not be deleted when no longer required. This is usually only useful for debugging. + checkpoint_comm : mpi4py.MPI.Intracomm or None + If specified, function data is checkpointed on this communicator. + checkpoint_dir : str or None + Directory for checkpoint_comm files. This directory must be + accessible from all ranks within each checkpoint_comm group. + For example, using a node-local path like /tmp is safe when + checkpoint_comm groups ranks on the same node, but would fail + if checkpoint_comm spans nodes whose filesystems are not shared. """ - def __init__(self, dirname=None, comm=COMM_WORLD, cleanup=True): - - if comm.rank == 0: - self.dirname = comm.bcast(tempfile.mkdtemp( - prefix="firedrake_adjoint_checkpoint_", dir=dirname or os.getcwd() - )) - else: - self.dirname = comm.bcast("") + def __init__(self, dirname=None, comm=COMM_WORLD, cleanup=True, + checkpoint_comm=None, checkpoint_dir=None): + self.checkpoint_comm = checkpoint_comm self.comm = comm self.cleanup = cleanup + + # Shared directory (for mesh checkpoint and init data). 
The bcast + # uses comm (COMM_WORLD) so every rank knows the shared path. + path = tempfile.mkdtemp( + prefix="firedrake_adjoint_checkpoint_", dir=dirname or os.getcwd() + ) if comm.rank == 0 else None + self.dirname = comm.bcast(path) if self.cleanup and comm.rank == 0: - # Delete the checkpoint folder on process exit. + # Delete the shared checkpoint folder on process exit. atexit.register(shutil.rmtree, self.dirname) - # # A checkpoint file holding the state of block variables set outside - # the tape. - self.init_checkpoint_file = self.new_checkpoint_file() - self.current_checkpoint_file = self.new_checkpoint_file() - def new_checkpoint_file(self): - """Set up a disk checkpointing file.""" + # Local directory (for function data on checkpoint_comm). The bcast + # uses checkpoint_comm, not comm: only ranks within the same + # checkpoint_comm group share a local filesystem, so we must not + # perform a COMM_WORLD collective here. + if self.checkpoint_comm is not None: + if checkpoint_dir is None: + warnings.warn( + "checkpoint_comm without checkpoint_dir defaults to cwd, " + "which is usually on the shared filesystem. Without a " + "node-local path the collective CheckpointFile is more " + "suitable. Consider setting checkpoint_dir.", + UserWarning + ) + base_dir = checkpoint_dir or os.getcwd() + if checkpoint_comm.rank == 0: + # ignore_cleanup_errors avoids tracebacks if the finalizer fires + # during interpreter shutdown after MPI has already finalized. + self._local_tmpdir = tempfile.TemporaryDirectory( + prefix="firedrake_adjoint_checkpoint_cc_", + dir=base_dir, + ignore_cleanup_errors=True, + ) + local_path = self._local_tmpdir.name + else: + self._local_tmpdir = None + local_path = None + self._local_dirname = checkpoint_comm.bcast(local_path) + else: + self._local_tmpdir = None + self._local_dirname = None + + # A checkpoint file holding the state of block variables set outside + # the tape (always shared, used by checkpointable_mesh). 
+ self.init_checkpoint_file = self._new_shared_checkpoint_file() + self.current_checkpoint_file = self._new_checkpoint_file() + + def __del__(self): + """Cleanup TemporaryDirectory if one was created""" + if self.cleanup: + if self._local_tmpdir is not None: + self._local_tmpdir.cleanup() + + def _new_shared_checkpoint_file(self): + """Set up a shared disk checkpointing file (all ranks use same file).""" from firedrake.checkpointing import CheckpointFile if self.comm.rank == 0: - _, checkpoint_file = tempfile.mkstemp( - dir=self.dirname, suffix=".h5" - ) - checkpoint_file = self.comm.bcast(checkpoint_file) + _, checkpoint_file = tempfile.mkstemp(dir=self.dirname, suffix=".h5") else: - checkpoint_file = self.comm.bcast("") + checkpoint_file = None + checkpoint_file = self.comm.bcast(checkpoint_file) # Let h5py create a file at this location just to be sure. - with CheckpointFile(checkpoint_file, 'w'): + with CheckpointFile(checkpoint_file, 'w', comm=self.comm): pass return CheckPointFileReference(checkpoint_file, self.comm, self.cleanup) + def _new_checkpoint_comm_file(self): + """Set up a checkpoint file on the checkpoint communicator.""" + from firedrake.checkpointing import TemporaryFunctionCheckpointFile + if self.checkpoint_comm.rank == 0: + fd, filepath = tempfile.mkstemp(dir=self._local_dirname, suffix=".h5") + os.close(fd) + else: + filepath = None + filepath = self.checkpoint_comm.bcast(filepath) + # Initialise an empty HDF5 file. Opened in 'w' mode and immediately + # closed so that subsequent 'a' opens from save_function find a valid + # file. 
+ with TemporaryFunctionCheckpointFile(self.checkpoint_comm, filepath, 'w'): + pass + return CheckPointFileReference(filepath, self.checkpoint_comm, self.cleanup, + checkpoint_comm=self.checkpoint_comm) + + def _new_checkpoint_file(self): + """Set up a checkpoint file for function data.""" + if self.checkpoint_comm is not None: + return self._new_checkpoint_comm_file() + else: + return self._new_shared_checkpoint_file() + + def new_checkpoint_file(self): + """Set up a disk checkpointing file.""" + warnings.warn( + "'new_checkpoint_file' is deprecated and will be removed in a " + "future release. Checkpoint file management is now handled " + "internally; to advance to a new checkpoint file call " + "'reset()' on the DiskCheckpointer instead.", + FutureWarning + ) + return self._new_checkpoint_file() + def clear(self, init=True): """Reset the DiskCheckPointer. @@ -198,8 +321,8 @@ def clear(self, init=True): if not self.cleanup: return if init: - self.init_checkpoint_file = self.new_checkpoint_file() - self.current_checkpoint_file = self.new_checkpoint_file() + self.init_checkpoint_file = self._new_shared_checkpoint_file() + self.current_checkpoint_file = self._new_checkpoint_file() def reset(self): self.clear(init=False) @@ -254,9 +377,9 @@ def checkpointable_mesh(mesh): "No current checkpoint file. Call enable_disk_checkpointing()." 
) - with CheckpointFile(checkpoint_file.name, 'a') as outfile: + with CheckpointFile(checkpoint_file.name, 'a', comm=checkpoint_file.comm) as outfile: outfile.save_mesh(mesh) - with CheckpointFile(checkpoint_file.name, 'r') as outfile: + with CheckpointFile(checkpoint_file.name, 'r', comm=checkpoint_file.comm) as outfile: return outfile.load_mesh(mesh.name) @@ -290,7 +413,6 @@ class CheckpointFunction(CheckpointBase, OverloadedType): _checkpoint_indices = {} def __init__(self, function): - from firedrake.checkpointing import CheckpointFile self.name = function.name() self.mesh = function.function_space().mesh() self.file = current_checkpoint_file() @@ -300,31 +422,70 @@ def __init__(self, function): "No current checkpoint file. Call enable_disk_checkpointing()." ) + self.count = function.count() + + # Compute stored_name and stored_index once, shared by both checkpoint + # paths. stored_name encodes the function space (mesh name + element + # family/degree) so that functions on different meshes or spaces never + # collide. stored_index disambiguates successive saves of the same + # space to the same file. 
+ from firedrake.checkpointing import _generate_function_space_name stored_names = CheckpointFunction._checkpoint_indices if self.file.name not in stored_names: stored_names[self.file.name] = {} + self.stored_name = _generate_function_space_name(function.function_space()) + indices = stored_names[self.file.name] + indices.setdefault(self.stored_name, 0) + indices[self.stored_name] += 1 + self.stored_index = indices[self.stored_name] + + if self.file.checkpoint_comm is not None: + self._function_space = function.function_space() + self._save_local_checkpoint(function) + else: + self._save_shared_checkpoint(function) - self.count = function.count() - with CheckpointFile(self.file.name, 'a') as outfile: - self.stored_name = outfile._generate_function_space_name( - function.function_space() - ) - indices = stored_names[self.file.name] - indices.setdefault(self.stored_name, 0) - indices[self.stored_name] += 1 - self.stored_index = indices[self.stored_name] + def _save_shared_checkpoint(self, function): + """Save function data to a shared HDF5 file via CheckpointFile.""" + from firedrake.checkpointing import CheckpointFile + with CheckpointFile(self.file.name, 'a', self.file.comm) as outfile: outfile.save_function(function, name=self.stored_name, idx=self.stored_index) + def _save_local_checkpoint(self, function): + """Save function data to a local HDF5 file via PETSc Vec I/O.""" + from firedrake.checkpointing import TemporaryFunctionCheckpointFile + with TemporaryFunctionCheckpointFile( + self.file.checkpoint_comm, self.file.name, 'a' + ) as outfile: + outfile.save_function(function, self.stored_name, self.stored_index) + def restore(self): """Read and return this Function from the checkpoint.""" - from firedrake.checkpointing import CheckpointFile - with CheckpointFile(self.file.name, 'r') as infile: - function = infile.load_function(self.mesh, self.stored_name, - idx=self.stored_index) + if self.file.checkpoint_comm is not None: + function = 
self._restore_local_checkpoint() + else: + function = self._restore_shared_checkpoint() return type(function)(function.function_space(), function.dat, name=self.name, count=self.count) + def _restore_shared_checkpoint(self): + """Load function data from a shared HDF5 file via :class:`.CheckpointFile`.""" + from firedrake.checkpointing import CheckpointFile + with CheckpointFile(self.file.name, 'r', comm=self.file.comm) as infile: + return infile.load_function(self.mesh, self.stored_name, + idx=self.stored_index) + + def _restore_local_checkpoint(self): + """Load function data via :class:`TemporaryFunctionCheckpointFile`.""" + from firedrake.checkpointing import TemporaryFunctionCheckpointFile + with TemporaryFunctionCheckpointFile( + self.file.checkpoint_comm, self.file.name, 'r' + ) as infile: + return infile.load_function( + self._function_space, self.stored_name, self.stored_index + ) + def _ad_restore_at_checkpoint(self, checkpoint): return checkpoint.restore() diff --git a/firedrake/adjoint_utils/ensemble_function.py b/firedrake/adjoint_utils/ensemble_function.py index 179d017285..fb19c9a02e 100644 --- a/firedrake/adjoint_utils/ensemble_function.py +++ b/firedrake/adjoint_utils/ensemble_function.py @@ -13,7 +13,7 @@ class EnsembleFunctionMixin(OverloadedType): Enables EnsembleFunction to do the following: - Be a Control for a NumpyReducedFunctional (_ad_to_list and _ad_assign_numpy) - Be used with pyadjoint TAO solver (_ad_{to,from}_petsc) - - Be used as a Control for Taylor tests (_ad_dot) + - Be used as a Control for Taylor tests (_ad_dot, _ad_add, _ad_mul) """ @staticmethod @@ -32,10 +32,8 @@ def wrapper(self, *args, **kwargs): @staticmethod def _ad_to_list(m): with m.vec_ro() as gvec: - lvec = PETSc.Vec().createSeq(gvec.size, - comm=PETSc.COMM_SELF) - PETSc.Scatter().toAll(gvec).scatter( - gvec, lvec, addv=PETSc.InsertMode.INSERT_VALUES) + scatter, lvec = PETSc.Scatter().toAll(gvec) + scatter.scatter(gvec, lvec, addv=PETSc.InsertMode.INSERT_VALUES) return 
lvec.array_r.tolist() @staticmethod @@ -50,22 +48,22 @@ def _ad_dot(self, other, options=None): local_dot = sum(uself._ad_dot(uother, options=options) for uself, uother in zip(self.subfunctions, other.subfunctions)) - return self.ensemble.ensemble_comm.allreduce(local_dot) - - def _ad_convert_riesz(self, value, options=None): - raise NotImplementedError + return self.function_space().ensemble.allreduce(local_dot) def _ad_init_zero(self, dual=False): - from firedrake import EnsembleFunction, EnsembleCofunction + from firedrake import EnsembleFunction + space = self.function_space() if dual: - return EnsembleCofunction(self.function_space().dual()) - else: - return EnsembleFunction(self.function_space()) + space = space.dual() + return EnsembleFunction(space) + + def _ad_convert_riesz(self, value, riesz_map=None): + return value.riesz_representation(riesz_map=riesz_map or "L2") def _ad_create_checkpoint(self): if disk_checkpointing(): raise NotImplementedError( - "Disk checkpointing not implemented for EnsembleFunctions") + f"Disk checkpointing not implemented for {type(self).__name__}") else: return self.copy() @@ -73,12 +71,12 @@ def _ad_restore_at_checkpoint(self, checkpoint): if type(checkpoint) is type(self): return checkpoint raise NotImplementedError( - "Disk checkpointing not implemented for EnsembleFunctions") + f"Disk checkpointing not implemented for {type(self).__name__}") def _ad_from_petsc(self, vec): - with self.vec_wo as self_v: + with self.vec_wo() as self_v: vec.copy(self_v) def _ad_to_petsc(self, vec=None): - with self.vec_ro as self_v: + with self.vec_ro() as self_v: return self_v.copy(vec or self._vec.duplicate()) diff --git a/firedrake/adjoint_utils/function.py b/firedrake/adjoint_utils/function.py index da2b3051f4..a4b83b9828 100644 --- a/firedrake/adjoint_utils/function.py +++ b/firedrake/adjoint_utils/function.py @@ -1,4 +1,5 @@ from functools import wraps +from pyop2.mpi import temp_internal_comm import ufl from ufl.domain import 
extract_unique_domain from pyadjoint.overloaded_type import create_overloaded_object, FloatingType @@ -280,8 +281,9 @@ def _ad_assign_numpy(dst, src, offset): m_a_local = src[offset + range_begin:offset + range_end] if dst.function_space().ufl_element().family() == "Real": # Real space keeps a redundant copy of the data on every rank - comm = dst.function_space().mesh()._comm - dst.dat.data_wo[...] = comm.bcast(m_a_local, root=0) + comm = dst.function_space().mesh().comm + with temp_internal_comm(comm) as icomm: + dst.dat.data_wo[...] = icomm.bcast(m_a_local, root=0) else: dst.dat.data_wo[...] = m_a_local.reshape(dst.dat.data_wo.shape) offset += dst.dat.dataset.layout_vec.size diff --git a/firedrake/adjoint_utils/mesh.py b/firedrake/adjoint_utils/mesh.py index 22de2f239b..b60f9bc9b1 100644 --- a/firedrake/adjoint_utils/mesh.py +++ b/firedrake/adjoint_utils/mesh.py @@ -8,9 +8,24 @@ class MeshGeometryMixin(OverloadedType): def _ad_annotate_init(init): @wraps(init) def wrapper(self, *args, **kwargs): + from firedrake.mesh import ExtrudedMeshTopology + from .blocks import MeshInputBlock, MeshOutputBlock + OverloadedType.__init__(self, *args, **kwargs) init(self, *args, **kwargs) self._ad_coordinate_space = None + + # attach information to the mesh coordinates, this does not work for + # meshes with multiple cell types + if not isinstance(self.topology, ExtrudedMeshTopology) and len(self.topology.dm_cell_types) == 1: + f = self._coordinates_function + f.block_class = MeshInputBlock + f._ad_floating_active = True + f._ad_args = [self] + + f._ad_output_args = [self] + f.output_block_class = MeshOutputBlock + f._ad_outputs = [self] return wrapper @no_annotations @@ -22,22 +37,6 @@ def _ad_restore_at_checkpoint(self, checkpoint): self.coordinates.assign(checkpoint) return self - @staticmethod - def _ad_annotate_coordinates_function(coordinates_function): - @wraps(coordinates_function) - def wrapper(self, *args, **kwargs): - from .blocks import MeshInputBlock, 
MeshOutputBlock - f = coordinates_function(self) - f.block_class = MeshInputBlock - f._ad_floating_active = True - f._ad_args = [self] - - f._ad_output_args = [self] - f.output_block_class = MeshOutputBlock - f._ad_outputs = [self] - return f - return wrapper - def _ad_function_space(self): if self._ad_coordinate_space is None: self._ad_coordinate_space = self.coordinates.function_space().ufl_function_space() diff --git a/firedrake/assemble.py b/firedrake/assemble.py index 5071160369..6340502cc4 100644 --- a/firedrake/assemble.py +++ b/firedrake/assemble.py @@ -19,18 +19,20 @@ from firedrake import (extrusion_utils as eutils, matrix, parameters, solving, tsfc_interface, utils) from firedrake.adjoint_utils import annotate_assemble -from firedrake.ufl_expr import extract_unique_domain +from firedrake.ufl_expr import extract_domains from firedrake.bcs import DirichletBC, EquationBC, EquationBCSplit from firedrake.functionspaceimpl import WithGeometry, FunctionSpace, FiredrakeDualSpace from firedrake.functionspacedata import entity_dofs_key, entity_permutations_key +from firedrake.interpolation import get_interpolator from firedrake.petsc import PETSc from firedrake.slate import slac, slate from firedrake.slate.slac.kernel_builder import CellFacetKernelArg, LayerCountKernelArg from firedrake.utils import ScalarType, assert_empty, tuplify from pyop2 import op2 from pyop2.exceptions import MapValueError, SparsityFormatError +from functools import cached_property + from pyop2.types.mat import _GlobalMatPayload, _DatMatPayload -from pyop2.utils import cached_property __all__ = "assemble", @@ -364,7 +366,9 @@ def allocate(self): else: test, trial = self._form.arguments() sparsity = ExplicitMatrixAssembler._make_sparsity(test, trial, self._mat_type, self._sub_mat_type, self.maps_and_regions) - return matrix.Matrix(self._form, self._bcs, self._mat_type, sparsity, ScalarType, options_prefix=self._options_prefix) + return matrix.Matrix(self._form, self._bcs, self._mat_type, 
sparsity, ScalarType, + sub_mat_type=self._sub_mat_type, + options_prefix=self._options_prefix) else: raise NotImplementedError("Only implemented for rank = 2 and diagonal = False") @@ -500,6 +504,14 @@ def base_form_assembly_visitor(self, expr, tensor, bcs, *args): with lhs.dat.vec_ro as x, rhs.dat.vec_ro as y: res = x.dot(y) return res + elif isinstance(rhs, matrix.MatrixBase): + # Compute action(Cofunc, Mat) => Mat^* @ Cofunc + petsc_mat = rhs.petscmat + (_, col) = rhs.arguments() + res = tensor if tensor else firedrake.Function(col.function_space().dual()) + with lhs.dat.vec_ro as v_vec, res.dat.vec as res_vec: + petsc_mat.multHermitian(v_vec, res_vec) + return res else: raise TypeError("Incompatible RHS for Action.") else: @@ -611,17 +623,8 @@ def base_form_assembly_visitor(self, expr, tensor, bcs, *args): rank = len(expr.arguments()) if rank > 2: raise ValueError("Cannot assemble an Interpolate with more than two arguments") - # Get the target space - V = v.function_space().dual() - - # Get the interpolator - interp_data = expr.interp_data.copy() - default_missing_val = interp_data.pop('default_missing_val', None) - if rank == 1 and isinstance(tensor, firedrake.Function): - V = tensor - interpolator = firedrake.Interpolator(expr, V, bcs=bcs, **interp_data) - # Assembly - return interpolator.assemble(tensor=tensor, default_missing_val=default_missing_val) + interpolator = get_interpolator(expr) + return interpolator.assemble(tensor=tensor, bcs=bcs, mat_type=self._mat_type, sub_mat_type=self._sub_mat_type) elif tensor and isinstance(expr, (firedrake.Function, firedrake.Cofunction, firedrake.MatrixBase)): return tensor.assign(expr) elif tensor and isinstance(expr, ufl.ZeroBaseForm): @@ -836,6 +839,12 @@ def restructure_base_form(expr, visited=None): # Replace arguments return ufl.replace(right, replace_map) + # Action(Adjoint(A), w*) -> Action(w*, A) + if isinstance(left, ufl.Adjoint) and not isinstance(right, firedrake.Function) and is_rank_1(right): + # TODO: 
ufl.action(Coefficient, Form) currently fails. When it is fixed, we can remove the + # `not isinstance(right, firedrake.Function)` check. + return ufl.action(right, left.form()) + # -- Case (4) -- # if isinstance(expr, ufl.Adjoint) and isinstance(expr.form(), ufl.core.base_form_operator.BaseFormOperator): B = expr.form() @@ -861,6 +870,15 @@ def restructure_base_form(expr, visited=None): if isinstance(expr, ufl.FormSum) and all(ufl.duals.is_dual(a.function_space()) for a in expr.arguments()): # Return ufl.Sum if we are assembling a FormSum with Coarguments (a primal expression) return sum(w*c for w, c in zip(expr.weights(), expr.components())) + + # If F: V3 x V2 -> R, then + # Interpolate(TestFunction(V1), F) <=> Action(Interpolate(TestFunction(V1), TrialFunction(V2.dual())), F). + # The result is a two-form V3 x V1 -> R. + if isinstance(expr, ufl.Interpolate) and isinstance(expr.argument_slots()[0], ufl.form.Form) and len(expr.argument_slots()[0].arguments()) == 2: + form, operand = expr.argument_slots() + vstar = firedrake.Argument(form.arguments()[0].function_space().dual(), 1) + expr = expr._ufl_expr_reconstruct_(operand, v=vstar) + return ufl.action(expr, form) return expr @staticmethod @@ -1069,7 +1087,7 @@ def parloops(self, tensor): self._bcs, local_kernel, subdomain_id, - self.all_integer_subdomain_ids[local_kernel.indices], + self.all_integer_subdomain_ids[local_kernel.indices][local_kernel.kinfo.domain_number], diagonal=self.diagonal, ) pyop2_tensor = self._as_pyop2_type(tensor, local_kernel.indices) @@ -1090,16 +1108,6 @@ def local_kernels(self): each possible combination. 
""" - try: - topology, = set(d.topology for d in self._form.ufl_domains()) - except ValueError: - raise NotImplementedError("All integration domains must share a mesh topology") - - for o in itertools.chain(self._form.arguments(), self._form.coefficients()): - domain = extract_unique_domain(o) - if domain is not None and domain.topology != topology: - raise NotImplementedError("Assembly with multiple meshes is not supported") - if isinstance(self._form, ufl.Form): kernels = tsfc_interface.compile_form( self._form, "form", diagonal=self.diagonal, @@ -1164,13 +1172,13 @@ def __init__(self, form, form_compiler_parameters=None): def allocate(self): # Getting the comm attribute of a form isn't straightforward - # form.ufl_domains()[0]._comm seems the most robust method + # form.ufl_domains()[0].comm seems the most robust method # revisit in a refactor return op2.Global( 1, [0.0], dtype=utils.ScalarType, - comm=self._form.ufl_domains()[0]._comm + comm=self._form.ufl_domains()[0].comm ) def _apply_bc(self, tensor, bc, u=None): @@ -1326,12 +1334,12 @@ def _get_mat_type(mat_type, sub_mat_type, arguments): for arg in arguments for V in arg.function_space()): mat_type = "nest" - if mat_type not in {"matfree", "aij", "baij", "nest", "dense"}: + if mat_type not in {"matfree", "aij", "baij", "nest", "dense", "is"}: raise ValueError(f"Unrecognised matrix type, '{mat_type}'") if sub_mat_type is None: sub_mat_type = parameters.parameters["default_sub_matrix_type"] - if sub_mat_type not in {"aij", "baij"}: - raise ValueError(f"Invalid submatrix type, '{sub_mat_type}' (not 'aij' or 'baij')") + if sub_mat_type not in {"aij", "baij", "is"}: + raise ValueError(f"Invalid submatrix type, '{sub_mat_type}' (not 'aij', 'baij', or 'is')") return mat_type, sub_mat_type @@ -1375,6 +1383,7 @@ def allocate(self): self._sub_mat_type, self._make_maps_and_regions()) return matrix.Matrix(self._form, self._bcs, self._mat_type, sparsity, ScalarType, + sub_mat_type=self._sub_mat_type, 
options_prefix=self._options_prefix, fc_params=self._form_compiler_params) @@ -1413,12 +1422,12 @@ def _make_maps_and_regions(self): else: maps_and_regions = defaultdict(lambda: defaultdict(set)) for assembler in self._all_assemblers: - all_meshes = assembler._form.ufl_domains() + all_meshes = extract_domains(assembler._form) for local_kernel, subdomain_id in assembler.local_kernels: i, j = local_kernel.indices mesh = all_meshes[local_kernel.kinfo.domain_number] # integration domain integral_type = local_kernel.kinfo.integral_type - all_subdomain_ids = assembler.all_integer_subdomain_ids[local_kernel.indices] + all_subdomain_ids = assembler.all_integer_subdomain_ids[local_kernel.indices][local_kernel.kinfo.domain_number] # Make Sparsity independent of the subdomain of integration for better reusability; # subdomain_id is passed here only to determine the integration_type on the target domain # (see ``entity_node_map``). @@ -1492,8 +1501,10 @@ def _apply_bc(self, tensor, bc, u=None): # Set diagonal entries on bc nodes to 1 if the current # block is on the matrix diagonal and its index matches the # index of the function space the bc is defined on. + if op2tensor.handle.getType() == "is": + # Flag the entire matrix as assembled before indexing the diagonal block + op2tensor.handle.assemble() op2tensor[index, index].set_local_diagonal_entries(bc.nodes, idx=component, diag_val=self.weight) - # Handle off-diagonal block involving real function space. # "lgmaps" is correctly constructed in _matrix_arg, but # is ignored by PyOP2 in this case. @@ -1595,6 +1606,10 @@ def _global_kernel_cache_key(form, local_knl, subdomain_id, all_integer_subdomai # N.B. Generating the global kernel is not a collective operation so the # communicator does not need to be a part of this cache key. + # Maps in the cached global kernel depend on concrete mesh data. 
+ all_meshes = extract_domains(form) + domain_ids = tuple(mesh.ufl_id() for mesh in all_meshes) + if isinstance(form, ufl.Form): sig = form.signature() elif isinstance(form, slate.TensorBase): @@ -1614,7 +1629,8 @@ def _global_kernel_cache_key(form, local_knl, subdomain_id, all_integer_subdomai else: subdomain_key.append((k, i)) - return ((sig, subdomain_id) + return (domain_ids + + (sig, subdomain_id) + tuple(subdomain_key) + tuplify(all_integer_subdomain_ids) + cachetools.keys.hashkey(local_knl, **kwargs)) @@ -1651,8 +1667,16 @@ def __init__(self, form, local_knl, subdomain_id, all_integer_subdomain_ids, dia self._diagonal = diagonal self._unroll = unroll + self._active_coordinates = _FormHandler.iter_active_coordinates(form, local_knl.kinfo) + self._active_cell_orientations = _FormHandler.iter_active_cell_orientations(form, local_knl.kinfo) + self._active_cell_sizes = _FormHandler.iter_active_cell_sizes(form, local_knl.kinfo) self._active_coefficients = _FormHandler.iter_active_coefficients(form, local_knl.kinfo) self._constants = _FormHandler.iter_constants(form, local_knl.kinfo) + self._active_exterior_facets = _FormHandler.iter_active_exterior_facets(form, local_knl.kinfo) + self._active_interior_facets = _FormHandler.iter_active_interior_facets(form, local_knl.kinfo) + self._active_orientations_cell = _FormHandler.iter_active_orientations_cell(form, local_knl.kinfo) + self._active_orientations_exterior_facet = _FormHandler.iter_active_orientations_exterior_facet(form, local_knl.kinfo) + self._active_orientations_interior_facet = _FormHandler.iter_active_orientations_interior_facet(form, local_knl.kinfo) self._map_arg_cache = {} # Cache for holding :class:`op2.MapKernelArg` instances. 
@@ -1666,8 +1690,16 @@ def build(self): for arg in self._kinfo.arguments] # we should use up all of the coefficients and constants + assert_empty(self._active_coordinates) + assert_empty(self._active_cell_orientations) + assert_empty(self._active_cell_sizes) assert_empty(self._active_coefficients) assert_empty(self._constants) + assert_empty(self._active_exterior_facets) + assert_empty(self._active_interior_facets) + assert_empty(self._active_orientations_cell) + assert_empty(self._active_orientations_exterior_facet) + assert_empty(self._active_orientations_interior_facet) iteration_regions = {"exterior_facet_top": op2.ON_TOP, "exterior_facet_bottom": op2.ON_BOTTOM, @@ -1692,7 +1724,8 @@ def _integral_type(self): @cached_property def _mesh(self): - return self._form.ufl_domains()[self._kinfo.domain_number] + all_meshes = extract_domains(self._form) + return all_meshes[self._kinfo.domain_number] @cached_property def _needs_subset(self): @@ -1797,7 +1830,22 @@ def _as_global_kernel_arg_output(_, self): @_as_global_kernel_arg.register(kernel_args.CoordinatesKernelArg) def _as_global_kernel_arg_coordinates(_, self): - V = self._mesh.coordinates.function_space() + coord = next(self._active_coordinates) + V = coord.function_space() + return self._make_dat_global_kernel_arg(V) + + +@_as_global_kernel_arg.register(kernel_args.CellOrientationsKernelArg) +def _as_global_kernel_arg_cell_orientations(_, self): + c = next(self._active_cell_orientations) + V = c.function_space() + return self._make_dat_global_kernel_arg(V) + + +@_as_global_kernel_arg.register(kernel_args.CellSizesKernelArg) +def _as_global_kernel_arg_cell_sizes(_, self): + c = next(self._active_cell_sizes) + V = c.function_space() return self._make_dat_global_kernel_arg(V) @@ -1811,9 +1859,12 @@ def _as_global_kernel_arg_coefficient(_, self): else: index = None - ufl_element = V.ufl_element() - if ufl_element.family() == "Real": - return op2.GlobalKernelArg((V.value_size,)) + if V.ufl_element().family() == 
"Real": + # Interior facet integrals double Real coefficients for the + # two sides of the facet, matching the TSFC-generated kernel. + return op2.GlobalKernelArg( + (V.value_size,), double=self._integral_type.startswith("interior_facet") + ) else: return self._make_dat_global_kernel_arg(V, index=index) @@ -1825,47 +1876,70 @@ def _as_global_kernel_arg_constant(_, self): return op2.GlobalKernelArg((value_size,)) -@_as_global_kernel_arg.register(kernel_args.CellSizesKernelArg) -def _as_global_kernel_arg_cell_sizes(_, self): - V = self._mesh.cell_sizes.function_space() - return self._make_dat_global_kernel_arg(V) - - @_as_global_kernel_arg.register(kernel_args.ExteriorFacetKernelArg) def _as_global_kernel_arg_exterior_facet(_, self): - return op2.DatKernelArg((1,)) + mesh = next(self._active_exterior_facets) + if mesh is self._mesh: + return op2.DatKernelArg((1,)) + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "exterior_facet" + return op2.DatKernelArg((1,), m._global_kernel_arg) @_as_global_kernel_arg.register(kernel_args.InteriorFacetKernelArg) def _as_global_kernel_arg_interior_facet(_, self): - return op2.DatKernelArg((2,)) + mesh = next(self._active_interior_facets) + if mesh is self._mesh: + return op2.DatKernelArg((2,)) + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "interior_facet" + return op2.DatKernelArg((2,), m._global_kernel_arg) + + +@_as_global_kernel_arg.register(kernel_args.OrientationsCellKernelArg) +def _(_, self): + mesh = next(self._active_orientations_cell) + if mesh is self._mesh: + return op2.DatKernelArg((1,)) + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + 
assert integral_type == "cell" + return op2.DatKernelArg((1,), m._global_kernel_arg) -@_as_global_kernel_arg.register(kernel_args.ExteriorFacetOrientationKernelArg) -def _as_global_kernel_arg_exterior_facet_orientation(_, self): - return op2.DatKernelArg((1,)) +@_as_global_kernel_arg.register(kernel_args.OrientationsExteriorFacetKernelArg) +def _(_, self): + mesh = next(self._active_orientations_exterior_facet) + if mesh is self._mesh: + return op2.DatKernelArg((1,)) + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "exterior_facet" + return op2.DatKernelArg((1,), m._global_kernel_arg) -@_as_global_kernel_arg.register(kernel_args.InteriorFacetOrientationKernelArg) -def _as_global_kernel_arg_interior_facet_orientation(_, self): - return op2.DatKernelArg((2,)) +@_as_global_kernel_arg.register(kernel_args.OrientationsInteriorFacetKernelArg) +def _(_, self): + mesh = next(self._active_orientations_interior_facet) + if mesh is self._mesh: + return op2.DatKernelArg((2,)) + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "interior_facet" + return op2.DatKernelArg((2,), m._global_kernel_arg) @_as_global_kernel_arg.register(CellFacetKernelArg) def _as_global_kernel_arg_cell_facet(_, self): if self._mesh.extruded: - num_facets = self._mesh._base_mesh.ufl_cell().num_facets() + num_facets = self._mesh._base_mesh.ufl_cell().num_facets else: - num_facets = self._mesh.ufl_cell().num_facets() + num_facets = self._mesh.ufl_cell().num_facets return op2.DatKernelArg((num_facets, 2)) -@_as_global_kernel_arg.register(kernel_args.CellOrientationsKernelArg) -def _as_global_kernel_arg_cell_orientations(_, self): - V = self._mesh.cell_orientations().function_space() - return self._make_dat_global_kernel_arg(V) - - 
@_as_global_kernel_arg.register(LayerCountKernelArg) def _as_global_kernel_arg_layer_count(_, self): return op2.GlobalKernelArg((1,)) @@ -1899,8 +1973,16 @@ def __init__(self, form, bcs, local_knl, subdomain_id, self._diagonal = diagonal self._bcs = bcs + self._active_coordinates = _FormHandler.iter_active_coordinates(form, local_knl.kinfo) + self._active_cell_orientations = _FormHandler.iter_active_cell_orientations(form, local_knl.kinfo) + self._active_cell_sizes = _FormHandler.iter_active_cell_sizes(form, local_knl.kinfo) self._active_coefficients = _FormHandler.iter_active_coefficients(form, local_knl.kinfo) self._constants = _FormHandler.iter_constants(form, local_knl.kinfo) + self._active_exterior_facets = _FormHandler.iter_active_exterior_facets(form, local_knl.kinfo) + self._active_interior_facets = _FormHandler.iter_active_interior_facets(form, local_knl.kinfo) + self._active_orientations_cell = _FormHandler.iter_active_orientations_cell(form, local_knl.kinfo) + self._active_orientations_exterior_facet = _FormHandler.iter_active_orientations_exterior_facet(form, local_knl.kinfo) + self._active_orientations_interior_facet = _FormHandler.iter_active_orientations_interior_facet(form, local_knl.kinfo) def build(self, tensor: op2.Global | op2.Dat | op2.Mat) -> op2.Parloop: """Construct the parloop. 
@@ -2001,16 +2083,18 @@ def collect_lgmaps(self): row_bcs, col_bcs = self._filter_bcs(i, j) # the tensor is already indexed rlgmap, clgmap = self._tensor.local_to_global_maps - rlgmap = self.test_function_space[i].local_to_global_map(row_bcs, rlgmap) - clgmap = self.trial_function_space[j].local_to_global_map(col_bcs, clgmap) + mat_type = self._tensor.handle.getType() + rlgmap = self.test_function_space[i].local_to_global_map(row_bcs, rlgmap, mat_type=mat_type) + clgmap = self.trial_function_space[j].local_to_global_map(col_bcs, clgmap, mat_type=mat_type) return ((rlgmap, clgmap),) else: lgmaps = [] for i, j in self.get_indicess(): row_bcs, col_bcs = self._filter_bcs(i, j) rlgmap, clgmap = self._tensor[i, j].local_to_global_maps - rlgmap = self.test_function_space[i].local_to_global_map(row_bcs, rlgmap) - clgmap = self.trial_function_space[j].local_to_global_map(col_bcs, clgmap) + mat_type = self._tensor[i, j].handle.getType() + rlgmap = self.test_function_space[i].local_to_global_map(row_bcs, rlgmap, mat_type=mat_type) + clgmap = self.trial_function_space[j].local_to_global_map(col_bcs, clgmap, mat_type=mat_type) lgmaps.append((rlgmap, clgmap)) return tuple(lgmaps) else: @@ -2034,7 +2118,8 @@ def _indexed_function_spaces(self): @cached_property def _mesh(self): - return self._form.ufl_domains()[self._kinfo.domain_number] + all_meshes = extract_domains(self._form) + return all_meshes[self._kinfo.domain_number] @cached_property def _iterset(self): @@ -2106,7 +2191,21 @@ def _as_parloop_arg_output(_, self): @_as_parloop_arg.register(kernel_args.CoordinatesKernelArg) def _as_parloop_arg_coordinates(_, self): - func = self._mesh.coordinates + func = next(self._active_coordinates) + map_ = self._get_map(func.function_space()) + return op2.DatParloopArg(func.dat, map_) + + +@_as_parloop_arg.register(kernel_args.CellOrientationsKernelArg) +def _as_parloop_arg_cell_orientations(_, self): + func = next(self._active_cell_orientations) + map_ = 
self._get_map(func.function_space()) + return op2.DatParloopArg(func.dat, map_) + + +@_as_parloop_arg.register(kernel_args.CellSizesKernelArg) +def _as_parloop_arg_cell_sizes(_, self): + func = next(self._active_cell_sizes) map_ = self._get_map(func.function_space()) return op2.DatParloopArg(func.dat, map_) @@ -2127,38 +2226,59 @@ def _as_parloop_arg_constant(arg, self): return op2.GlobalParloopArg(const.dat) -@_as_parloop_arg.register(kernel_args.CellOrientationsKernelArg) -def _as_parloop_arg_cell_orientations(_, self): - func = self._mesh.cell_orientations() - m = self._get_map(func.function_space()) - return op2.DatParloopArg(func.dat, m) - - -@_as_parloop_arg.register(kernel_args.CellSizesKernelArg) -def _as_parloop_arg_cell_sizes(_, self): - func = self._mesh.cell_sizes - m = self._get_map(func.function_space()) - return op2.DatParloopArg(func.dat, m) - - @_as_parloop_arg.register(kernel_args.ExteriorFacetKernelArg) def _as_parloop_arg_exterior_facet(_, self): - return op2.DatParloopArg(self._mesh.exterior_facets.local_facet_dat) + mesh = next(self._active_exterior_facets) + if mesh is self._mesh: + m = None + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "exterior_facet" + return op2.DatParloopArg(mesh.exterior_facets.local_facet_dat, m) @_as_parloop_arg.register(kernel_args.InteriorFacetKernelArg) def _as_parloop_arg_interior_facet(_, self): - return op2.DatParloopArg(self._mesh.interior_facets.local_facet_dat) + mesh = next(self._active_interior_facets) + if mesh is self._mesh: + m = None + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "interior_facet" + return op2.DatParloopArg(mesh.interior_facets.local_facet_dat, m) + +@_as_parloop_arg.register(kernel_args.OrientationsCellKernelArg) 
+def _(_, self): + mesh = next(self._active_orientations_cell) + if mesh is self._mesh: + m = None + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "cell" + return op2.DatParloopArg(mesh.local_cell_orientation_dat, m) -@_as_parloop_arg.register(kernel_args.ExteriorFacetOrientationKernelArg) -def _as_parloop_arg_exterior_facet_orientation(_, self): - return op2.DatParloopArg(self._mesh.exterior_facets.local_facet_orientation_dat) +@_as_parloop_arg.register(kernel_args.OrientationsExteriorFacetKernelArg) +def _(_, self): + mesh = next(self._active_orientations_exterior_facet) + if mesh is self._mesh: + m = None + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "exterior_facet" + return op2.DatParloopArg(mesh.exterior_facets.local_facet_orientation_dat, m) -@_as_parloop_arg.register(kernel_args.InteriorFacetOrientationKernelArg) -def _as_parloop_arg_interior_facet_orientation(_, self): - return op2.DatParloopArg(self._mesh.interior_facets.local_facet_orientation_dat) + +@_as_parloop_arg.register(kernel_args.OrientationsInteriorFacetKernelArg) +def _(_, self): + mesh = next(self._active_orientations_interior_facet) + if mesh is self._mesh: + m = None + else: + m, integral_type = mesh.topology.trans_mesh_entity_map(self._mesh.topology, self._integral_type, self._subdomain_id, self._all_integer_subdomain_ids) + assert integral_type == "interior_facet" + return op2.DatParloopArg(mesh.interior_facets.local_facet_orientation_dat, m) @_as_parloop_arg.register(CellFacetKernelArg) @@ -2180,6 +2300,27 @@ def _as_parloop_arg_layer_count(_, self): class _FormHandler: """Utility class for inspecting forms and local kernels.""" + @staticmethod + def iter_active_coordinates(form, kinfo): + """Yield the form 
coordinates referenced in ``kinfo``.""" + all_meshes = extract_domains(form) + for i in kinfo.active_domain_numbers.coordinates: + yield all_meshes[i].coordinates + + @staticmethod + def iter_active_cell_orientations(form, kinfo): + """Yield the form cell orientations referenced in ``kinfo``.""" + all_meshes = extract_domains(form) + for i in kinfo.active_domain_numbers.cell_orientations: + yield all_meshes[i].cell_orientations() + + @staticmethod + def iter_active_cell_sizes(form, kinfo): + """Yield the form cell sizes referenced in ``kinfo``.""" + all_meshes = extract_domains(form) + for i in kinfo.active_domain_numbers.cell_sizes: + yield all_meshes[i].cell_sizes + @staticmethod def iter_active_coefficients(form, kinfo): """Yield the form coefficients referenced in ``kinfo``.""" @@ -2198,6 +2339,46 @@ def iter_constants(form, kinfo): for constant_index in kinfo.constant_numbers: yield all_constants[constant_index] + @staticmethod + def iter_active_exterior_facets(form, kinfo): + """Yield the form exterior facets referenced in ``kinfo``.""" + all_meshes = extract_domains(form) + for i in kinfo.active_domain_numbers.exterior_facets: + mesh = all_meshes[i] + yield mesh + + @staticmethod + def iter_active_interior_facets(form, kinfo): + """Yield the form interior facets referenced in ``kinfo``.""" + all_meshes = extract_domains(form) + for i in kinfo.active_domain_numbers.interior_facets: + mesh = all_meshes[i] + yield mesh + + @staticmethod + def iter_active_orientations_cell(form, kinfo): + """Yield the form cell orientations referenced in ``kinfo``.""" + all_meshes = extract_domains(form) + for i in kinfo.active_domain_numbers.orientations_cell: + mesh = all_meshes[i] + yield mesh + + @staticmethod + def iter_active_orientations_exterior_facet(form, kinfo): + """Yield the form exterior facet orientations referenced in ``kinfo``.""" + all_meshes = extract_domains(form) + for i in kinfo.active_domain_numbers.orientations_exterior_facet: + mesh = all_meshes[i] + 
yield mesh + + @staticmethod + def iter_active_orientations_interior_facet(form, kinfo): + """Yield the form interior facet orientations referenced in ``kinfo``.""" + all_meshes = extract_domains(form) + for i in kinfo.active_domain_numbers.orientations_interior_facet: + mesh = all_meshes[i] + yield mesh + @staticmethod def index_function_spaces(form, indices): """Return the function spaces of the form's arguments, indexed diff --git a/firedrake/assign.py b/firedrake/assign.py index 1617d6e4d9..065fd90ff8 100644 --- a/firedrake/assign.py +++ b/firedrake/assign.py @@ -2,8 +2,10 @@ import operator import numpy as np +from functools import cached_property + from pyadjoint.tape import annotate_tape -from pyop2.utils import cached_property +from pyop2 import op2 import pytools import finat.ufl from ufl.algorithms import extract_coefficients @@ -12,19 +14,22 @@ from ufl.corealg.multifunction import MultiFunction from ufl.domain import extract_unique_domain +from firedrake.cofunction import Cofunction from firedrake.constant import Constant from firedrake.function import Function from firedrake.petsc import PETSc from firedrake.utils import ScalarType, split_by +from mpi4py import MPI + def _isconstant(expr): return isinstance(expr, Constant) or \ - (isinstance(expr, Function) and expr.ufl_element().family() == "Real") + (isinstance(expr, (Function, Cofunction)) and expr.ufl_element().family() == "Real") def _isfunction(expr): - return isinstance(expr, Function) and expr.ufl_element().family() != "Real" + return isinstance(expr, (Function, Cofunction)) and expr.ufl_element().family() != "Real" class CoefficientCollector(MultiFunction): @@ -99,6 +104,9 @@ def component_tensor(self, o, a, _): def coefficient(self, o): return ((o, 1),) + def cofunction(self, o): + return ((o, 1),) + def constant_value(self, o): return ((o, 1),) @@ -130,11 +138,17 @@ def _as_scalar(self, weighted_coefficients): class Assigner: - """Class performing pointwise assignment of an expression to a 
:class:`firedrake.function.Function`. + """Class performing pointwise assignment of an expression to a function or a cofunction. + + Parameters + ---------- + assignee : firedrake.function.Function or firedrake.cofunction.Cofunction + Function or Cofunction being assigned to. + expression : ufl.core.expr.Expr or ufl.form.BaseForm + Expression to be assigned. + subset : pyop2.types.set.Set or pyop2.types.set.Subset or pyop2.types.set.MixedSet + Subset to apply the assignment over. - :param assignee: The :class:`~.firedrake.function.Function` being assigned to. - :param expression: The :class:`ufl.core.expr.Expr` to evaluate. - :param subset: Optional subset (:class:`pyop2.types.set.Subset`) to apply the assignment over. """ symbol = "=" @@ -142,22 +156,40 @@ class Assigner: def __init__(self, assignee, expression, subset=None): expression = as_ufl(expression) - + source_meshes = set() for coeff in extract_coefficients(expression): - if isinstance(coeff, Function) and coeff.ufl_element().family() != "Real": + if isinstance(coeff, (Function, Cofunction)) and coeff.ufl_element().family() != "Real": if coeff.ufl_element() != assignee.ufl_element(): raise ValueError("All functions in the expression must have the same " "element as the assignee") - if extract_unique_domain(coeff) != extract_unique_domain(assignee): - raise ValueError("All functions in the expression must use the same " - "mesh as the assignee") - - if (subset and type(assignee.ufl_element()) == finat.ufl.MixedElement - and any(el.family() == "Real" - for el in assignee.ufl_element().sub_elements)): - raise ValueError("Subset is not a valid argument for assigning to a mixed " - "element including a real element") - + source_meshes.add(extract_unique_domain(coeff, expand_mesh_sequence=False)) + if len(source_meshes) == 0: + pass + elif len(source_meshes) == 1: + target_mesh = extract_unique_domain(assignee, expand_mesh_sequence=False) + source_mesh, = source_meshes + if target_mesh == source_mesh: + pass + 
elif target_mesh.submesh_youngest_common_ancestor(source_mesh) is None: + raise ValueError( + "All functions in the expression must be defined on a single domain " + "that is in the same submesh family as domain of the assignee" + ) + else: + raise ValueError( + "All functions in the expression must be defined on a single domain" + ) + if subset is None: + subset = tuple(None for _ in assignee.function_space()) + if len(subset) != len(assignee.function_space()): + raise ValueError(f"Provided subset ({subset}) incompatible with assignee ({assignee})") + if type(assignee.ufl_element()) == finat.ufl.MixedElement: + for subs, el in zip(subset, assignee.function_space().ufl_element().sub_elements): + if subs is not None and el.family() == "Real": + raise ValueError( + "Subset is not a valid argument for assigning to a mixed " + "element including a real element" + ) self._assignee = assignee self._expression = expression self._subset = subset @@ -169,14 +201,21 @@ def __repr__(self): return f"{self.__class__.__name__}({self._assignee!r}, {self._expression!r})" @PETSc.Log.EventDecorator() - def assign(self): - """Perform the assignment.""" + def assign(self, allow_missing_dofs=False): + """Perform the assignment. + + Parameters + ---------- + allow_missing_dofs : bool + Permit assignment between objects with mismatching nodes. If `True` then + assignee nodes with no matching assigner nodes are ignored. + + """ if annotate_tape(): raise NotImplementedError( "Taping with explicit Assigner objects is not supported yet. " "Use Function.assign instead." ) - # To minimize communication during assignment we perform a number of tricks: # * If we are not assigning to a subset then we can always write to the # halo. The validity of the original assignee dat halo does not matter @@ -191,28 +230,131 @@ def assign(self): # end up doing a lot of halo exchanges for the expression just to avoid # a single halo exchange for the assignee. 
# * If we do write to the halo then the resulting halo will never be dirty. - - func_halos_valid = all(f.dat.halo_valid for f in self._functions) - assign_to_halos = ( - func_halos_valid and (not self._subset or self._assignee.dat.halo_valid)) - + # If mixed, loop over individual components + for lhs_func, subset, *funcs in zip(self._assignee.subfunctions, self._subset, *(f.subfunctions for f in self._functions)): + target_mesh = extract_unique_domain(lhs_func) + target_V = lhs_func.function_space() + # Validate / Process subset. + if subset is not None: + if subset is target_V.node_set: + # The whole set. + subset = None + elif subset.superset is target_V.node_set: + # op2.Subset of target_V.node_set + pass + else: + raise ValueError(f"subset ({subset}) not a subset of target_V.node_set ({target_V.node_set})") + source_meshes = set(extract_unique_domain(f) for f in funcs) + if len(source_meshes) == 0: + # Assign constants only. + single_mesh_assign = True + elif len(source_meshes) == 1: + source_mesh, = source_meshes + if target_mesh is source_mesh: + # Assign (co)functions from one mesh to the same mesh. + single_mesh_assign = True + else: + # Assign (co)functions between a submesh and the parent or between two submeshes. + single_mesh_assign = False + else: + raise ValueError("All functions in the expression must be defined on a single domain") + if single_mesh_assign: + self._assign_single_mesh(lhs_func, subset, funcs, operator) + else: + self._assign_multi_mesh(lhs_func, subset, funcs, operator, allow_missing_dofs) + + def _assign_single_mesh(self, lhs_func, subset, funcs, operator): + assign_to_halos = all(f.dat.halo_valid for f in funcs) and (lhs_func.dat.halo_valid or subset is None) if assign_to_halos: - subset_indices = self._subset.indices if self._subset else ... 
+ indices = operator.attrgetter("indices") data_ro = operator.attrgetter("data_ro_with_halos") + values = operator.attrgetter("values_with_halo") else: - subset_indices = self._subset.owned_indices if self._subset else ... + indices = operator.attrgetter("owned_indices") data_ro = operator.attrgetter("data_ro") - - # If mixed, loop over individual components - for lhs_dat, *func_dats in zip(self._assignee.dat.split, - *(f.dat.split for f in self._functions)): - func_data = np.array([data_ro(f)[subset_indices] for f in func_dats]) - rvalue = self._compute_rvalue(func_data) - self._assign_single_dat(lhs_dat, subset_indices, rvalue, assign_to_halos) - - # if we have bothered writing to halo it naturally must not be dirty + values = operator.attrgetter("values") + subset_indices = Ellipsis if subset is None else indices(subset) + + def source_indices(f): + target_space = lhs_func.function_space() + target_map = target_space.cell_node_map() + source_map = f.function_space().cell_node_map() + if source_map is target_map: + # Source and target spaces have the same DoF ordering. + return subset_indices + else: + # Permute source indices into the target ordering. 
+ size = target_space.dof_dset.total_size + perm = np.empty((size,), dtype=source_map.values.dtype) + np.put(perm, values(target_map), values(source_map)) + if not assign_to_halos: + perm = perm[:target_space.dof_dset.size] + return perm[subset_indices] + + func_data = np.array([data_ro(f.dat)[source_indices(f)] for f in funcs]) + rvalue = self._compute_rvalue(func_data) + self._assign_single_dat(lhs_func.dat, subset_indices, rvalue, assign_to_halos) if assign_to_halos: - self._assignee.dat.halo_valid = True + lhs_func.dat.halo_valid = True + + def _assign_multi_mesh(self, lhs_func, subset, funcs, operator, allow_missing_dofs): + target_mesh = extract_unique_domain(lhs_func) + target_V = lhs_func.function_space() + source_V, = set(f.function_space() for f in funcs) + composed_map = source_V.topological.entity_node_map(target_mesh.topology, "cell", "everywhere", None) + indices_active = composed_map.indices_active_with_halo + indices_active_all = indices_active.all() + indices_active_all = target_mesh.comm.allreduce(indices_active_all, op=MPI.LAND) + if subset is None: + if not indices_active_all and not allow_missing_dofs: + raise ValueError("Found assignee nodes with no matching assigner nodes: run with `allow_missing_dofs=True`") + subset_indices_target = target_V.cell_node_map().values_with_halo[indices_active, :].flatten() + subset_indices_source = composed_map.values_with_halo[indices_active, :].flatten() + else: + subset_indices_target, perm, _ = np.intersect1d( + target_V.cell_node_map().values_with_halo[indices_active, :].flatten(), + subset.indices, + return_indices=True, + ) + if len(subset.indices) > len(subset_indices_target) and not allow_missing_dofs: + raise ValueError("Found assignee nodes with no matching assigner nodes: run with `allow_missing_dofs=True`") + subset_indices_source = composed_map.values_with_halo[indices_active, :].flatten()[perm] + # Use buffer array to make sure that owned DoFs are updated upon assigning. 
+ # The following example illustrates the issue that a naive assignment would cause. + # + # Consider the following target/source meshes distributed over 2 processes + # with no partition overlap: + # + # 0----0----0----1----1 + # | | | + # target 0 0 0 1 1 + # (parent mesh) | | | + # 0----0----0----1----1 (owning ranks are shown) + # + # 1----1----1 + # | | + # source 1 1 1 + # (submesh) | | + # 1----1----1 (owning ranks are shown) + # + # Consider CG1 functions f (on parent) and fsub (on submesh). By a naive + # f.assign(fsub, subset=...), the DoFs shared by rank 0 and rank 1 would + # only be updated on rank 1, which sees those DoFs as ghost, and those + # updated values on rank 1 would be overridden by the old values on rank 0 + # upon a halo exchange. + # + # TODO: Use work array for buffer? + buffer = type(lhs_func)(target_V) + finfo = np.finfo(lhs_func.dat.dtype) + buffer.dat._data[:] = finfo.max + func_data = np.array([f.dat.data_ro_with_halos[subset_indices_source] for f in funcs]) + rvalue = self._compute_rvalue(func_data) + self._assign_single_dat(buffer.dat, subset_indices_target, rvalue, True) + # Make all owned DoFs up-to-date; ghost DoFs may or may not be up-to-date after this. + buffer.dat.local_to_global_begin(op2.MIN) + buffer.dat.local_to_global_end(op2.MIN) + indices = np.where(buffer.dat.data_ro_with_halos < finfo.max * 0.999999999999) + lhs_func.dat.data_wo_with_halos[indices] = buffer.dat.data_ro_with_halos[indices] @cached_property def _constants(self): diff --git a/firedrake/bcs.py b/firedrake/bcs.py index c994bc8df5..2203f2b3d5 100644 --- a/firedrake/bcs.py +++ b/firedrake/bcs.py @@ -1,9 +1,11 @@ # A module implementing strong (Dirichlet) boundary conditions. 
-import numpy as np -import functools +from functools import partial, reduce, cached_property import itertools +import numpy as np +from mpi4py import MPI + import ufl from ufl import as_ufl, as_tensor from finat.ufl import VectorElement @@ -11,15 +13,16 @@ import pyop2 as op2 from pyop2 import exceptions +from pyop2.mpi import temp_internal_comm from pyop2.utils import as_tuple import firedrake import firedrake.matrix as matrix -import firedrake.utils as utils from firedrake import ufl_expr from firedrake import slate from firedrake import solving from firedrake.formmanipulation import ExtractSubBlock +from firedrake.logging import logger from firedrake.adjoint_utils.dirichletbc import DirichletBCMixin from firedrake.petsc import PETSc @@ -88,7 +91,7 @@ def function_space_index(self): raise RuntimeError("This function should only be called when function space is indexed") return fs.index - @utils.cached_property + @cached_property def domain_args(self): r"""The sub_domain the BC applies to.""" # Define facet, edge, vertex using tuples: @@ -123,19 +126,19 @@ def domain_args(self): s.append((ndim - 1 - i, as_tuple(sd[i]))) return as_tuple(s) - @utils.cached_property + @cached_property def nodes(self): '''The list of nodes at which this boundary condition applies.''' # First, we bail out on zany elements. We don't know how to do BC's for them. 
V = self._function_space if isinstance(V.finat_element, (finat.Argyris, finat.Morley, finat.Bell)) or \ - (isinstance(V.finat_element, finat.Hermite) and V.mesh().topological_dimension() > 1): + (isinstance(V.finat_element, finat.Hermite) and V.mesh().topological_dimension > 1): raise NotImplementedError("Strong BCs not implemented for element %r, use Nitsche-type methods until we figure this out" % V.finat_element) def hermite_stride(bcnodes): fe = self._function_space.finat_element - tdim = self._function_space.mesh().topological_dimension() + tdim = self._function_space.mesh().topological_dimension if isinstance(fe, finat.Hermite) and tdim == 1: bcnodes = bcnodes[::2] # every second dof is the vertex value elif fe.complex.is_macrocell() and self._function_space.ufl_element().sobolev_space == ufl.H1: @@ -148,7 +151,7 @@ def hermite_stride(bcnodes): bcnodes = np.setdiff1d(bcnodes, deriv_ids) return bcnodes - sub_d = (self.sub_domain, ) if isinstance(self.sub_domain, str) else as_tuple(self.sub_domain) + sub_d = (self.sub_domain,) if isinstance(self.sub_domain, str) else as_tuple(self.sub_domain) sub_d = [s if isinstance(s, str) else as_tuple(s) for s in sub_d] bcnodes = [] for s in sub_d: @@ -162,18 +165,24 @@ def hermite_stride(bcnodes): # take intersection of facet nodes, and add it to bcnodes # i, j, k can also be strings. bcnodes1 = [] - if len(s) > 1 and not isinstance(self._function_space.finat_element, (finat.Lagrange, finat.GaussLobattoLegendre)): - raise TypeError("Currently, edge conditions have only been tested with CG Lagrange elements") for ss in s: # intersection of facets # Edge conditions have only been tested with Lagrange elements. # Need to expand the list. 
bcnodes1.append(hermite_stride(self._function_space.boundary_nodes(ss))) - bcnodes1 = functools.reduce(np.intersect1d, bcnodes1) + bcnodes1 = reduce(np.intersect1d, bcnodes1) bcnodes.append(bcnodes1) - return np.concatenate(bcnodes) + bcnodes = np.concatenate(bcnodes) + + with temp_internal_comm(self._function_space.mesh().comm) as icomm: + num_global_nodes = icomm.reduce(len(bcnodes), MPI.SUM, root=0) + if num_global_nodes == 0 and icomm.rank == 0: + logger.warn(f"Subdomain {self.sub_domain} is empty. This is likely an error. " + "Did you choose the right label?") + + return bcnodes - @utils.cached_property + @cached_property def node_set(self): '''The subset corresponding to the nodes at which this boundary condition applies.''' @@ -271,24 +280,11 @@ class DirichletBC(BCBase, DirichletBCMixin): to indicate all of the boundaries of the domain. In the case of extrusion the ``top`` and ``bottom`` strings are used to flag the bcs application on the top and bottom boundaries of the extruded mesh respectively. - :arg method: the method for determining boundary nodes. - DEPRECATED. The only way boundary nodes are identified is by - topological association. ''' @DirichletBCMixin._ad_annotate_init - def __init__(self, V, g, sub_domain, method=None): - if method == "geometric": - raise NotImplementedError("'geometric' bcs are no longer implemented. Please enforce them weakly") - if method not in {None, "topological"}: - raise ValueError(f"Unhandled boundary condition method '{method}'") - if method is not None: - import warnings - with warnings.catch_warnings(): - warnings.simplefilter('always', DeprecationWarning) - warnings.warn("Selecting a bcs method is deprecated. 
Only topological association is supported", - DeprecationWarning) + def __init__(self, V, g, sub_domain): super().__init__(V, sub_domain) if len(V.boundary_set) and not set(self.sub_domain).issubset(V.boundary_set): raise ValueError(f"Sub-domain {self.sub_domain} not in the boundary set of the restricted space {V.boundary_set}.") @@ -361,11 +357,11 @@ def function_arg(self, g): raise RuntimeError(f"Provided boundary value {g} does not match shape of space") try: self._function_arg = firedrake.Function(V) - # Use `Interpolator` instead of assembling an `Interpolate` form - # as the expression compilation needs to happen at this stage to - # determine if we should use interpolation or projection - # -> e.g. interpolation may not be supported for the element. - self._function_arg_update = firedrake.Interpolator(g, self._function_arg)._interpolate + interpolator = firedrake.get_interpolator(firedrake.interpolate(g, V)) + # Call this here to check if the element supports interpolation + # TODO: It's probably better to have a more explicit way of checking this + interpolator._get_callable() + self._function_arg_update = partial(interpolator.assemble, tensor=self._function_arg) except (NotImplementedError, AttributeError): # Element doesn't implement interpolation self._function_arg = firedrake.Function(V).project(g) diff --git a/firedrake/checkpointing.py b/firedrake/checkpointing.py index 3615d8c0de..d09538bb00 100644 --- a/firedrake/checkpointing.py +++ b/firedrake/checkpointing.py @@ -3,12 +3,12 @@ from petsc4py.PETSc import ViewerHDF5 import finat.ufl from pyop2 import op2 -from pyop2.mpi import COMM_WORLD, internal_comm, MPI +from pyop2.mpi import COMM_WORLD, MPI from petsctools import OptionsManager from firedrake.cython import hdf5interface as h5i from firedrake.cython import dmcommon from firedrake.petsc import PETSc -from firedrake.mesh import MeshTopology, ExtrudedMeshTopology, DEFAULT_MESH_NAME, make_mesh_from_coordinates, DistributedMeshOverlapType +from 
firedrake.mesh import MeshTopology, ExtrudedMeshTopology, MeshSequenceGeometry, DEFAULT_MESH_NAME, make_mesh_from_coordinates, DistributedMeshOverlapType from firedrake.functionspace import FunctionSpace from firedrake import functionspaceimpl as impl from firedrake.functionspacedata import get_global_numbering, create_element @@ -104,7 +104,6 @@ def __init__(self, basename, single_file=True, warnings.warn("DumbCheckpoint class will soon be deprecated; use CheckpointFile class instead.", DeprecationWarning) self.comm = comm or COMM_WORLD - self._comm = internal_comm(self.comm, self) self.mode = mode self._single = single_file @@ -195,7 +194,7 @@ def new_file(self, name=None): if mode == FILE_UPDATE and not exists: mode = FILE_CREATE self._vwr = PETSc.ViewerHDF5().create(name, mode=mode, - comm=self._comm) + comm=self.comm) if self.mode == FILE_READ: nprocs = self.read_attribute("/", "nprocs") if nprocs != self.comm.size: @@ -379,7 +378,6 @@ def __init__(self, filename, file_mode, comm=None): warnings.warn("HDF5File class will soon be deprecated; use CheckpointFile class instead.", DeprecationWarning) self.comm = comm or COMM_WORLD - self._comm = internal_comm(self.comm, self) self._filename = filename self._mode = file_mode @@ -397,7 +395,7 @@ def __init__(self, filename, file_mode, comm=None): # Try to use MPI try: - self._h5file = h5py.File(filename, file_mode, driver="mpio", comm=self._comm) + self._h5file = h5py.File(filename, file_mode, driver="mpio", comm=self.comm) except NameError: # the error you get if h5py isn't compiled against parallel HDF5 raise RuntimeError("h5py *must* be installed with MPI support") @@ -506,6 +504,165 @@ def __del__(self): self.close() +def _generate_function_space_name(V): + """Return a unique function space name. + + Parameters + ---------- + V : FunctionSpace + The function space to generate a name for. 
+ """ + V_names = [PREFIX + "_function_space"] + for Vsub in V: + elem = Vsub.ufl_element() + if isinstance(elem, finat.ufl.RestrictedElement): + # RestrictedElement.shortstr() contains '<>|{}'. + elem_name = "RestrictedElement(%s,%s)" % (elem.sub_element().shortstr(), elem.restriction_domain()) + elif isinstance(elem, finat.ufl.EnrichedElement): + # EnrichedElement.shortstr() contains '<>+'. + elem_name = "EnrichedElement(%s)" % ",".join(e.shortstr() for e in elem._elements) + else: + elem_name = elem.shortstr() + elem_name = elem_name.replace('?', 'None') + # MixedElement, VectorElement, TensorElement + # use '<' and '>' in shortstr(), but changing + # these to '(' and ')' causes no confusion. + elem_name = elem_name.replace('<', '(').replace('>', ')') + mesh = Vsub.mesh() + # Unwrap MeshSequenceGeometry to get the concrete mesh name. + # CheckpointFile.save_function calls mesh.unique() before reaching + # here, so this is a no-op on that path. But TemporaryFunctionCheckpointFile + # calls _generate_function_space_name directly without prior unwrapping, + # so we need to handle it here to produce consistent dataset names. + if isinstance(mesh, MeshSequenceGeometry): + mesh = mesh[-1] + V_names.append("_".join([mesh.name, elem_name])) + return "_".join(V_names) + + +class TemporaryFunctionCheckpointFile: + """An HDF5 file for saving and loading :class:`~.Function` data on a sub-communicator. + + This class has a deliberately narrow contract that differs from + :class:`CheckpointFile` in several important ways: + + - It operates on any communicator, typically a sub-communicator of + COMM_WORLD (e.g. ``COMM_SELF`` for per-rank files, or a node-local + communicator for per-node files). All I/O is collective only on that + communicator — no COMM_WORLD operations are performed. + - It stores and retrieves :class:`~.Function` *data* (the local Vec + array) only. It has no knowledge of mesh topology, DM sections, or + PETSc SF. 
This is why :meth:`load_function` takes a + :class:`~.FunctionSpace` rather than a mesh: the caller already holds + the function space and we simply fill in the values. + - It is ephemeral: files are not intended to survive between programme + runs. The communicator layout (partition) must be identical on save + and restore. + - The caller is responsible for assigning unique ``name``/``idx`` pairs + on save and for restoring the correct ``name`` and ``count`` on the + returned :class:`~.Function` after load. + + These constraints are intentional. Using :class:`CheckpointFile` with a + sub-communicator deadlocks on load because the mesh DM operations + (``sectionLoad``, ``globalVectorLoad``) are collective on COMM_WORLD. + This class deliberately bypasses that path. + + Parameters + ---------- + comm : mpi4py.MPI.Intracomm + The communicator on which I/O is collective. All ranks in this + communicator must enter save/load together. + filepath : str + Path to the HDF5 file to open. + mode : str + File access mode: ``'r'`` for reading, ``'w'`` to create/truncate, + ``'a'`` to append. + """ + + def __init__(self, comm, filepath, mode): + self.comm = comm + self.filepath = filepath + self._viewer = PETSc.ViewerHDF5() + self._viewer.create(filepath, mode=mode, comm=comm) + + def save_function(self, function, name=None, idx=None): + """Save a Function's local data to this file. + + Parameters + ---------- + function : Function + The function whose data to save. + name : str + Dataset name. The caller is responsible for uniqueness across + saves to this file (see :class:`CheckpointFunction`). + idx : int + Dataset index. Together with ``name`` this uniquely identifies + the stored dataset. 
+ """ + vec_name = f"{name}_{idx}" + + with function.dat.vec_ro as v: + local_array = v.getArray().copy() + + local_vec = PETSc.Vec().createWithArray( + local_array, size=(len(local_array), PETSc.DECIDE), comm=self.comm + ) + local_vec.setName(vec_name) + local_vec.view(self._viewer) + local_vec.destroy() + + def load_function(self, function_space, name, idx=None): + """Load a Function's data from this file. + + Parameters + ---------- + function_space : FunctionSpace + The function space for the returned Function. + name : str + Dataset name as passed to :meth:`save_function`. + idx : int + Dataset index as passed to :meth:`save_function`. + + Returns + ------- + Function + A new Function with data loaded from disk. The caller is + responsible for restoring the correct ``name`` and ``count`` + on the returned Function. + """ + from firedrake import Function + + vec_name = f"{name}_{idx}" + f = Function(function_space) + + with f.dat.vec_wo as v: + local_size = v.getLocalSize() + local_vec = PETSc.Vec().createMPI( + (local_size, PETSc.DECIDE), comm=self.comm + ) + local_vec.setName(vec_name) + local_vec.load(self._viewer) + v.setArray(local_vec.getArray()) + local_vec.destroy() + + return f + + def close(self): + """Close the underlying HDF5 viewer.""" + if hasattr(self, '_viewer'): + self._viewer.destroy() + del self._viewer + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def __del__(self): + self.close() + + class CheckpointFile: r"""Checkpointing meshes and :class:`~.Function` s in an HDF5 file. 
@@ -539,10 +696,9 @@ def __init__(self, filename, mode, comm=COMM_WORLD): self.viewer = ViewerHDF5() self.filename = filename self.comm = comm - self._comm = internal_comm(comm, self) r"""The neme of the checkpoint file.""" - self.viewer.create(filename, mode=mode, comm=self._comm) - self.commkey = self._comm.py2f() + self.viewer.create(filename, mode=mode, comm=self.comm) + self.commkey = self.comm.py2f() assert self.commkey != MPI.COMM_NULL.py2f() self._function_spaces = {} self._function_load_utils = {} @@ -581,11 +737,13 @@ def save_mesh(self, mesh, distribution_name=None, permutation_name=None): :kwarg distribution_name: the name under which distribution is saved; if `None`, auto-generated name will be used. :kwarg permutation_name: the name under which permutation is saved; if `None`, auto-generated name will be used. """ + # TODO: Add general MeshSequence support. + mesh = mesh.unique() # Handle extruded mesh tmesh = mesh.topology if mesh.extruded: # -- Save mesh topology -- - base_tmesh = mesh._base_mesh.topology + base_tmesh = mesh.topology._base_mesh self._save_mesh_topology(base_tmesh) if tmesh.name not in self.require_group(self._path_to_topologies()): # The tmesh (an ExtrudedMeshTopology) is treated as if it was a first class topology object. It @@ -606,11 +764,11 @@ def save_mesh(self, mesh, distribution_name=None, permutation_name=None): # Save tmesh.layers, which contains (start layer, stop layer)-tuple for each cell # Conceptually, we project these integer pairs onto DG0 vector space of dim=2. cell = base_tmesh.ufl_cell() - element = finat.ufl.VectorElement("DP" if cell.is_simplex() else "DQ", cell, 0, dim=2) + element = finat.ufl.VectorElement("DP" if cell.is_simplex else "DQ", cell, 0, dim=2) layers_tV = impl.FunctionSpace(base_tmesh, element) self._save_function_space_topology(layers_tV) # Note that _cell_numbering coincides with DG0 section, so we can use tmesh.layers directly. 
- layers_iset = PETSc.IS().createGeneral(tmesh.layers[:tmesh.cell_set.size, :], comm=tmesh._comm) + layers_iset = PETSc.IS().createGeneral(tmesh.layers[:tmesh.cell_set.size, :], comm=tmesh.comm) layers_iset.setName("_".join([PREFIX_EXTRUDED, "layers_iset"])) self.viewer.pushGroup(path) layers_iset.view(self.viewer) @@ -637,8 +795,9 @@ def save_mesh(self, mesh, distribution_name=None, permutation_name=None): # The followings are conceptually redundant, but needed. path = os.path.join(self._path_to_mesh(tmesh.name, mesh.name), PREFIX_EXTRUDED) self.require_group(path) - self.save_mesh(mesh._base_mesh) - self.set_attr(path, PREFIX_EXTRUDED + "_base_mesh", mesh._base_mesh.name) + if mesh._base_mesh: + self.save_mesh(mesh._base_mesh) + self.set_attr(path, PREFIX_EXTRUDED + "_base_mesh", mesh._base_mesh.name) else: # -- Save mesh topology -- self._save_mesh_topology(tmesh) @@ -674,7 +833,7 @@ def save_mesh(self, mesh, distribution_name=None, permutation_name=None): reflected = o_r_map[tmesh.entity_orientations[:tmesh.cell_set.size, -1]] reflected_indices = (reflected == 1) canonical_cell_orientations[reflected_indices] = 1 - canonical_cell_orientations[reflected_indices] - cell_orientations_iset = PETSc.IS().createGeneral(canonical_cell_orientations, comm=tmesh._comm) + cell_orientations_iset = PETSc.IS().createGeneral(canonical_cell_orientations, comm=tmesh.comm) cell_orientations_iset.setName("_".join([PREFIX_IMMERSED, "cell_orientations_iset"])) self.viewer.pushGroup(path) cell_orientations_iset.view(self.viewer) @@ -850,15 +1009,17 @@ def get_timestepping_history(self, mesh, name): @PETSc.Log.EventDecorator("SaveFunctionSpace") def _save_function_space(self, V): mesh = V.mesh() + # TODO: Add general MeshSequence support. 
+ mesh = mesh.unique() if isinstance(V.topological, impl.MixedFunctionSpace): - V_name = self._generate_function_space_name(V) + V_name = _generate_function_space_name(V) base_path = self._path_to_mixed_function_space(mesh.name, V_name) self.require_group(base_path) self.set_attr(base_path, PREFIX + "_num_sub_spaces", V.num_sub_spaces()) for i, Vsub in enumerate(V): path = os.path.join(base_path, str(i)) self.require_group(path) - Vsub_name = self._generate_function_space_name(Vsub) + Vsub_name = _generate_function_space_name(Vsub) self.set_attr(path, PREFIX + "_function_space", Vsub_name) self._save_function_space(Vsub) else: @@ -870,7 +1031,7 @@ def _save_function_space(self, V): # -- Save function space -- tmesh = tV.mesh() element = tV.ufl_element() - V_name = self._generate_function_space_name(V) + V_name = _generate_function_space_name(V) path = self._path_to_function_spaces(tmesh.name, mesh.name) if V_name not in self.require_group(path): # Save UFL element @@ -925,14 +1086,16 @@ def save_function(self, f, idx=None, name=None, timestepping_info={}): each index. """ V = f.function_space() - mesh = V.mesh() if name: g = Function(V, val=f.dat, name=name) return self.save_function(g, idx=idx, timestepping_info=timestepping_info) + mesh = V.mesh() + # TODO: Add general MeshSequence support. 
+ mesh = mesh.unique() # -- Save function space -- self._save_function_space(V) # -- Save function -- - V_name = self._generate_function_space_name(V) + V_name = _generate_function_space_name(V) if isinstance(V.topological, impl.MixedFunctionSpace): base_path = self._path_to_mixed_function(mesh.name, V_name, f.name()) self.require_group(base_path) @@ -1065,7 +1228,7 @@ def load_mesh(self, name=DEFAULT_MESH_NAME, reorder=None, distribution_parameter variable_layers = self.get_attr(path, PREFIX_EXTRUDED + "_variable_layers") if variable_layers: cell = base_tmesh.ufl_cell() - element = finat.ufl.VectorElement("DP" if cell.is_simplex() else "DQ", cell, 0, dim=2) + element = finat.ufl.VectorElement("DP" if cell.is_simplex else "DQ", cell, 0, dim=2) _ = self._load_function_space_topology(base_tmesh, element) base_tmesh_key = self._generate_mesh_key_from_names(base_tmesh.name, base_tmesh._distribution_name, @@ -1074,7 +1237,7 @@ def load_mesh(self, name=DEFAULT_MESH_NAME, reorder=None, distribution_parameter _, _, lsf = self._function_load_utils[base_tmesh_key + sd_key] nroots, _, _ = lsf.getGraph() layers_a = np.empty(nroots, dtype=utils.IntType) - layers_a_iset = PETSc.IS().createGeneral(layers_a, comm=self._comm) + layers_a_iset = PETSc.IS().createGeneral(layers_a, comm=self.comm) layers_a_iset.setName("_".join([PREFIX_EXTRUDED, "layers_iset"])) self.viewer.pushGroup(path) layers_a_iset.load(self.viewer) @@ -1098,13 +1261,17 @@ def load_mesh(self, name=DEFAULT_MESH_NAME, reorder=None, distribution_parameter radial_coord_name = self.get_attr(path, PREFIX + "_radial_coordinates") radial_coordinates = self._load_function_topology(tmesh, radial_coord_element, radial_coord_name) tV_radial_coord = impl.FunctionSpace(tmesh, radial_coord_element) - V_radial_coord = impl.WithGeometry.create(tV_radial_coord, mesh) + V_radial_coord = impl.WithGeometry(tV_radial_coord, mesh) radial_coord_function_name = self.get_attr(path, PREFIX + "_radial_coordinate_function") 
mesh.radial_coordinates = Function(V_radial_coord, val=radial_coordinates, name=radial_coord_function_name) # The followings are conceptually redundant, but needed. path = os.path.join(self._path_to_mesh(tmesh_name, name), PREFIX_EXTRUDED) - base_mesh_name = self.get_attr(path, PREFIX_EXTRUDED + "_base_mesh") - mesh._base_mesh = self.load_mesh(base_mesh_name, reorder=reorder, distribution_parameters=distribution_parameters, topology=base_tmesh) + try: + base_mesh_name = self.get_attr(path, PREFIX_EXTRUDED + "_base_mesh") + except KeyError: + pass + else: + mesh._base_mesh = self.load_mesh(base_mesh_name, reorder=reorder, distribution_parameters=distribution_parameters, topology=base_tmesh) else: utils._init() # -- Load mesh topology -- @@ -1128,7 +1295,7 @@ def load_mesh(self, name=DEFAULT_MESH_NAME, reorder=None, distribution_parameter path = self._path_to_mesh_immersed(tmesh.name, name) if path in self.h5pyfile: cell = tmesh.ufl_cell() - element = finat.ufl.FiniteElement("DP" if cell.is_simplex() else "DQ", cell, 0) + element = finat.ufl.FiniteElement("DP" if cell.is_simplex else "DQ", cell, 0) cell_orientations_tV = self._load_function_space_topology(tmesh, element) tmesh_key = self._generate_mesh_key_from_names(tmesh.name, tmesh._distribution_name, @@ -1137,7 +1304,7 @@ def load_mesh(self, name=DEFAULT_MESH_NAME, reorder=None, distribution_parameter _, _, lsf = self._function_load_utils[tmesh_key + sd_key] nroots, _, _ = lsf.getGraph() cell_orientations_a = np.empty(nroots, dtype=utils.IntType) - cell_orientations_a_iset = PETSc.IS().createGeneral(cell_orientations_a, comm=self._comm) + cell_orientations_a_iset = PETSc.IS().createGeneral(cell_orientations_a, comm=self.comm) cell_orientations_a_iset.setName("_".join([PREFIX_IMMERSED, "cell_orientations_iset"])) self.viewer.pushGroup(path) cell_orientations_a_iset.load(self.viewer) @@ -1174,7 +1341,7 @@ def _load_mesh_topology(self, tmesh_name, reorder, distribution_parameters): _distribution_name, = 
self.h5pyfile[path].keys() path = self._path_to_distribution(tmesh_name, _distribution_name) _comm_size = self.get_attr(path, "comm_size") - if _comm_size == self._comm.size and \ + if _comm_size == self.comm.size and \ distribution_parameters is None and reorder is None: load_distribution_permutation = True if load_distribution_permutation: @@ -1194,7 +1361,7 @@ def _load_mesh_topology(self, tmesh_name, reorder, distribution_parameters): permutation_name = None perm_is = None plex = PETSc.DMPlex() - plex.create(comm=self._comm) + plex.create(comm=self.comm) plex.setName(tmesh_name) # Check format path = os.path.join(self._path_to_topology(tmesh_name), "topology") @@ -1212,7 +1379,7 @@ def _load_mesh_topology(self, tmesh_name, reorder, distribution_parameters): plex.removeLabel("pyop2_ghost") if load_distribution_permutation: chart_size = np.empty(1, dtype=utils.IntType) - chart_sizes_iset = PETSc.IS().createGeneral(chart_size, comm=self._comm) + chart_sizes_iset = PETSc.IS().createGeneral(chart_size, comm=self.comm) chart_sizes_iset.setName("chart_sizes") path = self._path_to_distribution(tmesh_name, distribution_name) self.viewer.pushGroup(path) @@ -1220,7 +1387,7 @@ def _load_mesh_topology(self, tmesh_name, reorder, distribution_parameters): self.viewer.popGroup() chart_size = chart_sizes_iset.getIndices().item() perm = np.empty(chart_size, dtype=utils.IntType) - perm_is = PETSc.IS().createGeneral(perm, comm=self._comm) + perm_is = PETSc.IS().createGeneral(perm, comm=self.comm) path = self._path_to_permutation(tmesh_name, distribution_name, permutation_name) self.viewer.pushGroup(path) perm_is.setName("permutation") @@ -1239,6 +1406,8 @@ def _load_mesh_topology(self, tmesh_name, reorder, distribution_parameters): @PETSc.Log.EventDecorator("LoadFunctionSpace") def _load_function_space(self, mesh, name): + # TODO: Add general MeshSequence support. 
+ mesh = mesh.unique() mesh_key = self._generate_mesh_key_from_names(mesh.name, mesh.topology._distribution_name, mesh.topology._permutation_name) @@ -1262,7 +1431,7 @@ def _load_function_space(self, mesh, name): element = self._load_ufl_element(path, PREFIX + "_ufl_element") tV = self._load_function_space_topology(tmesh, element) # Construct function space - V = impl.WithGeometry.create(tV, mesh) + V = impl.WithGeometry(tV, mesh) else: raise RuntimeError(f""" FunctionSpace ({name}) not found in either of the following path in {self.filename}: @@ -1283,10 +1452,10 @@ def _load_function_space_topology(self, tmesh, element): sd_key = self._get_shared_data_key_for_checkpointing(tmesh, element) if tmesh_key + sd_key not in self._function_load_utils: topology_dm = tmesh.topology_dm - dm = PETSc.DMShell().create(comm=tmesh._comm) + dm = PETSc.DMShell().create(comm=tmesh.comm) dm.setName(self._get_dm_name_for_checkpointing(tmesh, element)) dm.setPointSF(topology_dm.getPointSF()) - section = PETSc.Section().create(comm=tmesh._comm) + section = PETSc.Section().create(comm=tmesh.comm) section.setPermutation(tmesh._dm_renumbering) dm.setSection(section) base_tmesh = tmesh._base_mesh if isinstance(tmesh, ExtrudedMeshTopology) else tmesh @@ -1314,6 +1483,8 @@ def load_function(self, mesh, name, idx=None): be loaded with idx only when it was saved with idx. :returns: the loaded :class:`~.Function`. """ + # TODO: Add general MeshSequence support. 
+ mesh = mesh.unique() tmesh = mesh.topology if name in self._get_mixed_function_name_mixed_function_space_name_map(mesh.name): V_name = self._get_mixed_function_name_mixed_function_space_name_map(mesh.name)[name] @@ -1340,10 +1511,7 @@ def load_function(self, mesh, name, idx=None): path = self._path_to_function_embedded(tmesh_name, mesh.name, V_name, name) _name = self.get_attr(path, PREFIX_EMBEDDED + "_function") _f = self.load_function(mesh, _name, idx=idx) - element = V.ufl_element() - _element = get_embedding_element_for_checkpointing(element, V.value_shape) - method = get_embedding_method_for_checkpointing(element) - assert _element == _f.function_space().ufl_element() + method = get_embedding_method_for_checkpointing(V.ufl_element()) f = Function(V, name=name) self._project_function_for_checkpointing(f, _f, method) return f @@ -1404,27 +1572,6 @@ def _generate_mesh_key(self, mesh_name, distribution_name, permutation_name, reo def _generate_mesh_key_from_names(self, mesh_name, distribution_name, permutation_name): return (self.filename, self.commkey, mesh_name, distribution_name, permutation_name) - def _generate_function_space_name(self, V): - """Return a unique function space name.""" - V_names = [PREFIX + "_function_space"] - for Vsub in V: - elem = Vsub.ufl_element() - if isinstance(elem, finat.ufl.RestrictedElement): - # RestrictedElement.shortstr() contains '<>|{}'. - elem_name = "RestrictedElement(%s,%s)" % (elem.sub_element().shortstr(), elem.restriction_domain()) - elif isinstance(elem, finat.ufl.EnrichedElement): - # EnrichedElement.shortstr() contains '<>+'. - elem_name = "EnrichedElement(%s)" % ",".join(e.shortstr() for e in elem._elements) - else: - elem_name = elem.shortstr() - elem_name = elem_name.replace('?', 'None') - # MixedElement, VectorElement, TensorElement - # use '<' and '>' in shortstr(), but changing - # these to '(' and ')' causes no confusion. 
- elem_name = elem_name.replace('<', '(').replace('>', ')') - V_names.append("_".join([Vsub.mesh().name, elem_name])) - return "_".join(V_names) - def _generate_dm_name(self, nodes_per_entity, real_tensorproduct, block_size): return "_".join([PREFIX, "dm"] + [str(n) for n in nodes_per_entity] @@ -1451,7 +1598,7 @@ def _get_dm_for_checkpointing(self, tV): nodes_per_entity, real_tensorproduct, block_size = sd_key global_numbering, _ = tV.mesh().create_section(nodes_per_entity, real_tensorproduct, block_size=block_size) topology_dm = tV.mesh().topology_dm - dm = PETSc.DMShell().create(tV.mesh()._comm) + dm = PETSc.DMShell().create(tV.mesh().comm) dm.setPointSF(topology_dm.getPointSF()) dm.setSection(global_numbering) else: diff --git a/firedrake/citations.py b/firedrake/citations.py index b3e9210b47..6cfb7dd49a 100644 --- a/firedrake/citations.py +++ b/firedrake/citations.py @@ -320,3 +320,19 @@ url = {http://arxiv.org/abs/1410.5620} } """) + +petsctools.add_citation("Croci2018", """ +@article{Croci2018, + title={Efficient White Noise Sampling and Coupling for Multilevel Monte Carlo with Nonnested Meshes}, + volume={6}, + ISSN={2166-2525}, + DOI={10.1137/18M1175239}, + number={4}, + journal={SIAM/ASA Journal on Uncertainty Quantification}, + author={Croci, M. and Giles, M. B. and Rognes, M. E. and Farrell, P. 
E.}, + year={2018}, + month={jan}, + pages={1630–1655}, + language={en} +} +""") diff --git a/firedrake/cofunction.py b/firedrake/cofunction.py index b08743c4a6..c826a455ea 100644 --- a/firedrake/cofunction.py +++ b/firedrake/cofunction.py @@ -3,7 +3,7 @@ import ufl from ufl.form import BaseForm -from pyop2 import op2, mpi +from pyop2 import op2 from pyadjoint.tape import stop_annotating, annotate_tape, get_working_tape from finat.ufl import MixedElement import firedrake.assemble @@ -16,6 +16,9 @@ from firedrake.petsc import PETSc +__all__ = ["Cofunction", "RieszMap"] + + class Cofunction(ufl.Cofunction, CofunctionMixin): r"""A :class:`Cofunction` represents a function on a dual space. @@ -65,10 +68,8 @@ def __init__(self, function_space, val=None, name=None, dtype=ScalarType, # User comm self.comm = V.comm - # Internal comm - self._comm = mpi.internal_comm(V.comm, self) self._function_space = V - self.uid = utils._new_uid(self._comm) + self.uid = utils._new_uid(self.comm) self._name = name or 'cofunction_%d' % self.uid self._label = "a cofunction" @@ -76,14 +77,11 @@ def __init__(self, function_space, val=None, name=None, dtype=ScalarType, val = val.dat if isinstance(val, (op2.Dat, op2.DatView, op2.MixedDat, op2.Global)): - assert val.comm == self._comm + assert val.comm == self.comm self.dat = val else: self.dat = function_space.make_dat(val, dtype, self.name()) - if isinstance(function_space, Cofunction): - self.dat.copy(function_space.dat) - @PETSc.Log.EventDecorator() def copy(self, deepcopy=True): r"""Return a copy of this :class:`firedrake.function.CoordinatelessFunction`. 
@@ -106,14 +104,14 @@ def _analyze_form_arguments(self): self._arguments = (ufl_expr.Argument(self.function_space().dual(), 0),) self._coefficients = (self,) - @utils.cached_property + @cached_property @CofunctionMixin._ad_annotate_subfunctions def subfunctions(self): r"""Extract any sub :class:`Cofunction`\s defined on the component spaces of this this :class:`Cofunction`'s :class:`.FunctionSpace`.""" return tuple(type(self)(fs, dat) for fs, dat in zip(self.function_space(), self.dat)) - @utils.cached_property + @cached_property def _components(self): if self.function_space().rank == 0: return (self, ) @@ -172,30 +170,47 @@ def zero(self, subset=None): @PETSc.Log.EventDecorator() @utils.known_pyop2_safe - def assign(self, expr, subset=None, expr_from_assemble=False): - r"""Set the :class:`Cofunction` value to the pointwise value of - expr. expr may only contain :class:`Cofunction`\s on the same - :class:`.FunctionSpace` as the :class:`Cofunction` being assigned to. + def assign(self, expr, subset=None, expr_from_assemble=False, allow_missing_dofs=False): + """Set value to the pointwise value of expr. + Parameters + ---------- + expr : ufl.form.BaseForm + Expression to be assigned. + subset : pyop2.types.set.Set or pyop2.types.set.Subset or pyop2.types.set.MixedSet + ``self.node_set`` or `pyop2.types.set.Subset` of ``self.node_set`` or + `pyop2.types.set.MixedSet` composed of them if `self` is a mixed cofunction. + expr_from_assemble : bool + Flag indicating whether the expression results from an assemble operation + performed within the current method. Required for the `CofunctionAssignBlock`. + allow_missing_dofs : bool + Permit assignment between objects with mismatching nodes. If `True` then + assignee nodes with no matching assigner nodes are ignored. + Only significant if assigning across submeshes. + + Returns + ------- + firedrake.cofunction.Cofunction + Returns `self`. 
+ + Notes + ----- + expr may only contain :class:`Cofunction` s on the same :class:`.FiredrakeDualSpace` as the + assignee :class:`Cofunction` or those on the similar spaces on submeshes. Similar functionality is available for the augmented assignment - operators `+=`, `-=`, `*=` and `/=`. For example, if `f` and `g` are - both Cofunctions on the same :class:`.FunctionSpace` then:: + operators `+=`, `-=`, `*=` and `/=`. For example, if ``f`` and ``g`` are + both Cofunctions on the same :class:`.FiredrakeDualSpace` then:: f += 2 * g - will add twice `g` to `f`. + will add twice ``g`` to ``f``. - If present, subset must be an :class:`pyop2.types.set.Subset` of this - :class:`Cofunction`'s ``node_set``. The expression will then - only be assigned to the nodes on that subset. + Assignment can only be performed for simple weighted sum expressions and constant + values. Things like ``u.assign(2*v + Constant(3.0))``. - The `expr_from_assemble` optional argument indicates whether the - expression results from an assemble operation performed within the - current method. `expr_from_assemble` is required for the - `CofunctionAssignBlock`. """ expr = ufl.as_ufl(expr) - if isinstance(expr, ufl.classes.Zero): + if isinstance(expr, (ufl.classes.Zero, ufl.ZeroBaseForm)): with stop_annotating(modifies=(self,)): self.dat.zero(subset=subset) return self @@ -209,6 +224,10 @@ def assign(self, expr, subset=None, expr_from_assemble=False): self.block_variable = self.create_block_variable() self.block_variable._checkpoint = DelegatedFunctionCheckpoint( expr.block_variable) + # We set CofunctionAssignBlock(..., rhs_from_assemble=True) + # so that we do not annotate the recursive call to assign + # within Cofunction.assign(BaseForm, subset=...). + # But we currently do not implement annotation for subset != None. 
get_working_tape().add_block( CofunctionAssignBlock( self, expr, rhs_from_assemble=expr_from_assemble) @@ -216,17 +235,18 @@ def assign(self, expr, subset=None, expr_from_assemble=False): expr.dat.copy(self.dat, subset=subset) return self - elif isinstance(expr, BaseForm): + elif isinstance(expr, BaseForm) and not isinstance(expr, Cofunction): # Enable c.assign(B) where c is a Cofunction and B an appropriate # BaseForm object. If annotation is enabled, the following # operation will result in an assemble block on the Pyadjoint tape. - assembled_expr = firedrake.assemble(expr) - return self.assign( - assembled_expr, subset=subset, - expr_from_assemble=True) + if subset is None: + return firedrake.assemble(expr, tensor=self) + else: + assembled_expr = firedrake.assemble(expr) + return self.assign(assembled_expr, subset=subset, expr_from_assemble=True) else: from firedrake.assign import Assigner - Assigner(self, expr, subset).assign() + Assigner(self, expr, subset).assign(allow_missing_dofs=allow_missing_dofs) return self def riesz_representation(self, riesz_map='L2', *, bcs=None, @@ -318,7 +338,7 @@ def interpolate(self, Parameters ---------- expression - A dual UFL expression to interpolate. + A UFL BaseForm to adjoint interpolate. ad_block_tag An optional string for tagging the resulting assemble block on the Pyadjoint tape. @@ -331,9 +351,9 @@ def interpolate(self, firedrake.cofunction.Cofunction Returns `self` """ - from firedrake import interpolation, assemble + from firedrake import interpolate, assemble v, = self.arguments() - interp = interpolation.Interpolate(v, expression, **kwargs) + interp = interpolate(v, expression, **kwargs) return assemble(interp, tensor=self, ad_block_tag=ad_block_tag) @property @@ -413,6 +433,8 @@ class RieszMap: variational problem that solves for the Riesz map. restrict: bool If `True`, use restricted function spaces in the Riesz map solver. + constant_jacobian : bool + Whether the matrix associated with the map is constant. 
""" def __init__(self, function_space_or_inner_product=None, @@ -517,3 +539,10 @@ def __call__(self, value): f"Unable to ascertain if {value} is primal or dual." ) return output + + @property + def constant_jacobian(self) -> bool: + """Whether the matrix associated with the map is constant. + """ + + return self._constant_jacobian diff --git a/firedrake/constant.py b/firedrake/constant.py index 3364502ecc..51f731dcde 100644 --- a/firedrake/constant.py +++ b/firedrake/constant.py @@ -16,6 +16,7 @@ Expression2UnicodeHandler, UC, subscript_number, PrecedenceRules, colorama, ) +from functools import cached_property from ufl.utils.counted import Counted @@ -124,7 +125,7 @@ def function_space(self): """Return a null function space.""" return None - @utils.cached_property + @cached_property def subfunctions(self): return (self,) diff --git a/firedrake/cython/dmcommon.pyx b/firedrake/cython/dmcommon.pyx index db194b04b7..a4d4d92460 100644 --- a/firedrake/cython/dmcommon.pyx +++ b/firedrake/cython/dmcommon.pyx @@ -788,6 +788,7 @@ def quadrilateral_closure_ordering(PETSc.DM plex, PetscInt nclosure, p, vi, v, fi, i PetscInt start_v, off PetscInt *closure = NULL + PetscInt closure_tmp[2*9] PetscInt c_vertices[4] PetscInt c_facets[4] PetscInt g_vertices[4] @@ -804,13 +805,13 @@ def quadrilateral_closure_ordering(PETSc.DM plex, ncells = cEnd - cStart entity_per_cell = 4 + 4 + 1 + CHKERR(PetscMalloc1(2*9, &closure)) + cell_closure = np.empty((ncells, entity_per_cell), dtype=IntType) for c in range(cStart, cEnd): CHKERR(PetscSectionGetOffset(cell_numbering.sec, c, &cell)) get_transitive_closure(plex.dm, c, PETSC_TRUE, &nclosure, &closure) - # First extract the facets (edges) and the vertices - # from the transitive closure into c_facets and c_vertices. # Here we assume that DMPlex gives entities in the order: # # 8--3--7 @@ -821,7 +822,65 @@ def quadrilateral_closure_ordering(PETSc.DM plex, # # where the starting vertex and order of traversal is arbitrary. # (We fix that later.) 
+ + # If we have a periodic mesh with only a single cell in the periodic + # direction then the closure will look like + # + # 4--1--5 + # | | + # 3 0 2 (vertical periodicity) + # | | + # 4--1--5 # + # or + # + # 5--3--5 + # | | + # 2 0 2 (horizontal periodicity) + # | | + # 4--1--4 + # + # and only have 6 entries instead of 9. For the following to work we have + # to blow this out to a 9 entry array including the repeats. + if nclosure == 4: + raise NotImplementedError("Single-cell periodic quad meshes are " + "not supported") + elif nclosure == 6: + horiz_periodicity, vert_periodicity = _get_periodicity(plex) + (_, horiz_unit_periodic) = horiz_periodicity + (_, vert_unit_periodic) = vert_periodicity + if vert_unit_periodic: + assert not horiz_unit_periodic + closure_tmp[2*0] = closure[2*0] + closure_tmp[2*1] = closure[2*1] + closure_tmp[2*2] = closure[2*2] + closure_tmp[2*3] = closure[2*1] + closure_tmp[2*4] = closure[2*3] + closure_tmp[2*5] = closure[2*4] + closure_tmp[2*6] = closure[2*5] + closure_tmp[2*7] = closure[2*5] + closure_tmp[2*8] = closure[2*4] + else: + assert horiz_unit_periodic + assert not vert_unit_periodic + closure_tmp[2*0] = closure[2*0] + closure_tmp[2*1] = closure[2*1] + closure_tmp[2*2] = closure[2*2] + closure_tmp[2*3] = closure[2*3] + closure_tmp[2*4] = closure[2*2] + closure_tmp[2*5] = closure[2*4] + closure_tmp[2*6] = closure[2*4] + closure_tmp[2*7] = closure[2*5] + closure_tmp[2*8] = closure[2*5] + + nclosure = 9 + for i in range(9): + closure[2*i] = closure_tmp[2*i] + else: + assert nclosure == 9 + + # Extract the facets (edges) and the vertices + # from the transitive closure into c_facets and c_vertices. # For the vertices, we also retrieve the global numbers into g_vertices. 
vi = 0 fi = 0 @@ -923,8 +982,7 @@ def quadrilateral_closure_ordering(PETSc.DM plex, cell_closure[cell, 4 + 3] = facets[1] cell_closure[cell, 8] = c - if closure != NULL: - restore_transitive_closure(plex.dm, 0, PETSC_TRUE, &nclosure, &closure) + CHKERR(PetscFree(closure)) return cell_closure @@ -1263,7 +1321,7 @@ def create_section(mesh, nodes_per_entity, on_base=False, block_size=1, boundary nodes = sum(nodes_per_entity[:, i]*(mesh.layers - i) for i in range(2)).reshape(dimension + 1, -1) else: nodes = nodes_per_entity.reshape(dimension + 1, -1) - section = PETSc.Section().create(comm=mesh._comm) + section = PETSc.Section().create(comm=mesh.comm) get_chart(dm.dm, &pStart, &pEnd) section.setChart(pStart, pEnd) @@ -1987,7 +2045,7 @@ def reordered_coords(PETSc.DM dm, PETSc.Section global_numbering, shape, referen get_depth_stratum(dm.dm, 0, &vStart, &vEnd) if isinstance(dm, PETSc.DMPlex): if not dm.getCoordinatesLocalized(): - # Use CG coordiantes. + # Use CG coordinates. dm_sec = dm.getCoordinateSection() dm_coords = dm.getCoordinatesLocal().array.reshape(shape) coords = np.empty_like(dm_coords) @@ -1998,12 +2056,11 @@ def reordered_coords(PETSc.DM dm, PETSc.Section global_numbering, shape, referen for i in range(dim): coords[offset, i] = dm_coords[dm_offset, i] else: - # Use DG coordiantes. + # Use DG coordinates. 
get_height_stratum(dm.dm, 0, &cStart, &cEnd) dim = dm.getCoordinateDim() ndofs, perm, perm_offsets = _get_firedrake_plex_permutation_dg_transitive_closure(dm) - dm_sec = dm.getCellCoordinateSection() - dm_coords = dm.getCellCoordinatesLocal().array.reshape(((cEnd - cStart) * ndofs[0], dim)) + dm_coords, dm_sec = _get_expanded_dm_dg_coords(dm, ndofs) coords = np.empty_like(dm_coords) for c in range(cStart, cEnd): CHKERR(PetscSectionGetOffset(global_numbering.sec, c, &offset)) # scalar offset @@ -2031,6 +2088,138 @@ def reordered_coords(PETSc.DM dm, PETSc.Section global_numbering, shape, referen raise ValueError("Only DMPlex and DMSwarm are supported.") return coords + +def _get_expanded_dm_dg_coords(dm: PETSc.DM, ndofs: np.ndarray): + """Return the DM DG coordinates expanded to the full closure size. + + This transformation accounts for the fact that single-cell periodic + domains have closures that are smaller than expected (due to repeated + points). + + """ + cdef: + const PetscReal *L + + PETSc.Section dm_sec_expanded + + cStart, cEnd = dm.getHeightStratum(0) + dim = dm.getCoordinateDim() + coords_shape = ((cEnd-cStart) * ndofs[0], dim) + + if dm.getCellCoordinateSection().getDof(cStart) < ndofs[0] * dim: + # Fewer cell coordinates available, we must be single-cell periodic + if dm.getCellType(cStart) == PETSc.DM.PolytopeType.QUADRILATERAL: + # If we have a periodic mesh with only a single cell in the periodic + # direction then the cell coordinates will be + # + # 1-----2 + # | | + # | | (vertical periodicity) + # | | + # 1-----2 + # + # or + # + # 2-----2 + # | | + # | | (horizontal periodicity) + # | | + # 1-----1 + # + # when the standard layout is + # + # 4-----3 + # | | + # | | + # | | + # 1-----2 + assert ndofs[0] == 4, "Not expecting high order coords here" + dm_coords_orig = dm.getCellCoordinatesLocal().array_r.reshape(((cEnd-cStart) * 2, dim)) + dm_coords_expanded = np.empty(coords_shape, dtype=dm_coords_orig.dtype) + + # Create a new cell coordinate 
section + dm_sec_orig = dm.getCellCoordinateSection() + dm_sec_expanded = PETSc.Section().create(comm=dm_sec_orig.comm) + dm_sec_expanded.setChart(*dm_sec_orig.getChart()) + dm_sec_expanded.setPermutation(dm_sec_orig.getPermutation()) + + horiz_periodicity, vert_periodicity = _get_periodicity(dm) + (_, horiz_unit_periodic) = horiz_periodicity + (_, vert_unit_periodic) = vert_periodicity + + # Find the domain sizes + CHKERR(DMGetPeriodicity(dm.dm, NULL, NULL, &L)) + + if horiz_unit_periodic: + if vert_unit_periodic: + raise NotImplementedError("Single-cell periodic quad meshes are " + "not supported") + else: + cell_width = L[0] + + for c in range(cStart, cEnd): + CHKERR(PetscSectionSetDof(dm_sec_expanded.sec, c, 8)) + + dm_coords_expanded[4*c+0, 0] = dm_coords_orig[2*c+0, 0] + dm_coords_expanded[4*c+1, 0] = dm_coords_orig[2*c+0, 0] + cell_width + dm_coords_expanded[4*c+2, 0] = dm_coords_orig[2*c+1, 0] + cell_width + dm_coords_expanded[4*c+3, 0] = dm_coords_orig[2*c+1, 0] + dm_coords_expanded[4*c+0, 1] = dm_coords_orig[2*c+0, 1] + dm_coords_expanded[4*c+1, 1] = dm_coords_orig[2*c+0, 1] + dm_coords_expanded[4*c+2, 1] = dm_coords_orig[2*c+1, 1] + dm_coords_expanded[4*c+3, 1] = dm_coords_orig[2*c+1, 1] + + else: + assert vert_unit_periodic + cell_height = L[1] + + for c in range(cStart, cEnd): + CHKERR(PetscSectionSetDof(dm_sec_expanded.sec, c, 8)) + + dm_coords_expanded[4*c+0, 0] = dm_coords_orig[2*c+0, 0] + dm_coords_expanded[4*c+1, 0] = dm_coords_orig[2*c+1, 0] + dm_coords_expanded[4*c+2, 0] = dm_coords_orig[2*c+1, 0] + dm_coords_expanded[4*c+3, 0] = dm_coords_orig[2*c+0, 0] + dm_coords_expanded[4*c+0, 1] = dm_coords_orig[2*c+0, 1] + dm_coords_expanded[4*c+1, 1] = dm_coords_orig[2*c+1, 1] + dm_coords_expanded[4*c+2, 1] = dm_coords_orig[2*c+1, 1] + cell_height + dm_coords_expanded[4*c+3, 1] = dm_coords_orig[2*c+0, 1] + cell_height + + dm_sec_expanded.setUp() + + dm_coords = dm_coords_expanded + dm_sec = dm_sec_expanded + + else: + raise NotImplementedError("Single 
cell periodicity for cell type " + f"{dm.getCellType(cStart)} is not supported") + + else: + dm_coords = dm.getCellCoordinatesLocal().array_r.reshape(coords_shape) + dm_sec = dm.getCellCoordinateSection() + + return dm_coords, dm_sec + + +def _get_periodicity(dm: PETSc.DM) -> tuple[tuple[bool, bool], ...]: + """Return mesh periodicity information. + + This function returns a 2-tuple of bools per dimension where the first entry indicates + whether the mesh is periodic in that dimension, and the second indicates whether the + mesh is single-cell periodic in that dimension. + + """ + cdef: + const PetscReal *maxCell, *L + + dim = dm.getCoordinateDim() + CHKERR(DMGetPeriodicity(dm.dm, &maxCell, NULL, &L)) + return tuple( + (L[d] >= 0, maxCell[d] >= L[d]) + for d in range(dim) + ) + + @cython.boundscheck(False) @cython.wraparound(False) def mark_entity_classes(PETSc.DM dm): @@ -3480,7 +3669,7 @@ cdef int DMPlexGetAdjacency_Facet_Support(PETSc.PetscDM dm, numAdj += 1 # Too many adjacent points for the provided output array. if numAdj > maxAdjSize: - SETERR(77) + CHKERR(PETSC_ERR_LIB) CHKERR(DMPlexRestoreTransitiveClosure(dm, point, PETSC_TRUE, &closureSize, &closure)) adjSize[0] = numAdj return 0 @@ -3565,7 +3754,7 @@ cdef int DMPlexGetAdjacency_Closure_Star_Ridge( numAdj += 1 # Too many adjacent points for the provided output array. if numAdj > maxAdjSize: - SETERR(77) + CHKERR(PETSC_ERR_LIB) CHKERR(DMPlexRestoreTransitiveClosure(dm, point, PETSC_TRUE, &closureSize, &closure)) CHKERR(DMPlexRestoreTransitiveClosure(dm, p, PETSC_FALSE, &starSize, &star)) adjSize[0] = numAdj @@ -3700,42 +3889,6 @@ def mark_points_with_function_array(PETSc.DM plex, CHKERR(DMLabelSetValue(dmlabel.dmlabel, p, label_value)) -def to_petsc_local_numbering(PETSc.Vec vec, V): - """ - Reorder a PETSc Vec corresponding to a Firedrake Function w.r.t. - the PETSc natural numbering. 
- - :arg vec: the PETSc Vec to reorder; must be a global vector - :arg V: the FunctionSpace of the Function which the Vec comes from - :ret out: a copy of the Vec, ordered with the PETSc natural numbering - """ - cdef int dim, idx, start, end, p, d, k - cdef PetscInt dof, off - cdef PETSc.Vec out - cdef PETSc.Section section - cdef np.ndarray varray, oarray - - section = V.dm.getGlobalSection() - out = vec.duplicate() - varray = vec.array_r - oarray = out.array - dim = V.value_size - idx = 0 - start, end = vec.getOwnershipRange() - for p in range(*section.getChart()): - CHKERR(PetscSectionGetDof(section.sec, p, &dof)) - if dof > 0: - CHKERR(PetscSectionGetOffset(section.sec, p, &off)) - assert off >= 0 - off *= dim - for d in range(dof): - for k in range(dim): - oarray[idx] = varray[off + dim * d + k - start] - idx += 1 - assert idx == (end - start) - return out - - def create_halo_exchange_sf(PETSc.DM dm): """Create the halo exchange sf. @@ -3819,7 +3972,8 @@ def submesh_create(PETSc.DM dm, PetscInt subdim, label_name, PetscInt label_value, - PetscBool ignore_label_halo): + PetscBool ignore_label_halo, + comm=None): """Create submesh. Parameters @@ -3834,12 +3988,12 @@ def submesh_create(PETSc.DM dm, Value in the label ignore_label_halo : bool If labeled points in the halo are ignored. + comm : PETSc.Comm | None + An optional sub-communicator to define the submesh. """ cdef: - PETSc.DM subdm = PETSc.DMPlex() PETSc.DMLabel label, temp_label - PETSc.SF ownership_transfer_sf = PETSc.SF() char *temp_label_name = "firedrake_submesh_temp_label" PetscInt pStart, pEnd, p, i, stratum_size PETSc.PetscIS stratum_is = NULL @@ -3863,7 +4017,11 @@ def submesh_create(PETSc.DM dm, CHKERR(ISRestoreIndices(stratum_is, &stratum_indices)) CHKERR(ISDestroy(&stratum_is)) # Make submesh using temp_label. 
- CHKERR(DMPlexFilter(dm.dm, temp_label.dmlabel, label_value, ignore_label_halo, PETSC_TRUE, &ownership_transfer_sf.sf, &subdm.dm)) + subdm, ownership_transfer_sf = dm.filter(label=temp_label, + value=label_value, + ignoreHalo=ignore_label_halo, + sanitizeSubMesh=PETSC_TRUE, + comm=comm) # Destroy temp_label. dm.removeLabel(temp_label_name) subdm.removeLabel(temp_label_name) @@ -3903,50 +4061,69 @@ def submesh_correct_entity_classes(PETSc.DM dm, if dm.comm.size == 1: return + CHKERR(DMPlexGetChart(dm.dm, &pStart, &pEnd)) CHKERR(DMPlexGetChart(subdm.dm, &subpStart, &subpEnd)) - CHKERR(PetscSFGetGraph(ownership_transfer_sf.sf, &nroots, &nleaves, &ilocal, &iremote)) - assert nroots == pEnd - pStart assert pStart == 0 - ownership_loss = np.zeros(pEnd - pStart, dtype=IntType) - ownership_gain = np.zeros(pEnd - pStart, dtype=IntType) - for i in range(nleaves): - p = ilocal[i] if ilocal else i - ownership_loss[p] = 1 - unit = MPI._typedict[np.dtype(IntType).char] - ownership_transfer_sf.reduceBegin(unit, ownership_loss, ownership_gain, MPI.REPLACE) - ownership_transfer_sf.reduceEnd(unit, ownership_loss, ownership_gain, MPI.REPLACE) - subpoint_is = subdm.getSubpointIS() - CHKERR(ISGetSize(subpoint_is.iset, &nsubpoints)) - assert nsubpoints == subpEnd - subpStart assert subpStart == 0 - CHKERR(ISGetIndices(subpoint_is.iset, &subpoint_indices)) CHKERR(DMGetLabel(subdm.dm, b"pyop2_core", &lbl_core)) CHKERR(DMGetLabel(subdm.dm, b"pyop2_owned", &lbl_owned)) CHKERR(DMGetLabel(subdm.dm, b"pyop2_ghost", &lbl_ghost)) CHKERR(DMLabelCreateIndex(lbl_core, subpStart, subpEnd)) CHKERR(DMLabelCreateIndex(lbl_owned, subpStart, subpEnd)) CHKERR(DMLabelCreateIndex(lbl_ghost, subpStart, subpEnd)) - for subp in range(subpStart, subpEnd): - p = subpoint_indices[subp] - if ownership_loss[p] == 1: - CHKERR(DMLabelHasPoint(lbl_core, subp, &has)) - assert has == PETSC_FALSE - CHKERR(DMLabelHasPoint(lbl_owned, subp, &has)) - assert has == PETSC_TRUE - CHKERR(DMLabelClearValue(lbl_owned, subp, 1)) - 
CHKERR(DMLabelSetValue(lbl_ghost, subp, 1)) - if ownership_gain[p] == 1: + + if subdm.comm.size == 1: + # Undistributed case: relabel every point as core + for subp in range(subpStart, subpEnd): CHKERR(DMLabelHasPoint(lbl_core, subp, &has)) - assert has == PETSC_FALSE + if has: + continue CHKERR(DMLabelHasPoint(lbl_ghost, subp, &has)) - assert has == PETSC_TRUE - CHKERR(DMLabelClearValue(lbl_ghost, subp, 1)) - CHKERR(DMLabelSetValue(lbl_owned, subp, 1)) + if has: + CHKERR(DMLabelClearValue(lbl_ghost, subp, 1)) + CHKERR(DMLabelHasPoint(lbl_owned, subp, &has)) + if has: + CHKERR(DMLabelClearValue(lbl_owned, subp, 1)) + CHKERR(DMLabelSetValue(lbl_core, subp, 1)) + else: + ownership_loss = np.zeros(pEnd - pStart, dtype=IntType) + ownership_gain = np.zeros(pEnd - pStart, dtype=IntType) + CHKERR(PetscSFGetGraph(ownership_transfer_sf.sf, &nroots, &nleaves, &ilocal, &iremote)) + assert nroots == pEnd - pStart + for i in range(nleaves): + p = ilocal[i] if ilocal else i + ownership_loss[p] = 1 + unit = MPI._typedict[np.dtype(IntType).char] + ownership_transfer_sf.reduceBegin(unit, ownership_loss, ownership_gain, MPI.REPLACE) + ownership_transfer_sf.reduceEnd(unit, ownership_loss, ownership_gain, MPI.REPLACE) + + subpoint_is = subdm.getSubpointIS() + CHKERR(ISGetSize(subpoint_is.iset, &nsubpoints)) + assert nsubpoints == subpEnd - subpStart + CHKERR(ISGetIndices(subpoint_is.iset, &subpoint_indices)) + + for subp in range(subpStart, subpEnd): + p = subpoint_indices[subp] + if ownership_loss[p] == 1: + CHKERR(DMLabelHasPoint(lbl_core, subp, &has)) + assert has == PETSC_FALSE + CHKERR(DMLabelHasPoint(lbl_owned, subp, &has)) + assert has == PETSC_TRUE + CHKERR(DMLabelClearValue(lbl_owned, subp, 1)) + CHKERR(DMLabelSetValue(lbl_ghost, subp, 1)) + if ownership_gain[p] == 1: + CHKERR(DMLabelHasPoint(lbl_core, subp, &has)) + assert has == PETSC_FALSE + CHKERR(DMLabelHasPoint(lbl_ghost, subp, &has)) + assert has == PETSC_TRUE + CHKERR(DMLabelClearValue(lbl_ghost, subp, 1)) + 
CHKERR(DMLabelSetValue(lbl_owned, subp, 1)) + + CHKERR(ISRestoreIndices(subpoint_is.iset, &subpoint_indices)) CHKERR(DMLabelDestroyIndex(lbl_core)) CHKERR(DMLabelDestroyIndex(lbl_owned)) CHKERR(DMLabelDestroyIndex(lbl_ghost)) - CHKERR(ISRestoreIndices(subpoint_is.iset, &subpoint_indices)) @cython.boundscheck(False) diff --git a/firedrake/cython/extrusion_numbering.pyx b/firedrake/cython/extrusion_numbering.pyx index 86167ed20b..52e6e30944 100644 --- a/firedrake/cython/extrusion_numbering.pyx +++ b/firedrake/cython/extrusion_numbering.pyx @@ -291,7 +291,7 @@ def layer_extents(PETSc.DM dm, PETSc.Section cell_numbering, tmp = numpy.copy(layer_extents) # To get owned points correct, we do a reduction over the SF. - CHKERR(MPI_Op_create(extents_reduce, 4, &EXTENTS_REDUCER)) + CHKERRMPI(MPI_Op_create(extents_reduce, 4, &EXTENTS_REDUCER)) CHKERR(PetscSFReduceBegin(sf.sf, contig.ob_mpi, layer_extents.data, tmp.data, @@ -300,7 +300,7 @@ def layer_extents(PETSc.DM dm, PETSc.Section cell_numbering, layer_extents.data, tmp.data, EXTENTS_REDUCER)) - CHKERR(MPI_Op_free(&EXTENTS_REDUCER)) + CHKERRMPI(MPI_Op_free(&EXTENTS_REDUCER)) layer_extents[:] = tmp[:] # OK, now we have the correct extents for owned points, but # potentially incorrect extents for ghost points, so do a SF Bcast @@ -564,7 +564,7 @@ def top_bottom_boundary_nodes(mesh, layer_extents = mesh.layer_extents cell_closure = mesh.cell_closure ncell, nclosure = mesh.cell_closure.shape - n_vert_facet = mesh._base_mesh.ufl_cell().num_facets() + n_vert_facet = mesh._base_mesh.ufl_cell().num_facets assert facet_points.shape[0] == n_vert_facet + 2 bottom_facet = facet_points[n_vert_facet] diff --git a/firedrake/cython/patchimpl.pyx b/firedrake/cython/patchimpl.pyx index 32fc858e4e..827681f235 100644 --- a/firedrake/cython/patchimpl.pyx +++ b/firedrake/cython/patchimpl.pyx @@ -2,9 +2,9 @@ from libc.stdint cimport uintptr_t include "petschdr.pxi" -def set_patch_residual(patch, function, ctx, is_snes=False, 
interior_facets=False): +def set_patch_residual(patch, function, ctx, is_snes=False, interior_facets=False, exterior_facets=False): if is_snes: - if interior_facets: + if interior_facets or exterior_facets: raise NotImplementedError CHKERR(SNESPatchSetComputeFunction((patch).snes, function, @@ -14,15 +14,19 @@ def set_patch_residual(patch, function, ctx, is_snes=False, interior_facets=Fals CHKERR(PCPatchSetComputeFunctionInteriorFacets((patch).pc, function, ctx)) + elif exterior_facets: + CHKERR(PCPatchSetComputeFunctionExteriorFacets((patch).pc, + function, + ctx)) else: CHKERR(PCPatchSetComputeFunction((patch).pc, function, ctx)) -def set_patch_jacobian(patch, function, ctx, is_snes=False, interior_facets=False): +def set_patch_jacobian(patch, function, ctx, is_snes=False, interior_facets=False, exterior_facets=False): if is_snes: - if interior_facets: + if interior_facets or exterior_facets: raise NotImplementedError CHKERR(SNESPatchSetComputeOperator((patch).snes, function, @@ -32,6 +36,10 @@ def set_patch_jacobian(patch, function, ctx, is_snes=False, interior_facets=Fals CHKERR(PCPatchSetComputeOperatorInteriorFacets((patch).pc, function, ctx)) + elif exterior_facets: + CHKERR(PCPatchSetComputeOperatorExteriorFacets((patch).pc, + function, + ctx)) else: CHKERR(PCPatchSetComputeOperator((patch).pc, function, diff --git a/firedrake/cython/petschdr.pxi b/firedrake/cython/petschdr.pxi index a94de11e56..42ac97e24d 100644 --- a/firedrake/cython/petschdr.pxi +++ b/firedrake/cython/petschdr.pxi @@ -1,4 +1,5 @@ cimport petsc4py.PETSc as PETSc +from petsc4py.PETSc cimport CHKERR, CHKERRMPI cimport mpi4py.MPI as MPI cimport numpy as np @@ -21,13 +22,16 @@ cdef extern from "petsc.h": PETSC_SCALAR, PETSC_COMPLEX, PETSC_DATATYPE_UNKNOWN + ctypedef enum PetscErrorCode: + PETSC_SUCCESS + PETSC_ERR_LIB cdef extern from "petscsys.h" nogil: - int PetscMalloc1(PetscInt,void*) - int PetscMalloc2(PetscInt,void*,PetscInt,void*) - int PetscFree(void*) - int PetscFree2(void*,void*) - 
int PetscSortIntWithArray(PetscInt,PetscInt[],PetscInt[]) + PetscErrorCode PetscMalloc1(PetscInt,void*) + PetscErrorCode PetscMalloc2(PetscInt,void*,PetscInt,void*) + PetscErrorCode PetscFree(void*) + PetscErrorCode PetscFree2(void*,void*) + PetscErrorCode PetscSortIntWithArray(PetscInt,PetscInt[],PetscInt[]) cdef extern from "petscdmtypes.h" nogil: ctypedef enum PetscDMPolytopeType "DMPolytopeType": @@ -51,90 +55,93 @@ cdef extern from "petscdmtypes.h" nogil: DM_NUM_POLYTOPES cdef extern from "petscdmplex.h" nogil: - int DMPlexGetHeightStratum(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt*) - int DMPlexGetDepthStratum(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt*) - int DMPlexGetPointHeight(PETSc.PetscDM,PetscInt,PetscInt*) - int DMPlexGetPointDepth(PETSc.PetscDM,PetscInt,PetscInt*) - - int DMPlexGetChart(PETSc.PetscDM,PetscInt*,PetscInt*) - int DMPlexGetConeSize(PETSc.PetscDM,PetscInt,PetscInt*) - int DMPlexGetCone(PETSc.PetscDM,PetscInt,PetscInt*[]) - int DMPlexGetConeOrientation(PETSc.PetscDM,PetscInt,PetscInt*[]) - int DMPlexGetSupportSize(PETSc.PetscDM,PetscInt,PetscInt*) - int DMPlexGetSupport(PETSc.PetscDM,PetscInt,PetscInt*[]) - int DMPlexGetMaxSizes(PETSc.PetscDM,PetscInt*,PetscInt*) - - int DMPlexGetTransitiveClosure(PETSc.PetscDM,PetscInt,PetscBool,PetscInt *,PetscInt *[]) - int DMPlexRestoreTransitiveClosure(PETSc.PetscDM,PetscInt,PetscBool,PetscInt *,PetscInt *[]) - int DMPlexDistributeData(PETSc.PetscDM,PETSc.PetscSF,PETSc.PetscSection,MPI.MPI_Datatype,void*,PETSc.PetscSection,void**) - int DMPlexSetAdjacencyUser(PETSc.PetscDM,int(*)(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt[],void*),void*) - int DMPlexCreatePointNumbering(PETSc.PetscDM,PETSc.PetscIS*) - int DMPlexLabelComplete(PETSc.PetscDM, PETSc.PetscDMLabel) - int DMPlexDistributeOverlap(PETSc.PetscDM,PetscInt,PETSc.PetscSF*,PETSc.PetscDM*) - - int DMPlexFilter(PETSc.PetscDM,PETSc.PetscDMLabel,PetscInt,PetscBool,PetscBool,PETSc.PetscSF*,PETSc.PetscDM*) - int 
DMPlexGetSubpointIS(PETSc.PetscDM,PETSc.PetscIS*) - int DMPlexGetSubpointMap(PETSc.PetscDM,PETSc.PetscDMLabel*) - int DMPlexSetSubpointMap(PETSc.PetscDM,PETSc.PetscDMLabel) - - int DMPlexSetCellType(PETSc.PetscDM,PetscInt,PetscDMPolytopeType) - int DMPlexGetCellType(PETSc.PetscDM,PetscInt,PetscDMPolytopeType*) + PetscErrorCode DMPlexGetHeightStratum(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt*) + PetscErrorCode DMPlexGetDepthStratum(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt*) + PetscErrorCode DMPlexGetPointHeight(PETSc.PetscDM,PetscInt,PetscInt*) + PetscErrorCode DMPlexGetPointDepth(PETSc.PetscDM,PetscInt,PetscInt*) + + PetscErrorCode DMPlexGetChart(PETSc.PetscDM,PetscInt*,PetscInt*) + PetscErrorCode DMPlexGetConeSize(PETSc.PetscDM,PetscInt,PetscInt*) + PetscErrorCode DMPlexGetCone(PETSc.PetscDM,PetscInt,PetscInt*[]) + PetscErrorCode DMPlexGetConeOrientation(PETSc.PetscDM,PetscInt,PetscInt*[]) + PetscErrorCode DMPlexGetSupportSize(PETSc.PetscDM,PetscInt,PetscInt*) + PetscErrorCode DMPlexGetSupport(PETSc.PetscDM,PetscInt,PetscInt*[]) + PetscErrorCode DMPlexGetMaxSizes(PETSc.PetscDM,PetscInt*,PetscInt*) + + PetscErrorCode DMPlexGetTransitiveClosure(PETSc.PetscDM,PetscInt,PetscBool,PetscInt *,PetscInt *[]) + PetscErrorCode DMPlexRestoreTransitiveClosure(PETSc.PetscDM,PetscInt,PetscBool,PetscInt *,PetscInt *[]) + PetscErrorCode DMPlexDistributeData(PETSc.PetscDM,PETSc.PetscSF,PETSc.PetscSection,MPI.MPI_Datatype,void*,PETSc.PetscSection,void**) + PetscErrorCode DMPlexSetAdjacencyUser(PETSc.PetscDM,int(*)(PETSc.PetscDM,PetscInt,PetscInt*,PetscInt[],void*),void*) + PetscErrorCode DMPlexCreatePointNumbering(PETSc.PetscDM,PETSc.PetscIS*) + PetscErrorCode DMPlexLabelComplete(PETSc.PetscDM, PETSc.PetscDMLabel) + PetscErrorCode DMPlexDistributeOverlap(PETSc.PetscDM,PetscInt,PETSc.PetscSF*,PETSc.PetscDM*) + + PetscErrorCode DMPlexGetSubpointIS(PETSc.PetscDM,PETSc.PetscIS*) + PetscErrorCode DMPlexGetSubpointMap(PETSc.PetscDM,PETSc.PetscDMLabel*) + PetscErrorCode 
DMPlexSetSubpointMap(PETSc.PetscDM,PETSc.PetscDMLabel) + + PetscErrorCode DMPlexSetCellType(PETSc.PetscDM,PetscInt,PetscDMPolytopeType) + PetscErrorCode DMPlexGetCellType(PETSc.PetscDM,PetscInt,PetscDMPolytopeType*) cdef extern from "petscdmlabel.h" nogil: struct _n_DMLabel ctypedef _n_DMLabel* DMLabel "DMLabel" - int DMLabelCreateIndex(DMLabel, PetscInt, PetscInt) - int DMLabelDestroyIndex(DMLabel) - int DMLabelDestroy(DMLabel*) - int DMLabelHasPoint(DMLabel, PetscInt, PetscBool*) - int DMLabelSetValue(DMLabel, PetscInt, PetscInt) - int DMLabelGetValue(DMLabel, PetscInt, PetscInt*) - int DMLabelClearValue(DMLabel, PetscInt, PetscInt) - int DMLabelGetStratumSize(DMLabel, PetscInt, PetscInt*) - int DMLabelGetStratumIS(DMLabel, PetscInt, PETSc.PetscIS*) + PetscErrorCode DMLabelCreateIndex(DMLabel, PetscInt, PetscInt) + PetscErrorCode DMLabelDestroyIndex(DMLabel) + PetscErrorCode DMLabelDestroy(DMLabel*) + PetscErrorCode DMLabelHasPoint(DMLabel, PetscInt, PetscBool*) + PetscErrorCode DMLabelSetValue(DMLabel, PetscInt, PetscInt) + PetscErrorCode DMLabelGetValue(DMLabel, PetscInt, PetscInt*) + PetscErrorCode DMLabelClearValue(DMLabel, PetscInt, PetscInt) + PetscErrorCode DMLabelGetStratumSize(DMLabel, PetscInt, PetscInt*) + PetscErrorCode DMLabelGetStratumIS(DMLabel, PetscInt, PETSc.PetscIS*) cdef extern from "petscdm.h" nogil: - int DMCreateLabel(PETSc.PetscDM,char[]) - int DMGetLabel(PETSc.PetscDM,char[],DMLabel*) - int DMGetPointSF(PETSc.PetscDM,PETSc.PetscSF*) - int DMSetLabelValue(PETSc.PetscDM,char[],PetscInt,PetscInt) - int DMGetLabelValue(PETSc.PetscDM,char[],PetscInt,PetscInt*) + PetscErrorCode DMCreateLabel(PETSc.PetscDM,char[]) + PetscErrorCode DMGetLabel(PETSc.PetscDM,char[],DMLabel*) + PetscErrorCode DMGetPointSF(PETSc.PetscDM,PETSc.PetscSF*) + PetscErrorCode DMSetLabelValue(PETSc.PetscDM,char[],PetscInt,PetscInt) + PetscErrorCode DMGetLabelValue(PETSc.PetscDM,char[],PetscInt,PetscInt*) + + PetscErrorCode DMGetPeriodicity(PETSc.PetscDM,PetscReal *[], 
PetscReal *[], PetscReal *[]) + PetscErrorCode DMGetSparseLocalize(PETSc.PetscDM,PetscBool *) + PetscErrorCode DMSetSparseLocalize(PETSc.PetscDM,PetscBool) cdef extern from "petscdmswarm.h" nogil: - int DMSwarmGetLocalSize(PETSc.PetscDM,PetscInt*) - int DMSwarmGetCellDM(PETSc.PetscDM, PETSc.PetscDM*) - int DMSwarmGetCellDMActive(PETSc.PetscDM, PETSc.PetscDMSwarmCellDM*) - int DMSwarmCellDMGetCellID(PETSc.PetscDMSwarmCellDM, const char *[]) - int DMSwarmGetField(PETSc.PetscDM,const char[],PetscInt*,PetscDataType*,void**) - int DMSwarmRestoreField(PETSc.PetscDM,const char[],PetscInt*,PetscDataType*,void**) + PetscErrorCode DMSwarmGetLocalSize(PETSc.PetscDM,PetscInt*) + PetscErrorCode DMSwarmGetCellDM(PETSc.PetscDM, PETSc.PetscDM*) + PetscErrorCode DMSwarmGetCellDMActive(PETSc.PetscDM, PETSc.PetscDMSwarmCellDM*) + PetscErrorCode DMSwarmCellDMGetCellID(PETSc.PetscDMSwarmCellDM, const char *[]) + PetscErrorCode DMSwarmGetField(PETSc.PetscDM,const char[],PetscInt*,PetscDataType*,void**) + PetscErrorCode DMSwarmRestoreField(PETSc.PetscDM,const char[],PetscInt*,PetscDataType*,void**) cdef extern from "petscvec.h" nogil: - int VecGetArray(PETSc.PetscVec,PetscScalar**) - int VecRestoreArray(PETSc.PetscVec,PetscScalar**) - int VecGetArrayRead(PETSc.PetscVec,const PetscScalar**) - int VecRestoreArrayRead(PETSc.PetscVec,const PetscScalar**) + PetscErrorCode VecGetArray(PETSc.PetscVec,PetscScalar**) + PetscErrorCode VecRestoreArray(PETSc.PetscVec,PetscScalar**) + PetscErrorCode VecGetArrayRead(PETSc.PetscVec,const PetscScalar**) + PetscErrorCode VecRestoreArrayRead(PETSc.PetscVec,const PetscScalar**) cdef extern from "petscis.h" nogil: - int PetscSectionGetOffset(PETSc.PetscSection,PetscInt,PetscInt*) - int PetscSectionGetDof(PETSc.PetscSection,PetscInt,PetscInt*) - int PetscSectionSetDof(PETSc.PetscSection,PetscInt,PetscInt) - int PetscSectionSetFieldDof(PETSc.PetscSection,PetscInt,PetscInt,PetscInt) - int PetscSectionGetFieldDof(PETSc.PetscSection,PetscInt,PetscInt,PetscInt*) 
- int PetscSectionGetConstraintDof(PETSc.PetscSection,PetscInt,PetscInt*) - int PetscSectionSetConstraintDof(PETSc.PetscSection,PetscInt,PetscInt) - int PetscSectionSetConstraintIndices(PETSc.PetscSection,PetscInt, PetscInt[]) - int PetscSectionGetConstraintIndices(PETSc.PetscSection,PetscInt, const PetscInt**) - int PetscSectionGetMaxDof(PETSc.PetscSection,PetscInt*) - int PetscSectionSetPermutation(PETSc.PetscSection,PETSc.PetscIS) - int ISGetIndices(PETSc.PetscIS,PetscInt*[]) - int ISGetSize(PETSc.PetscIS,PetscInt*) - int ISRestoreIndices(PETSc.PetscIS,PetscInt*[]) - int ISGeneralSetIndices(PETSc.PetscIS,PetscInt,PetscInt[],PetscCopyMode) - int ISLocalToGlobalMappingCreateIS(PETSc.PetscIS,PETSc.PetscLGMap*) - int ISLocalToGlobalMappingGetSize(PETSc.PetscLGMap,PetscInt*) - int ISLocalToGlobalMappingGetBlockIndices(PETSc.PetscLGMap, const PetscInt**) - int ISLocalToGlobalMappingRestoreBlockIndices(PETSc.PetscLGMap, const PetscInt**) - int ISDestroy(PETSc.PetscIS*) + PetscErrorCode PetscSectionGetOffset(PETSc.PetscSection,PetscInt,PetscInt*) + PetscErrorCode PetscSectionGetDof(PETSc.PetscSection,PetscInt,PetscInt*) + PetscErrorCode PetscSectionSetDof(PETSc.PetscSection,PetscInt,PetscInt) + PetscErrorCode PetscSectionSetFieldDof(PETSc.PetscSection,PetscInt,PetscInt,PetscInt) + PetscErrorCode PetscSectionGetFieldDof(PETSc.PetscSection,PetscInt,PetscInt,PetscInt*) + PetscErrorCode PetscSectionGetConstraintDof(PETSc.PetscSection,PetscInt,PetscInt*) + PetscErrorCode PetscSectionSetConstraintDof(PETSc.PetscSection,PetscInt,PetscInt) + PetscErrorCode PetscSectionSetConstraintIndices(PETSc.PetscSection,PetscInt, PetscInt[]) + PetscErrorCode PetscSectionGetConstraintIndices(PETSc.PetscSection,PetscInt, const PetscInt**) + PetscErrorCode PetscSectionGetMaxDof(PETSc.PetscSection,PetscInt*) + PetscErrorCode PetscSectionSetPermutation(PETSc.PetscSection,PETSc.PetscIS) + PetscErrorCode ISGetIndices(PETSc.PetscIS,PetscInt*[]) + PetscErrorCode ISGetSize(PETSc.PetscIS,PetscInt*) + 
PetscErrorCode ISRestoreIndices(PETSc.PetscIS,PetscInt*[]) + PetscErrorCode ISGeneralSetIndices(PETSc.PetscIS,PetscInt,PetscInt[],PetscCopyMode) + PetscErrorCode ISLocalToGlobalMappingCreateIS(PETSc.PetscIS,PETSc.PetscLGMap*) + PetscErrorCode ISLocalToGlobalMappingGetSize(PETSc.PetscLGMap,PetscInt*) + PetscErrorCode ISLocalToGlobalMappingGetBlockIndices(PETSc.PetscLGMap, const PetscInt**) + PetscErrorCode ISLocalToGlobalMappingRestoreBlockIndices(PETSc.PetscLGMap, const PetscInt**) + PetscErrorCode ISDestroy(PETSc.PetscIS*) cdef extern from "petscsf.h" nogil: struct PetscSFNode_: @@ -142,14 +149,14 @@ cdef extern from "petscsf.h" nogil: PetscInt index ctypedef PetscSFNode_ PetscSFNode "PetscSFNode" - int PetscSFGetGraph(PETSc.PetscSF,PetscInt*,PetscInt*,PetscInt**,PetscSFNode**) - int PetscSFSetGraph(PETSc.PetscSF,PetscInt,PetscInt,PetscInt*,PetscCopyMode,PetscSFNode*,PetscCopyMode) - int PetscSFBcastBegin(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*,) - int PetscSFBcastEnd(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*) - int PetscSFReduceBegin(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*,MPI.MPI_Op) - int PetscSFReduceEnd(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*,MPI.MPI_Op) + PetscErrorCode PetscSFGetGraph(PETSc.PetscSF,PetscInt*,PetscInt*,PetscInt**,PetscSFNode**) + PetscErrorCode PetscSFSetGraph(PETSc.PetscSF,PetscInt,PetscInt,PetscInt*,PetscCopyMode,PetscSFNode*,PetscCopyMode) + PetscErrorCode PetscSFBcastBegin(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*,) + PetscErrorCode PetscSFBcastEnd(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*) + PetscErrorCode PetscSFReduceBegin(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*,MPI.MPI_Op) + PetscErrorCode PetscSFReduceEnd(PETSc.PetscSF,MPI.MPI_Datatype,const void*, void*,MPI.MPI_Op) -ctypedef int (*PetscPCPatchComputeFunction)(PETSc.PetscPC, +ctypedef PetscErrorCode (*PetscPCPatchComputeFunction)(PETSc.PetscPC, PetscInt, PETSc.PetscVec, PETSc.PetscVec, @@ -158,7 +165,7 @@ ctypedef int 
(*PetscPCPatchComputeFunction)(PETSc.PetscPC, const PetscInt*, const PetscInt*, void*) -ctypedef int (*PetscPCPatchComputeOperator)(PETSc.PetscPC, +ctypedef PetscErrorCode (*PetscPCPatchComputeOperator)(PETSc.PetscPC, PetscInt, PETSc.PetscVec, PETSc.PetscMat, @@ -168,52 +175,30 @@ ctypedef int (*PetscPCPatchComputeOperator)(PETSc.PetscPC, const PetscInt*, void*) cdef extern from "petscsnes.h" nogil: - int SNESPatchSetComputeFunction(PETSc.PetscSNES, PetscPCPatchComputeFunction, void *) - int SNESPatchSetComputeOperator(PETSc.PetscSNES, PetscPCPatchComputeOperator, void *) + PetscErrorCode SNESPatchSetComputeFunction(PETSc.PetscSNES, PetscPCPatchComputeFunction, void *) + PetscErrorCode SNESPatchSetComputeOperator(PETSc.PetscSNES, PetscPCPatchComputeOperator, void *) cdef extern from "petscpc.h" nogil: - int PCPatchSetComputeFunction(PETSc.PetscPC, PetscPCPatchComputeFunction, void *) - int PCPatchSetComputeFunctionInteriorFacets(PETSc.PetscPC, PetscPCPatchComputeFunction, void *) - int PCPatchSetComputeOperator(PETSc.PetscPC, PetscPCPatchComputeOperator, void *) - int PCPatchSetComputeOperatorInteriorFacets(PETSc.PetscPC, PetscPCPatchComputeOperator, void *) + PetscErrorCode PCPatchSetComputeFunction(PETSc.PetscPC, PetscPCPatchComputeFunction, void *) + PetscErrorCode PCPatchSetComputeFunctionInteriorFacets(PETSc.PetscPC, PetscPCPatchComputeFunction, void *) + PetscErrorCode PCPatchSetComputeOperator(PETSc.PetscPC, PetscPCPatchComputeOperator, void *) + PetscErrorCode PCPatchSetComputeOperatorInteriorFacets(PETSc.PetscPC, PetscPCPatchComputeOperator, void *) + PetscErrorCode PCPatchSetComputeOperatorExteriorFacets(PETSc.PetscPC, PetscPCPatchComputeOperator, void *) + PetscErrorCode PCPatchSetComputeFunctionExteriorFacets(PETSc.PetscPC, PetscPCPatchComputeFunction, void *) cdef extern from "petscbt.h" nogil: ctypedef char * PetscBT - int PetscBTCreate(PetscInt,PetscBT*) - int PetscBTDestroy(PetscBT*) + PetscErrorCode PetscBTCreate(PetscInt,PetscBT*) + PetscErrorCode 
PetscBTDestroy(PetscBT*) char PetscBTLookup(PetscBT,PetscInt) - int PetscBTSet(PetscBT,PetscInt) + PetscErrorCode PetscBTSet(PetscBT,PetscInt) cdef extern from "petscmat.h" nogil: - int MatSetValuesLocal(PETSc.PetscMat, PetscInt, const PetscInt[], PetscInt, const PetscInt[], + PetscErrorCode MatSetValuesLocal(PETSc.PetscMat, PetscInt, const PetscInt[], PetscInt, const PetscInt[], const PetscScalar[], PetscInt) - int MatAssemblyBegin(PETSc.PetscMat, PetscInt) - int MatAssemblyEnd(PETSc.PetscMat, PetscInt) + PetscErrorCode MatAssemblyBegin(PETSc.PetscMat, PetscInt) + PetscErrorCode MatAssemblyEnd(PETSc.PetscMat, PetscInt) PetscInt MAT_FINAL_ASSEMBLY = 0 cdef extern from * nogil: - int PetscObjectTypeCompare(PETSc.PetscObject, char[], PetscBool*) - -# --- Error handling taken from petsc4py (src/PETSc.pyx) ------------- - -cdef extern from *: - void PyErr_SetObject(object, object) - void *PyExc_RuntimeError - -cdef object PetscError = PyExc_RuntimeError - -cdef inline int SETERR(int ierr) with gil: - if (PetscError) != NULL: - PyErr_SetObject(PetscError, ierr) - else: - PyErr_SetObject(PyExc_RuntimeError, ierr) - return ierr - -cdef inline int CHKERR(int ierr) except -1 nogil: - if ierr == 0: - return 0 # no error - else: - SETERR(ierr) - return -1 - -# -------------------------------------------------------------------- + PetscErrorCode PetscObjectTypeCompare(PETSc.PetscObject, char[], PetscBool*) diff --git a/firedrake/cython/spatialindex.pyx b/firedrake/cython/spatialindex.pyx index a415993a3a..88466650e1 100644 --- a/firedrake/cython/spatialindex.pyx +++ b/firedrake/cython/spatialindex.pyx @@ -5,7 +5,6 @@ import numpy as np import ctypes import cython from libc.stdint cimport uintptr_t -from libc.stdlib cimport free include "spatialindexinc.pxi" @@ -14,37 +13,11 @@ cdef class SpatialIndex(object): cdef IndexH index - def __cinit__(self, uint32_t dim): - """Initialize a native spatial index. 
- - :arg dim: spatial (geometric) dimension - """ - cdef IndexPropertyH ps = NULL - cdef RTError err = RT_None - + def __cinit__(self, uintptr_t handle): self.index = NULL - try: - ps = IndexProperty_Create() - if ps == NULL: - raise RuntimeError("failed to create index properties") - - err = IndexProperty_SetIndexType(ps, RT_RTree) - if err != RT_None: - raise RuntimeError("failed to set index type") - - err = IndexProperty_SetDimension(ps, dim) - if err != RT_None: - raise RuntimeError("failed to set dimension") - - err = IndexProperty_SetIndexStorage(ps, RT_Memory) - if err != RT_None: - raise RuntimeError("failed to set index storage") - - self.index = Index_Create(ps) - if self.index == NULL: - raise RuntimeError("failed to create index") - finally: - IndexProperty_Destroy(ps) + if handle == 0: + raise ValueError("SpatialIndex handle must be nonzero") + self.index = handle def __dealloc__(self): Index_Destroy(self.index) @@ -55,6 +28,32 @@ cdef class SpatialIndex(object): return ctypes.c_void_p( self.index) +cdef IndexPropertyH _make_index_properties(uint32_t dim) except *: + cdef IndexPropertyH ps = NULL + cdef RTError err = RT_None + + ps = IndexProperty_Create() + if ps == NULL: + raise RuntimeError("failed to create index properties") + + err = IndexProperty_SetIndexType(ps, RT_RTree) + if err != RT_None: + IndexProperty_Destroy(ps) + raise RuntimeError("failed to set index type") + + err = IndexProperty_SetDimension(ps, dim) + if err != RT_None: + IndexProperty_Destroy(ps) + raise RuntimeError("failed to set dimension") + + err = IndexProperty_SetIndexStorage(ps, RT_Memory) + if err != RT_None: + IndexProperty_Destroy(ps) + raise RuntimeError("failed to set index storage") + + return ps + + @cython.boundscheck(False) @cython.wraparound(False) def from_regions(np.ndarray[np.float64_t, ndim=2, mode="c"] regions_lo, @@ -67,40 +66,45 @@ def from_regions(np.ndarray[np.float64_t, ndim=2, mode="c"] regions_lo, """ cdef: SpatialIndex spatial_index - int64_t i + 
np.ndarray[np.int64_t, ndim=1, mode="c"] ids + IndexPropertyH ps + IndexH index + uint64_t n uint32_t dim - RTError err + uint64_t i_stri + uint64_t d_i_stri + uint64_t d_j_stri assert regions_lo.shape[0] == regions_hi.shape[0] assert regions_lo.shape[1] == regions_hi.shape[1] - dim = regions_lo.shape[1] + n = regions_lo.shape[0] + dim = regions_lo.shape[1] + + ps = NULL + index = NULL + try: + ps = _make_index_properties(dim) + if n == 0: + # Index_CreateWithArray will fail for n=0, so create an empty index instead. + index = Index_Create(ps) + else: + ids = np.arange(n, dtype=np.int64) + + # Calculate the strides + i_stri = (ids.strides[0] // ids.itemsize) + d_i_stri = (regions_lo.strides[0] // regions_lo.itemsize) + d_j_stri = (regions_lo.strides[1] // regions_lo.itemsize) + + index = Index_CreateWithArray(ps, n, dim, + i_stri, d_i_stri, d_j_stri, + ids.data, + regions_lo.data, + regions_hi.data) + if index == NULL: + raise RuntimeError("failed to create index") + + spatial_index = SpatialIndex(index) + finally: + IndexProperty_Destroy(ps) - spatial_index = SpatialIndex(dim) - for i in xrange(len(regions_lo)): - err = Index_InsertData(spatial_index.index, i, ®ions_lo[i, 0], ®ions_hi[i, 0], dim, NULL, 0) - if err != RT_None: - raise RuntimeError("failed to insert data into spatial index") return spatial_index - - -def bounding_boxes(SpatialIndex sidx not None, np.ndarray[np.float64_t, ndim=1] x): - """Given a spatial index and a point, return the bounding boxes the point is in. 
- - :arg sidx: the SpatialIndex - :arg x: the point - :returns: a numpy array of candidate bounding boxes.""" - cdef int dim = x.shape[0] - cdef int64_t *ids = NULL - cdef uint64_t i - cdef np.ndarray[np.int64_t, ndim=1, mode="c"] pyids - cdef uint64_t nids - - err = Index_Intersects_id(sidx.index, &x[0], &x[0], dim, &ids, &nids) - if err != RT_None: - raise RuntimeError("intersection failed") - - pyids = np.empty(nids, dtype=np.int64) - for i in range(nids): - pyids[i] = ids[i] - free(ids) - return pyids diff --git a/firedrake/cython/spatialindexinc.pxi b/firedrake/cython/spatialindexinc.pxi index 13b07805e1..8c0fb573af 100644 --- a/firedrake/cython/spatialindexinc.pxi +++ b/firedrake/cython/spatialindexinc.pxi @@ -1,4 +1,4 @@ -from libc.stdint cimport int64_t, uint8_t, uint32_t, uint64_t +from libc.stdint cimport int64_t, uint32_t, uint64_t cdef extern from "spatialindex/capi/sidx_api.h": ctypedef enum RTError: @@ -38,11 +38,10 @@ cdef extern from "spatialindex/capi/sidx_api.h": RTError IndexProperty_SetIndexVariant(IndexPropertyH hProp, RTIndexVariant value) RTError IndexProperty_SetIndexStorage(IndexPropertyH hProp, RTStorageType value) void IndexProperty_Destroy(IndexPropertyH hProp) - IndexH Index_Create(IndexPropertyH hProp) - RTError Index_InsertData(IndexH index, int64_t id, - double* pdMin, double* pdMax, uint32_t nDimension, - const uint8_t* pData, uint32_t nDataLength) + IndexH Index_CreateWithArray(IndexPropertyH hProp, uint64_t n, uint32_t dimension, + uint64_t i_stri, uint64_t d_i_stri, uint64_t d_j_stri, + int64_t *ids, double *mins, double *maxs) RTError Index_Intersects_id(IndexH index, double* pdMin, double* pdMax, uint32_t nDimension, int64_t** ids, uint64_t* nResults) void Index_Destroy(IndexH index) diff --git a/firedrake/cython/supermeshimpl.pyx b/firedrake/cython/supermeshimpl.pyx index ad3387cd82..fde3fa6ba9 100644 --- a/firedrake/cython/supermeshimpl.pyx +++ b/firedrake/cython/supermeshimpl.pyx @@ -69,7 +69,7 @@ def 
assemble_mixed_mass_matrix(V_A, V_B, candidates, vertex_map_B = mesh_B.coordinates.cell_node_map().values_with_halo num_vertices = vertex_map_A.shape[1] - gdim = mesh_A.geometric_dimension() + gdim = mesh_A.geometric_dimension simplex_A = numpy.empty((num_vertices, gdim), dtype=ScalarType) simplex_B = numpy.empty_like(simplex_A, dtype=ScalarType) simplices_C = numpy.empty(MAGIC[gdim], dtype=ScalarType) @@ -119,10 +119,10 @@ def intersection_finder(mesh_A, mesh_B): long nnodes_A, nnodes_B, ncells_A, ncells_B int dim_A, dim_B, loc_A, loc_B - dim = mesh_A.geometric_dimension() - assert dim == mesh_B.geometric_dimension() - assert dim == mesh_A.topological_dimension() - assert dim == mesh_B.topological_dimension() + dim = mesh_A.geometric_dimension + assert dim == mesh_B.geometric_dimension + assert dim == mesh_A.topological_dimension + assert dim == mesh_B.topological_dimension assert mesh_A.coordinates.function_space().ufl_element().degree() == 1 assert mesh_B.coordinates.function_space().ufl_element().degree() == 1 @@ -144,8 +144,8 @@ def intersection_finder(mesh_A, mesh_B): vertex_map_B = mesh_B.coordinates.cell_node_map().values_with_halo.astype(int) nnodes_A = mesh_A.coordinates.dof_dset.total_size nnodes_B = mesh_B.coordinates.dof_dset.total_size - dim_A = mesh_A.geometric_dimension() - dim_B = mesh_B.geometric_dimension() + dim_A = mesh_A.geometric_dimension + dim_B = mesh_B.geometric_dimension ncells_A = mesh_A.num_cells() ncells_B = mesh_B.num_cells() loc_A = vertex_map_A.shape[1] diff --git a/firedrake/dmhooks.py b/firedrake/dmhooks.py index 046852b2e6..5b1562e84c 100644 --- a/firedrake/dmhooks.py +++ b/firedrake/dmhooks.py @@ -43,6 +43,7 @@ import firedrake from firedrake.petsc import PETSc +from firedrake.mesh import MeshSequenceGeometry @PETSc.Log.EventDecorator() @@ -53,8 +54,11 @@ def get_function_space(dm): :raises RuntimeError: if no function space was found. 
""" info = dm.getAttr("__fs_info__") - meshref, element, indices, (name, names), boundary_sets = info - mesh = meshref() + meshref_tuple, element, indices, (name, names), boundary_sets = info + if len(meshref_tuple) == 1: + mesh = meshref_tuple[0]() + else: + mesh = MeshSequenceGeometry([meshref() for meshref in meshref_tuple]) if mesh is None: raise RuntimeError("Somehow your mesh was collected, this should never happen") V = firedrake.FunctionSpace(mesh, element, name=name) @@ -80,8 +84,6 @@ def set_function_space(dm, V): This stores the information necessary to make a function space given a DM. """ - mesh = V.mesh() - indices = [] names = [] while V.parent is not None: @@ -92,11 +94,12 @@ def set_function_space(dm, V): assert V.index is None indices.append(V.component) V = V.parent + mesh = V.mesh() if len(V) > 1: names = tuple(V_.name for V_ in V) element = V.ufl_element() boundary_sets = tuple(V_.boundary_set for V_ in V) - info = (weakref.ref(mesh), element, tuple(reversed(indices)), (V.name, names), boundary_sets) + info = (tuple(weakref.ref(m) for m in mesh), element, tuple(reversed(indices)), (V.name, names), boundary_sets) dm.setAttr("__fs_info__", info) @@ -390,7 +393,7 @@ def create_subdm(dm, fields, *args, **kwargs): # Index set mapping from W into subspace. iset = PETSc.IS().createGeneral(numpy.concatenate([W._ises[f].indices for f in fields]), - comm=W._comm) + comm=W.comm) if ctx is not None: ctx, = ctx.split([fields]) add_hook(parent, setup=partial(push_appctx, subspace.dm, ctx), @@ -414,7 +417,9 @@ def coarsen(dm, comm): """ from firedrake.mg.utils import get_level V = get_function_space(dm) - hierarchy, level = get_level(V.mesh()) + # TODO: Think harder. 
+ m, = set(m_ for m_ in V.mesh()) + hierarchy, level = get_level(m) if level < 1: raise RuntimeError("Cannot coarsen coarsest DM") coarsen = get_ctx_coarsener(dm) diff --git a/firedrake/eigensolver.py b/firedrake/eigensolver.py index 4df85aec95..be768b3049 100644 --- a/firedrake/eigensolver.py +++ b/firedrake/eigensolver.py @@ -4,9 +4,9 @@ from firedrake.bcs import extract_subdomain_ids, restricted_function_space from firedrake.function import Function from firedrake.ufl_expr import TrialFunction, TestFunction -from firedrake import utils from firedrake.exceptions import ConvergenceError from ufl import replace, inner, dx +from functools import cached_property try: from slepc4py import SLEPc except ImportError: @@ -87,7 +87,7 @@ def dirichlet_bcs(self): for bc in self.bcs: yield from bc.dirichlet_bcs() - @utils.cached_property + @cached_property def dm(self): r"""Return the dm associated with the output space.""" if self.restrict: @@ -142,9 +142,10 @@ class LinearEigensolver(OptionsManager): "eps_largest_real": None """ - DEFAULT_EPS_PARAMETERS = {"eps_type": "krylovschur", - "eps_tol": 1e-10, - "eps_target": 0.0} + DEFAULT_EPS_PARAMETERS = { + "eps_type": "krylovschur", + "eps_tol": 1e-10, + } def __init__(self, problem, n_evals, *, options_prefix=None, solver_parameters=None, ncv=None, mpd=None): @@ -158,8 +159,6 @@ def __init__(self, problem, n_evals, *, options_prefix=None, for key in self.DEFAULT_EPS_PARAMETERS: value = self.DEFAULT_EPS_PARAMETERS[key] solver_parameters.setdefault(key, value) - if self._problem.bcs: - solver_parameters.setdefault("st_type", "sinvert") super().__init__(solver_parameters, options_prefix) self.set_from_options(self.es) diff --git a/firedrake/embedding.py b/firedrake/embedding.py index 517be9387e..15f8a09075 100644 --- a/firedrake/embedding.py +++ b/firedrake/embedding.py @@ -5,15 +5,15 @@ def get_embedding_dg_element(element, value_shape, broken_cg=False): - cell = element.cell - family = lambda c: "DG" if c.is_simplex() else 
"DQ" + cell, = set(element.cell.cells) + family = lambda c: "DG" if c.is_simplex else "DQ" if isinstance(cell, ufl.TensorProductCell): degree = element.degree() if type(degree) is int: scalar_element = finat.ufl.FiniteElement("DQ", cell=cell, degree=degree) else: scalar_element = finat.ufl.TensorProductElement(*(finat.ufl.FiniteElement(family(c), cell=c, degree=d) - for (c, d) in zip(cell.sub_cells(), degree))) + for (c, d) in zip(cell.sub_cells, degree))) else: degree = element.embedded_superdegree scalar_element = finat.ufl.FiniteElement(family(cell), cell=cell, degree=degree) diff --git a/firedrake/ensemble/__init__.py b/firedrake/ensemble/__init__.py index 6662a06f72..7d9795a92a 100644 --- a/firedrake/ensemble/__init__.py +++ b/firedrake/ensemble/__init__.py @@ -1,3 +1,9 @@ -from firedrake.ensemble.ensemble import * # noqa: F401 -from firedrake.ensemble.ensemble_function import * # noqa: F401 -from firedrake.ensemble.ensemble_functionspace import * # noqa: F401 +from firedrake.ensemble.ensemble import Ensemble # noqa F401 +from firedrake.ensemble.ensemble_function import ( # noqa F401 + EnsembleFunction, EnsembleCofunction +) +from firedrake.ensemble.ensemble_functionspace import ( # noqa F401 + EnsembleFunctionSpace, EnsembleDualSpace +) +from firedrake.ensemble.ensemble_mat import EnsembleBlockDiagonalMat # noqa: F401 +from firedrake.ensemble.ensemble_pc import EnsembleBJacobiPC # noqa: F401 diff --git a/firedrake/ensemble/ensemble.py b/firedrake/ensemble/ensemble.py index 49ab7e2c03..6c56a372d3 100644 --- a/firedrake/ensemble/ensemble.py +++ b/firedrake/ensemble/ensemble.py @@ -1,21 +1,62 @@ +from functools import wraps import weakref +from contextlib import contextmanager from itertools import zip_longest +from types import SimpleNamespace from firedrake.petsc import PETSc +from firedrake.function import Function +from firedrake.cofunction import Cofunction from pyop2.mpi import MPI, internal_comm -__all__ = ("Ensemble", ) + +def 
_ensemble_mpi_dispatch(func): + """ + This wrapper checks if any arg or kwarg of the wrapped + ensemble method is a Function or Cofunction, and if so + it calls the specialised Firedrake implementation. + Otherwise the standard mpi4py implementation is called. + """ + @wraps(func) + def _mpi_dispatch(self, *args, **kwargs): + if any(isinstance(arg, (Function, Cofunction)) + for arg in [*args, *kwargs.values()]): + return func(self, *args, **kwargs) + else: + mpicall = getattr(self._ensemble_comm, func.__name__) + return mpicall(*args, **kwargs) + return _mpi_dispatch -class Ensemble(object): - def __init__(self, comm, M, **kwargs): +class Ensemble: + def __init__(self, comm: MPI.Comm, M: int, **kwargs): """ Create a set of space and ensemble subcommunicators. - :arg comm: The communicator to split. - :arg M: the size of the communicators used for spatial parallelism. - :kwarg ensemble_name: string used as communicator name prefix, for debugging. - :raises ValueError: if ``M`` does not divide ``comm.size`` exactly. + Wrapper methods around many MPI communication functions are + provided for sending :class:`.Function` and :class:`.Cofunction` + objects between spatial communicators. + + For non-Firedrake objects these wrappers will dispatch to the + normal implementations on :class:`mpi4py.MPI.Comm`, which means + that the same call site can be used for both Firedrake and + non-Firedrake types. + + Parameters + ---------- + comm : + The communicator to split. + M : + The size of the communicators used for spatial parallelism. + Must be an integer divisor of the size of ``comm``. + kwargs : + Can include an ``ensemble_name`` string used as a communicator + name prefix, for debugging. + + Raises + ------ + ValueError + If ``M`` does not divide ``comm.size`` exactly. 
""" size = comm.size @@ -26,8 +67,6 @@ def __init__(self, comm, M, **kwargs): # Global comm self.global_comm = comm - # Internal global comm - self._comm = internal_comm(comm, self) ensemble_name = kwargs.get("ensemble_name", "Ensemble") # User and internal communicator for spatial parallelism, contains a @@ -35,60 +74,92 @@ def __init__(self, comm, M, **kwargs): self.comm = self.global_comm.Split(color=(rank // M), key=rank) self.comm.name = f"{ensemble_name} spatial comm" weakref.finalize(self, self.comm.Free) - self._spatial_comm = internal_comm(self.comm, self) # User and internal communicator for ensemble parallelism, contains all # processes in `global_comm` which have the same rank in `comm`. self.ensemble_comm = self.global_comm.Split(color=(rank % M), key=rank) self.ensemble_comm.name = f"{ensemble_name} ensemble comm" weakref.finalize(self, self.ensemble_comm.Free) + # Keep a reference to the internal communicator because some methods return + # non-blocking requests and we need to avoid cleaning up the communicator before + # they complete. Note that this communicator should *never* be passed to PETSc, as + # objects created with the communicator will never get cleaned up. self._ensemble_comm = internal_comm(self.ensemble_comm, self) - assert self.comm.size == M - assert self.ensemble_comm.size == (size // M) + if (self.comm.size != M) or (self.ensemble_comm.size != (size // M)): + raise ValueError(f"{M=} does not exactly divide {comm.size=}") @property - def ensemble_size(self): + def ensemble_size(self) -> int: """The number of ensemble members. """ return self.ensemble_comm.size @property - def ensemble_rank(self): + def ensemble_rank(self) -> int: """The rank of the local ensemble member. 
""" return self.ensemble_comm.rank - def _check_function(self, f, g=None): + def _check_function(self, f: Function | Cofunction, + g: Function | Cofunction | None = None): """ - Check if function f (and possibly a second function g) is a - valid argument for ensemble mpi routines - - :arg f: The function to check - :arg g: Second function to check - :raises ValueError: if function communicators mismatch each other or the ensemble + Check if :class:`.Function` ``f`` (and possibly a second + :class:`.Function` ``g``) is a valid argument for ensemble MPI routines + + Parameters + ---------- + f : + The :class:`.Function` to check. + g : + Second :class:`.Function` to check. + + Raises + ------ + ValueError + If ``Function`` communicators mismatch each other or the ensemble spatial communicator, or is the functions are in different spaces """ - if MPI.Comm.Compare(f._comm, self._spatial_comm) not in {MPI.CONGRUENT, MPI.IDENT}: + if MPI.Comm.Compare(f.comm, self.comm) not in {MPI.CONGRUENT, MPI.IDENT}: raise ValueError("Function communicator does not match space communicator") if g is not None: - if MPI.Comm.Compare(f._comm, g._comm) not in {MPI.CONGRUENT, MPI.IDENT}: + if MPI.Comm.Compare(f.comm, g.comm) not in {MPI.CONGRUENT, MPI.IDENT}: raise ValueError("Mismatching communicators for functions") if f.function_space() != g.function_space(): raise ValueError("Mismatching function spaces for functions") @PETSc.Log.EventDecorator() - def allreduce(self, f, f_reduced, op=MPI.SUM): + @_ensemble_mpi_dispatch + def allreduce(self, f: Function | Cofunction, + f_reduced: Function | Cofunction | None = None, + op: MPI.Op = MPI.SUM + ) -> Function | Cofunction: """ - Allreduce a function f into f_reduced over ``ensemble_comm`` . - - :arg f: The a :class:`.Function` to allreduce. - :arg f_reduced: the result of the reduction. - :arg op: MPI reduction operator. Defaults to MPI.SUM. 
- :raises ValueError: if function communicators mismatch each other or the ensemble - spatial communicator, or if the functions are in different spaces + Allreduce a :class:`.Function` ``f`` into ``f_reduced``. + + Parameters + ---------- + f : + The :class:`.Function` to allreduce. + f_reduced : + The result of the reduction. Must be in the same + :func:`~firedrake.functionspace.FunctionSpace` as ``f``. + op : + MPI reduction operator. Defaults to MPI.SUM. + + Returns + ------- + Function | Cofunction : + The result of the reduction. + + Raises + ------ + ValueError + If Function communicators mismatch each other or the ensemble + spatial communicator, or if the Functions are in different spaces """ + f_reduced = f_reduced or Function(f.function_space()) self._check_function(f, f_reduced) with f_reduced.dat.vec_wo as vout, f.dat.vec_ro as vin: @@ -96,34 +167,74 @@ def allreduce(self, f, f_reduced, op=MPI.SUM): return f_reduced @PETSc.Log.EventDecorator() - def iallreduce(self, f, f_reduced, op=MPI.SUM): + @_ensemble_mpi_dispatch + def iallreduce(self, f: Function | Cofunction, + f_reduced: Function | Cofunction | None = None, + op: MPI.Op = MPI.SUM + ) -> list[MPI.Request]: """ - Allreduce (non-blocking) a function f into f_reduced over ``ensemble_comm`` . - - :arg f: The a :class:`.Function` to allreduce. - :arg f_reduced: the result of the reduction. - :arg op: MPI reduction operator. Defaults to MPI.SUM. - :returns: list of MPI.Request objects (one for each of f.subfunctions). - :raises ValueError: if function communicators mismatch each other or the ensemble - spatial communicator, or if the functions are in different spaces + Allreduce (non-blocking) a :class:`.Function` ``f`` into ``f_reduced``. + + Parameters + ---------- + f : + The a :class:`.Function` to allreduce. + f_reduced : + The result of the reduction. Must be in the same + :func:`~firedrake.functionspace.FunctionSpace` as ``f``. + op : + MPI reduction operator. Defaults to MPI.SUM. 
+ + Returns + ------- + list[mpi4py.MPI.Request] : + Requests one for each of ``f.subfunctions``. + + Raises + ------ + ValueError + If Function communicators mismatch each other or the ensemble + spatial communicator, or if the Functions are in different spaces """ + f_reduced = f_reduced or Function(f.function_space()) self._check_function(f, f_reduced) return [self._ensemble_comm.Iallreduce(fdat.data, rdat.data, op=op) for fdat, rdat in zip(f.dat, f_reduced.dat)] @PETSc.Log.EventDecorator() - def reduce(self, f, f_reduced, op=MPI.SUM, root=0): + @_ensemble_mpi_dispatch + def reduce(self, f: Function | Cofunction, + f_reduced: Function | Cofunction | None = None, + op: MPI.Op = MPI.SUM, root: int = 0 + ) -> Function | Cofunction: """ - Reduce a function f into f_reduced over ``ensemble_comm`` to rank root - - :arg f: The a :class:`.Function` to reduce. - :arg f_reduced: the result of the reduction on rank root. - :arg op: MPI reduction operator. Defaults to MPI.SUM. - :arg root: rank to reduce to. Defaults to 0. - :raises ValueError: if function communicators mismatch each other or the ensemble - spatial communicator, or is the functions are in different spaces + Reduce a :class:`.Function` ``f`` into ``f_reduced``. + + Parameters + ---------- + f : + The :class:`.Function` to reduce. + f_reduced : + The result of the reduction. Must be in the same + :func:`~firedrake.functionspace.FunctionSpace` as ``f``. + op : + MPI reduction operator. Defaults to MPI.SUM. + root : + The ensemble rank to reduce to. + + Returns + ------- + Function | Cofunction : + The result of the reduction. 
+ + Raises + ------ + ValueError + If Function communicators mismatch each other or the ensemble + spatial communicator, or if the Functions are in different spaces """ + f_reduced = f_reduced or Function(f.function_space()) self._check_function(f, f_reduced) if self.ensemble_comm.rank == root: @@ -136,31 +247,67 @@ def reduce(self, f, f_reduced, op=MPI.SUM, root=0): return f_reduced @PETSc.Log.EventDecorator() - def ireduce(self, f, f_reduced, op=MPI.SUM, root=0): + @_ensemble_mpi_dispatch + def ireduce(self, f: Function | Cofunction, + f_reduced: Function | Cofunction | None = None, + op: MPI.Op = MPI.SUM, root: int = 0 + ) -> list[MPI.Request]: """ - Reduce (non-blocking) a function f into f_reduced over ``ensemble_comm`` to rank root - - :arg f: The a :class:`.Function` to reduce. - :arg f_reduced: the result of the reduction on rank root. - :arg op: MPI reduction operator. Defaults to MPI.SUM. - :arg root: rank to reduce to. Defaults to 0. - :returns: list of MPI.Request objects (one for each of f.subfunctions). - :raises ValueError: if function communicators mismatch each other or the ensemble - spatial communicator, or is the functions are in different spaces + Reduce (non-blocking) a :class:`.Function` ``f`` into ``f_reduced``. + + Parameters + ---------- + f : + The a :class:`.Function` to reduce. + f_reduced : + The result of the reduction. Must be in the same + :func:`~firedrake.functionspace.FunctionSpace` as ``f``. + op : + MPI reduction operator. Defaults to MPI.SUM. + root : + The ensemble rank to reduce to. + + Returns + ------- + list[mpi4py.MPI.Request] + Requests one for each of ``f.subfunctions``. 
+ + Raises + ------ + ValueError + If Function communicators mismatch each other or the ensemble + spatial communicator, or if the Functions are in different spaces """ + f_reduced = f_reduced or Function(f.function_space()) self._check_function(f, f_reduced) return [self._ensemble_comm.Ireduce(fdat.data_ro, rdat.data, op=op, root=root) for fdat, rdat in zip(f.dat, f_reduced.dat)] @PETSc.Log.EventDecorator() - def bcast(self, f, root=0): + @_ensemble_mpi_dispatch + def bcast(self, f: Function | Cofunction, root: int = 0 + ) -> Function | Cofunction: """ - Broadcast a function f over ``ensemble_comm`` from rank root - - :arg f: The :class:`.Function` to broadcast. - :arg root: rank to broadcast from. Defaults to 0. - :raises ValueError: if function communicator mismatches the ensemble spatial communicator. + Broadcast a :class:`.Function` ``f`` over ``ensemble_comm`` + from :attr:`~.Ensemble.ensemble_rank` ``root``. + + Parameters + ---------- + f : + The :class:`.Function` to broadcast. + root : + The rank to broadcast from. + + Returns + ------- + Function | Cofunction : + The result of the broadcast. + + Raises + ------ + ValueError + If the Function communicator mismatches the ``ensemble.comm``. """ self._check_function(f) with f.dat.vec as vec: @@ -169,129 +316,363 @@ def bcast(self, f, root=0): return f @PETSc.Log.EventDecorator() - def ibcast(self, f, root=0): + @_ensemble_mpi_dispatch + def ibcast(self, f: Function | Cofunction, root: int = 0 + ) -> list[MPI.Request]: """ - Broadcast (non-blocking) a function f over ``ensemble_comm`` from rank root - - :arg f: The :class:`.Function` to broadcast. - :arg root: rank to broadcast from. Defaults to 0. - :returns: list of MPI.Request objects (one for each of f.subfunctions). - :raises ValueError: if function communicator mismatches the ensemble spatial communicator. + Broadcast (non-blocking) a :class:`.Function` ``f`` over + ``ensemble_comm`` :attr:`~.Ensemble.ensemble_rank` ``root``. 
+ + Parameters + ---------- + f : + The :class:`.Function` to broadcast. + root : + The rank to broadcast from. + + Returns + ------- + list[mpi4py.MPI.Request] + Requests one for each of ``f.subfunctions``. + + Raises + ------ + ValueError + If the Function communicator mismatches the ``ensemble.comm``. """ self._check_function(f) - return [self._ensemble_comm.Ibcast(dat.data, root=root) for dat in f.dat] @PETSc.Log.EventDecorator() - def send(self, f, dest, tag=0): + @_ensemble_mpi_dispatch + def send(self, f: Function | Cofunction, dest: int, tag: int = 0): """ - Send (blocking) a function f over ``ensemble_comm`` to another - ensemble rank. - - :arg f: The a :class:`.Function` to send - :arg dest: the rank to send to - :arg tag: the tag of the message. Defaults to 0 - :raises ValueError: if function communicator mismatches the ensemble spatial communicator. + Send (blocking) a :class:`.Function` ``f`` over ``ensemble_comm`` + to another :attr:`~.Ensemble.ensemble_rank`. + + Parameters + ---------- + f : + The a :class:`.Function` to send. + dest : + The :attr:`~.Ensemble.ensemble_rank` to send ``f`` to. + tag : + The tag of the message. + + Raises + ------ + ValueError + If the Function communicator mismatches the ``ensemble.comm``. """ self._check_function(f) for dat in f.dat: self._ensemble_comm.Send(dat.data_ro, dest=dest, tag=tag) @PETSc.Log.EventDecorator() - def recv(self, f, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, statuses=None): + @_ensemble_mpi_dispatch + def recv(self, f: Function | Cofunction, source: int = MPI.ANY_SOURCE, + tag: int = MPI.ANY_TAG, statuses: list[MPI.Status] | MPI.Status = None, + ) -> Function | Cofunction: """ - Receive (blocking) a function f over ``ensemble_comm`` from - another ensemble rank. - - :arg f: The a :class:`.Function` to receive into - :arg source: the rank to receive from. Defaults to MPI.ANY_SOURCE. - :arg tag: the tag of the message. Defaults to MPI.ANY_TAG. 
- :arg statuses: MPI.Status objects (one for each of f.subfunctions or None). - :raises ValueError: if function communicator mismatches the ensemble spatial communicator. + Receive (blocking) a :class:`.Function` ``f`` over + ``ensemble_comm`` from another :attr:`~.Ensemble.ensemble_rank`. + + Parameters + ---------- + f : + The :class:`.Function` to receive into. + source : + The :attr:`~.Ensemble.ensemble_rank` to receive ``f`` from. + tag : + The tag of the message. + statuses : + The :class:`mpi4py.MPI.Status` of the internal recv calls + (one for each of the ``subfunctions`` of ``f``). + + Returns + ------- + Function | Cofunction : + ``f`` with the received data. + + Raises + ------ + ValueError + If the Function communicator mismatches the ``ensemble.comm``. + ValueError + If the number of ``statuses`` provided is not the number of + subfunctions of ``f``. """ self._check_function(f) + if statuses is not None and isinstance(statuses, MPI.Status): + statuses = [statuses] if statuses is not None and len(statuses) != len(f.dat): raise ValueError("Need to provide enough status objects for all parts of the Function") for dat, status in zip_longest(f.dat, statuses or (), fillvalue=None): self._ensemble_comm.Recv(dat.data, source=source, tag=tag, status=status) + return f @PETSc.Log.EventDecorator() - def isend(self, f, dest, tag=0): + @_ensemble_mpi_dispatch + def isend(self, f: Function | Cofunction, dest: int, tag: int = 0 + ) -> list[MPI.Request]: """ - Send (non-blocking) a function f over ``ensemble_comm`` to another - ensemble rank. - - :arg f: The a :class:`.Function` to send - :arg dest: the rank to send to - :arg tag: the tag of the message. Defaults to 0. - :returns: list of MPI.Request objects (one for each of f.subfunctions). - :raises ValueError: if function communicator mismatches the ensemble spatial communicator. + Send (non-blocking) a :class:`.Function` ``f`` over ``ensemble_comm`` + to another :attr:`~.Ensemble.ensemble_rank`. 
+ + Parameters + ---------- + f : + The a :class:`.Function` to send. + dest : + The :attr:`~.Ensemble.ensemble_rank` to send ``f`` to. + tag : + The tag of the message. + + Returns + ------- + list[mpi4py.MPI.Request] + Requests one for each of ``f.subfunctions``. + + Raises + ------ + ValueError + If the Function communicator mismatches the ``ensemble.comm``. """ self._check_function(f) return [self._ensemble_comm.Isend(dat.data_ro, dest=dest, tag=tag) for dat in f.dat] @PETSc.Log.EventDecorator() - def irecv(self, f, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG): + @_ensemble_mpi_dispatch + def irecv(self, f: Function | Cofunction, + source: int = MPI.ANY_SOURCE, + tag: int = MPI.ANY_TAG + ) -> list[MPI.Request]: """ - Receive (non-blocking) a function f over ``ensemble_comm`` from - another ensemble rank. - - :arg f: The a :class:`.Function` to receive into - :arg source: the rank to receive from. Defaults to MPI.ANY_SOURCE. - :arg tag: the tag of the message. Defaults to MPI.ANY_TAG. - :returns: list of MPI.Request objects (one for each of f.subfunctions). - :raises ValueError: if function communicator mismatches the ensemble spatial communicator. + Receive (non-blocking) a :class:`.Function` ``f`` over + ``ensemble_comm`` from another :attr:`~.Ensemble.ensemble_rank`. + + Parameters + ---------- + f : + The :class:`.Function` to receive into. + source : + The :attr:`~.Ensemble.ensemble_rank` to receive ``f`` from. + tag : + The tag of the message. + + Returns + ------- + list[mpi4py.MPI.Request] + Requests one for each of ``f.subfunctions``. + + Raises + ------ + ValueError + If the Function communicator mismatches the ``ensemble.comm``. 
""" self._check_function(f) return [self._ensemble_comm.Irecv(dat.data, source=source, tag=tag) for dat in f.dat] @PETSc.Log.EventDecorator() - def sendrecv(self, fsend, dest, sendtag=0, frecv=None, source=MPI.ANY_SOURCE, recvtag=MPI.ANY_TAG, status=None): + @_ensemble_mpi_dispatch + def sendrecv(self, fsend: Function | Cofunction, dest: int, sendtag: int = 0, + frecv: Function | Cofunction | None = None, source: int = MPI.ANY_SOURCE, + recvtag: int = MPI.ANY_TAG, statuses: list[MPI.Status] | MPI.Status = None + ) -> Function | Cofunction: """ - Send (blocking) a function fsend and receive a function frecv over ``ensemble_comm`` to another - ensemble rank. - - :arg fsend: The a :class:`.Function` to send. - :arg dest: the rank to send to. - :arg sendtag: the tag of the send message. Defaults to 0. - :arg frecv: The a :class:`.Function` to receive into. - :arg source: the rank to receive from. Defaults to MPI.ANY_SOURCE. - :arg recvtag: the tag of the received message. Defaults to MPI.ANY_TAG. - :arg status: MPI.Status object or None. - :raises ValueError: if function communicator mismatches the ensemble spatial communicator. + Send (blocking) a :class:`.Function` ``fsend`` and receive a + :class:`.Function` ``frecv`` over ``ensemble_comm`` to/from other + :attr:`~.Ensemble.ensemble_rank`. + + ``fsend`` and ``frecv`` do not need to be in the same function space + but do need to have the same number of subfunctions. + + Parameters + ---------- + fsend : + The a :class:`.Function` to send. + dest : + The :attr:`~.Ensemble.ensemble_rank` to send ``fsend`` to. + sendtag : + The tag of the send message. + frecv : + The :class:`.Function` to receive into. + source : + The :attr:`~.Ensemble.ensemble_rank` to receive ``frecv`` from. + recvtag : + The tag of the receive message. + statuses : + The :class:`mpi4py.MPI.Status` of the internal recv calls + (one for each of the ``subfunctions`` of ``frecv``). 
+ + Returns + ------- + Function | Cofunction + ``frecv`` with the received data. + + Raises + ------ + ValueError + If the Function communicators mismatches each other or the + ``ensemble.comm``. + ValueError + If the number of ``statuses`` provided is not the number of + subfunctions of ``f``. """ + frecv = frecv or Function(fsend.function_space()) # functions don't necessarily have to match self._check_function(fsend) self._check_function(frecv) + if statuses is not None and isinstance(statuses, MPI.Status): + statuses = [statuses] + if statuses is not None and len(statuses) != len(frecv.dat): + raise ValueError("Need to provide enough status objects for all parts of the Function") with fsend.dat.vec_ro as sendvec, frecv.dat.vec_wo as recvvec: self._ensemble_comm.Sendrecv(sendvec, dest, sendtag=sendtag, recvbuf=recvvec, source=source, recvtag=recvtag, - status=status) + status=statuses) + return frecv @PETSc.Log.EventDecorator() - def isendrecv(self, fsend, dest, sendtag=0, frecv=None, source=MPI.ANY_SOURCE, recvtag=MPI.ANY_TAG): + @_ensemble_mpi_dispatch + def isendrecv(self, fsend: Function | Cofunction, dest: int, sendtag: int = 0, + frecv: Function | Cofunction | None = None, + source: int = MPI.ANY_SOURCE, recvtag: int = MPI.ANY_TAG + ) -> list[MPI.Request]: """ - Send a function fsend and receive a function frecv over ``ensemble_comm`` to another - ensemble rank. - - :arg fsend: The a :class:`.Function` to send. - :arg dest: the rank to send to. - :arg sendtag: the tag of the send message. Defaults to 0. - :arg frecv: The a :class:`.Function` to receive into. - :arg source: the rank to receive from. Defaults to MPI.ANY_SOURCE. - :arg recvtag: the tag of the received message. Defaults to MPI.ANY_TAG. - :returns: list of MPI.Request objects (one for each of fsend.subfunctions and frecv.subfunctions). - :raises ValueError: if function communicator mismatches the ensemble spatial communicator. 
+ Send (non-blocking) a :class:`.Function` ``fsend`` and receive a + :class:`.Function` ``frecv`` over ``ensemble_comm`` to/from other + :attr:`~.Ensemble.ensemble_rank`. + + ``fsend`` and ``frecv`` do not need to be in the same function space. + + Parameters + ---------- + fsend : + The a :class:`.Function` to send. + dest : + The :attr:`~.Ensemble.ensemble_rank` to send ``fsend`` to. + sendtag : + The tag of the send message. + frecv : + The :class:`.Function` to receive into. + source : + The :attr:`~.Ensemble.ensemble_rank` to receive ``frecv`` from. + recvtag : + The tag of the receive message. + + Returns + ------- + list[mpi4py.MPI.Request] + Requests one for each of ``f.subfunctions``. + + Raises + ------ + ValueError + If the Function communicators mismatches each other or the + ``ensemble.comm``. """ + frecv = frecv or Function(fsend.function_space()) # functions don't necessarily have to match self._check_function(fsend) self._check_function(frecv) + requests = [] requests.extend([self._ensemble_comm.Isend(dat.data_ro, dest=dest, tag=sendtag) for dat in fsend.dat]) requests.extend([self._ensemble_comm.Irecv(dat.data, source=source, tag=recvtag) for dat in frecv.dat]) return requests + + @contextmanager + def sequential(self, *, synchronise: bool = False, reverse: bool = False, **kwargs): + """ + Context manager for executing code on each ensemble + member consecutively (ordered by increasing + :attr:`~.Ensemble.ensemble_rank`). + + Any data in ``kwargs`` will be made available in the returned + context and will be communicated forward after each ensemble + member exits. :class:`.Function` or :class:`.Cofunction` + ``kwargs`` will be sent with the corresponding Ensemble methods. + + For example: + + .. code-block:: python3 + + with ensemble.sequential(index=0) as ctx: + print(ensemble.ensemble_rank, ctx.index) + ctx.index += 2 + + Would print: + + .. code-block:: + + 0 0 + 1 2 + 2 4 + 3 6 + ... 
+ + If ``reverse is True`` then the ensemble ranks will be looped through + in decreasing order i.e. ``ensemble_rank == (ensemble_size - 1)`` will + run first, then ``ensemble_rank == (ensemble_size - 2)`` etc. + + Parameters + ---------- + synchronise : + If True then MPI_Barrier will be called on the ``global_comm`` + at the beginning and end of this method. + + reverse : + If True then will iterate through spatial comms in order of + decreasing ``ensemble_rank``. + + kwargs : + Data to be passed forward by each rank and made available + in the returned ``ctx``. + """ + rank = self.ensemble_rank + if reverse: # send backwards + src = rank + 1 + dst = rank - 1 + first_rank = (rank == self.ensemble_size - 1) + last_rank = (rank == 0) + else: # send forwards + src = rank - 1 + dst = rank + 1 + first_rank = (rank == 0) + last_rank = (rank == self.ensemble_size - 1) + + if synchronise: + self.global_comm.Barrier() + + if not first_rank: + for i, (k, v) in enumerate(kwargs.items()): + if isinstance(v, (Function, Cofunction)): + # Functions are sent in-place, everything else is pickled + recv_args = [kwargs[k]] + else: + recv_args = [] + kwargs[k] = self.recv(*recv_args, source=src, tag=rank+i*100) + + ctx = SimpleNamespace(**kwargs) + yield ctx + + if not last_rank: + for i, v in enumerate((getattr(ctx, k) + for k in kwargs.keys())): + try: + self.send(v, dest=dst, tag=dst+i*100) + except Exception as error: + raise TypeError( + f"Failed to send object of type {type(v).__name__}. kwargs for" + " Ensemble.sequential must be Functions, Cofunctions," + " or acceptable arguments to mpi4py.MPI.Comm.send."
+ ) from error + + if synchronise: + self.global_comm.Barrier() diff --git a/firedrake/ensemble/ensemble_function.py b/firedrake/ensemble/ensemble_function.py index bd53e8b739..74a5fbf305 100644 --- a/firedrake/ensemble/ensemble_function.py +++ b/firedrake/ensemble/ensemble_function.py @@ -9,35 +9,36 @@ from firedrake.function import Function from firedrake.norms import norm -__all__ = ("EnsembleFunction", "EnsembleCofunction") - class EnsembleFunctionBase(EnsembleFunctionMixin): """ A mixed (co)function defined on a :class:`~.ensemble.Ensemble`. The subcomponents are distributed over the ensemble members, and - are specified locally in a :class:`~firedrake.EnsembleFunctionSpace`. + are specified locally in an + :class:`~.ensemble_functionspace.EnsembleFunctionSpace`. Parameters ---------- - function_space : `~ensemble_functionspace.EnsembleFunctionSpace`. + function_space : The function space of the (co)function. Notes ----- - Passing an `EnsembleDualSpace` to `EnsembleFunction` - will return an instance of :class:`~firedrake.EnsembleCofunction`. + Passing an :class:`~.ensemble_functionspace.EnsembleDualSpace` to + :class:`EnsembleFunction` will return an instance of :class:`EnsembleCofunction`. This class does not carry UFL symbolic information, unlike a :class:`~firedrake.function.Function`. UFL expressions can only be defined locally on each ensemble member using a `~firedrake.function.Function` - from `EnsembleFunction.subfunctions`. + from ``EnsembleFunction.subfunctions``. See Also -------- - - Primal ensemble objects: :class:`~ensemble_functionspace.EnsembleFunctionSpace` and :class:`~firedrake.EnsembleFunction`. - - Dual ensemble objects: :class:`~firedrake.EnsembleDualSpace` and :class:`~firedrake.EnsembleCofunction`. 
+ .ensemble_functionspace.EnsembleFunctionSpace + .ensemble_functionspace.EnsembleDualSpace + EnsembleFunction + EnsembleCofunction """ @PETSc.Log.EventDecorator() @@ -134,7 +135,7 @@ def assign(self, other, subsets=None): The value to assign from. subsets : Collection[Optional[:class:`pyop2.types.set.Subset`]] - One subset for each local :class:`firedrake.functionFunction`. + One subset for each local :class:`firedrake.function.Function`. None elements will be ignored. The values of each local function will only be assigned on the nodes on the corresponding subset. """ @@ -190,12 +191,23 @@ def __imul__(self, other): us *= other return self + @PETSc.Log.EventDecorator() + def __isub__(self, other): + self += -1*other + return self + @PETSc.Log.EventDecorator() def __add__(self, other): new = self.copy() new += other return new + @PETSc.Log.EventDecorator() + def __sub__(self, other): + new = self.copy() + new -= other + return new + @PETSc.Log.EventDecorator() def __mul__(self, other): new = self.copy() @@ -204,18 +216,23 @@ def __mul__(self, other): @PETSc.Log.EventDecorator() def __rmul__(self, other): + new = self.copy() if type(other) is type(self): - for us, uo in zip(self.subfunctions, other.subfunctions): - us.assign(us*uo) + for un, uo in zip(new.subfunctions, other.subfunctions): + un.assign(uo*un) else: - for us in self.subfunctions: - us *= other - return self + for un in new.subfunctions: + un.assign(other*un) + return new + + @PETSc.Log.EventDecorator() + def __neg__(self): + return (-1)*self @contextmanager def vec(self): """ - Context manager for the global :class:`petsc4py.PETSc.Vec` with + Context manager for the global ``PETSc.Vec`` with read/write access. It is invalid to access the ``Vec`` outside of a context manager. @@ -234,7 +251,7 @@ def vec(self): @contextmanager def vec_ro(self): """ - Context manager for the global :class:`petsc4py.PETSc.Vec` with + Context manager for the global ``PETSc.Vec`` with read only access. 
It is invalid to access the ``Vec`` outside of a context manager. @@ -250,7 +267,7 @@ def vec_ro(self): @contextmanager def vec_wo(self): """ - Context manager for the global :class:`petsc4py.PETSc.Vec` with + Context manager for the global ``PETSc.Vec`` with write only access. It is invalid to access the ``Vec`` outside of a context manager. @@ -272,7 +289,7 @@ class EnsembleFunction(EnsembleFunctionBase): """ A mixed Function defined on a :class:`~.ensemble.Ensemble`. The subcomponents are distributed over the ensemble members, and - are specified locally in a :class:`~firedrake.ensemble.ensemble_functionspace.EnsembleFunctionSpace`. + are specified locally in an :class:`~firedrake.ensemble.ensemble_functionspace.EnsembleFunctionSpace`. Parameters ---------- diff --git a/firedrake/ensemble/ensemble_functionspace.py b/firedrake/ensemble/ensemble_functionspace.py index 7a0c7b577c..ee7c0582e8 100644 --- a/firedrake/ensemble/ensemble_functionspace.py +++ b/firedrake/ensemble/ensemble_functionspace.py @@ -2,13 +2,11 @@ from typing import Collection from ufl.duals import is_primal, is_dual -from pyop2.mpi import internal_comm, MPI +from pyop2.mpi import MPI from firedrake.petsc import PETSc from firedrake.ensemble.ensemble import Ensemble from firedrake.functionspace import MixedFunctionSpace -__all__ = ("EnsembleFunctionSpace", "EnsembleDualSpace") - def _is_primal_or_dual(local_spaces, ensemble): """ @@ -83,16 +81,19 @@ class EnsembleFunctionSpaceBase: will return an instance of :class:`EnsembleDualSpace`. This class does not carry UFL symbolic information, unlike a - :class:`~firedrake.functionspaceimpl.FunctionSpace`. UFL expressions can only be defined locally - on each ensemble member using a :class:`~firedrake.functionspaceimpl.FunctionSpace` from + :class:`~firedrake.functionspace.FunctionSpace`. 
UFL expressions can only be defined locally + on each ensemble member using a :class:`~firedrake.functionspace.FunctionSpace` from `EnsembleFunctionSpace.local_spaces`. - See also: - - Primal ensemble objects: :class:`EnsembleFunctionSpace` and :class:`~firedrake.ensemble.ensemble_function.EnsembleFunction`. - - Dual ensemble objects: :class:`EnsembleDualSpace` and :class:`~firedrake.ensemble.ensemble_function.EnsembleCofunction`. + See Also + -------- + EnsembleFunctionSpace + EnsembleDualSpace + .ensemble_function.EnsembleFunction + .ensemble_function.EnsembleCofunction """ def __init__(self, local_spaces: Collection, ensemble: Ensemble): - meshes = set(V.mesh() for V in local_spaces) + meshes = set(V.mesh().unique() for V in local_spaces) nlocal_meshes = len(meshes) max_local_meshes = ensemble.ensemble_comm.allreduce(nlocal_meshes, MPI.MAX) if max_local_meshes > 1: @@ -107,11 +108,6 @@ def __init__(self, local_spaces: Collection, ensemble: Ensemble): # subfunctions that view the correct subfunctions of this big space. self._full_local_space = MixedFunctionSpace(self.local_spaces) - # ensemble._comm is congruent with ensemble.global_comm not ensemble.comm - # because obj._comm is used for garbage collection, so it needs to be the - # communicator that the ensemble objects are collective over. - self._comm = internal_comm(ensemble._comm, self) - @property def ensemble(self): """The :class:`~.ensemble.Ensemble` that the function space is defined over @@ -143,7 +139,7 @@ def local_spaces(self): return self._local_spaces def mesh(self): - """The :class:`~firedrake.Mesh` on the local ensemble.comm. + """The :class:`~firedrake.mesh.Mesh` on the local ensemble.comm. """ return self._mesh @@ -156,7 +152,7 @@ def dual(self): @cached_property def nlocal_spaces(self): - """The total number of subspaces across all ensemble ranks. + """The number of subspaces on this ensemble rank. 
""" return len(self.local_spaces) @@ -184,6 +180,12 @@ def nglobal_dofs(self): """ return self.ensemble_comm.allreduce(self.nlocal_comm_dofs) + @cached_property + def global_spaces_offset(self): + """Index of the first local subspace in the global mixed space. + """ + return self.ensemble.ensemble_comm.exscan(self.nlocal_spaces) or 0 + def _component_indices(self, i): """ Return the indices into the local mixed function storage @@ -194,15 +196,19 @@ def _component_indices(self, i): def create_vec(self): """Return a PETSc Vec on the ``Ensemble.global_comm`` with the same layout - as a :class:`~firedrake.ensemble.ensemble_functionspace.EnsembleFunction` - or :class:`~firedrake.ensemble.ensemble_functionspace.EnsembleCofunction` + as an :class:`~firedrake.ensemble.ensemble_function.EnsembleFunction` + or :class:`~firedrake.ensemble.ensemble_function.EnsembleCofunction` in this function space. """ vec = PETSc.Vec().create(comm=self.global_comm) - vec.setSizes((self.nlocal_dofs, self.nglobal_dofs)) + vec.setSizes((self.nlocal_rank_dofs, self.nglobal_dofs)) vec.setUp() return vec + @cached_property + def layout_vec(self): + return self.create_vec() + def __eq__(self, other): if not isinstance(other, type(self)): local_eq = False diff --git a/firedrake/ensemble/ensemble_mat.py b/firedrake/ensemble/ensemble_mat.py new file mode 100644 index 0000000000..f600937594 --- /dev/null +++ b/firedrake/ensemble/ensemble_mat.py @@ -0,0 +1,262 @@ +from typing import Iterable +from firedrake.petsc import PETSc +from firedrake.ensemble.ensemble_function import EnsembleFunction, EnsembleFunctionBase +from firedrake.ensemble.ensemble_functionspace import EnsembleFunctionSpaceBase + + +class EnsembleMatCtxBase: + """ + Base class for python type Mats defined over an :class:`~.ensemble.Ensemble`. + + Parameters + ---------- + row_space : + The function space that the matrix acts on. + Must have the same number of subspaces on each ensemble rank as col_space. 
+ col_space : + The function space for the result of the matrix action. + Must have the same number of subspaces on each ensemble rank as row_space. + + Notes + ----- + The main use of this base class is to enable users to implement the matrix + action as acting on and resulting in an :class:`~.ensemble_function.EnsembleFunction`. + This is done by implementing the ``mult_impl`` method. + + See Also + -------- + .ensemble_pc.EnsemblePCBase + """ + def __init__(self, row_space: EnsembleFunctionSpaceBase, + col_space: EnsembleFunctionSpaceBase): + name = type(self).__name__ + if not isinstance(row_space, EnsembleFunctionSpaceBase): + raise ValueError( + f"{name} row_space must be EnsembleFunctionSpace not {type(row_space).__name__}") + if not isinstance(col_space, EnsembleFunctionSpaceBase): + raise ValueError( + f"{name} col_space must be EnsembleFunctionSpace not {type(col_space).__name__}") + + if row_space.ensemble != col_space.ensemble: + raise ValueError( + f"{name} row and column spaces must have the same Ensemble") + + self.ensemble = row_space.ensemble + self.row_space = row_space + self.col_space = col_space + + # input/output Vecs will be copied in/out of these + # so that base classes can implement mult only in + # terms of Ensemble objects not Vecs. + self.x = EnsembleFunction(self.row_space) + self.y = EnsembleFunction(self.col_space) + + def mult(self, A, x, y): + """Apply the action of the matrix to x, putting the result in y. + + This method will be called by PETSc with x and y as Vecs, and acts + as a wrapper around the ``mult_impl`` method which has x and y as + EnsembleFunction for convenience. + y is not guaranteed to be zero on entry. + + Parameters + ---------- + A : PETSc.Mat + The PETSc matrix that self is the python context of. + x : PETSc.Vec + The vector acted on by the matrix. + y : PETSc.Vec + The result of the matrix action. 
+ + See Also + -------- + EnsembleMatCtxBase.mult_impl + """ + with self.x.vec_wo() as xvec: + x.copy(result=xvec) + + self.mult_impl(A, self.x, self.y) + + with self.y.vec_ro() as yvec: + yvec.copy(result=y) + + def mult_impl(self, A, x: EnsembleFunctionBase, y: EnsembleFunctionBase): + """Apply the action of the matrix to x, putting the result in y. + + y is not guaranteed to be zero on entry. + This is a convenience method allowing the matrix action to be + implemented in terms of EnsembleFunction input and outputs by + inheriting classes. + + Parameters + ---------- + A : PETSc.Mat + The PETSc matrix that self is the python context of. + x : + The vector acted on by the matrix. + y : + The result of the matrix action. + + See Also + -------- + EnsembleMatCtxBase.mult + """ + raise NotImplementedError + + +class EnsembleBlockDiagonalMatCtx(EnsembleMatCtxBase): + """ + A python Mat context for a block diagonal matrix defined over an :class:`~.ensemble.Ensemble`. + Each block acts on a single subspace of an :class:`~.ensemble_functionspace.EnsembleFunctionSpace`. + + Parameters + ---------- + block_mats : Iterable[PETSc.Mat] + The PETSc Mats for each block. On each ensemble rank there must be as many + Mats as there are local subspaces of ``row_space`` and ``col_space``, and + the Mat sizes must match the sizes of the corresponding subspaces. + row_space : + The function space that the matrix acts on. + Must have the same number of subspaces on each ensemble rank as col_space. + col_space : + The function space for the result of the matrix action. + Must have the same number of subspaces on each ensemble rank as row_space. + + Notes + ----- + This is a python context, not an actual PETSc.Mat. To create the corresponding + PETSc.Mat users should call :func:`~.EnsembleBlockDiagonalMat`. 
+ + See Also + -------- + EnsembleBlockDiagonalMat + ~.ensemble_pc.EnsembleBJacobiPC + """ + def __init__(self, block_mats: Iterable, + row_space: EnsembleFunctionSpaceBase, + col_space: EnsembleFunctionSpaceBase): + super().__init__(row_space, col_space) + self.block_mats = block_mats + + if self.row_space.nlocal_spaces != self.col_space.nlocal_spaces: + raise ValueError( + "EnsembleBlockDiagonalMat row and col spaces must be the same length," + f" not {row_space.nlocal_spaces=} and {col_space.nlocal_spaces=}") + + if len(self.block_mats) != self.row_space.nlocal_spaces: + raise ValueError( + f"EnsembleBlockDiagonalMat requires one submatrix for each of the" + f" {self.row_space.nlocal_spaces} local subfunctions of the EnsembleFunctionSpace," + f" but only {len(self.block_mats)} provided.") + + for i, (Vrow, Vcol, block) in enumerate(zip(self.row_space.local_spaces, + self.col_space.local_spaces, + self.block_mats)): + if not isinstance(block, PETSc.Mat): + raise TypeError( + f"Block {i} must be a PETSc.Mat not a {type(block).__name__}.\n" + "Did you mean to use assemble(block).petscmat instead?") + # number of columns is row length, and vice-versa + vr_sizes = Vrow.dof_dset.layout_vec.sizes + vc_sizes = Vcol.dof_dset.layout_vec.sizes + mc_sizes, mr_sizes = block.sizes + if (vr_sizes[0] != mr_sizes[0]) or (vr_sizes[1] != mr_sizes[1]): + raise ValueError( + f"Row sizes {mr_sizes} of block {i} and {vr_sizes} of row_space {i} of EnsembleBlockDiagonalMat must match.") + if (vc_sizes[0] != mc_sizes[0]) or (vc_sizes[1] != mc_sizes[1]): + raise ValueError( + f"Col sizes of block {i} and col_space {i} of EnsembleBlockDiagonalMat must match.") + + def mult_impl(self, A, x, y): + for block, xsub, ysub in zip(self.block_mats, + x.subfunctions, + y.subfunctions): + with xsub.dat.vec_ro as xvec, ysub.dat.vec_wo as yvec: + block.mult(xvec, yvec) + + def setUp(self, mat): + for bmat in self.block_mats: + bmat.setUp() + + def view(self, mat, viewer=None): + if viewer is None: + 
return + if viewer.getType() != PETSc.Viewer.Type.ASCII: + return + viewer.printfASCII(f" firedrake block diagonal Ensemble matrix: {type(self).__name__}\n") + viewer.printfASCII(f" Number of blocks = {self.col_space.nglobal_spaces}, Number of ensemble ranks = {self.ensemble.ensemble_size}\n") + + if viewer.getFormat() != PETSc.Viewer.Format.ASCII_INFO_DETAIL: + viewer.printfASCII(" Local information for first block is in the following Mat objects on rank 0:\n") + prefix = mat.getOptionsPrefix() or "" + viewer.printfASCII(f" Use -{prefix}ksp_view ::ascii_info_detail to display information for all blocks\n") + subviewer = viewer.getSubViewer(self.ensemble.comm) + if self.ensemble.ensemble_rank == 0: + subviewer.pushASCIITab() + self.block_mats[0].view(subviewer) + subviewer.popASCIITab() + viewer.restoreSubViewer(subviewer) + # Comment taken from PCView_BJacobi in https://petsc.org/release/src/ksp/pc/impls/bjacobi/bjacobi.c.html#PCBJACOBI + # extra call needed because of the two calls to PetscViewerASCIIPushSynchronized() in PetscViewerGetSubViewer() + viewer.popASCIISynchronized() + + else: + viewer.pushASCIISynchronized() + viewer.printfASCII(" Local information for each block is in the following Mat objects:\n") + viewer.pushASCIITab() + subviewer = viewer.getSubViewer(self.ensemble.comm) + r = self.ensemble.ensemble_rank + offset = self.col_space.global_spaces_offset + subviewer.printfASCII(f"[{r}] number of local blocks = {self.col_space.nlocal_spaces}, first local block number = {offset}\n") + for i, submat in enumerate(self.block_mats): + subviewer.printfASCII(f"[{r}] local block number {i}, global block number {offset + i}\n") + submat.view(subviewer) + subviewer.printfASCII("- - - - - - - - - - - - - - - - - -\n") + viewer.restoreSubViewer(subviewer) + viewer.popASCIITab() + viewer.popASCIISynchronized() + + +def EnsembleBlockDiagonalMat(block_mats: Iterable, + row_space: EnsembleFunctionSpaceBase, + col_space: EnsembleFunctionSpaceBase): + """ + A Mat for 
a block diagonal matrix defined over an :class:`~.ensemble.Ensemble`. + Each block acts on a single subspace of an :class:`~.ensemble_functionspace.EnsembleFunctionSpace`. + This is a convenience function to create a PETSc.Mat with a :class:`.EnsembleBlockDiagonalMatCtx` Python context. + + Parameters + ---------- + block_mats : Iterable[PETSc.Mat] + The PETSc Mats for each block. On each ensemble rank there must be as many + Mats as there are local subspaces of ``row_space`` and ``col_space``, and + the Mat sizes must match the sizes of the corresponding subspaces. + row_space : + The function space that the matrix acts on. + Must have the same number of subspaces on each ensemble rank as col_space. + col_space : + The function space for the result of the matrix action. + Must have the same number of subspaces on each ensemble rank as row_space. + + Returns + ------- + PETSc.Mat : + The PETSc.Mat with an :class:`.EnsembleBlockDiagonalMatCtx` Python context. + + See Also + -------- + EnsembleBlockDiagonalMatCtx + ~.ensemble_pc.EnsembleBJacobiPC + """ + ctx = EnsembleBlockDiagonalMatCtx(block_mats, row_space, col_space) + + # number of columns is row length, and vice-versa + ncols = ctx.col_space.layout_vec.getSizes() + nrows = ctx.row_space.layout_vec.getSizes() + + mat = PETSc.Mat().createPython( + (ncols, nrows), ctx, + comm=ctx.ensemble.global_comm) + mat.setUp() + mat.assemble() + return mat diff --git a/firedrake/ensemble/ensemble_pc.py b/firedrake/ensemble/ensemble_pc.py new file mode 100644 index 0000000000..1a3f96aaae --- /dev/null +++ b/firedrake/ensemble/ensemble_pc.py @@ -0,0 +1,187 @@ +import petsctools +from firedrake.petsc import PETSc +from firedrake.ensemble.ensemble_function import EnsembleFunction +from firedrake.ensemble.ensemble_mat import EnsembleMatCtxBase, EnsembleBlockDiagonalMatCtx + + +def obj_name(obj): + return f"{type(obj).__module__}.{type(obj).__name__}" + + +class EnsemblePCBase(petsctools.PCBase): + """ + Base class for python type 
PCs defined over an :class:`~.ensemble.Ensemble`. + + The pc operators must be python Mats with :class:`~.ensemble_mat.EnsembleMatCtxBase`. + + Notes + ----- + The main use of this base class is to enable users to implement the preconditioner + action as acting on and resulting in an :class:`~.ensemble_function.EnsembleFunction`. + This is done by implementing the ``apply_impl`` method. + + See Also + -------- + ~.ensemble_mat.EnsembleMatCtxBase + """ + needs_python_pmat = True + + def initialize(self, pc): + super().initialize(pc) + + if not isinstance(self.pmat, EnsembleMatCtxBase): + pcname = obj_name(self) + pmatname = obj_name(self.pmat) + raise TypeError( + f"PC {pcname} needs an EnsembleMatBase pmat, but it is a {pmatname}") + + self.ensemble = self.pmat.ensemble + + self.row_space = self.pmat.row_space.dual() + self.col_space = self.pmat.col_space.dual() + + self.x = EnsembleFunction(self.row_space) + self.y = EnsembleFunction(self.col_space) + + def apply(self, pc, x, y): + with self.x.vec_wo() as v: + x.copy(result=v) + + self.apply_impl(pc, self.x, self.y) + + with self.y.vec_ro() as v: + v.copy(result=y) + + def apply_impl(self, pc, x, y): + raise NotImplementedError + + +class EnsembleBJacobiPC(EnsemblePCBase): + """ + A python PC context for a block Jacobi method defined over an :class:`~.ensemble.Ensemble`. + Each block acts on a single subspace of an :class:`~.ensemble_functionspace.EnsembleFunctionSpace` + and is (approximately) solved with its own KSP, which defaults to -ksp_type preonly. + + Available options: + + * ``-pc_use_amat`` - use Amat to apply block of operator in inner Krylov method + * ``-sub_%d`` - set options for the ``%d``'th block, numbered from ensemble rank 0. + * ``-sub_`` - set default options for all blocks. + + Notes + ----- + Currently this is only implemented for :class:`~.ensemble_mat.EnsembleBlockDiagonalMatCtx` matrices. 
+ + See Also + -------- + ~.ensemble_mat.EnsembleBlockDiagonalMatCtx + ~.ensemble_mat.EnsembleBlockDiagonalMat + """ + prefix = "ebjacobi_" + + def initialize(self, pc): + super().initialize(pc) + + use_amat_prefix = self.parent_prefix + "pc_use_amat" + self.use_amat = PETSc.Options().getBool(use_amat_prefix, False) + + if not isinstance(self.pmat, EnsembleBlockDiagonalMatCtx): + pcname = obj_name(self) + matname = obj_name(self.pmat) + raise TypeError( + f"PC {pcname} needs an EnsembleBlockDiagonalMatCtx pmat, but it is a {matname}") + + if self.use_amat: + if not isinstance(self.amat, EnsembleBlockDiagonalMatCtx): + pcname = obj_name(self) + matname = obj_name(self.amat) + raise TypeError( + f"PC {pcname} needs an EnsembleBlockDiagonalMatCtx amat, but it is a {matname}") + + default_sub_prefix = self.parent_prefix + "sub_" + + default_options = petsctools.DefaultOptionSet( + base_prefix=default_sub_prefix, + custom_prefix_endings=range(self.col_space.nglobal_spaces)) + + block_offset = self.col_space.global_spaces_offset + + sub_ksps = [] + for i in range(len(self.pmat.block_mats)): + sub_ksp = PETSc.KSP().create( + comm=self.ensemble.comm) + + if self.use_amat: + sub_amat = self.amat.block_mats[i] + else: + sub_amat = self.pmat.block_mats[i] + sub_pmat = self.pmat.block_mats[i] + sub_ksp.setOperators(sub_amat, sub_pmat) + + sub_prefix = default_sub_prefix + str(block_offset + i) + + petsctools.attach_options( + sub_ksp, parameters={}, + options_prefix=sub_prefix, + default_options_set=default_options) + + # default to behaving like a PC + petsctools.set_default_parameter( + sub_ksp, "ksp_type", "preonly") + + petsctools.set_from_options(sub_ksp) + + sub_ksp.incrementTabLevel(1, parent=pc) + sub_ksp.pc.incrementTabLevel(1, parent=pc) + + sub_ksps.append(sub_ksp) + + self.sub_ksps = tuple(sub_ksps) + + def apply_impl(self, pc, x, y): + sub_vecs = zip(self.x.subfunctions, self.y.subfunctions) + for sub_ksp, (subx, suby) in zip(self.sub_ksps, sub_vecs): + with 
subx.dat.vec_ro as rhs, suby.dat.vec_wo as sol: + with petsctools.inserted_options(sub_ksp): + sub_ksp.solve(rhs, sol) + + def update(self, pc): + for sub_ksp in self.sub_ksps: + sub_ksp.setUp() + + def view(self, pc, viewer=None): + super().view(pc, viewer=viewer) + viewer.printfASCII(" firedrake block Jacobi preconditioner for ensemble Mats\n") + if self.use_amat: + viewer.printfASCII(" using Amat local matrix\n") + viewer.printfASCII(f" Number of blocks = {self.col_space.nglobal_spaces}, Number of ensemble ranks = {self.ensemble.ensemble_size}\n") + + if viewer.getFormat() != PETSc.Viewer.Format.ASCII_INFO_DETAIL: + viewer.printfASCII(" Local solver information for first block is in the following KSP and PC objects on rank 0:\n") + prefix = self.parent_prefix + viewer.printfASCII(f" Use -{prefix}ksp_view ::ascii_info_detail to display information for all blocks\n") + subviewer = viewer.getSubViewer(self.ensemble.comm) + if self.ensemble.ensemble_rank == 0: + subviewer.pushASCIITab() + self.sub_ksps[0].view(subviewer) + subviewer.popASCIITab() + viewer.restoreSubViewer(subviewer) + # Comment taken from PCView_BJacobi in https://petsc.org/release/src/ksp/pc/impls/bjacobi/bjacobi.c.html#PCBJACOBI + # extra call needed because of the two calls to PetscViewerASCIIPushSynchronized() in PetscViewerGetSubViewer() + viewer.popASCIISynchronized() + + else: + viewer.pushASCIISynchronized() + viewer.printfASCII(" Local solver information for each block is in the following KSP and PC objects:\n") + viewer.pushASCIITab() + subviewer = viewer.getSubViewer(self.ensemble.comm) + r = self.ensemble.ensemble_rank + offset = self.col_space.global_spaces_offset + subviewer.printfASCII(f"[{r}] number of local blocks = {self.col_space.nlocal_spaces}, first local block number = {offset}\n") + for i, subksp in enumerate(self.sub_ksps): + subviewer.printfASCII(f"[{r}] local block number {i}, global block number {offset + i}\n") + subksp.view(subviewer) + subviewer.printfASCII("- - - - - - 
- - - - - - - - - - - -\n") + viewer.restoreSubViewer(subviewer) + viewer.popASCIITab() + viewer.popASCIISynchronized() diff --git a/firedrake/exceptions.py b/firedrake/exceptions.py index c80853ef23..726de214e3 100644 --- a/firedrake/exceptions.py +++ b/firedrake/exceptions.py @@ -1,4 +1,55 @@ +from tsfc.exceptions import MismatchingDomainError # noqa: F401 -class ConvergenceError(Exception): - """Error raised when a solver fails to converge""" +class FiredrakeException(Exception): + """Base class for all Firedrake exceptions.""" + + +class ConvergenceError(FiredrakeException): + """Error raised when a solver fails to converge.""" + + +class DofNotDefinedError(FiredrakeException): + r"""Raised when attempting to interpolate across function spaces where the + target function space contains degrees of freedom (i.e. nodes) which cannot + be defined in the source function space. This typically occurs when the + target mesh covers a larger domain than the source mesh. + """ + + +class DofTypeError(FiredrakeException): + """Raised when an operation is attempted on a degree of freedom (DoF) + type which is not supported. + """ + + +class VertexOnlyMeshMissingPointsError(FiredrakeException): + """Exception raised when 1 or more points are not found by a + :func:`~.VertexOnlyMesh` in its parent mesh. + + Attributes + ---------- + n_missing_points + The number of points which were not found in the parent mesh. + """ + + def __init__(self, n_missing_points: int): + self.n_missing_points = n_missing_points + + def __str__(self): + return ( + f"{self.n_missing_points} vertices are outside the mesh and have " + "been removed from the VertexOnlyMesh." + ) + + +class NonUniqueMeshSequenceError(FiredrakeException): + """Raised when calling `.unique()` on a MeshSequence which contains + non-unique meshes. + """ + + +class UnrecognisedDeviceError(FiredrakeException): + """Raised when a GPU device has been initialised in PETSc that Firedrake + does not support. 
+ """ diff --git a/firedrake/external_operators/__init__.py b/firedrake/external_operators/__init__.py index 363751d49b..8f2e6717f5 100644 --- a/firedrake/external_operators/__init__.py +++ b/firedrake/external_operators/__init__.py @@ -1,3 +1,7 @@ -from firedrake.external_operators.abstract_external_operators import * # noqa: F401 -from firedrake.external_operators.point_expr_operator import * # noqa: F401 -from firedrake.external_operators.ml_operator import * # noqa: F401 +from firedrake.external_operators.abstract_external_operators import ( # noqa F401 + AbstractExternalOperator, assemble_method +) +from firedrake.external_operators.point_expr_operator import ( # noqa F401 + PointexprOperator, point_expr +) +from firedrake.external_operators.ml_operator import MLOperator # noqa F401 diff --git a/firedrake/external_operators/point_expr_operator.py b/firedrake/external_operators/point_expr_operator.py index 3aa40e1d5b..4e7183e47f 100644 --- a/firedrake/external_operators/point_expr_operator.py +++ b/firedrake/external_operators/point_expr_operator.py @@ -5,7 +5,7 @@ import firedrake.ufl_expr as ufl_expr from firedrake.assemble import assemble -from firedrake.interpolation import Interpolate +from firedrake.interpolation import interpolate from firedrake.external_operators import AbstractExternalOperator, assemble_method @@ -58,7 +58,7 @@ def assemble_operator(self, *args, **kwargs): V = self.function_space() expr = as_ufl(self.expr(*self.ufl_operands)) if len(V) < 2: - interp = Interpolate(expr, self.function_space()) + interp = interpolate(expr, self.function_space()) return assemble(interp) # Interpolation of UFL expressions for mixed functions is not yet supported # -> `Function.assign` might be enough in some cases. 
@@ -72,7 +72,7 @@ def assemble_operator(self, *args, **kwargs): def assemble_Jacobian_action(self, *args, **kwargs): V = self.function_space() expr = as_ufl(self.expr(*self.ufl_operands)) - interp = Interpolate(expr, V) + interp = interpolate(expr, V) u, = [e for i, e in enumerate(self.ufl_operands) if self.derivatives[i] == 1] w = self.argument_slots()[-1] @@ -83,7 +83,7 @@ def assemble_Jacobian_action(self, *args, **kwargs): def assemble_Jacobian(self, *args, assembly_opts, **kwargs): V = self.function_space() expr = as_ufl(self.expr(*self.ufl_operands)) - interp = Interpolate(expr, V) + interp = interpolate(expr, V) u, = [e for i, e in enumerate(self.ufl_operands) if self.derivatives[i] == 1] jac = ufl_expr.derivative(interp, u) @@ -99,7 +99,7 @@ def assemble_Jacobian_adjoint(self, *args, assembly_opts, **kwargs): def assemble_Jacobian_adjoint_action(self, *args, **kwargs): V = self.function_space() expr = as_ufl(self.expr(*self.ufl_operands)) - interp = Interpolate(expr, V) + interp = interpolate(expr, V) u, = [e for i, e in enumerate(self.ufl_operands) if self.derivatives[i] == 1] ustar = self.argument_slots()[0] diff --git a/firedrake/extrusion_utils.py b/firedrake/extrusion_utils.py index 0b65f6d11d..e0e3d91e02 100644 --- a/firedrake/extrusion_utils.py +++ b/firedrake/extrusion_utils.py @@ -65,7 +65,7 @@ def make_extruded_coords(extruded_topology, base_coords, ext_coords, layer_height = numpy.cumsum(numpy.concatenate(([0], layer_height))) layer_heights = layer_height.size - layer_height = op2.Global(layer_heights, layer_height, dtype=RealType, comm=extruded_topology._comm) + layer_height = op2.Global(layer_heights, layer_height, dtype=RealType, comm=extruded_topology.comm) if kernel is not None: op2.ParLoop(kernel, @@ -145,7 +145,7 @@ def _get_lp_domains(_inames, _extents): elif extrusion_type == 'radial_hedgehog': # Only implemented for interval in 2D and triangle in 3D. # gdim != tdim already checked in ExtrudedMesh constructor. 
- tdim = extract_unique_domain(base_coords).ufl_cell().topological_dimension() + tdim = extract_unique_domain(base_coords).ufl_cell().topological_dimension if tdim not in [1, 2]: raise NotImplementedError("Hedgehog extrusion not implemented for %s" % extract_unique_domain(base_coords).ufl_cell()) # tdim == 1: diff --git a/firedrake/fml/__init__.py b/firedrake/fml/__init__.py index d0f951305d..56d86fd9c1 100644 --- a/firedrake/fml/__init__.py +++ b/firedrake/fml/__init__.py @@ -1,2 +1,7 @@ -from firedrake.fml.form_manipulation_language import * # noqa -from firedrake.fml.replacement import * # noqa +from firedrake.fml.form_manipulation_language import ( # noqa F401 + Label, Term, LabelledForm, identity, drop, all_terms, + keep, subject, name_label +) +from firedrake.fml.replacement import ( # noqa F401 + replace_test_function, replace_trial_function, replace_subject +) diff --git a/firedrake/formmanipulation.py b/firedrake/formmanipulation.py index eb830493e1..737b630519 100644 --- a/firedrake/formmanipulation.py +++ b/firedrake/formmanipulation.py @@ -49,7 +49,19 @@ def indexed(self, o, child, multiindex): return ListTensor(*(child[i] for i in indices)) return self.expr(o, child, multiindex) - index_inliner = IndexInliner() + @property + def index_inliner(self): + """Return an IndexInliner multifunction. + + This is a property so that the IndexInliner is not created on import. + This is a workaround for issues in Irksome caused by the UFL typecode + system. 
+ """ + try: + return self._index_inliner + except AttributeError: + type(self)._index_inliner = self.IndexInliner() + return self._index_inliner def _subspace_argument(self, a): return type(a)(subspace(a.function_space(), self.blocks[a.number()]), diff --git a/firedrake/function.py b/firedrake/function.py index d37e62b91e..b447a83174 100644 --- a/firedrake/function.py +++ b/firedrake/function.py @@ -13,9 +13,10 @@ from collections.abc import Collection from numbers import Number from pathlib import Path -from functools import partial +from functools import partial, cached_property from typing import Tuple +import petsctools from pyop2 import op2, mpi from pyop2.exceptions import DataTypeError, DataValueError @@ -73,15 +74,13 @@ def __init__(self, function_space, val=None, name=None, dtype=ScalarType): # User comm self.comm = function_space.comm - # Internal comm - self._comm = mpi.internal_comm(function_space.comm, self) self._function_space = function_space - self.uid = utils._new_uid(self._comm) + self.uid = utils._new_uid(self.comm) self._name = name or 'function_%d' % self.uid self._label = "a function" if isinstance(val, (op2.Dat, op2.DatView, op2.MixedDat, op2.Global)): - assert val.comm == self._comm + assert val.comm == self.comm self.dat = val else: self.dat = function_space.make_dat(val, dtype, self.name()) @@ -111,7 +110,7 @@ def copy(self, deepcopy=False): def ufl_id(self): return self.uid - @utils.cached_property + @cached_property def subfunctions(self): r"""Extract any sub :class:`Function`\s defined on the component spaces of this this :class:`Function`'s :class:`.FunctionSpace`.""" @@ -119,7 +118,7 @@ def subfunctions(self): for i, (fs, dat) in enumerate(zip(self.function_space(), self.dat))) - @utils.cached_property + @cached_property def _components(self): if self.function_space().rank == 0: return (self, ) @@ -308,7 +307,7 @@ def __dir__(self): current = super(Function, self).__dir__() return list(dict.fromkeys(dir(self._data) + current)) - 
@utils.cached_property + @cached_property @FunctionMixin._ad_annotate_subfunctions def subfunctions(self): r"""Extract any sub :class:`Function`\s defined on the component spaces @@ -316,7 +315,7 @@ def subfunctions(self): return tuple(type(self)(V, val) for (V, val) in zip(self.function_space(), self.topological.subfunctions)) - @utils.cached_property + @cached_property def _components(self): if self.function_space().rank == 0: return (self, ) @@ -382,9 +381,9 @@ def interpolate(self, firedrake.function.Function Returns `self` """ - from firedrake import interpolation, assemble + from firedrake import interpolate, assemble V = self.function_space() - interp = interpolation.Interpolate(expression, V, **kwargs) + interp = interpolate(expression, V, **kwargs) return assemble(interp, tensor=self, ad_block_tag=ad_block_tag) def zero(self, subset=None): @@ -407,29 +406,43 @@ def zero(self, subset=None): @PETSc.Log.EventDecorator() @FunctionMixin._ad_annotate_assign - def assign(self, expr, subset=None): - r"""Set the :class:`Function` value to the pointwise value of - expr. expr may only contain :class:`Function`\s on the same - :class:`.FunctionSpace` as the :class:`Function` being assigned to. + def assign(self, expr, subset=None, allow_missing_dofs=False): + """Set value to the pointwise value of expr. + Parameters + ---------- + expr : ufl.core.expr.Expr + Expression to be assigned. + subset : pyop2.types.set.Set or pyop2.types.set.Subset or pyop2.types.set.MixedSet + ``self.node_set`` or `pyop2.types.set.Subset` of ``self.node_set`` or + `pyop2.types.set.MixedSet` composed of them if `self` is a mixed function. + allow_missing_dofs : bool + Permit assignment between objects with mismatching nodes. If `True` then + assignee nodes with no matching assigner nodes are ignored. + Only significant if assigning across submeshes. + + Returns + ------- + firedrake.function.Function + Returns `self`. 
+ + Notes + ----- + expr may only contain :class:`Function` s on the same :class:`.WithGeometry` as the + assignee :class:`Function` or those on the similar spaces on submeshes. Similar functionality is available for the augmented assignment - operators `+=`, `-=`, `*=` and `/=`. For example, if `f` and `g` are + operators `+=`, `-=`, `*=` and `/=`. For example, if ``f`` and ``g`` are both Functions on the same :class:`.FunctionSpace` then:: f += 2 * g - will add twice `g` to `f`. - - If present, subset must be an :class:`pyop2.types.set.Subset` of this - :class:`Function`'s ``node_set``. The expression will then - only be assigned to the nodes on that subset. + will add twice ``g`` to ``f``. - .. note:: + Assignment can only be performed for simple weighted sum expressions and constant + values. Things like ``u.assign(2*v + Constant(3.0))``. For more complicated + expressions (e.g. involving the product of functions) :meth:`.Function.interpolate` + should be used. - Assignment can only be performed for simple weighted sum expressions and constant - values. Things like ``u.assign(2*v + Constant(3.0))``. For more complicated - expressions (e.g. involving the product of functions) :meth:`.Function.interpolate` - should be used. 
""" if self.ufl_element().family() == "Real" and isinstance(expr, (Number, Collection)): try: @@ -440,7 +453,7 @@ def assign(self, expr, subset=None): self.dat.zero(subset=subset) else: from firedrake.assign import Assigner - Assigner(self, expr, subset).assign() + Assigner(self, expr, subset).assign(allow_missing_dofs=allow_missing_dofs) return self def riesz_representation(self, riesz_map='L2'): @@ -502,7 +515,8 @@ def __float__(self): else: raise ValueError("Can only cast scalar 'Real' Functions to float.") - @utils.cached_property + @cached_property + @PETSc.Log.EventDecorator() def _constant_ctypes(self): # Retrieve data from Python object function_space = self.function_space() @@ -527,6 +541,7 @@ def _constant_ctypes(self): return c_function @property + @PETSc.Log.EventDecorator() def _ctypes(self): mesh = extract_unique_domain(self) c_function = self._constant_ctypes @@ -597,20 +612,24 @@ def _at(self, arg, *args, **kwargs): tolerance = kwargs.get('tolerance', None) mesh = self.function_space().mesh() + if len(set(mesh)) == 1: + mesh_unique = mesh.unique() + else: + raise NotImplementedError("Not implemented for general mixed meshes") if tolerance is None: - tolerance = mesh.tolerance + tolerance = mesh_unique.tolerance else: - mesh.tolerance = tolerance + mesh_unique.tolerance = tolerance # Handle f._at(0.3) if not arg.shape: arg = arg.reshape(-1) - if mesh.variable_layers: + if mesh_unique.variable_layers: raise NotImplementedError("Point evaluation not implemented for variable layers") # Validate geometric dimension - gdim = mesh.geometric_dimension() + gdim = mesh.geometric_dimension if arg.shape[-1] == gdim: pass elif len(arg.shape) == 1 and gdim == 1: @@ -619,9 +638,10 @@ def _at(self, arg, *args, **kwargs): raise ValueError("Point dimension (%d) does not match geometric dimension (%d)." 
% (arg.shape[-1], gdim)) # Check if we have got the same points on each process - root_arg = self._comm.bcast(arg, root=0) - same_arg = arg.shape == root_arg.shape and np.allclose(arg, root_arg) - diff_arg = self._comm.allreduce(int(not same_arg), op=MPI.SUM) + with mpi.temp_internal_comm(self.comm) as icomm: + root_arg = icomm.bcast(arg, root=0) + same_arg = arg.shape == root_arg.shape and np.allclose(arg, root_arg) + diff_arg = icomm.allreduce(int(not same_arg), op=MPI.SUM) if diff_arg: raise ValueError("Points to evaluate are inconsistent among processes.") @@ -701,7 +721,7 @@ def __init__(self, domain, point): self.point = point def __str__(self): - return "domain %s does not contain point %s" % (self.domain, self.point) + return f"Domain {self.domain} does not contain point {self.point}" class PointEvaluator: @@ -732,7 +752,7 @@ def __init__(self, mesh: MeshGeometry, points: np.ndarray | list, tolerance: flo self.points = np.asarray(points, dtype=utils.ScalarType) if not self.points.shape: self.points = self.points.reshape(-1) - gdim = mesh.geometric_dimension() + gdim = mesh.geometric_dimension if self.points.shape[-1] != gdim and (len(self.points.shape) != 1 or gdim != 1): raise ValueError(f"Point dimension ({self.points.shape[-1]}) does not match geometric dimension ({gdim}).") self.points = self.points.reshape(-1, gdim) @@ -782,7 +802,7 @@ def evaluate(self, function: Function) -> np.ndarray | Tuple[np.ndarray, ...]: if function.function_space().ufl_element().family() == "Real": return function.dat.data_ro - function_mesh = function.function_space().mesh() + function_mesh = function.function_space().mesh().unique() if function_mesh is not self.mesh: raise ValueError("Function mesh must be the same Mesh object as the PointEvaluator mesh.") if coord_changed := function_mesh.coordinates.dat.dat_version != self.mesh._saved_coordinate_dat_version: @@ -831,7 +851,6 @@ def make_c_evaluate(function, c_name="evaluate", ldargs=None, tolerance=None): from os import 
path from firedrake.pointeval_utils import compile_element from pyop2 import compilation - from pyop2.utils import get_petsc_dir from pyop2.parloop import generate_single_cell_wrapper import firedrake.pointquery_utils as pq_utils @@ -866,8 +885,9 @@ def make_c_evaluate(function, c_name="evaluate", ldargs=None, tolerance=None): cppargs=[ f"-I{path.dirname(__file__)}", f"-I{sys.prefix}/include", - f"-I{rtree.finder.get_include()}" - ] + [f"-I{d}/include" for d in get_petsc_dir()], + f"-I{rtree.finder.get_include()}", + *petsctools.get_petsc_dirs(prefix="-I", subdir="include"), + ], ldargs=ldargs, comm=function.comm ) diff --git a/firedrake/functionspace.py b/firedrake/functionspace.py index 617e7f6780..c8134e774c 100644 --- a/firedrake/functionspace.py +++ b/firedrake/functionspace.py @@ -4,6 +4,7 @@ API is functional, rather than object-based, to allow for simple backwards-compatibility, argument checking, and dispatch. """ +import itertools import ufl import finat.ufl @@ -30,7 +31,8 @@ def make_scalar_element(mesh, family, degree, vfamily, vdegree, variant, quad_sc family : The finite element family. degree : - The degree of the finite element. + The degree of the finite element. If unspecified this will default + to the lowest degree available for the given family. vfamily : The finite element in the vertical dimension (extruded meshes only). @@ -48,9 +50,6 @@ def make_scalar_element(mesh, family, degree, vfamily, vdegree, variant, quad_sc :class:`finat.ufl.finiteelementbase.FiniteElementBase`, in which case all other arguments are ignored and the element is returned immediately. - As a side effect, this function finalises the initialisation of - the provided mesh, by calling :meth:`.AbstractMeshTopology.init` (or - :meth:`.MeshGeometry.init`) as appropriate. 
""" topology = mesh.topology cell = topology.ufl_cell() @@ -60,7 +59,7 @@ def make_scalar_element(mesh, family, degree, vfamily, vdegree, variant, quad_sc if isinstance(cell, ufl.TensorProductCell) \ and vfamily is not None and vdegree is not None: la = finat.ufl.FiniteElement(family, - cell=cell.sub_cells()[0], + cell=cell.sub_cells[0], degree=degree, variant=variant, quad_scheme=quad_scheme) @@ -88,7 +87,8 @@ def FunctionSpace(mesh, family, degree=None, name=None, family : The finite element family. degree : - The degree of the finite element. + The degree of the finite element. If unspecified this will default + to the lowest degree available for the given family. name: An optional name for the function space. vfamily : @@ -127,7 +127,8 @@ def DualSpace(mesh, family, degree=None, name=None, family : The finite element family. degree : - The degree of the finite element. + The degree of the finite element. If unspecified this will default + to the lowest degree available for the given family. name : An optional name for the function space. vfamily: @@ -166,7 +167,8 @@ def VectorFunctionSpace(mesh, family, degree=None, dim=None, name=None, family : The finite element family. degree : - The degree of the finite element. + The degree of the finite element. If unspecified this will default + to the lowest degree available for the given family. dim : An optional number of degrees of freedom per function space node (defaults to the geometric dimension of the mesh). 
@@ -198,8 +200,8 @@ def VectorFunctionSpace(mesh, family, degree=None, dim=None, name=None, """ sub_element = make_scalar_element(mesh, family, degree, vfamily, vdegree, variant, quad_scheme) if dim is None: - dim = mesh.geometric_dimension() - if not (isinstance(dim, numbers.Integral) and dim > 0): + dim = mesh.geometric_dimension + if not (isinstance(dim, numbers.Integral) and dim > 0): raise ValueError(f"Can't make VectorFunctionSpace with dim={dim}") element = finat.ufl.VectorElement(sub_element, dim=dim) return FunctionSpace(mesh, element, name=name) @@ -218,7 +220,8 @@ def TensorFunctionSpace(mesh, family, degree=None, shape=None, family : The finite element family. degree : - The degree of the finite element. + The degree of the finite element. If unspecified this will default + to the lowest degree available for the given family. shape : An optional shape for the tensor-valued degrees of freedom at each function space node (defaults to a square tensor using the @@ -254,7 +257,7 @@ def TensorFunctionSpace(mesh, family, degree=None, shape=None, """ sub_element = make_scalar_element(mesh, family, degree, vfamily, vdegree, variant, quad_scheme) if shape is None: - shape = (mesh.geometric_dimension(),) * 2 + shape = (mesh.geometric_dimension,) * 2 element = finat.ufl.TensorElement(sub_element, shape=shape, symmetry=symmetry) return FunctionSpace(mesh, element, name=name) @@ -275,6 +278,8 @@ def MixedFunctionSpace(spaces, name=None, mesh=None): :class:`finat.ufl.mixedelement.MixedElement`, ignored otherwise. 
""" + from firedrake.mesh import MeshSequenceGeometry + if isinstance(spaces, finat.ufl.FiniteElementBase): # Build the spaces if we got a mixed element assert type(spaces) is finat.ufl.MixedElement and mesh is not None @@ -289,13 +294,8 @@ def rec(eles): sub_elements.append(ele) rec(spaces.sub_elements) spaces = [FunctionSpace(mesh, element) for element in sub_elements] - - # Check that function spaces are on the same mesh - meshes = [space.mesh() for space in spaces] - for i in range(1, len(meshes)): - if meshes[i] is not meshes[0]: - raise ValueError("All function spaces must be defined on the same mesh!") - + # Flatten MeshSequences. + meshes = list(itertools.chain(*[space.mesh() for space in spaces])) try: cls, = set(type(s) for s in spaces) except ValueError: @@ -303,8 +303,6 @@ def rec(eles): # We had not implemented something in between, so let's make it primal cls = impl.WithGeometry - # Select mesh - mesh = meshes[0] # Get topological spaces spaces = tuple(s.topological for s in flatten(spaces)) # Error checking @@ -318,10 +316,9 @@ def rec(eles): else: raise ValueError("Can't make mixed space with %s" % type(space)) - new = impl.MixedFunctionSpace(spaces, name=name) - if mesh is not mesh.topology: - new = cls.create(new, mesh) - return new + mixed_mesh_geometry = MeshSequenceGeometry(meshes) + new = impl.MixedFunctionSpace(spaces, mixed_mesh_geometry.topology, name=name) + return cls(new, mixed_mesh_geometry) @PETSc.Log.EventDecorator("CreateFunctionSpace") @@ -339,7 +336,7 @@ def RestrictedFunctionSpace(function_space, boundary_set=[], name=None): An optional name for the function space. 
""" - return impl.WithGeometry.create(impl.RestrictedFunctionSpace(function_space, - boundary_set=boundary_set, - name=name), - function_space.mesh()) + return impl.WithGeometry(impl.RestrictedFunctionSpace(function_space, + boundary_set=boundary_set, + name=name), + function_space.mesh()) diff --git a/firedrake/functionspaceimpl.py b/firedrake/functionspaceimpl.py index 4dda3266e1..b5b2e90a0a 100644 --- a/firedrake/functionspaceimpl.py +++ b/firedrake/functionspaceimpl.py @@ -4,23 +4,25 @@ classes for attaching extra information to instances of these. """ +import warnings from collections import OrderedDict -from dataclasses import dataclass -from typing import Optional import numpy import ufl import finat.ufl +from finat.quadrature import QuadratureRule +from ufl.cell import CellSequence from ufl.duals import is_dual, is_primal -from pyop2 import op2, mpi +from pyop2 import op2 from pyop2.utils import as_tuple -from firedrake import dmhooks, utils +from firedrake import dmhooks +from firedrake.mesh import MeshGeometry, MeshSequenceTopology, MeshSequenceGeometry from firedrake.functionspacedata import get_shared_data, create_element -from firedrake.mesh import MeshGeometry from firedrake.petsc import PETSc +from functools import cached_property def check_element(element, top=True): @@ -52,7 +54,10 @@ def check_element(element, top=True): ValueError If the element is illegal. 
""" - if element.cell.cellname() == "hexahedron" and \ + if isinstance(element.cell, CellSequence) and \ + type(element) is not finat.ufl.MixedElement: + raise ValueError("MixedElement modifier must be outermost") + if element.cell.cellname == "hexahedron" and \ element.family() not in ["Q", "DQ", "Real"]: raise NotImplementedError("Currently can only use 'Q', 'DQ', and/or 'Real' elements on hexahedral meshes, not", element.family()) if type(element) in (finat.ufl.BrokenElement, finat.ufl.RestrictedElement, @@ -73,94 +78,87 @@ def check_element(element, top=True): check_element(e, top=False) -class WithGeometryBase(object): +class WithGeometryBase: r"""Attach geometric information to a :class:`~.FunctionSpace`. Function spaces on meshes with different geometry but the same topology can share data, except for their UFL cell. This class facilitates that. - Users should not instantiate a :class:`WithGeometryBase` object - explicitly except in a small number of cases. - - When instantiating a :class:`WithGeometryBase`, users should call - :meth:`WithGeometryBase.create` rather than ``__init__``. + Parameters + ---------- + function_space : FunctionSpace or MixedFunctionSpace + Topological function space to attach geometry to. + mesh : MeshGeometry + Mesh with geometric information to use. + parent : WithGeometry + Parent geometric function space if exists. - :arg mesh: The mesh with geometric information to use. - :arg element: The UFL element. - :arg component: The component of this space in a parent vector - element space, or ``None``. - :arg cargo: :class:`FunctionSpaceCargo` instance carrying - Firedrake-specific data that is not required for code - generation. 
""" - def __init__(self, mesh, element, component=None, cargo=None): - assert component is None or isinstance(component, int) - assert cargo is None or isinstance(cargo, FunctionSpaceCargo) - - super().__init__(mesh, element, label=cargo.topological._label or "") - self.component = component - self.cargo = cargo - self.comm = mesh.comm - self._comm = mpi.internal_comm(mesh.comm, self) - - @classmethod - def create(cls, function_space, mesh): - """Create a :class:`WithGeometry`. + def __init__(self, function_space, mesh, parent=None): + if isinstance(function_space, MixedFunctionSpace): + if not isinstance(mesh, MeshSequenceGeometry): + raise TypeError(f"Can only use MixedFunctionSpace with MeshSequenceGeometry: got {type(mesh)}") - :arg function_space: The topological function space to attach - geometry to. - :arg mesh: The mesh with geometric information to use. - """ function_space = function_space.topological - assert mesh.topology is function_space.mesh() - assert mesh.topology is not mesh - - element = function_space.ufl_element().reconstruct(cell=mesh.ufl_cell()) - - topological = function_space - component = function_space.component + assert mesh.topology == function_space.mesh() + assert mesh.topology != mesh if function_space.parent is not None: - parent = cls.create(function_space.parent, mesh) + if parent is None: + raise ValueError("Must pass parent if function_space.parent is not None") else: parent = None - cargo = FunctionSpaceCargo(topological, parent) - return cls(mesh, element, component=component, cargo=cargo) + element = function_space.ufl_element().reconstruct(cell=mesh.ufl_cell()) + if type(element) is finat.ufl.MixedElement: + if not isinstance(mesh, MeshSequenceGeometry): + raise TypeError(f"Can only use MixedElement with MeshSequenceGeometry: got {type(mesh)}") + assert function_space.component is None or isinstance(function_space.component, int) - def _ufl_signature_data_(self, *args, **kwargs): - return (type(self), self.component, - 
super()._ufl_signature_data_(*args, **kwargs)) + self.topological = function_space + self.parent = parent + self.component = function_space.component + self.comm = mesh.comm + super().__init__(mesh, element, label=function_space._label or "") - @property - def parent(self): - return self.cargo.parent + @classmethod + def create(cls, function_space, mesh, parent=None): + """Create a :class:`WithGeometry`. - @parent.setter - def parent(self, val): - self.cargo.parent = val + This factory function is deprecated. Use the `WithGeometry` constructor + instead. - @property - def topological(self): - return self.cargo.topological + Parameters + ---------- + function_space : FunctionSpace or MixedFunctionSpace + Topological function space to attach geometry to. + mesh : MeshGeometry + Mesh with geometric information to use. + parent : WithGeometry + Parent geometric function space if exists. + + """ + warnings.warn( + "'WithGeometry.create' is deprecated, instantiate them directly instead", + FutureWarning, + ) + return cls(function_space, mesh, parent=parent) - @topological.setter - def topological(self, val): - self.cargo.topological = val + def _ufl_signature_data_(self, *args, **kwargs): + return (type(self), self.component, + super()._ufl_signature_data_(*args, **kwargs)) - @utils.cached_property + @cached_property def subspaces(self): r"""Split into a tuple of constituent spaces.""" - return tuple(type(self).create(subspace, self.mesh()) - for subspace in self.topological.subspaces) - - @property - def subfunctions(self): - import warnings - warnings.warn("The 'subfunctions' property is deprecated for function spaces, please use the " - "'subspaces' property instead", category=FutureWarning) - return self.subspaces + if isinstance(self.topological, MixedFunctionSpace): + return tuple( + type(self)(subspace, mesh, parent=self) + for mesh, subspace in zip(self.mesh(), self.topological.subspaces, strict=True) + ) + else: + return (self, ) mesh = 
ufl.FunctionSpace.ufl_domain @@ -176,13 +174,10 @@ def ufl_cell(self): r"""The :class:`~ufl.classes.Cell` this FunctionSpace is defined on.""" return self.mesh().ufl_cell() - @utils.cached_property + @cached_property def _components(self): - if len(self) == 1: - return tuple(type(self).create(self.topological.sub(i), self.mesh()) - for i in range(self.block_size)) - else: - return self.subspaces + return tuple(type(self)(self.topological.sub(i), self.mesh(), parent=self) + for i in range(self.block_size)) @PETSc.Log.EventDecorator() def sub(self, i): @@ -190,7 +185,7 @@ def sub(self, i): data = self.subspaces if mixed else self._components return data[i] - @utils.cached_property + @cached_property def dm(self): dm = self._dm() dmhooks.set_function_space(dm, self) @@ -301,7 +296,7 @@ def __eq__(self, other): return False try: return self.topological == other.topological and \ - self.mesh() is other.mesh() + self.mesh() == other.mesh() except AttributeError: return False @@ -355,7 +350,7 @@ def boundary_nodes(self, sub_domain): return self._shared_data.boundary_nodes(self, sub_domain) def collapse(self): - return type(self).create(self.topological.collapse(), self.mesh()) + return type(self)(self.topological.collapse(), self.mesh()) @classmethod def make_function_space(cls, mesh, element, name=None): @@ -363,9 +358,17 @@ def make_function_space(cls, mesh, element, name=None): topology = mesh.topology # Create a new abstract (Mixed/Real)FunctionSpace, these are neither primal nor dual. 
if type(element) is finat.ufl.MixedElement: - spaces = [cls.make_function_space(topology, e) for e in element.sub_elements] - new = MixedFunctionSpace(spaces, name=name) + if isinstance(mesh, MeshGeometry): + mesh = MeshSequenceGeometry([mesh for _ in element.sub_elements]) + topology = mesh.topology + else: + if not isinstance(mesh, MeshSequenceGeometry): + raise TypeError(f"mesh must be MeshSequenceGeometry: got {mesh}") + spaces = [cls.make_function_space(topo, e) for topo, e in zip(topology, element.sub_elements, strict=True)] + new = MixedFunctionSpace(spaces, topology, name=name) else: + if isinstance(mesh, MeshSequenceGeometry): + raise TypeError(f"mesh must not be MeshSequenceGeometry: got {mesh}") # Check that any Vector/Tensor/Mixed modifiers are outermost. check_element(element) if element.family() == "Real": @@ -375,9 +378,47 @@ def make_function_space(cls, mesh, element, name=None): # Skip this if we are just building subspaces of an abstract MixedFunctionSpace if mesh is not topology: # Create a concrete WithGeometry or FiredrakeDualSpace on this mesh - new = cls.create(new, mesh) + new = cls(new, mesh) return new + def broken_space(self): + """Return a :class:`.WithGeometryBase` with a :class:`finat.ufl.brokenelement.BrokenElement` + constructed from this function space's FiniteElement. + + Returns + ------- + WithGeometryBase : + The new function space with a :class:`~finat.ufl.brokenelement.BrokenElement`. + """ + return type(self).make_function_space( + self.mesh(), finat.ufl.BrokenElement(self.ufl_element()), + name=f"{self.name}_broken" if self.name else None) + + def quadrature_space(self): + """Return a :class:`.WithGeometryBase` with a ``Quadrature`` element + defined on the point set required for interpolating external data into this space. + + Returns + ------- + WithGeometryBase : + The new function space with a ``Quadrature`` FiniteElement. 
+ """ + ufl_element = self.ufl_element() + if not self.finat_element.has_pointwise_dual_basis: + # Grab the point set for interpolation + _, ps = self.finat_element.dual_basis + # Invalidate the weights. This quadrature scheme is not for integration. + weights = numpy.full(len(ps.points), numpy.nan) + quad_scheme = QuadratureRule(ps, weights, self.finat_element.cell) + + ufl_element = finat.ufl.FiniteElement("Quadrature", + cell=ufl_element.cell, + degree=self.finat_element.degree, + quad_scheme=quad_scheme) + if self.value_shape: + ufl_element = finat.ufl.TensorElement(ufl_element, shape=self.value_shape) + return self.collapse().reconstruct(element=ufl_element) + def reconstruct( self, mesh: MeshGeometry | None = None, @@ -442,29 +483,21 @@ def reconstruct( return V -class WithGeometry(WithGeometryBase, ufl.FunctionSpace): - - def __init__(self, mesh, element, component=None, cargo=None): - super(WithGeometry, self).__init__(mesh, element, - component=component, - cargo=cargo) +class WithGeometry(WithGeometryBase, ufl.functionspace.FunctionSpace): def dual(self): - return FiredrakeDualSpace.create(self.topological, self.mesh()) + parent = None if self.parent is None else self.parent.dual() + return FiredrakeDualSpace(self.topological, self.mesh(), parent=parent) class FiredrakeDualSpace(WithGeometryBase, ufl.functionspace.DualSpace): - def __init__(self, mesh, element, component=None, cargo=None): - super(FiredrakeDualSpace, self).__init__(mesh, element, - component=component, - cargo=cargo) - def dual(self): - return WithGeometry.create(self.topological, self.mesh()) + parent = None if self.parent is None else self.parent.dual() + return WithGeometry(self.topological, self.mesh(), parent=parent) -class FunctionSpace(object): +class FunctionSpace: r"""A representation of a function space. 
A :class:`FunctionSpace` associates degrees of freedom with @@ -541,10 +574,7 @@ def __init__(self, mesh, element, name=None): space node.""" self.name = name r"""The (optional) descriptive name for this space.""" - # User comm self.comm = mesh.comm - # Internal comm - self._comm = mpi.internal_comm(self.comm, self) self.set_shared_data() self.dof_dset = self.make_dof_dset() @@ -591,7 +621,7 @@ def __eq__(self, other): if not isinstance(other, FunctionSpace): return False # FIXME: Think harder about equality - return self.mesh() is other.mesh() and \ + return self.mesh() == other.mesh() and \ self.dof_dset is other.dof_dset and \ self.ufl_element() == other.ufl_element() and \ self.component == other.component @@ -602,11 +632,11 @@ def __ne__(self, other): def __hash__(self): return hash((self.mesh(), self.dof_dset, self.ufl_element())) - @utils.cached_property + @cached_property def _ad_parent_space(self): return self.parent - @utils.cached_property + @cached_property def dm(self): r"""A PETSc DM describing the data layout for this FunctionSpace.""" dm = self._dm() @@ -624,16 +654,16 @@ def _dm(self): dmhooks.set_function_space(dm, self) return dm - @utils.cached_property + @cached_property def _ises(self): return self.dof_dset.field_ises - @utils.cached_property + @cached_property def cell_node_list(self): r"""A numpy array mapping mesh cells to function space nodes.""" return self._shared_data.entity_node_lists[self.mesh().cell_set] - @utils.cached_property + @cached_property def topological(self): r"""Function space on a mesh topology.""" return self @@ -667,25 +697,18 @@ def __repr__(self): def __str__(self): return self.__repr__() - @utils.cached_property + @cached_property def subspaces(self): """Split into a tuple of constituent spaces.""" return (self,) - @property - def subfunctions(self): - import warnings - warnings.warn("The 'subfunctions' property is deprecated for function spaces, please use the " - "'subspaces' property instead", 
category=FutureWarning) - return self.subspaces - def __getitem__(self, i): r"""Return the ith subspace.""" if i != 0: raise IndexError("Only index 0 supported on a FunctionSpace") return self - @utils.cached_property + @cached_property def _components(self): if self.rank == 0: return self.subspaces @@ -702,7 +725,7 @@ def __mul__(self, other): from firedrake.functionspace import MixedFunctionSpace return MixedFunctionSpace((self, other)) - @utils.cached_property + @cached_property def node_count(self): r"""The number of nodes (includes halo nodes) of this function space on this process. If the :class:`FunctionSpace` has :attr:`FunctionSpace.rank` 0, this @@ -713,7 +736,7 @@ def node_count(self): constrained_node_set.update(self._shared_data.boundary_nodes(self, sub_domain)) return self.node_set.total_size - len(constrained_node_set) - @utils.cached_property + @cached_property def dof_count(self): r"""The number of degrees of freedom (includes halo dofs) of this function space on this process. Cf. :attr:`FunctionSpace.node_count` .""" @@ -828,10 +851,31 @@ def boundary_nodes(self, sub_domain): return self._shared_data.boundary_nodes(self, sub_domain) @PETSc.Log.EventDecorator() - def local_to_global_map(self, bcs, lgmap=None): + def local_to_global_map(self, bcs, lgmap=None, mat_type=None): r"""Return a map from process local dof numbering to global dof numbering. - If BCs is provided, mask out those dofs which match the BC nodes.""" + Parameters + ---------- + bcs: [firedrake.bcs.BCBase] + If provided, mask out those dofs which match the BC nodes. + lgmap: PETSc.LGMap + The base local-to-global map, which might be partially masked. + mat_type: str + The matrix assembly type. This is required as different matrix types + handle the LGMap differently for MixedFunctionSpace. + + Note + ---- + For a :func:`.VectorFunctionSpace` or :func:`.TensorFunctionSpace` the returned + LGMap will be the scalar one, unless the bcs are imposed on a particular component. 
+ For a :class:`MixedFunctionSpace` the returned LGMap is unblocked, + unless mat_type == "is". + + Returns + ------- + PETSc.LGMap + A local-to-global map with masked BC dofs. + """ # Caching these things is too complicated, since it depends # not just on the bcs, but also the parent space, and anything # this space has been recursively split out from [e.g. inside @@ -856,10 +900,16 @@ def local_to_global_map(self, bcs, lgmap=None): bsize = lgmap.getBlockSize() assert bsize == self.block_size else: - # MatBlock case, LGMap is already unrolled. - indices = lgmap.block_indices.copy() + # MatBlock case, the LGMap is implementation dependent bsize = lgmap.getBlockSize() - unblocked = True + assert bsize == self.block_size + if mat_type == "is": + indices = lgmap.indices.copy() + unblocked = False + else: + # LGMap is already unrolled + indices = lgmap.block_indices.copy() + unblocked = True nodes = [] for bc in bcs: if bc.function_space().component is not None: @@ -927,6 +977,7 @@ def __init__(self, function_space, boundary_set=frozenset(), name=None): function_space.ufl_element(), label=self._label) self.function_space = function_space + self.topological = self self.name = name or function_space.name def set_shared_data(self): @@ -969,7 +1020,7 @@ def __hash__(self): return hash((self.mesh(), self.dof_dset, self.ufl_element(), self.boundary_set)) - def local_to_global_map(self, bcs, lgmap=None): + def local_to_global_map(self, bcs, lgmap=None, mat_type=None): return lgmap or self.dof_dset.lgmap def collapse(self): @@ -991,11 +1042,14 @@ class MixedFunctionSpace(object): but should instead use the functional interface provided by :func:`.MixedFunctionSpace`. 
""" - def __init__(self, spaces, name=None): + def __init__(self, spaces, mesh, name=None): super(MixedFunctionSpace, self).__init__() + if not isinstance(mesh, MeshSequenceTopology): + raise TypeError(f"mesh must be MeshSequenceTopology: got {mesh}") + if len(mesh) != len(spaces): + raise RuntimeError(f"len(mesh) ({len(mesh)}) != len(spaces) ({len(spaces)})") self._spaces = tuple(IndexedFunctionSpace(i, s, self) for i, s in enumerate(spaces)) - mesh, = set(s.mesh() for s in spaces) self._ufl_function_space = ufl.FunctionSpace(mesh.ufl_mesh(), finat.ufl.MixedElement(*[s.ufl_element() for s in spaces])) self.name = name or "_".join(str(s.name) for s in spaces) @@ -1007,7 +1061,6 @@ def __init__(self, spaces, name=None): self._subspaces = {} self._mesh = mesh self.comm = mesh.comm - self._comm = mpi.internal_comm(self.node_set.comm, self) # These properties are so a mixed space can behave like a normal FunctionSpace. index = None @@ -1042,19 +1095,12 @@ def __ne__(self, other): def __hash__(self): return hash(tuple(self)) - @utils.cached_property + @cached_property def subspaces(self): r"""The list of :class:`FunctionSpace`\s of which this :class:`MixedFunctionSpace` is composed.""" return self._spaces - @property - def subfunctions(self): - import warnings - warnings.warn("The 'subfunctions' property is deprecated for function spaces, please use the " - "'subspaces' property instead", category=FutureWarning) - return self.subspaces - def sub(self, i): r"""Return the `i`th :class:`FunctionSpace` in this :class:`MixedFunctionSpace`.""" @@ -1085,21 +1131,21 @@ def __repr__(self): def __str__(self): return "MixedFunctionSpace(%s)" % ", ".join(str(s) for s in self) - @utils.cached_property + @cached_property def value_size(self): r"""Return the sum of the :attr:`FunctionSpace.value_size`\s of the :class:`FunctionSpace`\s this :class:`MixedFunctionSpace` is composed of.""" return sum(fs.value_size for fs in self._spaces) - @utils.cached_property + @cached_property def 
node_count(self): r"""Return a tuple of :attr:`FunctionSpace.node_count`\s of the :class:`FunctionSpace`\s of which this :class:`MixedFunctionSpace` is composed.""" return tuple(fs.node_count for fs in self._spaces) - @utils.cached_property + @cached_property def dof_count(self): r"""Return a tuple of :attr:`FunctionSpace.dof_count`\s of the :class:`FunctionSpace`\s of which this :class:`MixedFunctionSpace` is @@ -1112,7 +1158,7 @@ def dim(self): See also :attr:`FunctionSpace.dof_count` and :attr:`FunctionSpace.node_count`.""" return self.dof_dset.layout_vec.getSize() - @utils.cached_property + @cached_property def node_set(self): r"""A :class:`pyop2.types.set.MixedSet` containing the nodes of this :class:`MixedFunctionSpace`. This is composed of the @@ -1122,7 +1168,7 @@ def node_set(self): are stored at each node.""" return op2.MixedSet(s.node_set for s in self._spaces) - @utils.cached_property + @cached_property def dof_dset(self): r"""A :class:`pyop2.types.dataset.MixedDataSet` containing the degrees of freedom of this :class:`MixedFunctionSpace`. This is composed of the @@ -1173,7 +1219,7 @@ def exterior_facet_node_map(self): function space nodes.""" return op2.MixedMap(s.exterior_facet_node_map() for s in self) - def local_to_global_map(self, bcs): + def local_to_global_map(self, bcs, lgmap=None, mat_type=None): r"""Return a map from process local dof numbering to global dof numbering. 
If BCs is provided, mask out those dofs which match the BC nodes.""" @@ -1189,7 +1235,7 @@ def make_dat(self, val=None, valuetype=None, name=None): return op2.MixedDat(s.make_dat(v, valuetype, "%s[cmpt-%d]" % (name, i)) for i, (s, v) in enumerate(zip(self._spaces, val))) - @utils.cached_property + @cached_property def dm(self): r"""A PETSc DM describing the data layout for fieldsplit solvers.""" dm = self._dm() @@ -1199,11 +1245,13 @@ def dm(self): def _dm(self): from firedrake.mg.utils import get_level dm = self.dof_dset.dm - _, level = get_level(self.mesh()) + # TODO: Think harder. + m = self.mesh()[0] + _, level = get_level(m) dmhooks.attach_hooks(dm, level=level) return dm - @utils.cached_property + @cached_property def _ises(self): return self.dof_dset.field_ises @@ -1227,7 +1275,7 @@ def __new__(cls, mesh, element, name=None): topology = mesh.topology self = super(ProxyFunctionSpace, cls).__new__(cls) if mesh is not topology: - return WithGeometry.create(self, mesh) + return WithGeometry(self, mesh) else: return self @@ -1282,7 +1330,7 @@ def __new__(cls, function_space, boundary_set=frozenset(), name=None): topology = function_space._mesh.topology self = super(ProxyRestrictedFunctionSpace, cls).__new__(cls) if function_space._mesh is not topology: - return WithGeometry.create(self, function_space._mesh) + return WithGeometry(self, function_space._mesh) else: return self @@ -1376,7 +1424,7 @@ def __eq__(self, other): if not isinstance(other, RealFunctionSpace): return False # FIXME: Think harder about equality - return self.mesh() is other.mesh() and \ + return self.mesh() == other.mesh() and \ self.ufl_element() == other.ufl_element() def __ne__(self, other): @@ -1394,7 +1442,7 @@ def make_dof_dset(self): def make_dat(self, val=None, valuetype=None, name=None): r"""Return a newly allocated :class:`pyop2.types.glob.Global` representing the data for a :class:`.Function` on this space.""" - return op2.Global(self.block_size, val, valuetype, name, self._comm) + 
return op2.Global(self.block_size, val, valuetype, name, self.comm) def entity_node_map(self, source_mesh, source_integral_type, source_subdomain_id, source_all_integer_subdomain_ids): return None @@ -1419,19 +1467,6 @@ def top_nodes(self): ":class:`RealFunctionSpace` objects have no bottom nodes." return None - def local_to_global_map(self, bcs, lgmap=None): + def local_to_global_map(self, bcs, lgmap=None, mat_type=None): assert len(bcs) == 0 return None - - -@dataclass -class FunctionSpaceCargo: - """Helper class carrying data for a :class:`WithGeometryBase`. - - It is required because it permits Firedrake to have stripped forms - that still know Firedrake-specific information (e.g. that they are a - component of a parent function space). - """ - - topological: FunctionSpace - parent: Optional[WithGeometryBase] diff --git a/firedrake/halo.py b/firedrake/halo.py index 7e44a9fc7e..1cfc27ad76 100644 --- a/firedrake/halo.py +++ b/firedrake/halo.py @@ -1,12 +1,12 @@ -from pyop2 import mpi, op2, utils +from pyop2 import op2 from mpi4py import MPI import numpy -from functools import partial +from functools import partial, cached_property from firedrake.petsc import PETSc +from firedrake.utils import ScalarType, complex_mode import firedrake.cython.dmcommon as dmcommon - _MPI_types = {} @@ -55,13 +55,16 @@ def _get_dtype(datatype): base, combiner, _ = datatype.decode() while combiner == "DUP": base, combiner, _ = base.decode() - if combiner != "CONTIGUOUS": - raise RuntimeError("Can only handle contiguous types") + # Allow for "NAMED", too, for complex scalar {MAX, MIN}. 
+ if not (combiner == "CONTIGUOUS" or (complex_mode and combiner == "NAMED")): + raise RuntimeError( + f"Can only handle contiguous types or named types for complex scalar: " + f"found combiner={combiner}" + ) try: tdict = MPI.__TypeDict__ except AttributeError: tdict = MPI._typedict - tdict = dict((v.py2f(), k) for k, v in tdict.items()) try: base = tdict[base.py2f()] @@ -93,15 +96,14 @@ class Halo(op2.Halo): def __init__(self, dm, section, comm): super(Halo, self).__init__() self.comm = comm - self._comm = mpi.internal_comm(comm, self) # Use a DM to create the halo SFs if MPI.Comm.Compare(comm, dm.comm.tompi4py()) not in {MPI.CONGRUENT, MPI.IDENT}: raise ValueError("Communicator used to create `Halo` must be at least congruent to the communicator used to create the mesh") - self.dm = PETSc.DMShell().create(self._comm) + self.dm = PETSc.DMShell().create(self.comm) self.dm.setPointSF(dm.getPointSF()) self.dm.setDefaultSection(section) - @utils.cached_property + @cached_property def sf(self): sf = dmcommon.create_halo_exchange_sf(self.dm) sf.setFromOptions() @@ -109,11 +111,11 @@ def sf(self): raise RuntimeError("Windowed SFs expose bugs in OpenMPI (use -sf_type basic)") return sf - @utils.cached_property + @cached_property def comm(self): return self.comm - @utils.cached_property + @cached_property def local_to_global_numbering(self): lsec = self.dm.getDefaultSection() gsec = self.dm.getDefaultGlobalSection() @@ -140,13 +142,16 @@ def local_to_global_begin(self, dat, insert_mode): assert insert_mode in {op2.INC, op2.MIN, op2.MAX}, "%s LtoG not supported" % insert_mode if self.comm.size == 1: return + complex_type = complex_mode and dat.dtype == ScalarType mtype, builtin = _get_mtype(dat) - op = {(False, op2.INC): MPI.SUM, - (True, op2.INC): MPI.SUM, - (False, op2.MIN): _contig_min_op, - (True, op2.MIN): MPI.MIN, - (False, op2.MAX): _contig_max_op, - (True, op2.MAX): MPI.MAX}[(builtin, insert_mode)] + op = { + (False, op2.INC): MPI.SUM, + (True, op2.INC): MPI.SUM, + 
(False, op2.MIN): _contig_min_op, + (True, op2.MIN): _contig_min_op if complex_type else MPI.MIN, + (False, op2.MAX): _contig_max_op, + (True, op2.MAX): _contig_max_op if complex_type else MPI.MAX, + }[(builtin, insert_mode)] self.sf.reduceBegin(mtype, dat._data, dat._data, op) @PETSc.Log.EventDecorator() @@ -154,11 +159,14 @@ def local_to_global_end(self, dat, insert_mode): assert insert_mode in {op2.INC, op2.MIN, op2.MAX}, "%s LtoG not supported" % insert_mode if self.comm.size == 1: return + complex_type = complex_mode and dat.dtype == ScalarType mtype, builtin = _get_mtype(dat) - op = {(False, op2.INC): MPI.SUM, - (True, op2.INC): MPI.SUM, - (False, op2.MIN): _contig_min_op, - (True, op2.MIN): MPI.MIN, - (False, op2.MAX): _contig_max_op, - (True, op2.MAX): MPI.MAX}[(builtin, insert_mode)] + op = { + (False, op2.INC): MPI.SUM, + (True, op2.INC): MPI.SUM, + (False, op2.MIN): _contig_min_op, + (True, op2.MIN): _contig_min_op if complex_type else MPI.MIN, + (False, op2.MAX): _contig_max_op, + (True, op2.MAX): _contig_max_op if complex_type else MPI.MAX, + }[(builtin, insert_mode)] self.sf.reduceEnd(mtype, dat._data, dat._data, op) diff --git a/firedrake/interpolation.py b/firedrake/interpolation.py index c4d0c8f6ae..6f363218a1 100644 --- a/firedrake/interpolation.py +++ b/firedrake/interpolation.py @@ -2,447 +2,409 @@ import os import tempfile import abc -import warnings -from collections.abc import Iterable -from typing import Literal -from functools import partial, singledispatch -from typing import Hashable - -import FIAT -import ufl -import finat.ufl -from ufl.algorithms import extract_arguments, extract_coefficients, replace -from ufl.domain import as_domain, extract_unique_domain + +from functools import cached_property, partial +from typing import Hashable, Literal, Callable, Iterable +from dataclasses import asdict, dataclass +from numbers import Number + +from ufl.algorithms import extract_arguments, replace +from ufl.domain import extract_unique_domain 
+from ufl.classes import Expr +from ufl.duals import is_dual +from ufl.constantvalue import zero, as_ufl +from ufl.form import ZeroBaseForm, BaseForm +from ufl.core.interpolate import Interpolate as UFLInterpolate from pyop2 import op2 from pyop2.caching import memory_and_disk_cache -from finat.element_factory import create_element, as_fiat_cell -from tsfc import compile_expression_dual_evaluation -from tsfc.ufl_utils import extract_firedrake_constants, hash_expr +from finat.ufl import TensorElement, VectorElement, MixedElement, FiniteElementBase +from finat.element_factory import create_element -import gem -import finat +from tsfc.driver import compile_expression_dual_evaluation +from tsfc.ufl_utils import extract_firedrake_constants, hash_expr -import firedrake -import firedrake.bcs -from firedrake import tsfc_interface, utils, functionspaceimpl -from firedrake.ufl_expr import Argument, Coargument, action, adjoint as expr_adjoint -from firedrake.mesh import MissingPointsBehaviour, VertexOnlyMeshMissingPointsError, VertexOnlyMeshTopology +from firedrake.utils import IntType, ScalarType, known_pyop2_safe, tuplify +from firedrake.pointeval_utils import runtime_quadrature_element +from firedrake.tsfc_interface import extract_numbered_coefficients, _cachedir +from firedrake.ufl_expr import Argument, Coargument, TrialFunction, TestFunction, action +from firedrake.mesh import MissingPointsBehaviour, VertexOnlyMeshTopology, MeshGeometry, MeshTopology, VertexOnlyMesh from firedrake.petsc import PETSc -from firedrake.halo import _get_mtype as get_dat_mpi_type +from firedrake.halo import _get_mtype +from firedrake.functionspaceimpl import WithGeometry +from firedrake.matrix import MatrixBase, AssembledMatrix +from firedrake.bcs import DirichletBC +from firedrake.formmanipulation import split_form +from firedrake.functionspace import VectorFunctionSpace, TensorFunctionSpace, FunctionSpace +from firedrake.constant import Constant +from firedrake.function import Function from 
firedrake.cofunction import Cofunction +from firedrake.exceptions import ( + DofNotDefinedError, VertexOnlyMeshMissingPointsError, NonUniqueMeshSequenceError, + DofTypeError, +) + from mpi4py import MPI -from pyadjoint import stop_annotating, no_annotations +from pyadjoint.tape import stop_annotating, no_annotations __all__ = ( "interpolate", - "Interpolator", "Interpolate", - "DofNotDefinedError", - "CrossMeshInterpolator", - "SameMeshInterpolator", + "get_interpolator", + "InterpolateOptions", + "Interpolator" ) -class Interpolate(ufl.Interpolate): +@dataclass(kw_only=True) +class InterpolateOptions: + """Options for interpolation operations. + + Parameters + ---------- + subset : pyop2.types.set.Subset or None + An optional subset to apply the interpolation over. + Cannot, at present, be used when interpolating across meshes unless + the target mesh is a :func:`.VertexOnlyMesh`. + access : pyop2.types.access.Access or None + The pyop2 access descriptor for combining updates to shared + DoFs. Possible values include ``WRITE``, ``MIN``, ``MAX``, and ``INC``. + Only ``WRITE`` is supported at present when interpolating across meshes + unless the target mesh is a :func:`.VertexOnlyMesh`. Only ``INC`` is + supported for the matrix-free adjoint interpolation. + allow_missing_dofs : bool + For interpolation across meshes: allow degrees of freedom (aka DoFs/nodes) + in the target mesh that cannot be defined on the source mesh. + For example, where nodes are point evaluations, points in the target mesh + that are not in the source mesh. When ``False`` this raises a ``ValueError`` + should this occur. When ``True`` the corresponding values are either + (a) unchanged if some ``output`` is given to the :meth:`interpolate` method + or (b) set to zero. + Can be overwritten with the ``default_missing_val`` kwarg of :meth:`interpolate`. + This does not affect adjoint interpolation. 
Ignored if interpolating within + the same mesh or onto a :func:`.VertexOnlyMesh` (the behaviour of a + :func:`.VertexOnlyMesh` in this scenario is, at present, set when it is created). + default_missing_val : float or None + For interpolation across meshes: the optional value to assign to DoFs + in the target mesh that are outside the source mesh. If this is not set + then the values are either (a) unchanged if some ``output`` is given to + the :meth:`interpolate` method or (b) set to zero. + Ignored if interpolating within the same mesh or onto a :func:`.VertexOnlyMesh`. + """ + subset: op2.Subset | None = None + access: Literal[op2.WRITE, op2.MIN, op2.MAX, op2.INC] | None = None + allow_missing_dofs: bool = False + default_missing_val: float | None = None + - def __init__(self, expr, v, - subset=None, - access=None, - allow_missing_dofs=False, - default_missing_val=None, - matfree=True): +class Interpolate(UFLInterpolate): + + def __init__(self, expr: Expr, V: WithGeometry | BaseForm, **kwargs): """Symbolic representation of the interpolation operator. Parameters ---------- - expr : ufl.core.expr.Expr or ufl.BaseForm + expr : ufl.core.expr.Expr The UFL expression to interpolate. - v : firedrake.functionspaceimpl.WithGeometryBase or firedrake.ufl_expr.Coargument + V : firedrake.functionspaceimpl.WithGeometry or ufl.BaseForm The function space to interpolate into or the coargument defined on the dual of the function space to interpolate into. - subset : pyop2.types.set.Subset - An optional subset to apply the interpolation over. - Cannot, at present, be used when interpolating across meshes unless - the target mesh is a :func:`.VertexOnlyMesh`. - access : pyop2.types.access.Access - The pyop2 access descriptor for combining updates to shared - DoFs. Possible values include ``WRITE`` and ``INC``. Only ``WRITE`` is - supported at present when interpolating across meshes. See note in - :func:`.interpolate` if changing this from default. 
- allow_missing_dofs : bool - For interpolation across meshes: allow degrees of freedom (aka DoFs/nodes) - in the target mesh that cannot be defined on the source mesh. - For example, where nodes are point evaluations, points in the target mesh - that are not in the source mesh. When ``False`` this raises a ``ValueError`` - should this occur. When ``True`` the corresponding values are either - (a) unchanged if some ``output`` is given to the :meth:`interpolate` method - or (b) set to zero. - Can be overwritten with the ``default_missing_val`` kwarg of :meth:`interpolate`. - This does not affect adjoint interpolation. Ignored if interpolating within - the same mesh or onto a :func:`.VertexOnlyMesh` (the behaviour of a - :func:`.VertexOnlyMesh` in this scenario is, at present, set when it is created). - default_missing_val : float - For interpolation across meshes: the optional value to assign to DoFs - in the target mesh that are outside the source mesh. If this is not set - then the values are either (a) unchanged if some ``output`` is given to - the :meth:`interpolate` method or (b) set to zero. - Ignored if interpolating within the same mesh or onto a :func:`.VertexOnlyMesh`. - matfree : bool - If ``False``, then construct the permutation matrix for interpolating - between a VOM and its input ordering. Defaults to ``True`` which uses SF broadcast - and reduce operations. + **kwargs + Additional interpolation options. See :class:`InterpolateOptions` + for available parameters and their descriptions. 
""" - # Check function space - expr = ufl.as_ufl(expr) - if isinstance(v, functionspaceimpl.WithGeometry): - expr_args = extract_arguments(expr) - is_adjoint = len(expr_args) and expr_args[0].number() == 0 - v = Argument(v.dual(), 1 if is_adjoint else 0) + expr = as_ufl(expr) + expr_args = expr.arguments()[1:] if isinstance(expr, BaseForm) else extract_arguments(expr) + expr_arg_numbers = {arg.number() for arg in expr_args} + self.is_adjoint = expr_arg_numbers == {0} + if isinstance(V, WithGeometry): + # Need to create a Firedrake Argument so that it has a .function_space() method + V = Argument(V.dual(), 1 if self.is_adjoint else 0) - V = v.arguments()[0].function_space() - if len(expr.ufl_shape) != len(V.value_shape): - raise RuntimeError(f'Rank mismatch: Expression rank {len(expr.ufl_shape)}, FunctionSpace rank {len(V.value_shape)}') + self.target_space = V.arguments()[0].function_space() + if expr.ufl_shape != self.target_space.value_shape: + raise ValueError(f"Shape mismatch: Expression shape {expr.ufl_shape}, FunctionSpace shape {self.target_space.value_shape}.") - if expr.ufl_shape != V.value_shape: - raise RuntimeError('Shape mismatch: Expression shape {expr.ufl_shape}, FunctionSpace shape {V.value_shape}') - super().__init__(expr, v) + super().__init__(expr, V) - # -- Interpolate data (e.g. 
`subset` or `access`) -- # - self.interp_data = {"subset": subset, - "access": access, - "allow_missing_dofs": allow_missing_dofs, - "default_missing_val": default_missing_val, - "matfree": matfree} + self._options = InterpolateOptions(**kwargs) - function_space = ufl.Interpolate.ufl_function_space + function_space = UFLInterpolate.ufl_function_space - def _ufl_expr_reconstruct_(self, expr, v=None, **interp_data): - interp_data = interp_data or self.interp_data.copy() - return ufl.Interpolate._ufl_expr_reconstruct_(self, expr, v=v, **interp_data) + def _ufl_expr_reconstruct_( + self, expr: Expr, v: WithGeometry | BaseForm | None = None, **interp_data + ): + interp_data = interp_data or asdict(self.options) + return UFLInterpolate._ufl_expr_reconstruct_(self, expr, v=v, **interp_data) + @property + def options(self) -> InterpolateOptions: + """Access the interpolation options. -@PETSc.Log.EventDecorator() -def interpolate(expr, V, subset=None, access=None, allow_missing_dofs=False, default_missing_val=None, matfree=True): - """Returns a UFL expression for the interpolation operation of ``expr`` into ``V``. + Returns + ------- + InterpolateOptions + An :class:`InterpolateOptions` instance containing the interpolation options. + """ + return self._options - :arg expr: a UFL expression. - :arg V: a :class:`.FunctionSpace` to interpolate into, or a :class:`.Cofunction`, - or :class:`.Coargument`, or a :class:`ufl.form.Form` with one argument (a one-form). - If a :class:`.Cofunction` or a one-form is provided, then we do adjoint interpolation. - :kwarg subset: An optional :class:`pyop2.types.set.Subset` to apply the - interpolation over. Cannot, at present, be used when interpolating - across meshes unless the target mesh is a :func:`.VertexOnlyMesh`. - :kwarg access: The pyop2 access descriptor for combining updates to shared - DoFs. Possible values include ``WRITE`` and ``INC``. 
Only ``WRITE`` is - supported at present when interpolating across meshes unless the target - mesh is a :func:`.VertexOnlyMesh`. See note below. - :kwarg allow_missing_dofs: For interpolation across meshes: allow - degrees of freedom (aka DoFs/nodes) in the target mesh that cannot be - defined on the source mesh. For example, where nodes are point - evaluations, points in the target mesh that are not in the source mesh. - When ``False`` this raises a ``ValueError`` should this occur. When - ``True`` the corresponding values are either (a) unchanged if - some ``output`` is given to the :meth:`interpolate` method or (b) set - to zero. In either case, if ``default_missing_val`` is specified, that - value is used. This does not affect adjoint interpolation. Ignored if - interpolating within the same mesh or onto a :func:`.VertexOnlyMesh` - (the behaviour of a :func:`.VertexOnlyMesh` in this scenario is, at - present, set when it is created). - :kwarg default_missing_val: For interpolation across meshes: the optional - value to assign to DoFs in the target mesh that are outside the source - mesh. If this is not set then the values are either (a) unchanged if - some ``output`` is given to the :meth:`interpolate` method or (b) set - to zero. Ignored if interpolating within the same mesh or onto a - :func:`.VertexOnlyMesh`. - :kwarg matfree: If ``False``, then construct the permutation matrix for interpolating - between a VOM and its input ordering. Defaults to ``True`` which uses SF broadcast - and reduce operations. - :returns: A symbolic :class:`.Interpolate` object - - .. note:: - - If you use an access descriptor other than ``WRITE``, the - behaviour of interpolation changes if interpolating into a - function space, or an existing function. If the former, then - the newly allocated function will be initialised with - appropriate values (e.g. for MIN access, it will be initialised - with MAX_FLOAT). 
On the other hand, if you provide a function, - then it is assumed that its values should take part in the - reduction (hence using MIN will compute the MIN between the - existing values and any new values). - """ - if isinstance(V, (Cofunction, Coargument)): - dual_arg = V - elif isinstance(V, ufl.BaseForm): - rank = len(V.arguments()) - if rank == 1: - dual_arg = V - else: - raise TypeError(f"Expected a one-form, provided form had {rank} arguments") - elif isinstance(V, functionspaceimpl.WithGeometry): - dual_arg = Coargument(V.dual(), 0) - expr_args = extract_arguments(ufl.as_ufl(expr)) - if expr_args and expr_args[0].number() == 0: - warnings.warn("Passing argument numbered 0 in expression for forward interpolation is deprecated. " - "Use a TrialFunction in the expression.") - v, = expr_args - expr = replace(expr, {v: v.reconstruct(number=1)}) - else: - raise TypeError(f"V must be a FunctionSpace, Cofunction, Coargument or one-form, not a {type(V).__name__}") + @cached_property + def _interpolator(self): + """Access the numerical interpolator. - interp = Interpolate(expr, dual_arg, - subset=subset, access=access, - allow_missing_dofs=allow_missing_dofs, - default_missing_val=default_missing_val, - matfree=matfree) + Returns + ------- + Interpolator + An appropriate :class:`Interpolator` subclass for this + interpolation expression. + """ + arguments = self.arguments() + has_mixed_arguments = any(len(arg.function_space()) > 1 for arg in arguments) + if len(arguments) == 2 and has_mixed_arguments: + return MixedInterpolator(self) - return interp + operand, = self.ufl_operands + target_mesh = self.target_space.mesh() + try: + source_mesh = extract_unique_domain(operand) or target_mesh + except ValueError: + raise NotImplementedError( + "Interpolating an expression with no arguments defined on multiple meshes is not implemented yet." + ) -class Interpolator(abc.ABC): - """A reusable interpolation object. 
+ try: + target_mesh = target_mesh.unique() + source_mesh = source_mesh.unique() + except NonUniqueMeshSequenceError: + return MixedInterpolator(self) + + submesh_interp_implemented = ( + all(isinstance(m.topology, MeshTopology) for m in [target_mesh, source_mesh]) + and target_mesh.submesh_ancestors[-1] is source_mesh.submesh_ancestors[-1] + and target_mesh.topological_dimension == source_mesh.topological_dimension + ) + if target_mesh is source_mesh or submesh_interp_implemented: + return SameMeshInterpolator(self) - This object can be used to carry out the same interpolation - multiple times (for example in a timestepping loop). + if isinstance(target_mesh.topology, VertexOnlyMeshTopology): + if isinstance(source_mesh.topology, VertexOnlyMeshTopology): + return VomOntoVomInterpolator(self) + if target_mesh.geometric_dimension != source_mesh.geometric_dimension: + raise ValueError("Cannot interpolate onto a VertexOnlyMesh of a different geometric dimension.") + return SameMeshInterpolator(self) - Parameters - ---------- - expr - The underlying ufl.Interpolate or the operand to the ufl.Interpolate. - V - The :class:`.FunctionSpace` or :class:`.Function` to - interpolate into. - subset - An optional :class:`pyop2.types.set.Subset` to apply the - interpolation over. Cannot, at present, be used when interpolating - across meshes unless the target mesh is a :func:`.VertexOnlyMesh`. - freeze_expr - Set to True to prevent the expression being - re-evaluated on each call. Cannot, at present, be used when - interpolating across meshes unless the target mesh is a - :func:`.VertexOnlyMesh`. - access - The pyop2 access descriptor for combining updates to shared DoFs. - Only ``op2.WRITE`` is supported at present when interpolating across meshes. - Only ``op2.INC`` is supported for the matrix-free adjoint interpolation. - See note in :func:`.interpolate` if changing this from default. - bcs - An optional list of boundary conditions to zero-out in the - output function space. 
Interpolator rows or columns which are - associated with boundary condition nodes are zeroed out when this is - specified. - allow_missing_dofs - For interpolation across meshes: allow - degrees of freedom (aka DoFs/nodes) in the target mesh that cannot be - defined on the source mesh. For example, where nodes are point - evaluations, points in the target mesh that are not in the source mesh. - When ``False`` this raises a ``ValueError`` should this occur. When - ``True`` the corresponding values are either (a) unchanged if - some ``output`` is given to the :meth:`interpolate` method or (b) set - to zero. Can be overwritten with the ``default_missing_val`` kwarg - of :meth:`interpolate`. This does not affect adjoint interpolation. - Ignored if interpolating within the same mesh or onto a - :func:`.VertexOnlyMesh` (the behaviour of a :func:`.VertexOnlyMesh` in - this scenario is, at present, set when it is created). - matfree - If ``False``, then construct the permutation matrix for interpolating - between a VOM and its input ordering. Defaults to ``True`` which uses SF broadcast - and reduce operations. + if has_mixed_arguments or len(self.target_space) > 1: + return MixedInterpolator(self) - Notes - ----- + return CrossMeshInterpolator(self) + + +@PETSc.Log.EventDecorator() +def interpolate(expr: Expr, V: WithGeometry | BaseForm, **kwargs) -> Interpolate: + """Returns a UFL expression for the interpolation operation of ``expr`` into ``V``. - The :class:`Interpolator` holds a reference to the provided - arguments (such that they won't be collected until the - :class:`Interpolator` is also collected). + Parameters + ---------- + expr : ufl.core.expr.Expr + The UFL expression to interpolate. + V : firedrake.functionspaceimpl.WithGeometry or ufl.BaseForm + The function space to interpolate into or the coargument defined + on the dual of the function space to interpolate into. + **kwargs + Additional interpolation options. 
See :class:`InterpolateOptions` + for available parameters and their descriptions. + Returns + ------- + Interpolate + A symbolic :class:`Interpolate` object representing the interpolation operation. """ + return Interpolate(expr, V, **kwargs) - def __new__(cls, expr, V, **kwargs): - V_target = V if isinstance(V, ufl.FunctionSpace) else V.function_space() - if not isinstance(expr, ufl.Interpolate): - expr = interpolate(expr, V_target) - arguments = expr.arguments() - has_mixed_arguments = any(len(a.function_space()) > 1 for a in arguments) - if len(arguments) == 2 and has_mixed_arguments: - return object.__new__(MixedInterpolator) - - operand, = expr.ufl_operands - target_mesh = as_domain(V) - source_mesh = extract_unique_domain(operand) or target_mesh - submesh_interp_implemented = \ - all(isinstance(m.topology, firedrake.mesh.MeshTopology) for m in [target_mesh, source_mesh]) and \ - target_mesh.submesh_ancesters[-1] is source_mesh.submesh_ancesters[-1] and \ - target_mesh.topological_dimension() == source_mesh.topological_dimension() - if target_mesh is source_mesh or submesh_interp_implemented: - return object.__new__(SameMeshInterpolator) - else: - if isinstance(target_mesh.topology, VertexOnlyMeshTopology): - return object.__new__(SameMeshInterpolator) - elif has_mixed_arguments or len(V_target) > 1: - return object.__new__(MixedInterpolator) - else: - return object.__new__(CrossMeshInterpolator) +class Interpolator(abc.ABC): + """Base class for calculating interpolation. Should not be instantiated directly; use the + :func:`get_interpolator` function. 
- def __init__( - self, - expr: ufl.Interpolate | ufl.classes.Expr, - V: ufl.FunctionSpace | firedrake.function.Function, - subset: op2.Subset | None = None, - freeze_expr: bool = False, - access: Literal[op2.WRITE, op2.MIN, op2.MAX, op2.INC] | None = None, - bcs: Iterable[firedrake.bcs.BCBase] | None = None, - allow_missing_dofs: bool = False, - matfree: bool = True - ): - if not isinstance(expr, ufl.Interpolate): - expr = interpolate(expr, V if isinstance(V, ufl.FunctionSpace) else V.function_space()) - dual_arg, operand = expr.argument_slots() - self.ufl_interpolate = expr - self.expr = operand - self.V = V - self.subset = subset - self.freeze_expr = freeze_expr - self.bcs = bcs - self._allow_missing_dofs = allow_missing_dofs - self.matfree = matfree - self.callable = None - - # TODO CrossMeshInterpolator and VomOntoVomXXX are not yet aware of - # self.ufl_interpolate (which carries the dual argument). - # See github issue https://github.com/firedrakeproject/firedrake/issues/4592 - target_mesh = as_domain(V) - source_mesh = extract_unique_domain(operand) or target_mesh - vom_onto_other_vom = ((source_mesh is not target_mesh) - and isinstance(self, SameMeshInterpolator) - and isinstance(source_mesh.topology, VertexOnlyMeshTopology) - and isinstance(target_mesh.topology, VertexOnlyMeshTopology)) - if isinstance(self, CrossMeshInterpolator) or vom_onto_other_vom: - # For bespoke interpolation, we currently rely on different assembly procedures: - # 1) Interpolate(Argument(V1, 1), Argument(V2.dual(), 0)) -> Forward operator (2-form) - # 2) Interpolate(Argument(V1, 0), Argument(V2.dual(), 1)) -> Adjoint operator (2-form) - # 3) Interpolate(Coefficient(V1), Argument(V2.dual(), 0)) -> Forward action (1-form) - # 4) Interpolate(Argument(V1, 0), Cofunction(V2.dual()) -> Adjoint action (1-form) - # 5) Interpolate(Coefficient(V1), Cofunction(V2.dual()) -> Double action (0-form) - - # CrossMeshInterpolator._interpolate only supports forward interpolation (cases 1 and 3). 
- # For case 2, we first redundantly assemble case 1 and then construct the transpose. - # For cases 4 and 5, we take the forward Interpolate that corresponds to dropping the Cofunction, - # and we separately compute the action against the dropped Cofunction within assemble(). - if not isinstance(dual_arg, ufl.Coargument): - # Drop the Cofunction - expr = expr._ufl_expr_reconstruct_(operand, dual_arg.function_space().dual()) - expr_args = extract_arguments(operand) - if expr_args and expr_args[0].number() == 0: - # Construct the symbolic forward Interpolate - v0, v1 = expr.arguments() - expr = ufl.replace(expr, {v0: v0.reconstruct(number=v1.number()), - v1: v1.reconstruct(number=v0.number())}) + Parameters + ---------- + expr : Interpolate + The symbolic interpolation expression. + """ + def __init__(self, expr: Interpolate): dual_arg, operand = expr.argument_slots() - self.expr_renumbered = operand - self.ufl_interpolate_renumbered = expr + self.ufl_interpolate = expr + """The symbolic UFL Interpolate expression.""" + self.interpolate_args = expr.arguments() + """Arguments of the Interpolate expression.""" + self.rank = len(self.interpolate_args) + """Number of arguments in the Interpolate expression.""" + self.operand = operand + """The primal argument slot of the Interpolate expression.""" + self.dual_arg = dual_arg + """The dual argument slot of the Interpolate expression.""" + self.target_space = dual_arg.function_space().dual() + """The primal space we are interpolating into.""" + # Delay calling .unique() because MixedInterpolator is fine with MeshSequence + self.target_mesh = self.target_space.mesh() + """The domain we are interpolating into.""" - if not isinstance(dual_arg, ufl.Coargument): - # Matrix-free assembly of 0-form or 1-form requires INC access - if access and access != op2.INC: - raise ValueError("Matfree adjoint interpolation requires INC access") - access = op2.INC - elif access is None: - # Default access for forward 1-form or 2-form (forward 
and adjoint) - access = op2.WRITE - self.access = access + try: + source_mesh = extract_unique_domain(operand) + except ValueError: + source_mesh = extract_unique_domain(operand, expand_mesh_sequence=False) + self.source_mesh = source_mesh or self.target_mesh + """The domain we are interpolating from.""" + + # Interpolation options + self.subset = expr.options.subset + self.allow_missing_dofs = expr.options.allow_missing_dofs + self.default_missing_val = expr.options.default_missing_val + self.access = expr.options.access - def interpolate(self, *function, transpose=None, adjoint=False, default_missing_val=None): - """ - .. warning:: + @abc.abstractmethod + def _get_callable( + self, + tensor: Function | Cofunction | MatrixBase | None = None, + bcs: Iterable[DirichletBC] | None = None, + mat_type: Literal["aij", "baij", "nest", "matfree"] | None = None, + sub_mat_type: Literal["aij", "baij"] | None = None, + ) -> Callable[[], Function | Cofunction | PETSc.Mat | Number]: + """Return a callable to perform interpolation. + + If ``self.rank == 2``, then the callable must return a PETSc matrix. + If ``self.rank == 1``, then the callable must return a ``Function`` + or ``Cofunction`` (in the forward and adjoint cases respectively). + If ``self.rank == 0``, then the callable must return a number. - This method has been removed. Use the function :func:`interpolate` to return a symbolic - :class:`Interpolate` object. + Parameters + ---------- + tensor + Optional tensor to store the result in, by default None. + bcs + An optional list of boundary conditions to zero-out in the + output function space. Interpolator rows or columns which are + associated with boundary condition nodes are zeroed out when this is + specified. By default None. + mat_type + The PETSc matrix type to use when assembling a rank 2 interpolation. + For cross-mesh interpolation, only ``"aij"`` is supported. For same-mesh + interpolation, ``"aij"`` and ``"baij"`` are supported. 
For same/cross mesh interpolation + between :func:`.MixedFunctionSpace`, ``"aij"`` and ``"nest"`` are supported. + For interpolation between input-ordering linked :func:`.VertexOnlyMesh`, + ``"aij"``, ``"baij"``, and ``"matfree"`` are supported. + sub_mat_type + The PETSc sub-matrix type to use when assembling a rank 2 interpolation between + :func:`.MixedFunctionSpace` with ``mat_type="nest"``. Only ``"aij"`` and ``"baij"`` + are supported. """ - raise FutureWarning( - "The 'interpolate' method on `Interpolator` objects has been " - "removed. Use the `interpolate` function instead." - ) + pass + @property @abc.abstractmethod - def _interpolate(self, *args, **kwargs): + def _allowed_mat_types(self) -> set[Literal["aij", "baij", "nest", "matfree"]]: + """Returns a set of valid matrix types for assembly of two-forms. """ - Compute the interpolation operation of interest. + pass - .. note:: - This method is called when an :class:`Interpolate` object is being assembled. + def assemble( + self, + tensor: Function | Cofunction | MatrixBase | None = None, + bcs: Iterable[DirichletBC] | None = None, + mat_type: Literal["aij", "baij", "nest", "matfree"] | None = None, + sub_mat_type: Literal["aij", "baij"] | None = None, + ) -> Function | Cofunction | MatrixBase | Number: + """Assemble the interpolation. The result depends on the rank (number of arguments) + of the :class:`Interpolate` expression: + + * rank 2: assemble the operator and return a matrix + * rank 1: assemble the action and return a function or cofunction + * rank 0: assemble the action and return a scalar by applying the dual argument + + Parameters + ---------- + tensor + Optional tensor to store the interpolated result. For rank 2 + expressions this is expected to be a subclass of + :class:`~firedrake.matrix.MatrixBase`. For rank 1 expressions + this is a :class:`~firedrake.function.Function` or :class:`~firedrake.cofunction.Cofunction`, + for forward and adjoint interpolation respectively. 
+ bcs + An optional list of boundary conditions to zero-out in the + output function space. Interpolator rows or columns which are + associated with boundary condition nodes are zeroed out when this is + specified. By default None. + mat_type + The PETSc matrix type to use when assembling a rank 2 interpolation. + For cross-mesh interpolation, only ``"aij"`` is supported. For same-mesh + interpolation, ``"aij"`` and ``"baij"`` are supported. For same/cross mesh interpolation + between :func:`.MixedFunctionSpace`, ``"aij"`` and ``"nest"`` are supported. + For interpolation between input-ordering linked :func:`.VertexOnlyMesh`, + ``"aij"``, ``"baij"``, and ``"matfree"`` are supported. + sub_mat_type + The PETSc sub-matrix type to use when assembling a rank 2 interpolation between + :func:`.MixedFunctionSpace` with ``mat_type="nest"``. Only ``"aij"`` and ``"baij"`` + are supported. + Returns + ------- + Function | Cofunction | MatrixBase | numbers.Number + The function, cofunction, matrix, or scalar resulting from the + interpolation. 
""" - pass + self._check_mat_type(mat_type) - def assemble(self, tensor=None, default_missing_val=None): - """Assemble the operator (or its action).""" - from firedrake.assemble import assemble - needs_adjoint = self.ufl_interpolate_renumbered != self.ufl_interpolate - arguments = self.ufl_interpolate.arguments() - if len(arguments) == 2: + result = self._get_callable(tensor=tensor, bcs=bcs, mat_type=mat_type, sub_mat_type=sub_mat_type)() + if self.rank == 2: # Assembling the operator - res = tensor.petscmat if tensor else PETSc.Mat() - # Get the interpolation matrix - op2mat = self.callable() - petsc_mat = op2mat.handle - if needs_adjoint: - # Out-of-place Hermitian transpose - petsc_mat.hermitianTranspose(out=res) - elif tensor: - petsc_mat.copy(tensor.petscmat) - else: - res = petsc_mat - return tensor or firedrake.AssembledMatrix(arguments, self.bcs, res) + assert isinstance(tensor, MatrixBase | None) + assert isinstance(result, PETSc.Mat) + if tensor: + result.copy(tensor.petscmat) + return tensor + return AssembledMatrix(self.interpolate_args, bcs, result) else: - # Assembling the action - cofunctions = () - if needs_adjoint: - # The renumbered Interpolate has dropped Cofunctions. - # We need to explicitly operate on them. - dual_arg, _ = self.ufl_interpolate.argument_slots() - if not isinstance(dual_arg, ufl.Coargument): - cofunctions = (dual_arg,) - - if needs_adjoint and len(arguments) == 0: - Iu = self._interpolate(default_missing_val=default_missing_val) - return assemble(ufl.Action(*cofunctions, Iu), tensor=tensor) - else: - return self._interpolate(*cofunctions, output=tensor, adjoint=needs_adjoint, - default_missing_val=default_missing_val) + assert isinstance(tensor, Function | Cofunction | None) + return tensor.assign(result) if tensor else result + def _check_mat_type( + self, + mat_type: Literal["aij", "baij", "nest", "matfree"] | None, + ) -> None: + """Check that the given mat_type is valid for this Interpolator. 
-class DofNotDefinedError(Exception): - r"""Raised when attempting to interpolate across function spaces where the - target function space contains degrees of freedom (i.e. nodes) which cannot - be defined in the source function space. This typically occurs when the - target mesh covers a larger domain than the source mesh. + Parameters + ---------- + mat_type + The PETSc matrix type to check. + + Raises + ------ + NotImplementedError + If the given mat_type is not supported for this Interpolator. + """ + if self.rank == 2 and mat_type not in self._allowed_mat_types: + raise NotImplementedError(f"Assembly of matrix type {mat_type} not implemented yet for {type(self).__name__}.") + + +def get_interpolator(expr: Interpolate) -> Interpolator: + """Create an Interpolator. - Attributes + Parameters ---------- - src_mesh : :func:`.Mesh` - The source mesh. - dest_mesh : :func:`.Mesh` - The destination mesh. + expr : Interpolate + Symbolic interpolation expression. + Returns + ------- + Interpolator + An appropriate :class:`Interpolator` subclass for the given + interpolation expression. """ - - def __init__(self, src_mesh, dest_mesh): - self.src_mesh = src_mesh - self.dest_mesh = dest_mesh - - def __str__(self): - return ( - f"The given target function space on domain {repr(self.dest_mesh)} " - "contains degrees of freedom which cannot cannot be defined in the " - f"source function space on domain {repr(self.src_mesh)}. " - "This may be because the target mesh covers a larger domain than the " - "source mesh. To disable this error, set allow_missing_dofs=True." - ) + return expr._interpolator class CrossMeshInterpolator(Interpolator): @@ -451,281 +413,251 @@ class CrossMeshInterpolator(Interpolator): For arguments, see :class:`.Interpolator`. 
""" - @no_annotations - def __init__( - self, - expr, - V, - subset=None, - freeze_expr=False, - access=None, - bcs=None, - allow_missing_dofs=False, - matfree=True - ): - if subset: - raise NotImplementedError("subset not implemented") - if freeze_expr: - # Probably just need to pass freeze_expr to the various - # interpolators for this to work. - raise NotImplementedError("freeze_expr not implemented") - if bcs: - raise NotImplementedError("bcs not implemented") - - # TODO check V.finat_element.is_lagrange() once https://github.com/firedrakeproject/fiat/pull/200 is released - target_element = V.ufl_element() - if not ((isinstance(target_element, finat.ufl.MixedElement) - and all(sub.mapping() == "identity" for sub in target_element.sub_elements)) - or target_element.mapping() == "identity"): - # Identity mapping between reference cell and physical coordinates - # implies point evaluation nodes. A more general version would - # require finding the global coordinates of all quadrature points - # of the target function space in the source mesh. + def __init__(self, expr: Interpolate): + super().__init__(expr) + if self.access and self.access != op2.WRITE: raise NotImplementedError( - "Can only interpolate into spaces with point evaluation nodes." + "Access other than op2.WRITE not implemented for cross-mesh interpolation." 
) - super().__init__(expr, V, subset, freeze_expr, access, bcs, allow_missing_dofs, matfree) + else: + self.access = op2.WRITE - if self.access != op2.WRITE: - raise NotImplementedError("access other than op2.WRITE not implemented") + if self.allow_missing_dofs: + self.missing_points_behaviour = MissingPointsBehaviour.IGNORE + else: + self.missing_points_behaviour = MissingPointsBehaviour.ERROR + + if self.source_mesh.unique().geometric_dimension != self.target_mesh.unique().geometric_dimension: + raise ValueError("Geometric dimensions of source and destination meshes must match.") + + # Interpolate into intermediate quadrature space for non-point-evaluation elements + if into_quadrature_space := not self.target_space.finat_element.has_pointwise_dual_basis: + self.original_target_space = self.target_space + r"""The original target space for interpolation, as specified by the user.""" + self.target_space = self.target_space.quadrature_space() + r"""The target space for the cross-mesh interpolation. Must have point-evaluation dofs. + If ``self.original_target_space`` does not have point-evaluation dofs, then this is + an intermediate quadrature space.""" + + self.into_quadrature_space = into_quadrature_space + + @cached_property + def _target_space_element(self) -> FiniteElementBase: + """The element of `self.target_space`. If `self.target_space` is tensor/vector valued, + the base scalar element. + + Returns + ------- + FiniteElementBase + The base element of `self.target_space`. 
+ """ + dest_element = self.target_space.ufl_element() + if isinstance(dest_element, MixedElement): + if isinstance(dest_element, VectorElement | TensorElement): + # In this case all sub elements are equal + return dest_element.sub_elements[0] + else: + raise NotImplementedError("Interpolation with MixedFunctionSpace requires MixedInterpolator.") + else: + # scalar fiat/finat element + return dest_element - expr = self.expr_renumbered - self.arguments = extract_arguments(expr) - self.nargs = len(self.arguments) + @cached_property + def _target_space_type(self) -> Callable[..., WithGeometry]: + """Returns a callable which returns a function space matching the type of `self.target_space`. - if self._allow_missing_dofs: - missing_points_behaviour = MissingPointsBehaviour.IGNORE + Returns + ------- + Callable + A callable which returns a :class:`.WithGeometry` matching the type of `self.target_space`. + """ + # Get the correct type of function space + shape = self.target_space.value_shape + if len(shape) == 0: + return FunctionSpace + elif len(shape) == 1: + return partial(VectorFunctionSpace, dim=shape[0]) else: - missing_points_behaviour = MissingPointsBehaviour.ERROR - - # setup - V_dest = V.function_space() if isinstance(V, firedrake.Function) else V - src_mesh = extract_unique_domain(expr) - dest_mesh = as_domain(V_dest) - src_mesh_gdim = src_mesh.geometric_dimension() - dest_mesh_gdim = dest_mesh.geometric_dimension() - if src_mesh_gdim != dest_mesh_gdim: - raise ValueError( - "geometric dimensions of source and destination meshes must match" - ) - self.src_mesh = src_mesh - self.dest_mesh = dest_mesh - - # Create a VOM at the nodes of V_dest in src_mesh. We don't include halo - # node coordinates because interpolation doesn't usually include halos. - # NOTE: it is very important to set redundant=False, otherwise the - # input ordering VOM will only contain the points on rank 0! - # QUESTION: Should any of the below have annotation turned off? 
- ufl_scalar_element = V_dest.ufl_element() - if isinstance(ufl_scalar_element, finat.ufl.MixedElement): - if type(ufl_scalar_element) is finat.ufl.MixedElement: - raise TypeError("Interpolation matrix with MixedFunctionSpace requires MixedInterpolator") - - # For a VectorElement or TensorElement the correct - # VectorFunctionSpace equivalent is built from the scalar - # sub-element. - ufl_scalar_element, = set(ufl_scalar_element.sub_elements) - if ufl_scalar_element.reference_value_shape != (): - raise NotImplementedError( - "Can't yet cross-mesh interpolate onto function spaces made from VectorElements or TensorElements made from sub elements with value shape other than ()." - ) - + symmetry = self.target_space.ufl_element().symmetry() + return partial(TensorFunctionSpace, shape=shape, symmetry=symmetry) + + @cached_property + def _symbolic_expressions(self) -> tuple[Interpolate, Interpolate]: + """The symbolic ``Interpolate`` expressions for point evaluation of `self.target_space`s + dofs in the source mesh, and the corresponding input-ordering interpolation. + + Returns + ------- + tuple[Interpolate, Interpolate] + A tuple containing the point evaluation interpolation and the + input-ordering interpolation. + + Raises + ------ + DoFNotDefinedError + If any of the target spaces dofs cannot be defined in the source mesh. + DoFTypeError + If the target space does not have point-evaluation dofs. 
+ """ from firedrake.assemble import assemble - V_dest_vec = firedrake.VectorFunctionSpace(dest_mesh, ufl_scalar_element) - f_dest_node_coords = Interpolate(dest_mesh.coordinates, V_dest_vec) - f_dest_node_coords = assemble(f_dest_node_coords) - dest_node_coords = f_dest_node_coords.dat.data_ro.reshape(-1, dest_mesh_gdim) + if not self.target_space.finat_element.has_pointwise_dual_basis: + raise DofTypeError(f"FunctionSpace {self.target_space} must have point-evaluation dofs.") + + # Immerse coordinates of target space point evaluation dofs in src_mesh + # If `self.into_quadrature_space` is true, then the point evaluation dofs + # are the quadrature points of the original target space. + target_mesh = self.target_space.mesh().unique() + target_space_vec = VectorFunctionSpace(target_mesh, self._target_space_element) + f_dest_node_coords = assemble(interpolate(target_mesh.coordinates, target_space_vec)) + dest_node_coords = f_dest_node_coords.dat.data_ro.reshape(-1, target_mesh.geometric_dimension) try: - self.vom_dest_node_coords_in_src_mesh = firedrake.VertexOnlyMesh( - src_mesh, + vom = VertexOnlyMesh( + self.source_mesh.unique(), dest_node_coords, redundant=False, - missing_points_behaviour=missing_points_behaviour, + missing_points_behaviour=self.missing_points_behaviour, ) except VertexOnlyMeshMissingPointsError: - raise DofNotDefinedError(src_mesh, dest_mesh) - # vom_dest_node_coords_in_src_mesh uses the parallel decomposition of - # the global node coordinates of V_dest in the SOURCE mesh (src_mesh). - # I first point evaluate my expression at these locations, giving a - # P0DG function on the VOM. As described in the manual, this is an - # interpolation operation. 
- shape = V_dest.ufl_function_space().value_shape
- if len(shape) == 0:
- fs_type = firedrake.FunctionSpace
- elif len(shape) == 1:
- fs_type = partial(firedrake.VectorFunctionSpace, dim=shape[0])
- else:
- symmetry = V_dest.ufl_element().symmetry()
- fs_type = partial(firedrake.TensorFunctionSpace, shape=shape, symmetry=symmetry)
- P0DG_vom = fs_type(self.vom_dest_node_coords_in_src_mesh, "DG", 0)
- self.point_eval_interpolate = Interpolate(self.expr_renumbered, P0DG_vom)
- # The parallel decomposition of the nodes of V_dest in the DESTINATION
- mesh (dest_mesh) is retrieved using the input_ordering attribute of the
- VOM. This again is an interpolation operation, which, under the hood
- is a PETSc SF reduce.
- P0DG_vom_i_o = fs_type(
- self.vom_dest_node_coords_in_src_mesh.input_ordering, "DG", 0
- )
- self.to_input_ordering_interpolate = Interpolate(
- firedrake.TrialFunction(P0DG_vom), P0DG_vom_i_o
- )
- # The P0DG function outputted by the above interpolation has the
- correct parallel decomposition for the nodes of V_dest in dest_mesh so
- we can safely assign the dat values. This is all done in the actual
- interpolation method below.
-
- @PETSc.Log.EventDecorator()
- def _interpolate(
- self,
- *function,
- output=None,
- transpose=None,
- adjoint=False,
- default_missing_val=None,
- **kwargs,
- ):
- """Compute the interpolation.
-
- For arguments, see :class:`.Interpolator`.
+ raise DofNotDefinedError(f"The given target function space on domain {target_mesh} "
+ "contains degrees of freedom which cannot be defined in the "
+ f"source function space on domain {self.source_mesh.unique()}. "
+ "This may be because the target mesh covers a larger domain than the "
+ "source mesh. 
To disable this error, set allow_missing_dofs=True.") + + # Expression for point evaluation at the dest_node_coords + P0DG_vom = self._target_space_type(vom, "DG", 0) + point_eval = interpolate(self.operand, P0DG_vom) + + # Expression for interpolating into the input-ordering VOM + P0DG_vom_input_ordering = self._target_space_type(vom.input_ordering, "DG", 0) + arg = Argument(P0DG_vom, 0 if self.ufl_interpolate.is_adjoint else 1) + point_eval_input_ordering = interpolate(arg, P0DG_vom_input_ordering) + + return point_eval, point_eval_input_ordering + + @cached_property + def _interpolate_from_quadrature(self) -> Interpolate: + """Returns symbolic expression for interpolation from the intermediate quadrature + space into the user-provided target space. Only relevant if `self.into_quadrature_space` is True. + + Returns + ------- + Interpolate + A symbolic interpolate expression. """ - from firedrake.assemble import assemble + if self.rank == 2: + if self.ufl_interpolate.is_adjoint: + return interpolate(TestFunction(self.target_space), self.original_target_space) + else: + return interpolate(TrialFunction(self.target_space), self.original_target_space) + elif self.ufl_interpolate.is_adjoint: + return interpolate(TestFunction(self.target_space), self.dual_arg) - if transpose is not None: - warnings.warn("'transpose' argument is deprecated, use 'adjoint' instead", FutureWarning) - adjoint = transpose or adjoint - if adjoint and not self.nargs: - raise ValueError( - "Can currently only apply adjoint interpolation with arguments." 
- ) - if self.nargs != len(function): - raise ValueError( - "Passed %d Functions to interpolate, expected %d" - % (len(function), self.nargs) - ) + def _get_callable(self, tensor=None, bcs=None, mat_type=None, sub_mat_type=None): + from firedrake.assemble import assemble + if bcs: + raise NotImplementedError("bcs not implemented for cross-mesh interpolation.") + mat_type = mat_type or "aij" - if self.nargs: - (f_src,) = function - if not hasattr(f_src, "dat"): - raise ValueError( - "The expression had arguments: we therefore need to be given a Function (not an expression) to interpolate!" - ) - else: - f_src = self.expr - - if adjoint: - try: - V_dest = self.expr.function_space().dual() - except AttributeError: - if self.nargs: - V_dest = self.arguments[-1].function_space().dual() - else: - coeffs = extract_coefficients(self.expr) - if len(coeffs): - V_dest = coeffs[0].function_space().dual() - else: - raise ValueError( - "Can't adjoint interpolate an expression with no coefficients or arguments." 
- ) - else: - if isinstance(self.V, (firedrake.Function, firedrake.Cofunction)): - V_dest = self.V.function_space() - else: - V_dest = self.V - if output: - if output.function_space() != V_dest: - raise ValueError("Given output has the wrong function space!") + if self.into_quadrature_space: + f = Function(self.target_space.dual() if self.ufl_interpolate.is_adjoint else self.target_space) else: - if isinstance(self.V, (firedrake.Function, firedrake.Cofunction)): - output = self.V - else: - output = firedrake.Function(V_dest) - - if not adjoint: - if f_src is self.expr: - # f_src is already contained in self.point_eval_interpolate - assert not self.nargs - f_src_at_dest_node_coords_src_mesh_decomp = ( - assemble(self.point_eval_interpolate) - ) - else: - f_src_at_dest_node_coords_src_mesh_decomp = ( - assemble(action(self.point_eval_interpolate, f_src)) - ) - f_src_at_dest_node_coords_dest_mesh_decomp = firedrake.Function( - self.to_input_ordering_interpolate.function_space() - ) - # We have to create the Function before interpolating so we can - # set default missing values (if requested). - if default_missing_val is not None: - f_src_at_dest_node_coords_dest_mesh_decomp.dat.data_wo[ - : - ] = default_missing_val - elif self._allow_missing_dofs: - # If we have allowed missing points we know we might end up - # with points in the target mesh that are not in the source - # mesh. However, since we haven't specified a default missing - # value we expect the interpolation to leave these points - # unchanged. By setting the dat values to NaN we can later - # identify these points and skip over them when assigning to - # the output function. 
- f_src_at_dest_node_coords_dest_mesh_decomp.dat.data_wo[:] = numpy.nan - - interp = action(self.to_input_ordering_interpolate, f_src_at_dest_node_coords_src_mesh_decomp) - assemble(interp, tensor=f_src_at_dest_node_coords_dest_mesh_decomp) - - # we can now confidently assign this to a function on V_dest - if self._allow_missing_dofs and default_missing_val is None: - indices = numpy.where( - ~numpy.isnan(f_src_at_dest_node_coords_dest_mesh_decomp.dat.data_ro) - )[0] - output.dat.data_wo[ - indices - ] = f_src_at_dest_node_coords_dest_mesh_decomp.dat.data_ro[indices] + f = tensor or Function(self.ufl_interpolate.function_space() or self.target_space) + + point_eval, point_eval_input_ordering = self._symbolic_expressions + P0DG_vom_input_ordering = point_eval_input_ordering.argument_slots()[0].function_space().dual() + + if self.rank == 2: + assert mat_type == "aij" + # The cross-mesh interpolation matrix is the product of the + # `self.point_eval_interpolate` and the permutation + # given by `self.to_input_ordering_interpolate`. 
+ if self.ufl_interpolate.is_adjoint: + interp_expr = action(point_eval, point_eval_input_ordering) else: - output.dat.data_wo[ - : - ] = f_src_at_dest_node_coords_dest_mesh_decomp.dat.data_ro[:] + interp_expr = action(point_eval_input_ordering, point_eval) + + def callable() -> PETSc.Mat: + res = assemble(interp_expr, mat_type=mat_type).petscmat + if self.into_quadrature_space: + source_space = self.ufl_interpolate.function_space() + if self.ufl_interpolate.is_adjoint: + I = AssembledMatrix((Argument(source_space, 0), Argument(self.target_space.dual(), 1)), None, res) + return assemble(action(I, self._interpolate_from_quadrature)).petscmat + else: + I = AssembledMatrix((Argument(self.target_space.dual(), 0), Argument(source_space, 1)), None, res) + return assemble(action(self._interpolate_from_quadrature, I)).petscmat + else: + return res + elif self.ufl_interpolate.is_adjoint: + assert self.rank == 1 + + def callable() -> Cofunction: + if self.into_quadrature_space: + cofunc = assemble(self._interpolate_from_quadrature) + f_target = Cofunction(point_eval.function_space()) + else: + cofunc = self.dual_arg + f_target = f + + assert isinstance(cofunc, Cofunction) + + # Our first adjoint operation is to assign the dat values to a + # P0DG cofunction on our input ordering VOM. + f_input_ordering = Cofunction(P0DG_vom_input_ordering.dual()) + f_input_ordering.dat.data_wo[:] = cofunc.dat.data_ro[:] + + # The rest of the adjoint interpolation is the composition + # of the adjoint interpolators in the reverse direction. + # We don't worry about skipping over missing points here + # because we're going from the input ordering VOM to the original VOM + # and all points from the input ordering VOM are in the original. 
+ f_src_at_src_node_coords = assemble(action(point_eval_input_ordering, f_input_ordering)) + assemble(action(point_eval, f_src_at_src_node_coords), tensor=f_target) + return f_target else: - # adjoint interpolation - - # f_src is a cofunction on V_dest.dual as originally specified when - # creating the interpolator. Our first adjoint operation is to - # assign the dat values to a P0DG cofunction on our input ordering - # VOM. This has the parallel decomposition V_dest on our orinally - # specified dest_mesh. We can therefore safely create a P0DG - # cofunction on the input-ordering VOM (which has this parallel - # decomposition and ordering) and assign the dat values. - f_src_at_dest_node_coords_dest_mesh_decomp = firedrake.Cofunction( - self.to_input_ordering_interpolate.function_space().dual() - ) - f_src_at_dest_node_coords_dest_mesh_decomp.dat.data_wo[ - : - ] = f_src.dat.data_ro[:] - - # The rest of the adjoint interpolation is merely the composition - # of the adjoint interpolators in the reverse direction. NOTE: I - # don't have to worry about skipping over missing points here - # because I'm going from the input ordering VOM to the original VOM - # and all points from the input ordering VOM are in the original. - interp = action(expr_adjoint(self.to_input_ordering_interpolate), f_src_at_dest_node_coords_dest_mesh_decomp) - f_src_at_src_node_coords = assemble(interp) - # NOTE: if I wanted the default missing value to be applied to - # adjoint interpolation I would have to do it here. However, - # this would require me to implement default missing values for - # adjoint interpolation from a point evaluation interpolator - # which I haven't done. I wonder if it is necessary - perhaps the - # adjoint operator always sets all the values of the resulting - # cofunction? My initial attempt to insert setting the dat values - # prior to performing the multHermitian operation in - # SameMeshInterpolator.interpolate did not effect the result. 
For - # now, I say in the docstring that it only applies to forward - # interpolation. - interp = action(expr_adjoint(self.point_eval_interpolate), f_src_at_src_node_coords) - assemble(interp, tensor=output) - - return output + assert self.rank in {0, 1} + + def callable() -> Function | Number: + # We create the input-ordering Function before interpolating so we can + # set default missing values if required. + f_point_eval_input_ordering = Function(P0DG_vom_input_ordering) + if self.default_missing_val is not None: + f_point_eval_input_ordering.assign(self.default_missing_val) + elif self.allow_missing_dofs: + # If we allow missing points there may be points in the target + # mesh that are not in the source mesh. If we don't specify a + # default missing value we set these to NaN so we can identify + # them later. + f_point_eval_input_ordering.dat.data_wo[:] = numpy.nan + + assemble(action(point_eval_input_ordering, point_eval), tensor=f_point_eval_input_ordering) + # We assign these values to the output function + if self.allow_missing_dofs and self.default_missing_val is None: + indices = numpy.where(~numpy.isnan(f_point_eval_input_ordering.dat.data_ro))[0] + f.dat.data_wo[indices] = f_point_eval_input_ordering.dat.data_ro[indices] + else: + f.dat.data_wo[:] = f_point_eval_input_ordering.dat.data_ro[:] + + if self.into_quadrature_space: + f_target = Function(self.original_target_space) + assemble(interpolate(f, self.original_target_space), tensor=f_target) + else: + f_target = f + + if self.rank == 0: + # We take the action of the dual_arg on the interpolated function + assert isinstance(self.dual_arg, Cofunction) + return assemble(action(self.dual_arg, f_target)) + else: + return f_target + return callable + + @property + def _allowed_mat_types(self): + return {"aij", None} class SameMeshInterpolator(Interpolator): @@ -737,247 +669,243 @@ class SameMeshInterpolator(Interpolator): """ @no_annotations - def __init__(self, expr, V, subset=None, freeze_expr=False, 
access=None, - bcs=None, matfree=True, allow_missing_dofs=False, **kwargs): + def __init__(self, expr): + super().__init__(expr) + subset = self.subset if subset is None: - if isinstance(expr, ufl.Interpolate): - operand, = expr.ufl_operands - else: - operand = expr - target_mesh = as_domain(V) - source_mesh = extract_unique_domain(operand) or target_mesh - target = target_mesh.topology - source = source_mesh.topology - if all(isinstance(m, firedrake.mesh.MeshTopology) for m in [target, source]) and target is not source: + target = self.target_mesh.unique().topology + source = self.source_mesh.unique().topology + if all(isinstance(m, MeshTopology) for m in [target, source]) and target is not source: composed_map, result_integral_type = source.trans_mesh_entity_map(target, "cell", "everywhere", None) if result_integral_type != "cell": - raise AssertionError("Only cell-cell interpolation supported") + raise AssertionError("Only cell-cell interpolation supported.") indices_active = composed_map.indices_active_with_halo make_subset = not indices_active.all() make_subset = target.comm.allreduce(make_subset, op=MPI.LOR) if make_subset: - if not allow_missing_dofs: - raise ValueError("iteration (sub)set unclear: run with `allow_missing_dofs=True`") + if not self.allow_missing_dofs: + raise ValueError("Iteration (sub)set unclear: run with `allow_missing_dofs=True`.") subset = op2.Subset(target.cell_set, numpy.where(indices_active)) else: # Do not need subset as target <= source. 
pass - super().__init__(expr, V, subset=subset, freeze_expr=freeze_expr, - access=access, bcs=bcs, matfree=matfree, allow_missing_dofs=allow_missing_dofs) - expr = self.ufl_interpolate_renumbered - try: - self.callable = make_interpolator(expr, V, subset, self.access, bcs=bcs, matfree=matfree) - except FIAT.hdiv_trace.TraceError: - raise NotImplementedError("Can't interpolate onto traces sorry") - self.arguments = expr.arguments() + self.subset = subset - @PETSc.Log.EventDecorator() - def _interpolate(self, *function, output=None, transpose=None, adjoint=False, **kwargs): - """Compute the interpolation. + if not isinstance(self.dual_arg, Coargument): + # Matrix-free assembly of 0-form or 1-form requires INC access + if self.access and self.access != op2.INC: + raise ValueError("Matfree adjoint interpolation requires INC access") + self.access = op2.INC + elif self.access is None: + # Default access for forward 1-form or 2-form (forward and adjoint) + self.access = op2.WRITE - For arguments, see :class:`.Interpolator`. - """ + def _get_tensor(self, mat_type: Literal["aij", "baij"]) -> op2.Mat | Function | Cofunction: + """Return a suitable tensor to interpolate into. 
- if transpose is not None: - warnings.warn("'transpose' argument is deprecated, use 'adjoint' instead", FutureWarning) - adjoint = transpose or adjoint - try: - assembled_interpolator = self.frozen_assembled_interpolator - copy_required = True - except AttributeError: - assembled_interpolator = self.callable() - copy_required = False # Return the original - if self.freeze_expr: - if len(self.arguments) == 2: - # Interpolation operator - self.frozen_assembled_interpolator = assembled_interpolator - else: - # Interpolation action - self.frozen_assembled_interpolator = assembled_interpolator.copy() - - if len(self.arguments) == 2 and len(function) > 0: - function, = function - if not hasattr(function, "dat"): - raise ValueError("The expression had arguments: we therefore need to be given a Function (not an expression) to interpolate!") - if adjoint: - mul = assembled_interpolator.handle.multHermitian - col, row = self.arguments - else: - mul = assembled_interpolator.handle.mult - row, col = self.arguments - V = row.function_space().dual() - assert function.function_space() == col.function_space() - - result = output or firedrake.Function(V) - with function.dat.vec_ro as x, result.dat.vec_wo as out: - if x is not out: - mul(x, out) + Parameters + ---------- + mat_type + The PETSc matrix type to use when assembling a rank 2 interpolation. + Only ``"aij"`` and ``"baij"`` are currently allowed. + + Returns + ------- + op2.Mat | Function | Cofunction + The tensor to interpolate into. 
+ """ + if self.rank == 0: + R = FunctionSpace(self.target_mesh.unique(), "Real", 0) + f = Function(R, dtype=ScalarType) + elif self.rank == 1: + f = Function(self.ufl_interpolate.function_space()) + if self.access in {op2.MIN, op2.MAX}: + finfo = numpy.finfo(f.dat.dtype) + if self.access == op2.MIN: + val = Constant(finfo.max) else: - out_ = out.duplicate() - mul(x, out_) - out_.copy(result=out) - return result - + val = Constant(finfo.min) + f.assign(val) + elif self.rank == 2: + sparsity = self._get_monolithic_sparsity(mat_type) + f = op2.Mat(sparsity) else: - if output: - output.assign(assembled_interpolator) - return output - if isinstance(self.V, firedrake.Function): - if copy_required: - self.V.assign(assembled_interpolator) - return self.V - else: - if len(self.arguments) == 0: - return assembled_interpolator.dat.data.item() - elif copy_required: - return assembled_interpolator.copy() - else: - return assembled_interpolator + raise ValueError(f"Cannot interpolate an expression with {self.rank} arguments") + return f + def _get_monolithic_sparsity(self, mat_type: Literal["aij", "baij"]) -> op2.Sparsity: + """Returns op2.Sparsity for the interpolation matrix. Only mat_type 'aij' and 'baij' + are currently supported. 
-@PETSc.Log.EventDecorator() -def make_interpolator(expr, V, subset, access, bcs=None, matfree=True): - if not isinstance(expr, ufl.Interpolate): - raise ValueError(f"Expecting to interpolate a ufl.Interpolate, got {type(expr).__name__}.") - dual_arg, operand = expr.argument_slots() - target_mesh = as_domain(dual_arg) - source_mesh = extract_unique_domain(operand) or target_mesh - vom_onto_other_vom = ((source_mesh is not target_mesh) - and isinstance(source_mesh.topology, VertexOnlyMeshTopology) - and isinstance(target_mesh.topology, VertexOnlyMeshTopology)) - - arguments = expr.arguments() - rank = len(arguments) - if rank <= 1: - if rank == 0: - R = firedrake.FunctionSpace(target_mesh, "Real", 0) - f = firedrake.Function(R, dtype=utils.ScalarType) - elif isinstance(V, firedrake.Function): - f = V - V = f.function_space() - else: - V_dest = arguments[0].function_space().dual() - f = firedrake.Function(V_dest) - if access in {firedrake.MIN, firedrake.MAX}: - finfo = numpy.finfo(f.dat.dtype) - if access == firedrake.MIN: - val = firedrake.Constant(finfo.max) - else: - val = firedrake.Constant(finfo.min) - f.assign(val) - tensor = f.dat - elif rank == 2: - if isinstance(V, firedrake.Function): - raise ValueError("Cannot interpolate an expression with an argument into a Function") - Vrow = arguments[0].function_space() - Vcol = arguments[1].function_space() + Parameters + ---------- + mat_type + The PETSc matrix type to use when assembling a rank 2 interpolation. + Only ``"aij"`` and ``"baij"`` are currently allowed. + + Returns + ------- + op2.Sparsity + The sparsity pattern for the interpolation matrix. 
+ """ + Vrow = self.interpolate_args[0].function_space() + Vcol = self.interpolate_args[1].function_space() if len(Vrow) > 1 or len(Vcol) > 1: - raise TypeError("Interpolation matrix with MixedFunctionSpace requires MixedInterpolator") - if isinstance(target_mesh.topology, VertexOnlyMeshTopology) and target_mesh is not source_mesh and not vom_onto_other_vom: - if not isinstance(target_mesh.topology, VertexOnlyMeshTopology): - raise NotImplementedError("Can only interpolate onto a VertexOnlyMesh") - if target_mesh.geometric_dimension() != source_mesh.geometric_dimension(): - raise ValueError("Cannot interpolate onto a mesh of a different geometric dimension") - if not hasattr(target_mesh, "_parent_mesh") or target_mesh._parent_mesh is not source_mesh: - raise ValueError("Can only interpolate across meshes where the source mesh is the parent of the target") - - if vom_onto_other_vom: - # We make our own linear operator for this case using PETSc SFs - tensor = None + raise NotImplementedError("Interpolation matrix with MixedFunctionSpace requires MixedInterpolator") + Vrow_map = get_interp_node_map(self.source_mesh.unique(), self.target_mesh.unique(), Vrow) + Vcol_map = get_interp_node_map(self.source_mesh.unique(), self.target_mesh.unique(), Vcol) + sparsity = op2.Sparsity((Vrow.dof_dset, Vcol.dof_dset), + [(Vrow_map, Vcol_map, None)], # non-mixed + name=f"{Vrow.name}_{Vcol.name}_sparsity", + nest=False, + block_sparse=(mat_type == "baij")) + return sparsity + + def _get_callable(self, tensor=None, bcs=None, mat_type=None, sub_mat_type=None): + mat_type = mat_type or "aij" + if (isinstance(tensor, Cofunction) and isinstance(self.dual_arg, Cofunction)) and set(tensor.dat).intersection(set(self.dual_arg.dat)): + # adjoint one-form case: we need an empty tensor, so if it shares dats with + # the dual_arg we cannot use it directly, so we store it + f = self._get_tensor(mat_type) + copyout = (partial(f.dat.copy, tensor.dat),) else: - Vrow_map = 
get_interp_node_map(source_mesh, target_mesh, Vrow) - Vcol_map = get_interp_node_map(source_mesh, target_mesh, Vcol) - sparsity = op2.Sparsity((Vrow.dof_dset, Vcol.dof_dset), - [(Vrow_map, Vcol_map, None)], # non-mixed - name="%s_%s_sparsity" % (Vrow.name, Vcol.name), - nest=False, - block_sparse=True) - tensor = op2.Mat(sparsity) - f = tensor - else: - raise ValueError(f"Cannot interpolate an expression with {rank} arguments") - - if vom_onto_other_vom: - wrapper = VomOntoVomWrapper(V, source_mesh, target_mesh, operand, matfree) - # NOTE: get_dat_mpi_type ensures we get the correct MPI type for the - # data, including the correct data size and dimensional information - # (so for vector function spaces in 2 dimensions we might need a - # concatenation of 2 MPI.DOUBLE types when we are in real mode) - if tensor is not None: - # Callable will do interpolation into our pre-supplied function f - # when it is called. - assert f.dat is tensor - wrapper.mpi_type, _ = get_dat_mpi_type(f.dat) - assert len(arguments) == 1 - - def callable(): - wrapper.forward_operation(f.dat) - return f - else: - assert len(arguments) == 2 - assert tensor is None - # we know we will be outputting either a function or a cofunction, - # both of which will use a dat as a data carrier. At present, the - # data type does not depend on function space dimension, so we can - # safely use the argument function space. NOTE: If this changes - # after cofunctions are fully implemented, this will need to be - # reconsidered. - temp_source_func = firedrake.Function(Vcol) - wrapper.mpi_type, _ = get_dat_mpi_type(temp_source_func.dat) - - # Leave wrapper inside a callable so we can access the handle - # property. If matfree is True, then the handle is a PETSc SF - # pretending to be a PETSc Mat. 
If matfree is False, then this - # will be a PETSc Mat representing the equivalent permutation - # matrix - def callable(): - return wrapper + f = tensor or self._get_tensor(mat_type) + copyout = () - return callable - else: + op2_tensor = f if isinstance(f, op2.Mat) else f.dat loops = [] - # Initialise to zero if needed - if access is op2.INC: - loops.append(tensor.zero) + if self.access is op2.INC: + loops.append(op2_tensor.zero) # Arguments in the operand are allowed to be from a MixedFunctionSpace # We need to split the target space V and generate separate kernels - if len(arguments) == 2: - # Matrix case assumes that the spaces are not mixed - expressions = {(0,): expr} - elif isinstance(dual_arg, Coargument): + if self.rank == 2: + expressions = {(0,): self.ufl_interpolate} + elif isinstance(self.dual_arg, Coargument): # Split in the coargument - expressions = dict(firedrake.formmanipulation.split_form(expr)) + expressions = dict(split_form(self.ufl_interpolate)) else: + assert isinstance(self.dual_arg, Cofunction) # Split in the cofunction: split_form can only split in the coargument # Replace the cofunction with a coargument to construct the Jacobian - interp = expr._ufl_expr_reconstruct_(operand, V) + interp = self.ufl_interpolate._ufl_expr_reconstruct_(self.operand, self.target_space) # Split the Jacobian into blocks - interp_split = dict(firedrake.formmanipulation.split_form(interp)) + interp_split = dict(split_form(interp)) # Split the cofunction - dual_split = dict(firedrake.formmanipulation.split_form(dual_arg)) + dual_split = dict(split_form(self.dual_arg)) # Combine the splits by taking their action expressions = {i: action(interp_split[i], dual_split[i[-1:]]) for i in interp_split} # Interpolate each sub expression into each function space for indices, sub_expr in expressions.items(): - sub_tensor = tensor[indices[0]] if rank == 1 else tensor - loops.extend(_interpolator(sub_tensor, sub_expr, subset, access, bcs=bcs)) - # Apply bcs - if bcs and 
rank == 1: + sub_op2_tensor = op2_tensor[indices[0]] if self.rank == 1 else op2_tensor + loops.extend(_build_interpolation_callables(sub_expr, sub_op2_tensor, self.access, self.subset, bcs)) + + if bcs and self.rank == 1: loops.extend(partial(bc.apply, f) for bc in bcs) - def callable(loops, f): + loops.extend(copyout) + + def callable() -> Function | Cofunction | PETSc.Mat | Number: for l in loops: l() - return f + if self.rank == 0: + return f.dat.data.item() + elif self.rank == 2: + return f.handle # In this case f is an op2.Mat + else: + return f + + return callable + + @property + def _allowed_mat_types(self): + return {"aij", "baij", None} - return partial(callable, loops, f) +class VomOntoVomInterpolator(SameMeshInterpolator): + + def __init__(self, expr: Interpolate): + super().__init__(expr) + + def _get_callable(self, tensor=None, bcs=None, mat_type=None, sub_mat_type=None): + if bcs: + raise NotImplementedError("bcs not implemented for vom-to-vom interpolation.") + mat_type = mat_type or "matfree" + self.mat = VomOntoVomMat(self, mat_type=mat_type) + if self.rank == 1: + f = tensor or self._get_tensor(mat_type) + # NOTE: get_dat_mpi_type ensures we get the correct MPI type for the + # data, including the correct data size and dimensional information + # (so for vector function spaces in 2 dimensions we might need a + # concatenation of 2 MPI.DOUBLE types when we are in real mode) + self.mat.mpi_type = _get_mtype(f.dat)[0] + if self.ufl_interpolate.is_adjoint: + assert isinstance(self.dual_arg, Cofunction) + assert isinstance(f, Cofunction) + + def callable() -> Cofunction: + with self.dual_arg.dat.vec_ro as source_vec: + coeff = self.mat.expr_as_coeff(source_vec) + with coeff.dat.vec_ro as coeff_vec, f.dat.vec_wo as target_vec: + self.mat.handle.multHermitian(coeff_vec, target_vec) + return f + else: + assert isinstance(f, Function) + + def callable() -> Function: + coeff = self.mat.expr_as_coeff() + with coeff.dat.vec_ro as coeff_vec, f.dat.vec_wo as 
target_vec: + self.mat.handle.mult(coeff_vec, target_vec) + return f + elif self.rank == 2: + # Create a temporary function to get the correct MPI type + temp_source_func = Function(self.interpolate_args[1].function_space()) + self.mat.mpi_type = _get_mtype(temp_source_func.dat)[0] -@utils.known_pyop2_safe -def _interpolator(tensor, expr, subset, access, bcs=None): - if isinstance(expr, ufl.ZeroBaseForm): + def callable() -> PETSc.Mat: + return self.mat.handle + + return callable + + @property + def _allowed_mat_types(self): + return {"aij", "baij", "matfree", None} + + +@known_pyop2_safe +def _build_interpolation_callables( + expr: Interpolate | ZeroBaseForm, + tensor: op2.Dat | op2.Mat | op2.Global, + access: Literal[op2.WRITE, op2.MIN, op2.MAX, op2.INC], + subset: op2.Subset | None = None, + bcs: Iterable[DirichletBC] | None = None +) -> tuple[Callable, ...]: + """Return a tuple of callables which calculate the interpolation. + + Parameters + ---------- + expr : ufl.Interpolate | ufl.ZeroBaseForm + The symbolic interpolation expression, or a ZeroBaseForm. ZeroBaseForms + are simplified here to avoid code generation when access is WRITE or INC. + tensor : op2.Dat | op2.Mat | op2.Global + Object to hold the result of the interpolation. + access : Literal[op2.WRITE, op2.MIN, op2.MAX, op2.INC] + op2 access descriptor + subset : op2.Subset | None + An optional subset to apply the interpolation over, by default None. + bcs : Iterable[DirichletBC] | None + An optional list of boundary conditions to zero-out in the + output function space. Interpolator rows or columns which are + associated with boundary condition nodes are zeroed out when this is + specified. By default None, by default None. + + Returns + ------- + tuple[Callable, ...] + Tuple of callables which perform the interpolation. 
+ """ + if isinstance(expr, ZeroBaseForm): # Zero simplification, avoid code-generation if access is op2.INC: return () @@ -986,55 +914,29 @@ def _interpolator(tensor, expr, subset, access, bcs=None): # Unclear how to avoid codegen for MIN and MAX # Reconstruct the expression as an Interpolate V = expr.arguments()[-1].function_space().dual() - expr = interpolate(ufl.zero(V.value_shape), V) + expr = interpolate(zero(V.value_shape), V) - if not isinstance(expr, ufl.Interpolate): - raise ValueError("Expecting to interpolate a ufl.Interpolate") + if not isinstance(expr, Interpolate): + raise ValueError("Expecting to interpolate a symbolic Interpolate expression.") - arguments = expr.arguments() dual_arg, operand = expr.argument_slots() - V = dual_arg.arguments()[0].function_space() - - try: - to_element = create_element(V.ufl_element()) - except KeyError: - # FInAT only elements - raise NotImplementedError("Don't know how to create FIAT element for %s" % V.ufl_element()) + assert isinstance(dual_arg, Cofunction | Coargument) + V = dual_arg.function_space().dual() if access is op2.READ: raise ValueError("Can't have READ access for output function") # NOTE: The par_loop is always over the target mesh cells. 
- target_mesh = as_domain(V) + target_mesh = V.mesh() source_mesh = extract_unique_domain(operand) or target_mesh + target_element = V.ufl_element() if isinstance(target_mesh.topology, VertexOnlyMeshTopology): - if target_mesh is not source_mesh: - if not isinstance(target_mesh.topology, VertexOnlyMeshTopology): - raise NotImplementedError("Can only interpolate onto a Vertex Only Mesh") - if target_mesh.geometric_dimension() != source_mesh.geometric_dimension(): - raise ValueError("Cannot interpolate onto a mesh of a different geometric dimension") - if not hasattr(target_mesh, "_parent_mesh") or target_mesh._parent_mesh is not source_mesh: - raise ValueError("Can only interpolate across meshes where the source mesh is the parent of the target") - # For trans-mesh interpolation we use a FInAT QuadratureElement as the - # (base) target element with runtime point set expressions as their - # quadrature rule point set and weights from their dual basis. - # NOTE: This setup is useful for thinking about future design - in the - # future this `rebuild` function can be absorbed into FInAT as a - # transformer that eats an element and gives you an equivalent (which - # may or may not be a QuadratureElement) that lets you do run time - # tabulation. Alternatively (and this all depends on future design - # decision about FInAT how dual evaluation should work) the - # to_element's dual basis (which look rather like quadrature rules) can - # have their pointset(s) directly replaced with run-time tabulated - # equivalent(s) (i.e. 
finat.point_set.UnknownPointSet(s)) - rt_var_name = 'rt_X' - try: - cell = operand.ufl_element().ufl_cell() - except AttributeError: - # expression must be pure function of spatial coordinates so - # domain has correct ufl cell - cell = source_mesh.ufl_cell() - to_element = rebuild(to_element, cell, rt_var_name) + # For interpolation onto a VOM, we use a FInAT QuadratureElement as the + # target element with runtime point set expressions as their + # quadrature rule point set. + rt_var_name = "rt_X" + target_element = runtime_quadrature_element(source_mesh, target_element, + rt_var_name=rt_var_name) cell_set = target_mesh.cell_set if subset is not None: @@ -1042,7 +944,7 @@ def _interpolator(tensor, expr, subset, access, bcs=None): cell_set = subset parameters = {} - parameters['scalar_type'] = utils.ScalarType + parameters['scalar_type'] = ScalarType copyin = () copyout = () @@ -1050,11 +952,10 @@ def _interpolator(tensor, expr, subset, access, bcs=None): # For the matfree adjoint 1-form and the 0-form, the cellwise kernel will add multiple # contributions from the facet DOFs of the dual argument. # The incoming Cofunction needs to be weighted by the reciprocal of the DOF multiplicity. - needs_weight = isinstance(dual_arg, ufl.Cofunction) and not to_element.is_dg() - if needs_weight: + if isinstance(dual_arg, Cofunction) and not create_element(target_element).is_dg(): # Create a buffer for the weighted Cofunction W = dual_arg.function_space() - v = firedrake.Function(W) + v = Function(W) expr = expr._ufl_expr_reconstruct_(operand, v=v) copyin += (partial(dual_arg.dat.copy, v.dat),) @@ -1075,13 +976,7 @@ def _interpolator(tensor, expr, subset, access, bcs=None): with wdat.vec_ro as w, v.dat.vec as y: copyin += (partial(y.pointwiseMult, y, w),) - # We need to pass both the ufl element and the finat element - # because the finat elements might not have the right mapping - # (e.g. 
L2 Piola, or tensor element with symmetries) - # FIXME: for the runtime unknown point set (for cross-mesh - # interpolation) we have to pass the finat element we construct - # here. Ideally we would only pass the UFL element through. - kernel = compile_expression(cell_set.comm, expr, to_element, V.ufl_element(), + kernel = compile_expression(cell_set.comm, expr, target_element, domain=source_mesh, parameters=parameters) ast = kernel.ast oriented = kernel.oriented @@ -1094,7 +989,7 @@ def _interpolator(tensor, expr, subset, access, bcs=None): parloop_args = [kernel, cell_set] - coefficients = tsfc_interface.extract_numbered_coefficients(expr, coefficient_numbers) + coefficients = extract_numbered_coefficients(expr, coefficient_numbers) if needs_external_coords: coefficients = [source_mesh.coordinates] + coefficients @@ -1104,6 +999,8 @@ def _interpolator(tensor, expr, subset, access, bcs=None): if access is not op2.WRITE: copyin += (partial(output.copy, tensor), ) copyout += (partial(tensor.copy, output), ) + + arguments = expr.arguments() if isinstance(tensor, op2.Global): parloop_args.append(tensor(access)) elif isinstance(tensor, op2.Dat): @@ -1117,20 +1014,21 @@ def _interpolator(tensor, expr, subset, access, bcs=None): assert tensor.handle.getSize() == (Vrow.dim(), Vcol.dim()) rows_map = get_interp_node_map(source_mesh, target_mesh, Vrow) columns_map = get_interp_node_map(source_mesh, target_mesh, Vcol) - lgmaps = None if bcs: - if ufl.duals.is_dual(Vrow): + if is_dual(Vrow): Vrow = Vrow.dual() - if ufl.duals.is_dual(Vcol): + if is_dual(Vcol): Vcol = Vcol.dual() bc_rows = [bc for bc in bcs if bc.function_space() == Vrow] bc_cols = [bc for bc in bcs if bc.function_space() == Vcol] lgmaps = [(Vrow.local_to_global_map(bc_rows), Vcol.local_to_global_map(bc_cols))] parloop_args.append(tensor(access, (rows_map, columns_map), lgmaps=lgmaps)) + if oriented: co = target_mesh.cell_orientations() parloop_args.append(co.dat(op2.READ, co.cell_node_map())) + if 
needs_cell_sizes: cs = source_mesh.cell_sizes parloop_args.append(cs.dat(op2.READ, cs.cell_node_map())) @@ -1173,7 +1071,7 @@ def _interpolator(tensor, expr, subset, access, bcs=None): return copyin + (parloop, ) + copyout -def get_interp_node_map(source_mesh, target_mesh, fs): +def get_interp_node_map(source_mesh: MeshGeometry, target_mesh: MeshGeometry, fs: WithGeometry) -> op2.Map | None: """Return the map between cells of the target mesh and nodes of the function space. If the function space is defined on the source mesh then the node map is composed @@ -1204,7 +1102,7 @@ def get_interp_node_map(source_mesh, target_mesh, fs): else: raise ValueError("Have coefficient with unexpected mesh") else: - m_ = fs.entity_node_map(target_mesh.topology, "cell", None, None) + m_ = fs.entity_node_map(target_mesh.topology, "cell", "everywhere", None) return m_ @@ -1215,67 +1113,22 @@ def get_interp_node_map(source_mesh, target_mesh, fs): f"firedrake-tsfc-expression-kernel-cache-uid{os.getuid()}") -def _compile_expression_key(comm, expr, to_element, ufl_element, domain, parameters) -> tuple[Hashable, ...]: +def _compile_expression_key(comm, expr, ufl_element, domain, parameters) -> tuple[Hashable, ...]: """Generate a cache key suitable for :func:`tsfc.compile_expression_dual_evaluation`.""" dual_arg, operand = expr.argument_slots() - return (hash_expr(operand), type(dual_arg), hash(ufl_element), utils.tuplify(parameters)) + return (hash_expr(operand), type(dual_arg), hash(ufl_element), tuplify(parameters)) @memory_and_disk_cache( hashkey=_compile_expression_key, - cachedir=tsfc_interface._cachedir + cachedir=_cachedir ) @PETSc.Log.EventDecorator() def compile_expression(comm, *args, **kwargs): return compile_expression_dual_evaluation(*args, **kwargs) -@singledispatch -def rebuild(element, expr_cell, rt_var_name): - raise NotImplementedError(f"Cross mesh interpolation not implemented for a {element} element.") - - -@rebuild.register(finat.fiat_elements.ScalarFiatElement) -def 
rebuild_dg(element, expr_cell, rt_var_name): - # To tabulate on the given element (which is on a different mesh to the - # expression) we must do so at runtime. We therefore create a quadrature - # element with runtime points to evaluate for each point in the element's - # dual basis. This exists on the same reference cell as the input element - # and we can interpolate onto it before mapping the result back onto the - # target space. - expr_tdim = expr_cell.topological_dimension() - # Need point evaluations and matching weights from dual basis. - # This could use FIAT's dual basis as below: - # num_points = sum(len(dual.get_point_dict()) for dual in element.fiat_equivalent.dual_basis()) - # weights = [] - # for dual in element.fiat_equivalent.dual_basis(): - # pts = dual.get_point_dict().keys() - # for p in pts: - # for w, _ in dual.get_point_dict()[p]: - # weights.append(w) - # assert len(weights) == num_points - # but for now we just fix the values to what we know works: - if element.degree != 0 or not isinstance(element.cell, FIAT.reference_element.Point): - raise NotImplementedError("Cross mesh interpolation only implemented for P0DG on vertex cells.") - num_points = 1 - weights = [1.]*num_points - # gem.Variable name starting with rt_ forces TSFC runtime tabulation - assert rt_var_name.startswith("rt_") - runtime_points_expr = gem.Variable(rt_var_name, (num_points, expr_tdim)) - rule_pointset = finat.point_set.UnknownPointSet(runtime_points_expr) - rule = finat.quadrature.QuadratureRule(rule_pointset, weights=weights) - return finat.QuadratureElement(as_fiat_cell(expr_cell), rule) - - -@rebuild.register(finat.TensorFiniteElement) -def rebuild_te(element, expr_cell, rt_var_name): - return finat.TensorFiniteElement(rebuild(element.base_element, - expr_cell, rt_var_name), - element._shape, - transpose=element._transpose) - - -def compose_map_and_cache(map1, map2): +def compose_map_and_cache(map1: op2.Map, map2: op2.Map | None) -> op2.ComposedMap | None: """ 
Retrieve a :class:`pyop2.ComposedMap` map from the cache of map1 using map2 as the cache key. The composed map maps from the iterset @@ -1298,7 +1151,7 @@ def compose_map_and_cache(map1, map2): return cmap -def vom_cell_parent_node_map_extruded(vertex_only_mesh, extruded_cell_node_map): +def vom_cell_parent_node_map_extruded(vertex_only_mesh: MeshGeometry, extruded_cell_node_map: op2.Map) -> op2.Map: """Build a map from the cells of a vertex only mesh to the nodes of the nodes on the source mesh where the source mesh is extruded. @@ -1416,126 +1269,84 @@ def vom_cell_parent_node_map_extruded(vertex_only_mesh, extruded_cell_node_map): ) -class GlobalWrapper(object): - """Wrapper object that fakes a Global to behave like a Function.""" - def __init__(self, glob): - self.dat = glob - self.cell_node_map = lambda *arguments: None - self.ufl_domain = lambda: None - - -class VomOntoVomWrapper(object): - """Utility class for interpolating from one ``VertexOnlyMesh`` to it's - intput ordering ``VertexOnlyMesh``, or vice versa. - - Parameters - ---------- - V : `.FunctionSpace` - The P0DG function space (which may be vector or tensor valued) on the - source vertex-only mesh. - source_vom : `.VertexOnlyMesh` - The vertex-only mesh we interpolate from. - target_vom : `.VertexOnlyMesh` - The vertex-only mesh we interpolate to. - expr : `ufl.Expr` - The expression to interpolate. If ``arguments`` is not empty, those - arguments must be present within it. - matfree : bool - If ``False``, the matrix representating the permutation of the points is - constructed and used to perform the interpolation. If ``True``, then the - interpolation is performed using the broadcast and reduce operations on the - PETSc Star Forest. +class VomOntoVomMat: + """ + Object that facilitates interpolation between a VertexOnlyMesh and its + input_ordering VertexOnlyMesh. 
This is either a PETSc Star Forest wrapped + as a PETSc Mat, or a concrete PETSc Mat, depending on whether + `mat_type='matfree` is passed to assemble. """ + def __init__( + self, + interpolator: VomOntoVomInterpolator, + mat_type: Literal["aij", "baij", "matfree"], + ): + """Initialise the VomOntoVomMat. - def __init__(self, V, source_vom, target_vom, expr, matfree): - arguments = extract_arguments(expr) - reduce = False - if source_vom.input_ordering is target_vom: - reduce = True - original_vom = source_vom - elif target_vom.input_ordering is source_vom: - original_vom = target_vom + Parameters + ---------- + interpolator : VomOntoVomInterpolator + A :class:`VomOntoVomInterpolator` object. + mat_type : Literal["aij", "baij", "matfree"] | None, optional + The type of PETSc Mat to create. If 'matfree', a + matfree PETSc Mat wrapping the SF is created. If 'aij' or 'baij', + a concrete PETSc Mat is created. + + Raises + ------ + ValueError + If the source and target vertex-only meshes are not linked by input_ordering. + """ + if interpolator.source_mesh.input_ordering is interpolator.target_mesh: + self.forward_reduce = True + """True if the forward interpolation is a star forest reduction, False if broadcast.""" + self.original_vom = interpolator.source_mesh + """The original VOM from which the SF is constructed.""" + elif interpolator.target_mesh.input_ordering is interpolator.source_mesh: + self.forward_reduce = False + self.original_vom = interpolator.target_mesh else: raise ValueError( "The target vom and source vom must be linked by input ordering!" 
) - self.V = V - self.source_vom = source_vom - self.expr = expr - self.arguments = arguments - self.reduce = reduce - # note that interpolation doesn't include halo cells - self.dummy_mat = VomOntoVomDummyMat( - original_vom.input_ordering_without_halos_sf, reduce, V, source_vom, expr, arguments - ) - if matfree: - # If matfree, we use the SF to perform the interpolation - self.handle = self.dummy_mat._wrap_dummy_mat() - else: - # Otherwise we create the permutation matrix - self.handle = self.dummy_mat._create_permutation_mat() - - @property - def mpi_type(self): - """ - The MPI type to use for the PETSc SF. - - Should correspond to the underlying data type of the PETSc Vec. - """ - return self.handle.mpi_type - - @mpi_type.setter - def mpi_type(self, val): - self.dummy_mat.mpi_type = val - - def forward_operation(self, target_dat): - coeff = self.dummy_mat.expr_as_coeff() - with coeff.dat.vec_ro as coeff_vec, target_dat.vec_wo as target_vec: - self.handle.mult(coeff_vec, target_vec) + self.sf = self.original_vom.input_ordering_without_halos_sf + """The PETSc Star Forest representing the permutation between the VOMs.""" + self.target_space = interpolator.target_space + """The FunctionSpace being interpolated into.""" + self.target_vom = interpolator.target_mesh + """The VOM being interpolated to.""" + self.source_vom = interpolator.source_mesh + """The VOM being interpolated from.""" + self.operand = interpolator.operand + """The expression in the primal slot of the Interpolate.""" + self.arguments = extract_arguments(self.operand) + """The arguments of the expression being interpolated.""" + self.is_adjoint = interpolator.ufl_interpolate.is_adjoint + """Are we doing the adjoint interpolation?""" - -class VomOntoVomDummyMat(object): - """Dummy object to stand in for a PETSc ``Mat`` when we are interpolating - between vertex-only meshes. 
- - Parameters - ---------- - sf: PETSc.sf - The PETSc Star Forest (SF) to use for the operation - forward_reduce : bool - If ``True``, the action of the operator (accessed via the `mult` - method) is to perform a SF reduce from the source vec to the target - vec, whilst the adjoint action (accessed via the `multHermitian` - method) is to perform a SF broadcast from the source vec to the target - vec. If ``False``, the opposite is true. - V : `.FunctionSpace` - The P0DG function space (which may be vector or tensor valued) on the - source vertex-only mesh. - source_vom : `.VertexOnlyMesh` - The vertex-only mesh we interpolate from. - expr : `ufl.Expr` - The expression to interpolate. If ``arguments`` is not empty, those - arguments must be present within it. - arguments : list of `ufl.Argument` - The arguments in the expression. - """ - - def __init__(self, sf, forward_reduce, V, source_vom, expr, arguments): - self.sf = sf - self.forward_reduce = forward_reduce - self.V = V - self.source_vom = source_vom - self.expr = expr - self.arguments = arguments # Calculate correct local and global sizes for the matrix - nroots, leaves, _ = sf.getGraph() + nroots, leaves, _ = self.sf.getGraph() self.nleaves = len(leaves) - self._local_sizes = V.comm.allgather(nroots) - self.source_size = (self.V.block_size * nroots, self.V.block_size * sum(self._local_sizes)) + """The local number of leaves in the SF.""" + self._local_sizes = self.target_space.comm.allgather(nroots) + """List of local number of roots on each process.""" + self.source_size = (self.target_space.block_size * nroots, self.target_space.block_size * sum(self._local_sizes)) + """Tuple containing the local and global size of the source space.""" self.target_size = ( - self.V.block_size * self.nleaves, - self.V.block_size * V.comm.allreduce(self.nleaves, op=MPI.SUM), + self.target_space.block_size * self.nleaves, + self.target_space.block_size * self.target_space.comm.allreduce(self.nleaves, op=MPI.SUM), ) + """Tuple 
containing the local and global size of the target space.""" + + if mat_type == "matfree": + # If matfree, we use the SF wrapped as a PETSc Mat + # to perform the permutation. + self.handle = self._wrap_python_mat() + else: + # Otherwise we build the concrete permutation + # matrix as a PETSc Mat. This is used to build the + # cross-mesh interpolation matrix. + self.handle = self._create_permutation_mat(mat_type) @property def mpi_type(self): @@ -1550,40 +1361,63 @@ def mpi_type(self): def mpi_type(self, val): self._mpi_type = val - def expr_as_coeff(self, source_vec=None): - """ - Return a coefficient that corresponds to the expression used at + def expr_as_coeff(self, source_vec: PETSc.Vec | None = None) -> Function: + """Return a Function that corresponds to the expression used at construction, where the expression has been interpolated into the P0DG function space on the source vertex-only mesh. Will fail if there are no arguments. + + Parameters + ---------- + source_vec : PETSc.Vec | None, optional + Optional vector used to replace arguments in the expression. + By default None. + + Returns + ------- + Function + A Function representing the expression as a coefficient on the + source vertex-only mesh. + """ # Since we always output a coefficient when we don't have arguments in # the expression, we should evaluate the expression on the source mesh # so its dat can be sent to the target mesh. with stop_annotating(): - element = self.V.ufl_element() # Could be vector/tensor valued - P0DG = firedrake.FunctionSpace(self.source_vom, element) + element = self.target_space.ufl_element() # Could be vector/tensor valued # if we have any arguments in the expression we need to replace # them with equivalent coefficients now - coeff_expr = self.expr if len(self.arguments): if len(self.arguments) > 1: - raise NotImplementedError( - "Can only interpolate expressions with one argument!" 
- ) + raise NotImplementedError("Can only interpolate expressions with one argument!") if source_vec is None: raise ValueError("Need to provide a source dat for the argument!") + arg = self.arguments[0] - arg_coeff = firedrake.Function(arg.function_space()) + source_space = arg.function_space() + P0DG = FunctionSpace(self.target_vom if self.is_adjoint else self.source_vom, element) + arg_coeff = Function(self.target_space if self.is_adjoint else source_space) arg_coeff.dat.data_wo[:] = source_vec.getArray(readonly=True).reshape( arg_coeff.dat.data_wo.shape ) - coeff_expr = ufl.replace(self.expr, {arg: arg_coeff}) - coeff = firedrake.Function(P0DG).interpolate(coeff_expr) + coeff_expr = replace(self.operand, {arg: arg_coeff}) + coeff = Function(P0DG).interpolate(coeff_expr) + else: + P0DG = FunctionSpace(self.source_vom, element) + coeff = Function(P0DG).interpolate(self.operand) return coeff - def reduce(self, source_vec, target_vec): + def reduce(self, source_vec: PETSc.Vec, target_vec: PETSc.Vec) -> None: + """Reduce data in source_vec using the PETSc SF. + + Parameters + ---------- + source_vec : PETSc.Vec + The vector to reduce. + target_vec : PETSc.Vec + The vector to store the result in. + """ source_arr = source_vec.getArray(readonly=True) target_arr = target_vec.getArray() self.sf.reduceBegin( @@ -1599,7 +1433,17 @@ def reduce(self, source_vec, target_vec): MPI.REPLACE, ) - def broadcast(self, source_vec, target_vec): + def broadcast(self, source_vec: PETSc.Vec, target_vec: PETSc.Vec) -> None: + """Broadcast data in source_vec using the PETSc SF, storing the + result in target_vec. + + Parameters + ---------- + source_vec : PETSc.Vec + The vector to broadcast. + target_vec : PETSc.Vec + The vector to store the result in. 
+ """ source_arr = source_vec.getArray(readonly=True) target_arr = target_vec.getArray() self.sf.bcastBegin( @@ -1615,8 +1459,21 @@ def broadcast(self, source_vec, target_vec): MPI.REPLACE, ) - def mult(self, mat, source_vec, target_vec): - # need to evaluate expression before doing mult + def mult(self, mat: PETSc.Mat, source_vec: PETSc.Vec, target_vec: PETSc.Vec) -> None: + """Apply the interpolation operator to source_vec, storing the + result in target_vec. + + Parameters + ---------- + mat : PETSc.Mat + Required by petsc4py but unused. + source_vec : PETSc.Vec + The vector to interpolate. + target_vec : PETSc.Vec + The vector to store the result in. + """ + # Need to convert the expression into a coefficient + # so that we can broadcast/reduce it coeff = self.expr_as_coeff(source_vec) with coeff.dat.vec_ro as coeff_vec: if self.forward_reduce: @@ -1624,21 +1481,36 @@ def mult(self, mat, source_vec, target_vec): else: self.broadcast(coeff_vec, target_vec) - def multHermitian(self, mat, source_vec, target_vec): + def multHermitian(self, mat: PETSc.Mat, source_vec: PETSc.Vec, target_vec: PETSc.Vec) -> None: + """Apply the adjoint of the interpolation operator to source_vec, storing the + result in target_vec. Since ``VomOntoVomMat`` represents a permutation, it is + real-valued and thus the Hermitian adjoint is the transpose. + + Parameters + ---------- + mat : PETSc.Mat + Required by petsc4py but unused. + source_vec : PETSc.Vec + The vector to adjoint interpolate. + target_vec : PETSc.Vec + The vector to store the result in. + """ self.multTranspose(mat, source_vec, target_vec) - def multTranspose(self, mat, source_vec, target_vec): - # can only do adjoint if our expression exclusively contains a - # single argument, making the application of the adjoint operator - # straightforward (haven't worked out how to do this otherwise!) - if not len(self.arguments) == 1: - raise NotImplementedError( - "Can only apply adjoint to expressions with one argument!" 
- ) - if self.arguments[0] is not self.expr: - raise NotImplementedError( - "Can only apply adjoint to expressions consisting of a single argument at the moment." - ) + def multTranspose(self, mat: PETSc.Mat, source_vec: PETSc.Vec, target_vec: PETSc.Vec) -> None: + """Apply the tranpose of the interpolation operator to source_vec, storing the + result in target_vec. Called by `self.multHermitian`. + + Parameters + ---------- + mat : PETSc.Mat + Required by petsc4py but unused. + source_vec : PETSc.Vec + The vector to transpose interpolate. + target_vec : PETSc.Vec + The vector to store the result in. + + """ if self.forward_reduce: self.broadcast(source_vec, target_vec) else: @@ -1654,27 +1526,57 @@ def multTranspose(self, mat, source_vec, target_vec): target_vec.zeroEntries() self.reduce(source_vec, target_vec) - def _create_permutation_mat(self): - """Creates the PETSc matrix that represents the interpolation operator from a vertex-only mesh to - its input ordering vertex-only mesh""" - mat = PETSc.Mat().createAIJ((self.target_size, self.source_size), nnz=1, comm=self.V.comm) + def _create_permutation_mat(self, mat_type: Literal["aij", "baij"]) -> PETSc.Mat: + """Create the PETSc matrix that represents the interpolation operator from a vertex-only mesh to + its input ordering vertex-only mesh. + + Returns + ------- + PETSc.Mat + PETSc seqaij matrix + """ + if mat_type == "baij" and self.target_space.block_size > 1: + create = PETSc.Mat().createBAIJ + else: + create = PETSc.Mat().createAIJ + mat = create( + size=(self.target_size, self.source_size), + bsize=self.target_space.block_size, + nnz=1, + comm=self.target_space.comm + ) mat.setUp() - start = sum(self._local_sizes[:self.V.comm.rank]) + # To create the permutation matrix we broadcast an array of indices which are contiguous + # across all ranks and then use these indices to set the values of the matrix directly. 
+ start = sum(self._local_sizes[:self.target_space.comm.rank]) end = start + self.source_size[0] - contiguous_indices = numpy.arange(start, end, dtype=utils.IntType) - perm = numpy.zeros(self.nleaves, dtype=utils.IntType) + contiguous_indices = numpy.arange(start, end, dtype=IntType) + perm = numpy.zeros(self.nleaves, dtype=IntType) # result stored in here self.sf.bcastBegin(MPI.INT, contiguous_indices, perm, MPI.REPLACE) self.sf.bcastEnd(MPI.INT, contiguous_indices, perm, MPI.REPLACE) - rows = numpy.arange(self.target_size[0] + 1, dtype=utils.IntType) - cols = (self.V.block_size * perm[:, None] + numpy.arange(self.V.block_size, dtype=utils.IntType)[None, :]).reshape(-1) - mat.setValuesCSR(rows, cols, numpy.ones_like(cols, dtype=utils.IntType)) + rows = numpy.arange(self.target_size[0] + 1, dtype=IntType) + # Vector and Tensor valued functions are stored in a flattened array, so + # we need to space out the column indices according to the block size + cols = (self.target_space.block_size * perm[:, None] + numpy.arange(self.target_space.block_size, dtype=IntType)[None, :]).reshape(-1) + mat.setValuesCSR(rows, cols, numpy.ones_like(cols, dtype=IntType)) mat.assemble() - if self.forward_reduce: + if self.forward_reduce and not self.is_adjoint: + # The mat we have constructed thus far takes us from the input-ordering VOM to the + # immersed VOM. If we're going the other way, then we need to transpose it, + # unless we're doing the adjoint interpolation, since source_mesh and target_mesh + # are defined assuming we're doing forward interpolation. mat.transpose() return mat - def _wrap_dummy_mat(self): - mat = PETSc.Mat().create(comm=self.V.comm) + def _wrap_python_mat(self) -> PETSc.Mat: + """Wrap this object as a PETSc Mat. Used for matfree interpolation. + + Returns + ------- + PETSc.Mat + A PETSc Mat of type python with this object as its context. 
+ """ + mat = PETSc.Mat().create(comm=self.target_space.comm) if self.forward_reduce: mat_size = (self.source_size, self.target_size) else: @@ -1685,54 +1587,69 @@ def _wrap_dummy_mat(self): mat.setUp() return mat - def duplicate(self, mat=None, op=None): - return self._wrap_dummy_mat() + def duplicate(self, mat: PETSc.Mat | None = None, op: PETSc.Mat.DuplicateOption | None = None) -> PETSc.Mat: + """Duplicate the matrix. Needed to wrap as a PETSc Python Mat. + + Parameters + ---------- + mat : PETSc.Mat | None, optional + Unused, by default None + op : PETSc.Mat.DuplicateOption | None, optional + Unused, by default None + + Returns + ------- + PETSc.Mat + VomOntoVomMat wrapped as a PETSc Mat of type python. + """ + return self._wrap_python_mat() class MixedInterpolator(Interpolator): - """A reusable interpolation object between MixedFunctionSpaces. + """Interpolator between MixedFunctionSpaces.""" + def __init__(self, expr: Interpolate): + """Initialise MixedInterpolator. Should not be called directly; use `get_interpolator`. - Parameters - ---------- - expr - The underlying ufl.Interpolate or the operand to the ufl.Interpolate. - V - The :class:`.FunctionSpace` or :class:`.Function` to - interpolate into. - bcs - A list of boundary conditions. - **kwargs - Any extra kwargs are passed on to the sub Interpolators. - For details see :class:`firedrake.interpolation.Interpolator`. - """ - def __init__(self, expr, V, bcs=None, **kwargs): - super(MixedInterpolator, self).__init__(expr, V, bcs=bcs, **kwargs) - expr = self.ufl_interpolate - self.arguments = expr.arguments() + Parameters + ---------- + expr : Interpolate + Symbolic Interpolate expression. + """ + super().__init__(expr) + + def _get_sub_interpolators( + self, bcs: Iterable[DirichletBC] | None = None + ) -> dict[tuple[int] | tuple[int, int], tuple[Interpolator, list[DirichletBC]]]: + """Gets `Interpolator`s and boundary conditions for each sub-Interpolate + in the mixed expression. 
+ + Returns + ------- + dict[tuple[int] | tuple[int, int], tuple[Interpolator, list[DirichletBC]]] + A map from block index tuples to `Interpolator`s and bcs. + """ # Get the primal spaces - spaces = tuple(a.function_space().dual() if isinstance(a, Coargument) else a.function_space() - for a in self.arguments) + spaces = tuple( + a.function_space().dual() if isinstance(a, Coargument) else a.function_space() for a in self.interpolate_args + ) # TODO consider a stricter equality test for indexed MixedFunctionSpace # See https://github.com/firedrakeproject/firedrake/issues/4668 space_equals = lambda V1, V2: V1 == V2 and V1.parent == V2.parent and V1.index == V2.index # We need a Coargument in order to split the Interpolate - needs_action = len([a for a in self.arguments if isinstance(a, Coargument)]) == 0 + needs_action = not any(isinstance(a, Coargument) for a in self.interpolate_args) if needs_action: - dual_arg, operand = expr.argument_slots() # Split the dual argument - dual_split = dict(firedrake.formmanipulation.split_form(dual_arg)) + dual_split = dict(split_form(self.dual_arg)) # Create the Jacobian to be split into blocks - expr = expr._ufl_expr_reconstruct_(operand, V) + self.ufl_interpolate = self.ufl_interpolate._ufl_expr_reconstruct_(self.operand, self.target_space) - Isub = {} - # Split in the arguments of the Interpolate - for indices, form in firedrake.formmanipulation.split_form(expr): - if isinstance(form, ufl.ZeroBaseForm): + # Get sub-interpolators and sub-bcs for each block + Isub: dict[tuple[int] | tuple[int, int], tuple[Interpolator, list[DirichletBC]]] = {} + for indices, form in split_form(self.ufl_interpolate): + if isinstance(form, ZeroBaseForm): # Ensure block sparsity continue - vi, _ = form.argument_slots() - Vtarget = vi.function_space().dual() sub_bcs = [] for space, index in zip(spaces, indices): subspace = space.sub(index) @@ -1740,44 +1657,55 @@ def __init__(self, expr, V, bcs=None, **kwargs): if needs_action: # Take the action of 
each sub-cofunction against each block form = action(form, dual_split[indices[-1:]]) + Isub[indices] = (get_interpolator(form), sub_bcs) - Isub[indices] = Interpolator(form, Vtarget, bcs=sub_bcs, **kwargs) - - self._sub_interpolators = Isub - self.callable = self._assemble_matnest - - def __getitem__(self, item): - return self._sub_interpolators[item] + return Isub - def __iter__(self): - return iter(self._sub_interpolators) - - def _assemble_matnest(self): - """Assemble the operator.""" - shape = tuple(len(a.function_space()) for a in self.arguments) + def _build_matnest( + self, + Isub: dict[tuple[int] | tuple[int, int], tuple[Interpolator, list[DirichletBC]]], + sub_mat_type: Literal["aij", "baij"], + ) -> PETSc.Mat: + """Return a PETSc nested matrix built from sub-interpolator matrices.""" + shape = tuple(len(a.function_space()) for a in self.interpolate_args) blocks = numpy.full(shape, PETSc.Mat(), dtype=object) - # Assemble the sparse block matrix - for i in self: - blocks[i] = self[i].callable().handle - petscmat = PETSc.Mat().createNest(blocks) - tensor = firedrake.AssembledMatrix(self.arguments, self.bcs, petscmat) - return tensor.M - - def _interpolate(self, *function, output=None, adjoint=False, **kwargs): - """Assemble the action.""" - rank = len(self.arguments) - if rank == 0: - result = sum(self[i].assemble(**kwargs) for i in self) - return output.assign(result) if output else result - - if output is None: - output = firedrake.Function(self.arguments[-1].function_space().dual()) - - if rank == 1: - for k, sub_tensor in enumerate(output.subfunctions): - sub_tensor.assign(sum(self[i].assemble(**kwargs) for i in self if i[0] == k)) - elif rank == 2: - for k, sub_tensor in enumerate(output.subfunctions): - sub_tensor.assign(sum(self[i]._interpolate(*function, adjoint=adjoint, **kwargs) - for i in self if i[0] == k)) - return output + for indices, (interp, sub_bcs) in Isub.items(): + blocks[indices] = interp._get_callable(bcs=sub_bcs, 
mat_type=sub_mat_type)() + return PETSc.Mat().createNest(blocks) + + def _build_aij( + self, + Isub: dict[tuple[int] | tuple[int, int], tuple[Interpolator, list[DirichletBC]]], + ) -> PETSc.Mat: + """Return a PETSc AIJ matrix built from sub-interpolator matrices by converting a + nested matrix.""" + matnest = self._build_matnest(Isub, sub_mat_type="aij") + return matnest.convert("aij") + + def _get_callable(self, tensor=None, bcs=None, mat_type=None, sub_mat_type=None): + mat_type = mat_type or "aij" + sub_mat_type = sub_mat_type or "aij" + Isub = self._get_sub_interpolators(bcs=bcs) + V_dest = self.ufl_interpolate.function_space() or self.target_space + f = tensor or Function(V_dest) + if self.rank == 2: + if mat_type == "nest": + callable = partial(self._build_matnest, Isub, sub_mat_type) + else: + assert mat_type == "aij" + callable = partial(self._build_aij, Isub) + elif self.rank == 1: + def callable() -> Function | Cofunction: + for k, sub_tensor in enumerate(f.subfunctions): + sub_tensor.assign(sum( + interp.assemble(bcs=sub_bcs) for indices, (interp, sub_bcs) in Isub.items() if indices[0] == k + )) + return f + else: + def callable() -> Number: + return sum(interp.assemble(bcs=sub_bcs) for (interp, sub_bcs) in Isub.values()) + return callable + + @property + def _allowed_mat_types(self): + return {"aij", "nest", None} diff --git a/firedrake/linear_solver.py b/firedrake/linear_solver.py index 1e060ce906..d4b7e70b88 100644 --- a/firedrake/linear_solver.py +++ b/firedrake/linear_solver.py @@ -2,7 +2,6 @@ from firedrake.cofunction import Cofunction from firedrake.matrix import MatrixBase from firedrake.petsc import PETSc -from pyop2.mpi import internal_comm from firedrake.variational_solver import LinearVariationalProblem, LinearVariationalSolver __all__ = ["LinearSolver"] @@ -55,7 +54,6 @@ def __init__(self, A, *, P=None, **kwargs): self.A = A self.comm = A.comm - self._comm = internal_comm(self.comm, self) self.P = P if P is not None else A self.ksp = 
self.snes.ksp diff --git a/firedrake/matrix.py b/firedrake/matrix.py index 2f33841289..c9f363a581 100644 --- a/firedrake/matrix.py +++ b/firedrake/matrix.py @@ -2,7 +2,6 @@ import ufl from pyop2 import op2 -from pyop2.mpi import internal_comm from pyop2.utils import as_tuple from firedrake.petsc import PETSc @@ -52,7 +51,6 @@ def __init__(self, a, bcs, mat_type, fc_params=None): bcs = () self.bcs = bcs self.comm = test.function_space().comm - self._comm = internal_comm(self.comm, self) self.block_shape = (len(test.function_space()), len(trial.function_space())) self.mat_type = mat_type @@ -209,7 +207,7 @@ def __init__(self, a, bcs, *args, **kwargs): col_bcs=self.bcs, fc_params=fc_params, appctx=appctx) - self.petscmat = PETSc.Mat().create(comm=self._comm) + self.petscmat = PETSc.Mat().create(comm=self.comm) self.petscmat.setType("python") self.petscmat.setSizes((ctx.row_sizes, ctx.col_sizes), bsize=ctx.block_size) diff --git a/firedrake/matrix_free/operators.py b/firedrake/matrix_free/operators.py index 18a767557d..554663428d 100644 --- a/firedrake/matrix_free/operators.py +++ b/firedrake/matrix_free/operators.py @@ -4,12 +4,13 @@ from mpi4py import MPI import numpy -from pyop2.mpi import internal_comm, temp_internal_comm +from pyop2.mpi import temp_internal_comm from firedrake.ufl_expr import adjoint, action from firedrake.formmanipulation import ExtractSubBlock from firedrake.bcs import DirichletBC, EquationBCSplit +from functools import cached_property + from firedrake.petsc import PETSc -from firedrake.utils import cached_property from firedrake.function import Function from ufl.form import ZeroBaseForm @@ -94,7 +95,6 @@ def __init__(self, a, row_bcs=[], col_bcs=[], self.a = a self.aT = adjoint(a) self.comm = a.arguments()[0].function_space().comm - self._comm = internal_comm(self.comm, self) self.fc_params = fc_params self.appctx = appctx @@ -352,10 +352,12 @@ def getInfo(self, mat, info=None): if info == PETSc.Mat.InfoType.LOCAL: return {"memory": memory} 
elif info == PETSc.Mat.InfoType.GLOBAL_SUM: - gmem = self._comm.allreduce(memory, op=MPI.SUM) + with temp_internal_comm(self.comm) as icomm: + gmem = icomm.allreduce(memory, op=MPI.SUM) return {"memory": gmem} elif info == PETSc.Mat.InfoType.GLOBAL_MAX: - gmem = self._comm.allreduce(memory, op=MPI.MAX) + with temp_internal_comm(self.comm) as icomm: + gmem = icomm.allreduce(memory, op=MPI.MAX) return {"memory": gmem} else: raise ValueError("Unknown info type %s" % info) @@ -377,11 +379,20 @@ def createSubMatrix(self, mat, row_is, col_is, target=None): row_ises = self._y.function_space().dof_dset.field_ises col_ises = self._x.function_space().dof_dset.field_ises - row_inds = find_sub_block(row_is, row_ises, comm=self.comm) - if row_is == col_is and row_ises == col_ises: - col_inds = row_inds - else: - col_inds = find_sub_block(col_is, col_ises, comm=self.comm) + try: + row_inds = find_sub_block(row_is, row_ises, comm=self.comm) + if row_is == col_is and row_ises == col_ises: + col_inds = row_inds + else: + col_inds = find_sub_block(col_is, col_ises, comm=self.comm) + except LookupError: + # Attemping to extract a submatrix that does not match with a subfield. + # Use default PETSc implementation (MatCreateSubMatrixVirtual) via MATSHELL instead. 
+ popmethod = self.createSubMatrix + self.createSubMatrix = None + submat = mat.createSubMatrix(row_is, col_is) + self.createSubMatrix = popmethod + return submat splitter = ExtractSubBlock() asub = splitter.split(self.a, @@ -417,7 +428,7 @@ def createSubMatrix(self, mat, row_is, col_is, target=None): fc_params=self.fc_params, appctx=self.appctx) submat_ctx.on_diag = self.on_diag and row_inds == col_inds - submat = PETSc.Mat().create(comm=self._comm) + submat = PETSc.Mat().create(comm=self.comm) submat.setType("python") submat.setSizes((submat_ctx.row_sizes, submat_ctx.col_sizes), bsize=submat_ctx.block_size) @@ -436,7 +447,7 @@ def duplicate(self, mat, copy): col_bcs=self.bcs_col, fc_params=self.fc_params, appctx=self.appctx) - newmat = PETSc.Mat().create(comm=self._comm) + newmat = PETSc.Mat().create(comm=self.comm) newmat.setType("python") newmat.setSizes((newmat_ctx.row_sizes, newmat_ctx.col_sizes), bsize=newmat_ctx.block_size) diff --git a/firedrake/mesh.py b/firedrake/mesh.py index 7ceac6f55a..f0ec0debdf 100644 --- a/firedrake/mesh.py +++ b/firedrake/mesh.py @@ -1,3 +1,4 @@ +import dataclasses import numpy as np import ctypes import os @@ -10,6 +11,7 @@ from collections import OrderedDict, defaultdict from collections.abc import Sequence from ufl.classes import ReferenceGrad +from ufl.cell import CellSequence from ufl.domain import extract_unique_domain import enum import numbers @@ -17,11 +19,14 @@ import rtree from textwrap import dedent from pathlib import Path +import typing +import warnings from pyop2 import op2 from pyop2.mpi import ( - MPI, COMM_WORLD, internal_comm, temp_internal_comm + MPI, COMM_WORLD, temp_internal_comm ) +from functools import cached_property from pyop2.utils import as_tuple import petsctools from petsctools import OptionsManager, get_external_packages @@ -33,11 +38,11 @@ import firedrake.cython.spatialindex as spatialindex import firedrake.utils as utils from firedrake.utils import as_cstr, IntType, RealType -from 
firedrake.logging import info_red +from firedrake.logging import info_red, logger from firedrake.parameters import parameters from firedrake.petsc import PETSc, DEFAULT_PARTITIONER from firedrake.adjoint_utils import MeshGeometryMixin -from pyadjoint import stop_annotating +from firedrake.exceptions import VertexOnlyMeshMissingPointsError, NonUniqueMeshSequenceError import gem try: @@ -50,12 +55,16 @@ from finat.element_factory import as_fiat_cell +if typing.TYPE_CHECKING: + from firedrake import CoordinatelessFunction, Function + + __all__ = [ 'Mesh', 'ExtrudedMesh', 'VertexOnlyMesh', 'RelabeledMesh', - 'SubDomainData', 'unmarked', 'DistributedMeshOverlapType', + 'SubDomainData', 'UNMARKED', 'DistributedMeshOverlapType', 'DEFAULT_MESH_NAME', 'MeshGeometry', 'MeshTopology', 'AbstractMeshTopology', 'ExtrudedMeshTopology', 'VertexOnlyMeshTopology', - 'VertexOnlyMeshMissingPointsError', + 'MeshSequenceGeometry', 'MeshSequenceTopology', 'Submesh' ] @@ -74,12 +83,18 @@ ("interval * interval", 3)] -unmarked = -1 +UNMARKED = -1 """A mesh marker that selects all entities that are not explicitly marked.""" DEFAULT_MESH_NAME = "_".join(["firedrake", "default"]) """The default name of the mesh.""" +DISTRIBUTION_PARAMETERS_NOOP = { + "partition": False, + "overlap_type": (DistributedMeshOverlapType.NONE, 0), +} +"""Distribution parameters for derived meshes (RelabeledMesh/Submesh).""" + def _generate_default_submesh_name(name): """Generate the default submesh name from the mesh name. 
@@ -156,13 +171,13 @@ class _Facets(object): The unique_markers argument **must** be the same on all processes.""" @PETSc.Log.EventDecorator() - def __init__(self, mesh, facets, classes, kind, facet_cell, local_facet_number, + def __init__(self, mesh, facets, classes, set_, kind, facet_cell, local_facet_number, unique_markers=None): self.mesh = mesh self.facets = facets - classes = as_tuple(classes, int, 3) self.classes = classes + self.set = set_ self.kind = kind assert kind in ["interior", "exterior"] @@ -187,25 +202,6 @@ def __init__(self, mesh, facets, classes, kind, facet_cell, local_facet_number, self.unique_markers = [] if unique_markers is None else unique_markers self._subsets = {} - @utils.cached_property - def set(self): - size = self.classes - if isinstance(self.mesh, ExtrudedMeshTopology): - label = "%s_facets" % self.kind - layers = self.mesh.entity_layers(1, label) - base = getattr(self.mesh._base_mesh, label).set - return op2.ExtrudedSet(base, layers=layers) - return op2.Set(size, "%sFacets" % self.kind.capitalize()[:3], - comm=self.mesh.comm) - - @utils.cached_property - def _null_subset(self): - '''Empty subset for the case in which there are no facets with - a given marker value. This is required because not all - markers need be represented on all processors.''' - - return op2.Subset(self.set, []) - @PETSc.Log.EventDecorator() def measure_set(self, integral_type, subdomain_id, all_integer_subdomain_ids=None): @@ -256,7 +252,7 @@ def subset(self, markers): :param markers: integer marker id or an iterable of marker ids (or ``None``, for an empty subset). 
""" - valid_markers = set([unmarked]).union(self.unique_markers) + valid_markers = set([UNMARKED]).union(self.unique_markers) markers = as_tuple(markers, numbers.Integral) try: return self._subsets[markers] @@ -270,7 +266,7 @@ def subset(self, markers): # markers marked_points_list = [] for i in markers: - if i == unmarked: + if i == UNMARKED: _markers = self.mesh.topology_dm.getLabelIdIS(dmcommon.FACE_SETS_LABEL).indices # Can exclude points labeled with i\in markers here, # as they will be included in the below anyway. @@ -280,9 +276,16 @@ def subset(self, markers): marked_points_list.append(self.mesh.topology_dm.getStratumIS(dmcommon.FACE_SETS_LABEL, i).indices) if marked_points_list: _, indices, _ = np.intersect1d(self.facets, np.concatenate(marked_points_list), return_indices=True) - return self._subsets.setdefault(markers, op2.Subset(self.set, indices)) else: - return self._subsets.setdefault(markers, self._null_subset) + indices = np.empty(0, dtype=IntType) + + with temp_internal_comm(self.mesh.comm) as icomm: + num_global_indices = icomm.reduce(len(indices), MPI.SUM, root=0) + if num_global_indices == 0 and icomm.rank == 0: + logger.warn(f"Subdomain {markers} is empty. This is likely an error. 
" + "Did you choose the right label?") + + return self._subsets.setdefault(markers, op2.Subset(self.set, indices)) def _collect_unmarked_points(self, markers): """Collect points that are not marked by markers.""" @@ -296,13 +299,13 @@ def _collect_unmarked_points(self, markers): else: return self.facets - @utils.cached_property + @cached_property def facet_cell_map(self): """Map from facets to cells.""" return op2.Map(self.set, self.mesh.cell_set, self._rank, self.facet_cell, "facet_to_cell_map") - @utils.cached_property + @cached_property def local_facet_orientation_dat(self): """Dat for the local facet orientations.""" dtype = gem.uint_type @@ -317,22 +320,36 @@ def local_facet_orientation_dat(self): local_facet_end = offsets[-2] map_from_cell_to_facet_orientations = self.mesh.entity_orientations[:, local_facet_start:local_facet_end] # Make output data; - # this is a map from an exterior/interior facet to the corresponding local facet orientation/orientations. - # Halo data are required by design, but not actually used. - # -- Reshape as (-1, self._rank) to uniformly handle exterior and interior facets. - data = np.empty_like(self.local_facet_dat.data_ro_with_halos).reshape((-1, self._rank)) - data.fill(np.iinfo(dtype).max) - # Set local facet orientations on the block corresponding to the owned facets; i.e., data[:shape[0], :] below. - local_facets = self.local_facet_dat.data_ro # do not need halos. - # -- Reshape as (-1, self._rank) to uniformly handle exterior and interior facets. - local_facets = local_facets.reshape((-1, self._rank)) - shape = local_facets.shape - map_from_owned_facet_to_cells = self.facet_cell[:shape[0], :] - data[:shape[0], :] = np.take_along_axis( - map_from_cell_to_facet_orientations[map_from_owned_facet_to_cells], - local_facets.reshape(shape + (1, )), # reshape as required by take_along_axis. + # this is a map from an exterior/interior facet to the corresponding + # local facet orientation/orientations. 
+ # The local facet orientation/orientations of a halo facet is/are also + # used in some submesh problems. + # + # Example: + # + # +-------+-------+ + # | | | + # meshA | g g o | + # | | | + # +-------+-------+ + # +-------+ + # | | + # meshB o o | o: owned + # | | g: ghost + # +-------+ + # + # form = FacetNormal(meshA)[0] * ds(meshB, interface) + # + # Reshape local_facets as (-1, self._rank) to uniformly handle exterior and interior facets. + local_facets = self.local_facet_dat.data_ro_with_halos.reshape((-1, self._rank)) + # Make slice for masking out rows for which orientations are not needed. + slice_ = (self.facet_cell != -1).all(axis=1) + data = np.full_like(local_facets, np.iinfo(dtype).max) + data[slice_, :] = np.take_along_axis( + map_from_cell_to_facet_orientations[self.facet_cell[slice_, :]], + local_facets.reshape(local_facets.shape + (1, ))[slice_, :, :], # reshape as required by take_along_axis. axis=2, - ).reshape(shape) + ).reshape((-1, self._rank)) return op2.Dat( self.local_facet_dat.dataset, data, @@ -420,7 +437,7 @@ def _from_triangle(filename, dim, comm): tdim = icomm.bcast(None, root=0) cells = None coordinates = None - plex = plex_from_cell_list(tdim, cells, coordinates, icomm) + plex = plex_from_cell_list(tdim, cells, coordinates, comm) # Apply boundary IDs if icomm.rank == 0: @@ -455,19 +472,19 @@ def plex_from_cell_list(dim, cells, coords, comm, name=None): :arg comm: communicator to build the mesh on. Must be a PyOP2 internal communicator :kwarg name: name of the plex """ + # These types are /correct/, DMPlexCreateFromCellList wants int + # and double (not PetscInt, PetscReal). with temp_internal_comm(comm) as icomm: - # These types are /correct/, DMPlexCreateFromCellList wants int - # and double (not PetscInt, PetscReal). 
if comm.rank == 0: cells = np.asarray(cells, dtype=np.int32) coords = np.asarray(coords, dtype=np.double) - comm.bcast(cells.shape, root=0) - comm.bcast(coords.shape, root=0) + icomm.bcast(cells.shape, root=0) + icomm.bcast(coords.shape, root=0) # Provide the actual data on rank 0. - plex = PETSc.DMPlex().createFromCellList(dim, cells, coords, comm=icomm) + plex = PETSc.DMPlex().createFromCellList(dim, cells, coords, comm=comm) else: - cell_shape = list(comm.bcast(None, root=0)) - coord_shape = list(comm.bcast(None, root=0)) + cell_shape = list(icomm.bcast(None, root=0)) + coord_shape = list(icomm.bcast(None, root=0)) cell_shape[0] = 0 coord_shape[0] = 0 # Provide empty plex on other ranks @@ -475,7 +492,7 @@ def plex_from_cell_list(dim, cells, coords, comm, name=None): plex = PETSc.DMPlex().createFromCellList(dim, np.zeros(cell_shape, dtype=np.int32), np.zeros(coord_shape, dtype=np.double), - comm=icomm) + comm=comm) if name is not None: plex.setName(name) return plex @@ -526,10 +543,9 @@ def __init__(self, topology_dm, name, reorder, sfXB, perm_is, distribution_name, self.sfXB = sfXB r"The PETSc SF that pushes the global point number slab [0, NX) to input (naive) plex." 
self.submesh_parent = submesh_parent + self.sfBC_orig = None # User comm self.user_comm = comm - # Internal comm - self._comm = internal_comm(self.user_comm, self) dmcommon.label_facets(self.topology_dm) self._distribute() self._grown_halos = False @@ -704,6 +720,12 @@ def entity_orientations(self): """ pass + @property + @abc.abstractmethod + def local_cell_orientation_dat(self): + """Local cell orientation dat.""" + pass + @abc.abstractmethod def _facets(self, kind): pass @@ -808,12 +830,12 @@ def size(self, d): def cell_dimension(self): """Returns the cell dimension.""" - return self.ufl_cell().topological_dimension() + return self.ufl_cell().topological_dimension def facet_dimension(self): """Returns the facet dimension.""" # Facets have co-dimension 1 - return self.ufl_cell().topological_dimension() - 1 + return self.ufl_cell().topological_dimension - 1 @property @abc.abstractmethod @@ -918,22 +940,28 @@ def mark_entities(self, tf, label_value, label_name=None): """ pass - @utils.cached_property + @cached_property def extruded_periodic(self): return self.cell_set._extruded_periodic + def __iter__(self): + yield self + + def unique(self): + return self + # submesh - @utils.cached_property - def submesh_ancesters(self): - """Tuple of submesh ancesters.""" + @cached_property + def submesh_ancestors(self): + """Tuple of submesh ancestors.""" if self.submesh_parent: - return (self, ) + self.submesh_parent.submesh_ancesters + return (self, ) + self.submesh_parent.submesh_ancestors else: return (self, ) - def submesh_youngest_common_ancester(self, other): - """Return the youngest common ancester of self and other. + def submesh_youngest_common_ancestor(self, other): + """Return the youngest common ancestor of self and other. Parameters ---------- @@ -943,18 +971,18 @@ def submesh_youngest_common_ancester(self, other): Returns ------- AbstractMeshTopology or None - Youngest common ancester or None if not found. + Youngest common ancestor or None if not found. 
""" # self --- ... --- m --- common --- common --- common # / # other --- ... --- m - self_ancesters = list(self.submesh_ancesters) - other_ancesters = list(other.submesh_ancesters) + self_ancestors = list(self.submesh_ancestors) + other_ancestors = list(other.submesh_ancestors) c = None - while self_ancesters and other_ancesters: - a = self_ancesters.pop() - b = other_ancesters.pop() + while self_ancestors and other_ancestors: + a = self_ancestors.pop() + b = other_ancestors.pop() if a is b: c = a else: @@ -999,17 +1027,17 @@ def submesh_map_composed(self, other, other_integral_type, other_subset_points): Tuple of `op2.ComposedMap` from other to self, integral_type on self, and points on self. """ - common = self.submesh_youngest_common_ancester(other) + common = self.submesh_youngest_common_ancestor(other) if common is None: raise ValueError(f"Unable to create composed map between (sub)meshes: {self} and {other} are unrelated") maps = [] integral_type = other_integral_type subset_points = other_subset_points - aa = other.submesh_ancesters + aa = other.submesh_ancestors for a in aa[:aa.index(common)]: m, integral_type, subset_points = a.submesh_map_child_parent(integral_type, subset_points) maps.append(m) - bb = self.submesh_ancesters + bb = self.submesh_ancestors for b in reversed(bb[:bb.index(common)]): m, integral_type, subset_points = b.submesh_map_child_parent(integral_type, subset_points, reverse=True) maps.append(m) @@ -1118,6 +1146,7 @@ def _distribute(self): sfBC = plex.distribute(overlap=0) plex.setName(original_name) self.sfBC = sfBC + self.sfBC_orig = sfBC # plex carries a new dm after distribute, which # does not inherit partitioner from the old dm. 
# It probably makes sense as chaco does not work @@ -1151,7 +1180,7 @@ def _add_overlap(self): def _mark_entity_classes(self): dmcommon.mark_entity_classes(self.topology_dm) - @utils.cached_property + @cached_property def _ufl_cell(self): plex = self.topology_dm tdim = plex.getDimension() @@ -1163,7 +1192,8 @@ def _ufl_cell(self): nfacets = plex.getConeSize(cStart) # TODO: this needs to be updated for mixed-cell meshes. - nfacets = self._comm.allreduce(nfacets, op=MPI.MAX) + with temp_internal_comm(self.comm) as icomm: + nfacets = icomm.allreduce(nfacets, op=MPI.MAX) # Note that the geometric dimension of the cell is not set here # despite it being a property of a UFL cell. It will default to @@ -1174,10 +1204,10 @@ def _ufl_cell(self): # corresponding UFL mesh. return ufl.Cell(_cells[tdim][nfacets]) - @utils.cached_property + @cached_property def _ufl_mesh(self): cell = self._ufl_cell - return ufl.Mesh(finat.ufl.VectorElement("Lagrange", cell, 1, dim=cell.topological_dimension())) + return ufl.Mesh(finat.ufl.VectorElement("Lagrange", cell, 1, dim=cell.topological_dimension)) @property def _default_reordering(self): @@ -1200,7 +1230,7 @@ def dm_cell_types(self): """All DM.PolytopeTypes of cells in the mesh.""" return dmcommon.get_dm_cell_types(self.topology_dm) - @utils.cached_property + @cached_property def cell_closure(self): """2D array of ordered cell closures @@ -1214,9 +1244,10 @@ def cell_closure(self): vertex_numbering = self._vertex_numbering.createGlobalSection(plex.getPointSF()) cell = self.ufl_cell() - assert tdim == cell.topological_dimension() + assert tdim == cell.topological_dimension if self.submesh_parent is not None and \ - not (self.submesh_parent.ufl_cell().cellname() == "hexahedron" and cell.cellname() == "quadrilateral"): + not (self.submesh_parent.ufl_cell().cellname == "hexahedron" and cell.cellname == "quadrilateral") and \ + len(self.submesh_parent.dm_cell_types) == 1: # Codim-1 submesh of a hex mesh (i.e. 
a quad submesh) can not # inherit cell_closure from the hex mesh as the cell_closure # must follow the special orientation restriction. This means @@ -1235,7 +1266,7 @@ def cell_closure(self): self.submesh_parent.cell_closure, entity_per_cell, ) - elif cell.is_simplex(): + elif cell.is_simplex: topology = FIAT.ufc_cell(cell).get_topology() entity_per_cell = np.zeros(len(topology), dtype=IntType) for d, ents in topology.items(): @@ -1244,7 +1275,7 @@ def cell_closure(self): return dmcommon.closure_ordering(plex, vertex_numbering, cell_numbering, entity_per_cell) - elif cell.cellname() == "quadrilateral": + elif cell.cellname == "quadrilateral": petsctools.cite("Homolya2016") petsctools.cite("McRae2016") # Quadrilateral mesh @@ -1263,7 +1294,7 @@ def cell_closure(self): return dmcommon.quadrilateral_closure_ordering( plex, vertex_numbering, cell_numbering, cell_orientations) - elif cell.cellname() == "hexahedron": + elif cell.cellname == "hexahedron": # TODO: Should change and use create_cell_closure() for all cell types. topology = FIAT.ufc_cell(cell).get_topology() closureSize = sum([len(ents) for _, ents in topology.items()]) @@ -1271,18 +1302,27 @@ def cell_closure(self): else: raise NotImplementedError("Cell type '%s' not supported." 
% cell) - @utils.cached_property + @cached_property def entity_orientations(self): return dmcommon.entity_orientations(self, self.cell_closure) + @cached_property + def local_cell_orientation_dat(self): + """Local cell orientation dat.""" + return op2.Dat( + op2.DataSet(self.cell_set, 1), + self.entity_orientations[:, [-1]], + gem.uint_type, + f"{self.name}_local_cell_orientation" + ) + @PETSc.Log.EventDecorator() def _facets(self, kind): if kind not in ["interior", "exterior"]: raise ValueError("Unknown facet type '%s'" % kind) dm = self.topology_dm - facets, classes = dmcommon.get_facets_by_class(dm, (kind + "_facets"), - self._facet_ordering) + facets, classes, set_ = getattr(self, "_" + kind + "_facet_numbers_classes_set") label = dmcommon.FACE_SETS_LABEL if dm.hasLabel(label): from mpi4py import MPI @@ -1293,8 +1333,9 @@ def merge_ids(x, y, datatype): op = MPI.Op.Create(merge_ids, commute=True) - unique_markers = np.asarray(sorted(self._comm.allreduce(local_markers, op=op)), - dtype=IntType) + with temp_internal_comm(self.comm) as icomm: + unique_markers = np.asarray(sorted(icomm.allreduce(local_markers, op=op)), + dtype=IntType) op.Free() else: unique_markers = None @@ -1304,23 +1345,43 @@ def merge_ids(x, y, datatype): self._cell_numbering, self.cell_closure) - point2facetnumber = np.full(facets.max(initial=0)+1, -1, dtype=IntType) + _, pEnd = dm.getChart() + point2facetnumber = np.full(pEnd, -1, dtype=IntType) point2facetnumber[facets] = np.arange(len(facets), dtype=IntType) - obj = _Facets(self, facets, classes, kind, + obj = _Facets(self, facets, classes, set_, kind, facet_cell, local_facet_number, unique_markers=unique_markers) obj.point2facetnumber = point2facetnumber return obj - @utils.cached_property + @cached_property def exterior_facets(self): return self._facets("exterior") - @utils.cached_property + @cached_property def interior_facets(self): return self._facets("interior") - @utils.cached_property + def _facet_numbers_classes_set(self, kind): + 
if kind not in ["interior", "exterior"]: + raise ValueError("Unknown facet type '%s'" % kind) + # Can not call target.{interior, exterior}_facets.facets + # if target is a mixed cell mesh (cell_closure etc. can not be defined), + # so directly call dmcommon.get_facets_by_class. + _numbers, _classes = dmcommon.get_facets_by_class(self.topology_dm, (kind + "_facets"), self._facet_ordering) + _classes = as_tuple(_classes, int, 3) + _set = op2.Set(_classes, f"{kind.capitalize()[:3]}Facets", comm=self.comm) + return _numbers, _classes, _set + + @cached_property + def _exterior_facet_numbers_classes_set(self): + return self._facet_numbers_classes_set("exterior") + + @cached_property + def _interior_facet_numbers_classes_set(self): + return self._facet_numbers_classes_set("interior") + + @cached_property def cell_to_facets(self): """Returns a :class:`pyop2.types.dat.Dat` that maps from a cell index to the local facet types on each cell, including the relevant subdomain markers. @@ -1365,10 +1426,10 @@ def num_entities(self, d): eStart, eEnd = self.topology_dm.getDepthStratum(d) return eEnd - eStart - @utils.cached_property + @cached_property def cell_set(self): size = list(self._entity_classes[self.cell_dimension(), :]) - return op2.Set(size, "Cells", comm=self._comm) + return op2.Set(size, "Cells", comm=self.comm) @PETSc.Log.EventDecorator() def _set_partitioner(self, plex, distribute, partitioner_type=None): @@ -1451,7 +1512,7 @@ def mark_entities(self, tf, label_value, label_name=None): label_name = label_name or dmcommon.CELL_SETS_LABEL elif (elem.family() == "HDiv Trace" and elem.degree() == 0 and self.cell_dimension() > 1) or \ (elem.family() == "Lagrange" and elem.degree() == 1 and self.cell_dimension() == 1) or \ - (elem.family() == "Q" and elem.degree() == 2 and self.ufl_cell().cellname() == "hexahedron"): + (elem.family() == "Q" and elem.degree() == 2 and self.ufl_cell().cellname == "hexahedron"): # facets height = 1 label_name = label_name or 
dmcommon.FACE_SETS_LABEL @@ -1480,45 +1541,77 @@ def _submesh_make_entity_entity_map(self, from_set, to_set, from_points, to_poin values[from_indices] = to_indices return op2.Map(from_set, to_set, 1, values.reshape((-1, 1)), f"{self}_submesh_map_{from_set}_{to_set}") - @utils.cached_property + @cached_property def submesh_child_cell_parent_cell_map(self): return self._submesh_make_entity_entity_map(self.cell_set, self.submesh_parent.cell_set, self.cell_closure[:, -1], self.submesh_parent.cell_closure[:, -1], True) - @utils.cached_property + @cached_property def submesh_child_exterior_facet_parent_exterior_facet_map(self): - return self._submesh_make_entity_entity_map(self.exterior_facets.set, self.submesh_parent.exterior_facets.set, self.exterior_facets.facets, self.submesh_parent.exterior_facets.facets, True) + _self_numbers, _, _self_set = self._exterior_facet_numbers_classes_set + _parent_numbers, _, _parent_set = self.submesh_parent._exterior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(_self_set, _parent_set, _self_numbers, _parent_numbers, True) - @utils.cached_property + @cached_property def submesh_child_exterior_facet_parent_interior_facet_map(self): - return self._submesh_make_entity_entity_map(self.exterior_facets.set, self.submesh_parent.interior_facets.set, self.exterior_facets.facets, self.submesh_parent.interior_facets.facets, True) + _self_numbers, _, _self_set = self._exterior_facet_numbers_classes_set + _parent_numbers, _, _parent_set = self.submesh_parent._interior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(_self_set, _parent_set, _self_numbers, _parent_numbers, True) - @utils.cached_property + @cached_property def submesh_child_interior_facet_parent_exterior_facet_map(self): raise RuntimeError("Should never happen") - @utils.cached_property + @cached_property def submesh_child_interior_facet_parent_interior_facet_map(self): - return 
self._submesh_make_entity_entity_map(self.interior_facets.set, self.submesh_parent.interior_facets.set, self.interior_facets.facets, self.submesh_parent.interior_facets.facets, True) + _self_numbers, _, _self_set = self._interior_facet_numbers_classes_set + _parent_numbers, _, _parent_set = self.submesh_parent._interior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(_self_set, _parent_set, _self_numbers, _parent_numbers, True) + + @cached_property + def submesh_child_cell_parent_interior_facet_map(self): + _parent_numbers, _, _parent_set = self.submesh_parent._interior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(self.cell_set, _parent_set, self.cell_closure[:, -1], _parent_numbers, True) - @utils.cached_property + @cached_property + def submesh_child_cell_parent_exterior_facet_map(self): + _parent_numbers, _, _parent_set = self.submesh_parent._exterior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(self.cell_set, _parent_set, self.cell_closure[:, -1], _parent_numbers, True) + + @cached_property def submesh_parent_cell_child_cell_map(self): return self._submesh_make_entity_entity_map(self.submesh_parent.cell_set, self.cell_set, self.submesh_parent.cell_closure[:, -1], self.cell_closure[:, -1], False) - @utils.cached_property + @cached_property def submesh_parent_exterior_facet_child_exterior_facet_map(self): - return self._submesh_make_entity_entity_map(self.submesh_parent.exterior_facets.set, self.exterior_facets.set, self.submesh_parent.exterior_facets.facets, self.exterior_facets.facets, False) + _self_numbers, _, _self_set = self._exterior_facet_numbers_classes_set + _parent_numbers, _, _parent_set = self.submesh_parent._exterior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(_parent_set, _self_set, _parent_numbers, _self_numbers, False) - @utils.cached_property + @cached_property def submesh_parent_exterior_facet_child_interior_facet_map(self): raise 
RuntimeError("Should never happen") - @utils.cached_property + @cached_property def submesh_parent_interior_facet_child_exterior_facet_map(self): - return self._submesh_make_entity_entity_map(self.submesh_parent.interior_facets.set, self.exterior_facets.set, self.submesh_parent.interior_facets.facets, self.exterior_facets.facets, False) + _self_numbers, _, _self_set = self._exterior_facet_numbers_classes_set + _parent_numbers, _, _parent_set = self.submesh_parent._interior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(_parent_set, _self_set, _parent_numbers, _self_numbers, False) - @utils.cached_property + @cached_property def submesh_parent_interior_facet_child_interior_facet_map(self): - return self._submesh_make_entity_entity_map(self.submesh_parent.interior_facets.set, self.interior_facets.set, self.submesh_parent.interior_facets.facets, self.interior_facets.facets, False) + _self_numbers, _, _self_set = self._interior_facet_numbers_classes_set + _parent_numbers, _, _parent_set = self.submesh_parent._interior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(_parent_set, _self_set, _parent_numbers, _self_numbers, False) + + @cached_property + def submesh_parent_exterior_facet_child_cell_map(self): + _parent_numbers, _, _parent_set = self.submesh_parent._exterior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(_parent_set, self.cell_set, _parent_numbers, self.cell_closure[:, -1], False) + + @cached_property + def submesh_parent_interior_facet_child_cell_map(self): + _parent_numbers, _, _parent_set = self.submesh_parent._interior_facet_numbers_classes_set + return self._submesh_make_entity_entity_map(_parent_set, self.cell_set, _parent_numbers, self.cell_closure[:, -1], False) def submesh_map_child_parent(self, source_integral_type, source_subset_points, reverse=False): """Return the map from submesh child entities to submesh parent entities or its reverse. 
@@ -1548,24 +1641,58 @@ def submesh_map_child_parent(self, source_integral_type, source_subset_points, r target = self.submesh_parent target_dim = target.topology_dm.getDimension() source_dim = source.topology_dm.getDimension() - if source_dim != target_dim: - raise NotImplementedError(f"Not implemented for (source_dim, target_dim) == ({source_dim}, {target_dim})") - if source_integral_type == "cell": + if target_dim == source_dim: + if source_integral_type == "cell": + target_integral_type_temp = "cell" + elif source_integral_type in ["interior_facet", "exterior_facet"]: + target_integral_type_temp = "facet" + else: + raise NotImplementedError("Unsupported combination") + elif target_dim - 1 == source_dim: + if source_integral_type == "cell": + target_integral_type_temp = "facet" + else: + raise NotImplementedError("Unsupported combination") + elif target_dim == source_dim - 1: + if source_integral_type in ["interior_facet", "exterior_facet"]: + target_integral_type_temp = "cell" + else: + raise NotImplementedError("Unsupported combination") + else: + raise NotImplementedError("Unsupported combination") + if target_integral_type_temp == "cell": + _cell_numbers = target.cell_closure[:, -1] + with self.topology_dm.getSubpointIS() as subpoints: + if reverse: + _, target_indices_cell, source_indices_cell = np.intersect1d(subpoints[_cell_numbers], source_subset_points, return_indices=True) + else: + target_subset_points = subpoints[source_subset_points] + _, target_indices_cell, source_indices_cell = np.intersect1d(_cell_numbers, target_subset_points, return_indices=True) + n_cell = len(source_indices_cell) + with temp_internal_comm(self.comm) as icomm: + n_cell_max = icomm.allreduce(n_cell, op=MPI.MAX) + if n_cell_max > 0: + if n_cell > len(source_subset_points): + raise RuntimeError("Found inconsistent data") target_integral_type = "cell" - target_subset_points = None - elif source_integral_type in ["interior_facet", "exterior_facet"]: + if reverse: + 
target_subset_points = _cell_numbers[target_indices_cell] + elif target_integral_type_temp == "facet": + _exterior_facet_numbers, _, _ = target._exterior_facet_numbers_classes_set + _interior_facet_numbers, _, _ = target._interior_facet_numbers_classes_set with self.topology_dm.getSubpointIS() as subpoints: if reverse: - _, target_indices_int, source_indices_int = np.intersect1d(subpoints[target.interior_facets.facets], source_subset_points, return_indices=True) - _, target_indices_ext, source_indices_ext = np.intersect1d(subpoints[target.exterior_facets.facets], source_subset_points, return_indices=True) + _, target_indices_int, source_indices_int = np.intersect1d(subpoints[_interior_facet_numbers], source_subset_points, return_indices=True) + _, target_indices_ext, source_indices_ext = np.intersect1d(subpoints[_exterior_facet_numbers], source_subset_points, return_indices=True) else: target_subset_points = subpoints[source_subset_points] - _, target_indices_int, source_indices_int = np.intersect1d(target.interior_facets.facets, target_subset_points, return_indices=True) - _, target_indices_ext, source_indices_ext = np.intersect1d(target.exterior_facets.facets, target_subset_points, return_indices=True) + _, target_indices_int, source_indices_int = np.intersect1d(_interior_facet_numbers, target_subset_points, return_indices=True) + _, target_indices_ext, source_indices_ext = np.intersect1d(_exterior_facet_numbers, target_subset_points, return_indices=True) n_int = len(source_indices_int) n_ext = len(source_indices_ext) - n_int_max = self._comm.allreduce(n_int, op=MPI.MAX) - n_ext_max = self._comm.allreduce(n_ext, op=MPI.MAX) + with temp_internal_comm(self.comm) as icomm: + n_int_max = icomm.allreduce(n_int, op=MPI.MAX) + n_ext_max = icomm.allreduce(n_ext, op=MPI.MAX) if n_int_max > 0: if n_ext_max != 0: raise RuntimeError(f"integral_type on the target mesh is interior facet, but {n_ext_max} exterior facet entities are also included") @@ -1582,11 +1709,11 @@ def 
submesh_map_child_parent(self, source_integral_type, source_subset_points, r raise RuntimeError("Can not find a map from source to target.") if reverse: if target_integral_type == "interior_facet": - target_subset_points = target.interior_facets.facets[target_indices_int] + target_subset_points = _interior_facet_numbers[target_indices_int] elif target_integral_type == "exterior_facet": - target_subset_points = target.exterior_facets.facets[target_indices_ext] + target_subset_points = _exterior_facet_numbers[target_indices_ext] else: - raise NotImplementedError(f"Not implemented for (source_dim, target_dim, source_integral_type) == ({source_dim}, {target_dim}, {source_integral_type})") + raise NotImplementedError if reverse: map_ = getattr(self, f"submesh_parent_{source_integral_type}_child_{target_integral_type}_map") else: @@ -1615,20 +1742,23 @@ def trans_mesh_entity_map(self, base_mesh, base_integral_type, base_subdomain_id `tuple` of `op2.ComposedMap` from base_mesh to `self` and integral_type on `self`. 
""" - common = self.submesh_youngest_common_ancester(base_mesh) + common = self.submesh_youngest_common_ancestor(base_mesh) if common is None: raise NotImplementedError(f"Currently only implemented for (sub)meshes in the same family: got {self} and {base_mesh}") elif base_mesh is self: raise NotImplementedError("Currenlty can not return identity map") else: if base_integral_type == "cell": - base_subset_points = None + base_subset = base_mesh.measure_set(base_integral_type, base_subdomain_id, all_integer_subdomain_ids=base_all_integer_subdomain_ids) + base_subset_points = base_mesh.cell_closure[:, -1][base_subset.indices] elif base_integral_type in ["interior_facet", "exterior_facet"]: base_subset = base_mesh.measure_set(base_integral_type, base_subdomain_id, all_integer_subdomain_ids=base_all_integer_subdomain_ids) if base_integral_type == "interior_facet": - base_subset_points = base_mesh.interior_facets.facets[base_subset.indices] + _interior_facet_numbers, _, _ = base_mesh._interior_facet_numbers_classes_set + base_subset_points = _interior_facet_numbers[base_subset.indices] elif base_integral_type == "exterior_facet": - base_subset_points = base_mesh.exterior_facets.facets[base_subset.indices] + _exterior_facet_numbers, _, _ = base_mesh._exterior_facet_numbers_classes_set + base_subset_points = _exterior_facet_numbers[base_subset.indices] else: raise NotImplementedError(f"Unknown integration type : {base_integral_type}") composed_map, integral_type, _ = self.submesh_map_composed(base_mesh, base_integral_type, base_subset_points) @@ -1662,7 +1792,6 @@ def __init__(self, mesh, layers, periodic=False, name=None): self._base_mesh = mesh self.user_comm = mesh.comm - self._comm = internal_comm(mesh._comm, self) if name is not None and name == mesh.name: raise ValueError("Extruded mesh topology and base mesh topology can not have the same name") self.name = name if name is not None else mesh.name + "_extruded" @@ -1699,21 +1828,21 @@ def __init__(self, mesh, layers, 
periodic=False, name=None): # submesh self.submesh_parent = None - @utils.cached_property + @cached_property def _ufl_cell(self): return ufl.TensorProductCell(self._base_mesh.ufl_cell(), ufl.interval) - @utils.cached_property + @cached_property def _ufl_mesh(self): cell = self._ufl_cell - return ufl.Mesh(finat.ufl.VectorElement("Lagrange", cell, 1, dim=cell.topological_dimension())) + return ufl.Mesh(finat.ufl.VectorElement("Lagrange", cell, 1, dim=cell.topological_dimension)) @property def dm_cell_types(self): """All DM.PolytopeTypes of cells in the mesh.""" raise NotImplementedError("'dm_cell_types' is not implemented for ExtrudedMeshTopology") - @utils.cached_property + @cached_property def cell_closure(self): """2D array of ordered cell closures @@ -1721,15 +1850,23 @@ def cell_closure(self): """ return self._base_mesh.cell_closure - @utils.cached_property + @cached_property def entity_orientations(self): return self._base_mesh.entity_orientations + @cached_property + def local_cell_orientation_dat(self): + """Local cell orientation dat.""" + return self._base_mesh.local_cell_orientation_dat + def _facets(self, kind): if kind not in ["interior", "exterior"]: raise ValueError("Unknown facet type '%s'" % kind) - base = getattr(self._base_mesh, "%s_facets" % kind) - return _Facets(self, base.facets, base.classes, + label = f"{kind}_facets" + base = getattr(self._base_mesh, label) + layers = self.entity_layers(1, label) + set_ = op2.ExtrudedSet(base.set, layers=layers) + return _Facets(self, base.facets, base.classes, set_, kind, base.facet_cell, base.local_facet_dat.data_ro_with_halos, @@ -1798,7 +1935,7 @@ def node_classes(self, nodes_per_entity, real_tensorproduct=False): nodes_per_entity = sum(nodes[:, i]*(self.layers - i) for i in range(2)) return super(ExtrudedMeshTopology, self).node_classes(nodes_per_entity) - @utils.cached_property + @cached_property def layers(self): """Return the layers parameter used to construct the mesh topology, which is the number 
of layers represented by the number of occurences @@ -1922,14 +2059,14 @@ def _mark_entity_classes(self): assert isinstance(self._parent_mesh, VertexOnlyMeshTopology) dmcommon.mark_entity_classes(self.topology_dm) - @utils.cached_property + @cached_property def _ufl_cell(self): return ufl.Cell(_cells[0][0]) - @utils.cached_property + @cached_property def _ufl_mesh(self): cell = self._ufl_cell - return ufl.Mesh(finat.ufl.VectorElement("DG", cell, 0, dim=cell.topological_dimension())) + return ufl.Mesh(finat.ufl.VectorElement("DG", cell, 0, dim=cell.topological_dimension)) def _renumber_entities(self, reorder): if reorder: @@ -1956,7 +2093,7 @@ def dm_cell_types(self): """All DM.PolytopeTypes of cells in the mesh.""" return (PETSc.DM.PolytopeType.POINT,) - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_closure(self): """2D array of ordered cell closures @@ -1970,8 +2107,8 @@ def cell_closure(self): vertex_numbering = self._vertex_numbering.createGlobalSection(swarm.getPointSF()) cell = self.ufl_cell() - assert tdim == cell.topological_dimension() - assert cell.is_simplex() + assert tdim == cell.topological_dimension + assert cell.is_simplex import FIAT topology = FIAT.ufc_cell(cell).get_topology() @@ -1984,6 +2121,11 @@ def cell_closure(self): entity_orientations = None + @property + def local_cell_orientation_dat(self): + """Local cell orientation dat.""" + raise NotImplementedError("Not implemented for VertexOnlyMeshTopology") + def _facets(self, kind): """Raises an AttributeError since cells in a `VertexOnlyMeshTopology` have no facets. 
@@ -1992,15 +2134,15 @@ def _facets(self, kind): raise ValueError("Unknown facet type '%s'" % kind) raise AttributeError("Cells in a VertexOnlyMeshTopology have no facets.") - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def exterior_facets(self): return self._facets("exterior") - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def interior_facets(self): return self._facets("interior") - @utils.cached_property + @cached_property def cell_to_facets(self): """Raises an AttributeError since cells in a `VertexOnlyMeshTopology` have no facets. @@ -2028,12 +2170,12 @@ def num_entities(self, d): else: return self.num_vertices() - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_set(self): size = list(self._entity_classes[self.cell_dimension(), :]) return op2.Set(size, "Cells", comm=self.comm) - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_parent_cell_list(self): """Return a list of parent mesh cells numbers in vertex only mesh cell order. @@ -2042,7 +2184,7 @@ def cell_parent_cell_list(self): self.topology_dm.restoreField("parentcellnum") return cell_parent_cell_list[self.cell_closure[:, -1]] - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_parent_cell_map(self): """Return the :class:`pyop2.types.map.Map` from vertex only mesh cells to parent mesh cells. 
@@ -2050,7 +2192,7 @@ def cell_parent_cell_map(self): return op2.Map(self.cell_set, self._parent_mesh.cell_set, 1, self.cell_parent_cell_list, "cell_parent_cell") - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_parent_base_cell_list(self): """Return a list of parent mesh base cells numbers in vertex only mesh cell order. @@ -2061,7 +2203,7 @@ def cell_parent_base_cell_list(self): self.topology_dm.restoreField("parentcellbasenum") return cell_parent_base_cell_list[self.cell_closure[:, -1]] - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_parent_base_cell_map(self): """Return the :class:`pyop2.types.map.Map` from vertex only mesh cells to parent mesh base cells. @@ -2071,7 +2213,7 @@ def cell_parent_base_cell_map(self): return op2.Map(self.cell_set, self._parent_mesh.cell_set, 1, self.cell_parent_base_cell_list, "cell_parent_base_cell") - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_parent_extrusion_height_list(self): """Return a list of parent mesh extrusion heights in vertex only mesh cell order. @@ -2082,7 +2224,7 @@ def cell_parent_extrusion_height_list(self): self.topology_dm.restoreField("parentcellextrusionheight") return cell_parent_extrusion_height_list[self.cell_closure[:, -1]] - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_parent_extrusion_height_map(self): """Return the :class:`pyop2.types.map.Map` from vertex only mesh cells to parent mesh extrusion heights. 
@@ -2095,14 +2237,14 @@ def cell_parent_extrusion_height_map(self): def mark_entities(self, tf, label_value, label_name=None): raise NotImplementedError("Currently not implemented for VertexOnlyMesh") - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def cell_global_index(self): """Return a list of unique cell IDs in vertex only mesh cell order.""" cell_global_index = np.copy(self.topology_dm.getField("globalindex").ravel()) self.topology_dm.restoreField("globalindex") return cell_global_index - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def input_ordering(self): """ Return the input ordering of the mesh vertices as a @@ -2148,7 +2290,7 @@ def _make_input_ordering_sf(swarm, nroots, ilocal): sf.setGraph(nroots, ilocal, input_ranks_and_idxs) return sf - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def input_ordering_sf(self): """ Return a PETSc SF which has :func:`~.VertexOnlyMesh` input ordering @@ -2165,7 +2307,7 @@ def input_ordering_sf(self): ilocal[e_p_map - cStart] = np.arange(len(e_p_map)) return VertexOnlyMeshTopology._make_input_ordering_sf(self.topology_dm, nroots, ilocal) - @utils.cached_property # TODO: Recalculate if mesh moves + @cached_property # TODO: Recalculate if mesh moves def input_ordering_without_halos_sf(self): """ Return a PETSc SF which has :func:`~.VertexOnlyMesh` input ordering @@ -2181,44 +2323,23 @@ class CellOrientationsRuntimeError(RuntimeError): pass -class MeshGeometryCargo: - """Helper class carrying data for a :class:`MeshGeometry`. - - It is required because it permits Firedrake to have stripped forms - that still know that they are on an extruded mesh (for example). 
- """ - - def __init__(self, ufl_id): - self._ufl_id = ufl_id - - def ufl_id(self): - return self._ufl_id +@dataclasses.dataclass(frozen=True) +class _MultiCellTypeDummyCoordinates: + """Placeholder object for the coordinates of a mesh with >1 cell types.""" + topology: AbstractMeshTopology + _ufl_element: finat.ufl.FiniteElementBase - def init(self, coordinates): - """Initialise the cargo. + def ufl_element(self) -> finat.ufl.FiniteElementBase: + return self._ufl_element - This function is separate to __init__ because of the two-step process we have - for initialising a :class:`MeshGeometry`. - """ - self.topology = coordinates.function_space().mesh() - self.coordinates = coordinates - self.geometric_shared_data_cache = defaultdict(dict) + @property + def comm(self) -> MPI.Comm: + return self.topology.comm class MeshGeometry(ufl.Mesh, MeshGeometryMixin): """A representation of mesh topology and geometry.""" - def __new__(cls, element, comm): - """Create mesh geometry object.""" - utils._init() - mesh = super(MeshGeometry, cls).__new__(cls) - uid = utils._new_uid(internal_comm(comm, mesh)) - mesh.uid = uid - cargo = MeshGeometryCargo(uid) - assert isinstance(element, finat.ufl.FiniteElementBase) - ufl.Mesh.__init__(mesh, element, ufl_id=mesh.uid, cargo=cargo) - return mesh - @MeshGeometryMixin._ad_annotate_init def __init__(self, coordinates): """Initialise a mesh geometry from coordinates. @@ -2229,17 +2350,32 @@ def __init__(self, coordinates): The `CoordinatelessFunction` containing the coordinates. 
""" - topology = coordinates.function_space().mesh() + import firedrake.functionspaceimpl as functionspaceimpl + import firedrake.function as function + + utils._init() + + element = coordinates.ufl_element() + uid = utils._new_uid(coordinates.comm) + super().__init__(element, ufl_id=uid) + + if isinstance(coordinates, _MultiCellTypeDummyCoordinates): + topology = coordinates.topology + else: + topology = coordinates.function_space().mesh() # this is codegen information so we attach it to the MeshGeometry rather than its cargo self.extruded = isinstance(topology, ExtrudedMeshTopology) self.variable_layers = self.extruded and topology.variable_layers + self._base_mesh = None # this is set by extruded meshes in a later step - # initialise the mesh cargo - self.ufl_cargo().init(coordinates) + self.topology = topology + self.geometric_shared_data_cache = defaultdict(dict) - # Cache mesh object on the coordinateless coordinates function - coordinates._as_mesh_geometry = weakref.ref(self) + # A lot of the infrastructure of MeshGeometry does not work for meshes + # with multiple cell types + if isinstance(coordinates, _MultiCellTypeDummyCoordinates): + return # submesh self.submesh_parent = None @@ -2248,98 +2384,28 @@ def __init__(self, coordinates): self._spatial_index = None self._saved_coordinate_dat_version = coordinates.dat.dat_version + # Cache mesh object on the coordinateless coordinates function + coordinates._as_mesh_geometry = weakref.ref(self) + + # Save the coordinates as a 'CoordinatelessFunction' and as a 'Function' + self._coordinates = coordinates + V = functionspaceimpl.WithGeometry(coordinates.function_space(), self) + self._coordinates_function = function.Function(V, val=coordinates) + def _ufl_signature_data_(self, *args, **kwargs): return (type(self), self.extruded, self.variable_layers, super()._ufl_signature_data_(*args, **kwargs)) - def _init_topology(self, topology): - """Initialise the topology. 
- - :arg topology: The :class:`.MeshTopology` object. - - A mesh is fully initialised with its topology and coordinates. - In this method we partially initialise the mesh by registering - its topology. We also set the `_callback` attribute that is - later called to set its coordinates and finalise the initialisation. - """ - import firedrake.functionspace as functionspace - import firedrake.function as function - - self._topology = topology - coordinates_fs = functionspace.FunctionSpace(self.topology, self.ufl_coordinate_element()) - coordinates_data = dmcommon.reordered_coords(topology.topology_dm, coordinates_fs.dm.getDefaultSection(), - (self.num_vertices(), self.geometric_dimension())) - coordinates = function.CoordinatelessFunction(coordinates_fs, - val=coordinates_data, - name=_generate_default_mesh_coordinates_name(self.name)) - self.__init__(coordinates) - - @property - def topology(self): - """The underlying mesh topology object.""" - return self.ufl_cargo().topology - - @topology.setter - def topology(self, val): - self.ufl_cargo().topology = val - - @property - def _topology(self): - return self.topology - - @_topology.setter - def _topology(self, val): - self.topology = val - - @property - def _parent_mesh(self): - return self.ufl_cargo()._parent_mesh - - @_parent_mesh.setter - def _parent_mesh(self, val): - self.ufl_cargo()._parent_mesh = val - - @property - def _coordinates(self): - return self.ufl_cargo().coordinates - - @property - def _geometric_shared_data_cache(self): - return self.ufl_cargo().geometric_shared_data_cache - @property def topological(self): """Alias of topology. 
This is to ensure consistent naming for some multigrid codes.""" - return self._topology - - @property - def _topology_dm(self): - """Alias of topology_dm""" - from warnings import warn - warn("_topology_dm is deprecated (use topology_dm instead)", DeprecationWarning, stacklevel=2) - return self.topology_dm - - @property - @MeshGeometryMixin._ad_annotate_coordinates_function - def _coordinates_function(self): - """The :class:`.Function` containing the coordinates of this mesh.""" - import firedrake.functionspaceimpl as functionspaceimpl - import firedrake.function as function - - if hasattr(self.ufl_cargo(), "_coordinates_function"): - return self.ufl_cargo()._coordinates_function - else: - coordinates_fs = self._coordinates.function_space() - V = functionspaceimpl.WithGeometry.create(coordinates_fs, self) - f = function.Function(V, val=self._coordinates) - self.ufl_cargo()._coordinates_function = f - return f + return self.topology @property - def coordinates(self): - """The :class:`.Function` containing the coordinates of this mesh.""" + def coordinates(self) -> "Function": + """The coordinates of the mesh.""" return self._coordinates_function @coordinates.setter @@ -2353,7 +2419,7 @@ def coordinates(self, value): raise AttributeError(message) - @utils.cached_property + @cached_property def cell_sizes(self): """A :class:`~.Function` in the :math:`P^1` space containing the local mesh size. @@ -2413,7 +2479,8 @@ def clear_spatial_index(self): the coordinate field).""" self._spatial_index = None - @utils.cached_property + @cached_property + @PETSc.Log.EventDecorator() def bounding_box_coords(self) -> Tuple[np.ndarray, np.ndarray] | None: """Calculates bounding boxes for spatial indexing. 
@@ -2435,7 +2502,7 @@ def bounding_box_coords(self) -> Tuple[np.ndarray, np.ndarray] | None: from firedrake import function, functionspace from firedrake.parloops import par_loop, READ, MIN, MAX - gdim = self.geometric_dimension() + gdim = self.geometric_dimension if gdim <= 1: info_red("libspatialindex does not support 1-dimension, falling back on brute force.") return None @@ -2459,14 +2526,6 @@ def bounding_box_coords(self) -> Tuple[np.ndarray, np.ndarray] | None: f.interpolate(self.coordinates) mesh = Mesh(f) - # Calculate the bounding boxes for all cells by running a kernel - V = functionspace.VectorFunctionSpace(mesh, "DG", 0, dim=gdim) - coords_min = function.Function(V, dtype=RealType) - coords_max = function.Function(V, dtype=RealType) - - coords_min.dat.data.fill(np.inf) - coords_max.dat.data.fill(-np.inf) - if utils.complex_mode: if not np.allclose(mesh.coordinates.dat.data_ro.imag, 0): raise ValueError("Coordinate field has non-zero imaginary part") @@ -2477,6 +2536,18 @@ def bounding_box_coords(self) -> Tuple[np.ndarray, np.ndarray] | None: coords = mesh.coordinates cell_node_list = mesh.coordinates.function_space().cell_node_list + if not mesh.extruded: + all_coords = coords.dat.data_ro_with_halos[cell_node_list] + return np.min(all_coords, axis=1), np.max(all_coords, axis=1) + + # Extruded case: calculate the bounding boxes for all cells by running a kernel + V = functionspace.VectorFunctionSpace(mesh, "DG", 0, dim=gdim) + coords_min = function.Function(V, dtype=RealType) + coords_max = function.Function(V, dtype=RealType) + + coords_min.dat.data.fill(np.inf) + coords_max.dat.data.fill(-np.inf) + _, nodes_per_cell = cell_node_list.shape domain = f"{{[d, i]: 0 <= d < {gdim} and 0 <= i < {nodes_per_cell}}}" @@ -2499,6 +2570,7 @@ def bounding_box_coords(self) -> Tuple[np.ndarray, np.ndarray] | None: return coords_min, coords_max @property + @PETSc.Log.EventDecorator() def spatial_index(self): """Builds spatial index from bounding box coordinates, 
expanding the bounding box by the mesh tolerance. @@ -2545,7 +2617,8 @@ def spatial_index(self): coords_max = coords_mid + (tolerance + 0.5)*d # Build spatial index - self._spatial_index = spatialindex.from_regions(coords_min, coords_max) + with PETSc.Log.Event("spatial_index_build"): + self._spatial_index = spatialindex.from_regions(coords_min, coords_max) self._saved_coordinate_dat_version = self.coordinates.dat.dat_version return self._spatial_index @@ -2594,14 +2667,15 @@ def locate_cell_and_reference_coordinate(self, x, tolerance=None, cell_ignore=No or, when point is not in the domain, (None, None). """ x = np.asarray(x) - if x.size != self.geometric_dimension(): + if x.size != self.geometric_dimension: raise ValueError("Point must have the same geometric dimension as the mesh") - x = x.reshape((1, self.geometric_dimension())) + x = x.reshape((1, self.geometric_dimension)) cells, ref_coords, _ = self.locate_cells_ref_coords_and_dists(x, tolerance=tolerance, cells_ignore=[[cell_ignore]]) if cells[0] == -1: return None, None return cells[0], ref_coords[0] + @PETSc.Log.EventDecorator() def locate_cells_ref_coords_and_dists(self, xs, tolerance=None, cells_ignore=None): """Locate cell containing a given point and the reference coordinates of the point within the cell. 
@@ -2632,7 +2706,7 @@ def locate_cells_ref_coords_and_dists(self, xs, tolerance=None, cells_ignore=Non self.tolerance = tolerance xs = np.asarray(xs, dtype=utils.ScalarType) xs = xs.real.copy() - if xs.shape[1] != self.geometric_dimension(): + if xs.shape[1] != self.geometric_dimension: raise ValueError("Point coordinate dimension does not match mesh geometric dimension") Xs = np.empty_like(xs) npoints = len(xs) @@ -2645,20 +2719,19 @@ def locate_cells_ref_coords_and_dists(self, xs, tolerance=None, cells_ignore=Non assert cells_ignore.shape == (npoints, cells_ignore.shape[1]) ref_cell_dists_l1 = np.empty(npoints, dtype=utils.RealType) cells = np.empty(npoints, dtype=IntType) - assert xs.size == npoints * self.geometric_dimension() - self._c_locator(tolerance=tolerance)(self.coordinates._ctypes, - xs.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), - Xs.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), - ref_cell_dists_l1.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), - cells.ctypes.data_as(ctypes.POINTER(ctypes.c_int)), - npoints, - cells_ignore.shape[1], - cells_ignore) + assert xs.size == npoints * self.geometric_dimension + run_c = self._c_locator(tolerance=tolerance) + cells_data = cells.ctypes.data_as(ctypes.POINTER(ctypes.c_int)) + ref_cells_dists = ref_cell_dists_l1.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + xs_data = xs.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + Xs_data = Xs.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + with PETSc.Log.Event("c_locator_run"): + run_c(self.coordinates._ctypes, xs_data, Xs_data, ref_cells_dists, cells_data, npoints, cells_ignore.shape[1], cells_ignore) return cells, Xs, ref_cell_dists_l1 + @PETSc.Log.EventDecorator() def _c_locator(self, tolerance=None): from pyop2 import compilation - from pyop2.utils import get_petsc_dir import firedrake.function as function import firedrake.pointquery_utils as pq_utils @@ -2684,9 +2757,9 @@ def _c_locator(self, tolerance=None): not run at c-loop speed. 
*/ /* cells_ignore has shape (npoints, ncells_ignore) - find the ith row */ int *cells_ignore_i = cells_ignore + i*ncells_ignore; - cells[i] = locate_cell(f, &x[j], {self.geometric_dimension()}, &to_reference_coords, &to_reference_coords_xtr, &temp_reference_coords, &found_reference_coords, &ref_cell_dists_l1[i], ncells_ignore, cells_ignore_i); + cells[i] = locate_cell(f, &x[j], {self.geometric_dimension}, &to_reference_coords, &to_reference_coords_xtr, &temp_reference_coords, &found_reference_coords, &ref_cell_dists_l1[i], ncells_ignore, cells_ignore_i); - for (int k = 0; k < {self.geometric_dimension()}; k++) {{ + for (int k = 0; k < {self.geometric_dimension}; k++) {{ X[j] = found_reference_coords.X[k]; j++; }} @@ -2702,8 +2775,9 @@ def _c_locator(self, tolerance=None): cppargs=[ f"-I{os.path.dirname(__file__)}", f"-I{sys.prefix}/include", - f"-I{rtree.finder.get_include()}" - ] + [f"-I{d}/include" for d in get_petsc_dir()], + f"-I{rtree.finder.get_include()}", + *petsctools.get_petsc_dirs(prefix="-I", subdir="include"), + ], ldargs=[ f"-L{sys.prefix}/lib", str(libspatialindex_so), @@ -2724,7 +2798,7 @@ def _c_locator(self, tolerance=None): locator.restype = ctypes.c_int return cache.setdefault(tolerance, locator) - @utils.cached_property # TODO: Recalculate if mesh moves. Extend this for regular meshes. + @cached_property # TODO: Recalculate if mesh moves. Extend this for regular meshes. 
def input_ordering(self): """ Return the input ordering of the mesh vertices as a @@ -2780,7 +2854,7 @@ def init_cell_orientations(self, expr): import firedrake.function as function import firedrake.functionspace as functionspace - if (self.ufl_cell().cellname(), self.geometric_dimension()) not in _supported_embedded_cell_types_and_gdims: + if (self.ufl_cell().cellname, self.geometric_dimension) not in _supported_embedded_cell_types_and_gdims: raise NotImplementedError('Only implemented for intervals embedded in 2d and triangles and quadrilaterals embedded in 3d') if hasattr(self, '_cell_orientations'): @@ -2789,16 +2863,16 @@ def init_cell_orientations(self, expr): if not isinstance(expr, ufl.classes.Expr): raise TypeError("UFL expression expected!") - if expr.ufl_shape != (self.geometric_dimension(), ): - raise ValueError(f"Mismatching shapes: expr.ufl_shape ({expr.ufl_shape}) != (self.geometric_dimension(), ) (({self.geometric_dimension}, ))") + if expr.ufl_shape != (self.geometric_dimension, ): + raise ValueError(f"Mismatching shapes: expr.ufl_shape ({expr.ufl_shape}) != (self.geometric_dimension, ) (({self.geometric_dimension}, ))") fs = functionspace.FunctionSpace(self, 'DG', 0) x = ufl.SpatialCoordinate(self) f = function.Function(fs) - if self.topological_dimension() == 1: + if self.topological_dimension == 1: normal = ufl.as_vector((-ReferenceGrad(x)[1, 0], ReferenceGrad(x)[0, 0])) - else: # self.topological_dimension() == 2 + else: # self.topological_dimension == 2 normal = ufl.cross(ReferenceGrad(x)[:, 0], ReferenceGrad(x)[:, 1]) f.interpolate(ufl.dot(expr, normal)) @@ -2808,13 +2882,11 @@ def init_cell_orientations(self, expr): self._cell_orientations = cell_orientations.topological def __getattr__(self, name): - val = getattr(self._topology, name) - setattr(self, name, val) - return val + return getattr(self.topology, name) def __dir__(self): current = super(MeshGeometry, self).__dir__() - return list(OrderedDict.fromkeys(dir(self._topology) + 
current)) + return list(OrderedDict.fromkeys(dir(self.topology) + current)) def mark_entities(self, f, label_value, label_name=None): """Mark selected entities. @@ -2834,6 +2906,167 @@ def mark_entities(self, f, label_value, label_name=None): """ self.topology.mark_entities(f.topological, label_value, label_name) + def __iter__(self): + yield self + + def unique(self): + return self + + def refine_marked_elements(self, mark, netgen_flags=None): + """Refine a mesh using a DG0 marking function. + + This method requires that the mesh has been constructed from a + netgen mesh. + + :arg mark: the marking function which is a Firedrake DG0 function + with the number of refinements on each cell. + :arg netgen_flags: the dictionary of flags to be passed to ngsPETSc. + + It includes the option: + - refine_faces, which is a boolean specifying if you want to refine faces. + + """ + utils.check_netgen_installed() + + if not hasattr(self, "netgen_mesh"): + raise ValueError("Adaptive refinement requires a netgen mesh.") + if netgen_flags is None: + netgen_flags = self.netgen_flags + tdim = self.topological_dimension + if tdim not in {2, 3}: + raise NotImplementedError("No implementation for dimension other than 2 and 3.") + with mark.dat.vec as mvec: + if self.sfBC_orig is None: + cstart, cend = self.topology_dm.getHeightStratum(0) + cellNum = list(map(self._cell_numbering.getOffset, range(cstart, cend))) + mark_np = mvec.getArray()[cellNum] + else: + sfBCInv = self.sfBC_orig.createInverse() + _, mvec0 = self.topology_dm.distributeField(sfBCInv, + self._cell_numbering, + mvec) + mark_np = mvec0.getArray() + max_refs = 0 if mark_np.size == 0 else int(mark_np.max()) + # Create a copy of the netgen mesh + netgen_mesh = self.netgen_mesh.Copy() + refine_faces = netgen_flags.get("refine_faces", False) + for r in range(max_refs): + cells = netgen_mesh.Elements3D() if tdim == 3 else netgen_mesh.Elements2D() + cells.NumPy()["refine"] = (mark_np[:len(cells)] > 0) + if tdim == 3: + faces = 
netgen_mesh.Elements2D() + faces.NumPy()["refine"] = refine_faces + netgen_mesh.Refine(adaptive=True) + mark_np -= 1 + if r < max_refs - 1: + parents = netgen_mesh.parentelements if tdim == 3 else netgen_mesh.parentsurfaceelements + parents = parents.NumPy()["i"] + num_fine_cells = parents.shape[0] + num_coarse_cells = mark_np.size + indices = np.arange(num_fine_cells, dtype=PETSc.IntType) + while (indices >= num_coarse_cells).any(): + fine_cells = (indices >= num_coarse_cells) + indices[fine_cells] = parents[indices[fine_cells]] + mark_np = mark_np[indices] + + return Mesh(netgen_mesh, + reorder=self._did_reordering, + distribution_parameters=self._distribution_parameters, + comm=self.comm, + netgen_flags=netgen_flags) + + @PETSc.Log.EventDecorator() + def curve_field(self, order, permutation_tol=1e-8, cg_field=None): + '''Return a function containing the curved coordinates of the mesh. + + This method requires that the mesh has been constructed from a + netgen mesh. + + :arg order: the order of the curved mesh. + :arg permutation_tol: tolerance used to construct the permutation of the reference element. + :arg cg_field: return a CG function field representing the mesh, as opposed to a DG field. + Defaults to the continuity of the coordinates of the original mesh. 
+ + ''' + utils.check_netgen_installed() + from firedrake.netgen import find_permutation, netgen_distribute + from firedrake.functionspace import FunctionSpace + from firedrake.function import Function + + if not hasattr(self, "netgen_mesh"): + raise ValueError("Cannot curve a mesh that has not been generated by netgen.") + + if cg_field is None: + cg_field = not self.coordinates.function_space().finat_element.is_dg() + + # Check if the mesh is a surface mesh or two dimensional mesh + if self.topological_dimension == 2: + ng_element = self.netgen_mesh.Elements2D() + else: + ng_element = self.netgen_mesh.Elements3D() + ng_dimension = len(ng_element) + + # Construct the coordinates as a Firedrake function + coords_space = self.coordinates.function_space().reconstruct(degree=order) + broken_space = coords_space.broken_space() + if not cg_field: + coords_space = broken_space + new_coordinates = Function(coords_space).interpolate(self.coordinates) + + # Compute reference points using fiat + fiat_element = new_coordinates.function_space().finat_element.fiat_equivalent + nodes = fiat_element.dual_basis() + ref_pts = [] + for node in nodes: + # Assert singleton point for each node. 
+ pt, = node.get_point_dict().keys() + ref_pts.append(pt) + reference_points = np.array(ref_pts) + + # Construct numpy arrays for physical domain data + physical_points = np.zeros( + (ng_dimension, reference_points.shape[0], self.geometric_dimension) + ) + curved_points = np.zeros( + (ng_dimension, reference_points.shape[0], self.geometric_dimension) + ) + self.netgen_mesh.CalcElementMapping(reference_points, physical_points) + # NOTE: This will segfault for MeshHierarchy on a netgen CSG geometry + self.netgen_mesh.Curve(order) + self.netgen_mesh.CalcElementMapping(reference_points, curved_points) + curved = ng_element.NumPy()["curved"] + + # Distribute curved cell data + cell_node_map = new_coordinates.cell_node_map() + num_cells = cell_node_map.values.shape[0] + DG0 = FunctionSpace(self, "DG", 0) + own_curved = netgen_distribute(DG0, curved) + own_curved = np.flatnonzero(own_curved[:num_cells]) + + # Distribute coordinate data + own_curved_points = netgen_distribute(broken_space, curved_points)[own_curved] + own_physical_points = netgen_distribute(broken_space, physical_points)[own_curved] + + # Get broken indices + cstart, cend = self.topology_dm.getHeightStratum(0) + cellNum = np.array(list(map(self._cell_numbering.getOffset, range(cstart, cend)))) + broken_indices = cell_node_map.values[cellNum[own_curved]] + + # Find the correct coordinate permutation for each cell + permutation = find_permutation( + own_physical_points, + new_coordinates.dat.data_ro_with_halos[broken_indices].real, + tol=permutation_tol, + ) + self.comm.Barrier() + # Apply the permutation to each cell in turn + for i in range(own_curved_points.shape[0]): + own_curved_points[i] = own_curved_points[i, permutation[i]] + + # Assign the curved coordinates to the dat + new_coordinates.dat.data_wo_with_halos[broken_indices] = own_curved_points + return new_coordinates + @PETSc.Log.EventDecorator() def make_mesh_from_coordinates(coordinates, name, tolerance=0.5): @@ -2865,10 +3098,9 @@ def 
make_mesh_from_coordinates(coordinates, name, tolerance=0.5): element = coordinates.ufl_element() if V.rank != 1 or len(element.reference_value_shape) != 1: raise ValueError("Coordinates must be from a rank-1 FunctionSpace with rank-1 value_shape.") - assert V.mesh().ufl_cell().topological_dimension() <= V.value_size + assert V.mesh().ufl_cell().topological_dimension <= V.value_size - mesh = MeshGeometry.__new__(MeshGeometry, element, coordinates.comm) - mesh.__init__(coordinates) + mesh = MeshGeometry(coordinates) mesh.name = name # Mark mesh as being made from coordinates mesh._made_from_coordinates = True @@ -2876,8 +3108,65 @@ def make_mesh_from_coordinates(coordinates, name, tolerance=0.5): return mesh +def _fully_localize_coordinates(dm): + """Expand sparsely localized coordinates to cover all cells. + + For file-based periodic meshes (e.g. Gmsh), PETSc only creates + cell-local (DG) coordinates for cells touching the periodic + boundary. This fills in the remaining cells using CG vertex + coordinates via ``vecGetClosure``. 
+ """ + gdim = dm.getCoordinateDim() + cStart, cEnd = dm.getHeightStratum(0) + cell_sec = dm.getCellCoordinateSection() + coord_sec = dm.getCoordinateSection() + coord_vec = dm.getCoordinatesLocal() + old_cell_vec = dm.getCellCoordinatesLocal() + + # Find dofs_per_cell from an existing cell entry + dofs_per_cell = None + for c in range(cStart, cEnd): + dof = cell_sec.getDof(c) + if dof > 0: + dofs_per_cell = dof + break + if dofs_per_cell is None: + return + + # Build new section and vector covering all cells + new_sec = PETSc.Section().create(comm=PETSc.COMM_SELF) + new_sec.setNumFields(1) + new_sec.setFieldComponents(0, gdim) + new_sec.setChart(cStart, cEnd) + for c in range(cStart, cEnd): + new_sec.setDof(c, dofs_per_cell) + new_sec.setFieldDof(c, 0, dofs_per_cell) + new_sec.setUp() + + new_vec = PETSc.Vec().create(comm=PETSc.COMM_SELF) + new_vec.setSizes((new_sec.getStorageSize(), PETSc.DETERMINE), gdim) + new_vec.setType(coord_vec.getType()) + + arr = new_vec.array + old_arr = old_cell_vec.array + for c in range(cStart, cEnd): + off = new_sec.getOffset(c) + old_dof = cell_sec.getDof(c) + if old_dof > 0: + old_off = cell_sec.getOffset(c) + arr[off:off + dofs_per_cell] = old_arr[old_off:old_off + old_dof] + else: + arr[off:off + dofs_per_cell] = dm.vecGetClosure( + coord_sec, coord_vec, c)[:dofs_per_cell] + + coord_dm = dm.getCoordinateDM() + dm.setCellCoordinateDM(coord_dm.clone()) + dm.setCellCoordinateSection(gdim, new_sec) + dm.setCellCoordinatesLocal(new_vec) + + def make_mesh_from_mesh_topology(topology, name, tolerance=0.5): - """Make mesh from tpology. + """Make mesh from topology. 
Parameters ---------- @@ -2897,19 +3186,27 @@ def make_mesh_from_mesh_topology(topology, name, tolerance=0.5): # Construct coordinate element # TODO: meshfile might indicates higher-order coordinate element cell = topology.ufl_cell() - geometric_dim = topology.topology_dm.getCoordinateDim() - if not topology.topology_dm.getCoordinatesLocalized(): + dm = topology.topology_dm + geometric_dim = dm.getCoordinateDim() + # For periodic meshes loaded from file (e.g. Gmsh), PETSc creates + # cell-local (DG) coordinates only for cells touching the periodic + # boundary (sparse localization). Firedrake needs every cell to + # have an entry, so we expand to full localization. + if dm.getCoordinatesLocalized(): + _fully_localize_coordinates(dm) + if not dm.getCoordinatesLocalized(): element = finat.ufl.VectorElement("Lagrange", cell, 1, dim=geometric_dim) else: element = finat.ufl.VectorElement("DQ" if cell in [ufl.quadrilateral, ufl.hexahedron] else "DG", cell, 1, dim=geometric_dim, variant="equispaced") - # Create mesh object - mesh = MeshGeometry.__new__(MeshGeometry, element, topology.comm) - mesh._init_topology(topology) + + coords = coordinates_from_topology(topology, element) + mesh = MeshGeometry(coords) mesh.name = name mesh._tolerance = tolerance return mesh +@PETSc.Log.EventDecorator() def make_vom_from_vom_topology(topology, name, tolerance=0.5): """Make `VertexOnlyMesh` from a mesh topology. 
@@ -2935,10 +3232,13 @@ def make_vom_from_vom_topology(topology, name, tolerance=0.5): gdim = topology.topology_dm.getCoordinateDim() cell = topology.ufl_cell() element = finat.ufl.VectorElement("DG", cell, 0, dim=gdim) - vmesh = MeshGeometry.__new__(MeshGeometry, element, topology.comm) - vmesh._init_topology(topology) + coords = coordinates_from_topology(topology, element) + vmesh = MeshGeometry(coords) + vmesh.name = name + vmesh._tolerance = tolerance + # Save vertex reference coordinate (within reference cell) in function - parent_tdim = topology._parent_mesh.ufl_cell().topological_dimension() + parent_tdim = topology._parent_mesh.ufl_cell().topological_dimension if parent_tdim > 0: reference_coordinates_fs = functionspace.VectorFunctionSpace(topology, "DG", 0, dim=parent_tdim) reference_coordinates_data = dmcommon.reordered_coords(topology.topology_dm, reference_coordinates_fs.dm.getDefaultSection(), @@ -2947,13 +3247,11 @@ def make_vom_from_vom_topology(topology, name, tolerance=0.5): reference_coordinates = function.CoordinatelessFunction(reference_coordinates_fs, val=reference_coordinates_data, name=_generate_default_mesh_reference_coordinates_name(name)) - refCoordV = functionspaceimpl.WithGeometry.create(reference_coordinates_fs, vmesh) + refCoordV = functionspaceimpl.WithGeometry(reference_coordinates_fs, vmesh) vmesh.reference_coordinates = function.Function(refCoordV, val=reference_coordinates) else: # We can't do this in 0D so leave it undefined. 
vmesh.reference_coordinates = None - vmesh.name = name - vmesh._tolerance = tolerance return vmesh @@ -3043,6 +3341,8 @@ def Mesh(meshfile, **kwargs): distribution_parameters = kwargs.get("distribution_parameters", None) if distribution_parameters is None: distribution_parameters = {} + if isinstance(meshfile, Path): + meshfile = str(meshfile) if isinstance(meshfile, str) and \ any(meshfile.lower().endswith(ext) for ext in ['.h5', '.hdf5']): from firedrake.output import CheckpointFile @@ -3063,6 +3363,8 @@ def Mesh(meshfile, **kwargs): utils._init() + from_netgen = netgen and isinstance(meshfile, netgen.libngpy._meshing.Mesh) + # We don't need to worry about using a user comm in these cases as # they all immediately call a petsc4py which in turn uses a PETSc # internal comm @@ -3071,16 +3373,15 @@ def Mesh(meshfile, **kwargs): plex = meshfile if MPI.Comm.Compare(user_comm, plex.comm.tompi4py()) not in {MPI.CONGRUENT, MPI.IDENT}: raise ValueError("Communicator used to create `plex` must be at least congruent to the communicator used to create the mesh") - elif netgen and isinstance(meshfile, netgen.libngpy._meshing.Mesh): - try: - from ngsPETSc import FiredrakeMesh - except ImportError: - raise ImportError("Unable to import ngsPETSc. 
Please ensure that ngsolve is installed and available to Firedrake.") + elif from_netgen: + from firedrake.netgen import FiredrakeMesh + petsctools.cite("Betteridge2024") netgen_flags = kwargs.get("netgen_flags", {"quad": False, "transform": None, "purify_to_tets": False}) netgen_firedrake_mesh = FiredrakeMesh(meshfile, netgen_flags, user_comm) plex = netgen_firedrake_mesh.meshMap.petscPlex plex.setName(_generate_default_mesh_topology_name(name)) + else: basename, ext = os.path.splitext(meshfile) if ext.lower() in ['.e', '.exo']: @@ -3109,11 +3410,35 @@ def Mesh(meshfile, **kwargs): permutation_name=kwargs.get("permutation_name"), submesh_parent=submesh_parent.topology if submesh_parent else None, comm=user_comm) - if netgen and isinstance(meshfile, netgen.libngpy._meshing.Mesh): - netgen_firedrake_mesh.createFromTopology(topology, name=name, comm=user_comm) - mesh = netgen_firedrake_mesh.firedrakeMesh - else: - mesh = make_mesh_from_mesh_topology(topology, name) + mesh = make_mesh_from_mesh_topology(topology, name) + + if from_netgen: + mesh.netgen_mesh = netgen_firedrake_mesh.meshMap.ngMesh + mesh.netgen_flags = netgen_flags + + # Curve the mesh, if requested + degree = netgen_flags.get("degree", 1) + if degree != 1: + permutation_tol = netgen_flags.get("permutation_tol", 1e-8) + cg = netgen_flags.get("cg", None) + coordinates = mesh.curve_field( + order=degree, + permutation_tol=permutation_tol, + cg_field=cg, + ) + # Do not redistribute the mesh + reorder_noop = None + temp = Mesh(coordinates, + reorder=reorder_noop, + perm_is=mesh._dm_renumbering, + distribution_parameters=DISTRIBUTION_PARAMETERS_NOOP, + comm=mesh.comm) + temp.netgen_mesh = mesh.netgen_mesh + temp.netgen_flags = mesh.netgen_flags + temp._distribution_parameters = mesh._distribution_parameters + temp._did_reordering = mesh._did_reordering + mesh = temp + mesh.submesh_parent = submesh_parent mesh._tolerance = tolerance return mesh @@ -3127,7 +3452,7 @@ def ExtrudedMesh(mesh, layers, 
layer_height=None, extrusion_type='uniform', peri :arg layers: number of extruded cell layers in the "vertical" direction. One may also pass an array of shape (cells, 2) to specify a variable number - of layers. In this case, each entry is a pair + of layers (deprecated). In this case, each entry is a pair ``[a, b]`` where ``a`` indicates the starting cell layer of the column and ``b`` the number of cell layers in that column. @@ -3191,6 +3516,13 @@ def ExtrudedMesh(mesh, layers, layer_height=None, extrusion_type='uniform', peri name = name if name is not None else mesh.name + "_extruded" layers = np.asarray(layers, dtype=IntType) if layers.shape: + warnings.warn( + "Variable layer extrusion is deprecated and will be removed " + "in the 2026.10.0 release. If possible we recommend using " + "Submesh instead. Please get in touch if this is a critical " + "issue for you.", + FutureWarning, + ) if periodic: raise ValueError("Must provide constant layer for periodic extrusion") if layers.shape != (mesh.cell_set.total_size, 2): @@ -3202,7 +3534,8 @@ def ExtrudedMesh(mesh, layers, layer_height=None, extrusion_type='uniform', peri # variable-height layers need to be present for the maximum number # of extruded layers num_layers = layers.sum(axis=1).max() if mesh.cell_set.total_size else 0 - num_layers = mesh._comm.allreduce(num_layers, op=MPI.MAX) + with temp_internal_comm(mesh.comm) as icomm: + num_layers = icomm.allreduce(num_layers, op=MPI.MAX) # Convert to internal representation layers[:, 1] += 1 + layers[:, 0] @@ -3229,7 +3562,7 @@ def ExtrudedMesh(mesh, layers, layer_height=None, extrusion_type='uniform', peri pass elif extrusion_type in ("radial", "radial_hedgehog"): # do not allow radial extrusion if tdim = gdim - if mesh.geometric_dimension() == mesh.topological_dimension(): + if mesh.geometric_dimension == mesh.topological_dimension: raise RuntimeError("Cannot radially-extrude a mesh with equal geometric and topological dimension") else: # check for kernel @@ 
-3249,7 +3582,7 @@ def ExtrudedMesh(mesh, layers, layer_height=None, extrusion_type='uniform', peri element = finat.ufl.TensorProductElement(helement, velement) if gdim is None: - gdim = mesh.geometric_dimension() + (extrusion_type == "uniform") + gdim = mesh.geometric_dimension + (extrusion_type == "uniform") coordinates_fs = functionspace.VectorFunctionSpace(topology, element, dim=gdim) coordinates = function.CoordinatelessFunction(coordinates_fs, name=_generate_default_mesh_coordinates_name(name)) @@ -3277,26 +3610,6 @@ class MissingPointsBehaviour(enum.Enum): WARN = "warn" -class VertexOnlyMeshMissingPointsError(Exception): - """Exception raised when 1 or more points are not found by a - :func:`~.VertexOnlyMesh` in its parent mesh. - - Attributes - ---------- - n_missing_points : int - The number of points which were not found in the parent mesh. - """ - - def __init__(self, n_missing_points): - self.n_missing_points = n_missing_points - - def __str__(self): - return ( - f"{self.n_missing_points} vertices are outside the mesh and have " - "been removed from the VertexOnlyMesh." 
- ) - - @PETSc.Log.EventDecorator() def VertexOnlyMesh(mesh, vertexcoords, reorder=None, missing_points_behaviour='error', tolerance=None, redundant=True, name=None): @@ -3360,7 +3673,7 @@ def VertexOnlyMesh(mesh, vertexcoords, reorder=None, missing_points_behaviour='e vertexcoords = np.asarray(vertexcoords, dtype=RealType) if reorder is None: reorder = parameters["reorder_meshes"] - gdim = mesh.geometric_dimension() + gdim = mesh.geometric_dimension _, pdim = vertexcoords.shape if not np.isclose(np.sum(abs(vertexcoords.imag)), 0): raise ValueError("Point coordinates must have zero imaginary part") @@ -3455,6 +3768,7 @@ def other_fields(self, fields): self._other_fields = fields +@PETSc.Log.EventDecorator() def _pic_swarm_in_mesh( parent_mesh, coords, @@ -3581,8 +3895,8 @@ def _pic_swarm_in_mesh( coords = np.asarray(coords, dtype=RealType) plex = parent_mesh.topology.topology_dm - tdim = parent_mesh.topological_dimension() - gdim = parent_mesh.geometric_dimension() + tdim = parent_mesh.topological_dimension + gdim = parent_mesh.geometric_dimension ( coords_local, @@ -3713,6 +4027,7 @@ def _pic_swarm_in_mesh( return swarm, original_ordering_swarm, n_missing_points +@PETSc.Log.EventDecorator() def _dmswarm_create( fields, comm, @@ -3956,45 +4271,7 @@ def _parent_extrusion_numbering(parent_cell_nums, parent_layers): return base_parent_cell_nums, extrusion_heights -def _mpi_array_lexicographic_min(x, y, datatype): - """MPI operator for lexicographic minimum of arrays. - - This compares two arrays of shape (N, 2) lexicographically, i.e. first - comparing the two arrays by their first column, returning the element-wise - minimum, with ties broken by comparing the second column element wise. - - Parameters - ---------- - x : ``np.ndarray`` - The first array to compare of shape (N, 2). - y : ``np.ndarray`` - The second array to compare of shape (N, 2). - datatype : ``MPI.Datatype`` - The datatype of the arrays. 
- - Returns - ------- - ``np.ndarray`` - The lexicographically lowest array of shape (N, 2). - - """ - # Check the first column - min_idxs = np.where(x[:, 0] < y[:, 0])[0] - result = np.copy(y) - result[min_idxs, :] = x[min_idxs, :] - - # if necessary, check the second column - eq_idxs = np.where(x[:, 0] == y[:, 0])[0] - if len(eq_idxs): - # We only check where we have equal values to avoid unnecessary work - min_idxs = np.where(x[eq_idxs, 1] < y[eq_idxs, 1])[0] - result[eq_idxs[min_idxs], :] = x[eq_idxs[min_idxs], :] - return result - - -array_lexicographic_mpi_op = MPI.Op.Create(_mpi_array_lexicographic_min, commute=True) - - +@PETSc.Log.EventDecorator() def _parent_mesh_embedding( parent_mesh, coords, tolerance, redundant, exclude_halos, remove_missing_points ): @@ -4075,80 +4352,57 @@ def _parent_mesh_embedding( "VertexOnlyMeshes don't have a working locate_cells_ref_coords_and_dists method" ) - import firedrake.functionspace as functionspace - import firedrake.constant as constant - import firedrake.interpolation as interpolation - import firedrake.assemble as assemble - - # In parallel, we need to make sure we know which point is which and save - # it. - if redundant: - # rank 0 broadcasts coords to all ranks - coords_local = parent_mesh._comm.bcast(coords, root=0) - ncoords_local = coords_local.shape[0] - coords_global = coords_local - ncoords_global = coords_global.shape[0] - global_idxs_global = np.arange(coords_global.shape[0]) - input_coords_idxs_local = np.arange(ncoords_local) - input_coords_idxs_global = input_coords_idxs_local - input_ranks_local = np.zeros(ncoords_local, dtype=int) - input_ranks_global = input_ranks_local - else: - # Here, we have to assume that all points we can see are unique. 
- # We therefore gather all points on all ranks in rank order: if rank 0 - # has 10 points, rank 1 has 20 points, and rank 3 has 5 points, then - # rank 0's points have global numbering 0-9, rank 1's points have - # global numbering 10-29, and rank 3's points have global numbering - # 30-34. - coords_local = coords - ncoords_local = coords.shape[0] - ncoords_local_allranks = parent_mesh._comm.allgather(ncoords_local) - ncoords_global = sum(ncoords_local_allranks) - # The below code looks complicated but it's just an allgather of the - # (variable length) coords_local array such that they are concatenated. - coords_local_size = np.array(coords_local.size) - coords_local_sizes = np.empty(parent_mesh._comm.size, dtype=int) - parent_mesh._comm.Allgatherv(coords_local_size, coords_local_sizes) - coords_global = np.empty( - (ncoords_global, coords.shape[1]), dtype=coords_local.dtype - ) - parent_mesh._comm.Allgatherv(coords_local, (coords_global, coords_local_sizes)) - # # ncoords_local_allranks is in rank order so we can just sum up the - # # previous ranks to get the starting index for the global numbering. - # # For rank 0 we make use of the fact that sum([]) = 0. 
- # startidx = sum(ncoords_local_allranks[:parent_mesh._comm.rank]) - # endidx = startidx + ncoords_local - # global_idxs_global = np.arange(startidx, endidx) - global_idxs_global = np.arange(coords_global.shape[0]) - input_coords_idxs_local = np.arange(ncoords_local) - input_coords_idxs_global = np.empty(ncoords_global, dtype=int) - parent_mesh._comm.Allgatherv( - input_coords_idxs_local, (input_coords_idxs_global, ncoords_local_allranks) - ) - input_ranks_local = np.full(ncoords_local, parent_mesh._comm.rank, dtype=int) - input_ranks_global = np.empty(ncoords_global, dtype=int) - parent_mesh._comm.Allgatherv( - input_ranks_local, (input_ranks_global, ncoords_local_allranks) - ) - - # Get parent mesh rank ownership information: - # Interpolating Constant(parent_mesh.comm.rank) into P0DG cleverly creates - # a Function whose dat contains rank ownership information in an ordering - # that is accessible using Firedrake's cell numbering. This is because, on - # each rank, parent_mesh.comm.rank creates a Constant with the local rank - # number, and halo exchange ensures that this information is visible, as - # nessesary, to other processes. - P0DG = functionspace.FunctionSpace(parent_mesh, "DG", 0) - with stop_annotating(): - visible_ranks = interpolation.Interpolate( - constant.Constant(parent_mesh.comm.rank), P0DG - ) - visible_ranks = assemble(visible_ranks).dat.data_ro_with_halos.real - - locally_visible = np.full(ncoords_global, False) - # See below for why np.inf is used here. - ranks = np.full(ncoords_global, np.inf) - + with temp_internal_comm(parent_mesh.comm) as icomm: + # In parallel, we need to make sure we know which point is which and save + # it. 
+ if redundant: + # rank 0 broadcasts coords to all ranks + coords_local = icomm.bcast(coords, root=0) + ncoords_local = coords_local.shape[0] + coords_global = coords_local + ncoords_global = coords_global.shape[0] + global_idxs_global = np.arange(coords_global.shape[0]) + input_coords_idxs_local = np.arange(ncoords_local) + input_coords_idxs_global = input_coords_idxs_local + input_ranks_local = np.zeros(ncoords_local, dtype=int) + input_ranks_global = input_ranks_local + else: + # Here, we have to assume that all points we can see are unique. + # We therefore gather all points on all ranks in rank order: if rank 0 + # has 10 points, rank 1 has 20 points, and rank 3 has 5 points, then + # rank 0's points have global numbering 0-9, rank 1's points have + # global numbering 10-29, and rank 3's points have global numbering + # 30-34. + coords_local = coords + ncoords_local = coords.shape[0] + ncoords_local_allranks = icomm.allgather(ncoords_local) + ncoords_global = sum(ncoords_local_allranks) + # The below code looks complicated but it's just an allgather of the + # (variable length) coords_local array such that they are concatenated. + coords_local_size = np.array(coords_local.size) + coords_local_sizes = np.empty(parent_mesh.comm.size, dtype=int) + icomm.Allgatherv(coords_local_size, coords_local_sizes) + coords_global = np.empty( + (ncoords_global, coords.shape[1]), dtype=coords_local.dtype + ) + icomm.Allgatherv(coords_local, (coords_global, coords_local_sizes)) + # # ncoords_local_allranks is in rank order so we can just sum up the + # # previous ranks to get the starting index for the global numbering. + # # For rank 0 we make use of the fact that sum([]) = 0. 
+ # startidx = sum(ncoords_local_allranks[:parent_mesh.comm.rank]) + # endidx = startidx + ncoords_local + # global_idxs_global = np.arange(startidx, endidx) + global_idxs_global = np.arange(coords_global.shape[0]) + input_coords_idxs_local = np.arange(ncoords_local) + input_coords_idxs_global = np.empty(ncoords_global, dtype=int) + icomm.Allgatherv( + input_coords_idxs_local, (input_coords_idxs_global, ncoords_local_allranks) + ) + input_ranks_local = np.full(ncoords_local, icomm.rank, dtype=int) + input_ranks_global = np.empty(ncoords_global, dtype=int) + icomm.Allgatherv( + input_ranks_local, (input_ranks_global, ncoords_local_allranks) + ) ( parent_cell_nums, reference_coords, @@ -4158,48 +4412,48 @@ def _parent_mesh_embedding( assert len(reference_coords) == ncoords_global assert len(ref_cell_dists_l1) == ncoords_global - if parent_mesh.geometric_dimension() > parent_mesh.topological_dimension(): + if parent_mesh.geometric_dimension > parent_mesh.topological_dimension: # The reference coordinates contain an extra unnecessary dimension # which we can safely delete - reference_coords = reference_coords[:, : parent_mesh.topological_dimension()] - - locally_visible[:] = parent_cell_nums != -1 - ranks[locally_visible] = visible_ranks[parent_cell_nums[locally_visible]] - # see below for why np.inf is used here. - ref_cell_dists_l1[~locally_visible] = np.inf + reference_coords = reference_coords[:, : parent_mesh.topological_dimension] + + # Get parent mesh rank ownership information. + visible_ranks = np.empty(parent_mesh.cell_set.total_size, dtype=IntType) + visible_ranks[:parent_mesh.cell_set.size] = parent_mesh.comm.rank + visible_ranks[parent_mesh.cell_set.size:] = -1 + # Halo exchange the visible ranks so that each rank knows which ranks can see each cell. 
+ dmcommon.exchange_cell_orientations( + parent_mesh.topology.topology_dm, parent_mesh.topology._cell_numbering, visible_ranks + ) + locally_visible = parent_cell_nums != -1 - # ensure that points which a rank thinks it owns are always chosen in a tie - # break by setting the rank to be negative. If multiple ranks think they - # own a point then the one with the highest rank will be chosen. - on_this_rank = ranks == parent_mesh.comm.rank - ranks[on_this_rank] = -parent_mesh.comm.rank - ref_cell_dists_l1_and_ranks = np.stack((ref_cell_dists_l1, ranks), axis=1) + if parent_mesh.extruded: + # Halo exchange of visible_ranks is over the base mesh topology and cell numbering, + # so we need to map back to extruded cell numbering after indexing parent_cell_nums. + locally_visible_cell_nums = parent_cell_nums[locally_visible] // (parent_mesh.layers - 1) + else: + locally_visible_cell_nums = parent_cell_nums[locally_visible] # In parallel there will regularly be disagreements about which cell owns a # point when those points are close to mesh partition boundaries. - # We now have the reference cell l1 distance and ranks being np.inf for any - # point which is not locally visible. By collectively taking the minimum - # of the reference cell l1 distance, which is tied to the rank via - # ref_cell_dists_l1_and_ranks, we both check which cell the coordinate is - # closest to and find out which rank owns that cell. - # In cases where the reference cell l1 distance is the same for a - # particular coordinate, we break the tie by choosing the lowest rank. - # This turns out to be a lexicographic row-wise minimum of the - # ref_cell_dists_l1_and_ranks array: we minimise the distance first and - # break ties by choosing the lowest rank. 
- owned_ref_cell_dists_l1_and_ranks = parent_mesh.comm.allreduce( - ref_cell_dists_l1_and_ranks, op=array_lexicographic_mpi_op - ) - - # switch ranks back to positive - owned_ref_cell_dists_l1_and_ranks[:, 1] = np.abs( - owned_ref_cell_dists_l1_and_ranks[:, 1] - ) - ref_cell_dists_l1_and_ranks[:, 1] = np.abs(ref_cell_dists_l1_and_ranks[:, 1]) - ranks = np.abs(ranks) + # We first set the owning cell to be the one with the minimum L1 distance to the point. + # In the case of ties, we pick the highest rank number. - owned_ref_cell_dists_l1 = owned_ref_cell_dists_l1_and_ranks[:, 0] - owned_ranks = owned_ref_cell_dists_l1_and_ranks[:, 1] + # Set non-visible L1 distance to np.inf so they don't interfere with the MPI.MIN reduction. + ref_cell_dists_l1[~locally_visible] = np.inf + owned_ref_cell_dists_l1 = np.empty_like(ref_cell_dists_l1) + # The owning cell is the one with the minimum L1 distance to the point. + parent_mesh.comm.Allreduce(ref_cell_dists_l1, owned_ref_cell_dists_l1, op=MPI.MIN) + + # Only ranks that achieved the global minimum distance are candidates for + # ownership. Among tied candidates (same minimum distance) we pick the + # highest rank number using MPI.MAX. Non-visible points are set to -np.inf + # so they don't interfere with the MAX reduction. 
+ ranks = np.full(ncoords_global, -np.inf) + ranks[locally_visible] = visible_ranks[locally_visible_cell_nums] + rank_candidates = np.where(ref_cell_dists_l1 == owned_ref_cell_dists_l1, ranks, -np.inf) + owned_ranks = np.empty_like(rank_candidates) + parent_mesh.comm.Allreduce(rank_candidates, owned_ranks, op=MPI.MAX) changed_ref_cell_dists_l1 = owned_ref_cell_dists_l1 != ref_cell_dists_l1 changed_ranks = owned_ranks != ranks @@ -4228,8 +4482,8 @@ def _parent_mesh_embedding( cells_ignore=cells_ignore_T.T[changed_ranks_tied, :], ) # delete extra dimension if necessary - if parent_mesh.geometric_dimension() > parent_mesh.topological_dimension(): - new_reference_coords = new_reference_coords[:, : parent_mesh.topological_dimension()] + if parent_mesh.geometric_dimension > parent_mesh.topological_dimension: + new_reference_coords = new_reference_coords[:, : parent_mesh.topological_dimension] reference_coords[changed_ranks_tied, :] = new_reference_coords # remove newly lost points locally_visible[changed_ranks_tied] = ( @@ -4244,9 +4498,11 @@ def _parent_mesh_embedding( ) changed_ranks_tied &= locally_visible # update the identified rank - ranks[changed_ranks_tied] = visible_ranks[ - parent_cell_nums[changed_ranks_tied] - ] + if parent_mesh.extruded: + _retry_cell_nums = parent_cell_nums[changed_ranks_tied] // (parent_mesh.layers - 1) + else: + _retry_cell_nums = parent_cell_nums[changed_ranks_tied] + ranks[changed_ranks_tied] = visible_ranks[_retry_cell_nums] # if the rank now matches then we have found the correct cell locally_visible[changed_ranks_tied] &= ( owned_ranks[changed_ranks_tied] == ranks[changed_ranks_tied] @@ -4259,12 +4515,12 @@ def _parent_mesh_embedding( parent_cell_nums) ) - # Any ranks which are still np.inf are not in the mesh - missing_global_idxs = np.where(owned_ranks == np.inf)[0] + # Any ranks which are still -np.inf are not in the mesh + missing_global_idxs = np.where(owned_ranks == -np.inf)[0] if not remove_missing_points: 
missing_coords_idxs_on_rank = np.where( - (owned_ranks == np.inf) & (input_ranks_global == parent_mesh.comm.rank) + (owned_ranks == -np.inf) & (input_ranks_global == parent_mesh.comm.rank) )[0] locally_visible[missing_coords_idxs_on_rank] = True parent_cell_nums[missing_coords_idxs_on_rank] = -1 @@ -4298,6 +4554,7 @@ def _parent_mesh_embedding( ) +@PETSc.Log.EventDecorator() def _swarm_original_ordering_preserve( comm, swarm, @@ -4543,9 +4800,9 @@ def RelabeledMesh(mesh, indicator_functions, subdomain_ids, **kwargs): # cells height = 0 dmlabel_name = dmcommon.CELL_SETS_LABEL - elif (elem.family() == "HDiv Trace" and elem.degree() == 0 and mesh.topological_dimension() > 1) or \ - (elem.family() == "Lagrange" and elem.degree() == 1 and mesh.topological_dimension() == 1) or \ - (elem.family() == "Q" and elem.degree() == 2 and mesh.topology.ufl_cell().cellname() == "hexahedron"): + elif (elem.family() == "HDiv Trace" and elem.degree() == 0 and mesh.topological_dimension > 1) or \ + (elem.family() == "Lagrange" and elem.degree() == 1 and mesh.topological_dimension == 1) or \ + (elem.family() == "Q" and elem.degree() == 2 and mesh.topology.ufl_cell().cellname == "hexahedron"): # facets height = 1 dmlabel_name = dmcommon.FACE_SETS_LABEL @@ -4556,16 +4813,30 @@ def RelabeledMesh(mesh, indicator_functions, subdomain_ids, **kwargs): dmlabel = plex1.getLabel(dmlabel_name) section = f.topological.function_space().dm.getSection() dmcommon.mark_points_with_function_array(plex, section, height, f.dat.data_ro_with_halos.real.astype(IntType), dmlabel, subid) - distribution_parameters_noop = {"partition": False, - "overlap_type": (DistributedMeshOverlapType.NONE, 0)} reorder_noop = None tmesh1 = MeshTopology(plex1, name=plex1.getName(), reorder=reorder_noop, - distribution_parameters=distribution_parameters_noop, + distribution_parameters=DISTRIBUTION_PARAMETERS_NOOP, perm_is=tmesh._dm_renumbering, distribution_name=tmesh._distribution_name, permutation_name=tmesh._permutation_name, 
                               comm=tmesh.comm)
-    return make_mesh_from_mesh_topology(tmesh1, name1)
+
+    # Create a new coordinates function with the same values as before but
+    # living on the new topology
+    coordinates_fs = mesh.coordinates.function_space().reconstruct(mesh=tmesh1)
+    relabeled_coordinates = function.CoordinatelessFunction(
+        coordinates_fs,
+        val=mesh.coordinates.dat.data_ro_with_halos,
+        name=_generate_default_mesh_coordinates_name(tmesh1.name),
+    )
+    rmesh = MeshGeometry(relabeled_coordinates)
+    rmesh.name = name1
+    rmesh._tolerance = mesh.tolerance
+
+    # Tag the relabeled mesh with the original distribution parameters
+    rmesh._distribution_parameters = mesh._distribution_parameters
+    rmesh._did_reordering = mesh._did_reordering
+    return rmesh
 
 
 @PETSc.Log.EventDecorator()
@@ -4597,21 +4868,35 @@ def SubDomainData(geometric_expr):
     return op2.Subset(m.cell_set, indices)
 
 
-def Submesh(mesh, subdim, subdomain_id, label_name=None, name=None):
+def Submesh(mesh, subdim=None, subdomain_id=None, label_name=None, name=None, ignore_halo=False, reorder=None, comm=None):
     """Construct a submesh from a given mesh.
 
     Parameters
     ----------
     mesh : MeshGeometry
         Parent mesh (`MeshGeometry`).
-    subdim : int
+    subdim : int | None
         Topological dimension of the submesh.
-    subdomain_id : int
+        Defaults to ``mesh.topological_dimension``.
+    subdomain_id : int | None
         Subdomain ID representing the submesh.
-    label_name : str
+        If `None` the submesh will cover the entire domain.
+        This is useful to obtain a codim-1 submesh over all facets or
+        a submesh over a different communicator.
+    label_name : str | None
         Name of the label to search ``subdomain_id`` in.
-    name : str
+        Defaults to ``'Cell Sets'`` or ``'Face Sets'`` depending on ``subdim``.
+    name : str | None
         Name of the submesh.
+        Defaults to ``mesh.name + "_submesh"``.
+    ignore_halo : bool
+        Whether to exclude the halo from the submesh.
+    reorder : bool | None
+        Whether to reorder the mesh entities.
By default, + the submesh will be reordered if the parent mesh was reordered. + comm : PETSc.Comm | None + An optional sub-communicator to define the submesh. + By default, the submesh is defined on `mesh.comm`. Returns ------- @@ -4646,27 +4931,252 @@ def Submesh(mesh, subdim, subdomain_id, label_name=None, name=None): raise NotImplementedError("Can not create a submesh of an ``ExtrudedMesh``") elif isinstance(mesh.topology, VertexOnlyMeshTopology): raise NotImplementedError("Can not create a submesh of a ``VertexOnlyMesh``") + if subdim is None: + subdim = mesh.topological_dimension plex = mesh.topology_dm dim = plex.getDimension() - if subdim not in [dim, dim - 1]: + if subdim not in {dim, dim - 1}: raise NotImplementedError(f"Found submesh dim ({subdim}) and parent dim ({dim})") - if label_name is None: + if subdomain_id is None: + if label_name is not None: + raise ValueError("subdomain_id=None requires label_name=None.") + # Select all entities + label_name = "depth" + subdomain_id = subdim + elif label_name is None: if subdim == dim: label_name = dmcommon.CELL_SETS_LABEL elif subdim == dim - 1: label_name = dmcommon.FACE_SETS_LABEL + subplex = dmcommon.submesh_create(plex, subdim, label_name, subdomain_id, ignore_halo, comm=comm) + + comm = comm or mesh.comm name = name or _generate_default_submesh_name(mesh.name) - subplex = dmcommon.submesh_create(plex, subdim, label_name, subdomain_id, False) subplex.setName(_generate_default_mesh_topology_name(name)) if subplex.getDimension() != subdim: raise RuntimeError(f"Found subplex dim ({subplex.getDimension()}) != expected ({subdim})") + if reorder is None: + # Ideally we should set perm_is = mesh.dm_reordering[label_indices] + reorder = mesh._did_reordering + submesh = Mesh( subplex, submesh_parent=mesh, name=name, - distribution_parameters={ - "partition": False, - "overlap_type": (DistributedMeshOverlapType.NONE, 0), - }, + comm=comm, + reorder=reorder, + distribution_parameters=DISTRIBUTION_PARAMETERS_NOOP, ) 
+    # Tag the submesh with the original distribution parameters
+    submesh._distribution_parameters = mesh._distribution_parameters
     return submesh
+
+
+def coordinates_from_topology(topology: AbstractMeshTopology, element: finat.ufl.FiniteElement) -> "CoordinatelessFunction":
+    """Convert DMPlex coordinates into Firedrake coordinates.
+
+    Parameters
+    ----------
+    topology :
+        The mesh topology.
+    element :
+        The finite element defining the coordinate function space.
+
+    Returns
+    -------
+    CoordinatelessFunction :
+        The coordinates of the DMPlex reordered to agree with Firedrake's
+        element numbering.
+
+    """
+    import firedrake.functionspace as functionspace
+    import firedrake.function as function
+
+    if not isinstance(topology, ExtrudedMeshTopology) and len(topology.dm_cell_types) > 1:
+        return _MultiCellTypeDummyCoordinates(topology, element)
+
+    (gdim,) = element.reference_value_shape
+    coordinates_fs = functionspace.FunctionSpace(topology, element)
+    coordinates_data = dmcommon.reordered_coords(topology.topology_dm, coordinates_fs.dm.getDefaultSection(),
+                                                 (topology.num_vertices(), gdim))
+    return function.CoordinatelessFunction(coordinates_fs,
+                                           val=coordinates_data,
+                                           name=_generate_default_mesh_coordinates_name(topology.name))
+
+
+class MeshSequenceGeometry(ufl.MeshSequence):
+    """A representation of mixed mesh geometry."""
+
+    def __init__(self, meshes, set_hierarchy=True):
+        """Initialise.
+
+        Parameters
+        ----------
+        meshes : tuple or list
+            `MeshGeometry`s to make `MeshSequenceGeometry` with.
+        set_hierarchy : bool
+            Flag for making hierarchy.
+
+        """
+        for m in meshes:
+            if not isinstance(m, MeshGeometry):
+                raise ValueError(f"Got {type(m)}")
+        super().__init__(meshes)
+        self.comm = meshes[0].comm
+        # Only set hierarchy at top level.
+        if set_hierarchy:
+            self.set_hierarchy()
+
+    @cached_property
+    def topology(self):
+        return MeshSequenceTopology([m.topology for m in self._meshes])
+
+    @property
+    def topological(self):
+        """Alias of topology.
+ + This is to ensure consistent naming for some multigrid codes.""" + return self.topology + + def __eq__(self, other): + if type(other) != type(self): + return False + if len(other) != len(self): + return False + for o, s in zip(other, self): + if o is not s: + return False + return True + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self._meshes) + + def __len__(self): + return len(self._meshes) + + def __iter__(self): + return iter(self._meshes) + + def __getitem__(self, i): + return self._meshes[i] + + @cached_property + def extruded(self): + m = self.unique() + return m.extruded + + def unique(self): + """Return a single component or raise exception.""" + if len(set(self._meshes)) > 1: + raise NonUniqueMeshSequenceError(f"Found multiple meshes in {self} where a single mesh is expected") + m, = set(self._meshes) + return m + + def set_hierarchy(self): + """Set mesh hierarchy if needed.""" + from firedrake.mg.utils import set_level, get_level, has_level + + # TODO: Think harder on how mesh hierarchy should work with mixed meshes. + if all(not has_level(m) for m in self._meshes): + return + else: + if not all(has_level(m) for m in self._meshes): + raise RuntimeError("Found inconsistent component meshes") + hierarchy_list = [] + level_list = [] + for m in self: + hierarchy, level = get_level(m) + hierarchy_list.append(hierarchy) + level_list.append(level) + nlevels, = set(len(hierarchy) for hierarchy in hierarchy_list) + level, = set(level_list) + result = [] + for ilevel in range(nlevels): + if ilevel == level: + result.append(self) + else: + result.append(MeshSequenceGeometry([hierarchy[ilevel] for hierarchy in hierarchy_list], set_hierarchy=False)) + result = tuple(result) + for i, m in enumerate(result): + set_level(m, result, i) + + +class MeshSequenceTopology: + """A representation of mixed mesh topology.""" + + def __init__(self, meshes): + """Initialise. 
+ + Parameters + ---------- + meshes : tuple or list + `MeshTopology`s to make `MeshSequenceTopology` with. + + """ + for m in meshes: + if not isinstance(m, AbstractMeshTopology): + raise ValueError(f"Got {type(m)}") + self._meshes = tuple(meshes) + self.comm = meshes[0].comm + + @property + def topology(self): + """The underlying mesh topology object.""" + return self + + @property + def topological(self): + """Alias of topology. + + This is to ensure consistent naming for some multigrid codes.""" + return self + + def ufl_cell(self): + return CellSequence([m.ufl_cell() for m in self._meshes]) + + def ufl_mesh(self): + dim = self.ufl_cell().topological_dimension + return ufl.MeshSequence( + [ufl.Mesh(finat.ufl.VectorElement("Lagrange", cell, 1, dim=dim)) + for cell in self.ufl_cell().cells] + ) + + def __eq__(self, other): + if type(other) != type(self): + return False + if len(other) != len(self): + return False + for o, s in zip(other, self): + if o is not s: + return False + return True + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self._meshes) + + def __len__(self): + return len(self._meshes) + + def __iter__(self): + return iter(self._meshes) + + def __getitem__(self, i): + return self._meshes[i] + + @cached_property + def extruded(self): + m = self.unique() + return m.extruded + + def unique(self): + """Return a single component or raise exception.""" + if len(set(self._meshes)) > 1: + raise NonUniqueMeshSequenceError(f"Found multiple meshes in {self} where a single mesh is expected") + m, = set(self._meshes) + return m diff --git a/firedrake/mg/__init__.py b/firedrake/mg/__init__.py index f5e043c3d1..12f8ec7c36 100644 --- a/firedrake/mg/__init__.py +++ b/firedrake/mg/__init__.py @@ -1,4 +1,11 @@ -from .mesh import * # noqa: F401 -from .interface import * # noqa: F401 -from .embedded import * # noqa: F401 -from .opencascade_mh import * # noqa: F401 +from firedrake.mg.mesh import ( # noqa F401 + 
HierarchyBase, MeshHierarchy, ExtrudedMeshHierarchy, + NonNestedHierarchy, SemiCoarsenedExtrudedHierarchy +) +from firedrake.mg.interface import ( # noqa F401 + prolong, restrict, inject +) +from firedrake.mg.embedded import TransferManager # noqa F401 +from firedrake.mg.opencascade_mh import OpenCascadeMeshHierarchy # noqa F401 +from firedrake.mg.adaptive_hierarchy import AdaptiveMeshHierarchy # noqa F401 +from firedrake.mg.adaptive_transfer_manager import AdaptiveTransferManager # noqa: F401 diff --git a/firedrake/mg/adaptive_hierarchy.py b/firedrake/mg/adaptive_hierarchy.py new file mode 100644 index 0000000000..fc6c2f728a --- /dev/null +++ b/firedrake/mg/adaptive_hierarchy.py @@ -0,0 +1,85 @@ +from firedrake.mesh import MeshGeometry +from firedrake.cofunction import Cofunction +from firedrake.function import Function +from firedrake.mg import HierarchyBase +from firedrake.mg.utils import set_level + +__all__ = ["AdaptiveMeshHierarchy"] + + +class AdaptiveMeshHierarchy(HierarchyBase): + """ + HierarchyBase for hierarchies of adaptively refined meshes. + + Parameters + ---------- + base_mesh + The coarsest mesh in the hierarchy. + nested: bool + A flag to indicate whether the meshes are nested. + + """ + def __init__(self, base_mesh: MeshGeometry, nested: bool = True): + self.meshes = [] + self._meshes = [] + self.nested = nested + self.add_mesh(base_mesh) + + def add_mesh(self, mesh: MeshGeometry): + """ + Adds a mesh into the hierarchy. + + Parameters + ---------- + mesh + The mesh to be added to the finest level. + """ + level = len(self.meshes) + self._meshes.append(mesh) + self.meshes.append(mesh) + set_level(mesh, self, level) + + def adapt(self, eta: Function | Cofunction, theta: float): + """ + Adds a new mesh to the hierarchy by locally refining the finest mesh + with a simplified variant of Dorfler marking. The finest mesh must + come from a netgen mesh. 
+ + Parameters + ---------- + eta + A DG0 :class:`~firedrake.function.Function` with the local error estimator. + theta + The threshold for marking as a fraction of the maximum error. + + Note + ---- + Dorfler marking involves sorting all of the elements by decreasing + error estimator and taking the minimal set that exceeds some fixed + fraction of the total error. What this code implements is the simpler + variant that doesn't have a proof of convergence (as far as I know) + but works as well in practice. + + """ + if not isinstance(eta, (Function, Cofunction)): + raise TypeError(f"eta must be a Function or Cofunction, not a {type(eta).__name__}") + M = eta.function_space() + if M.finat_element.space_dimension() != 1: + raise ValueError("eta must be a Function or Cofunction in DG0") + mesh = self.meshes[-1] + if M.mesh() is not mesh: + raise ValueError("eta must be defined on the finest mesh of the hierarchy") + + # Take the maximum over all processes + with eta.dat.vec_ro as evec: + _, eta_max = evec.max() + + threshold = theta * eta_max + should_refine = eta.dat.data_ro > threshold + + markers = Function(M) + markers.dat.data_wo[should_refine] = 1 + + refined_mesh = mesh.refine_marked_elements(markers) + self.add_mesh(refined_mesh) + return refined_mesh diff --git a/firedrake/mg/adaptive_transfer_manager.py b/firedrake/mg/adaptive_transfer_manager.py new file mode 100644 index 0000000000..149906b845 --- /dev/null +++ b/firedrake/mg/adaptive_transfer_manager.py @@ -0,0 +1,55 @@ +""" +This module contains the AdaptiveTransferManager used to perform +transfer operations on AdaptiveMeshHierarchies +""" +from firedrake.mg.embedded import TransferManager +from firedrake.ufl_expr import action, TrialFunction +from firedrake.interpolation import interpolate + + +__all__ = ("AdaptiveTransferManager",) + + +class AdaptiveTransferManager(TransferManager): + """ + TransferManager for adaptively refined mesh hierarchies + """ + def __init__(self, *, native_transfers=None, 
use_averaging=True): + if native_transfers is not None: + raise NotImplementedError("Custom transfers not implemented.") + super().__init__(native_transfers=native_transfers, use_averaging=use_averaging) + self.cache = {} + + def get_interpolator(self, Vc, Vf): + from firedrake.assemble import assemble + key = (Vc, Vf) + try: + return self.cache[key] + except KeyError: + Iexpr = interpolate(TrialFunction(Vc), Vf) + # TODO reusable matfree Interpolator + I = assemble(Iexpr, mat_type="aij") + return self.cache.setdefault(key, I) + + def forward(self, uc, uf): + from firedrake.assemble import assemble + Vc = uc.function_space() + Vf = uf.function_space() + I = self.get_interpolator(Vc, Vf) + return assemble(action(I, uc), tensor=uf) + + def adjoint(self, uf, uc): + from firedrake.assemble import assemble + Vc = uc.function_space().dual() + Vf = uf.function_space().dual() + I = self.get_interpolator(Vc, Vf) + return assemble(action(uf, I), tensor=uc) + + def prolong(self, uf, uc): + return self.forward(uf, uc) + + def inject(self, uc, uf): + return self.forward(uc, uf) + + def restrict(self, uc, uf): + return self.adjoint(uc, uf) diff --git a/firedrake/mg/embedded.py b/firedrake/mg/embedded.py index 1b20432fdb..a78bcc4975 100644 --- a/firedrake/mg/embedded.py +++ b/firedrake/mg/embedded.py @@ -5,30 +5,11 @@ from enum import IntEnum from firedrake.petsc import PETSc from firedrake.embedding import get_embedding_dg_element - +from finat.element_factory import create_element __all__ = ("TransferManager", ) -native_families = frozenset(["Lagrange", "Discontinuous Lagrange", "Real", "Q", "DQ", "BrokenElement", "Crouzeix-Raviart", "Kong-Mulder-Veldhuizen"]) -alfeld_families = frozenset(["Hsieh-Clough-Tocher", "Reduced-Hsieh-Clough-Tocher", "Johnson-Mercier", - "Alfeld-Sorokina", "Arnold-Qin", "Reduced-Arnold-Qin", "Christiansen-Hu", - "Guzman-Neilan", "Guzman-Neilan Bubble"]) -non_native_variants = frozenset(["integral", "fdm", "alfeld"]) - - -def 
get_embedding_element(element, value_shape): - broken_cg = element.sobolev_space in {ufl.H1, ufl.H2} - dg_element = get_embedding_dg_element(element, value_shape, broken_cg=broken_cg) - variant = element.variant() or "default" - family = element.family() - # Elements on Alfeld splits are embedded onto DG Powell-Sabin. - # This yields supermesh projection - if (family in alfeld_families) or ("alfeld" in variant.lower() and family != "Discontinuous Lagrange"): - dg_element = dg_element.reconstruct(variant="powell-sabin") - return dg_element - - class Op(IntEnum): PROLONG = 0 RESTRICT = 1 @@ -68,14 +49,21 @@ def __init__(self, *, native_transfers=None, use_averaging=True): self.caches = {} def is_native(self, element, op): - if element in self.native_transfers.keys(): + if element in self.native_transfers: return self.native_transfers[element][op] is not None if isinstance(element.cell, ufl.TensorProductCell): if isinstance(element, finat.ufl.TensorProductElement): return all(self.is_native(e, op) for e in element.factor_elements) elif isinstance(element, finat.ufl.MixedElement): return all(self.is_native(e, op) for e in element.sub_elements) - return (element.family() in native_families) and not (element.variant() in non_native_variants) + + # Can we interpolate into this element? 
+ finat_element = create_element(element) + try: + finat_element.dual_basis + return True + except NotImplementedError: + return False def _native_transfer(self, element, op): try: @@ -183,7 +171,7 @@ def V_inv_mass_ksp(self, V): except KeyError: M = firedrake.assemble(firedrake.inner(firedrake.TrialFunction(V), firedrake.TestFunction(V))*firedrake.dx) - ksp = PETSc.KSP().create(comm=V._comm) + ksp = PETSc.KSP().create(comm=V.comm) ksp.setOperators(M.petscmat) ksp.setOptionsPrefix("{}_prolongation_mass_".format(V.ufl_element()._short_name)) ksp.setType("preonly") @@ -253,8 +241,8 @@ def op(self, source, target, transfer_op): if not self.requires_transfer(Vs, transfer_op, source, target): return - if all(self.is_native(e, transfer_op) for e in (source_element, target_element)): - self._native_transfer(source_element, transfer_op)(source, target) + if self.is_native(target_element, transfer_op): + self._native_transfer(target_element, transfer_op)(source, target) elif type(source_element) is finat.ufl.MixedElement: assert type(target_element) is finat.ufl.MixedElement for source_, target_ in zip(source.subfunctions, target.subfunctions): @@ -318,7 +306,7 @@ def restrict(self, source, target): if not self.requires_transfer(Vs_star, Op.RESTRICT, source, target): return - if all(self.is_native(e, Op.RESTRICT) for e in (source_element, target_element)): + if self.is_native(source_element, Op.RESTRICT): self._native_transfer(source_element, Op.RESTRICT)(source, target) elif type(source_element) is finat.ufl.MixedElement: assert type(target_element) is finat.ufl.MixedElement diff --git a/firedrake/mg/interface.py b/firedrake/mg/interface.py index 22faeb3e8d..a700b7eacc 100644 --- a/firedrake/mg/interface.py +++ b/firedrake/mg/interface.py @@ -1,7 +1,8 @@ from pyop2 import op2 -import firedrake -from firedrake import ufl_expr +from firedrake import ufl_expr, dmhooks +from firedrake.function import Function +from firedrake.cofunction import Cofunction from firedrake.petsc 
import PETSc from ufl.duals import is_dual from . import utils @@ -13,10 +14,10 @@ def check_arguments(coarse, fine, needs_dual=False): if is_dual(coarse) != needs_dual: - expected_type = firedrake.Cofunction if needs_dual else firedrake.Function + expected_type = Cofunction if needs_dual else Function raise TypeError("Coarse argument is a %s, not a %s" % (type(coarse).__name__, expected_type.__name__)) if is_dual(fine) != needs_dual: - expected_type = firedrake.Cofunction if needs_dual else firedrake.Function + expected_type = Cofunction if needs_dual else Function raise TypeError("Fine argument is a %s, not a %s" % (type(fine).__name__, expected_type.__name__)) cfs = coarse.function_space() ffs = fine.function_space() @@ -41,7 +42,7 @@ def prolong(coarse, fine): if len(Vc) != len(Vf): raise ValueError("Mixed spaces have different lengths") for in_, out in zip(coarse.subfunctions, fine.subfunctions): - manager = firedrake.dmhooks.get_transfer_manager(in_.function_space().dm) + manager = dmhooks.get_transfer_manager(in_.function_space().dm) manager.prolong(in_, out) return fine @@ -58,20 +59,26 @@ def prolong(coarse, fine): repeat = (fine_level - coarse_level)*refinements_per_level next_level = coarse_level * refinements_per_level + if needs_quadrature := not Vf.finat_element.has_pointwise_dual_basis: + # Introduce an intermediate quadrature target space + Vf = Vf.quadrature_space() + + finest = fine + Vfinest = finest.function_space() meshes = hierarchy._meshes for j in range(repeat): next_level += 1 - if j == repeat - 1: - next = fine - Vf = fine.function_space() + if j == repeat - 1 and not needs_quadrature: + fine = finest else: - Vf = Vc.reconstruct(mesh=meshes[next_level]) - next = firedrake.Function(Vf) + fine = Function(Vf.reconstruct(mesh=meshes[next_level])) + Vf = fine.function_space() + Vc = coarse.function_space() coarse_coords = get_coordinates(Vc) fine_to_coarse = utils.fine_node_to_coarse_node_map(Vf, Vc) fine_to_coarse_coords = 
utils.fine_node_to_coarse_node_map(Vf, coarse_coords.function_space()) - kernel = kernels.prolong_kernel(coarse) + kernel = kernels.prolong_kernel(coarse, Vf) # XXX: Should be able to figure out locations by pushing forward # reference cell node locations to physical space. @@ -82,13 +89,17 @@ def prolong(coarse, fine): for d in [coarse, coarse_coords]: d.dat.global_to_local_begin(op2.READ) d.dat.global_to_local_end(op2.READ) - op2.par_loop(kernel, next.node_set, - next.dat(op2.WRITE), + op2.par_loop(kernel, fine.node_set, + fine.dat(op2.WRITE), coarse.dat(op2.READ, fine_to_coarse), node_locations.dat(op2.READ), coarse_coords.dat(op2.READ, fine_to_coarse_coords)) - coarse = next - Vc = Vf + + if needs_quadrature: + # Transfer to the actual target space + new_fine = finest if j == repeat-1 else Function(Vfinest.reconstruct(mesh=meshes[next_level])) + fine = new_fine.interpolate(fine) + coarse = fine return fine @@ -101,7 +112,7 @@ def restrict(fine_dual, coarse_dual): if len(Vc) != len(Vf): raise ValueError("Mixed spaces have different lengths") for in_, out in zip(fine_dual.subfunctions, coarse_dual.subfunctions): - manager = firedrake.dmhooks.get_transfer_manager(in_.function_space().dm) + manager = dmhooks.get_transfer_manager(in_.function_space().dm) manager.restrict(in_, out) return coarse_dual @@ -118,17 +129,25 @@ def restrict(fine_dual, coarse_dual): repeat = (fine_level - coarse_level)*refinements_per_level next_level = fine_level * refinements_per_level - meshes = hierarchy._meshes + if needs_quadrature := not Vf.finat_element.has_pointwise_dual_basis: + # Introduce an intermediate quadrature source space + Vq = Vf.quadrature_space() + coarsest = coarse_dual.zero() + meshes = hierarchy._meshes for j in range(repeat): + if needs_quadrature: + # Transfer to the quadrature source space + fine_dual = Function(Vq.reconstruct(mesh=meshes[next_level])).interpolate(fine_dual) + next_level -= 1 if j == repeat - 1: - coarse_dual.dat.zero() - next = coarse_dual + 
coarse_dual = coarsest else: - Vc = Vf.reconstruct(mesh=meshes[next_level]) - next = firedrake.Cofunction(Vc) - Vc = next.function_space() + coarse_dual = Function(Vc.reconstruct(mesh=meshes[next_level])) + Vf = fine_dual.function_space() + Vc = coarse_dual.function_space() + # XXX: Should be able to figure out locations by pushing forward # reference cell node locations to physical space. # x = \sum_i c_i \phi_i(x_hat) @@ -144,12 +163,11 @@ def restrict(fine_dual, coarse_dual): d.dat.global_to_local_end(op2.READ) kernel = kernels.restrict_kernel(Vf, Vc) op2.par_loop(kernel, fine_dual.node_set, - next.dat(op2.INC, fine_to_coarse), + coarse_dual.dat(op2.INC, fine_to_coarse), fine_dual.dat(op2.READ), node_locations.dat(op2.READ), coarse_coords.dat(op2.READ, fine_to_coarse_coords)) - fine_dual = next - Vf = Vc + fine_dual = coarse_dual return coarse_dual @@ -162,7 +180,7 @@ def inject(fine, coarse): if len(Vc) != len(Vf): raise ValueError("Mixed spaces have different lengths") for in_, out in zip(fine.subfunctions, coarse.subfunctions): - manager = firedrake.dmhooks.get_transfer_manager(in_.function_space().dm) + manager = dmhooks.get_transfer_manager(in_.function_space().dm) manager.inject(in_, out) return @@ -184,46 +202,50 @@ def inject(fine, coarse): # For DG, for each coarse cell, instead: # solve inner(u_c, v_c)*dx_c == inner(f, v_c)*dx_c - kernel, dg = kernels.inject_kernel(Vf, Vc) hierarchy, coarse_level = utils.get_level(ufl_expr.extract_unique_domain(coarse)) - if dg and not hierarchy.nested: - raise NotImplementedError("Sorry, we can't do supermesh projections yet!") _, fine_level = utils.get_level(ufl_expr.extract_unique_domain(fine)) refinements_per_level = hierarchy.refinements_per_level repeat = (fine_level - coarse_level)*refinements_per_level next_level = fine_level * refinements_per_level - meshes = hierarchy._meshes + if needs_quadrature := not Vc.finat_element.has_pointwise_dual_basis: + # Introduce an intermediate quadrature target space + Vc = 
Vc.quadrature_space() + kernel, dg = kernels.inject_kernel(Vf, Vc) + if dg and not hierarchy.nested: + raise NotImplementedError("Sorry, we can't do supermesh projections yet!") + + coarsest = coarse.zero() + Vcoarsest = coarsest.function_space() + meshes = hierarchy._meshes for j in range(repeat): next_level -= 1 - if j == repeat - 1: - coarse.dat.zero() - next = coarse - Vc = next.function_space() + if j == repeat - 1 and not needs_quadrature: + coarse = coarsest else: - Vc = Vf.reconstruct(mesh=meshes[next_level]) - next = firedrake.Function(Vc) + coarse = Function(Vc.reconstruct(mesh=meshes[next_level])) + Vc = coarse.function_space() + Vf = fine.function_space() if not dg: - node_locations = utils.physical_node_locations(Vc) - fine_coords = get_coordinates(Vf) - coarse_node_to_fine_nodes = utils.coarse_node_to_fine_node_map(Vc, Vf) - coarse_node_to_fine_coords = utils.coarse_node_to_fine_node_map(Vc, fine_coords.function_space()) + coarse_to_fine = utils.coarse_node_to_fine_node_map(Vc, Vf) + coarse_to_fine_coords = utils.coarse_node_to_fine_node_map(Vc, fine_coords.function_space()) + node_locations = utils.physical_node_locations(Vc) # Have to do this, because the node set core size is not right for # this expanded stencil for d in [fine, fine_coords]: d.dat.global_to_local_begin(op2.READ) d.dat.global_to_local_end(op2.READ) - op2.par_loop(kernel, next.node_set, - next.dat(op2.INC), + op2.par_loop(kernel, coarse.node_set, + coarse.dat(op2.WRITE), + fine.dat(op2.READ, coarse_to_fine), node_locations.dat(op2.READ), - fine.dat(op2.READ, coarse_node_to_fine_nodes), - fine_coords.dat(op2.READ, coarse_node_to_fine_coords)) + fine_coords.dat(op2.READ, coarse_to_fine_coords)) else: - coarse_coords = Vc.mesh().coordinates - fine_coords = Vf.mesh().coordinates + coarse_coords = get_coordinates(Vc) + fine_coords = get_coordinates(Vf) coarse_cell_to_fine_nodes = utils.coarse_cell_to_fine_node_map(Vc, Vf) coarse_cell_to_fine_coords = 
utils.coarse_cell_to_fine_node_map(Vc, fine_coords.function_space()) # Have to do this, because the node set core size is not right for @@ -232,12 +254,16 @@ def inject(fine, coarse): d.dat.global_to_local_begin(op2.READ) d.dat.global_to_local_end(op2.READ) op2.par_loop(kernel, Vc.mesh().cell_set, - next.dat(op2.INC, next.cell_node_map()), + coarse.dat(op2.INC, coarse.cell_node_map()), fine.dat(op2.READ, coarse_cell_to_fine_nodes), fine_coords.dat(op2.READ, coarse_cell_to_fine_coords), coarse_coords.dat(op2.READ, coarse_coords.cell_node_map())) - fine = next - Vf = Vc + + if needs_quadrature: + # Transfer to the actual target space + new_coarse = coarsest if j == repeat - 1 else Function(Vcoarsest.reconstruct(mesh=meshes[next_level])) + coarse = new_coarse.interpolate(coarse) + fine = coarse return coarse @@ -245,5 +271,5 @@ def get_coordinates(V): coords = V.mesh().coordinates if V.boundary_set: W = V.reconstruct(element=coords.function_space().ufl_element()) - coords = firedrake.Function(W).interpolate(coords) + coords = Function(W).interpolate(coords) return coords diff --git a/firedrake/mg/kernels.py b/firedrake/mg/kernels.py index cf61364b1e..f4ce35328f 100644 --- a/firedrake/mg/kernels.py +++ b/firedrake/mg/kernels.py @@ -1,16 +1,13 @@ import numpy import string -from fractions import Fraction from pyop2 import op2 +from pyop2.utils import as_tuple from firedrake.utils import IntType, as_cstr, complex_mode, ScalarType from firedrake.functionspacedata import entity_dofs_key from firedrake.functionspaceimpl import FiredrakeDualSpace -import firedrake from firedrake.mg import utils -from ufl.algorithms.analysis import extract_arguments, extract_coefficients from ufl.algorithms import estimate_total_polynomial_degree -from ufl.corealg.map_dag import map_expr_dags from ufl.domain import extract_unique_domain import loopy as lp @@ -20,22 +17,22 @@ import gem.impero_utils as impero_utils import ufl -import finat.ufl import tsfc import 
tsfc.kernel_interface.firedrake_loopy as firedrake_interface from tsfc.loopy import generate as generate_loopy from tsfc import fem, ufl_utils, spectral -from tsfc.driver import TSFCIntegralDataInfo +from tsfc.driver import TSFCIntegralDataInfo, compile_expression_dual_evaluation from tsfc.kernel_interface.common import lower_integral_type from tsfc.parameters import default_parameters -from tsfc.ufl_utils import apply_mapping, simplify_abs +from finat.ufl import MixedElement from finat.element_factory import create_element from finat.quadrature import make_quadrature from firedrake.pointquery_utils import dX_norm_square, X_isub_dX, init_X, inside_check, is_affine, celldist_l1_c_expr from firedrake.pointquery_utils import to_reference_coords_newton_step as to_reference_coords_newton_step_body +from firedrake.pointeval_utils import runtime_quadrature_element def to_reference_coordinates(ufl_coordinate_element, parameters=None): @@ -47,19 +44,19 @@ def to_reference_coordinates(ufl_coordinate_element, parameters=None): parameters = _ # Create FInAT element - element = finat.element_factory.create_element(ufl_coordinate_element) + element = create_element(ufl_coordinate_element) gdim, = ufl_coordinate_element.reference_value_shape cell = ufl_coordinate_element.cell code = { "geometric_dimension": gdim, - "topological_dimension": cell.topological_dimension(), + "topological_dimension": cell.topological_dimension, "to_reference_coords_newton_step": to_reference_coords_newton_step_body(ufl_coordinate_element, parameters, x0_dtype=ScalarType, dX_dtype="double"), "init_X": init_X(element.cell, parameters), - "max_iteration_count": 1 if is_affine(ufl_coordinate_element) else 16, + "max_iteration_count": 1 if is_affine(ufl_coordinate_element) else 20, "convergence_epsilon": 1e-12, - "dX_norm_square": dX_norm_square(cell.topological_dimension()), - "X_isub_dX": X_isub_dX(cell.topological_dimension()), + "dX_norm_square": dX_norm_square(cell.topological_dimension), + 
"X_isub_dX": X_isub_dX(cell.topological_dimension), "IntType": as_cstr(IntType), } @@ -95,149 +92,95 @@ def to_reference_coordinates(ufl_coordinate_element, parameters=None): return evaluate_template_c % code -def compile_element(expression, dual_space=None, parameters=None, +def compile_element(operand, dual_arg, parameters=None, name="evaluate"): """Generate code for point evaluations. - :arg expression: A UFL expression (may contain up to one coefficient, or one argument) - :arg dual_space: if the expression has an argument, should we also distribute residual data? - :returns: The generated code (:class:`loopy.TranslationUnit`) - """ - if parameters is None: - parameters = default_parameters() - else: - _ = default_parameters() - _.update(parameters) - parameters = _ + Parameters + ---------- + operand: ufl.Expr + A primal expression + dual_arg: ufl.Coargument | ufl.Cofunction + A dual argument or coefficient - expression = tsfc.ufl_utils.preprocess_expression(expression, complex_mode=complex_mode) + Returns + ------- + str + The generated code + """ + source_mesh = extract_unique_domain(operand) + target_space = dual_arg.arguments()[0].ufl_function_space() - # # Collect required coefficients + # Reconstruct the target space as a runtime Quadrature space + ufl_element = runtime_quadrature_element(source_mesh, target_space.ufl_element()) + target_space = ufl.FunctionSpace(source_mesh, ufl_element) - try: - # Forward interpolation: expression has a coefficient - arg, = extract_coefficients(expression) - argument_multiindices = () - coefficient = True - except ValueError: - # Adjoint interpolation: expression has an argument - arg, = extract_arguments(expression) - finat_elem = create_element(arg.ufl_element()) - argument_multiindex = finat_elem.get_indices() - argument_multiindices = (argument_multiindex, ) - coefficient = False - - # Map into reference values - domain = extract_unique_domain(expression) - expression = apply_mapping(expression, arg.ufl_element(), 
domain) - value_shape = expression.ufl_shape - - # Get indices for the output tensor - if coefficient: - tensor_indices = tuple(gem.Index() for s in value_shape) + # Reconstruct the dual argument in the runtime Quadrature space + if isinstance(dual_arg, ufl.Cofunction): + dual_arg = ufl.Cofunction(target_space.dual()) else: - if value_shape: - tensor_indices = argument_multiindex[-len(value_shape):] - else: - tensor_indices = () - - # Replace coordinates (if any) - builder = firedrake_interface.KernelBuilderBase(scalar_type=ScalarType) - # Translate to GEM - cell = domain.ufl_cell() - dim = cell.topological_dimension() - point = gem.Variable('X', (dim,)) - point_arg = lp.GlobalArg("X", dtype=ScalarType, shape=(dim,)) - - config = dict(interface=builder, - ufl_cell=cell, - integral_type="cell", - point_indices=(), - point_expr=point, - argument_multiindices=argument_multiindices, - scalar_type=parameters["scalar_type"]) - context = tsfc.fem.GemPointContext(**config) - - # Abs-simplification - expression = simplify_abs(expression, complex_mode) - - # Translate UFL -> GEM - if coefficient: - assert dual_space is None - builder._coefficient(arg, "f") - f_arg = [builder.generate_arg_from_expression(builder.coefficient_map[arg])] + dual_arg = ufl.Coargument(target_space.dual(), number=dual_arg.number()) + expression = ufl.Interpolate(operand, dual_arg) + + kernel = compile_expression_dual_evaluation(expression, + ufl_element, + parameters=parameters, + name="pyop2_kernel_"+name) + return lp.generate_code_v2(kernel.ast).device_code() + + +def _make_kernel_args(element, *args): + """Returns a string of argument names to call the kernel. + Discards coordinate arguments if they do not appear in the kernel.""" + # NOTE: TSFC will sometimes drop run-time arguments in generated + # kernels if they are deemed not-necessary. + # For further information, see the same note in interpolation.py. 
+ mask = [True] * len(args) + # Drop the source coordinates if the element is affinely-mapped. + needs_coordinates = element.mapping != "affine" + mask[1] = needs_coordinates + # Drop the target location if the element is constant. + is_constant = sum(as_tuple(element.degree)) == 0 and not element.complex.is_macrocell() + mask[-1] = not is_constant + kernel_args = ", ".join(arg for arg, include in zip(args, mask) if include) + return kernel_args + + +def _make_element_key(element): + """Returns a cache key for a finat element.""" + return entity_dofs_key(element.complex.get_topology()) + entity_dofs_key(element.entity_dofs()) + + +def prolong_kernel(expression, Vf): + Vc = expression.ufl_function_space() + hierarchy, levelf = utils.get_level(Vf.mesh()) + hierarchy, levelc = utils.get_level(Vc.mesh()) + if Vc.mesh().extruded: + assert Vf.mesh().extruded + level_ratio = (Vc.mesh().layers - 1) // (Vf.mesh().layers - 1) else: - f_arg = [] - translator = tsfc.fem.Translator(context) - result, = map_expr_dags(translator, [expression]) - - b_arg = [] - if coefficient: - if expression.ufl_shape: - return_variable = gem.Indexed(gem.Variable('R', expression.ufl_shape), tensor_indices) - result_arg = lp.GlobalArg("R", dtype=ScalarType, shape=expression.ufl_shape) - result = gem.Indexed(result, tensor_indices) - else: - return_variable = gem.Indexed(gem.Variable('R', (1,)), (0,)) - result_arg = lp.GlobalArg("R", dtype=ScalarType, shape=(1,)) - + level_ratio = 1 + if levelf > levelc: + # prolong + ncandidate = hierarchy.fine_to_coarse_cells[levelf].shape[1] else: - return_variable = gem.Indexed(gem.Variable('R', finat_elem.index_shape), argument_multiindex) - result = gem.Indexed(result, tensor_indices) - if dual_space: - if value_shape: - var = gem.Indexed(gem.Variable("b", value_shape), tensor_indices) - b_arg = [lp.GlobalArg("b", dtype=ScalarType, shape=value_shape)] - else: - var = gem.Indexed(gem.Variable("b", (1, )), (0, )) - b_arg = [lp.GlobalArg("b", dtype=ScalarType, 
shape=(1,))] - result = gem.Product(result, var) - - result_arg = lp.GlobalArg("R", dtype=ScalarType, shape=finat_elem.index_shape) - - # Unroll - max_extent = parameters["unroll_indexsum"] - if max_extent: - def predicate(index): - return index.extent <= max_extent - result, = gem.optimise.unroll_indexsum([result], predicate=predicate) - - # Translate GEM -> loopy - result, = gem.impero_utils.preprocess_gem([result]) - impero_c = gem.impero_utils.compile_gem([(return_variable, result)], tensor_indices) - - loopy_args = [result_arg] + b_arg + f_arg + [point_arg] - kernel_code, _ = generate_loopy( - impero_c, loopy_args, ScalarType, - kernel_name="pyop2_kernel_"+name, index_names={}) - - return lp.generate_code_v2(kernel_code).device_code() - - -def prolong_kernel(expression): - meshc = extract_unique_domain(expression) - hierarchy, level = utils.get_level(extract_unique_domain(expression)) - levelf = level + Fraction(1, hierarchy.refinements_per_level) + # inject + ncandidate = hierarchy.coarse_to_fine_cells[levelf].shape[1] + ncandidate *= level_ratio + coordinates = Vc.mesh().coordinates + key = (("prolong", ncandidate) + + (Vf.block_size,) + + _make_element_key(Vf.finat_element) + + _make_element_key(Vc.finat_element) + + _make_element_key(coordinates.function_space().finat_element)) cache = hierarchy._shared_data_cache["transfer_kernels"] - coordinates = extract_unique_domain(expression).coordinates - if meshc.cell_set._extruded: - idx = levelf * hierarchy.refinements_per_level - assert idx == int(idx) - assert hierarchy._meshes[int(idx)].cell_set._extruded - V = expression.function_space() - key = (("prolong",) - + (V.block_size,) - + entity_dofs_key(V.finat_element.complex.get_topology()) - + entity_dofs_key(V.finat_element.entity_dofs()) - + entity_dofs_key(coordinates.function_space().finat_element.entity_dofs())) try: return cache[key] except KeyError: - mesh = extract_unique_domain(coordinates) - eval_code = compile_element(expression) + evaluate_code = 
compile_element(expression, ufl.TestFunction(Vf.dual())) to_reference_kernel = to_reference_coordinates(coordinates.ufl_element()) - element = create_element(expression.ufl_element()) coords_element = create_element(coordinates.ufl_element()) + element = create_element(expression.ufl_element()) my_kernel = """#include %(to_reference)s @@ -272,7 +215,7 @@ def prolong_kernel(expression): cell = bestcell; } else { fprintf(stderr, "Could not identify cell in transfer operator. Point: "); - for (int coord = 0; coord < %(spacedim)s; coord++) { + for (int coord = 0; coord < %(tdim)s; coord++) { fprintf(stderr, "%%.14e ", X[coord]); } fprintf(stderr, "\\n"); @@ -280,46 +223,44 @@ def prolong_kernel(expression): abort(); } } - const PetscScalar *coarsei = f + cell*%(coarse_cell_inc)d; + const PetscScalar *fi = f + cell*%(coarse_cell_inc)d; + const PetscScalar *Xci = Xc + cell*%(Xc_cell_inc)d; for ( int i = 0; i < %(Rdim)d; i++ ) { R[i] = 0; } - pyop2_kernel_evaluate(R, coarsei, Xref); + pyop2_kernel_evaluate(%(kernel_args)s); } """ % {"to_reference": str(to_reference_kernel), - "evaluate": eval_code, - "spacedim": element.cell.get_spatial_dimension(), - "ncandidate": hierarchy.fine_to_coarse_cells[levelf].shape[1], - "Rdim": V.block_size, + "evaluate": evaluate_code, + "kernel_args": _make_kernel_args(element, "R", "Xci", "fi", "Xref"), + "ncandidate": ncandidate, + "Rdim": Vf.block_size, "inside_cell": inside_check(element.cell, eps=1e-8, X="Xref"), "celldist_l1_c_expr": celldist_l1_c_expr(element.cell, X="Xref"), "Xc_cell_inc": coords_element.space_dimension(), "coarse_cell_inc": element.space_dimension(), - "tdim": mesh.topological_dimension()} + "tdim": element.cell.get_spatial_dimension()} return cache.setdefault(key, op2.Kernel(my_kernel, name="pyop2_kernel_prolong")) def restrict_kernel(Vf, Vc): - hierarchy, level = utils.get_level(Vc.mesh()) - levelf = level + Fraction(1, hierarchy.refinements_per_level) - cache = hierarchy._shared_data_cache["transfer_kernels"] + 
hierarchy, levelf = utils.get_level(Vf.mesh()) + if Vf.mesh().extruded: + assert Vc.mesh().extruded + ncandidate = hierarchy.fine_to_coarse_cells[levelf].shape[1] coordinates = Vc.mesh().coordinates - if Vf.extruded: - assert Vc.extruded - key = (("restrict",) + key = (("restrict", ncandidate) + (Vf.block_size,) - + entity_dofs_key(Vf.finat_element.complex.get_topology()) - + entity_dofs_key(Vc.finat_element.complex.get_topology()) - + entity_dofs_key(Vf.finat_element.entity_dofs()) - + entity_dofs_key(Vc.finat_element.entity_dofs()) - + entity_dofs_key(coordinates.function_space().finat_element.entity_dofs())) + + _make_element_key(Vf.finat_element) + + _make_element_key(Vc.finat_element) + + _make_element_key(coordinates.function_space().finat_element)) + cache = hierarchy._shared_data_cache["transfer_kernels"] try: return cache[key] except KeyError: assert isinstance(Vc, FiredrakeDualSpace) and isinstance(Vf, FiredrakeDualSpace) - mesh = extract_unique_domain(coordinates) - evaluate_code = compile_element(firedrake.TestFunction(Vc.dual()), Vf.dual()) + evaluate_code = compile_element(ufl.TestFunction(Vc.dual()), ufl.Cofunction(Vf)) to_reference_kernel = to_reference_coordinates(coordinates.ufl_element()) coords_element = create_element(coordinates.ufl_element()) element = create_element(Vc.ufl_element()) @@ -359,7 +300,7 @@ def restrict_kernel(Vf, Vc): cell = bestcell; } else { fprintf(stderr, "Could not identify cell in transfer operator. 
Point: "); - for (int coord = 0; coord < %(spacedim)s; coord++) { + for (int coord = 0; coord < %(tdim)s; coord++) { fprintf(stderr, "%%.14e ", X[coord]); } fprintf(stderr, "\\n"); @@ -370,112 +311,47 @@ def restrict_kernel(Vf, Vc): { const PetscScalar *Ri = R + cell*%(coarse_cell_inc)d; - pyop2_kernel_evaluate(Ri, b, Xref); + pyop2_kernel_evaluate(%(kernel_args)s); } } """ % {"to_reference": str(to_reference_kernel), "evaluate": evaluate_code, - "ncandidate": hierarchy.fine_to_coarse_cells[levelf].shape[1], + "kernel_args": _make_kernel_args(element, "Ri", "Xc", "b", "Xref"), + "ncandidate": ncandidate, "inside_cell": inside_check(element.cell, eps=1e-8, X="Xref"), "celldist_l1_c_expr": celldist_l1_c_expr(element.cell, X="Xref"), "Xc_cell_inc": coords_element.space_dimension(), "coarse_cell_inc": element.space_dimension(), - "spacedim": element.cell.get_spatial_dimension(), - "tdim": mesh.topological_dimension()} + "tdim": element.cell.get_spatial_dimension()} return cache.setdefault(key, op2.Kernel(my_kernel, name="pyop2_kernel_restrict")) def inject_kernel(Vf, Vc): - hierarchy, level = utils.get_level(Vc.mesh()) - cache = hierarchy._shared_data_cache["transfer_kernels"] - coordinates = Vf.mesh().coordinates - if Vf.extruded: - assert Vc.extruded - level_ratio = (Vf.mesh().layers - 1) // (Vc.mesh().layers - 1) - else: - level_ratio = 1 - key = (("inject", level_ratio) - + (Vf.block_size,) - + entity_dofs_key(Vc.finat_element.complex.get_topology()) - + entity_dofs_key(Vf.finat_element.complex.get_topology()) - + entity_dofs_key(Vc.finat_element.entity_dofs()) - + entity_dofs_key(Vf.finat_element.entity_dofs()) - + entity_dofs_key(Vc.mesh().coordinates.function_space().finat_element.entity_dofs()) - + entity_dofs_key(coordinates.function_space().finat_element.entity_dofs())) - try: - return cache[key] - except KeyError: - ncandidate = hierarchy.coarse_to_fine_cells[level].shape[1] * level_ratio - if Vc.finat_element.entity_dofs() == 
Vc.finat_element.entity_closure_dofs(): + if Vc.finat_element.is_dg(): + hierarchy, level = utils.get_level(Vc.mesh()) + if Vf.extruded: + assert Vc.extruded + level_ratio = (Vf.mesh().layers - 1) // (Vc.mesh().layers - 1) + else: + level_ratio = 1 + key = (("inject", level_ratio) + + (Vf.block_size,) + + entity_dofs_key(Vc.finat_element.complex.get_topology()) + + entity_dofs_key(Vf.finat_element.complex.get_topology()) + + entity_dofs_key(Vc.finat_element.entity_dofs()) + + entity_dofs_key(Vf.finat_element.entity_dofs()) + + entity_dofs_key(Vc.mesh().coordinates.function_space().finat_element.entity_dofs()) + + entity_dofs_key(Vf.mesh().coordinates.function_space().finat_element.entity_dofs())) + cache = hierarchy._shared_data_cache["transfer_kernels"] + try: + return cache[key] + except KeyError: + ncandidate = hierarchy.coarse_to_fine_cells[level].shape[1] * level_ratio return cache.setdefault(key, (dg_injection_kernel(Vf, Vc, ncandidate), True)) - - coordinates = Vf.mesh().coordinates - evaluate_code = compile_element(ufl.Coefficient(Vf)) - to_reference_kernel = to_reference_coordinates(coordinates.ufl_element()) - - coords_element = create_element(coordinates.ufl_element()) - Vf_element = create_element(Vf.ufl_element()) - kernel = """ - %(to_reference)s - %(evaluate)s - - __attribute__((noinline)) /* Clang bug */ - static void pyop2_kernel_inject(PetscScalar *R, const PetscScalar *X, const PetscScalar *f, const PetscScalar *Xf) - { - PetscScalar Xref[%(tdim)d]; - int cell = -1; - int bestcell = -1; - double bestdist = 1e10; - for (int i = 0; i < %(ncandidate)d; i++) { - const PetscScalar *Xfi = Xf + i*%(Xf_cell_inc)d; - double celldist = 2*bestdist; - to_reference_coords_kernel(Xref, X, Xfi); - if (%(inside_cell)s) { - cell = i; - break; - } - - celldist = %(celldist_l1_c_expr)s; - if (celldist < bestdist) { - bestdist = celldist; - bestcell = i; - } - } - if (cell == -1) { - /* We didn't find a cell that contained this point exactly. 
- Did we find one that was close enough? */ - if (bestdist < 10) { - cell = bestcell; - } else { - fprintf(stderr, "Could not identify cell in transfer operator. Point: "); - for (int coord = 0; coord < %(spacedim)s; coord++) { - fprintf(stderr, "%%.14e ", X[coord]); - } - fprintf(stderr, "\\n"); - fprintf(stderr, "Number of candidates: %%d. Best distance located: %%14e", %(ncandidate)d, bestdist); - abort(); - } - } - const PetscScalar *fi = f + cell*%(f_cell_inc)d; - for ( int i = 0; i < %(Rdim)d; i++ ) { - R[i] = 0; - } - pyop2_kernel_evaluate(R, fi, Xref); - } - """ % { - "to_reference": str(to_reference_kernel), - "evaluate": evaluate_code, - "inside_cell": inside_check(Vc.finat_element.cell, eps=1e-8, X="Xref"), - "spacedim": Vc.finat_element.cell.get_spatial_dimension(), - "celldist_l1_c_expr": celldist_l1_c_expr(Vc.finat_element.cell, X="Xref"), - "tdim": Vc.mesh().topological_dimension(), - "ncandidate": ncandidate, - "Rdim": Vf.block_size, - "Xf_cell_inc": coords_element.space_dimension(), - "f_cell_inc": Vf_element.space_dimension() - } - return cache.setdefault(key, (op2.Kernel(kernel, name="pyop2_kernel_inject"), False)) + else: + expression = ufl.Coefficient(Vf) + return (prolong_kernel(expression, Vc), False) class MacroKernelBuilder(firedrake_interface.KernelBuilderBase): @@ -494,7 +370,7 @@ def set_coefficients(self, coefficients): self.coefficients = [] self.kernel_args = [] for i, coefficient in enumerate(coefficients): - if type(coefficient.ufl_element()) == finat.ufl.MixedElement: + if type(coefficient.ufl_element()) is MixedElement: raise NotImplementedError("Sorry, not for mixed.") self.coefficients.append(coefficient) self.kernel_args.append(self._coefficient(coefficient, "macro_w_%d" % (i, ))) @@ -526,6 +402,8 @@ def dg_injection_kernel(Vf, Vc, ncell): if complex_mode: raise NotImplementedError("In complex mode we are waiting for Slate") macro_builder = MacroKernelBuilder(ScalarType, ncell) + macro_builder._domain_integral_type_map = 
{Vf.mesh(): "cell"} + macro_builder._entity_ids = {Vf.mesh(): (0,)} f = ufl.Coefficient(Vf) macro_builder.set_coefficients([f]) macro_builder.set_coordinates(Vf.mesh()) @@ -540,12 +418,10 @@ def dg_injection_kernel(Vf, Vc, ncell): macro_quadrature_rule = make_quadrature(ref_complex, estimate_total_polynomial_degree(ufl.inner(f, f))) index_cache = {} parameters = default_parameters() - integration_dim, entity_ids = lower_integral_type(Vfe.cell, "cell") + integration_dim, _ = lower_integral_type(Vfe.cell, "cell") macro_cfg = dict(interface=macro_builder, ufl_cell=Vf.ufl_cell(), - integral_type="cell", integration_dim=integration_dim, - entity_ids=entity_ids, index_cache=index_cache, quadrature_rule=macro_quadrature_rule, scalar_type=parameters["scalar_type"]) @@ -564,26 +440,26 @@ def dg_injection_kernel(Vf, Vc, ncell): integral_type="cell", subdomain_id=("otherwise",), domain_number=0, + domain_integral_type_map={Vc.mesh(): "cell"}, arguments=(ufl.TestFunction(Vc), ), coefficients=(), coefficient_split={}, coefficient_numbers=()) coarse_builder = firedrake_interface.KernelBuilder(info, parameters["scalar_type"]) - coarse_builder.set_coordinates(Vc.mesh()) + coarse_builder.set_coordinates([Vc.mesh()]) + coarse_builder.set_entity_numbers([Vc.mesh()]) argument_multiindices = coarse_builder.argument_multiindices argument_multiindex, = argument_multiindices return_variable, = coarse_builder.return_variables - integration_dim, entity_ids = lower_integral_type(Vce.cell, "cell") + integration_dim, _ = lower_integral_type(Vce.cell, "cell") # Midpoint quadrature for jacobian on coarse cell. 
quadrature_rule = make_quadrature(Vce.cell, 0) coarse_cfg = dict(interface=coarse_builder, ufl_cell=Vc.ufl_cell(), - integral_type="cell", integration_dim=integration_dim, - entity_ids=entity_ids, index_cache=index_cache, quadrature_rule=quadrature_rule, scalar_type=parameters["scalar_type"]) diff --git a/firedrake/mg/mesh.py b/firedrake/mg/mesh.py index 791775054d..2279cbc67d 100644 --- a/firedrake/mg/mesh.py +++ b/firedrake/mg/mesh.py @@ -7,7 +7,9 @@ import petsctools import firedrake import firedrake.cython.dmcommon as dmcommon -from firedrake.utils import cached_property +from functools import cached_property + +from firedrake import utils from firedrake.cython import mgimpl as impl from .utils import set_level @@ -55,13 +57,6 @@ def comm(self): raise NotImplementedError("All meshes in hierarchy must be on same communicator") return comm - @cached_property - def _comm(self): - _comm = self[0]._comm - if not all(m._comm == _comm for m in self): - raise NotImplementedError("All meshes in hierarchy must be on same communicator") - return _comm - def __iter__(self): """Iterate over the hierarchy of meshes from coarsest to finest""" for m in self.meshes: @@ -120,12 +115,9 @@ def MeshHierarchy(mesh, refinement_levels, """ if (isinstance(netgen_flags, bool) and netgen_flags) or isinstance(netgen_flags, dict): - try: - from ngsPETSc import NetgenHierarchy - except ImportError: - raise ImportError("Unable to import netgen and ngsPETSc. Please ensure that netgen and ngsPETSc " - "are installed and available to Firedrake (see " - "https://www.firedrakeproject.org/install.html#netgen).") + utils.check_netgen_installed() + from firedrake.mg.netgen import NetgenHierarchy + if hasattr(mesh, "netgen_mesh"): return NetgenHierarchy(mesh, refinement_levels, flags=netgen_flags) else: @@ -134,9 +126,8 @@ def MeshHierarchy(mesh, refinement_levels, # Effectively "invert" addOverlap(). # -- The resulting plex is to have the identical data structure as the one before addOverlap(). 
# This is algorithmically guaranteed. - dm_cell_type, = mesh.dm_cell_types tdim = mesh.topology_dm.getDimension() - cdm = dmcommon.submesh_create(mesh.topology_dm, tdim, "celltype", dm_cell_type, True) + cdm = dmcommon.submesh_create(mesh.topology_dm, tdim, "depth", tdim, True) cdm.removeLabel("pyop2_core") cdm.removeLabel("pyop2_owned") cdm.removeLabel("pyop2_ghost") @@ -160,7 +151,7 @@ def MeshHierarchy(mesh, refinement_levels, # of the boundary we're trying to conform to. This # doesn't DTRT really for cubed sphere meshes (the # refined meshes are no longer gnonomic). - coords = cdm.getCoordinatesLocal().array.reshape(-1, mesh.geometric_dimension()) + coords = cdm.getCoordinatesLocal().array.reshape(-1, mesh.geometric_dimension) scale = mesh._radius / np.linalg.norm(coords, axis=1).reshape(-1, 1) coords *= scale lgmaps_without_overlap = [impl.create_lgmap(dm) for dm in dms] @@ -173,7 +164,7 @@ def MeshHierarchy(mesh, refinement_levels, meshes = [mesh] + [ mesh_builder( dm, - dim=mesh.geometric_dimension(), + dim=mesh.geometric_dimension, distribution_parameters=parameters, reorder=reorder, comm=mesh.comm, diff --git a/firedrake/mg/netgen.py b/firedrake/mg/netgen.py new file mode 100644 index 0000000000..83f98bbbef --- /dev/null +++ b/firedrake/mg/netgen.py @@ -0,0 +1,353 @@ +""" +This file was copied from ngsPETSc. +""" +import time +from fractions import Fraction + +import numpy as np +import ufl +from packaging.version import Version +from petsc4py import PETSc + +import firedrake as fd +from firedrake.mesh import DISTRIBUTION_PARAMETERS_NOOP +from firedrake.cython import mgimpl as impl, dmcommon +from firedrake import dmhooks +from firedrake.logging import logger + +# Netgen and ngsPETSc are not available when the documentation is getting built +# because they do not have ARM wheels. 
+try: + from netgen.meshing import MeshingParameters + from ngsPETSc.plex import MeshMapping +except ImportError: + pass + + +def trim_util(T): + """ + Trim zeros from a connectivity array T. + """ + if Version(np.__version__) >= Version("2.2"): + T = np.trim_zeros(T, "b", axis=1).astype(PETSc.IntType) - 1 + else: + T = (np.array([list(np.trim_zeros(a, "b")) for a in list(T)], dtype=PETSc.IntType) - 1) + return T + + +def snapToNetgenDMPlex(ngmesh, petscPlex): + ''' + This function snaps the coordinates of a DMPlex mesh to the coordinates of a Netgen mesh. + ''' + logger.info(f"\t\t\t[{time.time()}]Snapping the DMPlex to NETGEN mesh") + + gdim = petscPlex.getCoordinateDim() + if gdim == 1: + ng_coelement = ngmesh.Elements0D() + elif gdim == 2: + ng_coelement = ngmesh.Elements1D() + elif gdim == 3: + ng_coelement = ngmesh.Elements2D() + # When we create a netgen mesh from a refined plex, + # the netgen mesh represents the local submesh. + # Therefore, there is no need to distribute the netgen data + nodes_to_correct = ng_coelement.NumPy()["nodes"] + nodes_to_correct = trim_util(nodes_to_correct) + nodes_to_correct_sorted = nodes_to_correct.flatten() + nodes_to_correct_index = np.unique(nodes_to_correct_sorted) + logger.info(f"\t\t\t[{time.time()}]Nodes have been corrected") + tic = time.time() + ngCoordinates = ngmesh.Coordinates() + petscCoordinates = petscPlex.getCoordinatesLocal().getArray() + petscCoordinates = petscCoordinates.reshape(-1, petscPlex.getCoordinateDim()) + petscCoordinates[nodes_to_correct_index] = ngCoordinates[nodes_to_correct_index] + petscPlexCoordinates = petscPlex.getCoordinatesLocal() + petscPlexCoordinates.setArray(petscCoordinates.flatten()) + petscPlex.setCoordinatesLocal(petscPlexCoordinates) + toc = time.time() + logger.info(f"\t\t\tSnap the DMPlex to NETGEN mesh. 
Time taken: {toc - tic} seconds") + + +def snapToCoarse(coarse, linear, degree, snap_smoothing, cg): + ''' + This function snaps the coordinates of a DMPlex mesh to the coordinates of a Netgen mesh. + ''' + dim = linear.geometric_dimension + if dim == 2: + space = fd.VectorFunctionSpace(linear, "CG", degree) + ho = fd.assemble(fd.interpolate(coarse, space)) + if snap_smoothing == "hyperelastic": + # Hyperelastic Smoothing + bcs = [fd.DirichletBC(space, ho, "on_boundary")] + quad_degree = 2*(degree+1)-1 + d = linear.topological_dimension + Q = fd.TensorFunctionSpace(linear, "DG", degree=0) + Jinv = ufl.JacobianInverse(linear) + hinv = fd.Function(Q) + hinv.interpolate(Jinv) + G = ufl.Jacobian(linear) * hinv + ijac = 1/abs(ufl.det(G)) + + def ref_grad(u): + return ufl.dot(ufl.grad(u), G) + + params = { + "snes_type": "newtonls", + "snes_linesearch_type": "l2", + "snes_max_it": 50, + "snes_rtol": 1E-8, + "snes_atol": 1E-8, + "snes_ksp_ew": True, + "snes_ksp_ew_rtol0": 1E-2, + "snes_ksp_ew_rtol_max": 1E-2, + } + params["mat_type"] = "aij" + coarse = { + "ksp_type": "preonly", + "pc_type": "lu", + "pc_mat_factor_type": "mumps", + } + gmg = { + "pc_type": "mg", + "mg_coarse": coarse, + "mg_levels": { + "ksp_max_it": 2, + "ksp_type": "chebyshev", + "pc_type": "jacobi", + }, + } + l = fd.mg.utils.get_level(linear)[1] + pc = gmg if l else coarse + params.update(pc) + ksp = { + "ksp_rtol": 1E-8, + "ksp_atol": 0, + "ksp_type": "minres", + "ksp_norm_type": "preconditioned", + } + params.update(ksp) + u = ho + F = ref_grad(u) + J = ufl.det(F) + psi = (1/2) * (ufl.inner(F, F)-d - ufl.ln(J**2)) + U = (psi * ijac)*fd.dx(degree=quad_degree) + dU = ufl.derivative(U, u, fd.TestFunction(space)) + problem = fd.NonlinearVariationalProblem(dU, u, bcs) + solver = fd.NonlinearVariationalSolver(problem, solver_parameters=params) + solver.set_transfer_manager(None) + ctx = solver._ctx + for c in problem.F.coefficients(): + dm = c.function_space().dm + dmhooks.push_appctx(dm, ctx) + 
solver.solve() + if not cg: + ho = fd.Function(ho.function_space().broken_space()).interpolate(ho) + else: + raise NotImplementedError("Snapping to Netgen meshes is only implemented for 2D meshes.") + return reconstruct_mesh(linear, ho) + + +def uniformRefinementRoutine(ngmesh, cdm): + ''' + Routine called inside of NetgenHierarchy to compute refined ngmesh and plex. + ''' + # We refine the DMPlex mesh uniformly + logger.info(f"\t\t\t[{time.time()}]Refining the plex") + cdm.setRefinementUniform(True) + rdm = cdm.refine() + rdm.removeLabel("pyop2_core") + rdm.removeLabel("pyop2_owned") + rdm.removeLabel("pyop2_ghost") + logger.info(f"\t\t\t[{time.time()}]Mapping the mesh to Netgen mesh") + tic = time.time() + mapping = MeshMapping(rdm, geo=ngmesh) + toc = time.time() + logger.info(f"\t\t\t[{time.time()}]Mapped the mesh to Netgen. Time taken: {toc-tic}") + return (rdm, mapping.ngMesh) + + +def uniformMapRoutine(meshes, lgmaps): + ''' + This function computes the coarse to fine and fine to coarse maps + for a uniform mesh hierarchy. + ''' + refinements_per_level = 1 + coarse_to_fine_cells = [] + fine_to_coarse_cells = [None] + for (coarse, fine), (clgmaps, flgmaps) in zip( + zip(meshes[:-1], meshes[1:]), + zip(lgmaps[:-1], lgmaps[1:]) + ): + c2f, f2c = impl.coarse_to_fine_cells(coarse, fine, clgmaps, flgmaps) + coarse_to_fine_cells.append(c2f) + fine_to_coarse_cells.append(f2c) + + coarse_to_fine_cells = dict((Fraction(i, refinements_per_level), c2f) + for i, c2f in enumerate(coarse_to_fine_cells)) + fine_to_coarse_cells = dict((Fraction(i, refinements_per_level), f2c) + for i, f2c in enumerate(fine_to_coarse_cells)) + return (coarse_to_fine_cells, fine_to_coarse_cells) + + +def alfeldRefinementRoutine(ngmesh, cdm): + ''' + Routing called inside of NetgenHierarchy to compute refined ngmesh and plex. 
+ ''' + # We refine the netgen mesh alfeld + ngmesh.SplitAlfeld() + # We refine the DMPlex mesh alfeld + tr = PETSc.DMPlexTransform().create(comm=PETSc.COMM_WORLD) + tr.setType(PETSc.DMPlexTransformType.REFINEREGULAR) + tr.setDM(cdm) + tr.setUp() + rdm = tr.apply(cdm) + return (rdm, ngmesh) + + +def alfeldMapRoutine(meshes): + ''' + This function computes the coarse to fine and fine to coarse maps + for a alfeld mesh hierarchy. + ''' + raise NotImplementedError("Alfeld refinement is not implemented yet.") + + +refinementTypes = {"uniform": (uniformRefinementRoutine, uniformMapRoutine), + "Alfeld": (alfeldRefinementRoutine, alfeldMapRoutine)} + + +def NetgenHierarchy(mesh, levs, flags, distribution_parameters=None): + """Create a Firedrake mesh hierarchy from Netgen/NGSolve meshes. + + :arg mesh: the Netgen/NGSolve mesh + :arg levs: the number of levels in the hierarchy + :arg flags: either a bool or a dictionary containing options for Netgen. + If not False the hierachy is constructed using ngsPETSc, if None hierarchy + constructed in a standard manner. Netgen flags includes: + + - degree, either an integer denoting the degree of curvature of all levels of + the mesh or a list of levs+1 integers denoting the degree of curvature of + each level of the mesh. + - tol, geometric tolerance adopted in snapToNetgenDMPlex. + - refinement_type, the refinment type to be used: uniform (default), Alfeld + :kwarg distribution_parameters: a dict of options controlling mesh distribution. + If ``None``, use the same distribution parameters as were used to distribute + the coarse mesh, otherwise, these options override the default. 
+ + """ + tdim = mesh.topological_dimension + # Parse netgen flags + if not isinstance(flags, dict): + flags = mesh.netgen_flags + order = flags.get("degree", 1) + if isinstance(order, int): + order = [order]*(levs+1) + permutation_tol = flags.get("permutation_tol", 1e-8) + refType = flags.get("refinement_type", "uniform") + optMoves = flags.get("optimisation_moves", False) + snap = flags.get("snap_to", "geometry") + snap_smoothing = flags.get("snap_smoothing", "hyperelastic") + cg = flags.get("cg", not mesh.coordinates.function_space().finat_element.is_dg()) + nested = flags.get("nested", snap in ["coarse"]) + logger.info(f"Creating a Netgen hierarchy with {levs} levels.") + logger.info(f"\tOrder of the hierarchy: {order}") + logger.info(f"\tRefinement type: {refType}") + logger.info(f"\tSnap to {snap} using {snap_smoothing} smoothing (if snapping to coarse)") + # Firedrake quantities + meshes = [] + lgmaps = [] + # Curve the mesh + if order[0] != mesh.coordinates.function_space().ufl_element().degree(): + coordinates = mesh.curve_field( + order=order[0], + permutation_tol=permutation_tol, + cg_field=cg, + ) + mesh = reconstruct_mesh(mesh, coordinates) + # Make a plex (cdm) without overlap. 
+ cdm = dmcommon.submesh_create(mesh.topology_dm, tdim, "depth", tdim, True) + cdm.removeLabel("pyop2_core") + cdm.removeLabel("pyop2_owned") + cdm.removeLabel("pyop2_ghost") + no = impl.create_lgmap(cdm) + o = impl.create_lgmap(mesh.topology_dm) + lgmaps.append((no, o)) + mesh.topology_dm.setRefineLevel(0) + meshes.append(mesh) + base_ngmesh = mesh.netgen_mesh + comm = mesh.comm + for l in range(1, levs+1): + rdm, ngmesh = refinementTypes[refType][0](base_ngmesh, cdm) + cdm = rdm + if optMoves: + # Optimises the mesh, for example smoothing + if tdim == 2: + ngmesh.OptimizeMesh2d(MeshingParameters(optimize2d=optMoves)) + elif tdim == 3: + ngmesh.OptimizeVolumeMesh(MeshingParameters(optimize3d=optMoves)) + else: + raise ValueError("Only 2D and 3D meshes can be optimised.") + # Snap the mesh to the Netgen mesh + if snap == "geometry": + snapToNetgenDMPlex(ngmesh, rdm) + + # We construct a Firedrake mesh from the DMPlex mesh + parameters = {} + if distribution_parameters is not None: + parameters.update(distribution_parameters) + else: + parameters.update(mesh._distribution_parameters) + parameters["partition"] = False + mesh = fd.Mesh(rdm, dim=mesh.geometric_dimension, + reorder=False, + distribution_parameters=parameters, + tolerance=mesh.tolerance, + comm=comm) + mesh.netgen_mesh = ngmesh + mesh.netgen_flags = flags + + no = impl.create_lgmap(rdm) + o = impl.create_lgmap(mesh.topology_dm) + lgmaps.append((no, o)) + + # Curve the mesh + if order[l] != mesh.coordinates.function_space().ufl_element().degree(): + logger.info("\t\t\tCurving the mesh ...") + tic = time.time() + if snap == "geometry": + coordinates = mesh.curve_field( + order=order[l], + permutation_tol=permutation_tol, + cg_field=cg, + ) + mesh = reconstruct_mesh(mesh, coordinates) + elif snap == "coarse": + mesh = snapToCoarse(meshes[0].coordinates, mesh, order[l], snap_smoothing, cg) + toc = time.time() + logger.info(f"\t\t\tMeshed curved. 
Time taken: {toc-tic}") + logger.info(f"\t\tLevel {l}: with {ngmesh.Coordinates().shape[0]}\ + vertices, with order {order[l]}, snapping to {snap}\ + and optimisation moves {optMoves}.") + mesh.topology_dm.setRefineLevel(l) + meshes.append(mesh) + # Populate the coarse to fine map + coarse_to_fine_cells, fine_to_coarse_cells = refinementTypes[refType][1](meshes, lgmaps) + return fd.HierarchyBase(meshes, coarse_to_fine_cells, fine_to_coarse_cells, 1, nested=nested) + + +def reconstruct_mesh(mesh, *args, **kwargs): + """Reconstruct a mesh.""" + kwargs.setdefault("dim", mesh.geometric_dimension) + kwargs.setdefault("reorder", False) + kwargs.setdefault("distribution_parameters", DISTRIBUTION_PARAMETERS_NOOP) + kwargs.setdefault("comm", mesh.comm) + kwargs.setdefault("tolerance", mesh.tolerance) + kwargs.setdefault("perm_is", mesh._dm_renumbering) + + tmesh = fd.Mesh(*args, **kwargs) + tmesh._distribution_parameters = mesh._distribution_parameters + tmesh._did_reordering = mesh._did_reordering + tmesh.netgen_mesh = mesh.netgen_mesh + tmesh.netgen_flags = mesh.netgen_flags + tmesh.sfBC_orig = mesh.sfBC_orig + return tmesh diff --git a/firedrake/mg/ufl_utils.py b/firedrake/mg/ufl_utils.py index 5f7e6f0405..d5a26a36e6 100644 --- a/firedrake/mg/ufl_utils.py +++ b/firedrake/mg/ufl_utils.py @@ -65,6 +65,7 @@ def coarsen(expr, self, coefficient_mapping=None): @coarsen.register(ufl.Mesh) +@coarsen.register(ufl.MeshSequence) def coarsen_mesh(mesh, self, coefficient_mapping=None): hierarchy, level = utils.get_level(mesh) if hierarchy is None: @@ -148,7 +149,9 @@ def coarsen_function_space(V, self, coefficient_mapping=None): return V._coarse V_fine = V - mesh_coarse = self(V_fine.mesh(), self) + # Handle MixedFunctionSpace : V_fine.reconstruct requires MeshSequence. 
+ fine_mesh = V_fine.mesh() if V_fine.index is None else V_fine.parent.mesh() + mesh_coarse = self(fine_mesh, self) name = f"coarse_{V.name}" if V.name else None V_coarse = V_fine.reconstruct(mesh=mesh_coarse, name=name) V_coarse._fine = V_fine diff --git a/firedrake/mg/utils.py b/firedrake/mg/utils.py index 37832b64dc..d2c37ed7aa 100644 --- a/firedrake/mg/utils.py +++ b/firedrake/mg/utils.py @@ -135,15 +135,15 @@ def physical_node_locations(V): mesh = V.mesh() # This is a defaultdict, so the first time we access the key we # get a fresh dict for the cache. - cache = mesh._geometric_shared_data_cache["hierarchy_physical_node_locations"] + cache = mesh.geometric_shared_data_cache["hierarchy_physical_node_locations"] key = (element, V.boundary_set) try: return cache[key] except KeyError: - Vc = V.collapse().reconstruct(element=finat.ufl.VectorElement(element, dim=mesh.geometric_dimension())) + Vc = V.collapse().reconstruct(element=finat.ufl.VectorElement(element, dim=mesh.geometric_dimension)) # FIXME: This is unsafe for DG coordinates and CG target spaces. 
- locations = firedrake.assemble(firedrake.Interpolate(firedrake.SpatialCoordinate(mesh), Vc)) + locations = firedrake.assemble(firedrake.interpolate(firedrake.SpatialCoordinate(mesh), Vc)) return cache.setdefault(key, locations) diff --git a/firedrake/ml/jax/fem_operator.py b/firedrake/ml/jax/fem_operator.py index 94f2875202..cee0d23a86 100644 --- a/firedrake/ml/jax/fem_operator.py +++ b/firedrake/ml/jax/fem_operator.py @@ -173,7 +173,8 @@ def to_jax(x: Union[Function, Constant], gather: Optional[bool] = False, batched x_P = jnp.array(np.ravel(x.dat.global_data), **kwargs) else: # Use local data - x_P = jnp.array(np.ravel(x.dat.data_ro), **kwargs) + with x.dat.vec_ro as vec: + x_P = jnp.array(np.ravel(vec.buffer_r), **kwargs) if batched: # Default behaviour: add batch dimension after converting to JAX return x_P[None, :] @@ -222,5 +223,7 @@ def from_jax(x: "jax.Array", V: Optional[WithGeometry] = None) -> Union[Function val = val[0] return Constant(val) else: - x_F = Function(V, val=np.asarray(x)) + x_F = Function(V) + with x_F.dat.vec_wo as vec: + vec.array_w = np.asarray(x) return x_F diff --git a/firedrake/ml/pytorch/fem_operator.py b/firedrake/ml/pytorch/fem_operator.py index 868c4cbb34..22b57501c5 100644 --- a/firedrake/ml/pytorch/fem_operator.py +++ b/firedrake/ml/pytorch/fem_operator.py @@ -178,7 +178,8 @@ def to_torch(x, gather=False, batched=True, **kwargs): x_P = torch.tensor(np.ravel(x.dat.global_data), **kwargs) else: # Use local data - x_P = torch.tensor(np.ravel(x.dat.data_ro), **kwargs) + with x.dat.vec_ro as vec: + x_P = torch.tensor(np.ravel(vec.buffer_r), **kwargs) if batched: # Default behaviour: add batch dimension after converting to PyTorch return x_P[None, :] @@ -218,5 +219,7 @@ def from_torch(x, V=None): val = val[0] return Constant(val) else: - x_F = Function(V, val=x.detach().numpy()) + x_F = Function(V) + with x_F.dat.vec_wo as vec: + vec.array_w = x.detach().numpy() return x_F diff --git a/firedrake/ml/pytorch/ml_operator.py 
b/firedrake/ml/pytorch/ml_operator.py index f2d6055546..ade5893d32 100644 --- a/firedrake/ml/pytorch/ml_operator.py +++ b/firedrake/ml/pytorch/ml_operator.py @@ -18,7 +18,7 @@ raise ImportError("PyTorch is not installed and is required to use the FiredrakeTorchOperator.") -from functools import partial +from functools import partial, cached_property import petsctools from firedrake.external_operators import MLOperator @@ -82,7 +82,7 @@ def model_output(self): def model_output(self, output): self.operator_data['model_output'] = output - @utils.cached_property + @cached_property def torch_grad_enabled(self): # Default: set PyTorch annotation on, unless otherwise specified. return self.operator_data.get('torch_grad_enabled', True) diff --git a/firedrake/netgen.py b/firedrake/netgen.py new file mode 100644 index 0000000000..57b442b83a --- /dev/null +++ b/firedrake/netgen.py @@ -0,0 +1,181 @@ +''' +This module contains all the functions related to wrapping NGSolve meshes to Firedrake + +This file was copied from ngsPETSc. +''' +import numpy as np +from scipy.spatial.distance import cdist + +from pyop2.mpi import COMM_WORLD +from firedrake.petsc import PETSc +import firedrake + +# Netgen and ngsPETSc are not available when the documentation is getting built +# because they do not have ARM wheels. +try: + import netgen.meshing as ngm + from netgen.meshing import MeshingParameters + from ngsPETSc import MeshMapping +except ImportError: + pass + +try: + import ngsolve as ngs +except ImportError: + class ngs: + "dummy class" + class comp: + "dummy class" + Mesh = type(None) + + +def netgen_distribute(V: firedrake.functionspaceimpl.WithGeometryBase, + netgen_data: np.ndarray): + """ + Distribute data from the netgen layout into the DMPlex layout. + + Parameters + ---------- + V + The target function space defining the DMPlex layout. + netgen_data + The data in the layout of the underlying netgen mesh. 
+ + Returns + ------- + ``np.ndarray`` + The data in the target DMPlex layout. + + """ + netgen_data = np.asarray(netgen_data) + mesh = V.mesh() + sf = mesh.sfBC_orig + if sf is None: + # This mesh was not redistributed at construction. + # This means that the underlying netgen mesh represents + # the local part of the mesh owned by this process. + # Therefore the netgen data is already distributed. + plex_data = netgen_data + else: + plex = mesh.topology_dm + nshape = netgen_data.shape + dtype = netgen_data.dtype + + sfBCInv = sf.createInverse() + section = V.dm.getDefaultSection() + vec = V.dof_dset.layout_vec + section0, vec0 = plex.distributeField(sfBCInv, section, vec) + vec0.set(0) + plex_data = None + for i in np.ndindex(V.shape): + di = netgen_data[(..., *i)].flatten() + vec0[:len(di)] = di + _, vec = plex.distributeField(sf, section0, vec0) + arr = vec.getArray() + if plex_data is None: + plex_data = np.empty(arr.shape + V.shape, dtype=dtype) + plex_data[(..., *i)] = arr + plex_data = plex_data.reshape(-1, *nshape[1:]) + return plex_data + + +@PETSc.Log.EventDecorator() +def find_permutation(points_a: np.ndarray, points_b: np.ndarray, + tol: float = 1e-5): + """ Find all permutations between a list of two sets of points. + + Given two numpy arrays of shape (ncells, npoints, dim) containing + floating point coordinates for each cell, determine each index + permutation that takes `points_a` to `points_b`. 
Ie: + ``` + permutation = find_permutation(points_a, points_b) + assert np.allclose(points_a[permutation], points_b, rtol=0, atol=tol) + ``` + """ + if points_a.shape != points_b.shape: + raise ValueError("`points_a` and `points_b` must have the same shape.") + + p = [np.where(cdist(a, b).T < tol)[1] for a, b in zip(points_a, points_b)] + + if len(p) == 0: + return p + + try: + permutation = np.array(p, ndmin=2) + except ValueError as e: + raise ValueError( + "It was not possible to find a permutation for every cell" + " within the provided tolerance" + ) from e + + if permutation.shape != points_a.shape[0:2]: + raise ValueError( + "It was not possible to find a permutation for every cell" + " within the provided tolerance" + ) + + return permutation + + +def splitToQuads(plex, dim, comm): + """Split a Netgen mesh into quads using a PETSc transform.""" + # TODO: Improve support quad meshing. + # @pef Get netgen to make a quad-dominant mesh, and then only split the triangles. + # Current implementation will make for poor-quality meshes. + if dim == 2: + transform = PETSc.DMPlexTransform().create(comm=comm) + transform.setType(PETSc.DMPlexTransformType.REFINETOBOX) + transform.setDM(plex) + transform.setUp() + else: + raise RuntimeError("Splitting to quads is only possible for 2D meshes.") + newplex = transform.apply(plex) + return newplex + + +splitTypes = {"Alfeld": lambda x: x.SplitAlfeld(), + "Powell-Sabin": lambda x: x.SplitPowellSabin()} + + +class FiredrakeMesh: + ''' + This class creates a Firedrake mesh from Netgen/NGSolve meshes. + + :arg mesh: the mesh object, it can be either a Netgen/NGSolve mesh or a PETSc DMPlex + :param netgen_flags: The dictionary of flags to be passed to ngsPETSc. + :arg comm: the MPI communicator. 
+ ''' + def __init__(self, mesh, netgen_flags, user_comm=COMM_WORLD): + self.comm = user_comm + # Parsing netgen flags + if not isinstance(netgen_flags, dict): + netgen_flags = {} + split2tets = netgen_flags.get("split_to_tets", False) + split = netgen_flags.get("split", False) + quad = netgen_flags.get("quad", False) + optMoves = netgen_flags.get("optimisation_moves", False) + # Checking the mesh format + if isinstance(mesh, (ngs.comp.Mesh, ngm.Mesh)): + if split2tets: + mesh = mesh.Split2Tets() + if split: + # Split mesh this includes Alfeld and Powell-Sabin + splitTypes[split](mesh) + if optMoves: + # Optimises the mesh, for example smoothing + if mesh.dim == 2: + mesh.OptimizeMesh2d(MeshingParameters(optimize2d=optMoves)) + elif mesh.dim == 3: + mesh.OptimizeVolumeMesh(MeshingParameters(optimize3d=optMoves)) + else: + raise ValueError("Only 2D and 3D meshes can be optimised.") + # We create the plex from the netgen mesh + self.meshMap = MeshMapping(mesh, comm=self.comm) + # We apply the DMPLEX transform + if quad: + newplex = splitToQuads(self.meshMap.petscPlex, mesh.dim, comm=self.comm) + self.meshMap = MeshMapping(newplex) + elif isinstance(mesh, PETSc.DMPlex): + self.meshMap = MeshMapping(mesh) + else: + raise ValueError("Mesh format not recognised.") diff --git a/firedrake/norms.py b/firedrake/norms.py index 2610e46cab..605b1bd6ba 100644 --- a/firedrake/norms.py +++ b/firedrake/norms.py @@ -30,8 +30,8 @@ def errornorm(u, uh, norm_type="L2", degree_rise=None, mesh=None): raise ValueError("uh should be a Function, is a %r", type(uh)) if isinstance(u, function.Function): - degree_u = u.function_space().ufl_element().degree() - degree_uh = uh.function_space().ufl_element().degree() + degree_u = u.function_space().ufl_element().embedded_superdegree + degree_uh = uh.function_space().ufl_element().embedded_superdegree if degree_uh > degree_u: warning("Degree of exact solution less than approximation degree") diff --git a/firedrake/nullspace.py 
b/firedrake/nullspace.py index 554298dcad..fce65edf0d 100644 --- a/firedrake/nullspace.py +++ b/firedrake/nullspace.py @@ -1,6 +1,6 @@ import numpy -from pyop2.mpi import COMM_WORLD, internal_comm +from pyop2.mpi import COMM_WORLD from firedrake import function from firedrake.logging import warning @@ -57,7 +57,6 @@ def __init__(self, vecs=None, constant=False, comm=None): else: warning("No comm specified for VectorSpaceBasis, COMM_WORLD assumed") self.comm = COMM_WORLD - self._comm = internal_comm(self.comm, self) @PETSc.Log.EventDecorator() def nullspace(self, comm=None): @@ -70,7 +69,7 @@ def nullspace(self, comm=None): warning("Specifiy comm when initialising VectorSpaceBasis, ignoring comm argument") self._nullspace = PETSc.NullSpace().create(constant=self._constant, vectors=self._petsc_vecs, - comm=self._comm) + comm=self.comm) return self._nullspace @PETSc.Log.EventDecorator() @@ -223,7 +222,6 @@ class MixedVectorSpaceBasis(object): def __init__(self, function_space, bases): self._function_space = function_space self.comm = function_space.comm - self._comm = internal_comm(self.comm, self) for basis in bases: if isinstance(basis, VectorSpaceBasis): continue @@ -275,7 +273,7 @@ def _build_monolithic_basis(self): self._nullspace = PETSc.NullSpace().create(constant=False, vectors=self._petsc_vecs, - comm=self._comm) + comm=self.comm) def _apply_monolithic(self, matrix, transpose=False, near=False): r"""Set this class:`MixedVectorSpaceBasis` as a nullspace for a diff --git a/firedrake/output/__init__.py b/firedrake/output/__init__.py index 409149cccf..eaf15ce6cd 100644 --- a/firedrake/output/__init__.py +++ b/firedrake/output/__init__.py @@ -1,5 +1,5 @@ try: import vtkmodules.vtkCommonDataModel # noqa: F401 - from .vtk_output import VTKFile # noqa: F401 + from firedrake.output.vtk_output import VTKFile # noqa: F401 except ModuleNotFoundError: - from .vtk_unavailable import VTKFile # noqa: F401 + from firedrake.output.vtk_unavailable import VTKFile # noqa: F401 
diff --git a/firedrake/output/vtk_output.py b/firedrake/output/vtk_output.py index cc10efe4d4..7bf9547ff9 100644 --- a/firedrake/output/vtk_output.py +++ b/firedrake/output/vtk_output.py @@ -6,7 +6,7 @@ import finat.ufl from ufl.domain import extract_unique_domain from itertools import chain -from pyop2.mpi import COMM_WORLD, internal_comm +from pyop2.mpi import COMM_WORLD, temp_internal_comm from pyop2.utils import as_tuple from pyadjoint import no_annotations from firedrake.petsc import PETSc @@ -66,7 +66,7 @@ def is_cg(V): :arg V: A FunctionSpace. """ - nvertex = V.mesh().ufl_cell().num_vertices() + nvertex = V.mesh().ufl_cell().num_vertices entity_dofs = V.finat_element.entity_dofs() # If there are as many dofs on vertices as there are vertices, # assume a continuous space. @@ -89,7 +89,7 @@ def is_linear(V): :arg V: A FunctionSpace. """ - nvertex = V.mesh().ufl_cell().num_vertices() + nvertex = V.mesh().ufl_cell().num_vertices return V.finat_element.space_dimension() == nvertex @@ -108,7 +108,7 @@ def get_sup_element(*elements, continuous=False, max_degree=None): if continuous: family = "CG" else: - if cell.cellname() in {"interval", "triangle", "tetrahedron"}: + if cell.cellname in {"interval", "triangle", "tetrahedron"}: family = "DG" else: family = "DQ" @@ -215,9 +215,9 @@ def vrange(cell_layers): con = connectivity + offsets connectivity = con.flatten() if not nonLinear: - offsets_into_con = numpy.arange(start=cell.num_vertices(), - stop=cell.num_vertices() * (num_cells + 1), - step=cell.num_vertices(), + offsets_into_con = numpy.arange(start=cell.num_vertices, + stop=cell.num_vertices * (num_cells + 1), + step=cell.num_vertices, dtype=IntType) else: offsets_into_con = numpy.arange(start=basis_dim, @@ -393,15 +393,14 @@ def __init__(self, filename, project_output=False, comm=None, mode="w", mode = "w" self.comm = comm or COMM_WORLD - self._comm = internal_comm(self.comm, self) - if self._comm.rank == 0 and mode == "w": + if self.comm.rank == 0 and mode == 
"w": if not os.path.exists(basename): os.makedirs(basename) - elif self._comm.rank == 0 and mode == "a": + elif self.comm.rank == 0 and mode == "a": if not os.path.exists(os.path.abspath(filename)): raise ValueError("Need a file to restart from.") - self._comm.barrier() + self.comm.barrier() self.filename = filename self.basename = basename @@ -424,11 +423,11 @@ def __init__(self, filename, project_output=False, comm=None, mode="w", raise ValueError("target_continuity must be either 'H1' or 'L2'.") countstart = 0 - if self._comm.rank == 0 and mode == "w": + if self.comm.rank == 0 and mode == "w": with open(self.filename, "wb") as f: f.write(self._header) f.write(self._footer) - elif self._comm.rank == 0 and mode == "a": + elif self.comm.rank == 0 and mode == "a": import xml.etree.ElementTree as ElTree tree = ElTree.parse(os.path.abspath(filename)) # Count how many the file already has @@ -440,7 +439,8 @@ def __init__(self, filename, project_output=False, comm=None, mode="w", if mode == "a": # Need to communicate the count across all cores involved; default op is SUM - countstart = self._comm.allreduce(countstart) + with temp_internal_comm(self.comm) as icomm: + countstart = icomm.allreduce(countstart) self.counter = itertools.count(countstart) self.timestep = itertools.count(countstart) diff --git a/firedrake/petsc.py b/firedrake/petsc.py index 031ce85cbf..6eb2ab0864 100644 --- a/firedrake/petsc.py +++ b/firedrake/petsc.py @@ -9,49 +9,16 @@ from petsc4py import PETSc from pyop2 import mpi -from firedrake import utils - -__all__ = ( - "PETSc", - # TODO: These are all now deprecated - "get_petsc_variables", - "get_petscconf_h", - "get_external_packages" -) +__all__ = ("PETSc",) class FiredrakePETScError(Exception): pass -@utils.deprecated("petsctools.flatten_parameters") -def flatten_parameters(*args, **kwargs): - return petsctools.flatten_parameters(*args, **kwargs) - - -@utils.deprecated("petsctools.get_petscvariables") -def get_petsc_variables(): - return 
petsctools.get_petscvariables() - - -@utils.deprecated("petsctools.get_petscconf_h") -def get_petscconf_h(): - return petsctools.get_petscconf_h() - - -@utils.deprecated("petsctools.get_external_packages") -def get_external_packages(): - return petsctools.get_external_packages() - - -@utils.deprecated("petsctools.get_blas_library") -def get_blas_library(): - return petsctools.get_blas_library() - - -def _extract_comm(obj: Any) -> MPI.Comm: - """Extract and return the Firedrake/PyOP2 internal comm of a given object. +def _extract_comm(obj: Any) -> MPI.Comm | None: + """Extract and return the comm of a given object. Parameters ---------- @@ -60,35 +27,20 @@ def _extract_comm(obj: Any) -> MPI.Comm: Returns ------- - MPI.Comm - Internal communicator + MPI.Comm | None + A communicator if found, else `None`. """ - comm = None - # If the object is a communicator check whether it is already an internal - # communicator, otherwise get the internal communicator attribute from the - # given communicator. - if isinstance(obj, (PETSc.Comm, mpi.MPI.Comm)): - try: - if mpi.is_pyop2_comm(obj): - comm = obj - else: - internal_comm = obj.Get_attr(mpi.innercomm_keyval) - if internal_comm is None: - comm = obj - else: - comm = internal_comm - except mpi.PyOP2CommError: - pass - elif hasattr(obj, "_comm"): - comm = obj._comm + if isinstance(obj, PETSc.Comm | mpi.MPI.Comm): + return obj elif hasattr(obj, "comm"): - comm = obj.comm - return comm + return obj.comm + else: + return None @mpi.collective -def garbage_cleanup(obj: Any): +def garbage_cleanup(obj: Any) -> None: """Clean up garbage PETSc objects on a Firedrake object or any comm. Parameters @@ -110,7 +62,7 @@ def garbage_cleanup(obj: Any): @mpi.collective -def garbage_view(obj: Any): +def garbage_view(obj: Any) -> None: """View garbage PETSc objects stored on a Firedrake object or any comm. 
Parameters diff --git a/firedrake/pointeval_utils.py b/firedrake/pointeval_utils.py index 2c14656329..f6c24f8b30 100644 --- a/firedrake/pointeval_utils.py +++ b/firedrake/pointeval_utils.py @@ -1,13 +1,17 @@ import loopy as lp from firedrake.utils import IntType, as_cstr -from ufl import TensorProductCell -from finat.ufl import MixedElement +from finat.element_factory import as_fiat_cell +from finat.point_set import UnknownPointSet +from finat.quadrature import QuadratureRule +from finat.ufl import MixedElement, FiniteElement, TensorElement + from ufl.corealg.map_dag import map_expr_dags from ufl.algorithms import extract_arguments, extract_coefficients from ufl.domain import extract_unique_domain import gem +import ufl import tsfc import tsfc.kernel_interface.firedrake_loopy as firedrake_interface @@ -18,6 +22,39 @@ from firedrake.petsc import PETSc +def runtime_quadrature_element(domain, ufl_element, rt_var_name="rt_X"): + """Construct a Quadrature FiniteElement for interpolation onto a + VertexOnlyMesh. The quadrature point is an UnknownPointSet of shape + (1, tdim) where tdim is the topological dimension of domain.ufl_cell(). The + weight is [1.0], since the single local dof in the VertexOnlyMesh function + space corresponds to a point evaluation at the vertex. + + Parameters + ---------- + domain : ufl.AbstractDomain + The source domain. + ufl_element : finat.ufl.finiteelement.FiniteElement + The UFL element of the target FunctionSpace. + rt_var_name : str + String beginning with ``'rt_'`` which is used as the name of the + gem.Variable used to represent the UnknownPointSet. The ``'rt_'`` prefix + forces TSFC to do runtime tabulation. 
+ """ + assert rt_var_name.startswith("rt_") + + cell = domain.ufl_cell() + point_expr = gem.Variable(rt_var_name, (1, cell.topological_dimension)) + point_set = UnknownPointSet(point_expr) + rule = QuadratureRule(point_set, weights=[1.0], ref_el=as_fiat_cell(cell)) + + shape = ufl_element.pullback.physical_value_shape(ufl_element, domain) + rt_element = FiniteElement("Quadrature", cell=cell, degree=0, quad_scheme=rule) + if shape: + symmetry = None if len(shape) < 2 else ufl_element.symmetry() + rt_element = TensorElement(rt_element, shape=shape, symmetry=symmetry) + return rt_element + + @PETSc.Log.EventDecorator() def compile_element(expression, coordinates, parameters=None): """Generates C code for point evaluations. @@ -54,6 +91,8 @@ def compile_element(expression, coordinates, parameters=None): # Initialise kernel builder builder = firedrake_interface.KernelBuilderBase(utils.ScalarType) + builder._domain_integral_type_map = {domain: "cell"} + builder._entity_ids = {domain: (0,)} builder.domain_coordinate[domain] = coordinates builder._coefficient(coordinates, "x") x_arg = builder.generate_arg_from_expression(builder.coefficient_map[coordinates]) @@ -65,13 +104,12 @@ def compile_element(expression, coordinates, parameters=None): # Translate to GEM cell = domain.ufl_cell() - dim = cell.topological_dimension() + dim = cell.topological_dimension point = gem.Variable('X', (dim,)) point_arg = lp.GlobalArg("X", dtype=utils.ScalarType, shape=(dim,)) config = dict(interface=builder, ufl_cell=extract_unique_domain(coordinates).ufl_cell(), - integral_type="cell", point_indices=(), point_expr=point, scalar_type=utils.ScalarType) @@ -113,10 +151,10 @@ def predicate(index): kernel_code = lp.generate_code_v2(loopy_kernel).device_code() # Fill the code template - extruded = isinstance(cell, TensorProductCell) + extruded = isinstance(cell, ufl.TensorProductCell) code = { - "geometric_dimension": domain.geometric_dimension(), + "geometric_dimension": 
domain.geometric_dimension, "layers_arg": f", {as_cstr(IntType)} const *__restrict__ layers" if extruded else "", "layers": ", layers" if extruded else "", "extruded_define": "1" if extruded else "0", diff --git a/firedrake/pointquery_utils.py b/firedrake/pointquery_utils.py index c425356267..141eb3a939 100644 --- a/firedrake/pointquery_utils.py +++ b/firedrake/pointquery_utils.py @@ -28,11 +28,13 @@ def make_args(function): return (arg,) +@PETSc.Log.EventDecorator() def make_wrapper(function, **kwargs): args = make_args(function) return generate_single_cell_wrapper(function.cell_set, args, **kwargs) +@PETSc.Log.EventDecorator() def src_locate_cell(mesh, tolerance=None): src = ['#include '] src.append(compile_coordinate_element(mesh, tolerance)) @@ -58,7 +60,7 @@ def X_isub_dX(topological_dimension): def is_affine(ufl_element): - return ufl_element.cell.is_simplex() and ufl_element.degree() <= 1 and ufl_element.family() in ["Discontinuous Lagrange", "Lagrange"] + return ufl_element.cell.is_simplex and ufl_element.degree() <= 1 and ufl_element.family() in ["Discontinuous Lagrange", "Lagrange"] def inside_check(fiat_cell, eps, X="X"): @@ -130,7 +132,7 @@ def to_reference_coords_newton_step(ufl_coordinate_element, parameters, x0_dtype # Set up UFL form cell = ufl_coordinate_element.cell domain = ufl.Mesh(ufl_coordinate_element) - gdim = domain.geometric_dimension() + gdim = domain.geometric_dimension K = ufl.JacobianInverse(domain) x = ufl.SpatialCoordinate(domain) x0_element = finat.ufl.VectorElement("Real", cell, 0, dim=gdim) @@ -143,8 +145,9 @@ def to_reference_coords_newton_step(ufl_coordinate_element, parameters, x0_dtype expr = ufl_utils.simplify_abs(expr, complex_mode) builder = firedrake_interface.KernelBuilderBase(ScalarType) + builder._domain_integral_type_map = {domain: "cell"} + builder._entity_ids = {domain: (0,)} builder.domain_coordinate[domain] = C - Cexpr = builder._coefficient(C, "C") x0_expr = builder._coefficient(x0, "x0") loopy_args = [ @@ -156,13 
+159,12 @@ def to_reference_coords_newton_step(ufl_coordinate_element, parameters, x0_dtype ), ] - dim = cell.topological_dimension() + dim = cell.topological_dimension point = gem.Variable('X', (dim,)) loopy_args.append(lp.GlobalArg("X", dtype=ScalarType, shape=(dim,))) context = tsfc.fem.GemPointContext( interface=builder, ufl_cell=cell, - integral_type="cell", point_indices=(), point_expr=point, scalar_type=parameters["scalar_type"] @@ -224,15 +226,15 @@ def compile_coordinate_element(mesh: MeshGeometry, contains_eps: float, paramete element = finat.element_factory.create_element(ufl_coordinate_element) code = { - "geometric_dimension": mesh.geometric_dimension(), - "topological_dimension": mesh.topological_dimension(), + "geometric_dimension": mesh.geometric_dimension, + "topological_dimension": mesh.topological_dimension, "celldist_l1_c_expr": celldist_l1_c_expr(element.cell, "X"), "to_reference_coords_newton_step": to_reference_coords_newton_step(ufl_coordinate_element, parameters), "init_X": init_X(element.cell, parameters), "max_iteration_count": 1 if is_affine(ufl_coordinate_element) else 16, "convergence_epsilon": 1e-12, - "dX_norm_square": dX_norm_square(mesh.topological_dimension()), - "X_isub_dX": X_isub_dX(mesh.topological_dimension()), + "dX_norm_square": dX_norm_square(mesh.topological_dimension), + "X_isub_dX": X_isub_dX(mesh.topological_dimension), "extruded_arg": f", {as_cstr(IntType)} const *__restrict__ layers" if mesh.extruded else "", "extr_comment_out": "//" if mesh.extruded else "", "non_extr_comment_out": "//" if not mesh.extruded else "", diff --git a/firedrake/preconditioners/__init__.py b/firedrake/preconditioners/__init__.py index 491a73657b..d3436114df 100644 --- a/firedrake/preconditioners/__init__.py +++ b/firedrake/preconditioners/__init__.py @@ -1,15 +1,27 @@ -from firedrake.preconditioners.base import * # noqa: F401 -from firedrake.preconditioners.asm import * # noqa: F401 -from firedrake.preconditioners.assembled import * # 
noqa: F401 -from firedrake.preconditioners.massinv import * # noqa: F401 -from firedrake.preconditioners.pcd import * # noqa: F401 -from firedrake.preconditioners.patch import * # noqa: F401 -from firedrake.preconditioners.low_order import * # noqa: F401 -from firedrake.preconditioners.gtmg import * # noqa: F401 -from firedrake.preconditioners.pmg import * # noqa: F401 -from firedrake.preconditioners.hypre_ams import * # noqa: F401 -from firedrake.preconditioners.hypre_ads import * # noqa: F401 -from firedrake.preconditioners.fdm import * # noqa: F401 -from firedrake.preconditioners.hiptmair import * # noqa: F401 -from firedrake.preconditioners.facet_split import * # noqa: F401 -from firedrake.preconditioners.bddc import * # noqa: F401 +from firedrake.preconditioners.base import ( # noqa: F401 + PCBase, SNESBase, PCSNESBase +) +from firedrake.preconditioners.asm import ( # noqa: F401 + ASMPatchPC, ASMStarPC, ASMVankaPC, + ASMLinesmoothPC, ASMExtrudedStarPC +) +from firedrake.preconditioners.assembled import ( # noqa: F401 + AssembledPC, AuxiliaryOperatorPC +) +from firedrake.preconditioners.massinv import MassInvPC # noqa: F401 +from firedrake.preconditioners.pcd import PCDPC # noqa: F401 +from firedrake.preconditioners.patch import ( # noqa: F401 + PatchPC, PlaneSmoother, PatchSNES +) +from firedrake.preconditioners.low_order import ( # noqa: F401 + P1PC, P1SNES, LORPC +) +from firedrake.preconditioners.gtmg import GTMGPC # noqa: F401 +from firedrake.preconditioners.pmg import PMGPC, PMGSNES # noqa: F401 +from firedrake.preconditioners.hypre_ams import HypreAMS # noqa: F401 +from firedrake.preconditioners.hypre_ads import HypreADS # noqa: F401 +from firedrake.preconditioners.fdm import FDMPC, PoissonFDMPC # noqa: F401 +from firedrake.preconditioners.hiptmair import TwoLevelPC, HiptmairPC # noqa: F401 +from firedrake.preconditioners.facet_split import FacetSplitPC # noqa: F401 +from firedrake.preconditioners.bddc import BDDCPC # noqa: F401 +from 
firedrake.preconditioners.covariance import CovariancePC # noqa: F401 diff --git a/firedrake/preconditioners/asm.py b/firedrake/preconditioners/asm.py index ece1765d2c..de537cf123 100644 --- a/firedrake/preconditioners/asm.py +++ b/firedrake/preconditioners/asm.py @@ -153,15 +153,19 @@ class ASMStarPC(ASMPatchPC): def get_patches(self, V): mesh = V._mesh - mesh_dm = mesh.topology_dm - if mesh.cell_set._extruded: + if len(set(mesh)) == 1: + mesh_unique = mesh.unique() + else: + raise NotImplementedError("Not implemented for general mixed meshes") + mesh_dm = mesh_unique.topology_dm + if mesh_unique.cell_set._extruded: warning("applying ASMStarPC on an extruded mesh") # Obtain the topological entities to use to construct the stars opts = PETSc.Options(self.prefix) depth = opts.getInt("construct_dim", default=0) ordering = opts.getString("mat_ordering_type", default="natural") - validate_overlap(mesh, depth, "star") + validate_overlap(mesh_unique, depth, "star") # Accessing .indices causes the allocation of a global array, # so we need to cache these for efficiency @@ -210,8 +214,12 @@ class ASMVankaPC(ASMPatchPC): def get_patches(self, V): mesh = V._mesh - mesh_dm = mesh.topology_dm - if mesh.layers: + if len(set(mesh)) == 1: + mesh_unique = mesh.unique() + else: + raise NotImplementedError("Not implemented for general mixed meshes") + mesh_dm = mesh_unique.topology_dm + if mesh_unique.layers: warning("applying ASMVankaPC on an extruded mesh") # Obtain the topological entities to use to construct the stars @@ -240,7 +248,7 @@ def get_patches(self, V): else: (start, end) = mesh_dm.getHeightStratum(height) patch_dim = mesh_dm.getDimension() - height - validate_overlap(mesh, patch_dim, "vanka") + validate_overlap(mesh_unique, patch_dim, "vanka") for seed in range(start, end): # Only build patches over owned DoFs @@ -302,8 +310,12 @@ class ASMLinesmoothPC(ASMPatchPC): def get_patches(self, V): mesh = V._mesh - assert mesh.cell_set._extruded - dm = mesh.topology_dm + if 
len(set(mesh)) == 1: + mesh_unique = mesh.unique() + else: + raise NotImplementedError("Not implemented for general mixed meshes") + assert mesh_unique.cell_set._extruded + dm = mesh_unique.topology_dm section = V.dm.getDefaultSection() # Obtain the codimensions to loop over from options, if present opts = PETSc.Options(self.prefix) @@ -387,7 +399,7 @@ def get_basemesh_nodes(W): if W.mesh().extruded_periodic: # Account for missing dofs from the top layer - for dim in range(W.mesh().topological_dimension()): + for dim in range(W.mesh().topological_dimension): qstart, qend = W.mesh().topology_dm.getDepthStratum(dim) quotient = len(W.finat_element.entity_dofs()[(dim, 0)][0]) basemeshdof[qstart-pstart:qend-pstart] += quotient @@ -408,9 +420,13 @@ class ASMExtrudedStarPC(ASMStarPC): def get_patches(self, V): mesh = V.mesh() - mesh_dm = mesh.topology_dm - nlayers = mesh.layers - if not mesh.cell_set._extruded: + if len(set(mesh)) == 1: + mesh_unique = mesh.unique() + else: + raise NotImplementedError("Not implemented for general mixed meshes") + mesh_dm = mesh_unique.topology_dm + nlayers = mesh_unique.layers + if not mesh_unique.cell_set._extruded: return super(ASMExtrudedStarPC, self).get_patches(V) periodic = mesh.extruded_periodic @@ -459,7 +475,7 @@ def get_patches(self, V): else: continue - validate_overlap(mesh, base_depth, "star") + validate_overlap(mesh_unique, base_depth, "star") start, end = mesh_dm.getDepthStratum(base_depth) for seed in range(start, end): # Only build patches over owned DoFs diff --git a/firedrake/preconditioners/base.py b/firedrake/preconditioners/base.py index 187a3f7247..2d373b94fd 100644 --- a/firedrake/preconditioners/base.py +++ b/firedrake/preconditioners/base.py @@ -142,17 +142,15 @@ def new_snes_ctx( """ from firedrake.variational_solver import LinearVariationalProblem from firedrake.function import Function - from firedrake.solving_utils import _SNESContext - dm = pc.getDM() - old_appctx = get_appctx(dm).appctx u = 
Function(Jp.arguments()[-1].function_space()) L = 0 if bcs: bcs = tuple(bc._as_nonlinear_variational_problem_arg(is_linear=True) for bc in bcs) nprob = LinearVariationalProblem(Jp, L, u, bcs=bcs, form_compiler_parameters=fcp) - return _SNESContext(nprob, mat_type, mat_type, appctx=old_appctx, **kwargs) + octx = get_appctx(pc.getDM()) + return octx.reconstruct(problem=nprob, mat_type=mat_type, pmat_type=mat_type, **kwargs) class PCBase(PCSNESBase): diff --git a/firedrake/preconditioners/bddc.py b/firedrake/preconditioners/bddc.py index c2b3afffde..f35dd689f3 100644 --- a/firedrake/preconditioners/bddc.py +++ b/firedrake/preconditioners/bddc.py @@ -1,10 +1,23 @@ +from itertools import repeat + from firedrake.preconditioners.base import PCBase from firedrake.preconditioners.patch import bcdofs -from firedrake.preconditioners.facet_split import restrict, get_restriction_indices +from firedrake.preconditioners.facet_split import get_restriction_indices from firedrake.petsc import PETSc from firedrake.dmhooks import get_function_space, get_appctx from firedrake.ufl_expr import TestFunction, TrialFunction -from ufl import curl, div, HCurl, HDiv, inner, dx +from firedrake.function import Function +from firedrake.functionspace import FunctionSpace, VectorFunctionSpace, TensorFunctionSpace +from firedrake.preconditioners.fdm import broken_function, tabulate_exterior_derivative +from firedrake.preconditioners.hiptmair import curl_to_grad +from functools import cached_property + +from firedrake.parloops import par_loop, INC, READ +from firedrake.bcs import DirichletBC +from firedrake.mesh import Submesh +from ufl import Form, H1, H2, JacobianDeterminant, dx, inner, replace +from finat.ufl import BrokenElement +from pyop2.mpi import COMM_SELF from pyop2.utils import as_tuple import numpy @@ -18,48 +31,75 @@ class BDDCPC(PCBase): Internally, this PC creates a PETSc PCBDDC object that can be controlled by the options: + - ``'bddc_cellwise'`` to set up a MatIS on cellwise 
subdomains if P.type == python, + - ``'bddc_matfree'`` to set up a matrix-free MatIS if A.type == python, - ``'bddc_pc_bddc_neumann'`` to set sub-KSPs on subdomains excluding corners, - ``'bddc_pc_bddc_dirichlet'`` to set sub-KSPs on subdomain interiors, - ``'bddc_pc_bddc_coarse'`` to set the coarse solver KSP. - This PC also inspects optional arguments supplied in the application context: - - ``'discrete_gradient'`` for problems in H(curl), this sets the arguments - (a Mat tabulating the gradient of the auxiliary H1 space) and + This PC also inspects optional callbacks supplied in the application context: + - ``'get_discrete_gradient'`` for 3D problems in H(curl), this is a callable that + provides the arguments (a Mat tabulating the gradient of the auxiliary H1 space) and keyword arguments supplied to ``PETSc.PC.setBDDCDiscreteGradient``. - - ``'divergence_mat'`` for 3D problems in H(div), this sets the Mat with the - assembled bilinear form testing the divergence against an L2 space. - - Notes - ----- - Currently the Mat type IS is only supported by FDMPC. - + - ``'get_divergence_mat'`` for problems in H(div) (resp. 2D H(curl)), this is a callable that + provides the arguments (a Mat with the assembled bilinear form testing the divergence + (curl) against an L2 space) and keyword arguments supplied to ``PETSc.PC.setDivergenceMat``. + - ``'primal_markers'`` is a Function marking degrees of freedom of the solution space to be included in the + coarse space. Any nonzero value is counted as a marked degree of freedom. + If a DG(0) Function is provided, then all degrees of freedom on the cell are marked. + Alternatively, ``'primal_markers'`` can be a list of the global degrees of freedom to + be supplied directly to ``PETSc.PC.setBDDCPrimalVerticesIS``.
""" _prefix = "bddc_" def initialize(self, pc): - # Get context from pc - _, P = pc.getOperators() - dm = pc.getDM() - self.prefix = (pc.getOptionsPrefix() or "") + self._prefix + prefix = (pc.getOptionsPrefix() or "") + self._prefix + dm = pc.getDM() V = get_function_space(dm) # Create new PC object as BDDC type bddcpc = PETSc.PC().create(comm=pc.comm) bddcpc.incrementTabLevel(1, parent=pc) - bddcpc.setOptionsPrefix(self.prefix) - bddcpc.setOperators(*pc.getOperators()) + bddcpc.setOptionsPrefix(prefix) bddcpc.setType(PETSc.PC.Type.BDDC) opts = PETSc.Options(bddcpc.getOptionsPrefix()) - if V.ufl_element().variant() == "fdm" and "pc_bddc_use_local_mat_graph" not in opts: - # Disable computation of disconected components of subdomain interfaces + matfree = opts.getBool("matfree", False) + + # Set operators + assemblers = [] + A, P = pc.getOperators() + if P.type == "python": + # Reconstruct P as MatIS + cellwise = opts.getBool("cellwise", False) + P, assembleP = create_matis(P, "aij", cellwise=cellwise) + assemblers.append(assembleP) + + if P.type != "is": + raise ValueError(f"Expecting P to be either 'matfree' or 'is', not {P.type}.") + + if A.type == "python" and matfree: + # Reconstruct A as MatIS + A, assembleA = create_matis(A, "matfree", cellwise=P.getISAllowRepeated()) + assemblers.append(assembleA) + bddcpc.setOperators(A, P) + self.assemblers = assemblers + + # Do not use CSR of local matrix to define dofs connectivity unless requested + # Using the CSR only makes sense for H1/H2 problems + is_h1h2 = V.ufl_element().sobolev_space in {H1, H2} + if "pc_bddc_use_local_mat_graph" not in opts and (not is_h1h2 or not V.finat_element.has_pointwise_dual_basis): opts["pc_bddc_use_local_mat_graph"] = False + # Get context from DM ctx = get_appctx(dm) - bcs = tuple(ctx._problem.bcs) - if V.extruded: + + # Handle boundary dofs + bcs = tuple(ctx._problem.dirichlet_bcs()) + mesh = V.mesh().unique() + if mesh.extruded and not mesh.extruded_periodic: boundary_nodes = 
numpy.unique(numpy.concatenate(list(map(V.boundary_nodes, ("on_boundary", "top", "bottom"))))) else: boundary_nodes = V.boundary_nodes("on_boundary") @@ -69,48 +109,54 @@ def initialize(self, pc): dir_nodes = numpy.unique(numpy.concatenate([bcdofs(bc, ghost=False) for bc in bcs])) neu_nodes = numpy.setdiff1d(boundary_nodes, dir_nodes) - V.dof_dset.lgmap.apply(dir_nodes, result=dir_nodes) + dir_nodes = V.dof_dset.lgmap.apply(dir_nodes) dir_bndr = PETSc.IS().createGeneral(dir_nodes, comm=pc.comm) bddcpc.setBDDCDirichletBoundaries(dir_bndr) - V.dof_dset.lgmap.apply(neu_nodes, result=neu_nodes) + neu_nodes = V.dof_dset.lgmap.apply(neu_nodes) neu_bndr = PETSc.IS().createGeneral(neu_nodes, comm=pc.comm) bddcpc.setBDDCNeumannBoundaries(neu_bndr) appctx = self.get_appctx(pc) - sobolev_space = V.ufl_element().sobolev_space - tdim = V.mesh().topological_dimension() - degree = max(as_tuple(V.ufl_element().degree())) + # Set coordinates only if corner selection is requested + # There's no API to query from PC + if "pc_bddc_corner_selection" in opts: + degree = max(as_tuple(V.ufl_element().degree())) + variant = V.ufl_element().variant() + W = VectorFunctionSpace(mesh, "Lagrange", degree, variant=variant) + coords = Function(W).interpolate(mesh.coordinates) + bddcpc.setCoordinates(coords.dat.data_ro.repeat(V.block_size, axis=0)) + + tdim = mesh.topological_dimension if tdim >= 2 and V.finat_element.formdegree == tdim-1: - B = appctx.get("divergence_mat", None) - if B is None: - from firedrake.assemble import assemble - d = {HCurl: curl, HDiv: div}[sobolev_space] - Q = V.reconstruct(family="DG", degree=degree-1) - b = inner(d(TrialFunction(V)), TestFunction(Q)) * dx(degree=2*(degree-1)) - B = assemble(b, mat_type="matfree") - bddcpc.setBDDCDivergenceMat(B.petscmat) - elif sobolev_space == HCurl: - gradient = appctx.get("discrete_gradient", None) - if gradient is None: - from firedrake.preconditioners.fdm import tabulate_exterior_derivative - from 
firedrake.preconditioners.hiptmair import curl_to_grad - Q = V.reconstruct(element=curl_to_grad(V.ufl_element())) - gradient = tabulate_exterior_derivative(Q, V) - corners = get_vertex_dofs(Q) - gradient.compose('_elements_corners', corners) - grad_args = (gradient,) - grad_kwargs = {'order': degree} - else: - try: - grad_args, grad_kwargs = gradient - except ValueError: - grad_args = (gradient,) - grad_kwargs = dict() + allow_repeated = P.getISAllowRepeated() + get_divergence = appctx.get("get_divergence_mat", get_divergence_mat) + divergence = get_divergence(V, mat_type="is", allow_repeated=allow_repeated) + try: + div_args, div_kwargs = divergence + except ValueError: + div_args = (divergence,) + div_kwargs = dict() + bddcpc.setBDDCDivergenceMat(*div_args, **div_kwargs) + elif tdim >= 3 and V.finat_element.formdegree == 1: + get_gradient = appctx.get("get_discrete_gradient", get_discrete_gradient) + gradient = get_gradient(V) + try: + grad_args, grad_kwargs = gradient + except ValueError: + grad_args = (gradient,) + grad_kwargs = dict() bddcpc.setBDDCDiscreteGradient(*grad_args, **grad_kwargs) + # Set the user-defined primal (coarse) degrees of freedom + primal_markers = appctx.get("primal_markers") + if primal_markers is not None: + primal_indices = get_primal_indices(V, primal_markers) + primal_is = PETSc.IS().createGeneral(primal_indices.astype(PETSc.IntType), comm=pc.comm) + bddcpc.setBDDCPrimalVerticesIS(primal_is) + bddcpc.setFromOptions() self.pc = bddcpc @@ -118,7 +164,8 @@ def view(self, pc, viewer=None): self.pc.view(viewer=viewer) def update(self, pc): - pass + for c in self.assemblers: + c() def apply(self, pc, x, y): self.pc.apply(x, y) @@ -127,9 +174,167 @@ def applyTranspose(self, pc, x, y): self.pc.applyTranspose(x, y) -def get_vertex_dofs(V): - W = V.reconstruct(element=restrict(V.ufl_element(), "vertex")) +class BrokenDirichletBC(DirichletBC): + def __init__(self, bc): + self.bc = bc + V = bc.function_space().broken_space() + g = 
bc._original_arg + super().__init__(V, g, bc.sub_domain) + + @cached_property + def nodes(self): + u = Function(self.bc.function_space()) + self.bc.set(u, 1) + u = broken_function(u.function_space(), val=u.dat) + return numpy.flatnonzero(u.dat.data) + + +def create_matis(Amat, local_mat_type, cellwise=False): + from firedrake.assemble import get_assembler + + def local_mesh(mesh): + key = "local_submesh" + cache = mesh._shared_data_cache["local_submesh_cache"] + try: + return cache[key] + except KeyError: + if mesh.comm.size > 1: + submesh = Submesh(mesh, ignore_halo=True, comm=COMM_SELF) + else: + submesh = None + return cache.setdefault(key, submesh) + + def local_space(V, cellwise): + mesh = local_mesh(V.mesh().unique()) + element = BrokenElement(V.ufl_element()) if cellwise else None + return V.reconstruct(mesh=mesh, element=element) + + def local_argument(arg, cellwise): + return arg.reconstruct(function_space=local_space(arg.function_space(), cellwise)) + + def local_integral(it): + extra_domain_integral_type_map = dict(it.extra_domain_integral_type_map()) + extra_domain_integral_type_map[it.ufl_domain()] = it.integral_type() + return it.reconstruct(domain=local_mesh(it.ufl_domain()), + extra_domain_integral_type_map=extra_domain_integral_type_map) + + def local_bc(bc, cellwise): + V = bc.function_space() + Vsub = local_space(V, False) + sub_domain = list(bc.sub_domain) + if "on_boundary" in sub_domain: + sub_domain.remove("on_boundary") + sub_domain.extend(V.mesh().unique().exterior_facets.unique_markers) + + valid_markers = Vsub.mesh().unique().exterior_facets.unique_markers + sub_domain = list(set(sub_domain) & set(valid_markers)) + bc = bc.reconstruct(V=Vsub, g=0, sub_domain=sub_domain) + if cellwise: + bc = BrokenDirichletBC(bc) + return bc + + def local_to_global_map(V, cellwise): + u = Function(V) + u.dat.data_wo[:] = numpy.arange(*V.dof_dset.layout_vec.getOwnershipRange()) + + Vsub = local_space(V, False) + usub = Function(Vsub).assign(u) + if 
cellwise: + usub = broken_function(usub.function_space(), val=usub.dat) + indices = usub.dat.data_ro.astype(PETSc.IntType) + return PETSc.LGMap().create(indices, comm=V.comm) + + assert Amat.type == "python" + ctx = Amat.getPythonContext() + form = ctx.a + bcs = ctx.bcs + + local_form = replace(form, {arg: local_argument(arg, cellwise) for arg in form.arguments()}) + local_form = Form(list(map(local_integral, local_form.integrals()))) + local_bcs = tuple(map(local_bc, bcs, repeat(cellwise))) + + assembler = get_assembler(local_form, bcs=local_bcs, mat_type=local_mat_type) + tensor = assembler.assemble() + + rmap = local_to_global_map(form.arguments()[0].function_space(), cellwise) + cmap = local_to_global_map(form.arguments()[1].function_space(), cellwise) + + Amatis = PETSc.Mat().createIS(Amat.getSizes(), comm=Amat.getComm()) + Amatis.setISAllowRepeated(cellwise) + Amatis.setLGMap(rmap, cmap) + Amatis.setISLocalMat(tensor.petscmat) + Amatis.setUp() + Amatis.assemble() + + def update(): + assembler.assemble(tensor=tensor) + Amatis.assemble() + return Amatis, update + + +def get_restricted_dofs(V, domain): + W = FunctionSpace(V.mesh(), V.ufl_element()[domain]) indices = get_restriction_indices(V, W) - V.dof_dset.lgmap.apply(indices, result=indices) - vertex_dofs = PETSc.IS().createGeneral(indices, comm=V.comm) - return vertex_dofs + indices = V.dof_dset.lgmap.apply(indices) + return PETSc.IS().createGeneral(indices, comm=V.comm) + + +def get_divergence_mat(V, mat_type="is", allow_repeated=False): + from firedrake import assemble + degree = max(as_tuple(V.ufl_element().degree())) + Q = TensorFunctionSpace(V.mesh(), "DG", 0, variant=f"integral({degree-1})", shape=V.value_shape[:-1]) + B = tabulate_exterior_derivative(V, Q, mat_type=mat_type, allow_repeated=allow_repeated) + + Jdet = JacobianDeterminant(V.mesh()) + s = assemble(inner(TrialFunction(Q)*(1/Jdet), TestFunction(Q))*dx(degree=0), diagonal=True) + with s.dat.vec as svec: + B.diagonalScale(svec, None) + return 
(B,), {} + + +def get_discrete_gradient(V): + from firedrake import Constant + from firedrake.nullspace import VectorSpaceBasis + + Q = FunctionSpace(V.mesh(), curl_to_grad(V.ufl_element())) + gradient = tabulate_exterior_derivative(Q, V) + basis = Function(Q) + try: + basis.interpolate(Constant(1)) + except NotImplementedError: + basis.project(Constant(1)) + nsp = VectorSpaceBasis([basis]) + nsp.orthonormalize() + gradient.setNullSpace(nsp.nullspace()) + if not Q.finat_element.has_pointwise_dual_basis: + vdofs = get_restricted_dofs(Q, "vertex") + gradient.compose('_elements_corners', vdofs) + + degree = max(as_tuple(Q.ufl_element().degree())) + grad_args = (gradient,) + grad_kwargs = {'order': degree} + return grad_args, grad_kwargs + + +def get_primal_indices(V, primal_markers): + if isinstance(primal_markers, Function): + marker_space = primal_markers.function_space() + if marker_space == V: + markers = primal_markers + elif marker_space.finat_element.space_dimension() == 1: + shapes = (V.finat_element.space_dimension(), V.block_size) + domain = "{[i,j]: 0 <= i < %d and 0 <= j < %d}" % shapes + instructions = """ + for i, j + w[i,j] = w[i,j] + t[0] + end + """ + markers = Function(V) + par_loop((domain, instructions), dx, {"w": (markers, INC), "t": (primal_markers, READ)}) + else: + raise ValueError(f"Expecting markers in either {V.ufl_element()} or DG(0).") + primal_indices = numpy.flatnonzero(markers.dat.data >= 1E-12) + primal_indices += V.dof_dset.layout_vec.getOwnershipRange()[0] + else: + primal_indices = numpy.asarray(primal_markers, dtype=PETSc.IntType) + return primal_indices diff --git a/firedrake/preconditioners/covariance.py b/firedrake/preconditioners/covariance.py new file mode 100644 index 0000000000..2b92cd79e0 --- /dev/null +++ b/firedrake/preconditioners/covariance.py @@ -0,0 +1,119 @@ +import petsctools +from firedrake.petsc import PETSc +from firedrake.function import Function + + +class CovariancePC(petsctools.PCBase): + r""" + A python PC 
context for a covariance operator. + Will apply either the action or inverse of the covariance, + whichever is the opposite of the Mat operator. + + .. math:: + + B: V^{*} \to V + + B^{-1}: V \to V^{*} + + Available options: + + * ``-pc_use_amat`` - use Amat to apply the covariance operator. + + See Also + -------- + ~firedrake.adjoint.covariance_operator.CovarianceOperatorBase + ~firedrake.adjoint.covariance_operator.AutoregressiveCovariance + ~firedrake.adjoint.covariance_operator.CovarianceMatCtx + ~firedrake.adjoint.covariance_operator.CovarianceMat + """ + needs_python_pmat = True + prefix = "covariance" + + def initialize(self, pc): + from firedrake.adjoint.covariance_operator import CovarianceMatCtx + A, P = pc.getOperators() + + use_amat_prefix = self.parent_prefix + "pc_use_amat" + self.use_amat = PETSc.Options().getBool(use_amat_prefix, False) + mat = (A if self.use_amat else P).getPythonContext() + + if not isinstance(mat, CovarianceMatCtx): + raise TypeError( + "CovariancePC needs a CovarianceMatCtx") + covariance = mat.covariance + + self.covariance = covariance + self.mat = mat + + V = covariance.function_space() + primal = Function(V) + dual = Function(V.dual()) + + # PC does the opposite of the Mat + if mat.operation == CovarianceMatCtx.Operation.ACTION: + self.operation = CovarianceMatCtx.Operation.INVERSE + self.x = primal + self.y = dual + self._apply_op = covariance.apply_inverse + elif mat.operation == CovarianceMatCtx.Operation.INVERSE: + self.operation = CovarianceMatCtx.Operation.ACTION + self.x = dual + self.y = primal + self._apply_op = covariance.apply_action + + def apply(self, pc, x, y): + """Apply the action or inverse of the covariance operator + to x, putting the result in y. + + y is not guaranteed to be zero on entry. + + Parameters + ---------- + pc : PETSc.PC + The PETSc preconditioner that self is the python context of. + x : PETSc.Vec + The vector acted on by the pc. + y : PETSc.Vec + The result of the pc application. 
+ """ + with self.x.dat.vec_wo as xvec: + x.copy(result=xvec) + + self._apply_op(self.x, tensor=self.y) + + with self.y.dat.vec_ro as yvec: + yvec.copy(result=y) + + def update(self, pc): + pass + + def view(self, pc, viewer=None): + """View object. Method usually called by PETSc with e.g. -ksp_view. + """ + from firedrake.adjoint.covariance_operator import ( + CovarianceMatCtx, AutoregressiveCovariance) + if viewer is None: + return + if viewer.getType() != PETSc.Viewer.Type.ASCII: + return + + viewer.printfASCII(f" firedrake covariance operator preconditioner: {type(self).__name__}\n") + viewer.printfASCII(f" Applying the {str(self.operation)} of the covariance operator {type(self.covariance).__name__}\n") + + if self.use_amat: + viewer.printfASCII(" using Amat matrix\n") + + if (type(self.covariance) is AutoregressiveCovariance) and (self.covariance.iterations > 0): + if viewer.getFormat() == PETSc.Viewer.Format.ASCII_INFO_DETAIL: + if self.operation == CovarianceMatCtx.Operation.ACTION: + viewer.printfASCII(" Information for the diffusion solver for applying the action:\n") + ksp = self.covariance.solver.snes.ksp + elif self.operation == CovarianceMatCtx.Operation.INVERSE: + viewer.printfASCII(" Information for the mass solver for applying the inverse:\n") + ksp = self.covariance.mass_solver.snes.ksp + viewer.pushASCIITab() + ksp.view(viewer) + viewer.popASCIITab() + else: + prefix = pc.getOptionsPrefix() or "" + viewer.printfASCII(f" Use -{prefix}ksp_view ::ascii_info_detail to display information for diffusion or mass solver.\n") diff --git a/firedrake/preconditioners/facet_split.py b/firedrake/preconditioners/facet_split.py index f4ac80eea0..08ac21aeb0 100644 --- a/firedrake/preconditioners/facet_split.py +++ b/firedrake/preconditioners/facet_split.py @@ -1,13 +1,15 @@ from functools import partial from mpi4py import MPI from pyop2 import op2, PermutedMap -from finat.ufl import RestrictedElement, MixedElement, TensorElement, VectorElement +from finat.ufl 
import MixedElement from firedrake.petsc import PETSc from firedrake.preconditioners.base import PCBase from firedrake.bcs import restricted_function_space import firedrake.dmhooks as dmhooks import numpy +from pyop2.mpi import temp_internal_comm + __all__ = ['FacetSplitPC'] @@ -33,10 +35,11 @@ def get_indices(self, V, W): key = (V, W) if key not in self._index_cache: indices = get_restriction_indices(V, W) - if V._comm.allreduce(len(indices) == V.dof_count and numpy.all(indices[:-1] <= indices[1:]), MPI.PROD): - self._index_cache[key] = None - else: - self._index_cache[key] = indices + with temp_internal_comm(V.comm) as icomm: + if icomm.allreduce(len(indices) == V.dof_count and numpy.all(indices[:-1] <= indices[1:]), MPI.PROD): + self._index_cache[key] = None + else: + self._index_cache[key] = indices return self._index_cache[key] def initialize(self, pc): @@ -57,8 +60,8 @@ def initialize(self, pc): raise ValueError("Decomposition of mixed elements is not supported") element = V.ufl_element() - elements = [restrict(element, domain) for domain in domains] - W = FunctionSpace(V.mesh(), elements[0] if len(elements) == 1 else MixedElement(elements)) + elements = [element[domain] for domain in domains] + W = FunctionSpace(V.mesh(), elements[0] if len(elements) == 1 else MixedElement(*elements)) if V.boundary_set: W = restricted_function_space(W, [V.boundary_set]*len(W)) @@ -203,17 +206,6 @@ def destroy(self, pc): self.subset.destroy() -def restrict(ele, restriction_domain): - """ Restrict a UFL element, keeping VectorElement and TensorElement as the outermost modifier. 
- """ - if isinstance(ele, VectorElement): - return type(ele)(restrict(ele._sub_element, restriction_domain), dim=ele.num_sub_elements) - elif isinstance(ele, TensorElement): - return type(ele)(restrict(ele._sub_element, restriction_domain), shape=ele._shape, symmetry=ele._symmety) - else: - return RestrictedElement(ele, restriction_domain) - - def split_dofs(elem): """ Split DOFs into interior and facet DOF, where facets are sorted by entity. """ @@ -242,7 +234,7 @@ def restricted_dofs(celem, felem): cdofs = celem.entity_dofs() fdofs = felem.entity_dofs() for dim in sorted(cdofs): - for entity in cdofs[dim]: + for entity in sorted(cdofs[dim]): ndofs = len(cdofs[dim][entity]) indices[cdofs[dim][entity]] = fdofs[dim][entity][:ndofs] return indices @@ -251,6 +243,9 @@ def restricted_dofs(celem, felem): def get_restriction_indices(V, W): """Return the list of dofs in the space V such that W = V[indices]. """ + if V.cell_node_map() is W.cell_node_map(): + return numpy.arange(V.dof_dset.layout_vec.getSizes()[0], dtype=PETSc.IntType) + vdat = V.make_dat(val=numpy.arange(V.dof_count, dtype=PETSc.IntType)) wdats = [Wsub.make_dat(val=numpy.full((Wsub.dof_count,), -1, dtype=PETSc.IntType)) for Wsub in W] wdat = wdats[0] if len(W) == 1 else op2.MixedDat(wdats) diff --git a/firedrake/preconditioners/fdm.py b/firedrake/preconditioners/fdm.py index 24cf2c4e94..56468e4a1e 100644 --- a/firedrake/preconditioners/fdm.py +++ b/firedrake/preconditioners/fdm.py @@ -1,5 +1,5 @@ from textwrap import dedent -from functools import partial +from functools import cached_property, partial from itertools import chain, product from firedrake.petsc import PETSc from firedrake.preconditioners.base import PCBase @@ -8,21 +8,20 @@ evaluate_dual, get_permutation_to_nodal_elements, cache_generate_code) -from firedrake.preconditioners.facet_split import split_dofs, restricted_dofs +from firedrake.preconditioners.facet_split import restricted_dofs, split_dofs from firedrake.formmanipulation import 
ExtractSubBlock from firedrake.functionspace import FunctionSpace, MixedFunctionSpace from firedrake.function import Function from firedrake.cofunction import Cofunction from firedrake.parloops import par_loop from firedrake.ufl_expr import TestFunction, TestFunctions, TrialFunctions -from firedrake.utils import cached_property from ufl.algorithms.ad import expand_derivatives from ufl.algorithms.expand_indices import expand_indices from finat.element_factory import create_element from pyop2.compilation import load from pyop2.mpi import COMM_SELF from pyop2.sparsity import get_preallocation -from pyop2.utils import get_petsc_dir, as_tuple +from pyop2.utils import as_tuple from pyop2 import op2 from tsfc.ufl_utils import extract_firedrake_constants from firedrake.tsfc_interface import compile_form @@ -40,33 +39,6 @@ __all__ = ("FDMPC", "PoissonFDMPC") -def broken_function(V, val): - W = FunctionSpace(V.mesh(), finat.ufl.BrokenElement(V.ufl_element())) - w = Function(W, dtype=val.dtype) - v = Function(V, val=val) - domain = "{[i]: 0 <= i < v.dofs}" - instructions = """ - for i - w[i] = v[i] - end - """ - par_loop((domain, instructions), ufl.dx, {'w': (w, op2.WRITE), 'v': (v, op2.READ)}) - return w - - -def mask_local_indices(V, lgmap, repeated=False): - mask = lgmap.indices - if repeated: - w = broken_function(V, mask) - V = w.function_space() - mask = w.dat.data_ro_with_halos - indices = numpy.arange(len(mask), dtype=PETSc.IntType) - indices[mask == -1] = -1 - indices_dat = V.make_dat(val=indices) - indices_acc = indices_dat(op2.READ, V.cell_node_map()) - return indices_acc - - class FDMPC(PCBase): """ A preconditioner for tensor-product elements that changes the shape @@ -83,7 +55,7 @@ class FDMPC(PCBase): matrices. The PETSc options inspected by this class are: - - 'fdm_mat_type': can be either 'aij' or 'sbaij' + - 'fdm_mat_type': can be either 'aij', 'sbaij', or 'is' - 'fdm_static_condensation': are we assembling the Schur complement on facets? 
""" @@ -134,7 +106,10 @@ def initialize(self, pc): # Transform the problem into the space with FDM shape functions V = J.arguments()[-1].function_space() - V_fdm = V.reconstruct(variant=self._variant) + if self._variant == "fdm" and V.ufl_element().variant() in {"fdm", "demkowicz", "demkowiczmass"}: + V_fdm = V + else: + V_fdm = V.reconstruct(variant=self._variant) if V == V_fdm: J_fdm, bcs_fdm = (J, bcs) else: @@ -169,7 +144,7 @@ def initialize(self, pc): self.bc_nodes = numpy.empty(0, dtype=PETSc.IntType) # Internally, we just set up a PC object that the user can configure - # however from the PETSc command line. Since PC allows the user to specify + # however from the PETSc command line. Since PC allows the user to specify # a KSP, we can do iterative by -fdm_pc_type ksp. fdmpc = PETSc.PC().create(comm=pc.comm) fdmpc.incrementTabLevel(1, parent=pc) @@ -218,15 +193,15 @@ def allocate_matrix(self, Amat, V, J, bcs, fcp, pmat_type, use_static_condensati Vfacet = None Vbig = V ebig = V.ufl_element() - _, fdofs = split_dofs(V.finat_element) + idofs, fdofs = split_dofs(V.finat_element) elif len(ifacet) == 1: Vfacet = V[ifacet[0]] ebig, = set(unrestrict_element(Vsub.ufl_element()) for Vsub in V) - Vbig = V.reconstruct(element=ebig) - if len(V) > 1: - dims = [Vsub.finat_element.space_dimension() for Vsub in V] - assert sum(dims) == Vbig.finat_element.space_dimension() + Vbig = V.reconstruct(mesh=V.mesh().unique(), element=ebig) + space_dim = Vbig.finat_element.space_dimension() + assert space_dim == sum(Vsub.finat_element.space_dimension() for Vsub in V) fdofs = restricted_dofs(Vfacet.finat_element, Vbig.finat_element) + idofs = numpy.setdiff1d(numpy.arange(space_dim, dtype=fdofs.dtype), fdofs) else: raise ValueError("Expecting at most one FunctionSpace restricted onto facets.") self.embedding_element = ebig @@ -245,7 +220,7 @@ def allocate_matrix(self, Amat, V, J, bcs, fcp, pmat_type, use_static_condensati # Dictionary with kernel to compute the Schur complement 
self.schur_kernel = {} - if V == Vbig and Vbig.finat_element.formdegree == 0: + if V == Vbig and Vbig.finat_element.formdegree == 0 and len(idofs) > 0 and pmat_type.endswith("aij"): # If we are in H(grad), we just pad with zeros on the statically-condensed pattern self.schur_kernel[V] = SchurComplementPattern elif Vfacet and use_static_condensation: @@ -481,7 +456,7 @@ def assemble_coefficients(self, J, fcp, block_diagonal=False): args_J = J.arguments() e = args_J[0].ufl_element() mesh = args_J[0].function_space().mesh() - tdim = mesh.topological_dimension() + tdim = mesh.topological_dimension if isinstance(e, (finat.ufl.VectorElement, finat.ufl.TensorElement)): e = e._sub_element e = unrestrict_element(e) @@ -497,7 +472,7 @@ def assemble_coefficients(self, J, fcp, block_diagonal=False): dku = ufl.div(u) if sobolev == ufl.HDiv else ufl.curl(u) eps = expand_derivatives(ufl.diff(ufl.replace(expand_derivatives(dku), {ufl.grad(u): du}), du)) if sobolev == ufl.HDiv: - map_grad = lambda p: ufl.outer(p, eps/tdim) + map_grad = lambda p: ufl.conj(ufl.outer(p, eps/tdim)) elif len(eps.ufl_shape) == 3: map_grad = lambda p: ufl.dot(p, eps/2) else: @@ -710,65 +685,28 @@ def insert_mode(self): def assembly_lgmaps(self): if self.mat_type != "is": return {Vsub: Vsub.dof_dset.lgmap for Vsub in self.V} - lgmaps = {} - for Vsub in self.V: - lgmap = Vsub.dof_dset.lgmap - if self.allow_repeated: - indices = broken_function(Vsub, lgmap.indices).dat.data_ro - else: - indices = lgmap.indices.copy() - local_indices = numpy.arange(len(indices), dtype=PETSc.IntType) - cell_node_map = broken_function(Vsub, local_indices).dat.data_ro - ghost = numpy.setdiff1d(local_indices, numpy.unique(cell_node_map), assume_unique=True) - indices[ghost] = -1 - lgmaps[Vsub] = PETSc.LGMap().create(indices, bsize=lgmap.getBlockSize(), comm=lgmap.getComm()) - return lgmaps + return {Vsub: unghosted_lgmap(Vsub, Vsub.dof_dset.lgmap, self.allow_repeated) for Vsub in self.V} def setup_block(self, Vrow, Vcol): - # 
Preallocate the auxiliary sparse operator + """Preallocate the auxiliary sparse operator.""" sizes = tuple(Vsub.dof_dset.layout_vec.getSizes() for Vsub in (Vrow, Vcol)) rmap = self.assembly_lgmaps[Vrow] cmap = self.assembly_lgmaps[Vcol] on_diag = Vrow == Vcol ptype = self.mat_type if on_diag else PETSc.Mat.Type.AIJ - preallocator = PETSc.Mat().create(comm=self.comm) - preallocator.setType(PETSc.Mat.Type.PREALLOCATOR) - preallocator.setSizes(sizes) - preallocator.setISAllowRepeated(self.allow_repeated) - preallocator.setLGMap(rmap, cmap) - preallocator.setOption(PETSc.Mat.Option.IGNORE_ZERO_ENTRIES, False) - if ptype.endswith("sbaij"): - preallocator.setOption(PETSc.Mat.Option.IGNORE_LOWER_TRIANGULAR, True) - preallocator.setUp() + preallocator = get_preallocator(self.comm, sizes, rmap, cmap, mat_type=ptype) self.set_values(preallocator, Vrow, Vcol) preallocator.assemble() - dnz, onz = get_preallocation(preallocator, sizes[0][0]) - if on_diag: - numpy.maximum(dnz, 1, out=dnz) + P = allocate_matrix(preallocator, ptype, on_diag=on_diag, allow_repeated=self.allow_repeated) preallocator.destroy() - P = PETSc.Mat().create(comm=self.comm) - P.setType(ptype) - P.setSizes(sizes) - P.setISAllowRepeated(self.allow_repeated) - P.setLGMap(rmap, cmap) - if on_diag and ptype == "is" and self.allow_repeated: - bsize = Vrow.finat_element.space_dimension() * Vrow.block_size + + if on_diag and P.type == "is" and self.allow_repeated: + bsize = Vrow.block_size * Vrow.finat_element.space_dimension() local_mat = P.getISLocalMat() nblocks = local_mat.getSize()[0] // bsize sizes = numpy.full((nblocks,), bsize, dtype=PETSc.IntType) local_mat.setVariableBlockSizes(sizes) - P.setPreallocationNNZ((dnz, onz)) - - if not (ptype.endswith("sbaij") or ptype == "is"): - P.setOption(PETSc.Mat.Option.UNUSED_NONZERO_LOCATION_ERR, True) - P.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, True) - P.setOption(PETSc.Mat.Option.STRUCTURALLY_SYMMETRIC, on_diag) - 
P.setOption(PETSc.Mat.Option.FORCE_DIAGONAL_ENTRIES, True) - P.setOption(PETSc.Mat.Option.KEEP_NONZERO_PATTERN, True) - if ptype.endswith("sbaij"): - P.setOption(PETSc.Mat.Option.IGNORE_LOWER_TRIANGULAR, True) - P.setUp() return P @PETSc.Log.EventDecorator("FDMSetValues") @@ -1291,7 +1229,7 @@ def matmult_kernel_code(a, prefix="form", fcp=None, matshell=False): nargs = len(kernel.arguments) - len(a.arguments()) ncoef = nargs - len(extract_firedrake_constants(F)) - matmult_struct = cache_generate_code(kernel, V._comm) + matmult_struct = cache_generate_code(kernel, V.comm) matmult_struct = matmult_struct.replace("void "+kernel.name, "static void "+kernel.name) ctx_coeff = "".join(f"appctx[{i}], " for i in range(ncoef)) @@ -1647,7 +1585,7 @@ def diff_blocks(tdim, formdegree, A00, A11, A10): A_blocks = [[A00.kron(A10)], [A10.kron(A00)]] elif formdegree == 1: A_blocks = [[A10.kron(A11), A11.kron(A10)]] - A_blocks[-1][-1].scale(-1) + A_blocks[0][0].scale(-1) elif tdim == 3: if formdegree == 0: A_blocks = [[kron3(A00, A00, A10)], [kron3(A00, A10, A00)], [kron3(A10, A00, A00)]] @@ -1661,19 +1599,100 @@ def diff_blocks(tdim, formdegree, A00, A11, A10): return A_blocks -def tabulate_exterior_derivative(Vc, Vf, cbcs=[], fbcs=[], comm=None): - """ - Tabulate exterior derivative: Vc -> Vf as an explicit sparse matrix. - Works for any tensor-product basis. These are the same matrices one needs for HypreAMS and friends. 
+def broken_function(V, val): + """Return a Function(V, val=val) interpolated onto the broken space.""" + W = V.broken_space() + w = Function(W, dtype=val.dtype) + v = Function(V, val=val) + domain = "{[i]: 0 <= i < v.dofs}" + instructions = """ + for i + w[i] = v[i] + end """ + par_loop((domain, instructions), ufl.dx, {'w': (w, op2.WRITE), 'v': (v, op2.READ)}) + return w + + +def mask_local_indices(V, lgmap, allow_repeated): + """Return a numpy array with the masked local indices.""" + mask = lgmap.indices + if allow_repeated: + w = broken_function(V, mask) + V = w.function_space() + mask = w.dat.data_ro_with_halos + + indices = numpy.arange(mask.size, dtype=PETSc.IntType) + indices[mask == -1] = -1 + indices_dat = V.make_dat(val=indices) + indices_acc = indices_dat(op2.READ, V.cell_node_map()) + return indices_acc + + +def unghosted_lgmap(V, lgmap, allow_repeated): + """Construct the local to global mapping for MatIS assembly.""" + if allow_repeated: + indices = broken_function(V, lgmap.indices).dat.data_ro + else: + indices = lgmap.indices.copy() + local_indices = numpy.arange(indices.size, dtype=PETSc.IntType) + cell_node_map = broken_function(V, local_indices).dat.data_ro + ghost = numpy.setdiff1d(local_indices, numpy.unique(cell_node_map), assume_unique=True) + indices[ghost] = -1 + return PETSc.LGMap().create(indices, bsize=lgmap.getBlockSize(), comm=lgmap.getComm()) + + +def get_preallocator(comm, sizes, rmap, cmap, mat_type=None): + """Set up a matrix preallocator.""" + preallocator = PETSc.Mat().create(comm=comm) + preallocator.setType(PETSc.Mat.Type.PREALLOCATOR) + preallocator.setSizes(sizes) + preallocator.setLGMap(rmap, cmap) + preallocator.setOption(PETSc.Mat.Option.IGNORE_ZERO_ENTRIES, False) + if mat_type is not None and mat_type.endswith("sbaij"): + preallocator.setOption(PETSc.Mat.Option.IGNORE_LOWER_TRIANGULAR, True) + preallocator.setUp() + return preallocator + + +def allocate_matrix(preallocator, mat_type, on_diag=False, 
allow_repeated=False): + """Set up a matrix from a preallocator.""" + sizes = preallocator.getSizes() + nnz = get_preallocation(preallocator, sizes[0][0]) + if on_diag: + numpy.maximum(nnz[0], 1, out=nnz[0]) + + A = PETSc.Mat().create(comm=preallocator.getComm()) + A.setType(mat_type) + A.setSizes(sizes) + A.setBlockSize(preallocator.getBlockSize()) + A.setISAllowRepeated(allow_repeated) + A.setLGMap(*preallocator.getLGMap()) + A.setPreallocationNNZ(nnz) + if mat_type.endswith("sbaij"): + A.setOption(PETSc.Mat.Option.IGNORE_LOWER_TRIANGULAR, True) + if not (mat_type.endswith("sbaij") or mat_type == "is"): + A.setOption(PETSc.Mat.Option.UNUSED_NONZERO_LOCATION_ERR, True) + A.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, True) + A.setOption(PETSc.Mat.Option.STRUCTURALLY_SYMMETRIC, on_diag) + A.setOption(PETSc.Mat.Option.FORCE_DIAGONAL_ENTRIES, on_diag) + A.setOption(PETSc.Mat.Option.KEEP_NONZERO_PATTERN, True) + A.setUp() + return A + + +def tabulate_exterior_derivative(Vc, Vf, cbcs=[], fbcs=[], comm=None, mat_type="aij", allow_repeated=False): + """Tabulate exterior derivative: Vc -> Vf as an explicit sparse matrix. + Works for any tensor-product basis. 
These are the same matrices one needs for HypreAMS and friends.""" if comm is None: comm = Vf.comm + ec = Vc.finat_element ef = Vf.finat_element if ef.formdegree - ec.formdegree != 1: raise ValueError("Expecting Vf = d(Vc)") - if Vf.mesh().ufl_cell().is_simplex(): + if Vf.mesh().ufl_cell().is_simplex: c0 = ec.fiat_equivalent f1 = ef.fiat_equivalent derivative = {ufl.H1: "grad", ufl.HCurl: "curl", ufl.HDiv: "div"}[Vc.ufl_element().sobolev_space] @@ -1688,7 +1707,7 @@ def tabulate_exterior_derivative(Vc, Vf, cbcs=[], fbcs=[], comm=None): if c1.formdegree != 1: c1 = None - tdim = Vc.mesh().topological_dimension() + tdim = Vc.mesh().topological_dimension zero = PETSc.Mat() A00 = petsc_sparse(evaluate_dual(c0, f0), comm=COMM_SELF) if f0 else zero A11 = petsc_sparse(evaluate_dual(c1, f1), comm=COMM_SELF) if c1 else zero @@ -1718,29 +1737,30 @@ def tabulate_exterior_derivative(Vc, Vf, cbcs=[], fbcs=[], comm=None): temp.destroy() eye.destroy() - sizes = tuple(V.dof_dset.layout_vec.getSizes() for V in (Vf, Vc)) - preallocator = PETSc.Mat().create(comm=comm) - preallocator.setType(PETSc.Mat.Type.PREALLOCATOR) - preallocator.setSizes(sizes) - preallocator.setUp() - - kernel = ElementKernel(Dhat, name="exterior_derivative").kernel() - indices = tuple(op2.Dat(V.dof_dset, V.local_to_global_map(bcs).indices)(op2.READ, V.cell_node_map()) - for V, bcs in zip((Vf, Vc), (fbcs, cbcs))) - assembler = op2.ParLoop(kernel, + if mat_type != "is": + allow_repeated = False + spaces = (Vf, Vc) + bcs = (fbcs, cbcs) + lgmaps = tuple(V.local_to_global_map(bcs) for V, bcs in zip(spaces, bcs)) + indices_acc = tuple(mask_local_indices(V, lgmap, allow_repeated) for V, lgmap in zip(spaces, lgmaps)) + if mat_type == "is": + lgmaps = tuple(unghosted_lgmap(V, lgmap, allow_repeated) for V, lgmap in zip(spaces, lgmaps)) + + sizes = tuple(V.dof_dset.layout_vec.getSizes() for V in spaces) + preallocator = get_preallocator(comm, sizes, *lgmaps) + + kernel = ElementKernel(Dhat, name="exterior_derivative") + 
assembler = op2.ParLoop(kernel.kernel(mat_type=mat_type), Vc.mesh().cell_set, - *(op2.PassthroughArg(op2.OpaqueType("Mat"), m.handle) for m in (preallocator, Dhat)), - *indices) + *kernel.make_args(preallocator), + *indices_acc) assembler() preallocator.assemble() - nnz = get_preallocation(preallocator, sizes[0][0]) - preallocator.destroy() - Dmat = PETSc.Mat().createAIJ(sizes, Vf.block_size, nnz=nnz, comm=comm) - Dmat.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, True) + Dmat = allocate_matrix(preallocator, mat_type, allow_repeated=allow_repeated) assembler.arguments[0].data = Dmat.handle + preallocator.destroy() assembler() - Dmat.assemble() Dhat.destroy() return Dmat @@ -1810,11 +1830,13 @@ def setSubMatCSR(comm, triu=False): @staticmethod def load_c_code(code, name, comm, argtypes, restype): - petsc_dir = get_petsc_dir() - cppargs = [f"-I{d}/include" for d in petsc_dir] - ldargs = ([f"-L{d}/lib" for d in petsc_dir] - + [f"-Wl,-rpath,{d}/lib" for d in petsc_dir] - + ["-lpetsc", "-lm"]) + cppargs = petsctools.get_petsc_dirs(prefix="-I", subdir="include") + ldargs = ( + *petsctools.get_petsc_dirs(prefix="-L", subdir="lib"), + *petsctools.get_petsc_dirs(prefix="-Wl,-rpath,", subdir="lib"), + "-lpetsc", + "-lm", + ) dll = load(code, "c", cppargs=cppargs, ldargs=ldargs, comm=comm) fn = getattr(dll, name) fn.argtypes = argtypes @@ -1970,7 +1992,7 @@ def cell_to_global(lgmap, cell_to_local, cell_index, result=None): bsize = V.block_size ncomp = V.ufl_element().reference_value_size sdim = (V.finat_element.space_dimension() * bsize) // ncomp # dimension of a single component - tdim = V.mesh().topological_dimension() + tdim = V.mesh().topological_dimension shift = axes_shifts * bsize index_coef, _ = extrude_node_map((Gq or Bq).cell_node_map()) @@ -2073,7 +2095,7 @@ def cell_to_global(lgmap, cell_to_local, cell_index, result=None): if any(Dk is not None for Dk in Dfdm): if static_condensation: raise NotImplementedError("Static condensation for SIPG not 
implemented") - if tdim < V.mesh().geometric_dimension(): + if tdim < V.mesh().geometric_dimension: raise NotImplementedError("SIPG on immersed meshes is not implemented") eta = float(self.appctx.get("eta")) @@ -2172,7 +2194,7 @@ def assemble_coefficients(self, J, fcp): args_J = J.arguments() V = args_J[-1].function_space() mesh = V.mesh() - tdim = mesh.topological_dimension() + tdim = mesh.topological_dimension Finv = ufl.JacobianInverse(mesh) degree, = set(as_tuple(V.ufl_element().degree())) @@ -2206,7 +2228,7 @@ def assemble_coefficients(self, J, fcp): if not isinstance(alpha, ufl.constantvalue.Zero): Q = FunctionSpace(mesh, finat.ufl.TensorElement(DG, shape=alpha.ufl_shape)) tensor = coefficients.setdefault("alpha", Function(Q.dual())) - assembly_callables.append(partial(get_assembler(ufl.inner(TestFunction(Q), alpha)*dx, form_compiler_parameters=fcp).assemble, tensor=tensor)) + assembly_callables.append(partial(get_assembler(ufl.inner(alpha, TestFunction(Q))*dx, form_compiler_parameters=fcp).assemble, tensor=tensor)) # get zero-th order coefficent ref_val = [ufl.variable(t) for t in args_J] @@ -2227,7 +2249,7 @@ def assemble_coefficients(self, J, fcp): beta = ufl.diag_vector(beta) Q = FunctionSpace(mesh, finat.ufl.TensorElement(DG, shape=beta.ufl_shape) if beta.ufl_shape else DG) tensor = coefficients.setdefault("beta", Function(Q.dual())) - assembly_callables.append(partial(get_assembler(ufl.inner(TestFunction(Q), beta)*dx, form_compiler_parameters=fcp).assemble, tensor=tensor)) + assembly_callables.append(partial(get_assembler(ufl.inner(beta, TestFunction(Q))*dx, form_compiler_parameters=fcp).assemble, tensor=tensor)) family = "CG" if tdim == 1 else "DGT" degree = 1 if tdim == 1 else 0 @@ -2249,11 +2271,11 @@ def assemble_coefficients(self, J, fcp): Q = FunctionSpace(mesh, finat.ufl.TensorElement(DGT, shape=G.ufl_shape)) tensor = coefficients.setdefault("Gq_facet", Function(Q.dual())) - 
assembly_callables.append(partial(get_assembler(ifacet_inner(TestFunction(Q), G), form_compiler_parameters=fcp).assemble, tensor=tensor)) + assembly_callables.append(partial(get_assembler(ifacet_inner(G, TestFunction(Q)), form_compiler_parameters=fcp).assemble, tensor=tensor)) PT = Piola.T Q = FunctionSpace(mesh, finat.ufl.TensorElement(DGT, shape=PT.ufl_shape)) tensor = coefficients.setdefault("PT_facet", Function(Q.dual())) - assembly_callables.append(partial(get_assembler(ifacet_inner(TestFunction(Q), PT), form_compiler_parameters=fcp).assemble, tensor=tensor)) + assembly_callables.append(partial(get_assembler(ifacet_inner(PT, TestFunction(Q)), form_compiler_parameters=fcp).assemble, tensor=tensor)) # make DGT functions with BC flags shape = V.ufl_element().reference_value_shape @@ -2273,7 +2295,7 @@ def assemble_coefficients(self, J, fcp): if beta.ufl_shape: beta = ufl.diag_vector(beta) ds_ext = ufl.Measure(itype, domain=mesh, subdomain_id=it.subdomain_id(), metadata=md) - forms.append(ufl.inner(test, beta)*ds_ext) + forms.append(ufl.inner(beta, test)*ds_ext) tensor = coefficients.setdefault("bcflags", Function(Q.dual())) if len(forms): @@ -2288,7 +2310,7 @@ def assemble_coefficients(self, J, fcp): def get_piola_tensor(mapping, domain): - tdim = domain.topological_dimension() + tdim = domain.topological_dimension if mapping == 'identity': return None elif mapping == 'covariant piola': diff --git a/firedrake/preconditioners/gtmg.py b/firedrake/preconditioners/gtmg.py index 6ce73cd6b4..d26c6b73fe 100644 --- a/firedrake/preconditioners/gtmg.py +++ b/firedrake/preconditioners/gtmg.py @@ -4,7 +4,7 @@ from firedrake.petsc import PETSc from firedrake.preconditioners.base import PCBase from firedrake.parameters import parameters -from firedrake.interpolation import Interpolate +from firedrake.interpolation import interpolate from firedrake.solving_utils import _SNESContext from firedrake.matrix_free.operators import ImplicitMatrixContext import firedrake.dmhooks as 
dmhooks @@ -155,7 +155,7 @@ def initialize(self, pc): # Create interpolation matrix from coarse space to fine space fine_space = ctx.J.arguments()[0].function_space() coarse_test, coarse_trial = coarse_operator.arguments() - interp = assemble(Interpolate(coarse_trial, fine_space)) + interp = assemble(interpolate(coarse_trial, fine_space)) interp_petscmat = interp.petscmat restr_petscmat = appctx.get("restriction_matrix", None) @@ -179,7 +179,7 @@ def initialize(self, pc): # coarse space dm coarse_dm = coarse_space.dm coarse_solver.setDM(coarse_dm) - coarse_solver.setDMActive(False) + coarse_solver.setDMActive(PETSc.KSP.DMActive.ALL, False) pcmg.setDM(pc.getDM()) pcmg.setFromOptions() self.pc = pcmg diff --git a/firedrake/preconditioners/hiptmair.py b/firedrake/preconditioners/hiptmair.py index 14ec77fe1a..c0e74a563e 100644 --- a/firedrake/preconditioners/hiptmair.py +++ b/firedrake/preconditioners/hiptmair.py @@ -8,14 +8,13 @@ from firedrake.preconditioners.base import PCBase from firedrake.ufl_expr import TestFunction, TrialFunction from firedrake.preconditioners.hypre_ams import chop -from firedrake.preconditioners.facet_split import restrict from firedrake.parameters import parameters -from firedrake.interpolation import Interpolator +from firedrake.interpolation import interpolate from ufl.algorithms.ad import expand_derivatives import firedrake.dmhooks as dmhooks -import firedrake.utils as utils import ufl import finat.ufl +from functools import cached_property __all__ = ("TwoLevelPC", "HiptmairPC") @@ -80,7 +79,7 @@ def initialize(self, pc): coarse_space = coarse_operator.arguments()[-1].function_space() coarse_dm = coarse_space.dm coarse_solver.setDM(coarse_dm) - coarse_solver.setDMActive(False) + coarse_solver.setDMActive(PETSc.KSP.DMActive.ALL, False) pcmg.setDM(pc.getDM()) pcmg.setFromOptions() self.pc = pcmg @@ -159,7 +158,7 @@ def coarsen(self, pc): opts = PETSc.Options(options_prefix) domain = opts.getString("mg_coarse_restriction_domain", "") if 
domain: - celement = restrict(celement, domain) + celement = celement[domain] coarse_space = V.reconstruct(element=celement) assert coarse_space.finat_element.formdegree + 1 == formdegree @@ -202,7 +201,7 @@ def coarsen(self, pc): coarse_space_bcs = tuple(coarse_space_bcs) if G_callback is None: - interp_petscmat = chop(Interpolator(dminus(trial), V, bcs=bcs + coarse_space_bcs).callable().handle) + interp_petscmat = chop(assemble(interpolate(dminus(trial), V), bcs=bcs + coarse_space_bcs).petscmat) else: interp_petscmat = G_callback(coarse_space, V, coarse_space_bcs, bcs) @@ -210,14 +209,12 @@ def coarsen(self, pc): def curl_to_grad(ele): - if isinstance(ele, finat.ufl.VectorElement): - return type(ele)(curl_to_grad(ele._sub_element), dim=ele.num_sub_elements) - elif isinstance(ele, finat.ufl.TensorElement): - return type(ele)(curl_to_grad(ele._sub_element), shape=ele._shape, symmetry=ele.symmetry()) + if isinstance(ele, (finat.ufl.VectorElement, finat.ufl.TensorElement)): + return ele.reconstruct(curl_to_grad(ele._sub_element)) elif isinstance(ele, finat.ufl.MixedElement): - return type(ele)(*(curl_to_grad(e) for e in ele.sub_elements)) + return type(ele)(*map(curl_to_grad, ele.sub_elements)) elif isinstance(ele, finat.ufl.RestrictedElement): - return finat.ufl.RestrictedElement(curl_to_grad(ele._element), ele.restriction_domain()) + return ele.reconstruct(element=curl_to_grad(ele._element)) else: cell = ele.cell family = ele.family() @@ -230,25 +227,23 @@ def curl_to_grad(ele): degree = degree + 1 family = "CG" if isinstance(degree, tuple) and isinstance(cell, ufl.TensorProductCell): - cells = ele.cell.sub_cells() + cells = ele.cell.sub_cells elems = [finat.ufl.FiniteElement(family, cell=c, degree=d, variant=variant) for c, d in zip(cells, degree)] return finat.ufl.TensorProductElement(*elems, cell=cell) return finat.ufl.FiniteElement(family, cell=cell, degree=degree, variant=variant) def div_to_curl(ele): - if isinstance(ele, finat.ufl.VectorElement): - return 
type(ele)(div_to_curl(ele._sub_element), dim=ele.num_sub_elements) - elif isinstance(ele, finat.ufl.TensorElement): - return type(ele)(div_to_curl(ele._sub_element), shape=ele._shape, symmetry=ele.symmetry()) + if isinstance(ele, (finat.ufl.VectorElement, finat.ufl.TensorElement)): + return ele.reconstruct(sub_element=div_to_curl(ele._sub_element)) elif isinstance(ele, finat.ufl.MixedElement): - return type(ele)(*(div_to_curl(e) for e in ele.sub_elements)) - elif isinstance(ele, finat.ufl.RestrictedElement): - return finat.ufl.RestrictedElement(div_to_curl(ele._element), ele.restriction_domain()) + return type(ele)(*map(div_to_curl, ele.sub_elements)) elif isinstance(ele, finat.ufl.EnrichedElement): - return type(ele)(*(div_to_curl(e) for e in reversed(ele._elements))) + return type(ele)(*map(div_to_curl, reversed(ele._elements))) elif isinstance(ele, finat.ufl.TensorProductElement): - return type(ele)(*(div_to_curl(e) for e in ele.factor_elements), cell=ele.cell) + return type(ele)(*map(div_to_curl, ele.factor_elements), cell=ele.cell) + elif isinstance(ele, finat.ufl.RestrictedElement): + return ele.reconstruct(element=div_to_curl(ele._element)) elif isinstance(ele, finat.ufl.WithMapping): return type(ele)(div_to_curl(ele.wrapee), ele.mapping()) elif isinstance(ele, finat.ufl.BrokenElement): @@ -261,7 +256,7 @@ def div_to_curl(ele): degree = ele.degree() family = ele.family() if family in ["Lagrange", "CG", "Q"]: - family = "DG" if ele.cell.is_simplex() else "DQ" + family = "DG" if ele.cell.is_simplex else "DQ" degree = degree - 1 elif family in ["Discontinuous Lagrange", "DG", "DQ"]: family = "CG" @@ -289,6 +284,6 @@ def __init__(self, V, g, nodes): self._nodes = nodes super(BCFromNodes, self).__init__(V, g, tuple()) - @utils.cached_property + @cached_property def nodes(self): return self._nodes diff --git a/firedrake/preconditioners/hypre_ads.py b/firedrake/preconditioners/hypre_ads.py index 66ba5958ec..f3fe716599 100644 --- 
a/firedrake/preconditioners/hypre_ads.py +++ b/firedrake/preconditioners/hypre_ads.py @@ -30,9 +30,9 @@ def initialize(self, obj): # Get the auxiliary Nedelec and Lagrange spaces and the coordinate space cell = V.ufl_element().cell - NC1_element = FiniteElement("N1curl" if cell.is_simplex() else "NCE", cell=cell, degree=1) + NC1_element = FiniteElement("N1curl" if cell.is_simplex else "NCE", cell=cell, degree=1) P1_element = FiniteElement("Lagrange", cell=cell, degree=1) - coords_element = VectorElement(P1_element, dim=mesh.geometric_dimension()) + coords_element = VectorElement(P1_element, dim=mesh.geometric_dimension) if V.shape: NC1_element = TensorElement(NC1_element, shape=V.shape) P1_element = TensorElement(P1_element, shape=V.shape) diff --git a/firedrake/preconditioners/hypre_ams.py b/firedrake/preconditioners/hypre_ams.py index 6da4758e6f..3105f77f3a 100644 --- a/firedrake/preconditioners/hypre_ams.py +++ b/firedrake/preconditioners/hypre_ams.py @@ -51,7 +51,7 @@ def initialize(self, obj): # Get the auxiliary Lagrange space and the coordinate space P1_element = FiniteElement("Lagrange", degree=1) - coords_element = VectorElement(P1_element, dim=mesh.geometric_dimension()) + coords_element = VectorElement(P1_element, dim=mesh.geometric_dimension) if V.shape: P1_element = TensorElement(P1_element, shape=V.shape) P1 = V.reconstruct(element=P1_element) diff --git a/firedrake/preconditioners/patch.py b/firedrake/preconditioners/patch.py index 5c445e6ae0..ee4329a243 100644 --- a/firedrake/preconditioners/patch.py +++ b/firedrake/preconditioners/patch.py @@ -3,19 +3,21 @@ from firedrake.petsc import PETSc from firedrake.cython.patchimpl import set_patch_residual, set_patch_jacobian from firedrake.solving_utils import _SNESContext -from firedrake.utils import cached_property, complex_mode, IntType +from firedrake.utils import complex_mode, IntType from firedrake.dmhooks import get_appctx, push_appctx, pop_appctx -from firedrake.interpolation import Interpolate 
+from firedrake.interpolation import interpolate +from firedrake.ufl_expr import extract_domains from collections import namedtuple import operator from itertools import chain -from functools import partial +from functools import cached_property, partial import numpy from finat.ufl import VectorElement, MixedElement from ufl.domain import extract_unique_domain from tsfc.ufl_utils import extract_firedrake_constants import weakref +import petsctools import ctypes from pyop2 import op2 @@ -26,7 +28,6 @@ from pyop2.codegen.rep2loopy import register_petsc_function from pyop2.global_kernel import compile_global_kernel from pyop2.mpi import COMM_SELF -from pyop2.utils import get_petsc_dir __all__ = ("PatchPC", "PlaneSmoother", "PatchSNES") @@ -136,6 +137,10 @@ def increment_dat_version(self): CompiledKernel = namedtuple('CompiledKernel', ["funptr", "kinfo"]) +def get_map(V, base_mesh, base_integral_type): + return V.topological.entity_node_map(base_mesh.topology, base_integral_type, None, None) + + def matrix_funptr(form, state): from firedrake.tsfc_interface import compile_form test, trial = map(operator.methodcaller("function_space"), form.arguments()) @@ -149,34 +154,36 @@ def matrix_funptr(form, state): kernels = compile_form(form, "subspace_form", split=False, dont_split=dont_split) + all_meshes = extract_domains(form) cell_kernels = [] int_facet_kernels = [] + ext_facet_kernels = [] for kernel in kernels: kinfo = kernel.kinfo + mesh = all_meshes[kinfo.domain_number] # integration domain + integral_type = kinfo.integral_type if kinfo.subdomain_id != ("otherwise",): raise NotImplementedError("Only for full domain integrals") - if kinfo.integral_type not in {"cell", "interior_facet"}: - raise NotImplementedError("Only for cell or interior facet integrals") + if kinfo.integral_type not in {"cell", "interior_facet", "exterior_facet"}: + raise NotImplementedError("Only for cell, interior facet, or exterior facet integrals") # OK, now we've validated the kernel, let's 
build the callback args = [] - if kinfo.integral_type == "cell": - get_map = operator.methodcaller("cell_node_map") + if integral_type == "cell": kernels = cell_kernels - elif kinfo.integral_type == "interior_facet": - get_map = operator.methodcaller("interior_facet_node_map") + elif integral_type == "interior_facet": kernels = int_facet_kernels - else: - get_map = None + elif integral_type == "exterior_facet": + kernels = ext_facet_kernels toset = op2.Set(1, comm=test.comm) dofset = op2.DataSet(toset, 1) arity = sum(m.arity*s.cdim - for m, s in zip(get_map(test), + for m, s in zip(get_map(test, mesh, integral_type), test.dof_dset)) - iterset = get_map(test).iterset + iterset = get_map(test, mesh, integral_type).iterset entity_node_map = op2.Map(iterset, toset, arity, values=numpy.zeros(iterset.total_size*arity, dtype=IntType)) @@ -190,16 +197,17 @@ def matrix_funptr(form, state): values=numpy.zeros(iterset.total_size*arity, dtype=IntType)) statearg = statedat(op2.READ, state_entity_node_map) - mesh = form.ufl_domains()[kinfo.domain_number] - arg = mesh.coordinates.dat(op2.READ, get_map(mesh.coordinates)) - args.append(arg) - if kinfo.oriented: - c = mesh.cell_orientations() - arg = c.dat(op2.READ, get_map(c)) + for i in kinfo.active_domain_numbers.coordinates: + c = all_meshes[i].coordinates + arg = c.dat(op2.READ, get_map(c.function_space(), mesh, integral_type)) + args.append(arg) + for i in kinfo.active_domain_numbers.cell_orientations: + c = all_meshes[i].cell_orientations() + arg = c.dat(op2.READ, get_map(c.function_space(), mesh, integral_type)) args.append(arg) - if kinfo.needs_cell_sizes: - c = mesh.cell_sizes - arg = c.dat(op2.READ, get_map(c)) + for i in kinfo.active_domain_numbers.cell_sizes: + c = all_meshes[i].cell_sizes + arg = c.dat(op2.READ, get_map(c.function_space(), mesh, integral_type)) args.append(arg) for n, indices in kinfo.coefficient_numbers: c = form.coefficients()[n] @@ -210,7 +218,7 @@ def matrix_funptr(form, state): continue for ind in 
indices: c_ = c.subfunctions[ind] - map_ = get_map(c_) + map_ = get_map(c_.function_space(), mesh, integral_type) arg = c_.dat(op2.READ, map_) args.append(arg) @@ -218,15 +226,18 @@ def matrix_funptr(form, state): for constant_index in kinfo.constant_numbers: args.append(all_constants[constant_index].dat(op2.READ)) - if kinfo.integral_type == "interior_facet": + if integral_type == "interior_facet": arg = mesh.interior_facets.local_facet_dat(op2.READ) args.append(arg) + elif integral_type == "exterior_facet": + arg = mesh.exterior_facets.local_facet_dat(op2.READ) + args.append(arg) iterset = op2.Subset(iterset, []) wrapper_knl_args = tuple(a.global_kernel_arg for a in args) mod = op2.GlobalKernel(kinfo.kernel, wrapper_knl_args, subset=True) kernels.append(CompiledKernel(compile_global_kernel(mod, iterset.comm), kinfo)) - return cell_kernels, int_facet_kernels + return cell_kernels, int_facet_kernels, ext_facet_kernels def residual_funptr(form, state): @@ -243,32 +254,34 @@ def residual_funptr(form, state): kernels = compile_form(form, "subspace_form", split=False, dont_split=dont_split) + all_meshes = extract_domains(form) cell_kernels = [] int_facet_kernels = [] + ext_facet_kernels = [] for kernel in kernels: kinfo = kernel.kinfo + mesh = all_meshes[kinfo.domain_number] # integration domain + integral_type = kinfo.integral_type if kinfo.subdomain_id != ("otherwise",): raise NotImplementedError("Only for full domain integrals") - if kinfo.integral_type not in {"cell", "interior_facet"}: - raise NotImplementedError("Only for cell integrals or interior_facet integrals") + if kinfo.integral_type not in {"cell", "interior_facet", "exterior_facet"}: + raise NotImplementedError("Only for cell, interior facet, or exterior facet integrals") args = [] if kinfo.integral_type == "cell": - get_map = operator.methodcaller("cell_node_map") kernels = cell_kernels elif kinfo.integral_type == "interior_facet": - get_map = operator.methodcaller("interior_facet_node_map") kernels = 
int_facet_kernels - else: - get_map = None + elif kinfo.integral_type == "exterior_facet": + kernels = ext_facet_kernels toset = op2.Set(1, comm=test.comm) dofset = op2.DataSet(toset, 1) arity = sum(m.arity*s.cdim - for m, s in zip(get_map(test), + for m, s in zip(get_map(test, mesh, integral_type), test.dof_dset)) - iterset = get_map(test).iterset + iterset = get_map(test, mesh, integral_type).iterset entity_node_map = op2.Map(iterset, toset, arity, values=numpy.zeros(iterset.total_size*arity, dtype=IntType)) @@ -283,17 +296,17 @@ def residual_funptr(form, state): arg = dat(op2.INC, entity_node_map) args.append(arg) - mesh = form.ufl_domains()[kinfo.domain_number] - arg = mesh.coordinates.dat(op2.READ, get_map(mesh.coordinates)) - args.append(arg) - - if kinfo.oriented: - c = mesh.cell_orientations() - arg = c.dat(op2.READ, get_map(c)) + for i in kinfo.active_domain_numbers.coordinates: + c = all_meshes[i].coordinates + arg = c.dat(op2.READ, get_map(c.function_space(), mesh, integral_type)) + args.append(arg) + for i in kinfo.active_domain_numbers.cell_orientations: + c = all_meshes[i].cell_orientations() + arg = c.dat(op2.READ, get_map(c.function_space(), mesh, integral_type)) args.append(arg) - if kinfo.needs_cell_sizes: - c = mesh.cell_sizes - arg = c.dat(op2.READ, get_map(c)) + for i in kinfo.active_domain_numbers.cell_sizes: + c = all_meshes[i].cell_sizes + arg = c.dat(op2.READ, get_map(c.function_space(), mesh, integral_type)) args.append(arg) for n, indices in kinfo.coefficient_numbers: c = form.coefficients()[n] @@ -304,7 +317,7 @@ def residual_funptr(form, state): continue for ind in indices: c_ = c.subfunctions[ind] - map_ = get_map(c_) + map_ = get_map(c_.function_space(), mesh, integral_type) arg = c_.dat(op2.READ, map_) args.append(arg) @@ -315,12 +328,15 @@ def residual_funptr(form, state): if kinfo.integral_type == "interior_facet": arg = extract_unique_domain(test).interior_facets.local_facet_dat(op2.READ) args.append(arg) + elif 
kinfo.integral_type == "exterior_facet": + arg = extract_unique_domain(test).exterior_facets.local_facet_dat(op2.READ) + args.append(arg) iterset = op2.Subset(iterset, []) wrapper_knl_args = tuple(a.global_kernel_arg for a in args) mod = op2.GlobalKernel(kinfo.kernel, wrapper_knl_args, subset=True) kernels.append(CompiledKernel(compile_global_kernel(mod, iterset.comm), kinfo)) - return cell_kernels, int_facet_kernels + return cell_kernels, int_facet_kernels, ext_facet_kernels # We need to set C function pointer callbacks for PCPatch to work. @@ -375,7 +391,7 @@ class Struct(ctypes.Structure): const PetscInt *point2facet; {}; }} UserCtx;""".format(coeff_struct, map_struct, function) - call = "pyop2_call(0, npoints, whichPoints, out, {}, dofArray, {})".format(coeff_call, map_call) + call = "pyop2_call(0, npoints, whichPoints, out, {}, activeDofsArray, {})".format(coeff_call, map_call) return struct, call, Struct @@ -397,11 +413,14 @@ def make_residual_wrapper(coeffs, maps, flops): const PetscInt *dofArrayWithAll, void *ctx_) {{ - const PetscScalar *state = NULL; - const PetscInt *whichPoints = NULL; - PetscScalar *out = NULL; - UserCtx *ctx = (UserCtx *)ctx_; + const PetscScalar *state = NULL; + const PetscInt *whichPoints = NULL; + const PetscInt *activeDofsArray = dofArray; + PetscScalar *out = NULL; + UserCtx *ctx = (UserCtx *)ctx_; PetscInt npoints; + PetscInt *filtpoints = NULL; + PetscInt *filtdofs = NULL; PetscErrorCode ierr; PetscFunctionBeginUser; ierr = ISGetSize(points, &npoints);CHKERRQ(ierr); @@ -413,23 +432,28 @@ def make_residual_wrapper(coeffs, maps, flops): ierr = VecGetArray(F, &out);CHKERRQ(ierr); ierr = ISGetIndices(points, &whichPoints);CHKERRQ(ierr); if (ctx->point2facet) {{ - PetscInt *pointsArray = NULL; - if (npoints > 128) {{ - ierr = PetscMalloc1(npoints, &pointsArray);CHKERRQ(ierr); - }} else {{ - pointsArray = pointbuf; - }} + PetscInt nvalid = 0; + PetscInt tDPP = ndof / npoints; + ierr = PetscMalloc1(npoints, &filtpoints);CHKERRQ(ierr); 
+ if (ndof > 0) {{ ierr = PetscMalloc1(ndof, &filtdofs);CHKERRQ(ierr); }} for (PetscInt i = 0; i < npoints; i++) {{ - pointsArray[i] = ctx->point2facet[whichPoints[i]]; + PetscInt fi = ctx->point2facet[whichPoints[i]]; + if (fi >= 0) {{ + filtpoints[nvalid] = fi; + for (PetscInt d = 0; d < tDPP; d++) + filtdofs[nvalid * tDPP + d] = dofArray[i * tDPP + d]; + nvalid++; + }} }} ierr = ISRestoreIndices(points, &whichPoints);CHKERRQ(ierr); - whichPoints = pointsArray; + npoints = nvalid; + whichPoints = filtpoints; + activeDofsArray = filtdofs; }} - ctx->{}; + if (npoints) ctx->{}; if (ctx->point2facet) {{ - if (npoints > 128) {{ - ierr = PetscFree(whichPoints); - }} + ierr = PetscFree(filtpoints); + ierr = PetscFree(filtdofs); }} else {{ ierr = ISRestoreIndices(points, &whichPoints);CHKERRQ(ierr); }} @@ -461,10 +485,13 @@ def make_jacobian_wrapper(coeffs, maps, flops): const PetscInt *dofArrayWithAll, void *ctx_) {{ - const PetscScalar *state = NULL; - const PetscInt *whichPoints = NULL; - UserCtx *ctx = (UserCtx *)ctx_; + const PetscScalar *state = NULL; + const PetscInt *whichPoints = NULL; + const PetscInt *activeDofsArray = dofArray; + UserCtx *ctx = (UserCtx *)ctx_; PetscInt npoints; + PetscInt *filtpoints = NULL; + PetscInt *filtdofs = NULL; PetscErrorCode ierr; PetscFunctionBeginUser; ierr = ISGetSize(points, &npoints);CHKERRQ(ierr); @@ -474,23 +501,28 @@ def make_jacobian_wrapper(coeffs, maps, flops): }} ierr = ISGetIndices(points, &whichPoints);CHKERRQ(ierr); if (ctx->point2facet) {{ - PetscInt *pointsArray = NULL; - if (npoints > 128) {{ - ierr = PetscMalloc1(npoints, &pointsArray);CHKERRQ(ierr); - }} else {{ - pointsArray = pointbuf; - }} + PetscInt nvalid = 0; + PetscInt tDPP = ndof / npoints; + ierr = PetscMalloc1(npoints, &filtpoints);CHKERRQ(ierr); + if (ndof > 0) {{ ierr = PetscMalloc1(ndof, &filtdofs);CHKERRQ(ierr); }} for (PetscInt i = 0; i < npoints; i++) {{ - pointsArray[i] = ctx->point2facet[whichPoints[i]]; + PetscInt fi = 
ctx->point2facet[whichPoints[i]]; + if (fi >= 0) {{ + filtpoints[nvalid] = fi; + for (PetscInt d = 0; d < tDPP; d++) + filtdofs[nvalid * tDPP + d] = dofArray[i * tDPP + d]; + nvalid++; + }} }} ierr = ISRestoreIndices(points, &whichPoints);CHKERRQ(ierr); - whichPoints = pointsArray; + npoints = nvalid; + whichPoints = filtpoints; + activeDofsArray = filtdofs; }} - ctx->{}; + if (npoints) ctx->{}; if (ctx->point2facet) {{ - if (npoints > 128) {{ - ierr = PetscFree(whichPoints); - }} + ierr = PetscFree(filtpoints); + ierr = PetscFree(filtdofs); }} else {{ ierr = ISRestoreIndices(points, &whichPoints);CHKERRQ(ierr); }} @@ -504,10 +536,13 @@ def make_jacobian_wrapper(coeffs, maps, flops): def load_c_function(code, name, comm): - cppargs = ["-I%s/include" % d for d in get_petsc_dir()] - ldargs = (["-L%s/lib" % d for d in get_petsc_dir()] - + ["-Wl,-rpath,%s/lib" % d for d in get_petsc_dir()] - + ["-lpetsc", "-lm"]) + cppargs = petsctools.get_petsc_dirs(prefix="-I", subdir="include") + ldargs = ( + *petsctools.get_petsc_dirs(prefix="-L", subdir="lib"), + *petsctools.get_petsc_dirs(prefix="-Wl,-rpath,", subdir="lib"), + "-lpetsc", + "-lm", + ) dll = load(code, "c", cppargs=cppargs, ldargs=ldargs, comm=comm) fn = getattr(dll, name) fn.argtypes = [ctypes.c_voidp, ctypes.c_int, ctypes.c_voidp, @@ -517,14 +552,14 @@ def load_c_function(code, name, comm): return fn -def make_c_arguments(form, kernel, state, get_map, require_state=False, +def make_c_arguments(form, kernel, state, integral_type, require_state=False, require_facet_number=False): - mesh = form.ufl_domains()[kernel.kinfo.domain_number] - coeffs = [mesh.coordinates] - if kernel.kinfo.oriented: - coeffs.append(mesh.cell_orientations()) - if kernel.kinfo.needs_cell_sizes: - coeffs.append(mesh.cell_sizes) + all_meshes = extract_domains(form) + mesh = all_meshes[kernel.kinfo.domain_number] + coeffs = [] + coeffs.extend([all_meshes[i].coordinates for i in kernel.kinfo.active_domain_numbers.coordinates]) + 
coeffs.extend([all_meshes[i].cell_orientations() for i in kernel.kinfo.active_domain_numbers.cell_orientations]) + coeffs.extend([all_meshes[i].cell_sizes for i in kernel.kinfo.active_domain_numbers.cell_sizes]) for n, indices in kernel.kinfo.coefficient_numbers: c = form.coefficients()[n] if c is state: @@ -544,7 +579,7 @@ def make_c_arguments(form, kernel, state, get_map, require_state=False, map_args.append(None) else: data_args.extend(c.dat._kernel_args_) - map_ = get_map(c) + map_ = get_map(c.function_space(), mesh, integral_type) if map_ is not None: for k in map_._kernel_args_: if k not in seen: @@ -556,7 +591,10 @@ def make_c_arguments(form, kernel, state, get_map, require_state=False, data_args.extend(all_constants[constant_index].dat._kernel_args_) if require_facet_number: - data_args.extend(mesh.interior_facets.local_facet_dat._kernel_args_) + if integral_type == "interior_facet": + data_args.extend(mesh.interior_facets.local_facet_dat._kernel_args_) + elif integral_type == "exterior_facet": + data_args.extend(mesh.exterior_facets.local_facet_dat._kernel_args_) return data_args, map_args @@ -653,7 +691,11 @@ def sort_entities(self, dm, axis, dir, ndiv=None, divisions=None): raise RuntimeError("Must either set ndiv or divisions for PlaneSmoother!") mesh = dm.getAttr("__firedrake_mesh__") - coordinates = mesh.coordinates + if len(set(mesh)) == 1: + mesh_unique = mesh.unique() + else: + raise NotImplementedError("Not implemented for general mixed meshes") + coordinates = mesh_unique.coordinates V = coordinates.function_space() if V.finat_element.is_dg(): # We're using DG or DQ for our coordinates, so we got @@ -661,7 +703,7 @@ def sort_entities(self, dm, axis, dir, ndiv=None, divisions=None): # with access descriptor MAX to define a consistent opinion # about where the vertices are. 
CGk = V.reconstruct(family="Lagrange") - coordinates = assemble(Interpolate(coordinates, CGk, access=op2.MAX)) + coordinates = assemble(interpolate(coordinates, CGk, access=op2.MAX)) select = partial(select_entity, dm=dm, exclude="pyop2_ghost") entities = [(p, self.coords(dm, p, coordinates)) for p in @@ -761,7 +803,11 @@ def initialize(self, obj): J, bcs = self.form(obj) V = J.arguments()[0].function_space() mesh = V.mesh() - self.plex = mesh.topology_dm + if len(set(mesh)) == 1: + mesh_unique = mesh.unique() + else: + raise NotImplementedError("Not implemented for general mixed meshes") + self.plex = mesh_unique.topology_dm # We need to attach the mesh and appctx to the plex, so that # PlaneSmoothers (and any other user-customised patch # constructors) can use firedrake's opinion of what @@ -770,7 +816,7 @@ def initialize(self, obj): self.ctx = ctx self.plex.setAttr("__firedrake_ctx__", weakref.proxy(ctx)) - if mesh.cell_set._extruded: + if mesh_unique.cell_set._extruded: raise NotImplementedError("Not implemented on extruded meshes") # Validate the mesh overlap @@ -787,7 +833,7 @@ def initialize(self, obj): patch_dim = self.plex.getDimension() - patch_codim else: patch_dim = 0 - validate_overlap(mesh, patch_dim, patch_type) + validate_overlap(mesh_unique, patch_dim, patch_type) patch = obj.__class__().create(comm=mesh.comm) patch.setOptionsPrefix(prefix) @@ -813,14 +859,16 @@ def initialize(self, obj): ghost_bc_nodes = numpy.empty(0, dtype=PETSc.IntType) global_bc_nodes = numpy.empty(0, dtype=PETSc.IntType) - Jcell_kernels, Jint_facet_kernels = matrix_funptr(J, Jstate) - Jcell_kernel, = Jcell_kernels - Jcell_flops = Jcell_kernel.kinfo.kernel.num_flops - Jop_data_args, Jop_map_args = make_c_arguments(J, Jcell_kernel, Jstate, - operator.methodcaller("cell_node_map")) - code, Struct = make_jacobian_wrapper(Jop_data_args, Jop_map_args, Jcell_flops) - Jop_function = load_c_function(code, "ComputeJacobian", mesh.comm) - Jop_struct = make_c_struct(Jop_data_args, 
Jop_map_args, Jcell_kernel.funptr, Struct) + Jcell_kernels, Jint_facet_kernels, Jext_facet_kernels = matrix_funptr(J, Jstate) + + Jhas_cell_kernel = len(Jcell_kernels) > 0 + if Jhas_cell_kernel: + Jcell_kernel, = Jcell_kernels + Jcell_flops = Jcell_kernel.kinfo.kernel.num_flops + Jop_data_args, Jop_map_args = make_c_arguments(J, Jcell_kernel, Jstate, "cell") + code, Struct = make_jacobian_wrapper(Jop_data_args, Jop_map_args, Jcell_flops) + Jop_function = load_c_function(code, "ComputeJacobian", mesh.comm) + Jop_struct = make_c_struct(Jop_data_args, Jop_map_args, Jcell_kernel.funptr, Struct) Jhas_int_facet_kernel = False if len(Jint_facet_kernels) > 0: @@ -828,29 +876,46 @@ def initialize(self, obj): Jhas_int_facet_kernel = True Jint_facet_flops = Jint_facet_kernel.kinfo.kernel.num_flops facet_Jop_data_args, facet_Jop_map_args = make_c_arguments(J, Jint_facet_kernel, Jstate, - operator.methodcaller("interior_facet_node_map"), + "interior_facet", require_facet_number=True) code, Struct = make_jacobian_wrapper(facet_Jop_data_args, facet_Jop_map_args, Jint_facet_flops) facet_Jop_function = load_c_function(code, "ComputeJacobian", mesh.comm) - point2facet = mesh.interior_facets.point2facetnumber.ctypes.data + point2facet = mesh_unique.interior_facets.point2facetnumber.ctypes.data facet_Jop_struct = make_c_struct(facet_Jop_data_args, facet_Jop_map_args, Jint_facet_kernel.funptr, Struct, point2facet=point2facet) + Jhas_ext_facet_kernel = False + if len(Jext_facet_kernels) > 0: + Jext_facet_kernel, = Jext_facet_kernels + Jhas_ext_facet_kernel = True + Jext_facet_flops = Jext_facet_kernel.kinfo.kernel.num_flops + ext_facet_Jop_data_args, ext_facet_Jop_map_args = make_c_arguments(J, Jext_facet_kernel, Jstate, + "exterior_facet", + require_facet_number=True) + code, Struct = make_jacobian_wrapper(ext_facet_Jop_data_args, ext_facet_Jop_map_args, Jext_facet_flops) + ext_facet_Jop_function = load_c_function(code, "ComputeJacobian", mesh.comm) + ext_point2facet = 
mesh_unique.exterior_facets.point2facetnumber.ctypes.data + ext_facet_Jop_struct = make_c_struct(ext_facet_Jop_data_args, ext_facet_Jop_map_args, + Jext_facet_kernel.funptr, Struct, + point2facet=ext_point2facet) + set_residual = hasattr(ctx, "F") and isinstance(obj, PETSc.SNES) if set_residual: F = ctx.F Fstate = ctx._problem.u - Fcell_kernels, Fint_facet_kernels = residual_funptr(F, Fstate) - - Fcell_kernel, = Fcell_kernels - Fcell_flops = Fcell_kernel.kinfo.kernel.num_flops - Fop_data_args, Fop_map_args = make_c_arguments(F, Fcell_kernel, Fstate, - operator.methodcaller("cell_node_map"), - require_state=True) - code, Struct = make_residual_wrapper(Fop_data_args, Fop_map_args, Fcell_flops) - Fop_function = load_c_function(code, "ComputeResidual", mesh.comm) - Fop_struct = make_c_struct(Fop_data_args, Fop_map_args, Fcell_kernel.funptr, Struct) + Fcell_kernels, Fint_facet_kernels, Fext_facet_kernels = residual_funptr(F, Fstate) + + Fhas_cell_kernel = len(Fcell_kernels) > 0 + if Fhas_cell_kernel: + Fcell_kernel, = Fcell_kernels + Fcell_flops = Fcell_kernel.kinfo.kernel.num_flops + Fop_data_args, Fop_map_args = make_c_arguments(F, Fcell_kernel, Fstate, + "cell", + require_state=True) + code, Struct = make_residual_wrapper(Fop_data_args, Fop_map_args, Fcell_flops) + Fop_function = load_c_function(code, "ComputeResidual", mesh.comm) + Fop_struct = make_c_struct(Fop_data_args, Fop_map_args, Fcell_kernel.funptr, Struct) Fhas_int_facet_kernel = False if len(Fint_facet_kernels) > 0: @@ -858,7 +923,7 @@ def initialize(self, obj): Fhas_int_facet_kernel = True Fint_facet_flops = Fint_facet_kernel.kinfo.kernel.num_flops facet_Fop_data_args, facet_Fop_map_args = make_c_arguments(F, Fint_facet_kernel, Fstate, - operator.methodcaller("interior_facet_node_map"), + "interior_facet", require_state=True, require_facet_number=True) code, Struct = make_jacobian_wrapper(facet_Fop_data_args, facet_Fop_map_args, Fint_facet_flops) @@ -868,8 +933,24 @@ def initialize(self, obj): 
Fint_facet_kernel.funptr, Struct, point2facet=point2facet) + Fhas_ext_facet_kernel = False + if len(Fext_facet_kernels) > 0: + Fext_facet_kernel, = Fext_facet_kernels + Fhas_ext_facet_kernel = True + Fext_facet_flops = Fext_facet_kernel.kinfo.kernel.num_flops + ext_facet_Fop_data_args, ext_facet_Fop_map_args = make_c_arguments(F, Fext_facet_kernel, Fstate, + "exterior_facet", + require_state=True, + require_facet_number=True) + code, Struct = make_residual_wrapper(ext_facet_Fop_data_args, ext_facet_Fop_map_args, Fext_facet_flops) + ext_facet_Fop_function = load_c_function(code, "ComputeResidual", mesh.comm) + ext_point2facet = extract_unique_domain(F).exterior_facets.point2facetnumber.ctypes.data + ext_facet_Fop_struct = make_c_struct(ext_facet_Fop_data_args, ext_facet_Fop_map_args, + Fext_facet_kernel.funptr, Struct, + point2facet=ext_point2facet) + patch.setDM(self.plex) - patch.setPatchCellNumbering(mesh._cell_numbering) + patch.setPatchCellNumbering(mesh_unique._cell_numbering) offsets = numpy.append([0], numpy.cumsum([W.dof_count for W in V])).astype(PETSc.IntType) @@ -880,22 +961,34 @@ def initialize(self, obj): offsets, ghost_bc_nodes, global_bc_nodes) - self.Jop_struct = Jop_struct - set_patch_jacobian(patch, ctypes.cast(Jop_function, ctypes.c_voidp).value, - ctypes.addressof(Jop_struct), is_snes=is_snes) + if Jhas_cell_kernel: + self.Jop_struct = Jop_struct + set_patch_jacobian(patch, ctypes.cast(Jop_function, ctypes.c_voidp).value, + ctypes.addressof(Jop_struct), is_snes=is_snes) if Jhas_int_facet_kernel: self.facet_Jop_struct = facet_Jop_struct set_patch_jacobian(patch, ctypes.cast(facet_Jop_function, ctypes.c_voidp).value, ctypes.addressof(facet_Jop_struct), is_snes=is_snes, interior_facets=True) + if Jhas_ext_facet_kernel: + self.ext_facet_Jop_struct = ext_facet_Jop_struct + set_patch_jacobian(patch, ctypes.cast(ext_facet_Jop_function, ctypes.c_voidp).value, + ctypes.addressof(ext_facet_Jop_struct), is_snes=is_snes, + exterior_facets=True) if 
set_residual: - self.Fop_struct = Fop_struct - set_patch_residual(patch, ctypes.cast(Fop_function, ctypes.c_voidp).value, - ctypes.addressof(Fop_struct), is_snes=is_snes) + if Fhas_cell_kernel: + self.Fop_struct = Fop_struct + set_patch_residual(patch, ctypes.cast(Fop_function, ctypes.c_voidp).value, + ctypes.addressof(Fop_struct), is_snes=is_snes) if Fhas_int_facet_kernel: set_patch_residual(patch, ctypes.cast(facet_Fop_function, ctypes.c_voidp).value, ctypes.addressof(facet_Fop_struct), is_snes=is_snes, interior_facets=True) + if Fhas_ext_facet_kernel: + self.ext_facet_Fop_struct = ext_facet_Fop_struct + set_patch_residual(patch, ctypes.cast(ext_facet_Fop_function, ctypes.c_voidp).value, + ctypes.addressof(ext_facet_Fop_struct), is_snes=is_snes, + exterior_facets=True) patch.setPatchConstructType(PETSc.PC.PatchConstructType.PYTHON, operator=self.user_construction_op) patch.setAttr("ctx", ctx) diff --git a/firedrake/preconditioners/pmg.py b/firedrake/preconditioners/pmg.py index f4b45a67a5..5b53a9cbc2 100644 --- a/firedrake/preconditioners/pmg.py +++ b/firedrake/preconditioners/pmg.py @@ -1,4 +1,4 @@ -from functools import partial +from functools import cached_property, partial from itertools import chain from firedrake.dmhooks import (attach_hooks, get_appctx, push_appctx, pop_appctx, add_hook, get_parent, push_parent, pop_parent, @@ -8,8 +8,7 @@ from firedrake.nullspace import VectorSpaceBasis, MixedVectorSpaceBasis from firedrake.solving_utils import _SNESContext from firedrake.tsfc_interface import extract_numbered_coefficients -from firedrake.utils import ScalarType_c, IntType_c, cached_property -from finat.element_factory import create_element +from firedrake.utils import IntType_c from tsfc import compile_expression_dual_evaluation from pyop2 import op2 from pyop2.caching import serial_cache @@ -529,8 +528,7 @@ def coarsen_bc_value(self, bc, cV): def prolongation_transfer_kernel_action(Vf, expr): - to_element = create_element(Vf.ufl_element()) - kernel = 
compile_expression_dual_evaluation(expr, to_element, Vf.ufl_element()) + kernel = compile_expression_dual_evaluation(expr, Vf.ufl_element()) coefficients = extract_numbered_coefficients(expr, kernel.coefficient_numbers) if kernel.needs_external_coords: coefficients = [Vf.mesh().coordinates] + coefficients @@ -1101,13 +1099,13 @@ def make_mapping_code(Q, cmapping, fmapping, t_in, t_out): u = ufl.Coefficient(Q) expr = ufl.dot(tensor, u) prolong_map_kernel, coefficients = prolongation_transfer_kernel_action(Q, expr) - prolong_map_code = cache_generate_code(prolong_map_kernel, Q._comm) + prolong_map_code = cache_generate_code(prolong_map_kernel, Q.comm) prolong_map_code = prolong_map_code.replace("void expression_kernel", "static void prolongation_mapping") coefficients.remove(u) expr = ufl.dot(u, tensor) restrict_map_kernel, coefficients = prolongation_transfer_kernel_action(Q, expr) - restrict_map_code = cache_generate_code(restrict_map_kernel, Q._comm) + restrict_map_code = cache_generate_code(restrict_map_kernel, Q.comm) restrict_map_code = restrict_map_code.replace("void expression_kernel", "static void restriction_mapping") restrict_map_code = restrict_map_code.replace("#include ", "") restrict_map_code = restrict_map_code.replace("#include ", "") @@ -1220,16 +1218,15 @@ def work_function(self, V): @cached_property def _weight(self): + cell_set = self.Vf.mesh().topology.unique().cell_set weight = firedrake.Function(self.Vf) - size = self.Vf.finat_element.space_dimension() * self.Vf.block_size + wsize = self.Vf.finat_element.space_dimension() * self.Vf.block_size kernel_code = f""" - void weight(PetscScalar *restrict w){{ - for(PetscInt i=0; i<{size}; i++) w[i] += 1.0; - return; - }} - """ - kernel = op2.Kernel(kernel_code, "weight", requires_zeroed_output_arguments=True) - op2.par_loop(kernel, weight.cell_set, weight.dat(op2.INC, weight.cell_node_map())) + void multiplicity(PetscScalar *restrict w) {{ + for (PetscInt i=0; i<{wsize}; i++) w[i] += 1; + }}""" + 
kernel = op2.Kernel(kernel_code, "multiplicity") + op2.par_loop(kernel, cell_set, weight.dat(op2.INC, weight.cell_node_map())) with weight.dat.vec as w: w.reciprocal() return weight @@ -1237,27 +1234,41 @@ def _weight(self): @cached_property def _kernels(self): try: - # We generate custom prolongation and restriction kernels mainly because: - # 1. Code generation for the transpose of prolongation is not readily available - # 2. Dual evaluation of EnrichedElement is not yet implemented in FInAT - uf_map = get_permuted_map(self.Vf) - uc_map = get_permuted_map(self.Vc) - prolong_kernel, restrict_kernel, coefficients = self.make_blas_kernels(self.Vf, self.Vc) - prolong_args = [prolong_kernel, self.uf.cell_set, - self.uf.dat(op2.INC, uf_map), - self.uc.dat(op2.READ, uc_map), - self._weight.dat(op2.READ, uf_map)] - except ValueError: - # The elements do not have the expected tensor product structure - # Fall back to aij kernels - uf_map = self.Vf.cell_node_map() - uc_map = self.Vc.cell_node_map() - prolong_kernel, restrict_kernel, coefficients = self.make_kernels(self.Vf, self.Vc) - prolong_args = [prolong_kernel, self.uf.cell_set, - self.uf.dat(op2.WRITE, uf_map), - self.uc.dat(op2.READ, uc_map)] - - restrict_args = [restrict_kernel, self.uf.cell_set, + self.Vf.finat_element.dual_basis + self.Vc.finat_element.dual_basis + native_interpolation_supported = True + except NotImplementedError: + native_interpolation_supported = False + + if native_interpolation_supported: + return self._build_native_interpolators() + else: + return self._build_custom_interpolators() + + def _build_native_interpolators(self): + from firedrake.interpolation import interpolate, get_interpolator + P = get_interpolator(interpolate(self.uc, self.Vf)) + prolong = partial(P.assemble, tensor=self.uf) + + rf = firedrake.Function(self.Vf.dual(), val=self.uf.dat) + rc = firedrake.Function(self.Vc.dual(), val=self.uc.dat) + vc = firedrake.TestFunction(self.Vc) + R = get_interpolator(interpolate(vc, rf)) 
+ restrict = partial(R.assemble, tensor=rc) + return prolong, restrict + + def _build_custom_interpolators(self): + # We generate custom prolongation and restriction kernels because + # dual evaluation of EnrichedElement is not yet implemented in FInAT + uf_map = get_permuted_map(self.Vf) + uc_map = get_permuted_map(self.Vc) + prolong_kernel, restrict_kernel, coefficients = self.make_blas_kernels(self.Vf, self.Vc) + cell_set = self.Vf.mesh().topology.unique().cell_set + prolong_args = [prolong_kernel, cell_set, + self.uf.dat(op2.INC, uf_map), + self.uc.dat(op2.READ, uc_map), + self._weight.dat(op2.READ, uf_map)] + restrict_args = [restrict_kernel, cell_set, self.uc.dat(op2.INC, uc_map), self.uf.dat(op2.READ, uf_map), self._weight.dat(op2.READ, uf_map)] @@ -1444,49 +1455,6 @@ def make_blas_kernels(self, Vf, Vc): ldargs=BLASLAPACK_LIB.split(), requires_zeroed_output_arguments=True) return cache.setdefault(key, (prolong_kernel, restrict_kernel, coefficients)) - def make_kernels(self, Vf, Vc): - """ - Interpolation and restriction kernels between arbitrary elements. - - This is temporary while we wait for dual evaluation in FInAT. - """ - cache = self._cache_kernels - key = (Vf.ufl_element(), Vc.ufl_element()) - try: - return cache[key] - except KeyError: - pass - prolong_kernel, _ = prolongation_transfer_kernel_action(Vf, self.uc) - matrix_kernel, coefficients = prolongation_transfer_kernel_action(Vf, firedrake.TrialFunction(Vc)) - - # The way we transpose the prolongation kernel is suboptimal. - # A local matrix is generated each time the kernel is executed. 
- element_kernel = cache_generate_code(matrix_kernel, Vf._comm) - element_kernel = element_kernel.replace("void expression_kernel", "static void expression_kernel") - coef_args = "".join([", c%d" % i for i in range(len(coefficients))]) - coef_decl = "".join([", const %s *restrict c%d" % (ScalarType_c, i) for i in range(len(coefficients))]) - dimc = Vc.finat_element.space_dimension() * Vc.block_size - dimf = Vf.finat_element.space_dimension() * Vf.block_size - restrict_code = f""" - {element_kernel} - - void restriction({ScalarType_c} *restrict Rc, const {ScalarType_c} *restrict Rf, const {ScalarType_c} *restrict w{coef_decl}) - {{ - {ScalarType_c} Afc[{dimf}*{dimc}] = {{0}}; - expression_kernel(Afc{coef_args}); - for ({IntType_c} i = 0; i < {dimf}; i++) - for ({IntType_c} j = 0; j < {dimc}; j++) - Rc[j] += Afc[i*{dimc} + j] * Rf[i] * w[i]; - }} - """ - restrict_kernel = op2.Kernel( - restrict_code, - "restriction", - requires_zeroed_output_arguments=True, - events=matrix_kernel.events, - ) - return cache.setdefault(key, (prolong_kernel, restrict_kernel, coefficients)) - def multTranspose(self, mat, rf, rc): """ Implement restriction: restrict residual on fine grid rf to coarse grid rc. 
@@ -1559,68 +1527,23 @@ def getNestSubMatrix(self, i, j): if i == j: s = self._standalones[i] sizes = (s.uf.dof_dset.layout_vec.getSizes(), s.uc.dof_dset.layout_vec.getSizes()) - M_shll = PETSc.Mat().createPython(sizes, s, comm=s.uf._comm) + M_shll = PETSc.Mat().createPython(sizes, s, comm=s.uf.comm) M_shll.setUp() return M_shll else: return None -def prolongation_matrix_aij(P1, Pk, P1_bcs=[], Pk_bcs=[]): - if isinstance(P1, firedrake.Function): - P1 = P1.function_space() - if isinstance(Pk, firedrake.Function): - Pk = Pk.function_space() - sp = op2.Sparsity((Pk.dof_dset, - P1.dof_dset), - {(i, j): [(rmap, cmap, None)] - for i, rmap in enumerate(Pk.cell_node_map()) - for j, cmap in enumerate(P1.cell_node_map()) - if i == j}) - mat = op2.Mat(sp, PETSc.ScalarType) - mesh = Pk.mesh() - - fele = Pk.ufl_element() - if type(fele) is finat.ufl.MixedElement: - for i in range(fele.num_sub_elements): - Pk_bcs_i = [bc for bc in Pk_bcs if bc.function_space().index == i] - P1_bcs_i = [bc for bc in P1_bcs if bc.function_space().index == i] - - rlgmap, clgmap = mat[i, i].local_to_global_maps - rlgmap = Pk.sub(i).local_to_global_map(Pk_bcs_i, lgmap=rlgmap) - clgmap = P1.sub(i).local_to_global_map(P1_bcs_i, lgmap=clgmap) - unroll = any(bc.function_space().component is not None - for bc in chain(Pk_bcs_i, P1_bcs_i) if bc is not None) - matarg = mat[i, i](op2.WRITE, (Pk.sub(i).cell_node_map(), P1.sub(i).cell_node_map()), - lgmaps=((rlgmap, clgmap), ), unroll_map=unroll) - expr = firedrake.TrialFunction(P1.sub(i)) - kernel, coefficients = prolongation_transfer_kernel_action(Pk.sub(i), expr) - parloop_args = [kernel, mesh.cell_set, matarg] - for coefficient in coefficients: - m_ = coefficient.cell_node_map() - parloop_args.append(coefficient.dat(op2.READ, m_)) - - op2.par_loop(*parloop_args) - - else: - rlgmap, clgmap = mat.local_to_global_maps - rlgmap = Pk.local_to_global_map(Pk_bcs, lgmap=rlgmap) - clgmap = P1.local_to_global_map(P1_bcs, lgmap=clgmap) - unroll = 
any(bc.function_space().component is not None - for bc in chain(Pk_bcs, P1_bcs) if bc is not None) - matarg = mat(op2.WRITE, (Pk.cell_node_map(), P1.cell_node_map()), - lgmaps=((rlgmap, clgmap), ), unroll_map=unroll) - expr = firedrake.TrialFunction(P1) - kernel, coefficients = prolongation_transfer_kernel_action(Pk, expr) - parloop_args = [kernel, mesh.cell_set, matarg] - for coefficient in coefficients: - m_ = coefficient.cell_node_map() - parloop_args.append(coefficient.dat(op2.READ, m_)) - - op2.par_loop(*parloop_args) - - mat.assemble() - return mat.handle +def prolongation_matrix_aij(Vc, Vf, Vc_bcs=(), Vf_bcs=()): + if isinstance(Vf, firedrake.Function): + Vf = Vf.function_space() + if isinstance(Vc, firedrake.Function): + Vc = Vc.function_space() + bcs = Vc_bcs + Vf_bcs + interp = firedrake.interpolate(firedrake.TrialFunction(Vc), Vf) + mat_type = "nest" if len(Vc) > 1 or len(Vf) > 1 else None + mat = firedrake.assemble(interp, bcs=bcs, mat_type=mat_type) + return mat.petscmat def prolongation_matrix_matfree(Vc, Vf, Vc_bcs=[], Vf_bcs=[]): @@ -1631,6 +1554,6 @@ def prolongation_matrix_matfree(Vc, Vf, Vc_bcs=[], Vf_bcs=[]): ctx = StandaloneInterpolationMatrix(Vc, Vf, Vc_bcs, Vf_bcs) sizes = (Vf.dof_dset.layout_vec.getSizes(), Vc.dof_dset.layout_vec.getSizes()) - M_shll = PETSc.Mat().createPython(sizes, ctx, comm=Vf._comm) + M_shll = PETSc.Mat().createPython(sizes, ctx, comm=Vf.comm) M_shll.setUp() return M_shll diff --git a/firedrake/projection.py b/firedrake/projection.py index 3ce346f664..5a2d83f371 100644 --- a/firedrake/projection.py +++ b/firedrake/projection.py @@ -8,7 +8,9 @@ import firedrake from firedrake.bcs import BCBase from firedrake.petsc import PETSc -from firedrake.utils import cached_property, complex_mode, SLATE_SUPPORTS_COMPLEX +from functools import cached_property + +from firedrake.utils import complex_mode, SLATE_SUPPORTS_COMPLEX from firedrake import functionspaceimpl from firedrake import function from firedrake.adjoint_utils import 
annotate_project diff --git a/firedrake/pyplot/__init__.py b/firedrake/pyplot/__init__.py index ae91f23f2e..2eb6de5976 100644 --- a/firedrake/pyplot/__init__.py +++ b/firedrake/pyplot/__init__.py @@ -1,8 +1,8 @@ -from .mpl import ( +from firedrake.pyplot.mpl import ( plot, triplot, tricontourf, tricontour, trisurf, tripcolor, quiver, streamplot, FunctionPlotter ) -from .pgf import pgfplot +from firedrake.pyplot.pgf import pgfplot __all__ = [ "plot", "triplot", "tricontourf", "tricontour", "trisurf", "tripcolor", diff --git a/firedrake/pyplot/mpl.py b/firedrake/pyplot/mpl.py index 3cf010a1c9..7a416e5702 100644 --- a/firedrake/pyplot/mpl.py +++ b/firedrake/pyplot/mpl.py @@ -18,7 +18,7 @@ import mpl_toolkits.mplot3d from mpl_toolkits.mplot3d.art3d import Line3DCollection, Poly3DCollection from math import factorial -from firedrake import (Interpolate, sqrt, inner, Function, SpatialCoordinate, +from firedrake import (interpolate, sqrt, inner, Function, SpatialCoordinate, FunctionSpace, VectorFunctionSpace, PointNotInDomainError, Constant, assemble, dx) from firedrake.mesh import MeshGeometry @@ -100,10 +100,10 @@ def triplot(mesh, axes=None, interior_kw={}, boundary_kw={}): :arg boundary_kw: keyword arguments to apply when plotting the mesh boundary :return: list of matplotlib :class:`Collection ` objects """ - gdim = mesh.geometric_dimension() - tdim = mesh.topological_dimension() + gdim = mesh.geometric_dimension + tdim = mesh.topological_dimension BoundaryCollection, InteriorCollection = _get_collection_types(gdim, tdim) - quad = mesh.ufl_cell().cellname() == "quadrilateral" + quad = mesh.ufl_cell().cellname == "quadrilateral" if mesh.extruded: raise NotImplementedError("Visualizing extruded meshes not implemented yet!") @@ -120,7 +120,7 @@ def triplot(mesh, axes=None, interior_kw={}, boundary_kw={}): if element.degree() != 1: # Interpolate to piecewise linear. 
V = VectorFunctionSpace(mesh, element.family(), 1) - coordinates = assemble(Interpolate(coordinates, V)) + coordinates = assemble(interpolate(coordinates, V)) coords = toreal(coordinates.dat.data_ro_with_halos, "real") result = [] @@ -215,7 +215,7 @@ def _plot_2d_field(method_name, function, *args, complex_component="real", **kwa if len(function.ufl_shape) == 1: element = function.ufl_element().sub_elements[0] Q = FunctionSpace(mesh, element) - function = assemble(Interpolate(sqrt(inner(function, function)), Q)) + function = assemble(interpolate(sqrt(inner(function, function)), Q)) num_sample_points = kwargs.pop("num_sample_points", 10) function_plotter = FunctionPlotter(mesh, num_sample_points) @@ -319,14 +319,14 @@ def trisurf(function, *args, complex_component="real", **kwargs): Q = function.function_space() mesh = Q.mesh() - if mesh.geometric_dimension() == 3: + if mesh.geometric_dimension == 3: return _trisurf_3d(axes, function, *args, complex_component=complex_component, **_kwargs) _kwargs.update({"shade": False}) if len(function.ufl_shape) == 1: element = function.ufl_element().sub_elements[0] Q = FunctionSpace(mesh, element) - function = assemble(Interpolate(sqrt(inner(function, function)), Q)) + function = assemble(interpolate(sqrt(inner(function, function)), Q)) num_sample_points = kwargs.pop("num_sample_points", 10) function_plotter = FunctionPlotter(mesh, num_sample_points) @@ -355,7 +355,7 @@ def quiver(function, *, complex_component="real", **kwargs): coords = toreal(extract_unique_domain(function).coordinates.dat.data_ro, "real") V = extract_unique_domain(function).coordinates.function_space() - function_interp = assemble(Interpolate(function, V)) + function_interp = assemble(interpolate(function, V)) vals = toreal(function_interp.dat.data_ro, complex_component) C = np.linalg.norm(vals, axis=1) return axes.quiver(*(coords.T), *(vals.T), C, **kwargs) @@ -471,7 +471,7 @@ def __init__(self, function, resolution, min_length, max_time, tolerance, coords = 
toreal(mesh.coordinates.dat.data_ro, "real") self._xmin = coords.min(axis=0) xmax = coords.max(axis=0) - self._r = self.resolution / np.sqrt(mesh.geometric_dimension()) + self._r = self.resolution / np.sqrt(mesh.geometric_dimension) shape = tuple(((xmax - self._xmin) / self._r).astype(int) + 2) self._grid = np.full(shape, 4 * self.resolution) @@ -752,7 +752,7 @@ def plot(function, *args, num_sample_points=10, complex_component="real", **kwar if isinstance(line, MeshGeometry): raise TypeError("Expected Function, not Mesh; see firedrake.triplot") - if extract_unique_domain(line).geometric_dimension() > 1: + if extract_unique_domain(line).geometric_dimension > 1: raise ValueError("Expected 1D Function; for plotting higher-dimensional fields, " "see tricontourf, tripcolor, quiver, trisurf") @@ -816,7 +816,7 @@ def _bezier_plot(function, axes, complex_component="real", **kwargs): mesh = function.function_space().mesh() if deg == 0: V = FunctionSpace(mesh, "DG", 1) - interp = assemble(Interpolate(function, V)) + interp = assemble(interpolate(function, V)) return _bezier_plot(interp, axes, complex_component=complex_component, **kwargs) y_vals = _bezier_calculate_points(function) @@ -905,7 +905,7 @@ def __init__(self, mesh, num_sample_points): # num_sample_points must be of the form 3k + 1 for cubic Bezier plotting if num_sample_points % 3 != 1: num_sample_points = (num_sample_points // 3) * 3 + 1 - if mesh.topological_dimension() == 1: + if mesh.topological_dimension == 1: self._setup_1d(mesh, num_sample_points) else: self._setup_nd(mesh, num_sample_points) @@ -914,7 +914,7 @@ def _setup_1d(self, mesh, num_sample_points): self._reference_points = np.linspace(0.0, 1.0, num_sample_points).reshape(-1, 1) def _setup_nd(self, mesh, num_sample_points): - cell_name = mesh.ufl_cell().cellname() + cell_name = mesh.ufl_cell().cellname if cell_name == "triangle": x = np.array([0, 0, 1]) y = np.array([0, 1, 0]) @@ -943,13 +943,13 @@ def _setup_nd(self, mesh, num_sample_points): 
all_triangles = (triangles + add_idx).reshape(-1, 3) coordinate_values = self(mesh.coordinates) - X = coordinate_values.reshape(-1, mesh.geometric_dimension()) + X = coordinate_values.reshape(-1, mesh.geometric_dimension) coords = toreal(X, "real") - if mesh.geometric_dimension() == 2: + if mesh.geometric_dimension == 2: x, y = coords[:, 0], coords[:, 1] self.triangulation = matplotlib.tri.Triangulation(x, y, triangles=all_triangles) - elif mesh.geometric_dimension() == 3: + elif mesh.geometric_dimension == 3: self.coordinates = coords self.triangles = all_triangles @@ -958,7 +958,7 @@ def __call__(self, function): # if the function space is the same as the last one Q = function.function_space() mesh = Q.mesh() - dimension = mesh.topological_dimension() + dimension = mesh.topological_dimension keys = {1: (0,), 2: (0, 0)} fiat_element = Q.finat_element.fiat_equivalent diff --git a/firedrake/pyplot/pgf.py b/firedrake/pyplot/pgf.py index 6640454679..7ad41bb659 100644 --- a/firedrake/pyplot/pgf.py +++ b/firedrake/pyplot/pgf.py @@ -212,7 +212,7 @@ def pgfplot(f, filename, degree=1, complex_component='real', print_latex_example V = f.function_space() elem = V.ufl_element() mesh = V.ufl_domain() - dim = mesh.geometric_dimension() + dim = mesh.geometric_dimension if dim not in (2, 3): raise NotImplementedError(f"Not yet implemented for functions in spatial dimension {dim}") if mesh.extruded: diff --git a/firedrake/randomfunctiongen.py b/firedrake/randomfunctiongen.py index 87afd2c3e3..8f03504754 100644 --- a/firedrake/randomfunctiongen.py +++ b/firedrake/randomfunctiongen.py @@ -326,8 +326,8 @@ def func(self, *args, **kwargs): def __init__(self, *args, **kwargs): _kwargs = kwargs.copy() - self._comm = _kwargs.pop('comm', COMM_WORLD) - if self._comm.Get_size() > 1 and module_attr not in ['PCG64', 'PCG64DXSM', 'Philox']: + self.comm = _kwargs.pop('comm', COMM_WORLD) + if self.comm.Get_size() > 1 and module_attr not in ['PCG64', 'PCG64DXSM', 'Philox']: raise TypeError("Use 
'PCG64', 'PCG64DXSM', or 'Philox', for parallel RNG") self._init(*args, **_kwargs) @@ -338,8 +338,8 @@ def seed(self, *args, **kwargs): def _init(self, *args, **kwargs): if 'inc' in kwargs: raise RuntimeError("'inc' is no longer a valid keyword; see ") - rank = self._comm.Get_rank() - size = self._comm.Get_size() + rank = self.comm.Get_rank() + size = self.comm.Get_size() _kwargs = kwargs.copy() seed = _kwargs.get("seed") if seed is None: @@ -348,7 +348,7 @@ def _init(self, *args, **kwargs): seed = randomgen.SeedSequence().entropy else: seed = None - seed = self._comm.bcast(seed, root=0) + seed = self.comm.bcast(seed, root=0) if isinstance(seed, randomgen.SeedSequence): # We assume that the user has generated # a parallel-safe SeedSequence. @@ -363,8 +363,8 @@ def _init(self, *args, **kwargs): seed = kwargs.get("seed") # counter = kwargs.get("counter") key = kwargs.get("key") - if self._comm.Get_size() > 1: - rank = self._comm.Get_rank() + if self.comm.Get_size() > 1: + rank = self.comm.Get_rank() if seed is not None: raise TypeError("'seed' should not be used when using 'Philox' in parallel. 
A random 'key' is automatically generated and used unless specified.") # if 'key' is to be passed, it is users' responsibility diff --git a/firedrake/slate/__init__.py b/firedrake/slate/__init__.py index b54e4a1446..af3993a88a 100644 --- a/firedrake/slate/__init__.py +++ b/firedrake/slate/__init__.py @@ -1,2 +1,8 @@ -from firedrake.slate.slate import * # noqa: F401 -from firedrake.slate.static_condensation import * # noqa: F401 +from firedrake.slate.slate import ( # noqa: F401 + AssembledVector, Block, Factorization, Tensor, Inverse, + Transpose, Negative, Add, Mul, Solve, BlockAssembledVector, + DiagonalTensor, Reciprocal, TensorOp, TensorBase +) +from firedrake.slate.static_condensation import ( # noqa: F401 + HybridizationPC, SchurComplementBuilder, SCPC +) diff --git a/firedrake/slate/slac/__init__.py b/firedrake/slate/slac/__init__.py index 33016cea08..7d7a5e6a07 100644 --- a/firedrake/slate/slac/__init__.py +++ b/firedrake/slate/slac/__init__.py @@ -1 +1 @@ -from firedrake.slate.slac.compiler import * # noqa: F401 +from firedrake.slate.slac.compiler import compile_expression # noqa: F401 diff --git a/firedrake/slate/slac/compiler.py b/firedrake/slate/slac/compiler.py index 84c1cddc08..567701ef4b 100644 --- a/firedrake/slate/slac/compiler.py +++ b/firedrake/slate/slac/compiler.py @@ -23,7 +23,6 @@ from gem import impero_utils from itertools import chain -from pyop2.utils import get_petsc_dir from pyop2.mpi import COMM_WORLD from pyop2.codegen.rep2loopy import SolveCallable, INVCallable from pyop2.caching import memory_and_disk_cache @@ -36,6 +35,7 @@ from gem import indices as make_indices from tsfc.kernel_args import OutputKernelArg, CoefficientKernelArg from tsfc.loopy import generate as generate_loopy +from tsfc.kernel_interface.firedrake_loopy import ActiveDomainNumbers import copy from petsc4py import PETSc @@ -44,13 +44,6 @@ GREEN = "\033[1;37;32m%s\033[0m" - -try: - PETSC_DIR, PETSC_ARCH = get_petsc_dir() -except ValueError: - PETSC_DIR, = 
get_petsc_dir() - PETSC_ARCH = None - BLASLAPACK_LIB = None BLASLAPACK_INCLUDE = None if COMM_WORLD.rank == 0: @@ -192,14 +185,20 @@ def generate_loopy_kernel(slate_expr, compiler_parameters=None): kinfo = KernelInfo(kernel=loopykernel, integral_type="cell", # slate can only do things as contributions to the cell integrals - oriented=builder.bag.needs_cell_orientations, subdomain_id=("otherwise",), domain_number=0, + active_domain_numbers=ActiveDomainNumbers(coordinates=(0, ) if builder.bag.needs_coordinates else (), + cell_orientations=(0, ) if builder.bag.needs_cell_orientations else (), + cell_sizes=(0, ) if builder.bag.needs_cell_sizes else (), + exterior_facets=(), + interior_facets=(), + orientations_cell=(), + orientations_exterior_facet=(), + orientations_interior_facet=(),), coefficient_numbers=coefficient_numbers, constant_numbers=constant_numbers, needs_cell_facets=builder.bag.needs_cell_facets, pass_layer_arg=builder.bag.needs_mesh_layers, - needs_cell_sizes=builder.bag.needs_cell_sizes, arguments=arguments, events=events) diff --git a/firedrake/slate/slac/kernel_builder.py b/firedrake/slate/slac/kernel_builder.py index 8cf27b5298..1e66569f42 100644 --- a/firedrake/slate/slac/kernel_builder.py +++ b/firedrake/slate/slac/kernel_builder.py @@ -132,13 +132,22 @@ def collect_tsfc_kernel_data(self, mesh, tsfc_coefficients, tsfc_constants, wrap that are coordinates, orientations, cell sizes and cofficients. 
""" - kernel_data = [(mesh.coordinates, self.coordinates_arg_name)] - - if kinfo.oriented: + kernel_data = [] + for coord_domain_number in kinfo.active_domain_numbers.coordinates: + if coord_domain_number != 0: + raise ValueError("Slate currently only supports single domain") + self.bag.needs_coordinates = True + kernel_data.append((mesh.coordinates, self.coordinates_arg_name)) + + for cell_orientation_domain_number in kinfo.active_domain_numbers.cell_orientations: + if cell_orientation_domain_number != 0: + raise ValueError("Slate currently only supports single domain") self.bag.needs_cell_orientations = True kernel_data.append((mesh.cell_orientations(), self.cell_orientations_arg_name)) - if kinfo.needs_cell_sizes: + for cell_size_domain_number in kinfo.active_domain_numbers.cell_sizes: + if cell_size_domain_number != 0: + raise ValueError("Slate currently only supports single domain") self.bag.needs_cell_sizes = True kernel_data.append((mesh.cell_sizes, self.cell_sizes_arg_name)) @@ -194,9 +203,9 @@ def facet_integral_predicates(self, mesh, integral_type, kinfo, subdomain_id): self.bag.needs_cell_facets = True # Number of recerence cell facets if mesh.cell_set._extruded: - self.num_facets = mesh._base_mesh.ufl_cell().num_facets() + self.num_facets = mesh._base_mesh.ufl_cell().num_facets else: - self.num_facets = mesh.ufl_cell().num_facets() + self.num_facets = mesh.ufl_cell().num_facets # Index for loop over cell faces of reference cell fidx = self.bag.index_creator((self.num_facets,)) @@ -337,10 +346,11 @@ def generate_wrapper_kernel_args(self, tensor2temp): args = [] tmp_args = [] - coords_extent = self.extent(self.expression.ufl_domain().coordinates) - coords_loopy_arg = loopy.GlobalArg(self.coordinates_arg_name, shape=coords_extent, - dtype=self.tsfc_parameters["scalar_type"]) - args.append(kernel_args.CoordinatesKernelArg(coords_loopy_arg)) + if self.bag.needs_coordinates: + coords_extent = self.extent(self.expression.ufl_domain().coordinates) + 
coords_loopy_arg = loopy.GlobalArg(self.coordinates_arg_name, shape=coords_extent, + dtype=self.tsfc_parameters["scalar_type"]) + args.append(kernel_args.CoordinatesKernelArg(coords_loopy_arg)) if self.bag.needs_cell_orientations: ori_extent = self.extent(self.expression.ufl_domain().cell_orientations()) @@ -440,9 +450,15 @@ def generate_tsfc_calls(self, terminal, loopy_tensor): if subdomain_id != "otherwise": raise NotImplementedError("No subdomain markers for cells yet") elif self.is_integral_type(integral_type, "facet_integral"): - predicates, fidx, facet_arg = self.facet_integral_predicates(mesh, integral_type, kinfo, subdomain_id) - reads.append(facet_arg) - inames_dep.append(fidx[0].name) + if kinfo.active_domain_numbers._asdict()[{"exterior_facet": "exterior_facets", + "exterior_facet_vert": "exterior_facets", + "interior_facet": "interior_facets", + "interior_facet_vert": "interior_facets"}[kinfo.integral_type]] != (): + predicates, fidx, facet_arg = self.facet_integral_predicates(mesh, integral_type, kinfo, subdomain_id) + reads.append(facet_arg) + inames_dep.append(fidx[0].name) + else: + predicates = None elif self.is_integral_type(integral_type, "layer_integral"): predicates = self.layer_integral_predicates(slate_tensor, integral_type) else: @@ -469,6 +485,7 @@ def __init__(self, coeffs, constants): self.coefficients = coeffs self.constants = constants self.inames = OrderedDict() + self.needs_coordinates = False self.needs_cell_orientations = False self.needs_cell_sizes = False self.needs_cell_facets = False diff --git a/firedrake/slate/slate.py b/firedrake/slate/slate.py index 942faf2bd8..7c7d0759f0 100644 --- a/firedrake/slate/slate.py +++ b/firedrake/slate/slate.py @@ -15,7 +15,7 @@ functions to be executed within the Firedrake architecture. 
""" from abc import ABCMeta, abstractproperty, abstractmethod - +import functools from collections import OrderedDict, namedtuple, defaultdict from ufl import Constant @@ -24,8 +24,9 @@ from firedrake.formmanipulation import ExtractSubBlock, subspace from firedrake.function import Function, Cofunction from firedrake.ufl_expr import TestFunction -from firedrake.utils import cached_property, unique +from firedrake.utils import unique +from functools import cached_property from itertools import chain, count from pyop2.utils import as_tuple @@ -253,6 +254,13 @@ def ufl_domain(self): raise ValueError("All integrals must share the same domain of integration.") return domain + @staticmethod + def _expand_mixed_meshes(func): + @functools.wraps(func) + def wrapper(self, *args, **kwargs): + return sort_domains(join_domains(func(self, *args, **kwargs))) + return wrapper + @abstractmethod def ufl_domains(self): """Returns the integration domains of the integrals associated with @@ -487,6 +495,7 @@ def slate_coefficients(self): """Returns a tuple of coefficients associated with the tensor.""" return self.coefficients() + @TensorBase._expand_mixed_meshes def ufl_domains(self): """Returns the integration domains of the integrals associated with the tensor. @@ -562,6 +571,7 @@ def slate_coefficients(self): """Returns a BlockFunction in a tuple which carries all information to generate the right coefficients and maps.""" return (BlockFunction(self._function, self._indices, self._original_function),) + @TensorBase._expand_mixed_meshes def ufl_domains(self): """Returns the integration domains of the integrals associated with the tensor. """ @@ -720,6 +730,7 @@ def slate_coefficients(self): """Returns a tuple of coefficients associated with the tensor.""" return self.coefficients() + @TensorBase._expand_mixed_meshes def ufl_domains(self): """Returns the integration domains of the integrals associated with the tensor. 
@@ -815,6 +826,7 @@ def slate_coefficients(self): """Returns a tuple of coefficients associated with the tensor.""" return self.coefficients() + @TensorBase._expand_mixed_meshes def ufl_domains(self): """Returns the integration domains of the integrals associated with the tensor. @@ -918,6 +930,7 @@ def slate_coefficients(self): """Returns a tuple of coefficients associated with the tensor.""" return self.coefficients() + @TensorBase._expand_mixed_meshes def ufl_domains(self): """Returns the integration domains of the integrals associated with the tensor. @@ -976,6 +989,7 @@ def slate_coefficients(self): coeffs = [op.slate_coefficients() for op in self.operands] return tuple(OrderedDict.fromkeys(chain(*coeffs))) + @TensorBase._expand_mixed_meshes def ufl_domains(self): """Returns the integration domains of the integrals associated with the tensor. diff --git a/firedrake/slate/static_condensation/__init__.py b/firedrake/slate/static_condensation/__init__.py index 53debc0caf..b37bc83da4 100644 --- a/firedrake/slate/static_condensation/__init__.py +++ b/firedrake/slate/static_condensation/__init__.py @@ -1,2 +1,4 @@ -from firedrake.slate.static_condensation.hybridization import * # noqa: F401 -from firedrake.slate.static_condensation.scpc import * # noqa: F401 +from firedrake.slate.static_condensation.hybridization import ( # noqa: F401 + HybridizationPC, SchurComplementBuilder +) +from firedrake.slate.static_condensation.scpc import SCPC # noqa: F401 diff --git a/firedrake/slate/static_condensation/hybridization.py b/firedrake/slate/static_condensation/hybridization.py index bd0fc761f0..f3dc7a9ece 100644 --- a/firedrake/slate/static_condensation/hybridization.py +++ b/firedrake/slate/static_condensation/hybridization.py @@ -1,9 +1,9 @@ import functools import ufl -import finat.ufl import firedrake.dmhooks as dmhooks +import pyop2 from firedrake.slate.static_condensation.sc_base import SCBase from firedrake.matrix_free.operators import ImplicitMatrixContext from 
firedrake.petsc import PETSc @@ -54,6 +54,10 @@ def initialize(self, pc): V = test.function_space() mesh = V.mesh() + if len(set(mesh)) == 1: + mesh_unique = mesh.unique() + else: + raise NotImplementedError("Not implemented for general mixed meshes") if len(V) != 2: raise ValueError("Expecting two function spaces.") @@ -83,11 +87,10 @@ def initialize(self, pc): except TypeError: tdegree = W.ufl_element().degree() - 1 - TraceSpace = FunctionSpace(mesh, "HDiv Trace", tdegree) + TraceSpace = FunctionSpace(mesh[self.vidx], "HDiv Trace", tdegree) # Break the function spaces and define fully discontinuous spaces - broken_elements = finat.ufl.MixedElement([finat.ufl.BrokenElement(Vi.ufl_element()) for Vi in V]) - V_d = FunctionSpace(mesh, broken_elements) + V_d = V.broken_space() # Set up the functions for the original, hybridized # and schur complement systems @@ -122,10 +125,10 @@ def initialize(self, pc): trial: TrialFunction(V_d)} Atilde = Tensor(replace(self.ctx.a, arg_map)) gammar = TestFunction(TraceSpace) - n = ufl.FacetNormal(mesh) + n = ufl.FacetNormal(mesh_unique) sigma = TrialFunctions(V_d)[self.vidx] - if mesh.cell_set._extruded: + if mesh_unique.cell_set._extruded: Kform = (gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_h + gammar('+') * ufl.jump(sigma, n=n) * ufl.dS_v) else: @@ -159,7 +162,7 @@ def initialize(self, pc): integrand = gammar * ufl.dot(sigma, n) measures = [] trace_subdomains = [] - if mesh.cell_set._extruded: + if mesh_unique.cell_set._extruded: ds = ufl.ds_v for subdomain in sorted(extruded_neumann_subdomains): measures.append({"top": ufl.ds_t, "bottom": ufl.ds_b}[subdomain]) @@ -170,7 +173,7 @@ def initialize(self, pc): measures.append(ds) else: measures.extend((ds(sd) for sd in sorted(neumann_subdomains))) - markers = [int(x) for x in mesh.exterior_facets.unique_markers] + markers = [int(x) for x in mesh_unique.exterior_facets.unique_markers] dirichlet_subdomains = set(markers) - neumann_subdomains 
trace_subdomains.extend(sorted(dirichlet_subdomains)) @@ -182,10 +185,15 @@ def initialize(self, pc): else: # No bcs were provided, we assume weak Dirichlet conditions. # We zero out the contribution of the trace variables on - # the exterior boundary. Extruded cells will have both - # horizontal and vertical facets - trace_subdomains = ["on_boundary"] - if mesh.cell_set._extruded: + # the exterior boundary. We don't need to do this for boundary-less + # domains (like a sphere). + trace_subdomains = [] + with pyop2.mpi.temp_internal_comm(mesh_unique.comm) as icomm: + num_exterior_facets = icomm.allreduce(mesh_unique.exterior_facets.set.size) + if num_exterior_facets > 0: + trace_subdomains.append("on_boundary") + # Extruded cells will have both horizontal and vertical facets + if mesh_unique.cell_set._extruded and not mesh_unique.cell_set._extruded_periodic: trace_subdomains.extend(["bottom", "top"]) trace_bcs = [DirichletBC(TraceSpace, 0, subdomain) for subdomain in trace_subdomains] @@ -233,7 +241,7 @@ def initialize(self, pc): # Set the dm for the trace solver trace_ksp.setDM(trace_dm) - trace_ksp.setDMActive(False) + trace_ksp.setDMActive(PETSc.KSP.DMActive.ALL, False) trace_ksp.setOptionsPrefix(prefix) trace_ksp.setOperators(Smat, Smat) diff --git a/firedrake/slate/static_condensation/scpc.py b/firedrake/slate/static_condensation/scpc.py index e9ccb022b3..d176e56a05 100644 --- a/firedrake/slate/static_condensation/scpc.py +++ b/firedrake/slate/static_condensation/scpc.py @@ -62,7 +62,7 @@ def initialize(self, pc): # Need to duplicate a space which is NOT # associated with a subspace of a mixed space. 
- Vc = FunctionSpace(W.mesh(), W[c_field].ufl_element()) + Vc = FunctionSpace(W.mesh()[c_field], W[c_field].ufl_element()) bcs = [] cxt_bcs = self.cxt.row_bcs for bc in cxt_bcs: @@ -154,7 +154,7 @@ def initialize(self, pc): # Set the dm for the condensed solver c_ksp.setDM(c_dm) - c_ksp.setDMActive(False) + c_ksp.setDMActive(PETSc.KSP.DMActive.ALL, False) c_ksp.setOptionsPrefix(prefix) c_ksp.setOperators(A=Smat, P=Smat_pc) self.condensed_ksp = c_ksp diff --git a/firedrake/slope_limiter/__init__.py b/firedrake/slope_limiter/__init__.py index e72ee1d4d1..eaba2f087b 100644 --- a/firedrake/slope_limiter/__init__.py +++ b/firedrake/slope_limiter/__init__.py @@ -1,2 +1,2 @@ -from firedrake.slope_limiter.limiter import * # noqa: F401 -from firedrake.slope_limiter.vertex_based_limiter import * # noqa: F401 +from firedrake.slope_limiter.limiter import Limiter # noqa: F401 +from firedrake.slope_limiter.vertex_based_limiter import VertexBasedLimiter # noqa: F401 diff --git a/firedrake/solving_utils.py b/firedrake/solving_utils.py index 661f3ae218..5efecc2e20 100644 --- a/firedrake/solving_utils.py +++ b/firedrake/solving_utils.py @@ -10,8 +10,9 @@ from firedrake.matrix import MatrixBase from firedrake.exceptions import ConvergenceError from firedrake.petsc import PETSc, DEFAULT_KSP_PARAMETERS +from functools import cached_property + from firedrake.formmanipulation import ExtractSubBlock -from firedrake.utils import cached_property from firedrake.logging import warning @@ -276,6 +277,19 @@ def __init__(self, problem, self._coefficient_mapping = None self._transfer_manager = transfer_manager + def reconstruct(self, problem=None, mat_type=None, pmat_type=None, **kwargs): + """Reconstruct this _SNESContext instance with new arguments.""" + problem = problem or self.problem + mat_type = mat_type or self.mat_type + pmat_type = pmat_type or self.pmat_type + kwargs.setdefault("sub_mat_type", self.sub_mat_type) + kwargs.setdefault("sub_pmat_type", self.sub_pmat_type) + 
kwargs.setdefault("appctx", self.appctx) + kwargs.setdefault("options_prefix", self.options_prefix) + kwargs.setdefault("transfer_manager", self.transfer_manager) + kwargs.setdefault("pre_apply_bcs", self.pre_apply_bcs) + return _SNESContext(problem, mat_type, pmat_type, **kwargs) + @property def transfer_manager(self): """This allows the transfer manager to be set from options, e.g. diff --git a/firedrake/supermeshing.py b/firedrake/supermeshing.py index c1dc80dacb..eb011ceffa 100644 --- a/firedrake/supermeshing.py +++ b/firedrake/supermeshing.py @@ -3,10 +3,11 @@ import ctypes import pathlib import libsupermesh +import petsctools from firedrake.cython.supermeshimpl import assemble_mixed_mass_matrix as ammm, intersection_finder from firedrake.mg.utils import get_level from firedrake.petsc import PETSc -from firedrake.mg.kernels import to_reference_coordinates, compile_element +from firedrake.mg.kernels import to_reference_coordinates, compile_element, _make_kernel_args from firedrake.utility_meshes import UnitTriangleMesh, UnitTetrahedronMesh from firedrake.functionspace import FunctionSpace from firedrake.assemble import assemble @@ -19,7 +20,6 @@ from pyop2.sparsity import get_preallocation from pyop2.compilation import load from pyop2.mpi import COMM_SELF -from pyop2.utils import get_petsc_dir from collections import defaultdict @@ -78,10 +78,10 @@ def assemble_mixed_mass_matrix(V_A, V_B): mesh_A = V_A.mesh() mesh_B = V_B.mesh() - dim = mesh_A.geometric_dimension() - assert dim == mesh_B.geometric_dimension() - assert dim == mesh_A.topological_dimension() - assert dim == mesh_B.topological_dimension() + dim = mesh_A.geometric_dimension + assert dim == mesh_B.geometric_dimension + assert dim == mesh_A.topological_dimension + assert dim == mesh_B.topological_dimension (mh_A, level_A) = get_level(mesh_A) (mh_B, level_B) = get_level(mesh_B) @@ -149,7 +149,7 @@ def likely(cell_A): assert V_A.block_size == 1 assert V_B.block_size == 1 - preallocator = 
PETSc.Mat().create(comm=mesh_A._comm) + preallocator = PETSc.Mat().create(comm=mesh_A.comm) preallocator.setType(PETSc.Mat.Type.PREALLOCATOR) rset = V_B.dof_dset @@ -185,7 +185,7 @@ def likely(cell_A): # # Preallocate M_AB. # - mat = PETSc.Mat().create(comm=mesh_A._comm) + mat = PETSc.Mat().create(comm=mesh_A.comm) mat.setType(PETSc.Mat.Type.AIJ) rsizes = tuple(n * rdim for n in nrows) csizes = tuple(c * cdim for c in ncols) @@ -201,9 +201,6 @@ def likely(cell_A): mat.setOption(mat.Option.IGNORE_ZERO_ENTRIES, True) mat.setUp() - evaluate_kernel_A = compile_element(ufl.Coefficient(V_A), name="evaluate_kernel_A") - evaluate_kernel_B = compile_element(ufl.Coefficient(V_B), name="evaluate_kernel_B") - # We only need one of these since we assume that the two meshes both have CG1 coordinates to_reference_kernel = to_reference_coordinates(mesh_A.coordinates.ufl_element()) @@ -211,10 +208,15 @@ def likely(cell_A): reference_mesh = UnitTriangleMesh(comm=COMM_SELF) else: reference_mesh = UnitTetrahedronMesh(comm=COMM_SELF) - evaluate_kernel_S = compile_element(ufl.Coefficient(reference_mesh.coordinates.function_space()), name="evaluate_kernel_S") + V_S = reference_mesh.coordinates.function_space() V_S_A = FunctionSpace(reference_mesh, V_A.ufl_element()) V_S_B = FunctionSpace(reference_mesh, V_B.ufl_element()) + + evaluate_kernel_A = compile_element(ufl.Coefficient(V_A), ufl.TestFunction(V_S_A.dual()), name="evaluate_kernel_A") + evaluate_kernel_B = compile_element(ufl.Coefficient(V_B), ufl.TestFunction(V_S_B.dual()), name="evaluate_kernel_B") + evaluate_kernel_S = compile_element(ufl.Coefficient(V_S), ufl.TestFunction(V_S.dual()), name="evaluate_kernel_S") + M_SS = assemble(inner(TrialFunction(V_S_A), TestFunction(V_S_B)) * dx) M_SS = M_SS.petscmat[:, :] node_locations_A = utils.physical_node_locations(V_S_A).dat.data_ro_with_halos @@ -361,7 +363,7 @@ def likely(cell_A): PetscScalar* reference_node_location = &nodes_A[n*d]; PetscScalar* physical_node_location = 
physical_nodes_A[n]; for (int j=0; j < d; j++) physical_node_location[j] = 0.0; - pyop2_kernel_evaluate_kernel_S(physical_node_location, simplex_S, reference_node_location); + pyop2_kernel_evaluate_kernel_S(%(kernel_args_S)s); PrintInfo("\\tNode "); print_array(reference_node_location, d); PrintInfo(" mapped to "); @@ -374,7 +376,7 @@ def likely(cell_A): PetscScalar* reference_node_location = &nodes_B[n*d]; PetscScalar* physical_node_location = physical_nodes_B[n]; for (int j=0; j < d; j++) physical_node_location[j] = 0.0; - pyop2_kernel_evaluate_kernel_S(physical_node_location, simplex_S, reference_node_location); + pyop2_kernel_evaluate_kernel_S(%(kernel_args_S)s); PrintInfo("\\tNode "); print_array(reference_node_location, d); PrintInfo(" mapped to "); @@ -408,7 +410,7 @@ def likely(cell_A): coeffs_A[i] = 1.; for(int j=0; j split form coefficients) and @@ -130,14 +135,13 @@ def __init__( events=events) kernels.append(KernelInfo(kernel=pyop2_kernel, integral_type=kernel.integral_type, - oriented=kernel.oriented, subdomain_id=kernel.subdomain_id, - domain_number=kernel.domain_number, + domain_number=domain_number, + active_domain_numbers=active_domain_numbers, coefficient_numbers=coefficient_numbers_per_kernel, constant_numbers=constant_numbers_per_kernel, needs_cell_facets=False, pass_layer_arg=False, - needs_cell_sizes=kernel.needs_cell_sizes, arguments=kernel.arguments, events=events)) self.kernels = tuple(kernels) @@ -216,6 +220,7 @@ def compile_form(form, name, parameters=None, split=True, dont_split=(), diagona kernels = [] numbering = form.terminal_numbering() + all_meshes = extract_domains(form) if split: iterable = split_form(form, diagonal=diagonal) else: @@ -231,8 +236,10 @@ def compile_form(form, name, parameters=None, split=True, dont_split=(), diagona # and that component doesn't actually appear in the form then we # have an empty form, which we should not attempt to assemble. 
continue - # Map local coefficient/constant numbers (as seen inside the + # Map local domain/coefficient/constant numbers (as seen inside the # compiler) to the global coefficient/constant numbers + meshes = extract_domains(f) + domain_number_map = tuple(all_meshes.index(m) for m in meshes) coefficient_numbers = tuple( numbering[c] for c in f.coefficients() ) @@ -245,6 +252,7 @@ def compile_form(form, name, parameters=None, split=True, dont_split=(), diagona f, prefix, parameters, + domain_number_map, coefficient_numbers, constant_numbers, dont_split_numbers, @@ -291,20 +299,21 @@ def _ensure_cachedir(comm=None): def gather_integer_subdomain_ids(knls): - """Gather a dict of all integer subdomain IDs per integral type. + """Gather a dict of all integer subdomain IDs per integral type per domain. This is needed to correctly interpret the ``"otherwise"`` subdomain ID. :arg knls: Iterable of :class:`SplitKernel` objects. """ - all_integer_subdomain_ids = collections.defaultdict(list) + all_integer_subdomain_ids = collections.defaultdict(lambda: collections.defaultdict(set)) for _, kinfo in knls: for subdomain_id in kinfo.subdomain_id: if subdomain_id != "otherwise": - all_integer_subdomain_ids[kinfo.integral_type].append(subdomain_id) + all_integer_subdomain_ids[kinfo.domain_number][kinfo.integral_type].add(subdomain_id) - for k, v in all_integer_subdomain_ids.items(): - all_integer_subdomain_ids[k] = tuple(sorted(v)) + for domain_number, integral_type_subdomain_ids_dict in all_integer_subdomain_ids.items(): + for integral_type, subdomain_ids in integral_type_subdomain_ids_dict.items(): + all_integer_subdomain_ids[domain_number][integral_type] = tuple(sorted(subdomain_ids)) return all_integer_subdomain_ids diff --git a/firedrake/ufl_expr.py b/firedrake/ufl_expr.py index e4fc89b175..f71d111981 100644 --- a/firedrake/ufl_expr.py +++ b/firedrake/ufl_expr.py @@ -5,11 +5,11 @@ from ufl.split_functions import split from ufl.algorithms import extract_arguments, 
extract_coefficients from ufl.domain import as_domain - import firedrake -from firedrake import utils, function, cofunction +from firedrake import function, cofunction from firedrake.constant import Constant from firedrake.petsc import PETSc +from functools import cached_property __all__ = ['Argument', 'Coargument', 'TestFunction', 'TrialFunction', @@ -51,15 +51,15 @@ def arguments(self): def coefficients(self): return () - @utils.cached_property + @cached_property def cell_node_map(self): return self.function_space().cell_node_map - @utils.cached_property + @cached_property def interior_facet_node_map(self): return self.function_space().interior_facet_node_map - @utils.cached_property + @cached_property def exterior_facet_node_map(self): return self.function_space().exterior_facet_node_map @@ -103,15 +103,15 @@ def __init__(self, function_space, number, part=None): number, part=part) self._function_space = function_space - @utils.cached_property + @cached_property def cell_node_map(self): return self.function_space().cell_node_map - @utils.cached_property + @cached_property def interior_facet_node_map(self): return self.function_space().interior_facet_node_map - @utils.cached_property + @cached_property def exterior_facet_node_map(self): return self.function_space().exterior_facet_node_map @@ -233,63 +233,64 @@ def derivative(form, u, du=None, coefficient_derivatives=None): raise TypeError( f"Cannot take the derivative of a {type(form).__name__}" ) - u_is_x = isinstance(u, ufl.SpatialCoordinate) - if u_is_x or isinstance(u, (Constant, BaseFormOperator)): - uc = u - else: - uc, = extract_coefficients(u) - if not (u_is_x or isinstance(u, BaseFormOperator)) and len(uc.subfunctions) > 1 and set(extract_coefficients(form)) & set(uc.subfunctions): - raise ValueError("Taking derivative of form wrt u, but form contains coefficients from u.subfunctions." 
- "\nYou probably meant to write split(u) when defining your form.") - - mesh = as_domain(form) - if not mesh: - raise ValueError("Expression to be differentiated has no ufl domain." - "\nDo you need to add a domain to your Constant?") - is_dX = u_is_x or u is mesh.coordinates - try: args = form.arguments() except AttributeError: args = extract_arguments(form) # UFL arguments need unique indices within a form n = max(a.number() for a in args) if args else -1 - - if is_dX: - coords = mesh.coordinates - u = ufl.SpatialCoordinate(mesh) + set_internal_coord_derivatives = False + all_meshes = extract_domains(form) + if isinstance(u, ufl.SpatialCoordinate): + uc = u + coords_mesh, = extract_unique_domain(u) + coords = coords_mesh.coordinates + V = coords.function_space() + set_internal_coord_derivatives = True + elif any(u is m.coordinates for m in all_meshes): + uc = u + coords = u + coord_mesh = u.function_space().mesh() + u = ufl.SpatialCoordinate(coord_mesh) V = coords.function_space() - elif isinstance(uc, (firedrake.Function, firedrake.Cofunction, BaseFormOperator)): + set_internal_coord_derivatives = True + elif isinstance(u, BaseFormOperator): + uc = u V = uc.function_space() - elif isinstance(uc, firedrake.Constant): + elif isinstance(u, Constant): + uc = u if uc.ufl_shape != (): raise ValueError("Real function space of vector elements not supported") # Replace instances of the constant with a new argument ``x`` # and differentiate wrt ``x``. 
+ mesh = as_domain(form) # integration domain V = firedrake.FunctionSpace(mesh, "Real", 0) x = ufl.Coefficient(V) # TODO: Update this line when https://github.com/FEniCS/ufl/issues/171 is fixed form = ufl.replace(form, {u: x}) u_orig, u = u, x else: - raise RuntimeError("Can't compute derivative for form") - + uc, = extract_coefficients(u) + if not isinstance(uc, (firedrake.Function, firedrake.Cofunction)): + raise RuntimeError(f"Can't compute derivative for form w.r.t {u}") + if len(uc.subfunctions) > 1 and set(extract_coefficients(form)) & set(uc.subfunctions): + raise ValueError("Taking derivative of form wrt u, but form contains coefficients from u.subfunctions." + "\nYou probably meant to write split(u) when defining your form.") + V = uc.function_space() if du is None: du = Argument(V, n + 1) - - if is_dX: + if set_internal_coord_derivatives: internal_coefficient_derivatives = {coords: du} else: internal_coefficient_derivatives = {} if coefficient_derivatives: internal_coefficient_derivatives.update(coefficient_derivatives) - if u.ufl_shape != du.ufl_shape: raise ValueError("Shapes of u and du do not match.\n" "If you passed an indexed part of split(u) into " "derivative, you need to provide an appropriate du as well.") dform = ufl.derivative(form, u, du, internal_coefficient_derivatives) - if isinstance(uc, firedrake.Constant): + if isinstance(uc, Constant): # If we replaced constants with ``x`` to differentiate, # replace them back to the original symbolic constant dform = ufl.replace(dform, {u: u_orig}) @@ -366,23 +367,37 @@ def FacetNormal(mesh): return ufl.FacetNormal(mesh) -def extract_domains(func): - """Extract the domain from `func`. +def extract_domains(f): + """Extract the domain from `f`. Parameters ---------- - x : firedrake.function.Function, firedrake.cofunction.Cofunction, or firedrake.constant.Constant - The function to extract the domain from. 
+ f : ufl.form.Form or firedrake.slate.TensorBase or firedrake.function.Function or firedrake.cofunction.Cofunction or firedrake.constant.Constant + The form, tensor, or function to extract the domain from. Returns ------- list of firedrake.mesh.MeshGeometry Extracted domains. """ - if isinstance(func, (function.Function, cofunction.Cofunction, Argument, Coargument)): - return [func.function_space().mesh()] + from firedrake.mesh import MeshSequenceGeometry + + if isinstance(f, firedrake.slate.TensorBase): + return f.ufl_domains() + elif isinstance(f, (cofunction.Cofunction, Coargument)): + # ufl.domain.extract_domains does not work. + mesh = f.function_space().mesh() + if isinstance(mesh, MeshSequenceGeometry): + return list(set(mesh._meshes)) + else: + return [mesh] + elif isinstance(f, (ufl.form.FormSum, ufl.Action)): + # ufl.domain.extract_domains does not work. + if f._domains is None: + f._analyze_domains() + return f._domains else: - return ufl.domain.extract_domains(func) + return ufl.domain.extract_domains(f) def extract_unique_domain(func): @@ -390,7 +405,7 @@ def extract_unique_domain(func): Parameters ---------- - x : firedrake.function.Function, firedrake.cofunction.Cofunction, or firedrake.constant.Constant + func : firedrake.function.Function, firedrake.cofunction.Cofunction, or firedrake.constant.Constant The function to extract the domain from. Returns @@ -399,6 +414,6 @@ def extract_unique_domain(func): Extracted domains. 
""" if isinstance(func, (function.Function, cofunction.Cofunction, Argument, Coargument)): - return func.function_space().mesh() + return func.function_space().mesh().unique() else: return ufl.domain.extract_unique_domain(func) diff --git a/firedrake/utility_meshes.py b/firedrake/utility_meshes.py index 7fa5aeec73..f4cc9d6813 100644 --- a/firedrake/utility_meshes.py +++ b/firedrake/utility_meshes.py @@ -1,38 +1,34 @@ +import numbers import numpy as np +import warnings +from collections.abc import Mapping +from typing import Literal +import petsctools import ufl +from mpi4py import MPI from pyop2.mpi import COMM_WORLD from firedrake.utils import IntType, ScalarType from firedrake import ( VectorFunctionSpace, - FunctionSpace, Function, Constant, assemble, - Interpolate, + interpolate, FiniteElement, - interval, tetrahedron, - atan2, - pi, - as_vector, - SpatialCoordinate, - conditional, - gt, - as_tensor, - dot, - And, - Or, - sin, - cos, real ) from firedrake.cython import dmcommon -from firedrake import mesh -from firedrake import function -from firedrake import functionspace +from firedrake.mesh import ( + Mesh, DistributedMeshOverlapType, DEFAULT_MESH_NAME, MeshGeometry, + plex_from_cell_list, _generate_default_mesh_topology_name, + _generate_default_mesh_coordinates_name, MeshTopology, + make_mesh_from_mesh_topology, + make_mesh_from_coordinates, ExtrudedMesh +) from firedrake.parameters import parameters from firedrake.petsc import PETSc @@ -76,7 +72,7 @@ distribution_parameters_no_overlap = {"partition": True, - "overlap_type": (mesh.DistributedMeshOverlapType.NONE, 0)} + "overlap_type": (DistributedMeshOverlapType.NONE, 0)} reorder_noop = False @@ -92,7 +88,7 @@ def _postprocess_periodic_mesh(coords, comm, distribution_parameters, reorder, n V.finat_element, V.dm.getLocalSection(), coords.dat._vec) - return mesh.Mesh( + return Mesh( dm, comm=comm, distribution_parameters=distribution_parameters, @@ -111,7 +107,7 @@ def IntervalMesh( 
distribution_parameters=None, reorder=False, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -146,35 +142,22 @@ def IntervalMesh( left = length_or_left if ncells <= 0 or ncells % 1: - raise ValueError("Number of cells must be a postive integer") - length = right - left - if length < 0: + raise ValueError("Number of cells must be a positive integer") + if right - left < 0: raise ValueError("Requested mesh has negative length") - dx = length / ncells - # This ensures the rightmost point is actually present. - coords = np.arange(left, right + 0.01 * dx, dx, dtype=np.double).reshape(-1, 1) - cells = np.dstack( - ( - np.arange(0, len(coords) - 1, dtype=np.int32), - np.arange(1, len(coords), dtype=np.int32), - ) - ).reshape(-1, 2) - plex = mesh.plex_from_cell_list( - 1, cells, coords, comm, mesh._generate_default_mesh_topology_name(name) + + plex = PETSc.DMPlex().createBoxMesh( + (ncells,), + lower=(left,), + upper=(right,), + simplex=False, + periodic=False, + interpolate=True, + comm=comm ) - # Apply boundary IDs - plex.createLabel(dmcommon.FACE_SETS_LABEL) - coordinates = plex.getCoordinates() - coord_sec = plex.getCoordinateSection() - vStart, vEnd = plex.getDepthStratum(0) # vertices - for v in range(vStart, vEnd): - vcoord = plex.vecGetClosure(coord_sec, coordinates, v) - if vcoord[0] == coords[0]: - plex.setLabelValue(dmcommon.FACE_SETS_LABEL, v, 1) - if vcoord[0] == coords[-1]: - plex.setLabelValue(dmcommon.FACE_SETS_LABEL, v, 2) - - m = mesh.Mesh( + _mark_mesh_boundaries(plex) + + return Mesh( plex, reorder=reorder, distribution_parameters=distribution_parameters, @@ -183,7 +166,6 @@ def IntervalMesh( permutation_name=permutation_name, comm=comm, ) - return m @PETSc.Log.EventDecorator() @@ -192,7 +174,7 @@ def UnitIntervalMesh( distribution_parameters=None, reorder=False, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, 
permutation_name=None, ): @@ -215,7 +197,6 @@ def UnitIntervalMesh( The left hand (:math:`x=0`) boundary point has boundary marker 1, while the right hand (:math:`x=1`) point has marker 2. """ - return IntervalMesh( ncells, length_or_left=1.0, @@ -235,7 +216,7 @@ def PeriodicIntervalMesh( distribution_parameters=None, reorder=False, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -255,46 +236,27 @@ def PeriodicIntervalMesh( when checkpointing; if `None`, the name is automatically generated. """ - - if ncells < 3: - raise ValueError( - "1D periodic meshes with fewer than 3 \ -cells are not currently supported" - ) - m = CircleManifoldMesh( - ncells, - distribution_parameters=distribution_parameters_no_overlap, - reorder=reorder_noop, - comm=comm, + plex = PETSc.DMPlex().createBoxMesh( + (ncells,), + lower=(0.,), + upper=(length,), + simplex=False, + periodic=True, + interpolate=True, + sparseLocalize=False, + comm=comm + ) + _mark_mesh_boundaries(plex) + + return Mesh( + plex, + reorder=reorder, + distribution_parameters=distribution_parameters, name=name, distribution_name=distribution_name, permutation_name=permutation_name, + comm=comm, ) - indicator = Function(FunctionSpace(m, "DG", 0)) - coord_fs = VectorFunctionSpace( - m, FiniteElement("DG", interval, 1, variant="equispaced"), dim=1 - ) - new_coordinates = Function( - coord_fs, name=mesh._generate_default_mesh_coordinates_name(name) - ) - x, y = SpatialCoordinate(m) - eps = 1.e-14 - indicator.interpolate(conditional(gt(real(y), 0), 0., 1.)) - new_coordinates.interpolate( - as_vector((conditional( - gt(real(x), real(1. - eps)), indicator, # Periodic break. - # Unwrap rest of circle. 
- atan2(real(-y), real(-x))/(2 * pi) + 0.5 - ) * length,)) - ) - - return _postprocess_periodic_mesh(new_coordinates, - comm, - distribution_parameters, - reorder, - name, - distribution_name, - permutation_name) @PETSc.Log.EventDecorator() @@ -303,7 +265,7 @@ def PeriodicUnitIntervalMesh( distribution_parameters=None, reorder=False, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -341,7 +303,7 @@ def OneElementThickMesh( Ly, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -373,10 +335,10 @@ def OneElementThickMesh( coords = np.array([X, Y]).T # a line of coordinates, with a looped topology - plex = mesh.plex_from_cell_list( - 2, cells, coords, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 2, cells, coords, comm, _generate_default_mesh_topology_name(name) ) - tmesh1 = mesh.MeshTopology( + tmesh1 = MeshTopology( plex, plex.getName(), reorder=parameters["reorder_meshes"], @@ -481,14 +443,14 @@ def OneElementThickMesh( cell_closure[row][0:4] = [v1, v1, v2, v2] tmesh1.cell_closure = np.array(cell_closure, dtype=IntType) - mesh1 = mesh.make_mesh_from_mesh_topology(tmesh1, "temp") + mesh1 = make_mesh_from_mesh_topology(tmesh1, "temp") fe_dg = FiniteElement("DQ", mesh1.ufl_cell(), 1, variant="equispaced") Vc = VectorFunctionSpace(mesh1, fe_dg) fc = Function( - Vc, name=mesh._generate_default_mesh_coordinates_name(name) + Vc, name=_generate_default_mesh_coordinates_name(name) ).interpolate(mesh1.coordinates) - mash = mesh.Mesh( + mash = Mesh( fc, name=name, distribution_name=distribution_name, @@ -525,7 +487,7 @@ def UnitTriangleMesh( refinement_level=0, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -545,7 +507,7 @@ def UnitTriangleMesh( 
""" coords = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]] cells = [[0, 1, 2]] - plex = mesh.plex_from_cell_list(2, cells, coords, comm) + plex = plex_from_cell_list(2, cells, coords, comm) # mark boundary facets plex.createLabel(dmcommon.FACE_SETS_LABEL) @@ -571,8 +533,8 @@ def UnitTriangleMesh( for i in range(refinement_level): plex = plex.refine() - plex.setName(mesh._generate_default_mesh_topology_name(name)) - return mesh.Mesh( + plex.setName(_generate_default_mesh_topology_name(name)) + return Mesh( plex, reorder=False, distribution_parameters=distribution_parameters, @@ -593,10 +555,10 @@ def RectangleMesh( originY=0., quadrilateral=False, reorder=None, - diagonal="left", + diagonal=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -631,24 +593,34 @@ def RectangleMesh( * 3: plane y == originY * 4: plane y == Ly """ + if any(n <= 0 or not isinstance(n, numbers.Integral) for n in {nx, ny}): + raise ValueError("Number of cells must be a positive integer") + if quadrilateral and diagonal is not None: + raise ValueError("Cannot specify slope of diagonal on quad meshes") + if not quadrilateral and diagonal is None: + diagonal = "left" - for n in (nx, ny): - if n <= 0 or n % 1: - raise ValueError("Number of cells must be a postive integer") + plex = PETSc.DMPlex().createBoxMesh( + (nx, ny), + lower=(originX, originY), + upper=(Lx, Ly), + simplex=False, + interpolate=True, + comm=comm + ) + _mark_mesh_boundaries(plex) - xcoords = np.linspace(originX, Lx, nx + 1, dtype=np.double) - ycoords = np.linspace(originY, Ly, ny + 1, dtype=np.double) - return TensorRectangleMesh( - xcoords, - ycoords, - quadrilateral=quadrilateral, + if not quadrilateral: + plex = _refine_quads_to_triangles(plex, diagonal) + + return Mesh( + plex, reorder=reorder, - diagonal=diagonal, distribution_parameters=distribution_parameters, - comm=comm, name=name, 
distribution_name=distribution_name, permutation_name=permutation_name, + comm=comm, ) @@ -657,10 +629,10 @@ def TensorRectangleMesh( ycoords, quadrilateral=False, reorder=None, - diagonal="left", + diagonal=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -676,6 +648,7 @@ def TensorRectangleMesh( :kwarg diagonal: For triangular meshes, should the diagonal got from bottom left to top right (``"right"``), or top left to bottom right (``"left"``), or put in both diagonals (``"crossed"``). + Default is ``"left"``. The boundary edges in this mesh are numbered as follows: @@ -684,61 +657,33 @@ def TensorRectangleMesh( * 3: plane y == ycoords[0] * 4: plane y == ycoords[-1] """ + if quadrilateral and diagonal is not None: + raise ValueError("Cannot specify slope of diagonal on quad meshes") + if not quadrilateral and diagonal is None: + diagonal = "left" + xcoords = np.unique(xcoords) ycoords = np.unique(ycoords) nx = np.size(xcoords) - 1 ny = np.size(ycoords) - 1 - for n in (nx, ny): - if n <= 0: - raise ValueError("Number of cells must be a postive integer") + if any(n <= 0 for n in {nx, ny}): + raise ValueError("Number of cells must be a positive integer") coords = np.asarray(np.meshgrid(xcoords, ycoords)).swapaxes(0, 2).reshape(-1, 2) # cell vertices i, j = np.meshgrid(np.arange(nx, dtype=np.int32), np.arange(ny, dtype=np.int32)) - if not quadrilateral and diagonal == "crossed": - xs = 0.5 * (xcoords[1:] + xcoords[:-1]) - ys = 0.5 * (ycoords[1:] + ycoords[:-1]) - extra = np.asarray(np.meshgrid(xs, ys)).swapaxes(0, 2).reshape(-1, 2) - coords = np.vstack([coords, extra]) - # - # 2-----3 - # | \ / | - # | 4 | - # | / \ | - # 0-----1 - cells = [ - i * (ny + 1) + j, - i * (ny + 1) + j + 1, - (i + 1) * (ny + 1) + j, - (i + 1) * (ny + 1) + j + 1, - (nx + 1) * (ny + 1) + i * ny + j, - ] - cells = np.asarray(cells).swapaxes(0, 2).reshape(-1, 5) - idx = [0, 1, 4, 0, 
2, 4, 2, 3, 4, 3, 1, 4] - cells = cells[:, idx].reshape(-1, 3) - else: - cells = [ - i * (ny + 1) + j, - i * (ny + 1) + j + 1, - (i + 1) * (ny + 1) + j + 1, - (i + 1) * (ny + 1) + j, - ] - cells = np.asarray(cells).swapaxes(0, 2).reshape(-1, 4) - if not quadrilateral: - if diagonal == "left": - idx = [0, 1, 3, 1, 2, 3] - elif diagonal == "right": - idx = [0, 1, 2, 0, 2, 3] - else: - raise ValueError("Unrecognised value for diagonal '%r'", diagonal) - # two cells per cell above... - cells = cells[:, idx].reshape(-1, 3) + cells = [ + i * (ny + 1) + j, + i * (ny + 1) + j + 1, + (i + 1) * (ny + 1) + j + 1, + (i + 1) * (ny + 1) + j, + ] + cells = np.asarray(cells).swapaxes(0, 2).reshape(-1, 4) - plex = mesh.plex_from_cell_list( - 2, cells, coords, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 2, cells, coords, comm, _generate_default_mesh_topology_name(name) ) - # mark boundary facets plex.createLabel(dmcommon.FACE_SETS_LABEL) plex.markBoundaryFaces("boundary_faces") @@ -763,7 +708,11 @@ def TensorRectangleMesh( if abs(face_coords[1] - y1) < ytol and abs(face_coords[3] - y1) < ytol: plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 4) plex.removeLabel("boundary_faces") - m = mesh.Mesh( + + if not quadrilateral: + plex = _refine_quads_to_triangles(plex, diagonal) + + return Mesh( plex, reorder=reorder, distribution_parameters=distribution_parameters, @@ -772,40 +721,59 @@ def TensorRectangleMesh( permutation_name=permutation_name, comm=comm, ) - return m @PETSc.Log.EventDecorator() def SquareMesh( - nx, - ny, - L, - reorder=None, - quadrilateral=False, - diagonal="left", - distribution_parameters=None, - comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, - distribution_name=None, - permutation_name=None, + nx: numbers.Integral, + ny: numbers.Integral, + L: numbers.Real, + reorder: bool | None = None, + quadrilateral: bool = False, + diagonal: Literal['crossed', 'left', 'right'] | None = None, + distribution_parameters: dict | None = 
None, + comm: MPI.Comm = COMM_WORLD, + name: str = DEFAULT_MESH_NAME, + distribution_name: str | None = None, + permutation_name: str | None = None, ): - """Generate a square mesh + """Generate a square mesh. - :arg nx: The number of cells in the x direction - :arg ny: The number of cells in the y direction - :arg L: The extent in the x and y directions - :kwarg quadrilateral: (optional), creates quadrilateral mesh. - :kwarg reorder: (optional), should the mesh be reordered - :kwarg distribution_parameters: options controlling mesh - distribution, see :func:`.Mesh` for details. - :kwarg comm: Optional communicator to build the mesh on. - :kwarg name: Optional name of the mesh. - :kwarg distribution_name: the name of parallel distribution used - when checkpointing; if `None`, the name is automatically - generated. - :kwarg permutation_name: the name of entity permutation (reordering) used - when checkpointing; if `None`, the name is automatically - generated. + Parameters + ---------- + nx + The number of cells in the x direction. + ny + The number of cells in the y direction. + L + The extent in the x and y directions. + reorder + Flag indicating whether to reorder the mesh. + quadrilateral + Flag indicating whether to create a quadrilateral mesh. + diagonal + The refinement strategy used for non-quadrilateral meshes. Must be + one of ``"crossed"``, ``"left"``, ``"right"``. + distribution_parameters + Options controlling mesh distribution, see :func:`.Mesh` for details. + comm + Optional communicator to build the mesh on. + name + Optional name of the mesh. + distribution_name + The name of parallel distribution used when checkpointing; if `None`, + the name is automatically generated. + permutation_name + The name of entity permutation (reordering) used when checkpointing; + if `None`, the name is automatically generated. + + Returns + ------- + MeshGeometry + The new mesh. 
+ + Notes + ----- The boundary edges in this mesh are numbered as follows: @@ -813,6 +781,7 @@ def SquareMesh( * 2: plane x == L * 3: plane y == 0 * 4: plane y == L + """ return RectangleMesh( nx, @@ -832,33 +801,52 @@ def SquareMesh( @PETSc.Log.EventDecorator() def UnitSquareMesh( - nx, - ny, - reorder=None, - diagonal="left", - quadrilateral=False, - distribution_parameters=None, - comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, - distribution_name=None, - permutation_name=None, + nx: numbers.Integral, + ny: numbers.Integral, + reorder: bool | None = None, + diagonal: Literal["crossed", "left", "right"] | None = None, + quadrilateral: bool = False, + distribution_parameters: dict | None = None, + comm: MPI.Comm = COMM_WORLD, + name: str = DEFAULT_MESH_NAME, + distribution_name: str | None = None, + permutation_name: str | None = None, ): - """Generate a unit square mesh + """Generate a unit square mesh. - :arg nx: The number of cells in the x direction - :arg ny: The number of cells in the y direction - :kwarg quadrilateral: (optional), creates quadrilateral mesh. - :kwarg reorder: (optional), should the mesh be reordered - :kwarg distribution_parameters: options controlling mesh - distribution, see :func:`.Mesh` for details. - :kwarg comm: Optional communicator to build the mesh on. - :kwarg name: Optional name of the mesh. - :kwarg distribution_name: the name of parallel distribution used - when checkpointing; if `None`, the name is automatically - generated. - :kwarg permutation_name: the name of entity permutation (reordering) used - when checkpointing; if `None`, the name is automatically - generated. + Parameters + ---------- + nx + The number of cells in the x direction. + ny + The number of cells in the y direction. + reorder + Flag indicating whether to reorder the mesh. + diagonal + The refinement strategy used for non-quadrilateral meshes. Must be + one of ``"crossed"``, ``"left"``, ``"right"``. 
+ quadrilateral + Flag indicating whether to create a quadrilateral mesh. + distribution_parameters + Options controlling mesh distribution, see :func:`.Mesh` for details. + comm + Optional communicator to build the mesh on. + name + Optional name of the mesh. + distribution_name + The name of parallel distribution used when checkpointing; if `None`, + the name is automatically generated. + permutation_name + The name of entity permutation (reordering) used when checkpointing; + if `None`, the name is automatically generated. + + Returns + ------- + MeshGeometry + The new mesh. + + Notes + ----- The boundary edges in this mesh are numbered as follows: @@ -866,6 +854,7 @@ def UnitSquareMesh( * 2: plane x == 1 * 3: plane y == 0 * 4: plane y == 1 + """ return SquareMesh( nx, @@ -884,42 +873,61 @@ def UnitSquareMesh( @PETSc.Log.EventDecorator() def PeriodicRectangleMesh( - nx, - ny, - Lx, - Ly, - direction="both", - quadrilateral=False, - reorder=None, - distribution_parameters=None, - diagonal=None, - comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, - distribution_name=None, - permutation_name=None, -): - """Generate a periodic rectangular mesh + nx: numbers.Integral, + ny: numbers.Integral, + Lx: numbers.Real, + Ly: numbers.Real, + direction: Literal["both", "x", "y"] = "both", + quadrilateral: bool = False, + reorder: bool | None = None, + distribution_parameters: dict | None = None, + diagonal: Literal["crossed", "left", "right"] | None = None, + comm: MPI.Comm = COMM_WORLD, + name: str = DEFAULT_MESH_NAME, + distribution_name: str | None = None, + permutation_name: str | None = None, +) -> MeshGeometry: + """Generate a periodic rectangular mesh. - :arg nx: The number of cells in the x direction - :arg ny: The number of cells in the y direction - :arg Lx: The extent in the x direction - :arg Ly: The extent in the y direction - :arg direction: The direction of the periodicity, one of - ``"both"``, ``"x"`` or ``"y"``. 
- :kwarg quadrilateral: (optional), creates quadrilateral mesh. - :kwarg reorder: (optional), should the mesh be reordered - :kwarg distribution_parameters: options controlling mesh - distribution, see :func:`.Mesh` for details. - :kwarg diagonal: (optional), one of ``"crossed"``, ``"left"``, ``"right"``. - Not valid for quad meshes. Only used for direction ``"x"`` or direction ``"y"``. - :kwarg comm: Optional communicator to build the mesh on. - :kwarg name: Optional name of the mesh. - :kwarg distribution_name: the name of parallel distribution used - when checkpointing; if `None`, the name is automatically - generated. - :kwarg permutation_name: the name of entity permutation (reordering) used - when checkpointing; if `None`, the name is automatically - generated. + Parameters + ---------- + nx + The number of cells in the x direction. + ny + The number of cells in the y direction. + Lx + The extent in the x direction. + Ly + The extent in the y direction. + direction + The direction of the periodicity, one of ``"both"``, ``"x"`` or ``"y"``. + quadrilateral + Flag indicating whether to create a quadrilateral mesh. + reorder + Flag indicating whether to reorder the mesh. + distribution_parameters + Options controlling mesh distribution, see :func:`.Mesh` for details. + diagonal + The refinement strategy used for non-quadrilateral meshes. Must be + one of ``"crossed"``, ``"left"``, ``"right"``. + comm + Optional communicator to build the mesh on. + name + Optional name of the mesh. + distribution_name + The name of parallel distribution used when checkpointing; if `None`, + the name is automatically generated. + permutation_name + The name of entity permutation (reordering) used when checkpointing; + if `None`, the name is automatically generated. + + Returns + ------- + MeshGeometry + The new mesh. 
+ + Notes + ----- If direction == "x" the boundary edges in this mesh are numbered as follows: @@ -930,136 +938,111 @@ def PeriodicRectangleMesh( * 1: plane x == 0 * 2: plane x == Lx + """ + if quadrilateral and diagonal is not None: + raise ValueError("Cannot specify slope of diagonal on quad meshes") + if not quadrilateral and diagonal is None: + diagonal = "left" - if direction == "both" and ny == 1 and quadrilateral: - return OneElementThickMesh( - nx, - Lx, - Ly, - distribution_parameters=distribution_parameters, - name=name, - distribution_name=distribution_name, - permutation_name=permutation_name, - comm=comm, - ) + # TODO: Remove this awkward mapping, this will be a breaking API change + # for which a deprecation policy cannot be followed + plex_to_firedrake_boundary_labels = None + match direction: + case "both": + periodic = (True, True) + case "x": + periodic = (True, False) + # NOTE: The vertical faces (plex faces 2 and 4) have arbitrary + # labels. What matters is that we have 1->1 and 3->2. 
+ plex_to_firedrake_boundary_labels = {1: 1, 2: 4, 3: 2, 4: 3} + case "y": + periodic = (False, True) + # default plex to firedrake label mapping is correct + case _: + raise ValueError( + f"Cannot have a periodic mesh with periodicity '{direction}'" + ) - if direction not in ("both", "x", "y"): - raise ValueError( - "Cannot have a periodic mesh with periodicity '%s'" % direction - ) - if direction != "both": - return PartiallyPeriodicRectangleMesh( - nx, - ny, - Lx, - Ly, - direction=direction, - quadrilateral=quadrilateral, - reorder=reorder, - distribution_parameters=distribution_parameters, - diagonal=diagonal, - comm=comm, - name=name, - distribution_name=distribution_name, - permutation_name=permutation_name, - ) - if nx < 3 or ny < 3: - raise ValueError( - "2D periodic meshes with fewer than 3 cells in each direction are not currently supported" - ) + plex = PETSc.DMPlex().createBoxMesh( + (nx, ny), + lower=(0., 0.), + upper=(Lx, Ly), + simplex=False, + periodic=periodic, + interpolate=True, + sparseLocalize=False, + comm=comm + ) + _mark_mesh_boundaries(plex, plex_to_firedrake_boundary_labels) + if not quadrilateral: + plex = _refine_quads_to_triangles(plex, diagonal) - m = TorusMesh( - nx, - ny, - 1.0, - 0.5, - quadrilateral=quadrilateral, - reorder=reorder_noop, - distribution_parameters=distribution_parameters_no_overlap, - comm=comm, + return Mesh( + plex, + reorder=reorder, + distribution_parameters=distribution_parameters, name=name, distribution_name=distribution_name, permutation_name=permutation_name, + comm=comm, ) - coord_family = "DQ" if quadrilateral else "DG" - cell = "quadrilateral" if quadrilateral else "triangle" - - coord_fs = VectorFunctionSpace( - m, FiniteElement(coord_family, cell, 1, variant="equispaced"), dim=2 - ) - new_coordinates = Function( - coord_fs, name=mesh._generate_default_mesh_coordinates_name(name) - ) - x, y, z = SpatialCoordinate(m) - eps = 1.e-14 - indicator_y = Function(FunctionSpace(m, coord_family, 0)) - 
indicator_y.interpolate(conditional(gt(real(y), 0), 0., 1.)) - x_coord = Function(FunctionSpace(m, coord_family, 1, variant="equispaced")) - x_coord.interpolate( - # Periodic break. - conditional(And(gt(real(eps), real(abs(y))), gt(real(x), 0.)), indicator_y, - # Unwrap rest of circle. - atan2(real(-y), real(-x))/(2*pi)+0.5) - ) - phi_coord = as_vector([cos(2*pi*x_coord), sin(2*pi*x_coord)]) - dr = dot(as_vector((x, y))-phi_coord, phi_coord) - indicator_z = Function(FunctionSpace(m, coord_family, 0)) - indicator_z.interpolate(conditional(gt(real(z), 0), 0., 1.)) - new_coordinates.interpolate(as_vector(( - x_coord * Lx, - # Periodic break. - conditional(And(gt(real(eps), real(abs(z))), gt(real(dr), 0.)), indicator_z, - # Unwrap rest of circle. - atan2(real(-z), real(-dr))/(2*pi)+0.5) * Ly - ))) - - return _postprocess_periodic_mesh(new_coordinates, - comm, - distribution_parameters, - reorder, - name, - distribution_name, - permutation_name) @PETSc.Log.EventDecorator() def PeriodicSquareMesh( - nx, - ny, - L, - direction="both", - quadrilateral=False, - reorder=None, - distribution_parameters=None, - diagonal=None, - comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, - distribution_name=None, - permutation_name=None, + nx: numbers.Integral, + ny: numbers.Integral, + L: numbers.Real, + direction: Literal["both", "x", "y"] = "both", + quadrilateral: bool = False, + reorder: bool | None = None, + distribution_parameters: dict | None = None, + diagonal: Literal["crossed", "left", "right"] | None = None, + comm: MPI.Comm = COMM_WORLD, + name: str = DEFAULT_MESH_NAME, + distribution_name: str | None = None, + permutation_name: str | None = None, ): - """Generate a periodic square mesh + """Generate a periodic square mesh. - :arg nx: The number of cells in the x direction - :arg ny: The number of cells in the y direction - :arg L: The extent in the x and y directions - :arg direction: The direction of the periodicity, one of - ``"both"``, ``"x"`` or ``"y"``. 
- :kwarg quadrilateral: (optional), creates quadrilateral mesh. - :kwarg reorder: (optional), should the mesh be reordered - :kwarg distribution_parameters: options controlling mesh - distribution, see :func:`.Mesh` for details. - :kwarg diagonal: (optional), one of ``"crossed"``, ``"left"``, ``"right"``. - Not valid for quad meshes. - :kwarg comm: Optional communicator to build the mesh on. - :kwarg name: Optional name of the mesh. - :kwarg distribution_name: the name of parallel distribution used - when checkpointing; if `None`, the name is automatically - generated. - :kwarg permutation_name: the name of entity permutation (reordering) used - when checkpointing; if `None`, the name is automatically - generated. + Parameters + ---------- + nx + The number of cells in the x direction. + ny + The number of cells in the y direction. + L + The extent in the x and y directions. + direction + The direction of the periodicity, one of ``"both"``, ``"x"`` or ``"y"``. + quadrilateral + Flag indicating whether to create a quadrilateral mesh. + reorder + Flag indicating whether to reorder the mesh. + distribution_parameters + Options controlling mesh distribution, see :func:`.Mesh` for details. + diagonal + The refinement strategy used for non-quadrilateral meshes. Must be + one of ``"crossed"``, ``"left"``, ``"right"``. + comm + Optional communicator to build the mesh on. + name + Optional name of the mesh. + distribution_name + The name of parallel distribution used when checkpointing; if `None`, + the name is automatically generated. + permutation_name + The name of entity permutation (reordering) used when checkpointing; + if `None`, the name is automatically generated. + Returns + ------- + MeshGeometry + The new mesh. 
+ + Notes + ----- If direction == "x" the boundary edges in this mesh are numbered as follows: * 1: plane y == 0 @@ -1069,6 +1052,7 @@ def PeriodicSquareMesh( * 1: plane x == 0 * 2: plane x == L + """ return PeriodicRectangleMesh( nx, @@ -1089,39 +1073,55 @@ def PeriodicSquareMesh( @PETSc.Log.EventDecorator() def PeriodicUnitSquareMesh( - nx, - ny, - direction="both", - reorder=None, - quadrilateral=False, - distribution_parameters=None, - diagonal=None, - comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, - distribution_name=None, - permutation_name=None, + nx: numbers.Integral, + ny: numbers.Integral, + direction: Literal["both", "x", "y"] = "both", + quadrilateral: bool = False, + reorder: bool | None = None, + distribution_parameters: dict | None = None, + diagonal: Literal["crossed", "left", "right"] | None = None, + comm: MPI.Comm = COMM_WORLD, + name: str = DEFAULT_MESH_NAME, + distribution_name: str | None = None, + permutation_name: str | None = None, ): - """Generate a periodic unit square mesh + """Generate a periodic unit square mesh. - :arg nx: The number of cells in the x direction - :arg ny: The number of cells in the y direction - :arg direction: The direction of the periodicity, one of - ``"both"``, ``"x"`` or ``"y"``. - :kwarg quadrilateral: (optional), creates quadrilateral mesh. - :kwarg reorder: (optional), should the mesh be reordered - :kwarg distribution_parameters: options controlling mesh - distribution, see :func:`.Mesh` for details. - :kwarg diagonal: (optional), one of ``"crossed"``, ``"left"``, ``"right"``. - Not valid for quad meshes. - :kwarg comm: Optional communicator to build the mesh on. - :kwarg name: Optional name of the mesh. - :kwarg distribution_name: the name of parallel distribution used - when checkpointing; if `None`, the name is automatically - generated. - :kwarg permutation_name: the name of entity permutation (reordering) used - when checkpointing; if `None`, the name is automatically - generated. 
+ Parameters + ---------- + nx + The number of cells in the x direction. + ny + The number of cells in the y direction. + direction + The direction of the periodicity, one of ``"both"``, ``"x"`` or ``"y"``. + quadrilateral + Flag indicating whether to create a quadrilateral mesh. + reorder + Flag indicating whether to reorder the mesh. + distribution_parameters + Options controlling mesh distribution, see :func:`.Mesh` for details. + diagonal + The refinement strategy used for non-quadrilateral meshes. Must be + one of ``"crossed"``, ``"left"``, ``"right"``. + comm + Optional communicator to build the mesh on. + name + Optional name of the mesh. + distribution_name + The name of parallel distribution used when checkpointing; if `None`, + the name is automatically generated. + permutation_name + The name of entity permutation (reordering) used when checkpointing; + if `None`, the name is automatically generated. + Returns + ------- + MeshGeometry + The new mesh. + + Notes + ----- If direction == "x" the boundary edges in this mesh are numbered as follows: * 1: plane y == 0 @@ -1131,6 +1131,7 @@ def PeriodicUnitSquareMesh( * 1: plane x == 0 * 2: plane x == 1 + """ return PeriodicSquareMesh( nx, @@ -1156,7 +1157,7 @@ def CircleManifoldMesh( distribution_parameters=None, reorder=False, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1196,10 +1197,10 @@ def CircleManifoldMesh( ) ) - plex = mesh.plex_from_cell_list( - 1, cells, vertices, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 1, cells, vertices, comm, _generate_default_mesh_topology_name(name) ) - m = mesh.Mesh( + m = Mesh( plex, dim=2, reorder=reorder, @@ -1210,15 +1211,13 @@ def CircleManifoldMesh( comm=comm, ) if degree > 1: - new_coords = function.Function( - functionspace.VectorFunctionSpace(m, "CG", degree) - ) + new_coords = Function(VectorFunctionSpace(m, "CG", degree)) 
new_coords.interpolate(ufl.SpatialCoordinate(m)) # "push out" to circle new_coords.dat.data[:] *= ( radius / np.linalg.norm(new_coords.dat.data, axis=1) ).reshape(-1, 1) - m = mesh.Mesh( + m = Mesh( new_coords, name=name, distribution_name=distribution_name, @@ -1235,7 +1234,7 @@ def UnitDiskMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1273,8 +1272,8 @@ def UnitDiskMesh( np.int32, ) - plex = mesh.plex_from_cell_list( - 2, cells, vertices, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 2, cells, vertices, comm, _generate_default_mesh_topology_name(name) ) # mark boundary facets @@ -1296,7 +1295,7 @@ def UnitDiskMesh( t = np.max(np.abs(x)) / norm x[:] *= t - m = mesh.Mesh( + m = Mesh( plex, dim=2, reorder=reorder, @@ -1315,7 +1314,7 @@ def UnitBallMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1361,8 +1360,8 @@ def UnitBallMesh( np.int32, ) - plex = mesh.plex_from_cell_list( - 3, cells, vertices, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 3, cells, vertices, comm, _generate_default_mesh_topology_name(name) ) plex.createLabel(dmcommon.FACE_SETS_LABEL) @@ -1383,7 +1382,7 @@ def UnitBallMesh( t = np.sum(np.abs(x)) / norm x[:] *= t - m = mesh.Mesh( + m = Mesh( plex, dim=3, reorder=reorder, @@ -1399,7 +1398,7 @@ def UnitBallMesh( @PETSc.Log.EventDecorator() def UnitTetrahedronMesh( comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1416,10 +1415,10 @@ def UnitTetrahedronMesh( """ coords = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]] cells = [[0, 1, 2, 3]] - plex = mesh.plex_from_cell_list( - 3, cells, coords, comm, 
mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 3, cells, coords, comm, _generate_default_mesh_topology_name(name) ) - m = mesh.Mesh( + m = Mesh( plex, reorder=False, name=name, @@ -1438,7 +1437,7 @@ def TensorBoxMesh( distribution_parameters=None, diagonal="default", comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1526,8 +1525,8 @@ def TensorBoxMesh( ) else: raise ValueError("Unrecognised value for diagonal '%r'", diagonal) - plex = mesh.plex_from_cell_list( - 3, cells, coords, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 3, cells, coords, comm, _generate_default_mesh_topology_name(name) ) nvert = 3 # num. vertices on facet @@ -1565,7 +1564,7 @@ def TensorBoxMesh( if all([abs(face_coords[2 + cdim * i] - z1) < ztol for i in range(nvert)]): plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 6) plex.removeLabel("boundary_faces") - m = mesh.Mesh( + m = Mesh( plex, reorder=reorder, distribution_parameters=distribution_parameters, @@ -1590,7 +1589,7 @@ def BoxMesh( distribution_parameters=None, diagonal="default", comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1624,38 +1623,17 @@ def BoxMesh( if n <= 0 or n % 1: raise ValueError("Number of cells must be a postive integer") if hexahedral: - plex = PETSc.DMPlex().createBoxMesh((nx, ny, nz), lower=(0., 0., 0.), upper=(Lx, Ly, Lz), simplex=False, periodic=False, interpolate=True, comm=comm) - plex.removeLabel(dmcommon.FACE_SETS_LABEL) - nvert = 4 # num. 
vertices on faect - - # Apply boundary IDs - plex.createLabel(dmcommon.FACE_SETS_LABEL) - plex.markBoundaryFaces("boundary_faces") - coords = plex.getCoordinates() - coord_sec = plex.getCoordinateSection() - cdim = plex.getCoordinateDim() - assert cdim == 3 - if plex.getStratumSize("boundary_faces", 1) > 0: - boundary_faces = plex.getStratumIS("boundary_faces", 1).getIndices() - xtol = Lx / (2 * nx) - ytol = Ly / (2 * ny) - ztol = Lz / (2 * nz) - for face in boundary_faces: - face_coords = plex.vecGetClosure(coord_sec, coords, face) - if all([abs(face_coords[0 + cdim * i]) < xtol for i in range(nvert)]): - plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 1) - if all([abs(face_coords[0 + cdim * i] - Lx) < xtol for i in range(nvert)]): - plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 2) - if all([abs(face_coords[1 + cdim * i]) < ytol for i in range(nvert)]): - plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 3) - if all([abs(face_coords[1 + cdim * i] - Ly) < ytol for i in range(nvert)]): - plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 4) - if all([abs(face_coords[2 + cdim * i]) < ztol for i in range(nvert)]): - plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 5) - if all([abs(face_coords[2 + cdim * i] - Lz) < ztol for i in range(nvert)]): - plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 6) - plex.removeLabel("boundary_faces") - m = mesh.Mesh( + plex = PETSc.DMPlex().createBoxMesh( + (nx, ny, nz), + lower=(0., 0., 0.), + upper=(Lx, Ly, Lz), + simplex=False, + periodic=False, + interpolate=True, + comm=comm, + ) + _mark_mesh_boundaries(plex) + return Mesh( plex, reorder=reorder, distribution_parameters=distribution_parameters, @@ -1664,7 +1642,6 @@ def BoxMesh( permutation_name=permutation_name, comm=comm, ) - return m else: xcoords = np.linspace(0, Lx, nx + 1, dtype=np.double) ycoords = np.linspace(0, Ly, ny + 1, dtype=np.double) @@ -1693,7 +1670,7 @@ def CubeMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - 
name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1751,7 +1728,7 @@ def UnitCubeMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1810,7 +1787,7 @@ def PeriodicBoxMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -1866,14 +1843,9 @@ def PeriodicBoxMesh( * 5: plane z == 0 * 6: plane z == Lz - where periodic surfaces are regarded as interior, for which dS integral is to be used. + where periodic surfaces are ignored. """ - for n in (nx, ny, nz): - if n < 3: - raise ValueError( - "3D periodic meshes with fewer than 3 cells are not currently supported" - ) if hexahedral: if len(directions) != 3: raise ValueError(f"directions must have exactly dim (=3) elements : Got {directions}") @@ -1887,7 +1859,8 @@ def PeriodicBoxMesh( sparseLocalize=False, comm=comm, ) - m = mesh.Mesh( + _mark_mesh_boundaries(plex) + return Mesh( plex, reorder=reorder, distribution_parameters=distribution_parameters, @@ -1895,29 +1868,8 @@ def PeriodicBoxMesh( distribution_name=distribution_name, permutation_name=permutation_name, comm=comm) - x, y, z = SpatialCoordinate(m) - V = FunctionSpace(m, "Q", 2) - eps = min([Lx / nx, Ly / ny, Lz / nz]) / 1000. 
- if directions[0]: # x - fx0 = Function(V).interpolate(conditional(Or(x < eps, x > Lx - eps), 1., 0.)) - fx1 = fx0 - else: - fx0 = Function(V).interpolate(conditional(x < eps, 1., 0.)) - fx1 = Function(V).interpolate(conditional(x > Lx - eps, 1., 0.)) - if directions[1]: # y - fy0 = Function(V).interpolate(conditional(Or(y < eps, y > Ly - eps), 1., 0.)) - fy1 = fy0 - else: - fy0 = Function(V).interpolate(conditional(y < eps, 1., 0.)) - fy1 = Function(V).interpolate(conditional(y > Ly - eps, 1., 0.)) - if directions[2]: # z - fz0 = Function(V).interpolate(conditional(Or(z < eps, z > Lz - eps), 1., 0.)) - fz1 = fz0 - else: - fz0 = Function(V).interpolate(conditional(z < eps, 1., 0.)) - fz1 = Function(V).interpolate(conditional(z > Lz - eps, 1., 0.)) - return mesh.RelabeledMesh(m, [fx0, fx1, fy0, fy1, fz0, fz1], [1, 2, 3, 4, 5, 6], name=name) else: + # TODO: When hexahedra -> simplex refinement is implemented this can go away. if tuple(directions) != (True, True, True): raise NotImplementedError("Can only specify directions with hexahedral = True") xcoords = np.arange(0.0, Lx, Lx / nx, dtype=np.double) @@ -1949,10 +1901,10 @@ def PeriodicBoxMesh( [v0, v2, v6, v7], ] cells = np.asarray(cells).reshape(-1, ny, nx, nz).swapaxes(0, 3).reshape(-1, 4) - plex = mesh.plex_from_cell_list( - 3, cells, coords, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 3, cells, coords, comm, _generate_default_mesh_topology_name(name) ) - m = mesh.Mesh( + m = Mesh( plex, reorder=reorder_noop, distribution_parameters=distribution_parameters_no_overlap, @@ -1966,7 +1918,7 @@ def PeriodicBoxMesh( VectorFunctionSpace( m, FiniteElement("DG", tetrahedron, 1, variant="equispaced") ), - name=mesh._generate_default_mesh_coordinates_name(name), + name=_generate_default_mesh_coordinates_name(name), ) new_coordinates.interpolate(m.coordinates) @@ -2007,7 +1959,7 @@ def PeriodicUnitCubeMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - 
name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2057,7 +2009,7 @@ def PeriodicUnitCubeMesh( * 5: plane z == 0 * 6: plane z == 1 - where periodic surfaces are regarded as interior, for which dS integral is to be used. + where periodic surfaces are ignored. """ return PeriodicBoxMesh( @@ -2086,7 +2038,7 @@ def IcosahedralSphereMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2170,16 +2122,16 @@ def IcosahedralSphereMesh( dtype=np.int32, ) - plex = mesh.plex_from_cell_list(2, faces, vertices, comm) + plex = plex_from_cell_list(2, faces, vertices, comm) plex.setRefinementUniform(True) for i in range(refinement_level): plex = plex.refine() - plex.setName(mesh._generate_default_mesh_topology_name(name)) + plex.setName(_generate_default_mesh_topology_name(name)) coords = plex.getCoordinatesLocal().array.reshape(-1, 3) scale = (radius / np.linalg.norm(coords, axis=1)).reshape(-1, 1) coords *= scale - m = mesh.Mesh( + m = Mesh( plex, dim=3, reorder=reorder, @@ -2190,15 +2142,13 @@ def IcosahedralSphereMesh( comm=comm, ) if degree > 1: - new_coords = function.Function( - functionspace.VectorFunctionSpace(m, "CG", degree) - ) + new_coords = Function(VectorFunctionSpace(m, "CG", degree)) new_coords.interpolate(ufl.SpatialCoordinate(m)) # "push out" to sphere new_coords.dat.data[:] *= ( radius / np.linalg.norm(new_coords.dat.data, axis=1) ).reshape(-1, 1) - m = mesh.Mesh( + m = Mesh( new_coords, name=name, distribution_name=distribution_name, @@ -2216,7 +2166,7 @@ def UnitIcosahedralSphereMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2263,7 +2213,7 @@ def OctahedralSphereMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - 
name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2332,14 +2282,14 @@ def OctahedralSphereMesh( for new, idx in enumerate(indices): faces[faces == idx] = new - plex = mesh.plex_from_cell_list(2, faces, vertices, comm) + plex = plex_from_cell_list(2, faces, vertices, comm) plex.setRefinementUniform(True) for i in range(refinement_level): plex = plex.refine() - plex.setName(mesh._generate_default_mesh_topology_name(name)) + plex.setName(_generate_default_mesh_topology_name(name)) # build the initial mesh - m = mesh.Mesh( + m = Mesh( plex, dim=3, reorder=reorder, @@ -2351,8 +2301,8 @@ def OctahedralSphereMesh( ) if degree > 1: # use it to build a higher-order mesh - m = assemble(Interpolate(ufl.SpatialCoordinate(m), VectorFunctionSpace(m, "CG", degree))) - m = mesh.Mesh( + m = assemble(interpolate(ufl.SpatialCoordinate(m), VectorFunctionSpace(m, "CG", degree))) + m = Mesh( m, name=name, distribution_name=distribution_name, @@ -2386,11 +2336,11 @@ def OctahedralSphereMesh( # Make a copy of the coordinates so that we can blend two different # mappings near the pole Vc = m.coordinates.function_space() - Xlatitudinal = assemble(Interpolate( + Xlatitudinal = assemble(interpolate( Constant(radius) * ufl.as_vector([x * scale, y * scale, znew]), Vc )) Vlow = VectorFunctionSpace(m, "CG", 1) - Xlow = assemble(Interpolate(Xlatitudinal, Vlow)) + Xlow = assemble(interpolate(Xlatitudinal, Vlow)) r = ufl.sqrt(Xlow[0] ** 2 + Xlow[1] ** 2 + Xlow[2] ** 2) Xradial = Constant(radius) * Xlow / r @@ -2417,7 +2367,7 @@ def UnitOctahedralSphereMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2599,7 +2549,7 @@ def CubedSphereMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2629,11 
+2579,11 @@ def CubedSphereMesh( raise ValueError("Mesh coordinate degree must be at least 1") cells, coords = _cubedsphere_cells_and_coords(radius, refinement_level) - plex = mesh.plex_from_cell_list( - 2, cells, coords, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 2, cells, coords, comm, _generate_default_mesh_topology_name(name) ) - m = mesh.Mesh( + m = Mesh( plex, dim=3, reorder=reorder, @@ -2645,15 +2595,13 @@ def CubedSphereMesh( ) if degree > 1: - new_coords = function.Function( - functionspace.VectorFunctionSpace(m, "Q", degree) - ) + new_coords = Function(VectorFunctionSpace(m, "Q", degree)) new_coords.interpolate(ufl.SpatialCoordinate(m)) # "push out" to sphere new_coords.dat.data[:] *= ( radius / np.linalg.norm(new_coords.dat.data, axis=1) ).reshape(-1, 1) - m = mesh.Mesh( + m = Mesh( new_coords, distribution_name=distribution_name, permutation_name=permutation_name, @@ -2670,7 +2618,7 @@ def UnitCubedSphereMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2713,7 +2661,7 @@ def TorusMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2780,10 +2728,10 @@ def TorusMesh( # two cells per cell above... 
cells = cells[:, [0, 1, 3, 1, 2, 3]].reshape(-1, 3) - plex = mesh.plex_from_cell_list( - 2, cells, vertices, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 2, cells, vertices, comm, _generate_default_mesh_topology_name(name) ) - m = mesh.Mesh( + m = Mesh( plex, dim=3, reorder=reorder, @@ -2804,7 +2752,7 @@ def AnnulusMesh( nt=32, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2836,12 +2784,12 @@ def AnnulusMesh( name=base_name, distribution_name=distribution_name, permutation_name=permutation_name) - bar = mesh.ExtrudedMesh(base, layers=nt, layer_height=2 * np.pi / nt, extrusion_type="uniform", periodic=True) + bar = ExtrudedMesh(base, layers=nt, layer_height=2 * np.pi / nt, extrusion_type="uniform", periodic=True) x, y = ufl.SpatialCoordinate(bar) V = bar.coordinates.function_space() coord = Function(V).interpolate(ufl.as_vector([x * ufl.cos(y), x * ufl.sin(y)])) - annulus = mesh.make_mesh_from_coordinates(coord.topological, name) - annulus.topology.name = mesh._generate_default_mesh_topology_name(name) + annulus = make_mesh_from_coordinates(coord.topological, name) + annulus.topology.name = _generate_default_mesh_topology_name(name) annulus._base_mesh = base return annulus @@ -2855,7 +2803,7 @@ def SolidTorusMesh( reorder=None, distribution_parameters=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -2889,15 +2837,15 @@ def SolidTorusMesh( x, y = ufl.SpatialCoordinate(unit) V = unit.coordinates.function_space() coord = Function(V).interpolate(ufl.as_vector([r * x + R, r * y])) - disk = mesh.make_mesh_from_coordinates(coord.topological, base_name) - disk.topology.name = mesh._generate_default_mesh_topology_name(base_name) + disk = make_mesh_from_coordinates(coord.topological, base_name) + disk.topology.name = 
_generate_default_mesh_topology_name(base_name) disk.topology.topology_dm.setName(disk.topology.name) - bar = mesh.ExtrudedMesh(disk, layers=nR, layer_height=2 * np.pi / nR, extrusion_type="uniform", periodic=True) + bar = ExtrudedMesh(disk, layers=nR, layer_height=2 * np.pi / nR, extrusion_type="uniform", periodic=True) x, y, z = ufl.SpatialCoordinate(bar) V = bar.coordinates.function_space() coord = Function(V).interpolate(ufl.as_vector([x * ufl.cos(z), x * ufl.sin(z), -y])) - torus = mesh.make_mesh_from_coordinates(coord.topological, name) - torus.topology.name = mesh._generate_default_mesh_topology_name(name) + torus = make_mesh_from_coordinates(coord.topological, name) + torus.topology.name = _generate_default_mesh_topology_name(name) torus._base_mesh = disk return torus @@ -2914,7 +2862,7 @@ def CylinderMesh( distribution_parameters=None, diagonal=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -3000,7 +2948,7 @@ def CylinderMesh( # 0-----1 offset = np.arange(nl, dtype=np.int32) * nr - origquads = np.row_stack(tuple(ring_cells + i for i in offset)) + origquads = np.vstack(tuple(ring_cells + i for i in offset)) cells = np.zeros((origquads.shape[0] * 4, 3), dtype=np.int32) cellidx = 0 newvertices = range(len(origvertices), len(origvertices) + len(extras)) @@ -3013,7 +2961,7 @@ def CylinderMesh( else: offset = np.arange(nl, dtype=np.int32) * nr - cells = np.row_stack(tuple(ring_cells + i for i in offset)) + cells = np.vstack(tuple(ring_cells + i for i in offset)) if not quadrilateral: if diagonal == "left": idx = [0, 1, 3, 1, 2, 3] @@ -3033,8 +2981,8 @@ def CylinderMesh( elif longitudinal_direction != "z": raise ValueError("Unknown longitudinal direction '%s'" % longitudinal_direction) - plex = mesh.plex_from_cell_list( - 2, cells, vertices, comm, mesh._generate_default_mesh_topology_name(name) + plex = plex_from_cell_list( + 2, cells, vertices, comm, 
_generate_default_mesh_topology_name(name) ) plex.createLabel(dmcommon.FACE_SETS_LABEL) @@ -3058,7 +3006,7 @@ def CylinderMesh( plex.setLabelValue(dmcommon.FACE_SETS_LABEL, face, 2) plex.removeLabel("boundary_faces") - return mesh.Mesh( + return Mesh( plex, dim=3, reorder=reorder, @@ -3082,7 +3030,7 @@ def PartiallyPeriodicRectangleMesh( distribution_parameters=None, diagonal=None, comm=COMM_WORLD, - name=mesh.DEFAULT_MESH_NAME, + name=DEFAULT_MESH_NAME, distribution_name=None, permutation_name=None, ): @@ -3108,74 +3056,129 @@ def PartiallyPeriodicRectangleMesh( when checkpointing; if `None`, the name is automatically generated. - If direction == "x" the boundary edges in this mesh are numbered as follows: - - * 1: plane y == 0 - * 2: plane y == Ly - - If direction == "y" the boundary edges are: + The boundary edges in this mesh are numbered as follows: * 1: plane x == 0 * 2: plane x == Lx - """ - if direction not in ("x", "y"): - raise ValueError("Unsupported periodic direction '%s'" % direction) + * 3: plane y == 0 + * 4: plane y == Ly - # handle x/y directions: na, La are for the periodic axis - na, nb = nx, ny - if direction == "y": - na, nb = ny, nx + If periodic in the 'x' direction then boundary edges 1 and 2 are empty, and + if periodic in 'y' then 3 and 4 are empty. - if na < 3: - raise ValueError( - "2D periodic meshes with fewer than 3 cells in each direction are not currently supported" - ) + """ + warnings.warn( + "'PartiallyPeriodicRectangleMesh' is deprecated. 
Please use " + "'PeriodicRectangleMesh' instead, passing 'direction=\"x\"' or " + "'direction=\"y\"'.", + FutureWarning, + ) - m = CylinderMesh( - na, - nb, - 1.0, - 1.0, - longitudinal_direction="z", + return PeriodicRectangleMesh( + nx, + ny, + Lx, + Ly, + direction=direction, quadrilateral=quadrilateral, - reorder=reorder_noop, - distribution_parameters=distribution_parameters_no_overlap, + reorder=reorder, + distribution_parameters=distribution_parameters, diagonal=diagonal, comm=comm, name=name, distribution_name=distribution_name, permutation_name=permutation_name, ) - coord_family = "DQ" if quadrilateral else "DG" - cell = "quadrilateral" if quadrilateral else "triangle" - indicator = Function(FunctionSpace(m, coord_family, 0)) - coord_fs = VectorFunctionSpace( - m, FiniteElement(coord_family, cell, 1, variant="equispaced"), dim=2 - ) - new_coordinates = Function( - coord_fs, name=mesh._generate_default_mesh_coordinates_name(name) - ) - x, y, z = SpatialCoordinate(m) - eps = 1.e-14 - indicator.interpolate(conditional(gt(real(y), 0), 0., 1.)) - if direction == "x": - transform = as_tensor([[Lx, 0.], [0., Ly]]) - else: - transform = as_tensor([[0., Lx], [Ly, 0]]) - new_coordinates.interpolate(dot( - transform, - as_vector(( - conditional(gt(real(x), real(1. - eps)), indicator, # Periodic break. - # Unwrap rest of circle. 
- atan2(real(-y), real(-x))/(2 * pi) + 0.5), - z - )) - )) - return _postprocess_periodic_mesh(new_coordinates, - comm, - distribution_parameters, - reorder, - name, - distribution_name, - permutation_name) + +def _mark_mesh_boundaries( + plex: PETSc.DMPlex, + plex_to_firedrake_boundary_labels: Mapping[int, int] | None = None, +) -> None: + """Reorder the 'Face Sets' label of the DMPlex to match Firedrake ordering.""" + # TODO: Remove the awkward face set mapping for 2D periodic domains + + if plex_to_firedrake_boundary_labels is None: + match plex.getDimension(): + case 0: + plex_to_firedrake_boundary_labels = {} + case 1: + # Firedrake and DMPlex conventions agree (left is 1, right is 2) + plex_to_firedrake_boundary_labels = {1: 1, 2: 2} + case 2: + # DMPlex Firedrake + # + # 3 4 + # +-----+ +-----+ + # | | | | + # 4| |2 1| |2 + # | | | | + # +-----+ +-----+ + # 1 3 + plex_to_firedrake_boundary_labels = {1: 3, 2: 2, 3: 4, 4: 1} + case 3: + # DMPlex Firedrake + # + # +-------+ +-------+ + # / /| / /| + # / / | / / | + # / 4/3 / | / 4/3 / | + # / / | / / | + # / / | / / | + # +-------+ 5/6 + +-------+ 2/1 + + # | | / | | / + # | | / | | / + # y | 1/2 | / z y | 5/6 | / z + # | | / | | / + # | |/ | |/ + # +-------+ +-------+ + # O x O x + # + # 'x/y' means that 'x' is the label for the visible face and 'y' + # the label for the opposite hidden face. 
+ plex_to_firedrake_boundary_labels = {1: 5, 2: 6, 3: 3, 4: 4, 5: 2, 6: 1} + case _: + raise AssertionError + + # Get the original label + plex_boundary_label = plex.getLabel(dmcommon.FACE_SETS_LABEL) + plex.removeLabel(dmcommon.FACE_SETS_LABEL) + + # Now create the new one + plex.createLabel(dmcommon.FACE_SETS_LABEL) + firedrake_boundary_label = plex.getLabel(dmcommon.FACE_SETS_LABEL) + for plex_value, firedrake_value in plex_to_firedrake_boundary_labels.items(): + points = plex_boundary_label.getStratumIS(plex_value) + if points: + firedrake_boundary_label.setStratumIS(firedrake_value, points) + else: + # create an empty stratum + firedrake_boundary_label.addStratum(firedrake_value) + + +def _refine_quads_to_triangles( + plex: PETSc.DMPlex, + diagonal: Literal["crossed", "left", "right"], +) -> PETSc.DMPlex: + match diagonal: + case "crossed": + options = {"dm_refine": 1, "dm_plex_transform_type": "refine_alfeld"} + case "left": + options = { + "dm_refine": 1, + "dm_plex_transform_type": "refine_tosimplex", + "dm_plex_transform_tosimplex_reflect": True, + } + case "right": + options = {"dm_refine": 1, "dm_plex_transform_type": "refine_tosimplex"} + case _: + raise AssertionError(f"'diagonal' type '{diagonal}' is not recognised") + + tr = PETSc.DMPlexTransform().create(comm=plex.comm) + petsctools.set_from_options(tr, options) + tr.setDM(plex) + tr.setUp() + with petsctools.inserted_options(tr): + refined_plex = tr.apply(plex) + tr.destroy() + return refined_plex diff --git a/firedrake/utils.py b/firedrake/utils.py index 901e2694ae..1df989a824 100644 --- a/firedrake/utils.py +++ b/firedrake/utils.py @@ -2,12 +2,14 @@ import collections.abc import warnings from decorator import decorator -from pyop2.utils import cached_property # noqa: F401 from pyop2.datatypes import ScalarType, as_cstr from pyop2.datatypes import RealType # noqa: F401 from pyop2.datatypes import IntType # noqa: F401 from pyop2.datatypes import as_ctypes # noqa: F401 from pyop2.mpi import MPI 
+from petsc4py import PETSc +from functools import cache +from firedrake.exceptions import UnrecognisedDeviceError import petsctools @@ -24,6 +26,64 @@ SLATE_SUPPORTS_COMPLEX = False +@cache +def device_matrix_type(warn: bool = True) -> str | None: + r"""Get device matrix type + + Attempt to initialise a GPU device and return the PETSc mat_type + compatible with that device, or None if no device is detected. + Typical Usage Example: + mat_type = device_matrix_type(pc.comm.rank == 0) + + Parameters + ---------- + warn + Emit a warning containing the reason a device mat_type + has not been returned. Defaults to True. + + Raises + ------ + UnrecognisedDeviceError + Raised when PETSc initialises a GPU device that + Firedrake does not understand + + Returns + ------- + str | None + The PETSc mat_type compatible with the GPU device detected on + this system or None + + """ + _device_mat_type_map = {"HOST": None, "CUDA": "aijcusparse"} + try: + dev = PETSc.Device.create() + except PETSc.Error: + # Could not initialise device - not a failure condition as this could + # be a GPU-enabled PETSc installation running on a CPU-only host. + if warn: + warnings.warn( + "This installation of Firedrake is GPU-enabled, but no GPU device has been detected" + ) + return None + dev_type = dev.getDeviceType() + dev.destroy() + if dev_type not in _device_mat_type_map: + raise UnrecognisedDeviceError( + f"Unknown device type: {dev_type} initialised by PETSc. Firedrake " + f"currently understands {', '.join([k for k in _device_mat_type_map if k != 'HOST'])}" + " devices" + ) + + if warn: + if dev_type == "HOST": + warnings.warn( + "This installation of Firedrake is not GPU-enabled, to enable GPU functionality " + "PETSc will need to be rebuilt with some GPU capability appropriate for this system " + "(e.g. '--with-cuda=1')."
+ ) + return _device_mat_type_map[dev_type] + + def _new_uid(comm): uid = comm.Get_attr(FIREDRAKE_UID) if uid is None: @@ -168,3 +228,20 @@ def wrapper(*args, **kwargs): return fn(*args, **kwargs) return wrapper return decorator + + +def check_netgen_installed() -> None: + """Check that netgen and ngsPETSc are available. + + If they are not an import error is raised. + + """ + try: + import netgen # noqa: F401 + import ngsPETSc # noqa: F401 + except ImportError: + raise ImportError( + "Unable to import netgen and ngsPETSc. Please ensure that they " + "are installed and available to Firedrake (see " + "https://www.firedrakeproject.org/install.html#netgen)." + ) diff --git a/firedrake/variational_solver.py b/firedrake/variational_solver.py index 92abe6c138..4031bf3c5c 100644 --- a/firedrake/variational_solver.py +++ b/firedrake/variational_solver.py @@ -15,6 +15,7 @@ from firedrake.bcs import DirichletBC, EquationBC, extract_subdomain_ids, restricted_function_space from firedrake.adjoint_utils import NonlinearVariationalProblemMixin, NonlinearVariationalSolverMixin from ufl import replace, Form +from functools import cached_property __all__ = ["LinearVariationalProblem", "LinearVariationalSolver", @@ -94,7 +95,7 @@ def __init__(self, F, u, bcs=None, J=None, if restrict and bcs: V_res = restricted_function_space(V, extract_subdomain_ids(bcs)) bcs = [bc.reconstruct(V=V_res, indices=bc._indices) for bc in bcs] - self.u_restrict = Function(V_res).interpolate(u) + self.u_restrict = Function(V_res) v_res, u_res = TestFunction(V_res), TrialFunction(V_res) if isinstance(F, Form): F_arg, = F.arguments() @@ -128,7 +129,7 @@ def dirichlet_bcs(self): for bc in self.bcs: yield from bc.dirichlet_bcs() - @utils.cached_property + @cached_property def dm(self): return self.u_restrict.function_space().dm @@ -371,6 +372,10 @@ def solve(self, bounds=None): problem_dms.append(dm) problem_dms.append(solution_dm) + if problem.restrict: + # Transfer the initial guess into the 
RestrictedFunctionSpace + problem.u_restrict.assign(problem.u) + if self._ctx.pre_apply_bcs: for bc in problem.dirichlet_bcs(): bc.apply(problem.u_restrict) @@ -394,12 +399,12 @@ def solve(self, bounds=None): work.copy(u) self._setup = True if problem.restrict: - problem.u.interpolate(problem.u_restrict) + problem.u.assign(problem.u_restrict) solving_utils.check_snes_convergence(self.snes) # Grab the comm associated with the `_problem` and call PETSc's garbage cleanup routine - comm = self._problem.u_restrict.function_space().mesh()._comm - PETSc.garbage_cleanup(comm=comm) + comm = self._problem.u_restrict.function_space().mesh().comm + PETSc.garbage_cleanup(comm) class LinearVariationalProblem(NonlinearVariationalProblem): diff --git a/pyop2/codegen/builder.py b/pyop2/codegen/builder.py index 82cd5dfe21..d05e5aeeae 100644 --- a/pyop2/codegen/builder.py +++ b/pyop2/codegen/builder.py @@ -1,7 +1,7 @@ import itertools from abc import ABCMeta, abstractmethod from collections import OrderedDict -from functools import reduce +from functools import cached_property, reduce import numpy from pyop2.global_kernel import (GlobalKernelArg, DatKernelArg, MixedDatKernelArg, @@ -18,7 +18,6 @@ from pyop2.datatypes import IntType, OpaqueType from pyop2.op2 import (ALL, INC, MAX, MIN, ON_BOTTOM, ON_INTERIOR_FACETS, ON_TOP, READ, RW, WRITE) -from pyop2.utils import cached_property MatType = OpaqueType("Mat") @@ -241,9 +240,16 @@ def emit_unpack_instruction(self, **kwargs): class GlobalPack(Pack): - def __init__(self, outer, access, init_with_zero=False): + def __init__(self, outer, access, double, init_with_zero=False): + if double and access is not READ: + raise NotImplementedError( + "'double' is only valid for globals that are read (Firedrake " + "coefficients)" + ) + self.outer = outer self.access = access + self.double = double self.init_with_zero = init_with_zero def kernel_arg(self, loop_indices=None): @@ -258,7 +264,7 @@ def pack(self, loop_indices=None): return self._pack 
shape = self.outer.shape - if self.access is READ: + if self.access is READ and not self.double: # No packing required return self.outer # We don't need to pack for memory layout, however packing @@ -266,18 +272,29 @@ def pack(self, loop_indices=None): # vectorisation loop transformations privatise these reduction # variables. The extra memory movement cost is minimal. loop_indices = self.pick_loop_indices(*loop_indices) + if self.init_with_zero: also_zero = {MIN, MAX} else: also_zero = set() + + # If 'double' is True then we need something like: + # + # for i < 2; + # for j < dim: + # t0[i, j] = glob[j] + rhs_multiindex = MultiIndex(*(Index(e) for e in shape)) + if self.double: + lhs_multiindex = MultiIndex(Index(2), *rhs_multiindex.children) + else: + lhs_multiindex = rhs_multiindex + if self.access in {INC, WRITE} | also_zero: val = Zero((), self.outer.dtype) - multiindex = MultiIndex(*(Index(e) for e in shape)) - self._pack = Materialise(PackInst(loop_indices), val, multiindex) + self._pack = Materialise(PackInst(loop_indices), val, lhs_multiindex) elif self.access in {READ, RW, MIN, MAX} - also_zero: - multiindex = MultiIndex(*(Index(e) for e in shape)) - expr = Indexed(self.outer, multiindex) - self._pack = Materialise(PackInst(loop_indices), expr, multiindex) + expr = Indexed(self.outer, rhs_multiindex) + self._pack = Materialise(PackInst(loop_indices), expr, lhs_multiindex) else: raise ValueError("Don't know how to initialise pack for '%s' access" % self.access) return self._pack @@ -834,7 +851,7 @@ def add_argument(self, arg): elif isinstance(arg, GlobalKernelArg): argument = Argument(arg.dim, dtype, pfx="glob") - pack = GlobalPack(argument, access, + pack = GlobalPack(argument, access, double=arg.double, init_with_zero=self.requires_zeroed_output_arguments) self.arguments.append(argument) elif isinstance(arg, DatKernelArg): diff --git a/pyop2/codegen/representation.py b/pyop2/codegen/representation.py index 5277094d96..ad07764ee2 100644 --- 
a/pyop2/codegen/representation.py +++ b/pyop2/codegen/representation.py @@ -1,8 +1,7 @@ import numbers import itertools -from functools import partial +from functools import cached_property, partial from collections import defaultdict -from pyop2.utils import cached_property import numpy from abc import ABCMeta from pyop2.codegen.node import Node as NodeBase diff --git a/pyop2/compilation.py b/pyop2/compilation.py index b37f004d30..f37599382b 100644 --- a/pyop2/compilation.py +++ b/pyop2/compilation.py @@ -50,13 +50,13 @@ from typing import Hashable from random import randint +import petsctools from pyop2 import mpi from pyop2.caching import parallel_cache, memory_cache, default_parallel_hashkey, DictLikeDiskAccess, as_hexdigest from pyop2.configuration import configuration from pyop2.logger import warning, debug, progress, INFO from pyop2.exceptions import CompilationError -from pyop2.utils import get_petsc_variables from petsc4py import PETSc @@ -73,8 +73,6 @@ def _check_hashes(x, y, datatype): # _and_ per user for shared machines _EXE_HASH = md5(sys.executable.encode()).hexdigest()[-6:] MEM_TMP_DIR = Path(gettempdir()).joinpath(f"pyop2-tempcache-uid{os.getuid()}").joinpath(_EXE_HASH) -# PETSc Configuration -petsc_variables = get_petsc_variables() def set_default_compiler(compiler): @@ -269,11 +267,11 @@ def __eq__(self, other) -> bool: @property def cc(self): - return self._cc or petsc_variables["CC"] + return self._cc or petsctools.get_petscvariables()["CC"] @property def cxx(self): - return self._cxx or petsc_variables["CXX"] + return self._cxx or petsctools.get_petscvariables()["CXX"] @property def ld(self): @@ -453,9 +451,9 @@ def load(code, extension, cppargs=(), ldargs=(), comm=None): else: # Sniff compiler from file extension, if extension == "cpp": - exe = petsc_variables["CXX"] + exe = petsctools.get_petscvariables()["CXX"] else: - exe = petsc_variables["CC"] + exe = petsctools.get_petscvariables()["CC"] compiler = sniff_compiler(exe, comm) debug = 
configuration["debug"] @@ -565,78 +563,63 @@ def make_so(compiler, code, extension, comm): exe = compiler.cc compiler_flags = compiler.cflags - # Compile on compilation communicator (ccomm) rank 0 - if ccomm.rank == 0: - # Track exceptions as values so that they may be raised collectively + def compile_single_rank(): + # Adding random 2-digit hexnum avoids using excessive filesystem inodes + tempdir = MEM_TMP_DIR.joinpath(f"{randint(0, 255):02x}") + tempdir.mkdir(parents=True, exist_ok=True) + # This path + filename should be unique + descriptor, filename = mkstemp(suffix=f".{extension}", dir=tempdir, text=True) + filename = Path(filename) + + cname = filename + oname = filename.with_suffix(".o") + soname = filename.with_suffix(".so") + logfile = filename.with_suffix(".log") + errfile = filename.with_suffix(".err") + try: - # Adding random 2-digit hexnum avoids using excessive filesystem inodes - tempdir = MEM_TMP_DIR.joinpath(f"{randint(0, 255):02x}") - tempdir.mkdir(parents=True, exist_ok=True) - # This path + filename should be unique - descriptor, filename = mkstemp(suffix=f".{extension}", dir=tempdir, text=True) - filename = Path(filename) - - cname = filename - oname = filename.with_suffix(".o") - soname = filename.with_suffix(".so") - logfile = filename.with_suffix(".log") - errfile = filename.with_suffix(".err") - except BaseException as e: - result = e - else: - try: - with progress(INFO, 'Compiling wrapper'): - # Write source code to disk - with open(cname, "w") as fh: - fh.write(code) - os.close(descriptor) - - if not compiler.ld: - # Compile and link - cc = (exe,) + compiler_flags + ('-o', str(soname), str(cname)) + compiler.ldflags - _run(cc, logfile, errfile) - else: - # Compile - cc = (exe,) + compiler_flags + ('-c', '-o', str(oname), str(cname)) - _run(cc, logfile, errfile) - # Extract linker specific "cflags" from ldflags and link - ld = tuple(shlex.split(compiler.ld)) + ('-o', str(soname), str(oname)) + tuple(expandWl(compiler.ldflags)) - _run(ld, 
logfile, errfile, step="Linker", filemode="a") - - result = soname - except subprocess.CalledProcessError as e: - msg = dedent(f""" - Command "{e.cmd}" return error status {e.returncode}. - Unable to compile code - """) - if os.environ.get("FIREDRAKE_CI", False): - msg += dedent(f""" - Code is: - {code} - """) - with open(errfile) as err: - msg += dedent(f""" - Compiler output is: - {''.join(err.readlines())} - """) + with progress(INFO, 'Compiling wrapper'): + # Write source code to disk + with open(cname, "w") as fh: + fh.write(code) + os.close(descriptor) + + if not compiler.ld: + # Compile and link + cc = (exe,) + compiler_flags + ('-o', str(soname), str(cname)) + compiler.ldflags + _run(cc, logfile, errfile) else: + # Compile + cc = (exe,) + compiler_flags + ('-c', '-o', str(oname), str(cname)) + _run(cc, logfile, errfile) + # Extract linker specific "cflags" from ldflags and link + ld = tuple(shlex.split(compiler.ld)) + ('-o', str(soname), str(oname)) + tuple(expandWl(compiler.ldflags)) + _run(ld, logfile, errfile, step="Linker", filemode="a") + except subprocess.CalledProcessError as e: + msg = dedent(f""" + Command "{e.cmd}" return error status {e.returncode}. 
+ Unable to compile code + """) + if os.environ.get("FIREDRAKE_CI", False): + msg += dedent(f""" + Code is: + {code} + """) + with open(errfile) as err: msg += dedent(f""" - Compile log in {logfile!s} - Compile errors in {errfile!s} + Compiler output is: + {''.join(err.readlines())} """) - result = CompilationError(msg) - result.__cause__ = e # equivalent to 'raise XXX from e' - except BaseException as e: - # catch and broadcast all exceptions to prevent deadlocks - result = e - else: - result = None + else: + msg += dedent(f""" + Compile log in {logfile!s} + Compile errors in {errfile!s} + """) + raise CompilationError(msg) from e + else: + return soname - result = ccomm.bcast(result) - if isinstance(result, BaseException): - raise result - else: - return result + return mpi.safe_noncollective(ccomm, compile_single_rank, root=0) def _run(cc, logfile, errfile, step="Compilation", filemode="w"): diff --git a/pyop2/global_kernel.py b/pyop2/global_kernel.py index 38a1acb726..ea7898dae7 100644 --- a/pyop2/global_kernel.py +++ b/pyop2/global_kernel.py @@ -1,12 +1,14 @@ import collections.abc import ctypes from dataclasses import dataclass +from functools import cached_property import os from typing import Optional, Tuple import itertools import loopy as lp import numpy as np +import petsctools import pytools from loopy.codegen.result import process_preambles from petsc4py import PETSc @@ -18,7 +20,6 @@ from pyop2.datatypes import IntType, as_ctypes from pyop2.codegen.rep2loopy import generate from pyop2.types import IterationRegion, Constant, READ -from pyop2.utils import cached_property, get_petsc_dir # We set eq=False to force identity-based hashing. This is required for when @@ -96,10 +97,11 @@ class GlobalKernelArg: """ dim: Tuple[int, ...] 
+ double: bool = False @property def cache_key(self): - return type(self), self.dim + return type(self), self.dim, self.double @property def maps(self): @@ -386,18 +388,21 @@ def num_flops(self, iterset): @cached_property def _cppargs(self): - cppargs = [f"-I{d}/include" for d in get_petsc_dir()] - cppargs.extend(f"-I{d}" for d in self.local_kernel.include_dirs) - cppargs.append(f"-I{os.path.abspath(os.path.dirname(__file__))}") - return tuple(cppargs) + return ( + *petsctools.get_petsc_dirs(prefix="-I", subdir="include"), + *[f"-I{d}" for d in self.local_kernel.include_dirs], + f"-I{os.path.abspath(os.path.dirname(__file__))}" + ) @cached_property def _ldargs(self): - ldargs = [f"-L{d}/lib" for d in get_petsc_dir()] - ldargs.extend(f"-Wl,-rpath,{d}/lib" for d in get_petsc_dir()) - ldargs.extend(["-lpetsc", "-lm"]) - ldargs.extend(self.local_kernel.ldargs) - return tuple(ldargs) + return ( + *petsctools.get_petsc_dirs(prefix="-L", subdir="lib"), + *petsctools.get_petsc_dirs(prefix="-Wl,-rpath,", subdir="lib"), + "-lpetsc", + "-lm", + *self.local_kernel.ldargs, + ) @memory_cache(hashkey=lambda knl, _: knl.cache_key) diff --git a/pyop2/local_kernel.py b/pyop2/local_kernel.py index 79639c8ba7..a65e626bd9 100644 --- a/pyop2/local_kernel.py +++ b/pyop2/local_kernel.py @@ -1,5 +1,6 @@ import abc from dataclasses import dataclass +from functools import cached_property import hashlib from typing import Union @@ -13,7 +14,7 @@ from pyop2.datatypes import ScalarType from pyop2.exceptions import NameTypeError from pyop2.types import Access -from pyop2.utils import cached_property, validate_type +from pyop2.utils import validate_type @dataclass(frozen=True) diff --git a/pyop2/mpi.py b/pyop2/mpi.py index f8bb0f4829..8c4900fa25 100644 --- a/pyop2/mpi.py +++ b/pyop2/mpi.py @@ -34,6 +34,7 @@ """PyOP2 MPI communicator.""" +from typing import Any, Callable from petsc4py import PETSc from mpi4py import MPI # noqa from itertools import count @@ -55,7 +56,6 @@ "COMM_WORLD", 
"COMM_SELF", "MPI", - "internal_comm", "is_pyop2_comm", "incref", "decref", @@ -64,11 +64,8 @@ # These are user-level communicators, we never send any messages on # them inside PyOP2. -COMM_WORLD = PETSc.COMM_WORLD.tompi4py().Dup() -COMM_WORLD.Set_name("PYOP2_COMM_WORLD") - -COMM_SELF = PETSc.COMM_SELF.tompi4py().Dup() -COMM_SELF.Set_name("PYOP2_COMM_SELF") +COMM_WORLD = MPI.COMM_WORLD +COMM_SELF = MPI.COMM_SELF # Creation index counter _COMM_CIDX = count() @@ -228,16 +225,26 @@ def delcomm_outer(comm, keyval, icomm): :arg icomm: The inner communicator, should have a reference to ``comm``. """ - # Use debug printer that is safe to use at exit time - debug = finalize_safe_debug() if keyval not in (innercomm_keyval, compilationcomm_keyval): raise PyOP2CommError("Unexpected keyval") + # Use debug printer that is safe to use at exit time + debug = finalize_safe_debug() if keyval == innercomm_keyval: debug(f'Deleting innercomm keyval on {comm.name}') if keyval == compilationcomm_keyval: debug(f'Deleting compilationcomm keyval on {comm.name}') + # During finalisation the inner comms are cleaned up first so can be null here + if icomm == MPI.COMM_NULL: + debug("Inner comm is MPI_COMM_NULL") + return + + # Disable the garbage collector during cleanup - we don't want to trigger + # any further destructors as this is progressing (believe me, ask how I know) + gc_was_enabled = gc.isenabled() + gc.disable() + ocomm = icomm.Get_attr(outercomm_keyval) if ocomm is None: raise PyOP2CommError("Inner comm does not have expected reference to outer comm") @@ -257,7 +264,6 @@ def delcomm_outer(comm, keyval, icomm): cidx = icomm.Get_attr(cidx_keyval) cidx = cidx[0] del _DUPED_COMM_DICT[cidx] - gc.collect() refcount = icomm.Get_attr(refcount_keyval) if refcount[0] > 1: # In the case where `comm` is a custom user communicator there may be references @@ -269,6 +275,9 @@ def delcomm_outer(comm, keyval, icomm): ) icomm.Free() + if gc_was_enabled: + gc.enable() + # Reference count, creation 
index, inner/outer/compilation communicator # attributes for internal communicators @@ -557,6 +566,44 @@ def finalize_safe_debug(): return debug +def safe_noncollective(comm: MPI.Comm, func: Callable[[], Any], *, root: int) -> Any: + """Run a function on a single rank of ``comm`` in a deadlock safe way. + + If an exception is raised on the active rank then this is caught and + raised collectively. + + Parameters + ---------- + comm + The communicator. + func + The operation to be performed on a single rank. This should be a + callable that takes no arguments. + root + The rank performing the operation. + + Returns + ------- + Any + The result of ``func``, broadcasted to all ranks. + + """ + if comm.rank == root: + try: + result = func() + except BaseException as e: + result = e + else: + result = None + + with temp_internal_comm(comm) as icomm: + result = icomm.bcast(result, root=root) + if isinstance(result, BaseException): + raise result + else: + return result + + @atexit.register def _free_comms(): """Free all outstanding communicators.""" @@ -571,15 +618,7 @@ def _free_comms(): debug("STATE0") debug(pyop2_comm_status()) - debug("Freeing PYOP2_COMM_WORLD") - COMM_WORLD.Free() - debug("STATE1") - debug(pyop2_comm_status()) - - debug("Freeing PYOP2_COMM_SELF") - COMM_SELF.Free() - debug("STATE2") - debug(pyop2_comm_status()) + # We free the comms in order because comm destruction is collective debug(f"Freeing comms in list (length {len(_DUPED_COMM_DICT)})") for key in sorted(_DUPED_COMM_DICT.keys(), reverse=True): comm = _DUPED_COMM_DICT[key] diff --git a/pyop2/parloop.py b/pyop2/parloop.py index c70f4c9fb7..6392bc889f 100644 --- a/pyop2/parloop.py +++ b/pyop2/parloop.py @@ -2,6 +2,7 @@ import itertools import operator from dataclasses import dataclass +from functools import cached_property from typing import Any, Optional, Tuple import loopy as lp @@ -18,7 +19,6 @@ from pyop2.types import (Access, Global, AbstractDat, Dat, DatView, MixedDat, Mat, Set, MixedSet, 
ExtrudedSet, Subset, Map, ComposedMap, MixedMap) from pyop2.types.data_carrier import DataCarrier -from pyop2.utils import cached_property class ParloopArg(abc.ABC): @@ -189,7 +189,7 @@ def __init__(self, global_knl, iterset, arguments): self.global_kernel = global_knl self.iterset = iterset - self.comm = mpi.internal_comm(iterset.comm, self) + self.comm = iterset.comm self.arguments, self.reduced_globals = self.prepare_reduced_globals(arguments, global_knl) @property @@ -296,7 +296,8 @@ def replace_lgmaps(self): olgmaps = [] for m, lgmaps in zip(pl_arg.data, pl_arg.lgmaps): olgmaps.append(m.handle.getLGMap()) - m.handle.setLGMap(*lgmaps) + if m.handle.type != "is": + m.handle.setLGMap(*lgmaps) orig_lgmaps.append(olgmaps) return tuple(orig_lgmaps) @@ -309,7 +310,8 @@ def restore_lgmaps(self, orig_lgmaps): for arg, d in reversed(list(zip(self.global_kernel.arguments, self.arguments))): if isinstance(arg, (MatKernelArg, MixedMatKernelArg)) and d.lgmaps is not None: for m, lgmaps in zip(d.data, orig_lgmaps.pop()): - m.handle.setLGMap(*lgmaps) + if m.handle.type != "is": + m.handle.setLGMap(*lgmaps) @cached_property def _has_mats(self): @@ -760,6 +762,7 @@ def parloop(knl, *args, **kwargs): raise KernelTypeError +@PETSc.Log.EventDecorator() def generate_single_cell_wrapper(iterset, args, forward_args=(), kernel_name=None, wrapper_name=None): """Generates wrapper for a single cell. No iteration loop, but cellwise data is extracted. 
diff --git a/pyop2/scripts/spydump b/pyop2/scripts/spydump index 0077fe1ca1..b9905615a3 100755 --- a/pyop2/scripts/spydump +++ b/pyop2/scripts/spydump @@ -39,7 +39,7 @@ plots if two input file names are given.""" import matplotlib import numpy as np import pylab -from scipy.sparse import csr_matrix +from scipy.sparse import csr_array COOKIE = 1211216 # from petscmat.h IntType = '>i4' # big-endian, 4 byte integer @@ -66,7 +66,7 @@ def readmat(filename): def dump2csr(filename): (M, N), (I, J, V) = readmat(filename) - return csr_matrix((V, J, I)) + return csr_array((V, J, I)) def compare_dump(files, outfile=None, marker='.', markersize=.5): diff --git a/pyop2/sparsity.pyx b/pyop2/sparsity.pyx index d6411fecac..5914bacc86 100644 --- a/pyop2/sparsity.pyx +++ b/pyop2/sparsity.pyx @@ -37,6 +37,7 @@ import numpy as np cimport numpy as np import cython cimport petsc4py.PETSc as PETSc +from petsc4py.PETSc cimport CHKERR from petsc4py import PETSc from pyop2.datatypes import IntType @@ -49,18 +50,21 @@ cdef extern from "petsc.h": PETSC_TRUE, PETSC_FALSE ctypedef enum PetscInsertMode "InsertMode": PETSC_INSERT_VALUES "INSERT_VALUES" - int PetscCalloc1(size_t, void*) - int PetscMalloc1(size_t, void*) - int PetscMalloc2(size_t, void*, size_t, void*) - int PetscFree(void*) - int PetscFree2(void*,void*) - int MatSetValuesBlockedLocal(PETSc.PetscMat, PetscInt, PetscInt*, PetscInt, PetscInt*, - PetscScalar*, PetscInsertMode) - int MatSetValuesLocal(PETSc.PetscMat, PetscInt, PetscInt*, PetscInt, PetscInt*, - PetscScalar*, PetscInsertMode) - int MatPreallocatorPreallocate(PETSc.PetscMat, PetscBool, PETSc.PetscMat) - int MatXAIJSetPreallocation(PETSc.PetscMat, PetscInt, const PetscInt[], const PetscInt[], - const PetscInt[], const PetscInt[]) + ctypedef enum PetscErrorCode: + PETSC_SUCCESS + + PetscErrorCode PetscCalloc1(size_t, void*) + PetscErrorCode PetscMalloc1(size_t, void*) + PetscErrorCode PetscMalloc2(size_t, void*, size_t, void*) + PetscErrorCode PetscFree(void*) + 
PetscErrorCode PetscFree2(void*,void*) + PetscErrorCode MatSetValuesBlockedLocal(PETSc.PetscMat, PetscInt, PetscInt*, PetscInt, PetscInt*, + PetscScalar*, PetscInsertMode) + PetscErrorCode MatSetValuesLocal(PETSc.PetscMat, PetscInt, PetscInt*, PetscInt, PetscInt*, + PetscScalar*, PetscInsertMode) + PetscErrorCode MatPreallocatorPreallocate(PETSc.PetscMat, PetscBool, PETSc.PetscMat) + PetscErrorCode MatXAIJSetPreallocation(PETSc.PetscMat, PetscInt, const PetscInt[], const PetscInt[], + const PetscInt[], const PetscInt[]) cdef extern from "petsc/private/matimpl.h": struct _p_Mat: @@ -71,26 +75,6 @@ ctypedef struct Mat_Preallocator: PetscInt *dnz PetscInt *onz -cdef extern from *: - void PyErr_SetObject(object, object) - void *PyExc_RuntimeError - -cdef object PetscError = PyExc_RuntimeError - -cdef inline int SETERR(int ierr) with gil: - if (PetscError) != NULL: - PyErr_SetObject(PetscError, ierr) - else: - PyErr_SetObject(PyExc_RuntimeError, ierr) - return ierr - -cdef inline int CHKERR(int ierr) nogil except -1: - if ierr == 0: - return 0 # no error - else: - SETERR(ierr) - return -1 - cdef object set_writeable(map): flag = map.values_with_halo.flags['WRITEABLE'] map.values_with_halo.setflags(write=True) diff --git a/pyop2/types/dat.py b/pyop2/types/dat.py index 3e0d37c5a8..ef21d2f29f 100644 --- a/pyop2/types/dat.py +++ b/pyop2/types/dat.py @@ -17,6 +17,7 @@ mpi, utils ) +from functools import cached_property from pyop2.types.access import Access from pyop2.types.dataset import DataSet, GlobalDataSet, MixedDataSet from pyop2.types.data_carrier import DataCarrier, EmptyDataMixin, VecAccessMixin @@ -83,22 +84,22 @@ def __init__(self, dataset, data=None, dtype=None, name=None): EmptyDataMixin.__init__(self, data, dtype, self._shape) self._dataset = dataset - self.comm = mpi.internal_comm(dataset.comm, self) + self.comm = dataset.comm self.halo_valid = True self._name = name or "dat_#x%x" % id(self) self._halo_frozen = False self._frozen_access_mode = None - 
@utils.cached_property + @cached_property def _kernel_args_(self): return (self._data.ctypes.data, ) - @utils.cached_property + @cached_property def _argtypes_(self): return (ctypes.c_voidp, ) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self), self.dtype, self._dataset._wrapper_cache_key_) @@ -115,22 +116,22 @@ def __getitem__(self, idx): raise ex.IndexValueError("Can only extract component 0 from %r" % self) return self - @utils.cached_property + @cached_property def split(self): """Tuple containing only this :class:`Dat`.""" return (self,) - @utils.cached_property + @cached_property def dataset(self): """:class:`DataSet` on which the Dat is defined.""" return self._dataset - @utils.cached_property + @cached_property def dim(self): """The shape of the values for each element of the object.""" return self.dataset.dim - @utils.cached_property + @cached_property def cdim(self): """The scalar number of values for each member of the object. This is the product of the dim tuple.""" @@ -272,15 +273,15 @@ def load(self, filename): else: self.data[:] = np.load(filename) - @utils.cached_property + @cached_property def shape(self): return self._shape - @utils.cached_property + @cached_property def dtype(self): return self._dtype - @utils.cached_property + @cached_property def nbytes(self): """Return an estimate of the size of the data associated with this :class:`Dat` in bytes. This will be the correct size of the data @@ -551,7 +552,7 @@ def __radd__(self, other): self.__radd__(other) <==> other + self.""" return self + other - @utils.cached_property + @cached_property def _neg_kernel(self): # Copy and negate in one go. 
import islpy as isl @@ -737,27 +738,27 @@ def __init__(self, dat, index): def increment_dat_version(self): self._parent.increment_dat_version() - @utils.cached_property + @cached_property def _kernel_args_(self): return self._parent._kernel_args_ - @utils.cached_property + @cached_property def _argtypes_(self): return self._parent._argtypes_ - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self), self.index, self._parent._wrapper_cache_key_) - @utils.cached_property + @cached_property def cdim(self): return 1 - @utils.cached_property + @cached_property def dim(self): return (1, ) - @utils.cached_property + @cached_property def shape(self): return (self.dataset.total_size, ) @@ -810,7 +811,7 @@ def __init__(self, *args, **kwargs): petsc_counter = (self.dtype == PETSc.ScalarType) VecAccessMixin.__init__(self, petsc_counter=petsc_counter) - @utils.cached_property + @cached_property def _vec(self): assert self.dtype == PETSc.ScalarType, \ "Can't create Vec with type %s, must be %s" % (self.dtype, PETSc.ScalarType) @@ -825,14 +826,14 @@ def _vec(self): data = self._data[:size[0]] return PETSc.Vec().createWithArray(data, size=size, bsize=self.cdim, comm=self.comm) - @utils.cached_property + @cached_property def _data_filtered(self): size, _ = self.dataset.layout_vec.getSizes() size //= self.dataset.layout_vec.block_size data = self._data[:size] return np.empty_like(data) - @utils.cached_property + @cached_property def _data_filter(self): lgmap = self.dataset.lgmap n = self.dataset.size @@ -889,7 +890,7 @@ def what(x): if not all(d.dtype == self._dats[0].dtype for d in self._dats): raise ex.DataValueError('MixedDat with different dtypes is not supported') # TODO: Think about different communicators on dats (c.f. 
MixedSet) - self.comm = mpi.internal_comm(self._dats[0].comm, self) + self.comm = self._dats[0].comm @property def dat_version(self): @@ -907,15 +908,15 @@ def __call__(self, access, path=None): from pyop2.parloop import MixedDatLegacyArg return MixedDatLegacyArg(self, path, access) - @utils.cached_property + @cached_property def _kernel_args_(self): return tuple(itertools.chain(*(d._kernel_args_ for d in self))) - @utils.cached_property + @cached_property def _argtypes_(self): return tuple(itertools.chain(*(d._argtypes_ for d in self))) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self),) + tuple(d._wrapper_cache_key_ for d in self) @@ -923,22 +924,22 @@ def __getitem__(self, idx): """Return :class:`Dat` with index ``idx`` or a given slice of Dats.""" return self._dats[idx] - @utils.cached_property + @cached_property def dtype(self): """The NumPy dtype of the data.""" return self._dats[0].dtype - @utils.cached_property + @cached_property def split(self): r"""The underlying tuple of :class:`Dat`\s.""" return self._dats - @utils.cached_property + @cached_property def dataset(self): r""":class:`MixedDataSet`\s this :class:`MixedDat` is defined on.""" return MixedDataSet(tuple(s.dataset for s in self._dats)) - @utils.cached_property + @cached_property def _data(self): """Return the user-provided data buffer, or a zeroed buffer of the correct size if none was provided.""" @@ -1033,7 +1034,7 @@ def zero(self, subset=None): for d in self._dats: d.zero() - @utils.cached_property + @cached_property def nbytes(self): """Return an estimate of the size of the data associated with this :class:`MixedDat` in bytes. 
This will be the correct size of the data @@ -1195,7 +1196,7 @@ def __idiv__(self, other): """Pointwise division or scaling of fields.""" return self._iop(other, operator.idiv) - @utils.cached_property + @cached_property def _vec(self): assert self.dtype == PETSc.ScalarType, \ "Can't create Vec with type %s, must be %s" % (self.dtype, PETSc.ScalarType) diff --git a/pyop2/types/data_carrier.py b/pyop2/types/data_carrier.py index 632b3ced15..fd1721278a 100644 --- a/pyop2/types/data_carrier.py +++ b/pyop2/types/data_carrier.py @@ -7,6 +7,7 @@ mpi, utils ) +from functools import cached_property from pyop2.types.access import Access @@ -18,27 +19,27 @@ class DataCarrier(abc.ABC): (:class:`Global`), rank 1 (:class:`Dat`), or rank 2 (:class:`Mat`)""" - @utils.cached_property + @cached_property def dtype(self): """The Python type of the data.""" return self._data.dtype - @utils.cached_property + @cached_property def ctype(self): """The c type of the data.""" return dtypes.as_cstr(self.dtype) - @utils.cached_property + @cached_property def name(self): """User-defined label.""" return self._name - @utils.cached_property + @cached_property def dim(self): """The shape tuple of the values for each element of the object.""" return self._dim - @utils.cached_property + @cached_property def cdim(self): """The scalar number of values for each member of the object. 
This is the product of the dim tuple.""" @@ -64,7 +65,7 @@ def __init__(self, data, dtype, shape): self._numpy_data = utils.verify_reshape(data, dtype, shape, allow_none=True) self._dtype = self._data.dtype - @utils.cached_property + @cached_property def _data(self): """Return the user-provided data buffer, or a zeroed buffer of the correct size if none was provided.""" diff --git a/pyop2/types/dataset.py b/pyop2/types/dataset.py index 087d9b091d..2f7152bc3f 100644 --- a/pyop2/types/dataset.py +++ b/pyop2/types/dataset.py @@ -10,6 +10,7 @@ mpi, utils ) +from functools import cached_property from pyop2.types.set import ExtrudedSet, GlobalSet, MixedSet, Set, Subset @@ -30,7 +31,7 @@ def __init__(self, iter_set, dim=1, name=None, apply_local_global_filter=False): return if isinstance(iter_set, Subset): raise NotImplementedError("Deriving a DataSet from a Subset is unsupported") - self.comm = mpi.internal_comm(iter_set.comm, self) + self.comm = iter_set.comm self._set = iter_set self._dim = utils.as_tuple(dim, numbers.Integral) self._cdim = np.prod(self._dim).item() @@ -46,7 +47,7 @@ def _process_args(cls, *args, **kwargs): def _cache_key(cls, iter_set, dim=1, name=None, apply_local_global_filter=False): return (iter_set, utils.as_tuple(dim, numbers.Integral)) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self), self.dim, self._set._wrapper_cache_key_, self._apply_local_global_filter) @@ -69,23 +70,23 @@ def __getitem__(self, idx): assert idx == 0 return self - @utils.cached_property + @cached_property def dim(self): """The shape tuple of the values for each element of the set.""" return self._dim - @utils.cached_property + @cached_property def cdim(self): """The scalar number of values for each member of the set. 
This is the product of the dim tuple.""" return self._cdim - @utils.cached_property + @cached_property def name(self): """Returns the name of the data set.""" return self._name - @utils.cached_property + @cached_property def set(self): """Returns the parent set of the data set.""" return self._set @@ -109,7 +110,7 @@ def __contains__(self, dat): """Indicate whether a given Dat is compatible with this DataSet.""" return dat.dataset == self - @utils.cached_property + @cached_property def lgmap(self): """A PETSc LGMap mapping process-local indices to global indices for this :class:`DataSet`. @@ -123,14 +124,14 @@ def lgmap(self): bsize=self.cdim, comm=self.comm) return lgmap - @utils.cached_property + @cached_property def scalar_lgmap(self): if self.cdim == 1: return self.lgmap indices = self.lgmap.block_indices return PETSc.LGMap().create(indices=indices, bsize=1, comm=self.comm) - @utils.cached_property + @cached_property def unblocked_lgmap(self): """A PETSc LGMap mapping process-local indices to global indices for this :class:`DataSet` with a block size of 1. @@ -143,7 +144,7 @@ def unblocked_lgmap(self): bsize=1, comm=self.lgmap.comm) return lgmap - @utils.cached_property + @cached_property def field_ises(self): """A list of PETSc ISes defining the global indices for each set in the DataSet. @@ -164,7 +165,7 @@ def field_ises(self): offset += nrows return tuple(ises) - @utils.cached_property + @cached_property def local_ises(self): """A list of PETSc ISes defining the local indices for each set in the DataSet. 
@@ -181,7 +182,7 @@ def local_ises(self): ises.append(iset) return tuple(ises) - @utils.cached_property + @cached_property def layout_vec(self): """A PETSc Vec compatible with the dof layout of this DataSet.""" vec = PETSc.Vec().create(comm=self.comm) @@ -190,7 +191,7 @@ def layout_vec(self): vec.setUp() return vec - @utils.cached_property + @cached_property def dm(self): dm = PETSc.DMShell().create(comm=self.comm) dm.setGlobalVector(self.layout_vec) @@ -207,7 +208,7 @@ def __init__(self, global_): if self._initialized: return self._global = global_ - self.comm = mpi.internal_comm(global_.comm, self) + self.comm = global_.comm self._globalset = GlobalSet(comm=self.comm) self._name = "gdset_#x%x" % id(self) self._initialized = True @@ -216,28 +217,28 @@ def __init__(self, global_): def _cache_key(cls, *args): return None - @utils.cached_property + @cached_property def dim(self): """The shape tuple of the values for each element of the set.""" return self._global._dim - @utils.cached_property + @cached_property def cdim(self): """The scalar number of values for each member of the set. This is the product of the dim tuple.""" return self._global._cdim - @utils.cached_property + @cached_property def name(self): """Returns the name of the data set.""" return self._global._name - @utils.cached_property + @cached_property def set(self): """Returns the parent set of the data set.""" return self._globalset - @utils.cached_property + @cached_property def size(self): """The number of local entries in the Dataset (1 on rank 0)""" return 1 if mpi.MPI.comm.rank == 0 else 0 @@ -257,7 +258,7 @@ def __str__(self): def __repr__(self): return "GlobalDataSet(%r)" % (self._global) - @utils.cached_property + @cached_property def lgmap(self): """A PETSc LGMap mapping process-local indices to global indices for this :class:`DataSet`. 
@@ -267,7 +268,7 @@ def lgmap(self): bsize=self.cdim, comm=self.comm) return lgmap - @utils.cached_property + @cached_property def unblocked_lgmap(self): """A PETSc LGMap mapping process-local indices to global indices for this :class:`DataSet` with a block size of 1. @@ -280,14 +281,14 @@ def unblocked_lgmap(self): bsize=1, comm=self.lgmap.comm) return lgmap - @utils.cached_property + @cached_property def local_ises(self): """A list of PETSc ISes defining the local indices for each set in the DataSet. Used when extracting blocks from matrices for assembly.""" raise NotImplementedError - @utils.cached_property + @cached_property def layout_vec(self): """A PETSc Vec compatible with the dof layout of this DataSet.""" vec = PETSc.Vec().create(comm=self.comm) @@ -296,7 +297,7 @@ def layout_vec(self): vec.setUp() return vec - @utils.cached_property + @cached_property def dm(self): dm = PETSc.DMShell().create(comm=self.comm) dm.setGlobalVector(self.layout_vec) @@ -350,13 +351,9 @@ def __init__(self, arg, dims=None): if self._initialized: return self._dsets = arg - try: - # Try to choose the comm to be the same as the first set - # of the MixedDataSet - comm = self._process_args(arg, dims)[0][0].comm - except AttributeError: - comm = None - self.comm = mpi.internal_comm(comm, self) + # Try to choose the comm to be the same as the first set + # of the MixedDataSet + self.comm = self._process_args(arg, dims)[0][0].comm self._initialized = True @classmethod @@ -385,7 +382,7 @@ def _process_args(cls, arg, dims=None): def _cache_key(cls, arg, dims=None): return arg - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): raise NotImplementedError @@ -393,28 +390,28 @@ def __getitem__(self, idx): """Return :class:`DataSet` with index ``idx`` or a given slice of datasets.""" return self._dsets[idx] - @utils.cached_property + @cached_property def split(self): r"""The underlying tuple of :class:`DataSet`\s.""" return self._dsets - @utils.cached_property + 
@cached_property def dim(self): """The shape tuple of the values for each element of the sets.""" return tuple(s.dim for s in self._dsets) - @utils.cached_property + @cached_property def cdim(self): """The sum of the scalar number of values for each member of the sets. This is the sum of products of the dim tuples.""" return sum(s.cdim for s in self._dsets) - @utils.cached_property + @cached_property def name(self): """Returns the name of the data sets.""" return tuple(s.name for s in self._dsets) - @utils.cached_property + @cached_property def set(self): """Returns the :class:`MixedSet` this :class:`MixedDataSet` is defined on.""" @@ -435,7 +432,7 @@ def __str__(self): def __repr__(self): return "MixedDataSet(%r)" % (self._dsets,) - @utils.cached_property + @cached_property def layout_vec(self): """A PETSc Vec compatible with the dof layout of this MixedDataSet.""" vec = PETSc.Vec().create(comm=self.comm) @@ -445,7 +442,7 @@ def layout_vec(self): vec.setUp() return vec - @utils.cached_property + @cached_property def lgmap(self): """A PETSc LGMap mapping process-local indices to global indices for this :class:`MixedDataSet`. @@ -511,7 +508,7 @@ def lgmap(self): lgmap.create(indices=indices, bsize=1, comm=self.comm) return lgmap - @utils.cached_property + @cached_property def unblocked_lgmap(self): """A PETSc LGMap mapping process-local indices to global indices for this :class:`DataSet` with a block size of 1. 
diff --git a/pyop2/types/glob.py b/pyop2/types/glob.py index d5686ef10a..a425e01585 100644 --- a/pyop2/types/glob.py +++ b/pyop2/types/glob.py @@ -12,6 +12,7 @@ mpi, utils ) +from functools import cached_property from pyop2.types.access import Access from pyop2.types.dataset import GlobalDataSet from pyop2.types.data_carrier import DataCarrier, EmptyDataMixin, VecAccessMixin @@ -27,15 +28,15 @@ def __init__(self, dim, data=None, dtype=None, name=None): self._buf = np.empty(self.shape, dtype=self.dtype) self._name = name or "%s_#x%x" % (self.__class__.__name__.lower(), id(self)) - @utils.cached_property + @cached_property def _kernel_args_(self): return (self._data.ctypes.data, ) - @utils.cached_property + @cached_property def _argtypes_(self): return (ctypes.c_voidp, ) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self), self.dtype, self.shape) @@ -281,7 +282,8 @@ def __init__(self, dim, data=None, dtype=None, name=None, comm=None): super().__init__(dim, data, dtype, name) if comm is None: warnings.warn("PyOP2.Global has no comm, this is likely to break in parallel!") - self.comm = mpi.internal_comm(comm, self) + comm = mpi.COMM_WORLD + self.comm = comm # Object versioning setup petsc_counter = (comm and self.dtype == PETSc.ScalarType) @@ -311,7 +313,7 @@ def __neg__(self): comm=self.comm ) - @utils.cached_property + @cached_property def dataset(self): return GlobalDataSet(self) @@ -374,7 +376,7 @@ def unfreeze_halo(self): part of a :class:`MixedDat`.""" pass - @utils.cached_property + @cached_property def _vec(self): assert self.dtype == PETSc.ScalarType, \ "Can't create Vec with type %s, must be %s" % (self.dtype, PETSc.ScalarType) @@ -402,7 +404,8 @@ def vec_context(self, access): yield self._vec if access is not Access.READ: data = self._data - self.comm.Bcast(data, 0) + with mpi.temp_internal_comm(self.comm) as icomm: + icomm.Bcast(data, 0) def increment_dat_version(self): VecAccessMixin.increment_dat_version(self) 
diff --git a/pyop2/types/map.py b/pyop2/types/map.py index 13a43143ba..96d02529e6 100644 --- a/pyop2/types/map.py +++ b/pyop2/types/map.py @@ -10,7 +10,7 @@ exceptions as ex, utils ) -from pyop2 import mpi +from functools import cached_property from pyop2.types.set import GlobalSet, MixedSet, Set @@ -37,7 +37,7 @@ class Map: def __init__(self, iterset, toset, arity, values=None, name=None, offset=None, offset_quotient=None): self._iterset = iterset self._toset = toset - self.comm = mpi.internal_comm(toset.comm, self) + self.comm = toset.comm self._arity = arity self._values = utils.verify_reshape(values, dtypes.IntType, (iterset.total_size, arity), allow_none=True) @@ -54,11 +54,11 @@ def __init__(self, iterset, toset, arity, values=None, name=None, offset=None, o # A cache for objects built on top of this map self._cache = {} - @utils.cached_property + @cached_property def _kernel_args_(self): return (self._values.ctypes.data, ) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self), self.arity, utils.tuplify(self.offset), utils.tuplify(self.offset_quotient)) @@ -76,7 +76,7 @@ def __len__(self): # Here we enforce that every map stores a single, unique MapKernelArg. # This is required because we use object identity to determined whether # maps are referenced more than once in a parloop. 
- @utils.cached_property + @cached_property def _global_kernel_arg(self): from pyop2.global_kernel import MapKernelArg @@ -84,27 +84,27 @@ def _global_kernel_arg(self): offset_quotient = tuple(self.offset_quotient) if self.offset_quotient is not None else None return MapKernelArg(self.arity, offset, offset_quotient) - @utils.cached_property + @cached_property def split(self): return (self,) - @utils.cached_property + @cached_property def iterset(self): """:class:`Set` mapped from.""" return self._iterset - @utils.cached_property + @cached_property def toset(self): """:class:`Set` mapped to.""" return self._toset - @utils.cached_property + @cached_property def arity(self): """Arity of the mapping: number of toset elements mapped to per iterset element.""" return self._arity - @utils.cached_property + @cached_property def arities(self): """Arity of the mapping: number of toset elements mapped to per iterset element. @@ -112,12 +112,12 @@ def arities(self): :rtype: tuple""" return (self._arity,) - @utils.cached_property + @cached_property def arange(self): """Tuple of arity offsets for each constituent :class:`Map`.""" return (0, self._arity) - @utils.cached_property + @cached_property def values(self): """Mapping array. @@ -125,7 +125,7 @@ def values(self): halo points too, use :meth:`values_with_halo`.""" return self._values[:self.iterset.size] - @utils.cached_property + @cached_property def values_with_halo(self): """Mapping array. 
@@ -134,17 +134,17 @@ def values_with_halo(self): points.""" return self._values - @utils.cached_property + @cached_property def name(self): """User-defined label""" return self._name - @utils.cached_property + @cached_property def offset(self): """The vertical offset.""" return self._offset - @utils.cached_property + @cached_property def offset_quotient(self): """The offset quotient.""" return self._offset_quotient @@ -161,7 +161,7 @@ def __le__(self, o): """self<=o if o equals self or self._parent <= o.""" return self == o - @utils.cached_property + @cached_property def flattened_maps(self): """Return all component maps. @@ -197,16 +197,16 @@ def __init__(self, map_, permutation): if isinstance(map_, ComposedMap): raise NotImplementedError("PermutedMap of ComposedMap not implemented: simply permute before composing") self.map_ = map_ - self.comm = mpi.internal_comm(map_.comm, self) + self.comm = map_.comm self.permutation = np.asarray(permutation, dtype=Map.dtype) assert (np.unique(permutation) == np.arange(map_.arity, dtype=Map.dtype)).all() - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return super()._wrapper_cache_key_ + (tuple(self.permutation),) # See Map._global_kernel_arg above for more information. 
- @utils.cached_property + @cached_property def _global_kernel_arg(self): from pyop2.global_kernel import PermutedMapKernelArg @@ -248,7 +248,7 @@ def __init__(self, *maps_, name=None): raise ex.MapTypeError("frommap.arity must be 1") self._iterset = maps_[-1].iterset self._toset = maps_[0].toset - self.comm = mpi.internal_comm(self._toset.comm, self) + self.comm = self._toset.comm self._arity = maps_[0].arity # Don't call super().__init__() to avoid calling verify_reshape() self._values = None @@ -259,25 +259,25 @@ def __init__(self, *maps_, name=None): self._cache = {} self.maps_ = tuple(maps_) - @utils.cached_property + @cached_property def _kernel_args_(self): return tuple(itertools.chain(*[m._kernel_args_ for m in self.maps_])) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return tuple(m._wrapper_cache_key_ for m in self.maps_) - @utils.cached_property + @cached_property def _global_kernel_arg(self): from pyop2.global_kernel import ComposedMapKernelArg return ComposedMapKernelArg(*(m._global_kernel_arg for m in self.maps_)) - @utils.cached_property + @cached_property def values(self): raise RuntimeError("ComposedMap does not store values directly") - @utils.cached_property + @cached_property def values_with_halo(self): r = np.empty(self.shape, dtype=Map.dtype) # Initialise map values. @@ -295,7 +295,7 @@ def values_with_halo(self): r[~mask, :] = Map.VALUE_UNDEFINED return r - @utils.cached_property + @cached_property def indices_active_with_halo(self): """Return boolean array for active indices. 
@@ -325,7 +325,7 @@ def __repr__(self): def __le__(self, o): raise NotImplementedError("__le__ not implemented for ComposedMap") - @utils.cached_property + @cached_property def flattened_maps(self): return tuple(itertools.chain(*(m.flattened_maps for m in self.maps_))) @@ -345,7 +345,7 @@ def __init__(self, maps): raise ex.MapTypeError("All maps needs to share a communicator") if len(comms) == 0: raise ex.MapTypeError("Don't know how to make communicator") - self.comm = mpi.internal_comm(comms[0], self) + self.comm = comms[0] self._initialized = True @classmethod @@ -358,24 +358,24 @@ def _process_args(cls, *args, **kwargs): def _cache_key(cls, maps): return maps - @utils.cached_property + @cached_property def _kernel_args_(self): return tuple(itertools.chain(*(m._kernel_args_ for m in self if m is not None))) - @utils.cached_property + @cached_property def _argtypes_(self): return tuple(itertools.chain(*(m._argtypes_ for m in self if m is not None))) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return tuple(m._wrapper_cache_key_ for m in self if m is not None) - @utils.cached_property + @cached_property def split(self): r"""The underlying tuple of :class:`Map`\s.""" return self._maps - @utils.cached_property + @cached_property def iterset(self): """:class:`MixedSet` mapped from.""" s, = set(m.iterset for m in self._maps) @@ -384,13 +384,13 @@ def iterset(self): else: raise RuntimeError("Found multiple itersets.") - @utils.cached_property + @cached_property def toset(self): """:class:`MixedSet` mapped to.""" return MixedSet(tuple(GlobalSet(comm=self.comm) if m is None else m.toset for m in self._maps)) - @utils.cached_property + @cached_property def arity(self): """Arity of the mapping: total number of toset elements mapped to per iterset element.""" @@ -400,7 +400,7 @@ def arity(self): else: raise RuntimeError("Found multiple itersets.") - @utils.cached_property + @cached_property def arities(self): """Arity of the mapping: number of 
toset elements mapped to per iterset element. @@ -408,12 +408,12 @@ def arities(self): :rtype: tuple""" return tuple(m.arity for m in self._maps) - @utils.cached_property + @cached_property def arange(self): """Tuple of arity offsets for each constituent :class:`Map`.""" return (0,) + tuple(np.cumsum(self.arities)) - @utils.cached_property + @cached_property def values(self): """Mapping arrays excluding data for halos. @@ -421,7 +421,7 @@ def values(self): halo points too, use :meth:`values_with_halo`.""" return tuple(m.values for m in self._maps) - @utils.cached_property + @cached_property def values_with_halo(self): """Mapping arrays including data for halos. @@ -431,17 +431,17 @@ def values_with_halo(self): return tuple(None if m is None else m.values_with_halo for m in self._maps) - @utils.cached_property + @cached_property def name(self): """User-defined labels""" return tuple(m.name for m in self._maps) - @utils.cached_property + @cached_property def offset(self): """Vertical offsets.""" return tuple(0 if m is None else m.offset for m in self._maps) - @utils.cached_property + @cached_property def offset_quotient(self): """Offsets quotient.""" return tuple(0 if m is None else m.offset_quotient for m in self._maps) @@ -465,6 +465,6 @@ def __str__(self): def __repr__(self): return "MixedMap(%r)" % (self._maps,) - @utils.cached_property + @cached_property def flattened_maps(self): raise NotImplementedError("flattend_maps should not be necessary for MixedMap") diff --git a/pyop2/types/mat.py b/pyop2/types/mat.py index 88cbbdd1d2..75c6336d3d 100644 --- a/pyop2/types/mat.py +++ b/pyop2/types/mat.py @@ -16,6 +16,7 @@ sparsity, utils ) +from functools import cached_property from pyop2.types.access import Access from pyop2.types.data_carrier import DataCarrier from pyop2.types.dataset import DataSet, GlobalDataSet, MixedDataSet @@ -61,8 +62,8 @@ def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=N self._maps_and_regions = maps_and_regions 
self._block_sparse = block_sparse self._diagonal_block = diagonal_block - self.lcomm = mpi.internal_comm(self.dsets[0].comm, self) - self.rcomm = mpi.internal_comm(self.dsets[1].comm, self) + self.lcomm = self.dsets[0].comm + self.rcomm = self.dsets[1].comm if isinstance(dsets[0], GlobalDataSet) or isinstance(dsets[1], GlobalDataSet): self._dims = (((1, 1),),) self._d_nnz = None @@ -79,7 +80,7 @@ def __init__(self, dsets, maps_and_regions, name=None, nest=None, block_sparse=N self._dims = tuple(tuple(d) for d in dims) if self.lcomm != self.rcomm: raise ValueError("Haven't thought hard enough about different left and right communicators") - self.comm = mpi.internal_comm(self.lcomm, self) + self.comm = self.lcomm self._name = name or "sparsity_#x%x" % id(self) # If the Sparsity is defined on MixedDataSets, we need to build each # block separately @@ -188,21 +189,21 @@ def __getitem__(self, idx): except TypeError: return self._blocks[idx] - @utils.cached_property + @cached_property def dsets(self): r"""A pair of :class:`DataSet`\s for the left and right function spaces this :class:`Sparsity` maps between.""" return self._dsets - @utils.cached_property + @cached_property def rcmaps(self): return {key: [(_rmap, _cmap) for _rmap, _cmap, _ in val] for key, val in self._maps_and_regions.items()} - @utils.cached_property + @cached_property def iteration_regions(self): return {key: [_iteration_regions for _, _, _iteration_regions in val] for key, val in self._maps_and_regions.items()} - @utils.cached_property + @cached_property def dims(self): """A tuple of tuples where the ``i,j``th entry is a pair giving the number of rows per entry of the row @@ -212,13 +213,13 @@ def dims(self): """ return self._dims - @utils.cached_property + @cached_property def shape(self): """Number of block rows and columns.""" return (len(self._dsets[0] or [1]), len(self._dsets[1] or [1])) - @utils.cached_property + @cached_property def nested(self): r"""Whether a sparsity is monolithic (even if it 
has a block structure). @@ -232,7 +233,7 @@ def nested(self): """ return self._nested - @utils.cached_property + @cached_property def name(self): """A user-defined label.""" return self._name @@ -250,7 +251,7 @@ def __str__(self): def __repr__(self): return "Sparsity(%r, %r, name=%r, nested=%r, block_sparse=%r, diagonal_block=%r)" % (self.dsets, self._maps_and_regions, self.name, self._nested, self._block_sparse, self._diagonal_block) - @utils.cached_property + @cached_property def nnz(self): """Array containing the number of non-zeroes in the various rows of the diagonal portion of the local submatrix. @@ -259,7 +260,7 @@ def nnz(self): PETSc's MatMPIAIJSetPreallocation_.""" return self._d_nnz - @utils.cached_property + @cached_property def onnz(self): """Array containing the number of non-zeroes in the various rows of the off-diagonal portion of the local submatrix. @@ -268,11 +269,11 @@ def onnz(self): PETSc's MatMPIAIJSetPreallocation_.""" return self._o_nnz - @utils.cached_property + @cached_property def nz(self): return self._d_nnz.sum() - @utils.cached_property + @cached_property def onz(self): return self._o_nnz.sum() @@ -312,10 +313,10 @@ def __init__(self, parent, i, j): self._parent = parent self._dims = tuple([tuple([parent.dims[i][j]])]) self._blocks = [[self]] - self.lcomm = mpi.internal_comm(self.dsets[0].comm, self) - self.rcomm = mpi.internal_comm(self.dsets[1].comm, self) + self.lcomm = self.dsets[0].comm + self.rcomm = self.dsets[1].comm # TODO: think about lcomm != rcomm - self.comm = mpi.internal_comm(self.lcomm, self) + self.comm = self.lcomm self._initialized = True @classmethod @@ -341,6 +342,55 @@ def masked_lgmap(lgmap, mask, block=True): return PETSc.LGMap().create(indices=indices, bsize=bsize, comm=lgmap.comm) +def mask_ghost_cells(cell_node_map): + """Return the local indices of the nodes that belong to ghost cells.""" + own_cells = cell_node_map.iterset.size + owned = cell_node_map.values[:own_cells] + ghost = 
cell_node_map.values_with_halo[own_cells:] + offset = cell_node_map.offset + if offset is None or ghost.size == 0: + # Non-extruded case + mask = np.setdiff1d(ghost, owned) + elif cell_node_map.iterset.constant_layers: + # Extruded case + mask_pieces = [] + owned = owned.copy() + ghost = ghost.copy() + quotient = cell_node_map.offset_quotient + layers = cell_node_map.iterset.layers + for i in range(layers-1): + if quotient is not None and i == layers-2: + # Periodic extruded case + owned -= quotient + ghost -= quotient + mask_pieces.append(np.setdiff1d(ghost, owned)) + owned += offset + ghost += offset + mask = np.concatenate(mask_pieces) + else: + raise NotImplementedError("MatIS does not support variable extrusion with overlap.") + return mask + + +def unghosted_lgmap(dset, node_maps): + """Return a local-to-global map where the nodes on ghost cells are masked out.""" + if len(node_maps) == 1: + # Non-mixed case + cmap, = node_maps + mask = mask_ghost_cells(cmap) + else: + # Mixed case + mask_pieces = [] + for iset, cmap in zip(dset.local_ises, node_maps): + to_mask = mask_ghost_cells(cmap) + bs = iset.block_size + if bs > 1: + to_mask = np.concatenate([i + bs * to_mask for i in range(bs)]) + mask_pieces.append(iset.indices[to_mask]) + mask = np.concatenate(mask_pieces) + return masked_lgmap(dset.lgmap, mask) + + class AbstractMat(DataCarrier, abc.ABC): r"""OP2 matrix data. A ``Mat`` is defined on a sparsity pattern and holds a value for each element in the :class:`Sparsity`. 
@@ -374,9 +424,9 @@ class AbstractMat(DataCarrier, abc.ABC): ('name', str, ex.NameTypeError)) def __init__(self, sparsity, dtype=None, name=None): self._sparsity = sparsity - self.lcomm = mpi.internal_comm(sparsity.lcomm, self) - self.rcomm = mpi.internal_comm(sparsity.rcomm, self) - self.comm = mpi.internal_comm(sparsity.comm, self) + self.lcomm = sparsity.lcomm + self.rcomm = sparsity.rcomm + self.comm = sparsity.comm dtype = dtype or dtypes.ScalarType self._datatype = np.dtype(dtype) self._name = name or "mat_#x%x" % id(self) @@ -395,7 +445,7 @@ def __call__(self, access, path, lgmaps=None, unroll_map=False): else: return MatLegacyArg(self, path, access, lgmaps, unroll_map) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self), self.dtype, self.dims) @@ -417,20 +467,20 @@ def set_values(self, rows, cols, values): raise NotImplementedError( "Abstract Mat base class doesn't know how to set values.") - @utils.cached_property + @cached_property def nblocks(self): return int(np.prod(self.sparsity.shape)) - @utils.cached_property + @cached_property def _argtypes_(self): """Ctypes argtype for this :class:`Mat`""" return tuple(ctypes.c_voidp for _ in self) - @utils.cached_property + @cached_property def is_mixed(self): return self.sparsity.shape > (1, 1) - @utils.cached_property + @cached_property def dims(self): """A pair of integers giving the number of matrix rows and columns for each member of the row :class:`Set` and column :class:`Set` @@ -438,12 +488,12 @@ def dims(self): :class:`DataSet`.""" return self._sparsity._dims - @utils.cached_property + @cached_property def nrows(self): "The number of rows in the matrix (local to this process)" return self.sparsity.dsets[0].layout_vec.local_size - @utils.cached_property + @cached_property def nblock_rows(self): """The number "block" rows in the matrix (local to this process). 
@@ -454,7 +504,7 @@ def nblock_rows(self): layout_vec = self.sparsity.dsets[0].layout_vec return layout_vec.local_size // layout_vec.block_size - @utils.cached_property + @cached_property def nblock_cols(self): """The number of "block" columns in the matrix (local to this process). @@ -465,23 +515,23 @@ def nblock_cols(self): layout_vec = self.sparsity.dsets[1].layout_vec return layout_vec.local_size // layout_vec.block_size - @utils.cached_property + @cached_property def ncols(self): "The number of columns in the matrix (local to this process)" return self.sparsity.dsets[1].layout_vec.local_size - @utils.cached_property + @cached_property def sparsity(self): """:class:`Sparsity` on which the ``Mat`` is defined.""" return self._sparsity - @utils.cached_property + @cached_property def _is_scalar_field(self): # Sparsity from Dat to MixedDat has a shape like (1, (1, 1)) # (which you can't take the product of) return all(np.prod(d) == 1 for d in self.dims) - @utils.cached_property + @cached_property def _is_vector_field(self): return not self._is_scalar_field @@ -511,12 +561,12 @@ def values(self): """ raise NotImplementedError("Abstract base Mat does not implement values()") - @utils.cached_property + @cached_property def dtype(self): """The Python type of the data.""" return self._datatype - @utils.cached_property + @cached_property def nbytes(self): """Return an estimate of the size of the data associated with this :class:`Mat` in bytes. 
This will be the correct size of the @@ -560,6 +610,7 @@ class Mat(AbstractMat): def __init__(self, *args, **kwargs): self.mat_type = kwargs.pop("mat_type", None) + self.sub_mat_type = kwargs.pop("sub_mat_type", None) super().__init__(*args, **kwargs) self._init() self.assembly_state = Mat.ASSEMBLED @@ -567,7 +618,7 @@ def __init__(self, *args, **kwargs): # Firedrake relies on this to distinguish between MatBlock and not for boundary conditions local_to_global_maps = (None, None) - @utils.cached_property + @cached_property def _kernel_args_(self): return tuple(a.handle.handle for a in self) @@ -616,13 +667,23 @@ def _init_dense(self): def _init_monolithic(self): mat = PETSc.Mat() rset, cset = self.sparsity.dsets - rlgmap = rset.unblocked_lgmap - clgmap = cset.unblocked_lgmap - mat.createAIJ(size=((self.nrows, None), (self.ncols, None)), - nnz=(self.sparsity.nnz, self.sparsity.onnz), - bsize=1, - comm=self.comm) + if self.mat_type == "is": + rmaps = [None for _ in rset.local_ises] + cmaps = [None for _ in cset.local_ises] + for (i, j), maps_and_regions in self.sparsity._maps_and_regions.items(): + for item in maps_and_regions: + rmaps[i], cmaps[j], _ = item + rlgmap = unghosted_lgmap(rset, rmaps) + clgmap = unghosted_lgmap(cset, cmaps) + create = mat.createIS + else: + rlgmap = rset.unblocked_lgmap + clgmap = cset.unblocked_lgmap + create = mat.createAIJ + size = ((self.nrows, None), (self.ncols, None)) + create(size, bsize=1, comm=self.comm) mat.setLGMap(rmap=rlgmap, cmap=clgmap) + mat.setPreallocationNNZ((self.sparsity.nnz, self.sparsity.onnz)) self.handle = mat self._blocks = [] rows, cols = self.sparsity.shape @@ -635,7 +696,10 @@ def _init_monolithic(self): mat.setOption(mat.Option.KEEP_NONZERO_PATTERN, True) # We completely fill the allocated matrix when zeroing the # entries, so raise an error if we "missed" one. 
- mat.setOption(mat.Option.UNUSED_NONZERO_LOCATION_ERR, True) + if self.mat_type != "is": + # The local matrix will have fewer nonzeros than the one prescribed + # in the global sparsity pattern + mat.setOption(mat.Option.UNUSED_NONZERO_LOCATION_ERR, True) mat.setOption(mat.Option.IGNORE_OFF_PROC_ENTRIES, False) mat.setOption(mat.Option.NEW_NONZERO_ALLOCATION_ERR, True) # The first assembly (filling with zeros) sets all possible entries. @@ -663,8 +727,10 @@ def _init_nest(self): for i in range(rows): row = [] for j in range(cols): + # Only set sub_mat_type on the diagonal blocks row.append(Mat(self.sparsity[i, j], self.dtype, - '_'.join([self.name, str(i), str(j)]))) + '_'.join([self.name, str(i), str(j)]), + mat_type=self.sub_mat_type if i == j else None)) self._blocks.append(row) # PETSc Mat.createNest wants a flattened list of Mats mat.createNest([[m.handle for m in row_] for row_ in self._blocks], @@ -685,7 +751,13 @@ def _init_block(self): col_lg = cset.lgmap rdim, cdim = self.dims[0][0] - if rdim == cdim and rdim > 1 and self.sparsity._block_sparse: + if self.mat_type == "is": + rmap, cmap, _ = tuple(self.sparsity._maps_and_regions[(0, 0)])[0] + row_lg = unghosted_lgmap(rset, [rmap]) + col_lg = unghosted_lgmap(cset, [cmap]) + block_sparse = False + create = mat.createIS + elif rdim == cdim and rdim > 1 and self.sparsity._block_sparse: # Size is total number of rows and columns, but the # /sparsity/ is the block sparsity. block_sparse = True @@ -695,12 +767,11 @@ def _init_block(self): # the /dof/ sparsity. 
block_sparse = False create = mat.createAIJ - create(size=((self.nrows, None), - (self.ncols, None)), - nnz=(self.sparsity.nnz, self.sparsity.onnz), - bsize=(rdim, cdim), - comm=self.comm) + size = ((self.nrows, None), (self.ncols, None)) + create(size, bsize=(rdim, cdim), comm=self.comm) + mat.setLGMap(rmap=row_lg, cmap=col_lg) + mat.setPreallocationNNZ((self.sparsity.nnz, self.sparsity.onnz)) # Stash entries destined for other processors mat.setOption(mat.Option.IGNORE_OFF_PROC_ENTRIES, False) # Any add or insertion that would generate a new entry that has not @@ -716,7 +787,8 @@ def _init_block(self): mat.setOption(mat.Option.KEEP_NONZERO_PATTERN, True) # We completely fill the allocated matrix when zeroing the # entries, so raise an error if we "missed" one. - mat.setOption(mat.Option.UNUSED_NONZERO_LOCATION_ERR, True) + if self.mat_type != "is": + mat.setOption(mat.Option.UNUSED_NONZERO_LOCATION_ERR, True) # Put zeros in all the places we might eventually put a value. with profiling.timed_region("MatZeroInitial"): sparsity.fill_with_zeros(mat, self.sparsity.dims[0][0], @@ -854,11 +926,15 @@ def set_local_diagonal_entries(self, else: rows = np.dstack([rbs*rows + i for i in range(rbs)]).flatten() rows = rows.reshape(-1, 1) - self.change_assembly_state(Mat.INSERT_VALUES) - if len(rows) > 0: - values = np.full(rows.shape, diag_val, dtype=dtypes.ScalarType) - self.handle.setValuesLocalRCV(rows, rows, values, - addv=PETSc.InsertMode.INSERT_VALUES) + if self.handle.type == "is": + self.handle.assemble() + self.handle.zeroRowsColumnsLocal(rows, diag_val) + else: + self.change_assembly_state(Mat.INSERT_VALUES) + if len(rows) > 0: + values = np.full(rows.shape, diag_val, dtype=dtypes.ScalarType) + self.handle.setValuesLocalRCV(rows, rows, values, + addv=PETSc.InsertMode.INSERT_VALUES) @mpi.collective def assemble(self): @@ -891,7 +967,7 @@ def set_values(self, rows, cols, values): self.handle.setValuesBlockedLocal(rows, cols, values, addv=PETSc.InsertMode.INSERT_VALUES) 
- @utils.cached_property + @cached_property def blocks(self): """2-dimensional array of matrix blocks.""" return self._blocks @@ -925,18 +1001,18 @@ def __init__(self, parent, i, j): colis = cset.local_ises[j] self.handle = parent.handle.getLocalSubMatrix(isrow=rowis, iscol=colis) - self.comm = mpi.internal_comm(parent.comm, self) + self.comm = parent.comm self.local_to_global_maps = self.handle.getLGMap() @property def dat_version(self): return self.handle.stateGet() - @utils.cached_property + @cached_property def _kernel_args_(self): return (self.handle.handle, ) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self._parent), self._parent.dtype, self.dims) @@ -982,11 +1058,15 @@ def set_local_diagonal_entries(self, rows, diag_val=1.0, idx=None): else: rows = np.dstack([rbs*rows + i for i in range(rbs)]).flatten() rows = rows.reshape(-1, 1) - self.change_assembly_state(Mat.INSERT_VALUES) - if len(rows) > 0: - values = np.full(rows.shape, diag_val, dtype=dtypes.ScalarType) - self.handle.setValuesLocalRCV(rows, rows, values, - addv=PETSc.InsertMode.INSERT_VALUES) + if self.handle.type == "is": + self.handle.assemble() + self.handle.zeroRowsColumnsLocal(rows, diag_val) + else: + self.change_assembly_state(Mat.INSERT_VALUES) + if len(rows) > 0: + values = np.full(rows.shape, diag_val, dtype=dtypes.ScalarType) + self.handle.setValuesLocalRCV(rows, rows, values, + addv=PETSc.InsertMode.INSERT_VALUES) def addto_values(self, rows, cols, values): """Add a block of values to the :class:`Mat`.""" diff --git a/pyop2/types/set.py b/pyop2/types/set.py index f10c934048..a913aab2b7 100644 --- a/pyop2/types/set.py +++ b/pyop2/types/set.py @@ -11,6 +11,7 @@ mpi, utils ) +from functools import cached_property class Set: @@ -58,14 +59,14 @@ class Set: _kernel_args_ = () _argtypes_ = () - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return (type(self), ) @utils.validate_type(('size', (numbers.Integral, tuple, list, 
np.ndarray), ex.SizeTypeError), ('name', str, ex.NameTypeError)) - def __init__(self, size, name=None, halo=None, comm=None, constrained_size=0): - self.comm = mpi.internal_comm(comm, self) + def __init__(self, size, name=None, halo=None, comm=mpi.COMM_WORLD, constrained_size=0): + self.comm = comm if isinstance(size, numbers.Integral): size = [size] * 3 size = utils.as_tuple(size, numbers.Integral, 3) @@ -85,45 +86,45 @@ def indices(self): """Returns iterator.""" return range(self.total_size) - @utils.cached_property + @cached_property def core_size(self): """Core set size. Owned elements not touching halo elements.""" return self._sizes[Set._CORE_SIZE] - @utils.cached_property + @cached_property def constrained_size(self): return self._constrained_size - @utils.cached_property + @cached_property def size(self): """Set size, owned elements.""" return self._sizes[Set._OWNED_SIZE] - @utils.cached_property + @cached_property def total_size(self): """Set size including ghost elements. """ return self._sizes[Set._GHOST_SIZE] - @utils.cached_property + @cached_property def sizes(self): """Set sizes: core, owned, execute halo, total.""" return self._sizes - @utils.cached_property + @cached_property def core_part(self): return SetPartition(self, 0, self.core_size) - @utils.cached_property + @cached_property def owned_part(self): return SetPartition(self, self.core_size, self.size - self.core_size) - @utils.cached_property + @cached_property def name(self): """User-defined label""" return self._name - @utils.cached_property + @cached_property def halo(self): """:class:`Halo` associated with this Set""" return self._halo @@ -191,7 +192,7 @@ def __pow__(self, e): from pyop2.types import DataSet return DataSet(self, dim=e) - @utils.cached_property + @cached_property def layers(self): """Return None (not an :class:`ExtrudedSet`).""" return None @@ -238,33 +239,33 @@ class GlobalSet(Set): _argtypes_ = () def __init__(self, comm=None): - self.comm = mpi.internal_comm(comm, self) 
+ self.comm = comm self._cache = {} - @utils.cached_property + @cached_property def core_size(self): return 0 - @utils.cached_property + @cached_property def size(self): return 1 if self.comm.rank == 0 else 0 - @utils.cached_property + @cached_property def total_size(self): """Total set size, including halo elements.""" return 1 if self.comm.rank == 0 else 0 - @utils.cached_property + @cached_property def sizes(self): """Set sizes: core, owned, execute halo, total.""" return (self.core_size, self.size, self.total_size) - @utils.cached_property + @cached_property def name(self): """User-defined label""" return "GlobalSet" - @utils.cached_property + @cached_property def halo(self): """:class:`Halo` associated with this Set""" return None @@ -323,7 +324,7 @@ class ExtrudedSet(Set): @utils.validate_type(('parent', Set, TypeError)) def __init__(self, parent, layers, extruded_periodic=False): self._parent = parent - self.comm = mpi.internal_comm(parent.comm, self) + self.comm = parent.comm try: layers = utils.verify_reshape(layers, dtypes.IntType, (parent.total_size, 2)) self.constant_layers = False @@ -346,15 +347,15 @@ def __init__(self, parent, layers, extruded_periodic=False): self._extruded = True self._extruded_periodic = extruded_periodic - @utils.cached_property + @cached_property def _kernel_args_(self): return (self.layers_array.ctypes.data, ) - @utils.cached_property + @cached_property def _argtypes_(self): return (ctypes.c_voidp, ) - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): return self.parent._wrapper_cache_key_ + (self.constant_layers, ) @@ -373,11 +374,11 @@ def __str__(self): def __repr__(self): return "ExtrudedSet(%r, %r)" % (self._parent, self._layers) - @utils.cached_property + @cached_property def parent(self): return self._parent - @utils.cached_property + @cached_property def layers(self): """The layers of this extruded set.""" if self.constant_layers: @@ -386,7 +387,7 @@ def layers(self): else: raise ValueError("No 
single layer, use layers_array attribute") - @utils.cached_property + @cached_property def layers_array(self): return self._layers @@ -404,7 +405,7 @@ class Subset(ExtrudedSet): @utils.validate_type(('superset', Set, TypeError), ('indices', (list, tuple, np.ndarray), TypeError)) def __init__(self, superset, indices): - self.comm = mpi.internal_comm(superset.comm, self) + self.comm = superset.comm # sort and remove duplicates indices = np.unique(indices) @@ -429,11 +430,11 @@ def __init__(self, superset, indices): self._extruded = superset._extruded self._extruded_periodic = superset._extruded_periodic - @utils.cached_property + @cached_property def _kernel_args_(self): return self._superset._kernel_args_ + (self._indices.ctypes.data, ) - @utils.cached_property + @cached_property def _argtypes_(self): return self._superset._argtypes_ + (ctypes.c_voidp, ) @@ -467,24 +468,24 @@ def __call__(self, *indices): indices = [indices] return Subset(self, indices) - @utils.cached_property + @cached_property def superset(self): """Returns the superset Set""" return self._superset - @utils.cached_property + @cached_property def indices(self): """Returns the indices pointing in the superset.""" return self._indices - @utils.cached_property + @cached_property def owned_indices(self): """Return the indices that correspond to the owned entities of the superset. """ return self.indices[self.indices < self.superset.size] - @utils.cached_property + @cached_property def layers_array(self): if self._superset.constant_layers: return self._superset.layers_array @@ -548,21 +549,18 @@ def __init__(self, sets): assert all(s is None or isinstance(s, GlobalSet) or ((s.layers == self._sets[0].layers).all() if s.layers is not None else True) for s in sets), \ "All components of a MixedSet must have the same number of layers." # TODO: do all sets need the same communicator? 
- self.comm = mpi.internal_comm( - pytools.single_valued(s.comm for s in sets if s is not None), - self - ) + self.comm = pytools.single_valued(s.comm for s in sets if s is not None) self._initialized = True - @utils.cached_property + @cached_property def _kernel_args_(self): raise NotImplementedError - @utils.cached_property + @cached_property def _argtypes_(self): raise NotImplementedError - @utils.cached_property + @cached_property def _wrapper_cache_key_(self): raise NotImplementedError @@ -584,56 +582,56 @@ def __getitem__(self, idx): """Return :class:`Set` with index ``idx`` or a given slice of sets.""" return self._sets[idx] - @utils.cached_property + @cached_property def split(self): r"""The underlying tuple of :class:`Set`\s.""" return self._sets - @utils.cached_property + @cached_property def core_size(self): """Core set size. Owned elements not touching halo elements.""" return sum(s.core_size for s in self._sets) - @utils.cached_property + @cached_property def constrained_size(self): """Set size, owned constrained elements.""" return sum(s.constrained_size for s in self._sets) - @utils.cached_property + @cached_property def size(self): """Set size, owned elements.""" return sum(0 if s is None else s.size for s in self._sets) - @utils.cached_property + @cached_property def total_size(self): """Total set size, including halo elements.""" return sum(s.total_size for s in self._sets) - @utils.cached_property + @cached_property def sizes(self): """Set sizes: core, owned, execute halo, total.""" return (self.core_size, self.size, self.total_size) - @utils.cached_property + @cached_property def name(self): """User-defined labels.""" return tuple(s.name for s in self._sets) - @utils.cached_property + @cached_property def halo(self): r""":class:`Halo`\s associated with these :class:`Set`\s.""" halos = tuple(s.halo for s in self._sets) return halos if any(halos) else None - @utils.cached_property + @cached_property def _extruded(self): return 
isinstance(self._sets[0], ExtrudedSet) - @utils.cached_property + @cached_property def _extruded_periodic(self): raise NotImplementedError("_extruded_periodic not implemented in MixedSet") - @utils.cached_property + @cached_property def layers(self): """Numbers of layers in the extruded mesh (or None if this MixedSet is not extruded).""" return self._sets[0].layers diff --git a/pyop2/utils.py b/pyop2/utils.py index 2739c75f58..e5b9bb13b1 100644 --- a/pyop2/utils.py +++ b/pyop2/utils.py @@ -34,14 +34,10 @@ """Common utility classes/functions.""" -import os import sys import numpy as np from decorator import decorator import argparse -import petsc4py - -from functools import cached_property # noqa: F401 from pyop2.exceptions import DataTypeError, DataValueError from pyop2.configuration import configuration @@ -302,26 +298,3 @@ def trim(docstring): def strip(code): return '\n'.join([l for l in code.splitlines() if l.strip() and l.strip() != ';']) - - -def get_petsc_dir(): - """Attempts to find the PETSc directory on the system - """ - petsc_config = petsc4py.get_config() - petsc_dir = petsc_config["PETSC_DIR"] - petsc_arch = petsc_config["PETSC_ARCH"] - pathlist = [petsc_dir] - if petsc_arch: - pathlist.append(os.path.join(petsc_dir, petsc_arch)) - return tuple(pathlist) - - -def get_petsc_variables(): - """Attempts obtain a dictionary of PETSc configuration settings - """ - path = [get_petsc_dir()[-1], "lib/petsc/conf/petscvariables"] - variables_path = os.path.join(*path) - with open(variables_path) as fh: - # Split lines on first '=' (assignment) - splitlines = (line.split("=", maxsplit=1) for line in fh.readlines()) - return {k.strip(): v.strip() for k, v in splitlines} diff --git a/pyproject.toml b/pyproject.toml index b1e467416b..cba23ef3f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "firedrake" # .. 
# TODO RELEASE -version = "2025.10.4.dev0" +version = "2026.4.0.dev0" description = "An automated system for the portable solution of partial differential equations using the finite element method" readme = "README.rst" license = "LGPL-3.0-or-later" @@ -20,21 +20,19 @@ dependencies = [ "decorator<=4.4.2", "mpi4py>3; python_version >= '3.13'", "mpi4py; python_version < '3.13'", - # TODO RELEASE - "fenics-ufl @ git+https://github.com/FEniCS/ufl.git@release", - # TODO RELEASE - "firedrake-fiat @ git+https://github.com/firedrakeproject/fiat.git@release", + "fenics-ufl>=2025.3", + "firedrake-fiat>=2026.4", "h5py>3.12.1", "immutabledict", "libsupermesh", "loopy>2024.1", "numpy", "packaging", - "petsc4py==3.24.5", - "petsctools", + "petsc4py==3.25.0", + "petsctools @ git+https://github.com/firedrakeproject/petsctools.git@main", "pkgconfig", "progress", - "pyadjoint-ad>=2025.10.0", + "pyadjoint-ad>=2026.4.0", "pycparser", "pytools[siphash]", "requests", @@ -68,7 +66,7 @@ pyop2-clean = "pyop2.compilation:clear_compiler_disk_cache" [project.optional-dependencies] check = [ - "mpi-pytest>=2025.7", + "mpi-pytest>=2026.0", "pytest", ] docs = [ @@ -90,10 +88,10 @@ jax = [ "jax", ] netgen = [ - "ngsPETSc>=0.1.1", + "ngsPETSc>=0.2.0", ] slepc = [ - "slepc4py==3.24.3", + "slepc4py==3.25.0", ] torch = [ # requires passing '--extra-index-url' to work "torch", @@ -107,35 +105,37 @@ ci = [ "ipympl", # needed for notebook testing "jax", "matplotlib", - "mpi-pytest>=2025.7", + "mpi-pytest>=2026.0", "nbval", "networkx", - "ngsPETSc>=0.1.1", + "ngsPETSc>=0.2.0", "pdf2image", "pygraphviz", "pylit", "pytest", + "pytest-order", "pytest-split", # needed for firedrake-run-split-tests "pytest-timeout", "pytest-xdist", - "slepc4py==3.24.3", + "slepc4py==3.25.0", "torch", # requires passing '--extra-index-url' to work "vtk", ] docker = [ # Used in firedrake-vanilla container "ipympl", # needed for notebook testing "matplotlib", - "mpi-pytest>=2025.7", + "mpi-pytest>=2026.0", "nbval", "networkx", 
"pdf2image", "pygraphviz", "pylit", "pytest", + "pytest-order", "pytest-split", # needed for firedrake-run-split-tests "pytest-timeout", "pytest-xdist", - "slepc4py==3.24.3", + "slepc4py==3.25.0", "vtk", ] @@ -146,12 +146,12 @@ requires = [ "mpi4py>3; python_version >= '3.13'", "mpi4py; python_version < '3.13'", "numpy", + "petsc4py==3.25.0", "petsctools", "pkgconfig", "pybind11", - "setuptools>=77.0.3", - "petsc4py==3.24.5", "rtree>=1.2", + "setuptools>=77.0.3", ] build-backend = "setuptools.build_meta" @@ -168,6 +168,8 @@ script-files = [ # Unless specified these files will not be installed along with the # rest of the package firedrake = [ + "cython/*.pxi", + "cython/*.pyx", "evaluate.h", "locate.c", "icons/*.png", diff --git a/requirements-build.txt b/requirements-build.txt index 3e63fbea9f..716b5e0651 100644 --- a/requirements-build.txt +++ b/requirements-build.txt @@ -7,9 +7,10 @@ numpy pkgconfig petsctools pybind11 -setuptools>=77.0.3 rtree>=1.2 +setuptools>=77.0.3 # Transitive build dependencies hatchling meson-python +scikit_build_core diff --git a/scripts/firedrake-configure b/scripts/firedrake-configure index 8236b85f20..abcca15926 100755 --- a/scripts/firedrake-configure +++ b/scripts/firedrake-configure @@ -30,6 +30,15 @@ LINUX_APT_AARCH64 = PackageManager.LINUX_APT_AARCH64 MACOS_HOMEBREW_ARM64 = PackageManager.MACOS_HOMEBREW_ARM64 +class GPUArch(enum.Enum): + NO_GPU = "none" + CUDA = "cuda" + + +NO_GPU = GPUArch.NO_GPU +CUDA = GPUArch.CUDA + + class FiredrakeArch(enum.Enum): DEFAULT = "default" COMPLEX = "complex" @@ -39,7 +48,21 @@ ARCH_DEFAULT = FiredrakeArch.DEFAULT ARCH_COMPLEX = FiredrakeArch.COMPLEX -SUPPORTED_PETSC_VERSION = "v3.24.5" +SUPPORTED_PETSC_VERSION = "v3.25.0" +# CUDA 13.1 is currently not supported by GPU drivers on Firedrake CI systems. 
+SUPPORTED_CUDA_VERSION = "13.0" + + +CUDA_ARCH_MAP = { + "aarch64": "sbsa" +} + +# Contains the URL to a deb package that will enable vendor-specific software development +# repositories, or an empty string if none are required. +EXTRA_LINUX_APT_PKG_URL = { + NO_GPU: "", + CUDA: f"https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2404/{CUDA_ARCH_MAP.get(platform.machine(), platform.machine())}/cuda-keyring_1.1-1_all.deb", +} def main(): @@ -77,6 +100,15 @@ Please see https://firedrakeproject.org/install for more information.""" default=ARCH_DEFAULT, help="The target configuration to install.", ) + parser.add_argument( + "--gpu-arch", + choices=[arch.value for arch in GPUArch], + default="none", + help=( + "Target GPU architecture. WARNING: This is an experimental feature. " + "GPU support in Firedrake is currently very limited." + ), + ) cmd_group = parser.add_mutually_exclusive_group(required=True) cmd_group.add_argument( "--show-system-packages", @@ -113,6 +145,12 @@ Please see https://firedrakeproject.org/install for more information.""" action="store_true", help="Print out the environment variables that need to be exported to install Firedrake.", ) + cmd_group.add_argument( + "--show-extra-repo-pkg-url", + "--repopkgurl", + action="store_true", + help="Print out the URL of any package required to enable non-OS repo access for this build", + ) args = parser.parse_args() if args.package_manager is not None: @@ -124,13 +162,20 @@ Please see https://firedrakeproject.org/install for more information.""" package_manager = sniff_package_manager() arch = FiredrakeArch(args.arch) + gpu_arch = GPUArch(args.gpu_arch) + if gpu_arch != NO_GPU and package_manager == MACOS_HOMEBREW_ARM64: + raise RuntimeError( + "GPU-compatible PETSc builds are currently only supported" + "on Linux" + ) + if args.show_system_packages: if package_manager is None: raise RuntimeError( "Cannot install Firedrake dependencies without a package manager, " "please install them manually" 
) - print(" ".join(SYSTEM_PACKAGES[package_manager, arch]), end="") + print(" ".join(SYSTEM_PACKAGES[package_manager, arch, gpu_arch]), end="") elif args.show_minimal_system_packages: if package_manager is None: raise RuntimeError( @@ -139,12 +184,14 @@ Please see https://firedrakeproject.org/install for more information.""" ) print(" ".join(MINIMAL_SYSTEM_PACKAGES[package_manager]), end="") elif args.show_petsc_configure_options: - print(" ".join(PETSC_CONFIGURE_OPTIONS[package_manager, arch]), end="") + print(" ".join(PETSC_CONFIGURE_OPTIONS[package_manager, arch, gpu_arch]), end="") elif args.show_petsc_version: print(SUPPORTED_PETSC_VERSION, end="") + elif args.show_extra_repo_pkg_url: + print(EXTRA_LINUX_APT_PKG_URL[gpu_arch], end="") else: assert args.show_env - print(" ".join(ENVIRONMENT_VARS[package_manager, arch]), end="") + print(" ".join(ENVIRONMENT_VARS[package_manager, arch, gpu_arch]), end="") def sniff_package_manager() -> Optional[PackageManager]: @@ -199,7 +246,7 @@ BASE_LINUX_APT_PACKAGES = ( MINIMAL_LINUX_APT_PACKAGES + ("bison", "cmake", "libopenblas-dev", "libopenmpi-dev") ) -PETSC_EXTRAS_LINUX_APT_PACKAGES = ( +PETSC_EXTRAS_COMMON_APT_PACKAGES = ( "libfftw3-dev", "libfftw3-mpi-dev", "libhwloc-dev", @@ -210,12 +257,32 @@ PETSC_EXTRAS_LINUX_APT_PACKAGES = ( "libpnetcdf-dev", "libptscotch-dev", "libscalapack-openmpi-dev", +) + +PETSC_EXTRAS_LINUX_APT_NOGPU_PACKAGES = PETSC_EXTRAS_COMMON_APT_PACKAGES + ( "libsuitesparse-dev", "libsuperlu-dev", "libsuperlu-dist-dev", ) -LINUX_APT_PACKAGES = BASE_LINUX_APT_PACKAGES + PETSC_EXTRAS_LINUX_APT_PACKAGES +cuda_ver_str = SUPPORTED_CUDA_VERSION.replace(".", "-") + +PETSC_EXTRAS_LINUX_APT_CUDA_PACKAGES = PETSC_EXTRAS_COMMON_APT_PACKAGES + ( + f"cuda-compat-{cuda_ver_str}", + f"cuda-nvtx-{cuda_ver_str}", + f"cuda-cudart-dev-{cuda_ver_str}", + f"cuda-command-line-tools-{cuda_ver_str}", + f"cuda-minimal-build-{cuda_ver_str}", + f"cuda-libraries-dev-{cuda_ver_str}", + f"cuda-nvml-dev-{cuda_ver_str}", + 
f"libnpp-dev-{cuda_ver_str}", + f"libcusparse-dev-{cuda_ver_str}", + f"libcublas-dev-{cuda_ver_str}", +) + +LINUX_APT_PACKAGES_NOGPU = BASE_LINUX_APT_PACKAGES + PETSC_EXTRAS_LINUX_APT_NOGPU_PACKAGES + +LINUX_APT_PACKAGES_CUDA = BASE_LINUX_APT_PACKAGES + PETSC_EXTRAS_LINUX_APT_CUDA_PACKAGES MINIMAL_MACOS_HOMEBREW_PACKAGES = ( "autoconf", @@ -255,12 +322,14 @@ MINIMAL_SYSTEM_PACKAGES = { } SYSTEM_PACKAGES = { - (LINUX_APT_X86_64, ARCH_DEFAULT): LINUX_APT_PACKAGES, - (LINUX_APT_X86_64, ARCH_COMPLEX): LINUX_APT_PACKAGES, - (LINUX_APT_AARCH64, ARCH_DEFAULT): LINUX_APT_PACKAGES, - (LINUX_APT_AARCH64, ARCH_COMPLEX): LINUX_APT_PACKAGES, - (MACOS_HOMEBREW_ARM64, ARCH_DEFAULT): MACOS_HOMEBREW_PACKAGES, - (MACOS_HOMEBREW_ARM64, ARCH_COMPLEX): MACOS_HOMEBREW_PACKAGES, + (LINUX_APT_X86_64, ARCH_DEFAULT, NO_GPU): LINUX_APT_PACKAGES_NOGPU, + (LINUX_APT_X86_64, ARCH_COMPLEX, NO_GPU): LINUX_APT_PACKAGES_NOGPU, + (LINUX_APT_AARCH64, ARCH_DEFAULT, NO_GPU): LINUX_APT_PACKAGES_NOGPU, + (LINUX_APT_AARCH64, ARCH_COMPLEX, NO_GPU): LINUX_APT_PACKAGES_NOGPU, + (MACOS_HOMEBREW_ARM64, ARCH_DEFAULT, NO_GPU): MACOS_HOMEBREW_PACKAGES, + (MACOS_HOMEBREW_ARM64, ARCH_COMPLEX, NO_GPU): MACOS_HOMEBREW_PACKAGES, + (LINUX_APT_X86_64, ARCH_DEFAULT, CUDA): LINUX_APT_PACKAGES_CUDA, + (LINUX_APT_AARCH64, ARCH_DEFAULT, CUDA): LINUX_APT_PACKAGES_CUDA, } COMMON_PETSC_CONFIGURE_OPTIONS = ( @@ -271,11 +340,18 @@ COMMON_PETSC_CONFIGURE_OPTIONS = ( "--with-strict-petscerrorcode", ) + +class PetscPackageAction(enum.IntEnum): + PETSC_AUTODETECT = enum.auto() + PETSC_DOWNLOAD = enum.auto() + + # Placeholder value to use when we want PETSc to autodetect the package -PETSC_AUTODETECT = 333 +PETSC_AUTODETECT = PetscPackageAction.PETSC_AUTODETECT # Placeholder value to use when we want PETSc to download the package -PETSC_DOWNLOAD = 666 +PETSC_DOWNLOAD = PetscPackageAction.PETSC_DOWNLOAD + # For each package and architecture there are a number of different types of input: # 1. 
PETSC_AUTODETECT - PETSc will be able to find the package itself @@ -285,7 +361,10 @@ PETSC_DOWNLOAD = 666 # 'lib' subdirectories) # 4. tuple[str, tuple[str, ...]] - a 2-tuple consisting of the includes directory # (location of the header files) and a collection of library files that PETSc needs. -PETSC_EXTERNAL_PACKAGE_SPECS = { +PetscSpecValueType = PetscPackageAction | str | tuple[str | None, tuple[str, ...]] +PetscSpecsDictType = dict[str, dict[PackageManager, PetscSpecValueType]] + +PETSC_EXTERNAL_PACKAGE_SPECS_COMMON: PetscSpecsDictType = { "bison": { LINUX_APT_X86_64: PETSC_AUTODETECT, LINUX_APT_AARCH64: PETSC_AUTODETECT, @@ -341,16 +420,6 @@ PETSC_EXTERNAL_PACKAGE_SPECS = { LINUX_APT_AARCH64: (None, ("-lscalapack-openmpi",)), MACOS_HOMEBREW_ARM64: "/opt/homebrew", }, - "suitesparse": { - LINUX_APT_X86_64: PETSC_AUTODETECT, - LINUX_APT_AARCH64: PETSC_AUTODETECT, - MACOS_HOMEBREW_ARM64: "/opt/homebrew", - }, - "superlu_dist": { - LINUX_APT_X86_64: PETSC_AUTODETECT, - LINUX_APT_AARCH64: PETSC_AUTODETECT, - MACOS_HOMEBREW_ARM64: PETSC_DOWNLOAD, - }, "zlib": { LINUX_APT_X86_64: PETSC_AUTODETECT, LINUX_APT_AARCH64: PETSC_AUTODETECT, @@ -358,6 +427,43 @@ PETSC_EXTERNAL_PACKAGE_SPECS = { }, } +PETSC_EXTERNAL_PACKAGE_SPECS_NOGPU: PetscSpecsDictType = ( + PETSC_EXTERNAL_PACKAGE_SPECS_COMMON + | { + "suitesparse": { + LINUX_APT_X86_64: PETSC_AUTODETECT, + LINUX_APT_AARCH64: PETSC_AUTODETECT, + MACOS_HOMEBREW_ARM64: "/opt/homebrew", + }, + "superlu_dist": { + LINUX_APT_X86_64: PETSC_AUTODETECT, + LINUX_APT_AARCH64: PETSC_AUTODETECT, + MACOS_HOMEBREW_ARM64: PETSC_DOWNLOAD, + }, + } +) + +PETSC_EXTERNAL_PACKAGE_SPECS_CUDA: PetscSpecsDictType = ( + PETSC_EXTERNAL_PACKAGE_SPECS_COMMON + | { + "suitesparse": { + LINUX_APT_X86_64: PETSC_DOWNLOAD, + LINUX_APT_AARCH64: PETSC_DOWNLOAD, + MACOS_HOMEBREW_ARM64: "/opt/homebrew", + }, + "superlu_dist": { + LINUX_APT_X86_64: PETSC_DOWNLOAD, + LINUX_APT_AARCH64: PETSC_DOWNLOAD, + MACOS_HOMEBREW_ARM64: PETSC_DOWNLOAD, + }, + "umpire": 
{ + LINUX_APT_X86_64: PETSC_DOWNLOAD, + LINUX_APT_AARCH64: PETSC_DOWNLOAD, + MACOS_HOMEBREW_ARM64: PETSC_DOWNLOAD, + }, + } +) + COMMON_PETSC_EXTERNAL_PACKAGES = ( "bison", "fftw", @@ -374,10 +480,13 @@ COMMON_PETSC_EXTERNAL_PACKAGES = ( "zlib", ) +PETSC_EXTRA_EXTERNAL_PACKAGES_CUDA = ("umpire",) + def prepare_external_package_configure_options( external_packages: Sequence[str], - package_manager: Optional[PackageManager], + package_manager: PackageManager | None = None, + gpu_arch: GPUArch = NO_GPU, ) -> tuple[str, ...]: configure_options = [] for external_package in external_packages: @@ -385,7 +494,10 @@ def prepare_external_package_configure_options( # Don't know anything about the system, download everything package_spec = PETSC_DOWNLOAD else: - package_spec = PETSC_EXTERNAL_PACKAGE_SPECS[external_package][package_manager] + if gpu_arch == NO_GPU: + package_spec = PETSC_EXTERNAL_PACKAGE_SPECS_NOGPU[external_package][package_manager] + elif gpu_arch == CUDA: + package_spec = PETSC_EXTERNAL_PACKAGE_SPECS_CUDA[external_package][package_manager] if package_spec == PETSC_AUTODETECT: # PETSc will find the package for us @@ -408,12 +520,20 @@ def prepare_external_package_configure_options( return tuple(configure_options) +def get_petsc_arch(arch: FiredrakeArch, gpu_arch: GPUArch) -> str: + arr = ["arch", "firedrake", arch.value] + if gpu_arch != NO_GPU: + arr.append(gpu_arch.value) + return "-".join(arr) + + def prepare_configure_options( package_manager: Optional[PackageManager], arch: FiredrakeArch, + gpu_arch: GPUArch, ) -> tuple[str, ...]: configure_options = list(COMMON_PETSC_CONFIGURE_OPTIONS) - configure_options.append(f"PETSC_ARCH=arch-firedrake-{arch.value}") + configure_options.append(f"PETSC_ARCH={get_petsc_arch(arch, gpu_arch)}") # include/link flags if package_manager in (LINUX_APT_X86_64, LINUX_APT_AARCH64): @@ -426,10 +546,14 @@ def prepare_configure_options( includes = ( f"{incdir}/hdf5/openmpi", f"{incdir}/scotch", - f"{incdir}/superlu", - 
f"{incdir}/superlu-dist", ) + if gpu_arch == NO_GPU: + includes = includes + ( + f"{incdir}/superlu", + f"{incdir}/superlu-dist", + ) + libraries = ( f"{libdir}/hdf5/openmpi", ) @@ -458,41 +582,59 @@ def prepare_configure_options( if arch == ARCH_COMPLEX: configure_options.append("--with-scalar-type=complex") + if gpu_arch == CUDA: + configure_options.extend( + ["--with-cuda=1", "--with-openmp=1", "--with-cxx-dialect=c++17"] + ) + external_packages = list(COMMON_PETSC_EXTERNAL_PACKAGES) if arch != ARCH_COMPLEX: external_packages.append("hypre") + if gpu_arch == CUDA: + external_packages.extend(PETSC_EXTRA_EXTERNAL_PACKAGES_CUDA) configure_options.extend( - prepare_external_package_configure_options(external_packages, package_manager) + prepare_external_package_configure_options( + external_packages, package_manager, gpu_arch + ) ) return tuple(configure_options) +PETSC_VALID_BUILD_COMBINATIONS = ( + (LINUX_APT_X86_64, ARCH_DEFAULT, NO_GPU), + (LINUX_APT_X86_64, ARCH_COMPLEX, NO_GPU), + (LINUX_APT_AARCH64, ARCH_DEFAULT, NO_GPU), + (LINUX_APT_AARCH64, ARCH_COMPLEX, NO_GPU), + (MACOS_HOMEBREW_ARM64, ARCH_DEFAULT, NO_GPU), + (MACOS_HOMEBREW_ARM64, ARCH_COMPLEX, NO_GPU), + (None, ARCH_DEFAULT, NO_GPU), + (None, ARCH_COMPLEX, NO_GPU), + (LINUX_APT_X86_64, ARCH_DEFAULT, CUDA), + (LINUX_APT_AARCH64, ARCH_DEFAULT, CUDA), + (None, ARCH_DEFAULT, CUDA), +) + + PETSC_CONFIGURE_OPTIONS = { - (package_manager, arch): prepare_configure_options(package_manager, arch) - for (package_manager, arch) in ( - (LINUX_APT_X86_64, ARCH_DEFAULT), - (LINUX_APT_X86_64, ARCH_COMPLEX), - (LINUX_APT_AARCH64, ARCH_DEFAULT), - (LINUX_APT_AARCH64, ARCH_COMPLEX), - (MACOS_HOMEBREW_ARM64, ARCH_DEFAULT), - (MACOS_HOMEBREW_ARM64, ARCH_COMPLEX), - (None, ARCH_DEFAULT), - (None, ARCH_COMPLEX), + (package_manager, arch, gpu_arch): prepare_configure_options( + package_manager, arch, gpu_arch ) + for (package_manager, arch, gpu_arch) in PETSC_VALID_BUILD_COMBINATIONS } def prepare_environment_vars( 
package_manager: Optional[PackageManager], arch: FiredrakeArch, + gpu_arch: GPUArch, ) -> tuple[str, ...]: vars = { - "CC": "mpicc", - "CXX": "mpicxx", "PETSC_DIR": f"{os.getcwd()}/petsc", - "PETSC_ARCH": f"arch-firedrake-{arch.value}", + "PETSC_ARCH": get_petsc_arch(arch, gpu_arch), "HDF5_MPI": "ON", } + if gpu_arch == CUDA: + vars["PATH"] = f"/usr/local/cuda/bin:{os.environ.get('PATH', '')}" if package_manager == MACOS_HOMEBREW_ARM64: # On macOS h5py cannot find the HDF5 library without help @@ -505,17 +647,10 @@ def prepare_environment_vars( ENVIRONMENT_VARS = { - (package_manager, arch): prepare_environment_vars(package_manager, arch) - for (package_manager, arch) in ( - (LINUX_APT_X86_64, ARCH_DEFAULT), - (LINUX_APT_X86_64, ARCH_COMPLEX), - (LINUX_APT_AARCH64, ARCH_DEFAULT), - (LINUX_APT_AARCH64, ARCH_COMPLEX), - (MACOS_HOMEBREW_ARM64, ARCH_DEFAULT), - (MACOS_HOMEBREW_ARM64, ARCH_COMPLEX), - (None, ARCH_DEFAULT), - (None, ARCH_COMPLEX), + (package_manager, arch, gpu_arch): prepare_environment_vars( + package_manager, arch, gpu_arch ) + for (package_manager, arch, gpu_arch) in PETSC_VALID_BUILD_COMBINATIONS } diff --git a/scripts/firedrake-run-split-tests b/scripts/firedrake-run-split-tests index 8876490cf2..9164168e07 100755 --- a/scripts/firedrake-run-split-tests +++ b/scripts/firedrake-run-split-tests @@ -1,31 +1,40 @@ #!/usr/bin/env bash -# Script for running a pytest test suite in parallel across multiple jobs. -# -# Only the tests that use the given number of processors are selected from the suite. This list of tests is distributed between multiple jobs and each job outputs its own log file. 
-# -# Usage: -# -# firedrake-run-split-tests -# -# where: -# * is the number of ranks used in each test -# * is the number of different jobs -# * are additional arguments that are passed to pytest -# -# Example: -# -# firedrake-run-split-tests 3 4 tests/unit --verbose -# -# will run all of the parallel[3] tests inside tests/unit verbosely -# and split between 4 different jobs. -# -# Requires: -# -# * pytest -# * pytest-split -# * mpi-pytest -# * GNU parallel +HELP_MSG="\ +Script for running a pytest test suite in parallel across multiple jobs. + +Only the tests that use the given number of processors are selected from the suite. This list of tests is distributed between multiple jobs and each job outputs its own log file. + +Usage: + + firedrake-run-split-tests + + where: + * is the number of ranks used in each test + * is the number of different jobs + * are additional arguments that are passed to pytest + +Example: + + firedrake-run-split-tests 3 4 tests/unit --verbose + + will run all of the parallel[3] tests inside tests/unit verbosely + and split between 4 different jobs. + +Run with [no arguments | -h | --help] to print this help message. 
+ +Requires: + + * pytest + * pytest-split + * mpi-pytest + * GNU parallel" + +# Print out help message with no arguments or "-h" or "--help" +if [[ "$#" -eq "0" ]] || [[ "$1" == "-h" ]] || [[ "$1" == "--help" ]]; then + echo -e "${HELP_MSG}" + exit +fi num_procs=$1 num_jobs=$2 @@ -38,10 +47,9 @@ if [ $num_procs = 1 ]; then else pytest_exec="mpiexec -n ${num_procs} python3 -m pytest" fi -marker_spec="parallel[${num_procs}]" pytest_cmd="${pytest_exec} -v \ --splits ${num_jobs} --group {#} \ - -m ${marker_spec} ${extra_args}" + -m parallel[match] ${extra_args}" log_file_prefix="pytest_nprocs${num_procs}_job" @@ -53,7 +61,7 @@ set -x # * Uses tee to pipe stdout+stderr to both stdout and a log file # * Writes pytest's exit code to a file called jobN.errcode (for later inspection) parallel --line-buffer --tag \ - "${pytest_cmd} |& tee ${log_file_prefix}{#}.log; \ + "${pytest_cmd} 2>&1 | tee ${log_file_prefix}{#}.log; \ echo \${PIPESTATUS[0]} > job{#}.errcode" \ ::: $(seq ${num_jobs}) diff --git a/setup.py b/setup.py index 27015d23cc..33ac21b8e9 100644 --- a/setup.py +++ b/setup.py @@ -3,6 +3,7 @@ import platform import shutil import site +from collections.abc import Sequence from dataclasses import dataclass, field from glob import glob from pathlib import Path @@ -20,7 +21,7 @@ # Ensure that the PETSc getting linked against is compatible -petsctools.init(version_spec=">=3.23.0") +petsctools.init(version_spec=">=3.25.0") import petsc4py @@ -30,17 +31,17 @@ class ExternalDependency: that correspond to the keyword arguments of `Extension`. For convenience it also implements addition and `**` unpacking. 
''' - include_dirs: list[str] = field(default_factory=list, init=True) - extra_compile_args: list[str] = field(default_factory=list, init=True) - libraries: list[str] = field(default_factory=list, init=True) - library_dirs: list[str] = field(default_factory=list, init=True) - extra_link_args: list[str] = field(default_factory=list, init=True) - runtime_library_dirs: list[str] = field(default_factory=list, init=True) + include_dirs: Sequence[str] = field(default_factory=list, init=True) + extra_compile_args: Sequence[str] = field(default_factory=list, init=True) + libraries: Sequence[str] = field(default_factory=list, init=True) + library_dirs: Sequence[str] = field(default_factory=list, init=True) + extra_link_args: Sequence[str] = field(default_factory=list, init=True) + runtime_library_dirs: Sequence[str] = field(default_factory=list, init=True) def __add__(self, other): combined = {} for f in self.__dataclass_fields__.keys(): - combined[f] = getattr(self, f) + getattr(other, f) + combined[f] = [*getattr(self, f), *getattr(other, f)] return self.__class__(**combined) def keys(self): @@ -53,6 +54,14 @@ def __getitem__(self, key): raise KeyError(f"Key {key} not present") +# MPI +# strip the leading 'gcc' or equivalent +mpi_args = petsctools.get_petscvariables()["MPICC_SHOW"].split()[1:] +mpi_ = ExternalDependency( + extra_compile_args=mpi_args, +) + + # Pybind11 # example: # gcc -I/pyind11/include ... 
@@ -74,14 +83,14 @@ def __getitem__(self, key): # example: # gcc -I$PETSC_DIR/include -I$PETSC_DIR/$PETSC_ARCH/include -I/petsc4py/include # gcc -L$PETSC_DIR/$PETSC_ARCH/lib -lpetsc -Wl,-rpath,$PETSC_DIR/$PETSC_ARCH/lib -petsc_dir = petsctools.get_petsc_dir() -petsc_arch = petsctools.get_petsc_arch() -petsc_dirs = [petsc_dir, os.path.join(petsc_dir, petsc_arch)] petsc_ = ExternalDependency( libraries=["petsc"], - include_dirs=[petsc4py.get_include()] + [os.path.join(d, "include") for d in petsc_dirs], - library_dirs=[os.path.join(petsc_dirs[-1], "lib")], - runtime_library_dirs=[os.path.join(petsc_dirs[-1], "lib")], + include_dirs=[ + petsc4py.get_include(), + *petsctools.get_petsc_dirs(subdir="include"), + ], + library_dirs=petsctools.get_petsc_dirs(subdir="lib"), + runtime_library_dirs=petsctools.get_petsc_dirs(subdir="lib"), ) petscvariables = petsctools.get_petscvariables() petsc_hdf5_compile_args = petscvariables.get("HDF5_INCLUDE", "") @@ -157,56 +166,56 @@ def extensions(): name="firedrake.cython.dmcommon", language="c", sources=[os.path.join("firedrake", "cython", "dmcommon.pyx")], - **(petsc_ + numpy_) + **(mpi_ + petsc_ + numpy_) )) # firedrake/cython/extrusion_numbering.pyx: petsc, numpy cython_list.append(Extension( name="firedrake.cython.extrusion_numbering", language="c", sources=[os.path.join("firedrake", "cython", "extrusion_numbering.pyx")], - **(petsc_ + numpy_) + **(mpi_ + petsc_ + numpy_) )) # firedrake/cython/hdf5interface.pyx: petsc, numpy, hdf5 cython_list.append(Extension( name="firedrake.cython.hdf5interface", language="c", sources=[os.path.join("firedrake", "cython", "hdf5interface.pyx")], - **(petsc_ + numpy_ + hdf5_) + **(mpi_ + petsc_ + numpy_ + hdf5_) )) # firedrake/cython/mgimpl.pyx: petsc, numpy cython_list.append(Extension( name="firedrake.cython.mgimpl", language="c", sources=[os.path.join("firedrake", "cython", "mgimpl.pyx")], - **(petsc_ + numpy_) + **(mpi_ + petsc_ + numpy_) )) # firedrake/cython/patchimpl.pyx: petsc, numpy 
cython_list.append(Extension( name="firedrake.cython.patchimpl", language="c", sources=[os.path.join("firedrake", "cython", "patchimpl.pyx")], - **(petsc_ + numpy_) + **(mpi_ + petsc_ + numpy_) )) # firedrake/cython/spatialindex.pyx: numpy, spatialindex cython_list.append(Extension( name="firedrake.cython.spatialindex", language="c", sources=[os.path.join("firedrake", "cython", "spatialindex.pyx")], - **(numpy_ + spatialindex_) + **(mpi_ + numpy_ + spatialindex_) )) # firedrake/cython/supermeshimpl.pyx: petsc, numpy, supermesh cython_list.append(Extension( name="firedrake.cython.supermeshimpl", language="c", sources=[os.path.join("firedrake", "cython", "supermeshimpl.pyx")], - **(petsc_ + numpy_ + libsupermesh_) + **(mpi_ + petsc_ + numpy_ + libsupermesh_) )) # pyop2/sparsity.pyx: petsc, numpy, cython_list.append(Extension( name="pyop2.sparsity", language="c", sources=[os.path.join("pyop2", "sparsity.pyx")], - **(petsc_ + numpy_) + **(mpi_ + petsc_ + numpy_) )) # PYBIND11 EXTENSIONS pybind11_list = [] @@ -215,7 +224,7 @@ def extensions(): name="tinyasm._tinyasm", language="c++", sources=sorted(glob("tinyasm/*.cpp")), # Sort source files for reproducibility - **(petsc_ + pybind11_) + **(mpi_ + petsc_ + pybind11_) )) return cythonize(cython_list) + pybind11_list diff --git a/tests/firedrake/adjoint/test_assemble.py b/tests/firedrake/adjoint/test_assemble.py index b10d68cfcc..22bd7540a1 100644 --- a/tests/firedrake/adjoint/test_assemble.py +++ b/tests/firedrake/adjoint/test_assemble.py @@ -6,26 +6,14 @@ from firedrake.adjoint import * -@pytest.fixture -def rg(): - return RandomGenerator(PCG64(seed=1234)) - - @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() +def autouse_set_test_tape(set_test_tape): + pass -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. 
- if annotate_tape(): - pause_annotation() +@pytest.fixture +def rg(): + return RandomGenerator(PCG64(seed=1234)) @pytest.mark.skipcomplex @@ -89,7 +77,7 @@ def test_assemble_1_forms_tlm(rg): mesh = IntervalMesh(10, 0, 1) V = FunctionSpace(mesh, "Lagrange", 1) v = TestFunction(V) - f = Function(V).assign(1) + f = Function(V).assign(1.) w1 = assemble(inner(f, v) * dx) w2 = assemble(inner(f**2, v) * dx) diff --git a/tests/firedrake/adjoint/test_assignment.py b/tests/firedrake/adjoint/test_assignment.py index a6ba4e6c83..3423550766 100644 --- a/tests/firedrake/adjoint/test_assignment.py +++ b/tests/firedrake/adjoint/test_assignment.py @@ -7,26 +7,14 @@ from numpy.testing import assert_approx_equal, assert_allclose -@pytest.fixture -def rg(): - return RandomGenerator(PCG64(seed=1234)) - - @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() +def autouse_set_test_tape(set_test_tape): + pass -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() +@pytest.fixture +def rg(): + return RandomGenerator(PCG64(seed=1234)) @pytest.mark.skipcomplex @@ -64,7 +52,7 @@ def test_assign_vector_valued(): J = assemble(inner(f, g)*u**2*dx) rf = ReducedFunctional(J, Control(f)) - h = Function(V).assign(1) + h = Function(V).assign(1.) assert taylor_test(rf, f, h) > 1.9 @@ -84,7 +72,7 @@ def test_assign_tlm(): J = assemble(inner(f, g)*u**2*dx) rf = ReducedFunctional(J, Control(f)) - h = Function(V).assign(1) + h = Function(V).assign(1.) f.block_variable.tlm_value = h tape = get_working_tape() @@ -138,7 +126,7 @@ def test_assign_hessian(): dJdm = rf.derivative() - h = Function(V).assign(1) + h = Function(V).assign(1.) 
Hm = rf.hessian(h) assert taylor_test(rf, f, h, dJdm=h._ad_dot(dJdm), Hm=h._ad_dot(Hm)) > 2.9 diff --git a/tests/firedrake/adjoint/test_burgers_newton.py b/tests/firedrake/adjoint/test_burgers_newton.py index fd001b45e8..a8cd11d600 100644 --- a/tests/firedrake/adjoint/test_burgers_newton.py +++ b/tests/firedrake/adjoint/test_burgers_newton.py @@ -13,20 +13,8 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass @pytest.fixture @@ -119,7 +107,7 @@ def J(ic, nu, solve_type, timestep, total_steps, V, nu_time_dependent=False): # The comment below and the others like it are used to generate the # documentation for the firedrake/docs/source/chekpointing.rst file. # [test_disk_checkpointing 10] - for step in tape.timestepper(range(total_steps)): + for step in tape.timestepper(iter(range(total_steps))): # Advance the forward model # [test_disk_checkpointing 11] if nu_time_dependent and step > 4: diff --git a/tests/firedrake/adjoint/test_checkpointing_multistep.py b/tests/firedrake/adjoint/test_checkpointing_multistep.py index a6004d01d7..61c80b9175 100644 --- a/tests/firedrake/adjoint/test_checkpointing_multistep.py +++ b/tests/firedrake/adjoint/test_checkpointing_multistep.py @@ -10,29 +10,20 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. 
- if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass total_steps = 20 dt = 0.01 -mesh = UnitIntervalMesh(1) -V = FunctionSpace(mesh, "DG", 0) -def J(displacement_0): +@pytest.fixture +def V(): + return FunctionSpace(UnitIntervalMesh(1), "DG", 0) + + +def J(displacement_0, V): stiff = Constant(2.5) damping = Constant(0.3) rho = Constant(1.0) @@ -59,12 +50,12 @@ def J(displacement_0): @pytest.mark.skipcomplex -def test_multisteps(): +def test_multisteps(V): tape = get_working_tape() tape.progress_bar = ProgressBar tape.enable_checkpointing(MixedCheckpointSchedule(total_steps, 2, storage=StorageType.RAM)) displacement_0 = Function(V).assign(1.0) - val = J(displacement_0) + val = J(displacement_0, V) _check_forward(tape) c = Control(displacement_0) J_hat = ReducedFunctional(val, c) @@ -82,20 +73,20 @@ def test_multisteps(): @pytest.mark.skipcomplex -def test_validity(): +def test_validity(V): tape = get_working_tape() tape.progress_bar = ProgressBar displacement_0 = Function(V).assign(1.0) # Without checkpointing. - val0 = J(displacement_0) + val0 = J(displacement_0, V) J_hat0 = ReducedFunctional(val0, Control(displacement_0)) dJ0 = J_hat0.derivative() - val_recomputed0 = J(displacement_0) + val_recomputed0 = J(displacement_0, V) tape.clear_tape() # With checkpointing. 
tape.enable_checkpointing(MixedCheckpointSchedule(total_steps, 2, storage=StorageType.RAM)) - val = J(displacement_0) + val = J(displacement_0, V) J_hat = ReducedFunctional(val, Control(displacement_0)) dJ = J_hat.derivative() val_recomputed = J_hat(displacement_0) diff --git a/tests/firedrake/adjoint/test_covariance_operator.py b/tests/firedrake/adjoint/test_covariance_operator.py new file mode 100644 index 0000000000..a1980eda7a --- /dev/null +++ b/tests/firedrake/adjoint/test_covariance_operator.py @@ -0,0 +1,47 @@ +import pytest +from firedrake import * +from firedrake.adjoint import * + + +@pytest.fixture(autouse=True) +def autouse_set_test_tape(set_test_tape): + pass + + +@pytest.mark.skipcomplex +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("m", (0, 2, 4)) +@pytest.mark.parametrize("family", ("CG", "DG")) +def test_covariance_adjoint_norm(m, family): + """Test that covariance operators are properly taped. + """ + nx = 20 + L = 0.2 + sigma = 0.1 + + mesh = UnitIntervalMesh(nx) + x, = SpatialCoordinate(mesh) + + V = FunctionSpace(mesh, family, 1) + + u = Function(V).project(sin(2*pi*x)) + v = Function(V).project(2 - 0.5*sin(6*pi*x)) + + form = 'IP' if family == 'DG' else 'CG' + B = AutoregressiveCovariance(V, L, sigma, m, form=form) + + continue_annotation() + with set_working_tape() as tape: + w = Function(V).project(u**4 + v) + J = B.norm(w) + Jhat = ReducedFunctional(J, Control(u), tape=tape) + pause_annotation() + + m = Function(V).project(sin(2*pi*(x+0.2))) + h = Function(V).project(sin(4*pi*(x-0.2))) + + taylor = taylor_to_dict(Jhat, m, h) + + assert min(taylor['R0']['Rate']) > 0.95, taylor['R0'] + assert min(taylor['R1']['Rate']) > 1.95, taylor['R1'] + assert min(taylor['R2']['Rate']) > 2.95, taylor['R2'] diff --git a/tests/firedrake/adjoint/test_disk_checkpointing.py b/tests/firedrake/adjoint/test_disk_checkpointing.py index 24d8fbfbd9..13c9469230 100644 --- a/tests/firedrake/adjoint/test_disk_checkpointing.py +++ 
b/tests/firedrake/adjoint/test_disk_checkpointing.py @@ -3,28 +3,18 @@ from firedrake import * from firedrake.adjoint import * from firedrake.adjoint_utils.checkpointing import disk_checkpointing +from pyadjoint.tape import set_working_tape, continue_annotation from checkpoint_schedules import SingleDiskStorageSchedule +from mpi4py import MPI import numpy as np import os +import shutil +import tempfile @pytest.fixture(autouse=True) -def handle_taping(): +def autouse_test_taping(set_test_tape): yield - tape = get_working_tape() - tape.clear_tape() - tape._package_data = {} - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() - if disk_checkpointing(): pause_disk_checkpointing() @@ -216,3 +206,313 @@ def test_bcs(): obj = assemble(uu * uu * dx) rf = ReducedFunctional(obj, control) assert np.allclose(rf(F), obj) + + +# --- checkpoint_comm disk checkpointing tests --- +# These test the checkpoint_comm option which writes function data using +# PETSc Vec I/O on a user-supplied communicator instead of CheckpointFile. +# Passing MPI.COMM_SELF gives each rank its own file, avoiding parallel +# HDF5 and enabling node-local storage on HPC systems. + + +@pytest.mark.skipcomplex +@pytest.mark.parallel(nprocs=3) +def test_checkpoint_comm_disk_checkpointing_parallel(): + tape = get_working_tape() + # Each rank creates its own tmpdir independently. This is intentional: + # with COMM_SELF each rank is its own communicator, so there is no need + # to agree on a shared directory. We can't use pytest's tmp_path here + # because parallel tests run as separate MPI processes. 
+ tmpdir = tempfile.mkdtemp(prefix="firedrake_test_checkpoint_comm_") + try: + enable_disk_checkpointing(checkpoint_comm=MPI.COMM_SELF, + checkpoint_dir=tmpdir) + tape.enable_checkpointing(SingleDiskStorageSchedule()) + mesh = checkpointable_mesh(UnitSquareMesh(10, 10)) + J_disk, grad_J_disk = adjoint_example(mesh) + + assert disk_checkpointing() is False + tape.clear_tape() + J_mem, grad_J_mem = adjoint_example(mesh) + assert np.allclose(J_disk, J_mem) + assert np.allclose(assemble((grad_J_disk - grad_J_mem)**2*dx), 0.0) + finally: + shutil.rmtree(tmpdir, ignore_errors=True) + + +@pytest.mark.skipcomplex +def test_checkpoint_comm_successive_writes(tmp_path): + tape = get_working_tape() + tape.clear_tape() + enable_disk_checkpointing(checkpoint_comm=MPI.COMM_SELF, + checkpoint_dir=str(tmp_path)) + tape.enable_checkpointing(SingleDiskStorageSchedule()) + + mesh = checkpointable_mesh(UnitSquareMesh(1, 1)) + cg_space = FunctionSpace(mesh, "CG", 1) + u = Function(cg_space, name='u') + v = Function(cg_space, name='v') + + u.assign(1.) + v.assign(v + 2.*u) + v.assign(v + 3.*u) + + J = assemble(v*dx) + Jhat = ReducedFunctional(J, Control(u)) + assert np.allclose(J, Jhat(Function(cg_space).interpolate(1.))) + assert disk_checkpointing() is False + + +@pytest.mark.skipcomplex +@pytest.mark.parallel(nprocs=3) +def test_checkpoint_comm_multi_mesh_parallel(): + """Test checkpoint_comm checkpointing with two independently partitioned meshes. + + Uses two meshes with different sizes and element orders so that the + function spaces live on differently partitioned meshes. Both solves + are controlled by the same control variable to exercise the + checkpoint save/restore across meshes. + """ + tape = get_working_tape() + # Per-rank tmpdir: same rationale as test_checkpoint_comm_disk_checkpointing_parallel. 
+ tmpdir = tempfile.mkdtemp(prefix="firedrake_test_checkpoint_comm_multi_") + try: + enable_disk_checkpointing(checkpoint_comm=MPI.COMM_SELF, + checkpoint_dir=tmpdir) + tape.enable_checkpointing(SingleDiskStorageSchedule()) + + mesh_a = checkpointable_mesh(UnitSquareMesh(10, 10, name="mesh_a")) + mesh_b = checkpointable_mesh(UnitSquareMesh(7, 7, name="mesh_b")) + + Va = FunctionSpace(mesh_a, "CG", 2) + Vb = FunctionSpace(mesh_b, "CG", 1) + + x_a, y_a = SpatialCoordinate(mesh_a) + m = assemble(interpolate(sin(4*pi*x_a)*cos(4*pi*y_a), Va)) + + # Solve on mesh_a driven by m + u_a = Function(Va, name="u_a") + v_a = TestFunction(Va) + F_a = inner(grad(u_a), grad(v_a)) * dx - m * v_a * dx + bcs_a = [DirichletBC(Va, 0.0, "on_boundary")] + solve(F_a == 0, u_a, bcs=bcs_a) + + # Independent solve on mesh_b + x_b, y_b = SpatialCoordinate(mesh_b) + u_b = Function(Vb, name="u_b") + v_b = TestFunction(Vb) + F_b = inner(grad(u_b), grad(v_b)) * dx - (x_b + y_b) * v_b * dx + bcs_b = [DirichletBC(Vb, 0.0, "on_boundary")] + solve(F_b == 0, u_b, bcs=bcs_b) + + J = assemble(u_a**2 * dx) + assemble(u_b**2 * dx) + Jhat = ReducedFunctional(J, Control(m)) + + with stop_annotating(): + m_new = assemble(interpolate(sin(4*pi*x_a)*cos(4*pi*y_a), Va)) + + h = Function(Va).interpolate(Constant(0.1)) + assert taylor_test(Jhat, m_new, h) > 1.9 + finally: + shutil.rmtree(tmpdir, ignore_errors=True) + + +# --- sub_comm disk checkpointing tests --- +# These test the checkpoint_comm option with a communicator that groups +# ranks into sub-communicators of size > 1 but < COMM_WORLD, exercising +# the multi-rank createMPI + parallel HDF5 path where ranks collectively +# write to a shared checkpoint file. + + +def _sub_comm(): + """Return a communicator splitting ranks into groups of 2.""" + comm = MPI.COMM_WORLD + return comm.Split(color=comm.rank // 2, key=comm.rank) + + +def _broadcast_tmpdir(comm): + """Create a tmpdir on rank 0 and broadcast the path to all ranks. 
+ + Broadcast on COMM_WORLD so all sub-comms share the same parent + directory. Each sub-comm's TemporaryFunctionCheckpointFile creates + its own subdirectory inside it, so isolation is automatic. A single + parent also means rank 0 of COMM_WORLD can clean everything up. + """ + if comm.rank == 0: + d = tempfile.mkdtemp(prefix="firedrake_test_sub_comm_") + else: + d = None + return comm.bcast(d, root=0) + + +@pytest.mark.skipcomplex +@pytest.mark.parallel(nprocs=3) +def test_sub_comm_disk_checkpointing_parallel(): + """Test disk checkpointing with a multi-rank sub-communicator.""" + sub_comm = _sub_comm() + tape = get_working_tape() + tmpdir = _broadcast_tmpdir(MPI.COMM_WORLD) + try: + enable_disk_checkpointing(checkpoint_comm=sub_comm, + checkpoint_dir=tmpdir) + tape.enable_checkpointing(SingleDiskStorageSchedule()) + mesh = checkpointable_mesh(UnitSquareMesh(10, 10)) + J_disk, grad_J_disk = adjoint_example(mesh) + + assert disk_checkpointing() is False + tape.clear_tape() + J_mem, grad_J_mem = adjoint_example(mesh) + assert np.allclose(J_disk, J_mem) + assert np.allclose(assemble((grad_J_disk - grad_J_mem)**2*dx), 0.0) + finally: + if MPI.COMM_WORLD.rank == 0: + shutil.rmtree(tmpdir, ignore_errors=True) + + +@pytest.mark.skipcomplex +@pytest.mark.parallel(nprocs=3) +def test_sub_comm_multi_mesh_parallel(): + """Test sub-comm checkpointing with two independently partitioned meshes.""" + sub_comm = _sub_comm() + tape = get_working_tape() + tmpdir = _broadcast_tmpdir(MPI.COMM_WORLD) + try: + enable_disk_checkpointing(checkpoint_comm=sub_comm, + checkpoint_dir=tmpdir) + tape.enable_checkpointing(SingleDiskStorageSchedule()) + + mesh_a = checkpointable_mesh(UnitSquareMesh(10, 10, name="mesh_a")) + mesh_b = checkpointable_mesh(UnitSquareMesh(7, 7, name="mesh_b")) + + Va = FunctionSpace(mesh_a, "CG", 2) + Vb = FunctionSpace(mesh_b, "CG", 1) + + x_a, y_a = SpatialCoordinate(mesh_a) + m = assemble(interpolate(sin(4*pi*x_a)*cos(4*pi*y_a), Va)) + + # Solve on mesh_a driven 
by m + u_a = Function(Va, name="u_a") + v_a = TestFunction(Va) + F_a = inner(grad(u_a), grad(v_a)) * dx - m * v_a * dx + bcs_a = [DirichletBC(Va, 0.0, "on_boundary")] + solve(F_a == 0, u_a, bcs=bcs_a) + + # Independent solve on mesh_b + x_b, y_b = SpatialCoordinate(mesh_b) + u_b = Function(Vb, name="u_b") + v_b = TestFunction(Vb) + F_b = inner(grad(u_b), grad(v_b)) * dx - (x_b + y_b) * v_b * dx + bcs_b = [DirichletBC(Vb, 0.0, "on_boundary")] + solve(F_b == 0, u_b, bcs=bcs_b) + + J = assemble(u_a**2 * dx) + assemble(u_b**2 * dx) + Jhat = ReducedFunctional(J, Control(m)) + + with stop_annotating(): + m_new = assemble(interpolate(sin(4*pi*x_a)*cos(4*pi*y_a), Va)) + + h = Function(Va).interpolate(Constant(0.1)) + assert taylor_test(Jhat, m_new, h) > 1.9 + finally: + if MPI.COMM_WORLD.rank == 0: + shutil.rmtree(tmpdir, ignore_errors=True) + + +@pytest.mark.skipcomplex +@pytest.mark.parallel(nprocs=3) +def test_sub_comm_adjoint_dependencies_parallel(): + """Test sub-comm checkpointing with timestepper and taylor_test.""" + sub_comm = _sub_comm() + tape = get_working_tape() + tmpdir = _broadcast_tmpdir(MPI.COMM_WORLD) + try: + enable_disk_checkpointing(checkpoint_comm=sub_comm, + checkpoint_dir=tmpdir) + tape.enable_checkpointing(SingleDiskStorageSchedule()) + mesh = checkpointable_mesh(UnitSquareMesh(10, 10)) + V = FunctionSpace(mesh, "CG", 1) + c = Function(V).interpolate(1.0) + + def delta_expr(x0, x, y, sigma_x=2000.0): + sigma_x = Constant(sigma_x) + return exp(-sigma_x * ((x - x0[0]) ** 2 + (y - x0[1]) ** 2)) + + x, y = SpatialCoordinate(mesh) + + u = TrialFunction(V) + v = TestFunction(V) + u_np1 = Function(V, name="u_np1") + u_n = Function(V, name="u_n") + u_nm1 = Function(V, name="u_nm1") + time_term = (u - 2.0 * u_n + u_nm1) / Constant(0.001**2) * v * dx + a = c * c * dot(grad(u_n), grad(v)) * dx + F = time_term + a + delta_expr(Constant([0.5, 0.5]), x, y) * v * dx + lin_var = LinearVariationalProblem(lhs(F), rhs(F), u_np1, constant_jacobian=True) + solver = 
LinearVariationalSolver(lin_var) + J = 0. + for _ in tape.timestepper(iter(range(10))): + solver.solve() + u_nm1.assign(u_n) + u_n.assign(u_np1) + J += assemble(u_np1 * u_np1 * dx) + + J_hat = ReducedFunctional(J, Control(c)) + assert taylor_test(J_hat, c, Function(V).interpolate(0.1)) > 1.9 + finally: + if MPI.COMM_WORLD.rank == 0: + shutil.rmtree(tmpdir, ignore_errors=True) + + +# --- _checkpoint_indices pruning test --- + + +@pytest.mark.skipcomplex +def test_checkpoint_indices_pruning(tmp_path): + """_checkpoint_indices entries are pruned when the checkpoint file is released. + + CheckpointFunction objects are created during tape replay (inside Jhat), + not during the initial forward pass. Two reference chains keep a + CheckPointFileReference alive: (1) DiskCheckpointer.current_checkpoint_file + and (2) CheckpointFunction.self.file held via the tape blocks and Jhat's + controls. Once both are dropped, CheckPointFileReference.__del__ fires and + pops the entry from _checkpoint_indices. This keeps memory bounded over + long adjoint runs without risking premature removal: restore() never reads + _checkpoint_indices, it uses stored_name and stored_index baked into the + CheckpointFunction at save time. + """ + import gc + from firedrake.adjoint_utils.checkpointing import CheckpointFunction + + tape = get_working_tape() + tape.clear_tape() + enable_disk_checkpointing(dirname=str(tmp_path)) + tape.enable_checkpointing(SingleDiskStorageSchedule()) + mesh = checkpointable_mesh(UnitSquareMesh(1, 1)) + cg_space = FunctionSpace(mesh, "CG", 1) + u = Function(cg_space, name='u') + v = Function(cg_space, name='v') + u.assign(1.) + v.assign(v + 2.*u) + J = assemble(v*dx) + Jhat = ReducedFunctional(J, Control(u)) + _ = Jhat(Function(cg_space).interpolate(1.)) + + checkpointer = tape._package_data["firedrake"] + # Filter to files created by this test (replay writes to a new file chosen + # by reset(), so we match by directory rather than the initial file name). 
+ our_files = {f for f in CheckpointFunction._checkpoint_indices + if str(tmp_path) in f} + assert len(our_files) > 0 + + # Drop both reference chains to trigger __del__ on CheckPointFileReference: + # (1) DiskCheckpointer replaces current_checkpoint_file via reset(). + checkpointer.reset() + # (2) tape.clear_tape() removes blocks (CheckpointFunction objects held as + # saved_output), and del Jhat releases the Control → BlockVariable → + # saved_output chain that Jhat's controls keep alive. + tape.clear_tape() + del Jhat + gc.collect() + + for f in our_files: + assert f not in CheckpointFunction._checkpoint_indices diff --git a/tests/firedrake/adjoint/test_dynamic_meshes.py b/tests/firedrake/adjoint/test_dynamic_meshes.py index c4c4c0058d..1f945b710a 100644 --- a/tests/firedrake/adjoint/test_dynamic_meshes.py +++ b/tests/firedrake/adjoint/test_dynamic_meshes.py @@ -6,25 +6,13 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. 
- if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass @pytest.mark.skipcomplex -@pytest.mark.parametrize("mesh", [UnitSquareMesh(10, 10)]) -def test_dynamic_meshes_2D(mesh): +def test_dynamic_meshes_2D(): + mesh = UnitSquareMesh(10, 10) S = mesh.coordinates.function_space() s = [Function(S), Function(S), Function(S)] mesh.coordinates.assign(mesh.coordinates + s[0]) @@ -71,19 +59,32 @@ def test_dynamic_meshes_2D(mesh): @pytest.mark.skipcomplex -@pytest.mark.parametrize("mesh", [UnitCubeMesh(4, 4, 5), - UnitOctahedralSphereMesh(3), - UnitIcosahedralSphereMesh(3), - UnitCubedSphereMesh(3), - TorusMesh(25, 10, 1, 0.5), - CylinderMesh(10, 25, radius=0.5, depth=0.8)]) -def test_dynamic_meshes_3D(mesh): +@pytest.mark.parametrize("mesh_type", ["UnitCubeMesh", + "UnitOctahedralSphereMesh", + "UnitIcosahedralSphereMesh", + "UnitCubedSphereMesh", + "TorusMesh", + "CylinderMesh"]) +def test_dynamic_meshes_3D(mesh_type): + if mesh_type == "UnitCubeMesh": + mesh = UnitCubeMesh(4, 4, 5) + if mesh_type == "UnitOctahedralSphereMesh": + mesh = UnitOctahedralSphereMesh(3) + if mesh_type == "UnitIcosahedralSphereMesh": + mesh = UnitIcosahedralSphereMesh(3) + if mesh_type == "UnitCubedSphereMesh": + mesh = UnitCubedSphereMesh(3) + if mesh_type == "TorusMesh": + mesh = TorusMesh(25, 10, 1, 0.5) + if mesh_type == "CylinderMesh": + mesh = CylinderMesh(10, 25, radius=0.5, depth=0.8) + S = mesh.coordinates.function_space() s = [Function(S), Function(S), Function(S)] mesh.coordinates.assign(mesh.coordinates + s[0]) x = SpatialCoordinate(mesh) - if mesh.cell_dimension() != mesh.geometric_dimension(): + if mesh.cell_dimension() != mesh.geometric_dimension: mesh.init_cell_orientations(x) V = FunctionSpace(mesh, "CG", 1) diff --git a/tests/firedrake/adjoint/test_ensemble_reduced_functional.py b/tests/firedrake/adjoint/test_ensemble_reduced_functional.py index a9fe89d132..bd654ed52d 100644 --- a/tests/firedrake/adjoint/test_ensemble_reduced_functional.py +++ 
b/tests/firedrake/adjoint/test_ensemble_reduced_functional.py @@ -6,20 +6,8 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass @pytest.mark.parallel(nprocs=4) diff --git a/tests/firedrake/adjoint/test_external_modification.py b/tests/firedrake/adjoint/test_external_modification.py index 4115f6f67a..5d79b7f6bc 100644 --- a/tests/firedrake/adjoint/test_external_modification.py +++ b/tests/firedrake/adjoint/test_external_modification.py @@ -6,20 +6,8 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. 
- if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass @pytest.mark.skipcomplex diff --git a/tests/firedrake/adjoint/test_hessian.py b/tests/firedrake/adjoint/test_hessian.py index 344868b26e..e3ec975c97 100644 --- a/tests/firedrake/adjoint/test_hessian.py +++ b/tests/firedrake/adjoint/test_hessian.py @@ -3,8 +3,10 @@ from firedrake import * from firedrake.adjoint import * -from numpy.random import default_rng -rng = default_rng() + +@pytest.fixture(autouse=True) +def autouse_set_test_tape(set_test_tape): + pass @pytest.fixture @@ -12,23 +14,6 @@ def rg(): return RandomGenerator(PCG64(seed=1234)) -@pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() - - @pytest.mark.skipcomplex def test_simple_solve(rg): tape = Tape() diff --git a/tests/firedrake/adjoint/test_optimisation.py b/tests/firedrake/adjoint/test_optimisation.py index 2189a7882b..da1ce48050 100644 --- a/tests/firedrake/adjoint/test_optimisation.py +++ b/tests/firedrake/adjoint/test_optimisation.py @@ -3,6 +3,7 @@ from enum import Enum, auto from numpy.testing import assert_allclose import numpy as np +from ufl.duals import is_primal from firedrake import * from firedrake.adjoint import * from pyadjoint import Block, MinimizationProblem, TAOSolver, get_working_tape @@ -11,20 +12,8 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. 
- if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass @pytest.mark.skipcomplex @@ -192,9 +181,9 @@ def transform(v, transform_type, *args, mfn_parameters=None, **kwargs): mfn_parameters = dict(mfn_parameters) space = v.function_space() - if not ufl.duals.is_primal(space): + if not is_primal(space): space = space.dual() - if not ufl.duals.is_primal(space): + if not is_primal(space): raise NotImplementedError("Mixed primal/dual space case not implemented") comm = v.comm @@ -243,7 +232,7 @@ def mult(self, A, x, y): if mfn.getConvergedReason() <= 0: raise RuntimeError("Convergence failure") - if ufl.duals.is_primal(v): + if is_primal(v): u = Function(space) else: u = Cofunction(space.dual()) diff --git a/tests/firedrake/adjoint/test_projection.py b/tests/firedrake/adjoint/test_projection.py index 090924e00b..c059dd3dac 100644 --- a/tests/firedrake/adjoint/test_projection.py +++ b/tests/firedrake/adjoint/test_projection.py @@ -4,26 +4,14 @@ from firedrake.adjoint import * -@pytest.fixture -def rg(): - return RandomGenerator(PCG64(seed=1234)) - - @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() +def autouse_set_test_tape(set_test_tape): + pass -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() +@pytest.fixture +def rg(): + return RandomGenerator(PCG64(seed=1234)) @pytest.mark.skipcomplex @@ -42,7 +30,7 @@ def test_project_vector_valued(): J = assemble(inner(f, g)*u**2*dx) rf = ReducedFunctional(J, Control(f)) - h = Function(V).assign(1) + h = Function(V).assign(1.) assert taylor_test(rf, f, h) > 1.9 @@ -62,7 +50,7 @@ def test_project_tlm(): J = assemble(inner(f, g)*u**2*dx) rf = ReducedFunctional(J, Control(f)) - h = Function(V).assign(1) + h = Function(V).assign(1.) 
f.tlm_value = h tape = get_working_tape() @@ -89,7 +77,7 @@ def test_project_hessian(): dJdm = rf.derivative() - h = Function(V).assign(1) + h = Function(V).assign(1.) Hm = rf.hessian(h) assert taylor_test(rf, f, h, dJdm=h._ad_dot(dJdm), Hm=h._ad_dot(Hm)) > 2.9 diff --git a/tests/firedrake/adjoint/test_reduced_functional.py b/tests/firedrake/adjoint/test_reduced_functional.py index 33803b3f2f..ef6f3d1fba 100644 --- a/tests/firedrake/adjoint/test_reduced_functional.py +++ b/tests/firedrake/adjoint/test_reduced_functional.py @@ -1,27 +1,14 @@ import pytest +import numpy as np from firedrake import * from firedrake.adjoint import * from pytest_mpi.parallel_assert import parallel_assert -from numpy.random import rand - @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass @pytest.mark.skipcomplex @@ -32,7 +19,7 @@ def test_constant(): c = Function(R, val=1) f = Function(V) - f.assign(1) + f.assign(1.) u = Function(V) v = TestFunction(V) @@ -53,7 +40,7 @@ def test_function(): c = Constant(1) f = Function(V) - f.assign(1) + f.assign(1.) u = Function(V) v = TestFunction(V) @@ -66,7 +53,7 @@ def test_function(): Jhat = ReducedFunctional(J, Control(f)) h = Function(V) - h.dat.data[:] = rand(V.dof_dset.size) + h.dat.data[:] = np.random.rand(V.dof_dset.size) assert taylor_test(Jhat, f, h) > 1.9 @@ -89,7 +76,7 @@ def test_wrt_function_dirichlet_boundary(control): g1 = Function(R, val=2) g2 = Function(R, val=1) f = Function(V) - f.assign(10) + f.assign(10.) 
a = inner(grad(u), grad(v))*dx + u**2*v*dx L = inner(f, v)*dx + inner(g1, v)*ds(4) + inner(g2, v)*ds(3) @@ -103,7 +90,7 @@ def test_wrt_function_dirichlet_boundary(control): Jhat = ReducedFunctional(J, Control(bc_func)) g = bc_func h = Function(V) - h.assign(1) + h.assign(1.) else: Jhat = ReducedFunctional(J, Control(g1)) g = g1 @@ -132,10 +119,10 @@ def test_time_dependent(): T = 0.5 dt = 0.1 f = Function(V) - f.assign(1) + f.assign(1.) u_1 = Function(V) - u_1.assign(1) + u_1.assign(1.) control = Control(u_1) a = u_1*u*v*dx + dt*f*inner(grad(u), grad(v))*dx @@ -153,7 +140,7 @@ def test_time_dependent(): Jhat = ReducedFunctional(J, control) h = Function(V) - h.assign(1) + h.assign(1.) assert taylor_test(Jhat, control.tape_value(), h) > 1.9 @@ -173,7 +160,7 @@ def test_mixed_boundary(): g1 = Constant(2) g2 = Constant(1) f = Function(V) - f.assign(10) + f.assign(10.) a = f*inner(grad(u), grad(v))*dx L = inner(f, v)*dx + inner(g1, v)*ds(4) + inner(g2, v)*ds(3) @@ -184,7 +171,7 @@ def test_mixed_boundary(): Jhat = ReducedFunctional(J, Control(f)) h = Function(V) - h.assign(1) + h.assign(1.) assert taylor_test(Jhat, f, h) > 1.9 @@ -195,13 +182,13 @@ def test_assemble_recompute(): R = FunctionSpace(mesh, "R", 0) f = Function(V) - f.assign(2) + f.assign(2.) expr = Function(R).assign(assemble(f**2*dx)) J = assemble(expr**2*dx(domain=mesh)) Jhat = ReducedFunctional(J, Control(f)) h = Function(V) - h.assign(1) + h.assign(1.) 
assert taylor_test(Jhat, f, h) > 1.9 @@ -215,7 +202,7 @@ def test_interpolate(): f = Function(V) f.dat.data[:] = 2 - J = assemble(Interpolate(f**2, c)) + J = assemble(interpolate(f**2, c)) Jhat = ReducedFunctional(J, Control(f)) h = Function(V) @@ -245,7 +232,7 @@ def test_interpolate_mixed(): f1, f2 = split(f) exprs = [f2 * div(f1)**2, grad(f2) * div(f1)] expr = as_vector([e[i] for e in exprs for i in np.ndindex(e.ufl_shape)]) - J = assemble(Interpolate(expr, c)) + J = assemble(interpolate(expr, c)) Jhat = ReducedFunctional(J, Control(f)) h = Function(V) @@ -308,6 +295,6 @@ def test_ad_dot(riesz_representation): dJhat = Jhat.derivative(apply_riesz=True) h = Function(V) - h.dat.data[:] = rand(V.dof_dset.size) + h.dat.data[:] = np.random.rand(V.dof_dset.size) dJdh = dJhat._ad_dot(h, options={'riesz_representation': riesz_representation}) assert taylor_test(Jhat, f, h, dJdm=dJdh) > 1.9 diff --git a/tests/firedrake/adjoint/test_shape_derivatives.py b/tests/firedrake/adjoint/test_shape_derivatives.py index 0573cb7cc9..adefaf6aa4 100644 --- a/tests/firedrake/adjoint/test_shape_derivatives.py +++ b/tests/firedrake/adjoint/test_shape_derivatives.py @@ -7,20 +7,8 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. 
- if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass @pytest.mark.skipcomplex diff --git a/tests/firedrake/adjoint/test_solving.py b/tests/firedrake/adjoint/test_solving.py index 5ce9b120ce..81dfdc64ad 100644 --- a/tests/firedrake/adjoint/test_solving.py +++ b/tests/firedrake/adjoint/test_solving.py @@ -5,26 +5,14 @@ from numpy.testing import assert_approx_equal -@pytest.fixture -def rg(): - return RandomGenerator(PCG64(seed=1234)) - - @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() +def autouse_set_test_tape(set_test_tape): + pass -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() +@pytest.fixture +def rg(): + return RandomGenerator(PCG64(seed=1234)) @pytest.mark.skipcomplex @@ -33,7 +21,7 @@ def test_linear_problem(rg): mesh = IntervalMesh(10, 0, 1) V = FunctionSpace(mesh, "Lagrange", 1) R = FunctionSpace(mesh, "R", 0) - f = Function(V).assign(1) + f = Function(V).assign(1.) u = TrialFunction(V) u_ = Function(V) @@ -60,7 +48,7 @@ def test_singular_linear_problem(rg): mesh = UnitSquareMesh(10, 10) V = FunctionSpace(mesh, "CG", 1) - f = Function(V).assign(1) + f = Function(V).assign(1.) u = TrialFunction(V) u_ = Function(V) @@ -85,7 +73,7 @@ def test_nonlinear_problem(pre_apply_bcs, rg): mesh = IntervalMesh(10, 0, 1) V = FunctionSpace(mesh, "Lagrange", 1) R = FunctionSpace(mesh, "R", 0) - f = Function(V).assign(1) + f = Function(V).assign(1.) u = Function(V) v = TestFunction(V) @@ -116,7 +104,7 @@ def test_mixed_boundary(rg): g1 = Constant(2) g2 = Constant(1) - f = Function(V).assign(10) + f = Function(V).assign(10.) 
def J(f): a = f*inner(grad(u), grad(v))*dx @@ -165,7 +153,7 @@ def xtest_wrt_function_dirichlet_boundary(): g1 = Constant(2) g2 = Constant(1) - f = Function(V).assign(10) + f = Function(V).assign(10.) def J(bc): a = inner(grad(u), grad(v))*dx @@ -195,7 +183,7 @@ def test_wrt_function_neumann_boundary(): g1 = Function(R, val=2) g2 = Function(R, val=1) - f = Function(V).assign(10) + f = Function(V).assign(10.) def J(g1): a = inner(grad(u), grad(v))*dx @@ -247,7 +235,7 @@ def test_wrt_constant_neumann_boundary(): g1 = Function(R, val=2) g2 = Function(R, val=1) - f = Function(V).assign(10) + f = Function(V).assign(10.) def J(g1): a = inner(grad(u), grad(v))*dx @@ -283,7 +271,7 @@ def test_time_dependent(): f = Function(R, val=1) def J(f): - u_1 = Function(V).assign(1) + u_1 = Function(V).assign(1.) a = u_1*u*v*dx + dt*f*inner(grad(u), grad(v))*dx L = u_1*v*dx @@ -340,7 +328,7 @@ def _test_adjoint_function_boundary(J, bc, f): set_working_tape(tape) V = f.function_space() - h = Function(V).assign(1) + h = Function(V).assign(1.) g = Function(V) eps_ = [0.4/2.0**i for i in range(4)] residuals = [] diff --git a/tests/firedrake/adjoint/test_split_and_subfunctions.py b/tests/firedrake/adjoint/test_split_and_subfunctions.py index 40b72b498d..27bf466dd1 100644 --- a/tests/firedrake/adjoint/test_split_and_subfunctions.py +++ b/tests/firedrake/adjoint/test_split_and_subfunctions.py @@ -6,42 +6,44 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() +def autouse_set_test_tape(set_test_tape): + pass -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. 
- if annotate_tape(): - pause_annotation() +@pytest.fixture +def mesh(): + return UnitSquareMesh(2, 2) -mesh = UnitSquareMesh(2, 2) -cg2 = FiniteElement("CG", triangle, 2) -cg1 = FiniteElement("CG", triangle, 1) -ele = MixedElement([cg2, cg1]) -ZZ = FunctionSpace(mesh, ele) -V2 = FunctionSpace(mesh, cg2) +@pytest.fixture +def V1(mesh): + return FunctionSpace(mesh, "CG", 1) -# the tests are run on functions from the MixedFunctionSpace ZZ +@pytest.fixture +def V2(mesh): + return FunctionSpace(mesh, "CG", 2) + + +# the tests are run on functions from the MixedFunctionSpace V2*V1 # and on a normal (non-mixed) FunctionSpace. Calling split() on # a non-mixed function is trivial, but was previously broken -@pytest.fixture(params=[ZZ, V2], ids=('mixed', 'non-mixed')) -def Z(request): - return request.param +@pytest.fixture(params=['non-mixed', 'mixed']) +def Z(request, V2, V1): + if request.param == 'non-mixed': + return V2 + elif request.param == 'mixed': + return V2*V1 + else: + raise ValueError("Unknown function space specification") -rg = RandomGenerator() +@pytest.fixture +def rng(): + return RandomGenerator(PCG64(seed=13)) -def main(ic, fnsplit=True): +def main(ic, V2, fnsplit=True): u = Function(V2) w = TrialFunction(V2) v = TestFunction(V2) @@ -60,10 +62,10 @@ def main(ic, fnsplit=True): @pytest.mark.skipcomplex -def test_split(Z): +def test_split(Z, V2): ic = Function(Z) - u = main(ic, fnsplit=False) + u = main(ic, V2, fnsplit=False) j = assemble(u**2*dx) rf = ReducedFunctional(j, Control(ic)) @@ -72,36 +74,36 @@ def test_split(Z): @pytest.mark.skipcomplex -def test_fn_split(Z): +def test_fn_split(Z, V2, rng): set_working_tape(Tape()) ic = Function(Z) - u = main(ic, fnsplit=True) + u = main(ic, V2, fnsplit=True) j = assemble(u**2*dx) rf = ReducedFunctional(j, Control(ic)) - h = rg.uniform(Z) + h = rng.uniform(Z) assert taylor_test(rf, ic, h) > 1.9 @pytest.mark.skipcomplex -def test_fn_split_hessian(Z): +def test_fn_split_hessian(Z, V2, rng): 
set_working_tape(Tape()) ic = Function(Z) - u = main(ic, fnsplit=True) + u = main(ic, V2, fnsplit=True) j = assemble(u ** 4 * dx) rf = ReducedFunctional(j, Control(ic)) - h = rg.uniform(Z) + h = rng.uniform(Z) dJdm = rf.derivative()._ad_dot(h) Hm = rf.hessian(h)._ad_dot(h) assert taylor_test(rf, ic, h, dJdm=dJdm, Hm=Hm) > 2.9 @pytest.mark.skipcomplex -def test_fn_split_no_annotate(Z): +def test_fn_split_no_annotate(Z, V2, rng): set_working_tape(Tape()) ic = Function(Z) @@ -120,7 +122,7 @@ def test_fn_split_no_annotate(Z): j = assemble(u ** 4 * dx + ic_uv * dx) rf = ReducedFunctional(j, Control(ic)) - h = rg.uniform(Z) + h = rng.uniform(Z) r = taylor_to_dict(rf, ic, h) assert min(r["R0"]["Rate"]) > 0.95 diff --git a/tests/firedrake/adjoint/test_tag.py b/tests/firedrake/adjoint/test_tag.py index 21f3d41b66..f695bd02cf 100644 --- a/tests/firedrake/adjoint/test_tag.py +++ b/tests/firedrake/adjoint/test_tag.py @@ -5,20 +5,8 @@ @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() - - -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. 
- if annotate_tape(): - pause_annotation() +def autouse_set_test_tape(set_test_tape): + pass @pytest.fixture(params=[ diff --git a/tests/firedrake/adjoint/test_tlm.py b/tests/firedrake/adjoint/test_tlm.py index 723f84e481..5eea04f253 100644 --- a/tests/firedrake/adjoint/test_tlm.py +++ b/tests/firedrake/adjoint/test_tlm.py @@ -4,26 +4,14 @@ from firedrake.adjoint import * -@pytest.fixture -def rg(): - return RandomGenerator(PCG64(seed=1234)) - - @pytest.fixture(autouse=True) -def handle_taping(): - yield - tape = get_working_tape() - tape.clear_tape() +def autouse_set_test_tape(set_test_tape): + pass -@pytest.fixture(autouse=True, scope="module") -def handle_annotation(): - if not annotate_tape(): - continue_annotation() - yield - # Ensure annotation is paused when we finish. - if annotate_tape(): - pause_annotation() +@pytest.fixture +def rg(): + return RandomGenerator(PCG64(seed=1234)) # Tolerance in the tests. @@ -36,7 +24,7 @@ def test_tlm_assemble(rg): set_working_tape(tape) mesh = IntervalMesh(10, 0, 1) V = FunctionSpace(mesh, "Lagrange", 1) - f = Function(V).assign(5) + f = Function(V).assign(5.) u = TrialFunction(V) v = TestFunction(V) @@ -65,8 +53,8 @@ def test_tlm_bc(): mesh = IntervalMesh(10, 0, 1) V = FunctionSpace(mesh, "Lagrange", 1) R = FunctionSpace(mesh, "R", 0) - c = Function(R, val=1) - f = Function(V).assign(1) + c = Function(R, val=1.) + f = Function(V).assign(1.) u = Function(V) v = TestFunction(V) @@ -88,8 +76,8 @@ def test_tlm_func(rg): mesh = IntervalMesh(10, 0, 1) V = FunctionSpace(mesh, "Lagrange", 1) - c = Function(V).assign(1) - f = Function(V).assign(1) + c = Function(V).assign(1.) + f = Function(V).assign(1.) u = Function(V) v = TestFunction(V) @@ -130,9 +118,9 @@ def test_time_dependent(solve_type, rg): # Some variables T = 0.5 dt = 0.1 - f = Function(V).assign(1) + f = Function(V).assign(1.) - u_1 = Function(V).assign(1) + u_1 = Function(V).assign(1.) 
control = Control(u_1) a = u_1 * u * v * dx + dt * f * inner(grad(u), grad(v)) * dx @@ -216,8 +204,8 @@ def test_projection(): V = FunctionSpace(mesh, "CG", 1) R = FunctionSpace(mesh, "R", 0) - bc = DirichletBC(V, Constant(1), "on_boundary") - k = Function(R, val=2) + bc = DirichletBC(V, Constant(1.), "on_boundary") + k = Function(R, val=2.) x, y = SpatialCoordinate(mesh) expr = sin(k*x) f = project(expr, V) @@ -234,7 +222,7 @@ def test_projection(): J = assemble(u_**2*dx) Jhat = ReducedFunctional(J, Control(k)) - assert (taylor_test(Jhat, k, Function(R, val=1), dJdm=Jhat.tlm(Constant(1))) > 1.9) + assert (taylor_test(Jhat, k, Function(R, val=1.), dJdm=Jhat.tlm(Constant(1.))) > 1.9) @pytest.mark.skipcomplex @@ -244,7 +232,7 @@ def test_projection_function(rg): mesh = UnitSquareMesh(10, 10) V = FunctionSpace(mesh, "CG", 1) - bc = DirichletBC(V, Constant(1), "on_boundary") + bc = DirichletBC(V, Constant(1.), "on_boundary") x, y = SpatialCoordinate(mesh) g = project(sin(x)*sin(y), V, annotate=False) expr = sin(g*x) diff --git a/tests/firedrake/adjoint/test_transformed_functional.py b/tests/firedrake/adjoint/test_transformed_functional.py new file mode 100644 index 0000000000..379a8d6e77 --- /dev/null +++ b/tests/firedrake/adjoint/test_transformed_functional.py @@ -0,0 +1,288 @@ +from collections.abc import Sequence +from functools import partial + +import firedrake as fd +from firedrake.adjoint import ( + Control, L2TransformedFunctional, MinimizationProblem, ReducedFunctional, + continue_annotation, pause_annotation, minimize) +import numpy as np +from pyadjoint import TAOSolver +from pyadjoint.reduced_functional_numpy import ReducedFunctionalNumPy +import pytest +import ufl + + +@pytest.fixture(autouse=True) +def autouse_set_test_tape(set_test_tape): + pass + + +class ReducedFunctional(ReducedFunctional): + def __init__(self, *args, **kwargs): + self._test_transformed_functional__ncalls = 0 + super().__init__(*args, **kwargs) + + def __call__(self, *args, **kwargs): 
+ self._test_transformed_functional__ncalls += 1 + return super().__call__(*args, **kwargs) + + +class L2TransformedFunctional(L2TransformedFunctional): + def __init__(self, *args, **kwargs): + self._test_transformed_functional__ncalls = 0 + super().__init__(*args, **kwargs) + + def __call__(self, *args, **kwargs): + self._test_transformed_functional__ncalls += 1 + return super().__call__(*args, **kwargs) + + +class MinimizeCallback(Sequence): + def __init__(self, m_0, error_norm): + self._space = m_0.function_space() + self._error_norm = error_norm + self._data = [] + + self(np.asarray(m_0._ad_to_list(m_0))) + + def __len__(self): + return len(self._data) + + def __getitem__(self, key): + return self._data[key] + + def __call__(self, xk): + k = len(self) + if ufl.duals.is_primal(self._space): + m_k = fd.Function(self._space, name="m_k") + elif ufl.duals.is_dual(self._space): + m_k = fd.Cofunction(self._space, name="m_k") + else: + raise ValueError("space is neither primal nor dual") + m_k._ad_assign_numpy(m_k, xk, 0) + error_norm = self._error_norm(m_k) + print(f"{k=} {error_norm=:6g}") + self._data.append(error_norm) + + +@pytest.mark.parametrize("family", ("Lagrange", "Discontinuous Lagrange")) +@pytest.mark.skipcomplex +def test_transformed_functional_mass_inverse(family): + mesh = fd.UnitSquareMesh(5, 5, diagonal="crossed") + x, y = fd.SpatialCoordinate(mesh) + space = fd.FunctionSpace(mesh, family, 1, variant="equispaced") + + def forward(m): + return fd.assemble(fd.inner(m - m_ref, m - m_ref) * fd.dx) + + m_ref = fd.Function(space, name="m_ref").interpolate( + fd.exp(x) * fd.sin(fd.pi * x) * fd.cos(fd.pi * y)) + + continue_annotation() + m_0 = fd.Function(space, name="m_0") + J = forward(m_0) + pause_annotation() + c = Control(m_0, riesz_map="l2") + + J_hat = ReducedFunctional(J, c) + + def error_norm(m): + return fd.norm(m - m_ref, norm_type="L2") + + cb = MinimizeCallback(m_0, error_norm) + _ = minimize(J_hat, method="L-BFGS-B", + callback=cb, + 
options={"ftol": 0, + "gtol": 1e-6}) + assert 1e-6 < cb[-1] < 1e-5 + if family == "Lagrange": + assert len(cb) > 12 # == 15 + assert J_hat._test_transformed_functional__ncalls > 12 # == 15 + elif family == "Discontinuous Lagrange": + assert len(cb) == 5 + assert J_hat._test_transformed_functional__ncalls == 6 + else: + raise ValueError(f"Invalid element family: '{family}'") + + continue_annotation() + m_0 = fd.Function(space, name="m_0") + J = forward(m_0) + pause_annotation() + c = Control(m_0, riesz_map="l2") + + J_hat = L2TransformedFunctional(J, c, alpha=1) + + def error_norm(m): + m = J_hat.map_result(m) + return fd.norm(m - m_ref, norm_type="L2") + + cb = MinimizeCallback(J_hat.controls[0].control, error_norm) + _ = minimize(ReducedFunctionalNumPy(J_hat), method="L-BFGS-B", + callback=cb, + options={"ftol": 0, + "gtol": 1e-6}) + assert cb[-1] < 1e-10 + assert len(cb) == 3 + assert J_hat._test_transformed_functional__ncalls == 3 + + +@pytest.mark.skipcomplex +def test_transformed_functional_poisson(): + mesh = fd.UnitSquareMesh(5, 5, diagonal="crossed") + x, y = fd.SpatialCoordinate(mesh) + space = fd.FunctionSpace(mesh, "Lagrange", 1) + test = fd.TestFunction(space) + trial = fd.TrialFunction(space) + bc = fd.DirichletBC(space, 0, "on_boundary") + + def pre_process(m): + m_0 = fd.Function(space, name="m_0").assign(m) + bc.apply(m_0) + m_1 = fd.Function(space, name="m_1").assign(m - m_0) + return m_0, m_1 + + def forward(m): + m_0, m_1 = pre_process(m) + u = fd.Function(space, name="u") + fd.solve(fd.inner(fd.grad(trial), fd.grad(test)) * fd.dx + == fd.inner(m_0, test) * fd.dx, + u, bc) + return m_0, m_1, u + + def forward_J(m, u_ref, alpha): + _, m_1, u = forward(m) + return fd.assemble(fd.inner(u - u_ref, u - u_ref) * fd.dx + + fd.Constant(alpha ** 2) * fd.inner(m_1, m_1) * fd.ds) + + m_ref = fd.Function(space, name="m_ref").interpolate( + fd.exp(x) * fd.sin(fd.pi * x) * fd.sin(fd.pi * y)) + m_ref, _, u_ref = forward(m_ref) + forward_J = partial(forward_J, 
u_ref=u_ref, alpha=1) + + continue_annotation() + m_0 = fd.Function(space, name="m_0") + J = forward_J(m_0) + pause_annotation() + c = Control(m_0, riesz_map="l2") + + J_hat = ReducedFunctional(J, c) + + def error_norm(m): + m, _ = pre_process(m) + return fd.norm(m - m_ref, norm_type="L2") + + cb = MinimizeCallback(m_0, error_norm) + _ = minimize(J_hat, method="L-BFGS-B", + callback=cb, + options={"ftol": 0, + "gtol": 1e-10}) + assert 1e-2 < cb[-1] < 5e-2 + assert len(cb) > 80 # == 85 + assert J_hat._test_transformed_functional__ncalls > 90 # == 95 + + continue_annotation() + m_0 = fd.Function(space, name="m_0") + J = forward_J(m_0) + pause_annotation() + c = Control(m_0, riesz_map="l2") + + J_hat = L2TransformedFunctional(J, c, alpha=1e-5) + + def error_norm(m): + m = J_hat.map_result(m) + m, _ = pre_process(m) + return fd.norm(m - m_ref, norm_type="L2") + + cb = MinimizeCallback(J_hat.controls[0].control, error_norm) + _ = minimize(ReducedFunctionalNumPy(J_hat), method="L-BFGS-B", + callback=cb, + options={"ftol": 0, + "gtol": 1e-10}) + assert 1e-4 < cb[-1] < 5e-4 + assert len(cb) < 55 # == 51 + assert J_hat._test_transformed_functional__ncalls < 60 # == 55 + + +@pytest.mark.skipcomplex +def test_transformed_functional_poisson_tao_nls(): + mesh = fd.UnitSquareMesh(5, 5, diagonal="crossed") + x, y = fd.SpatialCoordinate(mesh) + space = fd.FunctionSpace(mesh, "Lagrange", 1) + test = fd.TestFunction(space) + trial = fd.TrialFunction(space) + bc = fd.DirichletBC(space, 0, "on_boundary") + + def pre_process(m): + m_0 = fd.Function(space, name="m_0").assign(m) + bc.apply(m_0) + m_1 = fd.Function(space, name="m_1").assign(m - m_0) + return m_0, m_1 + + def forward(m): + m_0, m_1 = pre_process(m) + u = fd.Function(space, name="u") + fd.solve(fd.inner(fd.grad(trial), fd.grad(test)) * fd.dx + == fd.inner(m_0, test) * fd.dx, + u, bc) + return m_0, m_1, u + + def forward_J(m, u_ref, alpha): + _, m_1, u = forward(m) + return fd.assemble(fd.inner(u - u_ref, u - u_ref) * fd.dx 
+ + fd.Constant(alpha ** 2) * fd.inner(m_1, m_1) * fd.ds) + + m_ref = fd.Function(space, name="m_ref").interpolate( + fd.exp(x) * fd.sin(fd.pi * x) * fd.sin(fd.pi * y)) + m_ref, _, u_ref = forward(m_ref) + forward_J = partial(forward_J, u_ref=u_ref, alpha=1) + + continue_annotation() + m_0 = fd.Function(space, name="m_0") + J = forward_J(m_0) + pause_annotation() + c = Control(m_0, riesz_map="l2") + + J_hat = ReducedFunctional(J, c) + + def error_norm(m): + m, _ = pre_process(m) + return fd.norm(m - m_ref, norm_type="L2") + + problem = MinimizationProblem(J_hat) + solver = TAOSolver(problem, {"tao_type": "nls", + "tao_monitor": None, + "tao_converged_reason": None, + "tao_gatol": 1.0e-5, + "tao_grtol": 0.0, + "tao_gttol": 1.0e-6}) + m_opt = solver.solve() + error_norm_opt = error_norm(m_opt) + print(f"{error_norm_opt=:.6g}") + assert 1e-2 < error_norm_opt < 5e-2 + assert J_hat._test_transformed_functional__ncalls < 10 + + continue_annotation() + m_0 = fd.Function(space, name="m_0") + J = forward_J(m_0) + pause_annotation() + c = Control(m_0, riesz_map="l2") + + J_hat = L2TransformedFunctional(J, c, alpha=1e-5) + + def error_norm(m): + m = J_hat.map_result(m) + m, _ = pre_process(m) + return fd.norm(m - m_ref, norm_type="L2") + + problem = MinimizationProblem(J_hat) + solver = TAOSolver(problem, {"tao_type": "nls", + "tao_monitor": None, + "tao_converged_reason": None, + "tao_gatol": 1.0e-5, + "tao_grtol": 0.0, + "tao_gttol": 1.0e-6}) + m_opt = solver.solve() + error_norm_opt = error_norm(m_opt) + print(f"{error_norm_opt=:.6g}") + assert 1e-3 < error_norm_opt < 1e-2 + assert J_hat._test_transformed_functional__ncalls < 10 # == 8 diff --git a/tests/firedrake/conftest.py b/tests/firedrake/conftest.py index 890d13c739..f0f980adb0 100644 --- a/tests/firedrake/conftest.py +++ b/tests/firedrake/conftest.py @@ -9,8 +9,14 @@ os.environ["FIREDRAKE_DISABLE_OPTIONS_LEFT"] = "1" import pytest -from firedrake.petsc import PETSc +from mpi4py import MPI from petsctools import 
get_external_packages +from pyadjoint.tape import ( + annotate_tape, get_working_tape, set_working_tape, + continue_annotation, pause_annotation +) + +from firedrake.petsc import PETSc # Use a non-interactive backend for matplotlib if DISPLAY is undefined. This @@ -160,10 +166,14 @@ def pytest_configure(config): "markers", "skipnetgen: mark as skipped if netgen and ngsPETSc is not installed" ) + config.addinivalue_line( + "markers", + "skipnogpu: mark as skipped when GPU hardware is unavailable" + ) def pytest_collection_modifyitems(session, config, items): - from firedrake.utils import complex_mode, SLATE_SUPPORTS_COMPLEX + from firedrake.utils import complex_mode, device_matrix_type, SLATE_SUPPORTS_COMPLEX for item in items: if complex_mode: @@ -175,6 +185,10 @@ def pytest_collection_modifyitems(session, config, items): if item.get_closest_marker("skipreal") is not None: item.add_marker(pytest.mark.skip(reason="Test makes no sense unless in complex mode")) + if device_matrix_type(False) is None: + if item.get_closest_marker("skipnogpu") is not None: + item.add_marker(pytest.mark.skip(reason="Test requires GPU hardware to run.")) + for dep, marker, reason in dependency_skip_markers_and_reasons: if item.get_closest_marker(marker) is not None and _skip_test_dependency(dep): item.add_marker(pytest.mark.skip(reason)) @@ -182,10 +196,9 @@ def pytest_collection_modifyitems(session, config, items): @pytest.fixture(scope="module", autouse=True) def check_empty_tape(request): - """Check that the tape is empty at the end of each module""" - from pyadjoint.tape import annotate_tape, get_working_tape - - def fin(): + """Check that the tape is empty at the end of each module. 
+ """ + def finalizer(): # make sure taping is switched off assert not annotate_tape() @@ -194,7 +207,17 @@ def fin(): if tape is not None: assert len(tape.get_blocks()) == 0 - request.addfinalizer(fin) + request.addfinalizer(finalizer) + + +@pytest.fixture +def set_test_tape(): + """Set a new working tape specifically for this test. + """ + continue_annotation() + with set_working_tape(): + yield + pause_annotation() @pytest.fixture @@ -242,3 +265,10 @@ def __exit__(self, exc_type, exc_val, traceback): def petsc_raises(): # This function is needed because pytest does not support classes as fixtures. return _petsc_raises + + +@pytest.fixture +def garbage_cleanup(): + """Fixture that runs the parallel garbage collector.""" + yield + PETSc.garbage_cleanup(MPI.COMM_WORLD) diff --git a/tests/firedrake/demos/test_demos_run.py b/tests/firedrake/demos/test_demos_run.py index dbb0b938be..7202ddbc5f 100644 --- a/tests/firedrake/demos/test_demos_run.py +++ b/tests/firedrake/demos/test_demos_run.py @@ -18,6 +18,7 @@ DEMO_DIR = join(CWD, "..", "..", "..", "demos") SERIAL_DEMOS = [ + Demo(("adaptive_multigrid", "adaptive_multigrid"), ["matplotlib", "netgen", "vtk"]), Demo(("benney_luke", "benney_luke"), ["vtk"]), Demo(("boussinesq", "boussinesq"), []), Demo(("burgers", "burgers"), ["vtk"]), @@ -26,6 +27,7 @@ Demo(("DG_advection", "DG_advection"), ["matplotlib"]), Demo(("eigenvalues_QG_basinmodes", "qgbasinmodes"), ["matplotlib", "slepc", "vtk"]), Demo(("extruded_continuity", "extruded_continuity"), []), + Demo(("extruded_shallow_water", "extruded_shallow_water"), []), Demo(("helmholtz", "helmholtz"), ["vtk"]), Demo(("higher_order_mass_lumping", "higher_order_mass_lumping"), ["vtk"]), Demo(("immersed_fem", "immersed_fem"), []), diff --git a/tests/firedrake/ensemble/test_ensemble.py b/tests/firedrake/ensemble/test_ensemble.py index f7c3a9a893..e3186d6c9e 100644 --- a/tests/firedrake/ensemble/test_ensemble.py +++ b/tests/firedrake/ensemble/test_ensemble.py @@ -3,9 +3,6 @@ import 
pytest from pytest_mpi.parallel_assert import parallel_assert -from operator import mul -from functools import reduce - max_ncpts = 2 @@ -60,7 +57,7 @@ def W(request, mesh): if COMM_WORLD.size == 1: return V = FunctionSpace(mesh, "CG", 1) - return reduce(mul, [V for _ in range(request.param)]) + return MixedFunctionSpace([V for _ in range(request.param)]) # initialise unique function on each rank @@ -383,3 +380,43 @@ def test_ensemble_solvers(ensemble, W, urank, urank_sum): ensemble.allreduce(u_separate, usum) parallel_assert(errornorm(u_combined, usum) < 1e-8) + + +@pytest.mark.parallel(nprocs=6) +@pytest.mark.parametrize("direction", ["forward", "reverse"]) +def test_ensemble_sequential(ensemble, direction): + """ + Test that the sequential context manager sends forward + the correct values after each rank has executed, for both + intrinsic types (float) and Firedrake types (Function). + """ + + rank = ensemble.ensemble_rank + mesh = UnitIntervalMesh(1, comm=ensemble.comm) + R = FunctionSpace(mesh, "R", 0) + + reverse = direction == "reverse" + + idx_i = 0 + idx_f = Function(R).zero() + two = Function(R).assign(2) + + with ensemble.sequential(reverse=reverse, idx_i=idx_i, idx_f=idx_f) as ctx: + recv_i = float(ctx.idx_i) + recv_f = float(ctx.idx_f) + + ctx.idx_i += 2 + ctx.idx_f += two + + if reverse: + expected = 2*(ensemble.ensemble_size - 1 - rank) + else: + expected = 2*rank + + parallel_assert( + recv_i == expected, + msg=f"Failed to send int properly. Expecting {expected} but received {recv_i}") + + parallel_assert( + abs(float(recv_f)-expected) < 1e-12, + msg=f"Failed to send Function properly. 
Expecting {expected} but received {float(recv_f)}") diff --git a/tests/firedrake/ensemble/test_ensemble_mat.py b/tests/firedrake/ensemble/test_ensemble_mat.py new file mode 100644 index 0000000000..b95fa0aa74 --- /dev/null +++ b/tests/firedrake/ensemble/test_ensemble_mat.py @@ -0,0 +1,152 @@ +import pytest +import numpy as np +import petsctools +from pytest_mpi.parallel_assert import parallel_assert +from firedrake import * +from firedrake.ensemble.ensemble_mat import EnsembleBlockDiagonalMat + + +@pytest.mark.parallel([1, 2, 3, 4]) +def test_ensemble_mat(): + # create ensemble + global_ranks = COMM_WORLD.size + nspatial_ranks = 2 if (global_ranks % 2 == 0) else 1 + ensemble = Ensemble(COMM_WORLD, nspatial_ranks) + ensemble_rank = ensemble.ensemble_rank + + # create mesh + mesh = UnitIntervalMesh(10, comm=ensemble.comm) + + # create function spaces + CG = FunctionSpace(mesh, "CG", 1) + DG = FunctionSpace(mesh, "DG", 1+ensemble_rank) + + # create ensemble function spaces / functions + row_space = EnsembleFunctionSpace([CG, CG], ensemble) + col_space = EnsembleFunctionSpace([CG, DG], ensemble) + + # build forms + u, v = TrialFunction(CG), TestFunction(CG) + nu = Constant(ensemble_rank+1) + a0 = inner(u, v)*dx + nu*inner(grad(u), grad(v))*dx + + u, v = TrialFunction(CG), TestFunction(DG) + a1 = (1/nu)*inner(u, v)*dx + + # assemble mats + A0mat = assemble(a0).petscmat + A1mat = assemble(a1).petscmat + mats = [A0mat, A1mat] + + # create ensemble mat + emat = EnsembleBlockDiagonalMat(mats, row_space, col_space) + + # build ensemble function lhs and rhs for Ax=y + x = EnsembleFunction(row_space) + y = EnsembleCofunction(col_space.dual()) + ycheck = EnsembleCofunction(col_space.dual()) + + for i, xi in enumerate(x.subfunctions): + xi.assign(ensemble_rank + i + 1) + + # assemble reference matmult + for A, xi, yi in zip(mats, x.subfunctions, ycheck.subfunctions): + with xi.dat.vec_ro as xv, yi.dat.vec_wo as yv: + A.mult(xv, yv) + + # assemble matmult + with x.vec_ro() as xv, 
y.vec_wo() as yv: + emat.mult(xv, yv) + + checks = [ + np.allclose(yi.dat.data_ro, yci.dat.data_ro) + for yi, yci in zip(y.subfunctions, ycheck.subfunctions) + ] + + # check results + parallel_assert( + all(checks), + msg=("Action of EnsembleBlockDiagonalMat does not match" + f" actions of local matrices: {checks}") + ) + + +@pytest.mark.parallel([1, 2, 3, 4]) +@pytest.mark.parametrize("default_options", [True, False], + ids=["default_options", "blockwise_options"]) +def test_ensemble_pc(default_options): + # create ensemble + global_ranks = COMM_WORLD.size + nspatial_ranks = 2 if (global_ranks % 2 == 0) else 1 + ensemble = Ensemble(COMM_WORLD, nspatial_ranks) + ensemble_rank = ensemble.ensemble_rank + + # Default PETSc pc is ILU so need a 2D mesh + # because for 1D ILU is an exact solver. + mesh = UnitSquareMesh(8, 8, comm=ensemble.comm) + + # create function spaces + CG = FunctionSpace(mesh, "CG", 2) + DG = FunctionSpace(mesh, "DG", 2+ensemble_rank) + + # create ensemble function spaces / functions + row_space = EnsembleFunctionSpace([CG, DG], ensemble) + col_space = EnsembleFunctionSpace([CG, DG], ensemble) + offset = col_space.global_spaces_offset + + # build forms + u, v = TrialFunction(CG), TestFunction(CG) + nu = Constant(offset + 1) + a0 = inner(u, v)*dx + nu*inner(grad(u), grad(v))*dx + + u, v = TrialFunction(DG), TestFunction(DG) + a1 = (1/nu)*inner(u, v)*dx + + # assemble mats + A0mat = assemble(a0, mat_type='aij').petscmat + A1mat = assemble(a1, mat_type='aij').petscmat + mats = [A0mat, A1mat] + + # create ensemble mat + emat = EnsembleBlockDiagonalMat(mats, row_space, col_space) + + # parameters: direct solve on blocks + parameters = { + 'ksp_rtol': 1e-14, + 'ksp_type': 'richardson', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.EnsembleBJacobiPC', + } + if default_options: + parameters['sub_pc_type'] = 'lu' + else: + for i in range(col_space.nglobal_spaces): + parameters[f'sub_{i}_pc_type'] = 'lu' + + # create ensemble ksp + ksp = 
PETSc.KSP().create(comm=ensemble.global_comm) + ksp.setOperators(emat, emat) + petsctools.set_from_options( + ksp, parameters=parameters, + options_prefix="ensemble") + + x = EnsembleFunction(row_space) + b = EnsembleFunction(col_space.dual()) + + for i, bi in enumerate(b.subfunctions): + bi.assign(offset + i + 1) + + with petsctools.inserted_options(ksp): + with x.vec_wo() as xv, b.vec_ro() as bv: + ksp.solve(bv, xv) + + # 1 richardson iteration should be a direct solve + parallel_assert( + ksp.its == 1, + msg=("EnsembleBJacobiPC took more than one iteration to" + f" solve an EnsembleBlockDiagonalMat: {ksp.its=}") + ) + + +if __name__ == "__main__": + test_ensemble_pc(default_options=True) diff --git a/tests/firedrake/ensemble/test_ensemble_wrapper.py b/tests/firedrake/ensemble/test_ensemble_wrapper.py new file mode 100644 index 0000000000..69d2851338 --- /dev/null +++ b/tests/firedrake/ensemble/test_ensemble_wrapper.py @@ -0,0 +1,133 @@ +from firedrake import * +import pytest +from pytest_mpi.parallel_assert import parallel_assert + + +min_root = 0 +max_root = 1 + +roots = [] +roots.extend([pytest.param(None, id="root_none")]) +roots.extend([pytest.param(i, id=f"root_{i}") + for i in range(min_root, max_root + 1)]) + +blocking = [ + pytest.param(True, id="blocking"), + pytest.param(False, id="nonblocking") +] + +sendrecv_pairs = [ + pytest.param((0, 1), id="ranks01"), + pytest.param((1, 2), id="ranks12"), + pytest.param((2, 0), id="ranks20") +] + + +@pytest.fixture(scope="module") +def ensemble(): + if COMM_WORLD.size == 1: + return + return Ensemble(COMM_WORLD, 1) + + +@pytest.mark.parallel(nprocs=2) +def test_ensemble_allreduce(ensemble): + rank = ensemble.ensemble_rank + result = ensemble.allreduce(rank+1) + expected = sum([r+1 for r in range(ensemble.ensemble_size)]) + parallel_assert( + result == expected, + msg=f"{result=} does not match {expected=}") + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize("root", roots) +def 
test_ensemble_reduce(ensemble, root): + rank = ensemble.ensemble_rank + + # check default root=0 works + if root is None: + result = ensemble.reduce(rank+1) + root = 0 + else: + result = ensemble.reduce(rank+1, root=root) + + expected = sum([r+1 for r in range(ensemble.ensemble_size)]) + + parallel_assert( + result == expected, + participating=(rank == root), + msg=f"{result=} does not match {expected=} on rank {root=}" + ) + parallel_assert( + result is None, + participating=(rank != root), + msg=f"Unexpected {result=} on non-root rank" + ) + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize("root", roots) +def test_ensemble_bcast(ensemble, root): + rank = ensemble.ensemble_rank + + # check default root=0 works + if root is None: + result = ensemble.bcast(rank+1) + root = 0 + else: + result = ensemble.bcast(rank+1, root=root) + + expected = root + 1 + + parallel_assert(result == expected) + + +@pytest.mark.parallel(nprocs=3) +@pytest.mark.parametrize("ranks", sendrecv_pairs) +def test_send_and_recv(ensemble, ranks): + rank = ensemble.ensemble_rank + + rank0, rank1 = ranks + + send_data = rank + 1 + + if rank == rank0: + recv_expected = rank1 + 1 + + ensemble.send(send_data, dest=rank1, tag=rank0) + recv_data = ensemble.recv(source=rank1, tag=rank1) + + elif rank == rank1: + recv_expected = rank0 + 1 + + recv_data = ensemble.recv(source=rank0, tag=rank0) + ensemble.send(send_data, dest=rank0, tag=rank1) + + else: + recv_expected = None + recv_data = None + + # Test send/recv between first two spatial comms + # ie: ensemble.ensemble_comm.rank == 0 and 1 + parallel_assert( + recv_data == recv_expected, + participating=rank in (rank0, rank1), + ) + + +@pytest.mark.parallel(nprocs=3) +def test_sendrecv(ensemble): + rank = ensemble.ensemble_rank + size = ensemble.ensemble_size + src_rank = (rank - 1) % size + dst_rank = (rank + 1) % size + + send_data = rank + 1 + recv_expected = src_rank + 1 + + recv_result = ensemble.sendrecv( + send_data, dst_rank, 
sendtag=rank, + source=src_rank, recvtag=src_rank) + + parallel_assert(recv_result == recv_expected) diff --git a/tests/firedrake/equation_bcs/test_equation_bcs_assemble.py b/tests/firedrake/equation_bcs/test_equation_bcs_assemble.py index 402148edfd..e523945651 100644 --- a/tests/firedrake/equation_bcs/test_equation_bcs_assemble.py +++ b/tests/firedrake/equation_bcs/test_equation_bcs_assemble.py @@ -14,7 +14,7 @@ def test_equation_bcs_direct_assemble_one_form(): bc = EquationBC(F1 == 0, u, 1) g = assemble(F, bcs=bc.extract_form('F')) - assert np.allclose(g.dat.data, [0.5, 0.5, 0, 0]) + assert np.allclose(g.dat.data, [0.5, 0, 0, 0.5]) def test_equation_bcs_direct_assemble_two_form(): @@ -31,15 +31,19 @@ def test_equation_bcs_direct_assemble_two_form(): # Must preprocess bc to extract appropriate # `EquationBCSplit` object. A = assemble(a, bcs=bc.extract_form('J')) - assert np.allclose(A.M.values, [[1 / 3, 1 / 6, 0, 0], - [1 / 6, 1 / 3, 0, 0], - [-1 / 3, -1 / 6, 2 / 3, -1 / 6], - [-1 / 6, -1 / 3, -1 / 6, 2 / 3]]) + expected = [[ 1/3, 0, 0, 1/6], # noqa + [-1/6, 2/3, -1/6, -1/3], # noqa + [-1/3, -1/6, 2/3, -1/6], # noqa + [ 1/6, 0, 0, 1/3]] # noqa + assert np.allclose(A.M.values, expected) + A = assemble(a, bcs=bc.extract_form('Jp')) - assert np.allclose(A.M.values, [[2 / 3, 2 / 6, 0, 0], - [2 / 6, 2 / 3, 0, 0], - [-1 / 3, -1 / 6, 2 / 3, -1 / 6], - [-1 / 6, -1 / 3, -1 / 6, 2 / 3]]) + expected = [[ 2/3, 0, 0, 1/3], # noqa + [-1/6, 2/3, -1/6, -1/3], # noqa + [-1/3, -1/6, 2/3, -1/6], # noqa + [ 1/3, 0, 0, 2/3]] # noqa + assert np.allclose(A.M.values, expected) + with pytest.raises(TypeError) as excinfo: # Unable to use raw `EquationBC` object, as # assembler can not infer merely from the rank diff --git a/tests/firedrake/external_operators/test_external_operators.py b/tests/firedrake/external_operators/test_external_operators.py index b22c0a1bf1..f4db50fc86 100644 --- a/tests/firedrake/external_operators/test_external_operators.py +++ 
b/tests/firedrake/external_operators/test_external_operators.py @@ -3,6 +3,7 @@ import ufl from firedrake import * +from firedrake.matrix import MatrixBase @pytest.fixture(scope='module') @@ -49,7 +50,7 @@ def f(mesh, V): fi.interpolate(as_vector([(2 * pi ** 2 + 1) * sin(pi * x) * sin(pi * y)] * V.value_size)) elif fs_i.rank == 2: fi.interpolate(as_tensor([[(2 * pi ** 2 + 1) * sin(pi * x) * sin(pi * y) - for _ in range(fs_i.mesh().geometric_dimension())] + for _ in range(fs_i.mesh().geometric_dimension)] for _ in range(fs_i.rank)])) else: fi.interpolate((2 * pi ** 2 + 1) * sin(pi * x) * sin(pi * y)) @@ -104,7 +105,7 @@ def test_assemble(V, f): assert isinstance(jac, MatrixBase) # Assemble the exact Jacobian, i.e. the interpolation matrix: `Interpolate(dexpr(u,v,w)/du, V)` - jac_exact = assemble(Interpolate(derivative(expr(u, v, w), u), V)) + jac_exact = assemble(interpolate(derivative(expr(u, v, w), u), V)) np.allclose(jac.petscmat[:, :], jac_exact.petscmat[:, :], rtol=1e-14) # -- dNdu(u, v, w; δu, v*) (TLM) -- # diff --git a/tests/firedrake/external_operators/test_jax_operator.py b/tests/firedrake/external_operators/test_jax_operator.py index 55d2e752d4..4ee66ebb42 100644 --- a/tests/firedrake/external_operators/test_jax_operator.py +++ b/tests/firedrake/external_operators/test_jax_operator.py @@ -117,6 +117,40 @@ def test_forward(u, nn): assert np.allclose(y_F.dat.data_ro, assembled_N.dat.data_ro) +@pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done +@pytest.mark.skipjax # Skip if JAX is not installed +def test_forward_mixed(V, nn): + + W = V * V + u = Function(W) + u1, u2 = u.subfunctions + x, y = SpatialCoordinate(V.mesh()) + u1.interpolate(sin(pi * x) * sin(pi * y)) + u2.interpolate(sin(2 * pi * x) * sin(2 * pi * y)) + + # Set JaxOperator + n = W.dim() + model = Linear(n, n) + + N = ml_operator(model, function_space=W)(u) + # Get model + model = N.model + + # Assemble NeuralNet + assembled_N = assemble(N) + assert isinstance(assembled_N, 
Function) + + # Convert from Firedrake to JAX + x_P = to_jax(u) + # Forward pass + y_P = model(x_P) + # Convert from JAX to Firedrake + y_F = from_jax(y_P, u.function_space()) + + # Check + assert np.allclose(y_F.dat.data_ro, assembled_N.dat.data_ro) + + @pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done @pytest.mark.skipjax # Skip if JAX is not installed def test_jvp(u, nn, rg): diff --git a/tests/firedrake/external_operators/test_pytorch_operator.py b/tests/firedrake/external_operators/test_pytorch_operator.py index d9782d1644..7e0ffef7ba 100644 --- a/tests/firedrake/external_operators/test_pytorch_operator.py +++ b/tests/firedrake/external_operators/test_pytorch_operator.py @@ -109,6 +109,41 @@ def test_forward(u, nn): assert np.allclose(y_F.dat.data_ro, assembled_N.dat.data_ro) +@pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done +@pytest.mark.skiptorch # Skip if PyTorch is not installed +def test_forward_mixed(V, nn): + + W = V * V + u = Function(W) + u1, u2 = u.subfunctions + x, y = SpatialCoordinate(V.mesh()) + u1.interpolate(sin(pi * x) * sin(pi * y)) + u2.interpolate(sin(2 * pi * x) * sin(2 * pi * y)) + + # Set PytorchOperator + n = W.dim() + model = Linear(n, n) + + N = ml_operator(model, function_space=W)(u) + # Get model + model = N.model + + # Assemble NeuralNet + assembled_N = assemble(N) + + assert isinstance(assembled_N, Function) + + # Convert from Firedrake to PyTorch + x_P = to_torch(u) + # Forward pass + y_P = model(x_P) + # Convert from PyTorch to Firedrake + y_F = from_torch(y_P, u.function_space()) + + # Check + assert np.allclose(y_F.dat.data_ro, assembled_N.dat.data_ro) + + @pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done @pytest.mark.skiptorch # Skip if PyTorch is not installed def test_jvp(u, nn, rg): diff --git a/tests/firedrake/extrusion/test_facet_support_dofs.py b/tests/firedrake/extrusion/test_facet_support_dofs.py index c9ba7c1559..50aaf45ee0 100644 --- 
a/tests/firedrake/extrusion/test_facet_support_dofs.py +++ b/tests/firedrake/extrusion/test_facet_support_dofs.py @@ -40,7 +40,7 @@ def test_hex(hex_mesh, args, kwargs, horiz_expected, vert_expected): if not kwargs: fe = FiniteElement(args[0], hex_mesh.ufl_cell(), args[1], variant='equispaced') else: - A, B = hex_mesh.ufl_cell().sub_cells() + A, B = hex_mesh.ufl_cell().sub_cells hfe = FiniteElement(args[0], A, args[1], variant='equispaced') vfe = FiniteElement(kwargs["vfamily"], B, kwargs['vdegree'], variant='equispaced') fe = TensorProductElement(hfe, vfe) diff --git a/tests/firedrake/extrusion/test_steady_advection_3D_extr.py b/tests/firedrake/extrusion/test_steady_advection_3D_extr.py index 780ef5c6fc..0bd4744b19 100644 --- a/tests/firedrake/extrusion/test_steady_advection_3D_extr.py +++ b/tests/firedrake/extrusion/test_steady_advection_3D_extr.py @@ -33,7 +33,7 @@ def DGDPC1(request, mesh): @pytest.fixture def W(mesh): - if mesh.ufl_cell().sub_cells()[0].cellname() == "quadrilateral": + if mesh.ufl_cell().sub_cells[0].cellname == "quadrilateral": # RTCF1 element on a hexahedron W0_h = FiniteElement("RTCF", "quadrilateral", 1) W1_h = FiniteElement("DQ", "quadrilateral", 0) diff --git a/tests/firedrake/extrusion/test_variable_layers_numbering.py b/tests/firedrake/extrusion/test_variable_layers_numbering.py index e13358f57d..470dae4807 100644 --- a/tests/firedrake/extrusion/test_variable_layers_numbering.py +++ b/tests/firedrake/extrusion/test_variable_layers_numbering.py @@ -381,16 +381,16 @@ def test_numbering_quad(): [2, 4, 5, 7, 8, 10, 11, 13, 14, 16, 18, 20, 21, 24]).all() assert numpy.equal(DirichletBC(V, 0, 1).nodes, - [0, 1, 2, 3, 4, 5, 17, 18]).all() + [0, 1, 2, 9, 10, 11, 15, 16]).all() assert numpy.equal(DirichletBC(V, 0, 2).nodes, - [12, 13, 14, 15, 16, 22, 23, 24]).all() + [17, 18, 19, 20, 21, 22, 23, 24]).all() assert numpy.equal(DirichletBC(V, 0, 3).nodes, - [0, 1, 2, 9, 10, 11, 15, 16]).all() + [0, 1, 2, 3, 4, 5, 17, 18]).all() assert 
numpy.equal(DirichletBC(V, 0, 4).nodes, - [17, 18, 19, 20, 21, 22, 23, 24]).all() + [12, 13, 14, 15, 16, 22, 23, 24]).all() @pytest.mark.parametrize(["domain", "expected"], @@ -497,8 +497,8 @@ def test_layer_extents_parallel(): [0, 2, 0, 2], [0, 2, 0, 2], [0, 2, 0, 2], - [0, 2, 0, 2], [0, 3, 0, 2], + [0, 2, 0, 2], [0, 2, 0, 2]], dtype=IntType) elif mesh.comm.rank == 1: # Top view, plex points @@ -545,8 +545,8 @@ def test_layer_extents_parallel(): # edges [0, 3, 0, 3], [0, 3, 0, 3], - [0, 2, 0, 2], [0, 3, 0, 2], + [0, 2, 0, 2], [0, 2, 0, 2]], dtype=IntType) elif mesh.comm.rank == 3: # Top view, plex points @@ -569,11 +569,11 @@ def test_layer_extents_parallel(): [0, 2, 0, 2], [0, 3, 0, 3], # edges - [0, 2, 0, 2], [0, 3, 0, 2], [0, 2, 0, 2], [0, 2, 0, 2], [0, 2, 0, 2], + [0, 2, 0, 2], [0, 3, 0, 3], [0, 3, 0, 3]], dtype=IntType) assert numpy.equal(extmesh.layer_extents, expected).all() diff --git a/tests/firedrake/macro/test_macro_interp_project.py b/tests/firedrake/macro/test_macro_interp_project.py index 48ddea6946..a8a2ee8f45 100644 --- a/tests/firedrake/macro/test_macro_interp_project.py +++ b/tests/firedrake/macro/test_macro_interp_project.py @@ -17,26 +17,23 @@ def proj_bc(u, f): return proj(u, f, bcs=DirichletBC(u.function_space(), f, "on_boundary")) -def h1_proj(u, f, bcs=None): - # compute h1 projection of f into u's function +def riesz_proj(u, f, bcs=None): + # compute the Riesz representative of f in u's function # space, store the result in u. 
v = TestFunction(u.function_space()) + w = TrialFunction(u.function_space()) - d = {H2: grad, H1: grad, HCurl: curl, HDiv: div, HDivDiv: div}[u.ufl_element().sobolev_space] - F = (inner(d(u-f), d(v)) * dx - + inner(u-f, v) * dx) - fcp = {"mode": "vanilla"} - solve(F == 0, u, - bcs=bcs, - solver_parameters={"snes_type": "ksponly", - "ksp_type": "preonly", - "pc_type": "cholesky"}, - form_compiler_parameters=fcp) - return assemble(F(u-f), form_compiler_parameters=fcp)**0.5 + # Apply the Riesz map <==> projection in the H(d) inner-product norm + d = {H2: grad, H1: grad, HCurl: curl, HDiv: div}[u.ufl_element().sobolev_space] + a = inner(d(w), d(v)) * dx + inner(w, v) * dx + L = a(v, f) + solve(a == L, u, bcs=bcs) + return assemble(a(u-f, u-f))**0.5 -def h1_proj_bc(u, f): - return h1_proj(u, f, bcs=DirichletBC(u.function_space(), f, "on_boundary")) + +def riesz_proj_bc(u, f): + return riesz_proj(u, f, bcs=DirichletBC(u.function_space(), f, "on_boundary")) @pytest.fixture(params=("square", "cube")) @@ -53,10 +50,10 @@ def mesh(request): ('alfeld', 1), ('alfeld', 'd'), ('iso(2)', 2)]) -@pytest.mark.parametrize('op', (interp, proj, proj_bc, h1_proj, h1_proj_bc)) +@pytest.mark.parametrize('op', (interp, proj, proj_bc, riesz_proj, riesz_proj_bc)) def test_projection_scalar_monomial(op, mesh, degree, variant): if degree == 'd': - degree = mesh.geometric_dimension() + degree = mesh.geometric_dimension V = FunctionSpace(mesh, "CG", degree=degree, variant=variant) u = Function(V) x = SpatialCoordinate(mesh) @@ -112,7 +109,7 @@ def run_convergence(mh, el, deg, convrate, op): (2, 'HCT', 3, 3), (2, 'HCT-red', 3, 2), ]) -@pytest.mark.parametrize('op', (proj, h1_proj)) +@pytest.mark.parametrize('op', (proj, riesz_proj)) def test_scalar_convergence(hierarchy, dim, el, deg, convrate, op): if op == proj: convrate += 1 @@ -132,7 +129,7 @@ def test_scalar_convergence(hierarchy, dim, el, deg, convrate, op): (3, 'Guzman-Neilan 1st kind H1', 1, 1), (3, 'Guzman-Neilan H1(div)', 3, 2), ]) 
-@pytest.mark.parametrize('op', (proj, h1_proj)) +@pytest.mark.parametrize('op', (proj, riesz_proj)) def test_piola_convergence(hierarchy, dim, el, deg, convrate, op): if op == proj: convrate += 1 diff --git a/tests/firedrake/macro/test_macro_multigrid.py b/tests/firedrake/macro/test_macro_multigrid.py index c9d921cf27..d128d417b1 100644 --- a/tests/firedrake/macro/test_macro_multigrid.py +++ b/tests/firedrake/macro/test_macro_multigrid.py @@ -158,7 +158,7 @@ def test_macro_multigrid_poisson(hierarchy, degree, variant, petsc_raises): solver.solve() expected = 10 - if mesh.geometric_dimension() == 3 and variant == "alfeld": + if mesh.geometric_dimension == 3 and variant == "alfeld": expected = 14 assert solver.snes.ksp.getIterationNumber() <= expected diff --git a/tests/firedrake/macro/test_macro_quadrature.py b/tests/firedrake/macro/test_macro_quadrature.py index 7330965d4f..86c004c1dc 100644 --- a/tests/firedrake/macro/test_macro_quadrature.py +++ b/tests/firedrake/macro/test_macro_quadrature.py @@ -5,7 +5,7 @@ def alfeld_split(msh): - dim = msh.geometric_dimension() + dim = msh.geometric_dimension coords = msh.coordinates.dat.data.reshape((-1, dim)) coords = numpy.vstack((coords, numpy.average(coords, 0))) cells = [list(map(lambda i: dim+1 if i == j else i, range(dim+1))) for j in range(dim+1)] @@ -39,7 +39,7 @@ def meshes(variant, base_mesh): @pytest.mark.parametrize("degree", (1, 4,)) def test_macro_quadrature_monomial(degree, variant, meshes): msh = meshes[0] - gdim = msh.geometric_dimension() + gdim = msh.geometric_dimension x = SpatialCoordinate(msh) c = Constant(numpy.arange(1, gdim+1)) expr = dot(c, x) ** degree @@ -59,7 +59,7 @@ def test_macro_quadrature_monomial(degree, variant, meshes): def test_macro_quadrature_piecewise(degree, variant, meshes): results = [] for msh, v in zip(meshes, (variant, None)): - gdim = msh.geometric_dimension() + gdim = msh.geometric_dimension x = SpatialCoordinate(msh) if variant == "alfeld": diff --git 
a/tests/firedrake/macro/test_macro_solve.py b/tests/firedrake/macro/test_macro_solve.py index b30c2a74c1..5d9e313fce 100644 --- a/tests/firedrake/macro/test_macro_solve.py +++ b/tests/firedrake/macro/test_macro_solve.py @@ -24,7 +24,7 @@ def mixed_element(mh, variant): Vel = FiniteElement("CG", cell, degree=1, variant="iso") Pel = FiniteElement("CG", cell, degree=1) elif variant == "alfeld": - dim = mh[0].topological_dimension() + dim = mh[0].topological_dimension Vel = FiniteElement("CG", cell, degree=dim, variant="alfeld") Pel = FiniteElement("DG", cell, degree=dim-1, variant="alfeld") elif variant == "th": @@ -63,7 +63,7 @@ def riesz_map(Z, gamma=None): def test_riesz(mh, variant, mixed_element): - dim = mh[0].geometric_dimension() + dim = mh[0].geometric_dimension u_err = [] p_err = [] el1, el2 = mixed_element @@ -121,7 +121,7 @@ def errornormL2_0(pexact, ph): def test_stokes(mh, variant, mixed_element): - dim = mh[0].geometric_dimension() + dim = mh[0].geometric_dimension u_err = [] p_err = [] div_err = [] diff --git a/tests/firedrake/macro/test_stokes_macroelements.py b/tests/firedrake/macro/test_stokes_macroelements.py index c3f9b6e5c2..1fd7c5b54b 100644 --- a/tests/firedrake/macro/test_stokes_macroelements.py +++ b/tests/firedrake/macro/test_stokes_macroelements.py @@ -15,7 +15,7 @@ def mesh(request): @pytest.fixture(params=("SV", "GN", "GN2", "GNH1div")) def space(request, mesh): family = request.param - dim = mesh.topological_dimension() + dim = mesh.topological_dimension if family == "GN": V = FunctionSpace(mesh, "GN", 1) Q = FunctionSpace(mesh, "DG", 0) @@ -48,7 +48,7 @@ def test_stokes_complex(mesh, space): # Test that DirichletBC does not set derivative nodes of supersmooth H1 functions def test_supersmooth_bcs(mesh): - tdim = mesh.topological_dimension() + tdim = mesh.topological_dimension if tdim == 3: V = FunctionSpace(mesh, "GNH1div", 3) else: diff --git a/tests/firedrake/meshes/mixed_cell_unit_square.msh 
b/tests/firedrake/meshes/mixed_cell_unit_square.msh new file mode 100644 index 0000000000..582012ce11 --- /dev/null +++ b/tests/firedrake/meshes/mixed_cell_unit_square.msh @@ -0,0 +1,231 @@ +$MeshFormat +2.200000 0 8 +$EndMeshFormat +$Nodes +121 +1 0.000000 0.000000 0.000000 +2 1.000000 0.000000 0.000000 +3 0.000000 1.000000 0.000000 +4 1.000000 1.000000 0.000000 +5 0.100000 0.000000 0.000000 +6 0.200000 0.000000 0.000000 +7 0.300000 0.000000 0.000000 +8 0.400000 0.000000 0.000000 +9 0.500000 0.000000 0.000000 +10 0.600000 0.000000 0.000000 +11 0.700000 0.000000 0.000000 +12 0.800000 0.000000 0.000000 +13 0.900000 0.000000 0.000000 +14 0.000000 0.100000 0.000000 +15 0.000000 0.200000 0.000000 +16 0.000000 0.300000 0.000000 +17 0.000000 0.400000 0.000000 +18 0.000000 0.500000 0.000000 +19 0.000000 0.600000 0.000000 +20 0.000000 0.700000 0.000000 +21 0.000000 0.800000 0.000000 +22 0.000000 0.900000 0.000000 +23 1.000000 0.100000 0.000000 +24 1.000000 0.200000 0.000000 +25 1.000000 0.300000 0.000000 +26 1.000000 0.400000 0.000000 +27 1.000000 0.500000 0.000000 +28 1.000000 0.600000 0.000000 +29 1.000000 0.700000 0.000000 +30 1.000000 0.800000 0.000000 +31 1.000000 0.900000 0.000000 +32 0.900000 1.000000 0.000000 +33 0.800000 1.000000 0.000000 +34 0.700000 1.000000 0.000000 +35 0.600000 1.000000 0.000000 +36 0.500000 1.000000 0.000000 +37 0.400000 1.000000 0.000000 +38 0.300000 1.000000 0.000000 +39 0.200000 1.000000 0.000000 +40 0.100000 1.000000 0.000000 +41 0.099910 0.099602 0.000000 +42 0.199647 0.099017 0.000000 +43 0.298763 0.097861 0.000000 +44 0.397030 0.096831 0.000000 +45 0.494086 0.096232 0.000000 +46 0.592555 0.098553 0.000000 +47 0.692780 0.101000 0.000000 +48 0.795403 0.102370 0.000000 +49 0.897918 0.101440 0.000000 +50 0.099976 0.199139 0.000000 +51 0.100276 0.298871 0.000000 +52 0.100754 0.398576 0.000000 +53 0.100758 0.498986 0.000000 +54 0.100564 0.599366 0.000000 +55 0.100269 0.699723 0.000000 +56 0.100125 0.799876 0.000000 +57 0.100042 0.899958 
0.000000 +58 0.895799 0.203814 0.000000 +59 0.895030 0.307167 0.000000 +60 0.899822 0.405711 0.000000 +61 0.901364 0.503314 0.000000 +62 0.901355 0.601763 0.000000 +63 0.900674 0.700631 0.000000 +64 0.900353 0.800260 0.000000 +65 0.900126 0.900083 0.000000 +66 0.800284 0.900132 0.000000 +67 0.700338 0.900083 0.000000 +68 0.600524 0.900015 0.000000 +69 0.500444 0.899908 0.000000 +70 0.400365 0.899842 0.000000 +71 0.300156 0.899889 0.000000 +72 0.200102 0.899912 0.000000 +73 0.394894 0.192601 0.000000 +74 0.298413 0.195663 0.000000 +75 0.487192 0.189602 0.000000 +76 0.582637 0.196271 0.000000 +77 0.682960 0.202753 0.000000 +78 0.789894 0.207261 0.000000 +79 0.783414 0.322202 0.000000 +80 0.199715 0.198087 0.000000 +81 0.200548 0.296574 0.000000 +82 0.201983 0.396430 0.000000 +83 0.202035 0.497478 0.000000 +84 0.201235 0.598747 0.000000 +85 0.200629 0.699407 0.000000 +86 0.200213 0.799802 0.000000 +87 0.804703 0.506541 0.000000 +88 0.803135 0.412674 0.000000 +89 0.803344 0.603044 0.000000 +90 0.801425 0.700957 0.000000 +91 0.800669 0.800362 0.000000 +92 0.701315 0.800417 0.000000 +93 0.601314 0.800070 0.000000 +94 0.501275 0.799737 0.000000 +95 0.400862 0.799573 0.000000 +96 0.300662 0.799553 0.000000 +97 0.477928 0.273171 0.000000 +98 0.396177 0.285892 0.000000 +99 0.665542 0.306018 0.000000 +100 0.712544 0.509728 0.000000 +101 0.721484 0.426103 0.000000 +102 0.300142 0.292896 0.000000 +103 0.305573 0.390645 0.000000 +104 0.305015 0.494723 0.000000 +105 0.303243 0.597330 0.000000 +106 0.301423 0.698947 0.000000 +107 0.702926 0.701156 0.000000 +108 0.706906 0.603770 0.000000 +109 0.604087 0.700387 0.000000 +110 0.402800 0.698655 0.000000 +111 0.503558 0.699294 0.000000 +112 0.565943 0.292189 0.000000 +113 0.617065 0.502789 0.000000 +114 0.633291 0.406568 0.000000 +115 0.608525 0.601090 0.000000 +116 0.534001 0.392869 0.000000 +117 0.416004 0.377213 0.000000 +118 0.410393 0.491619 0.000000 +119 0.405409 0.596818 0.000000 +120 0.508620 0.598318 0.000000 +121 0.517096 
0.496276 0.000000 +$EndNodes +$Elements +101 +1 3 2 1 1 1 5 41 14 +2 3 2 1 1 5 6 42 41 +3 3 2 1 1 6 7 43 42 +4 3 2 1 1 7 8 44 43 +5 3 2 1 1 8 9 45 44 +6 3 2 1 1 9 10 46 45 +7 3 2 1 1 10 11 47 46 +8 3 2 1 1 11 12 48 47 +9 3 2 1 1 12 13 49 48 +10 3 2 1 1 13 2 23 49 +11 3 2 1 1 15 14 41 50 +12 3 2 1 1 16 15 50 51 +13 3 2 1 1 17 16 51 52 +14 3 2 1 1 18 17 52 53 +15 3 2 1 1 19 18 53 54 +16 3 2 1 1 20 19 54 55 +17 3 2 1 1 21 20 55 56 +18 3 2 1 1 22 21 56 57 +19 3 2 1 1 3 22 57 40 +20 3 2 1 1 23 24 58 49 +21 3 2 1 1 24 25 59 58 +22 3 2 1 1 25 26 60 59 +23 3 2 1 1 26 27 61 60 +24 3 2 1 1 27 28 62 61 +25 3 2 1 1 28 29 63 62 +26 3 2 1 1 29 30 64 63 +27 3 2 1 1 30 31 65 64 +28 3 2 1 1 31 4 32 65 +29 3 2 1 1 32 33 66 65 +30 3 2 1 1 33 34 67 66 +31 3 2 1 1 34 35 68 67 +32 3 2 1 1 35 36 69 68 +33 3 2 1 1 36 37 70 69 +34 3 2 1 1 37 38 71 70 +35 3 2 1 1 38 39 72 71 +36 3 2 1 1 39 40 57 72 +37 3 2 1 1 43 44 73 74 +38 3 2 1 1 44 45 75 73 +39 3 2 1 1 45 46 76 75 +40 3 2 1 1 46 47 77 76 +41 3 2 1 1 47 48 78 77 +42 3 2 1 1 48 49 58 78 +43 3 2 1 1 58 59 79 78 +44 3 2 1 1 41 42 80 50 +45 3 2 1 1 51 50 80 81 +46 3 2 1 1 52 51 81 82 +47 3 2 1 1 53 52 82 83 +48 3 2 1 1 54 53 83 84 +49 3 2 1 1 55 54 84 85 +50 3 2 1 1 56 55 85 86 +51 3 2 1 1 57 56 86 72 +52 3 2 1 1 60 61 87 88 +53 3 2 1 1 61 62 89 87 +54 3 2 1 1 62 63 90 89 +55 3 2 1 1 63 64 91 90 +56 3 2 1 1 64 65 66 91 +57 3 2 1 1 66 67 92 91 +58 3 2 1 1 67 68 93 92 +59 3 2 1 1 68 69 94 93 +60 3 2 1 1 69 70 95 94 +61 3 2 1 1 70 71 96 95 +62 3 2 1 1 71 72 86 96 +63 3 2 1 1 59 60 88 79 +64 3 2 1 1 42 43 74 80 +65 3 2 1 1 73 75 97 98 +66 3 2 1 1 77 78 79 99 +67 3 2 1 1 88 87 100 101 +68 3 2 1 1 74 73 98 102 +69 3 2 1 1 81 80 74 102 +70 3 2 1 1 82 81 102 103 +71 3 2 1 1 83 82 103 104 +72 3 2 1 1 84 83 104 105 +73 3 2 1 1 85 84 105 106 +74 3 2 1 1 86 85 106 96 +75 3 2 1 1 90 91 92 107 +76 3 2 1 1 87 89 108 100 +77 3 2 1 1 92 93 109 107 +78 3 2 1 1 94 95 110 111 +79 3 2 1 1 93 94 111 109 +80 3 2 1 1 95 96 106 110 +81 2 2 1 1 79 88 101 +82 3 2 1 1 
89 90 107 108 +83 3 2 1 1 75 76 112 97 +84 3 2 1 1 76 77 99 112 +85 3 2 1 1 101 100 113 114 +86 3 2 1 1 107 109 115 108 +87 3 2 1 1 112 99 114 116 +88 3 2 1 1 104 103 117 118 +89 3 2 1 1 105 104 118 119 +90 3 2 1 1 106 105 119 110 +91 3 2 1 1 97 112 116 117 +92 3 2 1 1 102 98 117 103 +93 3 2 1 1 111 110 119 120 +94 3 2 1 1 100 108 115 113 +95 3 2 1 1 109 111 120 115 +96 3 2 1 1 114 113 121 116 +97 3 2 1 1 119 118 121 120 +98 3 2 1 1 117 116 121 118 +99 3 2 1 1 115 120 121 113 +100 2 2 1 1 97 117 98 +101 3 2 1 1 101 114 99 79 +$EndElements diff --git a/tests/firedrake/meshes/p2d.geo b/tests/firedrake/meshes/p2d.geo new file mode 100644 index 0000000000..d663e023c8 --- /dev/null +++ b/tests/firedrake/meshes/p2d.geo @@ -0,0 +1,16 @@ +SetFactory("OpenCASCADE"); +//+ +Rectangle(1) = {0, 0, 0, 0.6, 0.5, 0}; +MeshSize {:} = 0.1; +MeshSize {:} = 0.1; + +// S2: Right Side +// S4: Left Side + +Periodic Curve {2} = {4} Translate {0.6,0,0}; + +Physical Surface(1) = {1}; +Physical Curve(1) = {1}; +Physical Curve(2) = {2}; +Physical Curve(3) = {3}; +Physical Curve(4) = {4}; diff --git a/tests/firedrake/meshes/p2d.msh b/tests/firedrake/meshes/p2d.msh new file mode 100644 index 0000000000..9250828498 --- /dev/null +++ b/tests/firedrake/meshes/p2d.msh @@ -0,0 +1,175 @@ +$MeshFormat +2.2 0 8 +$EndMeshFormat +$Nodes +52 +1 0 0 0 +2 0.6 0 0 +3 0.6 0.5 0 +4 0 0.5 0 +5 0.1 0 0 +6 0.2000000000000002 0 0 +7 0.3000000000000003 0 0 +8 0.4000000000000006 0 0 +9 0.5000000000000007 0 0 +10 0.6 0.09999999999999998 0 +11 0.6 0.2 0 +12 0.6 0.3 0 +13 0.6 0.4 0 +14 0.4999999999999999 0.5 0 +15 0.3999999999999997 0.5 0 +16 0.2999999999999997 0.5 0 +17 0.1999999999999993 0.5 0 +18 0.09999999999999931 0.5 0 +19 0 0.4 0 +20 0 0.3 0 +21 0 0.2 0 +22 0 0.09999999999999998 0 +23 0.5186224219100395 0.245166047252571 0 +24 0.08137757808996043 0.2451660472525709 0 +25 0.2543372475932793 0.08224840025510172 0 +26 0.3499999999999996 0.413397459621556 0 +27 0.4430574538662087 0.09595102731164898 0 +28 
0.150423736928087 0.415322795494417 0 +29 0.2500706228213473 0.4137183489336996 0 +30 0.3000117704702239 0.3268484007951361 0 +31 0.4018525930917393 0.3349783789207162 0 +32 0.3578740082166879 0.2419001089813815 0 +33 0.2435188692474362 0.2409750047583969 0 +34 0.4497777049626761 0.41512773648052 0 +35 0.3452285359833018 0.08308079139007907 0 +36 0.1575621529328313 0.09532900729402857 0 +37 0.1971535352311468 0.3354831874338384 0 +38 0.08080880024257878 0.1530389498800314 0 +39 0.5059889644735611 0.3418579238694238 0 +40 0.5204794770710884 0.1523966756079144 0 +41 0.0938978312712459 0.3419579049447122 0 +42 0.4431474979352846 0.1938733509157883 0 +43 0.1574026289941869 0.1937225425877068 0 +44 0.2999999999999989 0.1633974596215566 0 +45 0.5267949192431121 0.4267949192431121 0 +46 0.07320508075688786 0.07320508075688781 0 +47 0.5267949192431126 0.0732050807568875 0 +48 0.07320508075688736 0.4267949192431125 0 +49 0.3759586495219419 0.156251476709271 0 +50 0.2251229998750313 0.1564314424076824 0 +51 0.4454970971254625 0.2715551619879761 0 +52 0.1551248878926393 0.2709383851890471 0 +$EndNodes +$Elements +102 +1 1 2 1 1 1 5 +2 1 2 1 1 5 6 +3 1 2 1 1 6 7 +4 1 2 1 1 7 8 +5 1 2 1 1 8 9 +6 1 2 1 1 9 2 +7 1 2 2 2 2 10 +8 1 2 2 2 10 11 +9 1 2 2 2 11 12 +10 1 2 2 2 12 13 +11 1 2 2 2 13 3 +12 1 2 3 3 3 14 +13 1 2 3 3 14 15 +14 1 2 3 3 15 16 +15 1 2 3 3 16 17 +16 1 2 3 3 17 18 +17 1 2 3 3 18 4 +18 1 2 4 4 4 19 +19 1 2 4 4 19 20 +20 1 2 4 4 20 21 +21 1 2 4 4 21 22 +22 1 2 4 4 22 1 +23 2 2 1 1 31 39 34 +24 2 2 1 1 28 41 37 +25 2 2 1 1 40 42 27 +26 2 2 1 1 36 43 38 +27 2 2 1 1 31 51 39 +28 2 2 1 1 41 52 37 +29 2 2 1 1 42 49 27 +30 2 2 1 1 36 50 43 +31 2 2 1 1 15 34 14 +32 2 2 1 1 29 30 26 +33 2 2 1 1 7 35 25 +34 2 2 1 1 16 29 26 +35 2 2 1 1 30 37 33 +36 2 2 1 1 9 27 8 +37 2 2 1 1 29 37 30 +38 2 2 1 1 6 36 5 +39 2 2 1 1 26 34 15 +40 2 2 1 1 8 35 7 +41 2 2 1 1 17 29 16 +42 2 2 1 1 18 28 17 +43 2 2 1 1 30 31 26 +44 2 2 1 1 30 32 31 +45 2 2 1 1 25 36 6 +46 2 2 1 1 16 26 15 +47 2 2 1 
1 7 25 6 +48 2 2 1 1 21 24 20 +49 2 2 1 1 31 34 26 +50 2 2 1 1 28 37 29 +51 2 2 1 1 30 33 32 +52 2 2 1 1 12 23 11 +53 2 2 1 1 27 35 8 +54 2 2 1 1 28 29 17 +55 2 2 1 1 22 38 21 +56 2 2 1 1 13 39 12 +57 2 2 1 1 11 40 10 +58 2 2 1 1 20 41 19 +59 2 2 1 1 21 38 24 +60 2 2 1 1 23 40 11 +61 2 2 1 1 24 41 20 +62 2 2 1 1 12 39 23 +63 2 2 1 1 1 46 22 +64 2 2 1 1 2 47 9 +65 2 2 1 1 3 45 13 +66 2 2 1 1 5 46 1 +67 2 2 1 1 14 45 3 +68 2 2 1 1 10 47 2 +69 2 2 1 1 19 48 4 +70 2 2 1 1 4 48 18 +71 2 2 1 1 27 49 35 +72 2 2 1 1 25 50 36 +73 2 2 1 1 35 44 25 +74 2 2 1 1 33 44 32 +75 2 2 1 1 23 42 40 +76 2 2 1 1 38 43 24 +77 2 2 1 1 32 51 31 +78 2 2 1 1 37 52 33 +79 2 2 1 1 39 51 23 +80 2 2 1 1 24 52 41 +81 2 2 1 1 42 51 32 +82 2 2 1 1 33 52 43 +83 2 2 1 1 41 48 19 +84 2 2 1 1 40 47 10 +85 2 2 1 1 22 46 38 +86 2 2 1 1 13 45 39 +87 2 2 1 1 34 45 14 +88 2 2 1 1 36 46 5 +89 2 2 1 1 9 47 27 +90 2 2 1 1 18 48 28 +91 2 2 1 1 27 47 40 +92 2 2 1 1 28 48 41 +93 2 2 1 1 38 46 36 +94 2 2 1 1 39 45 34 +95 2 2 1 1 43 50 33 +96 2 2 1 1 32 49 42 +97 2 2 1 1 33 50 44 +98 2 2 1 1 44 49 32 +99 2 2 1 1 23 51 42 +100 2 2 1 1 43 52 24 +101 2 2 1 1 35 49 44 +102 2 2 1 1 44 50 25 +$EndElements +$Periodic +1 +1 2 4 +Affine 1 0 0 0.6 0 1 0 0 0 0 1 0 0 0 0 1 +6 +2 1 +3 4 +10 22 +11 21 +12 20 +13 19 +$EndPeriodic diff --git a/tests/firedrake/meshes/p2d_xy.geo b/tests/firedrake/meshes/p2d_xy.geo new file mode 100644 index 0000000000..c5c32bf65a --- /dev/null +++ b/tests/firedrake/meshes/p2d_xy.geo @@ -0,0 +1,16 @@ +SetFactory("OpenCASCADE"); +//+ +Rectangle(1) = {0, 0, 0, 0.6, 0.5, 0}; +MeshSize {:} = 0.05; + +// Curve 1: bottom (y=0), Curve 2: right (x=0.6) +// Curve 3: top (y=0.5), Curve 4: left (x=0) + +Periodic Curve {2} = {4} Translate {0.6, 0, 0}; +Periodic Curve {3} = {1} Translate {0, 0.5, 0}; + +Physical Surface(1) = {1}; +Physical Curve(1) = {1}; +Physical Curve(2) = {2}; +Physical Curve(3) = {3}; +Physical Curve(4) = {4}; diff --git a/tests/firedrake/meshes/p2d_xy.msh b/tests/firedrake/meshes/p2d_xy.msh 
new file mode 100644 index 0000000000..36934efcee --- /dev/null +++ b/tests/firedrake/meshes/p2d_xy.msh @@ -0,0 +1,550 @@ +$MeshFormat +2.2 0 8 +$EndMeshFormat +$Nodes +170 +1 0 0 0 +2 0.6 0 0 +3 0.6 0.5 0 +4 0 0.5 0 +5 0.05000000000000002 0 0 +6 0.1 0 0 +7 0.1500000000000001 0 0 +8 0.2000000000000002 0 0 +9 0.2500000000000003 0 0 +10 0.3000000000000003 0 0 +11 0.3500000000000005 0 0 +12 0.4000000000000006 0 0 +13 0.4500000000000006 0 0 +14 0.5000000000000007 0 0 +15 0.5500000000000003 0 0 +16 0.6 0.04999999999999999 0 +17 0.6 0.09999999999999998 0 +18 0.6 0.15 0 +19 0.6 0.2 0 +20 0.6 0.25 0 +21 0.6 0.3 0 +22 0.6 0.35 0 +23 0.6 0.4 0 +24 0.6 0.45 0 +25 0.5500000000000003 0.5 0 +26 0.5000000000000007 0.5 0 +27 0.4500000000000006 0.5 0 +28 0.4000000000000006 0.5 0 +29 0.3500000000000005 0.5 0 +30 0.3000000000000003 0.5 0 +31 0.2500000000000003 0.5 0 +32 0.2000000000000002 0.5 0 +33 0.1500000000000001 0.5 0 +34 0.1 0.5 0 +35 0.05000000000000004 0.5 0 +36 0 0.45 0 +37 0 0.4 0 +38 0 0.35 0 +39 0 0.3 0 +40 0 0.25 0 +41 0 0.2 0 +42 0 0.15 0 +43 0 0.09999999999999998 0 +44 0 0.04999999999999999 0 +45 0.2746785666810747 0.04226795090252519 0 +46 0.3250000000000004 0.4566987298107781 0 +47 0.04238914066107835 0.2734201453141715 0 +48 0.5524999559409198 0.2319551468455084 0 +49 0.1750000000000002 0.456698729810778 0 +50 0.4241179869728686 0.03913125464382386 0 +51 0.4250000000000005 0.4566987298107781 0 +52 0.03717812400393483 0.1742734396881679 0 +53 0.1750000000000002 0.04330127018922197 0 +54 0.5520260777188489 0.3250515300530362 0 +55 0.04481480624777157 0.379336282735329 0 +56 0.5556374511175446 0.1260207664185476 0 +57 0.5571073806914171 0.4266515428715898 0 +58 0.07523000439531077 0.03857402544334781 0 +59 0.5222779727052853 0.04774764140511981 0 +60 0.07521186846404387 0.4576613977472084 0 +61 0.4745913491193617 0.4569346645067195 0 +62 0.4499318915198941 0.4134367820708797 0 +63 0.3999886485866495 0.4134040133631101 0 +64 0.4249867566844243 0.3701038354641472 0 +65 
0.3749959008785126 0.3700985560612287 0 +66 0.3999971095938232 0.3267965880198969 0 +67 0.349998835078723 0.3267955918107254 0 +68 0.3749993241120915 0.2834940392779566 0 +69 0.3249996931984695 0.2834938261858369 0 +70 0.3499998362184272 0.2401924734240038 0 +71 0.2999999215694832 0.2401924241465487 0 +72 0.2749999357946591 0.2834936861228614 0 +73 0.2499999762273574 0.2401923925898102 0 +74 0.2249999853370031 0.2834936575195758 0 +75 0.1999999935940605 0.2401923825631396 0 +76 0.3249999596313188 0.1968911319823157 0 +77 0.3749999659749581 0.1968911283198139 0 +78 0.2249999949702367 0.1968911115793819 0 +79 0.2499999868552774 0.3267949268322217 0 +80 0.1999999987935709 0.3267949199396447 0 +81 0.3499999967724511 0.1535898403496511 0 +82 0.4249994056176529 0.2834939922206988 0 +83 0.1749999980940499 0.1968911097758475 0 +84 0.3999999937912352 0.1535898420708569 0 +85 0.1999999988440482 0.1535898391536139 0 +86 0.1499999994896834 0.1535898387808562 0 +87 0.1477670886777593 0.2418269831870357 0 +88 0.424999993294366 0.1968911125469464 0 +89 0.4499999978476006 0.1535898397289132 0 +90 0.2499999989690479 0.1535898390814453 0 +91 0.2249999976081417 0.3700961908132745 0 +92 0.2749999974105702 0.3700961909273425 0 +93 0.1749999994002857 0.3700961897785797 0 +94 0.1499999996989764 0.3267949194169086 0 +95 0.4735361238200813 0.3692666762861764 0 +96 0.3737651801892966 0.1110014918471133 0 +97 0.2250000000000006 0.1102885682970025 0 +98 0.4741025389021054 0.1974092580819035 0 +99 0.2499999991697855 0.4133974601008809 0 +100 0.3247941961602917 0.112054523037251 0 +101 0.1264503173971827 0.3692588482926607 0 +102 0.1015972092964662 0.3266900717871233 0 +103 0.3472902595822954 0.07393885857972832 0 +104 0.507152228408108 0.1517701838953708 0 +105 0.4744889600087906 0.110749566557858 0 +106 0.554238592187739 0.3800287791034055 0 +107 0.1243279422841183 0.2850552647857479 0 +108 0.4728030553498509 0.04007812242252801 0 +109 0.1750000000000005 0.1102885682970025 0 +110 
0.1250000000000005 0.1102885682970025 0 +111 0.1033318830842875 0.1527831203632191 0 +112 0.07795777705557491 0.1142512076720568 0 +113 0.1250353114106741 0.4568591744668498 0 +114 0.09244283868815417 0.4091619142362889 0 +115 0.4497532326193305 0.3266582767146536 0 +116 0.4749587730394976 0.2834709324936154 0 +117 0.2999656919221851 0.1538841685705186 0 +118 0.4498434518089373 0.2402750088454168 0 +119 0.5000000000000008 0.2401923788646685 0 +120 0.524278421034044 0.2804108179166568 0 +121 0.0505189749569617 0.3264342162565577 0 +122 0.5566987298107779 0.175 0 +123 0.1238477112422602 0.1991818420178264 0 +124 0.1250099786718777 0.04224517867281655 0 +125 0.2250000000000002 0.04330127018922197 0 +126 0.2749895827715682 0.1112322163248662 0 +127 0.2249999998616311 0.4566987298906655 0 +128 0.04109242712879907 0.2246155975003899 0 +129 0.09014550448903852 0.2463898678255307 0 +130 0.3999736628377388 0.2402062924391394 0 +131 0.2749942572149382 0.1969401779916701 0 +132 0.4984751204913282 0.3249883959230648 0 +133 0.4234742037348359 0.1128317489013732 0 +134 0.2999997413896167 0.326795068551887 0 +135 0.3249990791262373 0.3700967210990493 0 +136 0.5068930752529754 0.4099292749856354 0 +137 0.3749981080977753 0.456699822101037 0 +138 0.1750000000000005 0.2834936490538903 0 +139 0.3499969561148626 0.4133992170094598 0 +140 0.2749999998385697 0.4566987299039801 0 +141 0.1999999993399741 0.4133974600026225 0 +142 0.2999993386100043 0.4133978414752484 0 +143 0.1489880777060452 0.4125787194312966 0 +144 0.5600540870587416 0.07674230049055653 0 +145 0.04161876697749889 0.4254372001975218 0 +146 0.5245362728576325 0.4590937325182053 0 +147 0.04453078533678545 0.07662939954961934 0 +148 0.5190116389080632 0.1000828547412282 0 +149 0.0767003652058372 0.1904807509206902 0 +150 0.5196152422706635 0.2 0 +151 0.03477617481834677 0.125 0 +152 0.3244948043772287 0.03774922962987609 0 +153 0.3989412717465496 0.07451945672940327 0 +154 0.566185719504996 0.275 0 +155 0.4490432367731932 
0.07639013019059904 0 +156 0.2500000000000006 0.07679491924311235 0 +157 0.2000000000000005 0.07679491924311233 0 +158 0.1500000000000005 0.07679491924311235 0 +159 0.09962142424325832 0.07646388314632591 0 +160 0.3741407204464905 0.03755646659713857 0 +161 0.3000000000000007 0.07687010113573392 0 +162 0.08316482931730726 0.3621762666615919 0 +163 0.5170337978941946 0.3618529312702636 0 +164 0.08137247134659048 0.2909133796506256 0 +165 0.03660254037844396 0.4633974596215561 0 +166 0.5633974596215563 0.4633974596215562 0 +167 0.5633974596215565 0.03660254037844363 0 +168 0.03634796322775286 0.03634796322775283 0 +169 0.4883528012267261 0.07551973625765104 0 +170 0.06531180840246492 0.1485575083329968 0 +$EndNodes +$Elements +338 +1 1 2 1 1 1 5 +2 1 2 1 1 5 6 +3 1 2 1 1 6 7 +4 1 2 1 1 7 8 +5 1 2 1 1 8 9 +6 1 2 1 1 9 10 +7 1 2 1 1 10 11 +8 1 2 1 1 11 12 +9 1 2 1 1 12 13 +10 1 2 1 1 13 14 +11 1 2 1 1 14 15 +12 1 2 1 1 15 2 +13 1 2 2 2 2 16 +14 1 2 2 2 16 17 +15 1 2 2 2 17 18 +16 1 2 2 2 18 19 +17 1 2 2 2 19 20 +18 1 2 2 2 20 21 +19 1 2 2 2 21 22 +20 1 2 2 2 22 23 +21 1 2 2 2 23 24 +22 1 2 2 2 24 3 +23 1 2 3 3 3 25 +24 1 2 3 3 25 26 +25 1 2 3 3 26 27 +26 1 2 3 3 27 28 +27 1 2 3 3 28 29 +28 1 2 3 3 29 30 +29 1 2 3 3 30 31 +30 1 2 3 3 31 32 +31 1 2 3 3 32 33 +32 1 2 3 3 33 34 +33 1 2 3 3 34 35 +34 1 2 3 3 35 4 +35 1 2 4 4 4 36 +36 1 2 4 4 36 37 +37 1 2 4 4 37 38 +38 1 2 4 4 38 39 +39 1 2 4 4 39 40 +40 1 2 4 4 40 41 +41 1 2 4 4 41 42 +42 1 2 4 4 42 43 +43 1 2 4 4 43 44 +44 1 2 4 4 44 1 +45 2 2 1 1 87 129 123 +46 2 2 1 1 57 136 106 +47 2 2 1 1 114 145 55 +48 2 2 1 1 48 120 119 +49 2 2 1 1 55 162 114 +50 2 2 1 1 136 163 106 +51 2 2 1 1 56 122 104 +52 2 2 1 1 48 154 120 +53 2 2 1 1 129 149 123 +54 2 2 1 1 104 148 56 +55 2 2 1 1 107 129 87 +56 2 2 1 1 122 150 104 +57 2 2 1 1 54 132 120 +58 2 2 1 1 60 145 114 +59 2 2 1 1 102 162 121 +60 2 2 1 1 54 163 132 +61 2 2 1 1 147 159 112 +62 2 2 1 1 57 146 136 +63 2 2 1 1 58 159 147 +64 2 2 1 1 105 148 104 +65 2 2 1 1 153 155 133 +66 2 
2 1 1 11 160 152 +67 2 2 1 1 119 150 48 +68 2 2 1 1 144 148 59 +69 2 2 1 1 121 164 102 +70 2 2 1 1 121 162 55 +71 2 2 1 1 106 163 54 +72 2 2 1 1 152 160 103 +73 2 2 1 1 50 155 153 +74 2 2 1 1 27 61 26 +75 2 2 1 1 99 141 91 +76 2 2 1 1 22 106 54 +77 2 2 1 1 87 138 107 +78 2 2 1 1 82 116 115 +79 2 2 1 1 98 118 88 +80 2 2 1 1 70 130 68 +81 2 2 1 1 69 71 70 +82 2 2 1 1 87 123 83 +83 2 2 1 1 93 143 101 +84 2 2 1 1 140 142 46 +85 2 2 1 1 64 95 62 +86 2 2 1 1 69 72 71 +87 2 2 1 1 34 113 33 +88 2 2 1 1 22 54 21 +89 2 2 1 1 66 115 64 +90 2 2 1 1 141 143 93 +91 2 2 1 1 107 138 94 +92 2 2 1 1 117 126 100 +93 2 2 1 1 13 108 50 +94 2 2 1 1 33 113 49 +95 2 2 1 1 60 113 34 +96 2 2 1 1 77 130 70 +97 2 2 1 1 118 130 88 +98 2 2 1 1 98 119 118 +99 2 2 1 1 127 141 99 +100 2 2 1 1 49 127 32 +101 2 2 1 1 69 70 68 +102 2 2 1 1 83 123 86 +103 2 2 1 1 89 104 98 +104 2 2 1 1 30 46 29 +105 2 2 1 1 33 49 32 +106 2 2 1 1 47 121 39 +107 2 2 1 1 51 137 63 +108 2 2 1 1 74 138 75 +109 2 2 1 1 94 138 80 +110 2 2 1 1 102 107 94 +111 2 2 1 1 99 142 140 +112 2 2 1 1 29 137 28 +113 2 2 1 1 40 47 39 +114 2 2 1 1 51 62 61 +115 2 2 1 1 68 82 66 +116 2 2 1 1 93 101 94 +117 2 2 1 1 19 122 18 +118 2 2 1 1 20 48 19 +119 2 2 1 1 48 122 19 +120 2 2 1 1 84 89 88 +121 2 2 1 1 86 123 111 +122 2 2 1 1 89 98 88 +123 2 2 1 1 89 133 105 +124 2 2 1 1 97 126 90 +125 2 2 1 1 111 112 110 +126 2 2 1 1 7 124 6 +127 2 2 1 1 32 127 31 +128 2 2 1 1 38 55 37 +129 2 2 1 1 24 57 23 +130 2 2 1 1 57 106 23 +131 2 2 1 1 60 114 113 +132 2 2 1 1 76 117 81 +133 2 2 1 1 83 85 78 +134 2 2 1 1 85 90 78 +135 2 2 1 1 79 91 80 +136 2 2 1 1 79 134 92 +137 2 2 1 1 91 93 80 +138 2 2 1 1 82 130 118 +139 2 2 1 1 86 110 109 +140 2 2 1 1 92 99 91 +141 2 2 1 1 91 141 93 +142 2 2 1 1 6 124 58 +143 2 2 1 1 8 53 7 +144 2 2 1 1 9 125 8 +145 2 2 1 1 10 45 9 +146 2 2 1 1 14 108 13 +147 2 2 1 1 41 128 40 +148 2 2 1 1 40 128 47 +149 2 2 1 1 76 131 117 +150 2 2 1 1 81 117 100 +151 2 2 1 1 86 109 85 +152 2 2 1 1 117 131 90 +153 2 2 1 1 120 132 116 +154 2 2 1 
1 6 58 5 +155 2 2 1 1 8 125 53 +156 2 2 1 1 28 137 51 +157 2 2 1 1 46 137 29 +158 2 2 1 1 42 52 41 +159 2 2 1 1 23 106 22 +160 2 2 1 1 65 67 66 +161 2 2 1 1 82 115 66 +162 2 2 1 1 67 69 68 +163 2 2 1 1 71 76 70 +164 2 2 1 1 72 73 71 +165 2 2 1 1 71 131 76 +166 2 2 1 1 74 75 73 +167 2 2 1 1 80 138 74 +168 2 2 1 1 75 83 78 +169 2 2 1 1 90 131 78 +170 2 2 1 1 93 94 80 +171 2 2 1 1 83 86 85 +172 2 2 1 1 86 111 110 +173 2 2 1 1 101 102 94 +174 2 2 1 1 116 132 115 +175 2 2 1 1 45 125 9 +176 2 2 1 1 13 50 12 +177 2 2 1 1 127 140 31 +178 2 2 1 1 35 60 34 +179 2 2 1 1 38 121 55 +180 2 2 1 1 64 115 95 +181 2 2 1 1 72 74 73 +182 2 2 1 1 75 138 87 +183 2 2 1 1 76 81 77 +184 2 2 1 1 85 97 90 +185 2 2 1 1 85 109 97 +186 2 2 1 1 53 124 7 +187 2 2 1 1 18 56 17 +188 2 2 1 1 128 129 47 +189 2 2 1 1 63 64 62 +190 2 2 1 1 65 66 64 +191 2 2 1 1 67 135 134 +192 2 2 1 1 72 79 74 +193 2 2 1 1 75 78 73 +194 2 2 1 1 75 87 83 +195 2 2 1 1 84 88 77 +196 2 2 1 1 118 119 116 +197 2 2 1 1 28 51 27 +198 2 2 1 1 52 128 41 +199 2 2 1 1 46 139 137 +200 2 2 1 1 63 65 64 +201 2 2 1 1 68 130 82 +202 2 2 1 1 73 131 71 +203 2 2 1 1 72 134 79 +204 2 2 1 1 78 131 73 +205 2 2 1 1 79 80 74 +206 2 2 1 1 81 84 77 +207 2 2 1 1 79 92 91 +208 2 2 1 1 82 118 116 +209 2 2 1 1 96 133 84 +210 2 2 1 1 134 135 92 +211 2 2 1 1 115 132 95 +212 2 2 1 1 99 140 127 +213 2 2 1 1 119 120 116 +214 2 2 1 1 30 140 46 +215 2 2 1 1 39 121 38 +216 2 2 1 1 67 68 66 +217 2 2 1 1 67 134 69 +218 2 2 1 1 88 130 77 +219 2 2 1 1 15 59 14 +220 2 2 1 1 65 135 67 +221 2 2 1 1 81 100 96 +222 2 2 1 1 89 105 104 +223 2 2 1 1 51 61 27 +224 2 2 1 1 46 142 139 +225 2 2 1 1 137 139 63 +226 2 2 1 1 81 96 84 +227 2 2 1 1 84 133 89 +228 2 2 1 1 90 126 117 +229 2 2 1 1 18 122 56 +230 2 2 1 1 63 139 65 +231 2 2 1 1 76 77 70 +232 2 2 1 1 31 140 30 +233 2 2 1 1 49 141 127 +234 2 2 1 1 95 136 62 +235 2 2 1 1 100 103 96 +236 2 2 1 1 114 143 113 +237 2 2 1 1 139 142 135 +238 2 2 1 1 51 63 62 +239 2 2 1 1 101 143 114 +240 2 2 1 1 49 143 141 +241 2 2 1 1 59 
108 14 +242 2 2 1 1 69 134 72 +243 2 2 1 1 92 142 99 +244 2 2 1 1 62 136 61 +245 2 2 1 1 113 143 49 +246 2 2 1 1 65 139 135 +247 2 2 1 1 135 142 92 +248 2 2 1 1 149 170 111 +249 2 2 1 1 148 169 59 +250 2 2 1 1 26 146 25 +251 2 2 1 1 17 144 16 +252 2 2 1 1 37 145 36 +253 2 2 1 1 136 146 61 +254 2 2 1 1 52 170 149 +255 2 2 1 1 120 154 54 +256 2 2 1 1 123 149 111 +257 2 2 1 1 128 149 129 +258 2 2 1 1 104 150 98 +259 2 2 1 1 55 145 37 +260 2 2 1 1 56 144 17 +261 2 2 1 1 61 146 26 +262 2 2 1 1 42 151 52 +263 2 2 1 1 44 147 43 +264 2 2 1 1 11 152 10 +265 2 2 1 1 96 153 133 +266 2 2 1 1 20 154 48 +267 2 2 1 1 54 154 21 +268 2 2 1 1 50 160 12 +269 2 2 1 1 100 161 103 +270 2 2 1 1 95 163 136 +271 2 2 1 1 114 162 101 +272 2 2 1 1 47 164 121 +273 2 2 1 1 112 151 147 +274 2 2 1 1 4 165 35 +275 2 2 1 1 3 166 24 +276 2 2 1 1 36 165 4 +277 2 2 1 1 25 166 3 +278 2 2 1 1 16 167 2 +279 2 2 1 1 2 167 15 +280 2 2 1 1 5 168 1 +281 2 2 1 1 1 168 44 +282 2 2 1 1 147 151 43 +283 2 2 1 1 52 149 128 +284 2 2 1 1 98 150 119 +285 2 2 1 1 48 150 122 +286 2 2 1 1 107 164 129 +287 2 2 1 1 56 148 144 +288 2 2 1 1 59 169 108 +289 2 2 1 1 153 160 50 +290 2 2 1 1 103 161 152 +291 2 2 1 1 43 151 42 +292 2 2 1 1 10 152 45 +293 2 2 1 1 103 153 96 +294 2 2 1 1 102 164 107 +295 2 2 1 1 21 154 20 +296 2 2 1 1 45 161 156 +297 2 2 1 1 158 159 124 +298 2 2 1 1 110 159 158 +299 2 2 1 1 108 155 50 +300 2 2 1 1 133 155 105 +301 2 2 1 1 53 158 124 +302 2 2 1 1 125 157 53 +303 2 2 1 1 112 159 110 +304 2 2 1 1 45 156 125 +305 2 2 1 1 157 158 53 +306 2 2 1 1 156 157 125 +307 2 2 1 1 109 157 97 +308 2 2 1 1 97 156 126 +309 2 2 1 1 97 157 156 +310 2 2 1 1 124 159 58 +311 2 2 1 1 109 158 157 +312 2 2 1 1 110 158 109 +313 2 2 1 1 156 161 126 +314 2 2 1 1 12 160 11 +315 2 2 1 1 126 161 100 +316 2 2 1 1 101 162 102 +317 2 2 1 1 132 163 95 +318 2 2 1 1 129 164 47 +319 2 2 1 1 144 167 16 +320 2 2 1 1 145 165 36 +321 2 2 1 1 146 166 25 +322 2 2 1 1 58 168 5 +323 2 2 1 1 24 166 57 +324 2 2 1 1 15 167 59 +325 2 2 1 1 35 165 
60 +326 2 2 1 1 151 170 52 +327 2 2 1 1 111 170 112 +328 2 2 1 1 59 167 144 +329 2 2 1 1 57 166 146 +330 2 2 1 1 60 165 145 +331 2 2 1 1 147 168 58 +332 2 2 1 1 44 168 147 +333 2 2 1 1 152 161 45 +334 2 2 1 1 103 160 153 +335 2 2 1 1 105 169 148 +336 2 2 1 1 108 169 155 +337 2 2 1 1 155 169 105 +338 2 2 1 1 112 170 151 +$EndElements +$Periodic +2 +1 2 4 +Affine 1 0 0 0.6 0 1 0 0 0 0 1 0 0 0 0 1 +11 +2 1 +3 4 +16 44 +17 43 +18 42 +19 41 +20 40 +21 39 +22 38 +23 37 +24 36 +1 3 1 +Affine 1 0 0 0 0 1 0 0.5 0 0 1 0 0 0 0 1 +13 +3 2 +4 1 +25 15 +26 14 +27 13 +28 12 +29 11 +30 10 +31 9 +32 8 +33 7 +34 6 +35 5 +$EndPeriodic diff --git a/tests/firedrake/meshes/p3d.geo b/tests/firedrake/meshes/p3d.geo new file mode 100644 index 0000000000..28add2db92 --- /dev/null +++ b/tests/firedrake/meshes/p3d.geo @@ -0,0 +1,17 @@ +SetFactory("OpenCASCADE"); + +Box(1) = {0,0,0,1,1,1}; +MeshSize {:} = 0.3; + +// Periodic Surface +// S2: Right side +// S1: Left Side ( Translate the 1 surface by 1) +Periodic Surface {2} = {1} Translate {1,0,0}; + +Physical Volume(1) = {1}; +Physical Surface(1) = {1}; +Physical Surface(2) = {2}; +Physical Surface(3) = {3}; +Physical Surface(4) = {4}; +Physical Surface(5) = {5}; +Physical Surface(6) = {6}; diff --git a/tests/firedrake/meshes/p3d.msh b/tests/firedrake/meshes/p3d.msh new file mode 100644 index 0000000000..0738bb518a --- /dev/null +++ b/tests/firedrake/meshes/p3d.msh @@ -0,0 +1,839 @@ +$MeshFormat +2.2 0 8 +$EndMeshFormat +$Nodes +143 +1 0 0 1 +2 0 0 0 +3 0 1 1 +4 0 1 0 +5 1 0 1 +6 1 0 0 +7 1 1 1 +8 1 1 0 +9 0 0 0.2499999999999997 +10 0 0 0.4999999999999988 +11 0 0 0.7499999999999989 +12 0 0.2499999999999997 1 +13 0 0.4999999999999988 1 +14 0 0.7499999999999989 1 +15 0 1 0.2499999999999997 +16 0 1 0.4999999999999988 +17 0 1 0.7499999999999989 +18 0 0.2499999999999997 0 +19 0 0.4999999999999988 0 +20 0 0.7499999999999989 0 +21 1 0 0.2499999999999997 +22 1 0 0.4999999999999988 +23 1 0 0.7499999999999989 +24 1 0.2499999999999997 1 +25 1 
0.4999999999999988 1 +26 1 0.7499999999999989 1 +27 1 1 0.2499999999999997 +28 1 1 0.4999999999999988 +29 1 1 0.7499999999999989 +30 1 0.2499999999999997 0 +31 1 0.4999999999999988 0 +32 1 0.7499999999999989 0 +33 0.2499999999999997 0 0 +34 0.4999999999999988 0 0 +35 0.7499999999999989 0 0 +36 0.2499999999999997 0 1 +37 0.4999999999999988 0 1 +38 0.7499999999999989 0 1 +39 0.2499999999999997 1 0 +40 0.4999999999999988 1 0 +41 0.7499999999999989 1 0 +42 0.2499999999999997 1 1 +43 0.4999999999999988 1 1 +44 0.7499999999999989 1 1 +45 0 0.7834936490538906 0.6249999999999989 +46 0 0.2046118127312859 0.6359641041884292 +47 0 0.6239406576797799 0.2116930112639576 +48 0 0.3759949993707399 0.2114006558410865 +49 0 0.4999999999999995 0.4330127018922183 +50 0 0.7874036008069083 0.3671197358413988 +51 0 0.5203442540690995 0.7082726776397392 +52 0 0.2365667220822624 0.3989630255083483 +53 0 0.6896861734925073 0.8231510809247649 +54 0 0.3749999999999993 0.8447098097131593 +55 0 0.8169872981077801 0.1830127018922196 +56 0 0.1830127018922192 0.1830127018922192 +57 0 0.1780977805306458 0.8250407933257091 +58 0 0.3832210891220216 0.5884011055039227 +59 0 0.8540734935786143 0.8515195403519169 +60 1 0.7834936490538906 0.6249999999999989 +61 1 0.2046118127312859 0.6359641041884292 +62 1 0.6239406576797799 0.2116930112639576 +63 1 0.3759949993707399 0.2114006558410865 +64 1 0.4999999999999995 0.4330127018922183 +65 1 0.7874036008069083 0.3671197358413988 +66 1 0.5203442540690995 0.7082726776397392 +67 1 0.2365667220822624 0.3989630255083483 +68 1 0.6896861734925073 0.8231510809247649 +69 1 0.3749999999999993 0.8447098097131593 +70 1 0.8169872981077801 0.1830127018922196 +71 1 0.1830127018922192 0.1830127018922192 +72 1 0.1780977805306458 0.8250407933257091 +73 1 0.3832210891220216 0.5884011055039227 +74 1 0.8540734935786143 0.8515195403519169 +75 0.6249999999999989 0 0.7834936490538906 +76 0.6359641041884292 0 0.2046118127312859 +77 0.2116930112639576 0 0.6239406576797799 +78 
0.2114006558410865 0 0.3759949993707399 +79 0.4330127018922183 0 0.4999999999999995 +80 0.3671197358413988 0 0.7874036008069083 +81 0.7082726776397392 0 0.5203442540690995 +82 0.3989630255083483 0 0.2365667220822624 +83 0.8231510809247649 0 0.6896861734925073 +84 0.8447098097131593 0 0.3749999999999993 +85 0.1830127018922196 0 0.8169872981077801 +86 0.1830127018922192 0 0.1830127018922192 +87 0.8250407933257091 0 0.1780977805306458 +88 0.5884011055039227 0 0.3832210891220216 +89 0.8515195403519169 0 0.8540734935786143 +90 0.6249999999999989 1 0.7834936490538906 +91 0.6359641041884292 1 0.2046118127312859 +92 0.2116930112639576 1 0.6239406576797799 +93 0.2114006558410865 1 0.3759949993707399 +94 0.4330127018922183 1 0.4999999999999995 +95 0.3671197358413988 1 0.7874036008069083 +96 0.7082726776397392 1 0.5203442540690995 +97 0.3989630255083483 1 0.2365667220822624 +98 0.8231510809247649 1 0.6896861734925073 +99 0.8447098097131593 1 0.3749999999999993 +100 0.1830127018922196 1 0.8169872981077801 +101 0.1830127018922192 1 0.1830127018922192 +102 0.8250407933257091 1 0.1780977805306458 +103 0.5884011055039227 1 0.3832210891220216 +104 0.8515195403519169 1 0.8540734935786143 +105 0.7834936490538906 0.6249999999999989 0 +106 0.2046118127312859 0.6359641041884292 0 +107 0.6239406576797799 0.2116930112639576 0 +108 0.3759949993707399 0.2114006558410865 0 +109 0.4999999999999995 0.4330127018922183 0 +110 0.7874036008069083 0.3671197358413988 0 +111 0.5203442540690995 0.7082726776397392 0 +112 0.2365667220822624 0.3989630255083483 0 +113 0.6896861734925073 0.8231510809247649 0 +114 0.3749999999999993 0.8447098097131593 0 +115 0.8169872981077801 0.1830127018922196 0 +116 0.1830127018922192 0.1830127018922192 0 +117 0.1780977805306458 0.8250407933257091 0 +118 0.3832210891220216 0.5884011055039227 0 +119 0.8540734935786143 0.8515195403519169 0 +120 0.6249999999999989 0.7834936490538906 1 +121 0.6359641041884292 0.2046118127312859 1 +122 0.2116930112639576 0.6239406576797799 1 
+123 0.2114006558410861 0.3759949993707395 1 +124 0.433012701892218 0.4999999999999987 1 +125 0.3671197358413987 0.7874036008069082 1 +126 0.708272677639739 0.520344254069099 1 +127 0.3989630255083481 0.2365667220822622 1 +128 0.8231510809247647 0.6896861734925068 1 +129 0.8447098097131593 0.3749999999999992 1 +130 0.1830127018922196 0.8169872981077801 1 +131 0.1830127018922189 0.1830127018922189 1 +132 0.8250407933257091 0.1780977805306459 1 +133 0.5884011055039224 0.3832210891220214 1 +134 0.8515195403519167 0.8540734935786141 1 +135 0.5311660704081805 0.4688339295918192 0.5311660704081804 +136 0.3129625374984548 0.6837047509419145 0.3125095581543041 +137 0.3019983492042677 0.3019983492042673 0.3019983492042676 +138 0.2978524042887684 0.3158078589280904 0.694389544073173 +139 0.6539728275435966 0.681890006411153 0.3050450347681465 +140 0.7100164430621708 0.2909798945004842 0.2938075582665383 +141 0.2939025133261642 0.658378522187882 0.6870491819080808 +142 0.7437259318470089 0.2856540108202066 0.7437259318470087 +143 0.7128248123519866 0.7128248123519865 0.7128248123519866 +$EndNodes +$Elements +650 +1 2 2 1 1 11 1 57 +2 2 2 1 1 1 12 57 +3 2 2 1 1 2 9 56 +4 2 2 1 1 18 2 56 +5 2 2 1 1 14 3 59 +6 2 2 1 1 3 17 59 +7 2 2 1 1 15 4 55 +8 2 2 1 1 4 20 55 +9 2 2 1 1 9 10 52 +10 2 2 1 1 9 52 56 +11 2 2 1 1 10 11 46 +12 2 2 1 1 10 46 52 +13 2 2 1 1 46 11 57 +14 2 2 1 1 12 13 54 +15 2 2 1 1 12 54 57 +16 2 2 1 1 13 14 53 +17 2 2 1 1 51 13 53 +18 2 2 1 1 13 51 54 +19 2 2 1 1 53 14 59 +20 2 2 1 1 16 15 50 +21 2 2 1 1 50 15 55 +22 2 2 1 1 17 16 45 +23 2 2 1 1 45 16 50 +24 2 2 1 1 17 45 59 +25 2 2 1 1 19 18 48 +26 2 2 1 1 48 18 56 +27 2 2 1 1 20 19 47 +28 2 2 1 1 47 19 48 +29 2 2 1 1 20 47 55 +30 2 2 1 1 49 45 50 +31 2 2 1 1 45 49 51 +32 2 2 1 1 45 51 53 +33 2 2 1 1 45 53 59 +34 2 2 1 1 52 46 58 +35 2 2 1 1 54 46 57 +36 2 2 1 1 46 54 58 +37 2 2 1 1 47 48 49 +38 2 2 1 1 47 49 50 +39 2 2 1 1 47 50 55 +40 2 2 1 1 49 48 52 +41 2 2 1 1 52 48 56 +42 2 2 1 1 51 49 58 +43 2 2 1 1 49 52 
58 +44 2 2 1 1 54 51 58 +45 2 2 2 2 23 5 72 +46 2 2 2 2 5 24 72 +47 2 2 2 2 6 21 71 +48 2 2 2 2 30 6 71 +49 2 2 2 2 26 7 74 +50 2 2 2 2 7 29 74 +51 2 2 2 2 27 8 70 +52 2 2 2 2 8 32 70 +53 2 2 2 2 21 22 67 +54 2 2 2 2 21 67 71 +55 2 2 2 2 22 23 61 +56 2 2 2 2 22 61 67 +57 2 2 2 2 61 23 72 +58 2 2 2 2 24 25 69 +59 2 2 2 2 24 69 72 +60 2 2 2 2 25 26 68 +61 2 2 2 2 66 25 68 +62 2 2 2 2 25 66 69 +63 2 2 2 2 68 26 74 +64 2 2 2 2 28 27 65 +65 2 2 2 2 65 27 70 +66 2 2 2 2 29 28 60 +67 2 2 2 2 60 28 65 +68 2 2 2 2 29 60 74 +69 2 2 2 2 31 30 63 +70 2 2 2 2 63 30 71 +71 2 2 2 2 32 31 62 +72 2 2 2 2 62 31 63 +73 2 2 2 2 32 62 70 +74 2 2 2 2 64 60 65 +75 2 2 2 2 60 64 66 +76 2 2 2 2 60 66 68 +77 2 2 2 2 60 68 74 +78 2 2 2 2 67 61 73 +79 2 2 2 2 69 61 72 +80 2 2 2 2 61 69 73 +81 2 2 2 2 62 63 64 +82 2 2 2 2 62 64 65 +83 2 2 2 2 62 65 70 +84 2 2 2 2 64 63 67 +85 2 2 2 2 67 63 71 +86 2 2 2 2 66 64 73 +87 2 2 2 2 64 67 73 +88 2 2 2 2 69 66 73 +89 2 2 3 3 1 11 85 +90 2 2 3 3 36 1 85 +91 2 2 3 3 9 2 86 +92 2 2 3 3 2 33 86 +93 2 2 3 3 23 5 89 +94 2 2 3 3 5 38 89 +95 2 2 3 3 6 21 87 +96 2 2 3 3 35 6 87 +97 2 2 3 3 10 9 78 +98 2 2 3 3 78 9 86 +99 2 2 3 3 11 10 77 +100 2 2 3 3 77 10 78 +101 2 2 3 3 11 77 85 +102 2 2 3 3 21 22 84 +103 2 2 3 3 21 84 87 +104 2 2 3 3 22 23 83 +105 2 2 3 3 81 22 83 +106 2 2 3 3 22 81 84 +107 2 2 3 3 83 23 89 +108 2 2 3 3 33 34 82 +109 2 2 3 3 33 82 86 +110 2 2 3 3 34 35 76 +111 2 2 3 3 34 76 82 +112 2 2 3 3 76 35 87 +113 2 2 3 3 37 36 80 +114 2 2 3 3 80 36 85 +115 2 2 3 3 38 37 75 +116 2 2 3 3 75 37 80 +117 2 2 3 3 38 75 89 +118 2 2 3 3 79 75 80 +119 2 2 3 3 75 79 81 +120 2 2 3 3 75 81 83 +121 2 2 3 3 75 83 89 +122 2 2 3 3 82 76 88 +123 2 2 3 3 84 76 87 +124 2 2 3 3 76 84 88 +125 2 2 3 3 77 78 79 +126 2 2 3 3 77 79 80 +127 2 2 3 3 77 80 85 +128 2 2 3 3 79 78 82 +129 2 2 3 3 82 78 86 +130 2 2 3 3 81 79 88 +131 2 2 3 3 79 82 88 +132 2 2 3 3 84 81 88 +133 2 2 4 4 3 100 17 +134 2 2 4 4 42 100 3 +135 2 2 4 4 15 101 4 +136 2 2 4 4 4 101 39 +137 2 2 4 4 29 104 7 
+138 2 2 4 4 7 104 44 +139 2 2 4 4 8 102 27 +140 2 2 4 4 41 102 8 +141 2 2 4 4 16 93 15 +142 2 2 4 4 93 101 15 +143 2 2 4 4 17 92 16 +144 2 2 4 4 92 93 16 +145 2 2 4 4 17 100 92 +146 2 2 4 4 27 99 28 +147 2 2 4 4 27 102 99 +148 2 2 4 4 28 98 29 +149 2 2 4 4 96 98 28 +150 2 2 4 4 28 99 96 +151 2 2 4 4 98 104 29 +152 2 2 4 4 39 97 40 +153 2 2 4 4 39 101 97 +154 2 2 4 4 40 91 41 +155 2 2 4 4 40 97 91 +156 2 2 4 4 91 102 41 +157 2 2 4 4 43 95 42 +158 2 2 4 4 95 100 42 +159 2 2 4 4 44 90 43 +160 2 2 4 4 90 95 43 +161 2 2 4 4 44 104 90 +162 2 2 4 4 94 95 90 +163 2 2 4 4 90 96 94 +164 2 2 4 4 90 98 96 +165 2 2 4 4 90 104 98 +166 2 2 4 4 97 103 91 +167 2 2 4 4 99 102 91 +168 2 2 4 4 91 103 99 +169 2 2 4 4 92 94 93 +170 2 2 4 4 92 95 94 +171 2 2 4 4 92 100 95 +172 2 2 4 4 94 97 93 +173 2 2 4 4 97 101 93 +174 2 2 4 4 96 103 94 +175 2 2 4 4 94 103 97 +176 2 2 4 4 99 103 96 +177 2 2 5 5 2 18 116 +178 2 2 5 5 33 2 116 +179 2 2 5 5 20 4 117 +180 2 2 5 5 4 39 117 +181 2 2 5 5 30 6 115 +182 2 2 5 5 6 35 115 +183 2 2 5 5 8 32 119 +184 2 2 5 5 41 8 119 +185 2 2 5 5 18 19 112 +186 2 2 5 5 18 112 116 +187 2 2 5 5 19 20 106 +188 2 2 5 5 19 106 112 +189 2 2 5 5 106 20 117 +190 2 2 5 5 31 30 110 +191 2 2 5 5 110 30 115 +192 2 2 5 5 32 31 105 +193 2 2 5 5 105 31 110 +194 2 2 5 5 32 105 119 +195 2 2 5 5 34 33 108 +196 2 2 5 5 108 33 116 +197 2 2 5 5 35 34 107 +198 2 2 5 5 107 34 108 +199 2 2 5 5 35 107 115 +200 2 2 5 5 39 40 114 +201 2 2 5 5 39 114 117 +202 2 2 5 5 40 41 113 +203 2 2 5 5 111 40 113 +204 2 2 5 5 40 111 114 +205 2 2 5 5 113 41 119 +206 2 2 5 5 109 105 110 +207 2 2 5 5 105 109 111 +208 2 2 5 5 105 111 113 +209 2 2 5 5 105 113 119 +210 2 2 5 5 112 106 118 +211 2 2 5 5 114 106 117 +212 2 2 5 5 106 114 118 +213 2 2 5 5 107 108 109 +214 2 2 5 5 107 109 110 +215 2 2 5 5 107 110 115 +216 2 2 5 5 109 108 112 +217 2 2 5 5 112 108 116 +218 2 2 5 5 111 109 118 +219 2 2 5 5 109 112 118 +220 2 2 5 5 114 111 118 +221 2 2 6 6 1 131 12 +222 2 2 6 6 36 131 1 +223 2 2 6 6 14 130 3 +224 2 2 6 
6 3 130 42 +225 2 2 6 6 24 132 5 +226 2 2 6 6 5 132 38 +227 2 2 6 6 7 134 26 +228 2 2 6 6 44 134 7 +229 2 2 6 6 12 123 13 +230 2 2 6 6 12 131 123 +231 2 2 6 6 13 122 14 +232 2 2 6 6 13 123 122 +233 2 2 6 6 122 130 14 +234 2 2 6 6 25 129 24 +235 2 2 6 6 129 132 24 +236 2 2 6 6 26 128 25 +237 2 2 6 6 25 128 126 +238 2 2 6 6 126 129 25 +239 2 2 6 6 26 134 128 +240 2 2 6 6 37 127 36 +241 2 2 6 6 127 131 36 +242 2 2 6 6 38 121 37 +243 2 2 6 6 121 127 37 +244 2 2 6 6 38 132 121 +245 2 2 6 6 42 125 43 +246 2 2 6 6 42 130 125 +247 2 2 6 6 43 120 44 +248 2 2 6 6 43 125 120 +249 2 2 6 6 120 134 44 +250 2 2 6 6 120 125 124 +251 2 2 6 6 124 126 120 +252 2 2 6 6 126 128 120 +253 2 2 6 6 128 134 120 +254 2 2 6 6 121 133 127 +255 2 2 6 6 121 132 129 +256 2 2 6 6 129 133 121 +257 2 2 6 6 123 124 122 +258 2 2 6 6 124 125 122 +259 2 2 6 6 125 130 122 +260 2 2 6 6 123 127 124 +261 2 2 6 6 123 131 127 +262 2 2 6 6 124 133 126 +263 2 2 6 6 127 133 124 +264 2 2 6 6 126 133 129 +265 4 2 1 1 82 107 108 137 +266 4 2 1 1 52 77 78 137 +267 4 2 1 1 112 47 48 136 +268 4 2 1 1 88 135 81 140 +269 4 2 1 1 127 138 133 142 +270 4 2 1 1 88 137 135 140 +271 4 2 1 1 81 61 67 142 +272 4 2 1 1 46 77 52 138 +273 4 2 1 1 77 137 52 138 +274 4 2 1 1 109 139 137 140 +275 4 2 1 1 107 137 82 140 +276 4 2 1 1 76 107 82 140 +277 4 2 1 1 135 137 139 140 +278 4 2 1 1 92 45 93 136 +279 4 2 1 1 106 47 112 136 +280 4 2 1 1 73 140 67 142 +281 4 2 1 1 48 112 136 137 +282 4 2 1 1 127 133 121 142 +283 4 2 1 1 73 67 61 142 +284 4 2 1 1 136 137 109 139 +285 4 2 1 1 63 62 105 140 +286 4 2 1 1 49 138 137 141 +287 4 2 1 1 49 137 136 141 +288 4 2 1 1 103 139 96 143 +289 4 2 1 1 135 136 94 139 +290 4 2 1 1 136 137 135 141 +291 4 2 1 1 137 138 135 141 +292 4 2 1 1 135 140 73 142 +293 4 2 1 1 81 88 79 135 +294 4 2 1 1 81 67 140 142 +295 4 2 1 1 135 137 136 139 +296 4 2 1 1 133 138 135 142 +297 4 2 1 1 135 138 79 142 +298 4 2 1 1 94 136 135 141 +299 4 2 1 1 81 135 79 142 +300 4 2 1 1 79 135 88 137 +301 4 2 1 1 127 121 75 142 +302 
4 2 1 1 81 140 135 142 +303 4 2 1 1 127 75 138 142 +304 4 2 1 1 66 64 73 143 +305 4 2 1 1 103 96 94 143 +306 4 2 1 1 133 124 126 143 +307 4 2 1 1 75 79 138 142 +308 4 2 1 1 63 64 62 140 +309 4 2 1 1 135 139 94 143 +310 4 2 1 1 92 93 94 136 +311 4 2 1 1 109 108 107 137 +312 4 2 1 1 78 77 79 137 +313 4 2 1 1 93 45 50 136 +314 4 2 1 1 103 94 139 143 +315 4 2 1 1 82 137 88 140 +316 4 2 1 1 94 141 135 143 +317 4 2 1 1 105 110 63 140 +318 4 2 1 1 49 58 137 138 +319 4 2 1 1 48 47 49 136 +320 4 2 1 1 73 135 64 140 +321 4 2 1 1 127 75 80 138 +322 4 2 1 1 64 135 73 143 +323 4 2 1 1 133 135 124 143 +324 4 2 1 1 62 139 105 140 +325 4 2 1 1 109 136 118 137 +326 4 2 1 1 124 135 133 138 +327 4 2 1 1 51 122 123 141 +328 4 2 1 1 109 118 136 139 +329 4 2 1 1 52 137 58 138 +330 4 2 1 1 54 51 122 123 +331 4 2 1 1 76 82 88 140 +332 4 2 1 1 112 118 136 137 +333 4 2 1 1 92 45 136 141 +334 4 2 1 1 119 70 102 139 +335 4 2 1 1 123 54 51 138 +336 4 2 1 1 96 99 65 139 +337 4 2 1 1 58 138 49 141 +338 4 2 1 1 95 125 141 143 +339 4 2 1 1 65 96 139 143 +340 4 2 1 1 106 112 118 136 +341 4 2 1 1 46 52 58 138 +342 4 2 1 1 79 137 77 138 +343 4 2 1 1 119 102 113 139 +344 4 2 1 1 107 109 137 140 +345 4 2 1 1 64 139 62 140 +346 4 2 1 1 75 81 79 142 +347 4 2 1 1 129 66 69 142 +348 4 2 1 1 97 114 111 136 +349 4 2 1 1 65 139 64 143 +350 4 2 1 1 48 136 49 137 +351 4 2 1 1 51 123 138 141 +352 4 2 1 1 94 95 141 143 +353 4 2 1 1 80 75 79 138 +354 4 2 1 1 94 92 136 141 +355 4 2 1 1 91 97 111 139 +356 4 2 1 1 79 135 137 138 +357 4 2 1 1 95 90 125 143 +358 4 2 1 1 84 81 67 140 +359 4 2 1 1 48 112 19 47 +360 4 2 1 1 82 34 108 107 +361 4 2 1 1 78 52 10 77 +362 4 2 1 1 97 136 111 139 +363 4 2 1 1 66 129 126 142 +364 4 2 1 1 64 139 135 143 +365 4 2 1 1 103 136 97 139 +366 4 2 1 1 65 60 96 143 +367 4 2 1 1 135 139 64 140 +368 4 2 1 1 130 59 100 141 +369 4 2 1 1 65 64 60 143 +370 4 2 1 1 94 90 95 143 +371 4 2 1 1 103 94 136 139 +372 4 2 1 1 66 142 126 143 +373 4 2 1 1 128 68 25 66 +374 4 2 1 1 55 117 101 136 +375 4 2 1 
1 135 141 124 143 +376 4 2 1 1 77 52 10 46 +377 4 2 1 1 82 34 107 76 +378 4 2 1 1 47 112 19 106 +379 4 2 1 1 91 97 114 111 +380 4 2 1 1 84 81 61 67 +381 4 2 1 1 68 66 128 143 +382 4 2 1 1 111 136 118 139 +383 4 2 1 1 81 83 61 142 +384 4 2 1 1 114 101 117 136 +385 4 2 1 1 130 53 59 141 +386 4 2 1 1 124 123 122 141 +387 4 2 1 1 120 125 90 143 +388 4 2 1 1 55 106 117 136 +389 4 2 1 1 124 135 138 141 +390 4 2 1 1 51 138 58 141 +391 4 2 1 1 91 103 97 139 +392 4 2 1 1 31 62 105 63 +393 4 2 1 1 92 16 93 45 +394 4 2 1 1 55 47 106 136 +395 4 2 1 1 129 66 25 69 +396 4 2 1 1 131 85 57 138 +397 4 2 1 1 126 128 25 66 +398 4 2 1 1 124 133 127 138 +399 4 2 1 1 109 111 118 139 +400 4 2 1 1 73 64 67 140 +401 4 2 1 1 56 78 86 137 +402 4 2 1 1 86 108 116 137 +403 4 2 1 1 116 48 56 137 +404 4 2 1 1 128 66 126 143 +405 4 2 1 1 119 105 70 139 +406 4 2 1 1 85 46 57 138 +407 4 2 1 1 112 109 118 137 +408 4 2 1 1 52 49 58 137 +409 4 2 1 1 79 88 82 137 +410 4 2 1 1 103 94 97 136 +411 4 2 1 1 99 60 96 65 +412 4 2 1 1 91 113 102 139 +413 4 2 1 1 97 101 114 136 +414 4 2 1 1 93 50 101 136 +415 4 2 1 1 46 85 77 138 +416 4 2 1 1 124 138 123 141 +417 4 2 1 1 125 130 100 141 +418 4 2 1 1 54 131 57 138 +419 4 2 1 1 101 50 55 136 +420 4 2 1 1 66 129 25 126 +421 4 2 1 1 51 58 49 141 +422 4 2 1 1 48 116 112 137 +423 4 2 1 1 78 56 52 137 +424 4 2 1 1 86 82 108 137 +425 4 2 1 1 54 123 131 138 +426 4 2 1 1 54 122 13 123 +427 4 2 1 1 111 113 91 139 +428 4 2 1 1 102 99 91 139 +429 4 2 1 1 116 56 86 137 +430 4 2 1 1 131 80 85 138 +431 4 2 1 1 113 105 119 139 +432 4 2 1 1 50 93 16 45 +433 4 2 1 1 110 31 105 63 +434 4 2 1 1 70 105 62 139 +435 4 2 1 1 122 51 53 141 +436 4 2 1 1 122 13 51 54 +437 4 2 1 1 114 40 91 111 +438 4 2 1 1 84 61 81 22 +439 4 2 1 1 104 74 134 143 +440 4 2 1 1 22 84 61 67 +441 4 2 1 1 114 40 97 91 +442 4 2 1 1 70 62 65 139 +443 4 2 1 1 45 100 59 141 +444 4 2 1 1 131 127 80 138 +445 4 2 1 1 62 64 65 139 +446 4 2 1 1 114 117 106 136 +447 4 2 1 1 96 103 99 139 +448 4 2 1 1 125 100 95 141 +449 
4 2 1 1 103 91 99 139 +450 4 2 1 1 125 95 43 90 +451 4 2 1 1 127 121 37 75 +452 4 2 1 1 130 122 53 141 +453 4 2 1 1 92 94 95 141 +454 4 2 1 1 87 115 76 140 +455 4 2 1 1 76 115 107 140 +456 4 2 1 1 122 51 13 53 +457 4 2 1 1 71 115 87 140 +458 4 2 1 1 84 71 87 140 +459 4 2 1 1 106 118 114 136 +460 4 2 1 1 120 126 124 143 +461 4 2 1 1 66 60 64 143 +462 4 2 1 1 90 94 96 143 +463 4 2 1 1 100 45 92 141 +464 4 2 1 1 90 44 134 104 +465 4 2 1 1 97 93 101 136 +466 4 2 1 1 61 83 81 22 +467 4 2 1 1 113 91 40 111 +468 4 2 1 1 37 80 127 75 +469 4 2 1 1 118 111 114 136 +470 4 2 1 1 95 100 92 141 +471 4 2 1 1 134 74 68 143 +472 4 2 1 1 104 98 74 143 +473 4 2 1 1 77 80 79 138 +474 4 2 1 1 45 59 53 141 +475 4 2 1 1 110 71 63 140 +476 4 2 1 1 110 115 71 140 +477 4 2 1 1 67 71 84 140 +478 4 2 1 1 54 57 46 138 +479 4 2 1 1 97 94 93 136 +480 4 2 1 1 43 120 125 90 +481 4 2 1 1 105 111 109 139 +482 4 2 1 1 68 128 134 143 +483 4 2 1 1 104 134 90 143 +484 4 2 1 1 54 46 58 138 +485 4 2 1 1 123 127 131 138 +486 4 2 1 1 47 50 49 136 +487 4 2 1 1 55 50 47 136 +488 4 2 1 1 124 127 123 138 +489 4 2 1 1 48 49 52 137 +490 4 2 1 1 109 112 108 137 +491 4 2 1 1 79 82 78 137 +492 4 2 1 1 89 132 72 142 +493 4 2 1 1 84 88 81 140 +494 4 2 1 1 85 80 77 138 +495 4 2 1 1 110 109 107 140 +496 4 2 1 1 54 58 51 138 +497 4 2 1 1 98 96 60 143 +498 4 2 1 1 96 28 60 99 +499 4 2 1 1 56 48 52 137 +500 4 2 1 1 108 112 116 137 +501 4 2 1 1 86 78 82 137 +502 4 2 1 1 52 9 56 78 +503 4 2 1 1 86 82 33 108 +504 4 2 1 1 18 48 116 112 +505 4 2 1 1 51 49 45 141 +506 4 2 1 1 86 56 9 78 +507 4 2 1 1 33 116 86 108 +508 4 2 1 1 116 56 48 18 +509 4 2 1 1 63 67 64 140 +510 4 2 1 1 89 72 83 142 +511 4 2 1 1 76 88 84 140 +512 4 2 1 1 89 75 132 142 +513 4 2 1 1 130 125 122 141 +514 4 2 1 1 84 87 76 140 +515 4 2 1 1 124 122 125 141 +516 4 2 1 1 89 75 38 132 +517 4 2 1 1 107 76 35 115 +518 4 2 1 1 20 47 106 55 +519 4 2 1 1 77 11 85 46 +520 4 2 1 1 57 85 11 46 +521 4 2 1 1 117 20 106 55 +522 4 2 1 1 87 35 76 115 +523 4 2 1 1 15 101 93 50 
+524 4 2 1 1 110 30 63 71 +525 4 2 1 1 28 60 98 96 +526 4 2 1 1 132 129 72 142 +527 4 2 1 1 99 28 60 65 +528 4 2 1 1 101 15 55 50 +529 4 2 1 1 110 30 71 115 +530 4 2 1 1 42 130 100 125 +531 4 2 1 1 72 61 83 142 +532 4 2 1 1 100 17 45 59 +533 4 2 1 1 105 119 70 32 +534 4 2 1 1 121 132 75 142 +535 4 2 1 1 81 75 83 142 +536 4 2 1 1 67 63 71 140 +537 4 2 1 1 73 69 66 142 +538 4 2 1 1 133 126 129 142 +539 4 2 1 1 129 69 72 142 +540 4 2 1 1 121 38 75 132 +541 4 2 1 1 17 45 92 100 +542 4 2 1 1 62 105 70 32 +543 4 2 1 1 36 80 131 127 +544 4 2 1 1 98 60 74 143 +545 4 2 1 1 120 90 44 134 +546 4 2 1 1 131 85 80 36 +547 4 2 1 1 112 18 48 19 +548 4 2 1 1 82 34 33 108 +549 4 2 1 1 10 9 52 78 +550 4 2 1 1 133 129 121 142 +551 4 2 1 1 61 69 73 142 +552 4 2 1 1 72 132 24 129 +553 4 2 1 1 95 42 100 125 +554 4 2 1 1 107 115 110 140 +555 4 2 1 1 12 54 131 57 +556 4 2 1 1 102 27 99 70 +557 4 2 1 1 90 134 120 143 +558 4 2 1 1 56 2 116 86 +559 4 2 1 1 55 117 4 101 +560 4 2 1 1 6 87 71 115 +561 4 2 1 1 1 131 85 57 +562 4 2 1 1 12 54 123 131 +563 4 2 1 1 129 24 72 69 +564 4 2 1 1 111 105 113 139 +565 4 2 1 1 89 83 75 142 +566 4 2 1 1 19 20 47 106 +567 4 2 1 1 34 107 76 35 +568 4 2 1 1 77 10 11 46 +569 4 2 1 1 132 121 129 142 +570 4 2 1 1 72 69 61 142 +571 4 2 1 1 31 110 30 63 +572 4 2 1 1 16 15 93 50 +573 4 2 1 1 74 60 68 143 +574 4 2 1 1 104 90 98 143 +575 4 2 1 1 120 134 128 143 +576 4 2 1 1 127 36 80 37 +577 4 2 1 1 117 114 39 101 +578 4 2 1 1 87 84 21 71 +579 4 2 1 1 59 100 3 130 +580 4 2 1 1 122 14 130 53 +581 4 2 1 1 5 72 89 132 +582 4 2 1 1 26 68 134 74 +583 4 2 1 1 114 97 39 101 +584 4 2 1 1 84 67 21 71 +585 4 2 1 1 70 102 8 119 +586 4 2 1 1 45 53 51 141 +587 4 2 1 1 90 96 98 143 +588 4 2 1 1 60 66 68 143 +589 4 2 1 1 128 126 120 143 +590 4 2 1 1 27 65 99 70 +591 4 2 1 1 98 104 74 29 +592 4 2 1 1 125 95 42 43 +593 4 2 1 1 61 83 23 72 +594 4 2 1 1 91 41 102 113 +595 4 2 1 1 45 92 16 17 +596 4 2 1 1 32 62 105 31 +597 4 2 1 1 130 100 3 42 +598 4 2 1 1 4 15 55 101 +599 4 2 1 1 71 30 6 
115 +600 4 2 1 1 74 98 29 60 +601 4 2 1 1 122 13 14 53 +602 4 2 1 1 117 4 20 55 +603 4 2 1 1 6 35 87 115 +604 4 2 1 1 85 11 1 57 +605 4 2 1 1 26 68 128 134 +606 4 2 1 1 8 27 102 70 +607 4 2 1 1 1 12 131 57 +608 4 2 1 1 132 5 72 24 +609 4 2 1 1 41 119 102 113 +610 4 2 1 1 72 83 23 89 +611 4 2 1 1 14 59 130 53 +612 4 2 1 1 56 116 2 18 +613 4 2 1 1 2 116 86 33 +614 4 2 1 1 9 2 56 86 +615 4 2 1 1 85 131 1 36 +616 4 2 1 1 40 41 91 113 +617 4 2 1 1 23 83 61 22 +618 4 2 1 1 121 38 37 75 +619 4 2 1 1 87 21 6 71 +620 4 2 1 1 117 39 4 101 +621 4 2 1 1 25 26 68 128 +622 4 2 1 1 72 23 5 89 +623 4 2 1 1 59 3 14 130 +624 4 2 1 1 89 38 5 132 +625 4 2 1 1 3 17 100 59 +626 4 2 1 1 8 70 119 32 +627 4 2 1 1 119 102 8 41 +628 4 2 1 1 97 40 114 39 +629 4 2 1 1 67 22 84 21 +630 4 2 1 1 13 12 54 123 +631 4 2 1 1 90 43 120 44 +632 4 2 1 1 98 29 60 28 +633 4 2 1 1 134 7 26 74 +634 4 2 1 1 74 134 7 104 +635 4 2 1 1 104 7 74 29 +636 4 2 1 1 134 44 7 104 +637 4 2 1 1 65 99 28 27 +638 4 2 1 1 69 129 24 25 +639 4 2 1 1 73 143 142 66 +640 4 2 1 1 142 143 73 135 +641 4 2 1 1 143 133 142 126 +642 4 2 1 1 143 142 133 135 +643 4 2 1 1 99 70 139 102 +644 4 2 1 1 139 70 99 65 +645 4 2 1 1 140 105 109 139 +646 4 2 1 1 140 109 105 110 +647 4 2 1 1 143 125 124 120 +648 4 2 1 1 143 124 125 141 +649 4 2 1 1 45 136 49 50 +650 4 2 1 1 45 49 136 141 +$EndElements +$Periodic +1 +2 2 1 +Affine 1 0 0 1 0 1 0 0 0 0 1 0 0 0 0 1 +31 +5 1 +6 2 +7 3 +8 4 +21 9 +22 10 +23 11 +24 12 +25 13 +26 14 +27 15 +28 16 +29 17 +30 18 +31 19 +32 20 +60 45 +61 46 +62 47 +63 48 +64 49 +65 50 +66 51 +67 52 +68 53 +69 54 +70 55 +71 56 +72 57 +73 58 +74 59 +$EndPeriodic diff --git a/tests/firedrake/multigrid/test_adaptive_multigrid.py b/tests/firedrake/multigrid/test_adaptive_multigrid.py new file mode 100644 index 0000000000..1235c95bd5 --- /dev/null +++ b/tests/firedrake/multigrid/test_adaptive_multigrid.py @@ -0,0 +1,369 @@ +""" +Tests for AdaptiveMeshHierarchy +and AdaptiveTransferManager +""" + +import pytest +import numpy as np 
+from firedrake import * + + +@pytest.fixture(params=[2, 3]) +def amh(request): + """ + Generate AdaptiveMeshHierarchies + """ + from netgen.occ import WorkPlane, OCCGeometry, Box, Pnt + dim = request.param + if dim == 2: + wp = WorkPlane() + wp.Rectangle(1, 1) + face = wp.Face() + geo = OCCGeometry(face, dim=2) + maxh = 0.5 + else: + cube = Box(Pnt(0, 0, 0), Pnt(1, 1, 1)) + geo = OCCGeometry(cube, dim=3) + maxh = 0.5 + + ngmesh = geo.GenerateMesh(maxh=maxh) + + dparams = {"overlap_type": (DistributedMeshOverlapType.VERTEX, 1)} + base = Mesh(ngmesh, distribution_parameters=dparams) + amh_test = AdaptiveMeshHierarchy(base) + + rg = RandomGenerator(PCG64(seed=0)) + for l in range(2): + mesh = amh_test[-1] + DG = FunctionSpace(mesh, "DG", 0) + should_refine = rg.uniform(DG, 0, 1).dat.global_data + + ngmesh = mesh.netgen_mesh + if dim == 2: + els = ngmesh.Elements2D() + else: + els = ngmesh.Elements3D() + for i, el in enumerate(els): + el.refine = 1 if should_refine[i] < 0.5 else 0 + + ngmesh.Refine(adaptive=True) + mesh = Mesh(ngmesh, distribution_parameters=dparams) + amh_test.add_mesh(mesh) + return amh_test + + +@pytest.fixture +def mh_uniform(): + """ + Generate MeshHierarchy for reference + """ + from netgen.occ import WorkPlane, OCCGeometry + wp = WorkPlane() + wp.Rectangle(2, 2) + face = wp.Face() + geo = OCCGeometry(face, dim=2) + maxh = 0.5 + ngmesh = geo.GenerateMesh(maxh=maxh) + + dparams = {"overlap_type": (DistributedMeshOverlapType.VERTEX, 1)} + base1 = Mesh(ngmesh, distribution_parameters=dparams) + mh = MeshHierarchy(base1, 2) + + base2 = Mesh(ngmesh, distribution_parameters=dparams) + amh = AdaptiveMeshHierarchy(base2) + for _ in range(2): + mesh = amh[-1] + ngmesh = mesh.netgen_mesh + ngmesh.Refine() + mesh = Mesh(ngmesh, distribution_parameters=dparams) + amh.add_mesh(mesh) + return amh, mh + + +@pytest.fixture +def atm(): + """atm used in tests""" + return AdaptiveTransferManager() + + +@pytest.fixture +def tm(): + """tm used for restrict 
consistency""" + return TransferManager() + + +@pytest.mark.parallel([1, 2]) +@pytest.mark.skipnetgen +@pytest.mark.parametrize("operator", ["prolong", "inject"]) +def test_DG0(amh, atm, operator): # pylint: disable=W0621 + """ + Prolongation & Injection test for DG0 + """ + V_coarse = FunctionSpace(amh[0], "DG", 0) + V_fine = FunctionSpace(amh[-1], "DG", 0) + u_coarse = Function(V_coarse) + u_fine = Function(V_fine) + xc, *_ = SpatialCoordinate(V_coarse.mesh()) + stepc = conditional(ge(xc, 0), 1, 0) + xf, *_ = SpatialCoordinate(V_fine.mesh()) + stepf = conditional(ge(xf, 0), 1, 0) + + if operator == "prolong": + u_coarse.interpolate(stepc) + assert errornorm(stepc, u_coarse) <= 1e-12 + + atm.prolong(u_coarse, u_fine) + assert errornorm(stepf, u_fine) <= 1e-12 + if operator == "inject": + u_fine.interpolate(stepf) + assert errornorm(stepf, u_fine) <= 1e-12 + + atm.inject(u_fine, u_coarse) + assert errornorm(stepc, u_coarse) <= 1e-12 + + +@pytest.mark.parallel([1, 2]) +@pytest.mark.skipnetgen +@pytest.mark.parametrize("operator", ["prolong", "inject"]) +def test_CG1(amh, atm, operator): # pylint: disable=W0621 + """ + Prolongation & Injection test for CG1 + """ + V_coarse = FunctionSpace(amh[0], "CG", 1) + V_fine = FunctionSpace(amh[-1], "CG", 1) + u_coarse = Function(V_coarse) + u_fine = Function(V_fine) + xc, *_ = SpatialCoordinate(V_coarse.mesh()) + xf, *_ = SpatialCoordinate(V_fine.mesh()) + + if operator == "prolong": + u_coarse.interpolate(xc) + assert errornorm(xc, u_coarse) <= 1e-12 + + atm.prolong(u_coarse, u_fine) + assert errornorm(xf, u_fine) <= 1e-12 + if operator == "inject": + u_fine.interpolate(xf) + assert errornorm(xf, u_fine) <= 1e-12 + + atm.inject(u_fine, u_coarse) + assert errornorm(xc, u_coarse) <= 1e-12 + + +@pytest.mark.parallel([1, 2]) +@pytest.mark.skipnetgen +def test_restrict_consistency(mh_uniform, atm, tm): # pylint: disable=W0621 + """ + Test restriction consistency of amh with uniform refinement vs mh + """ + amh_unif, mh = 
mh_uniform + + V_coarse = FunctionSpace(amh_unif[0], "DG", 0) + V_fine = FunctionSpace(amh_unif[-1], "DG", 0) + u_coarse = Function(V_coarse) + u_fine = Function(V_fine) + xc, _ = SpatialCoordinate(V_coarse.mesh()) + + u_coarse.interpolate(xc) + atm.prolong(u_coarse, u_fine) + + rf = assemble(conj(TestFunction(V_fine)) * dx) + rc = Cofunction(V_coarse.dual()) + atm.restrict(rf, rc) + + # compare with mesh_hierarchy + xcoarse, _ = SpatialCoordinate(mh[0]) + Vcoarse = FunctionSpace(mh[0], "DG", 0) + Vfine = FunctionSpace(mh[-1], "DG", 0) + + mhuc = Function(Vcoarse) + mhuc.interpolate(xcoarse) + mhuf = Function(Vfine) + tm.prolong(mhuc, mhuf) + + mhrf = assemble(conj(TestFunction(Vfine)) * dx) + mhrc = Cofunction(Vcoarse.dual()) + + tm.restrict(mhrf, mhrc) + + assert abs( + (assemble(action(mhrc, mhuc)) - assemble(action(mhrf, mhuf))) + / assemble(action(mhrf, mhuf)) + ) <= 1e-12 + assert abs( + (assemble(action(rc, u_coarse)) - assemble(action(mhrc, mhuc))) + / assemble(action(mhrc, mhuc)) + ) <= 1e-12 + + +@pytest.mark.parallel([1, 2]) +@pytest.mark.skipnetgen +def test_restrict_CG1(amh, atm): # pylint: disable=W0621 + """ + Test restriction with CG1 + """ + V_coarse = FunctionSpace(amh[0], "CG", 1) + V_fine = FunctionSpace(amh[-1], "CG", 1) + u_coarse = Function(V_coarse) + u_fine = Function(V_fine) + xc, *_ = SpatialCoordinate(V_coarse.mesh()) + + u_coarse.interpolate(xc) + atm.prolong(u_coarse, u_fine) + + rf = assemble(conj(TestFunction(V_fine)) * dx) + rc = Cofunction(V_coarse.dual()) + atm.restrict(rf, rc) + + assert np.allclose( + assemble(action(rc, u_coarse)), + assemble(action(rf, u_fine)), + rtol=1e-12 + ) + + +@pytest.mark.parallel([1, 2]) +@pytest.mark.skipnetgen +def test_restrict_DG0(amh, atm): # pylint: disable=W0621 + """ + Test restriction with DG0 + """ + V_coarse = FunctionSpace(amh[0], "DG", 0) + V_fine = FunctionSpace(amh[-1], "DG", 0) + u_coarse = Function(V_coarse) + u_fine = Function(V_fine) + xc, *_ = SpatialCoordinate(V_coarse.mesh()) + + 
u_coarse.interpolate(xc) + atm.prolong(u_coarse, u_fine) + + rf = assemble(conj(TestFunction(V_fine)) * dx) + rc = Cofunction(V_coarse.dual()) + atm.restrict(rf, rc) + + assert np.allclose( + assemble(action(rc, u_coarse)), + assemble(action(rf, u_fine)), + rtol=1e-12 + ) + + +@pytest.mark.parallel([1, 2]) +@pytest.mark.skipnetgen +def test_mg_jacobi(amh, atm): # pylint: disable=W0621 + """ + Test multigrid with jacobi smoothers + """ + V = FunctionSpace(amh[-1], "CG", 1) + x = SpatialCoordinate(amh[-1]) + u_ex = Function(V).interpolate(sin(2 * pi * x[0]) * sin(2 * pi * x[1])) + u = Function(V) + v = TestFunction(V) + bc = DirichletBC(V, u_ex, "on_boundary") + F = inner(grad(u - u_ex), grad(v)) * dx + + params = { + "snes_type": "ksponly", + "ksp_max_it": 20, + "ksp_type": "cg", + "ksp_norm_type": "unpreconditioned", + "ksp_rtol": 1e-8, + "ksp_atol": 1e-8, + "pc_type": "mg", + "mg_levels_pc_type": "jacobi", + "mg_levels_ksp_type": "chebyshev", + "mg_levels_ksp_max_it": 2, + "mg_coarse_ksp_type": "preonly", + "mg_coarse_pc_type": "lu", + "mg_coarse_pc_factor_mat_solver_type": "mumps", + } + + problem = NonlinearVariationalProblem(F, u, bc) + solver = NonlinearVariationalSolver(problem, solver_parameters=params) + solver.set_transfer_manager(atm) + solver.solve() + assert errornorm(u_ex, u) <= 1e-8 + + +@pytest.mark.parallel([1, 2]) +@pytest.mark.skipnetgen +@pytest.mark.parametrize("params", ["jacobi", "asm", "patch"]) +def test_mg_patch(amh, atm, params): # pylint: disable=W0621 + """ + Test multigrid with patch relaxation + """ + if params == "jacobi": + solver_params = { + "mat_type": "matfree", + "ksp_type": "cg", + "pc_type": "mg", + "mg_levels": { + "ksp_type": "chebyshev", + "ksp_max_it": 1, + "pc_type": "jacobi", + }, + "mg_coarse": { + "mat_type": "aij", + "ksp_type": "preonly", + "pc_type": "lu", + }, + } + elif params == "patch": + solver_params = { + "mat_type": "matfree", + "ksp_type": "cg", + "pc_type": "mg", + "mg_levels": { + "ksp_type": "chebyshev", 
+ "ksp_max_it": 1, + "pc_type": "python", + "pc_python_type": "firedrake.PatchPC", + "patch": { + "pc_patch": { + "construct_type": "star", + "construct_dim": 0, + "sub_mat_type": "seqdense", + "dense_inverse": True, + "save_operators": True, + "precompute_element_tensors": True, + }, + "sub_ksp_type": "preonly", + "sub_pc_type": "lu", + }, + }, + "mg_coarse": { + "mat_type": "aij", + "ksp_type": "preonly", + "pc_type": "lu", + }, + } + else: + solver_params = { + "mat_type": "aij", + "ksp_type": "cg", + "pc_type": "mg", + "mg_levels": { + "ksp_type": "chebyshev", + "ksp_max_it": 1, + "pc_type": "python", + "pc_python_type": "firedrake.ASMStarPC", + "pc_star_backend": "tinyasm", + }, + "mg_coarse": {"ksp_type": "preonly", "pc_type": "lu"}, + } + + V = FunctionSpace(amh[-1], "CG", 1) + x = SpatialCoordinate(amh[-1]) + u_ex = Function(V).interpolate(sin(2 * pi * x[0]) * sin(2 * pi * x[1])) + u = Function(V) + v = TestFunction(V) + bc = DirichletBC(V, u_ex, "on_boundary") + F = inner(grad(u - u_ex), grad(v)) * dx + + problem = NonlinearVariationalProblem(F, u, bc) + solver = NonlinearVariationalSolver(problem, + solver_parameters=solver_params) + solver.set_transfer_manager(atm) + + solver.solve() + assert errornorm(u_ex, u) <= 1e-8 diff --git a/tests/firedrake/multigrid/test_embedded_transfer.py b/tests/firedrake/multigrid/test_embedded_transfer.py index 0d7cf7aec9..2ff519fa37 100644 --- a/tests/firedrake/multigrid/test_embedded_transfer.py +++ b/tests/firedrake/multigrid/test_embedded_transfer.py @@ -1,5 +1,7 @@ import pytest +import numpy from firedrake import * +from firedrake.mg.utils import get_level @pytest.fixture @@ -25,55 +27,90 @@ def degree(request): return request.param -@pytest.fixture(params=["RT", "N1curl", "CG"]) +@pytest.fixture(params=["CG", "N1curl", "RT"]) def space(request): return request.param @pytest.fixture -def V(mesh, degree, space): - return FunctionSpace(mesh, space, degree, variant="integral") +def V(mesh, space, degree): + if space == 
"CG": + return VectorFunctionSpace(mesh, space, degree, variant="integral") + else: + return FunctionSpace(mesh, space, degree, variant="integral") -@pytest.fixture(params=["Default", "Exact", "Averaging"]) -def use_averaging(request): - return request.param +@pytest.mark.parametrize("op", ["prolong", "restrict", "inject"]) +def test_transfer(op, V): + + def expr(V): + x = SpatialCoordinate(V.mesh()) + return {H1: x, HCurl: perp(x), HDiv: x}[V.ufl_element().sobolev_space] + + mh, _ = get_level(V.mesh()) + Vf = V + Vc = V.reconstruct(mh[0]) + + if op == "prolong": + uf = Function(Vf) + uc = Function(Vc) + uc.interpolate(expr(Vc)) + prolong(uc, uf) + assert errornorm(expr(Vf), uf) < 1E-13 + + elif op == "restrict": + rf = assemble(inner(expr(Vf), TestFunction(Vf))*dx) + rc = Function(Vc.dual()) + restrict(rf, rc) + expected = assemble(inner(expr(Vc), TestFunction(Vc))*dx) + assert numpy.allclose(expected.dat.data_ro, rc.dat.data_ro) + + rg = RandomGenerator(PCG64(seed=0)) + uc = rg.uniform(Vc, -1, 1) + uf = Function(Vf) + prolong(uc, uf) + + rf = rg.uniform(Vf.dual(), -1, 1) + rc = Function(Vc.dual()) + restrict(rf, rc) + + result_prolong = assemble(action(rf, uf)) + result_restrict = assemble(action(rc, uc)) + assert numpy.isclose(result_prolong, result_restrict) + + elif op == "inject": + uf = Function(Vf) + uc = Function(Vc) + uc.interpolate(expr(Vc)) + uf.interpolate(expr(Vf)) + inject(uf, uc) + assert errornorm(expr(Vc), uc) < 1E-13 @pytest.fixture -def solver_parameters(use_averaging, V): - element_name = V.ufl_element()._short_name +def solver_parameters(V): solver_parameters = { "mat_type": "aij", "snes_type": "ksponly", - # When using mass solves in the prolongation, the V-cycle is - # no longer a linear operator (because the prolongation uses - # CG which is a nonlinear operator). 
- "ksp_type": "cg" if use_averaging else "fcg", + "ksp_type": "cg", "ksp_max_it": 20, "ksp_rtol": 1e-9, "ksp_monitor_true_residual": None, "pc_type": "mg", "mg_levels": { "ksp_type": "richardson", - "ksp_norm_type": "unpreconditioned", "ksp_richardson_scale": 0.5, + "ksp_norm_type": "none", + "ksp_max_it": 1, "pc_type": "python", - "pc_python_type": "firedrake.PatchPC", - "patch_pc_patch_save_operators": True, - "patch_pc_patch_partition_of_unity": False, - "patch_pc_patch_construct_type": "star", - "patch_pc_patch_construct_dim": 0, - "patch_pc_patch_sub_mat_type": "seqdense", - "patch_sub_ksp_type": "preonly", - "patch_sub_pc_type": "lu", + "pc_python_type": "firedrake.ASMStarPC", + "pc_star_sub_sub_pc_type": "cholesky", + "pc_star_sub_sub_pc_factor_mat_solver_type": "petsc", }, - "mg_coarse_pc_type": "lu", - element_name: { - "prolongation_mass_ksp_type": "cg", - "prolongation_mass_ksp_max_it": 10, - "prolongation_mass_pc_type": "bjacobi", - "prolongation_mass_sub_pc_type": "ilu", + "mg_coarse": { + "mat_type": "aij", + "pc_type": "cholesky", + "pc_factor_mat_solver_type": "mumps", } } return solver_parameters @@ -83,10 +120,11 @@ def solver_parameters(use_averaging, V): def solver(V, space, solver_parameters): u = Function(V) v = TestFunction(V) - mesh = V.mesh() - (x, y) = SpatialCoordinate(mesh) + (x, y) = SpatialCoordinate(V.mesh()) f = as_vector([2*y*(1-x**2), -2*x*(1-y**2)]) + if u.ufl_shape == (): + f = sum(f) a = Constant(1) b = Constant(100) if space == "RT": @@ -94,28 +132,14 @@ def solver(V, space, solver_parameters): elif space == "N1curl": F = a*inner(u, v)*dx + b*inner(curl(u), curl(v))*dx - inner(f, v)*dx elif space == "CG": - F = a*inner(u, v)*dx + b*inner(grad(u), grad(v))*dx - inner(1, v)*dx + F = a*inner(u, v)*dx + b*inner(grad(u), grad(v))*dx - inner(f, v)*dx problem = NonlinearVariationalProblem(F, u) solver = NonlinearVariationalSolver(problem, solver_parameters=solver_parameters, options_prefix="") return solver 
-@pytest.mark.skipcomplexnoslate -def test_riesz(V, solver, use_averaging): - if use_averaging == "Default": - transfer = None - elif use_averaging == "Exact": - transfer = TransferManager(use_averaging=False) - else: - transfer = TransferManager(use_averaging=True) - solver.set_transfer_manager(transfer) +@pytest.mark.parallel([1, 3]) +def test_riesz(V, solver): solver.solve() - assert solver.snes.ksp.getIterationNumber() < 15 - - -@pytest.mark.parallel(nprocs=3) -@pytest.mark.skipcomplexnoslate -def test_riesz_parallel(V, solver, use_averaging): - test_riesz(V, solver, use_averaging) diff --git a/tests/firedrake/multigrid/test_grid_transfer.py b/tests/firedrake/multigrid/test_grid_transfer.py index 86af28b455..53a848fb59 100644 --- a/tests/firedrake/multigrid/test_grid_transfer.py +++ b/tests/firedrake/multigrid/test_grid_transfer.py @@ -329,7 +329,7 @@ def exact_primal_periodic(mesh, shape, degree): return expr -@pytest.mark.parallel(nprocs=3) +@pytest.mark.parallel def test_grid_transfer_periodic(periodic_hierarchy, periodic_space): degrees = [4] shape = "scalar" diff --git a/tests/firedrake/multigrid/test_hiptmair.py b/tests/firedrake/multigrid/test_hiptmair.py index 99ab52fe33..d6f0bfbca1 100644 --- a/tests/firedrake/multigrid/test_hiptmair.py +++ b/tests/firedrake/multigrid/test_hiptmair.py @@ -136,7 +136,7 @@ def run_riesz_map(V, mat_type, max_it, solver_type="gmg"): @pytest.mark.parametrize("mat_type", ["aij", "matfree"]) def test_gmg_hiptmair_hcurl(mesh_hierarchy, mat_type): mesh = mesh_hierarchy[-1] - if mesh.ufl_cell().is_simplex(): + if mesh.ufl_cell().is_simplex: family = "N1curl" max_it = 14 else: @@ -150,7 +150,7 @@ def test_gmg_hiptmair_hcurl(mesh_hierarchy, mat_type): @pytest.mark.parametrize("mat_type", ["aij", "matfree"]) def test_gmg_hiptmair_hdiv(mesh_hierarchy, mat_type): mesh = mesh_hierarchy[-1] - if mesh.ufl_cell().is_simplex(): + if mesh.ufl_cell().is_simplex: family = "N1div" max_it = 14 else: diff --git 
a/tests/firedrake/multigrid/test_netgen_gmg.py b/tests/firedrake/multigrid/test_netgen_gmg.py index d2f4efaea1..6216907800 100644 --- a/tests/firedrake/multigrid/test_netgen_gmg.py +++ b/tests/firedrake/multigrid/test_netgen_gmg.py @@ -1,102 +1,110 @@ import pytest - +import numpy from firedrake import * -def create_netgen_mesh_circle(): - from netgen.geom2d import Circle, CSG2d - geo = CSG2d() - - circle = Circle(center=(0, 0), radius=1.0, mat="mat1", bc="circle") - geo.Add(circle) - - ngmesh = geo.GenerateMesh(maxh=0.75) +@pytest.fixture(params=[(2, "occ"), (2, "spline"), (2, "csg"), (3, "occ"), (3, "csg")], + ids=lambda val: "-".join(map(str, val))) +def ngmesh(request): + dim, geo_type = request.param + maxh = 0.75 + if dim == 2: + if geo_type == "occ": + from netgen.occ import Circle, OCCGeometry + circle = Circle((0, 0), 1.0).Face() + circle.edges.name = "surface" + geo = OCCGeometry(circle, dim=2) + elif geo_type == "spline": + from netgen.geom2d import SplineGeometry + geo = SplineGeometry() + geo.AddCircle(c=(0, 0), r=1.0, bc="surface") + elif geo_type == "csg": + from netgen.geom2d import CSG2d, Circle + geo = CSG2d() + geo.Add(Circle(center=(0, 0), radius=1, bc="surface")) + else: + raise ValueError(f"Unexpected geometry backend {geo_type}") + elif dim == 3: + if geo_type == "occ": + from netgen.occ import Sphere, OCCGeometry + sphere = Sphere((0, 0, 0), 1.0) + sphere.faces.name = "surface" + geo = OCCGeometry(sphere, dim=3) + elif geo_type == "csg": + from netgen.csg import CSGeometry, Sphere, Pnt + geo = CSGeometry() + sphere = Sphere(Pnt(0, 0, 0), 1) + sphere.bc("surface") + geo.Add(sphere) + maxh = 0.5 + else: + raise ValueError(f"Unexpected geometry backend {geo_type}") + else: + raise ValueError(f"Unexpected dimension {dim}") + ngmesh = geo.GenerateMesh(maxh=maxh) return ngmesh @pytest.mark.skipcomplex @pytest.mark.skipnetgen -def test_netgen_mg_circle(): - ngmesh = create_netgen_mesh_circle() - mesh = Mesh(ngmesh) - nh = MeshHierarchy(mesh, 2, 
netgen_flags={"degree": 3}) - mesh = nh[-1] - - V = FunctionSpace(mesh, "CG", 3) - - u = TrialFunction(V) - v = TestFunction(V) - - a = inner(grad(u), grad(v))*dx - labels = [i+1 for i, name in enumerate(ngmesh.GetRegionNames(codim=1)) if name in ["circle"]] - bcs = DirichletBC(V, zero(), labels) - x, y = SpatialCoordinate(mesh) - - f = 4+0*x - L = f*v*dx - exact = (1-x**2-y**2) - - u = Function(V) - solve(a == L, u, bcs=bcs, solver_parameters={"ksp_type": "cg", - "pc_type": "mg"}) - expect = Function(V).interpolate(exact) - assert (norm(assemble(u - expect)) <= 1e-6) - - -@pytest.mark.skipcomplex -@pytest.mark.skipnetgen -def test_netgen_mg_circle_non_uniform_degree(): - ngmesh = create_netgen_mesh_circle() - mesh = Mesh(ngmesh) - nh = MeshHierarchy(mesh, 2, netgen_flags={"degree": [1, 2, 3]}) - mesh = nh[-1] - - V = FunctionSpace(mesh, "CG", 3) - - u = TrialFunction(V) - v = TestFunction(V) - - a = inner(grad(u), grad(v))*dx - labels = [i+1 for i, name in enumerate(ngmesh.GetRegionNames(codim=1)) if name in ["circle"]] - bcs = DirichletBC(V, zero(), labels) - x, y = SpatialCoordinate(mesh) - - f = 4+0*x - L = f*v*dx - exact = (1-x**2-y**2) - - u = Function(V) - solve(a == L, u, bcs=bcs, solver_parameters={"ksp_type": "cg", - "pc_type": "mg"}) - expect = Function(V).interpolate(exact) - assert (norm(assemble(u - expect)) <= 1e-6) - - -@pytest.mark.skipcomplex -@pytest.mark.skipnetgen -@pytest.mark.parallel -def test_netgen_mg_circle_parallel(): - ngmesh = create_netgen_mesh_circle() - mesh = Mesh(ngmesh) - nh = MeshHierarchy(mesh, 2, netgen_flags={"degree": 3}) - mesh = nh[-1] - - V = FunctionSpace(mesh, "CG", 3) - - u = TrialFunction(V) - v = TestFunction(V) - - a = inner(grad(u), grad(v))*dx - labels = [i+1 for i, name in enumerate(ngmesh.GetRegionNames(codim=1)) if name in ["circle"]] - bcs = DirichletBC(V, zero(), labels) - x, y = SpatialCoordinate(mesh) - - f = 4+0*x - L = f*v*dx - exact = (1-x**2-y**2) - - u = Function(V) - solve(a == L, u, bcs=bcs, 
solver_parameters={"ksp_type": "cg", - "pc_type": "mg"}) - expect = Function(V).interpolate(exact) - assert norm(assemble(u - expect)) <= 1e-6 +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("netgen_degree", [1, 3, (1, 2, 3)], ids=lambda degree: f"{degree=}") +def test_netgen_mg(ngmesh, netgen_degree): + dparams = {"overlap_type": (DistributedMeshOverlapType.VERTEX, 1)} + base = Mesh(ngmesh, distribution_parameters=dparams) + mh = MeshHierarchy(base, 2, netgen_flags={"degree": netgen_degree}) + try: + len(netgen_degree) + except TypeError: + netgen_degree = (netgen_degree,)*len(mh) + + coords_space = base.coordinates.function_space() + assert coords_space.ufl_element().degree() == 1 + assert not coords_space.finat_element.is_dg() + for m, deg in zip(mh, netgen_degree): + coords_space = m.coordinates.function_space() + assert coords_space.ufl_element().degree() == deg + assert not coords_space.finat_element.is_dg() + + errors = [] + for mesh in mh[1:]: + V = FunctionSpace(mesh, "CG", 3) + u = TrialFunction(V) + v = TestFunction(V) + + a = inner(grad(u), grad(v)) * dx + labels = [i+1 for i, name in enumerate(ngmesh.GetRegionNames(codim=1)) if name in ["surface"]] + + x = SpatialCoordinate(mesh) + uexact = 1-dot(x, x) + bcs = DirichletBC(V, 0, labels) + L = a(uexact, v) + uh = Function(V) + + uerr = uexact - uh + solve(a == L, uh, bcs=bcs, solver_parameters={ + "ksp_type": "cg", + "ksp_norm_type": "natural", + "ksp_max_it": 14, + "ksp_rtol": 1E-8, + "ksp_monitor": None, + "pc_type": "mg", + "mg_levels_pc_type": "python", + "mg_levels_pc_python_type": "firedrake.ASMStarPC", + "mg_levels_pc_star_backend": "tinyasm", + "mg_coarse_pc_type": "lu", + "mg_coarse_pc_factor_mat_solver_type": "mumps", + }) + err = assemble(a(uerr, uerr)) ** 0.5 + errors.append(err) + + if len(set(netgen_degree)) > 1: + # Just check for accuracy if we have non-uniform degree + assert errors[-1] < 6E-3 + else: + rate = -numpy.diff(numpy.log2(errors)) + if V.ufl_element().degree() == 
netgen_degree[-1]: + expected = netgen_degree[-1] + else: + expected = netgen_degree[-1] + 0.5 + assert rate[-1] > 0.9*expected diff --git a/tests/firedrake/multigrid/test_p_multigrid.py b/tests/firedrake/multigrid/test_p_multigrid.py index 8e57633214..a78727de37 100644 --- a/tests/firedrake/multigrid/test_p_multigrid.py +++ b/tests/firedrake/multigrid/test_p_multigrid.py @@ -1,4 +1,5 @@ import pytest +import numpy as np from firedrake import * @@ -27,7 +28,7 @@ def tp_mesh(request): @pytest.fixture(params=[0, 1, 2], ids=["H1", "HCurl", "HDiv"]) def tp_family(tp_mesh, request): - tdim = tp_mesh.topological_dimension() + tdim = tp_mesh.topological_dimension if tdim == 3: families = ["Q", "NCE", "NCF"] else: @@ -47,7 +48,7 @@ def mixed_family(tp_mesh, request): if request.param == 0: Vfamily = "Q" else: - tdim = tp_mesh.topological_dimension() + tdim = tp_mesh.topological_dimension Vfamily = "NCF" if tdim == 3 else "RTCF" Qfamily = "DQ" return Vfamily, Qfamily @@ -78,7 +79,7 @@ def test_prolong_basic(tp_mesh, family): """ Interpolate a constant function between low-order and high-order spaces """ from firedrake.preconditioners.pmg import prolongation_matrix_matfree - if tp_mesh.topological_dimension() == 2: + if tp_mesh.topological_dimension == 2: family = family.replace("N", "RT") fs = [FunctionSpace(tp_mesh, family, degree) for degree in (1, 2)] @@ -96,7 +97,7 @@ def test_prolong_de_rham(tp_mesh): """ from firedrake.preconditioners.pmg import prolongation_matrix_matfree - tdim = tp_mesh.topological_dimension() + tdim = tp_mesh.topological_dimension b = Constant(list(range(tdim))) if tp_mesh.extruded_periodic: expr = b @@ -177,8 +178,7 @@ def test_p_multigrid_scalar(mesh, mat_type, restrict): F = inner(grad(u), grad(v))*dx - inner(f, v)*dx relax = {"ksp_type": "chebyshev", - "ksp_monitor_true_residual": None, - "ksp_norm_type": "unpreconditioned", + "ksp_convergence_test": "skip", "ksp_max_it": 3, "pc_type": "jacobi"} @@ -188,20 +188,12 @@ def 
test_p_multigrid_scalar(mesh, mat_type, restrict): "ksp_monitor_true_residual": None, "pc_type": "python", "pc_python_type": "firedrake.PMGPC", - "pmg_pc_mg_type": "multiplicative", "pmg_mg_levels": relax, "pmg_mg_levels_transfer_mat_type": mat_type, - "pmg_mg_coarse_ksp_type": "richardson", - "pmg_mg_coarse_ksp_max_it": 1, - "pmg_mg_coarse_ksp_norm_type": "unpreconditioned", - "pmg_mg_coarse_ksp_monitor": None, + "pmg_mg_coarse_ksp_type": "preonly", "pmg_mg_coarse_pc_type": "mg", - "pmg_mg_coarse_pc_mg_type": "multiplicative", "pmg_mg_coarse_mg_levels": relax, - "pmg_mg_coarse_mg_coarse_ksp_type": "richardson", - "pmg_mg_coarse_mg_coarse_ksp_max_it": 1, - "pmg_mg_coarse_mg_coarse_ksp_norm_type": "unpreconditioned", - "pmg_mg_coarse_mg_coarse_ksp_monitor": None, + "pmg_mg_coarse_mg_coarse_ksp_type": "preonly", "pmg_mg_coarse_mg_coarse_pc_type": "gamg", "pmg_mg_coarse_mg_coarse_pc_gamg_threshold": 0} problem = NonlinearVariationalProblem(F, u, bcs, restrict=restrict) @@ -225,8 +217,6 @@ def test_p_multigrid_nonlinear_scalar(mesh, mat_type): F = inner((Constant(1.0) + u**2) * grad(u), grad(v))*dx - inner(f, v)*dx relax = {"ksp_type": "chebyshev", - "ksp_monitor_true_residual": None, - "ksp_norm_type": "unpreconditioned", "ksp_max_it": 3, "pc_type": "jacobi"} @@ -236,20 +226,12 @@ def test_p_multigrid_nonlinear_scalar(mesh, mat_type): "ksp_monitor_true_residual": None, "pc_type": "python", "pc_python_type": "firedrake.PMGPC", - "pmg_pc_mg_type": "multiplicative", "pmg_mg_levels": relax, "pmg_mg_levels_transfer_mat_type": mat_type, - "pmg_mg_coarse_ksp_type": "richardson", - "pmg_mg_coarse_ksp_max_it": 1, - "pmg_mg_coarse_ksp_norm_type": "unpreconditioned", - "pmg_mg_coarse_ksp_monitor": None, + "pmg_mg_coarse_ksp_type": "preonly", "pmg_mg_coarse_pc_type": "mg", - "pmg_mg_coarse_pc_mg_type": "multiplicative", "pmg_mg_coarse_mg_levels": relax, - "pmg_mg_coarse_mg_coarse_ksp_type": "richardson", - "pmg_mg_coarse_mg_coarse_ksp_max_it": 1, - 
"pmg_mg_coarse_mg_coarse_ksp_norm_type": "unpreconditioned", - "pmg_mg_coarse_mg_coarse_ksp_monitor": None, + "pmg_mg_coarse_mg_coarse_ksp_type": "preonly", "pmg_mg_coarse_mg_coarse_pc_type": "gamg", "pmg_mg_coarse_mg_coarse_pc_gamg_threshold": 0} problem = NonlinearVariationalProblem(F, u, bcs) @@ -295,14 +277,9 @@ def test_p_multigrid_vector(): "pc_python_type": "firedrake.PMGPC", "pmg_pc_mg_type": "full", "pmg_mg_levels_ksp_type": "chebyshev", - "pmg_mg_levels_ksp_monitor_true_residual": None, - "pmg_mg_levels_ksp_norm_type": "unpreconditioned", "pmg_mg_levels_ksp_max_it": 2, "pmg_mg_levels_pc_type": "pbjacobi", - "pmg_mg_coarse_ksp_type": "richardson", - "pmg_mg_coarse_ksp_max_it": 1, - "pmg_mg_coarse_ksp_norm_type": "unpreconditioned", - "pmg_mg_coarse_ksp_monitor": None, + "pmg_mg_coarse_ksp_type": "preonly", "pmg_mg_coarse_pc_type": "lu"} problem = NonlinearVariationalProblem(F, u, bcs) solver = NonlinearVariationalSolver(problem, solver_parameters=sp) @@ -328,16 +305,12 @@ def test_p_multigrid_mixed(mat_type): relax = {"transfer_mat_type": mat_type, "ksp_type": "chebyshev", - "ksp_monitor_true_residual": None, - "ksp_norm_type": "unpreconditioned", + "ksp_convergence_test": "skip", "ksp_max_it": 3, "pc_type": "jacobi"} coarse = {"mat_type": "aij", # This circumvents the need for AssembledPC - "ksp_type": "richardson", - "ksp_max_it": 1, - "ksp_norm_type": "unpreconditioned", - "ksp_monitor": None, + "ksp_type": "preonly", "pc_type": "cholesky", "pc_factor_shift_type": "nonzero", "pc_factor_shift_amount": 1E-10} @@ -350,7 +323,6 @@ def test_p_multigrid_mixed(mat_type): "pc_type": "python", "pc_python_type": "firedrake.PMGPC", "mat_type": mat_type, - "pmg_pc_mg_type": "multiplicative", "pmg_mg_levels": relax, "pmg_mg_coarse": coarse} @@ -424,13 +396,10 @@ def test_p_fas_scalar(): coarse = { "mat_type": "aij", "ksp_type": "preonly", - "ksp_norm_type": None, "pc_type": "cholesky"} relax = { "ksp_type": "chebyshev", - "ksp_monitor_true_residual": None, - 
"ksp_norm_type": "unpreconditioned", "pc_type": "jacobi"} pmg = { @@ -512,12 +481,10 @@ def test_p_fas_nonlinear_scalar(): coarse = { "ksp_type": "preonly", - "ksp_norm_type": None, "pc_type": "cholesky"} relax = { "ksp_type": "chebyshev", - "ksp_norm_type": "unpreconditioned", "ksp_chebyshev_esteig": "0.75,0.25,0,1", "ksp_max_it": 3, "pc_type": "jacobi"} @@ -531,7 +498,6 @@ def test_p_fas_nonlinear_scalar(): "ksp_norm_type": "unpreconditioned", "pc_type": "python", "pc_python_type": "firedrake.PMGPC", - "pmg_pc_mg_type": "multiplicative", "pmg_mg_levels": relax, "pmg_mg_levels_transfer_mat_type": mat_type, "pmg_mg_coarse": coarse} diff --git a/tests/firedrake/multigrid/test_poisson_gmg_extruded_serendipity.py b/tests/firedrake/multigrid/test_poisson_gmg_extruded_serendipity.py index 6ccf0889f8..d34c6498b7 100644 --- a/tests/firedrake/multigrid/test_poisson_gmg_extruded_serendipity.py +++ b/tests/firedrake/multigrid/test_poisson_gmg_extruded_serendipity.py @@ -2,46 +2,39 @@ import pytest -def run_poisson(): - test_params = {"snes_type": "ksponly", - "ksp_type": "preonly", - "pc_type": "mg", - "pc_mg_type": "full", - "mg_levels_ksp_type": "chebyshev", - "mg_levels_ksp_max_it": 2, - "mg_levels_pc_type": "jacobi"} - +@pytest.fixture +def mh(): N = 2 - base_msh = UnitSquareMesh(N, N, quadrilateral=True) base_mh = MeshHierarchy(base_msh, 2) - mh = ExtrudedMeshHierarchy(base_mh, height=1, base_layer=N) + return ExtrudedMeshHierarchy(base_mh, height=1, base_layer=N) - deg = 2 - msh = mh[-1] +@pytest.mark.parallel +@pytest.mark.parametrize("family,degree", [("S", 2)]) +def test_poisson_gmg(mh, family, degree): + test_params = { + "ksp_type": "cg", + "ksp_max_it": 10, + "pc_type": "mg", + "mg_levels_ksp_type": "chebyshev", + "mg_levels_ksp_max_it": 2, + "mg_levels_pc_type": "jacobi", + "mg_coarse_pc_type": "cholesky", + } - V = FunctionSpace(msh, "S", deg) + msh = mh[-1] + V = FunctionSpace(msh, family, degree) v = TestFunction(V) - u = Function(V, name="Potential") - gg = 
Function(V) - - bcs = [DirichletBC(V, gg, blah) for blah in ("on_boundary", "top", "bottom")] + u = TrialFunction(V) + uh = Function(V) - x, y, z = SpatialCoordinate(msh) - uex = x * (1 - x) * y * (1 - y) * z * (1 - z) * exp(x) - f = -div(grad(uex)) + rg = RandomGenerator(PCG64(seed=0)) + uex = rg.uniform(V, -1, 1) - F = inner(grad(u), grad(v))*dx - inner(f, v)*dx(metadata={"quadrature_degree": 2*deg}) + bcs = [DirichletBC(V, uex, sub) for sub in ("on_boundary", "top", "bottom")] + a = inner(grad(u), grad(v))*dx + L = action(a, uex) - solve(F == 0, u, bcs=bcs, solver_parameters=test_params) - - err = errornorm(uex, u) - - return err - - -@pytest.mark.skipcomplex -@pytest.mark.parallel -def test_poisson_gmg(): - assert run_poisson() < 1e-3 + solve(a == L, uh, bcs=bcs, solver_parameters=test_params) + assert errornorm(uex, uh) / norm(uex) < 1E-8 diff --git a/tests/firedrake/multigrid/test_poisson_gtmg.py b/tests/firedrake/multigrid/test_poisson_gtmg.py index f70e5c6825..a4154dd392 100644 --- a/tests/firedrake/multigrid/test_poisson_gtmg.py +++ b/tests/firedrake/multigrid/test_poisson_gtmg.py @@ -60,7 +60,7 @@ def p1_callback(): if custom_transfer: P1 = get_p1_space() V = FunctionSpace(mesh, "DGT", degree - 1) - I = assemble(Interpolate(TrialFunction(P1), V)).petscmat + I = assemble(interpolate(TrialFunction(P1), V)).petscmat R = PETSc.Mat().createTranspose(I) appctx['interpolation_matrix'] = I appctx['restriction_matrix'] = R diff --git a/tests/firedrake/multigrid/test_transfer_manager.py b/tests/firedrake/multigrid/test_transfer_manager.py index 3d89c6fc8c..4e0dde7c07 100644 --- a/tests/firedrake/multigrid/test_transfer_manager.py +++ b/tests/firedrake/multigrid/test_transfer_manager.py @@ -21,7 +21,6 @@ def mesh(hierarchy): @pytest.mark.parametrize("sub", (True, False), ids=["Z.sub(0)", "V"]) -@pytest.mark.skipcomplexnoslate def test_transfer_manager_inside_coarsen(sub, mesh): V = FunctionSpace(mesh, "N1curl", 2) Q = FunctionSpace(mesh, "P", 1) @@ -78,9 +77,7 @@ 
def test_transfer_manager_dat_version_cache(action, transfer_op, spaces): source = Function(Vsource) target = Function(Vtarget) - family = Vsource.ufl_element().family() - if complex_mode and ((family == "Discontinuous Lagrange" and transfer_op == "inject") - or family not in {"Lagrange", "Discontinuous Lagrange"}): + if complex_mode and Vsource.finat_element.is_dg() and transfer_op == "inject": with pytest.raises(NotImplementedError): op(source, target) return diff --git a/tests/firedrake/output/test_io_backward_compat.py b/tests/firedrake/output/test_io_backward_compat.py index 0960ba8a65..0542f84657 100644 --- a/tests/firedrake/output/test_io_backward_compat.py +++ b/tests/firedrake/output/test_io_backward_compat.py @@ -151,7 +151,7 @@ def _get_mesh_and_V(params): def _get_expr(V): mesh = V.mesh() - dim = mesh.geometric_dimension() + dim = mesh.geometric_dimension shape = V.value_shape if dim == 2: x, y = SpatialCoordinate(mesh) diff --git a/tests/firedrake/output/test_io_function.py b/tests/firedrake/output/test_io_function.py index 7ec2f0c900..9169b7919a 100644 --- a/tests/firedrake/output/test_io_function.py +++ b/tests/firedrake/output/test_io_function.py @@ -16,6 +16,11 @@ func_name = "f" +@pytest.fixture(autouse=True) +def autouse_garbage_cleanup(garbage_cleanup): + pass + + def _initialise_function(f, _f, method): if method == "project": getattr(f, method)(_f, solver_parameters={"ksp_type": "cg", "pc_type": "sor", "ksp_rtol": 1.e-16}) @@ -67,7 +72,7 @@ def _get_mesh(cell_type, comm): def _get_expr(V): mesh = V.mesh() - dim = mesh.geometric_dimension() + dim = mesh.geometric_dimension shape = V.value_shape if dim == 2: x, y = SpatialCoordinate(mesh) diff --git a/tests/firedrake/output/test_io_mesh.py b/tests/firedrake/output/test_io_mesh.py index de66fddce6..6fb1b44435 100644 --- a/tests/firedrake/output/test_io_mesh.py +++ b/tests/firedrake/output/test_io_mesh.py @@ -99,7 +99,15 @@ def _compute_integral(mesh): return assemble(inner(x, x) * dx) -def 
_test_io_mesh_extrusion(mesh, tmpdir, variable_layers=False): +def _test_io_mesh_extrusion(mesh, tmpdir, variable_layers=False, change_coords=False): + if change_coords: + # For extruded meshes this will discard the '_base_mesh' attribute + # for the mesh geometry (but not the topology) + new_coords = mesh.coordinates.copy(deepcopy=True) + new_coords *= 2 + mesh = Mesh(new_coords, name=mesh_name) + assert mesh._base_mesh is None + # Parameters fname = os.path.join(str(tmpdir), "test_io_mesh_extrusion_dump.h5") fname = COMM_WORLD.bcast(fname, root=0) @@ -124,7 +132,7 @@ def _test_io_mesh_extrusion(mesh, tmpdir, variable_layers=False): assert np.array_equal(mesh.topology.layers, layers) v1 = _compute_integral(mesh) assert abs(v1 - v) < 5.e-14 - if isinstance(mesh.topology, ExtrudedMeshTopology): + if isinstance(mesh.topology, ExtrudedMeshTopology) and not change_coords: assert mesh.topology._base_mesh is mesh._base_mesh.topology # Save. with CheckpointFile(fname, "w", comm=comm) as afile: @@ -138,8 +146,9 @@ def test_io_mesh_base(base_mesh, tmpdir): @pytest.mark.parallel(nprocs=3) -def test_io_mesh_uniform_extrusion(uniform_mesh, tmpdir): - _test_io_mesh_extrusion(uniform_mesh, tmpdir) +@pytest.mark.parametrize("change_coords", [False, True]) +def test_io_mesh_uniform_extrusion(uniform_mesh, change_coords, tmpdir): + _test_io_mesh_extrusion(uniform_mesh, tmpdir, change_coords=change_coords) @pytest.mark.parallel(nprocs=3) diff --git a/tests/firedrake/output/test_io_timestepping.py b/tests/firedrake/output/test_io_timestepping.py index 0aec83c62a..97d18fbd6a 100644 --- a/tests/firedrake/output/test_io_timestepping.py +++ b/tests/firedrake/output/test_io_timestepping.py @@ -49,7 +49,7 @@ def element(request): return finat.ufl.FiniteElement("Real", ufl.triangle, 0) -@pytest.mark.parallel(nprocs=3) +@pytest.mark.parallel def test_io_timestepping(element, tmpdir): filename = os.path.join(str(tmpdir), "test_io_timestepping_dump.h5") filename = COMM_WORLD.bcast(filename, 
root=0) diff --git a/tests/firedrake/regression/test_adjoint_operators.py b/tests/firedrake/regression/test_adjoint_operators.py index cc4f1ade43..57faf80477 100644 --- a/tests/firedrake/regression/test_adjoint_operators.py +++ b/tests/firedrake/regression/test_adjoint_operators.py @@ -729,7 +729,7 @@ def test_copy_function(): g = f.copy(deepcopy=True) J = assemble(g*dx) rf = ReducedFunctional(J, Control(f)) - a = assemble(Interpolate(-one, V)) + a = assemble(interpolate(-one, V)) assert np.isclose(rf(a), -J) diff --git a/tests/firedrake/regression/test_assemble.py b/tests/firedrake/regression/test_assemble.py index 94e3439c6e..9972715746 100644 --- a/tests/firedrake/regression/test_assemble.py +++ b/tests/firedrake/regression/test_assemble.py @@ -115,6 +115,76 @@ def test_mat_nest_real_block_assembler_correctly_reuses_tensor(mesh): assert A2.M is A1.M +@pytest.mark.parallel +@pytest.mark.parametrize("shape,mat_type", [("scalar", "is"), ("vector", "is"), ("mixed", "is"), ("mixed", "nest")]) +@pytest.mark.parametrize("dirichlet_bcs", [False, True]) +def test_assemble_matis(mesh, shape, mat_type, dirichlet_bcs): + if shape == "scalar": + V = FunctionSpace(mesh, "CG", 1) + elif shape == "vector": + V = VectorFunctionSpace(mesh, "CG", 1, dim=3) + elif shape == "mixed": + V = VectorFunctionSpace(mesh, "CG", 1) + Q = FunctionSpace(mesh, "CG", 1) + V = V * Q + else: + raise ValueError(f"Unrecognized shape {shape}.") + + if V.value_size == 1: + A = 1 + else: + A = as_matrix([[2, -1, 0], [-1, 2, -1], [0, -1, 2]]) + + u = TrialFunction(V) + v = TestFunction(V) + a = inner(A * grad(u), grad(v))*dx + if dirichlet_bcs: + subspaces = [V] if len(V) == 1 else [V.sub(i) for i in range(len(V))] + components = [] + for i, Vi in enumerate(subspaces): + if Vi.block_size == 1: + components.append(Vi) + else: + components.extend(Vi.sub(j) for j in range(Vi.block_size)) + + assert len(components) == V.value_size + bcs = [DirichletBC(components[i], 0, (i % 4+1, (i+2) % 4+1)) for i in 
range(len(components))] + else: + bcs = None + + aij_ref = assemble(a, bcs=bcs, mat_type="aij").petscmat + ais = assemble(a, bcs=bcs, mat_type=mat_type, sub_mat_type="is").petscmat + + aij = PETSc.Mat() + if ais.type == "nest": + blocks = [] + for i in range(len(V)): + row = [] + for j in range(len(V)): + bis = ais.getNestSubMatrix(i, j) + if i == j: + assert bis.type == "is" + bij = PETSc.Mat() + bis.convert("aij", bij) + else: + bij = bis + row.append(bij) + blocks.append(row) + anest = PETSc.Mat() + anest.createNest(blocks, + isrows=V.dof_dset.field_ises, + iscols=V.dof_dset.field_ises, + comm=ais.comm) + anest.convert("aij", aij) + else: + assert ais.type == "is" + ais.convert("aij", aij) + + aij_ref.axpy(-1, aij) + ind, iptr, values = aij_ref.getValuesCSR() + assert np.allclose(values, 0) + + def test_assemble_diagonal(mesh): V = FunctionSpace(mesh, "P", 3) u = TrialFunction(V) diff --git a/tests/firedrake/regression/test_assemble_baseform.py b/tests/firedrake/regression/test_assemble_baseform.py index 9644cbe9d7..8f76b70cb0 100644 --- a/tests/firedrake/regression/test_assemble_baseform.py +++ b/tests/firedrake/regression/test_assemble_baseform.py @@ -133,7 +133,7 @@ def test_scalar_formsum(f, scale): s2 = Constant(s2) elif scale == "Real": mesh = f.function_space().mesh() - R = FunctionSpace(mesh, "R", 0) + R = FunctionSpace(mesh.unique(), "R", 0) s1 = Function(R, val=s1) s2 = Function(R, val=s2) @@ -142,7 +142,7 @@ def test_scalar_formsum(f, scale): res2 = assemble(formsum) assert res2 == expected - mesh = f.function_space().mesh() + mesh = f.function_space().mesh().unique() R = FunctionSpace(mesh, "R", 0) tensor = Cofunction(R.dual()) diff --git a/tests/firedrake/regression/test_assign.py b/tests/firedrake/regression/test_assign.py new file mode 100644 index 0000000000..fd51a4fb77 --- /dev/null +++ b/tests/firedrake/regression/test_assign.py @@ -0,0 +1,20 @@ +from firedrake import * +import numpy as np + + +def test_single_mesh_mixed_assign(): + 
"""Assigning between functions on separately constructed but equivalent + MixedFunctionSpaces should work and preserve values.""" + mesh = UnitSquareMesh(4, 4) + V = VectorFunctionSpace(mesh, "CG", 1) + W = FunctionSpace(mesh, "CG", 1) + + z = Function(MixedFunctionSpace([V, W])) + z.subfunctions[0].assign(Constant((1.0, 2.0))) + z.subfunctions[1].assign(3.0) + + w = Function(MixedFunctionSpace([V, W])) + w.assign(z) + + assert np.allclose(w.subfunctions[0].dat.data_ro, [1.0, 2.0]) + assert np.allclose(w.subfunctions[1].dat.data_ro, 3.0) diff --git a/tests/firedrake/regression/test_auxiliary_dm.py b/tests/firedrake/regression/test_auxiliary_dm.py index a849f4b40b..a4fbb094cb 100644 --- a/tests/firedrake/regression/test_auxiliary_dm.py +++ b/tests/firedrake/regression/test_auxiliary_dm.py @@ -138,5 +138,5 @@ def test_auxiliary_dm(): # Error in L2 norm (u, v, alpha) = z.subfunctions u_exact = problem.analytical_solution(mesh) - error_L2 = errornorm(u_exact, u, 'L2') / errornorm(u_exact, Function(FunctionSpace(mesh, 'CG', 1)), 'L2') + error_L2 = errornorm(u_exact, u) / norm(u_exact) assert error_L2 < 0.02 diff --git a/tests/firedrake/regression/test_bcs.py b/tests/firedrake/regression/test_bcs.py index 4544b95cc0..9e43ba805b 100644 --- a/tests/firedrake/regression/test_bcs.py +++ b/tests/firedrake/regression/test_bcs.py @@ -423,8 +423,8 @@ def test_bcs_mixed_real(): bc = DirichletBC(V.sub(0), 0.0, 1) a = inner(u1, v0) * dx + inner(u0, v1) * dx A = assemble(a, bcs=[bc, ]) - assert np.allclose(A.M[0][1].values, [[0.00], [0.00], [0.25], [0.25]]) - assert np.allclose(A.M[1][0].values, [[0.00, 0.00, 0.25, 0.25]]) + assert np.allclose(A.M[0][1].values, [[0.00], [0.25], [0.25], [0.00]]) + assert np.allclose(A.M[1][0].values, [[0.00, 0.25, 0.25, 0.00]]) def test_bcs_mixed_real_vector(): @@ -437,8 +437,8 @@ def test_bcs_mixed_real_vector(): bc = DirichletBC(V.sub(0).sub(1), 0.0, 1) a = inner(as_vector([u1, u1]), v0) * dx + inner(u0, as_vector([v1, v1])) * dx A = assemble(a, 
bcs=[bc, ]) - assert np.allclose(A.M[0][1].values, [[[0.25], [0.], [0.25], [0.], [0.25], [0.25], [0.25], [0.25]]]) - assert np.allclose(A.M[1][0].values, [[0.25, 0., 0.25, 0., 0.25, 0.25, 0.25, 0.25]]) + assert np.allclose(A.M[0][1].values, [[[0.25], [0.], [0.25], [0.25], [0.25], [0.25], [0.25], [0.]]]) + assert np.allclose(A.M[1][0].values, [[0.25, 0., 0.25, 0.25, 0.25, 0.25, 0.25, 0.]]) def test_homogeneous_bc_residual(): diff --git a/tests/firedrake/regression/test_bddc.py b/tests/firedrake/regression/test_bddc.py index ff82094ba9..a359dff4f3 100644 --- a/tests/firedrake/regression/test_bddc.py +++ b/tests/firedrake/regression/test_bddc.py @@ -1,30 +1,40 @@ import pytest +import numpy as np +from functools import reduce from firedrake import * from firedrake.petsc import DEFAULT_DIRECT_SOLVER -def bddc_params(static_condensation): +@pytest.fixture +def rg(): + return RandomGenerator(PCG64(seed=123456789)) + + +def bddc_params(mat_type="is", cellwise=False): chol = { "pc_type": "cholesky", - "pc_factor_mat_solver_type": "petsc", - "pc_factor_mat_ordering_type": "natural", + "pc_factor_mat_solver_type": DEFAULT_DIRECT_SOLVER, } sp = { + "mat_type": mat_type, "pc_type": "python", "pc_python_type": "firedrake.BDDCPC", + "bddc_cellwise": cellwise, "bddc_pc_bddc_neumann": chol, "bddc_pc_bddc_dirichlet": chol, - "bddc_pc_bddc_coarse": DEFAULT_DIRECT_SOLVER, + "bddc_pc_bddc_coarse": chol, } return sp -def solver_parameters(static_condensation=True): - rtol = 1E-8 - atol = 1E-12 - sp_bddc = bddc_params(static_condensation) - repeated = True - if static_condensation: +def solver_parameters(cellwise=False, condense=False, variant=None, rtol=1E-10, atol=0): + mat_type = "matfree" if cellwise and variant != "fdm" else "is" + sp_bddc = bddc_params(mat_type=mat_type, cellwise=cellwise) + if variant != "fdm": + assert not condense + sp = sp_bddc + + elif condense: sp = { "pc_type": "python", "pc_python_type": "firedrake.FacetSplitPC", @@ -33,13 +43,14 @@ def 
solver_parameters(static_condensation=True): "facet_fdm_static_condensation": True, "facet_fdm_pc_use_amat": False, "facet_fdm_mat_type": "is", - "facet_fdm_mat_is_allow_repeated": repeated, + "facet_fdm_mat_is_allow_repeated": cellwise, "facet_fdm_pc_type": "fieldsplit", "facet_fdm_pc_fieldsplit_type": "symmetric_multiplicative", "facet_fdm_pc_fieldsplit_diag_use_amat": False, "facet_fdm_pc_fieldsplit_off_diag_use_amat": False, "facet_fdm_fieldsplit_ksp_type": "preonly", - "facet_fdm_fieldsplit_0_pc_type": "jacobi", + "facet_fdm_fieldsplit_0_pc_type": "bjacobi", + "facet_fdm_fieldsplit_0_pc_type_sub_pc_type": "icc", "facet_fdm_fieldsplit_1": sp_bddc, } else: @@ -47,34 +58,41 @@ def solver_parameters(static_condensation=True): "pc_type": "python", "pc_python_type": "firedrake.FDMPC", "fdm_pc_use_amat": False, - "fdm_mat_type": "is", - "fdm_mat_is_allow_repeated": repeated, + "fdm_mat_is_allow_repeated": cellwise, "fdm": sp_bddc, } + sp.update({ - "mat_type": "matfree", "ksp_type": "cg", + "ksp_max_it": 20, "ksp_norm_type": "natural", - "ksp_monitor": None, + "ksp_converged_reason": None, "ksp_rtol": rtol, "ksp_atol": atol, }) + if variant == "fdm": + sp["mat_type"] = "matfree" return sp -def solve_riesz_map(mesh, family, degree, bcs, condense): +def solve_riesz_map(rg, mesh, family, degree, variant, bcs, cellwise=False, condense=False, vector=False, threshold=None): + """Solve the riesz map for a random manufactured solution and return the + square root of the estimated condition number.""" dirichlet_ids = [] if bcs: dirichlet_ids = ["on_boundary"] if hasattr(mesh, "extruded") and mesh.extruded: dirichlet_ids.extend(["bottom", "top"]) - tdim = mesh.topological_dimension() + tdim = mesh.topological_dimension if family.endswith("E"): family = "RTCE" if tdim == 2 else "NCE" if family.endswith("F"): family = "RTCF" if tdim == 2 else "NCF" - V = FunctionSpace(mesh, family, degree, variant="fdm") + + fs = VectorFunctionSpace if vector else FunctionSpace + + V = fs(mesh, 
family, degree, variant=variant) v = TestFunction(V) u = TrialFunction(V) d = { @@ -85,46 +103,177 @@ def solve_riesz_map(mesh, family, degree, bcs, condense): formdegree = V.finat_element.formdegree if formdegree == 0: - a = inner(d(u), d(v)) * dx(degree=2*degree) + a = inner(d(u), d(v)) * dx else: - a = (inner(u, v) + inner(d(u), d(v))) * dx(degree=2*degree) + a = (inner(u, v) + inner(d(u), d(v))) * dx - rg = RandomGenerator(PCG64(seed=123456789)) u_exact = rg.uniform(V, -1, 1) - L = ufl.replace(a, {u: u_exact}) + L = replace(a, {u: u_exact}) bcs = [DirichletBC(V, u_exact, sub) for sub in dirichlet_ids] nsp = None if formdegree == 0: - nsp = VectorSpaceBasis([Function(V).interpolate(Constant(1))]) + b = np.zeros(V.value_shape) + expr = Constant(b) + basis = [] + for i in np.ndindex(V.value_shape): + b[...] = 0 + b[i] = 1 + expr.assign(b) + basis.append(Function(V).interpolate(expr)) + nsp = VectorSpaceBasis(basis) nsp.orthonormalize() + appctx = {} + if threshold is not None: + appctx["primal_markers"] = get_primal_markers(mesh, threshold=threshold) + uh = Function(V, name="solution") problem = LinearVariationalProblem(a, L, uh, bcs=bcs) - sp = solver_parameters(condense) + rtol = 1E-8 + sp = solver_parameters(cellwise=cellwise, condense=condense, variant=variant, rtol=rtol) + sp.setdefault("ksp_view_singularvalues", None) solver = LinearVariationalSolver(problem, near_nullspace=nsp, - solver_parameters=sp, - options_prefix="") + solver_parameters=sp, appctx=appctx) solver.solve() - return solver.snes.getLinearSolveIterations() + uerr = Function(V).assign(uh - u_exact) + assert (assemble(a(uerr, uerr)) / assemble(a(u_exact, u_exact))) ** 0.5 < rtol + + ew = solver.snes.ksp.computeEigenvalues() + assert min(ew) >= 1.0 + kappa = max(abs(ew)) / min(abs(ew)) + return kappa ** 0.5 + + +def tensor_mesh(x, extruded=False, **kwargs): + base = TensorRectangleMesh(x, x, quadrilateral=True, **kwargs) + if extruded: + mesh = ExtrudedMesh(base, len(x)-1, 
layer_height=np.diff(x)) + else: + mesh = base + return mesh + + +def corner_refined_mesh(nx, ratio=0.5, extruded=False, **kwargs): + t = 1-np.logspace(-nx, -1, nx, base=1/ratio) + t /= t[0] + x = np.concatenate([-t, [0], np.flip(t)]) + return tensor_mesh(x, extruded=extruded, **kwargs) + + +def cell_aspect_ratio(mesh): + """Compute the aspect ratio of each cell""" + J = Jacobian(mesh) + G = J.T * J + hs = tuple(abs(G[i, i]**0.5) for i in range(G.ufl_shape[0])) + hmax = reduce(max_value, hs) + hmin = reduce(min_value, hs) + + DG0 = FunctionSpace(mesh, "DG", 0) + ratio = Function(DG0).interpolate(hmax / hmin) + return ratio + + +def get_primal_markers(mesh, threshold=2**15): + """Cell marker for cells with high aspect ratio""" + threshold = Constant(threshold) + marker = cell_aspect_ratio(mesh) + marker.interpolate(conditional(ge(marker, threshold), 1, 0)) + return marker @pytest.fixture(params=(2, 3), ids=("square", "cube")) -def mesh(request): +def mh(request): dim = request.param nx = 4 - msh = UnitSquareMesh(nx, nx, quadrilateral=True) + base = UnitSquareMesh(nx, nx, quadrilateral=True) + mh = MeshHierarchy(base, 1) if dim == 3: - msh = ExtrudedMesh(msh, nx) - return msh + mh = ExtrudedMeshHierarchy(mh, height=1, base_layer=nx) + return mh @pytest.mark.parallel -@pytest.mark.parametrize("family", "Q") -@pytest.mark.parametrize("degree", (4,)) +@pytest.mark.parametrize("degree", range(1, 3)) +@pytest.mark.parametrize("variant", ("spectral", "fdm")) +def test_vertex_dofs(mh, variant, degree): + """Check that we extract the right number of vertex dofs from a high order Lagrange space.""" + from firedrake.preconditioners.bddc import get_restricted_dofs + mesh = mh[-1] + P1 = FunctionSpace(mesh, "Lagrange", 1, variant=variant) + V0 = FunctionSpace(mesh, "Lagrange", degree, variant=variant) + v = get_restricted_dofs(V0, "vertex") + assert v.getSizes() == P1.dof_dset.layout_vec.getSizes() + + +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("family,degree", 
[("Q", 4), ("E", 3), ("F", 3)]) @pytest.mark.parametrize("condense", (False, True)) -def test_bddc_fdm(mesh, family, degree, condense): +def test_bddc_cellwise_fdm(rg, mh, family, degree, condense): + """Test h-independence of condition number by measuring iteration counts""" + variant = "fdm" + bcs = True + sqrt_kappa = [solve_riesz_map(rg, m, family, degree, variant, bcs, cellwise=True, condense=condense) for m in mh] + assert (np.diff(sqrt_kappa) <= 0.1).all(), str(sqrt_kappa) + + +@pytest.mark.skipcomplex # max_value does not work in complex mode +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("family,degree", [("Q", 4)]) +def test_bddc_cellwise_high_aspect_ratio(rg, family, degree): + """Test that marking high aspect ratio cells leads to robust iteration counts""" + variant = "fdm" + bcs = True + mh = [corner_refined_mesh(nx) for nx in (10, 12)] + # For these meshes it is better to set adaptive BDDC parameters, + # but here we just test the appctx["primal_markers"] interface + sqrt_kappa = [solve_riesz_map(rg, m, family, degree, variant, bcs, cellwise=True, threshold=2**6) for m in mh] + assert (np.diff(sqrt_kappa) <= 0.1).all(), str(sqrt_kappa) + + +@pytest.mark.parallel +@pytest.mark.parametrize("family,degree", [("Q", 4)]) +@pytest.mark.parametrize("vector", (False, True), ids=("scalar", "vector")) +def test_bddc_aij_quad(rg, mh, family, degree, vector): + """Test h-dependence of condition number by measuring iteration counts""" + variant = None + bcs = True + sqrt_kappa = [solve_riesz_map(rg, m, family, degree, variant, bcs, vector=vector) for m in mh] + assert (np.diff(sqrt_kappa) <= 0.5).all(), str(sqrt_kappa) + + +@pytest.mark.parallel +@pytest.mark.parametrize("family,degree,cellwise", [("CG", 3, False), ("CG", 3, True), ("N1curl", 3, False), ("N1div", 3, False)]) +def test_bddc_aij_simplex(rg, family, degree, cellwise): + """Test h-dependence of condition number by measuring iteration counts""" + variant = None bcs = True - tdim = 
mesh.topological_dimension() - expected = 7 if tdim == 2 else 11 - assert solve_riesz_map(mesh, family, degree, bcs, condense) <= expected + base = UnitCubeMesh(2, 2, 2) + meshes = MeshHierarchy(base, 2) + sqrt_kappa = [solve_riesz_map(rg, m, family, degree, variant, bcs, cellwise=cellwise) for m in meshes] + assert (np.diff(sqrt_kappa) <= 0.5).all(), str(sqrt_kappa) + + +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("cellwise", (True, False)) +@pytest.mark.parametrize("local_mat_type", ("aij", "matfree")) +def test_create_matis(local_mat_type, cellwise): + from firedrake.preconditioners.bddc import create_matis + mesh = UnitSquareMesh(4, 4) + V = FunctionSpace(mesh, "CG", 1) + a = inner(grad(TrialFunction(V)), grad(TestFunction(V)))*dx + A = assemble(a, mat_type="matfree").petscmat + + A, assembler = create_matis(A, local_mat_type, cellwise=cellwise) + B = assemble(a, mat_type=local_mat_type).petscmat + if local_mat_type == "matfree": + Ax, x = A.createVecs() + Bx, _ = B.createVecs() + x.setRandom() + A.mult(x, Ax) + B.mult(x, Bx) + assert np.allclose(Ax.array, Bx.array) + else: + A.convert("aij") + B.axpy(-1, A) + assert np.isclose(B.norm(PETSc.NormType.FROBENIUS), 0) diff --git a/tests/firedrake/regression/test_bubble.py b/tests/firedrake/regression/test_bubble.py index d77d24d495..e98be8cd35 100644 --- a/tests/firedrake/regression/test_bubble.py +++ b/tests/firedrake/regression/test_bubble.py @@ -44,6 +44,5 @@ def test_BDFM(): a = out.dat.data a.sort() assert (abs(a[1:7]) < 1e-12).all() - assert abs(a[0] + 6.75) < 1e-12 - assert abs(a[7] - 6.75) < 1e-12 - assert abs(a[8] - 13.5) < 1e-12 + assert abs(a[7] + a[0]) < 1e-12 + assert abs(a[8] + a[0]) < 1e-12 diff --git a/tests/firedrake/regression/test_change_coordinates.py b/tests/firedrake/regression/test_change_coordinates.py index a712ffd6d5..39660d8ce4 100644 --- a/tests/firedrake/regression/test_change_coordinates.py +++ b/tests/firedrake/regression/test_change_coordinates.py @@ -13,7 +13,7 @@ def 
test_immerse_1d(dim): m = Mesh(new_coords) - assert m.geometric_dimension() == dim + assert m.geometric_dimension == dim def test_immerse_2d(): @@ -23,7 +23,7 @@ def test_immerse_2d(): m = Mesh(new_coords) - assert m.geometric_dimension() == 3 + assert m.geometric_dimension == 3 def test_project_2d(): @@ -33,7 +33,7 @@ def test_project_2d(): m = Mesh(new_coords) - assert m.geometric_dimension() == 1 + assert m.geometric_dimension == 1 def test_immerse_extruded(): @@ -44,4 +44,22 @@ def test_immerse_extruded(): m = Mesh(new_coords) - assert m.geometric_dimension() == 3 + assert m.geometric_dimension == 3 + + +def test_relabeled_mesh_preserves_coord_changes(): + orig_mesh = UnitSquareMesh(3, 3) + + high_order_space = VectorFunctionSpace(orig_mesh, "CG", 3) + high_order_coords = Function(high_order_space).interpolate(orig_mesh.coordinates) + high_order_mesh = Mesh(high_order_coords) + + x, _ = SpatialCoordinate(high_order_mesh) + marker_space = FunctionSpace(high_order_mesh, "DG", 0) + marker = Function(marker_space).interpolate(conditional(x > 0.5, 1., 0.)) + relabeled_mesh = RelabeledMesh(high_order_mesh, [marker], [666]) + + expected = high_order_mesh.coordinates.dat.data_ro + actual = relabeled_mesh.coordinates.dat.data_ro + assert actual.shape == expected.shape + assert (actual == expected).all() diff --git a/tests/firedrake/regression/test_constant.py b/tests/firedrake/regression/test_constant.py index 3dd40ed068..0c89c85b73 100644 --- a/tests/firedrake/regression/test_constant.py +++ b/tests/firedrake/regression/test_constant.py @@ -287,7 +287,7 @@ def test_constant_ufl2unicode(): _ = ufl2unicode(dFda) _ = ufl2unicode(dFdb) - dFda_du = derivative(F, u=a, du=ufl.classes.IntValue(1)) - dFdb_du = derivative(F, u=b, du=ufl.classes.IntValue(1)) + dFda_du = derivative(F, u=a, du=IntValue(1)) + dFdb_du = derivative(F, u=b, du=IntValue(1)) _ = ufl2unicode(dFda_du) _ = ufl2unicode(dFdb_du) diff --git a/tests/firedrake/regression/test_covariance_operator.py 
b/tests/firedrake/regression/test_covariance_operator.py new file mode 100644 index 0000000000..c8bb09b87f --- /dev/null +++ b/tests/firedrake/regression/test_covariance_operator.py @@ -0,0 +1,463 @@ +import pytest +import numpy as np +from scipy.sparse import csr_array +import petsctools +from firedrake import * +from firedrake.adjoint import ( + WhiteNoiseGenerator, PyOP2NoiseBackend, PetscNoiseBackend, + VOMNoiseBackend, AutoregressiveCovariance, MixedCovarianceOperator, + CovarianceMat) + + +def petsc2numpy_vec(petsc_vec): + """Allgather a PETSc.Vec.""" + gvec = petsc_vec + gather, lvec = PETSc.Scatter().toAll(gvec) + gather(gvec, lvec, addv=PETSc.InsertMode.INSERT_VALUES) + return lvec.array_r.copy() + + +def petsc2numpy_mat(petsc_mat): + """Allgather a PETSc.Mat.""" + comm = petsc_mat.getComm() + local_mat = petsc_mat.getRedundantMatrix( + comm.size, PETSc.COMM_SELF) + return csr_array( + local_mat.getValuesCSR()[::-1], + shape=local_mat.getSize() + ).todense() + + +@pytest.fixture +def rng(): + return RandomGenerator(PCG64(seed=13)) + + +@pytest.mark.skipcomplex +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("degree", (1, 2), ids=["degree1", "degree2"]) +@pytest.mark.parametrize("dim", (0, 2, (2, 2)), ids=["scalar", "vec2", "tensor22"]) +@pytest.mark.parametrize("family", ("CG", "DG")) +@pytest.mark.parametrize("mesh_type", ("interval", "square")) +@pytest.mark.parametrize("backend_type", (PyOP2NoiseBackend, PetscNoiseBackend), ids=("pyop2", "petsc")) +def test_white_noise(family, degree, mesh_type, dim, backend_type, rng, garbage_cleanup): + """Test that white noise generator converges to a mass matrix covariance. 
+ """ + + nx = 10 + # Mesh dimension + if mesh_type == 'interval': + mesh = UnitIntervalMesh(nx) + elif mesh_type == 'square': + mesh = UnitSquareMesh(nx, nx) + + # Variable rank + if not isinstance(dim, int): + V = TensorFunctionSpace(mesh, family, degree, shape=dim) + elif dim > 0: + V = VectorFunctionSpace(mesh, family, degree, dim=dim) + else: + V = FunctionSpace(mesh, family, degree) + + # Finite element white noise has mass matrix covariance + M = inner(TrialFunction(V), TestFunction(V))*dx + covmat = petsc2numpy_mat( + assemble(M, mat_type='aij').petscmat) + + generator = WhiteNoiseGenerator( + V, backend=backend_type(V, rng=rng)) + + # Test convergence as sample size increases + nsamples = [50, 100, 200, 400, 800] + + samples = np.empty((V.dim(), nsamples[-1])) + for i in range(nsamples[-1]): + with generator.sample().dat.vec_ro as bv: + samples[:, i] = petsc2numpy_vec(bv) + + covariances = [np.cov(samples[:, :ns]) for ns in nsamples] + + # Measured covariance matrix should converge at a rate of sqrt(n). + # The number of samples is fairly small to keep the test cost + # lower so we only test the mean rate to a low tolerance. + + errors = [np.linalg.norm(cov-covmat) for cov in covariances] + rate = -np.diff(np.log(errors))/np.diff(np.log(nsamples)) + + assert np.mean(rate) > 0.4 + + +@pytest.mark.skipcomplex +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("dim", (0, 2, (2, 2)), ids=["scalar", "vec2", "tensor22"]) +@pytest.mark.parametrize("mesh_type", ("interval", "square")) +def test_vom_white_noise(dim, mesh_type, rng): + """Test that white noise generator converges to a mass matrix covariance. 
+ """ + + nx = 10 + nv = 10 + np.random.seed(13) + # Mesh dimension + if mesh_type == 'interval': + mesh = UnitIntervalMesh(nx) + points = np.random.random_sample((nv, 1)) + elif mesh_type == 'square': + mesh = UnitSquareMesh(nx, nx) + points = np.random.random_sample((nv, 2)) + + vom = VertexOnlyMesh(mesh, points) + + # Variable rank + if not isinstance(dim, int): + V = TensorFunctionSpace(vom, "DG", 0, shape=dim) + elif dim > 0: + V = VectorFunctionSpace(vom, "DG", 0, dim=dim) + else: + V = FunctionSpace(vom, "DG", 0) + + # Finite element white noise has mass matrix covariance + M = inner(TrialFunction(V), TestFunction(V))*dx + covmat = petsc2numpy_mat( + assemble(M, mat_type='aij').petscmat) + + backend = VOMNoiseBackend(V, rng) + generator = WhiteNoiseGenerator(V, backend=backend) + + # Test convergence as sample size increases + nsamples = [50, 100, 200, 400, 800] + + samples = np.empty((V.dim(), nsamples[-1])) + for i in range(nsamples[-1]): + with generator.sample().dat.vec_ro as bv: + samples[:, i] = petsc2numpy_vec(bv) + + covariances = [np.cov(samples[:, :ns]) for ns in nsamples] + + # Measured covariance matrix should converge at a rate of sqrt(n). + # The number of samples is fairly small to keep the test cost + # lower so we only test the mean rate to a low tolerance. + + errors = [np.linalg.norm(cov-covmat) for cov in covariances] + rate = -np.diff(np.log(errors))/np.diff(np.log(nsamples)) + + assert np.mean(rate) > 0.4 + + +@pytest.mark.skipcomplex +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("m", (0, 2, 4)) +@pytest.mark.parametrize("dim", (0, 2), ids=["scalar", "vector2"]) +@pytest.mark.parametrize("family", ("CG", "DG")) +@pytest.mark.parametrize("mesh_type", ("interval", "square")) +def test_covariance_inverse_action(m, family, mesh_type, dim): + """Test that covariance operator action and inverse are opposites. 
+ """ + + nx = 20 + if mesh_type == 'interval': + mesh = PeriodicUnitIntervalMesh(nx) + x, = SpatialCoordinate(mesh) + wexpr = cos(2*pi*x) + elif mesh_type == 'square': + mesh = PeriodicUnitSquareMesh(nx, nx) + x, y = SpatialCoordinate(mesh) + wexpr = cos(2*pi*x)*cos(4*pi*y) + elif mesh_type == 'cube': + mesh = PeriodicUnitCubeMesh(nx, nx, nx) + x, y, z = SpatialCoordinate(mesh) + wexpr = cos(2*pi*x)*cos(4*pi*y)*cos(pi*z) + if dim > 0: + V = VectorFunctionSpace(mesh, family, 1, dim=dim) + wexpr = as_vector([-1**(j+1)*wexpr for j in range(dim)]) + else: + V = FunctionSpace(mesh, family, 1) + + L = 0.1 + sigma = 0.9 + + solver_parameters = { + 'ksp_type': 'preonly', + 'pc_type': 'lu', + 'pc_factor_mat_solver_type': 'mumps' + } + + form = 'IP' if family == 'DG' else 'CG' + + B = AutoregressiveCovariance( + V, L, sigma, m, form=form, + solver_parameters=solver_parameters, + options_prefix="") + + w = Function(V).project(wexpr) + wcheck = B.apply_action(B.apply_inverse(w)) + + tol = 1e-10 + + assert errornorm(w, wcheck) < tol + + +@pytest.mark.skipcomplex +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("m", (0, 2, 4)) +def test_covariance_inverse_action_hdiv(m): + """Test that covariance operator action and inverse are opposites + for hdiv spaces. 
+ """ + + nx = 20 + mesh = PeriodicUnitSquareMesh(nx, nx) + x, y = SpatialCoordinate(mesh) + wexpr = cos(2*pi*x)*cos(4*pi*x) + + V = FunctionSpace(mesh, "BDM", 1) + wexpr = as_vector([-1**(j+1)*wexpr for j in range(2)]) + + L = 0.1 + sigma = 0.9 + + solver_parameters = { + 'ksp_type': 'preonly', + 'pc_type': 'lu', + 'pc_factor_mat_solver_type': 'mumps' + } + + B = AutoregressiveCovariance( + V, L, sigma, m, form='IP', + solver_parameters=solver_parameters, + options_prefix="") + + w = Function(V).project(wexpr) + wcheck = B.apply_action(B.apply_inverse(w)) + + tol = 1e-8 + + assert errornorm(w, wcheck) < tol + + +@pytest.mark.skipcomplex +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("m", (0, 2, 4)) +@pytest.mark.parametrize("family", ("CG", "DG")) +@pytest.mark.parametrize("operation", ("action", "inverse")) +def test_covariance_mat(m, family, operation): + """Test that covariance mat and pc apply correct and opposite actions. + """ + nx = 20 + L = 0.2 + sigma = 0.9 + + mesh = UnitIntervalMesh(nx) + coords, = SpatialCoordinate(mesh) + + V = FunctionSpace(mesh, family, 1) + + form = 'IP' if family == 'DG' else 'CG' + + B = AutoregressiveCovariance(V, L, sigma, m, form=form) + + mat = CovarianceMat(B, operation=operation) + + expr = 2*pi*coords + + if operation == 'action': + x = Function(V).project(expr).riesz_representation() + y = Function(V) + xcheck = x.copy(deepcopy=True) + ycheck = y.copy(deepcopy=True) + + B.apply_action(xcheck, tensor=ycheck) + + elif operation == 'inverse': + x = Function(V).project(expr) + y = Function(V.dual()) + xcheck = x.copy(deepcopy=True) + ycheck = y.copy(deepcopy=True) + + B.apply_inverse(xcheck, tensor=ycheck) + + with x.dat.vec as xv, y.dat.vec as yv: + mat.mult(xv, yv) + + # flip to primal space to calculate norms + if operation == 'inverse': + y = y.riesz_representation() + ycheck = ycheck.riesz_representation() + + assert errornorm(ycheck, y)/norm(ycheck) < 1e-12 + + if operation == 'inverse': + y = 
y.riesz_representation() + ycheck = ycheck.riesz_representation() + + ksp = PETSc.KSP().create() + ksp.setOperators(mat) + + tol = 1e-8 + + petsctools.set_from_options( + ksp, options_prefix=str(operation), + parameters={ + 'ksp_monitor': None, + 'ksp_type': 'richardson', + 'ksp_max_it': 2, + 'ksp_rtol': tol, + 'pc_type': 'python', + 'pc_python_type': 'firedrake.CovariancePC', + } + ) + x.zero() + + with x.dat.vec as xv, y.dat.vec as yv: + with petsctools.inserted_options(ksp): + ksp.solve(yv, xv) + + # CovarianceOperator operations should + # be exact inverses of each other. + assert ksp.its == 1 + + if operation == 'action': + x = x.riesz_representation() + xcheck = xcheck.riesz_representation() + + assert errornorm(xcheck, x)/norm(xcheck) < 10*tol + + +@pytest.mark.skipcomplex +@pytest.mark.parametrize("operation", ("action", "inverse")) +def test_mixed_covariance(operation): + """Test that covariance mat and pc apply correct and opposite actions. + """ + nx = 20 + L = 0.2 + sigma = 0.9 + + mesh = UnitIntervalMesh(nx) + coords, = SpatialCoordinate(mesh) + + V0 = FunctionSpace(mesh, "CG", 1) + V1 = FunctionSpace(mesh, "DG", 1) + W = V0*V1 + + Bc = AutoregressiveCovariance(V0, L, sigma, m=2, form="CG") + Bd = AutoregressiveCovariance(V1, 2*L, 2*sigma, m=2, form="IP") + + B = MixedCovarianceOperator(W, [Bc, Bd]) + + mat = CovarianceMat(B, operation=operation) + + expr_c = sin(2*pi*coords) + expr_d = cos(1*pi*coords) + + if operation == 'action': + x = Function(W) + y = Function(W) + + x.subfunctions[0].project(expr_c) + x.subfunctions[1].project(expr_d) + x = x.riesz_representation() + + xcheck = x.copy(deepcopy=True) + ycheck = y.copy(deepcopy=True) + + B.apply_action(xcheck, tensor=ycheck) + + elif operation == 'inverse': + x = Function(W) + y = Function(W.dual()) + + x.subfunctions[0].project(expr_c) + x.subfunctions[1].project(expr_d) + + xcheck = x.copy(deepcopy=True) + ycheck = y.copy(deepcopy=True) + + B.apply_inverse(xcheck, tensor=ycheck) + + with 
x.dat.vec as xv, y.dat.vec as yv: + mat.mult(xv, yv) + + # flip to primal space to calculate norms + if operation == 'inverse': + y = y.riesz_representation() + ycheck = ycheck.riesz_representation() + + assert errornorm(ycheck, y)/norm(ycheck) < 1e-12 + + if operation == 'inverse': + y = y.riesz_representation() + ycheck = ycheck.riesz_representation() + + ksp = PETSc.KSP().create() + ksp.setOperators(mat) + + tol = 1e-8 + + petsctools.set_from_options( + ksp, options_prefix=str(operation), + parameters={ + 'ksp_monitor': None, + 'ksp_type': 'richardson', + 'ksp_max_it': 2, + 'ksp_rtol': tol, + 'pc_type': 'python', + 'pc_python_type': 'firedrake.CovariancePC', + } + ) + x.zero() + + with x.dat.vec as xv, y.dat.vec as yv: + with petsctools.inserted_options(ksp): + ksp.solve(yv, xv) + + # CovarianceOperator operations should + # be exact inverses of each other. + assert ksp.its == 1 + + if operation == 'action': + x = x.riesz_representation() + xcheck = xcheck.riesz_representation() + + assert errornorm(xcheck, x)/norm(xcheck) < 10*tol + + +@pytest.mark.skipcomplex +@pytest.mark.parametrize("family", ("CG", "DG")) +def test_diffusion_form(family): + """Test that the provided diffusion forms converge to a known solution at the expected rate. 
+ """ + from firedrake.adjoint.covariance_operator import diffusion_form + + def poisson_error(mesh, family): + V = FunctionSpace(mesh, family, 1) + x, y = SpatialCoordinate(mesh) + + f = Function(V).interpolate((1+8*pi*pi)*cos(x*pi*2)*cos(y*pi*2)) + uexact = cos(x*pi*2)*cos(y*pi*2) + + nu = Constant(1) + u = TrialFunction(V) + v = TestFunction(V) + + formulation = AutoregressiveCovariance.DiffusionForm( + "CG" if family == "CG" else "IP") + + a = diffusion_form(u, v, nu, formulation) + L = inner(f, v)*dx + + u = Function(V) + solve(a == L, u) + + return errornorm(uexact, u) + + base_nx = 16 + nrefs = 3 + base_mesh = UnitSquareMesh(base_nx, base_nx) + mh = MeshHierarchy(base_mesh, nrefs) + + errors = [poisson_error(m, family) for m in mh] + + # second order convergence + nxs = [base_nx*(2**n) for n in range(nrefs+1)] + rate = - np.diff(np.log(errors))/np.diff(np.log(nxs)) + assert (rate > 1.9).all() diff --git a/tests/firedrake/regression/test_cross_mesh_non_lagrange.py b/tests/firedrake/regression/test_cross_mesh_non_lagrange.py new file mode 100644 index 0000000000..ac0ea40ecb --- /dev/null +++ b/tests/firedrake/regression/test_cross_mesh_non_lagrange.py @@ -0,0 +1,174 @@ +from firedrake import * +import pytest +import numpy as np +from functools import partial + + +def mat_equals(a, b) -> bool: + """Check that two Matrices are equal.""" + a = a.petscmat.copy() + a.axpy(-1.0, b.petscmat) + return a.norm(norm_type=PETSc.NormType.NORM_FROBENIUS) < 1e-14 + + +def fs_shape(V): + shape = V.ufl_function_space().value_shape + if len(shape) == 0: + return FunctionSpace + elif len(shape) == 1: + return partial(VectorFunctionSpace, dim=shape[0]) + elif len(shape) == 2: + return partial(TensorFunctionSpace, shape=shape) + else: + raise ValueError("Invalid function space shape") + + +@pytest.fixture(params=[("RT", 1), ("RT", 2), ("BDM", 1), ("BDM", 2), ("BDFM", 2), + ("HHJ", 0), ("HHJ", 2), ("N1curl", 1), ("N1curl", 2), + ("N2curl", 1), ("N2curl", 2), ("GLS", 1), ("GLS", 2), 
+ ("GLS2", 2), ("Regge", 0), ("Regge", 2)], + ids=lambda x: f"{x[0]}_{x[1]}") +def V(request): + element, degree = request.param + mesh = UnitSquareMesh(16, 16) + return FunctionSpace(mesh, element, degree) + + +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("rank", [1, 2]) +def test_cross_mesh(V, rank): + mesh1 = UnitSquareMesh(5, 5) + mesh2 = V.mesh() + x, y = SpatialCoordinate(mesh1) + x1, y1 = SpatialCoordinate(mesh2) + + shape = V.ufl_function_space().value_shape + if len(shape) == 0: + fs_type = FunctionSpace + expr1 = x * x + y * y + expr2 = x1 * x1 + y1 * y1 + elif len(shape) == 1: + fs_type = partial(VectorFunctionSpace, dim=shape[0]) + expr1 = as_vector([x, y]) + expr2 = as_vector([x1, y1]) + elif len(shape) == 2: + fs_type = partial(TensorFunctionSpace, shape=shape) + expr1 = as_tensor([[x, x*y], [x*y, y]]) + expr2 = as_tensor([[x1, x1*y1], [x1*y1, y1]]) + else: + raise ValueError("Unsupported target space shape") + + V_source = fs_type(mesh1, "CG", 2) + f_source = Function(V_source).interpolate(expr1) + f_direct = Function(V).interpolate(expr2) + + Q = V.quadrature_space() + + if rank == 2: + # Assemble the operator + I1 = interpolate(TrialFunction(V_source), Q) # V_source x Q_target^* -> R + I2 = interpolate(TrialFunction(Q), V) # Q_target x V^* -> R + I_manual = assemble(action(I2, I1)) # V_source x V^* -> R + assert I_manual.arguments() == (TestFunction(V.dual()), TrialFunction(V_source)) + # Direct assembly + I_direct = assemble(interpolate(TrialFunction(V_source), V)) # V_source + assert I_direct.arguments() == (TestFunction(V.dual()), TrialFunction(V_source)) + assert mat_equals(I_manual, I_direct) + + f_interpolated_manual = assemble(action(I_manual, f_source)) + assert np.allclose(f_interpolated_manual.dat.data_ro, f_direct.dat.data_ro) + f_interpolated_direct = assemble(action(I_direct, f_source)) + assert np.allclose(f_interpolated_direct.dat.data_ro, f_direct.dat.data_ro) + elif rank == 1: + # Interp V_source -> Q + I1 = 
interpolate(f_source, Q) # SameMesh + f_quadrature = assemble(I1) + # Interp Q -> V + I2 = interpolate(f_quadrature, V) # CrossMesh + f_interpolated_manual = assemble(I2) + assert f_interpolated_manual.function_space() == V + assert np.allclose(f_interpolated_manual.dat.data_ro, f_direct.dat.data_ro) + + f_interpolated_direct = assemble(interpolate(f_source, V)) + assert f_interpolated_direct.function_space() == V + assert np.allclose(f_interpolated_direct.dat.data_ro, f_direct.dat.data_ro) + + +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("rank", [0, 1, 2]) +def test_cross_mesh_adjoint(V, rank): + # Can already do Lagrange -> RT adjoint + # V^* -> Q^* -> V_target^* + name = V.ufl_element()._short_name + deg = V.ufl_element().degree() + if name in ["N1curl", "GLS", "RT"] and deg == 1: + exact = False + elif name in ["Regge", "HHJ"] and deg == 0: + exact = False + else: + exact = True + + mesh1 = UnitSquareMesh(2, 2) + x1 = SpatialCoordinate(mesh1) + V_target = fs_shape(V)(mesh1, "CG", 1) + + mesh2 = V.mesh() + x2 = SpatialCoordinate(mesh2) + + if len(V.value_shape) > 1: + expr = outer(x2, x2) + target_expr = outer(x1, x1) + if V.ufl_element().mapping() == "covariant contravariant Piola": + expr = dev(expr) + target_expr = dev(target_expr) + else: + expr = x2 + target_expr = x1 + + oneform_V = inner(expr, TestFunction(V)) * dx # V^* + cofunc_Vtarget_direct = assemble(inner(target_expr, TestFunction(V_target)) * dx) + + if exact: + def close(x, y): + if rank == 0: + return np.isclose(x, y) + else: + return np.allclose(x, y) + else: + def close(x, y): + return np.linalg.norm(x - y) < 0.003 + + Q = V.quadrature_space() + + if rank == 2: + # Assemble the operator + I1 = interpolate(TestFunction(Q), V) # V^* x Q -> R + I2 = interpolate(TestFunction(V_target), Q) # Q^* x V_target -> R + I_manual = assemble(action(I2, I1)) # V^* x V_target -> R + assert I_manual.arguments() == (TestFunction(V_target), TrialFunction(V.dual())) + # Direct assembly + I_direct = 
assemble(interpolate(TestFunction(V_target), V)) # V^* x V_target -> R + assert I_direct.arguments() == (TestFunction(V_target), TrialFunction(V.dual())) + assert mat_equals(I_manual, I_direct) + + cofunc_Vtarget_manual = assemble(action(I_manual, oneform_V)) + assert close(cofunc_Vtarget_manual.dat.data_ro, cofunc_Vtarget_direct.dat.data_ro) + + cofunc_Vtarget = assemble(action(I_direct, oneform_V)) + assert close(cofunc_Vtarget.dat.data_ro, cofunc_Vtarget_direct.dat.data_ro) + elif rank == 1: + # Interp V^* -> Q^* + I1_adj = interpolate(TestFunction(Q), oneform_V) # SameMesh + cofunc_Q = assemble(I1_adj) + + # Interp Q^* -> V_target^* + I2_adj = interpolate(TestFunction(V_target), cofunc_Q) # CrossMesh + cofunc_Vtarget_manual = assemble(I2_adj) + assert close(cofunc_Vtarget_manual.dat.data_ro, cofunc_Vtarget_direct.dat.data_ro) + + cofunc_Vtarget = assemble(interpolate(TestFunction(V_target), oneform_V)) # V^* -> V_target^* + assert close(cofunc_Vtarget.dat.data_ro, cofunc_Vtarget_direct.dat.data_ro) + elif rank == 0: + res = assemble(interpolate(target_expr, oneform_V)) + actual = assemble(inner(expr, expr) * dx) + assert close(res, actual) diff --git a/tests/firedrake/regression/test_eigensolver.py b/tests/firedrake/regression/test_eigensolver.py index f4065083bd..1f34a3e72d 100644 --- a/tests/firedrake/regression/test_eigensolver.py +++ b/tests/firedrake/regression/test_eigensolver.py @@ -20,10 +20,13 @@ def evals(n, degree=1, mesh=None, restrict=False): bc = DirichletBC(V, 0.0, "on_boundary") eigenprob = LinearEigenproblem(a, bcs=bc, bc_shift=-6666., restrict=restrict) - # Create corresponding eigensolver, looking for n eigenvalues - eigensolver = LinearEigensolver( - eigenprob, n, solver_parameters={"eps_largest_real": None} - ) + # Create corresponding eigensolver, looking for n eigenvalues close to 0 + # We use shift-and-invert as spectral transform (SLEPc's default is shift) + solver_parameters = { + "eps_target": 0, + "st_type": "sinvert", + } + 
eigensolver = LinearEigensolver(eigenprob, n, solver_parameters=solver_parameters) ncov = eigensolver.solve() # boffi solns @@ -67,8 +70,12 @@ def poisson_eigenvalue_2d(i): ep = LinearEigenproblem(inner(grad(u), grad(v)) * dx, bcs=bc, bc_shift=666.0) - es = LinearEigensolver(ep, 1, solver_parameters={"eps_gen_hermitian": None, - "eps_largest_real": None}) + solver_parameters = { + "eps_gen_hermitian": None, + "eps_target": 0, + "st_type": "sinvert", + } + es = LinearEigensolver(ep, 1, solver_parameters=solver_parameters) es.solve() return es.eigenvalue(0)-2.0 @@ -77,7 +84,7 @@ def poisson_eigenvalue_2d(i): @pytest.mark.skipslepc def test_evals_2d(): """2D Eigenvalue convergence test. As with Boffi, we observe that the - convergence rate convergest to 2 from above.""" + convergence rate converges to 2 from above.""" errors = np.array([poisson_eigenvalue_2d(i) for i in range(5)]) convergence = np.log(errors[:-1]/errors[1:])/np.log(2.0) diff --git a/tests/firedrake/regression/test_fdm.py b/tests/firedrake/regression/test_fdm.py index 947ae43fbc..872b5eeb9b 100644 --- a/tests/firedrake/regression/test_fdm.py +++ b/tests/firedrake/regression/test_fdm.py @@ -1,4 +1,5 @@ import pytest +import numpy from firedrake import * from pyop2.utils import as_tuple from firedrake.petsc import DEFAULT_DIRECT_SOLVER @@ -138,7 +139,7 @@ def variant(request): @pytest.mark.skipcomplex def test_p_independence_hgrad(mesh, variant): family = "Lagrange" - expected = [16, 12] if mesh.topological_dimension() == 3 else [9, 7] + expected = [16, 12] if mesh.topological_dimension == 3 else [9, 7] solvers = [fdmstar] if variant is None else [fdmstar, facetstar] for degree in range(3, 6): V = FunctionSpace(mesh, family, degree, variant=variant) @@ -150,8 +151,8 @@ def test_p_independence_hgrad(mesh, variant): @pytest.mark.skipmumps @pytest.mark.skipcomplex def test_p_independence_hcurl(mesh): - family = "NCE" if mesh.topological_dimension() == 3 else "RTCE" - expected = [13, 10] if 
mesh.topological_dimension() == 3 else [6, 6] + family = "NCE" if mesh.topological_dimension == 3 else "RTCE" + expected = [13, 10] if mesh.topological_dimension == 3 else [6, 6] solvers = [fdmstar, facetstar] for degree in range(3, 6): V = FunctionSpace(mesh, family, degree, variant="fdm") @@ -163,7 +164,7 @@ def test_p_independence_hcurl(mesh): @pytest.mark.skipmumps @pytest.mark.skipcomplex def test_p_independence_hdiv(mesh): - family = "NCF" if mesh.topological_dimension() == 3 else "RTCF" + family = "NCF" if mesh.topological_dimension == 3 else "RTCF" expected = [6, 6] solvers = [fdmstar, facetstar] for degree in range(3, 6): @@ -175,7 +176,7 @@ def test_p_independence_hdiv(mesh): @pytest.mark.skipcomplex def test_variable_coefficient(mesh): - gdim = mesh.geometric_dimension() + gdim = mesh.geometric_dimension k = 4 V = FunctionSpace(mesh, "Lagrange", k) u = TrialFunction(V) @@ -209,7 +210,7 @@ def test_variable_coefficient(mesh): ids=["cg", "dg", "rt"]) def fs(request, mesh): degree = 3 - tdim = mesh.topological_dimension() + tdim = mesh.topological_dimension element = request.param variant = "fdm_ipdg" if element == "rt": @@ -227,7 +228,7 @@ def fs(request, mesh): def test_ipdg_direct_solver(fs): mesh = fs.mesh() x = SpatialCoordinate(mesh) - gdim = mesh.geometric_dimension() + gdim = mesh.geometric_dimension ncomp = fs.value_size homogenize = gdim > 2 @@ -334,3 +335,73 @@ def test_ipdg_direct_solver(fs): assert uvec.norm() < 1E-8 else: assert norm(u_exact-uh, "H1") < 1.0E-8 + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize("mat_type", ("aij",)) +@pytest.mark.parametrize("variant,degree", [("spectral", 1), ("spectral", 4), ("integral", 4), ("fdm", 4)]) +def test_tabulate_gradient(mesh, variant, degree, mat_type): + from firedrake.preconditioners.fdm import tabulate_exterior_derivative + tdim = mesh.topological_dimension + family = {1: "DG", 2: "RTCE", 3: "NCE"}[tdim] + + V0 = FunctionSpace(mesh, "Lagrange", degree, variant=variant) + V1 = 
FunctionSpace(mesh, family, degree-(tdim == 1), variant=variant) + D = tabulate_exterior_derivative(V0, V1, mat_type=mat_type) + + M = assemble(inner(TrialFunction(V1), TestFunction(V1))*dx).petscmat + Dij = D if D.type.endswith("aij") else D.convert(M.type, PETSc.Mat()) + B = M.matMult(Dij) + + Bref = assemble(inner(grad(TrialFunction(V0)), TestFunction(V1))*dx).petscmat + Bref.axpy(-1, B) + _, _, vals = Bref.getValuesCSR() + assert numpy.allclose(vals, 0) + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize("mat_type", ("aij",)) +@pytest.mark.parametrize("variant,degree", [("spectral", 1), ("spectral", 4), ("integral", 4), ("fdm", 4)]) +def test_tabulate_curl(mesh, variant, degree, mat_type): + from firedrake.preconditioners.fdm import tabulate_exterior_derivative + tdim = mesh.topological_dimension + family1 = {1: "CG", 2: "CG", 3: "NCE"}[tdim] + family2 = {1: "DG", 2: "RTCF", 3: "NCF"}[tdim] + + V1 = FunctionSpace(mesh, family1, degree, variant=variant) + V2 = FunctionSpace(mesh, family2, degree-(tdim == 1), variant=variant) + D = tabulate_exterior_derivative(V1, V2, mat_type=mat_type) + + M = assemble((-1)**(tdim-1)*inner(TrialFunction(V2), TestFunction(V2))*dx).petscmat + Dij = D if D.type.endswith("aij") else D.convert(M.type, PETSc.Mat()) + B = M.matMult(Dij) + + Bref = assemble(inner(curl(TrialFunction(V1)), TestFunction(V2))*dx).petscmat + Bref.axpy(-1, B) + _, _, vals = Bref.getValuesCSR() + assert numpy.allclose(vals, 0) + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize("mat_type", ("aij", "is")) +@pytest.mark.parametrize("variant,degree", [("spectral", 1), ("spectral", 4), ("integral", 4), ("fdm", 4)]) +def test_tabulate_divergence(mesh, variant, degree, mat_type): + from firedrake.preconditioners.fdm import tabulate_exterior_derivative + tdim = mesh.topological_dimension + family = {1: "CG", 2: "RTCF", 3: "NCF"}[tdim] + + V = FunctionSpace(mesh, family, degree, variant=variant) + Q = FunctionSpace(mesh, "DG", 0, 
variant=f"integral({degree-1})") + D = tabulate_exterior_derivative(V, Q, mat_type=mat_type, allow_repeated=True) + + # Fix scale + Jdet = JacobianDeterminant(mesh) + M = assemble(inner(TrialFunction(Q)*(1/Jdet), TestFunction(Q))*dx, diagonal=True) + with M.dat.vec as mvec: + D.diagonalScale(mvec, None) + + Dref = assemble(inner(div(TrialFunction(V)), TestFunction(Q))*dx).petscmat + Dij = D if D.type.endswith("aij") else D.convert(Dref.type, PETSc.Mat()) + Dref.axpy(-1, Dij) + _, _, vals = Dref.getValuesCSR() + assert numpy.allclose(vals, 0) diff --git a/tests/firedrake/regression/test_function.py b/tests/firedrake/regression/test_function.py index 85c6cdc0a0..3ed44b0e04 100644 --- a/tests/firedrake/regression/test_function.py +++ b/tests/firedrake/regression/test_function.py @@ -81,22 +81,22 @@ def test_firedrake_tensor_function_nonstandard_shape(W_nonstandard_shape): def test_mismatching_rank_interpolation(V): f = Function(V) - with pytest.raises(RuntimeError): + with pytest.raises(ValueError): f.interpolate(Constant((1, 2))) VV = VectorFunctionSpace(V.mesh(), 'CG', 1) f = Function(VV) - with pytest.raises(RuntimeError): + with pytest.raises(ValueError): f.interpolate(Constant((1, 2))) VVV = TensorFunctionSpace(V.mesh(), 'CG', 1) f = Function(VVV) - with pytest.raises(RuntimeError): + with pytest.raises(ValueError): f.interpolate(Constant((1, 2))) def test_mismatching_shape_interpolation(V): VV = VectorFunctionSpace(V.mesh(), 'CG', 1) f = Function(VV) - with pytest.raises(RuntimeError): + with pytest.raises(ValueError): f.interpolate(Constant([1] * (VV.value_shape[0] + 1))) @@ -275,3 +275,53 @@ def test_function_riesz_representation_l2_dat_version(V): version = f.dat.dat_version _ = f.riesz_representation(riesz_map="l2") assert f.dat.dat_version == version + + +@pytest.mark.parallel(nprocs=2) +def test_function_assign_mixed_subset_3_quads_2_processes(): + # mesh + # rank 0: + # 4---12----6---15---(8)-(18)-(10) + # | | | | + # 11 0 13 1 (17) (2) (19) + # | | | | + 
# 3---14----5---16---(7)-(20)--(9) + # rank 1: + # (7)-(13)---3----9----5 + # | | | + # (12) (1) 8 0 10 + # | | | plex points + # (6)-(14)---2---11----4 () = ghost + left = 111 + right = 222 + mesh = RectangleMesh( + 3, 1, 3., 1., quadrilateral=True, distribution_parameters={"partitioner_type": "simple"}, + ) + x, _ = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DG", 0) + f_l = Function(DG0).interpolate(conditional(x < 1.0, 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > 2.0, 1, 0)) + mesh = RelabeledMesh(mesh, [f_l, f_r], [left, right]) + x = SpatialCoordinate(mesh) + CG1 = VectorFunctionSpace(mesh, "CG", 1) + CG3 = VectorFunctionSpace(mesh, "CG", 3) + V = CG1 * CG3 + cg1cg3 = Function(V) + cg1, cg3 = cg1cg3.subfunctions + cg1.interpolate(x) + cg3.interpolate(x) + # Include closure of the right-most cell. + subset_cg1_indices = np.where(cg1.dat.data_ro_with_halos[:, 0] > 1.999) + subset_cg3_indices = np.where(cg3.dat.data_ro_with_halos[:, 0] > 1.999) + subset_cg1 = op2.Subset(CG1.node_set, subset_cg1_indices) + subset_cg3 = op2.Subset(CG3.node_set, subset_cg3_indices) + subset = op2.MixedSet([subset_cg1, subset_cg3]) + f = Function(V) + c = Constant(7.) + f.assign(c) + f.assign(cg1cg3, subset=subset) + xx = as_vector([x[0], x[1], x[0], x[1]]) + e = sqrt(assemble(inner(f, f) * dx(left))) + assert abs(e - 14.) 
< 1.e-14 + e = sqrt(assemble(inner(f - xx, f - xx) * dx(right))) + assert abs(e) < 1.e-15 diff --git a/tests/firedrake/regression/test_function_spaces.py b/tests/firedrake/regression/test_function_spaces.py index 3445f8773a..491354df19 100644 --- a/tests/firedrake/regression/test_function_spaces.py +++ b/tests/firedrake/regression/test_function_spaces.py @@ -120,7 +120,7 @@ def test_function_space_variant(mesh, space): @pytest.mark.parametrize("modifier", - [BrokenElement, HDivElement, + [HDivElement, HCurlElement]) @pytest.mark.parametrize("element", [FiniteElement("CG", triangle, 1), @@ -249,8 +249,8 @@ def test_reconstruct_variant(family, dual): def test_reconstruct_mixed(fs, mesh, mesh2, dual): W1 = fs.dual() if dual else fs W2 = W1.reconstruct(mesh=mesh2) - assert W1.mesh() == mesh - assert W2.mesh() == mesh2 + assert W1.mesh().unique() == mesh + assert W2.mesh().unique() == mesh2 assert W1.ufl_element() == W2.ufl_element() for index, V in enumerate(W1): V1 = W1.sub(index) @@ -313,3 +313,46 @@ def test_reconstruct_sub_component(dg0, rt1, mesh, mesh2, dual): assert is_primal(V1.parent) == is_primal(V2.parent) != dual assert V1.parent.ufl_element() == V2.parent.ufl_element() assert V1.parent.index == V2.parent.index == index + + +@pytest.mark.parametrize("family", ("CG", "BDM", "DG")) +@pytest.mark.parametrize("shape", (0, 2, (2, 3)), ids=("0", "2", "(2,3)")) +def test_broken_space(mesh, shape, family): + """Check that FunctionSpace.broken_space returns the a + FunctionSpace with the correct element. 
+ """ + kwargs = {"variant": "spectral"} if family == "DG" else {} + + elem = FiniteElement(family, mesh.ufl_cell(), 1, **kwargs) + + if not isinstance(shape, int): + make_element = lambda elem: TensorElement(elem, shape=shape) + elif shape > 0: + make_element = lambda elem: VectorElement(elem, dim=shape) + else: + make_element = lambda elem: elem + + fs = FunctionSpace(mesh, make_element(elem)) + broken = fs.broken_space() + expected = FunctionSpace(mesh, make_element(BrokenElement(elem))) + + assert broken == expected + + +def test_mixed_broken_space(mesh): + """Check that MixedFunctionSpace.broken_space returns the a + MixedFunctionSpace with the correct element. + """ + + mixed_elem = MixedElement([ + FiniteElement("CG", mesh.ufl_cell(), 1), + VectorElement("BDM", mesh.ufl_cell(), 2, dim=2), + TensorElement("DG", mesh.ufl_cell(), 1, shape=(2, 3), variant="spectral") + ]) + broken_elem = MixedElement([BrokenElement(elem) for elem in mixed_elem.sub_elements]) + + mfs = FunctionSpace(mesh, mixed_elem) + broken = mfs.broken_space() + expected = FunctionSpace(mesh, broken_elem) + + assert broken == expected diff --git a/tests/firedrake/regression/test_garbage.py b/tests/firedrake/regression/test_garbage.py index 546f64f907..0a71b24667 100644 --- a/tests/firedrake/regression/test_garbage.py +++ b/tests/firedrake/regression/test_garbage.py @@ -1,7 +1,13 @@ -from firedrake import * -from firedrake.petsc import garbage_cleanup +import gc +import re import pytest +from mpi4py import MPI +from pyop2.mpi import temp_internal_comm +from pytest_mpi.parallel_assert import parallel_assert + +from firedrake import * +from firedrake.petsc import garbage_cleanup @pytest.mark.parallel(2) @@ -16,3 +22,35 @@ def test_making_many_meshes_does_not_exhaust_comms(): # Clean up garbage after too garbage_cleanup(COMM_WORLD) + + +# perform this test last so it will catch anything leaking from earlier tests +@pytest.mark.order("last") +@pytest.mark.parallel(3) +def 
test_no_petsc_objects_on_private_comm(request, capfd): + """Check that PETSc objects are being created with the correct comm. + + If objects are being created using Firedrake's private communicator then + they will not be destroyed using `PETSc.garbage_cleanup`. + + """ + # Put ref cycle objects into the garbage + gc.collect() + + with temp_internal_comm(MPI.COMM_WORLD) as private_comm: + PETSc.garbage_view(private_comm) + captured = MPI.COMM_WORLD.bcast(capfd.readouterr().out) + + pattern = r"Rank \d+:: Total entries: (\d+)" + all_zero = True + nhits = 0 + for line in captured.splitlines(): + if match := re.fullmatch(pattern, line): + nhits += 1 + if match.groups()[0] != "0": + all_zero = False + parallel_assert(nhits == MPI.COMM_WORLD.size) + parallel_assert( + all_zero, + msg=f"Objects found on private communicator, got:\n{captured}" + ) diff --git a/tests/firedrake/regression/test_helmholtz.py b/tests/firedrake/regression/test_helmholtz.py index 6eeba6e71a..f9d25d4bbb 100644 --- a/tests/firedrake/regression/test_helmholtz.py +++ b/tests/firedrake/regression/test_helmholtz.py @@ -26,7 +26,7 @@ def helmholtz(r, quadrilateral=False, degree=2, mesh=None): mesh = UnitSquareMesh(2 ** r, 2 ** r, quadrilateral=quadrilateral) V = FunctionSpace(mesh, "CG", degree) # Define variational problem - dim = mesh.ufl_cell().topological_dimension() + dim = mesh.ufl_cell().topological_dimension lmbda = 1 u = TrialFunction(V) v = TestFunction(V) diff --git a/tests/firedrake/regression/test_helmholtz_sphere.py b/tests/firedrake/regression/test_helmholtz_sphere.py index 403db5afee..95b3a32d72 100644 --- a/tests/firedrake/regression/test_helmholtz_sphere.py +++ b/tests/firedrake/regression/test_helmholtz_sphere.py @@ -30,9 +30,9 @@ def run_helmholtz_mixed_sphere(MeshClass, r, meshd, eltd): m = MeshClass(refinement_level=r, degree=meshd) x = SpatialCoordinate(m) m.init_cell_orientations(x) - if m.ufl_cell().cellname() == "triangle": + if m.ufl_cell().cellname == "triangle": V = 
FunctionSpace(m, 'RT', eltd+1) - elif m.ufl_cell().cellname() == "quadrilateral": + elif m.ufl_cell().cellname == "quadrilateral": V = FunctionSpace(m, 'RTCF', eltd+1) Q = FunctionSpace(m, 'DG', eltd) W = V*Q diff --git a/tests/firedrake/regression/test_interior_facets.py b/tests/firedrake/regression/test_interior_facets.py index 0fc13f97e0..7cecc87ee1 100644 --- a/tests/firedrake/regression/test_interior_facets.py +++ b/tests/firedrake/regression/test_interior_facets.py @@ -156,4 +156,4 @@ def test_interior_facet_integration(circle_in_square_mesh): assert np.allclose(assemble(f*dS(2)), 2*pi, rtol=1e-2) assert np.allclose(assemble(f*dS), - assemble(f*dS(2)) + assemble(f*dS(unmarked))) + assemble(f*dS(2)) + assemble(f*dS(UNMARKED))) diff --git a/tests/firedrake/regression/test_interp_dual.py b/tests/firedrake/regression/test_interp_dual.py index 50e29b05cb..b0ce971a95 100644 --- a/tests/firedrake/regression/test_interp_dual.py +++ b/tests/firedrake/regression/test_interp_dual.py @@ -2,6 +2,7 @@ import numpy as np from firedrake import * from firedrake.utils import complex_mode +from firedrake.matrix import MatrixBase import ufl @@ -54,7 +55,7 @@ def test_assemble_interp_adjoint_tensor(mesh, V1, f1): def test_assemble_interp_operator(V2, f1): # Check type - If1 = Interpolate(f1, V2) + If1 = interpolate(f1, V2) assert isinstance(If1, ufl.Interpolate) # -- I(f1, V2) -- # @@ -89,7 +90,7 @@ def test_assemble_interp_matrix(V1, V2, f1): def test_assemble_interp_tlm(V1, V2, f1): # -- Action(I(v1, V2), f1) -- # v1 = TrialFunction(V1) - Iv1 = Interpolate(v1, V2) + Iv1 = interpolate(v1, V2) b = assemble(interpolate(f1, V2)) assembled_action_Iv1 = assemble(action(Iv1, f1)) @@ -99,7 +100,7 @@ def test_assemble_interp_tlm(V1, V2, f1): def test_assemble_interp_adjoint_matrix(V1, V2): # -- Adjoint(I(v1, V2)) -- # v1 = TrialFunction(V1) - Iv1 = Interpolate(v1, V2) + Iv1 = interpolate(v1, V2) v2 = TestFunction(V2) c2 = assemble(conj(v2) * dx) @@ -120,11 +121,11 @@ def 
test_assemble_interp_adjoint_matrix(V1, V2): def test_assemble_interp_adjoint_model(V1, V2): # -- Action(Adjoint(I(v1, v2)), fstar) -- # v1 = TrialFunction(V1) - Iv1 = Interpolate(v1, V2) + Iv1 = interpolate(v1, V2) fstar = Cofunction(V2.dual()) v = Argument(V1, 0) - Ivfstar = assemble(Interpolate(v, fstar)) + Ivfstar = assemble(interpolate(v, fstar)) # Action(Adjoint(I(v1, v2)), fstar) <=> I(v, fstar) res = assemble(action(adjoint(Iv1), fstar)) assert np.allclose(res.dat.data, Ivfstar.dat.data) @@ -150,9 +151,9 @@ def test_assemble_interp_rank0(V1, V2, f1): u2 = assemble(conj(v2) * dx) # Interpolate(f1, u2) <=> Action(Interpolate(f1, V2), u2) # a is rank 0 so assembling it produces a scalar. - a = assemble(Interpolate(f1, u2)) + a = assemble(interpolate(f1, u2)) # Compute numerically Action(Interpolate(f1, V2), u2) - b = assemble(Interpolate(f1, V2)) + b = assemble(interpolate(f1, V2)) with b.dat.vec_ro as x, u2.dat.vec_ro as y: res = x.dot(y) assert np.abs(a - res) < 1e-9 @@ -167,9 +168,9 @@ def test_assemble_base_form_operator_expressions(mesh): f2 = Function(V1).interpolate(sin(2*pi*y)) f3 = Function(V1).interpolate(cos(2*pi*x)) - If1 = Interpolate(f1, V2) - If2 = Interpolate(f2, V2) - If3 = Interpolate(f3, V2) + If1 = interpolate(f1, V2) + If2 = interpolate(f2, V2) + If3 = interpolate(f3, V2) # Sum of BaseFormOperators (1-form) res = assemble(If1 + If2 + If3) @@ -184,8 +185,8 @@ def test_assemble_base_form_operator_expressions(mesh): # Sum of BaseFormOperator (2-form) v1 = TrialFunction(V1) - Iv1 = Interpolate(v1, V2) - Iv2 = Interpolate(v1, V2) + Iv1 = interpolate(v1, V2) + Iv2 = interpolate(v1, V2) res = assemble(Iv1 + Iv2) mat_Iv1 = assemble(Iv1) mat_Iv2 = assemble(Iv2) @@ -210,7 +211,7 @@ def test_check_identity(mesh): V1 = FunctionSpace(mesh, "CG", 1) v2 = TestFunction(V2) v1 = TestFunction(V1) - a = assemble(Interpolate(v1, conj(v2)*dx)) + a = assemble(interpolate(v1, conj(v2)*dx)) b = assemble(conj(v1)*dx) assert np.allclose(a.dat.data, b.dat.data) @@ 
-234,7 +235,7 @@ def test_solve_interp_f(mesh): # -- Solution where the source term is interpolated via `ufl.Interpolate` u2 = Function(V1) - If = Interpolate(f1, V2) + If = interpolate(f1, V2) # This requires assembling If F2 = inner(grad(u2), grad(w))*dx + inner(u2, w)*dx - inner(If, w)*dx solve(F2 == 0, u2) @@ -267,7 +268,7 @@ def test_solve_interp_u(mesh): # -- Solution where u2 is interpolated via `ufl.Interpolate` (mat-free) u2 = Function(V1) # Iu is the identity - Iu = Interpolate(u2, V1) + Iu = interpolate(u2, V1) # This requires assembling the action the Jacobian of Iu F2 = inner(grad(u2), grad(w))*dx + inner(Iu, w)*dx - inner(f, w)*dx solve(F2 == 0, u2, solver_parameters={"mat_type": "matfree", @@ -278,7 +279,7 @@ def test_solve_interp_u(mesh): # Same problem with grad(Iu) instead of grad(Iu) u2 = Function(V1) # Iu is the identity - Iu = Interpolate(u2, V1) + Iu = interpolate(u2, V1) # This requires assembling the action the Jacobian of Iu F2 = inner(grad(Iu), grad(w))*dx + inner(Iu, w)*dx - inner(f, w)*dx solve(F2 == 0, u2, solver_parameters={"mat_type": "matfree", @@ -341,7 +342,7 @@ def test_interp_dual_mixed(source_space, target_space): expected = assemble(F_target) F_source = inner(b, v)*dx - I_source = Interpolate(expr, F_source) + I_source = interpolate(expr, F_source) c = Cofunction(W.dual()) c.assign(99) @@ -352,3 +353,45 @@ def test_interp_dual_mixed(source_space, target_space): assert result is tensor for x, y, in zip(result.subfunctions, expected.subfunctions): assert np.allclose(x.dat.data_ro, y.dat.data_ro) + + +def test_assemble_action_adjoint(V1, V2): + u = TrialFunction(V1) + + a = interpolate(u, V2) # V1 x V2^* -> R, equiv. 
V1 -> V2 + assert a.arguments() == (TestFunction(V2.dual()), TrialFunction(V1)) + + f_form = inner(1, TestFunction(V2)) * dx + + for f in (f_form, assemble(f_form)): + expr = action(adjoint(assemble(a)), f) + assert isinstance(expr, Action) + res = assemble(expr) + assert isinstance(res, Cofunction) + assert res.function_space() == V1.dual() + + expr2 = action(f, a) # This simplifies into an Interpolate + assert isinstance(expr2, Interpolate) + res2 = assemble(expr2) + assert isinstance(res2, Cofunction) + assert res2.function_space() == V1.dual() + assert np.allclose(res.dat.data, res2.dat.data) + + A = assemble(a) + assert isinstance(A, MatrixBase) + + # This doesn't explicitly assemble the adjoint of A, but uses multHermitian + expr3 = action(f, A) + assert isinstance(expr3, Action) + res3 = assemble(expr3) + assert isinstance(res3, Cofunction) + assert res3.function_space() == V1.dual() + assert np.allclose(res.dat.data, res3.dat.data) + + # This is simplified into action(f, A) to avoid explicit assembly of adjoint(A) + expr4 = action(adjoint(A), f) + assert isinstance(expr4, Action) + res4 = assemble(expr4) + assert isinstance(res4, Cofunction) + assert res4.function_space() == V1.dual() + assert np.allclose(res.dat.data, res4.dat.data) diff --git a/tests/firedrake/regression/test_interpolate.py b/tests/firedrake/regression/test_interpolate.py index a2a0d11618..66d733c57c 100644 --- a/tests/firedrake/regression/test_interpolate.py +++ b/tests/firedrake/regression/test_interpolate.py @@ -6,6 +6,13 @@ cwd = abspath(dirname(__file__)) +def mat_equals(a, b): + """Check that two Matrices are equal.""" + a = a.petscmat.copy() + a.axpy(-1.0, b.petscmat) + return a.norm(norm_type=PETSc.NormType.NORM_FROBENIUS) < 1e-14 + + def test_constant(): cg1 = FunctionSpace(UnitSquareMesh(5, 5), "CG", 1) f = assemble(interpolate(Constant(1.0), cg1)) @@ -190,6 +197,18 @@ def test_hcurl_extruded_interval(): assert np.allclose(u.dat.data, u_proj.dat.data) +def 
test_dpc_into_dq_extruded_interval(): + mesh = ExtrudedMesh(UnitIntervalMesh(10), 10, 0.1) + DPC = FunctionSpace(mesh, "DPC", 1) + DQ = FunctionSpace(mesh, "DQ", 1) + u1 = Function(DPC) + u2 = Function(DQ) + + u1.assign(1) + u2.interpolate(u1) + assert errornorm(u2, u1) < 1E-12 + + # Requires the relevant FInAT or FIAT duals to be defined @pytest.mark.xfail(raises=NotImplementedError, reason="Requires the relevant FInAT or FIAT duals to be defined") def test_hdiv_2d(): @@ -384,7 +403,8 @@ def test_adjoint_dg(): @pytest.mark.parametrize("degree", range(1, 4)) -def test_function_cofunction(degree): +@pytest.mark.parametrize("cofunc", [True, False]) +def test_zeroform(degree, cofunc): mesh = UnitSquareMesh(10, 10) Pkp1 = FunctionSpace(mesh, "CG", degree+1) Pk = FunctionSpace(mesh, "CG", degree) @@ -393,7 +413,9 @@ def test_function_cofunction(degree): x = SpatialCoordinate(mesh) f = assemble(interpolate(sin(2*pi*x[0])*sin(2*pi*x[1]), Pk)) - fhat = assemble(f*v1*dx) + fhat = f * v1 * dx + if cofunc: + fhat = assemble(fhat) norm_i = assemble(interpolate(f, fhat)) norm = assemble(f*f*dx) @@ -523,7 +545,8 @@ def test_interpolate_logical_not(): @pytest.mark.parametrize("mode", ("forward", "adjoint")) -def test_mixed_matrix(mode): +@pytest.mark.parametrize("mat_type", (None, "nest")) +def test_mixed_matrix(mode, mat_type): nx = 3 mesh = UnitSquareMesh(nx, nx) @@ -537,11 +560,11 @@ def test_mixed_matrix(mode): if mode == "forward": I = Interpolate(TrialFunction(Z), TestFunction(W.dual())) - a = assemble(I) + a = assemble(I, mat_type=mat_type) assert a.arguments()[0].function_space() == W.dual() assert a.arguments()[1].function_space() == Z assert a.petscmat.getSize() == (W.dim(), Z.dim()) - assert a.petscmat.getType() == "nest" + assert a.petscmat.getType() == (mat_type if mat_type else "seqaij") u = Function(Z) u.subfunctions[0].sub(0).assign(1) @@ -550,11 +573,11 @@ def test_mixed_matrix(mode): result_matfree = assemble(Interpolate(u, TestFunction(W.dual()))) elif mode == 
"adjoint": I = Interpolate(TestFunction(Z), TrialFunction(W.dual())) - a = assemble(I) + a = assemble(I, mat_type=mat_type) assert a.arguments()[1].function_space() == W.dual() assert a.arguments()[0].function_space() == Z assert a.petscmat.getSize() == (Z.dim(), W.dim()) - assert a.petscmat.getType() == "nest" + assert a.petscmat.getType() == (mat_type if mat_type else "seqaij") u = Function(W.dual()) u.subfunctions[0].assign(1) @@ -584,9 +607,12 @@ def test_interpolator_reuse(family, degree, mode): u = Function(V.dual()) expr = interpolate(TestFunction(V), u) - I = Interpolator(expr, V) + Iorig = get_interpolator(expr) for k in range(3): + I = get_interpolator(expr) + assert I is Iorig + u.assign(rg.uniform(u.function_space())) expected = u.dat.data.copy() @@ -620,3 +646,98 @@ def test_mixed_space_bcs(): expected = assemble(interpolate(sum(w), V), bcs=bcs[-1:]) assert np.allclose(result.dat.data, expected.dat.data) + + +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("mode", ["forward", "adjoint"]) +def test_interpolate_composition(mode): + mesh = UnitSquareMesh(4, 4) + x, y = SpatialCoordinate(mesh) + + V5 = FunctionSpace(mesh, "CG", 5) + V4 = FunctionSpace(mesh, "CG", 4) + V3 = FunctionSpace(mesh, "CG", 3) + V2 = FunctionSpace(mesh, "CG", 2) + V1 = FunctionSpace(mesh, "CG", 1) + + if mode == "forward": + u5 = Function(V5).interpolate(sin(x + y)) + u4 = interpolate(u5, V4) + u3 = interpolate(u4, V3) + u2 = interpolate(u3, V2) + u1 = interpolate(u2, V1) + + assert u1.function_space() == V1 + + res = assemble(u1) + res2 = assemble(interpolate(sin(x + y), V1)) + assert np.allclose(res.dat.data_ro, res2.dat.data_ro) + + if mode == "adjoint": + u1 = conj(TestFunction(V1)) * dx + u2 = interpolate(TestFunction(V2), u1) + u3 = interpolate(TestFunction(V3), u2) + u4 = interpolate(TestFunction(V4), u3) + u5 = interpolate(TestFunction(V5), u4) + + assert u5.function_space() == V5.dual() + + res_adj = assemble(u5) + res_adj2 = assemble(interpolate(TestFunction(V5), 
conj(TestFunction(V1)) * dx)) + assert np.allclose(res_adj.dat.data_ro, res_adj2.dat.data_ro) + + +@pytest.mark.parallel([1, 3]) +def test_interpolate_form(): + mesh = UnitSquareMesh(5, 5) + V3 = FunctionSpace(mesh, "CG", 3) + V2 = FunctionSpace(mesh, "CG", 2) + V1 = FunctionSpace(mesh, "CG", 1) + + V3_trial = TrialFunction(V3) + V2_test = TestFunction(V2) + V1_test = TestFunction(V1) + V2_dual_trial = TrialFunction(V2.dual()) + + two_form = inner(V3_trial, V2_test) * dx # V3 x V2 -> R, equiv V3 -> V2^* + interp = interpolate(V1_test, two_form) # V3 x V1 -> R, equiv V3 -> V1^* + assert interp.arguments() == (V1_test, V3_trial) + res1 = assemble(interp) + + I = interpolate(V1_test, V2_dual_trial) # V2^* x V1 -> R, equiv V2^* -> V1^* + interp2 = action(I, two_form) # V3 -> V1^* + assert interp2.arguments() == (V1_test, V3_trial) + res2 = assemble(interp2) + assert mat_equals(res1, res2) + + res3 = assemble(inner(V3_trial, V1_test) * dx) # V3 x V1 -> R + assert mat_equals(res1, res3) + + +@pytest.mark.parallel([1, 3]) +def test_interpolate_form_mixed(): + mesh = UnitSquareMesh(3, 3) + V1 = FunctionSpace(mesh, "CG", 1) + V2 = FunctionSpace(mesh, "CG", 2) + V3 = FunctionSpace(mesh, "CG", 3) + V4 = FunctionSpace(mesh, "CG", 4) + V = V3 * V4 + W = V1 * V2 + + u = TrialFunction(V) + v = TestFunction(V) + q = TestFunction(W) + + form = inner(u, v) * dx # V x V -> R, equiv V -> V^* + interp = interpolate(q, form) # V -> W^*, equiv V x W -> R + assert interp.arguments() == (q, u) + res1 = assemble(interp) + + I = interpolate(q, TrialFunction(V.dual())) # V^* x W -> R, equiv V^* -> W^* + interp2 = action(I, form) # V -> W^* + assert interp2.arguments() == (q, u) + res2 = assemble(interp2) + assert mat_equals(res1, res2) + + res3 = assemble(inner(u, q) * dx) # V x W -> R + assert mat_equals(res1, res3) diff --git a/tests/firedrake/regression/test_interpolate_cross_mesh.py b/tests/firedrake/regression/test_interpolate_cross_mesh.py index 311b0e2568..a04e3edbc0 100644 --- 
a/tests/firedrake/regression/test_interpolate_cross_mesh.py +++ b/tests/firedrake/regression/test_interpolate_cross_mesh.py @@ -1,10 +1,11 @@ from firedrake import * from firedrake.petsc import DEFAULT_PARTITIONER from firedrake.ufl_expr import extract_unique_domain +from firedrake.mesh import Mesh, plex_from_cell_list +from firedrake.formmanipulation import split_form import numpy as np import pytest from ufl import product -import subprocess def allgather(comm, coords): @@ -15,9 +16,9 @@ def allgather(comm, coords): return coords -def unitsquaresetup(): +def unitsquaresetup(dest_quad=True): m_src = UnitSquareMesh(2, 3) - m_dest = UnitSquareMesh(3, 5, quadrilateral=True) + m_dest = UnitSquareMesh(3, 5, quadrilateral=dest_quad) coords = np.array( [[0.56, 0.6], [0.1, 0.9], [0.9, 0.1], [0.9, 0.9], [0.726, 0.6584]] ) # fairly arbitrary @@ -47,14 +48,7 @@ def make_high_order(m_low_order, degree): "unitsquare_vfs", "unitsquare_tfs", "unitsquare_N1curl_source", - pytest.param( - "unitsquare_SminusDiv_destination", - marks=pytest.mark.xfail( - # CalledProcessError is so the parallel tests correctly xfail - raises=(subprocess.CalledProcessError, NotImplementedError), - reason="Can only interpolate into spaces with point evaluation nodes", - ), - ), + "unitsquare_RT_N1curl_destination", "unitsquare_Regge_source", # This test fails in complex mode pytest.param("spheresphere", marks=pytest.mark.skipcomplex), @@ -186,14 +180,14 @@ def parameters(request): V_src = FunctionSpace(m_src, "N1curl", 2) # Not point evaluation nodes V_dest = VectorFunctionSpace(m_dest, "CG", 4) V_dest_2 = VectorFunctionSpace(m_dest, "DQ", 2) - elif request.param == "unitsquare_SminusDiv_destination": - m_src, m_dest, coords = unitsquaresetup() + elif request.param == "unitsquare_RT_N1curl_destination": + m_src, m_dest, coords = unitsquaresetup(dest_quad=False) expr_src = 2 * SpatialCoordinate(m_src) expr_dest = 2 * SpatialCoordinate(m_dest) expected = 2 * coords V_src = VectorFunctionSpace(m_src, 
"CG", 2) - V_dest = FunctionSpace(m_dest, "SminusDiv", 2) # Not point evaluation nodes - V_dest_2 = FunctionSpace(m_dest, "SminusCurl", 2) # Not point evaluation nodes + V_dest = FunctionSpace(m_dest, "RT", 2) # Not point evaluation nodes + V_dest_2 = FunctionSpace(m_dest, "N1curl", 2) # Not point evaluation nodes elif request.param == "unitsquare_Regge_source": m_src, m_dest, coords = unitsquaresetup() expr_src = outer(SpatialCoordinate(m_src), SpatialCoordinate(m_src)) @@ -339,12 +333,18 @@ def test_exact_refinement(): expr_in_V_fine = x**2 + y**2 + 1 f_fine = Function(V_fine).interpolate(expr_in_V_fine) + # Build interpolation matrices in both directions + coarse_to_fine = assemble(interpolate(TrialFunction(V_coarse), V_fine)) + coarse_to_fine_adjoint = assemble(interpolate(TestFunction(V_coarse), TrialFunction(V_fine.dual()))) + # If we now interpolate f_coarse into V_fine we should get a function # which has no interpolation error versus f_fine because we were able to # exactly represent expr_in_V_coarse in V_coarse and V_coarse is a subset # of V_fine f_coarse_on_fine = assemble(interpolate(f_coarse, V_fine)) assert np.allclose(f_coarse_on_fine.dat.data_ro, f_fine.dat.data_ro) + f_coarse_on_fine_mat = assemble(coarse_to_fine @ f_coarse) + assert np.allclose(f_coarse_on_fine_mat.dat.data_ro, f_fine.dat.data_ro) # Adjoint interpolation takes us from V_fine^* to V_coarse^* so we should # also get an exact result here. @@ -354,6 +354,10 @@ def test_exact_refinement(): assert np.allclose( cofunction_fine_on_coarse.dat.data_ro, cofunction_coarse.dat.data_ro ) + cofunction_fine_on_coarse_mat = assemble(action(coarse_to_fine_adjoint, cofunction_fine)) + assert np.allclose( + cofunction_fine_on_coarse_mat.dat.data_ro, cofunction_coarse.dat.data_ro + ) # Now we test with expressions which are NOT exactly representable in the # function spaces by introducing a cube term. 
This can't be represented @@ -406,32 +410,6 @@ def test_interpolate_unitsquare_tfs_shape(shape, symmetry): assemble(interpolate(f_src, V_dest)) -def test_interpolate_cross_mesh_not_point_eval(): - m_src = UnitSquareMesh(2, 3) - m_dest = UnitSquareMesh(3, 5, quadrilateral=True) - coords = np.array( - [[0.56, 0.6], [0.1, 0.9], [0.9, 0.1], [0.9, 0.9], [0.726, 0.6584]] - ) # fairly arbitrary - # add the coordinates of the mesh vertices to test boundaries - vertices_src = allgather(m_src.comm, m_src.coordinates.dat.data_ro) - coords = np.concatenate((coords, vertices_src)) - vertices_dest = allgather(m_dest.comm, m_dest.coordinates.dat.data_ro) - coords = np.concatenate((coords, vertices_dest)) - dest_eval = PointEvaluator(m_dest, coords) - expr_src = 2 * SpatialCoordinate(m_src) - expr_dest = 2 * SpatialCoordinate(m_dest) - expected = 2 * coords - V_src = FunctionSpace(m_src, "RT", 2) - V_dest = FunctionSpace(m_dest, "RTCE", 2) - atol = 1e-8 # default - # This might not make much mathematical sense, but it should test if we get - # the not implemented error for non-point evaluation nodes! 
- with pytest.raises(NotImplementedError): - interpolate_function( - m_src, m_dest, V_src, V_dest, dest_eval, expected, expr_src, expr_dest, atol - ) - - def interpolate_function( m_src, m_dest, V_src, V_dest, dest_eval, expected, expr_src, expr_dest, atol ): @@ -551,7 +529,7 @@ def test_missing_dofs(): V_src = FunctionSpace(m_src, "CG", 2) V_dest = FunctionSpace(m_dest, "CG", 3) with pytest.raises(DofNotDefinedError): - Interpolator(TestFunction(V_src), V_dest) + assemble(interpolate(TrialFunction(V_src), V_dest)) f_src = Function(V_src).interpolate(expr) f_dest = assemble(interpolate(f_src, V_dest, allow_missing_dofs=True)) dest_eval = PointEvaluator(m_dest, coords) @@ -604,8 +582,8 @@ def test_line_integral(): # Create a 1D line mesh in 2D from (0, 0) to (1, 1) with 1 cell cells = np.asarray([[0, 1]]) vertex_coords = np.asarray([[0.0, 0.0], [1.0, 1.0]]) - plex = mesh.plex_from_cell_list(1, cells, vertex_coords, comm=m.comm) - line = mesh.Mesh(plex, dim=2) + plex = plex_from_cell_list(1, cells, vertex_coords, comm=m.comm) + line = Mesh(plex, dim=2) x, y = SpatialCoordinate(line) V_line = FunctionSpace(line, "CG", 2) f_line = Function(V_line).interpolate(x * y) @@ -614,8 +592,8 @@ def test_line_integral(): # Create a 1D line around the unit square (2D) with 4 cells cells = np.asarray([[0, 1], [1, 2], [2, 3], [3, 0]]) vertex_coords = np.asarray([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) - plex = mesh.plex_from_cell_list(1, cells, vertex_coords, comm=m.comm) - line_square = mesh.Mesh(plex, dim=2) + plex = plex_from_cell_list(1, cells, vertex_coords, comm=m.comm) + line_square = Mesh(plex, dim=2) x, y = SpatialCoordinate(line_square) V_line_square = FunctionSpace(line_square, "CG", 2) f_line_square = Function(V_line_square).interpolate(x * y) @@ -657,9 +635,9 @@ def test_interpolate_matrix_cross_mesh(): f_at_points2 = assemble(interpolate(f, P0DG)) assert np.allclose(f_at_points.dat.data_ro, f_at_points2.dat.data_ro) # To get the points in the correct order in 
V we interpolate into vom.input_ordering - # We pass matfree=False which constructs the permutation matrix instead of using SFs + # We pass mat_type='aij' which constructs the permutation matrix instead of using SFs P0DG_io = FunctionSpace(vom.input_ordering, "DG", 0) - B = assemble(interpolate(TrialFunction(P0DG), P0DG_io, matfree=False)) + B = assemble(interpolate(TrialFunction(P0DG), P0DG_io), mat_type='aij') f_at_points_correct_order = assemble(B @ f_at_points) f_at_points_correct_order2 = assemble(interpolate(f_at_points, P0DG_io)) assert np.allclose(f_at_points_correct_order.dat.data_ro, f_at_points_correct_order2.dat.data_ro) @@ -681,6 +659,32 @@ f_interp2.dat.data_wo[:] = f_at_points_correct_order3.dat.data_ro[:] assert np.allclose(f_interp2.dat.data_ro, g.dat.data_ro) + interp_mat2 = assemble(interpolate(TrialFunction(U), V)) + assert interp_mat2.arguments() == (TestFunction(V.dual()), TrialFunction(U)) + f_interp3 = assemble(interp_mat2 @ f) + assert f_interp3.function_space() == V + assert np.allclose(f_interp3.dat.data_ro, g.dat.data_ro) + + +@pytest.mark.parallel([1, 3]) +def test_interpolate_matrix_cross_mesh_adjoint(): + mesh_fine = UnitSquareMesh(4, 4) + mesh_coarse = UnitSquareMesh(2, 2) + + V_coarse = FunctionSpace(mesh_coarse, "CG", 1) + V_fine = FunctionSpace(mesh_fine, "CG", 1) + + cofunc_fine = assemble(conj(TestFunction(V_fine)) * dx) + + interp = assemble(interpolate(TestFunction(V_coarse), TrialFunction(V_fine.dual()))) + cofunc_coarse = assemble(Action(interp, cofunc_fine)) + assert interp.arguments() == (TestFunction(V_coarse), TrialFunction(V_fine.dual())) + assert cofunc_coarse.function_space() == V_coarse.dual() + + # Compare cofunc_coarse (adjoint interpolation of cofunc_fine) with direct assembly on the coarse space + cofunc_coarse_direct = assemble(conj(TestFunction(V_coarse)) * dx) + assert np.allclose(cofunc_coarse.dat.data_ro, cofunc_coarse_direct.dat.data_ro) + @pytest.mark.parallel([2, 3, 4]) def test_voting_algorithm_edgecases(): @@ -715,3
+719,40 @@ def test_interpolate_cross_mesh_interval(periodic): f_dest = Function(V_dest).interpolate(f_src) x_dest, = SpatialCoordinate(m_dest) assert abs(assemble((f_dest - (-(x_dest - .5) ** 2)) ** 2 * dx)) < 1.e-16 + + +def test_mixed_interpolator_cross_mesh(): + # Tests assembly of mixed interpolator across meshes + mesh1 = UnitSquareMesh(4, 4) + mesh2 = UnitSquareMesh(3, 3, quadrilateral=True) + mesh3 = UnitDiskMesh(2) + mesh4 = UnitTriangleMesh(3) + V1 = FunctionSpace(mesh1, "CG", 1) + V2 = FunctionSpace(mesh2, "CG", 2) + V3 = FunctionSpace(mesh3, "CG", 3) + V4 = FunctionSpace(mesh4, "CG", 4) + + W = V1 * V2 + U = V3 * V4 + + w = TrialFunction(W) + w0, w1 = split(w) + expr = as_vector([w0 + w1, w0 + w1]) + mixed_interp = interpolate(expr, U, allow_missing_dofs=True) # Interpolating from W to U + + # The block matrix structure is + # | V1 -> V3 V2 -> V3 | + # | V1 -> V4 V2 -> V4 | + + res = assemble(mixed_interp, mat_type="nest") + assert isinstance(res, AssembledMatrix) + assert res.petscmat.type == "nest" + + split_interp = dict(split_form(mixed_interp)) + + for i in range(2): + for j in range(2): + interp_ij = split_interp[(i, j)] + assert isinstance(interp_ij, Interpolate) + res_block = assemble(interpolate(TrialFunction(W.sub(j)), U.sub(i), allow_missing_dofs=True)) + assert np.allclose(res.petscmat.getNestSubMatrix(i, j)[:, :], res_block.petscmat[:, :]) diff --git a/tests/firedrake/regression/test_interpolate_vs_project.py b/tests/firedrake/regression/test_interpolate_vs_project.py index 3a0fcc571f..41d335d39a 100644 --- a/tests/firedrake/regression/test_interpolate_vs_project.py +++ b/tests/firedrake/regression/test_interpolate_vs_project.py @@ -30,7 +30,7 @@ def V(request, mesh): def test_interpolate_vs_project(V): mesh = V.mesh() - dim = mesh.geometric_dimension() + dim = mesh.geometric_dimension if dim == 2: x, y = SpatialCoordinate(mesh) elif dim == 3: diff --git a/tests/firedrake/regression/test_interpolate_zany.py 
b/tests/firedrake/regression/test_interpolate_zany.py index b2054843cd..174453d3fe 100644 --- a/tests/firedrake/regression/test_interpolate_zany.py +++ b/tests/firedrake/regression/test_interpolate_zany.py @@ -1,5 +1,6 @@ import numpy import pytest +import ufl from firedrake import * @@ -92,7 +93,7 @@ def expr_at_vom(V, which, vom): P0 = VectorFunctionSpace(vom, "DG", 0) fvom = Function(P0) - point = Constant([0] * mesh.geometric_dimension()) + point = Constant([0] * mesh.geometric_dimension) expr_at_pt = ufl.replace(expr, {SpatialCoordinate(mesh): point}) for i, pt in enumerate(vom.coordinates.dat.data_ro): point.assign(pt) @@ -117,7 +118,7 @@ def test_interpolate_zany_into_vom(V, mesh, which, expr_at_vom): P0 = expr_at_vom.function_space() # Interpolate a Function into P0(vom) - f_at_vom = assemble(Interpolate(fexpr, P0)) + f_at_vom = assemble(interpolate(fexpr, P0)) assert numpy.allclose(f_at_vom.dat.data_ro, expr_at_vom.dat.data_ro) # Construct a Cofunction on P0(vom)* @@ -125,10 +126,26 @@ def test_interpolate_zany_into_vom(V, mesh, which, expr_at_vom): expected_action = assemble(action(Fvom, expr_at_vom)) # Interpolate a Function into Fvom - f_at_vom = assemble(Interpolate(fexpr, Fvom)) + f_at_vom = assemble(interpolate(fexpr, Fvom)) assert numpy.allclose(f_at_vom, expected_action) # Interpolate a TestFunction into Fvom - expr_vom = assemble(Interpolate(vexpr, Fvom)) + expr_vom = assemble(interpolate(vexpr, Fvom)) f_at_vom = assemble(action(expr_vom, f)) assert numpy.allclose(f_at_vom, expected_action) + + +@pytest.mark.parametrize("family,degree", [("Bernardi-Raugel", 1)]) +def test_interpolate_into_zany_piola_mapped(mesh, family, degree): + V = FunctionSpace(mesh, family, degree) + + CG = VectorFunctionSpace(mesh, "CG", 1) + RT = FunctionSpace(mesh, "RT", 1) + + x = SpatialCoordinate(mesh) + u1 = Function(CG).interpolate(x) + u2 = Function(RT).interpolate(x) + + for source in (x, u1, u2): + u = assemble(interpolate(source, V)) + assert errornorm(source, u) 
< 1E-12 diff --git a/tests/firedrake/regression/test_interpolation_manual.py b/tests/firedrake/regression/test_interpolation_manual.py index cf607a3426..cedaee54be 100644 --- a/tests/firedrake/regression/test_interpolation_manual.py +++ b/tests/firedrake/regression/test_interpolation_manual.py @@ -1,4 +1,5 @@ from firedrake import * +from firedrake.formmanipulation import split_form import pytest import numpy as np @@ -9,10 +10,10 @@ def test_interpolate_operator(): x, y = SpatialCoordinate(mesh) expression = x * y # [test_interpolate_operator 1] - # create a UFL expression for the interpolation operation. + # create a symbolic expression for the interpolation operation. f_i = interpolate(expression, V) - # numerically evaluate the interpolation to create a new Function + # assemble the interpolation to get the result f = assemble(f_i) # [test_interpolate_operator 2] assert isinstance(f, Function) @@ -40,6 +41,35 @@ def test_interpolate_operator(): f = assemble(interpolate(expression, trace)) # [test_interpolate_operator 10] + U = FunctionSpace(mesh, "CG", 3) + g = Function(U).interpolate(expression) + # [test_interpolate_operator 11] + A = assemble(interpolate(TrialFunction(U), V)) + # [test_interpolate_operator 12] + h = assemble(A @ g) + # [test_interpolate_operator 13] + assert np.allclose(h.dat.data_ro, f2.dat.data_ro) + + # [test_interpolate_operator 14] + Istar1 = interpolate(TestFunction(U), TrialFunction(V.dual())) + # [test_interpolate_operator 15] + Istar2 = adjoint(interpolate(TrialFunction(U), V)) + # [test_interpolate_operator 16] + cofunc = assemble(inner(1, TestFunction(V)) * dx) # a cofunction in V* + res1 = assemble(interpolate(TestFunction(U), cofunc)) # a cofunction in U* + # [test_interpolate_operator 17] + res2 = assemble(action(Istar1, cofunc)) # same as res1 + # [test_interpolate_operator 18] + u = Function(U) + # [test_interpolate_operator 19] + interpolate(u, cofunc) + # [test_interpolate_operator 20] + + res3 = assemble(action(Istar2, 
cofunc)) # same as res1 + assert isinstance(res1, Cofunction) + assert np.allclose(res1.dat.data_ro, res2.dat.data_ro) + assert np.allclose(res1.dat.data_ro, res3.dat.data_ro) + def test_interpolate_external(): m = UnitSquareMesh(2, 2) @@ -72,6 +102,7 @@ def mydata(points): def test_line_integral(): # [test_line_integral 1] + from firedrake.mesh import Mesh, plex_from_cell_list # Start with a simple field exactly represented in the function space over # the unit square domain. m = UnitSquareMesh(2, 2) @@ -83,8 +114,8 @@ def test_line_integral(): # Note that it only has 1 cell cells = np.asarray([[0, 1]]) vertex_coords = np.asarray([[0.0, 0.0], [1.0, 1.0]]) - plex = mesh.plex_from_cell_list(1, cells, vertex_coords, comm=m.comm) - line = mesh.Mesh(plex, dim=2) + plex = plex_from_cell_list(1, cells, vertex_coords, comm=m.comm) + line = Mesh(plex, dim=2) # [test_line_integral 2] x, y = SpatialCoordinate(line) V_line = FunctionSpace(line, "CG", 2) @@ -206,3 +237,50 @@ def correct_indent(): assert np.isclose(dest_eval05.evaluate(f_dest), 0.5) # x_src^2 + y_src^2 = 0.5 assert np.isclose(dest_eval15.evaluate(f_dest), 3.0) # x_dest + y_dest = 3.0 + + +def test_mixed_space_interpolation(): + mesh = UnitSquareMesh(2, 2) + V1 = FunctionSpace(mesh, "CG", 1) + V2 = FunctionSpace(mesh, "CG", 2) + V3 = FunctionSpace(mesh, "CG", 3) + V4 = FunctionSpace(mesh, "CG", 4) + W = V1 * V2 + U = V3 * V4 + + # [test_mixed_space_interpolation 1] + interp = interpolate(TrialFunction(U), W) + I = assemble(interp, mat_type="nest") + # [test_mixed_space_interpolation 2] + + # The block matrix structure is + # | V3 -> V1 0 | + # | 0 V4 -> V2 | + for i in range(2): + for j in range(2): + sub_mat = I.petscmat.getNestSubMatrix(i, j) + if i != j: + assert not sub_mat + continue + else: + res_block = assemble(interpolate(TrialFunction(U.sub(j)), W.sub(i))) + assert np.allclose(sub_mat[:, :], res_block.petscmat[:, :]) + assert sub_mat.type == "seqaij" + + # [test_mixed_space_interpolation 3] + u0, u1 = 
TrialFunctions(U) + expr = as_vector([u0 + u1, u0 + u1]) + interp = interpolate(expr, W) + I2 = assemble(interp, mat_type="nest") + # [test_mixed_space_interpolation 4] + + # The block matrix structure is + # | V3 -> V1 V4 -> V1 | + # | V3 -> V2 V4 -> V2 | + split_interp = dict(split_form(interp)) + for i in range(2): + for j in range(2): + interp_ij = split_interp[(i, j)] + assert isinstance(interp_ij, Interpolate) + res_block = assemble(interpolate(TrialFunction(U.sub(j)), W.sub(i), allow_missing_dofs=True)) + assert np.allclose(I2.petscmat.getNestSubMatrix(i, j)[:, :], res_block.petscmat[:, :]) diff --git a/tests/firedrake/regression/test_interpolation_nodes.py b/tests/firedrake/regression/test_interpolation_nodes.py index 13b52aa193..581bd69df6 100644 --- a/tests/firedrake/regression/test_interpolation_nodes.py +++ b/tests/firedrake/regression/test_interpolation_nodes.py @@ -40,7 +40,7 @@ def V(request, mesh, degree): def test_div_curl_preserving(V): mesh = V.mesh() - dim = mesh.geometric_dimension() + dim = mesh.geometric_dimension if dim == 2: x, y = SpatialCoordinate(mesh) elif dim == 3: @@ -66,7 +66,7 @@ def test_div_curl_preserving(V): def compute_interpolation_error(baseMesh, nref, space, degree): mh = MeshHierarchy(baseMesh, nref) - dim = mh[0].geometric_dimension() + dim = mh[0].geometric_dimension error = np.zeros((nref+1, 2)) for i, mesh in enumerate(mh): diff --git a/tests/firedrake/regression/test_interpolator_types.py b/tests/firedrake/regression/test_interpolator_types.py new file mode 100644 index 0000000000..c77900b781 --- /dev/null +++ b/tests/firedrake/regression/test_interpolator_types.py @@ -0,0 +1,173 @@ +from firedrake import * +from firedrake.interpolation import ( + MixedInterpolator, SameMeshInterpolator, CrossMeshInterpolator, + get_interpolator, VomOntoVomInterpolator, +) +import pytest + + +def params(): + params = [] + for mat_type in [None, "aij"]: + params.append(pytest.param(mat_type, None, id=f"mat_type={mat_type}")) + for 
sub_mat_type in [None, "aij", "baij"]: + params.append(pytest.param("nest", sub_mat_type, id=f"nest_sub_mat_type={sub_mat_type}")) + return params + + +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("value_shape", ["scalar", "vector"], ids=lambda v: f"fs_type={v}") +@pytest.mark.parametrize("mat_type", [None, "aij", "baij"], ids=lambda v: f"mat_type={v}") +def test_same_mesh_mattype(value_shape, mat_type): + if COMM_WORLD.size > 1: + prefix = "mpi" + else: + prefix = "seq" + mesh = UnitSquareMesh(4, 4) + if value_shape == "scalar": + fs_type = FunctionSpace + else: + fs_type = VectorFunctionSpace + V1 = fs_type(mesh, "CG", 1) + V2 = fs_type(mesh, "CG", 2) + + u = TrialFunction(V1) + + interp = interpolate(u, V2) + assert isinstance(get_interpolator(interp), SameMeshInterpolator) + res = assemble(interp, mat_type=mat_type) + + if value_shape == "scalar": + # Always seqaij for scalar + assert res.petscmat.type == prefix + "aij" + else: + # Defaults to seqaij + assert res.petscmat.type == prefix + (mat_type if mat_type else "aij") + + with pytest.raises(NotImplementedError): + # MatNest only implemented for interpolation between MixedFunctionSpaces + assemble(interp, mat_type="nest") + + +@pytest.mark.parametrize("value_shape", ["scalar", "vector"], ids=lambda v: f"fs_type={v}") +@pytest.mark.parametrize("mat_type", [None, "aij"], ids=lambda v: f"mat_type={v}") +def test_cross_mesh_mattype(value_shape, mat_type): + mesh1 = UnitSquareMesh(1, 1) + mesh2 = UnitSquareMesh(1, 1) + if value_shape == "scalar": + fs_type = FunctionSpace + else: + fs_type = VectorFunctionSpace + V1 = fs_type(mesh1, "CG", 1) + V2 = fs_type(mesh2, "CG", 1) + + u = TrialFunction(V1) + + interp = interpolate(u, V2) + assert isinstance(get_interpolator(interp), CrossMeshInterpolator) + res = assemble(interp, mat_type=mat_type) + + # only aij for cross-mesh + assert res.petscmat.type == "seqaij" + + +@pytest.mark.parametrize("value_shape", ["scalar", "vector"], ids=lambda v: f"fs_type={v}") 
+@pytest.mark.parametrize("mat_type", [None, "aij", "baij", "matfree"], ids=lambda v: f"mat_type={v}") +def test_vomtovom_mattype(value_shape, mat_type): + mesh = UnitSquareMesh(1, 1) + points = [[0.1, 0.1]] + vom = VertexOnlyMesh(mesh, points) + if value_shape == "scalar": + fs_type = FunctionSpace + else: + fs_type = VectorFunctionSpace + P0DG = fs_type(vom, "DG", 0) + P0DG_io = fs_type(vom.input_ordering, "DG", 0) + + u = TrialFunction(P0DG) + interp = interpolate(u, P0DG_io) + assert isinstance(get_interpolator(interp), VomOntoVomInterpolator) + res = assemble(interp, mat_type=mat_type) + if not mat_type or mat_type == "matfree": + assert res.petscmat.type == "python" + else: + if value_shape == "scalar": + # Always seqaij for scalar + assert res.petscmat.type == "seqaij" + else: + # Defaults to seqaij + assert res.petscmat.type == "seq" + (mat_type if mat_type else "aij") + + +@pytest.mark.parametrize("value_shape", ["scalar", "vector"], ids=lambda v: f"fs_type={v}") +@pytest.mark.parametrize("mat_type", [None, "aij", "baij"], ids=lambda v: f"mat_type={v}") +def test_point_eval_mattype(value_shape, mat_type): + mesh = UnitSquareMesh(1, 1) + points = [[0.1, 0.1], [0.5, 0.5], [0.9, 0.9]] + vom = VertexOnlyMesh(mesh, points) + if value_shape == "scalar": + fs_type = FunctionSpace + else: + fs_type = VectorFunctionSpace + P0DG = fs_type(vom, "DG", 0) + V = fs_type(mesh, "CG", 1) + + u = TrialFunction(V) + interp = interpolate(u, P0DG) + assert isinstance(get_interpolator(interp), SameMeshInterpolator) + res = assemble(interp, mat_type=mat_type) + + if value_shape == "scalar": + # Always seqaij for scalar + assert res.petscmat.type == "seqaij" + else: + # Defaults to seqaij + assert res.petscmat.type == "seq" + (mat_type if mat_type else "aij") + + +@pytest.mark.parametrize("value_shape", ["scalar", "vector"], ids=lambda v: f"fs_type={v}") +@pytest.mark.parametrize("mat_type,sub_mat_type", params()) +def test_mixed_same_mesh_mattype(value_shape, mat_type, 
sub_mat_type): + mesh = UnitSquareMesh(1, 1) + if value_shape == "scalar": + fs_type = FunctionSpace + else: + fs_type = VectorFunctionSpace + V1 = fs_type(mesh, "CG", 1) + V2 = fs_type(mesh, "CG", 2) + V3 = fs_type(mesh, "CG", 3) + V4 = fs_type(mesh, "CG", 4) + + W = V1 * V2 + U = V3 * V4 + + w = TrialFunction(W) + w0, w1 = split(w) + if value_shape == "scalar": + expr = as_vector([w0 + w1, w0 + w1]) + else: + w00, w01 = split(w0) + w10, w11 = split(w1) + expr = as_vector([w00 + w10, w00 + w10, w01 + w11, w01 + w11]) + interp = interpolate(expr, U) + assert isinstance(get_interpolator(interp), MixedInterpolator) + res = assemble(interp, mat_type=mat_type, sub_mat_type=sub_mat_type) + if not mat_type or mat_type == "aij": + # Defaults to seqaij + assert res.petscmat.type == "seqaij" + else: + assert res.petscmat.type == "nest" + for (i, j) in [(0, 0), (0, 1), (1, 0), (1, 1)]: + sub_mat = res.petscmat.getNestSubMatrix(i, j) + if value_shape == "scalar": + # Always seqaij for scalar + assert sub_mat.type == "seqaij" + else: + # matnest sub_mat_type defaults to aij + assert sub_mat.type == "seq" + (sub_mat_type if sub_mat_type else "aij") + + with pytest.raises(NotImplementedError): + assemble(interp, mat_type="baij") + + with pytest.raises(NotImplementedError): + assemble(interp, mat_type="matfree") diff --git a/tests/firedrake/regression/test_jax_coupling.py b/tests/firedrake/regression/test_jax_coupling.py index 0982d93e84..4e2fa02e98 100644 --- a/tests/firedrake/regression/test_jax_coupling.py +++ b/tests/firedrake/regression/test_jax_coupling.py @@ -1,5 +1,5 @@ import pytest - +import numpy as np from firedrake import * from pyadjoint.tape import get_working_tape, pause_annotation diff --git a/tests/firedrake/regression/test_linesmoother.py b/tests/firedrake/regression/test_linesmoother.py index 049421ddcc..720a084e07 100644 --- a/tests/firedrake/regression/test_linesmoother.py +++ b/tests/firedrake/regression/test_linesmoother.py @@ -46,8 +46,8 @@ def 
backend(request): @pytest.mark.parametrize("rhs", ["form_rhs", "cofunc_rhs"]) def test_linesmoother(mesh, S1family, expected, backend, rhs): base_cell = mesh._base_mesh.ufl_cell() - S2family = "DG" if base_cell.is_simplex() else "DQ" - DGfamily = "DG" if mesh.ufl_cell().is_simplex() else "DQ" + S2family = "DG" if base_cell.is_simplex else "DQ" + DGfamily = "DG" if mesh.ufl_cell().is_simplex else "DQ" nits = [] for degree in range(2): S1 = FiniteElement(S1family, base_cell, degree+1) diff --git a/tests/firedrake/regression/test_load_mesh.py b/tests/firedrake/regression/test_load_mesh.py index 48a63f8727..a6bc2ff2f9 100644 --- a/tests/firedrake/regression/test_load_mesh.py +++ b/tests/firedrake/regression/test_load_mesh.py @@ -1,20 +1,125 @@ +from math import pi + from firedrake import * import numpy as np import pytest from os.path import abspath, dirname, join +from pathlib import Path path = join(abspath(dirname(__file__)), '..', 'meshes') -def load_mesh(filename): - m = Mesh(join(path, filename)) - return m +def load_mesh(filename, use_path_object): + if use_path_object: + return Mesh(Path(path) / filename) + + return Mesh(join(path, filename)) @pytest.mark.parametrize( - 'filename', ['square.msh', 'square_binary.msh']) -def test_load_mesh(filename): - m = load_mesh(filename) + 'filename', [ + 'square.msh', + 'square_binary.msh', + ]) +@pytest.mark.parametrize('use_path_object', [False, True]) +def test_load_mesh(filename, use_path_object): + m = load_mesh(filename, use_path_object) v = assemble(1*dx(domain=m)) assert np.allclose(v, 1) + + +# --- Periodic Gmsh meshes --- + + +@pytest.fixture(params=["p2d", "p2d_xy"]) +def periodic_2d_mesh(request): + return Mesh(join(path, f"{request.param}.msh")) + + +def test_periodic_2d_coordinates(periodic_2d_mesh): + """Mesh uses a DG coordinate element after loading.""" + elem = periodic_2d_mesh.ufl_coordinate_element() + assert elem.family() == "Discontinuous Lagrange" + + +@pytest.mark.parallel([1, 2]) +def 
test_periodic_helmholtz_2d_x(): + """Helmholtz on x-periodic rectangle [0,0.6]x[0,0.5]. + + Manufactured solution u_exact = cos(2*pi*x/0.6) * y*(0.5 - y). + Periodic in x with non-constant boundary data, zero on y boundaries. + """ + mesh = Mesh(join(path, "p2d.msh")) + V = FunctionSpace(mesh, "CG", 1) + x = SpatialCoordinate(mesh) + + Lx = 0.6 + u_exact_expr = cos(2 * pi * x[0] / Lx) * x[1] * (0.5 - x[1]) + + u = TrialFunction(V) + v = TestFunction(V) + a = (inner(grad(u), grad(v)) + inner(u, v)) * dx + L = a(v, u_exact_expr) + + uh = Function(V) + bc = DirichletBC(V, Constant(0), [1, 3]) + solve(a == L, uh, bcs=bc, solver_parameters={"ksp_type": "cg"}) + + assert errornorm(u_exact_expr, uh, "L2") < 0.005 + + +@pytest.mark.parallel([1, 2]) +def test_periodic_2d_xy_solve(): + """Helmholtz on doubly-periodic rectangle [0,0.6]x[0,0.5]. + + Trigonometric manufactured solution + u_exact = cos(2*pi*x/0.6) * cos(2*pi*y/0.5), periodic in both + x and y with non-constant boundary data. No boundary conditions + needed. + + Uses a wider tolerance than the other tests because the + trigonometric solution requires fine resolution per wavelength. + """ + mesh = Mesh(join(path, "p2d_xy.msh")) + V = FunctionSpace(mesh, "CG", 1) + x = SpatialCoordinate(mesh) + + Lx, Ly = 0.6, 0.5 + u_exact_expr = cos(2 * pi * x[0] / Lx) * cos(2 * pi * x[1] / Ly) + + u = TrialFunction(V) + v = TestFunction(V) + a = (inner(grad(u), grad(v)) + inner(u, v)) * dx + L = a(v, u_exact_expr) + + uh = Function(V) + solve(a == L, uh) + + assert errornorm(u_exact_expr, uh, "L2") / norm(u_exact_expr, "L2") < 0.15 + + +@pytest.mark.parallel([1, 2]) +def test_periodic_3d_solve(): + """Helmholtz on x-periodic box [0,1]^3. + + Manufactured polynomial solution + u_exact = 42 + y*(1-y)*z*(1-z), periodic in x (constant in x), + zero on y/z boundaries. CG4 reproduces the polynomial exactly. 
+ """ + mesh = Mesh(join(path, "p3d.msh")) + V = FunctionSpace(mesh, "CG", 4) + x = SpatialCoordinate(mesh) + + u_exact_expr = 42 + x[1] * (1 - x[1]) * x[2] * (1 - x[2]) + + u = TrialFunction(V) + v = TestFunction(V) + a = (inner(grad(u), grad(v)) + inner(u, v)) * dx + L = a(v, u_exact_expr) + + uh = Function(V) + bc = DirichletBC(V, u_exact_expr, [3, 4, 5, 6]) + solve(a == L, uh, bcs=bc, solver_parameters={"ksp_type": "cg"}) + + assert errornorm(u_exact_expr, uh, "L2") < 1e-12 diff --git a/tests/firedrake/regression/test_mark_entities.py b/tests/firedrake/regression/test_mark_entities.py index 5788e33c2b..c525f6be59 100644 --- a/tests/firedrake/regression/test_mark_entities.py +++ b/tests/firedrake/regression/test_mark_entities.py @@ -41,9 +41,9 @@ def test_mark_entities_mark_points_with_function_array(): assert abs(v - (4 * .5 + 4 * .5 * sqrt(2))) < 1.e-10 v = assemble(Constant(1) * dS(my_facet_label, domain=mesh)) assert abs(v - (1 * .5 + 1 * .5 * sqrt(2))) < 1.e-10 - v = assemble(Constant(1) * dS(unmarked, domain=mesh)) + v = assemble(Constant(1) * dS(UNMARKED, domain=mesh)) assert abs(v - (3 * .5 + 3 * .5 * sqrt(2))) < 1.e-10 - v = assemble(Constant(1) * dS((my_facet_label, unmarked), domain=mesh)) + v = assemble(Constant(1) * dS((my_facet_label, UNMARKED), domain=mesh)) assert abs(v - (4 * .5 + 4 * .5 * sqrt(2))) < 1.e-10 @@ -66,7 +66,7 @@ def test_mark_entities_overlapping_facet_subdomains(): assert abs(v - 1.5) < 1.e-10 v = assemble(Constant(1) * ds(removed_label, domain=mesh)) assert abs(v - 0.0) < 1.e-10 - v = assemble(Constant(1) * ds(unmarked, domain=mesh)) + v = assemble(Constant(1) * ds(UNMARKED, domain=mesh)) assert abs(v - 1.0) < 1.e-10 @@ -103,7 +103,7 @@ def test_mark_entities_mesh_mark_entities_2d(): plex = mesh.topology.topology_dm label = plex.getLabel(label_name) assert label.getStratumIS(label_value).getSize() == 2 - assert all(label.getStratumIS(label_value).getIndices() == [20, 30]) + assert all(label.getStratumIS(label_value).getIndices() 
== [20, 23]) def test_mark_entities_mesh_mark_entities_1d(): diff --git a/tests/firedrake/regression/test_mass_lumping.py b/tests/firedrake/regression/test_mass_lumping.py index 2712568c94..f81888e3ba 100644 --- a/tests/firedrake/regression/test_mass_lumping.py +++ b/tests/firedrake/regression/test_mass_lumping.py @@ -43,7 +43,7 @@ def mesh(request): def test_spectral_mass_lumping(mesh, degree): V = FunctionSpace(mesh, "Lagrange", degree) - dimension = mesh.topological_dimension() + dimension = mesh.topological_dimension quad_rule = gauss_lobatto_legendre_cube_rule(dimension=dimension, degree=degree) u = TrialFunction(V) diff --git a/tests/firedrake/regression/test_matrix.py b/tests/firedrake/regression/test_matrix.py index 48680747fe..32623c418c 100644 --- a/tests/firedrake/regression/test_matrix.py +++ b/tests/firedrake/regression/test_matrix.py @@ -1,5 +1,5 @@ from firedrake import * -from firedrake import matrix +from firedrake.matrix import Matrix, AssembledMatrix import pytest @@ -33,7 +33,7 @@ def mat_type(request): def test_assemble_returns_matrix(a): A = assemble(a) - assert isinstance(A, matrix.Matrix) + assert isinstance(A, Matrix) def test_solve_with_assembled_matrix(a): diff --git a/tests/firedrake/regression/test_matrix_free.py b/tests/firedrake/regression/test_matrix_free.py index 7da3efd6c5..aaf9f60f49 100644 --- a/tests/firedrake/regression/test_matrix_free.py +++ b/tests/firedrake/regression/test_matrix_free.py @@ -366,3 +366,45 @@ def test_matrix_free_fieldsplit_with_real(): }} stokes_solver = LinearVariationalSolver(stokes_problem, solver_parameters=opts) stokes_solver.solve() + + +@pytest.mark.parametrize("shape", ["scalar", "mixed"]) +def test_sub_matrix_not_subfield(shape): + mesh = UnitSquareMesh(2, 2) + if shape == "mixed": + V = VectorFunctionSpace(mesh, "CG", 2) + Q = FunctionSpace(mesh, "CG", 1) + Z = V * Q + u, p = TrialFunctions(Z) + v, q = TestFunctions(Z) + a = inner(grad(u), grad(v)) * dx - inner(p, div(v))*dx - inner(div(u), q)*dx 
+ bcs = DirichletBC(Z.sub(0), 0, (1, 3)) + + elif shape == "scalar": + V = FunctionSpace(mesh, "CG", 1) + u = TrialFunction(V) + v = TestFunction(V) + a = inner(grad(u), grad(v)) * dx + bcs = DirichletBC(V, 0, (1, 3)) + + args = a.arguments() + rows = PETSc.IS().createGeneral(range(0, args[0].function_space().dim(), 2)) + cols = PETSc.IS().createGeneral(range(1, args[1].function_space().dim(), 2)) + + A = assemble(a, bcs=bcs, mat_type="matfree") + Amat = A.petscmat + Asub = Amat.createSubMatrix(rows, cols) + x, y = Asub.createVecs() + + m, n = Asub.getSize() + Asub_dense = np.zeros((m, n)) + for i in range(n): + x.set(0.0) + x[i] = 1.0 + Asub.mult(x, y) + Asub_dense[:, i] = y[:] + + A = assemble(a, bcs=bcs, mat_type="aij") + Amat = A.petscmat + Asub_aij = Amat.createSubMatrix(rows, cols) + assert np.allclose(Asub_aij[:, :], Asub_dense) diff --git a/tests/firedrake/regression/test_mesh_from_plex.py b/tests/firedrake/regression/test_mesh_from_plex.py index f639187ebc..d1bc7d494e 100644 --- a/tests/firedrake/regression/test_mesh_from_plex.py +++ b/tests/firedrake/regression/test_mesh_from_plex.py @@ -13,8 +13,8 @@ def get_plex_with_update_coordinates(mesh): """ Update the coordinates of the dmplex in mesh and return a copy of the dmplex """ - tdim = mesh.topological_dimension() - gdim = mesh.geometric_dimension() + tdim = mesh.topological_dimension + gdim = mesh.geometric_dimension entity_dofs = np.zeros(tdim + 1, dtype=np.int32) entity_dofs[0] = gdim coord_section, _ = mesh.create_section(entity_dofs) diff --git a/tests/firedrake/regression/test_mesh_generation.py b/tests/firedrake/regression/test_mesh_generation.py index 4cb7cc109a..824cd09277 100644 --- a/tests/firedrake/regression/test_mesh_generation.py +++ b/tests/firedrake/regression/test_mesh_generation.py @@ -74,7 +74,8 @@ def test_tensor_box(): assert abs(integrate_one(TensorBoxMesh(xcoords, ycoords, zcoords)) - 0.6) < 1e-3 -def run_one_element_advection(): +@pytest.mark.parallel([1, 2]) +def 
test_one_element_advection(): nx = 20 m = PeriodicRectangleMesh(nx, 1, 1.0, 1.0, quadrilateral=True) nlayers = 20 @@ -122,16 +123,8 @@ def run_one_element_advection(): assert assemble(inner(q0-q_init, q0-q_init)*dx)**0.5 < 0.005 -def test_one_element_advection(): - run_one_element_advection() - - -@pytest.mark.parallel(nprocs=2) -def test_one_element_advection_parallel(): - run_one_element_advection() - - -def run_one_element_mesh(): +@pytest.mark.parallel([1, 3]) +def test_one_element_mesh(): mesh = PeriodicRectangleMesh(20, 1, Lx=1.0, Ly=1.0, quadrilateral=True) x = SpatialCoordinate(mesh) V = FunctionSpace(mesh, "CG", 1) @@ -160,19 +153,12 @@ def run_one_element_mesh(): assert err > 1.0e-3 -def test_one_element_mesh(): - run_one_element_mesh() - - -@pytest.mark.parallel(nprocs=3) -def test_one_element_mesh_parallel(): - run_one_element_mesh() - - -def test_box(): - assert abs(integrate_one(BoxMesh(3, 3, 3, 1, 2, 3)) - 6) < 1e-3 +@pytest.mark.parametrize("hexahedral", [False, True]) +def test_box(hexahedral): + assert abs(integrate_one(BoxMesh(3, 3, 3, 1, 2, 3, hexahedral=hexahedral)) - 6) < 1e-3 +@pytest.mark.parallel([1, 3]) def test_periodic_unit_cube(): assert abs(integrate_one(PeriodicUnitCubeMesh(3, 3, 3)) - 1) < 1e-3 @@ -239,11 +225,6 @@ def test_tensor_box_parallel(): assert abs(integrate_one(TensorBoxMesh(xcoords, ycoords, zcoords)) - 0.6) < 1e-3 -@pytest.mark.parallel -def test_periodic_unit_cube_parallel(): - assert abs(integrate_one(PeriodicUnitCubeMesh(3, 3, 3)) - 1) < 1e-3 - - def assert_num_exterior_facets_equals_zero(m): # Need to initialise the mesh so that exterior facets have been # built. 
@@ -471,7 +452,7 @@ def test_boxmesh_kind(kind, num_cells): assert m.num_cells() == num_cells -@pytest.mark.parallel(nprocs=2) +@pytest.mark.parallel(2) def test_periodic_unit_cube_hex_cell(): mesh = PeriodicUnitCubeMesh(3, 3, 3, directions=[True, True, False], hexahedral=True) x, y, z = SpatialCoordinate(mesh) @@ -482,12 +463,9 @@ def test_periodic_unit_cube_hex_cell(): assert error < 1.e-30 -@pytest.mark.parallel(nprocs=4) +@pytest.mark.parallel(4) def test_periodic_unit_cube_hex_facet(): mesh = PeriodicUnitCubeMesh(3, 3, 3, directions=[True, False, False], hexahedral=True) - for subdomain_id in [1, 2]: - area = assemble(Constant(1.) * dS(domain=mesh, subdomain_id=subdomain_id)) - assert abs(area - 1.0) < 1.e-15 for subdomain_id in [3, 4, 5, 6]: area = assemble(Constant(1.) * ds(domain=mesh, subdomain_id=subdomain_id)) assert abs(area - 1.0) < 1.e-15 diff --git a/tests/firedrake/regression/test_mesh_overlaps.py b/tests/firedrake/regression/test_mesh_overlaps.py index ac766b4fad..fed7e1bc3e 100644 --- a/tests/firedrake/regression/test_mesh_overlaps.py +++ b/tests/firedrake/regression/test_mesh_overlaps.py @@ -98,3 +98,31 @@ def test_override_distribution_parameters(overlap): assert mesh.num_cells() == 2 assert fine_mesh.num_cells() == 4 + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize("reorder", [False, True]) +def test_submesh_distribution_parameters(overlap, reorder): + # Test that mesh._distribution_parameters and mesh._did_reordering + # are propagated + partition = True + params = {"partition": partition, + "overlap_type": overlap} + mesh = UnitSquareMesh(2, 2, reorder=reorder, + distribution_parameters=params) + orig_params = mesh._distribution_parameters + did_reordering = mesh._did_reordering + assert did_reordering == reorder + + x, *_ = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DG", 0) + ind = Function(DG0).interpolate(conditional(lt(x, 0.5), 1, 0)) + label = 111 + rmesh = RelabeledMesh(mesh, [ind], [label]) + assert 
rmesh._distribution_parameters == orig_params + assert rmesh._did_reordering == did_reordering + + dim = mesh.topological_dimension + submesh = Submesh(rmesh, dim, label) + assert submesh._distribution_parameters == orig_params + assert submesh._did_reordering == did_reordering diff --git a/tests/firedrake/regression/test_mtw.py b/tests/firedrake/regression/test_mtw.py index dbe655ea40..921eb40e85 100755 --- a/tests/firedrake/regression/test_mtw.py +++ b/tests/firedrake/regression/test_mtw.py @@ -1,20 +1,28 @@ from firedrake import * +import pytest import numpy as np -convergence_orders = lambda x: np.log2(np.array(x)[:-1] / np.array(x)[1:]) - - -def test_mtw(): +@pytest.fixture(params=(2, 3)) +def mh(request): + dim = request.param N_base = 2 - msh = UnitSquareMesh(N_base, N_base) - mh = MeshHierarchy(msh, 5) + if dim == 2: + refine = 3 + msh = UnitSquareMesh(N_base, N_base) + elif dim == 3: + refine = 2 + msh = UnitCubeMesh(N_base, N_base, N_base) + else: + raise ValueError("Unexpected dimension") + mh = MeshHierarchy(msh, refine) V = FunctionSpace(msh, msh.coordinates.ufl_element()) eps = Constant(1 / 2**(N_base-1)) - x, y = SpatialCoordinate(msh) + x, y, *z = SpatialCoordinate(msh) + new = Function(V).interpolate(as_vector([x + eps*sin(2*pi*x)*sin(2*pi*y), - y - eps*sin(2*pi*x)*sin(2*pi*y)])) + y - eps*sin(2*pi*x)*sin(2*pi*y), *z])) # And propagate to refined meshes coords = [new] @@ -26,63 +34,86 @@ def test_mtw(): for msh, coord in zip(mh, coords): msh.coordinates.assign(coord) - - params = {"snes_type": "newtonls", - "snes_linesearch_type": "basic", - "snes_monitor": None, - "mat_type": "aij", - "snes_max_it": 10, - "snes_lag_jacobian": -2, - "snes_lag_preconditioner": -2, - "ksp_type": "preonly", - "pc_type": "lu", - "pc_factor_shift_type": "inblocks", - "snes_rtol": 1e-16, - "snes_atol": 1e-25} - + return mh + + +def mesh_sizes(mh): + mesh_size = [] + for msh in mh: + DG0 = FunctionSpace(msh, "DG", 0) + h = Function(DG0).interpolate(CellDiameter(msh)) + 
with h.dat.vec as hvec: + _, maxh = hvec.max() + mesh_size.append(maxh) + return mesh_size + + +def convergence_orders(error, h): + return np.diff(np.log2(error)) / np.diff(np.log2(h)) + + +def test_mtw_darcy_convergence(mh): + sp = { + "ksp_monitor": None, + "mat_type": "matfree", + "pmat_type": "nest", + "ksp_type": "minres", + "ksp_norm_type": "preconditioned", + "pc_type": "fieldsplit", + "pc_fieldsplit_type": "additive", + "fieldsplit_ksp_type": "preonly", + "fieldsplit_0_pc_type": "lu", + "fieldsplit_0_pc_factor_mat_solver_type": "mumps", + "fieldsplit_1_pc_type": "jacobi", + } + gamma = Constant(1E4) l2_u = [] l2_p = [] for msh in mh[1:]: - x, y = SpatialCoordinate(msh) + x, y, *z = SpatialCoordinate(msh) pex = sin(pi * x) * sin(2 * pi * y) + if z: + pex *= sin(pi*z[0]) + uex = -grad(pex) f = div(uex) - V = FunctionSpace(msh, "MTW", 3) - W = FunctionSpace(msh, "DG", 0) - Z = V * W + V = FunctionSpace(msh, "MTW", 1) + Q = FunctionSpace(msh, "DG", 0) + Z = V * Q - up = Function(Z) - u, p = split(up) - v, w = TestFunctions(Z) + u, p = TrialFunctions(Z) + v, q = TestFunctions(Z) + + a = -inner(u, v) * dx + inner(p, div(v)) * dx + inner(div(u), q) * dx + L = inner(f, q) * dx + + Jp = inner(u, v)*dx + inner(div(u), gamma*div(v))*dx + inner(p/gamma, q)*dx - F = (inner(u, v) * dx - inner(p, div(v)) * dx - + inner(div(u), w) * dx - inner(f, w) * dx) + up = Function(Z) - solve(F == 0, up, solver_parameters=params) + solve(a == L, up, Jp=Jp, solver_parameters=sp) u, p = up.subfunctions l2_u.append(errornorm(uex, u)) l2_p.append(errornorm(pex, p)) - assert min(convergence_orders(l2_u)) > 1.8 - assert min(convergence_orders(l2_p)) > 0.8 + h = mesh_sizes(mh[1:]) + assert min(convergence_orders(l2_u, h)) > 1.75 + assert min(convergence_orders(l2_p, h)) > 0.8 def test_mtw_interior_facet(): mesh = UnitSquareMesh(4, 4) - V = FunctionSpace(mesh, mesh.coordinates.ufl_element()) eps = Constant(0.5 / 2**3) x, y = SpatialCoordinate(mesh) - new = 
Function(V).interpolate(as_vector([x + eps*sin(2*pi*x)*sin(2*pi*y), - y - eps*sin(2*pi*x)*sin(2*pi*y)])) - mesh = Mesh(new) + mesh.coordinates.interpolate(as_vector([x + eps*sin(2*pi*x)*sin(2*pi*y), + y - eps*sin(2*pi*x)*sin(2*pi*y)])) - V = FunctionSpace(mesh, 'Mardal-Tai-Winther', 3) + V = FunctionSpace(mesh, 'Mardal-Tai-Winther', 1) - x, y = SpatialCoordinate(mesh) - uh = project(as_vector((x+y, 2*x-y)), V) + uh = Function(V).interpolate(as_vector((x+y, 2*x-y))) volume = assemble(div(uh)*dx) diff --git a/tests/firedrake/regression/test_multiple_domains.py b/tests/firedrake/regression/test_multiple_domains.py index 86766aba79..fb7c9bf04a 100644 --- a/tests/firedrake/regression/test_multiple_domains.py +++ b/tests/firedrake/regression/test_multiple_domains.py @@ -41,10 +41,10 @@ def test_mismatching_meshes_indexed_function(mesh1, mesh3): with pytest.raises(NotImplementedError): project(d1, target) - with pytest.raises(NotImplementedError): + with pytest.raises(MismatchingDomainError): assemble(inner(d1, TestFunction(V2))*dx(domain=mesh3)) - with pytest.raises(NotImplementedError): + with pytest.raises(MismatchingDomainError): assemble(inner(d1, TestFunction(V2))*dx(domain=mesh1)) @@ -58,11 +58,6 @@ def test_mismatching_meshes_real_space(mesh1, mesh3): project(donor, target) -def test_mismatching_topologies(mesh1, mesh3): - with pytest.raises(NotImplementedError): - assemble(1*dx(domain=mesh1) + 2*dx(domain=mesh3)) - - def test_functional(mesh1, mesh2): c = Constant(1) @@ -73,17 +68,21 @@ def test_functional(mesh1, mesh2): val = assemble(c*dx(domain=mesh2)) - assert np.allclose(val, cell_volume * (0.5**mesh1.topological_dimension())) + assert np.allclose(val, cell_volume * (0.5**mesh1.topological_dimension)) val = assemble(c*dx(domain=mesh1) + c*dx(domain=mesh2)) - assert np.allclose(val, cell_volume * (1 + 0.5**mesh1.topological_dimension())) + assert np.allclose(val, cell_volume * (1 + 0.5**mesh1.topological_dimension)) + + +def cell_measure(primal, secondary): + 
return Measure("dx", primal, intersect_measures=(Measure("dx", secondary),)) @pytest.mark.parametrize("form,expect", [ (lambda v, mesh1, mesh2: conj(v)*dx(domain=mesh1), lambda vol, dim: vol), - (lambda v, mesh1, mesh2: conj(v)*dx(domain=mesh2), lambda vol, dim: vol*(0.5**dim)), - (lambda v, mesh1, mesh2: conj(v)*dx(domain=mesh1) + conj(v)*dx(domain=mesh2), lambda vol, dim: vol*(1 + 0.5**dim)) + (lambda v, mesh1, mesh2: conj(v)*cell_measure(mesh2, mesh1), lambda vol, dim: vol*(0.5**dim)), + (lambda v, mesh1, mesh2: conj(v)*dx(domain=mesh1) + conj(v)*cell_measure(mesh2, mesh1), lambda vol, dim: vol*(1 + 0.5**dim)) ], ids=["conj(v)*dx(mesh1)", "conj(v)*dx(mesh2)", "conj(v)*(dx(mesh1) + dx(mesh2)"]) def test_one_form(mesh1, mesh2, form, expect): V = FunctionSpace(mesh1, "DG", 0) @@ -91,7 +90,7 @@ def test_one_form(mesh1, mesh2, form, expect): v = TestFunction(V) cell_volume = mesh1.coordinates.function_space().finat_element.cell.volume() - dim = mesh1.topological_dimension() + dim = mesh1.topological_dimension form = form(v, mesh1, mesh2) expect = expect(cell_volume, dim) @@ -102,8 +101,8 @@ def test_one_form(mesh1, mesh2, form, expect): @pytest.mark.parametrize("form,expect", [ (lambda u, v, mesh1, mesh2: inner(u, v)*dx(domain=mesh1), lambda vol, dim: vol), - (lambda u, v, mesh1, mesh2: inner(u, v)*dx(domain=mesh2), lambda vol, dim: vol*(0.5**dim)), - (lambda u, v, mesh1, mesh2: inner(u, v)*dx(domain=mesh1) + inner(u, v)*dx(domain=mesh2), lambda vol, dim: vol*(1 + 0.5**dim)) + (lambda u, v, mesh1, mesh2: inner(u, v)*cell_measure(mesh2, mesh1), lambda vol, dim: vol*(0.5**dim)), + (lambda u, v, mesh1, mesh2: inner(u, v)*dx(domain=mesh1) + inner(u, v)*cell_measure(mesh2, mesh1), lambda vol, dim: vol*(1 + 0.5**dim)) ], ids=["inner(u, v)*dx(mesh1)", "inner(u, v)*dx(mesh2)", "inner(u, v)*(dx(mesh1) + dx(mesh2)"]) def test_two_form(mesh1, mesh2, form, expect): V = FunctionSpace(mesh1, "DG", 0) @@ -112,10 +111,111 @@ def test_two_form(mesh1, mesh2, form, expect): u = 
TrialFunction(V) cell_volume = mesh1.coordinates.function_space().finat_element.cell.volume() - dim = mesh1.topological_dimension() + dim = mesh1.topological_dimension form = form(u, v, mesh1, mesh2) expect = expect(cell_volume, dim) val = assemble(form).M.values assert np.allclose(val, expect) + + +def test_multi_domain_solve(): + mesh1 = UnitSquareMesh(7, 7, quadrilateral=True) + x1, y1 = SpatialCoordinate(mesh1) + mesh2 = UnitSquareMesh(8, 8) + x2, y2 = SpatialCoordinate(mesh2) + V1 = FunctionSpace(mesh1, "Q", 3) + V2 = FunctionSpace(mesh2, "CG", 2) + V = V1 * V2 + + u1, u2 = TrialFunctions(V) + v1, v2 = TestFunctions(V) + + a = ( + inner(grad(u1), grad(v1))*dx(domain=mesh1) + + inner(grad(u2), grad(v2))*dx(domain=mesh2) + ) + + u_exact_expr1 = sin(pi * x1) * sin(pi * y1) + u_exact_expr2 = x2 * y2 * (1 - x2) * (1 - y2) + f1 = -div(grad(u_exact_expr1)) + f2 = -div(grad(u_exact_expr2)) + + L = ( + inner(f1, v1)*dx(domain=mesh1) + + inner(f2, v2)*dx(domain=mesh2) + ) + + bc1 = DirichletBC(V.sub(0), 0, "on_boundary") + bc2 = DirichletBC(V.sub(1), 0, "on_boundary") + u_sol = Function(V) + solve(a == L, u_sol, bcs=[bc1, bc2]) + u1_sol, u2_sol = u_sol.subfunctions + + u_exact = Function(V) + u1_exact, u2_exact = u_exact.subfunctions + u1_exact.interpolate(u_exact_expr1) + u2_exact.interpolate(u_exact_expr2) + + err1 = errornorm(u1_exact, u1_sol) + assert err1 < 1e-5 + err2 = errornorm(u2_exact, u2_sol) + assert err2 < 1e-5 + + +def test_multi_domain_assemble(): + mesh1 = UnitSquareMesh(1, 1, quadrilateral=True) + mesh2 = UnitSquareMesh(2, 2) + V1 = FunctionSpace(mesh1, "Q", 1) + V2 = FunctionSpace(mesh2, "CG", 1) + V = V1 * V2 + + u = TrialFunctions(V) + v = TestFunctions(V) + f = split(Function(V)) + + for i, j in [(0, 1), (1, 0)]: + a1 = inner(u[i], v[j])*dx(domain=mesh1) + with pytest.raises(MismatchingDomainError): + assemble(a1) + a2 = inner(u[i], v[j])*dx(domain=mesh2) + with pytest.raises(MismatchingDomainError): + assemble(a2) + l1 = inner(f[i], 
v[j])*dx(domain=mesh1) + with pytest.raises(MismatchingDomainError): + assemble(l1) + l2 = inner(f[i], v[j])*dx(domain=mesh2) + with pytest.raises(MismatchingDomainError): + assemble(l2) + + for i, j in [(0, 0), (1, 1)]: + a = inner(u[i], v[j])*dx(domain=mesh1) + if i == 1: + with pytest.raises(MismatchingDomainError): + assemble(a) + continue + A = assemble(a) + assert A.M.values.shape == (V.dim(), V.dim()) + + a = inner(u[0], v[0])*dx(domain=mesh1) + inner(u[0], v[1])*dx(domain=mesh2) + with pytest.raises(MismatchingDomainError): + assemble(a) + + a = inner(u[0], v[0])*dx(domain=mesh1) + inner(u[1], v[1])*dx(domain=mesh2) + A = assemble(a) + assert A.M.values.shape == (V.dim(), V.dim()) + + +def test_multidomain_assign_function(mesh1, mesh3): + V1 = FunctionSpace(mesh1, "DG", 0) + V2 = FunctionSpace(mesh3, "CG", 1) + Z = V1 * V2 + z = Function(Z) + z.subfunctions[0].assign(42) + z.subfunctions[1].assign(67) + + w = Function(Z) + w.assign(z) + for zsub, wsub in zip(z.subfunctions, w.subfunctions): + assert np.allclose(zsub.dat.data, wsub.dat.data) diff --git a/tests/firedrake/regression/test_netgen.py b/tests/firedrake/regression/test_netgen.py index dca547a9f0..8524b52748 100644 --- a/tests/firedrake/regression/test_netgen.py +++ b/tests/firedrake/regression/test_netgen.py @@ -3,6 +3,37 @@ import pytest +@pytest.mark.skipnetgen +@pytest.mark.parallel([1, 2]) +def test_netgen_csg_mesh_high_order(): + from netgen.geom2d import Circle, CSG2d + geo = CSG2d() + geo.Add(Circle(center=(0, 0), radius=1.0, mat="mat1", bc="circle")) + ngmesh = geo.GenerateMesh(maxh=0.75) + + # Test that setting the degree in netgen_flags produces a high-order mesh + order = 3 + mesh1 = Mesh(ngmesh, netgen_flags={"degree": order}) + assert mesh1.coordinates.function_space().ufl_element().degree() == order + dim = mesh1.topological_dimension + DG0 = FunctionSpace(mesh1, "DG", 0) + markers = Function(DG0) + + # Test mesh refinement: 1 refinement + markers.assign(1) + mesh2 = 
mesh1.refine_marked_elements(markers) + assert FunctionSpace(mesh1, "DG", 0).dim() * 2**dim == FunctionSpace(mesh2, "DG", 0).dim() + # Test that refining a high-order mesh gives a high-order mesh + assert mesh2.coordinates.function_space().ufl_element().degree() == order + + # Test mesh refinement: 2 refinements + markers.assign(2) + mesh3 = mesh1.refine_marked_elements(markers) + assert FunctionSpace(mesh1, "DG", 0).dim() * 4**dim == FunctionSpace(mesh3, "DG", 0).dim() + # Test that refining a high-order mesh gives a high-order mesh + assert mesh3.coordinates.function_space().ufl_element().degree() == order + + def square_geometry(h): from netgen.geom2d import SplineGeometry geo = SplineGeometry() @@ -92,7 +123,8 @@ def poisson3D(h, degree=2): @pytest.mark.skipnetgen -def test_firedrake_Poisson_netgen(): +@pytest.mark.parallel([1, 2]) +def test_netgen_csg_poisson_2d(): diff = np.array([poisson(h)[0] for h in [1/2, 1/4, 1/8]]) print("l2 error norms:", diff) conv = np.log2(diff[:-1] / diff[1:]) @@ -101,17 +133,8 @@ def test_firedrake_Poisson_netgen(): @pytest.mark.skipnetgen -@pytest.mark.parallel -def test_firedrake_Poisson_netgen_parallel(): - diff = np.array([poisson(h)[0] for h in [1/2, 1/4, 1/8]]) - print("l2 error norms:", diff) - conv = np.log2(diff[:-1] / diff[1:]) - print("convergence order:", conv) - assert (np.array(conv) > 2.8).all() - - -@pytest.mark.skipnetgen -def test_firedrake_Poisson3D_netgen(): +@pytest.mark.parallel([1, 2]) +def test_netgen_csg_poisson_3d(): diff = np.array([poisson3D(h) for h in [1, 1/2, 1/4]]) print("l2 error norms:", diff) conv = np.log2(diff[:-1] / diff[1:]) @@ -120,7 +143,7 @@ def test_firedrake_Poisson3D_netgen(): @pytest.mark.skipnetgen -def test_firedrake_integral_2D_netgen(): +def test_netgen_csg_2d_integral(): from netgen.geom2d import SplineGeometry import netgen @@ -142,7 +165,7 @@ def test_firedrake_integral_2D_netgen(): @pytest.mark.skipnetgen -def test_firedrake_integral_3D_netgen(): +def 
test_netgen_csg_3d_integral(): from netgen.csg import CSGeometry, OrthoBrick, Pnt import netgen @@ -167,14 +190,15 @@ def test_firedrake_integral_3D_netgen(): @pytest.mark.skipnetgen -def test_firedrake_integral_ball_netgen(): +@pytest.mark.parallel([1, 2]) +def test_netgen_csg_manifold(): from netgen.csg import CSGeometry, Pnt, Sphere from netgen.meshing import MeshingParameters from netgen.meshing import MeshingStep import netgen comm = COMM_WORLD - if comm.Get_rank() == 0: + if comm.rank == 0: geo = CSGeometry() geo.Add(Sphere(Pnt(0, 0, 0), 1).bc("sphere")) mp = MeshingParameters(maxh=0.05, perfstepsend=MeshingStep.MESHSURFACE) @@ -183,136 +207,69 @@ def test_firedrake_integral_ball_netgen(): ngmesh = netgen.libngpy._meshing.Mesh(3) msh = Mesh(ngmesh) + assert msh.topological_dimension == 2 + assert msh.geometric_dimension == 3 + V = FunctionSpace(msh, "CG", 3) - x, y, z = SpatialCoordinate(msh) - f = assemble(interpolate(1+0*x, V)) + f = assemble(interpolate(Constant(1), V)) assert abs(assemble(f * dx) - 4*np.pi) < 1.e-2 @pytest.mark.skipnetgen -def test_firedrake_integral_sphere_high_order_netgen(): - from netgen.csg import CSGeometry, Pnt, Sphere - import netgen +@pytest.mark.parallel([1, 2]) +def test_netgen_occ_manifold(): + from netgen.occ import Pnt, SplineApproximation, Face, Wire, Axis, OCCGeometry, Z + from netgen.meshing import MeshingStep + R = 3.0 + r = 1.5 + surface_area = R*r*(2*pi)**2 - comm = COMM_WORLD - if comm.Get_rank() == 0: - geo = CSGeometry() - geo.Add(Sphere(Pnt(0, 0, 0), 1).bc("sphere")) - ngmesh = geo.GenerateMesh(maxh=0.1) - else: - ngmesh = netgen.libngpy._meshing.Mesh(3) + def Curve(t): + return Pnt(0, R+r*np.cos(t), r*np.sin(t)) + + n = 100 + pnts = [Curve(2*np.pi*t/n) for t in range(n+1)] + + spline = SplineApproximation(pnts) + f = Face(Wire(spline)) + + torus = f.Revolve(Axis((0, 0, 0), Z), 360) + geo = OCCGeometry(torus, dim=3) + ngmesh = geo.GenerateMesh(maxh=0.5, perfstepsend=MeshingStep.MESHSURFACE) msh = Mesh(ngmesh) - 
homsh = Mesh(msh.curve_field(4)) - V = FunctionSpace(homsh, "CG", 4) - x, y, z = SpatialCoordinate(homsh) - f = assemble(interpolate(1+0*x, V)) - assert abs(assemble(f * dx) - (4/3)*np.pi) < 1.e-4 + assert msh.topological_dimension == 2 + assert msh.geometric_dimension == 3 + + V = FunctionSpace(msh, "CG", 3) + f = assemble(interpolate(Constant(1), V)) + assert abs(assemble(f * dx) - surface_area)/surface_area < 5.e-3 @pytest.mark.skipnetgen -@pytest.mark.parallel -def test_firedrake_integral_sphere_high_order_netgen_parallel(): +@pytest.mark.parallel([1, 2]) +def test_netgen_csg_high_order_integral(): from netgen.csg import CSGeometry, Pnt, Sphere import netgen comm = COMM_WORLD - if comm.Get_rank() == 0: + if comm.rank == 0: geo = CSGeometry() geo.Add(Sphere(Pnt(0, 0, 0), 1).bc("sphere")) ngmesh = geo.GenerateMesh(maxh=0.7) else: ngmesh = netgen.libngpy._meshing.Mesh(3) - msh = Mesh(ngmesh) - # The default value for location_tol is much too large (see https://github.com/NGSolve/ngsPETSc/issues/76) - # TODO: Once the default value is adjusted this can be removed - homsh = Mesh(msh.curve_field(2, location_tol=1e-8)) + homsh = Mesh(ngmesh, netgen_flags={"degree": 2}) V = FunctionSpace(homsh, "CG", 2) - x, y, z = SpatialCoordinate(homsh) - f = assemble(interpolate(1+0*x, V)) + f = assemble(interpolate(Constant(1), V)) assert abs(assemble(f * dx) - (4/3)*np.pi) < 1.e-2 @pytest.mark.skipcomplex @pytest.mark.skipnetgen -def test_firedrake_Adaptivity_netgen(): - from netgen.occ import WorkPlane, OCCGeometry, Axes - from netgen.occ import X, Z - - def solve_poisson(mesh): - V = FunctionSpace(mesh, "CG", 1) - uh = Function(V, name="Solution") - v = TestFunction(V) - bc = DirichletBC(V, 0, "on_boundary") - f = Constant(1) - F = inner(grad(uh), grad(v))*dx - inner(f, v)*dx - solve(F == 0, uh, bc) - return uh - - def estimate_error(mesh, uh): - W = FunctionSpace(mesh, "DG", 0) - eta_sq = Function(W) - w = TestFunction(W) - f = Constant(1) - h = CellDiameter(mesh) - n = 
FacetNormal(mesh) - v = CellVolume(mesh) - - # Compute error indicator cellwise - G = inner(eta_sq / v, w)*dx - G = G - inner(h**2 * (f + div(grad(uh)))**2, w) * dx - G = G - inner(h('+')/2 * jump(grad(uh), n)**2, w('+')) * dS - - # Each cell is an independent 1x1 solve, so Jacobi is exact - sp = {"mat_type": "matfree", - "ksp_type": "richardson", - "pc_type": "jacobi"} - solve(G == 0, eta_sq, solver_parameters=sp) - eta = Function(W) - eta.interpolate(sqrt(eta_sq)) # the above computed eta^2 - - with eta.dat.vec_ro as eta_: - error_est = sqrt(eta_.dot(eta_)) - return (eta, error_est) - - def adapt(mesh, eta): - W = FunctionSpace(mesh, "DG", 0) - markers = Function(W) - with eta.dat.vec_ro as eta_: - eta_max = eta_.max()[1] - - theta = 0.5 - should_refine = conditional(gt(eta, theta*eta_max), 1, 0) - markers.interpolate(should_refine) - - refined_mesh = mesh.refine_marked_elements(markers) - return refined_mesh - - rect1 = WorkPlane(Axes((0, 0, 0), n=Z, h=X)).Rectangle(1, 2).Face() - rect2 = WorkPlane(Axes((0, 1, 0), n=Z, h=X)).Rectangle(2, 1).Face() - L = rect1 + rect2 - - geo = OCCGeometry(L, dim=2) - ngmsh = geo.GenerateMesh(maxh=0.1) - mesh = Mesh(ngmsh) - - max_iterations = 10 - error_estimators = [] - dofs = [] - for i in range(max_iterations): - uh = solve_poisson(mesh) - (eta, error_est) = estimate_error(mesh, uh) - error_estimators.append(error_est) - dofs.append(uh.function_space().dim()) - mesh = adapt(mesh, eta) - assert error_estimators[-1] < 0.05 - - -@pytest.mark.skipcomplex -@pytest.mark.skipnetgen -@pytest.mark.parallel -def test_firedrake_Adaptivity_netgen_parallel(): +@pytest.mark.parallel([1, 2]) +def test_netgen_occ_adaptivity(): from netgen.occ import WorkPlane, OCCGeometry, Axes from netgen.occ import X, Z @@ -381,5 +338,7 @@ def adapt(mesh, eta): (eta, error_est) = estimate_error(mesh, uh) error_estimators.append(error_est) dofs.append(uh.function_space().dim()) + if error_est < 0.05: + break mesh = adapt(mesh, eta) assert 
error_estimators[-1] < 0.06 diff --git a/tests/firedrake/regression/test_nullspace.py b/tests/firedrake/regression/test_nullspace.py index 5b3538c41e..e7ba515579 100644 --- a/tests/firedrake/regression/test_nullspace.py +++ b/tests/firedrake/regression/test_nullspace.py @@ -1,5 +1,4 @@ from firedrake import * -from firedrake.petsc import PETSc import pytest import numpy as np @@ -236,7 +235,6 @@ def test_nullspace_mixed_multiple_components(): F_stokes += inner(g * rho * khat, N) * dx F_stokes += -inner(u, grad(M)) * dx - PETSc.Sys.popErrorHandler() solver_parameters = { 'mat_type': 'matfree', 'snes_type': 'ksponly', @@ -295,7 +293,6 @@ def test_near_nullspace_mixed(aux_pc, rhs): # test nullspace and nearnullspace for a mixed Stokes system # this is tested on the SINKER case of May and Moresi https://doi.org/10.1016/j.pepi.2008.07.036 # fails in parallel if nullspace is copied to fieldsplit_1_Mp_ksp solve (see PR #3488) - PETSc.Sys.popErrorHandler() n = 64 mesh = UnitSquareMesh(n, n) V = VectorFunctionSpace(mesh, "CG", 2) diff --git a/tests/firedrake/regression/test_p1pc.py b/tests/firedrake/regression/test_p1pc.py index 7f1d27e2d2..3658e7228c 100644 --- a/tests/firedrake/regression/test_p1pc.py +++ b/tests/firedrake/regression/test_p1pc.py @@ -16,11 +16,11 @@ def mesh(request): @pytest.fixture def expected(mesh): - if mesh.geometric_dimension() == 1: + if mesh.geometric_dimension == 1: return [2, 2, 2] - elif mesh.geometric_dimension() == 2: + elif mesh.geometric_dimension == 2: return [5, 5, 5] - elif mesh.geometric_dimension() == 3: + elif mesh.geometric_dimension == 3: return [7, 7, 7] diff --git a/tests/firedrake/regression/test_patch_pc.py b/tests/firedrake/regression/test_patch_pc.py index a533b0cf1d..039e7940e4 100644 --- a/tests/firedrake/regression/test_patch_pc.py +++ b/tests/firedrake/regression/test_patch_pc.py @@ -97,3 +97,80 @@ def test_jacobi_sor_equivalence(mesh, problem_type, multiplicative): patch_history = patch.snes.ksp.getConvergenceHistory() 
assert numpy.allclose(jacobi_history, patch_history) + + +def _patch_pc_exterior_facets_problem(a, L): + """Helper: solve with ASMStarPC and PatchPC, return iteration counts.""" + V = a.arguments()[0].function_space() + + u_star = Function(V) + problem = LinearVariationalProblem(a, L, u_star) + star_solver = LinearVariationalSolver( + problem, + solver_parameters={ + "mat_type": "aij", + "ksp_type": "gmres", + "pc_type": "python", + "pc_python_type": "firedrake.ASMStarPC", + "pc_star_construct_dim": 0, + "ksp_rtol": 1e-12, + }, + ) + star_solver.snes.ksp.setConvergenceHistory() + star_solver.solve() + star_its = len(star_solver.snes.ksp.getConvergenceHistory()) + + u_patch = Function(V) + problem_patch = LinearVariationalProblem(a, L, u_patch) + patch_solver = LinearVariationalSolver( + problem_patch, + options_prefix="", + solver_parameters={ + "mat_type": "matfree", + "ksp_type": "gmres", + "pc_type": "python", + "pc_python_type": "firedrake.PatchPC", + "patch_pc_patch_construct_type": "star", + "patch_pc_patch_construct_dim": 0, + "patch_pc_patch_save_operators": True, + "patch_sub_ksp_type": "preonly", + "patch_sub_pc_type": "lu", + "ksp_rtol": 1e-12, + }, + ) + patch_solver.snes.ksp.setConvergenceHistory() + patch_solver.solve() + patch_its = len(patch_solver.snes.ksp.getConvergenceHistory()) + + return star_its, patch_its + + +@pytest.mark.parallel([1, 3]) +def test_patch_pc_exterior_facets_dx_ds(): + """Test that PatchPC correctly handles exterior facet integrals (ds) + in both serial and parallel, by asserting it takes the same number + of iterations as ASMStarPC.""" + distribution = {"overlap_type": (DistributedMeshOverlapType.VERTEX, 1)} + mesh = UnitSquareMesh(4, 4, distribution_parameters=distribution) + V = FunctionSpace(mesh, "DG", 1) + u = TrialFunction(V) + v = TestFunction(V) + a = inner(u, v) * dx + inner(u, v) * ds + L = inner(Constant(1.0), v) * dx + star_its, patch_its = _patch_pc_exterior_facets_problem(a, L) + assert star_its == patch_its + + 
+def test_patch_pc_exterior_facets_dx_dS_ds(): + """Test that PatchPC correctly handles exterior (ds) and interior (dS) + facet integrals together, by asserting it takes the same number of + iterations as ASMStarPC.""" + distribution = {"overlap_type": (DistributedMeshOverlapType.VERTEX, 1)} + mesh = UnitSquareMesh(4, 4, distribution_parameters=distribution) + V = FunctionSpace(mesh, "DG", 1) + u = TrialFunction(V) + v = TestFunction(V) + a = inner(u, v) * dx + inner(avg(u), avg(v)) * dS + inner(u, v) * ds + L = inner(Constant(1.0), v) * dx + star_its, patch_its = _patch_pc_exterior_facets_problem(a, L) + assert star_its == patch_its diff --git a/tests/firedrake/regression/test_periodic_2d.py b/tests/firedrake/regression/test_periodic_2d.py index 8e403479da..4ace0f8aa0 100644 --- a/tests/firedrake/regression/test_periodic_2d.py +++ b/tests/firedrake/regression/test_periodic_2d.py @@ -20,33 +20,15 @@ from firedrake import * -@pytest.fixture(params=["x", "y", "both"]) -def direction(request): - return request.param - - -@pytest.fixture(params=[False, True], - ids=["tri", "quad"]) -def quadrilateral(request): - return request.param - - -@pytest.fixture(params=["left", "right", "crossed"]) -def diagonal(request): - return request.param - - -def run_periodic_helmholtz(direction, quadrilateral, diagonal): - if quadrilateral: - if diagonal == "left": - # run the test - diagonal = None - else: - # don't run the test - return - - mesh = PeriodicRectangleMesh(100, 60, 5, 3, quadrilateral=quadrilateral, - diagonal=diagonal, direction=direction) +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("direction", ["x", "y", "both"]) +@pytest.mark.parametrize("cell_options", + [{"quadrilateral": True}, + {"quadrilateral": False, "diagonal": "left"}, + {"quadrilateral": False, "diagonal": "right"}, + {"quadrilateral": False, "diagonal": "crossed"}]) +def test_periodic_helmholtz(direction, cell_options): + mesh = PeriodicRectangleMesh(100, 60, 5, 3, **cell_options, 
direction=direction) x = SpatialCoordinate(mesh) V = FunctionSpace(mesh, "CG", 1) @@ -56,7 +38,7 @@ def run_periodic_helmholtz(direction, quadrilateral, diagonal): f = Function(V).assign((244.0*pi*pi/225.0 + 1.0)*u_exact) - if direction in ("x", "y"): + if direction in {"x", "y"}: bcs = DirichletBC(V, Constant(0), (1, 2)) elif direction == "both": bcs = [] @@ -72,12 +54,3 @@ def run_periodic_helmholtz(direction, quadrilateral, diagonal): l2err = sqrt(assemble(inner((out-u_exact), (out-u_exact))*dx)) l2norm = sqrt(assemble(inner(u_exact, u_exact)*dx)) assert l2err/l2norm < 0.004 - - -def test_periodic_helmholtz(direction, quadrilateral, diagonal): - run_periodic_helmholtz(direction, quadrilateral, diagonal) - - -@pytest.mark.parallel(nprocs=3) -def test_periodic_helmholtz_parallel(direction, quadrilateral, diagonal): - run_periodic_helmholtz(direction, quadrilateral, diagonal) diff --git a/tests/firedrake/regression/test_projection_zany.py b/tests/firedrake/regression/test_projection_zany.py index dc1329dc19..43b8b7cf9f 100644 --- a/tests/firedrake/regression/test_projection_zany.py +++ b/tests/firedrake/regression/test_projection_zany.py @@ -80,7 +80,8 @@ def run_convergence_test(mh, el, degree, convrate): ('Hermite', 3, 3.8), ('Bell', 5, 4.7), ('Argyris', 5, 5.8), - ('Argyris', 6, 6.7)]) + ('Argyris', 6, 6.7), + ('Nonconforming Robust Wu-Xu', 7, 3.8)]) def test_projection_zany_convergence_2d(hierarchy_2d, el, deg, convrate): run_convergence_test(hierarchy_2d[2:], el, deg, convrate) @@ -97,7 +98,9 @@ def test_projection_zany_convergence_3d(hierarchy_3d, el, deg, convrate): ('HCT', 3), ('HCT', 4), ('Argyris', 5), - ('Argyris', 6)]) + ('Argyris', 6), + ('Nonconforming Wu-Xu', 4), + ('Alfeld C2', 5)]) def test_mass_conditioning(element, degree, hierarchy_2d): mass_cond = [] for msh in hierarchy_2d[1:4]: diff --git a/tests/firedrake/regression/test_quadrature.py b/tests/firedrake/regression/test_quadrature.py index 9c738b888b..225a4b244d 100644 --- 
a/tests/firedrake/regression/test_quadrature.py +++ b/tests/firedrake/regression/test_quadrature.py @@ -1,5 +1,6 @@ from firedrake import * import pytest +import numpy as np @pytest.fixture @@ -19,6 +20,25 @@ def test_hand_specified_quadrature(mesh): assert not np.allclose(a_q0.dat.data, a_q2.dat.data) +def test_hand_specified_max_quadrature(): + mesh = UnitIntervalMesh(1) + + x, = SpatialCoordinate(mesh) + a = (x**4)*dx + + # These should be the same because we only need degree=4 for exact integration. + x4 = assemble(a) + x4_maxquad5 = assemble(a, form_compiler_parameters={"max_quadrature_degree": 5}) + + assert np.isclose(x4, x4_maxquad5) + + # These should be the same because degree=2 will limit the quadrature + x4_quad2 = assemble(a, form_compiler_parameters={"quadrature_degree": 2}) + x4_maxquad2 = assemble(a, form_compiler_parameters={"max_quadrature_degree": 2}) + + assert np.isclose(x4_quad2, x4_maxquad2) + + @pytest.mark.parametrize("diagonal", [False, True]) @pytest.mark.parametrize("mat_type", ["matfree", "aij"]) @pytest.mark.parametrize("family", ["Quadrature", "Boundary Quadrature"]) diff --git a/tests/firedrake/regression/test_real_space.py b/tests/firedrake/regression/test_real_space.py index 74f4015c66..96839569bd 100644 --- a/tests/firedrake/regression/test_real_space.py +++ b/tests/firedrake/regression/test_real_space.py @@ -369,6 +369,53 @@ def test_real_interpolate(): assert np.allclose(float(a_int), 1.0) +@pytest.mark.skipcomplex +def test_real_interior_facet(): + mesh = UnitSquareMesh(2, 2) + K = FunctionSpace(mesh, "DG", 1) + R = FunctionSpace(mesh, "Real", 0) + + x, y = SpatialCoordinate(mesh) + + q = Function(K).interpolate(x*y) + v = TestFunction(K) + c1 = Function(R).assign(1.0) + c2 = Constant(1.0) + + F1 = avg(c1) * jump(v) * dS + F2 = c2 * jump(v) * dS + assert np.allclose(assemble(F1).dat.data_ro, assemble(F2).dat.data_ro) + + F1 = jump(c1*q) * jump(v) * dS + F2 = jump(c2*q) * jump(v) * dS + assert 
np.allclose(assemble(F1).dat.data_ro, assemble(F2).dat.data_ro) + + +@pytest.mark.skipcomplex +def test_real_interior_facet_reassembly(): + """Reassembling with an updated Real coefficient in a dS integral + must reflect the new value, even when parloops are cached.""" + from firedrake.assemble import get_assembler + + mesh = UnitSquareMesh(2, 2) + K = FunctionSpace(mesh, "DG", 0) + R = FunctionSpace(mesh, "Real", 0) + + v = TestFunction(K) + c = Function(R).assign(1.0) + + F = c * jump(v) * dS + + assembler = get_assembler(F) + r1 = assembler.assemble() + data1 = r1.dat.data_ro.copy() + + c.assign(2.0) + r2 = assembler.assemble() + + assert np.allclose(r2.dat.data_ro, 2.0 * data1) + + def test_real_space_hex(): mesh = BoxMesh(2, 1, 1, 2., 1., 1., hexahedral=True) DG = FunctionSpace(mesh, "DQ", 0) diff --git a/tests/firedrake/regression/test_restricted_function_space.py b/tests/firedrake/regression/test_restricted_function_space.py index 7ba97ef24a..021fa40e58 100644 --- a/tests/firedrake/regression/test_restricted_function_space.py +++ b/tests/firedrake/regression/test_restricted_function_space.py @@ -55,6 +55,32 @@ def test_restricted_function_space_j_j_square(j): compare_function_space_assembly(V, V_res, [bc]) +@pytest.mark.parallel([1, 2]) +@pytest.mark.parametrize("into_restricted", (True, False), ids=("into-restricted", "from-restricted")) +def test_restrict_assign(into_restricted): + mesh = UnitSquareMesh(3, 3) + + V = VectorFunctionSpace(mesh, "RT", 1) + Vres = RestrictedFunctionSpace(V, ("on_boundary",)) + + u = Function(V) + ures = Function(Vres) + + if into_restricted: + source = u + target = ures + else: + source = ures + target = u + + size = source.dat.data_ro.size + shape = source.dat.data_ro.shape + source.dat.data_wo[...] 
= np.arange(size).reshape(shape) + + target.assign(source) + assert errornorm(ures, u) < 1E-13 + + def test_poisson_homogeneous_bcs(): mesh = UnitSquareMesh(1, 1) V = FunctionSpace(mesh, "CG", 2) diff --git a/tests/firedrake/regression/test_solving_interface.py b/tests/firedrake/regression/test_solving_interface.py index 9ceb69f94f..d05105d9a4 100644 --- a/tests/firedrake/regression/test_solving_interface.py +++ b/tests/firedrake/regression/test_solving_interface.py @@ -1,4 +1,5 @@ import pytest +import numpy as np from firedrake import * from firedrake.petsc import PETSc from numpy.linalg import norm as np_norm @@ -345,7 +346,7 @@ def test_solve_pre_apply_bcs(mesh, mixed): # Hyperelastic energy functional lam = Constant(1E3) - dim = mesh.geometric_dimension() + dim = mesh.geometric_dimension F = grad(u) + Identity(dim) J = det(F) logJ = 0.5*ln(J**2) diff --git a/tests/firedrake/regression/test_star_pc.py b/tests/firedrake/regression/test_star_pc.py index c78455f9e5..d6e1383163 100644 --- a/tests/firedrake/regression/test_star_pc.py +++ b/tests/firedrake/regression/test_star_pc.py @@ -371,7 +371,7 @@ def base(request): @pytest.mark.parametrize("periodic", (False, True), ids=("extruded", "extruded-periodic")) def test_asm_extruded_star(base, periodic, family, degree): mesh = ExtrudedMesh(base, 5, periodic=periodic) - if mesh.topological_dimension() == 2: + if mesh.topological_dimension == 2: family = family.replace("N", "RT") V = FunctionSpace(mesh, family, degree) space = V.ufl_element().sobolev_space diff --git a/tests/firedrake/regression/test_tensor_algebra.py b/tests/firedrake/regression/test_tensor_algebra.py index 61871d4ef2..3306c6252a 100644 --- a/tests/firedrake/regression/test_tensor_algebra.py +++ b/tests/firedrake/regression/test_tensor_algebra.py @@ -34,8 +34,8 @@ def mesh(request): "-2*mu_s*inner(grad(u), outer(conj(v), n)) * ds")], ids=lambda x: x[0]) def form_expect(request, mesh): - dim = mesh.geometric_dimension() - if mesh.ufl_cell().cellname() 
== "quadrilateral": + dim = mesh.geometric_dimension + if mesh.ufl_cell().cellname == "quadrilateral": V = FunctionSpace(mesh, "RTCF", 1) else: V = FunctionSpace(mesh, "RT", 1) diff --git a/tests/firedrake/regression/test_tensor_elements.py b/tests/firedrake/regression/test_tensor_elements.py index 6c0a6f99ff..27cb7b5a0b 100644 --- a/tests/firedrake/regression/test_tensor_elements.py +++ b/tests/firedrake/regression/test_tensor_elements.py @@ -35,13 +35,13 @@ def test_tensor_continuity(mesh, family, degree): if space == HDivDiv: utrace = dot(n, dot(u, n)) elif space == HEin: - if mesh.topological_dimension() == 2: + if mesh.topological_dimension == 2: t = perp(n) else: t = as_matrix([[0, n[2], -n[1]], [-n[2], 0, n[0]], [n[1], -n[0], 0]]) utrace = dot(t, dot(u, t)) else: - if mesh.topological_dimension() == 2: + if mesh.topological_dimension == 2: t = perp(n) utrace = dot(t, dot(u, n)) else: diff --git a/tests/firedrake/slate/test_assemble_tensors.py b/tests/firedrake/slate/test_assemble_tensors.py index c35d43e27e..40830118d1 100644 --- a/tests/firedrake/slate/test_assemble_tensors.py +++ b/tests/firedrake/slate/test_assemble_tensors.py @@ -45,7 +45,7 @@ def f(function_space): if fs_i.rank == 1: fi.interpolate(as_vector((x[0]*x[1],) * fs_i.value_size)) elif fs_i.rank == 2: - fi.interpolate(as_tensor([[x[0]*x[1] for i in range(fs_i.mesh().geometric_dimension())] + fi.interpolate(as_tensor([[x[0]*x[1] for i in range(fs_i.mesh().geometric_dimension)] for j in range(fs_i.rank)])) else: fi.interpolate(x[0]*x[1]) @@ -66,7 +66,7 @@ def g(function_space): if fs_i.rank == 1: gi.interpolate(as_vector((x[0]*sin(x[1]),) * fs_i.value_size)) elif fs_i.rank == 2: - gi.interpolate(as_tensor([[x[0]*sin(x[1]) for i in range(fs_i.mesh().geometric_dimension())] + gi.interpolate(as_tensor([[x[0]*sin(x[1]) for i in range(fs_i.mesh().geometric_dimension)] for j in range(fs_i.rank)])) else: gi.interpolate(x[0]*sin(x[1])) diff --git a/tests/firedrake/submesh/test_submesh_assemble.py 
b/tests/firedrake/submesh/test_submesh_assemble.py new file mode 100644 index 0000000000..af317a8ec9 --- /dev/null +++ b/tests/firedrake/submesh/test_submesh_assemble.py @@ -0,0 +1,569 @@ +import os +import pytest +import numpy as np +from firedrake import * +from firedrake.cython import dmcommon +from petsc4py import PETSc + + +cwd = os.path.abspath(os.path.dirname(__file__)) + + +def test_submesh_assemble_cell_cell_integral_cell(): + dim = 2 + mesh = RectangleMesh(2, 1, 2., 1., quadrilateral=True) + x, y = SpatialCoordinate(mesh) + DQ0 = FunctionSpace(mesh, "DQ", 0) + indicator_function = Function(DQ0).interpolate(conditional(x > 1., 1, 0)) + mesh.mark_entities(indicator_function, 999) + subm = Submesh(mesh, dim, 999) + V0 = FunctionSpace(mesh, "CG", 1) + V1 = FunctionSpace(subm, "CG", 1) + V = V0 * V1 + u = TrialFunction(V) + v = TestFunction(V) + u0, u1 = split(u) + v0, v1 = split(v) + dx0 = Measure("dx", domain=mesh, intersect_measures=(Measure("dx", subm),)) + dx1 = Measure("dx", domain=subm, intersect_measures=(Measure("dx", mesh),)) + a = inner(u1, v0) * dx0(999) + inner(u0, v1) * dx1 + A = assemble(a, mat_type="nest") + assert np.allclose(A.M.sparsity[0][0].nnz, [1, 1, 1, 1, 1, 1]) # bc nodes + assert np.allclose(A.M.sparsity[0][1].nnz, [4, 4, 4, 4, 0, 0]) + assert np.allclose(A.M.sparsity[1][0].nnz, [4, 4, 4, 4]) + assert np.allclose(A.M.sparsity[1][1].nnz, [1, 1, 1, 1]) # bc nodes + M10 = np.array([[1./9. , 1./18., 1./36., 1./18., 0., 0.], # noqa + [1./18., 1./9. , 1./18., 1./36., 0., 0.], # noqa + [1./36., 1./18., 1./9. , 1./18., 0., 0.], # noqa + [1./18., 1./36., 1./18., 1./9. 
, 0., 0.]]) # noqa + assert np.allclose(A.M[0][1].values, np.transpose(M10)) + assert np.allclose(A.M[1][0].values, M10) + + +def test_submesh_assemble_cell_cell_integral_facet(): + dim = 2 + mesh = RectangleMesh(2, 1, 2., 1., quadrilateral=True) + x, y = SpatialCoordinate(mesh) + DQ0 = FunctionSpace(mesh, "DQ", 0) + indicator_function = Function(DQ0).interpolate(conditional(x > 1., 1, 0)) + mesh.mark_entities(indicator_function, 999) + subm = Submesh(mesh, dim, 999) + V0 = FunctionSpace(mesh, "DQ", 1, variant="equispaced") + V1 = FunctionSpace(subm, "DQ", 1, variant="equispaced") + V = V0 * V1 + u = TrialFunction(V) + v = TestFunction(V) + u0, u1 = split(u) + v0, v1 = split(v) + dS0 = Measure("dS", domain=mesh, intersect_measures=(Measure("ds", subm),)) + ds1 = Measure("ds", domain=subm, intersect_measures=(Measure("dS", mesh),)) + a = inner(u1, v0('+')) * dS0 + inner(u0('+'), v1) * ds1(5) + A = assemble(a, mat_type="nest") + assert np.allclose(A.M.sparsity[0][0].nnz, [1, 1, 1, 1, 1, 1, 1, 1]) # bc nodes + assert np.allclose(A.M.sparsity[0][1].nnz, [4, 4, 4, 4, 4, 4, 4, 4]) + assert np.allclose(A.M.sparsity[1][0].nnz, [8, 8, 8, 8]) + assert np.allclose(A.M.sparsity[1][1].nnz, [1, 1, 1, 1]) # bc nodes + + M10 = [[0, 0, 0, 0, 0, 0, 0, 0], # noqa + [0, 0, 0, 0, 1/3, 0, 1/6, 0], + [0, 0, 0, 0, 0, 0, 0, 0], # noqa + [0, 0, 0, 0, 1/6, 0, 1/3, 0]] + assert np.allclose(A.M[0][1].values, np.transpose(M10)) + assert np.allclose(A.M[1][0].values, M10) + + b = inner(u1, v0('+')) * ds1(5) + inner(u0('+'), v1) * dS0 + B = assemble(b, mat_type="nest") + assert np.allclose(B.M.sparsity[0][0].nnz, [1, 1, 1, 1, 1, 1, 1, 1]) # bc nodes + assert np.allclose(B.M.sparsity[0][1].nnz, [4, 4, 4, 4, 4, 4, 4, 4]) + assert np.allclose(B.M.sparsity[1][0].nnz, [8, 8, 8, 8]) + assert np.allclose(B.M.sparsity[1][1].nnz, [1, 1, 1, 1]) # bc nodes + assert np.allclose(B.M[0][1].values, A.M[0][1].values) + assert np.allclose(B.M[1][0].values, A.M[1][0].values) + + +def 
test_submesh_assemble_cell_cell_cell_cell_integral_various(): + # +-------+-------+-------+-------+ + # | | | | | + # | | 555 | | mesh + # | | | | | + # +-------+-------+-------+-------+ + # +-------+-------+ + # | | | + # | | 555 mesh_l + # | | | + # +-------+-------+ + # +-------+-------+ + # | | | + # 555 | | mesh_r + # | | | + # +-------+-------+ + # +-------+ + # | | + # 555 | mesh_rl + # | | + # +-------+ + dim = 2 + mesh = RectangleMesh(4, 1, 4., 1., quadrilateral=True) + x, y = SpatialCoordinate(mesh) + label_int = 555 + label_l = 81100 + label_r = 80011 + label_rl = 80010 + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + DG0 = FunctionSpace(mesh, "DG", 0) + f_int = Function(HDivTrace0).interpolate(conditional(And(x > 1.9, x < 2.1), 1, 0)) + f_l = Function(DG0).interpolate(conditional(x < 2., 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > 2., 1, 0)) + f_rl = Function(DG0).interpolate(conditional(And(x > 2., x < 3.), 1, 0)) + mesh = RelabeledMesh(mesh, [f_int, f_l, f_r, f_rl], [label_int, label_l, label_r, label_rl]) + x, y = SpatialCoordinate(mesh) + mesh_l = Submesh(mesh, dim, label_l) + mesh_r = Submesh(mesh, dim, label_r) + mesh_rl = Submesh(mesh_r, dim, label_rl) + dS = Measure( + "dS", domain=mesh, + intersect_measures=( + Measure("ds", mesh_l), + Measure("ds", mesh_r), + Measure("ds", mesh_rl), + ) + ) + ds_l = Measure( + "ds", domain=mesh_l, + intersect_measures=( + Measure("dS", mesh), + Measure("ds", mesh_r), + Measure("ds", mesh_rl), + ) + ) + ds_r = Measure( + "ds", domain=mesh_r, + intersect_measures=( + Measure("dS", mesh), + Measure("ds", mesh_l), + Measure("ds", mesh_rl), + ) + ) + ds_rl = Measure( + "ds", domain=mesh_rl, + intersect_measures=( + Measure("dS", mesh), + Measure("ds", mesh_l), + Measure("ds", mesh_r), + ) + ) + n_l = FacetNormal(mesh_l) + n_rl = FacetNormal(mesh_rl) + assert assemble(dot(n_rl + n_l, n_rl + n_l) * ds_rl(label_int)) < 1.e-32 + assert assemble(dot(n_rl + n_l, n_rl + n_l) * ds_r(label_int)) < 1.e-32 + 
assert assemble(dot(n_rl + n_l, n_rl + n_l) * ds_l(label_int)) < 1.e-32 + assert assemble(dot(n_rl + n_l, n_rl + n_l) * dS(label_int)) < 1.e-32 + V_l = FunctionSpace(mesh_l, "DQ", 1, variant='equispaced') + V_rl = FunctionSpace(mesh_rl, "DQ", 1, variant='equispaced') + V = V_l * V_rl + u_l, u_rl = TrialFunctions(V) + v_l, v_rl = TestFunctions(V) + a = inner(u_rl, v_l) * ds_l(label_int) + inner(u_l, v_rl) * ds_rl(label_int) + A = assemble(a, mat_type="nest") + assert np.allclose(A.M.sparsity[0][0].nnz, [1, 1, 1, 1, 1, 1, 1, 1]) # bc nodes + assert np.allclose(A.M.sparsity[0][1].nnz, [4, 4, 4, 4, 0, 0, 0, 0]) + assert np.allclose(A.M.sparsity[1][0].nnz, [4, 4, 4, 4]) + assert np.allclose(A.M.sparsity[1][1].nnz, [1, 1, 1, 1]) # bc nodes + + M10 = [[ 0, 0, 0, 0, 0, 0, 0, 0], # noqa + [1/3, 0, 1/6, 0, 0, 0, 0, 0], + [ 0, 0, 0, 0, 0, 0, 0, 0], # noqa + [1/6, 0, 1/3, 0, 0, 0, 0, 0]] + assert np.allclose(A.M[0][1].values, np.transpose(M10)) + assert np.allclose(A.M[1][0].values, M10) + + b = inner(u_rl, v_l) * dS(label_int) + inner(u_l, v_rl) * dS(label_int) + B = assemble(b, mat_type="nest") + assert np.allclose(B.M.sparsity[0][0].nnz, [1, 1, 1, 1, 1, 1, 1, 1]) # bc nodes + assert np.allclose(B.M.sparsity[0][1].nnz, [4, 4, 4, 4, 0, 0, 0, 0]) + assert np.allclose(B.M.sparsity[1][0].nnz, [4, 4, 4, 4]) + assert np.allclose(B.M.sparsity[1][1].nnz, [1, 1, 1, 1]) # bc nodes + assert np.allclose(B.M[0][1].values, A.M[0][1].values) + assert np.allclose(B.M[1][0].values, A.M[1][0].values) + + +def test_submesh_assemble_cell_cell_cell_cell_integral_avg(): + # +-------+-------+-------+-------+ + # | | | | | + # | | 555 | | mesh + # | | | | | + # +-------+-------+-------+-------+ + # +-------+-------+-------+ + # | | | | + # | | 555 | mesh_l + # | | | | + # +-------+-------+-------+ + # +-------+-------+ + # | | | + # 555 | | mesh_r + # | | | + # +-------+-------+ + # +-------+ + # | | + # 555 | mesh_rl + # | | + # +-------+ + dim = 2 + mesh = RectangleMesh(4, 1, 4., 1., 
quadrilateral=True) + x, y = SpatialCoordinate(mesh) + label_int = 555 + label_l = 81110 + label_r = 80011 + label_rl = 80010 + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + DG0 = FunctionSpace(mesh, "DG", 0) + f_int = Function(HDivTrace0).interpolate(conditional(And(x > 1.9, x < 2.1), 1, 0)) + f_l = Function(DG0).interpolate(conditional(x < 3., 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > 2., 1, 0)) + f_rl = Function(DG0).interpolate(conditional(And(x > 2., x < 3.), 1, 0)) + mesh = RelabeledMesh(mesh, [f_int, f_l, f_r, f_rl], [label_int, label_l, label_r, label_rl]) + x, y = SpatialCoordinate(mesh) + mesh_l = Submesh(mesh, dim, label_l) + x_l, y_l = SpatialCoordinate(mesh_l) + mesh_r = Submesh(mesh, dim, label_r) + x_r, y_r = SpatialCoordinate(mesh_r) + mesh_rl = Submesh(mesh_r, dim, label_rl) + x_rl, y_rl = SpatialCoordinate(mesh_rl) + dx = Measure( + "dx", domain=mesh, + intersect_measures=( + Measure("dx", mesh_l), + Measure("dx", mesh_r), + Measure("dx", mesh_rl), + ) + ) + dx_l = Measure( + "dx", domain=mesh_l, + intersect_measures=( + Measure("dx", mesh), + Measure("dx", mesh_r), + Measure("dx", mesh_rl), + ) + ) + dx_rl = Measure( + "dx", domain=mesh_rl, + intersect_measures=( + Measure("dx", mesh), + Measure("dx", mesh_l), + Measure("dx", mesh_r), + ) + ) + dS = Measure( + "dS", domain=mesh, + intersect_measures=( + Measure("dS", mesh_l), + Measure("ds", mesh_r), + Measure("ds", mesh_rl), + ) + ) + dS_l = Measure( + "dS", domain=mesh_l, + intersect_measures=( + Measure("dS", mesh), + Measure("ds", mesh_r), + Measure("ds", mesh_rl), + ) + ) + ds_rl = Measure( + "ds", domain=mesh_rl, + intersect_measures=( + Measure("dS", mesh), + Measure("dS", mesh_l), + Measure("ds", mesh_r), + ) + ) + assert abs(assemble(cell_avg(x) * dx(label_rl)) - 2.5) < 5.e-16 + assert abs(assemble(cell_avg(x) * dx_rl) - 2.5) < 5.e-16 + assert abs(assemble(cell_avg(x_rl) * dx(label_rl)) - 2.5) < 5.e-16 + assert abs(assemble(cell_avg(x_rl) * dx_l(label_rl)) - 2.5) < 
5.e-16 + assert abs(assemble(cell_avg(x_l) * dx_rl) - 2.5) < 5.e-16 + assert abs(assemble(facet_avg(y * y) * dS(label_int)) - 1. / 3.) < 5.e-16 + assert abs(assemble(facet_avg(y('+') * y('-')) * ds_rl(label_int)) - 1. / 3.) < 5.e-16 + assert abs(assemble(facet_avg(y_rl * y_rl) * dS(label_int)) - 1. / 3.) < 5.e-16 + assert abs(assemble(facet_avg(y_rl * y_rl) * dS_l(label_int)) - 1. / 3.) < 5.e-16 + assert abs(assemble(facet_avg(y_l('+') * y_l('-')) * ds_rl(label_int)) - 1. / 3.) < 5.e-16 + + +def test_submesh_assemble_cell_cell_equation_bc(): + dim = 2 + mesh = RectangleMesh(2, 1, 2., 1., quadrilateral=True) + x, y = SpatialCoordinate(mesh) + label_int = 555 + label_l = 810 + label_r = 801 + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + DQ0 = FunctionSpace(mesh, "DQ", 0) + f_int = Function(HDivTrace0).interpolate(conditional(And(x > 0.9, x < 1.1), 1, 0)) + f_l = Function(DQ0).interpolate(conditional(x < 1., 1, 0)) + f_r = Function(DQ0).interpolate(conditional(x > 1., 1, 0)) + mesh = RelabeledMesh(mesh, [f_int, f_l, f_r], [label_int, label_l, label_r]) + mesh_l = Submesh(mesh, dim, label_l) + mesh_r = Submesh(mesh, dim, label_r) + V_l = FunctionSpace(mesh_l, "CG", 1) + V_r = FunctionSpace(mesh_r, "CG", 1) + V = V_l * V_r + u = TrialFunction(V) + v = TestFunction(V) + u_l, u_r = split(u) + v_l, v_r = split(v) + dx_l = Measure("dx", domain=mesh_l) + ds_l = Measure("ds", domain=mesh_l, intersect_measures=(Measure("ds", mesh_r),)) + a = inner(u_l, v_l) * dx_l + a_int = inner(u_l - u_r, v_l) * ds_l(label_int) + L_int = inner(Constant(0), v_l) * ds_l(label_int) + sol = Function(V) + bc = EquationBC(a_int == L_int, sol, label_int, V=V.sub(0)) + A = assemble(a, bcs=bc.extract_form('J'), mat_type="nest") + assert np.allclose(Function(V_l).interpolate(SpatialCoordinate(mesh_l)[0]).dat.data, [0., 1., 1., 0.]) + assert np.allclose(Function(V_l).interpolate(SpatialCoordinate(mesh_l)[1]).dat.data, [0., 0., 1., 1.]) + assert 
np.allclose(Function(V_r).interpolate(SpatialCoordinate(mesh_r)[0]).dat.data, [1., 2., 2., 1.]) + assert np.allclose(Function(V_r).interpolate(SpatialCoordinate(mesh_r)[1]).dat.data, [0., 0., 1., 1.]) + assert np.allclose(A.M.sparsity[0][0].nnz, [4, 4, 4, 4]) + assert np.allclose(A.M.sparsity[0][1].nnz, [4, 4, 4, 4]) + assert np.allclose(A.M.sparsity[1][0].nnz, [0, 0, 0, 0]) + assert np.allclose(A.M.sparsity[1][1].nnz, [1, 1, 1, 1]) # bc nodes + + + M00 = np.array([[ 1/9, 1/18, 1/36, 1/18], # noqa + [ 0, 1/3, 1/6, 0], # noqa + [ 0, 1/6, 1/3, 0], # noqa + [1/18, 1/36, 1/18, 1/9]]) # noqa + M01 = np.array([[ 0, 0, 0, 0], # noqa + [-1/3, 0, 0, -1/6], # noqa + [-1/6, 0, 0, -1/3], # noqa + [ 0, 0, 0, 0]]) # noqa + assert np.allclose(A.M[0][0].values, M00) + assert np.allclose(A.M[0][1].values, M01) + + +def test_submesh_assemble_cell_facet_integral_various(): + # CG1 DoF numbers (nprocs = 1): + # + # 5-------1-------2 + # | | | + # | | | mesh + # | | | + # 4-------0-------3 + # + # 0 + # | + # | subm + # | + # 1 + # + distribution_parameters = { + "overlap_type": (DistributedMeshOverlapType.RIDGE, 1), + } + subdomain_id = 777 + mesh = RectangleMesh(2, 1, 2., 1., quadrilateral=True, distribution_parameters=distribution_parameters) + x, y = SpatialCoordinate(mesh) + V1 = FunctionSpace(mesh, "HDiv Trace", 0) + f1 = Function(V1).interpolate(conditional(And(x > 0.9, x < 1.1), 1., 0.)) + mesh = RelabeledMesh(mesh, [f1], [subdomain_id]) + x, y = SpatialCoordinate(mesh) + subm = Submesh(mesh, mesh.topological_dimension - 1, subdomain_id) + subx, suby = SpatialCoordinate(subm) + V0 = FunctionSpace(mesh, "CG", 1) + V1 = FunctionSpace(subm, "CG", 1) + V = V0 * V1 + u = TrialFunction(V) + v = TestFunction(V) + u0, u1 = split(u) + v0, v1 = split(v) + coordV0 = VectorFunctionSpace(mesh, "CG", 1) + coordV1 = VectorFunctionSpace(subm, "CG", 1) + coordV = coordV0 * coordV1 + coords = Function(coordV) + coords.sub(0).assign(mesh.coordinates) + coords.sub(1).assign(subm.coordinates) + 
coords0, coords1 = split(coords) + + M10 = np.array([[1/3, 0, 0, 1/6, 0, 0], + [1/6, 0, 0, 1/3, 0, 0]]) + M10w = np.array([[1/12, 0, 0, 1/12, 0, 0], + [1/12, 0, 0, 1/4, 0, 0]]) # noqa + M10ww = np.array([[1/30, 0, 0, 1/20, 0, 0], + [1/20, 0, 0, 1/5, 0, 0]]) # noqa + # Use subm as primal integration domain. + measure = Measure( + "dx", subm, + intersect_measures=( + Measure("dS", mesh), + ), + ) + + a = inner(u0('-'), v1) * measure + A = assemble(a, mat_type="nest") + assert np.allclose(A.M[1][0].values, M10) + + a = inner(u1, v0('+')) * measure + A = assemble(a, mat_type="nest") + assert np.allclose(A.M[0][1].values, np.transpose(M10)) + + a = y * inner(u0('-'), v1) * measure + A = assemble(a, mat_type="nest") + assert np.allclose(A.M[1][0].values, M10w) + + a = y * suby * inner(u0('-'), v1) * measure + A = assemble(a, mat_type="nest") + assert np.allclose(A.M[1][0].values, M10ww) + + a = coords0[1] * inner(u0('-'), v1) * measure + A = assemble(a, mat_type="nest") + assert np.allclose(A.M[1][0].values, M10w) + + a = coords0[1] * coords1[1] * inner(u0('-'), v1) * measure + A = assemble(a, mat_type="nest") + assert np.allclose(A.M[1][0].values, M10ww) + + # Use mesh as primal integration domain. 
+ measure = Measure( + "dS", mesh, + intersect_measures=( + Measure("dx", subm), + ), + ) + a = inner(u0('+'), v1) * measure(subdomain_id) + A = assemble(a, mat_type="nest") + assert np.allclose(A.M[1][0].values, M10) + a = inner(u1, v0('-')) * measure(subdomain_id) + A = assemble(a, mat_type="nest") + assert np.allclose(A.M[0][1].values, np.transpose(M10)) + + +@pytest.mark.parallel([1, 2, 3]) +def test_submesh_assemble_quad_triangle_base(): + dim = 2 + label_ext = 1 + label_interf = 2 + mesh = Mesh(os.path.join(cwd, "..", "meshes", "mixed_cell_unit_square.msh")) + mesh.topology_dm.markBoundaryFaces(dmcommon.FACE_SETS_LABEL, label_ext) + mesh_t = Submesh(mesh, dim, PETSc.DM.PolytopeType.TRIANGLE, label_name="celltype", name="mesh_tri") + x_t, y_t = SpatialCoordinate(mesh_t) + n_t = FacetNormal(mesh_t) + mesh_q = Submesh(mesh, dim, PETSc.DM.PolytopeType.QUADRILATERAL, label_name="celltype", name="mesh_quad") + x_q, y_q = SpatialCoordinate(mesh_q) + n_q = FacetNormal(mesh_q) + # pgfplot(f, "mesh_tri.dat", degree=2) + dx_t = Measure("dx", mesh_t) + dx_q = Measure("dx", mesh_q) + ds_t = Measure("ds", mesh_t, intersect_measures=(Measure("ds", mesh_q),)) + ds_q = Measure("ds", mesh_q, intersect_measures=(Measure("ds", mesh_t),)) + A_t = assemble(Constant(1) * dx_t) + A_q = assemble(Constant(1) * dx_q) + assert abs(A_t + A_q - 1.0) < 1.e-13 + HDiv_t = FunctionSpace(mesh_t, "BDM", 3) + HDiv_q = FunctionSpace(mesh_q, "RTCF", 3) + hdiv_t = Function(HDiv_t).interpolate(as_vector([x_t**2, y_t**2])) + hdiv_q = Function(HDiv_q).project(as_vector([x_q**2, y_q**2]), solver_parameters={"ksp_rtol": 1.e-13}) + v_t = assemble(dot(hdiv_q, as_vector([x_q, y_q])) * ds_t(label_interf)) + v_q = assemble(dot(hdiv_t, as_vector([x_t, y_t])) * ds_q(label_interf)) + assert abs(v_q - v_t) < 1.e-13 + v_t = assemble(dot(hdiv_q, as_vector([x_t, y_t])) * ds_t(label_interf)) + v_q = assemble(dot(hdiv_t, as_vector([x_q, y_q])) * ds_q(label_interf)) + assert abs(v_q - v_t) < 1.e-13 + v_t = 
assemble(dot(hdiv_q, as_vector([x_q, y_t])) * ds_t(label_interf)) + v_q = assemble(dot(hdiv_t, as_vector([x_t, y_q])) * ds_q(label_interf)) + assert abs(v_q - v_t) < 1.e-13 + v = assemble(inner(n_t, as_vector([888., 999.])) * ds_t(label_interf)) + assert abs(v) < 1.e-13 + v = assemble(inner(n_q, as_vector([888., 999.])) * ds_q(label_interf)) + assert abs(v) < 1.e-13 + v = assemble(inner(n_q, as_vector([888., 999.])) * ds_t(label_interf)) + assert abs(v) < 1.e-13 + v = assemble(inner(n_t, as_vector([888., 999.])) * ds_q(label_interf)) + assert abs(v) < 1.e-13 + v = assemble(dot(n_q + n_t, n_q + n_t) * ds_t(label_interf)) + assert abs(v) < 1.e-30 + v = assemble(dot(n_q + n_t, n_q + n_t) * ds_q(label_interf)) + assert abs(v) < 1.e-30 + + +def test_submesh_assemble_quad_triangle(): + dim = 2 + label_ext = 1 + label_interf = 2 + mesh = Mesh(os.path.join(cwd, "..", "meshes", "mixed_cell_unit_square.msh")) + mesh.topology_dm.markBoundaryFaces(dmcommon.FACE_SETS_LABEL, label_ext) + mesh_t = Submesh(mesh, dim, PETSc.DM.PolytopeType.TRIANGLE, label_name="celltype", name="mesh_tri") + x_t, y_t = SpatialCoordinate(mesh_t) + n_t = FacetNormal(mesh_t) + mesh_q = Submesh(mesh, dim, PETSc.DM.PolytopeType.QUADRILATERAL, label_name="celltype", name="mesh_quad") + x_q, y_q = SpatialCoordinate(mesh_q) + n_q = FacetNormal(mesh_q) + V_t = FunctionSpace(mesh_t, "P", 4) + V_q = FunctionSpace(mesh_q, "Q", 3) + V = V_t * V_q + u = TrialFunction(V) + v = TestFunction(V) + u_t, u_q = split(u) + v_t, v_q = split(v) + ds_t = Measure("ds", mesh_t, intersect_measures=(Measure("ds", mesh_q),)) + ds_q = Measure("ds", mesh_q, intersect_measures=(Measure("ds", mesh_t),)) + # Test against the base cases. 
+ c = x_t**2 * y_t**2 + a = c * inner(u_t, v_q) * ds_t(label_interf) + A = assemble(a) + c_ref = x_q**2 * y_q**2 + a_ref = c_ref * inner(TrialFunction(V_t), TestFunction(V_q)) * ds_t(label_interf) + A_ref = assemble(a_ref) + assert np.allclose(A.M[1][0].values, A_ref.M.values) + c = x_t**2 * y_q**2 + a = c * inner(u_q, v_t) * ds_t(label_interf) + A = assemble(a) + c_ref = x_q**2 * y_t**2 + a_ref = c_ref * inner(TrialFunction(V_q), TestFunction(V_t)) * ds_t(label_interf) + A_ref = assemble(a_ref) + assert np.allclose(A.M[0][1].values, A_ref.M.values) + c = dot(n_t, n_t) + a = c * inner(u_t, v_q) * ds_q(label_interf) + A = assemble(a) + c_ref = dot(n_q, n_q) + a_ref = c_ref * inner(TrialFunction(V_t), TestFunction(V_q)) * ds_q(label_interf) + A_ref = assemble(a_ref) + assert np.allclose(A.M[1][0].values, A_ref.M.values) + c = dot(n_t, n_q) + a = c * inner(u_q, v_t) * ds_q(label_interf) + A = assemble(a) + c_ref = dot(n_q, n_t) + a_ref = c_ref * inner(TrialFunction(V_q), TestFunction(V_t)) * ds_q(label_interf) + A_ref = assemble(a_ref) + assert np.allclose(A.M[0][1].values, A_ref.M.values) + + +@pytest.mark.parallel(3) +def test_assemble_parent_coefficient(): + subdomain_id = 999 + nx = 4 + mesh = UnitSquareMesh(2*nx, nx, quadrilateral=True, reorder=False) + x, y = SpatialCoordinate(mesh) + M = FunctionSpace(mesh, "DG", 0) + marker = Function(M).interpolate(conditional(Or(x > 0.5, y > 0.5), 1, 0)) + mesh = RelabeledMesh(mesh, [marker], [subdomain_id]) + submesh = Submesh(mesh, mesh.topological_dimension, subdomain_id, ignore_halo=True, reorder=False) + + def expr(m): + x = SpatialCoordinate(m) + return 1 + dot(x, x) + + Vsub = FunctionSpace(submesh, "CG", 1) + vsub = TestFunction(Vsub) + usub = TrialFunction(Vsub) + + Q = FunctionSpace(mesh, "DG", 0) + q = Function(Q).interpolate(expr(mesh)) + + subdx = Measure("dx", submesh, intersect_measures=(Measure("dx", mesh),)) + A = assemble(inner(grad(usub) * q, grad(vsub))*subdx) + + Qsub = FunctionSpace(submesh, "DG", 0) + 
qsub = Function(Qsub).interpolate(expr(submesh)) + A_ref = assemble(inner(grad(usub) * qsub, grad(vsub))*dx) + + A_ref.petscmat.axpy(-1, A.petscmat) + assert np.isclose(A_ref.petscmat.norm(PETSc.NormType.NORM_FROBENIUS), 0) diff --git a/tests/firedrake/submesh/test_submesh_assign.py b/tests/firedrake/submesh/test_submesh_assign.py new file mode 100644 index 0000000000..12adf956ce --- /dev/null +++ b/tests/firedrake/submesh/test_submesh_assign.py @@ -0,0 +1,309 @@ +import pytest +import numpy as np +from firedrake import * +import finat +from os.path import abspath, dirname, join + + +cwd = abspath(dirname(__file__)) + + +@pytest.mark.parallel(nprocs=2) +def test_submesh_assign_function_3_quads_2_processes(): + # mesh + # rank 0: + # 4---12----6---15---(8)-(18)-(10) + # | | | | + # 11 0 13 1 (17) (2) (19) + # | | | | + # 3---14----5---16---(7)-(20)--(9) + # rank 1: + # (7)-(13)---3----9----5 + # | | | + # (12) (1) 8 0 10 + # | | | plex points + # (6)-(14)---2---11----4 () = ghost + left = 111 + right = 222 + middle = 111222 + mesh = RectangleMesh( + 3, 1, 3., 1., quadrilateral=True, distribution_parameters={"partitioner_type": "simple"}, + ) + dim = mesh.topological_dimension + x, _ = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DG", 0) + f_l = Function(DG0).interpolate(conditional(x < 2.0, 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > 1.0, 1, 0)) + f_m = Function(DG0).interpolate(conditional(And(x < 2.0, x > 1.0), 1, 0)) + mesh = RelabeledMesh(mesh, [f_l, f_r, f_m], [left, right, middle]) + mesh_l = Submesh(mesh, dim, left) + mesh_r = Submesh(mesh, dim, right) + V = VectorFunctionSpace(mesh, "CG", 1) + V_l = VectorFunctionSpace(mesh_l, "CG", 1) + V_r = VectorFunctionSpace(mesh_r, "CG", 1) + # Test various combinations. 
+ x = SpatialCoordinate(mesh) + f = Function(V).assign(mesh_l.coordinates, allow_missing_dofs=True) + e = sqrt(assemble(inner(f - x, f - x) * dx(left))) + assert abs(e) < 1.e-15 + x = SpatialCoordinate(mesh) + f = Function(V).assign(mesh_r.coordinates, allow_missing_dofs=True) + e = sqrt(assemble(inner(f - x, f - x) * dx(right))) + assert abs(e) < 1.e-15 + x = SpatialCoordinate(mesh_l) + f = Function(V_l).assign(mesh.coordinates) + e = sqrt(assemble(inner(f - x, f - x) * dx(left))) + assert abs(e) < 1.e-15 + x = SpatialCoordinate(mesh_r) + f = Function(V_r).assign(mesh.coordinates) + e = sqrt(assemble(inner(f - x, f - x) * dx(right))) + assert abs(e) < 1.e-15 + x = SpatialCoordinate(mesh_r) + f = Function(V_r).assign(mesh_l.coordinates, allow_missing_dofs=True) + e = sqrt(assemble(inner(f - x, f - x) * dx(middle))) + assert abs(e) < 1.e-15 + x = SpatialCoordinate(mesh_l) + f = Function(V_l).assign(mesh_r.coordinates, allow_missing_dofs=True) + e = sqrt(assemble(inner(f - x, f - x) * dx(middle))) + assert abs(e) < 1.e-15 + + +@pytest.mark.parallel(nprocs=2) +def test_submesh_assign_function_2_quads_2_processes_no_overlap(): + # mesh + # rank 0: + # 2----6---(4) + # | | + # 5 0 (7) + # | | + # 1----8---(3) + # rank 1: + # 2----6----4 + # | | + # 5 0 7 + # | | plex points + # 1----8----3 () = ghost + left = 111 + right = 222 + distribution_parameters = { + "overlap_type": (DistributedMeshOverlapType.NONE, 0), + "partitioner_type": "simple", + } + mesh = RectangleMesh( + 2, 1, 2., 1., quadrilateral=True, distribution_parameters=distribution_parameters, + ) + dim = mesh.topological_dimension + x, _ = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DG", 0) + f_l = Function(DG0).interpolate(conditional(x < 1.0, 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > 1.0, 1, 0)) + mesh = RelabeledMesh(mesh, [f_l, f_r], [left, right]) + mesh_l = Submesh(mesh, dim, left) + mesh_r = Submesh(mesh, dim, right) + elem = mesh.ufl_coordinate_element() + V = 
FunctionSpace(mesh, elem) + # Test various combinations. + x = SpatialCoordinate(mesh) + f = Function(V).assign(mesh_r.coordinates, allow_missing_dofs=True) + e = sqrt(assemble(inner(f - x, f - x) * dx(right))) + assert abs(e) < 1.e-15 + x = SpatialCoordinate(mesh) + f = Function(V).assign(mesh_l.coordinates, allow_missing_dofs=True) + e = sqrt(assemble(inner(f - x, f - x) * dx(left))) + assert abs(e) < 1.e-15 + + +@pytest.mark.parallel(nprocs=8) +@pytest.mark.parametrize('simplex', [True, False]) +@pytest.mark.parametrize('distribution_parameters', [None, {"overlap_type": (DistributedMeshOverlapType.NONE, 0)}]) +def test_submesh_assign_function_unstructured_8_processes(simplex, distribution_parameters): + if not simplex and distribution_parameters == {"overlap_type": (DistributedMeshOverlapType.NONE, 0)}: + pytest.skip(reason="quad orientation bug; see https://github.com/firedrakeproject/firedrake/issues/4476") + left = 111 + right = 222 + middle = 111222 + if simplex: + mesh_file = join(cwd, "..", "..", "..", "docs", "notebooks/stokes-control.msh") + mesh = Mesh(mesh_file, distribution_parameters=distribution_parameters) + x, _ = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DP", 0) + f_l = Function(DG0).interpolate(conditional(x < 15., 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > 7., 1, 0)) + f_m = Function(DG0).interpolate(conditional(And(x < 15., x > 7.), 1, 0)) + mesh = RelabeledMesh(mesh, [f_l, f_r, f_m], [left, right, middle]) + elem = finat.ufl.FiniteElement("RT", mesh.ufl_cell(), 2) + else: + mesh = Mesh(join(cwd, "..", "meshes", "unitsquare_unstructured_quadrilaterals.msh"), distribution_parameters=distribution_parameters) + x, _ = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DQ", 0) + f_l = Function(DG0).interpolate(conditional(x < .75, 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > .50, 1, 0)) + f_m = Function(DG0).interpolate(conditional(And(x < .75, x > .50), 1, 0)) + mesh = RelabeledMesh(mesh, [f_l, f_r, f_m], 
[left, right, middle]) + elem = finat.ufl.FiniteElement("RTCF", mesh.ufl_cell(), 2) + dim = mesh.topological_dimension + mesh_l = Submesh(mesh, dim, left) + mesh_r = Submesh(mesh, dim, right) + V = FunctionSpace(mesh, elem) + V_l = FunctionSpace(mesh_l, elem) + V_r = FunctionSpace(mesh_r, elem) + f = Function(V).project(mesh.coordinates, solver_parameters={"ksp_rtol": 1.e-16}) + f_l = Function(V_l).project(mesh_l.coordinates, solver_parameters={"ksp_rtol": 1.e-16}) + f_r = Function(V_r).project(mesh_r.coordinates, solver_parameters={"ksp_rtol": 1.e-16}) + A_l = assemble(Constant(1.) * dx(domain=mesh_l)) + A_r = assemble(Constant(1.) * dx(domain=mesh_r)) + A_m = assemble(Constant(1.) * dx(domain=mesh, subdomain_id=middle)) + # Test various combinations. + x = SpatialCoordinate(mesh) + f_ = Function(V).assign(f_l, allow_missing_dofs=True) + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(left))) + assert abs(e) / A_l < 1.e-14 + x = SpatialCoordinate(mesh) + f_ = Function(V).assign(f_r, allow_missing_dofs=True) + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(right))) + assert abs(e) / A_r < 1.e-14 + x = SpatialCoordinate(mesh_l) + f_ = Function(V_l).assign(f) + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(left))) + assert abs(e) / A_l < 1.e-14 + x = SpatialCoordinate(mesh_r) + f_ = Function(V_r).assign(f) + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(right))) + assert abs(e) / A_r < 1.e-14 + x = SpatialCoordinate(mesh_l) + f_ = Function(V_l).assign(f_r, allow_missing_dofs=True) + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(middle))) + assert abs(e) / A_m < 1.e-14 + x = SpatialCoordinate(mesh_r) + f_ = Function(V_r).assign(f_l, allow_missing_dofs=True) + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(middle))) + assert abs(e) / A_m < 1.e-14 + + +@pytest.mark.parallel(nprocs=2) +def test_submesh_assign_function_subset_3_quads_2_processes(): + left = 111 + right = 222 + middle = 111222 + leftleft = 111111 + rightright = 222222 + mesh = RectangleMesh( + 3, 1, 3., 1., 
quadrilateral=True, distribution_parameters={"partitioner_type": "simple"}, + ) + dim = mesh.topological_dimension + x, _ = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DG", 0) + f_l = Function(DG0).interpolate(conditional(x < 2.0, 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > 1.0, 1, 0)) + f_m = Function(DG0).interpolate(conditional(And(x < 2.0, x > 1.0), 1, 0)) + f_ll = Function(DG0).interpolate(conditional(x < 1.0, 1, 0)) + f_rr = Function(DG0).interpolate(conditional(x > 2.0, 1, 0)) + mesh = RelabeledMesh(mesh, [f_l, f_r, f_m, f_ll, f_rr], [left, right, middle, leftleft, rightright]) + mesh_l = Submesh(mesh, dim, left) + mesh_r = Submesh(mesh, dim, right) + V = VectorFunctionSpace(mesh, "CG", 3) + V_l = VectorFunctionSpace(mesh_l, "CG", 3) + V_r = VectorFunctionSpace(mesh_r, "CG", 3) + f = Function(V).interpolate(SpatialCoordinate(mesh)) + f_l = Function(V_l).interpolate(SpatialCoordinate(mesh_l)) + f_r = Function(V_r).interpolate(SpatialCoordinate(mesh_r)) + # Test assign on the left two cells. + # -- mesh_l -> mesh + subset_indices = np.where(f.dat.data_ro_with_halos[:, 0] < 1.001) + subset = op2.Subset(f.node_set, subset_indices) + x = SpatialCoordinate(mesh) + f_ = Function(V).interpolate(2 * x) + f_.assign(f_l, subset=subset) + e = sqrt(assemble(inner(f_ - 2 * x, f_ - 2 * x) * dx(rightright))) + assert abs(e) < 1.e-14 + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(leftleft))) + assert abs(e) < 1.e-14 + # -- mesh -> mesh_l + subset_indices = np.where(f_l.dat.data_ro_with_halos[:, 0] < 1.001) + subset = op2.Subset(f_l.node_set, subset_indices) + x = SpatialCoordinate(mesh_l) + f_ = Function(V_l).interpolate(2 * x) + f_.assign(f, subset=subset) + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(leftleft))) + assert abs(e) < 1.e-14 + # Test assign on the right two cells. 
+ # -- mesh_r -> mesh + subset_indices = np.where(f.dat.data_ro_with_halos[:, 0] > 1.999) + subset = op2.Subset(f.node_set, subset_indices) + x = SpatialCoordinate(mesh) + f_ = Function(V).interpolate(2 * x) + f_.assign(f_r, subset=subset) + e = sqrt(assemble(inner(f_ - 2 * x, f_ - 2 * x) * dx(leftleft))) + assert abs(e) < 1.e-14 + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(rightright))) + assert abs(e) < 1.e-14 + # -- mesh -> mesh_r + subset_indices = np.where(f_r.dat.data_ro_with_halos[:, 0] > 1.999) + subset = op2.Subset(f_r.node_set, subset_indices) + x = SpatialCoordinate(mesh_r) + f_ = Function(V_r).interpolate(2 * x) + f_.assign(f, subset=subset) + e = sqrt(assemble(inner(f_ - x, f_ - x) * dx(rightright))) + assert abs(e) < 1.e-14 + + +@pytest.mark.parallel(nprocs=2) +def test_submesh_assign_cofunction_3_quads_2_processes(): + # mesh + # rank 0: + # 4---12----6---15---(8)-(18)-(10) + # | | | | + # 11 0 13 1 (17) (2) (19) + # | | | | + # 3---14----5---16---(7)-(20)--(9) + # rank 1: + # (7)-(13)---3----9----5 + # | | | + # (12) (1) 8 0 10 + # | | | plex points + # (6)-(14)---2---11----4 () = ghost + left = 111 + right = 222 + middle = 111222 + mesh = RectangleMesh( + 3, 1, 3., 1., quadrilateral=True, distribution_parameters={"partitioner_type": "simple"}, + ) + dim = mesh.topological_dimension + x, _ = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DG", 0) + f_l = Function(DG0).interpolate(conditional(x < 2.0, 1, 0)) + f_r = Function(DG0).interpolate(conditional(x > 1.0, 1, 0)) + f_m = Function(DG0).interpolate(conditional(And(x < 2.0, x > 1.0), 1, 0)) + mesh = RelabeledMesh(mesh, [f_l, f_r, f_m], [left, right, middle]) + mesh_l = Submesh(mesh, dim, left) + mesh_r = Submesh(mesh, dim, right) + V = VectorFunctionSpace(mesh, "CG", 1) + V_l = VectorFunctionSpace(mesh_l, "CG", 1) + V_r = VectorFunctionSpace(mesh_r, "CG", 1) + v = TestFunction(V) + v_l = TestFunction(V_l) + v_r = TestFunction(V_r) + coords = Function(V).interpolate(SpatialCoordinate(mesh)) 
+ coords_l = Function(V_l).interpolate(SpatialCoordinate(mesh_l)) + coords_r = Function(V_r).interpolate(SpatialCoordinate(mesh_r)) + cof = assemble(inner(SpatialCoordinate(mesh), v) * dx) + cof_l = assemble(inner(SpatialCoordinate(mesh_l), v_l) * dx) + cof_r = assemble(inner(SpatialCoordinate(mesh_r), v_r) * dx) + # Test assign on the left two cells. + # -- mesh_l -> mesh + cof_ = Cofunction(V.dual()).assign(cof_l, allow_missing_dofs=True) + subset_indices = np.where(coords.dat.data_ro_with_halos[:, 0] < 1.001) + assert np.allclose(cof_.dat.data_ro_with_halos[subset_indices], cof.dat.data_ro_with_halos[subset_indices]) + # -- mesh -> mesh_l + cof_ = Cofunction(V_l.dual()).assign(cof) + subset_indices = np.where(coords_l.dat.data_ro_with_halos[:, 0] < 1.001) + assert np.allclose(cof_.dat.data_ro_with_halos[subset_indices], cof_l.dat.data_ro_with_halos[subset_indices]) + # Test assign on the right two cells. + # -- mesh_r -> mesh + cof_ = Cofunction(V.dual()).assign(cof_r, allow_missing_dofs=True) + subset_indices = np.where(coords.dat.data_ro_with_halos[:, 0] > 1.999) + assert np.allclose(cof_.dat.data_ro_with_halos[subset_indices], cof.dat.data_ro_with_halos[subset_indices]) + # -- mesh -> mesh_r + cof_ = Cofunction(V_r.dual()).assign(cof) + subset_indices = np.where(coords_r.dat.data_ro_with_halos[:, 0] > 1.999) + assert np.allclose(cof_.dat.data_ro_with_halos[subset_indices], cof_r.dat.data_ro_with_halos[subset_indices]) diff --git a/tests/firedrake/submesh/test_submesh_base.py b/tests/firedrake/submesh/test_submesh_base.py new file mode 100644 index 0000000000..58d379bf5a --- /dev/null +++ b/tests/firedrake/submesh/test_submesh_base.py @@ -0,0 +1,276 @@ +import pytest +import numpy as np +from firedrake import * + + +def _get_expr(m): + if m.geometric_dimension == 1: + x, = SpatialCoordinate(m) + y = x * x + z = x + y + elif m.geometric_dimension == 2: + x, y = SpatialCoordinate(m) + z = x + y + elif m.geometric_dimension == 3: + x, y, z = SpatialCoordinate(m) 
+ else: + raise NotImplementedError("Not implemented") + return exp(x + y * y + z * z * z) + + +def _test_submesh_base_cell_integral_quad(family_degree, nelem): + dim = 2 + family, degree = family_degree + mesh = UnitSquareMesh(nelem, nelem, quadrilateral=True) + V = FunctionSpace(mesh, family, degree) + f = Function(V).interpolate(_get_expr(mesh)) + x, y = SpatialCoordinate(mesh) + cond = conditional(x > .5, 1, + conditional(y > .5, 1, 0)) # noqa: E128 + target = assemble(f * cond * dx) + DQ0 = FunctionSpace(mesh, "DQ", 0) + indicator_function = Function(DQ0).interpolate(cond) + label_value = 999 + mesh.mark_entities(indicator_function, label_value) + msub = Submesh(mesh, dim, label_value) + Vsub = FunctionSpace(msub, family, degree) + fsub = Function(Vsub).interpolate(_get_expr(msub)) + result = assemble(fsub * dx) + assert abs(result - target) < 1e-12 + + +@pytest.mark.parametrize('family_degree', [("Q", 4), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8, 16]) +def test_submesh_base_cell_integral_quad_1_process(family_degree, nelem): + _test_submesh_base_cell_integral_quad(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize('family_degree', [("Q", 4), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8, 16]) +def test_submesh_base_cell_integral_quad_2_processes(family_degree, nelem): + _test_submesh_base_cell_integral_quad(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=3) +@pytest.mark.parametrize('family_degree', [("Q", 4), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8, 16]) +def test_submesh_base_cell_integral_quad_3_processes(family_degree, nelem): + _test_submesh_base_cell_integral_quad(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('family_degree', [("Q", 4), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8, 16]) +def test_submesh_base_cell_integral_quad_4_processes(family_degree, nelem): + _test_submesh_base_cell_integral_quad(family_degree, nelem) + + +def 
_test_submesh_base_facet_integral_quad(family_degree, nelem): + dim = 2 + family, degree = family_degree + mesh = UnitSquareMesh(nelem, nelem, quadrilateral=True) + x, y = SpatialCoordinate(mesh) + cond = conditional(x > .5, 1, + conditional(y > .5, 1, 0)) # noqa: E128 + DQ0 = FunctionSpace(mesh, "DQ", 0) + indicator_function = Function(DQ0).interpolate(cond) + label_value = 999 + mesh.mark_entities(indicator_function, label_value) + subm = Submesh(mesh, dim, label_value) + for i in [1, 2, 3, 4]: + target = assemble(cond * _get_expr(mesh) * ds(i)) + result = assemble(_get_expr(subm) * ds(i)) + assert abs(result - target) < 2e-12 + # Check new boundary. + assert abs(assemble(Constant(1.) * ds(subdomain_id=5, domain=subm)) - 1.0) < 1e-12 + x, y = SpatialCoordinate(subm) + assert abs(assemble(x**4 * ds(5)) - (.5**5 / 5 + .5**4 * .5)) < 1e-12 + assert abs(assemble(y**4 * ds(5)) - (.5**5 / 5 + .5**4 * .5)) < 1e-12 + + +@pytest.mark.parametrize('family_degree', [("Q", 3), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8, 16]) +def test_submesh_base_facet_integral_quad_1_process(family_degree, nelem): + _test_submesh_base_facet_integral_quad(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize('family_degree', [("Q", 3), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8, 16]) +def test_submesh_base_facet_integral_quad_2_processes(family_degree, nelem): + _test_submesh_base_facet_integral_quad(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=3) +@pytest.mark.parametrize('family_degree', [("Q", 3), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8, 16]) +def test_submesh_base_facet_integral_quad_3_processes(family_degree, nelem): + _test_submesh_base_facet_integral_quad(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('family_degree', [("Q", 3), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8, 16]) +def test_submesh_base_facet_integral_quad_4_processes(family_degree, nelem): + 
_test_submesh_base_facet_integral_quad(family_degree, nelem) + + +def _test_submesh_base_cell_integral_hex(family_degree, nelem): + dim = 3 + family, degree = family_degree + mesh = UnitCubeMesh(nelem, nelem, nelem, hexahedral=True) + V = FunctionSpace(mesh, family, degree) + f = Function(V).interpolate(_get_expr(mesh)) + x, y, z = SpatialCoordinate(mesh) + cond = conditional(x > .5, 1, + conditional(y > .5, 1, # noqa: E128 + conditional(z > .5, 1, 0))) # noqa: E128 + target = assemble(f * cond * dx) + DQ0 = FunctionSpace(mesh, "DQ", 0) + indicator_function = Function(DQ0).interpolate(cond) + label_value = 999 + mesh.mark_entities(indicator_function, label_value) + msub = Submesh(mesh, dim, label_value) + Vsub = FunctionSpace(msub, family, degree) + fsub = Function(Vsub).interpolate(_get_expr(msub)) + result = assemble(fsub * dx) + assert abs(result - target) < 1e-12 + + +@pytest.mark.parametrize('family_degree', [("Q", 4), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8]) +def test_submesh_base_cell_integral_hex_1_process(family_degree, nelem): + _test_submesh_base_cell_integral_hex(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize('family_degree', [("Q", 4), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8]) +def test_submesh_base_cell_integral_hex_2_processes(family_degree, nelem): + _test_submesh_base_cell_integral_hex(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('family_degree', [("Q", 4), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8]) +def test_submesh_base_cell_integral_hex_4_processes(family_degree, nelem): + _test_submesh_base_cell_integral_hex(family_degree, nelem) + + +def _test_submesh_base_facet_integral_hex(family_degree, nelem): + dim = 3 + family, degree = family_degree + mesh = UnitCubeMesh(nelem, nelem, nelem, hexahedral=True) + x, y, z = SpatialCoordinate(mesh) + cond = conditional(x > .5, 1, + conditional(y > .5, 1, # noqa: E128 + conditional(z > .5, 1, 0))) # noqa: E128 + 
DQ0 = FunctionSpace(mesh, "DQ", 0) + indicator_function = Function(DQ0).interpolate(cond) + label_value = 999 + mesh.mark_entities(indicator_function, label_value) + subm = Submesh(mesh, dim, label_value) + for i in [1, 2, 3, 4, 5, 6]: + target = assemble(cond * _get_expr(mesh) * ds(i)) + result = assemble(_get_expr(subm) * ds(i)) + assert abs(result - target) < 2e-12 + # Check new boundary. + assert abs(assemble(Constant(1) * ds(subdomain_id=7, domain=subm)) - .75) < 1e-12 + x, y, z = SpatialCoordinate(subm) + assert abs(assemble(x**4 * ds(7)) - (.5**5 / 5 * .5 * 2 + .5**4 * .5**2)) < 1e-12 + assert abs(assemble(y**4 * ds(7)) - (.5**5 / 5 * .5 * 2 + .5**4 * .5**2)) < 1e-12 + assert abs(assemble(z**4 * ds(7)) - (.5**5 / 5 * .5 * 2 + .5**4 * .5**2)) < 1e-12 + + +@pytest.mark.parametrize('family_degree', [("Q", 3), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8]) +def test_submesh_base_facet_integral_hex_1_process(family_degree, nelem): + _test_submesh_base_facet_integral_hex(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=2) +@pytest.mark.parametrize('family_degree', [("Q", 3), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8]) +def test_submesh_base_facet_integral_hex_2_processes(family_degree, nelem): + _test_submesh_base_facet_integral_hex(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('family_degree', [("Q", 3), ]) +@pytest.mark.parametrize('nelem', [2, 4, 8]) +def test_submesh_base_facet_integral_hex_4_processes(family_degree, nelem): + _test_submesh_base_facet_integral_hex(family_degree, nelem) + + +@pytest.mark.parallel(nprocs=2) +def test_submesh_base_entity_maps(): + + # 3---9--(5)-(12)(7) (7)-(13)-3---9---5 + # | | | | | | + # 8 0 (11) (1) (13) (12) (1) 8 0 10 mesh + # | | | | | | + # 2--10--(4)(14)-(6) (6)-(14)-2--11---4 + # + # 2---6---4 (4)-(7)-(2) + # | | | | + # 5 0 8 (6) (0) (5) submesh + # | | | | + # 1---7---3 (3)-(8)-(1) + # + # rank 0 rank 1 + + dim = 2 + mesh = RectangleMesh(2, 1, 2., 1., 
quadrilateral=True, distribution_parameters={"partitioner_type": "simple"}) + assert mesh.comm.size == 2 + rank = mesh.comm.rank + x, y = SpatialCoordinate(mesh) + DQ0 = FunctionSpace(mesh, "DQ", 0) + indicator_function = Function(DQ0).interpolate(conditional(x < 1., 1, 0)) + label_value = 999 + mesh.mark_entities(indicator_function, label_value) + submesh = Submesh(mesh, dim, label_value) + submesh.topology_dm.viewFromOptions("-dm_view") + subdm = submesh.topology.topology_dm + if rank == 0: + assert subdm.getLabel("pyop2_core").getStratumSize(1) == 0 + assert subdm.getLabel("pyop2_owned").getStratumSize(1) == 9 + assert subdm.getLabel("pyop2_ghost").getStratumSize(1) == 0 + assert (subdm.getLabel("pyop2_owned").getStratumIS(1).getIndices() == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])).all() + assert (mesh.interior_facets.facets == np.array([11])).all() + assert (mesh.exterior_facets.facets == np.array([8, 9, 10, 12, 13, 14])).all() + assert (submesh.interior_facets.facets == np.array([])).all() + assert (submesh.exterior_facets.facets == np.array([5, 8, 6, 7])).all() + else: + assert subdm.getLabel("pyop2_core").getStratumSize(1) == 0 + assert subdm.getLabel("pyop2_owned").getStratumSize(1) == 0 + assert subdm.getLabel("pyop2_ghost").getStratumSize(1) == 9 + assert (subdm.getLabel("pyop2_ghost").getStratumIS(1).getIndices() == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])).all() + assert (mesh.interior_facets.facets == np.array([8])).all() + assert (mesh.exterior_facets.facets == np.array([9, 10, 11, 12, 13, 14])).all() + assert (submesh.interior_facets.facets == np.array([])).all() + assert (submesh.exterior_facets.facets == np.array([6, 5, 7, 8])).all() + composed_map, integral_type = mesh.topology.trans_mesh_entity_map(submesh.topology, "cell", "everywhere", None) + assert integral_type == "cell" + if rank == 0: + assert (composed_map.maps_[0].values_with_halo == np.array([0])).all() + else: + assert (composed_map.maps_[0].values_with_halo == np.array([1])).all() + composed_map,
integral_type = mesh.topology.trans_mesh_entity_map(submesh.topology, "exterior_facet", 5, None) + assert integral_type == "interior_facet" + if rank == 0: + assert (composed_map.maps_[0].values_with_halo == np.array([-1, 0, -1, -1]).reshape((-1, 1))).all() # entire exterior-interior map + else: + assert (composed_map.maps_[0].values_with_halo == np.array([-1, 0, -1, -1]).reshape((-1, 1))).all() # entire exterior-interior map + composed_map, integral_type = mesh.topology.trans_mesh_entity_map(submesh.topology, "exterior_facet", 4, None) + assert integral_type == "exterior_facet" + if rank == 0: + assert (composed_map.maps_[0].values_with_halo == np.array([0, -1, 1, 2]).reshape((-1, 1))).all() # entire exterior-exterior map + else: + assert (composed_map.maps_[0].values_with_halo == np.array([3, -1, 4, 5]).reshape((-1, 1))).all() # entire exterior-exterior map + composed_map, integral_type = submesh.topology.trans_mesh_entity_map(mesh.topology, "exterior_facet", 1, None) + assert integral_type == "exterior_facet" + if rank == 0: + assert (composed_map.maps_[0].values_with_halo == np.array([0, 2, 3, -1, -1, -1]).reshape((-1, 1))).all() + else: + assert (composed_map.maps_[0].values_with_halo == np.array([-1, -1, -1, 0, 2, 3]).reshape((-1, 1))).all() diff --git a/tests/firedrake/submesh/test_submesh_basics.py b/tests/firedrake/submesh/test_submesh_basics.py index 480105834f..ccd2500e88 100644 --- a/tests/firedrake/submesh/test_submesh_basics.py +++ b/tests/firedrake/submesh/test_submesh_basics.py @@ -11,6 +11,6 @@ def test_submesh_parent(): cell_marker = 100 parent = RelabeledMesh(mesh, [m], [cell_marker]) - submesh = Submesh(parent, parent.topological_dimension(), cell_marker) + submesh = Submesh(parent, parent.topological_dimension, cell_marker) assert submesh.topology.submesh_parent is parent.topology assert submesh.submesh_parent is parent diff --git a/tests/firedrake/submesh/test_submesh_comm.py b/tests/firedrake/submesh/test_submesh_comm.py new file mode 100644 
index 0000000000..0a442179a9 --- /dev/null +++ b/tests/firedrake/submesh/test_submesh_comm.py @@ -0,0 +1,96 @@ +import pytest +import numpy as np +from firedrake import * +from firedrake.petsc import PETSc + + +def assert_local_equality(A, Asub, V, Vsub): + u = Function(V) + u.dat.data[:] = np.arange(*V.dof_dset.layout_vec.getOwnershipRange()) + usub = Function(Vsub).assign(u) + indices = usub.dat.data_ro.astype(PETSc.IntType) + rmap = PETSc.LGMap().create(indices, comm=A.getComm()) + + B = PETSc.Mat().create(comm=A.getComm()) + B.setSizes(A.getSizes()) + B.setType(PETSc.Mat.Type.IS) + B.setLGMap(rmap, rmap) + B.setISLocalMat(Asub) + B.setUp() + B.assemble() + D = PETSc.Mat() + B.convert(PETSc.Mat.Type.AIJ, D) + D.axpy(-1, A) + assert np.isclose(D.norm(PETSc.NormType.FROBENIUS), 0) + + +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("reorder", [False, True]) +@pytest.mark.parametrize("ignore_halo", [False, True]) +def test_create_submesh_comm_self(reorder, ignore_halo): + nx = 4 + mesh = UnitSquareMesh(nx, nx, quadrilateral=True, reorder=reorder, distribution_parameters={"overlap_type": (DistributedMeshOverlapType.VERTEX, 1)}) + submesh = Submesh(mesh, ignore_halo=ignore_halo, reorder=reorder, comm=COMM_SELF) + assert submesh.submesh_parent is mesh + assert submesh.comm.size == 1 + # Submesh on COMM_SELF should not have halo + assert submesh.cell_set.total_size == submesh.cell_set.size + # Submesh on COMM_SELF should exclude the halo from the parent mesh if ignore_halo = True + expected_size = mesh.cell_set.size if ignore_halo else mesh.cell_set.total_size + assert submesh.cell_set.size == expected_size + + x = Function(submesh.coordinates.function_space()) + x.assign(mesh.coordinates) + assert np.allclose(submesh.coordinates.dat.data_ro, x.dat.data_ro) + + +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("family,degree", [("DG", 0), ("CG", 1)]) +@pytest.mark.parametrize("reorder", [False, True]) +def test_assemble_submesh_comm_self(family, degree, 
reorder): + nx = 6 + ny = 5 + px = -np.cos(np.linspace(0, np.pi, nx)) + py = -np.cos(np.linspace(0, np.pi, ny)) + mesh = TensorRectangleMesh(px, py, reorder=reorder) + submesh = Submesh(mesh, ignore_halo=True, reorder=reorder, comm=COMM_SELF) + + Vsub = FunctionSpace(submesh, family, degree) + Asub = assemble(inner(TrialFunction(Vsub), TestFunction(Vsub))*dx) + + V = FunctionSpace(mesh, family, degree) + A = assemble(inner(TrialFunction(V), TestFunction(V))*dx) + assert_local_equality(A.petscmat, Asub.petscmat, V, Vsub) + + +@pytest.mark.parallel([1, 3]) +@pytest.mark.parametrize("label", ["some", "all"]) +@pytest.mark.parametrize("reorder", [False, True]) +def test_label_submesh_comm_self(label, reorder): + subdomain_id = 999 + nx = 8 + mesh = UnitSquareMesh(nx, nx, reorder=reorder) + + M = FunctionSpace(mesh, "DG", 0) + marker = Function(M) + if label == "some": + x, y = SpatialCoordinate(mesh) + marker.interpolate(conditional(Or(x > 0.5, y > 0.5), 1, 0)) + elif label == "all": + marker.assign(1) + else: + raise ValueError(f"Unrecognized label {label}") + + mesh = RelabeledMesh(mesh, [marker], [subdomain_id]) + submesh = Submesh(mesh, mesh.topological_dimension, subdomain_id, ignore_halo=True, reorder=reorder, comm=COMM_SELF) + + Vsub = FunctionSpace(submesh, "DG", 0) + Asub = assemble(inner(TrialFunction(Vsub), TestFunction(Vsub)) * dx) + + if label == "all": + V = FunctionSpace(mesh, "DG", 0) + else: + smesh = Submesh(mesh, mesh.topological_dimension, subdomain_id, reorder=reorder) + V = FunctionSpace(smesh, "DG", 0) + A = assemble(inner(TrialFunction(V), TestFunction(V)) * dx) + assert_local_equality(A.petscmat, Asub.petscmat, V, Vsub) diff --git a/tests/firedrake/submesh/test_submesh_facet.py b/tests/firedrake/submesh/test_submesh_facet.py index 1e9554c0c6..83ac29c404 100644 --- a/tests/firedrake/submesh/test_submesh_facet.py +++ b/tests/firedrake/submesh/test_submesh_facet.py @@ -85,7 +85,7 @@ def test_submesh_facet_corner_case_1(): V = FunctionSpace(mesh, 
"Q", 2) f = Function(V).interpolate(conditional(Or(facet0, facet1), 1, 0)) mesh = RelabeledMesh(mesh, [f], [999]) - subm = Submesh(mesh, mesh.topological_dimension() - 1, 999) + subm = Submesh(mesh, mesh.topological_dimension - 1, 999) v = assemble(Constant(1.) * dx(domain=subm)) assert abs(v - 2.) < 2.e-15 @@ -121,4 +121,16 @@ def test_submesh_facet_corner_case_2(): ) facet_value = 999 mesh = RelabeledMesh(mesh, [facet_function], [facet_value]) - _ = Submesh(mesh, mesh.topological_dimension() - 1, facet_value) + _ = Submesh(mesh, mesh.topological_dimension - 1, facet_value) + + +def test_submesh_facet_all_facets(): + mesh = UnitCubeMesh(2, 2, 2) + submesh1 = Submesh(mesh, mesh.topological_dimension - 1) + + V = FunctionSpace(mesh, "HDiv Trace", 0) + facet_function = Function(V).assign(1) + facet_value = 999 + rmesh = RelabeledMesh(mesh, [facet_function], [facet_value]) + submesh2 = Submesh(rmesh, mesh.topological_dimension - 1, facet_value) + assert submesh2.cell_set.size == submesh1.cell_set.size diff --git a/tests/firedrake/submesh/test_submesh_interpolate.py b/tests/firedrake/submesh/test_submesh_interpolate.py index a26c1acb08..39d7f8d70d 100644 --- a/tests/firedrake/submesh/test_submesh_interpolate.py +++ b/tests/firedrake/submesh/test_submesh_interpolate.py @@ -10,14 +10,14 @@ def _get_expr(V): m = V.ufl_domain() - if m.geometric_dimension() == 1: + if m.geometric_dimension == 1: x, = SpatialCoordinate(m) y = x * x z = x + y - elif m.geometric_dimension() == 2: + elif m.geometric_dimension == 2: x, y = SpatialCoordinate(m) z = x + y - elif m.geometric_dimension() == 3: + elif m.geometric_dimension == 3: x, y, z = SpatialCoordinate(m) else: raise NotImplementedError("Not implemented") @@ -28,7 +28,7 @@ def _get_expr(V): def make_submesh(mesh, subdomain_cond, label_value): - dim = mesh.topological_dimension() + dim = mesh.topological_dimension DG0 = FunctionSpace(mesh, "DG", 0) indicator_function = Function(DG0).interpolate(subdomain_cond) 
mesh.mark_entities(indicator_function, label_value) @@ -51,7 +51,7 @@ def _test_submesh_interpolate_cell_cell(mesh, subdomain_cond, fe_fesub): f = Function(V_).interpolate(f) v0 = Coargument(V.dual(), 0) v1 = TrialFunction(Vsub) - interp = Interpolate(v1, v0, allow_missing_dofs=True) + interp = interpolate(v1, v0, allow_missing_dofs=True) A = assemble(interp) g = assemble(action(A, gsub)) assert assemble(inner(g - f, g - f) * dx(label_value)).real < 1e-14 @@ -145,7 +145,7 @@ def test_submesh_interpolate_subcell_subcell_2_processes(): mesh = RectangleMesh( 3, 1, 3., 1., quadrilateral=True, distribution_parameters={"partitioner_type": "simple"}, ) - dim = mesh.topological_dimension() + dim = mesh.topological_dimension x, _ = SpatialCoordinate(mesh) DG0 = FunctionSpace(mesh, "DG", 0) f_l = Function(DG0).interpolate(conditional(x < 2.0, 1, 0)) @@ -165,7 +165,7 @@ def test_submesh_interpolate_subcell_subcell_2_processes(): f_l.dat.data_with_halos[:] = 3.0 v0 = Coargument(V_r.dual(), 0) v1 = TrialFunction(V_l) - interp = Interpolate(v1, v0, allow_missing_dofs=True) + interp = interpolate(v1, v0, allow_missing_dofs=True) A = assemble(interp) f_r = assemble(action(A, f_l)) g_r = Function(V_r).interpolate(conditional(x < 2.001, 3.0, 0.0)) @@ -210,7 +210,7 @@ def expr(m): ) facet_value = 999 mesh = RelabeledMesh(mesh, [facet_function], [facet_value]) - subm = Submesh(mesh, mesh.topological_dimension() - 1, facet_value) + subm = Submesh(mesh, mesh.topological_dimension - 1, facet_value) DG3d = FunctionSpace(mesh, "DG", degree) dg3d = Function(DG3d).interpolate(expr(mesh)) DG2d = FunctionSpace(subm, "DG", degree) @@ -258,7 +258,7 @@ def expr(m): facet_function = Function(V).interpolate(Constant(1.)) facet_value = 999 mesh = RelabeledMesh(mesh, [facet_function], [facet_value]) - subm = Submesh(mesh, mesh.topological_dimension() - 1, facet_value) + subm = Submesh(mesh, mesh.topological_dimension - 1, facet_value) HDivT3d = FunctionSpace(mesh, "HDiv Trace", degree) hdivt3d = 
Function(HDivT3d).interpolate(expr(mesh)) DG2d = FunctionSpace(subm, "DG", degree) diff --git a/tests/firedrake/submesh/test_submesh_solve.py b/tests/firedrake/submesh/test_submesh_solve.py new file mode 100644 index 0000000000..aa5e6725de --- /dev/null +++ b/tests/firedrake/submesh/test_submesh_solve.py @@ -0,0 +1,753 @@ +import os +import pytest +from os.path import abspath, dirname, join +import numpy as np +from firedrake import * +from firedrake.cython import dmcommon +from petsc4py import PETSc + + +cwd = abspath(dirname(__file__)) + + +def _solve_helmholtz(mesh): + V = FunctionSpace(mesh, "CG", 1) + u = TrialFunction(V) + v = TestFunction(V) + x = SpatialCoordinate(mesh) + u_exact = sin(x[0]) * sin(x[1]) + f = Function(V).interpolate(2 * u_exact) + a = (inner(grad(u), grad(v)) + inner(u, v)) * dx + L = inner(f, v) * dx + bc = DirichletBC(V, u_exact, "on_boundary") + sol = Function(V) + solve(a == L, sol, bcs=[bc], solver_parameters={'ksp_type': 'preonly', + 'pc_type': 'lu'}) + return sqrt(assemble((sol - u_exact)**2 * dx)) + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('nelem', [2, 4]) +@pytest.mark.parametrize('distribution_parameters', [None, {"overlap_type": (DistributedMeshOverlapType.NONE, 0)}]) +def test_submesh_solve_simple(nelem, distribution_parameters): + dim = 2 + # Compute reference error. + mesh = RectangleMesh(nelem, nelem * 2, 1., 1., quadrilateral=True, distribution_parameters=distribution_parameters) + error = _solve_helmholtz(mesh) + # Compute submesh error. 
+ mesh = RectangleMesh(nelem * 2, nelem * 2, 2., 1., quadrilateral=True, distribution_parameters=distribution_parameters) + x, y = SpatialCoordinate(mesh) + DQ0 = FunctionSpace(mesh, "DQ", 0) + indicator_function = Function(DQ0).interpolate(conditional(x < 1., 1, 0)) + mesh.mark_entities(indicator_function, 999) + mesh = Submesh(mesh, dim, 999) + suberror = _solve_helmholtz(mesh) + assert abs(error - suberror) < 1e-15 + + +@pytest.mark.parallel(nprocs=3) +@pytest.mark.parametrize('dim', [2, 3]) +@pytest.mark.parametrize('simplex', [True, False]) +def test_submesh_solve_cell_cell_mixed_scalar(dim, simplex): + if dim == 2: + if simplex: + mesh = Mesh(join(cwd, "..", "..", "..", "docs", "notebooks/stokes-control.msh")) + bid = (1, 2, 3, 4, 5) + submesh_expr = lambda x: conditional(x[0] < 10., 1, 0) + solution_expr = lambda x: x[0] + x[1] + else: + mesh = Mesh(join(cwd, "..", "meshes", "unitsquare_unstructured_quadrilaterals.msh")) + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + x, y = SpatialCoordinate(mesh) + hdivtrace0x = Function(HDivTrace0).interpolate(conditional(And(x > .001, x < .999), 0, 1)) + hdivtrace0y = Function(HDivTrace0).interpolate(conditional(And(y > .001, y < .999), 0, 1)) + mesh = RelabeledMesh(mesh, [hdivtrace0x, hdivtrace0y], [111, 222]) + bid = (111, 222) + submesh_expr = lambda x: conditional(x[0] < .5, 1, 0) + solution_expr = lambda x: x[0] + x[1] + elif dim == 3: + if simplex: + nref = 3 + mesh = BoxMesh(2 ** nref, 2 ** nref, 2 ** nref, 1., 1., 1., hexahedral=False) + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + else: + mesh = Mesh(join(cwd, "..", "meshes", "cube_hex.msh")) + HDivTrace0 = FunctionSpace(mesh, "Q", 2) + x, y, z = SpatialCoordinate(mesh) + hdivtrace0x = Function(HDivTrace0).interpolate(conditional(And(x > .001, x < .999), 0, 1)) + hdivtrace0y = Function(HDivTrace0).interpolate(conditional(And(y > .001, y < .999), 0, 1)) + hdivtrace0z = Function(HDivTrace0).interpolate(conditional(And(z > .001, z < .999), 0, 1)) + 
mesh = RelabeledMesh(mesh, [hdivtrace0x, hdivtrace0y, hdivtrace0z], [111, 222, 333]) + bid = (111, 222, 333) + submesh_expr = lambda x: conditional(x[0] > .5, 1, 0) + solution_expr = lambda x: x[0] + x[1] + x[2] + else: + raise NotImplementedError + DG0 = FunctionSpace(mesh, "DG", 0) + submesh_function = Function(DG0).interpolate(submesh_expr(SpatialCoordinate(mesh))) + submesh_label = 999 + mesh.mark_entities(submesh_function, submesh_label) + subm = Submesh(mesh, dim, submesh_label) + V0 = FunctionSpace(mesh, "CG", 2) + V1 = FunctionSpace(subm, "CG", 3) + V = V0 * V1 + u = TrialFunction(V) + v = TestFunction(V) + u0, u1 = split(u) + v0, v1 = split(v) + dx0 = Measure("dx", domain=mesh, intersect_measures=(Measure("dx", subm),)) + dx1 = Measure("dx", domain=subm, intersect_measures=(Measure("dx", mesh),)) + a = inner(grad(u0), grad(v0)) * dx0 + inner(u0 - u1, v1) * dx1 + L = inner(Constant(0.), v1) * dx1 + g = Function(V0).interpolate(solution_expr(SpatialCoordinate(mesh))) + bc = DirichletBC(V.sub(0), g, bid) + solution = Function(V) + solve(a == L, solution, bcs=[bc]) + target = Function(V1).interpolate(solution_expr(SpatialCoordinate(subm))) + assert np.allclose(solution.subfunctions[1].dat.data_ro_with_halos, target.dat.data_ro_with_halos) + + +@pytest.mark.parallel(nprocs=3) +@pytest.mark.parametrize('dim', [2, 3]) +@pytest.mark.parametrize('simplex', [True, False]) +def test_submesh_solve_cell_cell_mixed_vector(dim, simplex): + if dim == 2: + if simplex: + mesh = Mesh(join(cwd, "..", "..", "..", "docs", "notebooks/stokes-control.msh")) + submesh_expr = lambda x: conditional(x[0] < 10., 1, 0) + elem0 = FiniteElement("RT", "triangle", 3) + elem1 = VectorElement("P", "triangle", 3) + else: + mesh = Mesh(join(cwd, "..", "meshes", "unitsquare_unstructured_quadrilaterals.msh")) + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + x, y = SpatialCoordinate(mesh) + hdivtrace0x = Function(HDivTrace0).interpolate(conditional(And(x > .001, x < .999), 0, 1)) + 
hdivtrace0y = Function(HDivTrace0).interpolate(conditional(And(y > .001, y < .999), 0, 1)) + mesh = RelabeledMesh(mesh, [hdivtrace0x, hdivtrace0y], [111, 222]) + submesh_expr = lambda x: conditional(x[0] < .5, 1, 0) + elem0 = FiniteElement("RTCF", "quadrilateral", 2) + elem1 = VectorElement("Q", "quadrilateral", 3) + elif dim == 3: + if simplex: + nref = 3 + mesh = BoxMesh(2 ** nref, 2 ** nref, 2 ** nref, 1., 1., 1., hexahedral=False) + x, y, z = SpatialCoordinate(mesh) + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + hdivtrace0x = Function(HDivTrace0).interpolate(conditional(And(x > .001, x < .999), 0, 1)) + hdivtrace0y = Function(HDivTrace0).interpolate(conditional(And(y > .001, y < .999), 0, 1)) + hdivtrace0z = Function(HDivTrace0).interpolate(conditional(And(z > .001, z < .999), 0, 1)) + mesh = RelabeledMesh(mesh, [hdivtrace0x, hdivtrace0y, hdivtrace0z], [111, 222, 333]) + submesh_expr = lambda x: conditional(x[0] > .5, 1, 0) + elem0 = FiniteElement("N1F", "tetrahedron", 3) + elem1 = VectorElement("P", "tetrahedron", 3) + else: + mesh = Mesh(join(cwd, "..", "meshes", "cube_hex.msh")) + HDivTrace0 = FunctionSpace(mesh, "Q", 2) + x, y, z = SpatialCoordinate(mesh) + hdivtrace0x = Function(HDivTrace0).interpolate(conditional(And(x > .001, x < .999), 0, 1)) + hdivtrace0y = Function(HDivTrace0).interpolate(conditional(And(y > .001, y < .999), 0, 1)) + hdivtrace0z = Function(HDivTrace0).interpolate(conditional(And(z > .001, z < .999), 0, 1)) + mesh = RelabeledMesh(mesh, [hdivtrace0x, hdivtrace0y, hdivtrace0z], [111, 222, 333]) + submesh_expr = lambda x: conditional(x[0] > .5, 1, 0) + elem0 = FiniteElement("NCF", "hexahedron", 2) + elem1 = VectorElement("Q", "hexahedron", 3) + with pytest.raises(NotImplementedError): + _ = FunctionSpace(mesh, elem0) + return + else: + raise NotImplementedError + DG0 = FunctionSpace(mesh, "DG", 0) + submesh_function = Function(DG0).interpolate(submesh_expr(SpatialCoordinate(mesh))) + submesh_label = 999 + 
mesh.mark_entities(submesh_function, submesh_label) + subm = Submesh(mesh, dim, submesh_label) + V0 = FunctionSpace(mesh, elem0) + V1 = FunctionSpace(subm, elem1) + V = V0 * V1 + u = TrialFunction(V) + v = TestFunction(V) + u0, u1 = split(u) + v0, v1 = split(v) + dx0 = Measure("dx", domain=mesh, intersect_measures=(Measure("dx", subm),)) + dx1 = Measure("dx", domain=subm, intersect_measures=(Measure("dx", mesh),)) + a = inner(u0, v0) * dx0 + inner(u0 - u1, v1) * dx1 + L = inner(SpatialCoordinate(mesh), v0) * dx0 + solution = Function(V) + solve(a == L, solution) + s0, s1 = split(solution) + x = SpatialCoordinate(subm) + assert assemble(inner(s1 - x, s1 - x) * dx1) < 1.e-20 + + +def _mixed_poisson_create_mesh_2d(nref, quadrilateral, submesh_region, label_submesh, label_submesh_compl): + # y + # | + # | + # 1.0 +--17---+--18---+ + # | | | + # 12 20 14 + # | | | + # 0.5 +--21---+--22---+ + # | | | + # 11 19 13 + # | | | + # 0.0 +--15---+--16---+----x + # + # 0.0 0.5 1.0 + mesh = UnitSquareMesh(2 ** nref, 2 ** nref, quadrilateral=quadrilateral) + eps = 1. / (2 ** nref) / 100. 
+ x, y = SpatialCoordinate(mesh) + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + f11 = Function(HDivTrace0).interpolate(conditional(And(x < eps, y < .5), 1, 0)) + f12 = Function(HDivTrace0).interpolate(conditional(And(x < eps, y > .5), 1, 0)) + f13 = Function(HDivTrace0).interpolate(conditional(And(x > 1 - eps, y < .5), 1, 0)) + f14 = Function(HDivTrace0).interpolate(conditional(And(x > 1 - eps, y > .5), 1, 0)) + f15 = Function(HDivTrace0).interpolate(conditional(And(x < .5, y < eps), 1, 0)) + f16 = Function(HDivTrace0).interpolate(conditional(And(x > .5, y < eps), 1, 0)) + f17 = Function(HDivTrace0).interpolate(conditional(And(x < .5, y > 1 - eps), 1, 0)) + f18 = Function(HDivTrace0).interpolate(conditional(And(x > .5, y > 1 - eps), 1, 0)) + f19 = Function(HDivTrace0).interpolate(conditional(And(And(x > .5 - eps, x < .5 + eps), y < .5), 1, 0)) + f20 = Function(HDivTrace0).interpolate(conditional(And(And(x > .5 - eps, x < .5 + eps), y > .5), 1, 0)) + f21 = Function(HDivTrace0).interpolate(conditional(And(x < .5, And(y > .5 - eps, y < .5 + eps)), 1, 0)) + f22 = Function(HDivTrace0).interpolate(conditional(And(x > .5, And(y > .5 - eps, y < .5 + eps)), 1, 0)) + DG0 = FunctionSpace(mesh, "DG", 0) + if submesh_region == "left": + submesh_function = Function(DG0).interpolate(conditional(x < .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(x > .5, 1, 0)) + elif submesh_region == "right": + submesh_function = Function(DG0).interpolate(conditional(x > .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(x < .5, 1, 0)) + elif submesh_region == "bottom": + submesh_function = Function(DG0).interpolate(conditional(y < .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(y > .5, 1, 0)) + elif submesh_region == "top": + submesh_function = Function(DG0).interpolate(conditional(y > .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(y < .5, 1, 0)) + else: + raise 
NotImplementedError(f"Unknown submesh_region: {submesh_region}") + return RelabeledMesh(mesh, [f11, f12, f13, f14, f15, f16, f17, f18, f19, f20, f21, f22, submesh_function, submesh_function_compl], + [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, label_submesh, label_submesh_compl]) + + +def _mixed_poisson_solve_2d(nref, degree, quadrilateral, submesh_region): + dim = 2 + label_submesh = 999 + label_submesh_compl = 888 + mesh = _mixed_poisson_create_mesh_2d(nref, quadrilateral, submesh_region, label_submesh, label_submesh_compl) + x, y = SpatialCoordinate(mesh) + subm = Submesh(mesh, dim, label_submesh) + subx, suby = SpatialCoordinate(subm) + if submesh_region == "left": + boun_ext = (11, 12) + boun_int = (19, 20) + boun_dirichlet = (15, 17) + elif submesh_region == "right": + boun_ext = (13, 14) + boun_int = (19, 20) + boun_dirichlet = (16, 18) + elif submesh_region == "bottom": + boun_ext = (15, 16) + boun_int = (21, 22) + boun_dirichlet = (11, 13) + elif submesh_region == "top": + boun_ext = (17, 18) + boun_int = (21, 22) + boun_dirichlet = (12, 14) + else: + raise NotImplementedError(f"Unknown submesh_region: {submesh_region}") + BDM = FunctionSpace(subm, "RTCF" if quadrilateral else "BDM", degree) + DG = FunctionSpace(mesh, "DG", degree - 1) + W = BDM * DG + tau, v = TestFunctions(W) + nsub = FacetNormal(subm) + u_exact = Function(DG).interpolate(cos(2 * pi * x) * cos(2 * pi * y)) + sigma_exact = Function(BDM).project(as_vector([- 2 * pi * sin(2 * pi * subx) * cos(2 * pi * suby), - 2 * pi * cos(2 * pi * subx) * sin(2 * pi * suby)]), + solver_parameters={"ksp_type": "cg", "ksp_rtol": 1.e-16}) + f = Function(DG).interpolate(- 8 * pi * pi * cos(2 * pi * x) * cos(2 * pi * y)) + dx0 = Measure("dx", domain=mesh, intersect_measures=(Measure("dx", subm),)) + dx1 = Measure("dx", domain=subm, intersect_measures=(Measure("dx", mesh),)) + ds0 = Measure("ds", domain=mesh, intersect_measures=(Measure("ds", subm),)) + ds1_ext = Measure("ds", domain=subm, 
intersect_measures=(Measure("ds", mesh),)) + ds1_int = Measure("ds", domain=subm, intersect_measures=(Measure("dS", mesh),)) + dS0 = Measure("dS", domain=mesh, intersect_measures=(Measure("ds", subm),)) + bc = DirichletBC(W.sub(0), sigma_exact, boun_dirichlet) + # Do the base case. + w = Function(W) + sigma, u = split(w) + a = (inner(sigma, tau) + inner(u, div(tau)) + inner(div(sigma), v)) * dx1 + inner(u - u_exact, v) * dx0(label_submesh_compl) + L = inner(f, v) * dx1 + inner((u('+') + u('-')) / 2., dot(tau, nsub)) * dS0(boun_int) + inner(u_exact, dot(tau, nsub)) * ds0(boun_ext) + solve(a - L == 0, w, bcs=[bc]) + # Change domains of integration. + w_ = Function(W) + sigma_, u_ = split(w_) + a_ = (inner(sigma_, tau) + inner(u_, div(tau)) + inner(div(sigma_), v)) * dx1 + inner(u_ - u_exact, v) * dx0(label_submesh_compl) + L_ = inner(f, v) * dx0(label_submesh) + inner((u_('+') + u_('-')) / 2., dot(tau, nsub)) * ds1_int(boun_int) + inner(u_exact, dot(tau, nsub)) * ds1_ext(boun_ext) + solve(a_ - L_ == 0, w_, bcs=[bc]) + assert assemble(inner(sigma_ - sigma, sigma_ - sigma) * dx1) < 1.e-20 + assert assemble(inner(u_ - u, u_ - u) * dx0(label_submesh)) < 1.e-20 + sigma_error = sqrt(assemble(inner(sigma - sigma_exact, sigma - sigma_exact) * dx1)) + u_error = sqrt(assemble(inner(u - u_exact, u - u_exact) * dx0(label_submesh))) + return sigma_error, u_error + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('nref', [1, 2, 3, 4]) +@pytest.mark.parametrize('degree', [1]) +@pytest.mark.parametrize('quadrilateral', [False, True]) +@pytest.mark.parametrize('submesh_region', ["left", "right", "bottom", "top"]) +def test_submesh_solve_mixed_poisson_check_sanity_2d(nref, degree, quadrilateral, submesh_region): + _, _ = _mixed_poisson_solve_2d(nref, degree, quadrilateral, submesh_region) + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('quadrilateral', [True]) +@pytest.mark.parametrize('degree', [3]) +@pytest.mark.parametrize('submesh_region', ["left", 
"right"]) +def test_submesh_solve_mixed_poisson_check_convergence_2d(quadrilateral, degree, submesh_region): + nrefs = [5, 6, 7] + start = nrefs[0] + s_error_array = np.zeros(len(nrefs)) + u_error_array = np.zeros(len(nrefs)) + for nref in nrefs: + i = nref - start + s_error_array[i], u_error_array[i] = _mixed_poisson_solve_2d(nref, degree, quadrilateral, submesh_region) + assert (np.log2(s_error_array[:-1] / s_error_array[1:]) > degree + .95).all() + assert (np.log2(u_error_array[:-1] / u_error_array[1:]) > degree + .95).all() + + +def _mixed_poisson_create_mesh_3d(hexahedral, submesh_region, label_submesh, label_submesh_compl): + if hexahedral: + mesh = Mesh(join(cwd, "..", "meshes", "cube_hex.msh")) + DG0 = FunctionSpace(mesh, "DQ", 0) + HDivTrace0 = FunctionSpace(mesh, "Q", 2) + else: + mesh = BoxMesh(4, 4, 4, 1., 1., 1., hexahedral=False) + DG0 = FunctionSpace(mesh, "DP", 0) + HDivTrace0 = FunctionSpace(mesh, "HDiv Trace", 0) + x, y, z = SpatialCoordinate(mesh) + eps = 1.e-6 + f101 = Function(HDivTrace0).interpolate(conditional(x < eps, 1, 0)) + f102 = Function(HDivTrace0).interpolate(conditional(x > 1. - eps, 1, 0)) + f103 = Function(HDivTrace0).interpolate(conditional(y < eps, 1, 0)) + f104 = Function(HDivTrace0).interpolate(conditional(y > 1. - eps, 1, 0)) + f105 = Function(HDivTrace0).interpolate(conditional(z < eps, 1, 0)) + f106 = Function(HDivTrace0).interpolate(conditional(z > 1. 
- eps, 1, 0)) + if submesh_region == "left": + submesh_function = Function(DG0).interpolate(conditional(x < .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(x > .5, 1, 0)) + elif submesh_region == "right": + submesh_function = Function(DG0).interpolate(conditional(x > .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(x < .5, 1, 0)) + elif submesh_region == "front": + submesh_function = Function(DG0).interpolate(conditional(y < .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(y > .5, 1, 0)) + elif submesh_region == "back": + submesh_function = Function(DG0).interpolate(conditional(y > .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(y < .5, 1, 0)) + elif submesh_region == "bottom": + submesh_function = Function(DG0).interpolate(conditional(z < .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(z > .5, 1, 0)) + elif submesh_region == "top": + submesh_function = Function(DG0).interpolate(conditional(z > .5, 1, 0)) + submesh_function_compl = Function(DG0).interpolate(conditional(z < .5, 1, 0)) + else: + raise NotImplementedError(f"Unknown submesh_region: {submesh_region}") + return RelabeledMesh(mesh, [f101, f102, f103, f104, f105, f106, submesh_function, submesh_function_compl], + [101, 102, 103, 104, 105, 106, label_submesh, label_submesh_compl]) + + +def _mixed_poisson_solve_3d(hexahedral, degree, submesh_region): + dim = 3 + label_submesh = 999 + label_submesh_compl = 888 + mesh = _mixed_poisson_create_mesh_3d(hexahedral, submesh_region, label_submesh, label_submesh_compl) + x, y, z = SpatialCoordinate(mesh) + subm = Submesh(mesh, dim, label_submesh) + subx, suby, subz = SpatialCoordinate(subm) + if submesh_region == "left": + boun_ext = (101, ) + boun_dirichlet = (103, 104, 105, 106) + elif submesh_region == "right": + boun_ext = (102, ) + boun_dirichlet = (103, 104, 105, 106) + elif submesh_region == "front": + boun_ext = (103, ) + 
boun_dirichlet = (101, 102, 105, 106) + elif submesh_region == "back": + boun_ext = (104, ) + boun_dirichlet = (101, 102, 105, 106) + elif submesh_region == "bottom": + boun_ext = (105, ) + boun_dirichlet = (101, 102, 103, 104) + elif submesh_region == "top": + boun_ext = (106, ) + boun_dirichlet = (101, 102, 103, 104) + else: + raise NotImplementedError(f"Unknown submesh_region: {submesh_region}") + boun_int = (107, ) # labeled automatically. + NCF = FunctionSpace(subm, "NCF" if hexahedral else "N2F", degree) + DG = FunctionSpace(mesh, "DG", degree - 1) + W = NCF * DG + tau, v = TestFunctions(W) + nsub = FacetNormal(subm) + u_exact = Function(DG).interpolate(cos(2 * pi * x) * cos(2 * pi * y) * cos(2 * pi * z)) + sigma_exact = Function(NCF).project(as_vector([- 2 * pi * sin(2 * pi * subx) * cos(2 * pi * suby) * cos(2 * pi * subz), + - 2 * pi * cos(2 * pi * subx) * sin(2 * pi * suby) * cos(2 * pi * subz), + - 2 * pi * cos(2 * pi * subx) * cos(2 * pi * suby) * sin(2 * pi * subz)]), + solver_parameters={"ksp_type": "cg", "ksp_rtol": 1.e-16}) + f = Function(DG).interpolate(- 12 * pi * pi * cos(2 * pi * x) * cos(2 * pi * y) * cos(2 * pi * z)) + dx0 = Measure("dx", domain=mesh, intersect_measures=(Measure("dx", subm),)) + dx1 = Measure("dx", domain=subm, intersect_measures=(Measure("dx", mesh),)) + ds0 = Measure("ds", domain=mesh, intersect_measures=(Measure("ds", subm),)) + ds1 = Measure("ds", domain=subm, intersect_measures=(Measure("dS", mesh),)) + bc = DirichletBC(W.sub(0), sigma_exact, boun_dirichlet) + # Do the base case. 
+ w = Function(W) + sigma, u = split(w) + a = (inner(sigma, tau) + inner(u, div(tau)) + inner(div(sigma), v)) * dx1 + inner(u - u_exact, v) * dx0(label_submesh_compl) + L = inner(f, v) * dx1 + inner((u('+') + u('-')) / 2., dot(tau, nsub)) * ds1(boun_int) + inner(u_exact, dot(tau, nsub)) * ds0(boun_ext) + solve(a - L == 0, w, bcs=[bc]) + sigma_error = sqrt(assemble(inner(sigma - sigma_exact, sigma - sigma_exact) * dx1)) + u_error = sqrt(assemble(inner(u - u_exact, u - u_exact) * dx0(label_submesh))) + return sigma_error, u_error + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('hexahedral', [False]) +@pytest.mark.parametrize('degree', [4]) +@pytest.mark.parametrize('submesh_region', ["left", "right", "front", "back", "bottom", "top"]) +def test_submesh_solve_mixed_poisson_check_sanity_3d(hexahedral, degree, submesh_region): + sigma_error, u_error = _mixed_poisson_solve_3d(hexahedral, degree, submesh_region) + assert sigma_error < 0.07 + assert u_error < 0.003 + + +@pytest.mark.parallel(nprocs=4) +@pytest.mark.parametrize('simplex', [True, False]) +@pytest.mark.parametrize('nref', [1, 3]) +@pytest.mark.parametrize('degree', [2, 4]) +def test_submesh_solve_cell_cell_equation_bc(nref, degree, simplex): + dim = 2 + mesh = RectangleMesh(3 ** nref, 2 ** nref, 3., 2., quadrilateral=not simplex) + x, y = SpatialCoordinate(mesh) + label_outer = 101 + label_inner = 100 + label_interface = 5 # automatically labeled by Submesh + DG0 = FunctionSpace(mesh, "DG", 0) + f_outer = Function(DG0).interpolate(conditional(Or(Or(x < 1., x > 2.), y > 1.), 1, 0)) + f_inner = Function(DG0).interpolate(conditional(And(And(x > 1., x < 2.), y < 1.), 1, 0)) + mesh = RelabeledMesh(mesh, [f_outer, f_inner], [label_outer, label_inner]) + x, y = SpatialCoordinate(mesh) + mesh_outer = Submesh(mesh, dim, label_outer) + x_outer, y_outer = SpatialCoordinate(mesh_outer) + mesh_inner = Submesh(mesh, dim, label_inner) + x_inner, y_inner = SpatialCoordinate(mesh_inner) + V_outer = 
FunctionSpace(mesh_outer, "CG", degree) + V_inner = FunctionSpace(mesh_inner, "CG", degree) + V = V_outer * V_inner + u = TrialFunction(V) + v = TestFunction(V) + sol = Function(V) + u_outer, u_inner = split(u) + v_outer, v_inner = split(v) + dx_outer = Measure("dx", domain=mesh_outer, intersect_measures=(Measure("dx", mesh), Measure("dx", mesh_inner))) + dx_inner = Measure("dx", domain=mesh_inner, intersect_measures=(Measure("dx", mesh), Measure("dx", mesh_outer))) + ds_outer = Measure("ds", domain=mesh_outer, intersect_measures=(Measure("ds", mesh_inner),)) + a = inner(grad(u_outer), grad(v_outer)) * dx_outer + \ + inner(u_inner, v_inner) * dx_inner + L = inner(x * y, v_inner) * dx_inner + dbc = DirichletBC(V.sub(0), x_outer * y_outer, (1, 2, 3, 4)) + ebc = EquationBC(inner(u_outer - u_inner, v_outer) * ds_outer(label_interface) == inner(Constant(0.), v_outer) * ds_outer(label_interface), sol, label_interface, V=V.sub(0)) + solve(a == L, sol, bcs=[dbc, ebc]) + assert sqrt(assemble(inner(sol[0] - x * y, sol[0] - x * y) * dx_outer)) < 1.e-12 + assert sqrt(assemble(inner(sol[1] - x * y, sol[1] - x * y) * dx_inner)) < 1.e-12 + + +def _test_submesh_solve_quad_triangle_poisson(nref, degree): + dim = 2 + label_ext = 1 + label_interf = 2 + distribution_parameters_noop = { + "partition": True, + "overlap_type": (DistributedMeshOverlapType.NONE, 0), + } + mesh = Mesh(os.path.join(cwd, "..", "meshes", "mixed_cell_unit_square.msh"), distribution_parameters=distribution_parameters_noop) + plex = mesh.topology_dm + for _ in range(nref): + plex = plex.refine() + plex.removeLabel("pyop2_core") + plex.removeLabel("pyop2_owned") + plex.removeLabel("pyop2_ghost") + mesh = Mesh(plex) + h = 0.1 / 2**nref # roughly + mesh.topology_dm.markBoundaryFaces(dmcommon.FACE_SETS_LABEL, label_ext) + mesh_t = Submesh(mesh, dim, PETSc.DM.PolytopeType.TRIANGLE, label_name="celltype", name="mesh_tri") + x_t, y_t = SpatialCoordinate(mesh_t) + n_t = FacetNormal(mesh_t) + mesh_q = Submesh(mesh, dim, 
PETSc.DM.PolytopeType.QUADRILATERAL, label_name="celltype", name="mesh_quad") + x_q, y_q = SpatialCoordinate(mesh_q) + n_q = FacetNormal(mesh_q) + V_t = FunctionSpace(mesh_t, "P", degree) + V_q = FunctionSpace(mesh_q, "Q", degree) + V = V_t * V_q + u = TrialFunction(V) + v = TestFunction(V) + u_t, u_q = split(u) + v_t, v_q = split(v) + dx_t = Measure("dx", mesh_t) + dx_q = Measure("dx", mesh_q) + ds_t = Measure("ds", mesh_t, intersect_measures=(Measure("ds", mesh_q),)) + ds_q = Measure("ds", mesh_q, intersect_measures=(Measure("ds", mesh_t),)) + g_t = cos(2 * pi * x_t) * cos(2 * pi * y_t) + g_q = cos(2 * pi * x_q) * cos(2 * pi * y_q) + f_t = 8 * pi**2 * g_t + f_q = 8 * pi**2 * g_q + a = ( + inner(grad(u_t), grad(v_t)) * dx_t + inner(grad(u_q), grad(v_q)) * dx_q + - inner( + (grad(u_q) + grad(u_t)) / 2, + (v_q * n_q + v_t * n_t) + ) * ds_q(label_interf) + - inner( + (u_q * n_q + u_t * n_t), + (grad(v_q) + grad(v_t)) / 2 + ) * ds_t(label_interf) + + 100 / h * inner(u_q - u_t, v_q - v_t) * ds_q(label_interf) + ) + L = ( + inner(f_t, v_t) * dx_t + inner(f_q, v_q) * dx_q + ) + sol = Function(V) + bc_q = DirichletBC(V.sub(1), g_q, label_ext) + solve(a == L, sol, bcs=[bc_q]) + sol_t, sol_q = split(sol) + L2Error_t = assemble(inner(sol_t - g_t, sol_t - g_t) * dx_t) + L2Error_q = assemble(inner(sol_q - g_q, sol_q - g_q) * dx_q) + H1Error_t = L2Error_t + assemble(inner(grad(sol_t - g_t), grad(sol_t - g_t)) * dx_t) + H1Error_q = L2Error_q + assemble(inner(grad(sol_q - g_q), grad(sol_q - g_q)) * dx_q) + return sqrt(L2Error_t + L2Error_q), sqrt(H1Error_t + H1Error_q) + + +@pytest.mark.parallel(nprocs=8) +def test_submesh_solve_quad_triangle_poisson_convergence(): + for degree in range(1, 5): + L2Errors = [] + H1Errors = [] + for nref in range(4): + L2Error, H1Error = _test_submesh_solve_quad_triangle_poisson(nref, degree) + L2Errors.append(L2Error) + H1Errors.append(H1Error) + L2Errors = [np.log2(c) - np.log2(f) for c, f in zip(L2Errors[:-1], L2Errors[1:])] + H1Errors = 
[np.log2(c) - np.log2(f) for c, f in zip(H1Errors[:-1], H1Errors[1:])] + assert (np.array(L2Errors) > (degree + 1) * 0.995).all() + assert (np.array(H1Errors) > (degree) * 0.995).all() + + +def _test_submesh_solve_3d_2d_poisson(simplex, direction, nref, degree): + distribution_parameters_noop = { + "partition": True, + "overlap_type": (DistributedMeshOverlapType.NONE, 0), + } + distribution_parameters = { + "overlap_type": (DistributedMeshOverlapType.RIDGE, 1), + } + dim = 3 + interf_at = 0.499 + if simplex: + nref_simplex = 3 + mesh = BoxMesh(2 ** nref_simplex, 2 ** nref_simplex, 2 ** nref_simplex, 1., 1., 1., hexahedral=False, distribution_parameters=distribution_parameters_noop) + xyz = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DG", 0) + c1 = Function(DG0).interpolate(conditional(xyz[direction] < interf_at, 1, 0)) + c2 = Function(DG0).interpolate(conditional(xyz[direction] > interf_at, 1, 0)) + mesh = RelabeledMesh(mesh, [c1, c2], [1, 2]) + family = "P" + else: + mesh = Mesh(join(cwd, "..", "meshes", "cube_hex.msh"), distribution_parameters=distribution_parameters_noop) + xyz = SpatialCoordinate(mesh) + DG0 = FunctionSpace(mesh, "DQ", 0) + c1 = Function(DG0).interpolate(conditional(xyz[direction] < interf_at, 1, 0)) + c2 = Function(DG0).interpolate(conditional(xyz[direction] > interf_at, 1, 0)) + HDivTrace0 = FunctionSpace(mesh, "Q", 2) + f1 = Function(HDivTrace0).interpolate(conditional(xyz[0] < .001, 1, 0)) + f2 = Function(HDivTrace0).interpolate(conditional(xyz[0] > .999, 1, 0)) + f3 = Function(HDivTrace0).interpolate(conditional(xyz[1] < .001, 1, 0)) + f4 = Function(HDivTrace0).interpolate(conditional(xyz[1] > .999, 1, 0)) + f5 = Function(HDivTrace0).interpolate(conditional(xyz[2] < .001, 1, 0)) + f6 = Function(HDivTrace0).interpolate(conditional(xyz[2] > .999, 1, 0)) + mesh = RelabeledMesh(mesh, [c1, c2, f1, f2, f3, f4, f5, f6], [1, 2, 1, 2, 3, 4, 5, 6]) + family = "Q" + plex = mesh.topology_dm + for _ in range(nref): + plex = plex.refine() + 
plex.removeLabel("pyop2_core") + plex.removeLabel("pyop2_owned") + plex.removeLabel("pyop2_ghost") + mesh = Mesh(plex, distribution_parameters=distribution_parameters) + mesh1 = Submesh(mesh, dim, 1) + x1, y1, z1 = SpatialCoordinate(mesh1) + mesh2 = Submesh(mesh, dim, 2) + x2, y2, z2 = SpatialCoordinate(mesh2) + label_interf = 7 # max + 1 + mesh12 = Submesh(mesh2, dim - 1, label_interf) + dx1 = Measure("dx", mesh1) + dx2 = Measure("dx", mesh2) + ds1_ds2 = Measure("ds", mesh1, intersect_measures=(Measure("ds", mesh2),)) + dx12_ds1_ds2 = Measure( + "dx", mesh12, + intersect_measures=( + Measure("ds", mesh1), + Measure("ds", mesh2), + ) + ) + # Check sanity. + vol1 = assemble(Constant(1) * dx1) + vol2 = assemble(Constant(1) * dx2) + assert abs(vol1 + vol2 - 1.) < 1.e-13 + # Solve Poisson problem. + V1 = FunctionSpace(mesh1, family, degree) + V12 = FunctionSpace(mesh12, family, degree) + V2 = FunctionSpace(mesh2, family, degree) + V = V1 * V12 * V2 + u = TrialFunction(V) + v = TestFunction(V) + u1, u12, u2 = split(u) + v1, v12, v2 = split(v) + g1 = cos(2 * pi * x1) * cos(2 * pi * y1) * cos(2 * pi * z1) + g2 = cos(2 * pi * x2) * cos(2 * pi * y2) * cos(2 * pi * z2) + f1 = 12 * pi**2 * g1 + f2 = 12 * pi**2 * g2 + n1 = FacetNormal(mesh1) + n2 = FacetNormal(mesh2) + h = 0.1 / 2**nref # roughly + a = ( + inner(grad(u1), grad(v1)) * dx1 + inner(grad(u2), grad(v2)) * dx2 + - inner( + u12, + (v1 - v2) + ) * dx12_ds1_ds2 + - inner( + (u1 * n1 + u2 * n2), + (grad(v1) + grad(v2)) / 2 + ) * dx12_ds1_ds2 + + 100 / h * inner(u1 - u2, v1 - v2) * ds1_ds2(label_interf) # Can also use dx12_ds1_ds2. 
+ + inner( + (dot(grad(u1), n1) - dot(grad(u2), n2)) / 2 - u12, + v12 + ) * dx12_ds1_ds2 + ) + L = ( + inner(f1, v1) * dx1 + inner(f2, v2) * dx2 + ) + sol = Function(V) + bc1 = DirichletBC(V.sub(0), g1, [i for i in range(1, 7) if i != 2 * direction + 2]) + bc2 = DirichletBC(V.sub(2), g2, [i for i in range(1, 7) if i != 2 * direction + 1]) + solver_parameters = { + "mat_type": "matfree", + "ksp_type": "preonly", + "pc_type": "fieldsplit", + "pc_fieldsplit_type": "schur", + "pc_fieldsplit_schur_fact_type": "full", + "pc_fieldsplit_0_fields": "1", + "pc_fieldsplit_1_fields": "0, 2", + "fieldsplit_0_ksp_type": "cg", + "fieldsplit_0_ksp_rtol": 1e-14, + "fieldsplit_0_pc_type": "jacobi", + "fieldsplit_1_ksp_type": "cg", + "fieldsplit_1_ksp_rtol": 1e-14, + "fieldsplit_1_pc_type": "jacobi", + } + solve(a == L, sol, bcs=[bc1, bc2], solver_parameters=solver_parameters) + sol1, sol12, sol2 = split(sol) + L2Error1 = assemble(inner(sol1 - g1, sol1 - g1) * dx1) + L2Error2 = assemble(inner(sol2 - g2, sol2 - g2) * dx2) + H1Error1 = L2Error1 + assemble(inner(grad(sol1 - g1), grad(sol1 - g1)) * dx1) + H1Error2 = L2Error2 + assemble(inner(grad(sol2 - g2), grad(sol2 - g2)) * dx2) + return sqrt(L2Error1 + L2Error2), sqrt(H1Error1 + H1Error2) + + +@pytest.mark.parallel(nprocs=6) +@pytest.mark.parametrize('simplex', [True, False]) +@pytest.mark.parametrize('direction', [0, 1, 2]) +def test_submesh_solve_3d_2d_poisson_sanity(simplex, direction): + nref = 0 + degree = 4 + L2Error, H1Error = _test_submesh_solve_3d_2d_poisson(simplex, direction, nref, degree) + assert L2Error < 6.e-5 + assert H1Error < 5.e-3 + + +@pytest.mark.parallel(nprocs=8) +@pytest.mark.parametrize('simplex', [False]) +@pytest.mark.parametrize('direction', [0]) +@pytest.mark.parametrize('degree', [3]) +def test_submesh_solve_3d_2d_poisson_convergence(simplex, direction, degree): + L2Errors = [] + H1Errors = [] + for nref in range(2): + L2Error, H1Error = _test_submesh_solve_3d_2d_poisson(simplex, direction, nref, degree) 
+ L2Errors.append(L2Error) + H1Errors.append(H1Error) + L2Errors = [np.log2(c) - np.log2(f) for c, f in zip(L2Errors[:-1], L2Errors[1:])] + H1Errors = [np.log2(c) - np.log2(f) for c, f in zip(H1Errors[:-1], H1Errors[1:])] + assert (np.array(L2Errors) > (degree + 1) * 0.96).all() + assert (np.array(H1Errors) > (degree) * 0.96).all() + + +@pytest.mark.parallel(nprocs=7) +def test_submesh_solve_2d_1d_poisson_hermite(): + distribution_parameters_noop = { + "partition": True, + "overlap_type": (DistributedMeshOverlapType.NONE, 0), + } + distribution_parameters = { + "overlap_type": (DistributedMeshOverlapType.RIDGE, 1), + } + mesh3d = Mesh(join(cwd, "..", "meshes", "cube_hex.msh"), distribution_parameters=distribution_parameters_noop) + plex = mesh3d.topology_dm + for _ in range(2): + plex = plex.refine() + plex.removeLabel("pyop2_core") + plex.removeLabel("pyop2_owned") + plex.removeLabel("pyop2_ghost") + mesh3d = Mesh(plex, distribution_parameters=distribution_parameters) + xyz = SpatialCoordinate(mesh3d) + HDivTrace0 = FunctionSpace(mesh3d, "Q", 2) + f1 = Function(HDivTrace0).interpolate(conditional(xyz[0] < .001, 1, 0)) + f2 = Function(HDivTrace0).interpolate(conditional(xyz[0] > .999, 1, 0)) + f3 = Function(HDivTrace0).interpolate(conditional(xyz[1] < .001, 1, 0)) + f4 = Function(HDivTrace0).interpolate(conditional(xyz[1] > .999, 1, 0)) + f5 = Function(HDivTrace0).interpolate(conditional(xyz[2] < .001, 1, 0)) + f6 = Function(HDivTrace0).interpolate(conditional(xyz[2] > .999, 1, 0)) + mesh3d = RelabeledMesh(mesh3d, [f1, f2, f3, f4, f5, f6], [1, 2, 3, 4, 5, 6]) + mesh2d = Submesh(mesh3d, mesh3d.topological_dimension - 1, 6) + mesh1d = Submesh(mesh2d, mesh2d.topological_dimension - 1, 4) + x2d = SpatialCoordinate(mesh2d) + x1d = SpatialCoordinate(mesh1d) + g2d = sin(2 * pi * x2d[0]) * sin(2 * pi * x2d[1]) + f2d = 2 * (2 * pi)**2 * g2d + g1d = sin(2 * pi * x1d[0]) + f1d = (2 * pi)**4 * g1d + V2d = FunctionSpace(mesh2d, "Q", 3) + V1d = FunctionSpace(mesh1d, "Hermite", 
3) + V = V2d * V1d + u = TrialFunction(V) + v = TestFunction(V) + u2d, u1d = split(u) + v2d, v1d = split(v) + dx2d = Measure("dx", mesh2d) + dx1d = Measure("dx", mesh1d) + ds2d_dx1d = Measure("ds", mesh2d, intersect_measures=(Measure("dx", mesh1d),)) + a = inner(grad(u2d), grad(v2d)) * dx2d - \ + inner(2 * pi * u1d, v2d) * ds2d_dx1d(4) + \ + inner(grad(grad(u1d)), grad(grad(v1d))) * dx1d + L = inner(f2d, v2d) * dx2d + \ + inner(f1d, v1d) * dx1d + sol = Function(V) + bc2d = DirichletBC(V.sub(0), g2d, (1, 2, 3)) + bc1d = DirichletBC(V.sub(1), g1d, (1, 2)) + solve(a == L, sol, bcs=[bc2d, bc1d]) + error2d = assemble(inner(sol[0] - g2d, sol[0] - g2d) * dx2d) + error1d = assemble(inner(sol[1] - g1d, sol[1] - g1d) * dx1d) + assert sqrt(error2d) < 1.e-5 + assert sqrt(error1d) < 5.e-5 diff --git a/tests/firedrake/vertexonly/test_interpolation_from_parent.py b/tests/firedrake/vertexonly/test_interpolation_from_parent.py index 606b4bea13..02b99d96b7 100644 --- a/tests/firedrake/vertexonly/test_interpolation_from_parent.py +++ b/tests/firedrake/vertexonly/test_interpolation_from_parent.py @@ -54,7 +54,7 @@ def parentmesh(request): @pytest.fixture(params=[0, 1, 100], ids=lambda x: f"{x}-coords") def vertexcoords(request, parentmesh): - size = (request.param, parentmesh.geometric_dimension()) + size = (request.param, parentmesh.geometric_dimension) return pseudo_random_coords(size) @@ -81,10 +81,10 @@ def vfs(request, parentmesh): family = request.param[0] # skip where the element doesn't support the cell type if family != "CG": - if parentmesh.ufl_cell().cellname() == "quadrilateral": + if parentmesh.ufl_cell().cellname == "quadrilateral": if not (family == "RTCE" or family == "RTCF"): pytest.skip(f"{family} does not support {parentmesh.ufl_cell()} cells") - elif parentmesh.ufl_cell().cellname() == "triangle" or parentmesh.ufl_cell().cellname() == "tetrahedron": + elif parentmesh.ufl_cell().cellname == "triangle" or parentmesh.ufl_cell().cellname == "tetrahedron": if (not 
(family == "N1curl" or family == "N2curl" or family == "N1div" or family == "N2div")): pytest.skip(f"{family} does not support {parentmesh.ufl_cell()} cells") @@ -106,8 +106,8 @@ def vfs(request, parentmesh): def tfs(request, parentmesh): family = request.param[0] # skip where the element doesn't support the cell type - if (family != "CG" and parentmesh.ufl_cell().cellname() != "triangle" - and parentmesh.ufl_cell().cellname() != "tetrahedron"): + if (family != "CG" and parentmesh.ufl_cell().cellname != "triangle" + and parentmesh.ufl_cell().cellname != "tetrahedron"): pytest.skip(f"{family} does not support {parentmesh.ufl_cell()} cells") if parentmesh.name == "immersedsphere": # See https://github.com/firedrakeproject/firedrake/issues/3089 @@ -162,7 +162,7 @@ def test_scalar_spatialcoordinate_interpolation(parentmesh, vertexcoords): # Reshaping because for all meshes, we want (-1, gdim) but # when gdim == 1 PyOP2 doesn't distinguish between dats with shape # () and shape (1,). - vertexcoords = vm.coordinates.dat.data_ro.reshape(-1, parentmesh.geometric_dimension()) + vertexcoords = vm.coordinates.dat.data_ro.reshape(-1, parentmesh.geometric_dimension) W = FunctionSpace(vm, "DG", 0) expr = reduce(add, SpatialCoordinate(parentmesh)) w_expr = assemble(interpolate(expr, W)) @@ -173,7 +173,7 @@ def test_scalar_function_interpolation(parentmesh, vertexcoords, fs): if parentmesh.name == "immersedsphere": vertexcoords = immersed_sphere_vertexcoords(parentmesh, vertexcoords) vm = VertexOnlyMesh(parentmesh, vertexcoords, missing_points_behaviour="ignore") - vertexcoords = vm.coordinates.dat.data_ro.reshape(-1, parentmesh.geometric_dimension()) + vertexcoords = vm.coordinates.dat.data_ro.reshape(-1, parentmesh.geometric_dimension) fs_fam, fs_deg, fs_typ = fs if ( parentmesh.coordinates.function_space().ufl_element().family() @@ -227,7 +227,7 @@ def test_tensor_spatialcoordinate_interpolation(parentmesh, vertexcoords): vertexcoords = vm.coordinates.dat.data_ro W = 
TensorFunctionSpace(vm, "DG", 0) x = SpatialCoordinate(parentmesh) - gdim = parentmesh.geometric_dimension() + gdim = parentmesh.geometric_dimension expr = 2 * as_tensor([x]*gdim) assert W.shape == expr.ufl_shape w_expr = assemble(interpolate(expr, W)) @@ -257,7 +257,7 @@ def test_tensor_function_interpolation(parentmesh, vertexcoords, tfs): v = Function(V).interpolate(expr) result = np.asarray([np.outer(vertexcoords[i], vertexcoords[i]) for i in range(len(vertexcoords))]) if len(result) == 0: - result = result.reshape(vertexcoords.shape + (parentmesh.geometric_dimension(),)) + result = result.reshape(vertexcoords.shape + (parentmesh.geometric_dimension,)) w_v = assemble(interpolate(v, W)) assert np.allclose(w_v.dat.data_ro.reshape(result.shape), result) @@ -268,7 +268,7 @@ def test_mixed_function_interpolation(parentmesh, vertexcoords, tfs): tfs_fam, tfs_deg, tfs_typ = tfs vm = VertexOnlyMesh(parentmesh, vertexcoords, missing_points_behaviour="ignore") - vertexcoords = vm.coordinates.dat.data_ro.reshape(-1, parentmesh.geometric_dimension()) + vertexcoords = vm.coordinates.dat.data_ro.reshape(-1, parentmesh.geometric_dimension) if ( parentmesh.coordinates.function_space().ufl_element().family() == "Discontinuous Lagrange" @@ -291,7 +291,7 @@ def test_mixed_function_interpolation(parentmesh, vertexcoords, tfs): v1.interpolate(expr1) result1 = np.asarray([np.outer(vertexcoords[i], vertexcoords[i]) for i in range(len(vertexcoords))]) if len(result1) == 0: - result1 = result1.reshape(vertexcoords.shape + (parentmesh.geometric_dimension(),)) + result1 = result1.reshape(vertexcoords.shape + (parentmesh.geometric_dimension,)) # Get Function in V2 expr2 = reduce(add, SpatialCoordinate(parentmesh)) v2.interpolate(expr2) @@ -310,11 +310,6 @@ def test_scalar_real_interpolation(parentmesh, vertexcoords): vm = VertexOnlyMesh(parentmesh, vertexcoords, missing_points_behaviour="ignore") W = FunctionSpace(vm, "DG", 0) V = FunctionSpace(parentmesh, "Real", 0) - # Remove below when 
interpolating constant onto Real works for extruded - if type(parentmesh.topology) is mesh.ExtrudedMeshTopology: - with pytest.raises(ValueError): - assemble(interpolate(Constant(1), V)) - return v = assemble(interpolate(Constant(1), V)) w_v = assemble(interpolate(v, W)) assert np.allclose(w_v.dat.data_ro, 1.) diff --git a/tests/firedrake/vertexonly/test_poisson_inverse_conductivity.py b/tests/firedrake/vertexonly/test_poisson_inverse_conductivity.py index 4b890dae8f..15b03e867c 100644 --- a/tests/firedrake/vertexonly/test_poisson_inverse_conductivity.py +++ b/tests/firedrake/vertexonly/test_poisson_inverse_conductivity.py @@ -1,7 +1,7 @@ import pytest import numpy as np from firedrake import * -from pyadjoint.tape import get_working_tape, pause_annotation +from pyadjoint.tape import get_working_tape, pause_annotation, stop_annotating @pytest.fixture(autouse=True) diff --git a/tests/firedrake/vertexonly/test_swarm.py b/tests/firedrake/vertexonly/test_swarm.py index 96c4602511..7d70d2dedb 100644 --- a/tests/firedrake/vertexonly/test_swarm.py +++ b/tests/firedrake/vertexonly/test_swarm.py @@ -25,11 +25,11 @@ def cell_midpoints(m): num_cells_local = len(f.dat.data_ro) num_cells = MPI.COMM_WORLD.allreduce(num_cells_local, op=MPI.SUM) # reshape is for 1D case where f.dat.data_ro has shape (num_cells_local,) - local_midpoints = f.dat.data_ro.reshape(num_cells_local, m.geometric_dimension()) + local_midpoints = f.dat.data_ro.reshape(num_cells_local, m.geometric_dimension) local_midpoints_size = np.array(local_midpoints.size) local_midpoints_sizes = np.empty(MPI.COMM_WORLD.size, dtype=int) MPI.COMM_WORLD.Allgatherv(local_midpoints_size, local_midpoints_sizes) - midpoints = np.empty((num_cells, m.geometric_dimension()), dtype=local_midpoints.dtype) + midpoints = np.empty((num_cells, m.geometric_dimension), dtype=local_midpoints.dtype) MPI.COMM_WORLD.Allgatherv(local_midpoints, (midpoints, local_midpoints_sizes)) assert len(np.unique(midpoints, axis=0)) == len(midpoints) 
return midpoints, local_midpoints @@ -70,7 +70,7 @@ def point_ownership(m, points, localpoints): m.locate_cell(point). """ - out_of_mesh_point = np.full((1, m.geometric_dimension()), np.inf) + out_of_mesh_point = np.full((1, m.geometric_dimension), np.inf) cell_numbers = np.empty(len(localpoints), dtype=int) i = 0 for point in points: @@ -238,12 +238,12 @@ def test_pic_swarm_in_mesh(parentmesh, redundant, exclude_halos): assert plex.comm.size == swarm.comm.size # Check swarm fields are correct default_fields = [ - ("DMSwarmPIC_coor", parentmesh.geometric_dimension(), RealType), + ("DMSwarmPIC_coor", parentmesh.geometric_dimension, RealType), ("DMSwarm_rank", 1, IntType), ] default_extra_fields = [ ("parentcellnum", 1, IntType), - ("refcoord", parentmesh.topological_dimension(), RealType), + ("refcoord", parentmesh.topological_dimension, RealType), ("globalindex", 1, IntType), ("inputrank", 1, IntType), ("inputindex", 1, IntType), @@ -368,7 +368,7 @@ def test_pic_swarm_in_mesh(parentmesh, redundant, exclude_halos): # Now have DMPLex compute the cell IDs in cases where it can: if ( parentmesh.coordinates.ufl_element().family() != "Discontinuous Lagrange" - and parentmesh.geometric_dimension() == parentmesh.topological_dimension() + and parentmesh.geometric_dimension == parentmesh.topological_dimension and not parentmesh.extruded and not parentmesh.coordinates.dat.dat_version > 0 # shifted mesh ): @@ -391,10 +391,6 @@ def test_pic_swarm_in_mesh(parentmesh, redundant, exclude_halos): assert original_swarm.other_fields != swarm.other_fields assert isinstance(original_swarm.getCellDM(), PETSc.DMSwarm) - # out_of_mesh_point = np.full((2, parentmesh.geometric_dimension()), np.inf) - # swarm, n_missing_coords = mesh._pic_swarm_in_mesh(parentmesh, out_of_mesh_point, fields=fields) - # assert n_missing_coords == 2 - @pytest.mark.parallel def test_pic_swarm_in_mesh_parallel(parentmesh, redundant, exclude_halos): diff --git a/tests/firedrake/vertexonly/test_vertex_only_fs.py 
b/tests/firedrake/vertexonly/test_vertex_only_fs.py index 99c651876c..2b1e5a0b35 100644 --- a/tests/firedrake/vertexonly/test_vertex_only_fs.py +++ b/tests/firedrake/vertexonly/test_vertex_only_fs.py @@ -50,7 +50,7 @@ def parentmesh(request): @pytest.fixture(params=[0, 1, 100], ids=lambda x: f"{x}-coords") def vertexcoords(request, parentmesh): - size = (request.param, parentmesh.geometric_dimension()) + size = (request.param, parentmesh.geometric_dimension) return pseudo_random_coords(size) @@ -68,7 +68,7 @@ def pseudo_random_coords(size): # Function Space Generation Tests -def functionspace_tests(vm, petsc_raises): +def functionspace_tests(vm): # Prep num_cells = len(vm.coordinates.dat.data_ro) num_cells_mpi_global = MPI.COMM_WORLD.allreduce(num_cells, op=MPI.SUM) @@ -96,7 +96,7 @@ def functionspace_tests(vm, petsc_raises): # Reshaping because for all meshes, we want (-1, gdim) but # when gdim == 1 PyOP2 doesn't distinguish between dats with shape # () and shape (1,). - assert np.allclose(f.dat.data_ro, np.prod(vm.coordinates.dat.data_ro.reshape(-1, vm.geometric_dimension()), axis=1)) + assert np.allclose(f.dat.data_ro, np.prod(vm.coordinates.dat.data_ro.reshape(-1, vm.geometric_dimension), axis=1)) # Galerkin Projection of expression is the same as interpolation of # that expression since both exactly point evaluate the expression. 
assert np.allclose(f.dat.data_ro, g.dat.data_ro) @@ -118,10 +118,10 @@ def functionspace_tests(vm, petsc_raises): input_ordering_parent_cell_nums = vm.input_ordering.topology_dm.getField("parentcellnum").ravel() vm.input_ordering.topology_dm.restoreField("parentcellnum") idxs_to_include = input_ordering_parent_cell_nums != -1 - assert np.allclose(h.dat.data_ro_with_halos[idxs_to_include], np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension()), axis=1)) + assert np.allclose(h.dat.data_ro_with_halos[idxs_to_include], np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension), axis=1)) assert np.all(h.dat.data_ro_with_halos[~idxs_to_include] == -1) # Using permutation matrix - perm_mat = assemble(interpolate(TrialFunction(V), W, matfree=False)) + perm_mat = assemble(interpolate(TrialFunction(V), W), mat_type="aij") h2 = assemble(perm_mat @ g) assert np.allclose(h2.dat.data_ro_with_halos[idxs_to_include], h.dat.data_ro_with_halos[idxs_to_include]) h2 = assemble(interpolate(g, W)) @@ -129,44 +129,42 @@ def functionspace_tests(vm, petsc_raises): # check we can interpolate expressions h2 = Function(W) h2.interpolate(2*g) - assert np.allclose(h2.dat.data_ro_with_halos[idxs_to_include], 2*np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension()), axis=1)) + assert np.allclose(h2.dat.data_ro_with_halos[idxs_to_include], 2*np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension), axis=1)) # Check that the opposite works g.dat.data_wo_with_halos[:] = -1 g.interpolate(h) - assert np.allclose(g.dat.data_ro_with_halos, np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension()), axis=1)) + assert np.allclose(g.dat.data_ro_with_halos, 
np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension), axis=1)) h = assemble(interpolate(g, W)) - assert np.allclose(h.dat.data_ro_with_halos[idxs_to_include], np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension()), axis=1)) + assert np.allclose(h.dat.data_ro_with_halos[idxs_to_include], np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension), axis=1)) assert np.all(h.dat.data_ro_with_halos[~idxs_to_include] == 0) h2 = assemble(interpolate(2*g, W)) - assert np.allclose(h2.dat.data_ro_with_halos[idxs_to_include], 2*np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension()), axis=1)) + assert np.allclose(h2.dat.data_ro_with_halos[idxs_to_include], 2*np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension), axis=1)) h_star = h.riesz_representation(riesz_map="l2") g = assemble(interpolate(TestFunction(V), h_star)) - assert np.allclose(g.dat.data_ro_with_halos, np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension()), axis=1)) - with petsc_raises(NotImplementedError): - # Can't use adjoint on interpolates with expressions yet - g2 = assemble(interpolate(2 * TestFunction(V), h_star)) - assert np.allclose(g2.dat.data_ro_with_halos, 2*np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension()), axis=1)) + assert np.allclose(g.dat.data_ro_with_halos, np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension), axis=1)) + + g2 = assemble(interpolate(2 * TestFunction(V), h_star)) + assert np.allclose(g2.dat.data_ro_with_halos, 2*np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension), axis=1)) h_star = assemble(interpolate(TestFunction(W), g)) h = 
h_star.riesz_representation(riesz_map="l2") - assert np.allclose(h.dat.data_ro_with_halos[idxs_to_include], np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension()), axis=1)) + assert np.allclose(h.dat.data_ro_with_halos[idxs_to_include], np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension), axis=1)) assert np.all(h.dat.data_ro_with_halos[~idxs_to_include] == 0) - with petsc_raises(NotImplementedError): - # Can't use adjoint on interpolates with expressions yet - h2 = assemble(interpolate(2 * TestFunction(W), g)) - assert np.allclose(h2.dat.data_ro_with_halos[idxs_to_include], 2*np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension()), axis=1)) + + h2 = assemble(interpolate(2 * TestFunction(W), g)) + assert np.allclose(h2.dat.data_ro_with_halos[idxs_to_include], 2*np.prod(vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include].reshape(-1, vm.input_ordering.geometric_dimension), axis=1)) g = assemble(interpolate(h, V)) - assert np.allclose(g.dat.data_ro_with_halos, np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension()), axis=1)) + assert np.allclose(g.dat.data_ro_with_halos, np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension), axis=1)) g2 = assemble(interpolate(2 * h, V)) - assert np.allclose(g2.dat.data_ro_with_halos, 2*np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension()), axis=1)) + assert np.allclose(g2.dat.data_ro_with_halos, 2*np.prod(vm.coordinates.dat.data_ro_with_halos.reshape(-1, vm.geometric_dimension), axis=1)) -def vectorfunctionspace_tests(vm, petsc_raises): +def vectorfunctionspace_tests(vm): # Prep - gdim = vm.geometric_dimension() + gdim = vm.geometric_dimension num_cells = len(vm.coordinates.dat.data_ro) num_cells_mpi_global = 
MPI.COMM_WORLD.allreduce(num_cells, op=MPI.SUM) num_cells_halo = len(vm.coordinates.dat.data_ro_with_halos) - num_cells @@ -196,7 +194,7 @@ def vectorfunctionspace_tests(vm, petsc_raises): # num_vertices (globally) times. Note that we get a vertex cell for # each geometric dimension so we have to sum over geometric # dimension too. - R = VectorFunctionSpace(vm, "R", dim=gdim) + R = VectorFunctionSpace(vm, "R", 0, dim=gdim) ones = Function(R).assign(1) f.interpolate(ones) assert np.isclose(assemble(inner(f, f)*dx), num_cells_mpi_global*gdim) @@ -216,7 +214,7 @@ def vectorfunctionspace_tests(vm, petsc_raises): assert np.allclose(h.dat.data_ro[idxs_to_include], 2*vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include]) assert np.all(h.dat.data_ro_with_halos[~idxs_to_include] == -1) # Using permutation matrix - perm_mat = assemble(interpolate(TrialFunction(V), W, matfree=False)) + perm_mat = assemble(interpolate(TrialFunction(V), W), mat_type="aij") h2 = assemble(perm_mat @ g) assert np.allclose(h2.dat.data_ro_with_halos[idxs_to_include], h.dat.data_ro_with_halos[idxs_to_include]) # check other interpolation APIs work identically @@ -240,18 +238,16 @@ def vectorfunctionspace_tests(vm, petsc_raises): h_star = h.riesz_representation(riesz_map="l2") g = assemble(interpolate(TestFunction(V), h_star)) assert np.allclose(g.dat.data_ro_with_halos, 2*vm.coordinates.dat.data_ro_with_halos) - with petsc_raises(NotImplementedError): - # Can't use adjoint on interpolate with expressions yet - g2 = assemble(interpolate(2 * TestFunction(V), h_star)) - assert np.allclose(g2.dat.data_ro_with_halos, 4*vm.coordinates.dat.data_ro_with_halos) + + g2 = assemble(interpolate(2 * TestFunction(V), h_star)) + assert np.allclose(g2.dat.data_ro_with_halos, 4*vm.coordinates.dat.data_ro_with_halos) h_star = assemble(interpolate(TestFunction(W), g)) assert np.allclose(h_star.dat.data_ro[idxs_to_include], 2*vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include]) assert 
np.all(h_star.dat.data_ro_with_halos[~idxs_to_include] == 0) - with petsc_raises(NotImplementedError): - # Can't use adjoint on interpolate with expressions yet - h2 = assemble(interpolate(2 * TestFunction(W), g)) - assert np.allclose(h2.dat.data_ro[idxs_to_include], 4*vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include]) + + h2 = assemble(interpolate(2 * TestFunction(W), g)) + assert np.allclose(h2.dat.data_ro[idxs_to_include], 4*vm.input_ordering.coordinates.dat.data_ro_with_halos[idxs_to_include]) h = h_star.riesz_representation(riesz_map="l2") g = assemble(interpolate(h, V)) @@ -261,12 +257,12 @@ def vectorfunctionspace_tests(vm, petsc_raises): @pytest.mark.parallel([1, 3]) -def test_functionspaces(parentmesh, vertexcoords, petsc_raises): +def test_functionspaces(parentmesh, vertexcoords): vm = VertexOnlyMesh(parentmesh, vertexcoords, missing_points_behaviour="ignore") - functionspace_tests(vm, petsc_raises) - vectorfunctionspace_tests(vm, petsc_raises) - functionspace_tests(vm.input_ordering, petsc_raises) - vectorfunctionspace_tests(vm.input_ordering, petsc_raises) + functionspace_tests(vm) + vectorfunctionspace_tests(vm) + functionspace_tests(vm.input_ordering) + vectorfunctionspace_tests(vm.input_ordering) @pytest.mark.parallel(nprocs=2) @@ -373,9 +369,9 @@ def test_tensorfs_permutation(tensorfs_and_expr): f = Function(V) f.interpolate(expr) f_in_W = assemble(interpolate(f, W)) - python_mat = assemble(interpolate(TrialFunction(V), W, matfree=False)) + python_mat = assemble(interpolate(TrialFunction(V), W), mat_type="matfree") f_in_W_2 = assemble(python_mat @ f) assert np.allclose(f_in_W.dat.data_ro, f_in_W_2.dat.data_ro) - petsc_mat = assemble(interpolate(TrialFunction(V), W, matfree=True)) + petsc_mat = assemble(interpolate(TrialFunction(V), W), mat_type="aij") f_in_W_petsc = assemble(petsc_mat @ f) assert np.allclose(f_in_W.dat.data_ro, f_in_W_petsc.dat.data_ro) diff --git a/tests/firedrake/vertexonly/test_vertex_only_mesh_generation.py 
b/tests/firedrake/vertexonly/test_vertex_only_mesh_generation.py index 4fec737690..4361733dcd 100644 --- a/tests/firedrake/vertexonly/test_vertex_only_mesh_generation.py +++ b/tests/firedrake/vertexonly/test_vertex_only_mesh_generation.py @@ -25,11 +25,11 @@ def cell_midpoints(m): num_cells_local = len(f.dat.data_ro) num_cells = MPI.COMM_WORLD.allreduce(num_cells_local, op=MPI.SUM) # reshape is for 1D case where f.dat.data_ro has shape (num_cells_local,) - local_midpoints = f.dat.data_ro.reshape(num_cells_local, m.geometric_dimension()) + local_midpoints = f.dat.data_ro.reshape(num_cells_local, m.geometric_dimension) local_midpoints_size = np.array(local_midpoints.size) local_midpoints_sizes = np.empty(MPI.COMM_WORLD.size, dtype=int) MPI.COMM_WORLD.Allgatherv(local_midpoints_size, local_midpoints_sizes) - midpoints = np.empty((num_cells, m.geometric_dimension()), dtype=local_midpoints.dtype) + midpoints = np.empty((num_cells, m.geometric_dimension), dtype=local_midpoints.dtype) MPI.COMM_WORLD.Allgatherv(local_midpoints, (midpoints, local_midpoints_sizes)) assert len(np.unique(midpoints, axis=0)) == len(midpoints) return midpoints, local_midpoints @@ -90,7 +90,7 @@ def redundant(request): @pytest.fixture(params=[0, 1, 100], ids=lambda x: f"{x}-coords") def vertexcoords(request, parentmesh): - size = (request.param, parentmesh.geometric_dimension()) + size = (request.param, parentmesh.geometric_dimension) return pseudo_random_coords(size) @@ -115,10 +115,10 @@ def verify_vertexonly_mesh(m, vm, inputvertexcoords, name): `inputvertexcoords` should be the same for all MPI ranks to avoid hanging. 
""" - gdim = m.geometric_dimension() + gdim = m.geometric_dimension # Correct dims - assert vm.geometric_dimension() == gdim - assert vm.topological_dimension() == 0 + assert vm.geometric_dimension == gdim + assert vm.topological_dimension == 0 # Can initialise # has correct name assert vm.name == name @@ -276,7 +276,7 @@ def test_generate_cell_midpoints(parentmesh, redundant): # Check size of biggest len(vm.coordinates.dat.data_ro) so # locate_cell can be called on every processor max_len = MPI.COMM_WORLD.allreduce(len(vm.coordinates.dat.data_ro), op=MPI.MAX) - out_of_mesh_point = np.full((1, parentmesh.geometric_dimension()), np.inf) + out_of_mesh_point = np.full((1, parentmesh.geometric_dimension), np.inf) for i in range(max_len): if i < len(vm.coordinates.dat.data_ro): # [*here] @@ -354,7 +354,7 @@ def test_point_tolerance(): m = UnitSquareMesh(1, 1) assert m.tolerance == 0.5 # Make the mesh non-axis-aligned. - m.coordinates.dat.data[1, :] = [1.1, 1] + m.coordinates.dat.data[2, :] = [1.1, 1] coords = [[1.0501, 0.5]] vm = VertexOnlyMesh(m, coords, tolerance=0.1) assert vm.cell_set.size == 1 @@ -378,7 +378,7 @@ def test_missing_points_behaviour(parentmesh): Generate points outside of the parentmesh and check we get the expected error behaviour """ - inputcoord = np.full((1, parentmesh.geometric_dimension()), np.inf) + inputcoord = np.full((1, parentmesh.geometric_dimension), np.inf) assert len(inputcoord) == 1 # Can surpress error vm = VertexOnlyMesh(parentmesh, inputcoord, missing_points_behaviour="ignore") @@ -416,7 +416,7 @@ def test_outside_boundary_behaviour(parentmesh): if parentmesh.name == "immersedsphereextruded" or parentmesh.name == "immersedsphere": # except here! 
edge_point = negative_coord_furthest_from_origin(parentmesh) - inputcoord = np.full((1, parentmesh.geometric_dimension()), edge_point-1e-15) + inputcoord = np.full((1, parentmesh.geometric_dimension), edge_point-1e-15) assert len(inputcoord) == 1 # Tolerance is too small to pick up point vm = VertexOnlyMesh(parentmesh, inputcoord, tolerance=1e-16, missing_points_behaviour="ignore") @@ -468,7 +468,7 @@ def test_inside_boundary_behaviour(parentmesh): if parentmesh.name == "immersedsphereextruded" or parentmesh.name == "immersedsphere": # except here! edge_point = negative_coord_furthest_from_origin(parentmesh) - inputcoord = np.full((1, parentmesh.geometric_dimension()), edge_point+1e-15) + inputcoord = np.full((1, parentmesh.geometric_dimension), edge_point+1e-15) assert len(inputcoord) == 1 # Tolerance is large enough to pick up point vm = VertexOnlyMesh(parentmesh, inputcoord, tolerance=1e-14, missing_points_behaviour="ignore") diff --git a/tests/pyop2/test_matrices.py b/tests/pyop2/test_matrices.py index f3c354d49f..96b25cd689 100644 --- a/tests/pyop2/test_matrices.py +++ b/tests/pyop2/test_matrices.py @@ -609,14 +609,7 @@ def test_mat_always_has_diagonal_space(self): d2 = op2.Set(3) m2 = op2.Map(s, d2, 1, [1]) sparsity = op2.Sparsity((d ** 1, d2 ** 1), [(m, m2, None)]) - - from petsc4py import PETSc - # petsc4py default error handler swallows SETERRQ, so just - # install the abort handler to notice an error. 
- PETSc.Sys.pushErrorHandler("abort") mat = op2.Mat(sparsity) - PETSc.Sys.popErrorHandler() - assert np.allclose(mat.handle.getDiagonal().array, 0.0) def test_minimal_zero_mat(self): diff --git a/tests/pyop2/test_mpi.py b/tests/pyop2/test_mpi.py new file mode 100644 index 0000000000..106b8377f2 --- /dev/null +++ b/tests/pyop2/test_mpi.py @@ -0,0 +1,26 @@ +import pytest +import pytest_mpi +from mpi4py import MPI + +import pyop2.mpi + + +def passing_test(): + return "pass" + + +def failing_test(): + raise RuntimeError("This test has failed") + + +@pytest.mark.parallel(2) +@pytest.mark.parametrize("root", [0, 1]) +def test_branches_on_rank_do_not_deadlock(root): + result = pyop2.mpi.safe_noncollective(MPI.COMM_WORLD, passing_test, root=root) + pytest_mpi.parallel_assert(result == "pass") + + try: + result = pyop2.mpi.safe_noncollective(MPI.COMM_WORLD, failing_test, root=root) + except BaseException as e: + result = e + pytest_mpi.parallel_assert(isinstance(result, RuntimeError) and str(result) == "This test has failed") diff --git a/tests/tsfc/test_dual_evaluation.py b/tests/tsfc/test_dual_evaluation.py index 85f1617678..16d07f172b 100644 --- a/tests/tsfc/test_dual_evaluation.py +++ b/tests/tsfc/test_dual_evaluation.py @@ -11,8 +11,7 @@ def test_ufl_only_simple(): v = ufl.Coefficient(V) expr = ufl.inner(v, v) W = V - to_element = create_element(W.ufl_element()) - kernel = compile_expression_dual_evaluation(expr, to_element, W.ufl_element()) + kernel = compile_expression_dual_evaluation(expr, W.ufl_element()) assert kernel.needs_external_coords is False @@ -22,8 +21,7 @@ def test_ufl_only_spatialcoordinate(): x, y = ufl.SpatialCoordinate(mesh) expr = x*y - y**2 + x W = V - to_element = create_element(W.ufl_element()) - kernel = compile_expression_dual_evaluation(expr, to_element, W.ufl_element()) + kernel = compile_expression_dual_evaluation(expr, W.ufl_element()) assert kernel.needs_external_coords is True @@ -33,8 +31,7 @@ def 
test_ufl_only_from_contravariant_piola(): v = ufl.Coefficient(V) expr = ufl.inner(v, v) W = ufl.FunctionSpace(mesh, finat.ufl.FiniteElement("P", ufl.triangle, 2)) - to_element = create_element(W.ufl_element()) - kernel = compile_expression_dual_evaluation(expr, to_element, W.ufl_element()) + kernel = compile_expression_dual_evaluation(expr, W.ufl_element()) assert kernel.needs_external_coords is True @@ -44,8 +41,7 @@ def test_ufl_only_to_contravariant_piola(): v = ufl.Coefficient(V) expr = ufl.as_vector([v, v]) W = ufl.FunctionSpace(mesh, finat.ufl.FiniteElement("RT", ufl.triangle, 1)) - to_element = create_element(W.ufl_element()) - kernel = compile_expression_dual_evaluation(expr, to_element, W.ufl_element()) + kernel = compile_expression_dual_evaluation(expr, W.ufl_element()) assert kernel.needs_external_coords is True @@ -59,4 +55,4 @@ def test_ufl_only_shape_mismatch(): to_element = create_element(W.ufl_element()) assert to_element.value_shape == (2,) with pytest.raises(ValueError): - compile_expression_dual_evaluation(expr, to_element, W.ufl_element()) + compile_expression_dual_evaluation(expr, W.ufl_element()) diff --git a/tests/tsfc/test_idempotency.py b/tests/tsfc/test_idempotency.py index cf554f4641..152f406848 100644 --- a/tests/tsfc/test_idempotency.py +++ b/tests/tsfc/test_idempotency.py @@ -9,7 +9,7 @@ ufl.triangle, ufl.quadrilateral, ufl.tetrahedron], - ids=lambda x: x.cellname()) + ids=lambda x: x.cellname) def cell(request): return request.param diff --git a/tests/tsfc/test_impero_loopy_flop_counts.py b/tests/tsfc/test_impero_loopy_flop_counts.py index 76bdad96e3..240067bd6e 100644 --- a/tests/tsfc/test_impero_loopy_flop_counts.py +++ b/tests/tsfc/test_impero_loopy_flop_counts.py @@ -32,7 +32,7 @@ def count_loopy_flops(kernel): @pytest.fixture(params=[interval, triangle, quadrilateral, TensorProductCell(triangle, interval)], - ids=lambda cell: cell.cellname()) + ids=lambda cell: cell.cellname) def cell(request): return request.param diff --git 
a/tests/tsfc/test_interpolation_factorisation.py b/tests/tsfc/test_interpolation_factorisation.py index 4355c24b1f..c76753a890 100644 --- a/tests/tsfc/test_interpolation_factorisation.py +++ b/tests/tsfc/test_interpolation_factorisation.py @@ -7,11 +7,10 @@ from finat.ufl import FiniteElement, VectorElement, TensorElement from tsfc import compile_expression_dual_evaluation -from finat.element_factory import create_element @pytest.fixture(params=[interval, quadrilateral, hexahedron], - ids=lambda x: x.cellname()) + ids=lambda x: x.cellname) def mesh(request): return Mesh(VectorElement("P", request.param, 1)) @@ -29,9 +28,8 @@ def element(request, mesh): def flop_count(mesh, source, target): Vtarget = FunctionSpace(mesh, target) Vsource = FunctionSpace(mesh, source) - to_element = create_element(Vtarget.ufl_element()) expr = Coefficient(Vsource) - kernel = compile_expression_dual_evaluation(expr, to_element, Vtarget.ufl_element()) + kernel = compile_expression_dual_evaluation(expr, Vtarget.ufl_element()) return kernel.flop_count @@ -44,7 +42,7 @@ def test_sum_factorisation(mesh, element): flops.append(flop_count(mesh, element(int(lo)), element(int(hi)))) flops = numpy.asarray(flops) rates = numpy.diff(numpy.log(flops)) / numpy.diff(numpy.log(degrees)) - assert (rates < (mesh.topological_dimension()+1)).all() + assert (rates < (mesh.topological_dimension+1)).all() def test_sum_factorisation_scalar_tensor(mesh, element): diff --git a/tests/tsfc/test_sum_factorisation.py b/tests/tsfc/test_sum_factorisation.py index 3e785c5b26..891cf1c6cc 100644 --- a/tests/tsfc/test_sum_factorisation.py +++ b/tests/tsfc/test_sum_factorisation.py @@ -20,9 +20,9 @@ def helmholtz(cell, degree): def split_mixed_poisson(cell, degree): m = Mesh(VectorElement('CG', cell, 1)) - if cell.cellname() in ['interval * interval', 'quadrilateral']: + if cell.cellname in ['interval * interval', 'quadrilateral']: hdiv_element = FiniteElement('RTCF', cell, degree) - elif cell.cellname() == 'triangle * 
interval': + elif cell.cellname == 'triangle * interval': U0 = FiniteElement('RT', triangle, degree) U1 = FiniteElement('DG', triangle, degree - 1) V0 = FiniteElement('CG', interval, degree) @@ -30,7 +30,7 @@ def split_mixed_poisson(cell, degree): Wa = HDivElement(TensorProductElement(U0, V1)) Wb = HDivElement(TensorProductElement(U1, V0)) hdiv_element = EnrichedElement(Wa, Wb) - elif cell.cellname() == 'quadrilateral * interval': + elif cell.cellname == 'quadrilateral * interval': hdiv_element = FiniteElement('NCF', cell, degree) RT = FunctionSpace(m, hdiv_element) DG = FunctionSpace(m, FiniteElement('DQ', cell, degree - 1)) @@ -43,9 +43,9 @@ def split_mixed_poisson(cell, degree): def split_vector_laplace(cell, degree): m = Mesh(VectorElement('CG', cell, 1)) - if cell.cellname() in ['interval * interval', 'quadrilateral']: + if cell.cellname in ['interval * interval', 'quadrilateral']: hcurl_element = FiniteElement('RTCE', cell, degree) - elif cell.cellname() == 'triangle * interval': + elif cell.cellname == 'triangle * interval': U0 = FiniteElement('RT', triangle, degree) U1 = FiniteElement('CG', triangle, degree) V0 = FiniteElement('CG', interval, degree) @@ -53,7 +53,7 @@ def split_vector_laplace(cell, degree): Wa = HCurlElement(TensorProductElement(U0, V0)) Wb = HCurlElement(TensorProductElement(U1, V1)) hcurl_element = EnrichedElement(Wa, Wb) - elif cell.cellname() == 'quadrilateral * interval': + elif cell.cellname == 'quadrilateral * interval': hcurl_element = FiniteElement('NCE', cell, degree) RT = FunctionSpace(m, hcurl_element) CG = FunctionSpace(m, FiniteElement('Q', cell, degree)) diff --git a/tests/tsfc/test_tensor.py b/tests/tsfc/test_tensor.py index 9d09a467fb..7e2d4de06b 100644 --- a/tests/tsfc/test_tensor.py +++ b/tests/tsfc/test_tensor.py @@ -55,7 +55,7 @@ def count_flops(form): (triangle, 4), (tetrahedron, 6)]) def test_bilinear(form, cell, order): - degrees = numpy.arange(1, 9 - 2 * cell.topological_dimension()) + degrees = numpy.arange(1, 9 - 
2 * cell.topological_dimension) flops = [count_flops(form(cell, int(degree))) for degree in degrees] rates = numpy.diff(numpy.log(flops)) / numpy.diff(numpy.log(degrees + 1)) @@ -73,7 +73,7 @@ def form(cell, degree): v = TestFunction(V) return v*dx - degrees = numpy.arange(2, 9 - 1.5 * cell.topological_dimension()) + degrees = numpy.arange(2, 9 - 1.5 * cell.topological_dimension) flops = [count_flops(form(cell, int(degree))) for degree in degrees] rates = numpy.diff(numpy.log(flops)) / numpy.diff(numpy.log(degrees + 1)) @@ -91,7 +91,7 @@ def form(cell, degree): f = Coefficient(V) return div(f)*dx - dim = cell.topological_dimension() + dim = cell.topological_dimension degrees = numpy.arange(2, 8 - dim) + (3 - dim) flops = [count_flops(form(cell, int(degree))) for degree in degrees] diff --git a/tests/tsfc/test_tsfc_182.py b/tests/tsfc/test_tsfc_182.py index 556a6bafb0..a6208a491c 100644 --- a/tests/tsfc/test_tsfc_182.py +++ b/tests/tsfc/test_tsfc_182.py @@ -1,6 +1,6 @@ import pytest -from ufl import Coefficient, TestFunction, dx, inner, tetrahedron, Mesh, FunctionSpace +from ufl import Coefficient, TestFunction, dx, inner, tetrahedron, Mesh, MeshSequence, FunctionSpace from finat.ufl import FiniteElement, MixedElement, VectorElement from tsfc import compile_form @@ -20,7 +20,8 @@ def test_delta_elimination(mode): element_chi_lambda = MixedElement(element_eps_p, element_lambda) domain = Mesh(VectorElement("Lagrange", tetrahedron, 1)) - space = FunctionSpace(domain, element_chi_lambda) + domains = MeshSequence([domain, domain]) + space = FunctionSpace(domains, element_chi_lambda) chi_lambda = Coefficient(space) delta_chi_lambda = TestFunction(space) diff --git a/tests/tsfc/test_tsfc_204.py b/tests/tsfc/test_tsfc_204.py index 89f1481590..fe889e48b8 100644 --- a/tests/tsfc/test_tsfc_204.py +++ b/tests/tsfc/test_tsfc_204.py @@ -1,12 +1,13 @@ from tsfc import compile_form from ufl import (Coefficient, FacetNormal, - FunctionSpace, Mesh, as_matrix, + FunctionSpace, Mesh, 
MeshSequence, as_matrix, dot, dS, ds, dx, facet, grad, inner, outer, split, triangle) from finat.ufl import BrokenElement, FiniteElement, MixedElement, VectorElement def test_physically_mapped_facet(): mesh = Mesh(VectorElement("P", triangle, 1)) + meshes = MeshSequence([mesh, mesh, mesh, mesh, mesh]) # set up variational problem U = FiniteElement("Morley", mesh.ufl_cell(), 2) @@ -15,7 +16,7 @@ def test_physically_mapped_facet(): Vv = VectorElement(BrokenElement(V)) Qhat = VectorElement(BrokenElement(V[facet]), dim=2) Vhat = VectorElement(V[facet], dim=2) - Z = FunctionSpace(mesh, MixedElement(U, Vv, Qhat, Vhat, R)) + Z = FunctionSpace(meshes, MixedElement(U, Vv, Qhat, Vhat, R)) z = Coefficient(Z) u, d, qhat, dhat, lam = split(z) diff --git a/tests/tsfc/test_underintegration.py b/tests/tsfc/test_underintegration.py index 24221e05a7..cb9cfb78b7 100644 --- a/tests/tsfc/test_underintegration.py +++ b/tests/tsfc/test_underintegration.py @@ -21,7 +21,7 @@ def gll_quadrature_rule(cell, elem_deg): fiat_rule = GaussLobattoLegendreQuadratureLineRule(fiat_cell, elem_deg + 1) line_rules = [QuadratureRule(GaussLobattoLegendrePointSet(fiat_rule.get_points()), fiat_rule.get_weights()) - for _ in range(cell.topological_dimension())] + for _ in range(cell.topological_dimension)] finat_rule = reduce(lambda a, b: TensorProductQuadratureRule([a, b]), line_rules) return finat_rule @@ -31,7 +31,7 @@ def gl_quadrature_rule(cell, elem_deg): fiat_rule = GaussLegendreQuadratureLineRule(fiat_cell, elem_deg + 1) line_rules = [QuadratureRule(GaussLegendrePointSet(fiat_rule.get_points()), fiat_rule.get_weights()) - for _ in range(cell.topological_dimension())] + for _ in range(cell.topological_dimension)] finat_rule = reduce(lambda a, b: TensorProductQuadratureRule([a, b]), line_rules) return finat_rule diff --git a/tsfc/__init__.py b/tsfc/__init__.py index f12615ebe5..42f78e8071 100644 --- a/tsfc/__init__.py +++ b/tsfc/__init__.py @@ -1,5 +1,6 @@ from tsfc.driver import compile_form, 
compile_expression_dual_evaluation # noqa: F401 from tsfc.parameters import default_parameters # noqa: F401 +from tsfc.exceptions import MismatchingDomainError # noqa: F401 def register_citations(): diff --git a/tsfc/driver.py b/tsfc/driver.py index 89db890f24..1025fadfc5 100644 --- a/tsfc/driver.py +++ b/tsfc/driver.py @@ -1,34 +1,37 @@ import collections import time import sys -import numpy from itertools import chain -from finat.physically_mapped import DirectlyDefinedElement, PhysicallyMappedElement +from finat.physically_mapped import NeedsCoordinateMappingElement import ufl from ufl.algorithms import extract_coefficients from ufl.algorithms.analysis import has_type from ufl.algorithms.apply_coefficient_split import CoefficientSplitter from ufl.classes import Form, GeometricQuantity -from ufl.domain import extract_unique_domain +from ufl.domain import extract_unique_domain, extract_domains import gem import gem.impero_utils as impero_utils import finat +from finat.element_factory import as_fiat_cell from tsfc import fem, ufl_utils from tsfc.logging import logger +from tsfc.modified_terminals import analyse_modified_terminal from tsfc.parameters import default_parameters, is_complex -from tsfc.ufl_utils import apply_mapping, extract_firedrake_constants +from tsfc.ufl_utils import apply_mapping, extract_firedrake_constants, simplify_abs import tsfc.kernel_interface.firedrake_loopy as firedrake_interface_loopy +from tsfc.exceptions import MismatchingDomainError + # To handle big forms. 
The various transformations might need a deeper stack sys.setrecursionlimit(3000) TSFCIntegralDataInfo = collections.namedtuple("TSFCIntegralDataInfo", - ["domain", "integral_type", "subdomain_id", "domain_number", + ["domain", "integral_type", "subdomain_id", "domain_number", "domain_integral_type_map", "arguments", "coefficients", "coefficient_split", "coefficient_numbers"]) TSFCIntegralDataInfo.__doc__ = """ @@ -89,13 +92,17 @@ def compile_form(form, prefix="form", parameters=None, dont_split_numbers=(), di complex_mode=complex_mode, ) logger.info(GREEN % "compute_form_data finished in %g seconds.", time.time() - cpu_time) + + validate_domains(form_data.preprocessed_form) + # Create local kernels. kernels = [] for integral_data in form_data.integral_data: start = time.time() - kernel = compile_integral(integral_data, form_data, prefix, parameters, diagonal=diagonal) - if kernel is not None: - kernels.append(kernel) + if integral_data.integrals: + kernel = compile_integral(integral_data, form_data, prefix, parameters, diagonal=diagonal) + if kernel is not None: + kernels.append(kernel) logger.info(GREEN % "compile_integral finished in %g seconds.", time.time() - start) logger.info(GREEN % "TSFC finished in %g seconds.", time.time() - cpu_time) @@ -115,14 +122,10 @@ def compile_integral(integral_data, form_data, prefix, parameters, *, diagonal=F parameters = preprocess_parameters(parameters) scalar_type = parameters["scalar_type"] integral_type = integral_data.integral_type - mesh = integral_data.domain arguments = form_data.preprocessed_form.arguments() if integral_type.startswith("interior_facet") and diagonal and any(a.function_space().finat_element.is_dg() for a in arguments): raise NotImplementedError("Sorry, we can't assemble the diagonal of a form for interior facet integrals") kernel_name = f"{prefix}_{integral_type}_integral" - # Dict mapping domains to index in original_form.ufl_domains() - domain_numbering = form_data.original_form.domain_numbering() - 
domain_number = domain_numbering[integral_data.domain] # This is which coefficient in the original form the # current coefficient is. # Consider f*v*dx + g*v*ds, the full form contains two @@ -137,23 +140,32 @@ def compile_integral(integral_data, form_data, prefix, parameters, *, diagonal=F if coeff in form_data.coefficient_split: coefficient_split[coeff] = form_data.coefficient_split[coeff] coefficient_numbers.append(form_data.original_coefficient_positions[i]) + mesh = integral_data.domain + all_meshes = extract_domains(form_data.original_form) + domain_number = all_meshes.index(mesh) + integral_data_info = TSFCIntegralDataInfo( domain=integral_data.domain, integral_type=integral_data.integral_type, subdomain_id=integral_data.subdomain_id, domain_number=domain_number, + domain_integral_type_map={mesh: integral_data.domain_integral_type_map.get(mesh, None) for mesh in all_meshes}, arguments=arguments, coefficients=coefficients, coefficient_split=coefficient_split, coefficient_numbers=coefficient_numbers, ) + builder = firedrake_interface_loopy.KernelBuilder( integral_data_info, scalar_type, diagonal=diagonal, ) - builder.set_coordinates(mesh) - builder.set_cell_sizes(mesh) + builder.set_entity_numbers(all_meshes) + builder.set_entity_orientations(all_meshes) + builder.set_coordinates(all_meshes) + builder.set_cell_orientations(all_meshes) + builder.set_cell_sizes(all_meshes) builder.set_coefficients() # TODO: We do not want pass constants to kernels that do not need them # so we should attach the constants to integral data instead @@ -168,6 +180,31 @@ def compile_integral(integral_data, form_data, prefix, parameters, *, diagonal=F return builder.construct_kernel(kernel_name, ctx, parameters["add_petsc_events"]) +def validate_domains(form): + if len(extract_domains(form)) == 1: + # Not a multi-domain form, we do not need to keep checking + return + + for itg in form.integrals(): + # Check that all domains are related to each other + domain = itg.ufl_domain() + for 
other_domain in itg.extra_domain_integral_type_map(): + if domain.submesh_youngest_common_ancestor(other_domain) is None: + raise MismatchingDomainError("Assembly of forms over unrelated meshes is not supported. " + "Try using Submeshes or cross-mesh interpolation.") + + # Check that all Arguments and Coefficients are defined on the valid domains + valid_domains = set(itg.extra_domain_integral_type_map()) + valid_domains.add(domain) + + itg_domains = set(extract_domains(itg)) + if len(itg_domains - valid_domains) > 0: + raise MismatchingDomainError("Argument or Coefficient domain not found in integral. " + "Possibly, the form contains coefficients on different meshes " + "and requires measure intersection, for example: " + 'Measure("dx", argument_mesh, intersect_measures=[Measure("dx", coefficient_mesh)]).') + + def preprocess_parameters(parameters): if parameters is None: parameters = default_parameters() @@ -183,15 +220,14 @@ def preprocess_parameters(parameters): return parameters -def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, +def compile_expression_dual_evaluation(expression, ufl_element, *, domain=None, interface=None, - parameters=None): + parameters=None, name=None): """Compile a UFL expression to be evaluated against a compile-time known reference element's dual basis. Useful for interpolating UFL expressions into e.g. N1curl spaces. :arg expression: UFL expression - :arg to_element: A FInAT element for the target space :arg ufl_element: The UFL element of the target space. :arg domain: optional UFL domain the expression is defined on (required when expression contains no domain). 
:arg interface: backend module for the kernel interface @@ -208,9 +244,6 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, # Determine whether in complex mode complex_mode = is_complex(parameters["scalar_type"]) - if isinstance(to_element, (PhysicallyMappedElement, DirectlyDefinedElement)): - raise NotImplementedError("Don't know how to interpolate onto zany spaces, sorry") - orig_coefficients = extract_coefficients(expression) if isinstance(expression, ufl.Interpolate): v, operand = expression.argument_slots() @@ -223,6 +256,7 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, # Apply UFL preprocessing operand = ufl_utils.preprocess_expression(operand, complex_mode=complex_mode) + operand = simplify_abs(operand, complex_mode) # Reconstructed Interpolate with mapped operand expression = ufl.Interpolate(operand, v) @@ -242,11 +276,17 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, if domain is None: domain = extract_unique_domain(expression) assert domain is not None + builder._domain_integral_type_map = {domain: "cell"} + builder._entity_ids = {domain: (0,)} # Collect required coefficients and determine numbering coefficients = extract_coefficients(expression) coefficient_numbers = tuple(map(orig_coefficients.index, coefficients)) builder.set_coefficient_numbers(coefficient_numbers) + # Need this ad-hoc fix for now. + for c in coefficients: + d = extract_unique_domain(c) + builder._domain_integral_type_map[d] = "cell" elements = [f.ufl_element() for f in (*coefficients, *arguments)] @@ -255,7 +295,8 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, # Create a fake coordinate coefficient for a domain. 
coords_coefficient = ufl.Coefficient(ufl.FunctionSpace(domain, domain.ufl_coordinate_element())) builder.domain_coordinate[domain] = coords_coefficient - builder.set_cell_sizes(domain) + builder.set_cell_orientations((domain, )) + builder.set_cell_sizes((domain, )) coefficients = [coords_coefficient] + coefficients needs_external_coords = True builder.set_coefficients(coefficients) @@ -270,13 +311,20 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, # Set up kernel config for translation of UFL expression to gem kernel_cfg = dict(interface=builder, ufl_cell=domain.ufl_cell(), + integration_dim=as_fiat_cell(domain.ufl_cell()).get_dimension(), # FIXME: change if we ever implement # interpolation on facets. - integral_type="cell", argument_multiindices=argument_multiindices, index_cache={}, scalar_type=parameters["scalar_type"]) + # Create the finat element for the target space + try: + to_element = builder.create_element(ufl_element) + except KeyError: + # FInAT only elements + raise NotImplementedError(f"Don't know how to create FIAT element for {ufl_element}") + # Allow interpolation onto QuadratureElements to refer to the quadrature # rule they represent if isinstance(to_element, finat.QuadratureElement): @@ -289,14 +337,17 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, # Get the gem expression for dual evaluation and corresponding basis # indices needed for compilation of the expression - evaluation, basis_indices = to_element.dual_evaluation(fn) + if isinstance(to_element, NeedsCoordinateMappingElement): + ctx = fem.PointSetContext(**kernel_cfg) + mt = analyse_modified_terminal(ufl.Coefficient(dual_arg.ufl_function_space().dual())) + coordinate_mapping = fem.CoordinateMapping(mt, ctx) + else: + coordinate_mapping = None + evaluation, basis_indices = to_element.dual_evaluation(fn, coordinate_mapping) # Compute the action against the dual argument - if dual_arg in coefficients: - name = 
f"w_{coefficients.index(dual_arg)}" - shape = tuple(i.extent for i in basis_indices) - size = numpy.prod(shape, dtype=int) - gem_dual = gem.reshape(gem.Variable(name, shape=(size,)), shape) + if isinstance(dual_arg, ufl.Cofunction): + gem_dual = builder.coefficient_map[dual_arg] if complex_mode: evaluation = gem.MathFunction('conj', evaluation) evaluation = gem.IndexSum(evaluation * gem_dual[basis_indices], basis_indices) @@ -306,6 +357,13 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, argument_multiindices = dict(sorted(argument_multiindices.items())) + # Unroll + max_extent = parameters["unroll_indexsum"] + if max_extent: + def predicate(index): + return index.extent <= max_extent + evaluation, = gem.optimise.unroll_indexsum([evaluation], predicate=predicate) + # Build kernel body return_indices = tuple(chain.from_iterable(argument_multiindices.values())) return_shape = tuple(i.extent for i in return_indices) @@ -321,7 +379,7 @@ def compile_expression_dual_evaluation(expression, to_element, ufl_element, *, builder.register_requirements([evaluation]) builder.set_output(return_var) # Build kernel tuple - return builder.construct_kernel(impero_c, index_names, needs_external_coords, parameters["add_petsc_events"]) + return builder.construct_kernel(impero_c, index_names, needs_external_coords, parameters["add_petsc_events"], name=name) class DualEvaluationCallable(object): diff --git a/tsfc/exceptions.py b/tsfc/exceptions.py new file mode 100644 index 0000000000..8789008cd0 --- /dev/null +++ b/tsfc/exceptions.py @@ -0,0 +1,4 @@ + + +class MismatchingDomainError(Exception): + """Error raised for unsupported multidomain problems""" diff --git a/tsfc/fem.py b/tsfc/fem.py index 9166b4b8f0..943089052e 100644 --- a/tsfc/fem.py +++ b/tsfc/fem.py @@ -9,7 +9,7 @@ import numpy import ufl from FIAT.orientation_utils import Orientation as FIATOrientation -from FIAT.reference_element import UFCHexahedron, UFCSimplex, make_affine_mapping +from 
FIAT.reference_element import UFCHexahedron, UFCQuadrilateral, UFCSimplex, make_affine_mapping from FIAT.reference_element import TensorProductCell from finat.physically_mapped import (NeedsCoordinateMappingElement, PhysicalGeometry) @@ -31,9 +31,11 @@ from ufl.corealg.map_dag import map_expr_dag, map_expr_dags from ufl.corealg.multifunction import MultiFunction from ufl.domain import extract_unique_domain +from ufl.algorithms import extract_arguments from tsfc import ufl2gem from tsfc.kernel_interface import ProxyKernelInterface +from tsfc.kernel_interface.common import lower_integral_type from tsfc.modified_terminals import (analyse_modified_terminal, construct_modified_terminal) from tsfc.parameters import is_complex @@ -50,9 +52,7 @@ class ContextBase(ProxyKernelInterface): keywords = ( 'ufl_cell', 'fiat_cell', - 'integral_type', 'integration_dim', - 'entity_ids', 'argument_multiindices', 'facetarea', 'index_cache', @@ -74,9 +74,14 @@ def fiat_cell(self): @cached_property def integration_dim(self): - return self.fiat_cell.get_dimension() - - entity_ids = [0] + integration_dims = set() + for domain, integral_type in self.domain_integral_type_map.items(): + cell = domain.ufl_cell() + fiat_cell = as_fiat_cell(cell) + integration_dim, _ = lower_integral_type(fiat_cell, integral_type) + integration_dims.add(integration_dim) + integration_dim, = integration_dims + return integration_dim @cached_property def epsilon(self): @@ -86,7 +91,7 @@ def epsilon(self): def complex_mode(self): return is_complex(self.scalar_type) - def entity_selector(self, callback, restriction): + def entity_selector(self, callback, domain, restriction): """Selects code for the correct entity at run-time. Callback generates code for a specified entity. @@ -97,11 +102,11 @@ def entity_selector(self, callback, restriction): :arg restriction: Restriction of the modified terminal, used for entity selection. 
""" - if len(self.entity_ids) == 1: - return callback(self.entity_ids[0]) + if len(self.entity_ids(domain)) == 1: + return callback(self.entity_ids(domain)[0]) else: - f = self.entity_number(restriction) - return gem.select_expression(list(map(callback, self.entity_ids)), f) + f = self.entity_number(domain, restriction) + return gem.select_expression(list(map(callback, self.entity_ids(domain))), f) argument_multiindices = () @@ -119,7 +124,52 @@ def use_canonical_quadrature_point_ordering(self): # Directly set use_canonical_quadrature_point_ordering = False in context # for translation of special nodes, e.g., CellVolume, FacetArea, CellOrigin, and CellVertices, # as quadrature point ordering is not relevant for those node types. - return isinstance(self.fiat_cell, UFCHexahedron) and self.integral_type in ['exterior_facet', 'interior_facet'] + def _any(cell_type, integral_types): + for d, it in self.domain_integral_type_map.items(): + if it is None: + continue + c = as_fiat_cell(d.ufl_cell()) + if isinstance(c, cell_type) and it in integral_types: + return True + return False + if _any(UFCHexahedron, ['exterior_facet', 'interior_facet']): + return True + elif _any(UFCQuadrilateral, ['exterior_facet', 'interior_facet']) and _any(UFCSimplex, ['cell', 'exterior_facet', 'interior_facet']): + return True + else: + return False + + +class CellKernelInterface(ProxyKernelInterface): + # For a single-domain cell integration kernel. + + def __init__(self, wrapee, domain): + super().__init__(wrapee) + self._domain = domain + + def entity_ids(self, domain): + if domain is not self._domain: + raise ValueError(f"{domain} != {self._domain}") + return (0,) + + @cached_property + def domain_integral_type_map(self): + return {self._domain: "cell"} + + +class CellVolumeKernelInterface(CellKernelInterface): + # Since CellVolume is evaluated as a cell integral, we must ensure + # that the right restriction is applied when it is used in an + # interior facet integral. 
This proxy diverts coefficient + # translation to use a specified restriction. + + def __init__(self, wrapee, domain, restriction): + super().__init__(wrapee, domain) + self.restriction = restriction + + def coefficient(self, ufl_coefficient, r): + assert r is None + return self._wrapee.coefficient(ufl_coefficient, self.restriction) class CoordinateMapping(PhysicalGeometry): @@ -144,7 +194,8 @@ def preprocess(self, expr, context): :arg context: The translation context. :returns: A new UFL expression """ - ifacet = self.interface.integral_type.startswith("interior_facet") + domain = extract_unique_domain(self.mt.terminal) + ifacet = self.interface.domain_integral_type_map[domain].startswith("interior_facet") return preprocess_expression(expr, complex_mode=context.complex_mode, do_apply_restrictions=ifacet) @@ -155,7 +206,7 @@ def config(self): config["interface"] = self.interface return config - def translate_point_expression(self, expr, point=None): + def translate_point_expression(self, expr, point=None, interface=None): if self.mt.restriction == '+': expr = PositiveRestricted(expr) elif self.mt.restriction == '-': @@ -168,20 +219,26 @@ def translate_point_expression(self, expr, point=None): config = {"point_set": PointSingleton(point)} config.update(self.config) config.update(use_canonical_quadrature_point_ordering=False) # quad point ordering not relevant. 
+ if interface: + config.update(interface=interface) context = PointSetContext(**config) expr = self.preprocess(expr, context) return map_expr_dag(context.translator, expr) def cell_size(self): - return self.interface.cell_size(self.mt.restriction) + return self.interface.cell_size(extract_unique_domain(self.mt.terminal), self.mt.restriction) def jacobian_at(self, point): - expr = Jacobian(extract_unique_domain(self.mt.terminal)) - return self.translate_point_expression(expr, point=point) + domain = extract_unique_domain(self.mt.terminal) + expr = Jacobian(domain) + interface = CellKernelInterface(self.interface, domain) + return self.translate_point_expression(expr, point=point, interface=interface) def detJ_at(self, point): - expr = JacobianDeterminant(extract_unique_domain(self.mt.terminal)) - return self.translate_point_expression(expr, point=point) + domain = extract_unique_domain(self.mt.terminal) + expr = JacobianDeterminant(domain) + interface = CellKernelInterface(self.interface, domain) + return self.translate_point_expression(expr, point=point, interface=interface) def reference_normals(self): cell = self.interface.fiat_cell @@ -195,6 +252,11 @@ def reference_edge_tangents(self): num_edges = len(cell.get_topology()[1]) return gem.Literal(numpy.asarray([cell.compute_edge_tangent(i) for i in range(num_edges)])) + def normalized_reference_edge_tangents(self): + cell = self.interface.fiat_cell + num_edges = len(cell.get_topology()[1]) + return gem.Literal(numpy.asarray([cell.compute_normalized_edge_tangent(i) for i in range(num_edges)])) + def physical_tangents(self): cell = self.interface.fiat_cell sd = cell.get_spatial_dimension() @@ -235,7 +297,7 @@ def physical_points(self, point_set, entity=None): e, _ = entity assert point_shape == e else: - assert point_shape == extract_unique_domain(expr).topological_dimension() + assert point_shape == extract_unique_domain(expr).topological_dimension if self.mt.restriction == '+': expr = PositiveRestricted(expr) 
elif self.mt.restriction == '-': @@ -244,7 +306,7 @@ def physical_points(self, point_set, entity=None): config.update(self.config) if entity is not None: config.update({name: getattr(self.interface, name) - for name in ["integration_dim", "entity_ids"]}) + for name in ["integration_dim"]}) config.update(use_canonical_quadrature_point_ordering=False) # quad point ordering not relevant. context = PointSetContext(**config) expr = self.preprocess(expr, context) @@ -271,13 +333,6 @@ def get_quadrature_rule(fiat_cell, integration_dim, quadrature_degree, scheme): return make_quadrature(integration_cell, quadrature_degree, scheme=scheme) -def make_basis_evaluation_key(ctx, finat_element, mt, entity_id): - ufl_element = mt.terminal.ufl_element() - domain = extract_unique_domain(mt.terminal) - coordinate_element = domain.ufl_coordinate_element() - return (ufl_element, mt.local_derivatives, ctx.point_set, ctx.integration_dim, entity_id, coordinate_element, mt.restriction) - - class PointSetContext(ContextBase): """Context for compile-time known evaluation points.""" @@ -308,12 +363,32 @@ def point_expr(self): def weight_expr(self): return self.quadrature_rule.weight_expression - @serial_cache(hashkey=make_basis_evaluation_key) + @staticmethod + def _make_basis_evaluation_key(finat_element, mt, entity_id): + ufl_element = mt.terminal.ufl_element() + domain = extract_unique_domain(mt.terminal) + coordinate_element = domain.ufl_coordinate_element() + # This way of caching is fragile. + # Should implement _hash_key_() in ModifiedTerminal and include the entire mt in the key, + # or only pass necessary bits in mt to basis_evaluation. 
+ return (ufl_element, mt.local_derivatives, entity_id, coordinate_element, mt.restriction, domain._ufl_hash_data_()) + + @cached_property + def _basis_evaluation_cache(self): + return {} + def basis_evaluation(self, finat_element, mt, entity_id): - return finat_element.basis_evaluation(mt.local_derivatives, - self.point_set, - (self.integration_dim, entity_id), - coordinate_mapping=CoordinateMapping(mt, self)) + key = PointSetContext._make_basis_evaluation_key(finat_element, mt, entity_id) + try: + return self._basis_evaluation_cache[key] + except KeyError: + val = finat_element.basis_evaluation( + mt.local_derivatives, + self.point_set, + (self.integration_dim, entity_id), + coordinate_mapping=CoordinateMapping(mt, self), + ) + return self._basis_evaluation_cache.setdefault(key, val) class GemPointContext(ContextBase): @@ -351,14 +426,15 @@ def __init__(self, context): # Can't put these in the ufl2gem mixin, since they (unlike # everything else) want access to the translation context. def cell_avg(self, o): - if self.context.integral_type != "cell": + domain = extract_unique_domain(o) + integral_type = self.context.domain_integral_type_map[domain] + if integral_type != "cell": # Need to create a cell-based quadrature rule and # translate the expression using that (c.f. CellVolume # below). 
raise NotImplementedError("CellAvg on non-cell integrals not yet implemented") integrand, = o.ufl_operands - domain = extract_unique_domain(o) - measure = ufl.Measure(self.context.integral_type, domain=domain) + measure = ufl.Measure(integral_type, domain=domain) integrand, degree, argument_multiindices = entity_avg(integrand / CellVolume(domain), measure, self.context.argument_multiindices) config = {name: getattr(self.context, name) @@ -369,17 +445,17 @@ def cell_avg(self, o): return expr def facet_avg(self, o): - if self.context.integral_type == "cell": + domain = extract_unique_domain(o) + integral_type = self.context.domain_integral_type_map[domain] + if integral_type == "cell": raise ValueError("Can't take FacetAvg in cell integral") integrand, = o.ufl_operands - domain = extract_unique_domain(o) - measure = ufl.Measure(self.context.integral_type, domain=domain) + measure = ufl.Measure(integral_type, domain=domain) integrand, degree, argument_multiindices = entity_avg(integrand / FacetArea(domain), measure, self.context.argument_multiindices) config = {name: getattr(self.context, name) for name in ["ufl_cell", "index_cache", "scalar_type", - "integration_dim", "entity_ids", - "integral_type"]} + "integration_dim"]} config.update(quadrature_degree=degree, interface=self.context, argument_multiindices=argument_multiindices) expr, = compile_ufl(integrand, PointSetContext(**config), point_sum=True) @@ -416,7 +492,7 @@ def translate_geometricquantity(terminal, mt, ctx): @translate.register(CellOrientation) def translate_cell_orientation(terminal, mt, ctx): - return ctx.cell_orientation(mt.restriction) + return ctx.cell_orientation(extract_unique_domain(terminal), mt.restriction) @translate.register(ReferenceCellVolume) @@ -426,7 +502,7 @@ def translate_reference_cell_volume(terminal, mt, ctx): @translate.register(ReferenceFacetVolume) def translate_reference_facet_volume(terminal, mt, ctx): - assert ctx.integral_type != "cell" + assert 
ctx.domain_integral_type_map[extract_unique_domain(terminal)] != "cell" # Sum of quadrature weights is entity volume return gem.optimise.aggressive_unroll(gem.index_sum(ctx.weight_expr, ctx.point_indices)) @@ -440,7 +516,7 @@ def translate_cell_facet_jacobian(terminal, mt, ctx): def callback(entity_id): return gem.Literal(make_cell_facet_jacobian(cell, facet_dim, entity_id)) - return ctx.entity_selector(callback, mt.restriction) + return ctx.entity_selector(callback, extract_unique_domain(terminal), mt.restriction) def make_cell_facet_jacobian(cell, facet_dim, facet_i): @@ -462,10 +538,13 @@ def make_cell_facet_jacobian(cell, facet_dim, facet_i): @translate.register(ReferenceNormal) def translate_reference_normal(terminal, mt, ctx): + domain = extract_unique_domain(terminal) + fiat_cell = as_fiat_cell(domain.ufl_cell()) + def callback(facet_i): - n = ctx.fiat_cell.compute_reference_normal(ctx.integration_dim, facet_i) + n = fiat_cell.compute_reference_normal(ctx.integration_dim, facet_i) return gem.Literal(n) - return ctx.entity_selector(callback, mt.restriction) + return ctx.entity_selector(callback, domain, mt.restriction) @translate.register(ReferenceCellEdgeVectors) @@ -498,7 +577,7 @@ def callback(entity_id): data = numpy.asarray(list(map(t, ps.points))) return gem.Literal(data.reshape(point_shape + data.shape[1:])) - return gem.partial_indexed(ctx.entity_selector(callback, mt.restriction), + return gem.partial_indexed(ctx.entity_selector(callback, extract_unique_domain(terminal), mt.restriction), ps.indices) @@ -520,42 +599,33 @@ def translate_spatialcoordinate(terminal, mt, ctx): return ctx.translator(expr) -class CellVolumeKernelInterface(ProxyKernelInterface): - # Since CellVolume is evaluated as a cell integral, we must ensure - # that the right restriction is applied when it is used in an - # interior facet integral. This proxy diverts coefficient - # translation to use a specified restriction. 
- - def __init__(self, wrapee, restriction): - ProxyKernelInterface.__init__(self, wrapee) - self.restriction = restriction - - def coefficient(self, ufl_coefficient, r): - assert r is None - return self._wrapee.coefficient(ufl_coefficient, self.restriction) - - @translate.register(CellVolume) def translate_cellvolume(terminal, mt, ctx): - integrand, degree = one_times(ufl.dx(domain=extract_unique_domain(terminal))) - interface = CellVolumeKernelInterface(ctx, mt.restriction) + domain = extract_unique_domain(terminal) + integrand, degree = one_times(ufl.dx(domain=domain)) + interface = CellVolumeKernelInterface(ctx, domain, mt.restriction) config = {name: getattr(ctx, name) for name in ["ufl_cell", "index_cache", "scalar_type"]} - config.update(interface=interface, quadrature_degree=degree, use_canonical_quadrature_point_ordering=False) + config.update( + interface=interface, + quadrature_degree=degree, + use_canonical_quadrature_point_ordering=False, + ) expr, = compile_ufl(integrand, PointSetContext(**config), point_sum=True) return expr @translate.register(FacetArea) def translate_facetarea(terminal, mt, ctx): - assert ctx.integral_type != 'cell' domain = extract_unique_domain(terminal) - integrand, degree = one_times(ufl.Measure(ctx.integral_type, domain=domain)) + integral_type = ctx.domain_integral_type_map[domain] + assert integral_type != 'cell' + integrand, degree = one_times(ufl.Measure(integral_type, domain=domain)) config = {name: getattr(ctx, name) for name in ["ufl_cell", "integration_dim", "scalar_type", - "entity_ids", "index_cache"]} + "index_cache"]} config.update(interface=ctx, quadrature_degree=degree, use_canonical_quadrature_point_ordering=False) expr, = compile_ufl(integrand, PointSetContext(**config), point_sum=True) return expr @@ -566,7 +636,7 @@ def translate_cellorigin(terminal, mt, ctx): domain = extract_unique_domain(terminal) coords = SpatialCoordinate(domain) expression = construct_modified_terminal(mt, coords) - point_set = 
PointSingleton((0.0,) * domain.topological_dimension()) + point_set = PointSingleton((0.0,) * domain.topological_dimension) config = {name: getattr(ctx, name) for name in ["ufl_cell", "index_cache", "scalar_type"]} @@ -577,13 +647,18 @@ def translate_cellorigin(terminal, mt, ctx): @translate.register(CellVertices) def translate_cell_vertices(terminal, mt, ctx): - coords = SpatialCoordinate(extract_unique_domain(terminal)) + domain = extract_unique_domain(terminal) + coords = SpatialCoordinate(domain) ufl_expr = construct_modified_terminal(mt, coords) ps = PointSet(numpy.array(ctx.fiat_cell.get_vertices())) - + interface = CellKernelInterface(ctx, domain) config = {name: getattr(ctx, name) for name in ["ufl_cell", "index_cache", "scalar_type"]} - config.update(interface=ctx, point_set=ps, use_canonical_quadrature_point_ordering=False) + config.update( + interface=interface, + point_set=ps, + use_canonical_quadrature_point_ordering=False, + ) context = PointSetContext(**config) expr = context.translator(ufl_expr) @@ -649,10 +724,10 @@ def callback(entity_id): # A numerical hack that FFC used to apply on FIAT tables still # lives on after ditching FFC and switching to FInAT. 
return ffc_rounding(square, ctx.epsilon) - table = ctx.entity_selector(callback, mt.restriction) + table = ctx.entity_selector(callback, extract_unique_domain(terminal), mt.restriction) if ctx.use_canonical_quadrature_point_ordering: quad_multiindex = ctx.quadrature_rule.point_set.indices - quad_multiindex_permuted = _make_quad_multiindex_permuted(mt, ctx) + quad_multiindex_permuted = _make_quad_multiindex_permuted(terminal, mt, ctx) mapper = gem.node.MemoizerArg(gem.optimise.filtered_replace_indices) table = mapper(table, tuple(zip(quad_multiindex, quad_multiindex_permuted))) argument_multiindex = ctx.argument_multiindices[terminal.number()] @@ -666,17 +741,13 @@ def translate_constant_value(terminal, mt, ctx): @translate.register(Coefficient) def translate_coefficient(terminal, mt, ctx): + domain = extract_unique_domain(terminal) vec = ctx.coefficient(terminal, mt.restriction) - - if terminal.ufl_element().family() == 'Real': - assert mt.local_derivatives == 0 - return vec - element = ctx.create_element(terminal.ufl_element(), restriction=mt.restriction) # Collect FInAT tabulation for all entities per_derivative = collections.defaultdict(list) - for entity_id in ctx.entity_ids: + for entity_id in ctx.entity_ids(domain): finat_dict = ctx.basis_evaluation(element, mt, entity_id) for alpha, table in finat_dict.items(): # Filter out irrelevant derivatives @@ -688,14 +759,14 @@ def translate_coefficient(terminal, mt, ctx): per_derivative[alpha].append(table) # Merge entity tabulations for each derivative - if len(ctx.entity_ids) == 1: + if len(ctx.entity_ids(domain)) == 1: def take_singleton(xs): x, = xs # asserts singleton return x per_derivative = {alpha: take_singleton(tables) for alpha, tables in per_derivative.items()} else: - f = ctx.entity_number(mt.restriction) + f = ctx.entity_number(domain, mt.restriction) per_derivative = {alpha: gem.select_expression(tables, f) for alpha, tables in per_derivative.items()} @@ -727,13 +798,13 @@ def take_singleton(xs): if 
ctx.use_canonical_quadrature_point_ordering: quad_multiindex = ctx.quadrature_rule.point_set.indices - quad_multiindex_permuted = _make_quad_multiindex_permuted(mt, ctx) + quad_multiindex_permuted = _make_quad_multiindex_permuted(terminal, mt, ctx) mapper = gem.node.MemoizerArg(gem.optimise.filtered_replace_indices) result = mapper(result, tuple(zip(quad_multiindex, quad_multiindex_permuted))) return result -def _make_quad_multiindex_permuted(mt, ctx): +def _make_quad_multiindex_permuted(terminal, mt, ctx): quad_rule = ctx.quadrature_rule # Note that each quad index here represents quad points on a physical # cell axis, but the table is indexed by indices representing the points @@ -746,7 +817,8 @@ def _make_quad_multiindex_permuted(mt, ctx): if len(extents) != 1: raise ValueError("Must have the same number of quadrature points in each symmetric axis") quad_multiindex_permuted = [] - o = ctx.entity_orientation(mt.restriction) + domain = extract_unique_domain(terminal) + o = ctx.entity_orientation(domain, mt.restriction) if not isinstance(o, FIATOrientation): raise ValueError(f"Expecting an instance of FIATOrientation : got {o}") eo = cell.extract_extrinsic_orientation(o) @@ -761,27 +833,23 @@ def _make_quad_multiindex_permuted(mt, ctx): return tuple(quad_multiindex_permuted) -def compile_ufl(expression, context, interior_facet=False, point_sum=False): +def compile_ufl(expression, context, point_sum=False): """Translate a UFL expression to GEM. :arg expression: The UFL expression to compile. :arg context: translation context - either a :class:`GemPointContext` or :class:`PointSetContext` - :arg interior_facet: If ``true``, treat expression as an interior - facet integral (default ``False``) :arg point_sum: If ``true``, return a `gem.IndexSum` of the final gem expression along the ``context.point_indices`` (if present). 
""" # Abs-simplification expression = simplify_abs(expression, context.complex_mode) - if interior_facet: - expressions = [] - for rs in itertools.product(("+", "-"), repeat=len(context.argument_multiindices)): - expressions.append(map_expr_dag(PickRestriction(*rs), expression)) - else: - expressions = [expression] - + arguments = extract_arguments(expression) + domains = [extract_unique_domain(argument) for argument in arguments] + integral_types = [context.domain_integral_type_map[domain] for domain in domains] + rs_tuples = [("+", "-") if integral_type.startswith("interior_facet") else (None, ) for integral_type in integral_types] + expressions = [map_expr_dag(PickRestriction(*rs), expression) for rs in itertools.product(*rs_tuples)] # Translate UFL to GEM, lowering finite element specific nodes result = map_expr_dags(context.translator, expressions) if point_sum: diff --git a/tsfc/kernel_args.py b/tsfc/kernel_args.py index a397f0f937..aa5e5472b5 100644 --- a/tsfc/kernel_args.py +++ b/tsfc/kernel_args.py @@ -54,9 +54,13 @@ class InteriorFacetKernelArg(KernelArg): ... -class ExteriorFacetOrientationKernelArg(KernelArg): +class OrientationsCellKernelArg(KernelArg): ... -class InteriorFacetOrientationKernelArg(KernelArg): +class OrientationsExteriorFacetKernelArg(KernelArg): + ... + + +class OrientationsInteriorFacetKernelArg(KernelArg): ... diff --git a/tsfc/kernel_interface/__init__.py b/tsfc/kernel_interface/__init__.py index 5114263848..3c20720c33 100644 --- a/tsfc/kernel_interface/__init__.py +++ b/tsfc/kernel_interface/__init__.py @@ -22,19 +22,23 @@ def constant(self, const): """Return the GEM expression corresponding to the constant.""" @abstractmethod - def cell_orientation(self, restriction): + def cell_orientation(self, domain, restriction): """Cell orientation as a GEM expression.""" @abstractmethod - def cell_size(self, restriction): + def cell_size(self, domain, restriction): """Mesh cell size as a GEM expression. 
Shape (nvertex, ) in FIAT vertex ordering.""" @abstractmethod - def entity_number(self, restriction): + def entity_ids(self, domain): + """Target indices of entity_number.""" + + @abstractmethod + def entity_number(self, domain, restriction): """Facet or vertex number as a GEM index.""" @abstractmethod - def entity_orientation(self, restriction): + def entity_orientation(self, domain, restriction): """Entity orientation as a GEM index.""" @abstractmethod @@ -47,5 +51,9 @@ def unsummed_coefficient_indices(self): """A set of indices that coefficient evaluation should not sum over. Used for macro-cell integration.""" + @abstractproperty + def domain_integral_type_map(self): + """domain integral_type map.""" + ProxyKernelInterface = make_proxy_class('ProxyKernelInterface', KernelInterface) diff --git a/tsfc/kernel_interface/common.py b/tsfc/kernel_interface/common.py index 53d3d96afa..40d585786f 100644 --- a/tsfc/kernel_interface/common.py +++ b/tsfc/kernel_interface/common.py @@ -3,6 +3,10 @@ import string from functools import cached_property, reduce from itertools import chain, product +import copy + +from ufl.utils.sequences import max_degree +from ufl.domain import extract_unique_domain import gem import gem.impero_utils as impero_utils @@ -20,20 +24,14 @@ from finat.ufl import MixedElement from tsfc.kernel_interface import KernelInterface from tsfc.logging import logger -from ufl.utils.sequences import max_degree class KernelBuilderBase(KernelInterface): """Helper class for building local assembly kernels.""" - def __init__(self, scalar_type, interior_facet=False): - """Initialise a kernel builder. 
- - :arg interior_facet: kernel accesses two cells - """ - assert isinstance(interior_facet, bool) + def __init__(self, scalar_type): + """Initialise a kernel builder.""" self.scalar_type = scalar_type - self.interior_facet = interior_facet self.prepare = [] self.finalise = [] @@ -58,9 +56,9 @@ def coefficient(self, ufl_coefficient, restriction): """A function that maps :class:`ufl.Coefficient`s to GEM expressions.""" kernel_arg = self.coefficient_map[ufl_coefficient] - if ufl_coefficient.ufl_element().family() == 'Real': - return kernel_arg - elif not self.interior_facet: + domain = extract_unique_domain(ufl_coefficient) + assert self._domain_integral_type_map[domain] is not None + if not self._domain_integral_type_map[domain].startswith("interior_facet"): return kernel_arg else: return kernel_arg[{'+': 0, '-': 1}[restriction]] @@ -68,34 +66,43 @@ def coefficient(self, ufl_coefficient, restriction): def constant(self, const): return self.constant_map[const] - def cell_orientation(self, restriction): + def cell_orientation(self, domain, restriction): """Cell orientation as a GEM expression.""" + if not hasattr(self, "_cell_orientations"): + raise RuntimeError("Haven't called set_cell_orientations") f = {None: 0, '+': 0, '-': 1}[restriction] - # Assume self._cell_orientations tuple is set up at this point. 
- co_int = self._cell_orientations[f] + co_int = self._cell_orientations[domain][f] return gem.Conditional(gem.Comparison("==", co_int, gem.Literal(1)), gem.Literal(-1), gem.Conditional(gem.Comparison("==", co_int, gem.Zero()), gem.Literal(1), gem.Literal(numpy.nan))) - def cell_size(self, restriction): + def cell_size(self, domain, restriction): if not hasattr(self, "_cell_sizes"): raise RuntimeError("Haven't called set_cell_sizes") - if self.interior_facet: - return self._cell_sizes[{'+': 0, '-': 1}[restriction]] + if self._domain_integral_type_map[domain].startswith("interior_facet"): + return self._cell_sizes[domain][{'+': 0, '-': 1}[restriction]] else: - return self._cell_sizes + return self._cell_sizes[domain] + + def entity_ids(self, domain): + """Target indices of entity_number.""" + if not hasattr(self, "_entity_ids"): + raise RuntimeError("Haven't called set_entity_numbers") + return self._entity_ids[domain] - def entity_number(self, restriction): + def entity_number(self, domain, restriction): """Facet or vertex number as a GEM index.""" - # Assume self._entity_number dict is set up at this point. - return self._entity_number[restriction] + if not hasattr(self, "_entity_numbers"): + raise RuntimeError("Haven't called set_entity_numbers") + return self._entity_numbers[domain][restriction] - def entity_orientation(self, restriction): + def entity_orientation(self, domain, restriction): """Facet orientation as a GEM index.""" - # Assume self._entity_orientation dict is set up at this point. 
- return self._entity_orientation[restriction] + if not hasattr(self, "_entity_orientations"): + raise RuntimeError("Haven't called set_entity_orientations") + return self._entity_orientations[domain][restriction] def apply_glue(self, prepare=None, finalise=None): """Append glue code for operations that are not handled in the @@ -120,6 +127,11 @@ def register_requirements(self, ir): # Nothing is required by default pass + @property + def domain_integral_type_map(self): + """domain integral_type map.""" + return self._domain_integral_type_map + class KernelBuilderMixin(object): """Mixin for KernelBuilder classes.""" @@ -143,8 +155,7 @@ def compile_integrand(self, integrand, params, ctx): config['quadrature_rule'] = quad_rule config['index_cache'] = ctx['index_cache'] expressions = fem.compile_ufl(integrand, - fem.PointSetContext(**config), - interior_facet=self.interior_facet) + fem.PointSetContext(**config)) ctx['quadrature_indices'].extend(quad_rule.point_set.indices) return expressions @@ -214,7 +225,7 @@ def compile_gem(self, ctx): # Let the kernel interface inspect the optimised IR to register # what kind of external data is required (e.g., cell orientations, # cell sizes, etc.). - oriented, needs_cell_sizes, tabulations, need_facet_orientation = self.register_requirements(expressions) + oriented, needs_cell_sizes, tabulations = self.register_requirements(expressions) # Extract Variables that are actually used active_variables = gem.extract_type(expressions, gem.Variable) @@ -225,7 +236,7 @@ def compile_gem(self, ctx): impero_c = impero_utils.compile_gem(assignments, index_ordering, remove_zeros=True) except impero_utils.NoopError: impero_c = None - return impero_c, oriented, needs_cell_sizes, tabulations, active_variables, need_facet_orientation + return impero_c, oriented, needs_cell_sizes, tabulations, active_variables def fem_config(self): """Return a dictionary used with fem.compile_ufl. 
@@ -238,12 +249,10 @@ def fem_config(self): integral_type = info.integral_type cell = info.domain.ufl_cell() fiat_cell = as_fiat_cell(cell) - integration_dim, entity_ids = lower_integral_type(fiat_cell, integral_type) + integration_dim, _ = lower_integral_type(fiat_cell, integral_type) return dict(interface=self, ufl_cell=cell, - integral_type=integral_type, integration_dim=integration_dim, - entity_ids=entity_ids, scalar_type=self.fem_scalar_type) def create_context(self): @@ -311,11 +320,20 @@ def set_quad_rule(params, cell, integral_type, functions): if e.family() in {"Quadrature", "Boundary Quadrature"}) if len(quad_data) == 0: quadrature_degree = params["estimated_polynomial_degree"] - if all((asarray(quadrature_degree) > 10 * asarray(e.degree())).all() for e in elements): - logger.warning("Estimated quadrature degree %s more " - "than tenfold greater than any " - "argument/coefficient degree (max %s)", - quadrature_degree, max_degree([e.degree() for e in elements])) + if "max_quadrature_degree" in params: + max_allowed_degree = params["max_quadrature_degree"] + if quadrature_degree > max_allowed_degree: + logger.info("Estimated quadrature degree %s greater " + "than maximum allowed degree %s. 
" + "Using maximum degree %s instead.", + quadrature_degree, max_allowed_degree, max_allowed_degree) + quadrature_degree = max_allowed_degree + else: + if all((asarray(quadrature_degree) > 10 * asarray(e.degree())).all() for e in elements): + logger.warning("Estimated quadrature degree %s more " + "than tenfold greater than any " + "argument/coefficient degree (max %s)", + quadrature_degree, max_degree([e.degree() for e in elements])) else: try: (quadrature_degree, quad_rule), = quad_data @@ -327,8 +345,8 @@ def set_quad_rule(params, cell, integral_type, functions): fiat_cell = as_fiat_cell(cell) finat_elements = set(create_element(e) for e in elements if e.family() != "Real") fiat_cells = [fiat_cell] + [finat_el.complex for finat_el in finat_elements] - fiat_cell = max_complex(fiat_cells) - + if any(c.is_macrocell() for c in fiat_cells): + fiat_cell = max_complex(fiat_cells) integration_dim, _ = lower_integral_type(fiat_cell, integral_type) quad_rule = fem.get_quadrature_rule(fiat_cell, integration_dim, quadrature_degree, scheme) params["quadrature_rule"] = quad_rule @@ -439,19 +457,16 @@ def check_requirements(ir): in one pass.""" cell_orientations = False cell_sizes = False - facet_orientation = False rt_tabs = {} for node in traversal(ir): if isinstance(node, gem.Variable): - if node.name == "cell_orientations": + if node.name == "cell_orientations_0": cell_orientations = True - elif node.name == "cell_sizes": + elif node.name == "cell_sizes_0": cell_sizes = True elif node.name.startswith("rt_"): rt_tabs[node.name] = node.shape - elif node.name == "facet_orientation": - facet_orientation = True - return cell_orientations, cell_sizes, tuple(sorted(rt_tabs.items())), facet_orientation + return cell_orientations, cell_sizes, tuple(sorted(rt_tabs.items())) def prepare_constant(constant, number): @@ -468,55 +483,68 @@ def prepare_constant(constant, number): constant.ufl_shape) -def prepare_coefficient(coefficient, name, interior_facet=False): +def 
prepare_coefficient(coefficient, name, domain_integral_type_map): """Bridges the kernel interface and the GEM abstraction for Coefficients. - :arg coefficient: UFL Coefficient - :arg name: unique name to refer to the Coefficient in the kernel - :arg interior_facet: interior facet integral? - :returns: (funarg, expression) - expression - GEM expression referring to the Coefficient - values - """ - assert isinstance(interior_facet, bool) + Parameters + ---------- + coefficient : ufl.Coefficient + UFL Coefficient. + name : str + Unique name to refer to the Coefficient in the kernel. + domain_integral_type_map : dict + Map from domain to integral_type. - if coefficient.ufl_element().family() == 'Real': - # Constant - value_size = coefficient.ufl_function_space().value_size - expression = gem.reshape(gem.Variable(name, (value_size,)), - coefficient.ufl_shape) - return expression + Returns + ------- + gem.Node + GEM expression referring to the Coefficient values. + """ finat_element = create_element(coefficient.ufl_element()) shape = finat_element.index_shape size = numpy.prod(shape, dtype=int) - - if not interior_facet: - expression = gem.reshape(gem.Variable(name, (size,)), shape) - else: + domain = extract_unique_domain(coefficient) + integral_type = domain_integral_type_map[domain] + if integral_type is None: + # This means that this coefficient does not exist in the DAG, + # so corresponding gem expression will never be needed. 
+ expression = None + elif integral_type.startswith("interior_facet"): varexp = gem.Variable(name, (2 * size,)) plus = gem.view(varexp, slice(size)) minus = gem.view(varexp, slice(size, 2 * size)) expression = (gem.reshape(plus, shape), gem.reshape(minus, shape)) + else: + expression = gem.reshape(gem.Variable(name, (size,)), shape) return expression -def prepare_arguments(arguments, multiindices, interior_facet=False, diagonal=False): +def prepare_arguments(arguments, multiindices, domain_integral_type_map, diagonal=False): """Bridges the kernel interface and the GEM abstraction for Arguments. Vector Arguments are rearranged here for interior facet integrals. - :arg arguments: UFL Arguments - :arg multiindices: Argument multiindices - :arg interior_facet: interior facet integral? - :arg diagonal: Are we assembling the diagonal of a rank-2 element tensor? - :returns: (funarg, expression) - expressions - GEM expressions referring to the argument - tensor - """ - assert isinstance(interior_facet, bool) + Parameters + ---------- + arguments : tuple + UFL Arguments. + multiindices : tuple + Argument multiindices. + domain_integral_type_map : dict + Map from domain to integral_type. + diagonal : bool + Are we assembling the diagonal of a rank-2 element tensor? + + Returns + ------- + tuple + Tuple of function arg and GEM expressions referring to the argument tensor. 
+ """ + if len(multiindices) != len(arguments): + raise ValueError(f"Got inconsistent lengths of arguments ({len(arguments)}) and multiindices ({len(multiindices)})") if len(arguments) == 0: # No arguments expression = gem.Indexed(gem.Variable("A", (1,)), (0,)) @@ -532,25 +560,30 @@ def prepare_arguments(arguments, multiindices, interior_facet=False, diagonal=Fa element, = set(elements) except ValueError: raise ValueError("Diagonal only for diagonal blocks (test and trial spaces the same)") - elements = (element, ) shapes = tuple(element.index_shape for element in elements) multiindices = multiindices[:1] + arguments = arguments[:1] def expression(restricted): return gem.Indexed(gem.reshape(restricted, *shapes), tuple(chain(*multiindices))) u_shape = numpy.array([numpy.prod(shape, dtype=int) for shape in shapes]) - if interior_facet: - c_shape = tuple(2 * u_shape) - slicez = [[slice(r * s, (r + 1) * s) - for r, s in zip(restrictions, u_shape)] - for restrictions in product((0, 1), repeat=len(arguments))] - else: - c_shape = tuple(u_shape) - slicez = [[slice(s) for s in u_shape]] - - varexp = gem.Variable("A", c_shape) + c_shape = copy.deepcopy(u_shape) + rs_tuples = [] + for arg_num, arg in enumerate(arguments): + integral_type = domain_integral_type_map[extract_unique_domain(arg)] + if integral_type is None: + raise RuntimeError(f"Can not determine integral_type on {arg}") + if integral_type.startswith("interior_facet"): + rs_tuples.append((0, 1)) + c_shape[arg_num] *= 2 + else: + rs_tuples.append((0, )) + slicez = [[slice(r * s, (r + 1) * s) + for r, s in zip(restrictions, u_shape)] + for restrictions in product(*rs_tuples)] + varexp = gem.Variable("A", tuple(c_shape)) expressions = [expression(gem.view(varexp, *slices)) for slices in slicez] return tuple(prune(expressions)) diff --git a/tsfc/kernel_interface/firedrake_loopy.py b/tsfc/kernel_interface/firedrake_loopy.py index f13a7d1e33..cc8fd7a61e 100644 --- a/tsfc/kernel_interface/firedrake_loopy.py +++ 
b/tsfc/kernel_interface/firedrake_loopy.py @@ -2,6 +2,8 @@ from collections import namedtuple, OrderedDict from ufl import Coefficient, FunctionSpace +from ufl.domain import MeshSequence + from finat.ufl import MixedElement as ufl_MixedElement, FiniteElement import gem @@ -10,8 +12,8 @@ import loopy as lp from tsfc import kernel_args -from finat.element_factory import create_element -from tsfc.kernel_interface.common import KernelBuilderBase as _KernelBuilderBase, KernelBuilderMixin, get_index_names, check_requirements, prepare_coefficient, prepare_arguments, prepare_constant +from finat.element_factory import as_fiat_cell, create_element +from tsfc.kernel_interface.common import KernelBuilderBase as _KernelBuilderBase, KernelBuilderMixin, get_index_names, check_requirements, prepare_coefficient, prepare_arguments, prepare_constant, lower_integral_type from tsfc.loopy import generate as generate_loopy @@ -23,16 +25,29 @@ 'flop_count', 'event']) +ActiveDomainNumbers = namedtuple('ActiveDomainNumbers', ['coordinates', + 'cell_orientations', + 'cell_sizes', + 'exterior_facets', + 'interior_facets', + 'orientations_cell', + 'orientations_exterior_facet', + 'orientations_interior_facet']) +ActiveDomainNumbers.__doc__ = """ + Active domain numbers collected for each key. + + """ + + class Kernel: - __slots__ = ("ast", "arguments", "integral_type", "oriented", "subdomain_id", - "domain_number", "needs_cell_sizes", "tabulations", + __slots__ = ("ast", "arguments", "integral_type", "subdomain_id", + "domain_number", "active_domain_numbers", "tabulations", "coefficient_numbers", "name", "flop_count", "event", "__weakref__") """A compiled Kernel object. :kwarg ast: The loopy kernel object. :kwarg integral_type: The type of integral. - :kwarg oriented: Does the kernel require cell_orientations. :kwarg subdomain_id: What is the subdomain id for this kernel. 
:kwarg domain_number: Which domain number in the original form does this kernel correspond to (can be used to index into @@ -40,15 +55,13 @@ class Kernel: :kwarg coefficient_numbers: A list of which coefficients from the form the kernel needs. :kwarg tabulations: The runtime tabulations this kernel requires - :kwarg needs_cell_sizes: Does the kernel require cell sizes. :kwarg name: The name of this kernel. :kwarg flop_count: Estimated total flops for this kernel. :kwarg event: name for logging event """ - def __init__(self, ast=None, arguments=None, integral_type=None, oriented=False, - subdomain_id=None, domain_number=None, + def __init__(self, ast=None, arguments=None, integral_type=None, + subdomain_id=None, domain_number=None, active_domain_numbers=None, coefficient_numbers=(), - needs_cell_sizes=False, tabulations=None, flop_count=0, name=None, @@ -57,11 +70,10 @@ def __init__(self, ast=None, arguments=None, integral_type=None, oriented=False, self.ast = ast self.arguments = arguments self.integral_type = integral_type - self.oriented = oriented self.domain_number = domain_number + self.active_domain_numbers = active_domain_numbers self.subdomain_id = subdomain_id self.coefficient_numbers = coefficient_numbers - self.needs_cell_sizes = needs_cell_sizes self.tabulations = tabulations self.flop_count = flop_count self.name = name @@ -70,21 +82,9 @@ def __init__(self, ast=None, arguments=None, integral_type=None, oriented=False, class KernelBuilderBase(_KernelBuilderBase): - def __init__(self, scalar_type, interior_facet=False): - """Initialise a kernel builder. 
- - :arg interior_facet: kernel accesses two cells - """ - super().__init__(scalar_type=scalar_type, interior_facet=interior_facet) - - # Cell orientation - if self.interior_facet: - cell_orientations = gem.Variable("cell_orientations", (2,), dtype=gem.uint_type) - self._cell_orientations = (gem.Indexed(cell_orientations, (0,)), - gem.Indexed(cell_orientations, (1,))) - else: - cell_orientations = gem.Variable("cell_orientations", (1,), dtype=gem.uint_type) - self._cell_orientations = (gem.Indexed(cell_orientations, (0,)),) + def __init__(self, scalar_type): + """Initialise a kernel builder.""" + super().__init__(scalar_type=scalar_type) def _coefficient(self, coefficient, name): """Prepare a coefficient. Adds glue code for the coefficient @@ -94,24 +94,58 @@ def _coefficient(self, coefficient, name): :arg name: coefficient name :returns: GEM expression representing the coefficient """ - expr = prepare_coefficient(coefficient, name, interior_facet=self.interior_facet) + expr = prepare_coefficient(coefficient, name, self._domain_integral_type_map) self.coefficient_map[coefficient] = expr return expr - def set_coordinates(self, domain): - """Prepare the coordinate field. + def set_coordinates(self, domains): + """Set coordinates for each domain. + + Parameters + ---------- + domains : list or tuple + All domains in the form. - :arg domain: :class:`ufl.Domain` """ # Create a fake coordinate coefficient for a domain. - f = Coefficient(FunctionSpace(domain, domain.ufl_coordinate_element())) - self.domain_coordinate[domain] = f - self._coefficient(f, "coords") + for i, domain in enumerate(domains): + if isinstance(domain, MeshSequence): + raise RuntimeError("Found a MeshSequence") + f = Coefficient(FunctionSpace(domain, domain.ufl_coordinate_element())) + self.domain_coordinate[domain] = f + self._coefficient(f, f"coords_{i}") + + def set_cell_orientations(self, domains): + """Set cell orientations for each domain. 
+ + Parameters + ---------- + domains : list or tuple + All domains in the form. - def set_cell_sizes(self, domain): - """Setup a fake coefficient for "cell sizes". + """ + # Cell orientation + self._cell_orientations = {} + for i, domain in enumerate(domains): + integral_type = self._domain_integral_type_map[domain] + if integral_type is None: + # See comment in prepare_coefficient. + self._cell_orientations[domain] = None + elif integral_type.startswith("interior_facet"): + cell_orientations = gem.Variable(f"cell_orientations_{i}", (2,), dtype=gem.uint_type) + self._cell_orientations[domain] = (gem.Indexed(cell_orientations, (0,)), + gem.Indexed(cell_orientations, (1,))) + else: + cell_orientations = gem.Variable(f"cell_orientations_{i}", (1,), dtype=gem.uint_type) + self._cell_orientations[domain] = (gem.Indexed(cell_orientations, (0,)),) - :arg domain: The domain of the integral. + def set_cell_sizes(self, domains): + """Setup a fake coefficient for "cell sizes" for each domain. + + Parameters + ---------- + domains : list or tuple + All domains in the form. This is required for scaling of derivative basis functions on physically mapped elements (Argyris, Bell, etc...). We need a @@ -121,13 +155,15 @@ def set_cell_sizes(self, domain): Should the domain have topological dimension 0 this does nothing. """ - if domain.ufl_cell().topological_dimension() > 0: - # Can't create P1 since only P0 is a valid finite element if - # topological_dimension is 0 and the concept of "cell size" - # is not useful for a vertex. - f = Coefficient(FunctionSpace(domain, FiniteElement("P", domain.ufl_cell(), 1))) - expr = prepare_coefficient(f, "cell_sizes", interior_facet=self.interior_facet) - self._cell_sizes = expr + self._cell_sizes = {} + for i, domain in enumerate(domains): + if domain.ufl_cell().topological_dimension > 0: + # Can't create P1 since only P0 is a valid finite element if + # topological_dimension is 0 and the concept of "cell size" + # is not useful for a vertex. 
+ f = Coefficient(FunctionSpace(domain, FiniteElement("P", domain.ufl_cell(), 1))) + expr = prepare_coefficient(f, f"cell_sizes_{i}", self._domain_integral_type_map) + self._cell_sizes[domain] = expr def create_element(self, element, **kwargs): """Create a FInAT element (suitable for tabulating with) given @@ -194,14 +230,14 @@ def set_coefficient_numbers(self, coefficient_numbers): def register_requirements(self, ir): """Inspect what is referenced by the IR that needs to be provided by the kernel interface.""" - self.oriented, self.cell_sizes, self.tabulations, _ = check_requirements(ir) + self.oriented, self.cell_sizes, self.tabulations = check_requirements(ir) def set_output(self, o): """Produce the kernel return argument""" loopy_arg = lp.GlobalArg(o.name, dtype=self.scalar_type, shape=o.shape) self.output_arg = kernel_args.OutputKernelArg(loopy_arg) - def construct_kernel(self, impero_c, index_names, needs_external_coords, log=False): + def construct_kernel(self, impero_c, index_names, needs_external_coords, log=False, name=None): """Constructs an :class:`ExpressionKernel`. :arg impero_c: gem.ImperoC object that represents the kernel @@ -214,10 +250,12 @@ def construct_kernel(self, impero_c, index_names, needs_external_coords, log=Fal """ args = [self.output_arg] if self.oriented: - funarg = self.generate_arg_from_expression(self._cell_orientations, dtype=numpy.int32) + cell_orientations, = tuple(self._cell_orientations.values()) + funarg = self.generate_arg_from_expression(cell_orientations, dtype=numpy.int32) args.append(kernel_args.CellOrientationsKernelArg(funarg)) if self.cell_sizes: - funarg = self.generate_arg_from_expression(self._cell_sizes) + cell_sizes, = tuple(self._cell_sizes.values()) + funarg = self.generate_arg_from_expression(cell_sizes) args.append(kernel_args.CellSizesKernelArg(funarg)) for _, expr in self.coefficient_map.items(): # coefficient_map is OrderedDict. 
@@ -235,7 +273,7 @@ def construct_kernel(self, impero_c, index_names, needs_external_coords, log=Fal loopy_args = [arg.loopy_arg for arg in args] - name = "expression_kernel" + name = name or "expression_kernel" loopy_kernel, event = generate_loopy(impero_c, loopy_args, self.scalar_type, name, index_names, log=log) return ExpressionKernel(loopy_kernel, self.oriented, self.cell_sizes, @@ -249,48 +287,18 @@ class KernelBuilder(KernelBuilderBase, KernelBuilderMixin): def __init__(self, integral_data_info, scalar_type, diagonal=False): """Initialise a kernel builder.""" - integral_type = integral_data_info.integral_type - super(KernelBuilder, self).__init__(scalar_type, integral_type.startswith("interior_facet")) + super(KernelBuilder, self).__init__(scalar_type) self.fem_scalar_type = scalar_type - self.diagonal = diagonal self.local_tensor = None self.coefficient_number_index_map = OrderedDict() - - # Facet number - if integral_type in ['exterior_facet', 'exterior_facet_vert']: - facet = gem.Variable('facet', (1,), dtype=gem.uint_type) - self._entity_number = {None: gem.VariableIndex(gem.Indexed(facet, (0,)))} - facet_orientation = gem.Variable('facet_orientation', (1,), dtype=gem.uint_type) - self._entity_orientation = {None: gem.OrientationVariableIndex(gem.Indexed(facet_orientation, (0,)))} - elif integral_type in ['interior_facet', 'interior_facet_vert']: - facet = gem.Variable('facet', (2,), dtype=gem.uint_type) - self._entity_number = { - '+': gem.VariableIndex(gem.Indexed(facet, (0,))), - '-': gem.VariableIndex(gem.Indexed(facet, (1,))) - } - facet_orientation = gem.Variable('facet_orientation', (2,), dtype=gem.uint_type) - self._entity_orientation = { - '+': gem.OrientationVariableIndex(gem.Indexed(facet_orientation, (0,))), - '-': gem.OrientationVariableIndex(gem.Indexed(facet_orientation, (1,))) - } - elif integral_type == 'interior_facet_horiz': - self._entity_number = {'+': 1, '-': 0} - facet_orientation = gem.Variable('facet_orientation', (1,), 
dtype=gem.uint_type) # base mesh entity orientation - self._entity_orientation = { - '+': gem.OrientationVariableIndex(gem.Indexed(facet_orientation, (0,))), - '-': gem.OrientationVariableIndex(gem.Indexed(facet_orientation, (0,))) - } - - self.set_arguments(integral_data_info.arguments) self.integral_data_info = integral_data_info + self._domain_integral_type_map = integral_data_info.domain_integral_type_map # For consistency with ExpressionKernelBuilder. + self.set_arguments() - def set_arguments(self, arguments): - """Process arguments. - - :arg arguments: :class:`ufl.Argument`s - :returns: GEM expression representing the return variable - """ + def set_arguments(self): + """Process arguments.""" + arguments = self.integral_data_info.arguments argument_multiindices = tuple(create_element(arg.ufl_element()).get_indices() for arg in arguments) if self.diagonal: @@ -301,11 +309,77 @@ def set_arguments(self, arguments): argument_multiindices = (a, a) return_variables = prepare_arguments(arguments, argument_multiindices, - interior_facet=self.interior_facet, + self.integral_data_info.domain_integral_type_map, diagonal=self.diagonal) self.return_variables = return_variables self.argument_multiindices = argument_multiindices + def set_entity_numbers(self, domains): + """Set entity numbers for each domain. + + Parameters + ---------- + domains : list or tuple + All domains in the form. + + """ + self._entity_numbers = {} + self._entity_ids = {} + for i, domain in enumerate(domains): + fiat_cell = as_fiat_cell(domain.ufl_cell()) + integral_type = self.integral_data_info.domain_integral_type_map[domain] + if integral_type is None: + # Set placeholder for unused domain. 
+ entity_ids = None + else: + _, entity_ids = lower_integral_type(fiat_cell, integral_type) + self._entity_ids[domain] = entity_ids + if integral_type in ['exterior_facet', 'exterior_facet_vert']: + facet = gem.Variable(f'facet_{i}', (1,), dtype=gem.uint_type) + self._entity_numbers[domain] = {None: gem.VariableIndex(gem.Indexed(facet, (0,))), } + elif integral_type in ['interior_facet', 'interior_facet_vert']: + facet = gem.Variable(f'facet_{i}', (2,), dtype=gem.uint_type) + self._entity_numbers[domain] = { + '+': gem.VariableIndex(gem.Indexed(facet, (0,))), + '-': gem.VariableIndex(gem.Indexed(facet, (1,))) + } + elif integral_type == 'interior_facet_horiz': + self._entity_numbers[domain] = {'+': 1, '-': 0} + else: + self._entity_numbers[domain] = {None: None} + + def set_entity_orientations(self, domains): + """Set entity orientations for each domain. + + Parameters + ---------- + domains : list or tuple + All domains in the form. + + """ + self._entity_orientations = {} + for i, domain in enumerate(domains): + integral_type = self.integral_data_info.domain_integral_type_map[domain] + variable_name = f"entity_orientations_{i}" + if integral_type in ['exterior_facet', 'exterior_facet_vert']: + o = gem.Variable(variable_name, (1,), dtype=gem.uint_type) + self._entity_orientations[domain] = {None: gem.OrientationVariableIndex(gem.Indexed(o, (0,))), } + elif integral_type in ['interior_facet', 'interior_facet_vert']: + o = gem.Variable(variable_name, (2,), dtype=gem.uint_type) + self._entity_orientations[domain] = { + '+': gem.OrientationVariableIndex(gem.Indexed(o, (0,))), + '-': gem.OrientationVariableIndex(gem.Indexed(o, (1,))) + } + elif integral_type == 'interior_facet_horiz': + o = gem.Variable(variable_name, (1,), dtype=gem.uint_type) # base mesh entity orientation + self._entity_orientations[domain] = { + '+': gem.OrientationVariableIndex(gem.Indexed(o, (0,))), + '-': gem.OrientationVariableIndex(gem.Indexed(o, (0,))) + } + else: + o = 
gem.Variable(variable_name, (1,), dtype=gem.uint_type) + self._entity_orientations[domain] = {None: gem.OrientationVariableIndex(gem.Indexed(o, (0,))), } + def set_coefficients(self): """Prepare the coefficients of the form.""" info = self.integral_data_info @@ -342,7 +416,7 @@ def construct_kernel(self, name, ctx, log=False): :arg log: bool if the Kernel should be profiled with Log events :returns: :class:`Kernel` object """ - impero_c, oriented, needs_cell_sizes, tabulations, active_variables, need_facet_orientation = self.compile_gem(ctx) + impero_c, _, _, tabulations, active_variables = self.compile_gem(ctx) if impero_c is None: return self.construct_empty_kernel(name) info = self.integral_data_info @@ -358,50 +432,91 @@ def construct_kernel(self, name, ctx, log=False): # Add return arg funarg = self.generate_arg_from_expression(self.return_variables) args = [kernel_args.OutputKernelArg(funarg)] - # Add coordinates arg - coord = self.domain_coordinate[info.domain] - expr = self.coefficient_map[coord] - funarg = self.generate_arg_from_expression(expr) - args.append(kernel_args.CoordinatesKernelArg(funarg)) - if oriented: - funarg = self.generate_arg_from_expression(self._cell_orientations, dtype=numpy.int32) - args.append(kernel_args.CellOrientationsKernelArg(funarg)) - if needs_cell_sizes: - funarg = self.generate_arg_from_expression(self._cell_sizes) - args.append(kernel_args.CellSizesKernelArg(funarg)) + active_domain_numbers_coordinates, args_ = self.make_active_domain_numbers({d: self.coefficient_map[c] for d, c in self.domain_coordinate.items()}, + active_variables, + kernel_args.CoordinatesKernelArg) + args.extend(args_) + active_domain_numbers_cell_orientations, args_ = self.make_active_domain_numbers(self._cell_orientations, + active_variables, + kernel_args.CellOrientationsKernelArg, + dtype=numpy.int32) + args.extend(args_) + active_domain_numbers_cell_sizes, args_ = self.make_active_domain_numbers(self._cell_sizes, + active_variables, + 
kernel_args.CellSizesKernelArg) + args.extend(args_) coefficient_indices = OrderedDict() for coeff, (number, index) in self.coefficient_number_index_map.items(): a = coefficient_indices.setdefault(number, []) expr = self.coefficient_map[coeff] + if expr is None: + # See comment in prepare_coefficient. + continue var, = gem.extract_type(expr if isinstance(expr, tuple) else (expr, ), gem.Variable) if var in active_variables: funarg = self.generate_arg_from_expression(expr) args.append(kernel_args.CoefficientKernelArg(funarg)) a.append(index) - - # now constants for gemexpr in self.constant_map.values(): funarg = self.generate_arg_from_expression(gemexpr) args.append(kernel_args.ConstantKernelArg(funarg)) - coefficient_indices = tuple(tuple(v) for v in coefficient_indices.values()) assert len(coefficient_indices) == len(info.coefficient_numbers) - if info.integral_type in ["exterior_facet", "exterior_facet_vert"]: - ext_loopy_arg = lp.GlobalArg("facet", numpy.uint32, shape=(1,)) - args.append(kernel_args.ExteriorFacetKernelArg(ext_loopy_arg)) - elif info.integral_type in ["interior_facet", "interior_facet_vert"]: - int_loopy_arg = lp.GlobalArg("facet", numpy.uint32, shape=(2,)) - args.append(kernel_args.InteriorFacetKernelArg(int_loopy_arg)) - # The submesh PR will introduce a robust mechanism to check if a Variable - # is actually used in the final form of the expression, so there will be - # no need to get "need_facet_orientation" from self.compile_gem(). 
- if need_facet_orientation: - if info.integral_type == "exterior_facet": - ext_ornt_loopy_arg = lp.GlobalArg("facet_orientation", gem.uint_type, shape=(1,)) - args.append(kernel_args.ExteriorFacetOrientationKernelArg(ext_ornt_loopy_arg)) - elif info.integral_type == "interior_facet": - int_ornt_loopy_arg = lp.GlobalArg("facet_orientation", gem.uint_type, shape=(2,)) - args.append(kernel_args.InteriorFacetOrientationKernelArg(int_ornt_loopy_arg)) + ext_dict = {} + for domain, expr in self._entity_numbers.items(): + integral_type = info.domain_integral_type_map[domain] + ext_dict[domain] = expr[None].expression if integral_type in ["exterior_facet", "exterior_facet_vert"] else None + active_domain_numbers_exterior_facets, args_ = self.make_active_domain_numbers( + ext_dict, + active_variables, + kernel_args.ExteriorFacetKernelArg, + dtype=numpy.uint32, + ) + args.extend(args_) + int_dict = {} + for domain, expr in self._entity_numbers.items(): + integral_type = info.domain_integral_type_map[domain] + int_dict[domain] = expr['+'].expression if integral_type in ["interior_facet", "interior_facet_vert"] else None + active_domain_numbers_interior_facets, args_ = self.make_active_domain_numbers( + int_dict, + active_variables, + kernel_args.InteriorFacetKernelArg, + dtype=numpy.uint32, + ) + args.extend(args_) + cell_dict = {} + for domain, expr in self._entity_orientations.items(): + integral_type = info.domain_integral_type_map[domain] + cell_dict[domain] = expr[None].expression if integral_type == "cell" else None + active_domain_numbers_orientations_cell, args_ = self.make_active_domain_numbers( + cell_dict, + active_variables, + kernel_args.OrientationsCellKernelArg, + dtype=gem.uint_type, + ) + args.extend(args_) + ext_dict = {} + for domain, expr in self._entity_orientations.items(): + integral_type = info.domain_integral_type_map[domain] + ext_dict[domain] = expr[None].expression if integral_type in ["exterior_facet", "exterior_facet_vert"] else None + 
active_domain_numbers_orientations_exterior_facet, args_ = self.make_active_domain_numbers( + ext_dict, + active_variables, + kernel_args.OrientationsExteriorFacetKernelArg, + dtype=gem.uint_type, + ) + args.extend(args_) + int_dict = {} + for domain, expr in self._entity_orientations.items(): + integral_type = info.domain_integral_type_map[domain] + int_dict[domain] = expr['+'].expression if integral_type in ["interior_facet", "interior_facet_vert", "interior_facet_horiz"] else None + active_domain_numbers_orientations_interior_facet, args_ = self.make_active_domain_numbers( + int_dict, + active_variables, + kernel_args.OrientationsInteriorFacetKernelArg, + dtype=gem.uint_type, + ) + args.extend(args_) for name_, shape in tabulations: tab_loopy_arg = lp.GlobalArg(name_, dtype=self.scalar_type, shape=shape) args.append(kernel_args.TabulationKernelArg(tab_loopy_arg)) @@ -414,9 +529,17 @@ def construct_kernel(self, name, ctx, log=False): integral_type=info.integral_type, subdomain_id=info.subdomain_id, domain_number=info.domain_number, + active_domain_numbers=ActiveDomainNumbers( + coordinates=tuple(active_domain_numbers_coordinates), + cell_orientations=tuple(active_domain_numbers_cell_orientations), + cell_sizes=tuple(active_domain_numbers_cell_sizes), + exterior_facets=tuple(active_domain_numbers_exterior_facets), + interior_facets=tuple(active_domain_numbers_interior_facets), + orientations_cell=tuple(active_domain_numbers_orientations_cell), + orientations_exterior_facet=tuple(active_domain_numbers_orientations_exterior_facet), + orientations_interior_facet=tuple(active_domain_numbers_orientations_interior_facet), + ), coefficient_numbers=tuple(zip(info.coefficient_numbers, coefficient_indices)), - oriented=oriented, - needs_cell_sizes=needs_cell_sizes, tabulations=tabulations, flop_count=flop_count, name=name, @@ -429,3 +552,36 @@ def construct_empty_kernel(self, name): :returns: None """ return None + + def make_active_domain_numbers(self, domain_expr_dict, 
active_variables, kernel_arg_type, dtype=None): + """Make active domain numbers. + + Parameters + ---------- + domain_expr_dict : dict + Map from domains to expressions; must be ordered as extract_domains(form). + active_variables : tuple + Active variables in the DAG. + kernel_arg_type : KernelArg + Type of `KernelArg`. + dtype : numpy.dtype + dtype. + + Returns + ------- + tuple + Tuple of active domain numbers and corresponding kernel args. + + """ + active_dns = [] + args = [] + for i, expr in enumerate(domain_expr_dict.values()): + if expr is None: + var = None + else: + var, = gem.extract_type(expr if isinstance(expr, tuple) else (expr, ), gem.Variable) + if var in active_variables: + funarg = self.generate_arg_from_expression(expr, dtype=dtype) + args.append(kernel_arg_type(funarg)) + active_dns.append(i) + return tuple(active_dns), tuple(args) diff --git a/tsfc/modified_terminals.py b/tsfc/modified_terminals.py index 8c5162bf97..a26e5c2980 100644 --- a/tsfc/modified_terminals.py +++ b/tsfc/modified_terminals.py @@ -158,7 +158,7 @@ def construct_modified_terminal(mt, terminal): if mt.reference_value: expr = ReferenceValue(expr) - dim = extract_unique_domain(expr).topological_dimension() + dim = extract_unique_domain(expr).topological_dimension for n in range(mt.local_derivatives): # Return zero if expression is trivially constant. 
This has to # happen here because ReferenceGrad has no access to the diff --git a/tsfc/ufl_utils.py b/tsfc/ufl_utils.py index 18173a9660..c26febd68e 100644 --- a/tsfc/ufl_utils.py +++ b/tsfc/ufl_utils.py @@ -40,6 +40,7 @@ def compute_form_data(form, do_apply_integral_scaling=True, do_apply_geometry_lowering=True, preserve_geometry_types=preserve_geometry_types, + do_apply_default_restrictions=True, do_apply_restrictions=True, do_estimate_degrees=True, coefficients_to_split=None, @@ -57,6 +58,7 @@ def compute_form_data(form, do_apply_integral_scaling=do_apply_integral_scaling, do_apply_geometry_lowering=do_apply_geometry_lowering, preserve_geometry_types=preserve_geometry_types, + do_apply_default_restrictions=do_apply_default_restrictions, do_apply_restrictions=do_apply_restrictions, do_estimate_degrees=do_estimate_degrees, do_replace_functions=True, @@ -166,6 +168,8 @@ def _modified_terminal(self, o): positive_restricted = _modified_terminal negative_restricted = _modified_terminal + single_value_restricted = _modified_terminal + to_be_restricted = _modified_terminal reference_grad = _modified_terminal reference_value = _modified_terminal @@ -197,8 +201,11 @@ def modified_terminal(self, o): mt = analyse_modified_terminal(o) t = mt.terminal r = mt.restriction - if isinstance(t, Argument) and r != self.restrictions[t.number()]: - return Zero(o.ufl_shape, o.ufl_free_indices, o.ufl_index_dimensions) + if isinstance(t, Argument) and r in ['+', '-']: + if r == self.restrictions[t.number()]: + return o + else: + return Zero(o.ufl_shape, o.ufl_free_indices, o.ufl_index_dimensions) else: return o