
Commit

Merge branch 'development' of github.com:ECP-WarpX/WarpX into 2d_luminosity
aeriforme committed Jan 14, 2025
2 parents a5be100 + 57703f8 commit c180e20
Showing 74 changed files with 445 additions and 195 deletions.
1 change: 1 addition & 0 deletions .azure-pipelines.yml
@@ -13,6 +13,7 @@ pr:
paths:
exclude:
- Docs
- '**/*.rst'

jobs:
- job:
1 change: 1 addition & 0 deletions .github/workflows/clang_sanitizers.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-clangsanitizers
1 change: 1 addition & 0 deletions .github/workflows/clang_tidy.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-clangtidy
3 changes: 2 additions & 1 deletion .github/workflows/cuda.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-cuda
@@ -126,7 +127,7 @@ jobs:
which nvcc || echo "nvcc not in PATH!"
git clone https://github.com/AMReX-Codes/amrex.git ../amrex
- cd ../amrex && git checkout --detach b3f67385e62f387b548389222840486c0fffca57 && cd -
+ cd ../amrex && git checkout --detach 25.01 && cd -
make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4
ccache -s
1 change: 1 addition & 0 deletions .github/workflows/hip.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-hip
1 change: 1 addition & 0 deletions .github/workflows/insitu.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-insituvis
1 change: 1 addition & 0 deletions .github/workflows/intel.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-intel
1 change: 1 addition & 0 deletions .github/workflows/macos.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-macos
1 change: 1 addition & 0 deletions .github/workflows/ubuntu.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-ubuntu
1 change: 1 addition & 0 deletions .github/workflows/windows.yml
@@ -7,6 +7,7 @@ on:
pull_request:
paths-ignore:
- "Docs/**"
- "**.rst"

concurrency:
group: ${{ github.ref }}-${{ github.head_ref }}-windows
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -69,7 +69,7 @@ repos:
# Python: Ruff linter & formatter
# https://docs.astral.sh/ruff/
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.8.4
+ rev: v0.9.1
hooks:
# Run the linter
- id: ruff
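The bump from ruff v0.8.4 to v0.9.1 appears to drive most of the Python reformatting further down in this commit: the newer formatter adds spaces around operators inside f-string replacement fields and, for long asserts, parenthesizes the message instead of wrapping the condition. A minimal before/after sketch (illustrative only; the variable names are made up and not taken from a file in this commit):

    import time

    n_modes = 3
    t0 = time.perf_counter()
    t1 = time.perf_counter()

    # as formatted by ruff <= 0.8: operator kept tight in the f-string field,
    # over-long assert wrapped around the condition
    print(f"elapsed: {t1-t0:.3f}s")
    assert (
        n_modes == 3
    ), "Wrong number of angular modes stored or possible incorrect ordering when flushed"

    # as formatted by ruff >= 0.9: operator spaced, message parenthesized instead
    print(f"elapsed: {t1 - t0:.3f}s")
    assert n_modes == 3, (
        "Wrong number of angular modes stored or possible incorrect ordering when flushed"
    )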
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -1,7 +1,7 @@
# Preamble ####################################################################
#
cmake_minimum_required(VERSION 3.24.0)
- project(WarpX VERSION 24.12)
+ project(WarpX VERSION 25.01)

include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake)

@@ -480,7 +480,7 @@ foreach(D IN LISTS WarpX_DIMS)
warpx_enable_IPO(pyWarpX_${SD})
else()
# conditionally defined target in pybind11
- # https://github.com/pybind/pybind11/blob/v2.12.0/tools/pybind11Common.cmake#L397-L403
+ # https://github.com/pybind/pybind11/blob/v2.13.0/tools/pybind11Common.cmake#L407-L413
target_link_libraries(pyWarpX_${SD} PRIVATE pybind11::lto)
endif()
endif()
4 changes: 2 additions & 2 deletions Docs/source/conf.py
@@ -107,9 +107,9 @@ def __init__(self, *args, **kwargs):
# built documents.
#
# The short X.Y version.
version = "24.12"
version = "25.01"
# The full version, including alpha/beta/rc tags.
release = "24.12"
release = "25.01"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
5 changes: 0 additions & 5 deletions Docs/source/developers/checksum.rst
@@ -22,11 +22,6 @@ This relies on the function ``evaluate_checksum``:

.. autofunction:: checksumAPI.evaluate_checksum

- Here's an example:
-
- .. literalinclude:: ../../../Examples/Tests/embedded_circle/analysis.py
- :language: python
-
This can also be included as part of an existing analysis script.
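For reference, a minimal sketch of what such a call can look like inside an analysis script (the test name is hypothetical and the keyword arguments are assumptions that may not match the current ``checksumAPI`` signature exactly):

    import sys

    from checksumAPI import evaluate_checksum

    # compare the checksums of the output passed on the command line
    # against the benchmark stored for this test
    evaluate_checksum(
        test_name="test_2d_example",  # hypothetical test name
        output_file=sys.argv[1],      # plotfile or openPMD output to check
        rtol=1e-9,                    # assumed tolerance argument
    )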

How to evaluate checksums from the command line
4 changes: 2 additions & 2 deletions Docs/source/developers/fields.rst
@@ -119,9 +119,9 @@ Bilinear filter

The multi-pass bilinear filter (applied on the current density) is implemented in ``Source/Filter/``, and class ``WarpX`` holds an instance of this class in member variable ``WarpX::bilinear_filter``. For performance reasons (to avoid creating too many guard cells), this filter is directly applied in communication routines, see ``WarpX::AddCurrentFromFineLevelandSumBoundary`` above and

- .. doxygenfunction:: WarpX::ApplyFilterMF(const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> &mfvec, int lev, int idim)
+ .. doxygenfunction:: WarpX::ApplyFilterMF(const ablastr::fields::MultiLevelVectorField &mfvec, int lev, int idim)

- .. doxygenfunction:: WarpX::SumBoundaryJ(const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> &current, int lev, int idim, const amrex::Periodicity &period)
+ .. doxygenfunction:: WarpX::SumBoundaryJ(const ablastr::fields::MultiLevelVectorField &current, int lev, int idim, const amrex::Periodicity &period)

Godfrey's anti-NCI filter for FDTD simulations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
2 changes: 1 addition & 1 deletion Docs/source/developers/gnumake/python.rst
@@ -3,7 +3,7 @@
Installing WarpX as a Python package
====================================

- A full Python installation of WarpX can be done, which includes a build of all of the C++ code, or a pure Python version can be made which only installs the Python scripts. WarpX requires Python version 3.8 or newer.
+ A full Python installation of WarpX can be done, which includes a build of all of the C++ code, or a pure Python version can be made which only installs the Python scripts. WarpX requires Python version 3.9 or newer.

For a full Python installation of WarpX
---------------------------------------
2 changes: 1 addition & 1 deletion Docs/source/developers/particles.rst
@@ -83,7 +83,7 @@ Main functions

.. doxygenfunction:: PhysicalParticleContainer::PushPX

- .. doxygenfunction:: WarpXParticleContainer::DepositCurrent(amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> &J, amrex::Real dt, amrex::Real relative_time)
+ .. doxygenfunction:: WarpXParticleContainer::DepositCurrent(ablastr::fields::MultiLevelVectorField const &J, amrex::Real dt, amrex::Real relative_time)

.. note::
The current deposition is used both by ``PhysicalParticleContainer`` and ``LaserParticleContainer``, so it is in the parent class ``WarpXParticleContainer``.
2 changes: 1 addition & 1 deletion Docs/source/install/dependencies.rst
@@ -37,7 +37,7 @@ Optional dependencies include:
- `SENSEI 4.0.0+ <https://sensei-insitu.org>`__: for in situ analysis and visualization
- `CCache <https://ccache.dev>`__: to speed up rebuilds (For CUDA support, needs version 3.7.9+ and 4.2+ is recommended)
- `Ninja <https://ninja-build.org>`__: for faster parallel compiles
- - `Python 3.8+ <https://www.python.org>`__
+ - `Python 3.9+ <https://www.python.org>`__

- `mpi4py <https://mpi4py.readthedocs.io>`__
- `numpy <https://numpy.org>`__
6 changes: 3 additions & 3 deletions Docs/source/install/hpc/dane.rst
@@ -3,17 +3,17 @@
Dane (LLNL)
=============

- The `Dane Intel CPU cluster <https://hpc.llnl.gov/hardware/compute-platforms/dane>`_ is located at LLNL.
+ The `Dane Intel CPU cluster <https://hpc.llnl.gov/hardware/compute-platforms/dane>`__ is located at LLNL.


Introduction
------------

If you are new to this system, **please see the following resources**:

- * `LLNL user account <https://lc.llnl.gov`__ (login required)
+ * `LLNL user account <https://lc.llnl.gov>`__ (login required)
* `Jupyter service <https://lc.llnl.gov/jupyter>`__ (`documentation <https://lc.llnl.gov/confluence/display/LC/JupyterHub+and+Jupyter+Notebook>`__, login required)
- * `Production directories <https://hpc.llnl.gov/hardware/file-systems>`_:
+ * `Production directories <https://hpc.llnl.gov/hardware/file-systems>`__:

* ``/p/lustre1/$(whoami)`` and ``/p/lustre2/$(whoami)``: personal directory on the parallel filesystem
* Note that the ``$HOME`` directory and the ``/usr/workspace/$(whoami)`` space are NFS mounted and *not* suitable for production quality data generation.
2 changes: 1 addition & 1 deletion Docs/source/install/hpc/lawrencium.rst
@@ -69,7 +69,7 @@ And since Lawrencium does not yet provide a module for them, install ADIOS2, BLA
cmake -S src/lapackpp -B src/lapackpp-v100-build -DCMAKE_CXX_STANDARD=17 -Dgpu_backend=cuda -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/v100/lapackpp-master -Duse_cmake_find_lapack=ON -DBLAS_LIBRARIES=${LAPACK_DIR}/lib/libblas.a -DLAPACK_LIBRARIES=${LAPACK_DIR}/lib/liblapack.a
cmake --build src/lapackpp-v100-build --target install --parallel 12
- Optionally, download and install Python packages for :ref:`PICMI <usage-picmi>` or dynamic ensemble optimizations (:ref:`libEnsemble <libensemble>`):
+ Optionally, download and install Python packages for :ref:`PICMI <usage-picmi>` or dynamic ensemble optimizations (`libEnsemble <https://libensemble.readthedocs.io/en/main/>`__):

.. code-block:: bash
5 changes: 4 additions & 1 deletion Docs/source/maintenance/release.rst
@@ -28,7 +28,7 @@ In order to create a GitHub release, you need to:
1. Create a new branch from ``development`` and update the version number in all source files.
We usually wait for the AMReX release to be tagged first, then we also point to its tag.

- There is a script for updating core dependencies of WarpX and the WarpX version:
+ There are scripts for updating core dependencies of WarpX and the WarpX version:

.. code-block:: sh
@@ -42,6 +42,9 @@ In order to create a GitHub release, you need to:

Then open a PR, wait for tests to pass and then merge.

The maintainer script ``Tools/Release/releasePR.py`` automates the steps above.
Please read through the instructions in the script before running.

2. **Local Commit** (Optional): at the moment, ``@ax3l`` is managing releases and signs tags (naming: ``YY.MM``) locally with his GPG key before uploading them to GitHub.

**Publish**: On the `GitHub Release page <https://github.com/ECP-WarpX/WarpX/releases>`__, create a new release via ``Draft a new release``.
1 change: 1 addition & 0 deletions Docs/source/refs.bib
@@ -458,6 +458,7 @@ @misc{Fallahi2020
@article{VayFELA2009,
title = {FULL ELECTROMAGNETIC SIMULATION OF FREE-ELECTRON LASER AMPLIFIER PHYSICS VIA THE LORENTZ-BOOSTED FRAME APPROACH},
author = {Fawley, William M and Vay, Jean-Luc},
journal = {},
abstractNote = {Numerical simulation of some systems containing charged particles with highly relativistic directed motion can by speeded up by orders of magnitude by choice of the proper Lorentz-boosted frame[1]. A particularly good example is that of short wavelength free-electron lasers (FELs) in which a high energy electron beam interacts with a static magnetic undulator. In the optimal boost frame with Lorentz factor gamma_F , the red-shifted FEL radiation and blue shifted undulator have identical wavelengths and the number of required time-steps (presuming the Courant condition applies) decreases by a factor of 2(gamma_F)**2 for fully electromagnetic simulation. We have adapted the WARP code [2]to apply this method to several FEL problems involving coherent spontaneous emission (CSE) from pre-bunched ebeams, including that in a biharmonic undulator.},
url = {https://www.osti.gov/biblio/964405},
place = {United States},
2 changes: 1 addition & 1 deletion Docs/source/theory/multiphysics/collisions.rst
@@ -131,7 +131,7 @@ The process is also the same as for elastic scattering except the excitation ene
Benchmarks
----------

- See the :ref:`MCC example <examples-mcc-turner>` for a benchmark of the MCC
+ See the :ref:`MCC example <examples-capacitive-discharge>` for a benchmark of the MCC
implementation against literature results.

Particle cooling due to elastic collisions
8 changes: 0 additions & 8 deletions Docs/source/usage/examples.rst
@@ -65,14 +65,6 @@ Microelectronics
* `ARTEMIS manual <https://artemis-em.readthedocs.io>`__


- Nuclear Fusion
- --------------
-
- .. note::
-
- TODO
-
-
Fundamental Plasma Physics
--------------------------

1 change: 1 addition & 0 deletions Docs/source/usage/examples/thomson_parabola_spectrometer
@@ -260,7 +260,7 @@ def get_laser(antenna_z, profile_t_peak, fill_in=True):
diag_particle_list = ["weighting", "position", "momentum"]
coarse_btd_end = int((L_plasma_bulk + 0.001 + stage_spacing * (N_stage - 1)) * 100000)
stage_end_snapshots = [
f"{int((L_plasma_bulk+stage_spacing*ii)*100000)}:{int((L_plasma_bulk+stage_spacing*ii)*100000+50)}:5"
f"{int((L_plasma_bulk + stage_spacing * ii) * 100000)}:{int((L_plasma_bulk + stage_spacing * ii) * 100000 + 50)}:5"
for ii in range(1)
]
btd_particle_diag = picmi.LabFrameParticleDiagnostic(
2 changes: 1 addition & 1 deletion Docs/source/usage/workflows/ml_materials/train.py
@@ -180,7 +180,7 @@ def test_dataset(model, test_source, test_target, loss_fun):
)
# Manual: Training loop END
t4 = time.time()
print(f"total training time: {t4-t3:.3f}s")
print(f"total training time: {t4 - t3:.3f}s")

######### save model #########

Changes to an additional file (name not shown in this view):
@@ -423,7 +423,7 @@ def run_sim(self):
assert hasattr(self.solver, "phi")

if libwarpx.amr.ParallelDescriptor.MyProc() == 0:
np.save(f"ion_density_case_{self.n+1}.npy", self.ion_density_array)
np.save(f"ion_density_case_{self.n + 1}.npy", self.ion_density_array)

# query the particle z-coordinates if this is run during CI testing
# to cover that functionality
Changes to an additional file (name not shown in this view):
@@ -20,15 +20,15 @@
# this is in C (Python) order; r is the fastest varying index
(Nm, Nz, Nr) = jt.shape

- assert (
- Nm == 3
- ), "Wrong number of angular modes stored or possible incorrect ordering when flushed"
- assert (
- Nr == 64
- ), "Wrong number of radial points stored or possible incorrect ordering when flushed"
- assert (
- Nz == 512
- ), "Wrong number of z points stored or possible incorrect ordering when flushed"
+ assert Nm == 3, (
+ "Wrong number of angular modes stored or possible incorrect ordering when flushed"
+ )
+ assert Nr == 64, (
+ "Wrong number of radial points stored or possible incorrect ordering when flushed"
+ )
+ assert Nz == 512, (
+ "Wrong number of z points stored or possible incorrect ordering when flushed"
+ )

assert ii.meshes["part_per_grid"][io.Mesh_Record_Component.SCALAR].shape == [
512,
@@ -60,6 +60,6 @@
electron_meanz = np.sum(np.dot(zlist, rhoe0)) / np.sum(rhoe0)
beam_meanz = np.sum(np.dot(zlist, rhob0)) / np.sum(rhob0)

- assert (
- (electron_meanz > 0) and (beam_meanz < 0)
- ), "problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_<species> diagnostics?"
+ assert (electron_meanz > 0) and (beam_meanz < 0), (
+ "problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_<species> diagnostics?"
+ )
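For context, the shape check above relies on openPMD's layout for RZ (thetaMode) data; a minimal sketch of reading such a field with openpmd-api, where the file pattern and record names are assumptions for illustration:

    import openpmd_api as io

    # open a WarpX openPMD output series (path pattern is an assumption)
    series = io.Series("diags/diag1/openpmd_%T.h5", io.Access.read_only)
    step = series.iterations[list(series.iterations)[0]]

    jt = step.meshes["j"]["t"].load_chunk()  # theta component of the current density
    series.flush()                           # load_chunk is lazy; data arrives here

    # thetaMode data is stored as (modes, z, r) in C order, r varying fastest
    Nm, Nz, Nr = jt.shape
    print(Nm, Nz, Nr)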
4 changes: 2 additions & 2 deletions Examples/Physics_applications/laser_ion/plot_2d.py
@@ -120,7 +120,7 @@ def visualize_density_iteration(ts, iteration, out_dir):
for ax in axs[:-1]:
ax.set_xticklabels([])
axs[2].set_xlabel(r"$z$ ($\mu$m)")
fig.suptitle(f"Iteration: {it}, Time: {time/1e-15:.1f} fs")
fig.suptitle(f"Iteration: {it}, Time: {time / 1e-15:.1f} fs")

plt.tight_layout()

@@ -190,7 +190,7 @@ def visualize_field_iteration(ts, iteration, out_dir):
for ax in axs[:-1]:
ax.set_xticklabels([])
axs[2].set_xlabel(r"$z$ ($\mu$m)")
fig.suptitle(f"Iteration: {it}, Time: {time/1e-15:.1f} fs")
fig.suptitle(f"Iteration: {it}, Time: {time / 1e-15:.1f} fs")

plt.tight_layout()

6 changes: 3 additions & 3 deletions Examples/Physics_applications/spacecraft_charging/analysis.py
@@ -68,6 +68,6 @@ def func(x, v0, tau):
print("percentage error for v0 = " + str(diff_v0 * 100) + "%")
print("percentage error for tau = " + str(diff_tau * 100) + "%")

- assert (diff_v0 < tolerance_v0) and (
- diff_tau < tolerance_tau
- ), "Test spacecraft_charging did not pass"
+ assert (diff_v0 < tolerance_v0) and (diff_tau < tolerance_tau), (
+ "Test spacecraft_charging did not pass"
+ )
Changes to an additional file (name not shown in this view):
@@ -28,7 +28,7 @@ The PICMI input file is not available for this example yet.

For `MPI-parallel <https://www.mpi-forum.org>`__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system.

- .. literalinclude:: inputs
+ .. literalinclude:: inputs_test_3d_thomson_parabola_spectrometer
:language: ini
:caption: You can copy this file from ``Examples/Physics_applications/thomson_parabola_spectrometer/inputs_test_3d_thomson_parabola_spectrometer``.

6 changes: 4 additions & 2 deletions Examples/Tests/accelerator_lattice/analysis.py
@@ -118,9 +118,11 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength):
xx = xx + dt * vx

# Compare the analytic to the simulated final values
print(f"Error in x position is {abs(np.abs((xx - xx_sim)/xx))}, which should be < 0.01")
print(
f"Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002"
f"Error in x position is {abs(np.abs((xx - xx_sim) / xx))}, which should be < 0.01"
)
print(
f"Error in x velocity is {abs(np.abs((ux - ux_sim) / ux))}, which should be < 0.002"
)

assert abs(np.abs((xx - xx_sim) / xx)) < 0.01, Exception("error in x particle position")
12 changes: 6 additions & 6 deletions Examples/Tests/boundaries/analysis.py
@@ -101,9 +101,9 @@ def do_periodic(x):
assert len(a_id) == 1, "Absorbing particles not absorbed"
assert np.all(vx == -vx0), "Reflecting particle velocity not correct"
assert np.all(vz == +vz0), "Periodic particle velocity not correct"
- assert np.all(
- np.abs((xx - xxa) / xx) < 1.0e-15
- ), "Reflecting particle position not correct"
- assert np.all(
- np.abs((zz - zza) / zz) < 1.0e-15
- ), "Periodic particle position not correct"
+ assert np.all(np.abs((xx - xxa) / xx) < 1.0e-15), (
+ "Reflecting particle position not correct"
+ )
+ assert np.all(np.abs((zz - zza) / zz) < 1.0e-15), (
+ "Periodic particle position not correct"
+ )
(Diffs for the remaining changed files were not loaded in this view.)
