diff --git a/.ci/azure/docs.yml b/.ci/azure/docs.yml index d2b976d8b5..1c5a982e7b 100644 --- a/.ci/azure/docs.yml +++ b/.ci/azure/docs.yml @@ -5,7 +5,7 @@ jobs: pool: vmImage: ubuntu-latest variables: - python.version: "3.10" + python.version: "3.11" timeoutInMinutes: 240 steps: # Checkout simpeg repo. diff --git a/.ci/azure/pypi.yml b/.ci/azure/pypi.yml index c6eaa93dad..237f5c4b1d 100644 --- a/.ci/azure/pypi.yml +++ b/.ci/azure/pypi.yml @@ -12,7 +12,7 @@ jobs: - task: UsePythonVersion@0 inputs: - versionSpec: "3.10" + versionSpec: "3.11" displayName: "Setup Python" - bash: | @@ -57,7 +57,7 @@ jobs: - task: UsePythonVersion@0 inputs: - versionSpec: "3.10" + versionSpec: "3.11" displayName: "Setup Python" - bash: | diff --git a/.ci/azure/setup_env.sh b/.ci/azure/setup_env.sh index 8d873a601d..a23942d3d5 100755 --- a/.ci/azure/setup_env.sh +++ b/.ci/azure/setup_env.sh @@ -2,10 +2,17 @@ set -ex #echo on and exit if any line fails # TF_BUILD is set to True on azure pipelines. -is_azure=$(${TF_BUILD:-false} | tr '[:upper:]' '[:lower:]') +is_azure=$(echo "${TF_BUILD:-false}" | tr '[:upper:]' '[:lower:]') if ${is_azure} then + # Add conda-forge as channel + conda config --add channels conda-forge + # Remove defaults channels + # (both from ~/.condarc and from /usr/share/miniconda/.condarc) + conda config --remove channels defaults + conda config --remove channels defaults --system + # Update conda conda update --yes -n base conda fi @@ -30,4 +37,4 @@ echo "Conda Environment:" conda list echo "Installed SimPEG version:" -python -c "import simpeg; print(simpeg.__version__)" \ No newline at end of file +python -c "import simpeg; print(simpeg.__version__)" diff --git a/.ci/azure/test.yml b/.ci/azure/test.yml index fe6bfebe2b..403a4d69f6 100644 --- a/.ci/azure/test.yml +++ b/.ci/azure/test.yml @@ -1,6 +1,6 @@ parameters: os : ['ubuntu-latest'] - py_vers: ['3.10'] + py_vers: ['3.11'] test: ['tests/em', 'tests/base tests/flow tests/seis tests/utils', 'tests/meta', diff --git a/.ci/environment_test.yml b/.ci/environment_test.yml index e1ca51a0a4..42401cd9ab 100644 --- a/.ci/environment_test.yml +++ b/.ci/environment_test.yml @@ -3,12 +3,13 @@ channels: - conda-forge dependencies: - numpy>=1.22 - - scipy>=1.8 + - scipy>=1.12 - pymatsolver>=0.3 - matplotlib-base - discretize>=0.11 - geoana>=0.7 - libdlf + - typing_extensions # optional dependencies - dask @@ -24,6 +25,8 @@ dependencies: - sphinx - sphinx-gallery>=0.1.13 - sphinxcontrib-apidoc + - sphinx-reredirects + - sphinx-design - pydata-sphinx-theme - nbsphinx - numpydoc @@ -43,4 +46,4 @@ dependencies: # PyPI uploading - wheel - twine - - build + - python-build diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 130f534ba2..4887a6e685 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -1,7 +1,7 @@ name: Bug report description: Report a bug in SimPEG. title: "BUG: " -labels: [Bug] +labels: ["bug"] body: - type: markdown @@ -46,6 +46,7 @@ body: description: > Please include the output from `simpeg.Report()` to describe your system for us. Paste the output from `from simpeg import Report; print(Report())` below. + render: shell validations: required: true diff --git a/.github/zizmor.yml b/.github/zizmor.yml new file mode 100644 index 0000000000..b506949a16 --- /dev/null +++ b/.github/zizmor.yml @@ -0,0 +1,17 @@ +# Zizmor configuration +# -------------------- +# +# This file configures zizmor. This is not a workflow that gets run in GitHub +# Actions. 
+
+#
+# References: https://woodruffw.github.io/zizmor/configuration
+
+rules:
+  unpinned-uses:
+    config:
+      policies:
+        # Mimic default behaviour: official actions can get pinned by tag.
+        actions/*: ref-pin
+        # Allow using tags to pin reviewdog actions.
+        reviewdog/action-black: ref-pin
+        reviewdog/action-flake8: ref-pin
diff --git a/.gitignore b/.gitignore
index 53545d898d..aaddeda18b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,8 +47,8 @@ docs/_build/
 docs/warnings.txt
 docs/content/api/generated/*
 .DS_Store
-docs/content/examples/*
-docs/content/tutorials/*
+docs/content/user-guide/examples/*
+docs/content/user-guide/tutorials/*
 docs/modules/*
 docs/sg_execution_times.rst
 .vscode/*
diff --git a/CITATION.rst b/CITATION.rst
index b0b2a76ff7..dcd90a96f4 100644
--- a/CITATION.rst
+++ b/CITATION.rst
@@ -1,13 +1,17 @@
+=============
 Citing SimPEG
--------------
+=============
 
-There is a `paper about SimPEG `_, if you use this code, please help our scientific visibility by citing our work!
+There is a paper about SimPEG! If you are using this library in your research,
+please help our scientific visibility by citing our work!
 
-    Cockett, R., Kang, S., Heagy, L. J., Pidlisecky, A., & Oldenburg, D. W. (2015). SimPEG: An open source framework for simulation and gradient based parameter estimation in geophysical applications. Computers & Geosciences.
+    Cockett, R., Kang, S., Heagy, L. J., Pidlisecky, A., & Oldenburg, D. W.
+    (2015). SimPEG: An open source framework for simulation and gradient based
+    parameter estimation in geophysical applications. Computers & Geosciences.
 
-BibTex:
+Here is a BibTeX entry to make things easier if you’re using LaTeX:
 
 .. code::
 
@@ -19,3 +23,24 @@ BibTex:
         publisher={Elsevier}
       }
 
+Electromagnetics
+----------------
+
+If you are using the :mod:`simpeg.electromagnetics` module, please also cite:
+
+    Lindsey J. Heagy, Rowan Cockett, Seogi Kang, Gudni K. Rosenkjaer, Douglas W. Oldenburg, A framework for simulation and inversion in electromagnetics, Computers & Geosciences, Volume 107, 2017, Pages 1-19, ISSN 0098-3004, http://dx.doi.org/10.1016/j.cageo.2017.06.018.
+
+Here is a BibTeX entry to make things easier if you’re using LaTeX:
+
+.. code::
+
+    @article{heagy2017,
+        title={A framework for simulation and inversion in electromagnetics},
+        author={Lindsey J. Heagy and Rowan Cockett and Seogi Kang and Gudni K. Rosenkjaer and Douglas W. Oldenburg},
+        journal={Computers & Geosciences},
+        volume={107},
+        pages={1 - 19},
+        year={2017},
+        issn={0098-3004},
+        doi={http://dx.doi.org/10.1016/j.cageo.2017.06.018}
+    }
diff --git a/README.rst b/README.rst
index 54041d24e3..8875b3a6c5 100644
--- a/README.rst
+++ b/README.rst
@@ -48,7 +48,7 @@ The vision is to create a package for finite volume simulation with applications
 
 You are welcome to join our forum and engage with people who use and develop SimPEG at: https://simpeg.discourse.group/.
 
-Weekly meetings are open to all. They are generally held on Wednesdays at 10:30am PDT. Please see the calendar (`GCAL `_, `ICAL `_) for information on the next meeting.
+Weekly meetings are open to all. They are generally held on Wednesdays at 3:00 PM Pacific Time. Please see the calendar (`GCAL `_, `ICAL `_) for information on the next meeting.
 
 Overview Video
 ==============
@@ -120,9 +120,8 @@ Meetings
 
 SimPEG hosts weekly meetings for users to interact with each other, for developers to discuss upcoming changes to the code base, and for discussing topics related to geophysics in general.
 
-Currently our meetings are held every Wednesday, alternating between -a mornings (10:30 am pacific time) and afternoons (3:00 pm pacific time) -on even numbered Wednesdays. Find more info on our +Currently our meetings are held every Wednesday at 3:00 PM Pacific Time. +Find more info on our `Mattermost `_. diff --git a/docs/Makefile b/docs/Makefile index 3751f949e1..cf1d326cfb 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -43,8 +43,8 @@ help: clean: rm -rf $(BUILDDIR)/* rm -rf content/api/generated/ - rm -rf content/examples/ - rm -rf content/tutorials/ + rm -rf content/user-guide/examples/ + rm -rf content/user-guide/tutorials/ html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html -j auto diff --git a/docs/_static/versions.json b/docs/_static/versions.json index 665a00a498..b35427db2c 100644 --- a/docs/_static/versions.json +++ b/docs/_static/versions.json @@ -4,11 +4,16 @@ "url": "https://docs.simpeg.xyz/dev/" }, { - "name": "v0.23.0 (latest)", - "version": "0.23.0", - "url": "https://docs.simpeg.xyz/v0.23.0/", + "name": "v0.24.0 (latest)", + "version": "0.24.0", + "url": "https://docs.simpeg.xyz/v0.24.0/", "preferred": true }, + { + "name": "v0.23.0", + "version": "0.23.0", + "url": "https://docs.simpeg.xyz/v0.23.0/" + }, { "name": "v0.22.2", "version": "0.22.2", diff --git a/docs/conf.py b/docs/conf.py index 5b7c5c9175..f5a72fcf30 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -10,7 +10,7 @@ # # All configuration values have a default; values that are commented out # serve to show the default. - +from pathlib import Path import sys import os from sphinx_gallery.sorting import FileNameSortKey @@ -44,11 +44,13 @@ "sphinx.ext.coverage", "sphinx.ext.doctest", "sphinx.ext.extlinks", + "sphinx_design", "sphinx.ext.intersphinx", "sphinx.ext.mathjax", "sphinx_gallery.gen_gallery", "sphinx.ext.todo", "matplotlib.sphinxext.plot_directive", + "sphinx_reredirects", ] # Autosummary pages will be generated by sphinx-autogen instead of sphinx-build @@ -98,8 +100,8 @@ linkcheck_ignore = [ r"https://github.com/simpeg/simpeg*", - "/content/examples/*", - "/content/tutorials/*", + "/content/user-guide/examples/*", + "/content/user-guide/tutorials/*", r"https://www.pardiso-project.org", r"https://docs.github.com/*", # GJI refuses the connexion during the check @@ -296,6 +298,7 @@ def linkcode_resolve(domain, info): "json_url": "https://docs.simpeg.xyz/latest/_static/versions.json", }, "show_version_warning_banner": True, + "navigation_with_keys": True, } html_logo = "images/simpeg-logo.png" @@ -464,7 +467,9 @@ def linkcode_resolve(domain, info): ] tutorial_dirs = glob.glob("../tutorials/[!_]*") -tut_gallery_dirs = ["content/tutorials/" + os.path.basename(f) for f in tutorial_dirs] +tut_gallery_dirs = [ + "content/user-guide/tutorials/" + os.path.basename(f) for f in tutorial_dirs +] # Scaping images to generate on website from plotly.io._sg_scraper import plotly_sg_scraper @@ -475,7 +480,7 @@ def linkcode_resolve(domain, info): sphinx_gallery_conf = { # path to your examples scripts "examples_dirs": ["../examples"] + tutorial_dirs, - "gallery_dirs": ["content/examples"] + tut_gallery_dirs, + "gallery_dirs": ["content/user-guide/examples"] + tut_gallery_dirs, "within_subsection_order": FileNameSortKey, "filename_pattern": "\.py", "backreferences_dir": "content/api/generated/backreferences", @@ -531,3 +536,51 @@ def linkcode_resolve(domain, info): ("py:class", "builtins.complex"), ("py:meth", "__call__"), ] + + +# Configure redirects +# ------------------- +# Redirect some pages to support 
old links +OLD_FILES_FNAME = Path(__file__).parent.resolve() / "old-docs-files.txt" +MAPS = { + "content/tutorials": "content/user-guide/tutorials", + "content/examples": "content/user-guide/examples", + "content/getting_started": "content/user-guide/getting-started", +} +IGNORE = ["content/getting_started/index.html", "content/user_guide.html"] + + +def _get_source_target(old_fname: str) -> tuple[str, str]: + for old_dir, new_dir in MAPS.items(): + if old_fname.startswith(old_dir): + source = old_fname.removesuffix(".html") + n_parents = len([p for p in Path(old_fname).parents if p != Path(".")]) + target = "../" * n_parents + old_fname.replace(old_dir, new_dir, 1) + return source, target + raise ValueError() + + +def build_redirects(): + """ + Build redirects dictionary for sphinx-reredirects. + """ + redirects = {} + with OLD_FILES_FNAME.open(mode="r") as f: + for line in f: + if line.startswith("#"): + continue + old_fname = line.strip() + if old_fname in IGNORE: + continue + source, target = _get_source_target(old_fname) + redirects[source] = target + return redirects + + +redirects = build_redirects() +redirects.update( + { + "content/getting_started/index": "../../content/user-guide/index.html", + "content/user_guide": "../content/user-guide/index.html", + } +) diff --git a/docs/content/api/index.rst b/docs/content/api/index.rst index e401b4422d..a2de68f773 100644 --- a/docs/content/api/index.rst +++ b/docs/content/api/index.rst @@ -32,6 +32,15 @@ Regularizations simpeg.regularization +Optimizers +---------- +Optimizers used within SimPEG inversions. + +.. toctree:: + :maxdepth: 2 + + simpeg.optimization + Directives ---------- .. toctree:: @@ -41,7 +50,6 @@ Directives Utilities --------- - Classes and functions for performing useful operations. .. toctree:: diff --git a/docs/content/api/simpeg.optimization.rst b/docs/content/api/simpeg.optimization.rst new file mode 100644 index 0000000000..822bee4abe --- /dev/null +++ b/docs/content/api/simpeg.optimization.rst @@ -0,0 +1 @@ +.. automodule:: simpeg.optimization \ No newline at end of file diff --git a/docs/content/getting_started/index.rst b/docs/content/getting_started/index.rst deleted file mode 100644 index dfef8b8d96..0000000000 --- a/docs/content/getting_started/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _getting_started: - -=============== -Getting Started -=============== - -Here you'll find instructions on getting up and running with SimPEG. - -.. toctree:: - :maxdepth: 2 - - big_picture - installing - contributing/index.rst diff --git a/docs/content/release/0.24.0-notes.rst b/docs/content/release/0.24.0-notes.rst new file mode 100644 index 0000000000..3e24893612 --- /dev/null +++ b/docs/content/release/0.24.0-notes.rst @@ -0,0 +1,232 @@ +.. _0.24.0_notes: + +=========================== +SimPEG 0.24.0 Release Notes +=========================== + +April 24th, 2025 + +.. contents:: Highlights + :depth: 3 + +Updates +======= + +New features +------------ + +Speed up of dot products involved in PGI +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This release includes optimizations of some dot products carried out in the +:class:`~simpeg.regularization.pgi.PGIsmallness`. They significantly reduce the +computation time of Petrophysically and Geologically Guided Inversions (PGI). + +Specifically, these changes optimize the dot products involved when evaluating +the regularization function itself and its derivatives. The optimization takes +advantage of the :func:`numpy.einsum` function. 
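+
+As a rough illustration (not the actual PGI code), this is the kind of
+per-cell quadratic form that :func:`numpy.einsum` can evaluate in a single
+vectorized contraction instead of a Python loop:
+
+.. code:: python
+
+    import numpy as np
+
+    # Residuals for n_cells cells (n_props physical properties each) and one
+    # (n_props, n_props) weight matrix per cell. All names are illustrative.
+    rng = np.random.default_rng(42)
+    n_cells, n_props = 10_000, 2
+    residuals = rng.normal(size=(n_cells, n_props))
+    weights = np.tile(np.eye(n_props), (n_cells, 1, 1))
+
+    # Loop-based evaluation of sum_i r_i.T @ W_i @ r_i (slow).
+    slow = sum(r @ w @ r for r, w in zip(residuals, weights))
+
+    # The same sum as a single einsum contraction over all cells (fast).
+    fast = np.einsum("ij,ijk,ik->", residuals, weights, residuals)
+
+    np.testing.assert_allclose(slow, fast)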
+
+See https://github.com/simpeg/simpeg/pull/1587 and
+https://github.com/simpeg/simpeg/pull/1588 for more information.
+
+
+Potential field sensitivity matrices as Linear Operators
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The gravity and magnetic field simulations are now capable of building the
+sensitivity matrix ``G`` as a SciPy
+:class:`~scipy.sparse.linalg.LinearOperator` object when the
+``store_sensitivities`` argument is set to ``"forward_only"``.
+The :class:`~scipy.sparse.linalg.LinearOperator` objects
+can be used to compute the dot product with any vector (``G @ v``), or the
+dot product with their transpose (``G.T @ v``), as if they were
+arrays, although the dense matrix is never fully built nor allocated in memory.
+Instead, the forward computation is carried out whenever a dot product is
+requested.
+
+This change makes it possible to compute the simulation derivatives without
+requiring large amounts of memory to store the sensitivity matrices, enabling
+users to run inversions of large models where the sensitivity matrix is larger
+than the available memory.
+
+Methods like
+:meth:`~simpeg.potential_fields.gravity.Simulation3DIntegral.Jvec`,
+:meth:`~simpeg.potential_fields.gravity.Simulation3DIntegral.Jtvec`,
+and
+:meth:`~simpeg.potential_fields.gravity.Simulation3DIntegral.getJtJdiag` make
+use of
+:attr:`~simpeg.potential_fields.gravity.Simulation3DIntegral.G`
+as a linear operator when ``store_sensitivities="forward_only"``.
+Meanwhile, the
+:meth:`~simpeg.potential_fields.gravity.Simulation3DIntegral.getJ`
+method returns a composite
+:class:`~scipy.sparse.linalg.LinearOperator` object that can also be used to
+compute dot products with any vector.
+
+See https://github.com/simpeg/simpeg/pull/1622 and
+https://github.com/simpeg/simpeg/pull/1634 for more information.
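+
+For intuition, here is a standalone sketch of the idea in plain SciPy (not
+SimPEG's actual implementation): the products are evaluated on the fly, so
+the dense matrix never needs to be stored.
+
+.. code:: python
+
+    import numpy as np
+    from scipy.sparse.linalg import LinearOperator
+
+    n_data, n_cells = 3, 5
+    dense = np.arange(n_data * n_cells, dtype=float).reshape(n_data, n_cells)
+
+    # matvec/rmatvec run the forward computation whenever a product is needed.
+    G = LinearOperator(
+        shape=(n_data, n_cells),
+        matvec=lambda v: dense @ v,
+        rmatvec=lambda v: dense.T @ v,
+    )
+
+    np.testing.assert_allclose(G @ np.ones(n_cells), dense.sum(axis=1))
+    np.testing.assert_allclose(G.T @ np.ones(n_data), dense.sum(axis=0))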
+
+Move indexing of arrays from :class:`simpeg.data.Data` to Surveys
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We moved the indexing capabilities of the :class:`~simpeg.data.Data` objects to
+the different ``Survey`` objects. This is useful when we have data as a flat
+array related to a particular survey (or combination of sources and
+receivers), and we want to obtain the data values associated with a particular
+source-receiver pair.
+
+With this change, we don't need to define a new :class:`~simpeg.data.Data`
+object to slice an array, we can use the ``Survey`` itself.
+For example, let's say we have a survey with two sources, each with three
+receivers:
+
+.. code:: python
+
+    receivers_a = [Receiver([[-2, 0]]), Receiver([[0, 0]]), Receiver([[2, 0]])]
+    source_a = Source(receiver_list=receivers_a)
+    receivers_b = [Receiver([[3, 1]]), Receiver([[4, 1]]), Receiver([[5, 1]])]
+    source_b = Source(receiver_list=receivers_b)
+    survey = Survey(source_list=[source_a, source_b])
+
+And we have a ``dobs`` array that corresponds to this survey. We can obtain the
+values of the ``dobs`` array associated with the second receiver and the first
+source by using the ``get_slice`` method to obtain a ``slice`` object, and then
+use it to index the ``dobs`` array:
+
+.. code:: python
+
+    slice_obj = survey.get_slice(source_a, receivers_a[1])
+    dobs_slice = dobs[slice_obj]
+
+See https://github.com/simpeg/simpeg/pull/1616 and
+https://github.com/simpeg/simpeg/pull/1632 for more information.
+
+Documentation
+-------------
+
+The documentation pages have been reorganized, merging the *Getting Started*
+section into the :ref:`User Guide <user_guide>`.
+This change makes it easier to navigate through the different documentation
+pages, with the assistance of a table of contents on the side.
+
+We updated the :ref:`installation instructions <installing>`, with `Miniforge
+<https://github.com/conda-forge/miniforge>`_ as the recommended Python
+distribution.
+
+We have also improved the documentation of some classes and methods.
+
+
+Bugfixes
+--------
+
+This release includes a number of bug fixes. We solved issues related to the
+``getJ`` method of the DC, SIP, TDEM, and FDEM simulations. The EM1D
+simulations can now work with receiver objects with multiple locations.
+The :class:`~simpeg.data_misfit.BaseDataMisfit` class and its children raise
+errors if the simulation returns non-numeric values as output.
+
+We have also improved some of the error messages that users get when things
+don't work as expected, aiming to catch mistakes as early as possible.
+
+Contributors
+============
+
+- `@ghwilliams `__
+- `@jcapriot `__
+- `@johnweis0480 `__
+- `@lheagy `__
+- `@santisoler `__
+
+
+Pull Requests
+=============
+
+- Bugfix for TDEM magnetic dipole sources by `@lheagy `__ in
+  https://github.com/simpeg/simpeg/pull/1572
+- Fix ubcstyle printout by `@jcapriot `__ in
+  https://github.com/simpeg/simpeg/pull/1577
+- Add docstring to ``n_processes`` in potential field simulations by
+  `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1578
+- Move simulation solver from base simulation to PDE simulation by
+  `@jcapriot `__ in https://github.com/simpeg/simpeg/pull/1582
+- Update and fix instructions to build the docs by `@santisoler `__ in
+  https://github.com/simpeg/simpeg/pull/1583
+- Change location of ``mesh`` attribute by `@jcapriot `__ in
+  https://github.com/simpeg/simpeg/pull/1585
+- Speed up most commonly used deriv/deriv2 in PGI by `@johnweis0480 `__ in
+  https://github.com/simpeg/simpeg/pull/1587
+- Improve dot products in ``PGIsmallness.__call__`` and update docstring
+  by `@johnweis0480 `__ in https://github.com/simpeg/simpeg/pull/1588
+- Rename delete on model update by `@jcapriot `__ in
+  https://github.com/simpeg/simpeg/pull/1589
+- update PGI Example plotting script for deprecated collections by
+  `@jcapriot `__ in https://github.com/simpeg/simpeg/pull/1595
+- Coverage upload on failed test by `@jcapriot `__ in
+  https://github.com/simpeg/simpeg/pull/1596
+- Use zizmor to lint GitHub Actions workflows by `@santisoler `__ in
+  https://github.com/simpeg/simpeg/pull/1592
+- Update installation instructions in docs by `@santisoler `__ in
+  https://github.com/simpeg/simpeg/pull/1597
+- Set ``permissions`` in Actions to avoid zizmor’s
+  ``excessive-permissions`` by `@santisoler `__ in
+  https://github.com/simpeg/simpeg/pull/1602
+- Fix for removed quadrature function on new scipy versions by `@jcapriot `__
+  in https://github.com/simpeg/simpeg/pull/1603
+- Install zizmor through conda-forge in ``environment.yml`` by
+  `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1600
+- Raise errors if dpred in ``BaseDataMisfit`` has nans by `@santisoler `__ in
+  https://github.com/simpeg/simpeg/pull/1615
+- Update Black’s Python versions in ``pyproject.toml`` by `@santisoler `__ in
+  https://github.com/simpeg/simpeg/pull/1620
+- Use shell rendering in Bug report template by `@santisoler `__ in
+  https://github.com/simpeg/simpeg/pull/1612
+- Merge Getting Started and Examples into User Guide by `@santisoler `__ in
+  https://github.com/simpeg/simpeg/pull/1619
+- Fix usage of “bug” label in bug report template by `@santisoler `__ in
https://github.com/simpeg/simpeg/pull/1624 +- Fix redirects links in docs by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1623 +- Fix bug on ``getJ`` of gravity simulation by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1621 +- Fix redirect to user guide index page by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1627 +- Move indexing of flat arrays to Survey classes by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1616 +- Replace Data indexing for Survey slicing where needed by `@santisoler `__ + in https://github.com/simpeg/simpeg/pull/1632 +- Implement ``G`` matrix as ``LinearOperator`` in gravity simulation by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1622 +- Set maximum number of iterations in eq sources tests by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1636 +- Em1d multiple rx locs by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1637 +- Fix definition of model in gravity J-related tests by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1643 +- Improve docstring of dip_azimuth2cartesian by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1642 +- Improve variable names in gravity test by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1641 +- Test transpose of gravity getJ as linear operator by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1644 +- Configure zizmor to pin reviewdog actions with tags by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1650 +- Deprecate ``components`` in potential field surveys by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1633 +- Fix bug on magnetic simulation ``nD`` property by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1646 +- Make pytest error on random seeded test by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1598 +- Add support for potential fields survey indexing by `@jcapriot `__ in + https://github.com/simpeg/simpeg/pull/1635 +- Implement magnetic G as linear operator by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1634 +- Use Numpy’s RNG in tests for depth weighting by `@santisoler `__ in + https://github.com/simpeg/simpeg/pull/1570 +- Raise NotImplementedError on getJ for NSEM 1D simulations by + `@santisoler `__ in https://github.com/simpeg/simpeg/pull/1653 +- Set the model when calling ``getJ`` in DC and SIP simulations by + `@lheagy `__ in https://github.com/simpeg/simpeg/pull/1361 +- Fix ``getJ`` method in TDEM and FDEM 1D simulations by `@ghwilliams `__ in + https://github.com/simpeg/simpeg/pull/1638 diff --git a/docs/content/release/index.rst b/docs/content/release/index.rst index bb9bef3492..b99e649f57 100644 --- a/docs/content/release/index.rst +++ b/docs/content/release/index.rst @@ -1,3 +1,5 @@ +.. _release_notes: + ************* Release Notes ************* @@ -5,6 +7,7 @@ Release Notes .. toctree:: :maxdepth: 2 + 0.24.0 <0.24.0-notes> 0.23.0 <0.23.0-notes> 0.22.2 <0.22.2-notes> 0.22.1 <0.22.1-notes> diff --git a/docs/content/user-guide/getting-started/about-simpeg.rst b/docs/content/user-guide/getting-started/about-simpeg.rst new file mode 100644 index 0000000000..225122ca43 --- /dev/null +++ b/docs/content/user-guide/getting-started/about-simpeg.rst @@ -0,0 +1,44 @@ +.. _about_simpeg: + +============== +What's SimPEG? +============== + +SimPEG (Simulation and Parameter Estimation in Geophysics) is a +**Python library** for simulations, inversions and parameter estimation for +geophysical applications. 
+
+The vision is to create a package for finite volume simulation with
+applications to geophysical imaging and subsurface flow. To enable an
+understanding of its many different components, this package has the following
+features:
+
+* Modular with respect to the spatial discretization, optimization routine, and
+  geophysical problem.
+* Built with the inverse problem in mind.
+* Provides a framework for geophysical and hydrogeologic problems.
+* Supports 1D, 2D and 3D problems.
+* Designed for large-scale inversions.
+
+Overview Talk at SciPy 2016
+---------------------------
+
+.. raw:: html
+
+    
diff --git a/docs/content/getting_started/big_picture.rst b/docs/content/user-guide/getting-started/big_picture.rst
similarity index 97%
rename from docs/content/getting_started/big_picture.rst
rename to docs/content/user-guide/getting-started/big_picture.rst
index f8060f0bb6..8a152f7b5a 100644
--- a/docs/content/getting_started/big_picture.rst
+++ b/docs/content/user-guide/getting-started/big_picture.rst
@@ -67,7 +67,7 @@ implementation is a model, which, prior to interpretation, must be evaluated.
 This requires considering, and often re-assessing, the choices and assumptions
 made in both the input and implementation stages.
 
-.. image:: ../../images/InversionWorkflow-PreSimPEG.png
+.. image:: ../../../images/InversionWorkflow-PreSimPEG.png
     :width: 400 px
     :alt: Components
     :align: center
@@ -86,7 +86,7 @@ of inversions into various units. We present it in this specific modular style,
 as each unit contains a targeted subset of choices crucial to the inversion
 process.
 
-.. image:: ../../images/InversionWorkflow.png
+.. image:: ../../../images/InversionWorkflow.png
     :width: 400 px
     :alt: Framework
     :align: center
@@ -133,15 +133,7 @@ be exploited through inheritance of base classes, and differences can be
 expressed through subtype polymorphism. Please look at the documentation here
 for more in-depth information.
 
-
-.. include:: ../../../CITATION.rst
-
-Authors
--------
-
-.. include:: ../../../AUTHORS.rst
-
 License
 -------
 
-.. include:: ../../../LICENSE
+.. include:: ../../../../LICENSE
diff --git a/docs/content/user-guide/getting-started/citing.rst b/docs/content/user-guide/getting-started/citing.rst
new file mode 100644
index 0000000000..1ae6828364
--- /dev/null
+++ b/docs/content/user-guide/getting-started/citing.rst
@@ -0,0 +1,3 @@
+.. _citing:
+
+.. 
include:: ../../../../CITATION.rst diff --git a/docs/content/getting_started/contributing/advanced.rst b/docs/content/user-guide/getting-started/contributing/advanced.rst similarity index 100% rename from docs/content/getting_started/contributing/advanced.rst rename to docs/content/user-guide/getting-started/contributing/advanced.rst diff --git a/docs/content/getting_started/contributing/code-style.rst b/docs/content/user-guide/getting-started/contributing/code-style.rst similarity index 100% rename from docs/content/getting_started/contributing/code-style.rst rename to docs/content/user-guide/getting-started/contributing/code-style.rst diff --git a/docs/content/getting_started/contributing/documentation.rst b/docs/content/user-guide/getting-started/contributing/documentation.rst similarity index 100% rename from docs/content/getting_started/contributing/documentation.rst rename to docs/content/user-guide/getting-started/contributing/documentation.rst diff --git a/docs/content/getting_started/contributing/index.rst b/docs/content/user-guide/getting-started/contributing/index.rst similarity index 100% rename from docs/content/getting_started/contributing/index.rst rename to docs/content/user-guide/getting-started/contributing/index.rst diff --git a/docs/content/getting_started/contributing/pull-requests.rst b/docs/content/user-guide/getting-started/contributing/pull-requests.rst similarity index 100% rename from docs/content/getting_started/contributing/pull-requests.rst rename to docs/content/user-guide/getting-started/contributing/pull-requests.rst diff --git a/docs/content/getting_started/contributing/setting-up-environment.rst b/docs/content/user-guide/getting-started/contributing/setting-up-environment.rst similarity index 100% rename from docs/content/getting_started/contributing/setting-up-environment.rst rename to docs/content/user-guide/getting-started/contributing/setting-up-environment.rst diff --git a/docs/content/getting_started/contributing/testing.rst b/docs/content/user-guide/getting-started/contributing/testing.rst similarity index 100% rename from docs/content/getting_started/contributing/testing.rst rename to docs/content/user-guide/getting-started/contributing/testing.rst diff --git a/docs/content/getting_started/contributing/working-with-github.rst b/docs/content/user-guide/getting-started/contributing/working-with-github.rst similarity index 89% rename from docs/content/getting_started/contributing/working-with-github.rst rename to docs/content/user-guide/getting-started/contributing/working-with-github.rst index cb944eadd0..979cab63ef 100644 --- a/docs/content/getting_started/contributing/working-with-github.rst +++ b/docs/content/user-guide/getting-started/contributing/working-with-github.rst @@ -3,7 +3,7 @@ Working with Git and GitHub --------------------------- -.. image:: https://github.githubassets.com/images/modules/logos_page/Octocat.png +.. image:: https://octodex.github.com/images/original.png :align: right :width: 100 :target: https://github.com @@ -25,7 +25,7 @@ There are two ways you can clone a repository: 2. Using a desktop client such as SourceTree_ or GitKraken_. - .. image:: ../../../images/sourceTreeSimPEG.png + .. image:: ../../../../images/sourceTreeSimPEG.png :align: center :width: 400 :target: https://www.sourcetreeapp.com/ @@ -34,7 +34,7 @@ There are two ways you can clone a repository: it is also handy to set up the remote account so it remembers your github_ user name and password - .. image:: ../../../images/sourceTreeRemote.png + .. 
image:: ../../../../images/sourceTreeRemote.png
      :align: center
      :width: 400
 
diff --git a/docs/content/getting_started/installing.rst b/docs/content/user-guide/getting-started/installing.rst
similarity index 98%
rename from docs/content/getting_started/installing.rst
rename to docs/content/user-guide/getting-started/installing.rst
index a3bc1cac3b..f3233d4a76 100644
--- a/docs/content/getting_started/installing.rst
+++ b/docs/content/user-guide/getting-started/installing.rst
@@ -1,7 +1,8 @@
-.. _api_installing:
+.. _installing:
 
-Getting Started with SimPEG
-***************************
+==========
+Installing
+==========
 
 .. _installing_python:
diff --git a/docs/content/user-guide/getting-started/version-compatibility.rst b/docs/content/user-guide/getting-started/version-compatibility.rst
new file mode 100644
index 0000000000..a4a65b625f
--- /dev/null
+++ b/docs/content/user-guide/getting-started/version-compatibility.rst
@@ -0,0 +1,36 @@
+Version compatibility
+=====================
+
+SimPEG follows the time-window-based policy for support of Python and Numpy
+versions introduced in `NEP29
+<https://numpy.org/neps/nep-0029-deprecation_policy.html>`_. In summary, SimPEG supports:
+
+- all minor versions of Python released in the **prior 42 months** before
+  a SimPEG release, and
+- all minor versions of Numpy released in the **prior 24 months** before
+  a SimPEG release.
+
+We follow these guidelines conservatively, meaning that we might not drop
+support for older versions of our dependencies if they are not causing any
+issues. We include notes in the :ref:`release_notes` every time we drop support
+for a Python or Numpy version.
+
+
+Supported Python versions
+-------------------------
+
+If you require support for older Python versions, please pin SimPEG to the
+following releases to ensure compatibility:
+
+
+.. list-table::
+   :widths: 40 60
+
+   * - **Python version**
+     - **Last compatible release**
+   * - 3.8
+     - 0.22.2
+   * - 3.9
+     - 0.22.2
+   * - 3.10
+     - 0.24.0
diff --git a/docs/content/user-guide/how-to-guide/choosing-solvers.rst b/docs/content/user-guide/how-to-guide/choosing-solvers.rst
new file mode 100644
index 0000000000..b46c12da9e
--- /dev/null
+++ b/docs/content/user-guide/how-to-guide/choosing-solvers.rst
@@ -0,0 +1,110 @@
+.. _choosing-solvers:
+
+================
+Choosing solvers
+================
+
+Several simulations available in SimPEG need to numerically solve a system of
+partial differential equations (PDEs), such as
+:class:`~simpeg.electromagnetics.static.resistivity.Simulation3DNodal` (DC),
+:class:`~simpeg.electromagnetics.time_domain.Simulation3DMagneticFluxDensity`
+(TDEM),
+and
+:class:`~simpeg.electromagnetics.frequency_domain.Simulation3DMagneticFluxDensity`
+(FDEM).
+A numerical solver is needed to solve these PDEs.
+SimPEG can make use of the solvers available in :mod:`pymatsolver`, like
+:class:`pymatsolver.Pardiso`, :class:`pymatsolver.Mumps`, or
+:class:`pymatsolver.SolverLU`.
+The choice of an appropriate solver can affect the computation time required to
+solve the PDE. Generally, we recommend using direct solvers over iterative
+solvers for SimPEG, but be aware that direct solvers have much larger memory
+requirements.
+
+The ``Pardiso`` solver wraps the `oneMKL PARDISO
+`_
+solver available for x86_64 CPUs.
+
+The ``Mumps`` solver wraps `MUMPS
+`_, a fast solver available for
+all CPU architectures, including Apple silicon.
+
+The ``SolverLU`` solver wraps SciPy's :func:`scipy.sparse.linalg.splu`. The
+performance of this solver is not up to the level of ``Mumps`` and ``Pardiso``.
+Usage of ``SolverLU`` is recommended only when it's not possible to use the
+other, faster solvers.
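+
+As a quick way to see which solvers can be used on the current machine, one
+option is the ``AvailableSolvers`` dictionary that :mod:`pymatsolver` exposes
+(a sketch; check the pymatsolver documentation for details):
+
+.. code:: python
+
+    import pymatsolver
+
+    # Maps solver names to whether they are available on this machine.
+    print(pymatsolver.AvailableSolvers)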
+
+
+The default solver
+------------------
+
+We can use :func:`simpeg.utils.get_default_solver` to obtain a reasonable
+default solver available for our system:
+
+.. code:: python
+
+    import discretize
+    import simpeg
+    import simpeg.electromagnetics.static.resistivity as dc
+
+    # Get default solver
+    solver = simpeg.utils.get_default_solver()
+    print(f"Solver: {solver}")
+
+which would print out on an x86_64 CPU:
+
+.. code::
+
+    Solver: <class 'pymatsolver.direct.Pardiso'>
+
+We can then use this solver in a simulation:
+
+.. code:: python
+
+    # Define a simple mesh
+    h = [(1.0, 10)]
+    mesh = discretize.TensorMesh([h, h, h], origin="CCC")
+
+    # And a DC survey
+    receiver = dc.receivers.Dipole(locations_m=(-1, 0, 0), locations_n=(1, 0, 0))
+    source = dc.sources.Dipole(
+        receiver_list=[receiver], location_a=(-2, 0, 0), location_b=(2, 0, 0)
+    )
+    survey = dc.Survey([source])
+
+    # Use the default solver in the simulation
+    simulation = dc.Simulation3DNodal(mesh=mesh, survey=survey, solver=solver)
+
+.. note::
+
+    The priority list used to choose a default solver is:
+
+    1) ``Pardiso``
+    2) ``Mumps``
+    3) ``SolverLU``
+
+
+Setting solvers manually
+------------------------
+
+Alternatively, we can manually set a solver. For example, if we want to use
+``Mumps`` in our DC resistivity simulation, we can import
+:class:`pymatsolver.Mumps` and pass it to our simulation:
+
+.. code:: python
+
+    import simpeg.electromagnetics.static.resistivity as dc
+    from pymatsolver import Mumps
+
+    # Manually set Mumps as our solver
+    simulation = dc.Simulation3DNodal(mesh=mesh, survey=survey, solver=Mumps)
+
+.. note::
+
+    When sharing your notebook or script with a colleague, keep in mind that
+    your code might not work if ``Pardiso`` is not available on their system.
+
+    For such scenarios, we recommend using the
+    :func:`simpeg.utils.get_default_solver` function, which will always return
+    a suitable solver for the current system.
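+
+Solver-specific options can also be forwarded through the simulation's
+``solver_opts`` argument, which is passed as keyword arguments to the solver's
+constructor. As a sketch (``is_symmetric`` is an assumed example option; check
+your solver's documentation):
+
+.. code:: python
+
+    # Hypothetical example: tell the solver the system matrix is symmetric.
+    simulation = dc.Simulation3DNodal(
+        mesh=mesh,
+        survey=survey,
+        solver=Mumps,
+        solver_opts={"is_symmetric": True},
+    )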
+
+Ultimately, the best solver depends on both the problem you are solving and
+the system you are running on. Experiment with different solvers to find the
+one that works best for you.
+
diff --git a/docs/content/user-guide/how-to-guide/move-mesh-to-survey.rst b/docs/content/user-guide/how-to-guide/move-mesh-to-survey.rst
new file mode 100644
index 0000000000..754e209dde
--- /dev/null
+++ b/docs/content/user-guide/how-to-guide/move-mesh-to-survey.rst
@@ -0,0 +1,202 @@
+============================
+Locating mesh on survey area
+============================
+
+The :mod:`discretize` package allows us to define 3D meshes that can be used
+for running SimPEG's forward and inverse problems.
+Mesh dimensions for :class:`discretize.TensorMesh` and
+:class:`discretize.TreeMesh` are assumed to be in meters, and by default their
+origin (the westmost-southmost-lowest point) is located at the origin of the
+coordinate system (the point ``(0, 0, 0)``).
+
+When working with real-world data, we want our mesh to be located around the
+survey area. We can move our mesh location by shifting its ``origin``.
+
+For example, suppose we want to invert some magnetic data from Osborne Mine in
+Australia that spans a region between 448353.0 m and 482422.0 m along the
+easting, and between 7578158.0 m and 7594834.0 m along the northing
+(UTM zone 54).
+Let's also assume that the maximum topographic height of the area is 417 m.
+
+We can build a mesh that spans 34 km on the easting, 17 km on the northing, and
+5500 m vertically:
+
+.. code:: python
+
+    import discretize
+
+    dx, dy, dz = 200.0, 200.0, 100.0
+    nx, ny, nz = 170, 85, 55
+    hx, hy, hz = [(dx, nx)], [(dy, ny)], [(dz, nz)]
+
+    mesh = discretize.TensorMesh([hx, hy, hz])
+    print(mesh)
+
+
+.. code::
+
+  TensorMesh: 794,750 cells
+
+                      MESH EXTENT             CELL WIDTH      FACTOR
+  dir    nC        min           max         min       max      max
+  ---   ---  ---------------------------  ------------------  ------
+   x    170          0.00     34,000.00    200.00    200.00    1.00
+   y     85          0.00     17,000.00    200.00    200.00    1.00
+   z     55          0.00      5,500.00    100.00    100.00    1.00
+
+
+The ``origin`` of this mesh is located at ``(0, 0, 0)``. We can move it to the
+survey area by shifting it to (448353.0 m, 7578158.0 m, -5000 m):
+
+.. code:: python
+
+    mesh.origin = (448353.0, 7578158.0, -5000)
+    print(mesh)
+
+.. code::
+
+  TensorMesh: 794,750 cells
+
+                      MESH EXTENT             CELL WIDTH      FACTOR
+  dir    nC        min           max         min       max      max
+  ---   ---  ---------------------------  ------------------  ------
+   x    170    448,353.00    482,353.00    200.00    200.00    1.00
+   y     85  7,578,158.00  7,595,158.00    200.00    200.00    1.00
+   z     55     -5,000.00        500.00    100.00    100.00    1.00
+
+
+By shifting the ``origin`` we are not changing the number of cells in the mesh
+nor their dimensions. We are just moving the location of the mesh in the three
+directions.
+
+.. note::
+
+    We shift the z coordinate of the origin to -5000 m so that 500 m remain
+    above zero height to account for topography.
+
+
+.. tip::
+
+    Alternatively, we can set the ``origin`` when defining the mesh, by passing
+    it as an argument. For example:
+
+    .. code:: python
+
+        origin = (448353.0, 7578158.0, -5000)
+        mesh = discretize.TensorMesh([hx, hy, hz], origin=origin)
+        print(mesh)
+
+
+Considering padding: simple case
+--------------------------------
+
+It's best practice to add some padding to the mesh when using it in an
+inversion. The padding cells will accommodate any potential body outside the
+survey area whose effect might be present in the data.
+
+Let's take the previous example and build a mesh that has 3 km of padding
+in each horizontal direction:
+
+.. code:: python
+
+    hx = [(200.0, 15), (dx, nx), (200.0, 15)]
+    hy = [(200.0, 15), (dy, ny), (200.0, 15)]
+    hz = [(dz, nz)]
+
+    mesh = discretize.TensorMesh([hx, hy, hz])
+    print(mesh)
+
+.. code::
+
+  TensorMesh: 1,265,000 cells
+
+                      MESH EXTENT             CELL WIDTH      FACTOR
+  dir    nC        min           max         min       max      max
+  ---   ---  ---------------------------  ------------------  ------
+   x    200          0.00     40,000.00    200.00    200.00    1.00
+   y    115          0.00     23,000.00    200.00    200.00    1.00
+   z     55          0.00      5,500.00    100.00    100.00    1.00
+
+Now we can shift the ``origin``, but we also need to take into account the
+padding cells. We will set the origin to the westmost-southmost corner of the
+survey minus the padding distance we added to the mesh (3 km):
+
+.. code:: python
+
+    mesh.origin = (448353.0 - 3000, 7578158.0 - 3000, -5000)
+    print(mesh)
+
+.. code::
+
+  TensorMesh: 1,265,000 cells
+
+                      MESH EXTENT             CELL WIDTH      FACTOR
+  dir    nC        min           max         min       max      max
+  ---   ---  ---------------------------  ------------------  ------
+   x    200    445,353.00    485,353.00    200.00    200.00    1.00
+   y    115  7,575,158.00  7,598,158.00    200.00    200.00    1.00
+   z     55     -5,000.00        500.00    100.00    100.00    1.00
+
+
+Considering padding: padding factor
+-----------------------------------
+
+Alternatively, we can introduce padding through a *padding factor*. Instead of
+creating padding cells of the same size, we can use the padding factor to
+create padding cells that increase in volume as they move away from the survey
+area.
+This is the usual approach for adding padding cells to
+a :class:`discretize.TensorMesh`, since it reduces the number of cells in the
+mesh, making inversions less expensive.
+
+Following the previous example, let's add 7 cells to each side in the
+horizontal directions. Let's make the first cells the same size as the ones in
+the mesh, and then increase their size by a factor of 1.5:
+
+.. code:: python
+
+    n_pad_cells = 7
+    factor = 1.5
+
+    hx = [(dx, n_pad_cells, -factor), (dx, nx), (dx, n_pad_cells, factor)]
+    hy = [(dy, n_pad_cells, -factor), (dy, ny), (dy, n_pad_cells, factor)]
+    hz = [(dz, nz)]
+
+    mesh = discretize.TensorMesh([hx, hy, hz])
+    print(mesh)
+
+.. code::
+
+  TensorMesh: 1,001,880 cells
+
+                      MESH EXTENT             CELL WIDTH      FACTOR
+  dir    nC        min           max         min       max      max
+  ---   ---  ---------------------------  ------------------  ------
+   x    184          0.00     53,303.12    200.00  3,417.19    1.50
+   y     99          0.00     36,303.12    200.00  3,417.19    1.50
+   z     55          0.00      5,500.00    100.00    100.00    1.00
+
+
+As before, we need to consider the padding cells when shifting the ``origin``
+of the mesh. Since we know that we added 7 cells to each side, we can leverage
+that by shifting the 7th node of the x and y axes to the westmost-southmost
+corner of the survey:
+
+.. code:: python
+
+    x_node_7th = mesh.nodes_x[n_pad_cells]
+    y_node_7th = mesh.nodes_y[n_pad_cells]
+    mesh.origin = (448353.0 - x_node_7th, 7578158.0 - y_node_7th, -5000)
+    print(mesh)
+
+.. code::
+
+  TensorMesh: 1,001,880 cells
+
+                      MESH EXTENT             CELL WIDTH      FACTOR
+  dir    nC        min           max         min       max      max
+  ---   ---  ---------------------------  ------------------  ------
+   x    184    438,701.44    492,004.56    200.00  3,417.19    1.50
+   y     99  7,568,506.44  7,604,809.56    200.00  3,417.19    1.50
+   z     55     -5,000.00        500.00    100.00    100.00    1.00
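+
+As a quick sanity check (a sketch reusing ``dx``, ``factor`` and
+``n_pad_cells`` from above), the total width added by the expanding padding
+cells on one side recovers the mesh extents printed above:
+
+.. code:: python
+
+    # 200 * (1.5 + 1.5**2 + ... + 1.5**7) = 9,651.5625 m on each side.
+    pad_width = sum(dx * factor**i for i in range(1, n_pad_cells + 1))
+    print(pad_width)             # 9651.5625
+    print(448353.0 - pad_width)  # 438701.4375, the min x extent above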
diff --git a/docs/content/user-guide/index.rst b/docs/content/user-guide/index.rst
new file mode 100644
index 0000000000..8ca423fb58
--- /dev/null
+++ b/docs/content/user-guide/index.rst
@@ -0,0 +1,43 @@
+.. _user_guide:
+
+SimPEG User Guide
+=================
+
+This guide aims to help users get started with SimPEG and learn how to
+simulate physics and run inversions for different types of geophysical data.
+
+For details on the available classes and functions in SimPEG, please visit the
+:ref:`api`.
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+   :caption: Getting Started
+
+   getting-started/about-simpeg.rst
+   getting-started/big_picture
+   getting-started/installing
+   getting-started/contributing/index.rst
+   getting-started/citing.rst
+   getting-started/version-compatibility.rst
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+   :caption: How-to Guides
+
+   how-to-guide/choosing-solvers
+   how-to-guide/move-mesh-to-survey.rst
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+   :caption: Tutorials
+
+   tutorials/**/index
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Examples
+
+   examples/index
diff --git a/docs/content/user_guide.rst b/docs/content/user_guide.rst
deleted file mode 100644
index aeebe15070..0000000000
--- a/docs/content/user_guide.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-.. _user_guide:
-
-==========
-User Guide
-==========
-
-We've included some tutorials and gallery examples that will walk you through using
-discretize to solve your PDE. For more details on any of the functions, check out the
-API documentation.
-
-Tutorials
----------
-.. toctree::
-    :glob:
-    :maxdepth: 2
-
-    tutorials/**/index
-
-Examples
---------
-.. toctree::
-    :maxdepth: 1
-
-    examples/index
diff --git a/docs/index.rst b/docs/index.rst
index 7f742f6860..8769985ee9 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,18 +1,90 @@
-.. include:: ../README.rst
+:html_theme.sidebar_secondary.remove: true
+
+.. image:: ./images/simpeg-logo.png
+   :alt: SimPEG logo
+
+====================
+SimPEG Documentation
+====================
+
+Simulation and Parameter Estimation in Geophysics.
+An open-source Python library for simulation, inversion and parameter
+estimation for geophysical applications.
+
+**Useful links:**
+:ref:`Install <installing>` |
+`GitHub Repository <https://github.com/simpeg/simpeg>`_ |
+`Bugs and Issues <https://github.com/simpeg/simpeg/issues>`_ |
+`SimPEG website <https://simpeg.xyz>`_ |
+`License <https://github.com/simpeg/simpeg/blob/main/LICENSE>`_
+
+.. grid:: 1 2 1 2
+   :margin: 5 5 0 0
+   :padding: 0 0 0 0
+   :gutter: 4
+
+   .. grid-item-card:: :fas:`book-open` User Guide
+      :text-align: center
+      :class-title: sd-fs-5
+      :class-card: sd-p-3
+
+      Learn how to use SimPEG.
+
+      .. button-ref:: user_guide
+         :ref-type: ref
+         :click-parent:
+         :color: primary
+         :shadow:
+         :expand:
+
+   .. grid-item-card:: :fas:`book` User Tutorials
+      :text-align: center
+      :class-title: sd-fs-5
+      :class-card: sd-p-3
+
+      Apply SimPEG to geophysical problems.
+
+      .. button-link:: https://simpeg.xyz/user-tutorials
+         :click-parent:
+         :color: primary
+         :shadow:
+         :expand:
+
+         Check out the User Tutorials :octicon:`link-external`
+
+   .. grid-item-card:: :fas:`code` API Reference
+      :text-align: center
+      :class-title: sd-fs-5
+      :class-card: sd-p-3
+
+      A list of modules, classes and functions.
+
+      .. button-ref:: api
+         :ref-type: ref
+         :color: primary
+         :shadow:
+         :expand:
+
+   .. grid-item-card:: :fas:`comments` Contact
+      :text-align: center
+      :class-title: sd-fs-5
+      :class-card: sd-p-3
+
+      Chat with the rest of the community.
+
+      .. button-link:: https://mattermost.softwareunderground.org/simpeg
+         :click-parent:
+         :color: primary
+         :shadow:
+         :expand:
+
+         Join our Mattermost channel :octicon:`link-external`
 
 .. toctree::
    :maxdepth: 2
    :hidden:
    :titlesonly:
 
-   content/getting_started/index
-   content/user_guide
-   content/api/index
-   content/release/index
-
-.. Project Index & Search
-.. ======================
-
-.. * :ref:`genindex`
-.. * :ref:`modindex`
-.. * :ref:`search`
+   User Guide <content/user-guide/index>
+   API Reference <content/api/index>
+   Release Notes <content/release/index>
diff --git a/docs/old-docs-files.txt b/docs/old-docs-files.txt
new file mode 100644
index 0000000000..9107882ac3
--- /dev/null
+++ b/docs/old-docs-files.txt
@@ -0,0 +1,154 @@
+# This file contains a list of old HTML files that were generated when building
+# the docs (simpeg v0.23.0). The list is used to create sphinx-reredirects, so
+# the old links to these files can be redirected to the new locations.
+# See docs/conf.py for more details.
+content/examples/01-maps/index.html +content/examples/01-maps/plot_block_in_layer.html +content/examples/01-maps/plot_combo.html +content/examples/01-maps/plot_layer.html +content/examples/01-maps/plot_mesh2mesh.html +content/examples/01-maps/plot_sumMap.html +content/examples/01-maps/sg_execution_times.html +content/examples/02-gravity/index.html +content/examples/02-gravity/plot_inv_grav_tiled.html +content/examples/02-gravity/sg_execution_times.html +content/examples/03-magnetics/index.html +content/examples/03-magnetics/plot_0_analytic.html +content/examples/03-magnetics/plot_inv_mag_MVI_Sparse_TreeMesh.html +content/examples/03-magnetics/plot_inv_mag_MVI_VectorAmplitude.html +content/examples/03-magnetics/plot_inv_mag_nonLinear_Amplitude.html +content/examples/03-magnetics/sg_execution_times.html +content/examples/04-dcip/index.html +content/examples/04-dcip/plot_dc_analytic.html +content/examples/04-dcip/plot_inv_dcip_dipoledipole_3Dinversion_twospheres.html +content/examples/04-dcip/plot_inv_dcip_dipoledipole_parametric_inversion.html +content/examples/04-dcip/plot_read_DC_data_with_IO_class.html +content/examples/04-dcip/sg_execution_times.html +content/examples/05-fdem/index.html +content/examples/05-fdem/plot_0_fdem_analytic.html +content/examples/05-fdem/plot_inv_fdem_loop_loop_2Dinversion.html +content/examples/05-fdem/sg_execution_times.html +content/examples/06-tdem/index.html +content/examples/06-tdem/plot_0_tdem_analytic.html +content/examples/06-tdem/plot_fwd_tdem_3d_model.html +content/examples/06-tdem/plot_fwd_tdem_inductive_src_permeable_target.html +content/examples/06-tdem/plot_fwd_tdem_waveforms.html +content/examples/06-tdem/plot_inv_tdem_1D.html +content/examples/06-tdem/plot_inv_tdem_1D_raw_waveform.html +content/examples/06-tdem/sg_execution_times.html +content/examples/07-nsem/index.html +content/examples/07-nsem/plot_fwd_nsem_MTTipper3D.html +content/examples/07-nsem/sg_execution_times.html +content/examples/08-vrm/index.html +content/examples/08-vrm/plot_fwd_vrm.html +content/examples/08-vrm/plot_inv_vrm_eq.html +content/examples/08-vrm/sg_execution_times.html +content/examples/09-flow/index.html +content/examples/09-flow/plot_fwd_flow_richards_1D.html +content/examples/09-flow/plot_inv_flow_richards_1D.html +content/examples/09-flow/sg_execution_times.html +content/examples/10-pgi/index.html +content/examples/10-pgi/plot_inv_0_PGI_Linear_1D.html +content/examples/10-pgi/plot_inv_1_PGI_Linear_1D_joint_WithRelationships.html +content/examples/10-pgi/sg_execution_times.html +content/examples/20-published/index.html +content/examples/20-published/plot_booky_1Dstitched_resolve_inv.html +content/examples/20-published/plot_booky_1D_time_freq_inv.html +content/examples/20-published/plot_effective_medium_theory.html +content/examples/20-published/plot_heagyetal2017_casing.html +content/examples/20-published/plot_heagyetal2017_cyl_inversions.html +content/examples/20-published/plot_laguna_del_maule_inversion.html +content/examples/20-published/plot_load_booky.html +content/examples/20-published/plot_richards_celia1990.html +content/examples/20-published/plot_schenkel_morrison_casing.html +content/examples/20-published/plot_tomo_joint_with_volume.html +content/examples/20-published/plot_vadose_vangenuchten.html +content/examples/20-published/sg_execution_times.html +content/examples/index.html +content/examples/sg_execution_times.html +content/getting_started/big_picture.html +content/getting_started/contributing/advanced.html 
+content/getting_started/contributing/code-style.html +content/getting_started/contributing/documentation.html +content/getting_started/contributing/index.html +content/getting_started/contributing/pull-requests.html +content/getting_started/contributing/setting-up-environment.html +content/getting_started/contributing/testing.html +content/getting_started/contributing/working-with-github.html +content/getting_started/index.html +content/getting_started/installing.html +content/tutorials/01-models_mapping/index.html +content/tutorials/01-models_mapping/plot_1_tensor_models.html +content/tutorials/01-models_mapping/plot_2_cyl_models.html +content/tutorials/01-models_mapping/plot_3_tree_models.html +content/tutorials/01-models_mapping/sg_execution_times.html +content/tutorials/02-linear_inversion/index.html +content/tutorials/02-linear_inversion/plot_inv_1_inversion_lsq.html +content/tutorials/02-linear_inversion/plot_inv_2_inversion_irls.html +content/tutorials/02-linear_inversion/sg_execution_times.html +content/tutorials/03-gravity/index.html +content/tutorials/03-gravity/plot_1a_gravity_anomaly.html +content/tutorials/03-gravity/plot_1b_gravity_gradiometry.html +content/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.html +content/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.html +content/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.html +content/tutorials/03-gravity/sg_execution_times.html +content/tutorials/04-magnetics/index.html +content/tutorials/04-magnetics/plot_2a_magnetics_induced.html +content/tutorials/04-magnetics/plot_2b_magnetics_mvi.html +content/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.html +content/tutorials/04-magnetics/sg_execution_times.html +content/tutorials/05-dcr/index.html +content/tutorials/05-dcr/plot_fwd_1_dcr_sounding.html +content/tutorials/05-dcr/plot_fwd_2_dcr2d.html +content/tutorials/05-dcr/plot_fwd_3_dcr3d.html +content/tutorials/05-dcr/plot_gen_3_3d_to_2d.html +content/tutorials/05-dcr/plot_inv_1_dcr_sounding.html +content/tutorials/05-dcr/plot_inv_1_dcr_sounding_irls.html +content/tutorials/05-dcr/plot_inv_1_dcr_sounding_parametric.html +content/tutorials/05-dcr/plot_inv_2_dcr2d.html +content/tutorials/05-dcr/plot_inv_2_dcr2d_irls.html +content/tutorials/05-dcr/plot_inv_3_dcr3d.html +content/tutorials/05-dcr/sg_execution_times.html +content/tutorials/06-ip/index.html +content/tutorials/06-ip/plot_fwd_2_dcip2d.html +content/tutorials/06-ip/plot_fwd_3_dcip3d.html +content/tutorials/06-ip/plot_inv_2_dcip2d.html +content/tutorials/06-ip/plot_inv_3_dcip3d.html +content/tutorials/06-ip/sg_execution_times.html +content/tutorials/07-fdem/index.html +content/tutorials/07-fdem/plot_fwd_1_em1dfm_dispersive.html +content/tutorials/07-fdem/plot_fwd_1_em1dfm.html +content/tutorials/07-fdem/plot_fwd_2_fem_cyl.html +content/tutorials/07-fdem/plot_fwd_3_fem_3d.html +content/tutorials/07-fdem/plot_inv_1_em1dfm.html +content/tutorials/07-fdem/sg_execution_times.html +content/tutorials/08-tdem/index.html +content/tutorials/08-tdem/plot_fwd_1_em1dtm_dispersive.html +content/tutorials/08-tdem/plot_fwd_1_em1dtm.html +content/tutorials/08-tdem/plot_fwd_1_em1dtm_waveforms.html +content/tutorials/08-tdem/plot_fwd_2_tem_cyl.html +content/tutorials/08-tdem/plot_fwd_3_tem_3d.html +content/tutorials/08-tdem/plot_inv_1_em1dtm.html +content/tutorials/08-tdem/sg_execution_times.html +content/tutorials/09-nsem/index.html +content/tutorials/09-nsem/sg_execution_times.html +content/tutorials/10-vrm/index.html 
+content/tutorials/10-vrm/plot_fwd_1_vrm_layer.html +content/tutorials/10-vrm/plot_fwd_2_vrm_topsoil.html +content/tutorials/10-vrm/plot_fwd_3_vrm_tem.html +content/tutorials/10-vrm/sg_execution_times.html +content/tutorials/11-flow/index.html +content/tutorials/11-flow/sg_execution_times.html +content/tutorials/12-seismic/index.html +content/tutorials/12-seismic/plot_fwd_1_tomography_2D.html +content/tutorials/12-seismic/plot_inv_1_tomography_2D.html +content/tutorials/12-seismic/sg_execution_times.html +content/tutorials/13-joint_inversion/index.html +content/tutorials/13-joint_inversion/plot_inv_3_cross_gradient_pf.html +content/tutorials/13-joint_inversion/sg_execution_times.html +content/tutorials/14-pgi/index.html +content/tutorials/14-pgi/plot_inv_1_joint_pf_pgi_full_info_tutorial.html +content/tutorials/14-pgi/plot_inv_2_joint_pf_pgi_no_info_tutorial.html +content/tutorials/14-pgi/sg_execution_times.html +content/user_guide.html diff --git a/environment.yml b/environment.yml index 26fd37631c..58bc664044 100644 --- a/environment.yml +++ b/environment.yml @@ -5,12 +5,13 @@ dependencies: # dependencies - python=3.11 - numpy>=1.22 - - scipy>=1.8 + - scipy>=1.12 - pymatsolver>=0.3 - matplotlib-base - discretize>=0.11 - geoana>=0.7 - libdlf + - typing_extensions # solver # uncomment the next line if you are on an intel platform @@ -32,6 +33,8 @@ dependencies: - sphinx - sphinx-gallery>=0.1.13 - sphinxcontrib-apidoc + - sphinx-reredirects + - sphinx-design - pydata-sphinx-theme - empymod>=2.0.0 - nbsphinx @@ -56,11 +59,8 @@ dependencies: - flake8-mutable==1.2.0 - flake8-rst-docstrings==0.3.0 - flake8-docstrings==1.7.0 + - zizmor # lint GitHub Actions workflows # recommended - jupyter - pyvista - - - pip - - pip: - - zizmor # lint GitHub Actions workflows diff --git a/examples/07-nsem/plot_fwd_nsem_MTTipper3D.py b/examples/07-nsem/plot_fwd_nsem_MTTipper3D.py index e0f2725378..014b112a6b 100644 --- a/examples/07-nsem/plot_fwd_nsem_MTTipper3D.py +++ b/examples/07-nsem/plot_fwd_nsem_MTTipper3D.py @@ -56,11 +56,19 @@ def run(plotIt=True): # Make a receiver list receiver_list = [] for rx_orientation in ["xx", "xy", "yx", "yy"]: - receiver_list.append(NSEM.Rx.PointNaturalSource(rx_loc, rx_orientation, "real")) - receiver_list.append(NSEM.Rx.PointNaturalSource(rx_loc, rx_orientation, "imag")) + receiver_list.append( + NSEM.Rx.Impedance(rx_loc, orientation=rx_orientation, component="real") + ) + receiver_list.append( + NSEM.Rx.Impedance(rx_loc, orientation=rx_orientation, component="imag") + ) for rx_orientation in ["zx", "zy"]: - receiver_list.append(NSEM.Rx.Point3DTipper(rx_loc, rx_orientation, "real")) - receiver_list.append(NSEM.Rx.Point3DTipper(rx_loc, rx_orientation, "imag")) + receiver_list.append( + NSEM.Rx.Tipper(rx_loc, orientation=rx_orientation, component="real") + ) + receiver_list.append( + NSEM.Rx.Tipper(rx_loc, orientation=rx_orientation, component="imag") + ) # Source list source_list = [ diff --git a/examples/09-flow/plot_fwd_flow_richards_1D.py b/examples/09-flow/plot_fwd_flow_richards_1D.py index 1ce540fa12..9cbdd370f3 100644 --- a/examples/09-flow/plot_fwd_flow_richards_1D.py +++ b/examples/09-flow/plot_fwd_flow_richards_1D.py @@ -77,7 +77,6 @@ def run(plotIt=True): initial_conditions=h, do_newton=False, method="mixed", - debug=False, ) prob.time_steps = [(5, 25, 1.1), (60, 40)] diff --git a/examples/09-flow/plot_inv_flow_richards_1D.py b/examples/09-flow/plot_inv_flow_richards_1D.py index 567e41f112..20940bcf05 100644 --- a/examples/09-flow/plot_inv_flow_richards_1D.py +++ 
diff --git a/examples/09-flow/plot_fwd_flow_richards_1D.py b/examples/09-flow/plot_fwd_flow_richards_1D.py
index 1ce540fa12..9cbdd370f3 100644
--- a/examples/09-flow/plot_fwd_flow_richards_1D.py
+++ b/examples/09-flow/plot_fwd_flow_richards_1D.py
@@ -77,7 +77,6 @@ def run(plotIt=True):
         initial_conditions=h,
         do_newton=False,
         method="mixed",
-        debug=False,
     )
 
     prob.time_steps = [(5, 25, 1.1), (60, 40)]
diff --git a/examples/09-flow/plot_inv_flow_richards_1D.py b/examples/09-flow/plot_inv_flow_richards_1D.py
index 567e41f112..20940bcf05 100644
--- a/examples/09-flow/plot_inv_flow_richards_1D.py
+++ b/examples/09-flow/plot_inv_flow_richards_1D.py
@@ -71,7 +71,6 @@ def run(plotIt=True):
         initial_conditions=h,
         do_newton=False,
         method="mixed",
-        debug=False,
     )
 
     prob.time_steps = [(5, 25, 1.1), (60, 40)]
diff --git a/pyproject.toml b/pyproject.toml
index 4ddf6e4ae8..c97663c4f2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,6 +24,7 @@ dependencies = [
     "numpy>=1.22",
     "pymatsolver>=0.3",
     "scipy>=1.8",
+    "typing_extensions; python_version<'3.13'",
 ]
 
 classifiers = [
@@ -78,6 +79,8 @@ docs = [
     "sphinx",
     "sphinx-gallery>=0.1.13",
     "sphinxcontrib-apidoc",
+    "sphinx-reredirects",
+    "sphinx-design",
     "pydata-sphinx-theme",
     "nbsphinx",
     "empymod>=2.0.0",
@@ -260,5 +263,10 @@ rst-roles = [
 [tool.pytest.ini_options]
 filterwarnings = [
     "ignore::simpeg.utils.solver_utils.DefaultSolverWarning",
+    "error:You are running a pytest without setting a random seed.*:UserWarning",
+    "error:The `index_dictionary` property has been deprecated:FutureWarning",
+    'error:The `simpeg\.directives\.[a-z_]+` submodule has been deprecated',
+    'error:Casting complex values to real discards the imaginary part',
 ]
+xfail_strict = true
diff --git a/simpeg/__init__.py b/simpeg/__init__.py
index c6cbe5e65d..baf84190d3 100644
--- a/simpeg/__init__.py
+++ b/simpeg/__init__.py
@@ -73,24 +73,32 @@
     maps.ComboMap
     maps.ComplexMap
     maps.ExpMap
-    maps.LinearMap
     maps.IdentityMap
     maps.InjectActiveCells
-    maps.MuRelative
-    maps.LogMap
+    maps.LinearMap
     maps.LogisticSigmoidMap
+    maps.LogMap
+    maps.Mesh2Mesh
+    maps.MuRelative
     maps.ParametricBlock
+    maps.ParametricBlockInLayer
+    maps.ParametricCasingAndLayer
     maps.ParametricCircleMap
     maps.ParametricEllipsoid
     maps.ParametricLayer
     maps.ParametricPolyMap
+    maps.ParametricSplineMap
+    maps.PolynomialPetroClusterMap
     maps.Projection
     maps.ReciprocalMap
+    maps.SelfConsistentEffectiveMedium
     maps.SphericalSystem
+    maps.SumMap
     maps.Surject2Dto3D
     maps.SurjectFull
     maps.SurjectUnits
     maps.SurjectVertical1D
+    maps.TileMap
     maps.Weighting
     maps.Wires
diff --git a/simpeg/base/pde_simulation.py b/simpeg/base/pde_simulation.py
index eddc7d88f1..470040af4f 100644
--- a/simpeg/base/pde_simulation.py
+++ b/simpeg/base/pde_simulation.py
@@ -1,4 +1,5 @@
 import inspect
+import warnings
 import numpy as np
 import pymatsolver
 import scipy.sparse as sp
@@ -8,8 +9,7 @@
 from .. import props
 from scipy.constants import mu_0
 
-from ..utils import validate_type
-from ..utils.solver_utils import get_default_solver
+from ..utils import validate_type, get_default_solver, get_logger, PerformanceWarning
 
 
 def __inner_mat_mul_op(M, u, v=None, adjoint=False):
@@ -436,12 +436,24 @@ class BasePDESimulation(BaseSimulation):
         pairs of keyword arguments and parameter values for the solver.
         Please visit `pymatsolver `__ to learn more
         about solvers and their parameters.
-
     """
 
     def __init__(self, mesh, solver=None, solver_opts=None, **kwargs):
         self.mesh = mesh
         super().__init__(**kwargs)
+        if solver is None:
+            solver = get_default_solver()
+            get_logger().info(
+                f"Setting the default solver '{solver.__name__}' for the "
+                f"'{type(self).__name__}'.\n"
+                "To avoid receiving this message, pass a solver to the simulation. "
+                "For example:"
+                "\n\n"
+                "    from simpeg.utils import get_default_solver\n"
+                "\n"
+                "    solver = get_default_solver()\n"
+                f"    simulation = {type(self).__name__}(solver=solver, ...)"
+            )
         self.solver = solver
         if solver_opts is None:
             solver_opts = {}
@@ -486,10 +498,6 @@ def solver(self):
         type[pymatsolver.solvers.Base]
             Numerical solver used to solve the forward problem.
         """
-        if self._solver is None:
-            # do not cache this, in case the user wants to
-            # change it after the first time it is requested.
-            return get_default_solver(warn=True)
         return self._solver
 
     @solver.setter
@@ -501,6 +509,15 @@ def solver(self, cls):
             raise TypeError(
                 f"{cls.__qualname__} is not a subclass of pymatsolver.base.BaseSolver"
             )
+        if cls in (pymatsolver.SolverLU, pymatsolver.Solver):
+            warnings.warn(
+                f"The 'pymatsolver.{cls.__name__}' solver might lead to high "
+                "computation times. "
+                "We recommend using a faster alternative such as 'pymatsolver.Pardiso' "
+                "or 'pymatsolver.Mumps'.",
+                PerformanceWarning,
+                stacklevel=2,
+            )
         self._solver = cls
 
     @property
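
# Illustrative sketch, not part of the changeset: how a user silences the new
# default-solver log message and avoids the PerformanceWarning. `SimulationClass`
# and `mesh` are stand-ins for any BasePDESimulation subclass and its mesh.
from simpeg.utils import get_default_solver

solver = get_default_solver()  # e.g. pymatsolver.Pardiso when available
simulation = SimulationClass(mesh, solver=solver)
# Passing pymatsolver.SolverLU or pymatsolver.Solver still works, but it now
# triggers the PerformanceWarning recommending Pardiso or Mumps.
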
""" - if self._solver is None: - # do not cache this, in case the user wants to - # change it after the first time it is requested. - return get_default_solver(warn=True) return self._solver @solver.setter @@ -501,6 +509,15 @@ def solver(self, cls): raise TypeError( f"{cls.__qualname__} is not a subclass of pymatsolver.base.BaseSolver" ) + if cls in (pymatsolver.SolverLU, pymatsolver.Solver): + warnings.warn( + f"The 'pymatsolver.{cls.__name__}' solver might lead to high " + "computation times. " + "We recommend using a faster alternative such as 'pymatsolver.Pardiso' " + "or 'pymatsolver.Mumps'.", + PerformanceWarning, + stacklevel=2, + ) self._solver = cls @property diff --git a/simpeg/data.py b/simpeg/data.py index ecca25c37f..1f81b57593 100644 --- a/simpeg/data.py +++ b/simpeg/data.py @@ -4,6 +4,15 @@ from .survey import BaseSurvey from .utils import mkvc, validate_ndarray_with_shape, validate_float, validate_type +try: + from warnings import deprecated +except ImportError: + # Use the deprecated decorator provided by typing_extensions (which + # supports older versions of Python) if it cannot be imported from + # warnings. + from typing_extensions import deprecated + + __all__ = ["Data", "SyntheticData"] @@ -283,6 +292,13 @@ def shape(self): return self.dobs.shape @property + @deprecated( + "The `index_dictionary` property has been deprecated. " + "Use the `get_slice()` or `get_all_slices()` methods provided " + "by the Survey object instead." + "This property will be removed in SimPEG v0.25.0.", + category=FutureWarning, + ) def index_dictionary(self): """Dictionary for indexing data by sources and receiver. @@ -327,12 +343,12 @@ def index_dictionary(self): ########################## def __setitem__(self, key, value): - index = self.index_dictionary[key[0]][key[1]] - self.dobs[index] = mkvc(value) + slice_obj = self.survey.get_slice(*key) + self.dobs[slice_obj] = mkvc(value) def __getitem__(self, key): - index = self.index_dictionary[key[0]][key[1]] - return self.dobs[index] + slice_obj = self.survey.get_slice(*key) + return self.dobs[slice_obj] def tovec(self): """Convert observed data to a vector diff --git a/simpeg/data_misfit.py b/simpeg/data_misfit.py index ef8273b36f..1225a2e154 100644 --- a/simpeg/data_misfit.py +++ b/simpeg/data_misfit.py @@ -240,9 +240,14 @@ def residual(self, m, f=None): (n_data, ) numpy.ndarray The data residual vector. """ - if self.data is None: - raise Exception("data must be set before a residual can be calculated.") - return self.simulation.residual(m, self.data.dobs, f=f) + dpred = self.simulation.dpred(m, f=f) + if np.isnan(dpred).any() or np.isinf(dpred).any(): + msg = ( + f"The `{type(self.simulation).__name__}.dpred()` method " + "returned an array that contains `nan`s and/or `inf`s." + ) + raise ValueError(msg) + return dpred - self.data.dobs class L2DataMisfit(BaseDataMisfit): @@ -281,7 +286,10 @@ def __call__(self, m, f=None): """Evaluate the residual for a given model.""" R = self.W * self.residual(m, f=f) - return np.vdot(R, R) + # Imaginary part is always zero, even for complex data, as it takes the + # complex-conjugate dot-product. Ensure it returns a float + # (``np.vdot(R, R).real`` is the same as ``np.linalg.norm(R)**2``). 
diff --git a/simpeg/directives/__init__.py b/simpeg/directives/__init__.py
index e5e225a88a..4d20d6572a 100644
--- a/simpeg/directives/__init__.py
+++ b/simpeg/directives/__init__.py
@@ -99,10 +99,9 @@
 """
 
-from .directives import (
+from ._directives import (
     InversionDirective,
     DirectiveList,
-    BetaEstimateDerivative,
     BetaEstimateMaxDerivative,
     BetaEstimate_ByEig,
     BetaSchedule,
@@ -118,7 +117,6 @@
     ScalingMultipleDataMisfits_ByEig,
     JointScalingSchedule,
     UpdateSensitivityWeights,
-    Update_IRLS,
     ScaleMisfitMultipliers,
 )
 
@@ -140,16 +138,19 @@
     ProjectSphericalBounds,
 )
 
-from .pgi_directives import (
+from ._pgi_directives import (
     PGI_UpdateParameters,
     PGI_BetaAlphaSchedule,
     PGI_AddMrefInSmooth,
 )
 
-from .sim_directives import (
+from ._sim_directives import (
     SimilarityMeasureInversionDirective,
     SimilarityMeasureSaveOutputEveryIteration,
     PairedBetaEstimate_ByEig,
     PairedBetaSchedule,
     MovingAndMultiTargetStopping,
 )
+
+### Deprecated class
+from ._regularization import Update_IRLS
diff --git a/simpeg/directives/_directives.py b/simpeg/directives/_directives.py
new file mode 100644
index 0000000000..0e4465b977
--- /dev/null
+++ b/simpeg/directives/_directives.py
@@ -0,0 +1,2767 @@
+from abc import ABCMeta, abstractmethod
+from typing import TYPE_CHECKING
+
+from datetime import datetime
+import pathlib
+import numpy as np
+import matplotlib.pyplot as plt
+import warnings
+import scipy.sparse as sp
+from ..typing import RandomSeed
+from ..data_misfit import BaseDataMisfit
+from ..objective_function import BaseObjectiveFunction, ComboObjectiveFunction
+from ..maps import IdentityMap, Wires
+from ..regularization import (
+    WeightedLeastSquares,
+    BaseRegularization,
+    Smallness,
+    Sparse,
+    SparseSmallness,
+    PGIsmallness,
+    SmoothnessFirstOrder,
+    SparseSmoothness,
+    BaseSimilarityMeasure,
+)
+from ..utils import (
+    mkvc,
+    set_kwargs,
+    sdiag,
+    estimate_diagonal,
+    spherical2cartesian,
+    cartesian2spherical,
+    Zero,
+    eigenvalue_by_power_iteration,
+    validate_string,
+    get_logger,
+)
+from ..utils.code_utils import (
+    deprecate_property,
+    validate_type,
+    validate_integer,
+    validate_float,
+    validate_ndarray_with_shape,
+)
+
+if TYPE_CHECKING:
+    from ..simulation import BaseSimulation
+    from ..survey import BaseSurvey
+
+
+class InversionDirective:
+    """Base inversion directive class.
+
+    SimPEG directives initialize and update parameters used by the inversion algorithm;
+    e.g. setting the initial beta or updating the regularization. ``InversionDirective``
+    is a parent class responsible for connecting directives to the data misfit, regularization
+    and optimization defining the inverse problem.
+
+    Parameters
+    ----------
+    inversion : simpeg.inversion.BaseInversion, None
+        A SimPEG inversion object; i.e. an instance of :class:`simpeg.inversion.BaseInversion`.
+    dmisfit : simpeg.data_misfit.BaseDataMisfit, None
+        A data misfit; i.e. an instance of :class:`simpeg.data_misfit.BaseDataMisfit`.
+    reg : simpeg.regularization.BaseRegularization, None
+        The regularization, or model objective function; i.e. an instance of :class:`simpeg.regularization.BaseRegularization`.
+    verbose : bool
+        Whether or not to print debugging information.
+    """
+
+    _REGISTRY = {}
+
+    _regPair = [WeightedLeastSquares, BaseRegularization, ComboObjectiveFunction]
+    _dmisfitPair = [BaseDataMisfit, ComboObjectiveFunction]
+
+    def __init__(self, inversion=None, dmisfit=None, reg=None, verbose=False, **kwargs):
+        self.inversion = inversion
+        self.dmisfit = dmisfit
+        self.reg = reg
+        self.verbose = verbose
+        set_kwargs(self, **kwargs)
+
+    @property
+    def verbose(self):
+        """Whether or not to print debugging information.
+
+        Returns
+        -------
+        bool
+        """
+        return self._verbose
+
+    @verbose.setter
+    def verbose(self, value):
+        self._verbose = validate_type("verbose", value, bool)
+
+    @property
+    def inversion(self):
+        """Inversion object associated with the directive.
+
+        Returns
+        -------
+        simpeg.inversion.BaseInversion
+            The inversion associated with the directive.
+        """
+        if not hasattr(self, "_inversion"):
+            return None
+        return self._inversion
+
+    @inversion.setter
+    def inversion(self, i):
+        if getattr(self, "_inversion", None) is not None:
+            warnings.warn(
+                "InversionDirective {0!s} has switched to a new inversion.".format(
+                    self.__class__.__name__
+                ),
+                stacklevel=2,
+            )
+        self._inversion = i
+
+    @property
+    def invProb(self):
+        """Inverse problem associated with the directive.
+
+        Returns
+        -------
+        simpeg.inverse_problem.BaseInvProblem
+            The inverse problem associated with the directive.
+        """
+        return self.inversion.invProb
+
+    @property
+    def opt(self):
+        """Optimization algorithm associated with the directive.
+
+        Returns
+        -------
+        simpeg.optimization.Minimize
+            Optimization algorithm associated with the directive.
+        """
+        return self.invProb.opt
+
+    @property
+    def reg(self) -> BaseObjectiveFunction:
+        """Regularization associated with the directive.
+
+        Returns
+        -------
+        simpeg.regularization.BaseRegularization
+            The regularization associated with the directive.
+        """
+        if getattr(self, "_reg", None) is None:
+            self.reg = self.invProb.reg  # go through the setter
+        return self._reg
+
+    @reg.setter
+    def reg(self, value):
+        if value is not None:
+            assert any(
+                [isinstance(value, regtype) for regtype in self._regPair]
+            ), "Regularization must be in {}, not {}".format(self._regPair, type(value))
+
+            if isinstance(value, WeightedLeastSquares):
+                value = 1 * value  # turn it into a combo objective function
+        self._reg = value
+
+    @property
+    def dmisfit(self) -> BaseObjectiveFunction:
+        """Data misfit associated with the directive.
+
+        Returns
+        -------
+        simpeg.data_misfit.BaseDataMisfit
+            The data misfit associated with the directive.
+        """
+        if getattr(self, "_dmisfit", None) is None:
+            self.dmisfit = self.invProb.dmisfit  # go through the setter
+        return self._dmisfit
+
+    @dmisfit.setter
+    def dmisfit(self, value):
+        if value is not None:
+            assert any(
+                [isinstance(value, dmisfittype) for dmisfittype in self._dmisfitPair]
+            ), "Misfit must be in {}, not {}".format(self._dmisfitPair, type(value))
+
+            if not isinstance(value, ComboObjectiveFunction):
+                value = 1 * value  # turn it into a combo objective function
+        self._dmisfit = value
+
+    @property
+    def survey(self) -> list["BaseSurvey"]:
+        """Return survey for all data misfits
+
+        Assuming that ``dmisfit`` is always a ``ComboObjectiveFunction``,
+        return a list containing the survey for each data misfit; i.e.
+        [survey1, survey2, ...]
+
+        Returns
+        -------
+        list of simpeg.survey.Survey
+            Survey for all data misfits.
+        """
+        return [objfcts.simulation.survey for objfcts in self.dmisfit.objfcts]
+
+    @property
+    def simulation(self) -> list["BaseSimulation"]:
+        """Return simulation for all data misfits.
+
+        Assuming that ``dmisfit`` is always a ``ComboObjectiveFunction``,
+        return a list containing the simulation for each data misfit; i.e.
+        [sim1, sim2, ...].
+
+        Returns
+        -------
+        list of simpeg.simulation.BaseSimulation
+            Simulation for all data misfits.
+        """
+        return [objfcts.simulation for objfcts in self.dmisfit.objfcts]
+
+    def initialize(self):
+        """Initialize inversion parameter(s) according to directive."""
+        pass
+
+    def endIter(self):
+        """Update inversion parameter(s) according to directive at end of iteration."""
+        pass
+
+    def finish(self):
+        """Update inversion parameter(s) according to directive at end of inversion."""
+        pass
+
+    def validate(self, directiveList=None):
+        """Validate directive.
+
+        The `validate` method returns ``True`` if the directive and its location within
+        the directives list do not encounter conflicts. Otherwise, an appropriate error
+        message is returned describing the conflict.
+
+        Parameters
+        ----------
+        directiveList : simpeg.directives.DirectiveList
+            List of directives used in the inversion.
+
+        Returns
+        -------
+        bool
+            Returns ``True`` if validated, otherwise an appropriate error is returned.
+        """
+        return True
+
+
+class DirectiveList(object):
+    """Directives list
+
+    SimPEG directives initialize and update parameters used by the inversion algorithm;
+    e.g. setting the initial beta or updating the regularization. ``DirectiveList`` stores
+    the set of directives used in the inversion algorithm.
+
+    Parameters
+    ----------
+    *directives : simpeg.directives.InversionDirective
+        Directives for the inversion.
+    inversion : simpeg.inversion.BaseInversion
+        The inversion associated with the directives list.
+    debug : bool
+        Whether to print debugging information.
+
+    """
+
+    def __init__(self, *directives, inversion=None, debug=False, **kwargs):
+        super().__init__(**kwargs)
+        self.dList = []
+        for d in directives:
+            assert isinstance(
+                d, InversionDirective
+            ), "All directives must be InversionDirectives not {}".format(type(d))
+            self.dList.append(d)
+        self.inversion = inversion
+        self.verbose = debug
+
+    @property
+    def debug(self):
+        """Whether or not to print debugging information.
+
+        Returns
+        -------
+        bool
+        """
+        return getattr(self, "_debug", False)
+
+    @debug.setter
+    def debug(self, value):
+        for d in self.dList:
+            d.debug = value
+        self._debug = value
+
+    @property
+    def inversion(self):
+        """Inversion object associated with the directives list.
+
+        Returns
+        -------
+        simpeg.inversion.BaseInversion
+            The inversion associated with the directives list.
+        """
+        return getattr(self, "_inversion", None)
+
+    @inversion.setter
+    def inversion(self, i):
+        if self.inversion is i:
+            return
+        if getattr(self, "_inversion", None) is not None:
+            warnings.warn(
+                "{0!s} has switched to a new inversion.".format(
+                    self.__class__.__name__
+                ),
+                stacklevel=2,
+            )
+        for d in self.dList:
+            d.inversion = i
+        self._inversion = i
+
+    def call(self, ruleType):
+        if self.dList is None:
+            if self.verbose:
+                print("DirectiveList is None, no directives to call!")
+            return
+
+        directives = ["initialize", "endIter", "finish"]
+        assert ruleType in directives, 'Directive type must be in ["{0!s}"]'.format(
+            '", "'.join(directives)
+        )
+        for r in self.dList:
+            getattr(r, ruleType)()
+
+    def validate(self):
+        [directive.validate(self) for directive in self]
+        return True
+
+    def __iter__(self):
+        return iter(self.dList)
+
+
+class BaseBetaEstimator(InversionDirective):
+    """Base class for estimating initial trade-off parameter (beta).
+
+    This class has properties and methods inherited by directive classes which estimate
+    the initial trade-off parameter (beta). This class is not used directly to create
+    directives for the inversion.
+
+    Parameters
+    ----------
+    beta0_ratio : float
+        Desired ratio between data misfit and model objective function at initial beta iteration.
+    random_seed : None or :class:`~simpeg.typing.RandomSeed`, optional
+        Random seed used for random sampling. It can either be an int,
+        a predefined Numpy random number generator, or any valid input to
+        ``numpy.random.default_rng``.
+
+    """
+
+    def __init__(
+        self,
+        beta0_ratio=1.0,
+        random_seed: RandomSeed | None = None,
+        **kwargs,
+    ):
+        # Deprecate seed argument
+        if kwargs.pop("seed", None) is not None:
+            raise TypeError(
+                "'seed' has been removed in "
+                "SimPEG v0.24.0, please use 'random_seed' instead.",
+            )
+        super().__init__(**kwargs)
+        self.beta0_ratio = beta0_ratio
+        self.random_seed = random_seed
+
+    @property
+    def beta0_ratio(self):
+        """The estimated ratio is multiplied by this to obtain beta.
+
+        Returns
+        -------
+        float
+        """
+        return self._beta0_ratio
+
+    @beta0_ratio.setter
+    def beta0_ratio(self, value):
+        self._beta0_ratio = validate_float(
+            "beta0_ratio", value, min_val=0.0, inclusive_min=False
+        )
+
+    @property
+    def random_seed(self):
+        """Random seed to initialize with.
+
+        Returns
+        -------
+        int, numpy.random.Generator or None
+        """
+        return self._random_seed
+
+    @random_seed.setter
+    def random_seed(self, value):
+        try:
+            np.random.default_rng(value)
+        except TypeError as err:
+            msg = (
+                "Unable to initialize the random number generator with "
+                f"a {type(value).__name__}"
+            )
+            raise TypeError(msg) from err
+        self._random_seed = value
+
+    def validate(self, directive_list):
+        ind = [isinstance(d, BaseBetaEstimator) for d in directive_list.dList]
+        assert np.sum(ind) == 1, (
+            "Multiple directives for computing initial beta detected in directives list. "
+            "Only one directive can be used to set the initial beta."
+        )
+
+        return True
+
+    seed = deprecate_property(
+        random_seed,
+        "seed",
+        "random_seed",
+        removal_version="0.24.0",
+        error=True,
+    )
+
+
+class BetaEstimateMaxDerivative(BaseBetaEstimator):
+    r"""Estimate initial trade-off parameter (beta) using largest derivatives.
+
+    The initial trade-off parameter (beta) is estimated by scaling the ratio
+    between the largest derivatives in the gradient of the data misfit and
+    model objective function. The estimated trade-off parameter is used to
+    update the **beta** property in the associated :class:`simpeg.inverse_problem.BaseInvProblem`
+    object prior to running the inversion. A separate directive is used for updating the
+    trade-off parameter at successive beta iterations; see :class:`BetaSchedule`.
+
+    Parameters
+    ----------
+    beta0_ratio : float
+        Desired ratio between data misfit and model objective function at initial beta iteration.
+    random_seed : None or :class:`~simpeg.typing.RandomSeed`, optional
+        Random seed used for random sampling. It can either be an int,
+        a predefined Numpy random number generator, or any valid input to
+        ``numpy.random.default_rng``.
+
+    Notes
+    -----
+    Let :math:`\phi_d` represent the data misfit, :math:`\phi_m` represent the model
+    objective function and :math:`\mathbf{m_0}` represent the starting model. The first
+    model update is obtained by minimizing a global objective function of the form:
+
+    .. math::
+        \phi (\mathbf{m_0}) = \phi_d (\mathbf{m_0}) + \beta_0 \phi_m (\mathbf{m_0})
+
+    where :math:`\beta_0` represents the initial trade-off parameter (beta).
+
+    We define :math:`\gamma` as the desired ratio between the data misfit and model objective
+    functions at the initial beta iteration (defined by the 'beta0_ratio' input argument).
+    Here, the initial trade-off parameter is computed according to:
+
+    .. math::
+        \beta_0 = \gamma \frac{| \nabla_m \phi_d (\mathbf{m_0}) |_{max}}{| \nabla_m \phi_m (\mathbf{m_0 + \delta m}) |_{max}}
+
+    where
+
+    .. math::
+        \delta \mathbf{m} = \frac{m_{max}}{\mu_{max}} \boldsymbol{\mu}
+
+    and :math:`\boldsymbol{\mu}` is a set of independent samples from the
+    continuous uniform distribution between 0 and 1.
+
+    """
+
+    def __init__(
+        self, beta0_ratio=1.0, random_seed: RandomSeed | None = None, **kwargs
+    ):
+        super().__init__(beta0_ratio=beta0_ratio, random_seed=random_seed, **kwargs)
+
+    def initialize(self):
+        rng = np.random.default_rng(seed=self.random_seed)
+
+        if self.verbose:
+            print("Calculating the beta0 parameter.")
+
+        m = self.invProb.model
+
+        x0 = rng.random(size=m.shape)
+        phi_d_deriv = np.abs(self.dmisfit.deriv(m)).max()
+        dm = x0 / x0.max() * m.max()
+        phi_m_deriv = np.abs(self.reg.deriv(m + dm)).max()
+
+        self.ratio = np.asarray(phi_d_deriv / phi_m_deriv)
+        self.beta0 = self.beta0_ratio * self.ratio
+        self.invProb.beta = self.beta0
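
# Illustrative sketch, not part of the changeset: the estimate in `initialize`
# above, restated with plain NumPy. `dmisfit_deriv`, `reg_deriv`, `m` and
# `beta0_ratio` stand in for self.dmisfit.deriv, self.reg.deriv, the starting
# model and gamma in the Notes.
import numpy as np

rng = np.random.default_rng(seed=42)
x0 = rng.random(size=m.shape)
dm = x0 / x0.max() * m.max()  # delta m, scaled by m_max / mu_max
beta0 = beta0_ratio * (
    np.abs(dmisfit_deriv(m)).max() / np.abs(reg_deriv(m + dm)).max()
)
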
+
+
+class BetaEstimate_ByEig(BaseBetaEstimator):
+    r"""Estimate initial trade-off parameter (beta) by power iteration.
+
+    The initial trade-off parameter (beta) is estimated by scaling the ratio
+    between the largest eigenvalue in the second derivative of the data
+    misfit and the model objective function. The largest eigenvalues are estimated
+    using the power iteration method; see :func:`simpeg.utils.eigenvalue_by_power_iteration`.
+    The estimated trade-off parameter is used to update the **beta** property in the
+    associated :class:`simpeg.inverse_problem.BaseInvProblem` object prior to running the inversion.
+    Note that a separate directive is used for updating the trade-off parameter at successive
+    beta iterations; see :class:`BetaSchedule`.
+
+    Parameters
+    ----------
+    beta0_ratio : float
+        Desired ratio between data misfit and model objective function at initial beta iteration.
+    n_pw_iter : int
+        Number of power iterations used to estimate largest eigenvalues.
+    random_seed : None or :class:`~simpeg.typing.RandomSeed`, optional
+        Random seed used for random sampling. It can either be an int,
+        a predefined Numpy random number generator, or any valid input to
+        ``numpy.random.default_rng``.
+
+    Notes
+    -----
+    Let :math:`\phi_d` represent the data misfit, :math:`\phi_m` represent the model
+    objective function and :math:`\mathbf{m_0}` represent the starting model. The first
+    model update is obtained by minimizing a global objective function of the form:
+
+    .. math::
+        \phi (\mathbf{m_0}) = \phi_d (\mathbf{m_0}) + \beta_0 \phi_m (\mathbf{m_0})
+
+    where :math:`\beta_0` represents the initial trade-off parameter (beta).
+    Let :math:`\gamma` define the desired ratio between the data misfit and model
+    objective functions at the initial beta iteration (defined by the 'beta0_ratio' input argument).
+    Using the power iteration approach, our initial trade-off parameter is given by:
+
+    .. math::
+        \beta_0 = \gamma \frac{\lambda_d}{\lambda_m}
+
+    where :math:`\lambda_d` is the largest eigenvalue of the Hessian of the data misfit, and
+    :math:`\lambda_m` is the largest eigenvalue of the Hessian of the model objective function.
+    For each Hessian, the largest eigenvalue is computed using power iteration. The input
+    parameter 'n_pw_iter' sets the number of power iterations used in the estimate.
+
+    For a description of the power iteration approach for estimating the largest eigenvalue,
+    see :func:`simpeg.utils.eigenvalue_by_power_iteration`.
+
+    """
+
+    def __init__(
+        self,
+        beta0_ratio=1.0,
+        n_pw_iter=4,
+        random_seed: RandomSeed | None = None,
+        **kwargs,
+    ):
+        super().__init__(beta0_ratio=beta0_ratio, random_seed=random_seed, **kwargs)
+        self.n_pw_iter = n_pw_iter
+
+    @property
+    def n_pw_iter(self):
+        """Number of power iterations for estimating largest eigenvalues.
+
+        Returns
+        -------
+        int
+            Number of power iterations for estimating largest eigenvalues.
+        """
+        return self._n_pw_iter
+
+    @n_pw_iter.setter
+    def n_pw_iter(self, value):
+        self._n_pw_iter = validate_integer("n_pw_iter", value, min_val=1)
+
+    def initialize(self):
+        rng = np.random.default_rng(seed=self.random_seed)
+
+        if self.verbose:
+            print("Calculating the beta0 parameter.")
+
+        m = self.invProb.model
+
+        dm_eigenvalue = eigenvalue_by_power_iteration(
+            self.dmisfit,
+            m,
+            n_pw_iter=self.n_pw_iter,
+            random_seed=rng,
+        )
+        reg_eigenvalue = eigenvalue_by_power_iteration(
+            self.reg,
+            m,
+            n_pw_iter=self.n_pw_iter,
+            random_seed=rng,
+        )
+
+        self.ratio = np.asarray(dm_eigenvalue / reg_eigenvalue)
+        self.beta0 = self.beta0_ratio * self.ratio
+        self.invProb.beta = self.beta0
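
# Self-contained sketch, not part of the changeset: the power-iteration idea
# behind eigenvalue_by_power_iteration, for a symmetric operator given as a
# matrix-vector product.
import numpy as np

def largest_eigenvalue(matvec, n, n_pw_iter=4, seed=None):
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(n)
    x /= np.linalg.norm(x)
    for _ in range(n_pw_iter):
        y = matvec(x)
        x = y / np.linalg.norm(y)
    return x @ matvec(x)  # Rayleigh quotient estimate of lambda_max
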
+
+
+class BetaSchedule(InversionDirective):
+    """Reduce trade-off parameter (beta) at successive iterations using a cooling schedule.
+
+    Updates the **beta** property in the associated :class:`simpeg.inverse_problem.BaseInvProblem`
+    while the inversion is running.
+    For linear least-squares problems, the optimization problem can be solved in a
+    single step and the cooling rate can be set to *1*. For non-linear optimization
+    problems, multiple steps are required to obtain the minimizer for a fixed trade-off
+    parameter. In this case, the cooling rate should be larger than 1.
+
+    Parameters
+    ----------
+    coolingFactor : float
+        The factor by which the trade-off parameter is decreased when updated.
+        The preexisting value of the trade-off parameter is divided by the cooling factor.
+    coolingRate : int
+        Sets the number of successive iterations before the trade-off parameter is reduced.
+        Use *1* for linear least-squares optimization problems. Use *2* for weakly non-linear
+        optimization problems. Use *3* for general non-linear optimization problems.
+
+    """
+
+    def __init__(self, coolingFactor=8.0, coolingRate=3, **kwargs):
+        super().__init__(**kwargs)
+        self.coolingFactor = coolingFactor
+        self.coolingRate = coolingRate
+
+    @property
+    def coolingFactor(self):
+        """Beta is divided by this value every `coolingRate` iterations.
+
+        Returns
+        -------
+        float
+        """
+        return self._coolingFactor
+
+    @coolingFactor.setter
+    def coolingFactor(self, value):
+        self._coolingFactor = validate_float(
+            "coolingFactor", value, min_val=0.0, inclusive_min=False
+        )
+
+    @property
+    def coolingRate(self):
+        """Cool after this number of iterations.
+
+        Returns
+        -------
+        int
+        """
+        return self._coolingRate
+
+    @coolingRate.setter
+    def coolingRate(self, value):
+        self._coolingRate = validate_integer("coolingRate", value, min_val=1)
+
+    def endIter(self):
+        it = self.opt.iter
+        if 0 < it < self.opt.maxIter and it % self.coolingRate == 0:
+            if self.verbose:
+                print(
+                    "BetaSchedule is cooling Beta. Iteration: {0:d}".format(
+                        self.opt.iter
+                    )
+                )
+            self.invProb.beta /= self.coolingFactor
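
# Not part of the changeset: the schedule in `endIter` above in closed form.
# For 0 < k < maxIter, after iteration k the trade-off parameter is
#     beta_k = beta0 / coolingFactor ** (k // coolingRate)
# e.g. coolingFactor=8, coolingRate=3: beta is divided by 8 at iterations
# 3, 6, 9, ...
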
+
+
+class AlphasSmoothEstimate_ByEig(InversionDirective):
+    """
+    Estimate the alphas multipliers for the smoothness terms of the regularization
+    as a multiple of the ratio between the highest eigenvalue of the
+    smallness term and the highest eigenvalue of each smoothness term of the regularization.
+    The highest eigenvalues are estimated through power iterations and the Rayleigh quotient.
+    """
+
+    def __init__(
+        self,
+        alpha0_ratio=1.0,
+        n_pw_iter=4,
+        random_seed: RandomSeed | None = None,
+        **kwargs,
+    ):
+        # Deprecate seed argument
+        if kwargs.pop("seed", None) is not None:
+            raise TypeError(
+                "'seed' has been removed in "
+                "SimPEG v0.24.0, please use 'random_seed' instead.",
+            )
+        super().__init__(**kwargs)
+        self.alpha0_ratio = alpha0_ratio
+        self.n_pw_iter = n_pw_iter
+        self.random_seed = random_seed
+
+    @property
+    def alpha0_ratio(self):
+        """The estimated alpha_smooth is multiplied by this ratio (int or array).
+
+        Returns
+        -------
+        numpy.ndarray
+        """
+        return self._alpha0_ratio
+
+    @alpha0_ratio.setter
+    def alpha0_ratio(self, value):
+        self._alpha0_ratio = validate_ndarray_with_shape(
+            "alpha0_ratio", value, shape=("*",)
+        )
+
+    @property
+    def n_pw_iter(self):
+        """Number of power iterations for estimation.
+
+        Returns
+        -------
+        int
+        """
+        return self._n_pw_iter
+
+    @n_pw_iter.setter
+    def n_pw_iter(self, value):
+        self._n_pw_iter = validate_integer("n_pw_iter", value, min_val=1)
+
+    @property
+    def random_seed(self):
+        """Random seed to initialize with.
+
+        Returns
+        -------
+        int, numpy.random.Generator or None
+        """
+        return self._random_seed
+
+    @random_seed.setter
+    def random_seed(self, value):
+        try:
+            np.random.default_rng(value)
+        except TypeError as err:
+            msg = (
+                "Unable to initialize the random number generator with "
+                f"a {type(value).__name__}"
+            )
+            raise TypeError(msg) from err
+        self._random_seed = value
+
+    seed = deprecate_property(
+        random_seed,
+        "seed",
+        "random_seed",
+        removal_version="0.24.0",
+        error=True,
+    )
+
+    def initialize(self):
+        """"""
+        rng = np.random.default_rng(seed=self.random_seed)
+
+        smoothness = []
+        smallness = []
+        parents = {}
+        for regobjcts in self.reg.objfcts:
+            if isinstance(regobjcts, ComboObjectiveFunction):
+                objfcts = regobjcts.objfcts
+            else:
+                objfcts = [regobjcts]
+
+            for obj in objfcts:
+                if isinstance(
+                    obj,
+                    (
+                        Smallness,
+                        SparseSmallness,
+                        PGIsmallness,
+                    ),
+                ):
+                    smallness += [obj]
+
+                elif isinstance(obj, (SmoothnessFirstOrder, SparseSmoothness)):
+                    parents[obj] = regobjcts
+                    smoothness += [obj]
+
+        if len(smallness) == 0:
+            raise UserWarning(
+                "Directive 'AlphasSmoothEstimate_ByEig' requires a regularization with at least one Small instance."
+            )
+
+        smallness_eigenvalue = eigenvalue_by_power_iteration(
+            smallness[0],
+            self.invProb.model,
+            n_pw_iter=self.n_pw_iter,
+            random_seed=rng,
+        )
+
+        self.alpha0_ratio = self.alpha0_ratio * np.ones(len(smoothness))
+
+        if len(self.alpha0_ratio) != len(smoothness):
+            raise ValueError(
+                f"Input values for 'alpha0_ratio' should be of len({len(smoothness)}). Provided {self.alpha0_ratio}"
+            )
+
+        alphas = []
+        for user_alpha, obj in zip(self.alpha0_ratio, smoothness):
+            smooth_i_eigenvalue = eigenvalue_by_power_iteration(
+                obj,
+                self.invProb.model,
+                n_pw_iter=self.n_pw_iter,
+                random_seed=rng,
+            )
+            ratio = smallness_eigenvalue / smooth_i_eigenvalue
+
+            mtype = obj._multiplier_pair
+
+            new_alpha = getattr(parents[obj], mtype) * user_alpha * ratio
+            setattr(parents[obj], mtype, new_alpha)
+            alphas += [new_alpha]
+
+        if self.verbose:
+            print(f"Alpha scales: {alphas}")
+
+
+class ScalingMultipleDataMisfits_ByEig(InversionDirective):
+    """
+    For multiple data misfits only: multiply each data misfit term
+    by the inverse of its highest eigenvalue and then
+    normalize the sum of the data misfit multipliers to one.
+    The highest eigenvalues are estimated through power iterations and the Rayleigh quotient.
+    """
+
+    def __init__(
+        self,
+        chi0_ratio=None,
+        n_pw_iter=4,
+        random_seed: RandomSeed | None = None,
+        **kwargs,
+    ):
+        # Deprecate seed argument
+        if kwargs.pop("seed", None) is not None:
+            raise TypeError(
+                "'seed' has been removed in "
+                "SimPEG v0.24.0, please use 'random_seed' instead.",
+            )
+        super().__init__(**kwargs)
+        self.chi0_ratio = chi0_ratio
+        self.n_pw_iter = n_pw_iter
+        self.random_seed = random_seed
+
+    @property
+    def chi0_ratio(self):
+        """The estimated data misfit multipliers are scaled by this ratio (int or array).
+
+        Returns
+        -------
+        numpy.ndarray
+        """
+        return self._chi0_ratio
+
+    @chi0_ratio.setter
+    def chi0_ratio(self, value):
+        if value is not None:
+            value = validate_ndarray_with_shape("chi0_ratio", value, shape=("*",))
+        self._chi0_ratio = value
+
+    @property
+    def n_pw_iter(self):
+        """Number of power iterations for estimation.
+
+        Returns
+        -------
+        int
+        """
+        return self._n_pw_iter
+
+    @n_pw_iter.setter
+    def n_pw_iter(self, value):
+        self._n_pw_iter = validate_integer("n_pw_iter", value, min_val=1)
+
+    @property
+    def random_seed(self):
+        """Random seed to initialize with.
+
+        Returns
+        -------
+        int, numpy.random.Generator or None
+        """
+        return self._random_seed
+
+    @random_seed.setter
+    def random_seed(self, value):
+        try:
+            np.random.default_rng(value)
+        except TypeError as err:
+            msg = (
+                "Unable to initialize the random number generator with "
+                f"a {type(value).__name__}"
+            )
+            raise TypeError(msg) from err
+        self._random_seed = value
+
+    seed = deprecate_property(
+        random_seed,
+        "seed",
+        "random_seed",
+        removal_version="0.24.0",
+        error=True,
+    )
+
+    def initialize(self):
+        """"""
+        rng = np.random.default_rng(seed=self.random_seed)
+
+        if self.verbose:
+            print("Calculating the scaling parameter.")
+
+        if (
+            getattr(self.dmisfit, "objfcts", None) is None
+            or len(self.dmisfit.objfcts) == 1
+        ):
+            raise TypeError(
+                "ScalingMultipleDataMisfits_ByEig only applies to joint inversion"
+            )
+
+        ndm = len(self.dmisfit.objfcts)
+        if self.chi0_ratio is not None:
+            self.chi0_ratio = self.chi0_ratio * np.ones(ndm)
+        else:
+            self.chi0_ratio = self.dmisfit.multipliers
+
+        m = self.invProb.model
+
+        dm_eigenvalue_list = []
+        for dm in self.dmisfit.objfcts:
+            dm_eigenvalue_list += [
+                eigenvalue_by_power_iteration(dm, m, random_seed=rng)
+            ]
+
+        self.chi0 = self.chi0_ratio / np.r_[dm_eigenvalue_list]
+        self.chi0 = self.chi0 / np.sum(self.chi0)
+        self.dmisfit.multipliers = self.chi0
+
+        if self.verbose:
+            print("Scale Multipliers: ", self.dmisfit.multipliers)
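
# Not part of the changeset: the rescaling in `initialize` above, in compact
# form. `lambdas` stands in for the per-misfit largest eigenvalues estimated
# by power iteration.
import numpy as np

lambdas = np.array([4.0, 1.0])  # illustrative eigenvalue estimates
chi0 = np.ones(2) / lambdas     # inverse of the highest eigenvalue of each term
chi0 /= chi0.sum()              # normalize multipliers so they sum to one
# -> array([0.2, 0.8]); the poorly scaled misfit receives the larger weight
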
+
+
+class JointScalingSchedule(InversionDirective):
+    """
+    For multiple data misfits only: rebalance each data misfit term
+    during the inversion when some datasets are fit and others are not,
+    using the ratios of the current misfits and their respective targets.
+    It implements the strategy described in https://doi.org/10.1093/gji/ggaa378.
+    """
+
+    def __init__(
+        self, warmingFactor=1.0, chimax=1e10, chimin=1e-10, update_rate=1, **kwargs
+    ):
+        super().__init__(**kwargs)
+        self.mode = 1
+        self.warmingFactor = warmingFactor
+        self.chimax = chimax
+        self.chimin = chimin
+        self.update_rate = update_rate
+
+    @property
+    def mode(self):
+        """The type of update to perform.
+
+        Returns
+        -------
+        {1, 2}
+        """
+        return self._mode
+
+    @mode.setter
+    def mode(self, value):
+        self._mode = validate_integer("mode", value, min_val=1, max_val=2)
+
+    @property
+    def warmingFactor(self):
+        """Factor to adjust scaling of the data misfits by.
+
+        Returns
+        -------
+        float
+        """
+        return self._warmingFactor
+
+    @warmingFactor.setter
+    def warmingFactor(self, value):
+        self._warmingFactor = validate_float(
+            "warmingFactor", value, min_val=0.0, inclusive_min=False
+        )
+
+    @property
+    def chimax(self):
+        """Maximum chi factor.
+
+        Returns
+        -------
+        float
+        """
+        return self._chimax
+
+    @chimax.setter
+    def chimax(self, value):
+        self._chimax = validate_float("chimax", value, min_val=0.0, inclusive_min=False)
+
+    @property
+    def chimin(self):
+        """Minimum chi factor.
+
+        Returns
+        -------
+        float
+        """
+        return self._chimin
+
+    @chimin.setter
+    def chimin(self, value):
+        self._chimin = validate_float("chimin", value, min_val=0.0, inclusive_min=False)
+
+    @property
+    def update_rate(self):
+        """Will update the data misfit scalings after this many iterations.
+
+        Returns
+        -------
+        int
+        """
+        return self._update_rate
+
+    @update_rate.setter
+    def update_rate(self, value):
+        self._update_rate = validate_integer("update_rate", value, min_val=1)
+
+    def initialize(self):
+        if (
+            getattr(self.dmisfit, "objfcts", None) is None
+            or len(self.dmisfit.objfcts) == 1
+        ):
+            raise TypeError("JointScalingSchedule only applies to joint inversion")
+
+        targetclass = np.r_[
+            [
+                isinstance(dirpart, MultiTargetMisfits)
+                for dirpart in self.inversion.directiveList.dList
+            ]
+        ]
+        if ~np.any(targetclass):
+            self.DMtarget = None
+        else:
+            self.targetclass = np.where(targetclass)[0][-1]
+            self.DMtarget = self.inversion.directiveList.dList[
+                self.targetclass
+            ].DMtarget
+
+        if self.verbose:
+            print("Initial data misfit scales: ", self.dmisfit.multipliers)
+
+    def endIter(self):
+        self.dmlist = self.inversion.directiveList.dList[self.targetclass].dmlist
+
+        if np.any(self.dmlist < self.DMtarget):
+            self.mode = 2
+        else:
+            self.mode = 1
+
+        if self.opt.iter > 0 and self.opt.iter % self.update_rate == 0:
+            if self.mode == 2:
+                if np.all(np.r_[self.dmisfit.multipliers] > self.chimin) and np.all(
+                    np.r_[self.dmisfit.multipliers] < self.chimax
+                ):
+                    indx = self.dmlist > self.DMtarget
+                    if np.any(indx):
+                        multipliers = self.warmingFactor * np.median(
+                            self.DMtarget[~indx] / self.dmlist[~indx]
+                        )
+                        if np.sum(indx) == 1:
+                            indx = np.where(indx)[0][0]
+                        self.dmisfit.multipliers[indx] *= multipliers
+                        self.dmisfit.multipliers /= np.sum(self.dmisfit.multipliers)
+
+                        if self.verbose:
+                            print("Updating scaling for data misfits by ", multipliers)
+                            print("New scales:", self.dmisfit.multipliers)
+
+
+class TargetMisfit(InversionDirective):
+    """
+    .. note:: Currently this target misfit is not set up for joint inversion.
+        Check out MultiTargetMisfits.
+    """
+
+    def __init__(self, target=None, phi_d_star=None, chifact=1.0, **kwargs):
+        super().__init__(**kwargs)
+        self.chifact = chifact
+        self.phi_d_star = phi_d_star
+        if phi_d_star is not None and target is not None:
+            raise AttributeError("Attempted to set both target and phi_d_star.")
+        if target is not None:
+            self.target = target
+
+    @property
+    def target(self):
+        """The target value for the data misfit.
+
+        Returns
+        -------
+        float
+        """
+        if getattr(self, "_target", None) is None:
+            self._target = self.chifact * self.phi_d_star
+        return self._target
+
+    @target.setter
+    def target(self, val):
+        self._target = validate_float("target", val, min_val=0.0, inclusive_min=False)
+
+    @property
+    def chifact(self):
+        """A multiplier for the target data misfit value.
+
+        The target value is `chifact` times `phi_d_star`.
+
+        Returns
+        -------
+        float
+        """
+        return self._chifact
+
+    @chifact.setter
+    def chifact(self, value):
+        self._chifact = validate_float(
+            "chifact", value, min_val=0.0, inclusive_min=False
+        )
+        self._target = None
+
+    @property
+    def phi_d_star(self):
+        """The target phi_d value for the data misfit.
+
+        The target value is `chifact` times `phi_d_star`.
+
+        Returns
+        -------
+        float
+        """
+        # phid = ||dpred - dobs||^2
+        if self._phi_d_star is None:
+            nD = 0
+            for survey in self.survey:
+                nD += survey.nD
+            self._phi_d_star = nD
+        return self._phi_d_star
+
+    @phi_d_star.setter
+    def phi_d_star(self, value):
+        # phid = ||dpred - dobs||^2
+        if value is not None:
+            value = validate_float(
+                "phi_d_star", value, min_val=0.0, inclusive_min=False
+            )
+        self._phi_d_star = value
+        self._target = None
+
+    def initialize(self):
+        logger = get_logger()
+        logger.info(
+            f"Directive {self.__class__.__name__}: Target data misfit is {self.target}"
+        )
+
+    def endIter(self):
+        if self.invProb.phi_d < self.target:
+            self.opt.stopNextIteration = True
+            self.print_final_misfit()
+
+    def print_final_misfit(self):
+        if self.opt.print_type == "ubc":
+            self.opt.print_target = (
+                ">> Target misfit: %.1f (# of data) is achieved"
+            ) % (self.target)
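
# Not part of the changeset: how the stopping value above is assembled. With
# no explicit `target` or `phi_d_star`, the directive falls back to
# phi_d_star = total number of data, and stops once
#     phi_d < chifact * phi_d_star
# assuming uncertainties were used to build the misfit weights so that each
# datum contributes about 1 to phi_d.
target = TargetMisfit(chifact=1.0)  # stop at phi_d < n_data
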
+
+
+class MultiTargetMisfits(InversionDirective):
+    def __init__(
+        self,
+        WeightsInTarget=False,
+        chifact=1.0,
+        phi_d_star=None,
+        TriggerSmall=True,
+        chiSmall=1.0,
+        phi_ms_star=None,
+        TriggerTheta=False,
+        ToleranceTheta=1.0,
+        distance_norm=np.inf,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+
+        self.WeightsInTarget = WeightsInTarget
+        # Chi factor for Geophysical Data Misfit
+        self.chifact = chifact
+        self.phi_d_star = phi_d_star
+
+        # Chifact for Clustering/Smallness
+        self.TriggerSmall = TriggerSmall
+        self.chiSmall = chiSmall
+        self.phi_ms_star = phi_ms_star
+
+        # Tolerance for parameters difference with their priors
+        self.TriggerTheta = TriggerTheta  # deactivated by default
+        self.ToleranceTheta = ToleranceTheta
+        self.distance_norm = distance_norm
+
+        self._DM = False
+        self._CL = False
+        self._DP = False
+
+    @property
+    def WeightsInTarget(self):
+        """Whether to account for weights in the petrophysical misfit.
+
+        Returns
+        -------
+        bool
+        """
+        return self._WeightsInTarget
+
+    @WeightsInTarget.setter
+    def WeightsInTarget(self, value):
+        self._WeightsInTarget = validate_type("WeightsInTarget", value, bool)
+
+    @property
+    def chifact(self):
+        """A multiplier for the target Geophysical data misfit value.
+
+        The target value is `chifact` times `phi_d_star`.
+
+        Returns
+        -------
+        numpy.ndarray
+        """
+        return self._chifact
+
+    @chifact.setter
+    def chifact(self, value):
+        self._chifact = validate_ndarray_with_shape("chifact", value, shape=("*",))
+        self._DMtarget = None
+
+    @property
+    def phi_d_star(self):
+        """The target phi_d value for the Geophysical data misfit.
+
+        The target value is `chifact` times `phi_d_star`.
+
+        Returns
+        -------
+        float
+        """
+        # phid = ||dpred - dobs||^2
+        if getattr(self, "_phi_d_star", None) is None:
+            # Check if it is a ComboObjective
+            if isinstance(self.dmisfit, ComboObjectiveFunction):
+                value = np.r_[[survey.nD for survey in self.survey]]
+            else:
+                value = np.r_[[self.survey.nD]]
+            self._phi_d_star = value
+            self._DMtarget = None
+
+        return self._phi_d_star
+
+    @phi_d_star.setter
+    def phi_d_star(self, value):
+        # phid = ||dpred - dobs||^2
+        if value is not None:
+            value = validate_ndarray_with_shape("phi_d_star", value, shape=("*",))
+        self._phi_d_star = value
+        self._DMtarget = None
+
+    @property
+    def chiSmall(self):
+        """A multiplier for the target petrophysical misfit value.
+
+        The target value is `chiSmall` times `phi_ms_star`.
+
+        Returns
+        -------
+        float
+        """
+        return self._chiSmall
+
+    @chiSmall.setter
+    def chiSmall(self, value):
+        self._chiSmall = validate_float("chiSmall", value)
+        self._CLtarget = None
+
+    @property
+    def phi_ms_star(self):
+        """The target value for the petrophysical data misfit.
+
+        The target value is `chiSmall` times `phi_ms_star`.
+
+        Returns
+        -------
+        float
+        """
+        return self._phi_ms_star
+
+    @phi_ms_star.setter
+    def phi_ms_star(self, value):
+        if value is not None:
+            value = validate_float("phi_ms_star", value)
+        self._phi_ms_star = value
+        self._CLtarget = None
+
+    @property
+    def TriggerSmall(self):
+        """Whether to trigger the smallness misfit test.
+
+        Returns
+        -------
+        bool
+        """
+        return self._TriggerSmall
+
+    @TriggerSmall.setter
+    def TriggerSmall(self, value):
+        self._TriggerSmall = validate_type("TriggerSmall", value, bool)
+
+    @property
+    def TriggerTheta(self):
+        """Whether to trigger the GMM misfit test.
+
+        Returns
+        -------
+        bool
+        """
+        return self._TriggerTheta
+
+    @TriggerTheta.setter
+    def TriggerTheta(self, value):
+        self._TriggerTheta = validate_type("TriggerTheta", value, bool)
+
+    @property
+    def ToleranceTheta(self):
+        """Target value for the GMM misfit.
+
+        Returns
+        -------
+        float
+        """
+        return self._ToleranceTheta
+
+    @ToleranceTheta.setter
+    def ToleranceTheta(self, value):
+        self._ToleranceTheta = validate_float("ToleranceTheta", value, min_val=0.0)
+
+    @property
+    def distance_norm(self):
+        """Distance norm to use for GMM misfit measure.
+
+        Returns
+        -------
+        float
+        """
+        return self._distance_norm
+
+    @distance_norm.setter
+    def distance_norm(self, value):
+        self._distance_norm = validate_float("distance_norm", value, min_val=0.0)
+
+    def initialize(self):
+        self.dmlist = np.r_[[dmis(self.invProb.model) for dmis in self.dmisfit.objfcts]]
+
+        if getattr(self.invProb.reg.objfcts[0], "objfcts", None) is not None:
+            smallness = np.r_[
+                [
+                    (
+                        np.r_[
+                            i,
+                            j,
+                            isinstance(regpart, PGIsmallness),
+                        ]
+                    )
+                    for i, regobjcts in enumerate(self.invProb.reg.objfcts)
+                    for j, regpart in enumerate(regobjcts.objfcts)
+                ]
+            ]
+            if smallness[smallness[:, 2] == 1][:, :2].size == 0:
+                warnings.warn(
+                    "There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)",
+                    stacklevel=2,
+                )
+                self.smallness = -1
+                self.pgi_smallness = None
+
+            else:
+                self.smallness = smallness[smallness[:, 2] == 1][:, :2][0]
+                self.pgi_smallness = self.invProb.reg.objfcts[
+                    self.smallness[0]
+                ].objfcts[self.smallness[1]]
+
+                if self.verbose:
+                    print(
+                        type(
+                            self.invProb.reg.objfcts[self.smallness[0]].objfcts[
+                                self.smallness[1]
+                            ]
+                        )
+                    )
+
+            self._regmode = 1
+
+        else:
+            smallness = np.r_[
+                [
+                    (
+                        np.r_[
+                            j,
+                            isinstance(regpart, PGIsmallness),
+                        ]
+                    )
+                    for j, regpart in enumerate(self.invProb.reg.objfcts)
+                ]
+            ]
+            if smallness[smallness[:, 1] == 1][:, :1].size == 0:
+                if self.TriggerSmall:
+                    warnings.warn(
+                        "There is no PGI regularization. Smallness target is turned off (TriggerSmall flag).",
+                        stacklevel=2,
+                    )
+                    self.TriggerSmall = False
+                self.smallness = -1
+            else:
+                self.smallness = smallness[smallness[:, 1] == 1][:, :1][0]
+                self.pgi_smallness = self.invProb.reg.objfcts[self.smallness[0]]
+
+                if self.verbose:
+                    print(type(self.invProb.reg.objfcts[self.smallness[0]]))
+
+            self._regmode = 2
+
+    @property
+    def DM(self):
+        """Whether the geophysical data misfit target was satisfied.
+
+        Returns
+        -------
+        bool
+        """
+        return self._DM
+
+    @property
+    def CL(self):
+        """Whether the petrophysical misfit target was satisfied.
+
+        Returns
+        -------
+        bool
+        """
+        return self._CL
+
+    @property
+    def DP(self):
+        """Whether the GMM misfit was below the threshold.
+
+        Returns
+        -------
+        bool
+        """
+        return self._DP
+
+    @property
+    def AllStop(self):
+        """Whether all target misfit values have been met.
+
+        Returns
+        -------
+        bool
+        """
+
+        return self.DM and self.CL and self.DP
+
+    @property
+    def DMtarget(self):
+        if getattr(self, "_DMtarget", None) is None:
+            self._DMtarget = self.chifact * self.phi_d_star
+        return self._DMtarget
+
+    @DMtarget.setter
+    def DMtarget(self, val):
+        self._DMtarget = val
+
+    @property
+    def CLtarget(self):
+        if not getattr(self.pgi_smallness, "approx_eval", True):
+            # if nonlinear prior, compute target numerically at each GMM update
+            samples, _ = self.pgi_smallness.gmm.sample(
+                len(self.pgi_smallness.gmm.cell_volumes)
+            )
+            self.phi_ms_star = self.pgi_smallness(
+                mkvc(samples), externalW=self.WeightsInTarget
+            )
+
+            self._CLtarget = self.chiSmall * self.phi_ms_star
+
+        elif getattr(self, "_CLtarget", None) is None:
+            # phid = ||dpred - dobs||^2
+            if self.phi_ms_star is None:
+                # Expected value is number of active cells * number of physical
+                # properties
+                self.phi_ms_star = len(self.invProb.model)
+
+            self._CLtarget = self.chiSmall * self.phi_ms_star
+
+        return self._CLtarget
+
+    @property
+    def CLnormalizedConstant(self):
+        if ~self.WeightsInTarget:
+            return 1.0
+        elif np.any(self.smallness == -1):
+            return np.sum(
+                sp.csr_matrix.diagonal(self.invProb.reg.objfcts[0].W) ** 2.0
+            ) / len(self.invProb.model)
+        else:
+            return np.sum(sp.csr_matrix.diagonal(self.pgi_smallness.W) ** 2.0) / len(
+                self.invProb.model
+            )
+
+    @CLtarget.setter
+    def CLtarget(self, val):
+        self._CLtarget = val
+
+    def phims(self):
+        if np.any(self.smallness == -1):
+            return self.invProb.reg.objfcts[0](self.invProb.model)
+        else:
+            return (
+                self.pgi_smallness(
+                    self.invProb.model, external_weights=self.WeightsInTarget
+                )
+                / self.CLnormalizedConstant
+            )
+
+    def ThetaTarget(self):
+        maxdiff = 0.0
+
+        for i in range(self.invProb.reg.gmm.n_components):
+            meandiff = np.linalg.norm(
+                (self.invProb.reg.gmm.means_[i] - self.invProb.reg.gmmref.means_[i])
+                / self.invProb.reg.gmmref.means_[i],
+                ord=self.distance_norm,
+            )
+            maxdiff = np.maximum(maxdiff, meandiff)
+
+            if (
+                self.invProb.reg.gmm.covariance_type == "full"
+                or self.invProb.reg.gmm.covariance_type == "spherical"
+            ):
+                covdiff = np.linalg.norm(
+                    (
+                        self.invProb.reg.gmm.covariances_[i]
+                        - self.invProb.reg.gmmref.covariances_[i]
+                    )
+                    / self.invProb.reg.gmmref.covariances_[i],
+                    ord=self.distance_norm,
+                )
+            else:
+                covdiff = np.linalg.norm(
+                    (
+                        self.invProb.reg.gmm.covariances_
+                        - self.invProb.reg.gmmref.covariances_
+                    )
+                    / self.invProb.reg.gmmref.covariances_,
+                    ord=self.distance_norm,
+                )
+            maxdiff = np.maximum(maxdiff, covdiff)
+
+            pidiff = np.linalg.norm(
+                [
+                    (
+                        self.invProb.reg.gmm.weights_[i]
+                        - self.invProb.reg.gmmref.weights_[i]
+                    )
+                    / self.invProb.reg.gmmref.weights_[i]
+                ],
+                ord=self.distance_norm,
+            )
+            maxdiff = np.maximum(maxdiff, pidiff)
+
+        return maxdiff
+
+    def endIter(self):
+        self._DM = False
+        self._CL = True
+        self._DP = True
+        self.dmlist = np.r_[[dmis(self.invProb.model) for dmis in self.dmisfit.objfcts]]
+        self.targetlist = np.r_[
+            [dm < tgt for dm, tgt in zip(self.dmlist, self.DMtarget)]
+        ]
+
+        if np.all(self.targetlist):
+            self._DM = True
+
+        if self.TriggerSmall and np.any(self.smallness != -1):
+            if self.phims() > self.CLtarget:
+                self._CL = False
+
+        if self.TriggerTheta:
+            if self.ThetaTarget() > self.ToleranceTheta:
+                self._DP = False
+
+        if self.verbose:
+            message = "geophys. misfits: " + "; ".join(
+                map(
+                    str,
+                    [
+                        "{0} (target {1} [{2}])".format(val, tgt, cond)
+                        for val, tgt, cond in zip(
+                            np.round(self.dmlist, 1),
+                            np.round(self.DMtarget, 1),
+                            self.targetlist,
+                        )
+                    ],
+                )
+            )
+            if self.TriggerSmall:
+                message += (
+                    " | smallness misfit: {0:.1f} (target: {1:.1f} [{2}])".format(
+                        self.phims(), self.CLtarget, self.CL
+                    )
+                )
+            if self.TriggerTheta:
+                message += " | GMM parameters within tolerance: {}".format(self.DP)
+            print(message)
+
+        if self.AllStop:
+            self.opt.stopNextIteration = True
+            if self.verbose:
+                print("All targets have been reached")
+
+
+class SaveEveryIteration(InversionDirective, metaclass=ABCMeta):
+    """SaveEveryIteration
+
+    This directive saves information at each iteration.
+
+    Parameters
+    ----------
+    directory : pathlib.Path or str, optional
+        The directory to store output information to, defaults to current directory.
+    name : str, optional
+        Root of the filename to be saved, commonly this will get iteration specific
+        details appended to it.
+    on_disk : bool, optional
+        Whether this directive will save a log file to disk.
+    """
+
+    def __init__(self, directory=".", name="InversionModel", on_disk=True, **kwargs):
+        self._on_disk = validate_type("on_disk", on_disk, bool)
+
+        super().__init__(**kwargs)
+        if self.on_disk:
+            self.directory = directory
+        else:
+            self.directory = None
+        self.name = name
+        self._time_string_format = "%Y-%m-%d-%H-%M"
+        self._iter_format = "03d"
+        self._iter_string = "###"
+        self._start_time = self._time_string_format
+
+    def initialize(self):
+        self._start_time = datetime.now().strftime(self._time_string_format)
+        if opt := getattr(self, "opt", None):
+            max_digit = len(str(opt.maxIter))
+            self._iter_format = f"0{max_digit}d"
+
+    @property
+    def on_disk(self) -> bool:
+        """Whether this object stores information to `file_abs_path`."""
+        return self._on_disk
+
+    @on_disk.setter
+    def on_disk(self, value):
+        self._on_disk = validate_type("on_disk", value, bool)
+
+    @property
+    def directory(self) -> pathlib.Path:
+        """Directory to save results in.
+
+        Returns
+        -------
+        pathlib.Path
+        """
+        if not self.on_disk:
+            raise AttributeError(
+                f"'{type(self).__qualname__}.directory' is only available if saving to disk."
+            )
+        return self._directory
+
+    @directory.setter
+    def directory(self, value):
+        if value is None and self.on_disk:
+            raise ValueError("Directory is not optional if 'on_disk==True'.")
+        if value is not None:
+            value = validate_type("directory", value, pathlib.Path).resolve()
+        self._directory = value
+
+    @property
+    def name(self) -> str:
+        """Root of the filename to be saved.
+
+        Returns
+        -------
+        str
+        """
+        return self._name
+
+    @name.setter
+    def name(self, value):
+        self._name = validate_string("name", value)
+
+    @property
+    def _time_iter_file_name(self) -> pathlib.Path:
+        time_string = self._start_time
+        if not getattr(self, "opt", None):
+            iter_string = "###"
+        else:
+            itr = getattr(self.opt, "iter", 0)
+            iter_string = f"{itr:{self._iter_format}}"
+
+        return pathlib.Path(f"{self.name}_{time_string}_{iter_string}")
+
+    @property
+    def _time_file_name(self) -> pathlib.Path:
+        return pathlib.Path(f"{self.name}_{self._start_time}")
+
+    def _mkdir_and_check_output_file(self, should_exist=False):
+        """
+        Use this to ensure a directory exists, and to check if file_abs_path exists.
+
+        Issues a warning if the output file exists but should not,
+        or if it doesn't exist but should.
+
+        Parameters
+        ----------
+        should_exist : bool, optional
+            Whether file_abs_path should exist.
+        """
+        self.directory.mkdir(exist_ok=True)
+        fp = self.file_abs_path
+        exists = fp.exists()
+        if exists and not should_exist:
+            warnings.warn(f"Overwriting file {fp}", UserWarning, stacklevel=2)
+        if not exists and should_exist:
+            warnings.warn(
+                f"File {fp} was not found, creating a new one.",
+                UserWarning,
+                stacklevel=2,
+            )
+
+    @property
+    def fileName(self):
+        warnings.warn(
+            "'fileName' has been deprecated and will be removed in SimPEG 0.26.0, "
+            "use 'file_abs_path' instead.",
+            FutureWarning,
+            stacklevel=2,
+        )
+        return self.file_abs_path.stem
+
+    @property
+    @abstractmethod
+    def file_abs_path(self) -> pathlib.Path:
+        """The absolute path to the saved output file.
+
+        Returns
+        -------
+        pathlib.Path
+        """
+
+
+class SaveModelEveryIteration(SaveEveryIteration):
+    """Saves the inversion model at the end of every iteration to a directory.
+
+    Parameters
+    ----------
+    directory : pathlib.Path or str, optional
+        The directory to store output information to, defaults to current directory.
+    name : str, optional
+        Root of the filename to be saved, defaults to ``'InversionModel'``.
+
+    Notes
+    -----
+
+    This directive saves the model as a numpy array at each iteration. The
+    default directory is the current directory and the models are saved as
+    `name` + ``'_YYYY-MM-DD-HH-MM_iter.npy'``
+    """
+
+    def __init__(self, **kwargs):
+        if "on_disk" in kwargs:
+            msg = (
+                f"The 'on_disk' argument is ignored by the '{type(self).__name__}' "
+                "directive, it's always True."
+            )
+            warnings.warn(msg, UserWarning, stacklevel=2)
+            kwargs.pop("on_disk")
+        super().__init__(on_disk=True, **kwargs)
+
+    def initialize(self):
+        super().initialize()
+        print(
+            f"{type(self).__qualname__} will save your models as: "
+            f"'{self.file_abs_path}'"
+        )
+
+    @property
+    def on_disk(self) -> bool:
+        """This class always saves to disk.
+
+        Returns
+        -------
+        bool
+        """
+        return True
+
+    @on_disk.setter
+    def on_disk(self, value):  # noqa: F811
+        """This class always saves to disk."""
+        msg = (
+            f"Cannot modify value of 'on_disk' for '{type(self).__name__}' directive. "
+            "It's always True."
+        )
+        raise AttributeError(msg)
+
+    @property
+    def file_abs_path(self) -> pathlib.Path:
+        return self.directory / self._time_iter_file_name.with_suffix(".npy")
+
+    def endIter(self):
+        self._mkdir_and_check_output_file(should_exist=False)
+        np.save(self.file_abs_path, self.opt.xc)
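
# Not part of the changeset: the on-disk naming scheme produced by
# `file_abs_path` above, illustrated with hypothetical values. With
# name="InversionModel", a start time of 2024-01-31 09:15 and maxIter=20
# (two digits), iteration 3 is written to:
#     InversionModel_2024-01-31-09-15_03.npy
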
+
+
+class SaveOutputEveryIteration(SaveEveryIteration):
+    """Keeps track of the objective function values.
+
+    Parameters
+    ----------
+    on_disk : bool, optional
+        Whether this directive additionally stores the log to a text file.
+    directory : pathlib.Path, optional
+        The directory to store output information to if `on_disk`, defaults to current directory.
+    name : str, optional
+        The root name of the file to save to, will append the inversion start time to this value.
+    """
+
+    def __init__(self, on_disk=True, **kwargs):
+        if (save_txt := kwargs.pop("save_txt", None)) is not None:
+            self.save_txt = save_txt
+            on_disk = self.save_txt
+        super().__init__(on_disk=on_disk, **kwargs)
+
+    def initialize(self):
+        super().initialize()
+        if self.on_disk:
+            fp = self.file_abs_path
+            print(
+                f"'{type(self).__qualname__}' will save your inversion "
+                f"progress to: '{fp}'"
+            )
+            self._mkdir_and_check_output_file(should_exist=False)
+            with open(fp, "w") as f:
+                f.write(f"{self._header}\n")
+        self._initialize_lists()
+
+    @property
+    def _header(self):
+        return "  #     beta     phi_d     phi_m   phi_m_small   phi_m_smooth_x   phi_m_smooth_y   phi_m_smooth_z      phi"
+
+    def _initialize_lists(self):
+        # Create a list of each
+        self.beta = []
+        self.phi_d = []
+        self.phi_m = []
+        self.phi_m_small = []
+        self.phi_m_smooth_x = []
+        self.phi_m_smooth_y = []
+        self.phi_m_smooth_z = []
+        self.phi = []
+
+    @property
+    def file_abs_path(self) -> pathlib.Path | None:
+        """The absolute path to the saved log file."""
+        if self.on_disk:
+            return self.directory / self._time_file_name.with_suffix(".txt")
+
+    save_txt = deprecate_property(
+        SaveEveryIteration.on_disk,
+        "save_txt",
+        removal_version="0.26.0",
+        future_warn=True,
+    )
+
+    def endIter(self):
+        phi_s, phi_x, phi_y, phi_z = 0, 0, 0, 0
+
+        for reg in self.reg.objfcts:
+            if isinstance(reg, Sparse):
+                i_s, i_x, i_y, i_z = 0, 1, 2, 3
+            else:
+                i_s, i_x, i_y, i_z = 0, 1, 3, 5
+            if getattr(reg, "alpha_s", None):
+                phi_s += reg.objfcts[i_s](self.invProb.model) * reg.alpha_s
+            if getattr(reg, "alpha_x", None):
+                phi_x += reg.objfcts[i_x](self.invProb.model) * reg.alpha_x
+
+            if reg.regularization_mesh.dim > 1 and getattr(reg, "alpha_y", None):
+                phi_y += reg.objfcts[i_y](self.invProb.model) * reg.alpha_y
+            if reg.regularization_mesh.dim > 2 and getattr(reg, "alpha_z", None):
+                phi_z += reg.objfcts[i_z](self.invProb.model) * reg.alpha_z
+
+        self.beta.append(self.invProb.beta)
+        self.phi_d.append(self.invProb.phi_d)
+        self.phi_m.append(self.invProb.phi_m)
+        self.phi_m_small.append(phi_s)
+        self.phi_m_smooth_x.append(phi_x)
+        self.phi_m_smooth_y.append(phi_y)
+        self.phi_m_smooth_z.append(phi_z)
+        self.phi.append(self.opt.f)
+
+        if self.on_disk:
+            self._mkdir_and_check_output_file(should_exist=True)
+            with open(self.file_abs_path, "a") as f:
+                f.write(
+                    " {0:3d} {1:1.4e} {2:1.4e} {3:1.4e} {4:1.4e} {5:1.4e} "
+                    "{6:1.4e} {7:1.4e} {8:1.4e}\n".format(
+                        self.opt.iter,
+                        self.beta[-1],
+                        self.phi_d[-1],
+                        self.phi_m[-1],
+                        self.phi_m_small[-1],
+                        self.phi_m_smooth_x[-1],
+                        self.phi_m_smooth_y[-1],
+                        self.phi_m_smooth_z[-1],
+                        self.phi[-1],
+                    )
+                )
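
# Usage sketch, not part of the changeset (names illustrative): after an
# inversion run with on_disk=True, the `load_results` method defined below
# repopulates the logged series from the text file.
save_log = SaveOutputEveryIteration(name="inv_log", directory=".")
# ... run the inversion with `save_log` in the directives list ...
save_log.load_results()          # refills beta, phi_d, phi_m, ...
save_log.plot_tikhonov_curves()
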
self.phi_m_smooth_x + self.phi_m_smooth_y + self.phi_m_smooth_z + ) + + self.target_misfit = self.invProb.dmisfit.simulation.survey.nD + self.i_target = None + + if self.invProb.phi_d < self.target_misfit: + i_target = 0 + while self.phi_d[i_target] > self.target_misfit: + i_target += 1 + self.i_target = i_target + + def plot_misfit_curves( + self, + fname=None, + dpi=300, + plot_small_smooth=False, + plot_phi_m=True, + plot_small=False, + plot_smooth=False, + ): + self.target_misfit = np.sum([dmis.nD for dmis in self.invProb.dmisfit.objfcts]) + self.i_target = None + + if self.invProb.phi_d < self.target_misfit: + i_target = 0 + while self.phi_d[i_target] > self.target_misfit: + i_target += 1 + self.i_target = i_target + + fig = plt.figure(figsize=(5, 2)) + ax = plt.subplot(111) + ax_1 = ax.twinx() + ax.semilogy( + np.arange(len(self.phi_d)), self.phi_d, "k-", lw=2, label=r"$\phi_d$" + ) + + if plot_phi_m: + ax_1.semilogy( + np.arange(len(self.phi_d)), self.phi_m, "r", lw=2, label=r"$\phi_m$" + ) + + if plot_small_smooth or plot_small: + ax_1.semilogy( + np.arange(len(self.phi_d)), self.phi_m_small, "ro", label="small" + ) + if plot_small_smooth or plot_smooth: + ax_1.semilogy( + np.arange(len(self.phi_d)), self.phi_m_smooth_x, "rx", label="smooth_x" + ) + ax_1.semilogy( + np.arange(len(self.phi_d)), self.phi_m_smooth_y, "rx", label="smooth_y" + ) + ax_1.semilogy( + np.arange(len(self.phi_d)), self.phi_m_smooth_z, "rx", label="smooth_z" + ) + + ax.legend(loc=1) + ax_1.legend(loc=2) + + ax.plot( + np.r_[ax.get_xlim()[0], ax.get_xlim()[1]], + np.ones(2) * self.target_misfit, + "k:", + ) + ax.set_xlabel("Iteration") + ax.set_ylabel(r"$\phi_d$") + ax_1.set_ylabel(r"$\phi_m$", color="r") + ax_1.tick_params(axis="y", which="both", colors="red") + + plt.show() + if fname is not None: + fig.savefig(fname, dpi=dpi) + + def plot_tikhonov_curves(self, fname=None, dpi=200): + self.target_misfit = self.invProb.dmisfit.simulation.survey.nD + self.i_target = None + + if self.invProb.phi_d < self.target_misfit: + i_target = 0 + while self.phi_d[i_target] > self.target_misfit: + i_target += 1 + self.i_target = i_target + + fig = plt.figure(figsize=(5, 8)) + ax1 = plt.subplot(311) + ax2 = plt.subplot(312) + ax3 = plt.subplot(313) + + ax1.plot(self.beta, self.phi_d, "k-", lw=2, ms=4) + ax1.set_xlim(np.hstack(self.beta).min(), np.hstack(self.beta).max()) + ax1.set_xlabel(r"$\beta$", fontsize=14) + ax1.set_ylabel(r"$\phi_d$", fontsize=14) + + ax2.plot(self.beta, self.phi_m, "k-", lw=2) + ax2.set_xlim(np.hstack(self.beta).min(), np.hstack(self.beta).max()) + ax2.set_xlabel(r"$\beta$", fontsize=14) + ax2.set_ylabel(r"$\phi_m$", fontsize=14) + + ax3.plot(self.phi_m, self.phi_d, "k-", lw=2) + ax3.set_xlim(np.hstack(self.phi_m).min(), np.hstack(self.phi_m).max()) + ax3.set_xlabel(r"$\phi_m$", fontsize=14) + ax3.set_ylabel(r"$\phi_d$", fontsize=14) + + if self.i_target is not None: + ax1.plot(self.beta[self.i_target], self.phi_d[self.i_target], "k*", ms=10) + ax2.plot(self.beta[self.i_target], self.phi_m[self.i_target], "k*", ms=10) + ax3.plot(self.phi_m[self.i_target], self.phi_d[self.i_target], "k*", ms=10) + + for ax in [ax1, ax2, ax3]: + ax.set_xscale("linear") + ax.set_yscale("linear") + plt.tight_layout() + plt.show() + if fname is not None: + fig.savefig(fname, dpi=dpi) + + +class SaveOutputDictEveryIteration(SaveEveryIteration): + """Saves inversion parameters to a dictionary at every iteration. 
+ + At the end of every iteration, information about the current iteration is + saved to the `outDict` property of this object. + + Parameters + ---------- + on_disk : bool, optional + Whether to also save the parameters to an `npz` file at the end of each iteration. + directory : pathlib.Path or str, optional + Directory to save inversion parameters to if `on_disk`, defaults to current directory. + name : str, optional + Root name of the output file. The inversion start time and the iteration are appended to this. + """ + + # Initialize the output dict + def __init__(self, on_disk=False, **kwargs): + if (save_on_disk := kwargs.pop("saveOnDisk", None)) is not None: + self.saveOnDisk = save_on_disk + on_disk = self.saveOnDisk + super().__init__(on_disk=on_disk, **kwargs) + + saveOnDisk = deprecate_property( + SaveEveryIteration.on_disk, + "saveOnDisk", + removal_version="0.26.0", + future_warn=True, + ) + + @property + def file_abs_path(self) -> pathlib.Path | None: + if self.on_disk: + return self.directory / self._time_iter_file_name.with_suffix(".npz") + + def initialize(self): + super().initialize() + self.outDict = {} + if self.on_disk: + print( + f"'{type(self).__qualname__}' will save your inversion progress as a dictionary to: " + f"'{self.file_abs_path}'" + ) + + def endIter(self): + # regCombo = ["phi_ms", "phi_msx"] + + # if self.simulation[0].mesh.dim >= 2: + # regCombo += ["phi_msy"] + + # if self.simulation[0].mesh.dim == 3: + # regCombo += ["phi_msz"] + + # Initialize the output dict + iterDict = {} + + # Save the data. + iterDict["iter"] = self.opt.iter + iterDict["beta"] = self.invProb.beta + iterDict["phi_d"] = self.invProb.phi_d + iterDict["phi_m"] = self.invProb.phi_m + + # for label, fcts in zip(regCombo, self.reg.objfcts[0].objfcts): + # iterDict[label] = fcts(self.invProb.model) + + iterDict["f"] = self.opt.f + iterDict["m"] = self.invProb.model + iterDict["dpred"] = self.invProb.dpred + + for reg in self.reg.objfcts: + if isinstance(reg, Sparse): + for reg_part, norm in zip(reg.objfcts, reg.norms): + reg_name = f"{type(reg_part).__name__}" + if hasattr(reg_part, "orientation"): + reg_name = reg_part.orientation + " " + reg_name + iterDict[reg_name + ".irls_threshold"] = reg_part.irls_threshold + iterDict[reg_name + ".norm"] = norm + + # Save the file as a npz + if self.on_disk: + self._mkdir_and_check_output_file(should_exist=False) + np.savez(self.file_abs_path, iterDict) + + self.outDict[self.opt.iter] = iterDict + + +class UpdatePreconditioner(InversionDirective): + """ + Create a Jacobi preconditioner for the linear problem + """ + + def __init__(self, update_every_iteration=True, **kwargs): + super().__init__(**kwargs) + self.update_every_iteration = update_every_iteration + + @property + def update_every_iteration(self): + """Whether to update the preconditioner at every iteration. 
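+
+        When ``False``, the Jacobi preconditioner assembled in ``initialize``
+        is reused for the remainder of the inversion.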
+
+        Returns
+        -------
+        bool
+        """
+        return self._update_every_iteration
+
+    @update_every_iteration.setter
+    def update_every_iteration(self, value):
+        self._update_every_iteration = validate_type(
+            "update_every_iteration", value, bool
+        )
+
+    def initialize(self):
+        # Create the pre-conditioner
+        regDiag = np.zeros_like(self.invProb.model)
+        m = self.invProb.model
+
+        for reg in self.reg.objfcts:
+            # Skip regularizations whose second derivative is a Zero operator
+            rdg = reg.deriv2(m)
+            if not isinstance(rdg, Zero):
+                regDiag += rdg.diagonal()
+
+        JtJdiag = compute_JtJdiags(self.dmisfit, self.invProb.model)
+
+        diagA = JtJdiag + self.invProb.beta * regDiag
+        diagA[diagA != 0] = diagA[diagA != 0] ** -1.0
+        PC = sdiag(diagA)
+
+        self.opt.approxHinv = PC
+
+    def endIter(self):
+        # Only rebuild the preconditioner if updating at every iteration
+        if self.update_every_iteration is False:
+            return
+
+        # Create the pre-conditioner
+        regDiag = np.zeros_like(self.invProb.model)
+        m = self.invProb.model
+
+        for reg in self.reg.objfcts:
+            # Accumulate the diagonal of each regularization's second derivative
+            regDiag += reg.deriv2(m).diagonal()
+
+        JtJdiag = compute_JtJdiags(self.dmisfit, self.invProb.model)
+
+        diagA = JtJdiag + self.invProb.beta * regDiag
+        diagA[diagA != 0] = diagA[diagA != 0] ** -1.0
+        PC = sdiag(diagA)
+        self.opt.approxHinv = PC
+
+
+class Update_Wj(InversionDirective):
+    """
+    Create an approximate sensitivity-based weighting using the probing method
+    """
+
+    def __init__(self, k=None, itr=None, **kwargs):
+        self.k = k
+        self.itr = itr
+        super().__init__(**kwargs)
+
+    @property
+    def k(self):
+        """Number of probing cycles for the estimator.
+
+        Returns
+        -------
+        int
+        """
+        return self._k
+
+    @k.setter
+    def k(self, value):
+        if value is not None:
+            value = validate_integer("k", value, min_val=1)
+        self._k = value
+
+    @property
+    def itr(self):
+        """Which iteration to update the sensitivity.
+
+        Will always update if `None`.
+
+        Returns
+        -------
+        int or None
+        """
+        return self._itr
+
+    @itr.setter
+    def itr(self, value):
+        if value is not None:
+            value = validate_integer("itr", value, min_val=1)
+        self._itr = value
+
+    def endIter(self):
+        if self.itr is None or self.itr == self.opt.iter:
+            m = self.invProb.model
+            if self.k is None:
+                self.k = int(self.survey.nD / 10)
+
+            def JtJv(v):
+                Jv = self.simulation.Jvec(m, v)
+
+                return self.simulation.Jtvec(m, Jv)
+
+            JtJdiag = estimate_diagonal(JtJv, len(m), k=self.k)
+            JtJdiag = JtJdiag / max(JtJdiag)
+
+            self.reg.wght = JtJdiag
+
+
+class UpdateSensitivityWeights(InversionDirective):
+    r"""
+    Sensitivity weighting for linear and non-linear least-squares inverse problems.
+
+    This directive computes the root-mean squared sensitivities for the forward
+    simulation(s) attached to the inverse problem, then truncates and scales the result
+    to create cell weights which are applied in the regularization.
+
+    .. important::
+
+        This directive **requires** that the map for the regularization function is
+        either :class:`simpeg.maps.Wires` or :class:`simpeg.maps.IdentityMap`. In other
+        words, the sensitivity weighting cannot be applied for parametric inversion. In
+        addition, the simulation(s) connected to the inverse problem **must** have
+        a ``getJ`` or ``getJtJdiag`` method.
+
+    .. important::
+
+        This directive **must** be placed before any directives which update the
+        preconditioner for the inverse problem (i.e. :class:`UpdatePreconditioner`), and
+        **must** be before any directives that estimate the starting trade-off parameter
+        (i.e. :class:`BetaEstimate_ByEig` and :class:`BetaEstimateMaxDerivative`).
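+
+    As an illustrative sketch of that ordering (not a complete inversion
+    setup), the directive list might be assembled as:
+
+    >>> from simpeg import directives
+    >>> directives_list = [
+    ...     directives.UpdateSensitivityWeights(every_iteration=False),
+    ...     directives.BetaEstimate_ByEig(beta0_ratio=1.0),
+    ...     directives.UpdatePreconditioner(),
+    ... ]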
+
+    Parameters
+    ----------
+    every_iteration : bool
+        When ``True``, update sensitivity weighting at every model update; non-linear problems.
+        When ``False``, create sensitivity weights for starting model only; linear problems.
+    threshold_value : float
+        Threshold value for smallest weighting value.
+    threshold_method : {'amplitude', 'global', 'percentile'}
+        Threshold method for how `threshold_value` is applied:
+
+        - amplitude:
+            The smallest root-mean squared sensitivity is a fractional percent of the
+            largest value; must be between 0 and 1.
+        - global:
+            The ``threshold_value`` is added to the cell weights prior to normalization;
+            must be greater than 0.
+        - percentile:
+            The smallest root-mean squared sensitivity is set using percentile
+            threshold; must be between 0 and 100.
+
+    normalization_method : {'maximum', 'minimum', None}
+        Normalization method applied to sensitivity weights.
+
+        Options are:
+
+        - maximum:
+            Sensitivity weights are normalized by the largest value such that the
+            largest weight is equal to 1.
+        - minimum:
+            Sensitivity weights are normalized by the smallest value, after
+            thresholding, such that the smallest weights are equal to 1.
+        - ``None``:
+            Normalization is not applied.
+
+    Notes
+    -----
+    Let :math:`\mathbf{J}` represent the Jacobian. To create sensitivity weights, root-mean squared (RMS) sensitivities
+    :math:`\mathbf{s}` are computed by summing the squares of the rows of the Jacobian:
+
+    .. math::
+        \mathbf{s} = \Bigg [ \sum_i \, \mathbf{J_{i, \centerdot }}^2 \, \Bigg ]^{1/2}
+
+    The dynamic range of RMS sensitivities can span many orders of magnitude. When computing sensitivity
+    weights, thresholding is generally applied to set a minimum value.
+
+    **Thresholding:**
+
+    If **global** thresholding is applied, we add a constant :math:`\tau` to the RMS sensitivities:
+
+    .. math::
+        \mathbf{\tilde{s}} = \mathbf{s} + \tau
+
+    In the case of **percentile** thresholding, we let :math:`s_{\%}` represent a given percentile.
+    Thresholding to set a minimum value is applied as follows:
+
+    .. math::
+        \tilde{s}_j = \begin{cases}
+        s_j \;\; for \;\; s_j \geq s_{\%} \\
+        s_{\%} \;\; for \;\; s_j < s_{\%}
+        \end{cases}
+
+    If **amplitude** thresholding is applied, we define :math:`\eta` as a fractional percent.
+    In this case, thresholding is applied as follows:
+
+    .. math::
+        \tilde{s}_j = \begin{cases}
+        s_j \;\; for \;\; s_j \geq \eta s_{max} \\
+        \eta s_{max} \;\; for \;\; s_j < \eta s_{max}
+        \end{cases}
+    """
+
+    def __init__(
+        self,
+        every_iteration=False,
+        threshold_value=1e-12,
+        threshold_method="amplitude",
+        normalization_method="maximum",
+        **kwargs,
+    ):
+
+        super().__init__(**kwargs)
+
+        self.every_iteration = every_iteration
+        self.threshold_value = threshold_value
+        self.threshold_method = threshold_method
+        self.normalization_method = normalization_method
+
+    @property
+    def every_iteration(self):
+        """Update sensitivity weights when model is updated.
+
+        When ``True``, update sensitivity weighting at every model update; non-linear problems.
+        When ``False``, create sensitivity weights for starting model only; linear problems.
+
+        Returns
+        -------
+        bool
+        """
+        return self._every_iteration
+
+    @every_iteration.setter
+    def every_iteration(self, value):
+        self._every_iteration = validate_type("every_iteration", value, bool)
+
+    @property
+    def threshold_value(self):
+        """Threshold value used to set minimum weighting value.
+
+        The way thresholding is applied to the weighting model depends on the
+        `threshold_method` property.
The choices for `threshold_method` are: + + - global: + `threshold_value` is added to the cell weights prior to normalization; must be greater than 0. + - percentile: + `threshold_value` is a percentile cutoff; must be between 0 and 100 + - amplitude: + `threshold_value` is the fractional percent of the largest value; must be between 0 and 1 + + + Returns + ------- + float + """ + return self._threshold_value + + @threshold_value.setter + def threshold_value(self, value): + self._threshold_value = validate_float("threshold_value", value, min_val=0.0) + + @property + def threshold_method(self): + """Threshold method for how `threshold_value` is applied: + + - global: + `threshold_value` is added to the cell weights prior to normalization; must be greater than 0. + - percentile: + the smallest root-mean squared sensitivity is set using percentile threshold; must be between 0 and 100 + - amplitude: + the smallest root-mean squared sensitivity is a fractional percent of the largest value; must be between 0 and 1 + + + Returns + ------- + str + """ + return self._threshold_method + + @threshold_method.setter + def threshold_method(self, value): + self._threshold_method = validate_string( + "threshold_method", value, string_list=["global", "percentile", "amplitude"] + ) + + @property + def normalization_method(self): + """Normalization method applied to sensitivity weights. + + Options are: + + - ``None`` + normalization is not applied + - maximum: + sensitivity weights are normalized by the largest value such that the largest weight is equal to 1. + - minimum: + sensitivity weights are normalized by the smallest value, after thresholding, such that the smallest weights are equal to 1. + + Returns + ------- + None, str + """ + return self._normalization_method + + @normalization_method.setter + def normalization_method(self, value): + if value is None: + self._normalization_method = value + else: + self._normalization_method = validate_string( + "normalization_method", value, string_list=["minimum", "maximum"] + ) + + def initialize(self): + """Compute sensitivity weights upon starting the inversion.""" + for reg in self.reg.objfcts: + if not isinstance(reg.mapping, (IdentityMap, Wires)): + raise TypeError( + f"Mapping for the regularization must be of type {IdentityMap} or {Wires}. " + + f"Input mapping of type {type(reg.mapping)}." 
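+                    # (sensitivity weights are defined per active cell, which a
+                    # parametric mapping does not preserve)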
+                )
+
+        self.update()
+
+    def endIter(self):
+        """Execute end of iteration."""
+
+        if self.every_iteration:
+            self.update()
+
+    def update(self):
+        """Update sensitivity weights"""
+
+        jtj_diag = compute_JtJdiags(self.dmisfit, self.invProb.model)
+
+        # Compute and sum root-mean squared sensitivities for all objective functions
+        wr = np.zeros_like(self.invProb.model)
+        for reg in self.reg.objfcts:
+            if isinstance(reg, BaseSimilarityMeasure):
+                continue
+
+            mesh = reg.regularization_mesh
+            n_cells = mesh.nC
+            mapped_jtj_diag = reg.mapping * jtj_diag
+            # reshape the mapped array, so you can divide by volume
+            # (let's say it was a vector or anisotropic model)
+            mapped_jtj_diag = mapped_jtj_diag.reshape((n_cells, -1), order="F")
+            wr_temp = mapped_jtj_diag / reg.regularization_mesh.vol[:, None] ** 2.0
+            wr_temp = wr_temp.reshape(-1, order="F")
+
+            wr += reg.mapping.deriv(self.invProb.model).T * wr_temp
+
+        wr **= 0.5
+
+        # Apply thresholding
+        if self.threshold_method == "global":
+            wr += self.threshold_value
+        elif self.threshold_method == "percentile":
+            wr = np.clip(
+                wr, a_min=np.percentile(wr, self.threshold_value), a_max=np.inf
+            )
+        else:
+            wr = np.clip(wr, a_min=self.threshold_value * wr.max(), a_max=np.inf)
+
+        # Apply normalization
+        if self.normalization_method == "maximum":
+            wr /= wr.max()
+        elif self.normalization_method == "minimum":
+            wr /= wr.min()
+
+        # Add sensitivity weighting to all model objective functions
+        for reg in self.reg.objfcts:
+            if not isinstance(reg, BaseSimilarityMeasure):
+                sub_regs = getattr(reg, "objfcts", [reg])
+                for sub_reg in sub_regs:
+                    sub_reg.set_weights(sensitivity=sub_reg.mapping * wr)
+
+    def validate(self, directiveList):
+        """Validate directive against directives list.
+
+        The ``UpdateSensitivityWeights`` directive impacts the regularization by applying
+        cell weights. As a result, its place in the :class:`DirectiveList` must be
+        before any directives which update the preconditioner for the inverse problem
+        (i.e. :class:`UpdatePreconditioner`), and must be before any directives that
+        estimate the starting trade-off parameter (i.e. :class:`BetaEstimate_ByEig`
+        and :class:`BetaEstimateMaxDerivative`).
+
+
+        Returns
+        -------
+        bool
+            Returns ``True`` if validation passes. Otherwise, an error is thrown.
+        """
+        # check if a beta estimator is in the list after setting the weights
+        dList = directiveList.dList
+        self_ind = dList.index(self)
+
+        beta_estimator_ind = [isinstance(d, BaseBetaEstimator) for d in dList]
+        lin_precond_ind = [isinstance(d, UpdatePreconditioner) for d in dList]
+
+        if any(beta_estimator_ind):
+            assert beta_estimator_ind.index(True) > self_ind, (
+                "The directive for setting initial beta must be after UpdateSensitivityWeights "
+                "in the directiveList"
+            )
+
+        if any(lin_precond_ind):
+            assert lin_precond_ind.index(True) > self_ind, (
+                "The directive 'UpdatePreconditioner' must be after UpdateSensitivityWeights "
+                "in the directiveList"
+            )
+
+        return True
+
+
+class ProjectSphericalBounds(InversionDirective):
+    r"""
+    Trick for spherical coordinate system.
+    Project :math:`\theta` and :math:`\phi` angles back to :math:`[-\pi,\pi]`
+    using back and forth conversion.
+    spherical->cartesian->spherical
+    """
+
+    def initialize(self):
+        x = self.invProb.model
+        # Convert to cartesian then back to avoid over-rotation
+        nC = int(len(x) / 3)
+
+        xyz = spherical2cartesian(x.reshape((nC, 3), order="F"))
+        m = cartesian2spherical(xyz.reshape((nC, 3), order="F"))
+
+        self.invProb.model = m
+
+        for sim in self.simulation:
+            sim.model = m
+
+        self.opt.xc = m
+
+    def endIter(self):
+        x = self.invProb.model
+        nC = int(len(x) / 3)
+
+        # Convert to cartesian then back to avoid over-rotation
+        xyz = spherical2cartesian(x.reshape((nC, 3), order="F"))
+        m = cartesian2spherical(xyz.reshape((nC, 3), order="F"))
+
+        self.invProb.model = m
+
+        phi_m_last = []
+        for reg in self.reg.objfcts:
+            reg.model = self.invProb.model
+            phi_m_last += [reg(self.invProb.model)]
+
+        self.invProb.phi_m_last = phi_m_last
+
+        for sim in self.simulation:
+            sim.model = m
+
+        self.opt.xc = m
+
+
+class ScaleMisfitMultipliers(InversionDirective):
+    """
+    Scale the misfits by the relative chi-factors of multiple misfit functions.
+
+    The goal is to reduce the relative influence of the misfit functions with
+    lowest chi-factors so that all functions reach a similar level of fit at
+    convergence to the global target.
+
+    Parameters
+    ----------
+    path : pathlib.Path, optional
+        Directory in which to save the chi-factors log file, defaults to the
+        current directory.
+    """
+
+    def __init__(self, path: pathlib.Path | None = None, **kwargs):
+        self.last_beta = None
+        self.chi_factors = None
+
+        if path is None:
+            path = pathlib.Path("./")
+
+        self.filepath = path / "ChiFactors.log"
+
+        super().__init__(**kwargs)
+
+    def initialize(self):
+        self.last_beta = self.invProb.beta
+        self.multipliers = self.invProb.dmisfit.multipliers
+        self.scalings = np.ones_like(self.multipliers)
+        with open(self.filepath, "w", encoding="utf-8") as f:
+            f.write("Logging of [scaling * chi factor] per misfit function.\n\n")
+            f.write(
+                "Iterations\t"
+                + "\t".join(
+                    f"[{objfct.name}]" for objfct in self.invProb.dmisfit.objfcts
+                )
+            )
+            f.write("\n")
+
+    def endIter(self):
+        ratio = self.invProb.beta / self.last_beta
+        chi_factors = []
+        for residual in self.invProb.residuals:
+            phi_d = np.vdot(residual, residual)
+            chi_factors.append(phi_d / len(residual))
+
+        self.chi_factors = np.asarray(chi_factors)
+
+        if np.all(self.chi_factors < 1) or ratio >= 1:
+            self.last_beta = self.invProb.beta
+            self.write_log()
+            return
+
+        # Normalize scaling between [ratio, 1]
+        scalings = (
+            1
+            - (1 - ratio)
+            * (self.chi_factors.max() - self.chi_factors)
+            / self.chi_factors.max()
+        )
+
+        # Force the ones that overshot the target
+        scalings[self.chi_factors < 1] = (
+            ratio  # * self.chi_factors[self.chi_factors < 1]
+        )
+
+        # Update the scaling
+        self.scalings = self.scalings * scalings
+
+        # Normalize total phi_d with scalings
+        self.invProb.dmisfit.multipliers = self.multipliers * self.scalings
+        self.last_beta = self.invProb.beta
+        self.write_log()
+
+    def write_log(self):
+        """
+        Write the scaling factors to the log file.
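+
+        Each appended row lists ``multiplier * chi_factor`` per misfit function,
+        tab-separated, e.g. (illustrative values):
+        ``2   1.00e+00 * 1.85e+00   7.50e-01 * 9.60e-01``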
+ """ + with open(self.filepath, "a", encoding="utf-8") as f: + f.write( + f"{self.opt.iter}\t" + + "\t".join( + f"{multi:.2e} * {chi:.2e}" + for multi, chi in zip( + self.invProb.dmisfit.multipliers, self.chi_factors + ) + ) + + "\n" + ) + + +def compute_JtJdiags(data_misfit, m): + if hasattr(data_misfit, "getJtJdiag"): + return data_misfit.getJtJdiag(m) + else: + jtj_diag_list = [] + jtj_diag = np.zeros_like(m) + + for dmisfit in data_misfit.objfcts: + if isinstance(dmisfit, ComboObjectiveFunction): + jtj_diag += compute_JtJdiags(dmisfit, m) + + else: + jtj_diag_list.append(dmisfit.getJtJdiag(m)) + + for multiplier, diag in zip(data_misfit.multipliers, jtj_diag_list): + jtj_diag += multiplier * diag + + return np.asarray(jtj_diag) diff --git a/simpeg/directives/_pgi_directives.py b/simpeg/directives/_pgi_directives.py new file mode 100644 index 0000000000..8127a913cf --- /dev/null +++ b/simpeg/directives/_pgi_directives.py @@ -0,0 +1,475 @@ +############################################################################### +# # +# Directives for PGI: Petrophysically guided Regularization # +# # +############################################################################### + +import copy + +import numpy as np + +from ..directives import InversionDirective, MultiTargetMisfits +from ..regularization import ( + PGI, + PGIsmallness, + SmoothnessFirstOrder, + SparseSmoothness, +) +from ..utils import ( + GaussianMixtureWithNonlinearRelationships, + GaussianMixtureWithNonlinearRelationshipsWithPrior, + GaussianMixtureWithPrior, + WeightedGaussianMixture, + mkvc, +) + + +class PGI_UpdateParameters(InversionDirective): + """ + This directive is to be used with regularization from regularization.pgi. + It updates: + - the reference model and weights in the smallness (L2-approximation of PGI) + - the GMM as a MAP estimate between the prior and the current model + For more details, please consult: + - https://doi.org/10.1093/gji/ggz389 + """ + + verbose = False # print info. about the GMM at each iteration + update_rate = 1 # updates at each `update_rate` iterations + update_gmm = False # update the GMM + zeta = ( + 1e10 # confidence in the prior proportions; default: high value, keep GMM fixed + ) + nu = ( + 1e10 # confidence in the prior covariances; default: high value, keep GMM fixed + ) + kappa = 1e10 # confidence in the prior means;default: high value, keep GMM fixed + update_covariances = ( + True # Average the covariances, If false: average the precisions + ) + fixed_membership = None # keep the membership of specific cells fixed + keep_ref_fixed_in_Smooth = True # keep mref fixed in the Smoothness + + def initialize(self): + pgi_reg = self.reg.get_functions_of_type(PGIsmallness) + if len(pgi_reg) != 1: + raise UserWarning( + "'PGI_UpdateParameters' requires one 'PGIsmallness' regularization " + "in the objective function." 
+ ) + self.pgi_reg = pgi_reg[0] + + def endIter(self): + if self.opt.iter > 0 and self.opt.iter % self.update_rate == 0: + m = self.invProb.model + modellist = self.pgi_reg.wiresmap * m + model = np.c_[[a * b for a, b in zip(self.pgi_reg.maplist, modellist)]].T + + if self.update_gmm and isinstance( + self.pgi_reg.gmmref, GaussianMixtureWithNonlinearRelationships + ): + clfupdate = GaussianMixtureWithNonlinearRelationshipsWithPrior( + gmmref=self.pgi_reg.gmmref, + zeta=self.zeta, + kappa=self.kappa, + nu=self.nu, + verbose=self.verbose, + prior_type="semi", + update_covariances=self.update_covariances, + max_iter=self.pgi_reg.gmm.max_iter, + n_init=self.pgi_reg.gmm.n_init, + reg_covar=self.pgi_reg.gmm.reg_covar, + weights_init=self.pgi_reg.gmm.weights_, + means_init=self.pgi_reg.gmm.means_, + precisions_init=self.pgi_reg.gmm.precisions_, + random_state=self.pgi_reg.gmm.random_state, + tol=self.pgi_reg.gmm.tol, + verbose_interval=self.pgi_reg.gmm.verbose_interval, + warm_start=self.pgi_reg.gmm.warm_start, + fixed_membership=self.fixed_membership, + ) + clfupdate = clfupdate.fit(model) + + elif self.update_gmm and isinstance( + self.pgi_reg.gmmref, WeightedGaussianMixture + ): + clfupdate = GaussianMixtureWithPrior( + gmmref=self.pgi_reg.gmmref, + zeta=self.zeta, + kappa=self.kappa, + nu=self.nu, + verbose=self.verbose, + prior_type="semi", + update_covariances=self.update_covariances, + max_iter=self.pgi_reg.gmm.max_iter, + n_init=self.pgi_reg.gmm.n_init, + reg_covar=self.pgi_reg.gmm.reg_covar, + weights_init=self.pgi_reg.gmm.weights_, + means_init=self.pgi_reg.gmm.means_, + precisions_init=self.pgi_reg.gmm.precisions_, + random_state=self.pgi_reg.gmm.random_state, + tol=self.pgi_reg.gmm.tol, + verbose_interval=self.pgi_reg.gmm.verbose_interval, + warm_start=self.pgi_reg.gmm.warm_start, + fixed_membership=self.fixed_membership, + ) + clfupdate = clfupdate.fit(model) + + else: + clfupdate = copy.deepcopy(self.pgi_reg.gmmref) + + self.pgi_reg.gmm = clfupdate + membership = self.pgi_reg.gmm.predict(model) + + if clfupdate.fixed_membership is not None: + self.fixed_membership = clfupdate.fixed_membership + membership[self.fixed_membership[:, 0]] = self.fixed_membership[:, 1] + + mref = mkvc(self.pgi_reg.gmm.means_[membership]) + self.pgi_reg.reference_model = mref + if getattr(self.fixed_membership, "shape", [0, 0])[0] < len(membership): + self.pgi_reg._r_second_deriv = None + + +class PGI_BetaAlphaSchedule(InversionDirective): + """ + This directive is to be used with regularizations from regularization.pgi. + It implements the strategy described in https://doi.org/10.1093/gji/ggz389 + for iteratively updating beta and alpha_s for fitting the + geophysical and smallness targets. + """ + + verbose = False # print information (progress, updates made) + tolerance = 0.0 # tolerance on the geophysical target misfit for cooling + progress = 0.1 # minimum percentage progress (default 10%) before cooling beta + coolingFactor = 2.0 # when cooled, beta is divided by it + warmingFactor = 1.0 # when warmed, alpha_s is multiplied by the ratio of the + # geophysical target with their current misfit, times this factor + mode = 1 # mode 1: start with nothing fitted. 
Mode 2: warmstart with fitted geophysical data
+    alphasmax = 1e10  # max alpha_s
+    betamin = 1e-10  # minimum beta
+    update_rate = 1  # update every `update_rate` iterations
+    pgi_reg = None
+    ratio_in_cooling = (
+        False  # add the ratio of geophysical misfit with their target in cooling
+    )
+
+    def initialize(self):
+        """Initialize the directive."""
+        self.update_previous_score()
+        self.update_previous_dmlist()
+
+    def endIter(self):
+        """Run after the end of each iteration in the inversion."""
+        # Get some variables from the MultiTargetMisfits directive
+        data_misfits_achieved = self.multi_target_misfits_directive.DM
+        data_misfits_target = self.multi_target_misfits_directive.DMtarget
+        dmlist = self.multi_target_misfits_directive.dmlist
+        targetlist = self.multi_target_misfits_directive.targetlist
+
+        # Change mode if data misfit targets have been achieved
+        if data_misfits_achieved:
+            self.mode = 2
+
+        # Don't cool beta or warm alpha if we are in the first iteration or if
+        # the current iteration doesn't match the update rate
+        if self.opt.iter == 0 or self.opt.iter % self.update_rate != 0:
+            self.update_previous_score()
+            self.update_previous_dmlist()
+            return None
+
+        if self.verbose:
+            targets = np.round(
+                np.maximum(
+                    (1.0 - self.progress) * self.previous_dmlist,
+                    (1.0 + self.tolerance) * data_misfits_target,
+                ),
+                decimals=1,
+            )
+            dmlist_rounded = np.round(dmlist, decimals=1)
+            print(
+                f"Beta cooling evaluation: progress: {dmlist_rounded}; "
+                f"minimum progress targets: {targets}"
+            )
+
+        # Decide if we should cool beta
+        threshold = np.maximum(
+            (1.0 - self.progress) * self.previous_dmlist[~targetlist],
+            data_misfits_target[~targetlist],
+        )
+        if (
+            (dmlist[~targetlist] > threshold).all()
+            and not data_misfits_achieved
+            and self.mode == 1
+            and self.invProb.beta > self.betamin
+        ):
+            self.cool_beta()
+            if self.verbose:
+                print("Decreasing beta to counter data misfit decrease plateau.")
+
+        # Decide if we should warm alpha instead
+        elif (
+            data_misfits_achieved
+            and self.mode == 2
+            and np.all(self.pgi_regularization.alpha_pgi < self.alphasmax)
+        ):
+            self.warm_alpha()
+            if self.verbose:
+                print(
+                    "Warming alpha_pgi to favor clustering: ",
+                    self.pgi_regularization.alpha_pgi,
+                )
+
+        # Decide if we should cool beta (to counter data misfit increase)
+        elif (
+            np.any(dmlist > (1.0 + self.tolerance) * data_misfits_target)
+            and self.mode == 2
+            and self.invProb.beta > self.betamin
+        ):
+            self.cool_beta()
+            if self.verbose:
+                print("Decreasing beta to counter data misfit increase.")
+
+        # Update previous score and dmlist
+        self.update_previous_score()
+        self.update_previous_dmlist()
+
+    def cool_beta(self):
+        """Cool beta according to schedule."""
+        data_misfits_target = self.multi_target_misfits_directive.DMtarget
+        dmlist = self.multi_target_misfits_directive.dmlist
+        ratio = 1.0
+        indx = dmlist > (1.0 + self.tolerance) * data_misfits_target
+        if np.any(indx) and self.ratio_in_cooling:
+            ratio = np.median([dmlist[indx] / data_misfits_target[indx]])
+        self.invProb.beta /= self.coolingFactor * ratio
+
+    def warm_alpha(self):
+        """Warm alpha according to schedule."""
+        data_misfits_target = self.multi_target_misfits_directive.DMtarget
+        dmlist = self.multi_target_misfits_directive.dmlist
+        ratio = np.median(data_misfits_target / dmlist)
+        self.pgi_regularization.alpha_pgi *= self.warmingFactor * ratio
+
+    def update_previous_score(self):
+        """
+        Update the value of the ``previous_score`` attribute.
+
+        Update it with the current value of the petrophysical misfit, obtained
+        from the :meth:`MultiTargetMisfits.phims` method.
+        """
+        self.previous_score = copy.deepcopy(self.multi_target_misfits_directive.phims())
+
+    def update_previous_dmlist(self):
+        """
+        Update the value of the ``previous_dmlist`` attribute.
+
+        Update it with the current value of the data misfits, obtained
+        from the :attr:`MultiTargetMisfits.dmlist` attribute.
+        """
+        self.previous_dmlist = copy.deepcopy(self.multi_target_misfits_directive.dmlist)
+
+    @property
+    def directives(self):
+        """List of all the directives in the :class:`simpeg.inversion.BaseInversion`."""
+        return self.inversion.directiveList.dList
+
+    @property
+    def multi_target_misfits_directive(self):
+        """``MultiTargetMisfits`` directive in the :class:`simpeg.inversion.BaseInversion`."""
+        if not hasattr(self, "_mtm_directive"):
+            # Obtain multi target misfits directive from the directive list
+            multi_target_misfits_directive = [
+                directive
+                for directive in self.directives
+                if isinstance(directive, MultiTargetMisfits)
+            ]
+            if not multi_target_misfits_directive:
+                raise UserWarning(
+                    "No MultiTargetMisfits directive found in the current inversion. "
+                    "A MultiTargetMisfits directive is needed by the "
+                    "PGI_BetaAlphaSchedule directive."
+                )
+            (self._mtm_directive,) = multi_target_misfits_directive
+        return self._mtm_directive
+
+    @property
+    def pgi_update_params_directive(self):
+        """``PGI_UpdateParameters`` directive in the :class:`simpeg.inversion.BaseInversion`."""
+        if not hasattr(self, "_pgi_update_params"):
+            # Obtain PGI_UpdateParameters directive from the directive list
+            pgi_update_params_directive = [
+                directive
+                for directive in self.directives
+                if isinstance(directive, PGI_UpdateParameters)
+            ]
+            if pgi_update_params_directive:
+                (self._pgi_update_params,) = pgi_update_params_directive
+            else:
+                self._pgi_update_params = None
+        return self._pgi_update_params
+
+    @property
+    def pgi_regularization(self):
+        """PGI regularization in the :class:`simpeg.inverse_problem.BaseInvProblem`."""
+        if not hasattr(self, "_pgi_regularization"):
+            pgi_regularization = self.reg.get_functions_of_type(PGI)
+            if len(pgi_regularization) != 1:
+                raise UserWarning(
+                    "'PGI_BetaAlphaSchedule' requires one 'PGI' regularization "
+                    "in the objective function."
+                )
+            self._pgi_regularization = pgi_regularization[0]
+        return self._pgi_regularization
+
+
+class PGI_AddMrefInSmooth(InversionDirective):
+    """
+    This directive is to be used with regularizations from regularization.pgi.
+    It implements the strategy described in https://doi.org/10.1093/gji/ggz389
+    for including the learned reference model, once stable, in the smoothness terms.
+    """
+
+    # Chi factor for Data Misfit
+    chifact = 1.0
+    tolerance_phid = 0.0
+    phi_d_target = None
+    wait_till_stable = True
+    tolerance = 0.0
+    verbose = False
+
+    def initialize(self):
+        targetclass = np.r_[
+            [
+                isinstance(dirpart, MultiTargetMisfits)
+                for dirpart in self.inversion.directiveList.dList
+            ]
+        ]
+        if ~np.any(targetclass):
+            self.DMtarget = None
+        else:
+            self.targetclass = np.where(targetclass)[0][-1]
+            self._DMtarget = self.inversion.directiveList.dList[
+                self.targetclass
+            ].DMtarget
+
+        self.pgi_updategmm_class = np.r_[
+            [
+                isinstance(dirpart, PGI_UpdateParameters)
+                for dirpart in self.inversion.directiveList.dList
+            ]
+        ]
+
+        if getattr(self.reg.objfcts[0], "objfcts", None) is not None:
+            # Find the petrosmallness terms in a two-level combo regularization.
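+            # (a two-level combo: the top-level `reg.objfcts` holds regularization
+            # objects whose own `objfcts` contain the smallness/smoothness parts)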
+ petrosmallness = np.where( + np.r_[[isinstance(regpart, PGI) for regpart in self.reg.objfcts]] + )[0][0] + self.petrosmallness = petrosmallness + + # Find the smoothness terms in a two-levels combo-regularization. + Smooth = [] + for i, regobjcts in enumerate(self.reg.objfcts): + for j, regpart in enumerate(regobjcts.objfcts): + Smooth += [ + [ + i, + j, + isinstance( + regpart, (SmoothnessFirstOrder, SparseSmoothness) + ), + ] + ] + self.Smooth = np.r_[Smooth] + + self.nbr = np.sum( + [len(self.reg.objfcts[i].objfcts) for i in range(len(self.reg.objfcts))] + ) + self._regmode = 1 + self.pgi_reg = self.reg.objfcts[self.petrosmallness] + + else: + self._regmode = 2 + self.pgi_reg = self.reg + self.nbr = len(self.reg.objfcts) + self.Smooth = np.r_[ + [ + isinstance(regpart, (SmoothnessFirstOrder, SparseSmoothness)) + for regpart in self.reg.objfcts + ] + ] + self._regmode = 2 + + if ~np.any(self.pgi_updategmm_class): + self.previous_membership = self.pgi_reg.membership(self.invProb.model) + else: + self.previous_membership = self.pgi_reg.compute_quasi_geology_model() + + @property + def DMtarget(self): + if getattr(self, "_DMtarget", None) is None: + self.phi_d_target = self.invProb.dmisfit.survey.nD + self._DMtarget = self.chifact * self.phi_d_target + return self._DMtarget + + @DMtarget.setter + def DMtarget(self, val): + self._DMtarget = val + + def endIter(self): + self.DM = self.inversion.directiveList.dList[self.targetclass].DM + self.dmlist = self.inversion.directiveList.dList[self.targetclass].dmlist + + if ~np.any(self.pgi_updategmm_class): + self.membership = self.pgi_reg.membership(self.invProb.model) + else: + self.membership = self.pgi_reg.compute_quasi_geology_model() + + same_mref = np.all(self.membership == self.previous_membership) + percent_diff = ( + len(self.membership) + - np.count_nonzero(self.previous_membership == self.membership) + ) / len(self.membership) + if self.verbose: + print( + "mref changed in ", + len(self.membership) + - np.count_nonzero(self.previous_membership == self.membership), + " places", + ) + if ( + self.DM or np.all(self.dmlist < (1 + self.tolerance_phid) * self.DMtarget) + ) and ( + same_mref or not self.wait_till_stable or percent_diff <= self.tolerance + ): + self.reg.reference_model_in_smooth = True + self.pgi_reg.reference_model_in_smooth = True + + if self._regmode == 2: + for i in range(self.nbr): + if self.Smooth[i]: + self.reg.objfcts[i].reference_model = mkvc( + self.pgi_reg.gmm.means_[self.membership] + ) + if self.verbose: + print( + "Add mref to Smoothness. Changes in mref happened in {} % of the cells".format( + percent_diff + ) + ) + + elif self._regmode == 1: + for i in range(self.nbr): + if self.Smooth[i, 2]: + idx = self.Smooth[i, :2] + self.reg.objfcts[idx[0]].objfcts[idx[1]].reference_model = mkvc( + self.pgi_reg.gmm.means_[self.membership] + ) + if self.verbose: + print( + "Add mref to Smoothness. 
Changes in mref happened in {} % of the cells".format( + percent_diff + ) + ) + + self.previous_membership = copy.deepcopy(self.membership) diff --git a/simpeg/directives/_regularization.py b/simpeg/directives/_regularization.py index d3fb7be2b3..08ba229f80 100644 --- a/simpeg/directives/_regularization.py +++ b/simpeg/directives/_regularization.py @@ -6,14 +6,14 @@ from dataclasses import dataclass from ..maps import Projection -from .directives import InversionDirective, UpdatePreconditioner, BetaSchedule +from ._directives import InversionDirective, UpdatePreconditioner, BetaSchedule from ..regularization import ( Sparse, BaseSparse, SmoothnessFirstOrder, WeightedLeastSquares, ) -from ..utils import validate_integer, validate_float +from ..utils import validate_integer, validate_float, deprecate_class @dataclass @@ -224,20 +224,20 @@ def adjust_cooling_schedule(self): """ Adjust the cooling schedule based on the misfit. """ - if self.metrics.start_irls_iter is None: - return + if self.metrics.start_irls_iter is not None: + ratio = self.invProb.phi_d / self.misfit_from_chi_factor( + self.chifact_target + ) + if np.abs(1.0 - ratio) > self.misfit_tolerance: - ratio = self.invProb.phi_d / self.misfit_from_chi_factor(self.chifact_target) - if np.abs(1.0 - ratio) > self.misfit_tolerance: + if ratio > 1: + update_ratio = 1 / np.mean([0.75, 1 / ratio]) + else: + update_ratio = 1 / np.mean([2.0, 1 / ratio]) - if ratio > 1: - update_ratio = 1 / np.mean([0.75, 1 / ratio]) + self.cooling_factor = update_ratio else: - update_ratio = 1 / np.mean([2.0, 1 / ratio]) - - self.cooling_factor = update_ratio - else: - self.cooling_factor = 1.0 + self.cooling_factor = 1.0 def initialize(self): """ @@ -497,3 +497,8 @@ def update_scaling(self): continue obj.set_weights(angle_scale=np.ones_like(amplitude) * max_p / np.pi) + + +@deprecate_class(removal_version="0.24.0", error=True) +class Update_IRLS(UpdateIRLS): + pass diff --git a/simpeg/directives/_save_geoh5.py b/simpeg/directives/_save_geoh5.py index 9ad798ab67..cbbb082e0b 100644 --- a/simpeg/directives/_save_geoh5.py +++ b/simpeg/directives/_save_geoh5.py @@ -7,7 +7,7 @@ from scipy.sparse import csc_matrix, csr_matrix from simpeg.regularization import PGIsmallness -from .directives import InversionDirective +from ._directives import InversionDirective from simpeg.maps import IdentityMap from geoh5py.data import FloatData @@ -16,7 +16,7 @@ from geoh5py.groups import UIJsonGroup from geoh5py.objects import ObjectBase from geoh5py.ui_json.utils import fetch_active_workspace -from simpeg.directives.directives import compute_JtJdiags +from simpeg.directives._directives import compute_JtJdiags class BaseSaveGeoH5(InversionDirective, ABC): @@ -397,13 +397,16 @@ def write(self, iteration: int, **_): iteration = 0 for line in file: val = re.findall(r"[+-]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+-]?\d+)", line) - if len(val) == 5: - log.append(val[:-2]) + if len(val) >= 4: + log.append(val[:3]) iteration += 1 if len(log) > 0: with open(filepath, "a", encoding="utf-8") as file: date_time = datetime.now().strftime("%b-%d-%Y:%H:%M:%S") + + if len(log) == 2: # First iteration with 0th iter + file.write(f"{0} " + " ".join(log[0]) + f" {date_time}\n") file.write(f"{iteration-1} " + " ".join(log[-1]) + f" {date_time}\n") self.save_log() diff --git a/simpeg/directives/_sim_directives.py b/simpeg/directives/_sim_directives.py new file mode 100644 index 0000000000..7a5f13a5ef --- /dev/null +++ b/simpeg/directives/_sim_directives.py @@ -0,0 +1,379 @@ +import numpy as np +from 
..regularization import BaseSimilarityMeasure
+from ..utils import eigenvalue_by_power_iteration
+from ..optimization import IterationPrinters, StoppingCriteria
+from ._directives import InversionDirective, SaveOutputEveryIteration
+
+
+###############################################################################
+#                                                                             #
+#                      Directives of joint inversion                          #
+#                                                                             #
+###############################################################################
+class SimilarityMeasureInversionPrinters:
+    beta = {
+        "title": "betas",
+        "value": lambda M: [f"{elem:1.2e}" for elem in M.parent.betas],
+        "width": 26,
+        "format": lambda v: f"{v!s}",
+    }
+    lambd = {
+        "title": "lambda",
+        "value": lambda M: M.parent.lambd,
+        "width": 10,
+        "format": lambda v: f"{v:1.2e}",
+    }
+    phi_d = {
+        "title": "phi_d",
+        "value": lambda M: [f"{elem:1.2e}" for elem in M.parent.dmisfit._last_obj_vals],
+        "width": 26,
+        "format": lambda v: f"{v!s}",
+    }
+    phi_m = {
+        "title": "phi_m",
+        "value": lambda M: [
+            f"{elem:1.2e}" for elem in M.parent.reg._last_obj_vals[:-1]
+        ],
+        "width": 26,
+        "format": lambda v: f"{v!s}",
+    }
+    phi_sim = {
+        "title": "phi_sim",
+        "value": lambda M: M.parent.reg._last_obj_vals[-1],
+        "width": 10,
+        "format": lambda v: f"{v:1.2e}",
+    }
+
+
+class SimilarityMeasureInversionDirective(InversionDirective):
+    """
+    Directive for two-model similarity measure joint inversions. Sets Printers and
+    StoppingCriteria.
+
+    Notes
+    -----
+    Methods assume we are working with two models, and a single similarity measure.
+    Also, the SimilarityMeasure objective function must be the last regularization.
+    """
+
+    printers = [
+        IterationPrinters.iteration,
+        SimilarityMeasureInversionPrinters.beta,
+        SimilarityMeasureInversionPrinters.lambd,
+        IterationPrinters.f,
+        SimilarityMeasureInversionPrinters.phi_d,
+        SimilarityMeasureInversionPrinters.phi_m,
+        SimilarityMeasureInversionPrinters.phi_sim,
+        IterationPrinters.iterationCG,
+        IterationPrinters.iteration_CG_rel_residual,
+        IterationPrinters.iteration_CG_abs_residual,
+    ]
+
+    def initialize(self):
+        if not isinstance(self.reg.objfcts[-1], BaseSimilarityMeasure):
+            raise TypeError(
+                f"The last regularization function must be an instance of "
+                f"BaseSimilarityMeasure, got {type(self.reg.objfcts[-1])}."
+            )
+
+        # define relevant attributes
+        self.betas = self.reg.multipliers[:-1]
+        self.lambd = self.reg.multipliers[-1]
+        self.phi_d_list = []
+        self.phi_m_list = []
+        self.phi_sim = 0.0
+
+        # pass attributes to invProb
+        self.invProb.betas = self.betas
+        self.invProb.num_models = len(self.betas)
+        self.invProb.lambd = self.lambd
+        self.invProb.phi_d_list = self.phi_d_list
+        self.invProb.phi_m_list = self.phi_m_list
+        self.invProb.phi_sim = self.phi_sim
+
+        self.opt.printers = self.printers
+        self.opt.stoppers = [StoppingCriteria.iteration]
+
+    def validate(self, directiveList):
+        # check that this directive is first in the DirectiveList
+        dList = directiveList.dList
+        self_ind = dList.index(self)
+        if self_ind != 0:
+            raise IndexError(
+                "The SimilarityMeasureInversionDirective must be first in the directive list."
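+                # initialize() seeds invProb attributes (betas, lambd, phi_d_list,
+                # phi_m_list, phi_sim) that later directives read.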
+ ) + return True + + def endIter(self): + # compute attribute values + phi_d = self.dmisfit._last_obj_vals + + phi_m = self.reg._last_obj_vals + + # pass attributes values to invProb + self.invProb.phi_d_list = phi_d + self.invProb.phi_m_list = phi_m[:-1] + self.invProb.phi_sim = phi_m[-1] + self.invProb.betas = self.reg.multipliers[:-1] + # Assume last reg.objfct is the coupling + self.invProb.lambd = self.reg.multipliers[-1] + + +class SimilarityMeasureSaveOutputEveryIteration(SaveOutputEveryIteration): + """ + SaveOutputEveryIteration for Joint Inversions. + Saves information on the tradeoff parameters, data misfits, regularizations, + coupling term, number of CG iterations, and value of cost function. + """ + + @property + def _header(self): + return " # betas lambda joint_phi_d joint_phi_m phi_sim iterCG phi " + + def _initialize_lists(self): + # Create a list of each + self.betas = [] + self.lambd = [] + self.phi_d = [] + self.phi_m = [] + self.phi = [] + self.phi_sim = [] + + def endIter(self): + self.betas.append(self.invProb.betas) + self.phi_d.append(self.invProb.phi_d_list) + self.phi_m.append(self.invProb.phi_m_list) + self.lambd.append(self.invProb.lambd) + self.phi_sim.append(self.invProb.phi_sim) + self.phi.append(self.opt.f) + + if self.on_disk: + self._mkdir_and_check_output_file(should_exist=True) + with open(self.file_abs_path, "a") as f: + f.write( + " {0:2d} {1} {2:.2e} {3} {4} {5:1.4e} {6:d} {7:1.4e}\n".format( + self.opt.iter, + [f"{el:.2e}" for el in self.betas[-1]], + self.lambd[-1], + [f"{el:.3e}" for el in self.phi_d[-1]], + [f"{el:.3e}" for el in self.phi_m[-1]], + self.phi_sim[-1], + self.opt.cg_count, + self.phi[-1], + ) + ) + + def load_results(self, file_name=None): + if file_name is None: + if not self.on_disk: + raise TypeError( + f"'file_name' is a required argument if '{type(self).__qualname__}.on_disk' is `False`" + ) + file_name = self.file_abs_path + results = np.loadtxt(file_name, comments="#") + + if results.shape[1] != 8: + raise ValueError(f"{file_name} does not have valid results") + + self.betas = results[:, 1] + self.lambd = results[:, 2] + self.phi_d = results[:, 3] + self.phi_m = results[:, 4] + self.phi_sim = results[:, 5] + self.phi = results[:, 7] + + +class PairedBetaEstimate_ByEig(InversionDirective): + """ + Estimate the trade-off parameter, beta, between pairs of data misfit(s) and the + regularization(s) as a multiple of the ratio between the highest eigenvalue of the + data misfit term and the highest eigenvalue of the regularization. + The highest eigenvalues are estimated through power iterations and Rayleigh + quotient. + + Notes + ----- + This class assumes the order of the data misfits for each model parameter match + the order for the respective regularizations, i.e. + + >>> data_misfits = [phi_d_m1, phi_d_m2, phi_d_m3] + >>> regs = [phi_m_m1, phi_m_m2, phi_m_m3] + + In which case it will estimate regularization parameters for each respective pair. + """ + + beta0_ratio = 1.0 #: the estimated ratio is multiplied by this to obtain beta + n_pw_iter = 4 #: number of power iterations for estimation. + seed = None #: Random seed for the directive + + def initialize(self): + r""" + The initial beta is calculated by comparing the estimated + eigenvalues of :math:`J^T J` and :math:`W^T W`. + To estimate the eigenvector of **A**, we will use one iteration + of the *Power Method*: + + .. 
math::
+
+            \mathbf{x_1 = A x_0}
+
+        Given this (very coarse) approximation of the eigenvector, we can
+        use the *Rayleigh quotient* to approximate the largest eigenvalue.
+
+        .. math::
+
+            \lambda_0 = \frac{\mathbf{x^\top A x}}{\mathbf{x^\top x}}
+
+        We will approximate the largest eigenvalue for both JtJ and WtW,
+        and use some ratio of the quotient to estimate beta0.
+
+        .. math::
+
+            \beta_0 = \gamma \frac{\mathbf{x^\top J^\top J x}}{\mathbf{x^\top W^\top W x}}
+
+        :rtype: float
+        :return: beta0
+        """
+        rng = np.random.default_rng(seed=self.seed)
+
+        if self.verbose:
+            print("Calculating the beta0 parameter.")
+
+        m = self.invProb.model
+        dmis_eigenvalues = []
+        reg_eigenvalues = []
+        dmis_objs = self.dmisfit.objfcts
+        reg_objs = [
+            obj
+            for obj in self.reg.objfcts
+            if not isinstance(obj, BaseSimilarityMeasure)
+        ]
+        if len(dmis_objs) != len(reg_objs):
+            raise ValueError(
+                f"There must be the same number of data misfits and regularizations. "
+                f"Got {len(dmis_objs)} and {len(reg_objs)} respectively."
+            )
+        for dmis, reg in zip(dmis_objs, reg_objs):
+            dmis_eigenvalues.append(
+                eigenvalue_by_power_iteration(
+                    dmis,
+                    m,
+                    n_pw_iter=self.n_pw_iter,
+                    random_seed=rng,
+                )
+            )
+
+            reg_eigenvalues.append(
+                eigenvalue_by_power_iteration(
+                    reg,
+                    m,
+                    n_pw_iter=self.n_pw_iter,
+                    random_seed=rng,
+                )
+            )
+
+        self.ratios = np.array(dmis_eigenvalues) / np.array(reg_eigenvalues)
+        self.invProb.betas = self.beta0_ratio * self.ratios
+        self.reg.multipliers[:-1] = self.invProb.betas
+
+
+class PairedBetaSchedule(InversionDirective):
+    """
+    Directive for beta cooling schedule to determine the tradeoff
+    parameters when using paired data misfits and regularizations for a joint inversion.
+    """
+
+    chifact_target = 1.0
+    beta_tol = 1e-1
+    update_beta = True
+    cooling_rate = 1
+    cooling_factor = 2
+    dmis_met = False
+
+    @property
+    def target(self):
+        if getattr(self, "_target", None) is None:
+            nD = np.array([survey.nD for survey in self.survey])
+
+            self._target = nD * self.chifact_target
+
+        return self._target
+
+    @target.setter
+    def target(self, val):
+        self._target = val
+
+    def initialize(self):
+        self.dmis_met = np.zeros_like(self.invProb.betas, dtype=bool)
+
+    def endIter(self):
+        # Check if target misfit has been reached, if so, set dmis_met to True
+        for i, phi_d in enumerate(self.invProb.phi_d_list):
+            self.dmis_met[i] = phi_d < self.target[i]
+
+        # check separately if misfits are within the tolerance,
+        # otherwise, scale beta individually
+        for i, phi_d in enumerate(self.invProb.phi_d_list):
+            if self.opt.iter > 0 and self.opt.iter % self.cooling_rate == 0:
+                target = self.target[i]
+                ratio = phi_d / target
+                if self.update_beta and ratio <= (1.0 + self.beta_tol):
+                    if ratio <= 1:
+                        ratio = np.maximum(0.75, ratio)
+                    else:
+                        ratio = np.minimum(1.5, ratio)
+
+                    self.invProb.betas[i] /= ratio
+                elif ratio > 1.0:
+                    self.invProb.betas[i] /= self.cooling_factor
+
+        self.reg.multipliers[:-1] = self.invProb.betas
+
+
+class MovingAndMultiTargetStopping(InversionDirective):
+    r"""
+    Directive for setting stopping criteria for a joint inversion.
+    Ensures both that all target misfits are met and that there is a small change in
+    the model. Computes the percentage change of the current model from the previous model.
+
+    .. math::
+        \frac {\| \mathbf{m_i} - \mathbf{m_{i-1}} \|} {\| \mathbf{m_{i-1}} \|}
+    """
+
+    tol = 1e-5
+    beta_tol = 1e-1
+    chifact_target = 1.0
+
+    @property
+    def target(self):
+        if getattr(self, "_target", None) is None:
+            nD = []
+            for survey in self.survey:
+                nD += [survey.nD]
+            nD = np.array(nD)
+
+            self._target = nD * self.chifact_target
+
+        return self._target
+
+    @target.setter
+    def target(self, val):
+        self._target = val
+
+    def endIter(self):
+        for phi_d, target in zip(self.invProb.phi_d_list, self.target):
+            if np.abs(1.0 - phi_d / target) >= self.beta_tol:
+                return
+        if (
+            np.linalg.norm(self.opt.xc - self.opt.x_last)
+            / np.linalg.norm(self.opt.x_last)
+            > self.tol
+        ):
+            return
+
+        print(
+            "stopping criteria met: ",
+            np.linalg.norm(self.opt.xc - self.opt.x_last)
+            / np.linalg.norm(self.opt.x_last),
+        )
+        self.opt.stopNextIteration = True
diff --git a/simpeg/directives/_vector_models.py b/simpeg/directives/_vector_models.py
index 1636b46415..e6b69ffd96 100644
--- a/simpeg/directives/_vector_models.py
+++ b/simpeg/directives/_vector_models.py
@@ -5,7 +5,6 @@
     InversionDirective,
     SaveModelGeoH5,
     SphericalUnitsWeights,
-    Update_IRLS,
     UpdateIRLS,
     UpdateSensitivityWeights,
 )
@@ -251,7 +250,7 @@ def endIter(self):
 
             directive.transforms = transforms
 
-        elif isinstance(directive, Update_IRLS | UpdateIRLS):
+        elif isinstance(directive, UpdateIRLS):
             directive.sphericalDomain = True
             directive.model = model
             directive.coolingFactor = 1.5
diff --git a/simpeg/directives/directives.py b/simpeg/directives/directives.py
index d88902f69c..c40e514918 100644
--- a/simpeg/directives/directives.py
+++ b/simpeg/directives/directives.py
@@ -1,3107 +1,18 @@
-from __future__ import annotations  # needed to use type operands in Python 3.8
+"""
+Backward compatibility with the ``simpeg.directives.directives`` submodule.
-
-from datetime import datetime
-from pathlib import Path
-from typing import TYPE_CHECKING
+This file will be deleted when the ``simpeg.directives.directives`` submodule is
+removed.
+"""
-
-
-import numpy as np
-import matplotlib.pyplot as plt
 import warnings
-import os
-import scipy.sparse as sp
-
-from ..typing import RandomSeed
-
-from ..data_misfit import BaseDataMisfit
-from ..objective_function import BaseObjectiveFunction, ComboObjectiveFunction
-from ..maps import IdentityMap, Wires
-
-from ..regularization import (
-    WeightedLeastSquares,
-    BaseRegularization,
-    BaseSparse,
-    Smallness,
-    Sparse,
-    SparseSmallness,
-    PGIsmallness,
-    SmoothnessFirstOrder,
-    SparseSmoothness,
-    BaseSimilarityMeasure,
-)
-from ..utils import (
-    mkvc,
-    set_kwargs,
-    sdiag,
-    estimate_diagonal,
-    Zero,
-    eigenvalue_by_power_iteration,
-    validate_string,
-)
-
-
-from ..utils.code_utils import (
-    deprecate_class,
-    deprecate_property,
-    validate_type,
-    validate_integer,
-    validate_float,
-    validate_ndarray_with_shape,
+from ._directives import *  # noqa: F403,F401
+
+warnings.warn(
+    "The `simpeg.directives.directives` submodule has been deprecated, "
+    "and will be removed in SimPEG v0.26.0. "
+    "Import any directive class directly from the `simpeg.directives` module. 
" + "E.g.: `from simpeg.directives import BetaSchedule`", + FutureWarning, + stacklevel=2, ) - - -def compute_JtJdiags(data_misfit, m): - if hasattr(data_misfit, "getJtJdiag"): - return data_misfit.getJtJdiag(m) - else: - jtj_diag_list = [] - jtj_diag = np.zeros_like(m) - - for dmisfit in data_misfit.objfcts: - if isinstance(dmisfit, ComboObjectiveFunction): - jtj_diag += compute_JtJdiags(dmisfit, m) - - else: - jtj_diag_list.append(dmisfit.getJtJdiag(m)) - - for multiplier, diag in zip(data_misfit.multipliers, jtj_diag_list): - jtj_diag += multiplier * diag - - return np.asarray(jtj_diag) - - -if TYPE_CHECKING: - from ..simulation import BaseSimulation - from ..survey import BaseSurvey - - -class InversionDirective: - """Base inversion directive class. - - SimPEG directives initialize and update parameters used by the inversion algorithm; - e.g. setting the initial beta or updating the regularization. ``InversionDirective`` - is a parent class responsible for connecting directives to the data misfit, regularization - and optimization defining the inverse problem. - - Parameters - ---------- - inversion : simpeg.inversion.BaseInversion, None - An SimPEG inversion object; i.e. an instance of :class:`simpeg.inversion.BaseInversion`. - dmisfit : simpeg.data_misfit.BaseDataMisfit, None - A data data misfit; i.e. an instance of :class:`simpeg.data_misfit.BaseDataMisfit`. - reg : simpeg.regularization.BaseRegularization, None - The regularization, or model objective function; i.e. an instance of :class:`simpeg.regularization.BaseRegularization`. - verbose : bool - Whether or not to print debugging information. - """ - - _REGISTRY = {} - - _regPair = [WeightedLeastSquares, BaseRegularization, ComboObjectiveFunction] - _dmisfitPair = [BaseDataMisfit, ComboObjectiveFunction] - - def __init__(self, inversion=None, dmisfit=None, reg=None, verbose=False, **kwargs): - # Raise error on deprecated arguments - if (key := "debug") in kwargs.keys(): - raise TypeError(f"'{key}' property has been removed. Please use 'verbose'.") - self.inversion = inversion - self.dmisfit = dmisfit - self.reg = reg - self.verbose = verbose - set_kwargs(self, **kwargs) - - @property - def verbose(self): - """Whether or not to print debugging information. - - Returns - ------- - bool - """ - return self._verbose - - @verbose.setter - def verbose(self, value): - self._verbose = validate_type("verbose", value, bool) - - debug = deprecate_property( - verbose, "debug", "verbose", removal_version="0.19.0", error=True - ) - - @property - def inversion(self): - """Inversion object associated with the directive. - - Returns - ------- - simpeg.inversion.BaseInversion - The inversion associated with the directive. - """ - if not hasattr(self, "_inversion"): - return None - return self._inversion - - @inversion.setter - def inversion(self, i): - if getattr(self, "_inversion", None) is not None and i is not self.inversion: - warnings.warn( - "InversionDirective {0!s} has switched to a new inversion.".format( - self.__class__.__name__ - ), - stacklevel=2, - ) - self._inversion = i - - @property - def invProb(self): - """Inverse problem associated with the directive. - - Returns - ------- - simpeg.inverse_problem.BaseInvProblem - The inverse problem associated with the directive. - """ - return self.inversion.invProb - - @property - def opt(self): - """Optimization algorithm associated with the directive. - - Returns - ------- - simpeg.optimization.Minimize - Optimization algorithm associated with the directive. 
- """ - return self.invProb.opt - - @property - def reg(self) -> BaseObjectiveFunction: - """Regularization associated with the directive. - - Returns - ------- - simpeg.regularization.BaseRegularization - The regularization associated with the directive. - """ - if getattr(self, "_reg", None) is None: - self.reg = self.invProb.reg # go through the setter - return self._reg - - @reg.setter - def reg(self, value): - if value is not None: - assert any( - [isinstance(value, regtype) for regtype in self._regPair] - ), "Regularization must be in {}, not {}".format(self._regPair, type(value)) - - if isinstance(value, WeightedLeastSquares): - value = 1 * value # turn it into a combo objective function - self._reg = value - - @property - def dmisfit(self) -> BaseObjectiveFunction: - """Data misfit associated with the directive. - - Returns - ------- - simpeg.data_misfit.BaseDataMisfit - The data misfit associated with the directive. - """ - if getattr(self, "_dmisfit", None) is None: - self._dmisfit = self.invProb.dmisfit # go through the setter - return self._dmisfit - - @dmisfit.setter - def dmisfit(self, value): - if value is not None: - assert any( - [isinstance(value, dmisfittype) for dmisfittype in self._dmisfitPair] - ), "Misfit must be in {}, not {}".format(self._dmisfitPair, type(value)) - - if not isinstance(value, ComboObjectiveFunction): - value = 1 * value # turn it into a combo objective function - self._dmisfit = value - - @property - def survey(self) -> list["BaseSurvey"]: - """Return survey for all data misfits - - Assuming that ``dmisfit`` is always a ``ComboObjectiveFunction``, - return a list containing the survey for each data misfit; i.e. - [survey1, survey2, ...] - - Returns - ------- - list of simpeg.survey.Survey - Survey for all data misfits. - """ - return [objfcts.simulation.survey for objfcts in self.dmisfit.objfcts] - - @property - def simulation(self) -> list["BaseSimulation"]: - """Return simulation for all data misfits. - - Assuming that ``dmisfit`` is always a ``ComboObjectiveFunction``, - return a list containing the simulation for each data misfit; i.e. - [sim1, sim2, ...]. - - Returns - ------- - list of simpeg.simulation.BaseSimulation - Simulation for all data misfits. - """ - return [objfcts.simulation for objfcts in self.dmisfit.objfcts] - - def initialize(self): - """Initialize inversion parameter(s) according to directive.""" - pass - - def endIter(self): - """Update inversion parameter(s) according to directive at end of iteration.""" - pass - - def finish(self): - """Update inversion parameter(s) according to directive at end of inversion.""" - pass - - def validate(self, directiveList=None): - """Validate directive. - - The `validate` method returns ``True`` if the directive and its location within - the directives list does not encounter conflicts. Otherwise, an appropriate error - message is returned describing the conflict. - - Parameters - ---------- - directive_list : simpeg.directives.DirectiveList - List of directives used in the inversion. - - Returns - ------- - bool - Returns ``True`` if validated, otherwise an approriate error is returned. - """ - return True - - -class DirectiveList(object): - """Directives list - - SimPEG directives initialize and update parameters used by the inversion algorithm; - e.g. setting the initial beta or updating the regularization. ``DirectiveList`` stores - the set of directives used in the inversion algorithm. 
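
As the base class documents, a directive hooks into the inversion at three points: ``initialize`` (before the first iteration), ``endIter`` (after each iteration), and ``finish`` (after the last). A minimal sketch of a custom directive, assuming only the import path named in the deprecation message above and the attributes used throughout this file (``self.opt.iter``, ``self.invProb.beta``):

.. code:: python

    from simpeg.directives import InversionDirective

    class LogBeta(InversionDirective):
        """Toy directive: report the trade-off parameter as it cools."""

        def initialize(self):
            print(f"starting beta: {self.invProb.beta:.3e}")

        def endIter(self):
            print(f"iter {self.opt.iter}: beta = {self.invProb.beta:.3e}")

Instances of such a directive are passed to ``DirectiveList`` alongside the built-in directives.
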
- - Parameters - ---------- - directives : list of simpeg.directives.InversionDirective - List of directives. - inversion : simpeg.inversion.BaseInversion - The inversion associated with the directives list. - debug : bool - Whether or not to print debugging information. - - """ - - def __init__(self, *directives, inversion=None, debug=False, **kwargs): - super().__init__(**kwargs) - self.dList = [] - for d in directives: - assert isinstance( - d, InversionDirective - ), "All directives must be InversionDirectives not {}".format(type(d)) - self.dList.append(d) - self.inversion = inversion - self.verbose = debug - - @property - def debug(self): - """Whether or not to print debugging information - - Returns - ------- - bool - """ - return getattr(self, "_debug", False) - - @debug.setter - def debug(self, value): - for d in self.dList: - d.debug = value - self._debug = value - - @property - def inversion(self): - """Inversion object associated with the directives list. - - Returns - ------- - simpeg.inversion.BaseInversion - The inversion associated with the directives list. - """ - return getattr(self, "_inversion", None) - - @inversion.setter - def inversion(self, i): - if self.inversion is i: - return - if getattr(self, "_inversion", None) is not None: - warnings.warn( - "{0!s} has switched to a new inversion.".format( - self.__class__.__name__ - ), - stacklevel=2, - ) - for d in self.dList: - d.inversion = i - self._inversion = i - - def call(self, ruleType): - if self.dList is None: - if self.verbose: - print("DirectiveList is None, no directives to call!") - return - - directives = ["initialize", "endIter", "finish"] - assert ruleType in directives, 'Directive type must be in ["{0!s}"]'.format( - '", "'.join(directives) - ) - for r in self.dList: - getattr(r, ruleType)() - - def validate(self): - [directive.validate(self) for directive in self.dList] - return True - - -class BaseBetaEstimator(InversionDirective): - """Base class for estimating initial trade-off parameter (beta). - - This class has properties and methods inherited by directive classes which estimate - the initial trade-off parameter (beta). This class is not used directly to create - directives for the inversion. - - Parameters - ---------- - beta0_ratio : float - Desired ratio between data misfit and model objective function at initial beta iteration. - random_seed : None or :class:`~simpeg.typing.RandomSeed`, optional - Random seed used for random sampling. It can either be an int, - a predefined Numpy random number generator, or any valid input to - ``numpy.random.default_rng``. - seed : None or :class:`~simpeg.typing.RandomSeed`, optional - - .. deprecated:: 0.23.0 - - Argument ``seed`` is deprecated in favor of ``random_seed`` and will - be removed in SimPEG v0.24.0. - - """ - - def __init__( - self, - beta0_ratio=1.0, - random_seed: RandomSeed | None = None, - seed: RandomSeed | None = None, - **kwargs, - ): - super().__init__(**kwargs) - self.beta0_ratio = beta0_ratio - - # Deprecate seed argument - if seed is not None: - if random_seed is not None: - raise TypeError( - "Cannot pass both 'random_seed' and 'seed'." 
- "'seed' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'random_seed' instead.", - ) - warnings.warn( - "'seed' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'random_seed' instead.", - FutureWarning, - stacklevel=2, - ) - random_seed = seed - self.random_seed = random_seed - - @property - def beta0_ratio(self): - """The estimated ratio is multiplied by this to obtain beta. - - Returns - ------- - float - """ - return self._beta0_ratio - - @beta0_ratio.setter - def beta0_ratio(self, value): - self._beta0_ratio = validate_float( - "beta0_ratio", value, min_val=0.0, inclusive_min=False - ) - - @property - def random_seed(self): - """Random seed to initialize with. - - Returns - ------- - int, numpy.random.Generator or None - """ - return self._random_seed - - @random_seed.setter - def random_seed(self, value): - try: - np.random.default_rng(value) - except TypeError as err: - msg = ( - "Unable to initialize the random number generator with " - f"a {type(value).__name__}" - ) - raise TypeError(msg) from err - - self._random_seed = value - - def validate(self, directive_list): - ind = [isinstance(d, BaseBetaEstimator) for d in directive_list.dList] - assert np.sum(ind) == 1, ( - "Multiple directives for computing initial beta detected in directives list. " - "Only one directive can be used to set the initial beta." - ) - - return True - - seed = deprecate_property( - random_seed, - "seed", - "random_seed", - removal_version="0.24.0", - future_warn=True, - error=False, - ) - - -class BetaEstimateMaxDerivative(BaseBetaEstimator): - r"""Estimate initial trade-off parameter (beta) using largest derivatives. - - The initial trade-off parameter (beta) is estimated by scaling the ratio - between the largest derivatives in the gradient of the data misfit and - model objective function. The estimated trade-off parameter is used to - update the **beta** property in the associated :class:`simpeg.inverse_problem.BaseInvProblem` - object prior to running the inversion. A separate directive is used for updating the - trade-off parameter at successive beta iterations; see :class:`BetaSchedule`. - - Parameters - ---------- - beta0_ratio: float - Desired ratio between data misfit and model objective function at initial beta iteration. - random_seed : None or :class:`~simpeg.typing.RandomSeed`, optional - Random seed used for random sampling. It can either be an int, - a predefined Numpy random number generator, or any valid input to - ``numpy.random.default_rng``. - seed : None or :class:`~simpeg.typing.RandomSeed`, optional - - .. deprecated:: 0.23.0 - - Argument ``seed`` is deprecated in favor of ``random_seed`` and will - be removed in SimPEG v0.24.0. - - Notes - ----- - Let :math:`\phi_d` represent the data misfit, :math:`\phi_m` represent the model - objective function and :math:`\mathbf{m_0}` represent the starting model. The first - model update is obtained by minimizing the a global objective function of the form: - - .. math:: - \phi (\mathbf{m_0}) = \phi_d (\mathbf{m_0}) + \beta_0 \phi_m (\mathbf{m_0}) - - where :math:`\beta_0` represents the initial trade-off parameter (beta). - - We define :math:`\gamma` as the desired ratio between the data misfit and model objective - functions at the initial beta iteration (defined by the 'beta0_ratio' input argument). - Here, the initial trade-off parameter is computed according to: - - .. 
math:: - \beta_0 = \gamma \frac{| \nabla_m \phi_d (\mathbf{m_0}) |_{max}}{| \nabla_m \phi_m (\mathbf{m_0 + \delta m}) |_{max}} - - where - - .. math:: - \delta \mathbf{m} = \frac{m_{max}}{\mu_{max}} \boldsymbol{\mu} - - and :math:`\boldsymbol{\mu}` is a set of independent samples from the - continuous uniform distribution between 0 and 1. - - """ - - def __init__( - self, beta0_ratio=1.0, random_seed: RandomSeed | None = None, **kwargs - ): - super().__init__(beta0_ratio=beta0_ratio, random_seed=random_seed, **kwargs) - - def initialize(self): - rng = np.random.default_rng(seed=self.random_seed) - - if self.verbose: - print("Calculating the beta0 parameter.") - - m = self.invProb.model - - x0 = rng.random(size=m.shape) - phi_d_deriv = np.abs(self.dmisfit.deriv(m)).max() - dm = x0 / x0.max() * m.max() - phi_m_deriv = np.abs(self.reg.deriv(m + dm)).max() - - self.ratio = np.asarray(phi_d_deriv / phi_m_deriv) - self.beta0 = self.beta0_ratio * self.ratio - self.invProb.beta = self.beta0 - - -class BetaEstimateDerivative(BaseBetaEstimator): - r"""Estimate initial trade-off parameter (beta) using largest derivatives. - - The initial trade-off parameter (beta) is estimated by scaling the ratio - between the largest derivatives in the gradient of the data misfit and - model objective function. The estimated trade-off parameter is used to - update the **beta** property in the associated :class:`simpeg.inverse_problem.BaseInvProblem` - object prior to running the inversion. A separate directive is used for updating the - trade-off parameter at successive beta iterations; see :class:`BetaSchedule`. - - Parameters - ---------- - beta0_ratio: float - Desired ratio between data misfit and model objective function at initial beta iteration. - seed : None or :class:`~simpeg.typing.RandomSeed`, optional - Random seed used for random sampling. It can either be an int, - a predefined Numpy random number generator, or any valid input to - ``numpy.random.default_rng``. - - Notes - ----- - Let :math:`\phi_d` represent the data misfit, :math:`\phi_m` represent the model - objective function and :math:`\mathbf{m_0}` represent the starting model. The first - model update is obtained by minimizing the a global objective function of the form: - - .. math:: - \phi (\mathbf{m_0}) = \phi_d (\mathbf{m_0}) + \beta_0 \phi_m (\mathbf{m_0}) - - where :math:`\beta_0` represents the initial trade-off parameter (beta). - - We define :math:`\gamma` as the desired ratio between the data misfit and model objective - functions at the initial beta iteration (defined by the 'beta0_ratio' input argument). - Here, the initial trade-off parameter is computed according to: - - .. math:: - \beta_0 = \gamma \frac{| \nabla_m \phi_d (\mathbf{m_0}) |_{max}}{| \nabla_m \phi_m (\mathbf{m_0 + \delta m}) |_{max}} - - where - - .. math:: - \delta \mathbf{m} = \frac{m_{max}}{\mu_{max}} \boldsymbol{\mu} - - and :math:`\boldsymbol{\mu}` is a set of independent samples from the - continuous uniform distribution between 0 and 1. 
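
The ``initialize`` method above turns the formula from the Notes into a few lines of NumPy. A standalone sketch with toy quadratic objectives; ``dmis_deriv`` and ``reg_deriv`` are hypothetical callables standing in for the data-misfit and regularization gradients:

.. code:: python

    import numpy as np

    def beta0_max_derivative(dmis_deriv, reg_deriv, m, beta0_ratio=1.0, rng=None):
        # Ratio of the largest gradient entries, evaluated at m and at a
        # randomly perturbed m, mirroring the initialize method above.
        rng = np.random.default_rng(rng)
        x0 = rng.random(size=m.shape)
        dm = x0 / x0.max() * m.max()
        return beta0_ratio * np.abs(dmis_deriv(m)).max() / np.abs(reg_deriv(m + dm)).max()

    # Toy objectives: phi_d = ||A m||^2 and phi_m = ||m||^2.
    A = np.diag([10.0, 1.0, 0.1])
    m = np.ones(3)
    print(beta0_max_derivative(lambda v: 2 * A.T @ (A @ v), lambda v: 2 * v, m))
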
- - """ - - def __init__(self, beta0_ratio=1.0, seed: RandomSeed | None = None, **kwargs): - super().__init__(beta0_ratio=beta0_ratio, seed=seed, **kwargs) - - def initialize(self): - rng = np.random.default_rng(seed=self.random_seed) - - if self.verbose: - print("Calculating the beta0 parameter.") - - m = self.invProb.model - - x0 = rng.random(size=m.shape) - phi_d_deriv = self.dmisfit.deriv2(m, x0) - t = np.dot(x0, phi_d_deriv) - reg = self.reg.deriv2(m, v=x0) - b = np.dot(x0, reg) - self.ratio = np.asarray(t / b) - self.beta0 = self.beta0_ratio * self.ratio - self.invProb.beta = self.beta0 - - -class BetaEstimate_ByEig(BaseBetaEstimator): - r"""Estimate initial trade-off parameter (beta) by power iteration. - - The initial trade-off parameter (beta) is estimated by scaling the ratio - between the largest eigenvalue in the second derivative of the data - misfit and the model objective function. The largest eigenvalues are estimated - using the power iteration method; see :func:`simpeg.utils.eigenvalue_by_power_iteration`. - The estimated trade-off parameter is used to update the **beta** property in the - associated :class:`simpeg.inverse_problem.BaseInvProblem` object prior to running the inversion. - Note that a separate directive is used for updating the trade-off parameter at successive - beta iterations; see :class:`BetaSchedule`. - - Parameters - ---------- - beta0_ratio: float - Desired ratio between data misfit and model objective function at initial beta iteration. - n_pw_iter : int - Number of power iterations used to estimate largest eigenvalues. - random_seed : None or :class:`~simpeg.typing.RandomSeed`, optional - Random seed used for random sampling. It can either be an int, - a predefined Numpy random number generator, or any valid input to - ``numpy.random.default_rng``. - seed : None or :class:`~simpeg.typing.RandomSeed`, optional - - .. deprecated:: 0.23.0 - - Argument ``seed`` is deprecated in favor of ``random_seed`` and will - be removed in SimPEG v0.24.0. - - Notes - ----- - Let :math:`\phi_d` represent the data misfit, :math:`\phi_m` represent the model - objective function and :math:`\mathbf{m_0}` represent the starting model. The first - model update is obtained by minimizing the a global objective function of the form: - - .. math:: - \phi (\mathbf{m_0}) = \phi_d (\mathbf{m_0}) + \beta_0 \phi_m (\mathbf{m_0}) - - where :math:`\beta_0` represents the initial trade-off parameter (beta). - Let :math:`\gamma` define the desired ratio between the data misfit and model - objective functions at the initial beta iteration (defined by the 'beta0_ratio' input argument). - Using the power iteration approach, our initial trade-off parameter is given by: - - .. math:: - \beta_0 = \gamma \frac{\lambda_d}{\lambda_m} - - where :math:`\lambda_d` as the largest eigenvalue of the Hessian of the data misfit, and - :math:`\lambda_m` as the largest eigenvalue of the Hessian of the model objective function. - For each Hessian, the largest eigenvalue is computed using power iteration. The input - parameter 'n_pw_iter' sets the number of power iterations used in the estimate. - - For a description of the power iteration approach for estimating the larges eigenvalue, - see :func:`simpeg.utils.eigenvalue_by_power_iteration`. 
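
Power iteration, which ``BetaEstimate_ByEig`` relies on, needs only Hessian-vector products. A matrix-free sketch of the Rayleigh-quotient estimate that ``eigenvalue_by_power_iteration`` is described as computing:

.. code:: python

    import numpy as np

    def largest_eigenvalue(hess_vec, n, n_pw_iter=4, rng=None):
        # Power iteration on a symmetric operator given only mat-vec products.
        rng = np.random.default_rng(rng)
        x = rng.standard_normal(n)
        x /= np.linalg.norm(x)
        for _ in range(n_pw_iter):
            y = hess_vec(x)
            x = y / np.linalg.norm(y)
        return float(x @ hess_vec(x))  # Rayleigh quotient

    H = np.diag([5.0, 2.0, 1.0])
    print(largest_eigenvalue(lambda v: H @ v, 3, n_pw_iter=20))  # approx 5.0
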
- - """ - - def __init__( - self, - beta0_ratio=1.0, - n_pw_iter=4, - random_seed: RandomSeed | None = None, - seed: RandomSeed | None = None, - **kwargs, - ): - super().__init__( - beta0_ratio=beta0_ratio, random_seed=random_seed, seed=seed, **kwargs - ) - - self.n_pw_iter = n_pw_iter - - @property - def n_pw_iter(self): - """Number of power iterations for estimating largest eigenvalues. - - Returns - ------- - int - Number of power iterations for estimating largest eigenvalues. - """ - return self._n_pw_iter - - @n_pw_iter.setter - def n_pw_iter(self, value): - self._n_pw_iter = validate_integer("n_pw_iter", value, min_val=1) - - def initialize(self): - rng = np.random.default_rng(seed=self.random_seed) - - if self.verbose: - print("Calculating the beta0 parameter.") - - m = self.invProb.model - - dm_eigenvalue = eigenvalue_by_power_iteration( - self.dmisfit, - m, - n_pw_iter=self.n_pw_iter, - random_seed=rng, - ) - reg_eigenvalue = eigenvalue_by_power_iteration( - self.reg, - m, - n_pw_iter=self.n_pw_iter, - random_seed=rng, - ) - - self.ratio = np.asarray(dm_eigenvalue / reg_eigenvalue) - self.beta0 = self.beta0_ratio * self.ratio - self.invProb.beta = self.beta0 - - -class BetaSchedule(InversionDirective): - """Reduce trade-off parameter (beta) at successive iterations using a cooling schedule. - - Updates the **beta** property in the associated :class:`simpeg.inverse_problem.BaseInvProblem` - while the inversion is running. - For linear least-squares problems, the optimization problem can be solved in a - single step and the cooling rate can be set to *1*. For non-linear optimization - problems, multiple steps are required obtain the minimizer for a fixed trade-off - parameter. In this case, the cooling rate should be larger than 1. - - Parameters - ---------- - coolingFactor : float - The factor by which the trade-off parameter is decreased when updated. - The preexisting value of the trade-off parameter is divided by the cooling factor. - coolingRate : int - Sets the number of successive iterations before the trade-off parameter is reduced. - Use *1* for linear least-squares optimization problems. Use *2* for weakly non-linear - optimization problems. Use *3* for general non-linear optimization problems. - - """ - - def __init__(self, coolingFactor=8.0, coolingRate=3, **kwargs): - super().__init__(**kwargs) - self.coolingFactor = coolingFactor - self.coolingRate = coolingRate - - @property - def coolingFactor(self): - """Beta is divided by this value every `coolingRate` iterations. - - Returns - ------- - float - """ - return self._coolingFactor - - @coolingFactor.setter - def coolingFactor(self, value): - self._coolingFactor = validate_float( - "coolingFactor", value, min_val=0.0, inclusive_min=False - ) - - @property - def coolingRate(self): - """Cool after this number of iterations. - - Returns - ------- - int - """ - return self._coolingRate - - @coolingRate.setter - def coolingRate(self, value): - self._coolingRate = validate_integer("coolingRate", value, min_val=1) - - def endIter(self): - if self.opt.iter > 0 and self.opt.iter % self.coolingRate == 0: - if self.verbose: - print( - "BetaSchedule is cooling Beta. 
Iteration: {0:d}".format( - self.opt.iter - ) - ) - self.invProb.beta /= self.coolingFactor - - -class AlphasSmoothEstimate_ByEig(InversionDirective): - """ - Estimate the alphas multipliers for the smoothness terms of the regularization - as a multiple of the ratio between the highest eigenvalue of the - smallness term and the highest eigenvalue of each smoothness term of the regularization. - The highest eigenvalue are estimated through power iterations and Rayleigh quotient. - """ - - def __init__( - self, - alpha0_ratio=1.0, - n_pw_iter=4, - random_seed: RandomSeed | None = None, - seed: RandomSeed | None = None, - **kwargs, - ): - super().__init__(**kwargs) - self.alpha0_ratio = alpha0_ratio - self.n_pw_iter = n_pw_iter - - # Deprecate seed argument - if seed is not None: - if random_seed is not None: - raise TypeError( - "Cannot pass both 'random_seed' and 'seed'." - "'seed' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'random_seed' instead.", - ) - warnings.warn( - "'seed' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'random_seed' instead.", - FutureWarning, - stacklevel=2, - ) - random_seed = seed - self.random_seed = random_seed - - @property - def alpha0_ratio(self): - """the estimated Alpha_smooth is multiplied by this ratio (int or array). - - Returns - ------- - numpy.ndarray - """ - return self._alpha0_ratio - - @alpha0_ratio.setter - def alpha0_ratio(self, value): - self._alpha0_ratio = validate_ndarray_with_shape( - "alpha0_ratio", value, shape=("*",) - ) - - @property - def n_pw_iter(self): - """Number of power iterations for estimation. - - Returns - ------- - int - """ - return self._n_pw_iter - - @n_pw_iter.setter - def n_pw_iter(self, value): - self._n_pw_iter = validate_integer("n_pw_iter", value, min_val=1) - - @property - def random_seed(self): - """Random seed to initialize with. - - Returns - ------- - int, numpy.random.Generator or None - """ - return self._random_seed - - @random_seed.setter - def random_seed(self, value): - try: - np.random.default_rng(value) - except TypeError as err: - msg = ( - "Unable to initialize the random number generator with " - f"a {type(value).__name__}" - ) - raise TypeError(msg) from err - self._random_seed = value - - seed = deprecate_property( - random_seed, - "seed", - "random_seed", - removal_version="0.24.0", - future_warn=True, - error=False, - ) - - def initialize(self): - """""" - rng = np.random.default_rng(seed=self.random_seed) - - smoothness = [] - smallness = [] - parents = {} - for regobjcts in self.reg.objfcts: - if isinstance(regobjcts, ComboObjectiveFunction): - objfcts = regobjcts.objfcts - else: - objfcts = [regobjcts] - - for obj in objfcts: - if isinstance( - obj, - ( - Smallness, - SparseSmallness, - PGIsmallness, - ), - ): - smallness += [obj] - - elif isinstance(obj, (SmoothnessFirstOrder, SparseSmoothness)): - parents[obj] = regobjcts - smoothness += [obj] - - if len(smallness) == 0: - raise UserWarning( - "Directive 'AlphasSmoothEstimate_ByEig' requires a regularization with at least one Small instance." - ) - - smallness_eigenvalue = eigenvalue_by_power_iteration( - smallness[0], - self.invProb.model, - n_pw_iter=self.n_pw_iter, - random_seed=rng, - ) - - self.alpha0_ratio = self.alpha0_ratio * np.ones(len(smoothness)) - - if len(self.alpha0_ratio) != len(smoothness): - raise ValueError( - f"Input values for 'alpha0_ratio' should be of len({len(smoothness)}). 
Provided {self.alpha0_ratio}" - ) - - alphas = [] - for user_alpha, obj in zip(self.alpha0_ratio, smoothness): - smooth_i_eigenvalue = eigenvalue_by_power_iteration( - obj, - self.invProb.model, - n_pw_iter=self.n_pw_iter, - random_seed=rng, - ) - ratio = smallness_eigenvalue / smooth_i_eigenvalue - - mtype = obj._multiplier_pair - - new_alpha = getattr(parents[obj], mtype) * user_alpha * ratio - setattr(parents[obj], mtype, new_alpha) - alphas += [new_alpha] - - if self.verbose: - print(f"Alpha scales: {alphas}") - - -class ScalingMultipleDataMisfits_ByEig(InversionDirective): - """ - For multiple data misfits only: multiply each data misfit term - by the inverse of its highest eigenvalue and then - normalize the sum of the data misfit multipliers to one. - The highest eigenvalue are estimated through power iterations and Rayleigh quotient. - """ - - def __init__( - self, - chi0_ratio=None, - n_pw_iter=4, - random_seed: RandomSeed | None = None, - seed: RandomSeed | None = None, - **kwargs, - ): - super().__init__(**kwargs) - self.chi0_ratio = chi0_ratio - self.n_pw_iter = n_pw_iter - - # Deprecate seed argument - if seed is not None: - if random_seed is not None: - raise TypeError( - "Cannot pass both 'random_seed' and 'seed'." - "'seed' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'random_seed' instead.", - ) - warnings.warn( - "'seed' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'random_seed' instead.", - FutureWarning, - stacklevel=2, - ) - random_seed = seed - self.random_seed = random_seed - - @property - def chi0_ratio(self): - """the estimated Alpha_smooth is multiplied by this ratio (int or array) - - Returns - ------- - numpy.ndarray - """ - return self._chi0_ratio - - @chi0_ratio.setter - def chi0_ratio(self, value): - if value is not None: - value = validate_ndarray_with_shape("chi0_ratio", value, shape=("*",)) - self._chi0_ratio = value - - @property - def n_pw_iter(self): - """Number of power iterations for estimation. 
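
The loop above rescales each smoothness multiplier so that, at the starting model, the term weighs in at ``alpha0_ratio`` times the smallness term. The same update in scalar form, with illustrative eigenvalues:

.. code:: python

    # Scalar form of the alpha update performed above (values illustrative).
    lambda_small = 4.0e2    # largest eigenvalue of the smallness Hessian
    lambda_smooth = 2.5e4   # largest eigenvalue of one smoothness Hessian
    alpha0_ratio = 1.0      # user-supplied ratio for this term
    alpha_old = 1.0         # current multiplier on the smoothness term

    alpha_new = alpha_old * alpha0_ratio * (lambda_small / lambda_smooth)
    print(alpha_new)  # 0.016: the smoothness term now matches the smallness scale
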
- - Returns - ------- - int - """ - return self._n_pw_iter - - @n_pw_iter.setter - def n_pw_iter(self, value): - self._n_pw_iter = validate_integer("n_pw_iter", value, min_val=1) - - @property - def random_seed(self): - """Random seed to initialize with - - Returns - ------- - int, numpy.random.Generator or None - """ - return self._random_seed - - @random_seed.setter - def random_seed(self, value): - try: - np.random.default_rng(value) - except TypeError as err: - msg = ( - "Unable to initialize the random number generator with " - f"a {type(value).__name__}" - ) - raise TypeError(msg) from err - self._random_seed = value - - seed = deprecate_property( - random_seed, - "seed", - "random_seed", - removal_version="0.24.0", - future_warn=True, - error=False, - ) - - def initialize(self): - """""" - rng = np.random.default_rng(seed=self.random_seed) - - if self.verbose: - print("Calculating the scaling parameter.") - - if ( - getattr(self.dmisfit, "objfcts", None) is None - or len(self.dmisfit.objfcts) == 1 - ): - raise TypeError( - "ScalingMultipleDataMisfits_ByEig only applies to joint inversion" - ) - - ndm = len(self.dmisfit.objfcts) - if self.chi0_ratio is not None: - self.chi0_ratio = self.chi0_ratio * np.ones(ndm) - else: - self.chi0_ratio = self.dmisfit.multipliers - - m = self.invProb.model - - dm_eigenvalue_list = [] - for dm in self.dmisfit.objfcts: - dm_eigenvalue_list += [ - eigenvalue_by_power_iteration(dm, m, random_seed=rng) - ] - - self.chi0 = self.chi0_ratio / np.r_[dm_eigenvalue_list] - self.chi0 = self.chi0 / np.sum(self.chi0) - self.dmisfit.multipliers = self.chi0 - - if self.verbose: - print("Scale Multipliers: ", self.dmisfit.multipliers) - - -class JointScalingSchedule(InversionDirective): - """ - For multiple data misfits only: rebalance each data misfit term - during the inversion when some datasets are fit, and others not - using the ratios of current misfits and their respective target. - It implements the strategy described in https://doi.org/10.1093/gji/ggaa378. - """ - - def __init__( - self, warmingFactor=1.0, chimax=1e10, chimin=1e-10, update_rate=1, **kwargs - ): - super().__init__(**kwargs) - self.mode = 1 - self.warmingFactor = warmingFactor - self.chimax = chimax - self.chimin = chimin - self.update_rate = update_rate - - @property - def mode(self): - """The type of update to perform. - - Returns - ------- - {1, 2} - """ - return self._mode - - @mode.setter - def mode(self, value): - self._mode = validate_integer("mode", value, min_val=1, max_val=2) - - @property - def warmingFactor(self): - """Factor to adjust scaling of the data misfits by. - - Returns - ------- - float - """ - return self._warmingFactor - - @warmingFactor.setter - def warmingFactor(self, value): - self._warmingFactor = validate_float( - "warmingFactor", value, min_val=0.0, inclusive_min=False - ) - - @property - def chimax(self): - """Maximum chi factor. - - Returns - ------- - float - """ - return self._chimax - - @chimax.setter - def chimax(self, value): - self._chimax = validate_float("chimax", value, min_val=0.0, inclusive_min=False) - - @property - def chimin(self): - """Minimum chi factor. - - Returns - ------- - float - """ - return self._chimin - - @chimin.setter - def chimin(self, value): - self._chimin = validate_float("chimin", value, min_val=0.0, inclusive_min=False) - - @property - def update_rate(self): - """Will update the data misfit scalings after this many iterations. 
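
The ``initialize`` above balances a joint misfit by weighting each term with the inverse of its largest eigenvalue and then normalizing the weights to sum to one. A two-term sketch with illustrative eigenvalues:

.. code:: python

    import numpy as np

    eigenvalues = np.array([1.0e3, 2.0e1])  # one per data misfit term
    chi0_ratio = np.ones_like(eigenvalues)

    chi0 = chi0_ratio / eigenvalues  # down-weight the dominant term
    chi0 /= chi0.sum()               # multipliers sum to one
    print(chi0)                      # approx [0.0196, 0.9804]
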
- - Returns - ------- - int - """ - return self._update_rate - - @update_rate.setter - def update_rate(self, value): - self._update_rate = validate_integer("update_rate", value, min_val=1) - - def initialize(self): - if ( - getattr(self.dmisfit, "objfcts", None) is None - or len(self.dmisfit.objfcts) == 1 - ): - raise TypeError("JointScalingSchedule only applies to joint inversion") - - targetclass = np.r_[ - [ - isinstance(dirpart, MultiTargetMisfits) - for dirpart in self.inversion.directiveList.dList - ] - ] - if ~np.any(targetclass): - self.DMtarget = None - else: - self.targetclass = np.where(targetclass)[0][-1] - self.DMtarget = self.inversion.directiveList.dList[ - self.targetclass - ].DMtarget - - if self.verbose: - print("Initial data misfit scales: ", self.dmisfit.multipliers) - - def endIter(self): - self.dmlist = self.inversion.directiveList.dList[self.targetclass].dmlist - - if np.any(self.dmlist < self.DMtarget): - self.mode = 2 - else: - self.mode = 1 - - if self.opt.iter > 0 and self.opt.iter % self.update_rate == 0: - if self.mode == 2: - if np.all(np.r_[self.dmisfit.multipliers] > self.chimin) and np.all( - np.r_[self.dmisfit.multipliers] < self.chimax - ): - indx = self.dmlist > self.DMtarget - if np.any(indx): - multipliers = self.warmingFactor * np.median( - self.DMtarget[~indx] / self.dmlist[~indx] - ) - if np.sum(indx) == 1: - indx = np.where(indx)[0][0] - self.dmisfit.multipliers[indx] *= multipliers - self.dmisfit.multipliers /= np.sum(self.dmisfit.multipliers) - - if self.verbose: - print("Updating scaling for data misfits by ", multipliers) - print("New scales:", self.dmisfit.multipliers) - - -class TargetMisfit(InversionDirective): - """ - ... note:: Currently this target misfit is not set up for joint inversion. - Check out MultiTargetMisfits - """ - - def __init__(self, target=None, phi_d_star=None, chifact=1.0, **kwargs): - super().__init__(**kwargs) - self.chifact = chifact - self.phi_d_star = phi_d_star - if phi_d_star is not None and target is not None: - raise AttributeError("Attempted to set both target and phi_d_star.") - if target is not None: - self.target = target - - @property - def target(self): - """The target value for the data misfit - - Returns - ------- - float - """ - if getattr(self, "_target", None) is None: - self._target = self.chifact * self.phi_d_star - return self._target - - @target.setter - def target(self, val): - self._target = validate_float("target", val, min_val=0.0, inclusive_min=False) - - @property - def chifact(self): - """The a multiplier for the target data misfit value. - - The target value is `chifact` times `phi_d_star` - - Returns - ------- - float - """ - return self._chifact - - @chifact.setter - def chifact(self, value): - self._chifact = validate_float( - "chifact", value, min_val=0.0, inclusive_min=False - ) - self._target = None - - @property - def phi_d_star(self): - """The target phi_d value for the data misfit. 
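
When some misfits in a joint inversion are already below target and others are not, the ``endIter`` above boosts the lagging multipliers by the median ratio of target to misfit among the fitted terms, then renormalizes. A sketch with two surveys (values illustrative):

.. code:: python

    import numpy as np

    dmlist = np.array([80.0, 300.0])   # current misfits: first survey is fit
    target = np.array([100.0, 100.0])  # per-survey targets
    multipliers = np.array([0.5, 0.5])
    warming_factor = 1.0

    above = dmlist > target
    if above.any() and (~above).any():
        boost = warming_factor * np.median(target[~above] / dmlist[~above])
        multipliers[above] *= boost
        multipliers /= multipliers.sum()
    print(multipliers)  # approx [0.444, 0.556]
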
- - The target value is `chifact` times `phi_d_star` - - Returns - ------- - float - """ - # phid = ||dpred - dobs||^2 - if self._phi_d_star is None: - nD = 0 - for survey in self.survey: - nD += survey.nD - self._phi_d_star = nD - return self._phi_d_star - - @phi_d_star.setter - def phi_d_star(self, value): - # phid = ||dpred - dobs||^2 - if value is not None: - value = validate_float( - "phi_d_star", value, min_val=0.0, inclusive_min=False - ) - self._phi_d_star = value - self._target = None - - def endIter(self): - if self.invProb.phi_d < self.target: - self.opt.stopNextIteration = True - self.print_final_misfit() - - def print_final_misfit(self): - if self.opt.print_type == "ubc": - self.opt.print_target = ( - ">> Target misfit: %.1f (# of data) is achieved" - ) % (self.target) - - -class MultiTargetMisfits(InversionDirective): - def __init__( - self, - WeightsInTarget=False, - chifact=1.0, - phi_d_star=None, - TriggerSmall=True, - chiSmall=1.0, - phi_ms_star=None, - TriggerTheta=False, - ToleranceTheta=1.0, - distance_norm=np.inf, - **kwargs, - ): - super().__init__(**kwargs) - - self.WeightsInTarget = WeightsInTarget - # Chi factor for Geophsyical Data Misfit - self.chifact = chifact - self.phi_d_star = phi_d_star - - # Chifact for Clustering/Smallness - self.TriggerSmall = TriggerSmall - self.chiSmall = chiSmall - self.phi_ms_star = phi_ms_star - - # Tolerance for parameters difference with their priors - self.TriggerTheta = TriggerTheta # deactivated by default - self.ToleranceTheta = ToleranceTheta - self.distance_norm = distance_norm - - self._DM = False - self._CL = False - self._DP = False - - @property - def WeightsInTarget(self): - """Whether to account for weights in the petrophysical misfit. - - Returns - ------- - bool - """ - return self._WeightsInTarget - - @WeightsInTarget.setter - def WeightsInTarget(self, value): - self._WeightsInTarget = validate_type("WeightsInTarget", value, bool) - - @property - def chifact(self): - """The a multiplier for the target Geophysical data misfit value. - - The target value is `chifact` times `phi_d_star` - - Returns - ------- - numpy.ndarray - """ - return self._chifact - - @chifact.setter - def chifact(self, value): - self._chifact = validate_ndarray_with_shape("chifact", value, shape=("*",)) - self._DMtarget = None - - @property - def phi_d_star(self): - """The target phi_d value for the Geophysical data misfit. - - The target value is `chifact` times `phi_d_star` - - Returns - ------- - float - """ - # phid = || dpred - dobs||^2 - if getattr(self, "_phi_d_star", None) is None: - # Check if it is a ComboObjective - if isinstance(self.dmisfit, ComboObjectiveFunction): - value = np.r_[[survey.nD for survey in self.survey]] - else: - value = np.r_[[self.survey.nD]] - self._phi_d_star = value - self._DMtarget = None - - return self._phi_d_star - - @phi_d_star.setter - def phi_d_star(self, value): - # phid =|| dpred - dobs||^2 - if value is not None: - value = validate_ndarray_with_shape("phi_d_star", value, shape=("*",)) - self._phi_d_star = value - self._DMtarget = None - - @property - def chiSmall(self): - """The a multiplier for the target petrophysical misfit value. - - The target value is `chiSmall` times `phi_ms_star` - - Returns - ------- - float - """ - return self._chiSmall - - @chiSmall.setter - def chiSmall(self, value): - self._chiSmall = validate_float("chiSmall", value) - self._CLtarget = None - - @property - def phi_ms_star(self): - """The target value for the petrophysical data misfit. 
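
``TargetMisfit`` above encodes the discrepancy principle: with data weighted by their standard deviations, the chi-squared misfit has expected value equal to the number of data, so ``phi_d_star`` defaults to ``nD``. A sketch of the resulting stopping test:

.. code:: python

    # phi_d* defaults to the number of data; the inversion stops once
    # phi_d < chifact * phi_d*. Numbers are illustrative.
    n_data = 500
    chifact = 1.0
    target = chifact * n_data

    phi_d = 480.0
    print(phi_d < target)  # True -> opt.stopNextIteration would be set
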
- - The target value is `chiSmall` times `phi_ms_star` - - Returns - ------- - float - """ - return self._phi_ms_star - - @phi_ms_star.setter - def phi_ms_star(self, value): - if value is not None: - value = validate_float("phi_ms_star", value) - self._phi_ms_star = value - self._CLtarget = None - - @property - def TriggerSmall(self): - """Whether to trigger the smallness misfit test. - - Returns - ------- - bool - """ - return self._TriggerSmall - - @TriggerSmall.setter - def TriggerSmall(self, value): - self._TriggerSmall = validate_type("TriggerSmall", value, bool) - - @property - def TriggerTheta(self): - """Whether to trigger the GMM misfit test. - - Returns - ------- - bool - """ - return self._TriggerTheta - - @TriggerTheta.setter - def TriggerTheta(self, value): - self._TriggerTheta = validate_type("TriggerTheta", value, bool) - - @property - def ToleranceTheta(self): - """Target value for the GMM misfit. - - Returns - ------- - float - """ - return self._ToleranceTheta - - @ToleranceTheta.setter - def ToleranceTheta(self, value): - self._ToleranceTheta = validate_float("ToleranceTheta", value, min_val=0.0) - - @property - def distance_norm(self): - """Distance norm to use for GMM misfit measure. - - Returns - ------- - float - """ - return self._distance_norm - - @distance_norm.setter - def distance_norm(self, value): - self._distance_norm = validate_float("distance_norm", value, min_val=0.0) - - def initialize(self): - self.dmlist = np.r_[[dmis(self.invProb.model) for dmis in self.dmisfit.objfcts]] - - if getattr(self.invProb.reg.objfcts[0], "objfcts", None) is not None: - smallness = np.r_[ - [ - ( - np.r_[ - i, - j, - isinstance(regpart, PGIsmallness), - ] - ) - for i, regobjcts in enumerate(self.invProb.reg.objfcts) - for j, regpart in enumerate(regobjcts.objfcts) - ] - ] - if smallness[smallness[:, 2] == 1][:, :2].size == 0: - warnings.warn( - "There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)", - stacklevel=2, - ) - self.smallness = -1 - self.pgi_smallness = None - - else: - self.smallness = smallness[smallness[:, 2] == 1][:, :2][0] - self.pgi_smallness = self.invProb.reg.objfcts[ - self.smallness[0] - ].objfcts[self.smallness[1]] - - if self.verbose: - print( - type( - self.invProb.reg.objfcts[self.smallness[0]].objfcts[ - self.smallness[1] - ] - ) - ) - - self._regmode = 1 - - else: - smallness = np.r_[ - [ - ( - np.r_[ - j, - isinstance(regpart, PGIsmallness), - ] - ) - for j, regpart in enumerate(self.invProb.reg.objfcts) - ] - ] - if smallness[smallness[:, 1] == 1][:, :1].size == 0: - if self.TriggerSmall: - warnings.warn( - "There is no PGI regularization. Smallness target is turned off (TriggerSmall flag).", - stacklevel=2, - ) - self.TriggerSmall = False - self.smallness = -1 - else: - self.smallness = smallness[smallness[:, 1] == 1][:, :1][0] - self.pgi_smallness = self.invProb.reg.objfcts[self.smallness[0]] - - if self.verbose: - print(type(self.invProb.reg.objfcts[self.smallness[0]])) - - self._regmode = 2 - - @property - def DM(self): - """Whether the geophysical data misfit target was satisfied. - - Returns - ------- - bool - """ - return self._DM - - @property - def CL(self): - """Whether the petrophysical misfit target was satisified. - - Returns - ------- - bool - """ - return self._CL - - @property - def DP(self): - """Whether the GMM misfit was below the threshold. - - Returns - ------- - bool - """ - return self._DP - - @property - def AllStop(self): - """Whether all target misfit values have been met. 
- - Returns - ------- - bool - """ - - return self.DM and self.CL and self.DP - - @property - def DMtarget(self): - if getattr(self, "_DMtarget", None) is None: - self._DMtarget = self.chifact * self.phi_d_star - return self._DMtarget - - @DMtarget.setter - def DMtarget(self, val): - self._DMtarget = val - - @property - def CLtarget(self): - if not getattr(self.pgi_smallness, "approx_eval", True): - # if nonlinear prior, compute targer numerically at each GMM update - samples, _ = self.pgi_smallness.gmm.sample( - len(self.pgi_smallness.gmm.cell_volumes) - ) - self.phi_ms_star = self.pgi_smallness( - mkvc(samples), externalW=self.WeightsInTarget - ) - - self._CLtarget = self.chiSmall * self.phi_ms_star - - elif getattr(self, "_CLtarget", None) is None: - # phid = ||dpred - dobs||^2 - if self.phi_ms_star is None: - # Expected value is number of active cells * number of physical - # properties - self.phi_ms_star = len(self.invProb.model) - - self._CLtarget = self.chiSmall * self.phi_ms_star - - return self._CLtarget - - @property - def CLnormalizedConstant(self): - if ~self.WeightsInTarget: - return 1.0 - elif np.any(self.smallness == -1): - return np.sum( - sp.csr_matrix.diagonal(self.invProb.reg.objfcts[0].W) ** 2.0 - ) / len(self.invProb.model) - else: - return np.sum(sp.csr_matrix.diagonal(self.pgi_smallness.W) ** 2.0) / len( - self.invProb.model - ) - - @CLtarget.setter - def CLtarget(self, val): - self._CLtarget = val - - def phims(self): - if np.any(self.smallness == -1): - return self.invProb.reg.objfcts[0](self.invProb.model) - else: - return ( - self.pgi_smallness( - self.invProb.model, external_weights=self.WeightsInTarget - ) - / self.CLnormalizedConstant - ) - - def ThetaTarget(self): - maxdiff = 0.0 - - for i in range(self.invProb.reg.gmm.n_components): - meandiff = np.linalg.norm( - (self.invProb.reg.gmm.means_[i] - self.invProb.reg.gmmref.means_[i]) - / self.invProb.reg.gmmref.means_[i], - ord=self.distance_norm, - ) - maxdiff = np.maximum(maxdiff, meandiff) - - if ( - self.invProb.reg.gmm.covariance_type == "full" - or self.invProb.reg.gmm.covariance_type == "spherical" - ): - covdiff = np.linalg.norm( - ( - self.invProb.reg.gmm.covariances_[i] - - self.invProb.reg.gmmref.covariances_[i] - ) - / self.invProb.reg.gmmref.covariances_[i], - ord=self.distance_norm, - ) - else: - covdiff = np.linalg.norm( - ( - self.invProb.reg.gmm.covariances_ - - self.invProb.reg.gmmref.covariances_ - ) - / self.invProb.reg.gmmref.covariances_, - ord=self.distance_norm, - ) - maxdiff = np.maximum(maxdiff, covdiff) - - pidiff = np.linalg.norm( - [ - ( - self.invProb.reg.gmm.weights_[i] - - self.invProb.reg.gmmref.weights_[i] - ) - / self.invProb.reg.gmmref.weights_[i] - ], - ord=self.distance_norm, - ) - maxdiff = np.maximum(maxdiff, pidiff) - - return maxdiff - - def endIter(self): - self._DM = False - self._CL = True - self._DP = True - self.dmlist = np.r_[[dmis(self.invProb.model) for dmis in self.dmisfit.objfcts]] - self.targetlist = np.r_[ - [dm < tgt for dm, tgt in zip(self.dmlist, self.DMtarget)] - ] - - if np.all(self.targetlist): - self._DM = True - - if self.TriggerSmall and np.any(self.smallness != -1): - if self.phims() > self.CLtarget: - self._CL = False - - if self.TriggerTheta: - if self.ThetaTarget() > self.ToleranceTheta: - self._DP = False - - if self.verbose: - message = "geophys. 
misfits: " + "; ".join( - map( - str, - [ - "{0} (target {1} [{2}])".format(val, tgt, cond) - for val, tgt, cond in zip( - np.round(self.dmlist, 1), - np.round(self.DMtarget, 1), - self.targetlist, - ) - ], - ) - ) - if self.TriggerSmall: - message += ( - " | smallness misfit: {0:.1f} (target: {1:.1f} [{2}])".format( - self.phims(), self.CLtarget, self.CL - ) - ) - if self.TriggerTheta: - message += " | GMM parameters within tolerance: {}".format(self.DP) - print(message) - - if self.AllStop: - self.opt.stopNextIteration = True - if self.verbose: - print("All targets have been reached") - - -class SaveEveryIteration(InversionDirective): - """SaveEveryIteration - - This directive saves an array at each iteration. The default - directory is the current directory and the models are saved as - ``InversionModel-YYYY-MM-DD-HH-MM-iter.npy`` - """ - - def __init__(self, directory=".", name="InversionModel", **kwargs): - super().__init__(**kwargs) - self.directory = directory - self.name = name - - @property - def directory(self): - """Directory to save results in. - - Returns - ------- - str - """ - return self._directory - - @directory.setter - def directory(self, value): - value = validate_string("directory", value) - fullpath = os.path.abspath(os.path.expanduser(value)) - - if not os.path.isdir(fullpath): - os.mkdir(fullpath) - self._directory = value - - @property - def name(self): - """Root of the filename to be saved. - - Returns - ------- - str - """ - return self._name - - @name.setter - def name(self, value): - self._name = validate_string("name", value) - - @property - def fileName(self): - if getattr(self, "_fileName", None) is None: - self._fileName = "{0!s}-{1!s}".format( - self.name, datetime.now().strftime("%Y-%m-%d-%H-%M") - ) - return self._fileName - - -class SaveModelEveryIteration(SaveEveryIteration): - """SaveModelEveryIteration - - This directive saves the model as a numpy array at each iteration. The - default directory is the current directoy and the models are saved as - ``InversionModel-YYYY-MM-DD-HH-MM-iter.npy`` - """ - - def initialize(self): - print( - "simpeg.SaveModelEveryIteration will save your models as: " - "'{0!s}###-{1!s}.npy'".format(self.directory + os.path.sep, self.fileName) - ) - - def endIter(self): - np.save( - "{0!s}{1:03d}-{2!s}".format( - self.directory + os.path.sep, self.opt.iter, self.fileName - ), - self.opt.xc, - ) - - -class SaveOutputEveryIteration(SaveEveryIteration): - """SaveOutputEveryIteration""" - - def __init__(self, save_txt=True, **kwargs): - super().__init__(**kwargs) - - self.save_txt = save_txt - - @property - def save_txt(self): - """Whether to save the output as a text file. 
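
The save directives above derive one timestamped base name per run and prefix it with a zero-padded iteration number. A sketch of the resulting paths, using the same format strings:

.. code:: python

    import os
    from datetime import datetime

    name, directory, iteration = "InversionModel", ".", 3
    file_name = "{0!s}-{1!s}".format(name, datetime.now().strftime("%Y-%m-%d-%H-%M"))
    path = "{0!s}{1:03d}-{2!s}.npy".format(directory + os.path.sep, iteration, file_name)
    print(path)  # e.g. ./003-InversionModel-2025-06-30-10-15.npy
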
- - Returns - ------- - bool - """ - return self._save_txt - - @save_txt.setter - def save_txt(self, value): - self._save_txt = validate_type("save_txt", value, bool) - - def initialize(self): - if self.save_txt is True: - print( - "simpeg.SaveOutputEveryIteration will save your inversion " - "progress as: '###-{0!s}.txt'".format(self.fileName) - ) - f = open(self.fileName + ".txt", "w") - header = " # beta phi_d phi_m phi_m_small phi_m_smoomth_x phi_m_smoomth_y phi_m_smoomth_z phi\n" - f.write(header) - f.close() - - # Create a list of each - - self.beta = [] - self.phi_d = [] - self.phi_m = [] - self.phi_m_small = [] - self.phi_m_smooth_x = [] - self.phi_m_smooth_y = [] - self.phi_m_smooth_z = [] - self.phi = [] - - def endIter(self): - phi_s, phi_x, phi_y, phi_z = 0, 0, 0, 0 - - for reg in self.reg.objfcts: - if isinstance(reg, Sparse): - i_s, i_x, i_y, i_z = 0, 1, 2, 3 - else: - i_s, i_x, i_y, i_z = 0, 1, 3, 5 - if getattr(reg, "alpha_s", None): - phi_s += reg.objfcts[i_s](self.invProb.model) * reg.alpha_s - if getattr(reg, "alpha_x", None): - phi_x += reg.objfcts[i_x](self.invProb.model) * reg.alpha_x - - if reg.regularization_mesh.dim > 1 and getattr(reg, "alpha_y", None): - phi_y += reg.objfcts[i_y](self.invProb.model) * reg.alpha_y - if reg.regularization_mesh.dim > 2 and getattr(reg, "alpha_z", None): - phi_z += reg.objfcts[i_z](self.invProb.model) * reg.alpha_z - - self.beta.append(self.invProb.beta) - self.phi_d.append(self.invProb.phi_d) - self.phi_m.append(self.invProb.phi_m) - self.phi_m_small.append(phi_s) - self.phi_m_smooth_x.append(phi_x) - self.phi_m_smooth_y.append(phi_y) - self.phi_m_smooth_z.append(phi_z) - self.phi.append(self.opt.f) - - if self.save_txt: - f = open(self.fileName + ".txt", "a") - f.write( - " {0:3d} {1:1.4e} {2:1.4e} {3:1.4e} {4:1.4e} {5:1.4e} " - "{6:1.4e} {7:1.4e} {8:1.4e}\n".format( - self.opt.iter, - self.beta[self.opt.iter - 1], - self.phi_d[self.opt.iter - 1], - self.phi_m[self.opt.iter - 1], - self.phi_m_small[self.opt.iter - 1], - self.phi_m_smooth_x[self.opt.iter - 1], - self.phi_m_smooth_y[self.opt.iter - 1], - self.phi_m_smooth_z[self.opt.iter - 1], - self.phi[self.opt.iter - 1], - ) - ) - f.close() - - def load_results(self): - results = np.loadtxt(self.fileName + str(".txt"), comments="#") - self.beta = results[:, 1] - self.phi_d = results[:, 2] - self.phi_m = results[:, 3] - self.phi_m_small = results[:, 4] - self.phi_m_smooth_x = results[:, 5] - self.phi_m_smooth_y = results[:, 6] - self.phi_m_smooth_z = results[:, 7] - - self.phi_m_smooth = ( - self.phi_m_smooth_x + self.phi_m_smooth_y + self.phi_m_smooth_z - ) - - self.f = results[:, 7] - - self.target_misfit = self.invProb.dmisfit.simulation.survey.nD - self.i_target = None - - if self.invProb.phi_d < self.target_misfit: - i_target = 0 - while self.phi_d[i_target] > self.target_misfit: - i_target += 1 - self.i_target = i_target - - def plot_misfit_curves( - self, - fname=None, - dpi=300, - plot_small_smooth=False, - plot_phi_m=True, - plot_small=False, - plot_smooth=False, - ): - self.target_misfit = np.sum([dmis.nD for dmis in self.invProb.dmisfit.objfcts]) - self.i_target = None - - if self.invProb.phi_d < self.target_misfit: - i_target = 0 - while self.phi_d[i_target] > self.target_misfit: - i_target += 1 - self.i_target = i_target - - fig = plt.figure(figsize=(5, 2)) - ax = plt.subplot(111) - ax_1 = ax.twinx() - ax.semilogy( - np.arange(len(self.phi_d)), self.phi_d, "k-", lw=2, label=r"$\phi_d$" - ) - - if plot_phi_m: - ax_1.semilogy( - np.arange(len(self.phi_d)), self.phi_m, 
"r", lw=2, label=r"$\phi_m$" - ) - - if plot_small_smooth or plot_small: - ax_1.semilogy( - np.arange(len(self.phi_d)), self.phi_m_small, "ro", label="small" - ) - if plot_small_smooth or plot_smooth: - ax_1.semilogy( - np.arange(len(self.phi_d)), self.phi_m_smooth_x, "rx", label="smooth_x" - ) - ax_1.semilogy( - np.arange(len(self.phi_d)), self.phi_m_smooth_y, "rx", label="smooth_y" - ) - ax_1.semilogy( - np.arange(len(self.phi_d)), self.phi_m_smooth_z, "rx", label="smooth_z" - ) - - ax.legend(loc=1) - ax_1.legend(loc=2) - - ax.plot( - np.r_[ax.get_xlim()[0], ax.get_xlim()[1]], - np.ones(2) * self.target_misfit, - "k:", - ) - ax.set_xlabel("Iteration") - ax.set_ylabel(r"$\phi_d$") - ax_1.set_ylabel(r"$\phi_m$", color="r") - ax_1.tick_params(axis="y", which="both", colors="red") - - plt.show() - if fname is not None: - fig.savefig(fname, dpi=dpi) - - def plot_tikhonov_curves(self, fname=None, dpi=200): - self.target_misfit = self.invProb.dmisfit.simulation.survey.nD - self.i_target = None - - if self.invProb.phi_d < self.target_misfit: - i_target = 0 - while self.phi_d[i_target] > self.target_misfit: - i_target += 1 - self.i_target = i_target - - fig = plt.figure(figsize=(5, 8)) - ax1 = plt.subplot(311) - ax2 = plt.subplot(312) - ax3 = plt.subplot(313) - - ax1.plot(self.beta, self.phi_d, "k-", lw=2, ms=4) - ax1.set_xlim(np.hstack(self.beta).min(), np.hstack(self.beta).max()) - ax1.set_xlabel(r"$\beta$", fontsize=14) - ax1.set_ylabel(r"$\phi_d$", fontsize=14) - - ax2.plot(self.beta, self.phi_m, "k-", lw=2) - ax2.set_xlim(np.hstack(self.beta).min(), np.hstack(self.beta).max()) - ax2.set_xlabel(r"$\beta$", fontsize=14) - ax2.set_ylabel(r"$\phi_m$", fontsize=14) - - ax3.plot(self.phi_m, self.phi_d, "k-", lw=2) - ax3.set_xlim(np.hstack(self.phi_m).min(), np.hstack(self.phi_m).max()) - ax3.set_xlabel(r"$\phi_m$", fontsize=14) - ax3.set_ylabel(r"$\phi_d$", fontsize=14) - - if self.i_target is not None: - ax1.plot(self.beta[self.i_target], self.phi_d[self.i_target], "k*", ms=10) - ax2.plot(self.beta[self.i_target], self.phi_m[self.i_target], "k*", ms=10) - ax3.plot(self.phi_m[self.i_target], self.phi_d[self.i_target], "k*", ms=10) - - for ax in [ax1, ax2, ax3]: - ax.set_xscale("linear") - ax.set_yscale("linear") - plt.tight_layout() - plt.show() - if fname is not None: - fig.savefig(fname, dpi=dpi) - - -class SaveOutputDictEveryIteration(SaveEveryIteration): - """ - Saves inversion parameters at every iteration. - """ - - # Initialize the output dict - def __init__(self, saveOnDisk=False, **kwargs): - super().__init__(**kwargs) - self.saveOnDisk = saveOnDisk - - @property - def saveOnDisk(self): - """Whether to save the output dict to disk. - - Returns - ------- - bool - """ - return self._saveOnDisk - - @saveOnDisk.setter - def saveOnDisk(self, value): - self._saveOnDisk = validate_type("saveOnDisk", value, bool) - - def initialize(self): - self.outDict = {} - if self.saveOnDisk: - print( - "simpeg.SaveOutputDictEveryIteration will save your inversion progress as dictionary: '###-{0!s}.npz'".format( - self.fileName - ) - ) - - def endIter(self): - # regCombo = ["phi_ms", "phi_msx"] - - # if self.simulation[0].mesh.dim >= 2: - # regCombo += ["phi_msy"] - - # if self.simulation[0].mesh.dim == 3: - # regCombo += ["phi_msz"] - - # Initialize the output dict - iterDict = {} - - # Save the data. 
- iterDict["iter"] = self.opt.iter - iterDict["beta"] = self.invProb.beta - iterDict["phi_d"] = self.invProb.phi_d - iterDict["phi_m"] = self.invProb.phi_m - - # for label, fcts in zip(regCombo, self.reg.objfcts[0].objfcts): - # iterDict[label] = fcts(self.invProb.model) - - iterDict["f"] = self.opt.f - iterDict["m"] = self.invProb.model - iterDict["dpred"] = self.invProb.dpred - - for reg in self.reg.objfcts: - if isinstance(reg, Sparse): - for reg_part, norm in zip(reg.objfcts, reg.norms): - reg_name = f"{type(reg_part).__name__}" - if hasattr(reg_part, "orientation"): - reg_name = reg_part.orientation + " " + reg_name - iterDict[reg_name + ".irls_threshold"] = reg_part.irls_threshold - iterDict[reg_name + ".norm"] = norm - - # Save the file as a npz - if self.saveOnDisk: - np.savez("{:03d}-{:s}".format(self.opt.iter, self.fileName), iterDict) - - self.outDict[self.opt.iter] = iterDict - - -@deprecate_class(removal_version="0.24.0", error=False) -class Update_IRLS(InversionDirective): - f_old = 0 - f_min_change = 1e-2 - beta_tol = 1e-1 - beta_ratio_l2 = None - prctile = 100 - chifact_start = 1.0 - chifact_target = 1.0 - - # Solving parameter for IRLS (mode:2) - irls_iteration = 0 - minGNiter = 1 - iterStart = 0 - sphericalDomain = False - - # Beta schedule - ComboObjFun = False - mode = 1 - coolEpsOptimized = True - coolEps_p = True - coolEps_q = True - floorEps_p = 1e-8 - floorEps_q = 1e-8 - coolEpsFact = 1.2 - silent = False - fix_Jmatrix = False - - def __init__( - self, - max_irls_iterations=20, - max_beta_iterations=20, - update_beta=True, - beta_search=False, - coolingFactor=2.0, - coolingRate=1, - **kwargs, - ): - super().__init__(**kwargs) - self.max_irls_iterations = max_irls_iterations - self.max_beta_iterations = max_beta_iterations - self.update_beta = update_beta - self.beta_search = beta_search - self.coolingFactor = coolingFactor - self.coolingRate = coolingRate - - @property - def max_irls_iterations(self): - """Maximum irls iterations. - - Returns - ------- - int - """ - return self._max_irls_iterations - - @max_irls_iterations.setter - def max_irls_iterations(self, value): - self._max_irls_iterations = validate_integer( - "max_irls_iterations", value, min_val=0 - ) - - @property - def max_beta_iterations(self): - """Maximum beta iterations. - - Returns - ------- - int - """ - return self._max_beta_iterations - - @max_beta_iterations.setter - def max_beta_iterations(self, value): - self._max_beta_iterations = validate_integer( - "max_beta_iterations", value, min_val=0 - ) - - @property - def coolingFactor(self): - """Beta is divided by this value every `coolingRate` iterations. - - Returns - ------- - float - """ - return self._coolingFactor - - @coolingFactor.setter - def coolingFactor(self, value): - self._coolingFactor = validate_float( - "coolingFactor", value, min_val=0.0, inclusive_min=False - ) - - @property - def coolingRate(self): - """Cool after this number of iterations. - - Returns - ------- - int - """ - return self._coolingRate - - @coolingRate.setter - def coolingRate(self, value): - self._coolingRate = validate_integer("coolingRate", value, min_val=1) - - @property - def update_beta(self): - """Whether to update beta. - - Returns - ------- - bool - """ - return self._update_beta - - @update_beta.setter - def update_beta(self, value): - self._update_beta = validate_type("update_beta", value, bool) - - @property - def beta_search(self): - """Whether to do a beta search. 
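
The IRLS machinery that ``Update_IRLS`` drives replaces an Lp penalty with a weighted L2 penalty whose weights are refreshed from the current model; ``irls_threshold`` is the stabilizing epsilon that the schedule cools by ``coolEpsFact``. A sketch of the standard reweighting, assuming the usual :math:`(f_i^2 + \epsilon^2)^{p/2 - 1}` form (the exact weights live in the ``Sparse`` regularization, which is not shown in this diff):

.. code:: python

    import numpy as np

    def irls_weights(f_m, p, irls_threshold):
        # Weighted-L2 surrogate for sum |f_i|^p: w_i = (f_i^2 + eps^2)^(p/2 - 1).
        return (f_m**2 + irls_threshold**2) ** (p / 2.0 - 1.0)

    f_m = np.array([0.0, 0.01, 0.1, 1.0])
    for eps in (1e-1, 1e-2):  # cooling eps sharpens the sparse penalty
        print(eps, irls_weights(f_m, p=0.0, irls_threshold=eps))
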
- - Returns - ------- - bool - """ - return self._beta_search - - @beta_search.setter - def beta_search(self, value): - self._beta_search = validate_type("beta_search", value, bool) - - @property - def target(self): - if getattr(self, "_target", None) is None: - nD = 0 - for survey in self.survey: - nD += survey.nD - - self._target = nD * self.chifact_target - - return self._target - - @target.setter - def target(self, val): - self._target = val - - @property - def start(self): - if getattr(self, "_start", None) is None: - if isinstance(self.survey, list): - self._start = 0 - for survey in self.survey: - self._start += survey.nD * self.chifact_start - - else: - self._start = self.survey.nD * self.chifact_start - return self._start - - @start.setter - def start(self, val): - self._start = val - - def initialize(self): - if self.mode == 1: - self.norms = [] - for reg in self.reg.objfcts: - - if not isinstance(reg, Sparse): - continue - - self.norms.append(reg.norms) - reg.norms = [2.0 for obj in reg.objfcts] - reg.model = self.invProb.model - - # Update the model used by the regularization - for reg in self.reg.objfcts: - if not isinstance(reg, Sparse): - continue - - reg.model = self.invProb.model - - if self.sphericalDomain: - self.angleScale() - - def endIter(self): - if self.sphericalDomain: - self.angleScale() - - # Check if misfit is within the tolerance, otherwise scale beta - if np.all( - [ - np.abs(1.0 - self.invProb.phi_d / self.target) > self.beta_tol, - self.update_beta, - self.mode != 1, - ] - ): - ratio = self.target / self.invProb.phi_d - - if ratio > 1: - ratio = np.mean([2.0, ratio]) - else: - ratio = np.mean([0.75, ratio]) - - self.invProb.beta = self.invProb.beta * ratio - - if np.all([self.mode != 1, self.beta_search]): - print("Beta search step") - # self.update_beta = False - # Re-use previous model and continue with new beta - self.invProb.model = self.reg.objfcts[0].model - self.opt.xc = self.reg.objfcts[0].model - self.opt.iter -= 1 - return - - elif np.all([self.mode == 1, self.opt.iter % self.coolingRate == 0]): - self.invProb.beta = self.invProb.beta / self.coolingFactor - - # After reaching target misfit with l2-norm, switch to IRLS (mode:2) - if np.all([self.invProb.phi_d < self.start, self.mode == 1]): - self.start_irls() - - # Only update after GN iterations - if np.all( - [(self.opt.iter - self.iterStart) % self.minGNiter == 0, self.mode != 1] - ): - if self.stopping_criteria(): - self.opt.stopNextIteration = True - return - - # Print to screen - for reg in self.reg.objfcts: - if not isinstance(reg, Sparse): - continue - - for obj in reg.objfcts: - if isinstance(reg, (Sparse, BaseSparse)): - obj.irls_threshold = obj.irls_threshold / self.coolEpsFact - - self.irls_iteration += 1 - - # Reset the regularization matrices so that it is - # recalculated for current model. Do it to all levels of comboObj - for reg in self.reg.objfcts: - if not isinstance(reg, Sparse): - continue - - reg.update_weights(reg.model) - - self.update_beta = True - self.invProb.phi_m_last = self.reg(self.invProb.model) - - def start_irls(self): - if not self.silent: - print( - "Reached starting chifact with l2-norm regularization:" - + " Start IRLS steps..." 
- ) - - self.mode = 2 - - if getattr(self.opt, "iter", None) is None: - self.iterStart = 0 - else: - self.iterStart = self.opt.iter - - self.invProb.phi_m_last = self.reg(self.invProb.model) - - # Either use the supplied irls_threshold, or fix base on distribution of - # model values - for reg in self.reg.objfcts: - if not isinstance(reg, Sparse): - continue - - for obj in reg.objfcts: - threshold = np.percentile( - np.abs(obj.mapping * obj._delta_m(self.invProb.model)), self.prctile - ) - if isinstance(obj, SmoothnessFirstOrder): - threshold /= reg.regularization_mesh.base_length - - obj.irls_threshold = threshold - - # Re-assign the norms supplied by user l2 -> lp - for reg, norms in zip(self.reg.objfcts, self.norms): - if not isinstance(reg, Sparse): - - continue - reg.norms = norms - - if not self.silent: - print("irls_threshold " + str(reg.objfcts[0].irls_threshold)) - - # Save l2-model - self.invProb.l2model = self.invProb.model.copy() - - # Print to screen - for reg in self.reg.objfcts: - if not isinstance(reg, Sparse): - continue - if not self.silent: - print("irls_threshold " + str(reg.objfcts[0].irls_threshold)) - - def angleScale(self): - """ - Update the scales used by regularization for the - different block of models - """ - # Currently implemented for MVI-S only - for reg in self.reg.objfcts: - if hasattr(reg, "units") and reg.units == "amplitude": - max_amp = abs(reg.objfcts[0].f_m(self.invProb.model)).max() - - for reg in self.reg.objfcts: - if hasattr(reg, "units") and reg.units == "radian": - reg.set_weights( - angle_scale=np.ones(reg.mapping.shape[0]) * max_amp / np.pi - ) - - def validate(self, directiveList): - dList = directiveList.dList - self_ind = dList.index(self) - lin_precond_ind = [isinstance(d, UpdatePreconditioner) for d in dList] - - if any(lin_precond_ind): - assert lin_precond_ind.index(True) > self_ind, ( - "The directive 'UpdatePreconditioner' must be after Update_IRLS " - "in the directiveList" - ) - else: - warnings.warn( - "Without a Linear preconditioner, convergence may be slow. " - "Consider adding `Directives.UpdatePreconditioner` to your " - "directives list", - stacklevel=2, - ) - return True - - def stopping_criteria(self): - """ - Check for stopping criteria of max_irls_iteration or minimum change. - """ - if self.opt.iter > self.max_beta_iterations: - print("Reached max beta iterations.") - self.opt.stopNextIteration = True - return - - phim_new = 0 - for reg in self.reg.objfcts: - if isinstance(reg, (Sparse, BaseSparse)): - reg.model = self.invProb.model - phim_new += reg(reg.model) - - # Check for maximum number of IRLS cycles1 - if self.irls_iteration == self.max_irls_iterations: - if not self.silent: - print( - "Reach maximum number of IRLS cycles:" - + " {0:d}".format(self.max_irls_iterations) - ) - return True - - # Check if the function has changed enough - f_change = np.abs(self.f_old - phim_new) / (self.f_old + 1e-12) - if np.all( - [ - f_change < self.f_min_change, - self.irls_iteration > 1, - np.abs(1.0 - self.invProb.phi_d / self.target) < self.beta_tol, - ] - ): - print("Minimum decrease in regularization." 
+ "End of IRLS") - return True - - self.f_old = phim_new - - return False - - -class UpdatePreconditioner(InversionDirective): - """ - Create a Jacobi preconditioner for the linear problem - """ - - def __init__(self, update_every_iteration=True, **kwargs): - super().__init__(**kwargs) - self.update_every_iteration = update_every_iteration - - @property - def update_every_iteration(self): - """Whether to update the preconditioner at every iteration. - - Returns - ------- - bool - """ - return self._update_every_iteration - - @update_every_iteration.setter - def update_every_iteration(self, value): - self._update_every_iteration = validate_type( - "update_every_iteration", value, bool - ) - - def initialize(self): - # Create the pre-conditioner - regDiag = np.zeros_like(self.invProb.model) - m = self.invProb.model - - for multiplier, reg in self.reg: - # Check if regularization has a projection - rdg = reg.deriv2(m) - if not isinstance(rdg, Zero): - regDiag += multiplier * rdg.diagonal() - - JtJdiag = compute_JtJdiags(self.dmisfit, self.invProb.model) - - diagA = JtJdiag + self.invProb.beta * regDiag - diagA[diagA != 0] = diagA[diagA != 0] ** -1.0 - PC = sdiag((diagA)) - - self.opt.approxHinv = PC - - def endIter(self): - # Cool the threshold parameter - if self.update_every_iteration is False: - return - - # Create the pre-conditioner - regDiag = np.zeros_like(self.invProb.model) - m = self.invProb.model - - for multiplier, reg in self.reg: - # Check if regularization has a projection - regDiag += multiplier * reg.deriv2(m).diagonal() - - JtJdiag = compute_JtJdiags(self.dmisfit, m) - - diagA = JtJdiag + self.invProb.beta * regDiag - diagA[diagA != 0] = diagA[diagA != 0] ** -1.0 - PC = sdiag((diagA)) - self.opt.approxHinv = PC - - -class Update_Wj(InversionDirective): - """ - Create approx-sensitivity base weighting using the probing method - """ - - def __init__(self, k=None, itr=None, **kwargs): - self.k = k - self.itr = itr - super().__init__(**kwargs) - - @property - def k(self): - """Number of probing cycles for the estimator. - - Returns - ------- - int - """ - return self._k - - @k.setter - def k(self, value): - if value is not None: - value = validate_integer("k", value, min_val=1) - self._k = value - - @property - def itr(self): - """Which iteration to update the sensitivity. - - Will always update if `None`. - - Returns - ------- - int or None - """ - return self._itr - - @itr.setter - def itr(self, value): - if value is not None: - value = validate_integer("itr", value, min_val=1) - self._itr = value - - def endIter(self): - if self.itr is None or self.itr == self.opt.iter: - m = self.invProb.model - if self.k is None: - self.k = int(self.survey.nD / 10) - - def JtJv(v): - Jv = self.simulation.Jvec(m, v) - - return self.simulation.Jtvec(m, Jv) - - JtJdiag = estimate_diagonal(JtJv, len(m), k=self.k) - JtJdiag = JtJdiag / max(JtJdiag) - - self.reg.wght = JtJdiag - - -class UpdateSensitivityWeights(InversionDirective): - r""" - Sensitivity weighting for linear and non-linear least-squares inverse problems. - - This directive computes the root-mean squared sensitivities for the - forward simulation(s) attached to the inverse problem, then truncates - and scales the result to create cell weights which are applied in the regularization. - The underlying theory is provided below in the `Notes` section. - - This directive **requires** that the map for the regularization function is either - class:`simpeg.maps.Wires` or class:`simpeg.maps.Identity`. 
In other words, the - sensitivity weighting cannot be applied for parametric inversion. In addition, - the simulation(s) connected to the inverse problem **must** have a ``getJ`` or - ``getJtJdiag`` method. - - This directive's place in the :class:`DirectivesList` **must** be - before any directives which update the preconditioner for the inverse problem - (i.e. :class:`UpdatePreconditioner`), and **must** be before any directives that - estimate the starting trade-off parameter (i.e. :class:`EstimateBeta_ByEig` - and :class:`EstimateBetaMaxDerivative`). - - Parameters - ---------- - every_iteration : bool - When ``True``, update sensitivity weighting at every model update; non-linear problems. - When ``False``, create sensitivity weights for starting model only; linear problems. - threshold : float - Threshold value for smallest weighting value. - threshold_method : {'amplitude', 'global', 'percentile'} - Threshold method for how `threshold_value` is applied: - - - amplitude: - the smallest root-mean squared sensitivity is a fractional percent of the largest value; must be between 0 and 1. - - global: - `threshold_value` is added to the cell weights prior to normalization; must be greater than 0. - - percentile: - the smallest root-mean squared sensitivity is set using percentile threshold; must be between 0 and 100. - - normalization_method : {'maximum', 'min_value', None} - Normalization method applied to sensitivity weights. - - Options are: - - - maximum: - sensitivity weights are normalized by the largest value such that the largest weight is equal to 1. - - minimum: - sensitivity weights are normalized by the smallest value, after thresholding, such that the smallest weights are equal to 1. - - ``None``: - normalization is not applied. - - Notes - ----- - Let :math:`\mathbf{J}` represent the Jacobian. To create sensitivity weights, root-mean squared (RMS) sensitivities - :math:`\mathbf{s}` are computed by summing the squares of the rows of the Jacobian: - - .. math:: - \mathbf{s} = \Bigg [ \sum_i \, \mathbf{J_{i, \centerdot }}^2 \, \Bigg ]^{1/2} - - The dynamic range of RMS sensitivities can span many orders of magnitude. When computing sensitivity - weights, thresholding is generally applied to set a minimum value. - - **Thresholding:** - - If **global** thresholding is applied, we add a constant :math:`\tau` to the RMS sensitivities: - - .. math:: - \mathbf{\tilde{s}} = \mathbf{s} + \tau - - In the case of **percentile** thresholding, we let :math:`s_{\%}` represent a given percentile. - Thresholding to set a minimum value is applied as follows: - - .. math:: - \tilde{s}_j = \begin{cases} - s_j \;\; for \;\; s_j \geq s_{\%} \\ - s_{\%} \;\; for \;\; s_j < s_{\%} - \end{cases} - - If **absolute** thresholding is applied, we define :math:`\eta` as a fractional percent. - In this case, thresholding is applied as follows: - - .. math:: - \tilde{s}_j = \begin{cases} - s_j \;\; for \;\; s_j \geq \eta s_{max} \\ - \eta s_{max} \;\; for \;\; s_j < \eta s_{max} - \end{cases} - """ - - def __init__( - self, - every_iteration=False, - threshold_value=1e-12, - threshold_method="amplitude", - normalization_method="maximum", - **kwargs, - ): - # Raise errors on deprecated arguments - if (key := "everyIter") in kwargs.keys(): - raise TypeError( - f"'{key}' property has been removed. Please use 'every_iteration'.", - ) - if (key := "threshold") in kwargs.keys(): - raise TypeError( - f"'{key}' property has been removed. 
Please use 'threshold_value'.", - ) - if (key := "normalization") in kwargs.keys(): - raise TypeError( - f"'{key}' property has been removed. " - "Please define normalization using 'normalization_method'.", - ) - - super().__init__(**kwargs) - - self.every_iteration = every_iteration - self.threshold_value = threshold_value - self.threshold_method = threshold_method - self.normalization_method = normalization_method - - @property - def every_iteration(self): - """Update sensitivity weights when model is updated. - - When ``True``, update sensitivity weighting at every model update; non-linear problems. - When ``False``, create sensitivity weights for starting model only; linear problems. - - Returns - ------- - bool - """ - return self._every_iteration - - @every_iteration.setter - def every_iteration(self, value): - self._every_iteration = validate_type("every_iteration", value, bool) - - everyIter = deprecate_property( - every_iteration, - "everyIter", - "every_iteration", - removal_version="0.20.0", - error=True, - ) - - @property - def threshold_value(self): - """Threshold value used to set minimum weighting value. - - The way thresholding is applied to the weighting model depends on the - `threshold_method` property. The choices for `threshold_method` are: - - - global: - `threshold_value` is added to the cell weights prior to normalization; must be greater than 0. - - percentile: - `threshold_value` is a percentile cutoff; must be between 0 and 100 - - amplitude: - `threshold_value` is the fractional percent of the largest value; must be between 0 and 1 - - - Returns - ------- - float - """ - return self._threshold_value - - @threshold_value.setter - def threshold_value(self, value): - self._threshold_value = validate_float("threshold_value", value, min_val=0.0) - - threshold = deprecate_property( - threshold_value, - "threshold", - "threshold_value", - removal_version="0.20.0", - error=True, - ) - - @property - def threshold_method(self): - """Threshold method for how `threshold_value` is applied: - - - global: - `threshold_value` is added to the cell weights prior to normalization; must be greater than 0. - - percentile: - the smallest root-mean squared sensitivity is set using percentile threshold; must be between 0 and 100 - - amplitude: - the smallest root-mean squared sensitivity is a fractional percent of the largest value; must be between 0 and 1 - - - Returns - ------- - str - """ - return self._threshold_method - - @threshold_method.setter - def threshold_method(self, value): - self._threshold_method = validate_string( - "threshold_method", value, string_list=["global", "percentile", "amplitude"] - ) - - @property - def normalization_method(self): - """Normalization method applied to sensitivity weights. - - Options are: - - - ``None`` - normalization is not applied - - maximum: - sensitivity weights are normalized by the largest value such that the largest weight is equal to 1. - - minimum: - sensitivity weights are normalized by the smallest value, after thresholding, such that the smallest weights are equal to 1. 
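The three thresholding modes documented here map onto simple numpy operations. A sketch, assuming wr already holds the root-mean squared sensitivities:

import numpy as np

def apply_threshold(wr, value, method):
    # Mirrors the thresholding branch of UpdateSensitivityWeights.update
    if method == "global":
        return wr + value  # constant floor added before normalization
    if method == "percentile":
        return np.clip(wr, a_min=np.percentile(wr, value), a_max=np.inf)
    if method == "amplitude":
        return np.clip(wr, a_min=value * wr.max(), a_max=np.inf)
    raise ValueError(f"unknown threshold_method: {method}")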
- - Returns - ------- - None, str - """ - return self._normalization_method - - @normalization_method.setter - def normalization_method(self, value): - if value is None: - self._normalization_method = value - else: - self._normalization_method = validate_string( - "normalization_method", value, string_list=["minimum", "maximum"] - ) - - normalization = deprecate_property( - normalization_method, - "normalization", - "normalization_method", - removal_version="0.20.0", - error=True, - ) - - def initialize(self): - """Compute sensitivity weights upon starting the inversion.""" - for reg in self.reg.objfcts: - if not isinstance(reg.mapping, (IdentityMap, Wires)): - raise TypeError( - f"Mapping for the regularization must be of type {IdentityMap} or {Wires}. " - + f"Input mapping of type {type(reg.mapping)}." - ) - - self.update() - - def endIter(self): - """Execute end of iteration.""" - - if self.every_iteration: - self.update() - - def update(self): - """Update sensitivity weights""" - - jtj_diag = compute_JtJdiags(self.dmisfit, self.invProb.model) - - # Compute and sum root-mean squared sensitivities for all objective functions - wr = np.zeros_like(self.invProb.model) - for reg in self.reg.objfcts: - if isinstance(reg, BaseSimilarityMeasure): - continue - - mesh = reg.regularization_mesh - n_cells = mesh.nC - mapped_jtj_diag = reg.mapping * jtj_diag - # reshape the mapped, so you can divide by volume - # (let's say it was a vector or anisotropic model) - mapped_jtj_diag = mapped_jtj_diag.reshape((n_cells, -1), order="F") - wr_temp = mapped_jtj_diag / reg.regularization_mesh.vol[:, None] ** 2.0 - wr_temp = wr_temp.reshape(-1, order="F") - - wr += reg.mapping.deriv(self.invProb.model).T * wr_temp - - wr **= 0.5 - - # Apply thresholding - if self.threshold_method == "global": - wr += self.threshold_value - elif self.threshold_method == "percentile": - wr = np.clip( - wr, a_min=np.percentile(wr, self.threshold_value), a_max=np.inf - ) - else: - wr = np.clip(wr, a_min=self.threshold_value * wr.max(), a_max=np.inf) - - # Apply normalization - if self.normalization_method == "maximum": - wr /= wr.max() - elif self.normalization_method == "minimum": - wr /= wr.min() - - # Add sensitivity weighting to all model objective functions - for reg in self.reg.objfcts: - if not isinstance(reg, BaseSimilarityMeasure): - sub_regs = getattr(reg, "objfcts", [reg]) - for sub_reg in sub_regs: - sub_reg.set_weights(sensitivity=sub_reg.mapping * wr) - - def validate(self, directiveList): - """Validate directive against directives list. - - The ``UpdateSensitivityWeights`` directive impacts the regularization by applying - cell weights. As a result, its place in the :class:`DirectivesList` must be - before any directives which update the preconditioner for the inverse problem - (i.e. :class:`UpdatePreconditioner`), and must be before any directives that - estimate the starting trade-off parameter (i.e. :class:`EstimateBeta_ByEig` - and :class:`EstimateBetaMaxDerivative`). - - - Returns - ------- - bool - Returns ``True`` if validation passes. Otherwise, an error is thrown. 
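For reference, a directive list that satisfies the ordering this validate method enforces: sensitivity weights first, then the beta estimator, then the preconditioner. A sketch; the parameter values are placeholders:

from simpeg import directives

directive_list = [
    directives.UpdateSensitivityWeights(every_iteration=False),
    directives.BetaEstimate_ByEig(beta0_ratio=1.0),
    directives.UpdatePreconditioner(),
]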
- """ - # check if a beta estimator is in the list after setting the weights - dList = directiveList.dList - self_ind = dList.index(self) - - beta_estimator_ind = [isinstance(d, BaseBetaEstimator) for d in dList] - lin_precond_ind = [isinstance(d, UpdatePreconditioner) for d in dList] - - if any(beta_estimator_ind): - assert beta_estimator_ind.index(True) > self_ind, ( - "The directive for setting intial beta must be after UpdateSensitivityWeights " - "in the directiveList" - ) - - if any(lin_precond_ind): - assert lin_precond_ind.index(True) > self_ind, ( - "The directive 'UpdatePreconditioner' must be after UpdateSensitivityWeights " - "in the directiveList" - ) - - return True - - -class ScaleMisfitMultipliers(InversionDirective): - """ - Scale the misfits by the relative chi-factors of multiple misfit functions. - - The goal is to reduce the relative influence of the misfit functions with - lowest chi-factors so that all functions reach a similar level of fit at - convergence to the global target. - - Parameters - ---------- - - path : str - Path to save the chi-factors log file. - """ - - def __init__(self, path: Path | None = None, **kwargs): - self.last_beta = None - self.chi_factors = None - - if path is None: - path = Path("./") - - self.filepath = path / "ChiFactors.log" - - super().__init__(**kwargs) - - def initialize(self): - self.last_beta = self.invProb.beta - self.multipliers = self.invProb.dmisfit.multipliers - self.scalings = np.ones_like(self.multipliers) - with open(self.filepath, "w", encoding="utf-8") as f: - f.write("Logging of [scaling * chi factor] per misfit function.\n\n") - f.write( - "Iterations\t" - + "\t".join( - f"[{objfct.name}]" for objfct in self.invProb.dmisfit.objfcts - ) - ) - f.write("\n") - - def endIter(self): - ratio = self.invProb.beta / self.last_beta - chi_factors = [] - for residual in self.invProb.residuals: - phi_d = np.vdot(residual, residual) - chi_factors.append(phi_d / len(residual)) - - self.chi_factors = np.asarray(chi_factors) - - if np.all(self.chi_factors < 1) or ratio >= 1: - self.last_beta = self.invProb.beta - self.write_log() - return - - # Normalize scaling between [ratio, 1] - scalings = ( - 1 - - (1 - ratio) - * (self.chi_factors.max() - self.chi_factors) - / self.chi_factors.max() - ) - - # Force the ones that overshot target - scalings[self.chi_factors < 1] = ( - ratio # * self.chi_factors[self.chi_factors < 1] - ) - - # Update the scaling - self.scalings = self.scalings * scalings - - # Normalize total phi_d with scalings - self.invProb.dmisfit.multipliers = self.multipliers * self.scalings - self.last_beta = self.invProb.beta - self.write_log() - - def write_log(self): - """ - Write the scaling factors to the log file. 
- """ - with open(self.filepath, "a", encoding="utf-8") as f: - f.write( - f"{self.opt.iter}\t" - + "\t".join( - f"{multi:.2e} * {chi:.2e}" - for multi, chi in zip( - self.invProb.dmisfit.multipliers, self.chi_factors - ) - ) - + "\n" - ) diff --git a/simpeg/directives/pgi_directives.py b/simpeg/directives/pgi_directives.py index 8127a913cf..ec0108210c 100644 --- a/simpeg/directives/pgi_directives.py +++ b/simpeg/directives/pgi_directives.py @@ -1,475 +1,18 @@ -############################################################################### -# # -# Directives for PGI: Petrophysically guided Regularization # -# # -############################################################################### - -import copy - -import numpy as np - -from ..directives import InversionDirective, MultiTargetMisfits -from ..regularization import ( - PGI, - PGIsmallness, - SmoothnessFirstOrder, - SparseSmoothness, -) -from ..utils import ( - GaussianMixtureWithNonlinearRelationships, - GaussianMixtureWithNonlinearRelationshipsWithPrior, - GaussianMixtureWithPrior, - WeightedGaussianMixture, - mkvc, +""" +Backward compatibility with the ``simpeg.directives.pgi_directives`` submodule. + +This file will be deleted when the ``simpeg.directives.pgi_directives`` submodule is +removed. +""" + +import warnings +from ._pgi_directives import * # noqa: F403,F401 + +warnings.warn( + "The `simpeg.directives.pgi_directives` submodule has been deprecated, " + "and will be removed in SimPEG v0.26.0." + "Import any directive class directly from the `simpeg.directives` module. " + "E.g.: `from simpeg.directives import PGI_UpdateParameters`. ", + FutureWarning, + stacklevel=2, ) - - -class PGI_UpdateParameters(InversionDirective): - """ - This directive is to be used with regularization from regularization.pgi. - It updates: - - the reference model and weights in the smallness (L2-approximation of PGI) - - the GMM as a MAP estimate between the prior and the current model - For more details, please consult: - - https://doi.org/10.1093/gji/ggz389 - """ - - verbose = False # print info. about the GMM at each iteration - update_rate = 1 # updates at each `update_rate` iterations - update_gmm = False # update the GMM - zeta = ( - 1e10 # confidence in the prior proportions; default: high value, keep GMM fixed - ) - nu = ( - 1e10 # confidence in the prior covariances; default: high value, keep GMM fixed - ) - kappa = 1e10 # confidence in the prior means;default: high value, keep GMM fixed - update_covariances = ( - True # Average the covariances, If false: average the precisions - ) - fixed_membership = None # keep the membership of specific cells fixed - keep_ref_fixed_in_Smooth = True # keep mref fixed in the Smoothness - - def initialize(self): - pgi_reg = self.reg.get_functions_of_type(PGIsmallness) - if len(pgi_reg) != 1: - raise UserWarning( - "'PGI_UpdateParameters' requires one 'PGIsmallness' regularization " - "in the objective function." 
- ) - self.pgi_reg = pgi_reg[0] - - def endIter(self): - if self.opt.iter > 0 and self.opt.iter % self.update_rate == 0: - m = self.invProb.model - modellist = self.pgi_reg.wiresmap * m - model = np.c_[[a * b for a, b in zip(self.pgi_reg.maplist, modellist)]].T - - if self.update_gmm and isinstance( - self.pgi_reg.gmmref, GaussianMixtureWithNonlinearRelationships - ): - clfupdate = GaussianMixtureWithNonlinearRelationshipsWithPrior( - gmmref=self.pgi_reg.gmmref, - zeta=self.zeta, - kappa=self.kappa, - nu=self.nu, - verbose=self.verbose, - prior_type="semi", - update_covariances=self.update_covariances, - max_iter=self.pgi_reg.gmm.max_iter, - n_init=self.pgi_reg.gmm.n_init, - reg_covar=self.pgi_reg.gmm.reg_covar, - weights_init=self.pgi_reg.gmm.weights_, - means_init=self.pgi_reg.gmm.means_, - precisions_init=self.pgi_reg.gmm.precisions_, - random_state=self.pgi_reg.gmm.random_state, - tol=self.pgi_reg.gmm.tol, - verbose_interval=self.pgi_reg.gmm.verbose_interval, - warm_start=self.pgi_reg.gmm.warm_start, - fixed_membership=self.fixed_membership, - ) - clfupdate = clfupdate.fit(model) - - elif self.update_gmm and isinstance( - self.pgi_reg.gmmref, WeightedGaussianMixture - ): - clfupdate = GaussianMixtureWithPrior( - gmmref=self.pgi_reg.gmmref, - zeta=self.zeta, - kappa=self.kappa, - nu=self.nu, - verbose=self.verbose, - prior_type="semi", - update_covariances=self.update_covariances, - max_iter=self.pgi_reg.gmm.max_iter, - n_init=self.pgi_reg.gmm.n_init, - reg_covar=self.pgi_reg.gmm.reg_covar, - weights_init=self.pgi_reg.gmm.weights_, - means_init=self.pgi_reg.gmm.means_, - precisions_init=self.pgi_reg.gmm.precisions_, - random_state=self.pgi_reg.gmm.random_state, - tol=self.pgi_reg.gmm.tol, - verbose_interval=self.pgi_reg.gmm.verbose_interval, - warm_start=self.pgi_reg.gmm.warm_start, - fixed_membership=self.fixed_membership, - ) - clfupdate = clfupdate.fit(model) - - else: - clfupdate = copy.deepcopy(self.pgi_reg.gmmref) - - self.pgi_reg.gmm = clfupdate - membership = self.pgi_reg.gmm.predict(model) - - if clfupdate.fixed_membership is not None: - self.fixed_membership = clfupdate.fixed_membership - membership[self.fixed_membership[:, 0]] = self.fixed_membership[:, 1] - - mref = mkvc(self.pgi_reg.gmm.means_[membership]) - self.pgi_reg.reference_model = mref - if getattr(self.fixed_membership, "shape", [0, 0])[0] < len(membership): - self.pgi_reg._r_second_deriv = None - - -class PGI_BetaAlphaSchedule(InversionDirective): - """ - This directive is to be used with regularizations from regularization.pgi. - It implements the strategy described in https://doi.org/10.1093/gji/ggz389 - for iteratively updating beta and alpha_s for fitting the - geophysical and smallness targets. - """ - - verbose = False # print information (progress, updates made) - tolerance = 0.0 # tolerance on the geophysical target misfit for cooling - progress = 0.1 # minimum percentage progress (default 10%) before cooling beta - coolingFactor = 2.0 # when cooled, beta is divided by it - warmingFactor = 1.0 # when warmed, alpha_s is multiplied by the ratio of the - # geophysical target with their current misfit, times this factor - mode = 1 # mode 1: start with nothing fitted. 
Mode 2: warmstart with fitted geophysical data - alphasmax = 1e10 # max alpha_s - betamin = 1e-10 # minimum beta - update_rate = 1 # update every `update_rate` iterations - pgi_reg = None - ratio_in_cooling = ( - False # add the ratio of geophysical misfit with their target in cooling - ) - - def initialize(self): - """Initialize the directive.""" - self.update_previous_score() - self.update_previous_dmlist() - - def endIter(self): - """Run after the end of each iteration in the inversion.""" - # Get some variables from the MultiTargetMisfits directive - data_misfits_achieved = self.multi_target_misfits_directive.DM - data_misfits_target = self.multi_target_misfits_directive.DMtarget - dmlist = self.multi_target_misfits_directive.dmlist - targetlist = self.multi_target_misfits_directive.targetlist - - # Change mode if data misfit targets have been achieved - if data_misfits_achieved: - self.mode = 2 - - # Don't cool beta of warm alpha if we are in the first iteration or if - # the current iteration doesn't match the update rate - if self.opt.iter == 0 or self.opt.iter % self.update_rate != 0: - self.update_previous_score() - self.update_previous_dmlist() - return None - - if self.verbose: - targets = np.round( - np.maximum( - (1.0 - self.progress) * self.previous_dmlist, - (1.0 + self.tolerance) * data_misfits_target, - ), - decimals=1, - ) - dmlist_rounded = np.round(dmlist, decimals=1) - print( - f"Beta cooling evaluation: progress: {dmlist_rounded}; " - f"minimum progress targets: {targets}" - ) - - # Decide if we should cool beta - threshold = np.maximum( - (1.0 - self.progress) * self.previous_dmlist[~targetlist], - data_misfits_target[~targetlist], - ) - if ( - (dmlist[~targetlist] > threshold).all() - and not data_misfits_achieved - and self.mode == 1 - and self.invProb.beta > self.betamin - ): - self.cool_beta() - if self.verbose: - print("Decreasing beta to counter data misfit decrase plateau.") - - # Decide if we should warm alpha instead - elif ( - data_misfits_achieved - and self.mode == 2 - and np.all(self.pgi_regularization.alpha_pgi < self.alphasmax) - ): - self.warm_alpha() - if self.verbose: - print( - "Warming alpha_pgi to favor clustering: ", - self.pgi_regularization.alpha_pgi, - ) - - # Decide if we should cool beta (to counter data misfit increase) - elif ( - np.any(dmlist > (1.0 + self.tolerance) * data_misfits_target) - and self.mode == 2 - and self.invProb.beta > self.betamin - ): - self.cool_beta() - if self.verbose: - print("Decreasing beta to counter data misfit increase.") - - # Update previous score and dmlist - self.update_previous_score() - self.update_previous_dmlist() - - def cool_beta(self): - """Cool beta according to schedule.""" - data_misfits_target = self.multi_target_misfits_directive.DMtarget - dmlist = self.multi_target_misfits_directive.dmlist - ratio = 1.0 - indx = dmlist > (1.0 + self.tolerance) * data_misfits_target - if np.any(indx) and self.ratio_in_cooling: - ratio = np.median([dmlist[indx] / data_misfits_target[indx]]) - self.invProb.beta /= self.coolingFactor * ratio - - def warm_alpha(self): - """Warm alpha according to schedule.""" - data_misfits_target = self.multi_target_misfits_directive.DMtarget - dmlist = self.multi_target_misfits_directive.dmlist - ratio = np.median(data_misfits_target / dmlist) - self.pgi_regularization.alpha_pgi *= self.warmingFactor * ratio - - def update_previous_score(self): - """ - Update the value of the ``previous_score`` attribute. 
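Stripped of bookkeeping, each endIter pass of this schedule makes a three-way choice between cooling beta and warming alpha_pgi. A sketch with a hypothetical helper, assuming the plateau and target checks have already been reduced to booleans:

def schedule_step(beta, alpha_pgi, dm_achieved, plateaued,
                  cooling_factor=2.0, warming_ratio=1.0):
    if not dm_achieved and plateaued:
        beta /= cooling_factor       # mode 1: push the data fit harder
    elif dm_achieved:
        alpha_pgi *= warming_ratio   # mode 2: favor the petrophysical clustering
    return beta, alpha_pgi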
- - Update it with the current value of the petrophysical misfit, obtained - from the :meth:`MultiTargetMisfit.phims()` method. - """ - self.previous_score = copy.deepcopy(self.multi_target_misfits_directive.phims()) - - def update_previous_dmlist(self): - """ - Update the value of the ``previous_dmlist`` attribute. - - Update it with the current value of the data misfits, obtained - from the :meth:`MultiTargetMisfit.dmlist` attribute. - """ - self.previous_dmlist = copy.deepcopy(self.multi_target_misfits_directive.dmlist) - - @property - def directives(self): - """List of all the directives in the :class:`simpeg.inverison.BaseInversion``.""" - return self.inversion.directiveList.dList - - @property - def multi_target_misfits_directive(self): - """``MultiTargetMisfit`` directive in the :class:`simpeg.inverison.BaseInversion``.""" - if not hasattr(self, "_mtm_directive"): - # Obtain multi target misfits directive from the directive list - multi_target_misfits_directive = [ - directive - for directive in self.directives - if isinstance(directive, MultiTargetMisfits) - ] - if not multi_target_misfits_directive: - raise UserWarning( - "No MultiTargetMisfits directive found in the current inversion. " - "A MultiTargetMisfits directive is needed by the " - "PGI_BetaAlphaSchedule directive." - ) - (self._mtm_directive,) = multi_target_misfits_directive - return self._mtm_directive - - @property - def pgi_update_params_directive(self): - """``PGI_UpdateParam``s directive in the :class:`simpeg.inverison.BaseInversion``.""" - if not hasattr(self, "_pgi_update_params"): - # Obtain PGI_UpdateParams directive from the directive list - pgi_update_params_directive = [ - directive - for directive in self.directives - if isinstance(directive, PGI_UpdateParameters) - ] - if pgi_update_params_directive: - (self._pgi_update_params,) = pgi_update_params_directive - else: - self._pgi_update_params = None - return self._pgi_update_params - - @property - def pgi_regularization(self): - """PGI regularization in the :class:`simpeg.inverse_problem.BaseInvProblem``.""" - if not hasattr(self, "_pgi_regularization"): - pgi_regularization = self.reg.get_functions_of_type(PGI) - if len(pgi_regularization) != 1: - raise UserWarning( - "'PGI_UpdateParameters' requires one 'PGI' regularization " - "in the objective function." - ) - self._pgi_regularization = pgi_regularization[0] - return self._pgi_regularization - - -class PGI_AddMrefInSmooth(InversionDirective): - """ - This directive is to be used with regularizations from regularization.pgi. - It implements the strategy described in https://doi.org/10.1093/gji/ggz389 - for including the learned reference model, once stable, in the smoothness terms. - """ - - # Chi factor for Data Misfit - chifact = 1.0 - tolerance_phid = 0.0 - phi_d_target = None - wait_till_stable = True - tolerance = 0.0 - verbose = False - - def initialize(self): - targetclass = np.r_[ - [ - isinstance(dirpart, MultiTargetMisfits) - for dirpart in self.inversion.directiveList.dList - ] - ] - if ~np.any(targetclass): - self.DMtarget = None - else: - self.targetclass = np.where(targetclass)[0][-1] - self._DMtarget = self.inversion.directiveList.dList[ - self.targetclass - ].DMtarget - - self.pgi_updategmm_class = np.r_[ - [ - isinstance(dirpart, PGI_UpdateParameters) - for dirpart in self.inversion.directiveList.dList - ] - ] - - if getattr(self.reg.objfcts[0], "objfcts", None) is not None: - # Find the petrosmallness terms in a two-levels combo-regularization. 
- petrosmallness = np.where( - np.r_[[isinstance(regpart, PGI) for regpart in self.reg.objfcts]] - )[0][0] - self.petrosmallness = petrosmallness - - # Find the smoothness terms in a two-levels combo-regularization. - Smooth = [] - for i, regobjcts in enumerate(self.reg.objfcts): - for j, regpart in enumerate(regobjcts.objfcts): - Smooth += [ - [ - i, - j, - isinstance( - regpart, (SmoothnessFirstOrder, SparseSmoothness) - ), - ] - ] - self.Smooth = np.r_[Smooth] - - self.nbr = np.sum( - [len(self.reg.objfcts[i].objfcts) for i in range(len(self.reg.objfcts))] - ) - self._regmode = 1 - self.pgi_reg = self.reg.objfcts[self.petrosmallness] - - else: - self._regmode = 2 - self.pgi_reg = self.reg - self.nbr = len(self.reg.objfcts) - self.Smooth = np.r_[ - [ - isinstance(regpart, (SmoothnessFirstOrder, SparseSmoothness)) - for regpart in self.reg.objfcts - ] - ] - self._regmode = 2 - - if ~np.any(self.pgi_updategmm_class): - self.previous_membership = self.pgi_reg.membership(self.invProb.model) - else: - self.previous_membership = self.pgi_reg.compute_quasi_geology_model() - - @property - def DMtarget(self): - if getattr(self, "_DMtarget", None) is None: - self.phi_d_target = self.invProb.dmisfit.survey.nD - self._DMtarget = self.chifact * self.phi_d_target - return self._DMtarget - - @DMtarget.setter - def DMtarget(self, val): - self._DMtarget = val - - def endIter(self): - self.DM = self.inversion.directiveList.dList[self.targetclass].DM - self.dmlist = self.inversion.directiveList.dList[self.targetclass].dmlist - - if ~np.any(self.pgi_updategmm_class): - self.membership = self.pgi_reg.membership(self.invProb.model) - else: - self.membership = self.pgi_reg.compute_quasi_geology_model() - - same_mref = np.all(self.membership == self.previous_membership) - percent_diff = ( - len(self.membership) - - np.count_nonzero(self.previous_membership == self.membership) - ) / len(self.membership) - if self.verbose: - print( - "mref changed in ", - len(self.membership) - - np.count_nonzero(self.previous_membership == self.membership), - " places", - ) - if ( - self.DM or np.all(self.dmlist < (1 + self.tolerance_phid) * self.DMtarget) - ) and ( - same_mref or not self.wait_till_stable or percent_diff <= self.tolerance - ): - self.reg.reference_model_in_smooth = True - self.pgi_reg.reference_model_in_smooth = True - - if self._regmode == 2: - for i in range(self.nbr): - if self.Smooth[i]: - self.reg.objfcts[i].reference_model = mkvc( - self.pgi_reg.gmm.means_[self.membership] - ) - if self.verbose: - print( - "Add mref to Smoothness. Changes in mref happened in {} % of the cells".format( - percent_diff - ) - ) - - elif self._regmode == 1: - for i in range(self.nbr): - if self.Smooth[i, 2]: - idx = self.Smooth[i, :2] - self.reg.objfcts[idx[0]].objfcts[idx[1]].reference_model = mkvc( - self.pgi_reg.gmm.means_[self.membership] - ) - if self.verbose: - print( - "Add mref to Smoothness. 
Changes in mref happened in {} % of the cells".format( - percent_diff - ) - ) - - self.previous_membership = copy.deepcopy(self.membership) diff --git a/simpeg/directives/sim_directives.py b/simpeg/directives/sim_directives.py index f40b828c7d..e2c3f8a5f3 100644 --- a/simpeg/directives/sim_directives.py +++ b/simpeg/directives/sim_directives.py @@ -1,390 +1,18 @@ -import numpy as np -from ..regularization import BaseSimilarityMeasure -from ..utils import eigenvalue_by_power_iteration -from ..optimization import IterationPrinters, StoppingCriteria -from .directives import InversionDirective, SaveEveryIteration - - -############################################################################### -# # -# Directives of joint inversion # -# # -############################################################################### -class SimilarityMeasureInversionPrinters: - betas = { - "title": "betas", - "value": lambda M: ["{:.2e}".format(elem) for elem in M.parent.betas], - "width": 26, - "format": "%s", - } - lambd = { - "title": "lambda", - "value": lambda M: M.parent.lambd, - "width": 10, - "format": "%1.2e", - } - phi_d_list = { - "title": "phi_d", - "value": lambda M: ["{:.2e}".format(elem) for elem in M.parent.phi_d_list], - "width": 26, - "format": "%s", - } - phi_m_list = { - "title": "phi_m", - "value": lambda M: ["{:.2e}".format(elem) for elem in M.parent.phi_m_list], - "width": 26, - "format": "%s", - } - phi_sim = { - "title": "phi_sim", - "value": lambda M: M.parent.phi_sim, - "width": 10, - "format": "%1.2e", - } - iterationCG = { - "title": "iterCG", - "value": lambda M: M.cg_count, - "width": 10, - "format": "%3d", - } - - -class SimilarityMeasureInversionDirective(InversionDirective): - """ - Directive for two model similiraty measure joint inversions. Sets Printers and - StoppingCriteria. - - Notes - ----- - Methods assume we are working with two models, and a single similarity measure. - Also, the SimilarityMeasure objective function must be the last regularization. - """ - - printers = [ - IterationPrinters.iteration, - SimilarityMeasureInversionPrinters.betas, - SimilarityMeasureInversionPrinters.lambd, - IterationPrinters.f, - SimilarityMeasureInversionPrinters.phi_d_list, - SimilarityMeasureInversionPrinters.phi_m_list, - SimilarityMeasureInversionPrinters.phi_sim, - SimilarityMeasureInversionPrinters.iterationCG, - ] - - def initialize(self): - if not isinstance(self.reg.objfcts[-1], BaseSimilarityMeasure): - raise TypeError( - f"The last regularization function must be an instance of " - f"BaseSimilarityMeasure, got {type(self.reg.objfcts[-1])}." - ) - - # define relevant attributes - self.betas = self.reg.multipliers[:-1] - self.lambd = self.reg.multipliers[-1] - self.phi_d_list = [] - self.phi_m_list = [] - self.phi_sim = 0.0 - - # pass attributes to invProb - self.invProb.betas = self.betas - self.invProb.num_models = len(self.betas) - self.invProb.lambd = self.lambd - self.invProb.phi_d_list = self.phi_d_list - self.invProb.phi_m_list = self.phi_m_list - self.invProb.phi_sim = self.phi_sim - - self.opt.printers = self.printers - self.opt.stoppers = [StoppingCriteria.iteration] - - def validate(self, directiveList): - # check that this directive is first in the DirectiveList - dList = directiveList.dList - self_ind = dList.index(self) - if self_ind != 0: - raise IndexError( - "The CrossGradientInversionDirective must be first in directive list." 
- ) - return True - - def endIter(self): - # compute attribute values - phi_d = [] - for dmis in self.dmisfit.objfcts: - phi_d.append(dmis(self.opt.xc)) - - phi_m = [] - for reg in self.reg.objfcts: - phi_m.append(reg(self.opt.xc)) - - # pass attributes values to invProb - self.invProb.phi_d_list = phi_d - self.invProb.phi_m_list = phi_m[:-1] - self.invProb.phi_sim = phi_m[-1] - self.invProb.betas = self.reg.multipliers[:-1] - # Assume last reg.objfct is the coupling - self.invProb.lambd = self.reg.multipliers[-1] - - -class SimilarityMeasureSaveOutputEveryIteration(SaveEveryIteration): - """ - SaveOutputEveryIteration for Joint Inversions. - Saves information on the tradeoff parameters, data misfits, regularizations, - coupling term, number of CG iterations, and value of cost function. - """ - - header = None - save_txt = True - betas = None - phi_d = None - phi_m = None - phi_sim = None - phi = None - - def initialize(self): - if self.save_txt is True: - print( - "CrossGradientSaveOutputEveryIteration will save your inversion " - "progress as: '###-{0!s}.txt'".format(self.fileName) - ) - f = open(self.fileName + ".txt", "w") - self.header = " # betas lambda joint_phi_d joint_phi_m phi_sim iterCG phi \n" - f.write(self.header) - f.close() - - # Create a list of each - self.betas = [] - self.lambd = [] - self.phi_d = [] - self.phi_m = [] - self.phi = [] - self.phi_sim = [] - - def endIter(self): - self.betas.append(["{:.2e}".format(elem) for elem in self.invProb.betas]) - self.phi_d.append(["{:.3e}".format(elem) for elem in self.invProb.phi_d_list]) - self.phi_m.append(["{:.3e}".format(elem) for elem in self.invProb.phi_m_list]) - self.lambd.append("{:.2e}".format(self.invProb.lambd)) - self.phi_sim.append(self.invProb.phi_sim) - self.phi.append(self.opt.f) - - if self.save_txt: - f = open(self.fileName + ".txt", "a") - i = self.opt.iter - f.write( - " {0:2d} {1} {2} {3} {4} {5:1.4e} {6:d} {7:1.4e}\n".format( - i, - self.betas[i - 1], - self.lambd[i - 1], - self.phi_d[i - 1], - self.phi_m[i - 1], - self.phi_sim[i - 1], - self.opt.cg_count, - self.phi[i - 1], - ) - ) - f.close() - - def load_results(self): - results = np.loadtxt(self.fileName + str(".txt"), comments="#") - self.betas = results[:, 1] - self.lambd = results[:, 2] - self.phi_d = results[:, 3] - self.phi_m = results[:, 4] - self.phi_sim = results[:, 5] - self.f = results[:, 7] - - -class PairedBetaEstimate_ByEig(InversionDirective): - """ - Estimate the trade-off parameter, beta, between pairs of data misfit(s) and the - regularization(s) as a multiple of the ratio between the highest eigenvalue of the - data misfit term and the highest eigenvalue of the regularization. - The highest eigenvalues are estimated through power iterations and Rayleigh - quotient. - - Notes - ----- - This class assumes the order of the data misfits for each model parameter match - the order for the respective regularizations, i.e. - - >>> data_misfits = [phi_d_m1, phi_d_m2, phi_d_m3] - >>> regs = [phi_m_m1, phi_m_m2, phi_m_m3] - - In which case it will estimate regularization parameters for each respective pair. - """ - - beta0_ratio = 1.0 #: the estimated ratio is multiplied by this to obtain beta - n_pw_iter = 4 #: number of power iterations for estimation. - seed = None #: Random seed for the directive - - def initialize(self): - r""" - The initial beta is calculated by comparing the estimated - eigenvalues of :math:`J^T J` and :math:`W^T W`. - To estimate the eigenvector of **A**, we will use one iteration - of the *Power Method*: - - .. 
math:: - - \mathbf{x_1 = A x_0} - - Given this (very course) approximation of the eigenvector, we can - use the *Rayleigh quotient* to approximate the largest eigenvalue. - - .. math:: - - \lambda_0 = \frac{\mathbf{x^\top A x}}{\mathbf{x^\top x}} - - We will approximate the largest eigenvalue for both JtJ and WtW, - and use some ratio of the quotient to estimate beta0. - - .. math:: - - \beta_0 = \gamma \frac{\mathbf{x^\top J^\top J x}}{\mathbf{x^\top W^\top W x}} - - :rtype: float - :return: beta0 - """ - rng = np.random.default_rng(seed=self.seed) - - if self.verbose: - print("Calculating the beta0 parameter.") - - m = self.invProb.model - dmis_eigenvalues = [] - reg_eigenvalues = [] - dmis_objs = self.dmisfit.objfcts - reg_objs = [ - obj - for obj in self.reg.objfcts - if not isinstance(obj, BaseSimilarityMeasure) - ] - if len(dmis_objs) != len(reg_objs): - raise ValueError( - f"There must be the same number of data misfit and regularizations." - f"Got {len(dmis_objs)} and {len(reg_objs)} respectively." - ) - for dmis, reg in zip(dmis_objs, reg_objs): - dmis_eigenvalues.append( - eigenvalue_by_power_iteration( - dmis, - m, - n_pw_iter=self.n_pw_iter, - random_seed=rng, - ) - ) - - reg_eigenvalues.append( - eigenvalue_by_power_iteration( - reg, - m, - n_pw_iter=self.n_pw_iter, - random_seed=rng, - ) - ) - - self.ratios = np.array(dmis_eigenvalues) / np.array(reg_eigenvalues) - self.invProb.betas = self.beta0_ratio * self.ratios - self.reg.multipliers[:-1] = self.invProb.betas - - -class PairedBetaSchedule(InversionDirective): - """ - Directive for beta cooling schedule to determine the tradeoff - parameters when using paired data misfits and regularizations for a joint inversion. - """ - - chifact_target = 1.0 - beta_tol = 1e-1 - update_beta = True - cooling_rate = 1 - cooling_factor = 2 - dmis_met = False - - @property - def target(self): - if getattr(self, "_target", None) is None: - nD = np.array([survey.nD for survey in self.survey]) - - self._target = nD * self.chifact_target - - return self._target - - @target.setter - def target(self, val): - self._target = val - - def initialize(self): - self.dmis_met = np.zeros_like(self.invProb.betas, dtype=bool) - - def endIter(self): - # Check if target misfit has been reached, if so, set dmis_met to True - for i, phi_d in enumerate(self.invProb.phi_d_list): - self.dmis_met[i] = phi_d < self.target[i] - - # check separately if misfits are within the tolerance, - # otherwise, scale beta individually - for i, phi_d in enumerate(self.invProb.phi_d_list): - if self.opt.iter > 0 and self.opt.iter % self.cooling_rate == 0: - target = self.target[i] - ratio = phi_d / target - if self.update_beta and ratio <= (1.0 + self.beta_tol): - if ratio <= 1: - ratio = np.maximum(0.75, ratio) - else: - ratio = np.minimum(1.5, ratio) - - self.invProb.betas[i] /= ratio - elif ratio > 1.0: - self.invProb.betas[i] /= self.cooling_factor - - self.reg.multipliers[:-1] = self.invProb.betas - - -class MovingAndMultiTargetStopping(InversionDirective): - r""" - Directive for setting stopping criteria for a joint inversion. - Ensures both that all target misfits are met and there is a small change in the - model. Computes the percentage change of the current model from the previous model. 
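The eigenvalue machinery referenced here is compact enough to sketch in full. Assuming matvec applies a symmetric positive semi-definite operator (e.g. J.T @ J) to a vector:

import numpy as np

def largest_eigenvalue(matvec, n, n_iter=4, seed=None):
    # Power iterations followed by a Rayleigh quotient
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(n)
    for _ in range(n_iter):
        x = matvec(x)
        x /= np.linalg.norm(x)
    return (x @ matvec(x)) / (x @ x)

# beta0 is then beta0_ratio * lambda_max(JtJ) / lambda_max(WtW) for each
# data-misfit/regularization pair.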
-
-    ..math::
-        \frac {\| \mathbf{m_i} - \mathbf{m_{i-1}} \|} {\| \mathbf{m_{i-1}} \|}
-    """
-
-    tol = 1e-5
-    beta_tol = 1e-1
-    chifact_target = 1.0
-
-    @property
-    def target(self):
-        if getattr(self, "_target", None) is None:
-            nD = []
-            for survey in self.survey:
-                nD += [survey.nD]
-            nD = np.array(nD)
-
-            self._target = nD * self.chifact_target
-
-        return self._target
-
-    @target.setter
-    def target(self, val):
-        self._target = val
-
-    def endIter(self):
-        for phi_d, target in zip(self.invProb.phi_d_list, self.target):
-            if np.abs(1.0 - phi_d / target) >= self.beta_tol:
-                return
-        if (
-            np.linalg.norm(self.opt.xc - self.opt.x_last)
-            / np.linalg.norm(self.opt.x_last)
-            > self.tol
-        ):
-            return
-
-        print(
-            "stopping criteria met: ",
-            np.linalg.norm(self.opt.xc - self.opt.x_last)
-            / np.linalg.norm(self.opt.x_last),
-        )
-        self.opt.stopNextIteration = True
+"""
+Backward compatibility with the ``simpeg.directives.sim_directives`` submodule.
+
+This file will be deleted when the ``simpeg.directives.sim_directives`` submodule
+is removed.
+"""
+
+import warnings
+from ._sim_directives import *  # noqa: F403,F401
+
+warnings.warn(
+    "The `simpeg.directives.sim_directives` submodule has been deprecated, "
+    "and will be removed in SimPEG v0.26.0. "
+    "Import any directive class directly from the `simpeg.directives` module. "
+    "E.g.: `from simpeg.directives import PairedBetaEstimate_ByEig`",
+    FutureWarning,
+    stacklevel=2,
+)
diff --git a/simpeg/electromagnetics/base_1d.py b/simpeg/electromagnetics/base_1d.py
index 0aa4237b1c..0e7bdf43de 100644
--- a/simpeg/electromagnetics/base_1d.py
+++ b/simpeg/electromagnetics/base_1d.py
@@ -334,7 +334,7 @@ def compute_complex_mu(self, frequencies):
         return mu_complex
 
     def Jvec(self, m, v, f=None):
-        Js = self.getJ(m, f=f)
+        Js = self._getJ(m, f=f)
         out = 0.0
         if self.hMap is not None:
             out = out + Js["dh"] @ (self.hDeriv @ v)
@@ -347,7 +347,7 @@ def Jtvec(self, m, v, f=None):
-        Js = self.getJ(m, f=f)
+        Js = self._getJ(m, f=f)
         out = 0.0
         if self.hMap is not None:
             out = out + self.hDeriv.T @ (Js["dh"].T @ v)
@@ -558,8 +558,9 @@ def _compute_hankel_coefficients(self):
                 C1s.append(np.exp(-lambd * (z + h)[:, None]) * C1 / offsets[:, None])
             lambs.append(lambd)
             n_w_past += n_w
-            Is.append(np.ones(n_w, dtype=int) * i_count)
-            i_count += 1
+            for _ in range(rx.locations.shape[0]):
+                Is.append(np.ones(n_w, dtype=int) * i_count)
+                i_count += 1
 
         # Store these on the simulation for faster future executions
         self._lambs = np.vstack(lambs)
diff --git a/simpeg/electromagnetics/frequency_domain/fields.py b/simpeg/electromagnetics/frequency_domain/fields.py
index bf2c298cd3..136010d7ef 100644
--- a/simpeg/electromagnetics/frequency_domain/fields.py
+++ b/simpeg/electromagnetics/frequency_domain/fields.py
@@ -1135,6 +1135,7 @@ def startup(self):
         self._nC = self.simulation.mesh.nC
         self._MeI = self.simulation.MeI
         self._MfI = self.simulation.MfI
+        self._faceDiv = self.simulation.mesh.face_divergence
 
     def _GLoc(self, fieldType):
         if fieldType in ["h", "hSecondary", "hPrimary", "b"]:
@@ -1563,6 +1564,7 @@ def startup(self):
         self._nC = self.simulation.mesh.nC
         self._MfI = self.simulation.MfI
         self._MeI = self.simulation.MeI
+        self._faceDiv = self.simulation.mesh.face_divergence
 
     def _GLoc(self, fieldType):
         if fieldType in ["h", "hSecondary", "hPrimary", "b"]:
diff --git a/simpeg/electromagnetics/frequency_domain/simulation.py b/simpeg/electromagnetics/frequency_domain/simulation.py
index e7be9dc693..d092226196 100644
---
a/simpeg/electromagnetics/frequency_domain/simulation.py +++ b/simpeg/electromagnetics/frequency_domain/simulation.py @@ -3,7 +3,6 @@ from discretize.utils import Zero from ... import props -from ...data import Data from ...utils import mkvc, validate_type from ..base import BaseEMSimulation from ..utils import omega @@ -241,7 +240,8 @@ def Jvec(self, m, v, f=None): self.model = m - Jv = Data(self.survey) + survey_slices = self.survey.get_all_slices() + Jv = np.full(self.survey.nD, fill_value=np.nan) for nf, freq in enumerate(self.survey.frequencies): for src in self.survey.get_sources_by_frequency(freq): @@ -250,9 +250,12 @@ def Jvec(self, m, v, f=None): dRHS_dm_v = self.getRHSDeriv(freq, src, v) du_dm_v = self.Ainv[nf] * (-dA_dm_v + dRHS_dm_v) for rx in src.receiver_list: - Jv[src, rx] = rx.evalDeriv(src, self.mesh, f, du_dm_v=du_dm_v, v=v) + src_rx_slice = survey_slices[src, rx] + Jv[src_rx_slice] = mkvc( + rx.evalDeriv(src, self.mesh, f, du_dm_v=du_dm_v, v=v) + ) - return Jv.dobs + return Jv def Jtvec(self, m, v, f=None): r"""Compute the adjoint sensitivity matrix times a vector. @@ -290,9 +293,8 @@ def Jtvec(self, m, v, f=None): self.model = m - # Ensure v is a data object. - if not isinstance(v, Data): - v = Data(self.survey, v) + # Get dict of flat array slices for each source-receiver pair in the survey + survey_slices = self.survey.get_all_slices() Jtv = np.zeros(m.size) @@ -302,8 +304,9 @@ def Jtvec(self, m, v, f=None): df_duT_sum = 0 df_dmT_sum = 0 for rx in src.receiver_list: + src_rx_slice = survey_slices[src, rx] df_duT, df_dmT = rx.evalDeriv( - src, self.mesh, f, v=v[src, rx], adjoint=True + src, self.mesh, f, v=v[src_rx_slice], adjoint=True ) if not isinstance(df_duT, Zero): df_duT_sum += df_duT @@ -355,7 +358,8 @@ def getJ(self, m, f=None): Jmatrix = np.zeros((self.survey.nD, m_size)) - data = Data(self.survey) + # Get dict of flat array slices for each source-receiver pair in the survey + survey_slices = self.survey.get_all_slices() for A_i, freq in zip(Ainv, self.survey.frequencies): for src in self.survey.get_sources_by_frequency(freq): @@ -382,8 +386,9 @@ def getJ(self, m, f=None): du_dmT += np.hstack(df_dmT) block = np.array(du_dmT, dtype=complex).real.T - data_inds = data.index_dictionary[src][rx] - Jmatrix[data_inds] = block + + src_rx_slice = survey_slices[src, rx] + Jmatrix[src_rx_slice] = block self._Jmatrix = Jmatrix diff --git a/simpeg/electromagnetics/frequency_domain/simulation_1d.py b/simpeg/electromagnetics/frequency_domain/simulation_1d.py index 7d880dd09c..c51e8e3d33 100644 --- a/simpeg/electromagnetics/frequency_domain/simulation_1d.py +++ b/simpeg/electromagnetics/frequency_domain/simulation_1d.py @@ -127,7 +127,27 @@ def fields(self, m): return self._project_to_data(v) - def getJ(self, m, f=None): + def _getJ(self, m, f=None): + """Build Jacobian matrix by blocks. + + This method builds the Jacobian matrix by blocks, each block for a particular + invertible property (receiver height, conductivity, permeability, layer + thickness). Each block of the Jacobian matrix is stored within a dictionary. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + f : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + dict + Dictionary containing the blocks of the Jacobian matrix for the invertible + properties. The keys of the dictionary can be `"dh"`, `"ds"`, `"dmu"`, and + `"dthick"`. 
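The flat-array indexing that replaces the Data container in this file can be used the same way downstream. A sketch, assuming survey is an existing frequency-domain survey object:

import numpy as np

survey_slices = survey.get_all_slices()  # maps (src, rx) to a slice of the flat data
jv = np.full(survey.nD, np.nan)
for src in survey.source_list:
    for rx in src.receiver_list:
        block = np.zeros(rx.nD)  # placeholder for this pair's computed values
        jv[survey_slices[src, rx]] = block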
+ """ self.model = m if getattr(self, "_J", None) is None: self._J = {} @@ -174,9 +194,9 @@ def getJ(self, m, f=None): rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses) rTE = rTE[i_freq] rTE = np.take_along_axis(rTE, inv_lambs, axis=1) - v_dh_temp = (C0s_dh * rTE) @ self._fhtfilt.j0 + ( - C1s_dh * rTE - ) @ self._fhtfilt.j1 + v_dh_temp = ((C0s_dh * rTE) @ self._fhtfilt.j0).real + ( + (C1s_dh * rTE) @ self._fhtfilt.j1 + ).real v_dh_temp += W @ v_dh_temp # need to re-arange v_dh as it's currently (n_data x 1) # however it already contains all the relevant information... @@ -244,6 +264,44 @@ def getJ(self, m, f=None): self._J["dthick"] = self._project_to_data(v_dthick) return self._J + def getJ(self, m, f=None): + r"""Get the Jacobian matrix. + + This method generates and stores the full Jacobian matrix for the + model provided. I.e.: + + .. math:: + \mathbf{J} = \dfrac{\partial f(\mu(\mathbf{m}))}{\partial \mathbf{m}} + + where :math:`f()` is the forward modelling function, :math:`\mu()` is the + mapping, and :math:`\mathbf{m}` is the model vector. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + f : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + (n_data, n_param) numpy.ndarray + The full Jacobian matrix. + """ + Js = self._getJ(m, f=f) + # Map parameters with their corresponding derivatives + param_and_derivs = { + "dh": self.hDeriv, + "ds": self.sigmaDeriv, + "dmu": self.muDeriv, + "dthick": self.thicknessesDeriv, + } + + # Compute J matrix + J = sum(Js[param] @ param_and_derivs[param] for param in Js) + + return J + def _project_to_data(self, v): i_dat = 0 i_v = 0 @@ -288,6 +346,10 @@ def _project_to_data(self, v): out[i_dat:i_dat_p1] = v_slice.real elif rx.component == "imag": out[i_dat:i_dat_p1] = v_slice.imag + else: + raise NotImplementedError( + f"receiver component {rx.component} not implemented." + ) i_dat = i_dat_p1 i_v = i_v_p1 return out diff --git a/simpeg/electromagnetics/frequency_domain/sources.py b/simpeg/electromagnetics/frequency_domain/sources.py index 9f7ea1a566..8b71287245 100644 --- a/simpeg/electromagnetics/frequency_domain/sources.py +++ b/simpeg/electromagnetics/frequency_domain/sources.py @@ -14,7 +14,6 @@ validate_direction, validate_integer, ) -from ...utils.code_utils import deprecate_property from ..utils import omega from ..utils import segmented_line_current_source_term, line_through_faces @@ -360,6 +359,12 @@ class MagDipole(BaseFDEMSrc): \mathbf{M_{\sigma}^e} \mathbf{e^S} = -\mathbf{C}^T \mathbf{{M_{\mu^{-1}}^f}^S} \mathbf{b^P}} + To obtain $\mathbf{b^P}$, we compute it by taking the curl of the vector potential due to a point dipole. This is provided by :py:meth:`geoana.em.static.MagneticDipoleWholeSpace.vector_potential`. Specifically, + + .. 
math:: + + \vec{B}^P = \nabla \times \vec{A} + Parameters ---------- receiver_list : list of simpeg.electromagnetics.frequency_domain.receivers.BaseRx @@ -493,40 +498,31 @@ def bPrimary(self, simulation): numpy.ndarray Primary magnetic flux density """ - formulation = simulation._formulation coordinates = "cartesian" - if formulation == "EB": - gridX = simulation.mesh.gridEx - gridY = simulation.mesh.gridEy - gridZ = simulation.mesh.gridEz + if simulation._formulation == "EB": C = simulation.mesh.edge_curl - elif formulation == "HJ": - gridX = simulation.mesh.gridFx - gridY = simulation.mesh.gridFy - gridZ = simulation.mesh.gridFz - C = simulation.mesh.edge_curl.T - - if simulation.mesh._meshType == "CYL": - coordinates = "cylindrical" - - if simulation.mesh.is_symmetric is True: - if not (np.linalg.norm(self.orientation - np.r_[0.0, 0.0, 1.0]) < 1e-6): - raise AssertionError( - "for cylindrical symmetry, the dipole must be oriented" - " in the Z direction" - ) - a = self._srcFct(gridY)[:, 1] + if simulation.mesh._meshType == "CYL": + coordinates = "cylindrical" - return C * a + if simulation.mesh.is_symmetric is True: + if not ( + np.linalg.norm(self.orientation - np.r_[0.0, 0.0, 1.0]) < 1e-6 + ): + raise AssertionError( + "for cylindrical symmetry, the dipole must be oriented" + " in the Z direction" + ) + a = self._srcFct(simulation.mesh.edges_y, coordinates)[:, 1] + return C * a - ax = self._srcFct(gridX, coordinates)[:, 0] - ay = self._srcFct(gridY, coordinates)[:, 1] - az = self._srcFct(gridZ, coordinates)[:, 2] - a = np.concatenate((ax, ay, az)) + avec = self._srcFct(simulation.mesh.edges, coordinates) + a = simulation.mesh.project_edge_vector(avec) + return C * a - return C * a + elif simulation._formulation == "HJ": + return self.mu * self.hPrimary(simulation) def hPrimary(self, simulation): """Compute primary magnetic field. @@ -557,8 +553,37 @@ def hPrimary(self, simulation): out.append(h_rx @ rx.orientation) self._1d_h = out return self._1d_h - b = self.bPrimary(simulation) - return 1.0 / self.mu * b + + if simulation._formulation == "EB": + b = self.bPrimary(simulation) + return ( + 1.0 / self.mu * b + ) # same as MfI * Mfmui * b (mu primary must be a scalar) + + elif simulation._formulation == "HJ": + coordinates = "cartesian" + if simulation.mesh._meshType == "CYL": + coordinates = "cylindrical" + if simulation.mesh.is_symmetric is True: + raise AssertionError( + "for cylindrical symmetry, you must use the EB formulation for the simulation" + ) + + avec = self._srcFct(simulation.mesh.faces, coordinates) + a = simulation.mesh.project_face_vector(avec) + + a_boundary = mkvc(self._srcFct(simulation.mesh.boundary_edges)) + a_bc = simulation.mesh.boundary_edge_vector_integral * a_boundary + + return ( + 1.0 + / self.mu + * simulation.MeI + * simulation.mesh.edge_curl.T + * simulation.Mf + * a + - 1 / self.mu * simulation.MeI * a_bc + ) def s_m(self, simulation): """Magnetic source term (s_m) @@ -574,10 +599,13 @@ def s_m(self, simulation): Magnetic source term on mesh. 
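A minimal sketch of the discrete curl-of-vector-potential computation that bPrimary performs on the EB grid, using a small hypothetical mesh:

import discretize
from geoana.em.static import MagneticDipoleWholeSpace

mesh = discretize.TensorMesh([8, 8, 8], origin="CCC")
dipole = MagneticDipoleWholeSpace(orientation="Z")
# Project the continuous vector potential onto mesh edges, then take the curl
a = mesh.project_edge_vector(dipole.vector_potential(mesh.edges))
b_primary = mesh.edge_curl @ a  # discrete B^P = curl(A)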
""" - b_p = self.bPrimary(simulation) - if simulation._formulation == "HJ": - b_p = simulation.Me * b_p - return -1j * omega(self.frequency) * b_p + if simulation._formulation == "EB": + b_p = self.bPrimary(simulation) + return -1j * omega(self.frequency) * b_p + elif simulation._formulation == "HJ": + h_p = self.hPrimary(simulation) + MeMu = simulation.MeMu + return -1j * omega(self.frequency) * MeMu * h_p def s_e(self, simulation): """Electric source term (s_e) @@ -773,10 +801,6 @@ def __init__( **kwargs, ): kwargs.pop("moment", None) - - # Raise error on deprecated arguments - if (key := "N") in kwargs.keys(): - raise TypeError(f"'{key}' property has been removed. Please use 'n_turns'.") self.n_turns = n_turns super().__init__( @@ -877,10 +901,6 @@ def _srcFct(self, obsLoc, coordinates="cartesian"): out[np.isnan(out)] = 0 return self.n_turns * out - N = deprecate_property( - n_turns, "N", "n_turns", removal_version="0.19.0", error=True - ) - class PrimSecSigma(BaseFDEMSrc): def __init__( diff --git a/simpeg/electromagnetics/natural_source/__init__.py b/simpeg/electromagnetics/natural_source/__init__.py index 07cf986b76..337d6da777 100644 --- a/simpeg/electromagnetics/natural_source/__init__.py +++ b/simpeg/electromagnetics/natural_source/__init__.py @@ -27,8 +27,6 @@ receivers.Admittance receivers.ApparentConductivity receivers.Tipper - receivers.PointNaturalSource - receivers.Point3DTipper Sources ======= diff --git a/simpeg/electromagnetics/natural_source/receivers.py b/simpeg/electromagnetics/natural_source/receivers.py index dcf9a86969..aa4135534e 100644 --- a/simpeg/electromagnetics/natural_source/receivers.py +++ b/simpeg/electromagnetics/natural_source/receivers.py @@ -4,7 +4,6 @@ validate_ndarray_with_shape, deprecate_class, ) -import warnings import numpy as np from scipy.constants import mu_0 from scipy.sparse import csr_matrix @@ -301,7 +300,7 @@ def orientation(self, var): def _eval_impedance(self, src, mesh, f): if mesh.dim < 3 and self.orientation in ["xx", "yy"]: - return 0.0 + return np.zeros((self.nD, 1), dtype=complex) e = f[src, "e"] h = f[src, "h"] if mesh.dim == 3: @@ -1239,158 +1238,19 @@ def evalDeriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=False): ) -@deprecate_class(removal_version="0.24.0", future_warn=True, replace_docstring=False) +@deprecate_class(removal_version="0.24.0", error=True, replace_docstring=False) class PointNaturalSource(Impedance): - """Point receiver class for magnetotelluric simulations. - + """ .. warning:: - This class is deprecated and will be removed in SimPEG v0.24.0. + This class was removed in SimPEG v0.24.0. Please use :class:`.natural_source.receivers.Impedance`. - - Assumes that the data locations are standard xyz coordinates; - i.e. (x,y,z) is (Easting, Northing, up). - - Parameters - ---------- - locations : (n_loc, n_dim) numpy.ndarray - Receiver locations. - orientation : {'xx', 'xy', 'yx', 'yy'} - MT receiver orientation. - component : {'real', 'imag', 'apparent_resistivity', 'phase'} - MT data type. """ - def __init__( - self, - locations=None, - orientation="xy", - component="real", - locations_e=None, - locations_h=None, - **kwargs, - ): - if locations is None: - if (locations_e is None) ^ ( - locations_h is None - ): # if only one of them is none - raise TypeError( - "Either locations or both locations_e and locations_h must be passed" - ) - if locations_e is None and locations_h is None: - warnings.warn( - "Using the default for locations is deprecated behavior. Please explicitly set locations. 
", - FutureWarning, - stacklevel=2, - ) - locations_e = np.array([[0.0]]) - locations_h = locations_e - else: # locations was not None - if locations_e is not None or locations_h is not None: - raise TypeError( - "Cannot pass both locations and locations_e or locations_h at the same time." - ) - if isinstance(locations, list): - if len(locations) == 2: - locations_e = locations[0] - locations_h = locations[1] - elif len(locations) == 1: - locations_e = locations[0] - locations_h = locations[0] - else: - raise ValueError("incorrect size of list, must be length of 1 or 2") - else: - locations_e = locations_h = locations - - super().__init__( - locations_e=locations_e, - locations_h=locations_h, - orientation=orientation, - component=component, - **kwargs, - ) - - def eval(self, src, mesh, f, return_complex=False): # noqa: A003 - if return_complex: - warnings.warn( - "Calling with return_complex=True is deprecated in SimPEG 0.23. Instead set rx.component='complex'", - FutureWarning, - stacklevel=2, - ) - temp = self.component - self.component = "complex" - out = super().eval(src, mesh, f) - self.component = temp - else: - out = super().eval(src, mesh, f) - return out - - locations = property(lambda self: self._locations[0], Impedance.locations.fset) - -@deprecate_class(removal_version="0.24.0", future_warn=True, replace_docstring=False) +@deprecate_class(removal_version="0.24.0", error=True, replace_docstring=False) class Point3DTipper(Tipper): - """Point receiver class for Z-axis tipper simulations. - + """ .. warning:: - This class is deprecated and will be removed in SimPEG v0.24.0. + This class was removed in SimPEG v0.24.0. Please use :class:`.natural_source.receivers.Tipper`. - - Assumes that the data locations are standard xyz coordinates; - i.e. (x,y,z) is (Easting, Northing, up). - - Parameters - ---------- - locations : (n_loc, n_dim) numpy.ndarray - Receiver locations. - orientation : str, default = 'zx' - NSEM receiver orientation. Must be one of {'zx', 'zy'} - component : str, default = 'real' - NSEM data type. Choose one of {'real', 'imag', 'apparent_resistivity', 'phase'} """ - - def __init__( - self, - locations, - orientation="zx", - component="real", - locations_e=None, - locations_h=None, - **kwargs, - ): - # note locations_e and locations_h never did anything for this class anyways - # so can just issue a warning here... - if locations_e is not None or locations_h is not None: - warnings.warn( - "locations_e and locations_h are unused for this class", - UserWarning, - stacklevel=2, - ) - if isinstance(locations, list): - if len(locations) < 3: - locations = locations[0] - else: - raise ValueError("incorrect size of list, must be length of 1 or 2") - - super().__init__( - locations_h=locations, - orientation=orientation, - component=component, - **kwargs, - ) - - def eval(self, src, mesh, f, return_complex=False): # noqa: A003 - if return_complex: - warnings.warn( - "Calling with return_complex=True is deprecated in SimPEG 0.23. 
Instead set rx.component='complex'", - FutureWarning, - stacklevel=2, - ) - temp = self.component - self.component = "complex" - out = super().eval(src, mesh, f) - self.component = temp - else: - out = super().eval(src, mesh, f) - return out - - locations = property(lambda self: self._locations[0], Tipper.locations.fset) diff --git a/simpeg/electromagnetics/natural_source/simulation.py b/simpeg/electromagnetics/natural_source/simulation.py index 651a347957..a7cf938a2b 100644 --- a/simpeg/electromagnetics/natural_source/simulation.py +++ b/simpeg/electromagnetics/natural_source/simulation.py @@ -110,6 +110,23 @@ def getADeriv(self, freq, u, v, adjoint=False): freq, u, v, adjoint ) + def getJ(self, m, f=None): + r"""Generate the full sensitivity matrix. + + .. important:: + + This method hasn't been implemented yet for this class. + + Raises + ------- + NotImplementedError + """ + msg = ( + "The getJ method hasn't been implemented for the " + f"{type(self).__name__} yet." + ) + raise NotImplementedError(msg) + class Simulation1DMagneticField(BaseFDEMSimulation): """ @@ -172,6 +189,23 @@ def getADeriv(self, freq, u, v, adjoint=False): freq, u, v, adjoint ) + def getJ(self, m, f=None): + r"""Generate the full sensitivity matrix. + + .. important:: + + This method hasn't been implemented yet for this class. + + Raises + ------- + NotImplementedError + """ + msg = ( + "The getJ method hasn't been implemented for the " + f"{type(self).__name__} yet." + ) + raise NotImplementedError(msg) + class Simulation1DPrimarySecondary(Simulation1DElectricField): r""" diff --git a/simpeg/electromagnetics/natural_source/simulation_1d.py b/simpeg/electromagnetics/natural_source/simulation_1d.py index e421a30cca..56325e8945 100644 --- a/simpeg/electromagnetics/natural_source/simulation_1d.py +++ b/simpeg/electromagnetics/natural_source/simulation_1d.py @@ -248,8 +248,7 @@ def dpred(self, m, f=None): ) elif rx.component == "phase": d.append( - (180.0 / np.pi) - * np.arctan(np.imag(Z[i_freq]) / np.real(Z[i_freq])) + (180.0 / np.pi) * np.arctan2(Z[i_freq].imag, Z[i_freq].real) ) return np.array(d) diff --git a/simpeg/electromagnetics/natural_source/survey.py b/simpeg/electromagnetics/natural_source/survey.py index 18023547e2..fc3751dc1d 100644 --- a/simpeg/electromagnetics/natural_source/survey.py +++ b/simpeg/electromagnetics/natural_source/survey.py @@ -80,6 +80,7 @@ def toRecArray(self, returnType="RealImag"): ("tzy", complex), ] + survey_slices = self.survey.get_all_slices() for src in self.survey.source_list: # Temp array for all the receivers of the source. 
# Note: needs to be written more generally, @@ -100,7 +101,7 @@ def toRecArray(self, returnType="RealImag"): ).view(dtRI) # Get the type and the value for the DataNSEM object as a list typeList = [ - [rx.orientation, rx.component, self[src, rx]] + [rx.orientation, rx.component, self.dobs[survey_slices[src, rx]]] for rx in src.receiver_list ] # Insert the values to the temp array diff --git a/simpeg/electromagnetics/natural_source/utils/data_utils.py b/simpeg/electromagnetics/natural_source/utils/data_utils.py index eb1577be2b..1d226b0104 100644 --- a/simpeg/electromagnetics/natural_source/utils/data_utils.py +++ b/simpeg/electromagnetics/natural_source/utils/data_utils.py @@ -6,8 +6,8 @@ import simpeg as simpeg from simpeg.electromagnetics.natural_source.survey import Survey, Data from simpeg.electromagnetics.natural_source.receivers import ( - PointNaturalSource, - Point3DTipper, + Impedance, + Tipper, ) from simpeg.electromagnetics.natural_source.sources import PlanewaveXYPrimary from simpeg.electromagnetics.natural_source.utils import ( @@ -61,13 +61,16 @@ def extract_data_info(NSEMdata): """ dL, freqL, rxTL = [], [], [] + survey_slices = NSEMdata.survey.get_all_slices() + for src in NSEMdata.survey.source_list: for rx in src.receiver_list: - dL.append(NSEMdata[src, rx]) + src_rx_slice = survey_slices[src, rx] + dL.append(NSEMdata.dobs[src_rx_slice]) freqL.append(np.ones(rx.nD) * src.frequency) - if isinstance(rx, PointNaturalSource): + if isinstance(rx, Impedance): rxTL.extend((("z" + rx.orientation + " ") * rx.nD).split()) - if isinstance(rx, Point3DTipper): + if isinstance(rx, Tipper): rxTL.extend((("t" + rx.orientation + " ") * rx.nD).split()) return np.concatenate(dL), np.concatenate(freqL), np.array(rxTL) @@ -121,9 +124,9 @@ def resample_data(NSEMdata, locs="All", freqs="All", rxs="All", verbose=False): rx_comp = [] for rxT in rxs: if "z" in rxT[0]: - rxtype = PointNaturalSource + rxtype = Impedance elif "t" in rxT[0]: - rxtype = Point3DTipper + rxtype = Tipper else: raise IOError("Unknown rx type string") orient = rxT[1:3] @@ -255,8 +258,8 @@ def convert3Dto1Dobject(NSEMdata, rxType3D="yx"): for loc in uniLocs: # Make the receiver list rx1DList = [] - rx1DList.append(PointNaturalSource(simpeg.mkvc(loc, 2).T, "real")) - rx1DList.append(PointNaturalSource(simpeg.mkvc(loc, 2).T, "imag")) + rx1DList.append(Impedance(simpeg.mkvc(loc, 2).T, component="real")) + rx1DList.append(Impedance(simpeg.mkvc(loc, 2).T, component="imag")) # Source list locrecData = recData[ np.sqrt( diff --git a/simpeg/electromagnetics/natural_source/utils/data_viewer.py b/simpeg/electromagnetics/natural_source/utils/data_viewer.py index 1e8270fcaa..6054d4406f 100644 --- a/simpeg/electromagnetics/natural_source/utils/data_viewer.py +++ b/simpeg/electromagnetics/natural_source/utils/data_viewer.py @@ -71,9 +71,9 @@ def __init__(self, data, data_dict=None, backend="qt"): ] ) ) - if rx.PointNaturalSource in unique_rx: + if rx.Impedance in unique_rx: self.station_figs.append(ApparentResPhsStationPlot()) - if rx.Point3DTipper in unique_rx: + if rx.Tipper in unique_rx: self.station_figs.append(TipperAmplitudeStationPlot()) self.freqency_figs = [] diff --git a/simpeg/electromagnetics/natural_source/utils/plot_utils.py b/simpeg/electromagnetics/natural_source/utils/plot_utils.py index 4be450445b..c608940fec 100644 --- a/simpeg/electromagnetics/natural_source/utils/plot_utils.py +++ b/simpeg/electromagnetics/natural_source/utils/plot_utils.py @@ -831,6 +831,10 @@ def _extract_location_data(data, location, orientation, 
component, return_uncert data_list = [] std_list = [] floor_list = [] + + # Get dict of flat array slices for each source-receiver pair in the survey + survey_slices = data.survey.get_all_slices() + for src in data.survey.source_list: rx_list = [ rx @@ -850,9 +854,9 @@ def _extract_location_data(data, location, orientation, component, return_uncert data_list.append(data[src, rx][ind_loc]) if return_uncert: - index = data.index_dictionary[src][rx] - std_list.append(data.relative_error[index][ind_loc]) - floor_list.append(data.noise_floor[index][ind_loc]) + src_rx_slice = survey_slices[src, rx] + std_list.append(data.relative_error[src_rx_slice][ind_loc]) + floor_list.append(data.noise_floor[src_rx_slice][ind_loc]) if return_uncert: return ( np.array(freq_list), diff --git a/simpeg/electromagnetics/natural_source/utils/solutions_1d.py b/simpeg/electromagnetics/natural_source/utils/solutions_1d.py index 54513bab01..7c9b533e0f 100644 --- a/simpeg/electromagnetics/natural_source/utils/solutions_1d.py +++ b/simpeg/electromagnetics/natural_source/utils/solutions_1d.py @@ -34,7 +34,7 @@ def get1DEfields(m1d, sigma, freq, sourceAmp=1.0): ## Note: The analytic solution is derived with e^iwt bc = np.r_[Etot[0], Etot[-1]] # The right hand side - rhs = Aio * bc + rhs = -Aio * bc # Solve the system Aii_inv = Solver(Aii) eii = Aii_inv * rhs diff --git a/simpeg/electromagnetics/natural_source/utils/test_utils.py b/simpeg/electromagnetics/natural_source/utils/test_utils.py index 34943aae44..88f8131db9 100644 --- a/simpeg/electromagnetics/natural_source/utils/test_utils.py +++ b/simpeg/electromagnetics/natural_source/utils/test_utils.py @@ -1,11 +1,11 @@ import numpy as np import discretize -from simpeg import maps, mkvc, utils, Data +from simpeg import maps, mkvc, utils from ....utils import unpack_widths from ..receivers import ( - PointNaturalSource, - Point3DTipper, + Impedance, + Tipper, ) from ..survey import Survey from ..sources import PlanewaveXYPrimary, Planewave @@ -18,9 +18,9 @@ def getAppResPhs(NSEMdata, survey): - NSEMdata = Data(dobs=NSEMdata, survey=survey) # Make impedance zList = [] + survey_slices = survey.get_all_slices() for src in survey.source_list: zc = [src.frequency] for rx in src.receiver_list: @@ -28,14 +28,15 @@ def getAppResPhs(NSEMdata, survey): m = 1j else: m = 1 - zc.append(m * NSEMdata[src, rx]) + src_rx_slice = survey_slices[src, rx] + zc.append(m * NSEMdata[src_rx_slice]) zList.append(zc) return [ appResPhs(zList[i][0], np.sum(zList[i][1:3])) for i in np.arange(len(zList)) ] -def setup1DSurvey(sigmaHalf, tD=False, structure=False): +def setup1DSurvey(sigmaHalf, tD=False, structure=False, rx_orientation="xy"): # Frequency num_frequencies = 33 freqs = np.logspace(3, -3, num_frequencies) @@ -67,10 +68,14 @@ def setup1DSurvey(sigmaHalf, tD=False, structure=False): receiver_list = [] for _ in range(len(["z1d", "z1d"])): receiver_list.append( - PointNaturalSource(mkvc(np.array([0.0]), 2).T, component="real") + Impedance( + mkvc(np.array([0.0]), 2).T, component="real", orientation=rx_orientation + ) ) receiver_list.append( - PointNaturalSource(mkvc(np.array([0.0]), 2).T, component="imag") + Impedance( + mkvc(np.array([0.0]), 2).T, component="imag", orientation=rx_orientation + ) ) # Source list source_list = [] @@ -112,8 +117,8 @@ def setup1DSurveyElectricMagnetic(sigmaHalf, tD=False, structure=False): rxList = [] for _ in range(len(["z1d", "z1d"])): - rxList.append(PointNaturalSource(mkvc(np.array([0.0]), 2).T, component="real")) - 
rxList.append(PointNaturalSource(mkvc(np.array([0.0]), 2).T, component="imag")) + rxList.append(Impedance(mkvc(np.array([0.0]), 2).T, component="real")) + rxList.append(Impedance(mkvc(np.array([0.0]), 2).T, component="imag")) # Source list # srcList = [] src_list = [Planewave([], frequency=f) for f in frequencies] @@ -175,50 +180,44 @@ def setupSimpegNSEM_tests_location_assign_list( if comp == "Res": if singleList: rxList.append( - PointNaturalSource( - locations=[rx_loc], + Impedance( + rx_loc, orientation=rx_type, component="apparent_resistivity", ) ) rxList.append( - PointNaturalSource( - locations=[rx_loc], orientation=rx_type, component="phase" - ) + Impedance(rx_loc, orientation=rx_type, component="phase") ) else: rxList.append( - PointNaturalSource( - locations=[rx_loc, rx_loc], + Impedance( + locations_e=rx_loc, + locations_h=rx_loc, orientation=rx_type, component="apparent_resistivity", ) ) rxList.append( - PointNaturalSource( - locations=[rx_loc, rx_loc], + Impedance( + locations_e=rx_loc, + locations_h=rx_loc, orientation=rx_type, component="phase", ) ) else: + rxList.append(Impedance(rx_loc, orientation=rx_type, component="real")) rxList.append( - PointNaturalSource( - orientation=rx_type, component="real", locations=[rx_loc] - ) - ) - rxList.append( - PointNaturalSource( - orientation=rx_type, component="imag", locations=[rx_loc] + Impedance( + rx_loc, + orientation=rx_type, + component="imag", ) ) if rx_type in ["zx", "zy"]: - rxList.append( - Point3DTipper(orientation=rx_type, component="real", locations=[rx_loc]) - ) - rxList.append( - Point3DTipper(orientation=rx_type, component="imag", locations=[rx_loc]) - ) + rxList.append(Tipper(rx_loc, orientation=rx_type, component="real")) + rxList.append(Tipper(rx_loc, orientation=rx_type, component="imag")) srcList = [] if singleFreq: @@ -327,23 +326,19 @@ def setupSimpegNSEM_PrimarySecondary(inputSetup, freqs, comp="Imp", singleFreq=F if rx_type in ["xx", "xy", "yx", "yy"]: if comp == "Res": rxList.append( - PointNaturalSource( - locations=rx_loc, + Impedance( + rx_loc, orientation=rx_type, component="apparent_resistivity", ) ) - rxList.append( - PointNaturalSource( - locations=rx_loc, orientation=rx_type, component="phase" - ) - ) + rxList.append(Impedance(rx_loc, orientation=rx_type, component="phase")) else: - rxList.append(PointNaturalSource(rx_loc, rx_type, "real")) - rxList.append(PointNaturalSource(rx_loc, rx_type, "imag")) + rxList.append(Impedance(rx_loc, orientation=rx_type, component="real")) + rxList.append(Impedance(rx_loc, orientation=rx_type, component="imag")) if rx_type in ["zx", "zy"]: - rxList.append(Point3DTipper(rx_loc, rx_type, "real")) - rxList.append(Point3DTipper(rx_loc, rx_type, "imag")) + rxList.append(Tipper(rx_loc, orientation=rx_type, component="real")) + rxList.append(Tipper(rx_loc, orientation=rx_type, component="imag")) srcList = [] if singleFreq: @@ -427,7 +422,7 @@ def setupSimpegNSEM_ePrimSec(inputSetup, comp="Imp", singleFreq=False, expMap=Tr if rx_type in ["xx", "xy", "yx", "yy"]: if comp == "Res": receiver_list.append( - PointNaturalSource( + Impedance( locations_e=rx_loc, locations_h=rx_loc, orientation=rx_type, @@ -435,7 +430,7 @@ def setupSimpegNSEM_ePrimSec(inputSetup, comp="Imp", singleFreq=False, expMap=Tr ) ) receiver_list.append( - PointNaturalSource( + Impedance( locations_e=rx_loc, locations_h=rx_loc, orientation=rx_type, @@ -443,11 +438,19 @@ def setupSimpegNSEM_ePrimSec(inputSetup, comp="Imp", singleFreq=False, expMap=Tr ) ) else: - receiver_list.append(PointNaturalSource(rx_loc, 
rx_type, "real")) - receiver_list.append(PointNaturalSource(rx_loc, rx_type, "imag")) + receiver_list.append( + Impedance(rx_loc, orientation=rx_type, component="real") + ) + receiver_list.append( + Impedance(rx_loc, orientation=rx_type, component="imag") + ) if rx_type in ["zx", "zy"]: - receiver_list.append(Point3DTipper(rx_loc, rx_type, "real")) - receiver_list.append(Point3DTipper(rx_loc, rx_type, "imag")) + receiver_list.append( + Impedance(rx_loc, orientation=rx_type, component="real") + ) + receiver_list.append( + Impedance(rx_loc, orientation=rx_type, component="imag") + ) # Source list source_list = [] diff --git a/simpeg/electromagnetics/static/induced_polarization/simulation.py b/simpeg/electromagnetics/static/induced_polarization/simulation.py index 5aa88b19b9..203d015eee 100644 --- a/simpeg/electromagnetics/static/induced_polarization/simulation.py +++ b/simpeg/electromagnetics/static/induced_polarization/simulation.py @@ -5,7 +5,7 @@ from .... import maps, props from ....base import BasePDESimulation -from ....data import Data +from ....utils import mkvc from ..resistivity import Simulation2DCellCentered as DC_2D_CC from ..resistivity import Simulation2DNodal as DC_2D_N from ..resistivity import Simulation3DCellCentered as DC_3D_CC @@ -43,7 +43,8 @@ def rhoDeriv(self): @cached_property def _scale(self): - scale = Data(self.survey, np.ones(self.survey.nD)) + survey_slices = self.survey.get_all_slices() + scale = np.ones(self.survey.nD) if self._f is None: # re-uses the DC simulation's fields method self._f = super().fields(None) @@ -55,8 +56,9 @@ def _scale(self): for src in self.survey.source_list: for rx in src.receiver_list: if rx.data_type == "apparent_chargeability": - scale[src, rx] = 1.0 / rx.eval(src, self.mesh, f) - return scale.dobs + src_rx_slice = survey_slices[src, rx] + scale[src_rx_slice] = mkvc(1.0 / rx.eval(src, self.mesh, f)) + return scale eta, etaMap, etaDeriv = props.Invertible("Electrical Chargeability (V/V)") diff --git a/simpeg/electromagnetics/static/resistivity/simulation.py b/simpeg/electromagnetics/static/resistivity/simulation.py index 36c1742311..92a3adf578 100644 --- a/simpeg/electromagnetics/static/resistivity/simulation.py +++ b/simpeg/electromagnetics/static/resistivity/simulation.py @@ -112,6 +112,7 @@ def fields(self, m=None, calcJ=True): return f def getJ(self, m, f=None): + self.model = m if getattr(self, "_Jmatrix", None) is None: if f is None: f = self.fields(m) @@ -215,7 +216,6 @@ def _Jtvec(self, m, v=None, f=None): if isinstance(v, Data): v = v.dobs v = self._mini_survey_dataT(v) - v = Data(survey, v) Jtv = np.zeros(m.size) else: # This is for forming full sensitivity matrix @@ -223,13 +223,17 @@ def _Jtvec(self, m, v=None, f=None): istrt = int(0) iend = int(0) + # Get dict of flat array slices for each source-receiver pair in the survey + survey_slices = survey.get_all_slices() + for source in survey.source_list: u_source = f[source, self._solutionType].copy() for rx in source.receiver_list: # wrt f, need possibility wrt m if v is not None: + src_rx_slice = survey_slices[source, rx] PTv = rx.evalDeriv( - source, self.mesh, f, v[source, rx], adjoint=True + source, self.mesh, f, v[src_rx_slice], adjoint=True ) else: PTv = rx.evalDeriv(source, self.mesh, f).toarray().T diff --git a/simpeg/electromagnetics/static/resistivity/simulation_2d.py b/simpeg/electromagnetics/static/resistivity/simulation_2d.py index 09718ab450..5657af8988 100644 --- a/simpeg/electromagnetics/static/resistivity/simulation_2d.py +++ 
b/simpeg/electromagnetics/static/resistivity/simulation_2d.py
@@ -278,10 +278,10 @@ def getJ(self, m, f=None):
         """
         Generate Full sensitivity matrix
         """
+        self.model = m
         if getattr(self, "_Jmatrix", None) is None:
             if self.verbose:
                 print("Calculating J and storing")
-            self.model = m
             if f is None:
                 f = self.fields(m)
             self._Jmatrix = (self._Jtvec(m, v=None, f=f)).T
@@ -367,16 +367,18 @@ def _Jtvec(self, m, v=None, f=None):
                 v = self._mini_survey_dataT(v)
         Jtv = np.zeros(m.size, dtype=float)

+        # Get dict of flat array slices for each source-receiver pair in the survey
+        survey_slices = survey.get_all_slices()
+
         for iky, ky in enumerate(kys):
             u_ky = f[:, self._solutionType, iky]
-            count = 0
             for i_src, src in enumerate(survey.source_list):
                 u_src = u_ky[:, i_src]
                 df_duT_sum = 0
                 df_dmT_sum = 0
                 for rx in src.receiver_list:
-                    my_v = v[count : count + rx.nD]
-                    count += rx.nD
+                    src_rx_slice = survey_slices[src, rx]
+                    my_v = v[src_rx_slice]
                     # wrt f, need possibility wrt m
                     PTv = rx.evalDeriv(src, self.mesh, f, my_v, adjoint=True)
                     df_duTFun = getattr(f, "_{0!s}Deriv".format(rx.projField), None)
@@ -572,23 +574,7 @@ def setBC(self, ky=None):
         else:
             mesh = self.mesh
         boundary_faces = mesh.boundary_faces
-        boundary_normals = mesh.boundary_face_outward_normals
-        n_bf = len(boundary_faces)
-
-        # Top gets 0 Neumann
-        alpha = np.zeros(n_bf)
-        beta = np.ones(n_bf)
-        gamma = 0
-
-        # assume a source point at the middle of the top of the mesh
-        middle = np.median(mesh.nodes, axis=0)
         top_v = np.max(mesh.nodes[:, -1])
-        source_point = np.r_[middle[:-1], top_v]
-
-        r_vec = boundary_faces - source_point
-        r = np.linalg.norm(r_vec, axis=-1)
-        r_hat = r_vec / r[:, None]
-        r_dot_n = np.einsum("ij,ij->i", r_hat, boundary_normals)

         if self.surface_faces is None:
             # determine faces that are on the sides and bottom of the mesh...
@@ -610,11 +596,29 @@ def setBC(self, ky=None):
         else:
             not_top = ~self.surface_faces

+        n_bf = len(boundary_faces)
+
+        # Top gets 0 Neumann
+        alpha = np.zeros(n_bf)
+        beta = np.ones(n_bf)
+        gamma = 0
+
+        # assume a source point at the middle of the top of the mesh
+        middle = np.median(mesh.nodes, axis=0)
+        source_point = np.r_[middle[:-1], top_v]
+
+        boundary_faces = boundary_faces[not_top]
+        boundary_normals = mesh.boundary_face_outward_normals[not_top]
+        r_vec = boundary_faces - source_point
+        r = np.linalg.norm(r_vec, axis=-1)
+        r_hat = r_vec / r[:, None]
+        r_dot_n = np.einsum("ij,ij->i", r_hat, boundary_normals)
+
         # use the exponentially scaled modified bessel function of second kind,
         # (the division will cancel out the scaling)
         # This is more stable for large values of ky * r
         # actual ratio is k1/k0...
-        alpha[not_top] = (ky * k1e(ky * r) / k0e(ky * r) * r_dot_n)[not_top]
+        alpha[not_top] = ky * k1e(ky * r) / k0e(ky * r) * r_dot_n

         B, bc = self.mesh.cell_gradient_weak_form_robin(alpha, beta, gamma)
         # bc should always be 0 because gamma was always 0 above
@@ -751,20 +755,7 @@ def setBC(self, ky=None):
             mesh = self.mesh
         # calculate alpha, beta, gamma at the boundary faces
         boundary_faces = mesh.boundary_faces
-        boundary_normals = mesh.boundary_face_outward_normals
-        n_bf = len(boundary_faces)
-
-        alpha = np.zeros(n_bf)
-
-        # assume a source point at the middle of the top of the mesh
-        middle = np.median(mesh.nodes, axis=0)
         top_v = np.max(mesh.nodes[:, -1])
-        source_point = np.r_[middle[:-1], top_v]
-
-        r_vec = boundary_faces - source_point
-        r = np.linalg.norm(r_vec, axis=-1) + 1e-16
-        r_hat = r_vec / r[:, None]
-        r_dot_n = np.einsum("ij,ij->i", r_hat, boundary_normals)

         if self.surface_faces is None:
             # determine faces that are on the sides and bottom of the mesh...
@@ -786,11 +777,27 @@ def setBC(self, ky=None):
         else:
             not_top = ~self.surface_faces

+        n_bf = len(boundary_faces)
+
+        boundary_faces = boundary_faces[not_top]
+        boundary_normals = mesh.boundary_face_outward_normals[not_top]
+
+        alpha = np.zeros(n_bf)
+
+        # assume a source point at the middle of the top of the mesh
+        middle = np.median(mesh.nodes, axis=0)
+        source_point = np.r_[middle[:-1], top_v]
+
+        r_vec = boundary_faces - source_point
+        r = np.linalg.norm(r_vec, axis=-1)
+        r_hat = r_vec / r[:, None]
+        r_dot_n = np.einsum("ij,ij->i", r_hat, boundary_normals)
+
         # use the exponentially scaled modified bessel function of second kind,
         # (the division will cancel out the scaling)
         # This is more stable for large values of ky * r
         # actual ratio is k1/k0...
-        alpha[not_top] = (ky * k1e(ky * r) / k0e(ky * r) * r_dot_n)[not_top]
+        alpha[not_top] = ky * k1e(ky * r) / k0e(ky * r) * r_dot_n

         P_bf = self.mesh.project_face_to_boundary_face
diff --git a/simpeg/electromagnetics/static/resistivity/survey.py b/simpeg/electromagnetics/static/resistivity/survey.py
index 1da25060d4..b1ad5bfbf7 100644
--- a/simpeg/electromagnetics/static/resistivity/survey.py
+++ b/simpeg/electromagnetics/static/resistivity/survey.py
@@ -1,4 +1,3 @@
-import warnings
 import numpy as np

 from ....utils.code_utils import validate_string
@@ -8,7 +7,6 @@
 from . import receivers as Rx
 from . import sources as Src
 from ..utils import static_utils
-from simpeg import data


 class Survey(BaseSurvey):
@@ -28,16 +26,12 @@ def __init__(
         survey_geometry="surface",
         **kwargs,
     ):
-        if (key := "survey_type") in kwargs:
-            warnings.warn(
-                f"Argument '{key}' is ignored and will be removed in future "
-                "versions of SimPEG. Types of sources and their corresponding "
-                "receivers are obtained from their respective classes, without "
+        if kwargs.pop("survey_type", None) is not None:
+            raise TypeError(
+                "Argument 'survey_type' has been removed in SimPEG 0.24.0. Types of sources and "
+                "their corresponding receivers are obtained from their respective classes, without "
                 "the need to specify the survey type.",
-                FutureWarning,
-                stacklevel=0,
             )
-            kwargs.pop(key)

         super(Survey, self).__init__(source_list, **kwargs)
         self.survey_geometry = survey_geometry
@@ -45,8 +39,6 @@ def __init__(
     def survey_geometry(self):
         """Survey geometry

-        This property is deprecated.
-
         Returns
         -------
         str
@@ -66,36 +58,17 @@ def survey_type(self):
         """
         ``survey_type`` has been removed.

-        Survey type; one of {"dipole-dipole", "pole-dipole", "dipole-pole", "pole-pole"}
-
         .. important: The ``survey_type`` property has been removed.
             Types of sources and their corresponding receivers are obtained
             from their respective classes, without the need to specify the
             survey type.
-
-        Returns
-        -------
-        str
-            Survey type; one of {"dipole-dipole", "pole-dipole", "dipole-pole", "pole-pole"}
         """
-        warnings.warn(
-            "Property 'survey_type' has been removed."
-            "Types of sources and their corresponding receivers are obtained from "
-            "their respective classes, without the need to specify the survey type.",
-            FutureWarning,
-            stacklevel=0,
-        )
+        raise AttributeError("'survey_type' has been removed.")

     @survey_type.setter
     def survey_type(self, var):
-        warnings.warn(
-            "Property 'survey_type' has been removed."
-            "Types of sources and their corresponding receivers are obtained from "
-            "their respective classes, without the need to specify the survey type.",
-            FutureWarning,
-            stacklevel=0,
-        )
+        raise AttributeError("'survey_type' has been removed.")

     def __repr__(self):
         return f"{self.__class__.__name__}(#sources: {self.nSrc}; #data: {self.nD})"
@@ -194,8 +167,6 @@ def source_locations(self):
     def set_geometric_factor(
         self,
         space_type="halfspace",
-        data_type=None,
-        survey_type=None,
     ):
         """
         Set and return the geometric factor for all data

        Parameters
        ----------
        space_type : {'halfspace', 'wholespace'}
            Calculate geometric factors using a half-space or whole-space formula.
-        data_type : str, default = ``None``
-            This input argument is now deprecated
-        survey_type : str, default = ``None``
-            This input argument is now deprecated

        Returns
        -------
        (nD) numpy.ndarray
            The geometric factor for each datum
        """
-        if data_type is not None:
-            raise TypeError(
-                "The data_type kwarg has been removed, please set the data_type on the "
-                "receiver object itself."
-            )
-        if survey_type is not None:
-            raise TypeError("The survey_type parameter is no longer needed")
-
         geometric_factor = static_utils.geometric_factor(self, space_type=space_type)
-        geometric_factor = data.Data(self, geometric_factor)
+        # geometric_factor = data.Data(self, geometric_factor)
+        survey_slices = self.get_all_slices()
         for source in self.source_list:
             for rx in source.receiver_list:
-                if data_type is not None:
-                    rx.data_type = data_type
                 if rx.data_type == "apparent_resistivity":
-                    rx._geometric_factor[source] = geometric_factor[source, rx]
+                    src_rx_slice = survey_slices[source, rx]
+                    rx._geometric_factor[source] = geometric_factor[src_rx_slice]
         return geometric_factor
@@ -276,14 +235,6 @@ def _set_abmn_locations(self):
         self._locations_m = np.vstack(locations_m)
         self._locations_n = np.vstack(locations_n)

-    def getABMN_locations(self):
-        """The 'getABMN_locations' method has been removed."""
-        raise TypeError(
-            "The getABMN_locations method has been Removed. Please instead "
-            "ask for the property of interest: survey.locations_a, "
-            "survey.locations_b, survey.locations_m, or survey.locations_n."
-        )
-
     def drape_electrodes_on_topography(
         self,
         mesh,
@@ -291,7 +242,6 @@ def drape_electrodes_on_topography(
         option="top",
         topography=None,
         force=False,
-        ind_active=None,
     ):
         """Shift electrode locations to discrete surface topography.
@@ -307,20 +257,7 @@ def drape_electrodes_on_topography(
             Surface topography
         force : bool, default = ``False``
             If ``True`` force electrodes to surface even if borehole
-        ind_active : numpy.ndarray of int or bool, optional
-
-            .. deprecated:: 0.23.0
-
-                Argument ``ind_active`` is deprecated in favor of ``active_cells``
-                and will be removed in SimPEG v0.24.0.
-
         """
-        # Deprecate ind_active argument
-        if ind_active is not None:
-            raise TypeError(
-                "'ind_active' has been deprecated and will be removed in "
-                " SimPEG v0.24.0, please use 'active_cells' instead."
-            )

         if self.survey_geometry == "surface":
             loc_a = self.locations_a[:, :2]
@@ -368,10 +305,3 @@ def drape_electrodes_on_topography(
             raise Exception(
                 f"Input valid survey survey_geometry: {self.survey_geometry}"
             )
-
-    def drapeTopo(self, *args, **kwargs):
-        """This method is deprecated. See :meth:`drape_electrodes_on_topography`"""
-        raise TypeError(
-            "The drapeTopo method has been removed. Please instead "
-            "use the drape_electrodes_on_topography method."
-        )
diff --git a/simpeg/electromagnetics/static/spectral_induced_polarization/run.py b/simpeg/electromagnetics/static/spectral_induced_polarization/run.py
index 8a13cd0b88..34d6963be1 100644
--- a/simpeg/electromagnetics/static/spectral_induced_polarization/run.py
+++ b/simpeg/electromagnetics/static/spectral_induced_polarization/run.py
@@ -1,4 +1,3 @@
-import warnings
 import numpy as np
 from simpeg import (
     maps,
@@ -20,7 +19,7 @@ def spectral_ip_mappings(
     is_log_eta=True,
     is_log_tau=True,
     is_log_c=True,
-    indActive=None,
+    **kwargs,
 ):
     """
     Generates Mappings for Spectral Induced Polarization Simulation.
@@ -38,21 +37,14 @@ def spectral_ip_mappings(
     TODO: Illustrate input and output variables
     """
+
     # Deprecate indActive argument
-    if indActive is not None:
-        if active_cells is not None:
-            raise TypeError(
-                "Cannot pass both 'active_cells' and 'indActive'."
-                "'indActive' has been deprecated and will be removed in "
-                " SimPEG v0.24.0, please use 'active_cells' instead.",
-            )
-        warnings.warn(
-            "'indActive' has been deprecated and will be removed in "
-            " SimPEG v0.24.0, please use 'active_cells' instead.",
-            FutureWarning,
-            stacklevel=2,
+    if kwargs.pop("indActive", None) is not None:
+        raise TypeError(
+            "'indActive' was removed in SimPEG v0.24.0, please use 'active_cells' instead."
         )
-        active_cells = indActive
+    if kwargs:  # TODO Remove this when removing kwargs argument.
+ raise TypeError("Unsupported keyword argument " + kwargs.popitem()[0]) if active_cells is None: active_cells = np.ones(mesh.nC, dtype=bool) diff --git a/simpeg/electromagnetics/static/spectral_induced_polarization/simulation.py b/simpeg/electromagnetics/static/spectral_induced_polarization/simulation.py index 5abc4b4e8f..b0b3344e93 100644 --- a/simpeg/electromagnetics/static/spectral_induced_polarization/simulation.py +++ b/simpeg/electromagnetics/static/spectral_induced_polarization/simulation.py @@ -359,6 +359,7 @@ def getJ(self, m, f=None): """ Generate Full sensitivity matrix """ + self.model = m if self._Jmatrix is not None: return self._Jmatrix diff --git a/simpeg/electromagnetics/static/spectral_induced_polarization/simulation_2d.py b/simpeg/electromagnetics/static/spectral_induced_polarization/simulation_2d.py index 2a0faf4906..7b1a3be006 100644 --- a/simpeg/electromagnetics/static/spectral_induced_polarization/simulation_2d.py +++ b/simpeg/electromagnetics/static/spectral_induced_polarization/simulation_2d.py @@ -17,6 +17,8 @@ def getJ(self, m, f=None): Generate Full sensitivity matrix """ + self.model = m + if self.verbose: print(">> Compute Sensitivity matrix") diff --git a/simpeg/electromagnetics/static/spectral_induced_polarization/survey.py b/simpeg/electromagnetics/static/spectral_induced_polarization/survey.py index 0826d84f4b..0fdc0c54a4 100644 --- a/simpeg/electromagnetics/static/spectral_induced_polarization/survey.py +++ b/simpeg/electromagnetics/static/spectral_induced_polarization/survey.py @@ -1,4 +1,3 @@ -import warnings from ....survey import BaseTimeSurvey from . import sources from . import receivers @@ -20,20 +19,13 @@ class Survey(BaseTimeSurvey): _n_pulse = 2 _T = 8.0 - def __init__(self, source_list=None, survey_geometry="surface", **kwargs): - if (key := "survey_type") in kwargs: - warnings.warn( - f"Argument '{key}' is ignored and will be removed in future " - "versions of SimPEG. Types of sources and their corresponding " - "receivers are obtained from their respective classes, without " + def __init__(self, source_list, survey_geometry="surface", **kwargs): + if kwargs.pop("survey_type", None) is not None: + raise TypeError( + "Argument 'survey_type' has been removed in SimPEG 0.24.0. Types of sources and" + "their corresponding receivers are obtained from their respective classes, without " "the need to specify the survey type.", - FutureWarning, - stacklevel=1, ) - kwargs.pop(key) - - if source_list is None: - raise AttributeError("Survey cannot be instantiated without sources") super(Survey, self).__init__(source_list, **kwargs) self.survey_geometry = survey_geometry @@ -61,7 +53,7 @@ def T(self): @property def survey_geometry(self): - """Survey geometry; one of {"surface", "borehole", "general"} + """Survey geometry Returns ------- @@ -81,36 +73,17 @@ def survey_type(self): """ ``survey_type`` has been removed. - Survey type; one of {"dipole-dipole", "pole-dipole", "dipole-pole", "pole-pole"} - .. important: The `survey_type` property has been removed. Types of sources and their corresponding receivers are obtained from their respective classes, without the need to specify the survey type. - - Returns - ------- - str - Survey type; one of {"dipole-dipole", "pole-dipole", "dipole-pole", "pole-pole"} """ - warnings.warn( - "Property 'survey_type' has been removed." 
- "Types of sources and their corresponding receivers are obtained from " - "their respective classes, without the need to specify the survey type.", - FutureWarning, - stacklevel=1, - ) + raise AttributeError("'survey_type' has been removed.") @survey_type.setter def survey_type(self, var): - warnings.warn( - "Property 'survey_type' has been removed." - "Types of sources and their corresponding receivers are obtained from " - "their respective classes, without the need to specify the survey type.", - FutureWarning, - stacklevel=1, - ) + raise AttributeError("'survey_type' has been removed.") @property def n_locations(self): diff --git a/simpeg/electromagnetics/static/spontaneous_potential/__init__.py b/simpeg/electromagnetics/static/spontaneous_potential/__init__.py index 86323379dd..758d141a51 100644 --- a/simpeg/electromagnetics/static/spontaneous_potential/__init__.py +++ b/simpeg/electromagnetics/static/spontaneous_potential/__init__.py @@ -1,69 +1,3 @@ -""" -============================================================================================ -Spontaneous Potential (:mod:`simpeg.electromagnetics.static.spontaneous_potential`) -============================================================================================ -.. currentmodule:: simpeg.electromagnetics.static.spontaneous_potential - -.. admonition:: important - - This module will be deprecated in favour of ``simpeg.electromagnetics.static.self_potential`` - - -Simulations -=========== -.. autosummary:: - :toctree: generated/ - - Simulation3DCellCentered - -Receivers -========= -This module makes use of the receivers in :mod:`simpeg.electromagnetics.static.resistivity` - -Sources -======= -.. autosummary:: - :toctree: generated/ - - sources.StreamingCurrents - -Surveys -======= -.. autosummary:: - :toctree: generated/ - - Survey - -Maps -==== -The spontaneous potential simulation provides two specialized maps to extend to inversions -with different types of model sources. - -.. autosummary:: - :toctree: generated/ - - CurrentDensityMap - HydraulicHeadMap - -""" - -import warnings - -warnings.warn( - ( - "The 'spontaneous_potential' module has been renamed to 'self_potential'. " - "Please use the 'self_potential' module instead. " - "The 'spontaneous_potential' module will be removed in SimPEG 0.23." - ), - FutureWarning, - stacklevel=2, +raise ImportError( + "The 'spontaneous_potential' module has been moved to 'self_potential'." ) - -from ..self_potential.simulation import ( - Simulation3DCellCentered, - Survey, - CurrentDensityMap, - HydraulicHeadMap, -) -from ..self_potential import sources -from ..self_potential import simulation diff --git a/simpeg/electromagnetics/static/utils/static_utils.py b/simpeg/electromagnetics/static/utils/static_utils.py index 7a552a03bd..8722e02e66 100644 --- a/simpeg/electromagnetics/static/utils/static_utils.py +++ b/simpeg/electromagnetics/static/utils/static_utils.py @@ -197,12 +197,10 @@ def pseudo_locations(survey, wenner_tolerance=0.1, **kwargs): if not isinstance(survey, dc.Survey): raise TypeError(f"Input must be instance of {dc.Survey}, not {type(survey)}") - if len(kwargs) > 0: - warnings.warn( - "The keyword arguments of this function have been deprecated." + if kwargs: + raise TypeError( + "The keyword arguments of this function have been removed." 
" All of the necessary information is now in the DC survey class", - DeprecationWarning, - stacklevel=2, ) # Pre-allocate @@ -571,11 +569,6 @@ def plot_pseudosection( The axis object that holds the plot """ - - removed_kwargs = ["dim", "y_values", "sameratio", "survey_type"] - for kwarg in removed_kwargs: - if kwarg in kwargs: - raise TypeError(r"The {kwarg} keyword has been removed.") if len(kwargs) > 0: warnings.warn( f"plot_pseudosection unused kwargs: {list(kwargs.keys())}", stacklevel=2 @@ -1597,9 +1590,7 @@ def gettopoCC(mesh, ind_active, option="top"): raise NotImplementedError(f"{type(mesh)} mesh is not supported.") -def drapeTopotoLoc( - mesh, pts, active_cells=None, option="top", topo=None, ind_active=None -): +def drapeTopotoLoc(mesh, pts, active_cells=None, option="top", topo=None, **kwargs): """Drape locations right below discretized surface topography This function projects the set of locations provided to the discrete @@ -1620,28 +1611,15 @@ def drapeTopotoLoc( topo : (n, dim) numpy.ndarray Surface topography. Can be used if an active indices array cannot be provided for the input parameter 'ind_active' - ind_active : numpy.ndarray of int or bool, optional - - .. deprecated:: 0.23.0 - - Argument ``ind_active`` is deprecated in favor of ``active_cells`` - and will be removed in SimPEG v0.24.0. """ - # Deprecate ind_active argument - if ind_active is not None: - if active_cells is not None: - raise TypeError( - "Cannot pass both 'active_cells' and 'ind_active'." - "'ind_active' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - ) - warnings.warn( - "'ind_active' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - FutureWarning, - stacklevel=2, + + # Deprecate indActive argument + if kwargs.pop("indActive", None) is not None: + raise TypeError( + "'indActive' was removed in SimPEG v0.24.0, please use 'active_cells' instead." ) - active_cells = ind_active + if kwargs: # TODO Remove this when removing kwargs argument. + raise TypeError("Unsupported keyword argument " + kwargs.popitem()[0]) if isinstance(mesh, discretize.CurvilinearMesh): raise ValueError("Curvilinear mesh is not supported.") diff --git a/simpeg/electromagnetics/time_domain/simulation.py b/simpeg/electromagnetics/time_domain/simulation.py index c896e3d9d3..5f84cacbee 100644 --- a/simpeg/electromagnetics/time_domain/simulation.py +++ b/simpeg/electromagnetics/time_domain/simulation.py @@ -1,7 +1,6 @@ import numpy as np import scipy.sparse as sp -from ...data import Data from ...simulation import BaseTimeSimulation from ...utils import mkvc, sdiag, speye, Zero, validate_type, validate_float from ..base import BaseEMSimulation @@ -321,9 +320,8 @@ def Jtvec(self, m, v, f=None): self.model = m ftype = self._fieldType + "Solution" # the thing we solved for - # Ensure v is a data object. 
-        if not isinstance(v, Data):
-            v = Data(self.survey, v)
+        # Get dict of flat array slices for each source-receiver pair in the survey
+        survey_slices = self.survey.get_all_slices()

         df_duT_v = self.Fields_Derivs(self)
@@ -351,8 +349,9 @@ def Jtvec(self, m, v, f=None):
                 )

                 for rx in src.receiver_list:
+                    src_rx_slice = survey_slices[src, rx]
                     PT_v[src, "{}Deriv".format(rx.projField), :] = rx.evalDeriv(
-                        src, self.mesh, self.time_mesh, f, mkvc(v[src, rx]), adjoint=True
+                        src, self.mesh, self.time_mesh, f, v[src_rx_slice], adjoint=True
                     )  # this is +=

                     # PT_v = np.reshape(curPT_v,(len(curPT_v)/self.time_mesh.nN,
@@ -1208,9 +1207,8 @@ def Jtvec(self, m, v, f=None):
         self.model = m
         ftype = self._fieldType + "Solution"  # the thing we solved for

-        # Ensure v is a data object.
-        if not isinstance(v, Data):
-            v = Data(self.survey, v)
+        # Get dict of flat array slices for each source-receiver pair in the survey
+        survey_slices = self.survey.get_all_slices()

         df_duT_v = self.Fields_Derivs(self)
@@ -1238,8 +1236,9 @@ def Jtvec(self, m, v, f=None):
                 )

                 for rx in src.receiver_list:
+                    src_rx_slice = survey_slices[src, rx]
                     PT_v[src, "{}Deriv".format(rx.projField), :] = rx.evalDeriv(
-                        src, self.mesh, self.time_mesh, f, mkvc(v[src, rx]), adjoint=True
+                        src, self.mesh, self.time_mesh, f, v[src_rx_slice], adjoint=True
                     )  # this is +=
diff --git a/simpeg/electromagnetics/time_domain/simulation_1d.py b/simpeg/electromagnetics/time_domain/simulation_1d.py
index e2f5b8a6bb..a62bb9701e 100644
--- a/simpeg/electromagnetics/time_domain/simulation_1d.py
+++ b/simpeg/electromagnetics/time_domain/simulation_1d.py
@@ -243,13 +243,34 @@ def fields(self, m):
         sig = self.compute_complex_sigma(frequencies)
         mu = self.compute_complex_mu(frequencies)

-        rTE = rTE_forward(frequencies, unique_lambs, sig, mu, self.thicknesses)
+        # TODO geoana currently only supports real mu input.
+        rTE = rTE_forward(frequencies, unique_lambs, sig, mu.real, self.thicknesses)
         rTE = rTE[:, inv_lambs]
         v = ((C0s * rTE) @ self._fhtfilt.j0 + (C1s * rTE) @ self._fhtfilt.j1) @ W.T

         return self._project_to_data(v.T)

-    def getJ(self, m, f=None):
+    def _getJ(self, m, f=None):
+        """Build Jacobian matrix by blocks.
+
+        This method builds the Jacobian matrix by blocks, each block for a particular
+        invertible property (receiver height, conductivity, permeability, layer
+        thickness). Each block of the Jacobian matrix is stored within a dictionary.
+
+        Parameters
+        ----------
+        m : (n_param,) numpy.ndarray
+            The model parameters.
+        f : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        dict
+            Dictionary containing the blocks of the Jacobian matrix for the invertible
+            properties. The keys of the dictionary can be `"dh"`, `"ds"`, `"dmu"`, and
+            `"dthick"`.
+        """
         self.model = m
         if getattr(self, "_J", None) is None:
             self._J = {}
@@ -288,8 +309,8 @@ def getJ(self, m, f=None):
                 v_dh_temp = (
                     W
                     @ (
-                        (C0s_dh * rTE) @ self._fhtfilt.j0
-                        + (C1s_dh * rTE) @ self._fhtfilt.j1
+                        ((C0s_dh * rTE) @ self._fhtfilt.j0).real
+                        + ((C1s_dh * rTE) @ self._fhtfilt.j1).real
                     ).T
                 )
                 # need to re-arrange v_dh as it's currently (n_data x n_freqs)
@@ -346,6 +367,45 @@ def getJ(self, m, f=None):
             self._J["dthick"] = self._project_to_data(v_dthick)
         return self._J

+    def getJ(self, m, f=None):
+        r"""Get the Jacobian matrix.
+
+        This method generates and stores the full Jacobian matrix for the
+        model provided, i.e.:
+
+        ..
math:: + \mathbf{J} = \dfrac{\partial f(\mu(\mathbf{m}))}{\partial \mathbf{m}} + + where :math:`f()` is the forward modelling function, :math:`\mu()` is the + mapping, and :math:`\mathbf{m}` is the model vector. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + f : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + (n_data, n_param) numpy.ndarray + The full Jacobian matrix. + """ + Js = self._getJ(m, f=f) + + # Map parameters with their corresponding derivatives + param_and_derivs = { + "dh": self.hDeriv, + "ds": self.sigmaDeriv, + "dmu": self.muDeriv, + "dthick": self.thicknessesDeriv, + } + + # Compute J matrix + J = sum(Js[param] @ param_and_derivs[param] for param in Js) + + return J + def _project_to_data(self, v): As = self._As if v.ndim == 3: diff --git a/simpeg/electromagnetics/time_domain/sources.py b/simpeg/electromagnetics/time_domain/sources.py index fe7c18b8ab..89d8dc5ee3 100644 --- a/simpeg/electromagnetics/time_domain/sources.py +++ b/simpeg/electromagnetics/time_domain/sources.py @@ -1,12 +1,13 @@ import warnings +from discretize.utils import mkvc + import numpy as np from geoana.em.static import CircularLoopWholeSpace, MagneticDipoleWholeSpace from scipy.constants import mu_0 from ...utils import Zero, sdiag from ...utils.code_utils import ( - deprecate_property, validate_callable, validate_direction, validate_float, @@ -530,18 +531,6 @@ class TriangularWaveform(TrapezoidWaveform): """ def __init__(self, start_time, off_time, peak_time, **kwargs): - if kwargs.get("startTime", None): - AttributeError( - "startTime will be deprecated in 0.17.0. Please update your code to use start_time instead", - ) - if kwargs.get("peak_time", None): - AttributeError( - "peak_time will be deprecated in 0.17.0. Please update your code to use peak_time instead", - ) - if kwargs.get("offTime", None): - AttributeError( - "offTime will be deprecated in 0.17.0. 
Please update your code to use off_time instead",
-            )

         ramp_on = np.r_[start_time, peak_time]
         ramp_off = np.r_[peak_time, off_time]
@@ -1247,27 +1236,23 @@ def _srcFct(self, obsLoc, coordinates="cartesian"):

     def _aSrc(self, simulation):
         coordinates = "cartesian"
-        if simulation._formulation == "EB":
-            gridX = simulation.mesh.gridEx
-            gridY = simulation.mesh.gridEy
-            gridZ = simulation.mesh.gridEz
-
-        elif simulation._formulation == "HJ":
-            gridX = simulation.mesh.gridFx
-            gridY = simulation.mesh.gridFy
-            gridZ = simulation.mesh.gridFz

         if simulation.mesh._meshType == "CYL":
             coordinates = "cylindrical"
             if simulation.mesh.is_symmetric:
-                return self._srcFct(gridY)[:, 1]
+                if simulation._formulation != "EB":
+                    raise AssertionError(
+                        "For cylindrical symmetry, we must use the EB formulation of Maxwell's equations"
+                    )
+                return self._srcFct(simulation.mesh.edges, coordinates)[:, 1]

-        ax = self._srcFct(gridX, coordinates)[:, 0]
-        ay = self._srcFct(gridY, coordinates)[:, 1]
-        az = self._srcFct(gridZ, coordinates)[:, 2]
-        a = np.concatenate((ax, ay, az))
+        if simulation._formulation == "EB":
+            avec = self._srcFct(simulation.mesh.edges, coordinates)
+            return simulation.mesh.project_edge_vector(avec)

-        return a
+        elif simulation._formulation == "HJ":
+            avec = self._srcFct(simulation.mesh.faces, coordinates)
+            return simulation.mesh.project_face_vector(avec)

     def _getAmagnetostatic(self, simulation):
         if simulation._formulation == "EB":
@@ -1324,11 +1309,30 @@ def _phiSrc(self, simulation):

     def _bSrc(self, simulation):
         if simulation._formulation == "EB":
             C = simulation.mesh.edge_curl
+            return C * self._aSrc(simulation)

         elif simulation._formulation == "HJ":
-            C = simulation.mesh.edge_curl.T
+            return self.mu * self._hSrc(simulation)

-        return C * self._aSrc(simulation)
+    def _hSrc(self, simulation):
+        if simulation._formulation == "EB":
+            return 1 / self.mu * self._bSrc(simulation)
+
+        elif simulation._formulation == "HJ":
+            a = self._aSrc(simulation)
+
+            a_boundary = mkvc(self._srcFct(simulation.mesh.boundary_edges))
+            a_bc = simulation.mesh.boundary_edge_vector_integral * a_boundary
+
+            return (
+                1.0
+                / self.mu
+                * simulation.MeI
+                * simulation.mesh.edge_curl.T
+                * simulation.Mf
+                * a
+                - 1 / self.mu * simulation.MeI * a_bc
+            )

     def bInitial(self, simulation):
         """Compute initial magnetic flux density.
@@ -1380,11 +1384,7 @@ def hInitial(self, simulation):
         if self.waveform.has_initial_fields is False:
             return Zero()

-        # if simulation._formulation == 'EB':
-        #     return simulation.MfMui * self.bInitial(simulation)
-        # elif simulation._formulation == 'HJ':
-        #     return simulation.MeMuI * self.bInitial(simulation)
-        return 1.0 / self.mu * self.bInitial(simulation)
+        return self._hSrc(simulation)

     def s_m(self, simulation, time):
         """Magnetic source term (s_m) at a given time
@@ -1494,16 +1494,10 @@ def __init__(
         if location is None:
             location = np.r_[0.0, 0.0, 0.0]

-        if "moment" in kwargs:
-            kwargs.pop("moment")
-
-        # Raise error on deprecated arguments
-        if (key := "N") in kwargs.keys():
-            raise TypeError(f"'{key}' property has been removed.
Please use 'n_turns'.") self.n_turns = n_turns BaseTDEMSrc.__init__( - self, receiver_list=receiver_list, location=location, moment=None, **kwargs + self, receiver_list=receiver_list, location=location, **kwargs ) self.orientation = orientation @@ -1600,10 +1594,6 @@ def _srcFct(self, obsLoc, coordinates="cartesian"): out[np.isnan(out)] = 0 return self.n_turns * out - N = deprecate_property( - n_turns, "N", "n_turns", removal_version="0.19.0", error=True - ) - class LineCurrent(BaseTDEMSrc): """Line current source. diff --git a/simpeg/electromagnetics/utils/__init__.py b/simpeg/electromagnetics/utils/__init__.py index bf7fd197b9..ab1970bcbe 100644 --- a/simpeg/electromagnetics/utils/__init__.py +++ b/simpeg/electromagnetics/utils/__init__.py @@ -41,7 +41,6 @@ from .current_utils import ( edge_basis_function, getStraightLineCurrentIntegral, - getSourceTermLineCurrentPolygon, segmented_line_current_source_term, line_through_faces, ) diff --git a/simpeg/electromagnetics/utils/current_utils.py b/simpeg/electromagnetics/utils/current_utils.py index 959f6e1260..698c7a45b2 100644 --- a/simpeg/electromagnetics/utils/current_utils.py +++ b/simpeg/electromagnetics/utils/current_utils.py @@ -511,11 +511,3 @@ def not_aligned_error(i): ) return current - - -def getSourceTermLineCurrentPolygon(xorig, hx, hy, hz, px, py, pz): - """getSourceTermLineCurrentPolygon is deprecated. Use :func:`segmented_line_current_source_term`""" - raise NotImplementedError( - "getSourceTermLineCurrentPolygon has been deprecated and will be" - "removed in SimPEG 0.17.0. Please use segmented_line_current_source_term.", - ) diff --git a/simpeg/electromagnetics/utils/em1d_utils.py b/simpeg/electromagnetics/utils/em1d_utils.py index c551cb311d..fa77c48e1c 100644 --- a/simpeg/electromagnetics/utils/em1d_utils.py +++ b/simpeg/electromagnetics/utils/em1d_utils.py @@ -222,7 +222,7 @@ def LogUniform(f, chi_inf=0.05, del_chi=0.05, tau1=1e-5, tau2=1e-2): def get_splined_dlf_points(filt, v_min, v_max): - """ + """Get the splined points used for the digital linear filter. Parameters ---------- diff --git a/simpeg/electromagnetics/viscous_remanent_magnetization/receivers.py b/simpeg/electromagnetics/viscous_remanent_magnetization/receivers.py index 5d5823311f..cc1c730704 100644 --- a/simpeg/electromagnetics/viscous_remanent_magnetization/receivers.py +++ b/simpeg/electromagnetics/viscous_remanent_magnetization/receivers.py @@ -30,11 +30,6 @@ class Point(BaseRx): def __init__( self, locations=None, times=None, field_type=None, orientation="z", **kwargs ): - if kwargs.pop("fieldType", None): - raise AttributeError( - "'fieldType' is a deprecated property. Please use 'field_type' instead." - "'fieldType' be removed in SimPEG 0.17.0." - ) if field_type is None: raise AttributeError( "VRM receiver class cannot be instantiated witout 'field_type" @@ -179,18 +174,6 @@ def __init__( quadrature_order=3, **kwargs, ): - if "nTurns" in kwargs: - raise AttributeError( - "'nTurns' is a deprecated property. Please use 'n_turns' instead." - "'nTurns' be removed in SimPEG 0.17.0." - ) - - if "quadOrder" in kwargs: - raise AttributeError( - "'quadOrder' is a deprecated property. Please use 'quadrature_order' instead." - "'quadOrder' be removed in SimPEG 0.17.0." 
- ) - super(SquareLoop, self).__init__( locations=locations, times=times, diff --git a/simpeg/electromagnetics/viscous_remanent_magnetization/simulation.py b/simpeg/electromagnetics/viscous_remanent_magnetization/simulation.py index fe9f707ad5..6d4a63cecc 100644 --- a/simpeg/electromagnetics/viscous_remanent_magnetization/simulation.py +++ b/simpeg/electromagnetics/viscous_remanent_magnetization/simulation.py @@ -1,4 +1,3 @@ -import warnings import discretize import numpy as np import scipy.sparse as sp @@ -35,9 +34,13 @@ def __init__( refinement_factor=None, refinement_distance=None, active_cells=None, - indActive=None, **kwargs, ): + # Deprecate indActive argument + if kwargs.pop("indActive", None) is not None: + raise TypeError( + "'indActive' was removed in SimPEG v0.24.0, please use 'active_cells' instead." + ) self.mesh = mesh super().__init__(survey=survey, **kwargs) @@ -53,22 +56,6 @@ def __init__( ) self.refinement_distance = refinement_distance - # Deprecate indActive argument - if indActive is not None: - if active_cells is not None: - raise TypeError( - "Cannot pass both 'active_cells' and 'indActive'." - "'indActive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - ) - warnings.warn( - "'indActive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - FutureWarning, - stacklevel=2, - ) - active_cells = indActive - if active_cells is None: active_cells = np.ones(self.mesh.n_cells, dtype=bool) self.active_cells = active_cells @@ -160,8 +147,7 @@ def active_cells(self, value): "indActive", "active_cells", removal_version="0.24.0", - future_warn=True, - error=False, + error=True, ) def _getH0matrix(self, xyz, pp): diff --git a/simpeg/flow/richards/simulation.py b/simpeg/flow/richards/simulation.py index 20d5055c9e..29e5b10a53 100644 --- a/simpeg/flow/richards/simulation.py +++ b/simpeg/flow/richards/simulation.py @@ -10,7 +10,6 @@ from ...utils import ( validate_type, validate_ndarray_with_shape, - deprecate_property, validate_string, validate_integer, validate_float, @@ -37,9 +36,6 @@ def __init__( root_finder_tol=1e-4, **kwargs, ): - debug = kwargs.pop("debug", None) - if debug is not None: - self.debug = debug super().__init__(mesh=mesh, **kwargs) self.hydraulic_conductivity = hydraulic_conductivity self.water_retention = water_retention @@ -104,14 +100,6 @@ def initial_conditions(self, value): "initial_conditions", value ) - debug = deprecate_property( - BaseTimeSimulation.verbose, - "debug", - "verbose", - removal_version="0.19.0", - future_warn=True, - ) - @property def method(self): """Formulation used. 
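Editor's note on the removal pattern above: across these files, keyword arguments and properties that previously went through a ``FutureWarning`` shim (``indActive``, ``survey_type``, ``N``, ``drapeTopo``, ...) now fail immediately with ``TypeError`` or ``AttributeError``. Below is a minimal migration sketch under stated assumptions — the mesh, electrode locations, and active-cell array are illustrative placeholders, not part of this patch:

.. code::

    import numpy as np
    import discretize

    from simpeg.electromagnetics.static.utils.static_utils import drapeTopotoLoc

    # Hypothetical inputs: a small tensor mesh and two electrode locations.
    mesh = discretize.TensorMesh([8, 8, 8], origin="CCN")
    pts = np.array([[0.0, 0.0, 0.0], [0.1, 0.1, 0.0]])
    active = np.ones(mesh.n_cells, dtype=bool)

    # Passing indActive=active used to emit a FutureWarning; after this patch
    # it raises TypeError. The supported keyword is active_cells:
    locs = drapeTopotoLoc(mesh, pts, active_cells=active)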
diff --git a/simpeg/inverse_problem.py b/simpeg/inverse_problem.py
index c0d60667a6..7d07404d08 100644
--- a/simpeg/inverse_problem.py
+++ b/simpeg/inverse_problem.py
@@ -1,10 +1,13 @@
+import textwrap
+
 import numpy as np
 import scipy.sparse as sp
 import gc
+
 from .data_misfit import BaseDataMisfit
 from .regularization import BaseRegularization, WeightedLeastSquares, Sparse
 from .objective_function import BaseObjectiveFunction, ComboObjectiveFunction
-from .optimization import Minimize
+from .optimization import Minimize, BFGS
 from .utils import (
     call_hooks,
     timeIt,
@@ -12,9 +15,10 @@
     validate_float,
     validate_type,
     validate_ndarray_with_shape,
+    get_logger,
 )
 from .version import __version__ as simpeg_version
-from .utils.solver_utils import get_default_solver
+from .utils import get_default_solver


 class BaseInvProblem:
@@ -29,6 +33,7 @@ def __init__(
         debug=False,
         counter=None,
         print_version=True,
+        init_bfgs=True,
         **kwargs,
     ):
         super().__init__(**kwargs)
@@ -45,6 +50,7 @@ def __init__(
         self.counter = counter
         self.model = None
         self.print_version = print_version
+        self.init_bfgs = init_bfgs
         # TODO: Remove: (and make iteration printers better!)
         self.opt.parent = self
         self.reg.parent = self
@@ -175,6 +181,15 @@ def model(self, value):
                 delattr(self, prop)
         self._model = value

+    @property
+    def init_bfgs(self):
+        """Initialize BFGS minimizers with the inverse of the regularization's Hessian."""
+        return self._init_bfgs
+
+    @init_bfgs.setter
+    def init_bfgs(self, value):
+        self._init_bfgs = validate_type("init_bfgs", value, bool)
+
     @call_hooks("startup")
     def startup(self, m0):
         """startup(m0)
@@ -187,12 +202,14 @@ def startup(self, m0):
         if self.print_version:
             print(f"\nRunning inversion with SimPEG v{simpeg_version}")

+        logger = get_logger()
+
         for fct in self.reg.objfcts:
             if (
                 hasattr(fct, "reference_model")
                 and getattr(fct, "reference_model", None) is None
             ):
-                print(
+                logger.info(
                     "simpeg.InvProblem will set Regularization.reference_model to m0."
                 )
                 fct.reference_model = m0
@@ -202,39 +219,35 @@ def startup(self, m0):

         self.model = m0

-        set_default = True
-        for objfct in self.dmisfit.objfcts:
-            if (
-                isinstance(objfct, BaseDataMisfit)
-                and getattr(objfct.simulation, "solver", None) is not None
-            ):
-                solver = objfct.simulation.solver
-                solver_opts = objfct.simulation.solver_opts
-                print(
+        if self.init_bfgs and isinstance(self.opt, BFGS):
+
+            sim = None  # Find the first sim in data misfits that has a non-None solver attribute
+            for objfct in self.dmisfit.objfcts:
+                if (
+                    isinstance(objfct, BaseDataMisfit)
+                    and getattr(objfct.simulation, "solver", None) is not None
+                ):
+                    sim = objfct.simulation
+                    break
+            if sim is not None:
+                solver = sim.solver
+                solver_opts = sim.solver_opts
+                msg = f"""
+                simpeg.InvProblem is setting bfgsH0 to the inverse of the reg.deriv2
+                using the same solver as the {sim.__class__.__name__} simulation with the 'is_symmetric=True' option set.
                 """
-                    simpeg.InvProblem is setting bfgsH0 to the inverse of the eval2Deriv.
-                    ***Done using same Solver, and solver_opts as the {} problem***
-                    """.format(
-                    objfct.simulation.__class__.__name__
-                )
-                )
-                set_default = False
-                break
-        if set_default:
-            solver = get_default_solver()
-            print(
-                """
-                simpeg.InvProblem is setting bfgsH0 to the inverse of the eval2Deriv.
-                ***Done using the default solver {} and no solver_opts.***
-                """.format(
-                    solver.__name__
-                )
-            )
-            solver_opts = {}
+            else:
+                solver = get_default_solver()
+                msg = f"""
+                simpeg.InvProblem is setting bfgsH0 to the inverse of the reg.deriv2
+                using the default solver {solver.__name__} with the 'is_symmetric=True' option set.
+                """
+                solver_opts = dict(is_symmetric=True)

-        self.opt.bfgsH0 = solver(
-            sp.csr_matrix(self.reg.deriv2(self.model)), **solver_opts
-        )
+            logger.info(textwrap.dedent(msg))
+            self.opt.bfgsH0 = solver(
+                sp.csr_matrix(self.reg.deriv2(self.model)), **solver_opts
+            )
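A short usage sketch of the new `init_bfgs` switch wired up above (the objects other than the keyword are placeholders):

    # skip the bfgsH0 initialization entirely, e.g. when a directive
    # such as UpdatePreconditioner will supply its own preconditioner:
    inv_prob = BaseInvProblem(dmisfit, reg, opt, init_bfgs=False)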

     @property
     def warmstart(self):
diff --git a/simpeg/inversion.py b/simpeg/inversion.py
index a3e51cf541..e444b32f1e 100644
--- a/simpeg/inversion.py
+++ b/simpeg/inversion.py
@@ -1,7 +1,5 @@
-import numpy as np
-
-from .optimization import IterationPrinters, StoppingCriteria
-from .directives import DirectiveList
+from .optimization import IterationPrinters, StoppingCriteria, InexactGaussNewton
+from .directives import DirectiveList, UpdatePreconditioner
 from .utils import timeIt, Counter, validate_type, validate_string
@@ -107,9 +105,13 @@ def run(self, m0):

         Runs the inversion!
         """
+        if isinstance(self.opt, InexactGaussNewton) and any(
+            isinstance(drctv, UpdatePreconditioner) for drctv in self.directiveList
+        ):
+            self.invProb.init_bfgs = False
+
         self.invProb.startup(m0)
         self.directiveList.call("initialize")
-        print("model has any nan: {:b}".format(np.any(np.isnan(self.invProb.model))))
         self.m = self.opt.minimize(self.invProb.evalFunction, self.invProb.model)
         self.directiveList.call("finish")
diff --git a/simpeg/maps/__init__.py b/simpeg/maps/__init__.py
index d24d07aa9f..e912ef6bef 100644
--- a/simpeg/maps/__init__.py
+++ b/simpeg/maps/__init__.py
@@ -13,6 +13,7 @@
 from ._property_maps import (
     ChiMap,
     ComplexMap,
+    EffectiveSusceptibilityMap,
     ExpMap,
     LogisticSigmoidMap,
     LogMap,
diff --git a/simpeg/maps/_base.py b/simpeg/maps/_base.py
index 40de5d503c..e1ae8109d2 100644
--- a/simpeg/maps/_base.py
+++ b/simpeg/maps/_base.py
@@ -1198,11 +1198,24 @@ def deriv(self, m):


 class TileMap(IdentityMap):
-    """
-    Mapping for tiled inversion.
+    """Mapping for tiled inversion.

     Uses volume averaging to map a model defined on a global mesh to the local
     mesh. Everycell in the local mesh must also be in the global mesh.
+
+    Parameters
+    ----------
+    global_mesh : discretize.TreeMesh
+        Global TreeMesh defining the entire domain.
+    global_active : numpy.ndarray of bool or int
+        Defines the active cells in the global mesh.
+    local_mesh : discretize.TreeMesh
+        Local TreeMesh for the simulation.
+    tol : float, optional
+        Tolerance to avoid zero division.
+    components : int, optional
+        Number of components in the model. E.g. a vector model in 3D would have 3
+        components.
     """

     def __init__(
@@ -1230,7 +1243,7 @@ def __init__(
             Number of components in the model. E.g. a vector model in 3D would have 3
             components.
         """
-        super().__init__(mesh=None)
+        super().__init__(mesh=None, **kwargs)
         self._global_mesh = validate_type(
             "global_mesh", global_mesh, discretize.TreeMesh, cast=False
         )
diff --git a/simpeg/maps/_injection.py b/simpeg/maps/_injection.py
index e99c5bad10..68c02c2e61 100644
--- a/simpeg/maps/_injection.py
+++ b/simpeg/maps/_injection.py
@@ -2,7 +2,6 @@
 Injection and interpolation map classes.
 """

-import warnings
 import discretize
 import numpy as np
 import scipy.sparse as sp
@@ -23,13 +22,19 @@ class Mesh2Mesh(IdentityMap):

     Takes a model on one mesh are translates it to another mesh.
""" - def __init__(self, meshes, active_cells=None, indActive=None, **kwargs): + def __init__(self, meshes, active_cells=None, **kwargs): # Sanity checks for the meshes parameter try: mesh, mesh2 = meshes except TypeError: raise TypeError("Couldn't unpack 'meshes' into two meshes.") + # Deprecate indActive argument + if kwargs.pop("indActive", None) is not None: + raise TypeError( + "'indActive' was removed in SimPEG v0.24.0, please use 'active_cells' instead.", + ) + super().__init__(mesh=mesh, **kwargs) self.mesh2 = mesh2 @@ -40,22 +45,6 @@ def __init__(self, meshes, active_cells=None, indActive=None, **kwargs): + "Both meshes must have the same dimension." ) - # Deprecate indActive argument - if indActive is not None: - if active_cells is not None: - raise TypeError( - "Cannot pass both 'active_cells' and 'indActive'." - "'indActive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - ) - warnings.warn( - "'indActive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - FutureWarning, - stacklevel=2, - ) - active_cells = indActive - self.active_cells = active_cells # reset to not accepted None for mesh @@ -100,8 +89,7 @@ def active_cells(self, value): "indActive", "active_cells", removal_version="0.24.0", - future_warn=True, - error=False, + error=True, ) @property @@ -165,20 +153,6 @@ class InjectActiveCells(IdentityMap): or a ``numpy.ndarray`` of ``int`` containing the indices of the active cells. value_inactive : float or numpy.ndarray The physical property value assigned to all inactive cells in the mesh - indActive : numpy.ndarray - - .. deprecated:: 0.23.0 - - Argument ``indActive`` is deprecated in favor of ``active_cells`` and will - be removed in SimPEG v0.24.0. - - valInactive : float or numpy.ndarray - - .. deprecated:: 0.23.0 - - Argument ``valInactive`` is deprecated in favor of ``value_inactive`` and - will be removed in SimPEG v0.24.0. - """ def __init__( @@ -187,43 +161,23 @@ def __init__( active_cells=None, value_inactive=0.0, nC=None, - indActive=None, - valInactive=0.0, + **kwargs, ): self.mesh = mesh self.nC = nC or mesh.nC # Deprecate indActive argument - if indActive is not None: - if active_cells is not None: - raise TypeError( - "Cannot pass both 'active_cells' and 'indActive'." - "'indActive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - ) - warnings.warn( - "'indActive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - FutureWarning, - stacklevel=2, + if kwargs.pop("indActive", None) is not None: + raise TypeError( + "'indActive' was removed in SimPEG v0.24.0, please use 'active_cells' instead." ) - active_cells = indActive - # Deprecate valInactive argument - if not isinstance(valInactive, Number) or valInactive != 0.0: - if not isinstance(value_inactive, Number) or value_inactive != 0.0: - raise TypeError( - "Cannot pass both 'value_inactive' and 'valInactive'." - "'valInactive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'value_inactive' instead.", - ) - warnings.warn( - "'valInactive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'value_inactive' instead.", - FutureWarning, - stacklevel=2, + if kwargs.pop("valInactive", None) is not None: + raise TypeError( + "'valInactive' was removed in SimPEG v0.24.0, please use 'value_inactive' instead." 
+            )
-            value_inactive = valInactive
+        if kwargs:  # TODO Remove this when removing kwargs argument.
+            raise TypeError("Unsupported keyword argument " + kwargs.popitem()[0])

         self.active_cells = active_cells
         self._nP = np.sum(self.active_cells)
@@ -260,13 +214,12 @@ def value_inactive(self, value):
         "valInactive",
         "value_inactive",
         removal_version="0.24.0",
-        future_warn=True,
-        error=False,
+        error=True,
     )

     @property
     def active_cells(self):
-        """
+        """A boolean array representing the active values in the map's output array.

         Returns
         -------
@@ -286,8 +239,7 @@ def active_cells(self, value):
         "indActive",
         "active_cells",
         removal_version="0.24.0",
-        future_warn=True,
-        error=False,
+        error=True,
     )

     @property
diff --git a/simpeg/maps/_parametric.py b/simpeg/maps/_parametric.py
index 814808eb84..06b8e05b7c 100644
--- a/simpeg/maps/_parametric.py
+++ b/simpeg/maps/_parametric.py
@@ -2,7 +2,6 @@
 Parametric map classes.
 """

-import warnings
 import discretize
 import numpy as np
 from numpy.polynomial import polynomial
@@ -335,12 +334,6 @@ class ParametricPolyMap(IdentityMap):
         Active cells array. Can be a boolean ``numpy.ndarray`` of length
         ``mesh.n_cells`` or a ``numpy.ndarray`` of ``int`` containing the indices
         of the active cells.
-    actInd : numpy.ndarray, optional
-
-        .. deprecated:: 0.23.0
-
-           Argument ``actInd`` is deprecated in favor of ``active_cells`` and will
-           be removed in SimPEG v0.24.0.

     Examples
     --------
@@ -404,7 +397,7 @@ def __init__(
         normal="X",
         active_cells=None,
         slope=1e4,
-        actInd=None,
+        **kwargs,
     ):
         super().__init__(mesh=mesh)
         self.logSigma = logSigma
@@ -412,21 +405,13 @@
         self.normal = normal
         self.slope = slope

-        # Deprecate actInd argument
-        if actInd is not None:
-            if active_cells is not None:
-                raise TypeError(
-                    "Cannot pass both 'active_cells' and 'actInd'."
-                    "'actInd' has been deprecated and will be removed in "
-                    " SimPEG v0.24.0, please use 'active_cells' instead.",
-                )
-            warnings.warn(
-                "'actInd' has been deprecated and will be removed in "
-                " SimPEG v0.24.0, please use 'active_cells' instead.",
-                FutureWarning,
-                stacklevel=2,
+        # Deprecate actInd argument
+        if kwargs.pop("actInd", None) is not None:
+            raise TypeError(
+                "'actInd' was removed in SimPEG v0.24.0, please use 'active_cells' instead."
             )
-            active_cells = actInd
+        if kwargs:  # TODO Remove this when removing kwargs argument.
+            raise TypeError("Unsupported keyword argument " + kwargs.popitem()[0])

         if active_cells is None:
             active_cells = np.ones(mesh.n_cells, dtype=bool)
@@ -498,8 +483,7 @@ def active_cells(self, value):
         "actInd",
         "active_cells",
         removal_version="0.24.0",
-        future_warn=True,
-        error=False,
+        error=True,
     )

     @property
@@ -1103,13 +1087,6 @@ class BaseParametric(IdentityMap):
     active_cells : numpy.ndarray, optional
         Active cells array. Can be a boolean ``numpy.ndarray`` of length *mesh.nC*
         or a ``numpy.ndarray`` of ``int`` containing the indices of the active cells.
-    indActive : numpy.ndarray
-
-        .. deprecated:: 0.23.0
-
-           Argument ``indActive`` is deprecated in favor of ``active_cells`` and will
-           be removed in SimPEG v0.24.0.
-
     """

@@ -1119,26 +1096,15 @@ def __init__(
         self,
         slope=None,
         slopeFact=1.0,
         active_cells=None,
-        indActive=None,
         **kwargs,
     ):
-        super(BaseParametric, self).__init__(mesh, **kwargs)
-        # Deprecate indActive argument
-        if indActive is not None:
-            if active_cells is not None:
-                raise TypeError(
-                    "Cannot pass both 'active_cells' and 'indActive'."
- "'indActive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - ) - warnings.warn( - "'indActive' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - FutureWarning, - stacklevel=2, + if kwargs.pop("indActive", None) is not None: + raise TypeError( + "'indActive' was removed in SimPEG v0.24.0, please use 'active_cells' instead." ) - active_cells = indActive + + super(BaseParametric, self).__init__(mesh, **kwargs) self.active_cells = active_cells self.slopeFact = slopeFact @@ -1189,8 +1155,7 @@ def active_cells(self, value): "indActive", "active_cells", removal_version="0.24.0", - future_warn=True, - error=False, + error=True, ) @property @@ -1321,12 +1286,6 @@ class ParametricLayer(BaseParametric): slopeFact : float Scaling factor for the sharpness of the boundaries based on cell size. Using this option, we set *a = slopeFact / dh*. - indActive : numpy.ndarray - - .. deprecated:: 0.23.0 - - Argument ``indActive`` is deprecated in favor of ``active_cells`` and will - be removed in SimPEG v0.24.0. Examples -------- @@ -1358,9 +1317,6 @@ class ParametricLayer(BaseParametric): """ - def __init__(self, mesh, **kwargs): - super().__init__(mesh, **kwargs) - @property def nP(self): """Number of model parameters the mapping acts on; i.e 4 @@ -1589,12 +1545,6 @@ class ParametricBlock(BaseParametric): Epsilon value used in the ekblom representation of the block p : float p-value used in the ekblom representation of the block. - indActive : numpy.ndarray - - .. deprecated:: 0.23.0 - - Argument ``indActive`` is deprecated in favor of ``active_cells`` and will - be removed in SimPEG v0.24.0. Examples -------- @@ -1938,12 +1888,6 @@ class ParametricEllipsoid(ParametricBlock): Using this option, we set *a = slopeFact / dh*. epsilon : float Epsilon value used in the ekblom representation of the block - indActive : numpy.ndarray - - .. deprecated:: 0.23.0 - - Argument ``indActive`` is deprecated in favor of ``active_cells`` and will - be removed in SimPEG v0.24.0. Examples -------- diff --git a/simpeg/maps/_property_maps.py b/simpeg/maps/_property_maps.py index 87bf56b48b..88005dc2d6 100644 --- a/simpeg/maps/_property_maps.py +++ b/simpeg/maps/_property_maps.py @@ -3,6 +3,7 @@ """ import warnings +from numbers import Real import numpy as np import scipy.sparse as sp from scipy.sparse.linalg import LinearOperator @@ -554,6 +555,53 @@ def inverse(self, m): return m / mu_0 - 1 +class EffectiveSusceptibilityMap(IdentityMap): + r"""Effective susceptibility Map + + Parameters + ---------- + mesh : discretize.BaseMesh + The number of parameters accepted by the mapping is set to equal the number + of mesh cells. + nP : int + Set the number of parameters accepted by the mapping directly. Used if the + number of parameters is known. Used generally when the number of parameters + is not equal to the number of cells in a mesh. + ambient_field_magnitude : float + The magnitude of the ambient geomagnetic field in nT. + + Notes + ----- + This map converts effective susceptibility values (:math:`\chi_\text{eff}`) into magnetic + polarization (:math:`\mathbf{I}`): + + .. math:: + \mathbf{I} = \mu_0 \mathbf{M} = \chi_\text{eff} \lVert \mathbf{B}_0 \rVert + + where :math:`\mathbf{M}` is the magnetization vector, and + :math:`\lVert \mathbf{B}_0 \rVert` is the magnitude of the ambient field in nT. 
+ """ + + def __init__(self, ambient_field_magnitude, mesh=None, nP=None, **kwargs): + super().__init__(mesh=mesh, nP=nP, **kwargs) + if not isinstance(ambient_field_magnitude, Real): + raise TypeError( + "ambient_field_magnitude must be a float (or int convertible to float)" + ) + self.ambient_field_magnitude = ambient_field_magnitude + + def _transform(self, m): + return m * self.ambient_field_magnitude + + def deriv(self, m, v=None): + if v is not None: + return self.ambient_field_magnitude * v + return self.ambient_field_magnitude * sp.eye(self.nP) + + def inverse(self, m): + return m / self.ambient_field_magnitude + + class MuRelative(IdentityMap): r"""Mapping that computes the magnetic permeability given a set of relative permeabilities. diff --git a/simpeg/objective_function.py b/simpeg/objective_function.py index f5afd55bc8..9188168b79 100644 --- a/simpeg/objective_function.py +++ b/simpeg/objective_function.py @@ -416,6 +416,7 @@ def __init__( self.objfcts = objfcts self._multipliers = multipliers self._unpack_on_add = unpack_on_add + self._last_obj_vals = [np.nan] * len(objfcts) def __len__(self): return len(self.multipliers) @@ -454,6 +455,7 @@ def multipliers(self, value): def __call__(self, m, f=None): """Evaluate the objective functions for a given model.""" fct = 0.0 + obj_vals = [] for i, phi in enumerate(self): multiplier, objfct = phi if multiplier == 0.0: # don't evaluate the fct @@ -463,6 +465,8 @@ def __call__(self, m, f=None): else: objective_func_value = objfct(m) fct += multiplier * objective_func_value + obj_vals.append(objective_func_value) + self._last_obj_vals = obj_vals return fct def deriv(self, m, f=None): diff --git a/simpeg/optimization.py b/simpeg/optimization.py index f7b47e70d5..343ff132ab 100644 --- a/simpeg/optimization.py +++ b/simpeg/optimization.py @@ -1,8 +1,89 @@ +""" +======================================================== +SimPEG Optimizers (:mod:`simpeg.optimization`) +======================================================== +.. currentmodule:: simpeg.optimization + +Optimizers +========== + +These optimizers are available within SimPEG for use during inversion. + +Unbound Optimizers +------------------ + +These optimizers all work on unbound minimization functions. + +.. autosummary:: + :toctree: generated/ + + SteepestDescent + BFGS + GaussNewton + InexactGaussNewton + +Box Bounded Optimizers +---------------------- +These optimizers support box bound constraints on the model parameters + +.. autosummary:: + :toctree: generated/ + + ProjectedGradient + ProjectedGNCG + +Root Finding +------------ +.. autosummary:: + :toctree: generated/ + + NewtonRoot + +Minimization Base Classes +=========================== + +These classes are usually inherited or used by the optimization algorithms +above to control their execution. + +Base Minimizer +-------------- +.. autosummary:: + :toctree: generated/ + + Minimize + + +Minimizer Mixins +---------------- +.. autosummary:: + :toctree: generated/ + + Remember + Bounded + InexactCG + +Iteration Printers and Stoppers +------------------------------- +.. 
autosummary:: + :toctree: generated/ + + IterationPrinters + StoppingCriteria + +""" + +import warnings +from collections.abc import Callable +from typing import Any, Optional + import numpy as np -import scipy +import numpy.typing as npt import scipy.sparse as sp +from discretize.utils import Identity + +from pymatsolver import Solver, SolverCG -from pymatsolver import Solver, Diagonal, SolverCG +from .typing import MinimizeCallable from .utils import ( call_hooks, check_stoppers, @@ -13,28 +94,16 @@ print_line, print_stoppers, print_done, + validate_float, + validate_integer, + validate_type, + validate_ndarray_with_shape, + deprecate_property, ) norm = np.linalg.norm -# Create a flag if the installed version of SciPy is newer or equal to 1.12.0 -# (Used to choose whether to pass `tol` or `rtol` to the solvers. See #1516). -class Version: - def __init__(self, version): - self.version = version - - def as_tuple(self) -> tuple[int, int]: - major, minor = tuple(int(p) for p in self.version.split(".")[:2]) - return (major, minor) - - def __ge__(self, other): - return self.as_tuple() >= other.as_tuple() - - -SCIPY_1_12 = Version(scipy.__version__) >= Version("1.12.0") - - __all__ = [ "Minimize", "Remember", @@ -43,6 +112,7 @@ def __ge__(self, other): "GaussNewton", "InexactGaussNewton", "ProjectedGradient", + "ProjectedGNCG", "NewtonRoot", "StoppingCriteria", "IterationPrinters", @@ -140,112 +210,153 @@ class StoppingCriteria(object): class IterationPrinters(object): """docstring for IterationPrinters""" - iteration = {"title": "#", "value": lambda M: M.iter, "width": 5, "format": "%3d"} - f = {"title": "f", "value": lambda M: M.f, "width": 10, "format": "%1.2e"} + iteration = { + "title": "#", + "value": lambda M: M.iter, + "width": 5, + "format": lambda v: f"{v:3d}", + } + f = { + "title": "f", + "value": lambda M: M.f, + "width": 10, + "format": lambda v: f"{v:1.2e}", + } norm_g = { "title": "|proj(x-g)-x|", - "value": lambda M: norm(M.projection(M.xc - M.g) - M.xc), + "value": lambda M: ( + None if M.iter == 0 else norm(M.projection(M.xc - M.g) - M.xc) + ), "width": 15, - "format": "%1.2e", + "format": lambda v: f"{v:1.2e}", + } + totalLS = { + "title": "LS", + "value": lambda M: None if M.iter == 0 else M.iterLS, + "width": 5, + "format": lambda v: f"{v:d}", } - totalLS = {"title": "LS", "value": lambda M: M.iterLS, "width": 5, "format": "%d"} iterationLS = { "title": "#", "value": lambda M: (M.iter, M.iterLS), "width": 5, - "format": "%3d.%d", + "format": lambda v: f"{v[0]:3d}.{v[1]:d}", + } + LS_ft = { + "title": "ft", + "value": lambda M: M._LS_ft, + "width": 10, + "format": lambda v: f"{v:1.2e}", + } + LS_t = { + "title": "t", + "value": lambda M: M._LS_t, + "width": 10, + "format": lambda v: f"{v:0.5f}", } - LS_ft = {"title": "ft", "value": lambda M: M._LS_ft, "width": 10, "format": "%1.2e"} - LS_t = {"title": "t", "value": lambda M: M._LS_t, "width": 10, "format": "%0.5f"} LS_armijoGoldstein = { "title": "f + alp*g.T*p", "value": lambda M: M.f + M.LSreduction * M._LS_descent, "width": 16, - "format": "%1.2e", + "format": lambda v: f"{v:1.2e}", } LS_WolfeCurvature = { "title": "alp*g.T*p", "str": "%d : ft = %1.4e >= alp*descent = %1.4e", "value": lambda M: M.LScurvature * M._LS_descent, "width": 16, - "format": "%1.2e", + "format": lambda v: f"{v:1.2e}", } itType = { "title": "itType", "value": lambda M: M._itType, "width": 8, - "format": "%s", + "format": lambda v: f"{v:s}", } aSet = { "title": "aSet", - "value": lambda M: np.sum(M.activeSet(M.xc)), + "value": lambda M: None if 
M.iter == 0 else np.sum(M.activeSet(M.xc)),
         "width": 8,
-        "format": "%d",
+        "format": lambda v: f"{v:d}",
     }
     bSet = {
         "title": "bSet",
-        "value": lambda M: np.sum(M.bindingSet(M.xc)),
+        "value": lambda M: None if M.iter == 0 else np.sum(M.bindingSet(M.xc)),
         "width": 8,
-        "format": "%d",
+        "format": lambda v: f"{v:d}",
     }
     comment = {
         "title": "Comment",
         "value": lambda M: M.comment,
         "width": 12,
-        "format": "%s",
+        "format": lambda v: f"{v:s}",
     }
     beta = {
         "title": "beta",
         "value": lambda M: M.parent.beta,
         "width": 10,
-        "format": "%1.2e",
+        "format": lambda v: f"{v:1.2e}",
     }
     phi_d = {
         "title": "phi_d",
         "value": lambda M: M.parent.phi_d,
         "width": 10,
-        "format": "%1.2e",
+        "format": lambda v: f"{v:1.2e}",
     }
     phi_m = {
         "title": "phi_m",
         "value": lambda M: M.parent.phi_m,
         "width": 10,
-        "format": "%1.2e",
+        "format": lambda v: f"{v:1.2e}",
     }
     phi_s = {
         "title": "phi_s",
         "value": lambda M: M.parent.phi_s,
         "width": 10,
-        "format": "%1.2e",
+        "format": lambda v: f"{v:1.2e}",
     }
     phi_x = {
         "title": "phi_x",
         "value": lambda M: M.parent.phi_x,
         "width": 10,
-        "format": "%1.2e",
+        "format": lambda v: f"{v:1.2e}",
     }
     phi_y = {
         "title": "phi_y",
         "value": lambda M: M.parent.phi_y,
         "width": 10,
-        "format": "%1.2e",
+        "format": lambda v: f"{v:1.2e}",
     }
     phi_z = {
         "title": "phi_z",
         "value": lambda M: M.parent.phi_z,
         "width": 10,
-        "format": "%1.2e",
+        "format": lambda v: f"{v:1.2e}",
     }
     iterationCG = {
-        "title": "iterCG",
-        "value": lambda M: M.cg_count,
+        "title": "iter_CG",
+        "value": lambda M: getattr(M, "cg_count", None),
         "width": 10,
-        "format": "%3d",
+        "format": lambda v: f"{v:d}",
+    }
+
+    iteration_CG_rel_residual = {
+        "title": "CG |Ax-b|/|b|",
+        "value": lambda M: getattr(M, "cg_rel_resid", None),
+        "width": 15,
+        "format": lambda v: f"{v:1.2e}",
+    }
+
+    iteration_CG_abs_residual = {
+        "title": "CG |Ax-b|",
+        "value": lambda M: getattr(M, "cg_abs_resid", None),
+        "width": 11,
+        "format": lambda v: f"{v:1.2e}",
     }


@@ -268,10 +379,11 @@ class Minimize(object):
     tolX = 1e-1  #: Tolerance on norm(x) movement
     tolG = 1e-1  #: Tolerance on gradient norm
     eps = 1e-5  #: Small value
+    require_decrease = True  #: Require decrease in the objective function. If False, we will still take a step when the linesearch fails.

     stopNextIteration = False  #: Stops the optimization program nicely.
     use_WolfeCurvature = False  #: add the Wolfe Curvature criteria for line search

-    force_line_search = True
+
     debug = False  #: Print debugging information
     debugLS = False  #: Print debugging information for the line-search
@@ -327,70 +439,68 @@ def __init__(self, **kwargs):
         ]

     @property
-    def callback(self):
+    def callback(self) -> Optional[Callable[[np.ndarray], Any]]:
+        """A user-defined callback function.
+
+        Returns
+        -------
+        None or Callable[[np.ndarray], Any]
+            The optional user-supplied callback function accepting the current
+            iteration value as an input.
+        """
         return getattr(self, "_callback", None)

     @callback.setter
-    def callback(self, value):
+    def callback(self, value: Callable[[np.ndarray], Any]):
         if self.callback is not None:
             print(
-                "The callback on the {0!s} Optimization was "
-                "replaced.".format(self.__class__.__name__)
+                f"The callback on the {self.__class__.__name__} minimizer was replaced."
             )
         self._callback = value
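Before the `minimize` definition that follows, a minimal sketch of the evalFunction protocol it accepts (a strictly convex quadratic; the optimizer settings are illustrative):

    import numpy as np
    from simpeg.optimization import BFGS

    def evalFunction(x, return_g=False, return_H=False):
        f = 0.5 * float(x @ x)  # f(x) = 0.5 * ||x||^2
        out = (f,)
        if return_g:
            out += (x,)  # gradient of f
        if return_H:
            out += (np.eye(x.size),)  # Hessian of f
        return out if len(out) > 1 else out[0]

    opt = BFGS(maxIter=20)
    x_min = opt.minimize(evalFunction, np.array([2.0, -3.0]))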

     @timeIt
-    def minimize(self, evalFunction, x0):
+    def minimize(self, evalFunction: MinimizeCallable, x0: np.ndarray) -> np.ndarray:
         """minimize(evalFunction, x0)

         Minimizes the function (evalFunction) starting at the location x0.

-        :param callable evalFunction: function handle that evaluates: f, g, H = F(x)
-        :param numpy.ndarray x0: starting location
-        :rtype: numpy.ndarray
-        :return: x, the last iterate of the optimization algorithm
-
-        evalFunction is a function handle::
-
-            (f[, g][, H]) = evalFunction(x, return_g=False, return_H=False )
-
-            def evalFunction(x, return_g=False, return_H=False):
-                out = (f,)
-                if return_g:
-                    out += (g,)
-                if return_H:
-                    out += (H,)
-                return out if len(out) > 1 else out[0]
-
-
-        The algorithm for general minimization is as follows::
-
-            startup(x0)
-            printInit()
-
-            while True:
-                doStartIteration()
-                f, g, H = evalFunction(xc)
-                printIter()
-                if stoppingCriteria(): break
-                p = findSearchDirection()
-                p = scaleSearchDirection(p)
-                xt, passLS = modifySearchDirection(p)
-                if not passLS:
-                    xt, caught = modifySearchDirectionBreak(p)
-                    if not caught: return xc
-                doEndIteration(xt)
-
-            print_done()
-            finish()
-            return xc
+        Parameters
+        ----------
+        evalFunction : callable
+            The objective function to be minimized::
+
+                evalFunction(
+                    x: numpy.ndarray,
+                    return_g: bool,
+                    return_H: bool
+                ) -> (
+                    float
+                    | tuple[float, numpy.ndarray]
+                    | tuple[float, LinearOperator]
+                    | tuple[float, numpy.ndarray, LinearOperator]
+                )
+
+            That will optionally return the gradient as a ``numpy.ndarray`` and the
+            Hessian as any class that supports matrix-vector multiplication using
+            the `*` operator.
+
+        x0 : numpy.ndarray
+            Initial guess.
+
+        Returns
+        -------
+        x_min : numpy.ndarray
+            The last iterate of the optimization algorithm.
         """
         self.evalFunction = evalFunction
         self.startup(x0)
         self.printInit()

-        if self.print_type != "ubc":
-            print("x0 has any nan: {:b}".format(np.any(np.isnan(x0))))
+        if np.any(np.isnan(x0)):
+            raise ValueError("x0 has a nan.")
+        # self.f = evalFunction(
+        #     self.xc, return_g=False, return_H=False
+        # )  # will stash the fields objects
+        # self.printIter()

         while True:
             self.doStartIteration()
             self.f, self.g, self.H = evalFunction(self.xc, return_g=True, return_H=True)
@@ -403,10 +513,14 @@ def evalFunction(x, return_g=False, return_H=False):
             )  #: Doing this saves memory, as it is not needed in the rest of the computations.
             p = self.scaleSearchDirection(self.searchDirection)
             xt, passLS = self.modifySearchDirection(p)
-            if not passLS and not self.force_line_search:
-                xt, caught = self.modifySearchDirectionBreak(p)
-                if not caught:
-                    return self.xc
+
+            if not passLS:
+                if self.require_decrease is True:
+                    xt, caught = self.modifySearchDirectionBreak(p)
+                    if not caught:
+                        return self.xc
+                else:
+                    print("Linesearch failed. Stepping anyway...")

             self.doEndIteration(xt)
             if self.stopNextIteration:
@@ -418,9 +532,8 @@ def evalFunction(x, return_g=False, return_H=False):
         return self.xc

     @call_hooks("startup")
-    def startup(self, x0):
-        """
-        **startup** is called at the start of any new minimize call.
+    def startup(self, x0: np.ndarray) -> None:
+        """Called at the start of any new minimize call.

         This will set::
@@ -428,16 +541,21 @@
             xc = x0
             iter = iterLS = 0

-        :param numpy.ndarray x0: initial x
-        :rtype: None
-        :return: None
+        Parameters
+        ----------
+        x0 : numpy.ndarray
+            initial x
         """
         self.iter = 0
         self.iterLS = 0
         self.stopNextIteration = False

-        x0 = self.projection(x0)  # ensure that we start of feasible.
+        try:
+            x0 = self.projection(x0)  # ensure that we start off feasible.
+ except Exception as err: + raise RuntimeError("Initial model is not projectable") from err + self.x0 = x0 self.xc = x0 self.f_last = np.nan @@ -445,34 +563,34 @@ def startup(self, x0): @count @call_hooks("doStartIteration") - def doStartIteration(self): - """doStartIteration() - - **doStartIteration** is called at the start of each minimize - iteration. - - :rtype: None - :return: None - """ + def doStartIteration(self) -> None: + """Called at the start of each minimize iteration.""" pass - def printInit(self, inLS=False): - """ - **printInit** is called at the beginning of the optimization - routine. + def printInit(self, inLS: bool = False) -> None: + """Called at the beginning of the optimization routine. If there is a parent object, printInit will check for a parent.printInit function and call that. + Parameters + ---------- + inLS : bool + Whether this is being called from a line search. + """ pad = " " * 10 if inLS else "" name = self.name if not inLS else self.nameLS print_titles(self, self.printers if not inLS else self.printersLS, name, pad) @call_hooks("printIter") - def printIter(self, inLS=False): - """ - **printIter** is called directly after function evaluations. + def printIter(self, inLS: bool = False) -> None: + """Called directly after function evaluations. + + Parameters + ---------- + inLS : bool + Whether this is being called from a line search. If there is a parent object, printIter will check for a parent.printIter function and call that. @@ -481,13 +599,17 @@ def printIter(self, inLS=False): pad = " " * 10 if inLS else "" print_line(self, self.printers if not inLS else self.printersLS, pad=pad) - def printDone(self, inLS=False): - """ - **printDone** is called at the end of the optimization routine. + def printDone(self, inLS: bool = False) -> None: + """Called at the end of the optimization routine. If there is a parent object, printDone will check for a parent.printDone function and call that. + Parameters + ---------- + inLS : bool + Whether this is being called from a line search. + """ pad = " " * 10 if inLS else "" stop, done = ( @@ -507,7 +629,6 @@ def printDone(self, inLS=False): self.printers, pad=pad, ) - print(self.print_target) except AttributeError: print_done( self, @@ -518,18 +639,11 @@ def printDone(self, inLS=False): print_stoppers(self, stoppers, pad="", stop=stop, done=done) @call_hooks("finish") - def finish(self): - """finish() - - **finish** is called at the end of the optimization. - - :rtype: None - :return: None - - """ + def finish(self) -> None: + """Called at the end of the optimization.""" pass - def stoppingCriteria(self, inLS=False): + def stoppingCriteria(self, inLS: bool = False) -> bool: if self.iter == 0: self.f0 = self.f self.g0 = self.g @@ -537,63 +651,70 @@ def stoppingCriteria(self, inLS=False): @timeIt @call_hooks("projection") - def projection(self, p): - """projection(p) + def projection(self, p: np.ndarray) -> np.ndarray: + """Projects a model onto bounds (if given) - projects the search direction. + By default, no projection is applied. - by default, no projection is applied. + Parameters + ---------- + p : numpy.ndarray + The model to project - :param numpy.ndarray p: searchDirection - :rtype: numpy.ndarray - :return: p, projected search direction + Returns + ------- + numpy.ndarray + The projected model. """ return p @timeIt - def findSearchDirection(self): - """findSearchDirection() + def findSearchDirection(self) -> np.ndarray: + """Return the direction to search along for a minimum value. 
-        **findSearchDirection** should return an approximation of:
+        Returns
+        -------
+        numpy.ndarray
+            The search direction.

-        .. math::
+        Notes
+        -----
+        This should usually return an approximation of:

-            H p = - g
+        .. math::

-        Where you are solving for the search direction, p
+            p = - H^{-1} g

         The default is:

         .. math::

-            H = I
-
-            p = - g
+            p = - g

-        And corresponds to SteepestDescent.
+        Corresponding to the steepest descent direction.

         The latest function evaluations are present in::

             self.f, self.g, self.H
-
-        :rtype: numpy.ndarray
-        :return: p, Search Direction
         """
         return -self.g

     @count
-    def scaleSearchDirection(self, p):
-        """scaleSearchDirection(p)
+    def scaleSearchDirection(self, p: np.ndarray) -> np.ndarray:
+        """Scales the search direction if appropriate.

-        **scaleSearchDirection** should scale the search direction if
-        appropriate.
+        Set the parameter ``maxStep`` in the minimize object, to scale back
+        the search direction to a maximum size.

-        Set the parameter **maxStep** in the minimize object, to scale back
-        the gradient to a maximum size.
+        Parameters
+        ----------
+        p : numpy.ndarray
+            The current search direction.

-        :param numpy.ndarray p: searchDirection
-        :rtype: numpy.ndarray
-        :return: p, Scaled Search Direction
+        Returns
+        -------
+        numpy.ndarray
+            The scaled search direction.
         """
         if self.maxStep < np.abs(p.max()):
@@ -603,12 +724,21 @@

     nameLS = "Armijo linesearch"  #: The line-search name

     @timeIt
-    def modifySearchDirection(self, p):
-        """modifySearchDirection(p)
+    def modifySearchDirection(self, p: np.ndarray) -> tuple[np.ndarray, bool]:
+        """Changes the search direction based on some sort of linesearch or trust-region criteria.

-        **modifySearchDirection** changes the search direction based on
-        some sort of linesearch or trust-region criteria.
+        Parameters
+        ----------
+        p : numpy.ndarray
+            The current search direction.

+        Returns
+        -------
+        xt : numpy.ndarray
+            The tested model after the line search.
+        passLS : bool
+            Whether the line search found a sufficient decrease.
+
+        Notes
+        -----
         By default, an Armijo backtracking linesearch is preformed with the
         following parameters:

         * maxIterLS, the maximum number of linesearch iterations
         * LSreduction, the expected reduction expected, default: 1e-4
         * LSshorten, how much the step is reduced, default: 0.5

         If the linesearch is completed, and a descent direction is found,
         passLS is returned as True.

-        Else, a modifySearchDirectionBreak call is preformed.
-
-        :param numpy.ndarray p: searchDirection
-        :rtype: tuple
-        :return: (xt, passLS) numpy.ndarray, bool
+        Else, a `modifySearchDirectionBreak` call is performed.
         """
         # Projected Armijo linesearch
         self._LS_t = 1.0
@@ -659,11 +785,8 @@
         return self._LS_xt, self.iterLS < self.maxIterLS
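A standalone sketch of the Armijo acceptance rule used above (not the SimPEG implementation, which also projects each trial model onto the bounds):

    import numpy as np

    def armijo_backtrack(f, x, fx, g, p, alpha=1e-4, shorten=0.5, max_iter=10):
        """Return (xt, passLS) in the spirit of modifySearchDirection."""
        t = 1.0
        descent = g @ p
        for _ in range(max_iter):
            xt = x + t * p
            if f(xt) < fx + alpha * t * descent:
                return xt, True
            t *= shorten
        return xt, False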

     @count
-    def modifySearchDirectionBreak(self, p):
-        """modifySearchDirectionBreak(p)
-
-        Code is called if modifySearchDirection fails
-        to find a descent direction.
+    def modifySearchDirectionBreak(self, p: np.ndarray) -> tuple[np.ndarray, bool]:
+        """Called if modifySearchDirection fails to find a descent direction.

         The search direction is passed as input and
         this function must pass back both a new searchDirection,
@@ -672,9 +795,18 @@
         By default, no additional work is done, and the
         evalFunction returns a False indicating the break was not caught.

-        :param numpy.ndarray p: searchDirection
-        :rtype: tuple
-        :return: (xt, breakCaught) numpy.ndarray, bool
+        Parameters
+        ----------
+        p : numpy.ndarray
+            The failed search direction.
+
+        Returns
+        -------
+        xt : numpy.ndarray
+            An alternative search direction to use.
+        was_caught : bool
+            Whether the break was caught. The minimization algorithm will
+            break early if ``not was_caught``.
         """
         self.printDone(inLS=True)
         print("The linesearch got broken. Boo.")
@@ -682,22 +814,23 @@
     @count
     @call_hooks("doEndIteration")
-    def doEndIteration(self, xt):
-        """doEndIteration(xt)
-
-        **doEndIteration** is called at the end of each minimize iteration.
+    def doEndIteration(self, xt: np.ndarray) -> None:
+        """Operation called at the end of each minimize iteration.

         By default, function values and x locations are shuffled to store
         1 past iteration in memory.

-        self.xc must be updated in this code.
-
-        :param numpy.ndarray xt: tested new iterate that ensures a descent direction.
-        :rtype: None
-        :return: None
+        Parameters
+        ----------
+        xt : numpy.ndarray
+            An accepted model at the end of each iteration.
         """
         # store old values
         self.f_last = self.f
+        if hasattr(self, "_LS_ft"):
+            self.f = self._LS_ft
+
+        # the current iterate, `self.xc`, must be set in this function if overridden in a subclass
         self.x_last, self.xc = self.xc, xt
         self.iter += 1
         if self.debug:
@@ -775,66 +908,93 @@ def _doEndIterationRemember(self, *args):
             self._rememberList[param[0]].append(param[1](self))


-class ProjectedGradient(Minimize, Remember):
-    name = "Projected Gradient"
-
-    maxIterCG = 5
-    tolCG = 1e-1
+class Bounded(object):
+    """Mixin class for bounded minimizers.

-    lower = -np.inf
-    upper = np.inf
+    Parameters
+    ----------
+    lower, upper : float or numpy.ndarray, optional
+        The lower and upper bounds.
+    """

-    def __init__(self, **kwargs):
-        super(ProjectedGradient, self).__init__(**kwargs)
+    def __init__(
+        self,
+        *,
+        lower: None | float | npt.NDArray[np.float64] = None,
+        upper: None | float | npt.NDArray[np.float64] = None,
+        **kwargs,
+    ):
+        self.lower = lower
+        self.upper = upper
+        super().__init__(**kwargs)

-        self.stoppers.append(StoppingCriteria.bindingSet)
-        self.stoppersLS.append(StoppingCriteria.bindingSet_LS)
+    @property
+    def lower(self) -> None | float | npt.NDArray[np.float64]:
+        """The lower bound value.

-        self.printers.extend(
-            [
-                IterationPrinters.itType,
-                IterationPrinters.aSet,
-                IterationPrinters.bSet,
-                IterationPrinters.comment,
-            ]
-        )
+        Returns
+        -------
+        lower : None, float, numpy.ndarray
+        """
+        return self._lower

-    def _startup(self, x0):
-        # ensure bound vectors are the same size as the model
-        if not isinstance(self.lower, np.ndarray):
-            self.lower = np.ones_like(x0) * self.lower
-        if not isinstance(self.upper, np.ndarray):
-            self.upper = np.ones_like(x0) * self.upper
+    @lower.setter
+    def lower(self, value):
+        if value is not None:
+            try:
+                value = validate_float("lower", value)
+            except TypeError:
+                value = validate_ndarray_with_shape("lower", value, shape=("*",))
+        self._lower = value

-        self.explorePG = True
-        self.exploreCG = False
-        self.stopDoingPG = False
+    @property
+    def upper(self) -> None | float | npt.NDArray[np.float64]:
+        """The upper bound value.

-        self._itType = "SD"
-        self.comment = ""
+        Returns
+        -------
+        upper : None, float, numpy.ndarray
+        """
+        return self._upper

-        self.aSet_prev = self.activeSet(x0)
+    @upper.setter
+    def upper(self, value):
+        if value is not None:
+            try:
+                value = validate_float("upper", value)
+            except TypeError:
+                value = validate_ndarray_with_shape("upper", value, shape=("*",))
+        self._upper = value
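A quick sketch of how these bounds behave in the methods defined next (ProjectedGradient mixes in Bounded; values are illustrative and the doctest output assumes default numpy printing):

    >>> import numpy as np
    >>> from simpeg.optimization import ProjectedGradient
    >>> opt = ProjectedGradient(lower=0.0, upper=1.0)
    >>> opt.projection(np.array([-0.5, 0.3, 2.0]))
    array([0. , 0.3, 1. ])
    >>> opt.g = np.array([1.0, 0.0, -1.0])  # bindingSet reads the current gradient
    >>> opt.bindingSet(np.array([0.0, 0.3, 1.0]))
    array([ True, False,  True])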
""" - return np.median(np.c_[self.lower, x, self.upper], axis=1) + if self.lower is not None: + x = np.maximum(x, self.lower) + if self.upper is not None: + x = np.minimum(x, self.upper) + return x @count - def activeSet(self, x): + def activeSet(self, x: npt.NDArray[np.float64]) -> npt.NDArray[bool]: """activeSet(x) If we are on a bound """ - return np.logical_or(x == self.lower, x == self.upper) + out = np.zeros(x.shape, dtype=bool) + if self.lower is not None: + out |= x <= self.lower + if self.upper is not None: + out |= x >= self.upper + return out @count - def inactiveSet(self, x): + def inactiveSet(self, x: npt.NDArray[np.float64]) -> npt.NDArray[bool]: """inactiveSet(x) The free variables. @@ -843,7 +1003,7 @@ def inactiveSet(self, x): return np.logical_not(self.activeSet(x)) @count - def bindingSet(self, x): + def bindingSet(self, x: npt.NDArray[np.float64]) -> npt.NDArray[bool]: """bindingSet(x) If we are on a bound and the negative gradient points away from the @@ -852,9 +1012,160 @@ def bindingSet(self, x): Optimality condition. (Satisfies Kuhn-Tucker) MoreToraldo91 """ - bind_up = np.logical_and(x == self.lower, self.g >= 0) - bind_low = np.logical_and(x == self.upper, self.g <= 0) - return np.logical_or(bind_up, bind_low) + out = np.zeros(x.shape, dtype=bool) + if self.lower is not None: + out |= (x <= self.lower) & (self.g >= 0) + if self.upper is not None: + out |= (x >= self.upper) & (self.g <= 0) + return out + + +class InexactCG(object): + """Mixin to hold common parameters for a CG solver. + + Parameters + ---------- + cg_rtol : float, optional + Relative tolerance stopping condition on the CG residual + cg_atol : float, optional + Absolute tolerance stopping condition on the CG residual + cg_maxiter : int, optional + Maximum number of CG iterations to perform + + Notes + ----- + + The convergence check for CG is: + >>> norm(A @ x_k - b) <= max(cg_rtol * norm(A @ x_0 - b), cg_atol) + + See Also + -------- + scipy.sparse.linalg.cg + + """ + + def __init__( + self, + *, + cg_rtol: float = 1e-1, + cg_atol: float = 0, + cg_maxiter: int = 5, + **kwargs, + ): + + if (val := kwargs.pop("tolCG", None)) is not None: + self.tolCG = val # Deprecated cg_rtol + else: + self.cg_rtol = cg_rtol + self.cg_atol = cg_atol + + if (val := kwargs.pop("maxIterCG", None)) is not None: + self.maxIterCG = val + else: + self.cg_maxiter = cg_maxiter + + super().__init__(**kwargs) + + @property + def cg_atol(self) -> float: + """Absolute tolerance for inner CG iterations. + + CG iterations are terminated if: + >>> norm(A @ x_k - b) <= max(cg_rtol * norm(A @ x_0 - b), cg_atol) + + or if the maximum number of CG iterations is reached. + + Returns + ------- + float + + See Also + -------- + cg_rtol, scipy.sparse.linalg.cg + """ + return self._cg_atol + + @cg_atol.setter + def cg_atol(self, value): + self._cg_atol = validate_float("cg_atol", value, min_val=0, inclusive_min=True) + + @property + def cg_rtol(self) -> float: + """Relative tolerance for inner CG iterations. + + CG iterations are terminated if: + >>> norm(A @ x_k - b) <= max(cg_rtol * norm(A @ x_0 - b), cg_atol) + + or if the maximum number of CG iterations is reached. + + Returns + ------- + float + + See Also + -------- + cg_rtol, scipy.sparse.linalg.cg + """ + return self._cg_rtol + + @cg_rtol.setter + def cg_rtol(self, value): + self._cg_rtol = validate_float("cg_rtol", value, min_val=0, inclusive_min=True) + + @property + def cg_maxiter(self) -> int: + """Maximum number of CG iterations. 
+ Returns + ------- + int + """ + return self._cg_maxiter + + @cg_maxiter.setter + def cg_maxiter(self, value): + self._cg_maxiter = validate_integer("cg_maxiter", value, min_val=1) + + maxIterCG = deprecate_property( + cg_maxiter, old_name="maxIterCG", removal_version="0.26.0", future_warn=True + ) + tolCG = deprecate_property( + cg_rtol, old_name="tolCG", removal_version="0.26.0", future_warn=True + ) + + +class ProjectedGradient(Bounded, InexactCG, Minimize, Remember): + name = "Projected Gradient" + + def __init__( + self, *, lower=-np.inf, upper=np.inf, cg_rtol=1e-1, cg_maxiter=5, **kwargs + ): + super().__init__( + lower=lower, upper=upper, cg_rtol=cg_rtol, cg_maxiter=cg_maxiter, **kwargs + ) + + self.stoppers.append(StoppingCriteria.bindingSet) + self.stoppersLS.append(StoppingCriteria.bindingSet_LS) + + self.printers.extend( + [ + IterationPrinters.itType, + IterationPrinters.aSet, + IterationPrinters.bSet, + IterationPrinters.comment, + ] + ) + + def startup(self, x0): + super().startup(x0) + + self.explorePG = True + self.exploreCG = False + self.stopDoingPG = False + + self._itType = "SD" + self.comment = "" + + self.aSet_prev = self.activeSet(x0) @timeIt def findSearchDirection(self): @@ -908,11 +1219,13 @@ def reduceHess(v): (shape[1], shape[1]), reduceHess, dtype=self.xc.dtype ) - # Choose `rtol` or `tol` argument based on installed scipy version - tol_key = "rtol" if SCIPY_1_12 else "tol" - - inp = {tol_key: self.tolCG, "maxiter": self.maxIterCG} - p, info = sp.linalg.cg(operator, -Z.T * self.g, **inp) + p, info = sp.linalg.cg( + operator, + -Z.T * self.g, + rtol=self.cg_rtol, + atol=self.cg_atol, + maxiter=self.cg_maxiter, + ) p = Z * p # bring up to full size # aSet_after = self.activeSet(self.xc+p) return p @@ -955,9 +1268,6 @@ class BFGS(Minimize, Remember): name = "BFGS" nbfgs = 10 - def __init__(self, **kwargs): - Minimize.__init__(self, **kwargs) - @property def bfgsH0(self): """ @@ -966,12 +1276,7 @@ def bfgsH0(self): Must be a simpeg.Solver """ if getattr(self, "_bfgsH0", None) is None: - print( - """ - Default solver: Diagonal is being used in bfgsH0 - """ - ) - self._bfgsH0 = Diagonal(sp.identity(self.xc.size)) + self._bfgsH0 = Identity() return self._bfgsH0 @bfgsH0.setter @@ -1039,7 +1344,7 @@ def findSearchDirection(self): return Solver(self.H) * (-self.g) -class InexactGaussNewton(BFGS, Minimize, Remember): +class InexactGaussNewton(InexactCG, BFGS): r""" Minimizes using CG as the inexact solver of @@ -1056,13 +1361,21 @@ class InexactGaussNewton(BFGS, Minimize, Remember): """ - def __init__(self, **kwargs): - Minimize.__init__(self, **kwargs) + def __init__( + self, + *, + cg_rtol: float = 1e-1, + cg_atol: float = 0.0, + cg_maxiter: int = 5, + **kwargs, + ): + super().__init__( + cg_rtol=cg_rtol, cg_atol=cg_atol, cg_maxiter=cg_maxiter, **kwargs + ) - name = "Inexact Gauss Newton" + self._was_default_hinv = False - maxIterCG = 5 - tolCG = 1e-1 + name = "Inexact Gauss Newton" @property def approxHinv(self): @@ -1078,7 +1391,9 @@ def approxHinv(self): M = sp.linalg.LinearOperator( (self.xc.size, self.xc.size), self.bfgs, dtype=self.xc.dtype ) + self._was_default_hinv = True return M + self._was_default_hinv = False return _approxHinv @approxHinv.setter @@ -1087,13 +1402,20 @@ def approxHinv(self, value): @timeIt def findSearchDirection(self): - # Choose `rtol` or `tol` argument based on installed scipy version - tol_key = "rtol" if SCIPY_1_12 else "tol" - inp = {tol_key: self.tolCG, "maxiter": self.maxIterCG} - Hinv = SolverCG(self.H, M=self.approxHinv, **inp) + 
Hinv = SolverCG( + self.H, + M=self.approxHinv, + rtol=self.cg_rtol, + atol=self.cg_atol, + maxiter=self.cg_maxiter, + ) p = Hinv * (-self.g) return p + def _doEndIteration_BFGS(self, xt): + if self._was_default_hinv: + super()._doEndIteration_BFGS(xt) + class SteepestDescent(Minimize, Remember): name = "Steepest Descent" @@ -1197,65 +1519,109 @@ def evalFunction(x, return_g=False): return x -class ProjectedGNCG(BFGS, Minimize, Remember): - def __init__(self, **kwargs): - Minimize.__init__(self, **kwargs) - - name = "Projected GNCG" - - maxIterCG = 5 - tolCG = 1e-1 - cg_count = 0 - stepOffBoundsFact = 1e-2 # perturbation of the inactive set off the bounds - stepActiveset = True - lower = -np.inf - upper = np.inf - - def _startup(self, x0): - # ensure bound vectors are the same size as the model - if not isinstance(self.lower, np.ndarray): - self.lower = np.ones_like(x0) * self.lower - if not isinstance(self.upper, np.ndarray): - self.upper = np.ones_like(x0) * self.upper +class ProjectedGNCG(Bounded, InexactGaussNewton): + def __init__( + self, + *, + lower: None | float | npt.NDArray[np.float64] = -np.inf, + upper: None | float | npt.NDArray[np.float64] = np.inf, + cg_maxiter: int = 5, + cg_rtol: float = None, + cg_atol: float = None, + step_active_set: bool = True, + active_set_grad_scale: float = 1e-2, + **kwargs, + ): + if (val := kwargs.pop("tolCG", None)) is not None: + # Deprecated path when tolCG is passed. + self.tolCG = val + cg_atol = val + cg_rtol = 0.0 + elif cg_rtol is None and cg_atol is None: + # Note these defaults match previous settings... + # but they're not good in general... + # Ideally they will change to cg_rtol=1E-3 and cg_atol=0.0 + warnings.warn( + "The defaults for ProjectedGNCG will change in SimPEG 0.26.0. If you want to maintain the " + "previous behavior, explicitly set 'cg_atol=1E-3' and 'cg_rtol=0.0'.", + FutureWarning, + stacklevel=2, + ) + cg_atol = 1e-3 + cg_rtol = 0.0 + # defaults for if someone passes just cg_rtol or just cg_atol (to be removed on deprecation removal) + # These will likely be the future defaults + elif cg_atol is None: + cg_atol = 0.0 + elif cg_rtol is None: + cg_rtol = 1e-3 + + if (val := kwargs.pop("stepActiveSet", None)) is not None: + self.stepActiveSet = val + else: + self.step_active_set = step_active_set - @count - def projection(self, x): - """projection(x) + if (val := kwargs.pop("stepOffBoundsFact", None)) is not None: + self.stepOffBoundsFact = val + else: + self.active_set_grad_scale = active_set_grad_scale + + super().__init__( + lower=lower, + upper=upper, + cg_maxiter=cg_maxiter, + cg_rtol=cg_rtol, + cg_atol=cg_atol, + **kwargs, + ) - Make sure we are feasible. + # initialize some tracking parameters + self.cg_count = 0 + self.cg_abs_resid = np.inf + self.cg_rel_resid = np.inf - """ - return np.median(np.c_[self.lower, x, self.upper], axis=1) + self.printers.extend( + [ + IterationPrinters.iterationCG, + IterationPrinters.iteration_CG_rel_residual, + IterationPrinters.iteration_CG_abs_residual, + ] + ) - @count - def activeSet(self, x): - """activeSet(x) + name = "Projected GNCG" - If we are on a bound + @property + def step_active_set(self) -> bool: + """Whether to include the active set's gradient in the step direction. 
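To avoid the FutureWarning in the constructor above, both tolerances can be given explicitly; these values reproduce the historical behaviour described in the warning:

    from simpeg.optimization import ProjectedGNCG

    opt = ProjectedGNCG(lower=0.0, upper=1.0, cg_atol=1e-3, cg_rtol=0.0, cg_maxiter=5)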
+
+        Returns
+        -------
+        bool
+        """
+        return self._step_active_set
+
+    @step_active_set.setter
+    def step_active_set(self, value: bool):
+        self._step_active_set = validate_type("step_active_set", value, bool)

     @property
-    def approxHinv(self):
-        """
-        The approximate Hessian inverse is used to precondition CG.
+    def active_set_grad_scale(self) -> float:
+        """Scalar to apply to the active set's gradient.

-        Default uses BFGS, with an initial H0 of *bfgsH0*.
+        If ``step_active_set`` is ``True``, the active set's gradient is multiplied
+        by this value when including it in the search direction.

-        Must be a scipy.sparse.linalg.LinearOperator
+        Returns
+        -------
+        float
         """
-        _approxHinv = getattr(self, "_approxHinv", None)
-        if _approxHinv is None:
-            M = sp.linalg.LinearOperator(
-                (self.xc.size, self.xc.size), self.bfgs, dtype=self.xc.dtype
-            )
-            return M
-        return _approxHinv
+        return self._active_set_grad_scale

-    @approxHinv.setter
-    def approxHinv(self, value):
-        self._approxHinv = value
+    @active_set_grad_scale.setter
+    def active_set_grad_scale(self, value: float):
+        self._active_set_grad_scale = validate_float(
+            "active_set_grad_scale", value, min_val=0, inclusive_min=True
+        )

     @timeIt
     def findSearchDirection(self):
@@ -1263,58 +1629,107 @@
         findSearchDirection()
         Finds the search direction based on projected CG
         """
+        # remember, "active" means cells with values equal to the limit,
+        # "inactive" means cells with values inside the limits.
+
+        # The basic logic of this method is to do CG iterations only
+        # on the inactive set, then also add a scaled gradient for the
+        # active set (if that gradient points away from the limits).
+
         self.cg_count = 0
-        Active = self.activeSet(self.xc)
-        temp = sum((np.ones_like(self.xc.size) - Active))
+        active = self.activeSet(self.xc)
+        inactive = ~active

         step = np.zeros(self.g.size)
-        resid = -(1 - Active) * self.g
+        resid = inactive * (-self.g)

-        r = resid - (1 - Active) * (self.H * step)
+        r = resid  # - inactive * (self.H * step), step is zero

         p = self.approxHinv * r

         sold = np.dot(r, p)

         count = 0
+        r_norm0 = norm(r)

-        while np.all([np.linalg.norm(r) > self.tolCG, count < self.maxIterCG]):
+        atol = max(self.cg_rtol * r_norm0, self.cg_atol)
+        if self.debug:
+            print(f"CG Target tolerance: {atol}")
+        r_norm = r_norm0
+        while r_norm > atol and count < self.cg_maxiter:
+            if self.debug:
+                print(f"CG Iteration: {count}, residual norm: {r_norm}")
             count += 1

-            q = (1 - Active) * (self.H * p)
+            q = inactive * (self.H * p)

             alpha = sold / (np.dot(p, q))

             step += alpha * p

             r -= alpha * q
+            r_norm = norm(r)

             h = self.approxHinv * r

             snew = np.dot(r, h)

-            p = h + (snew / sold * p)
+            p = h + (snew / sold) * p

             sold = snew
             # End CG Iterations
-        self.cg_count += count
-
-        # Take a gradient step on the active cells if exist
-        if temp != self.xc.size:
-            rhs_a = (Active) * -self.g
-
+        self.cg_count = count
+        self.cg_abs_resid = r_norm
+        self.cg_rel_resid = r_norm / r_norm0
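A self-contained sketch of the inactive-set-masked CG step performed above (plain numpy with no preconditioner; not the SimPEG implementation):

    import numpy as np

    def masked_cg_step(H, g, inactive, maxiter=5, rtol=1e-1, atol=0.0):
        # approximately solve H @ step = -g restricted to the inactive cells
        step = np.zeros_like(g)
        r = np.where(inactive, -g, 0.0)
        p = r.copy()
        tol = max(rtol * np.linalg.norm(r), atol)
        sold = r @ r
        for _ in range(maxiter):
            if np.linalg.norm(r) <= tol:
                break
            q = np.where(inactive, H @ p, 0.0)
            alpha = sold / (p @ q)
            step += alpha * p
            r -= alpha * q
            snew = r @ r
            p = r + (snew / sold) * p
            sold = snew
        return step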
+
+        # Also include the gradient for cells on the boundary
+        # if that gradient would move them away from the boundary,
+        # i.e. cells that are active and not bound.
+        bound = self.bindingSet(self.xc)
+        active_not_bound = active & (~bound)
+        if self.step_active_set and np.any(active_not_bound):
+            rhs_a = active_not_bound * -self.g
+
+            # active means x == boundary
+            # bound means x == boundary and g == 0 or -g points beyond boundary
+            # active and not bound means
+            #     x == boundary and g != 0 and g points inside
+            # so we can safely discard a non-zero check on rhs_a
+
+            # reasonable guess at the step length for the gradient on the
+            # active cell boundaries. Basically scale it to have the same
+            # maximum as the cg step on the cells that are not on the
+            # boundary.
             dm_i = max(abs(step))
             dm_a = max(abs(rhs_a))

-            # perturb inactive set off of bounds so that they are included
-            # in the step
-            step = step + self.stepOffBoundsFact * (rhs_a * dm_i / dm_a)
+            # add the active set's gradients.
+            step += self.active_set_grad_scale * (rhs_a * dm_i / dm_a)

-            # Only keep gradients going in the right direction on the active
-            # set
-            indx = ((self.xc <= self.lower) & (step < 0)) | (
-                (self.xc >= self.upper) & (step > 0)
-            )
-            step[indx] = 0.0
+            # Only keep search directions going in the right direction
+            step[bound] = 0

         return step
+
+    stepActiveSet = deprecate_property(
+        step_active_set,
+        old_name="stepActiveSet",
+        removal_version="0.26.0",
+        future_warn=True,
+    )
+
+    stepOffBoundsFact = deprecate_property(
+        active_set_grad_scale,
+        old_name="stepOffBoundsFact",
+        removal_version="0.26.0",
+        future_warn=True,
+    )
+
+    # Note: the old tolCG default was effectively used as an absolute tolerance,
+    # so the deprecated property maps onto cg_atol here.
+    tolCG = deprecate_property(
+        InexactGaussNewton.cg_atol,
+        old_name="tolCG",
+        removal_version="0.26.0",
+        future_warn=True,
+    )
diff --git a/simpeg/potential_fields/_numba_utils.py b/simpeg/potential_fields/_numba_utils.py
index 331cbb9e9d..58f216a1a8 100644
--- a/simpeg/potential_fields/_numba_utils.py
+++ b/simpeg/potential_fields/_numba_utils.py
@@ -110,7 +110,7 @@ def evaluate_kernels_on_cell(
     located on the observation point :math:`\mathbf{p}`.
     """
     # Initialize result floats to zero
-    result_x, result_y, result_z = 0, 0, 0
+    result_x, result_y, result_z = 0.0, 0.0, 0.0
     # Iterate over the vertices of the prism
     for i in range(2):
         # Compute shifted easting coordinate
@@ -149,3 +149,107 @@
             shift_east, shift_north, shift_upward, radius
         )
     return result_x, result_y, result_z
+
+
+@jit(nopython=True)
+def evaluate_six_kernels_on_cell(
+    easting,
+    northing,
+    upward,
+    prism_west,
+    prism_east,
+    prism_south,
+    prism_north,
+    prism_bottom,
+    prism_top,
+    kernel_xx,
+    kernel_yy,
+    kernel_zz,
+    kernel_xy,
+    kernel_xz,
+    kernel_yz,
+):
+    r"""
+    Evaluate six kernel functions on every shifted vertex of a prism.
+
+    Similar to ``evaluate_kernels_on_cell``, but designed to evaluate six kernels
+    instead of three. This function comes in handy for magnetic forward
+    simulations, where six kernels need to be evaluated.
+
+    .. note::
+
+        This function was inspired by the ``_evaluate_kernel`` function in
+        Choclo (released under BSD 3-clause Licence):
+        https://www.fatiando.org/choclo
+
+    Parameters
+    ----------
+    easting, northing, upward : float
+        Easting, northing and upward coordinates of the observation point. Must
+        be in meters.
+    prism_west, prism_east : floats
+        The West and East boundaries of the prism. Must be in meters.
+    prism_south, prism_north : floats
+        The South and North boundaries of the prism. Must be in meters.
+    prism_bottom, prism_top : floats
+        The bottom and top boundaries of the prism.
Must be in meters. + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callable + Kernel functions that will be evaluated on each one of the shifted + vertices of the prism. + + Returns + ------- + result_xx, result_yy, result_zz, result_xy, result_xz, result_yz : float + Evaluation of the kernel functions on each one of the vertices of the + prism. + """ + # Initialize result floats to zero + result_xx, result_yy, result_zz = 0.0, 0.0, 0.0 + result_xy, result_xz, result_yz = 0.0, 0.0, 0.0 + # Iterate over the vertices of the prism + for i in range(2): + # Compute shifted easting coordinate + if i == 0: + shift_east = prism_east - easting + else: + shift_east = prism_west - easting + shift_east_sq = shift_east**2 + for j in range(2): + # Compute shifted northing coordinate + if j == 0: + shift_north = prism_north - northing + else: + shift_north = prism_south - northing + shift_north_sq = shift_north**2 + for k in range(2): + # Compute shifted upward coordinate + if k == 0: + shift_upward = prism_top - upward + else: + shift_upward = prism_bottom - upward + shift_upward_sq = shift_upward**2 + # Compute the radius + radius = np.sqrt(shift_east_sq + shift_north_sq + shift_upward_sq) + # If i, j or k is 1, the corresponding shifted + # coordinate will refer to the lower boundary, + # meaning the corresponding term should have a minus + # sign. + result_xx += (-1) ** (i + j + k) * kernel_xx( + shift_east, shift_north, shift_upward, radius + ) + result_yy += (-1) ** (i + j + k) * kernel_yy( + shift_east, shift_north, shift_upward, radius + ) + result_zz += (-1) ** (i + j + k) * kernel_zz( + shift_east, shift_north, shift_upward, radius + ) + result_xy += (-1) ** (i + j + k) * kernel_xy( + shift_east, shift_north, shift_upward, radius + ) + result_xz += (-1) ** (i + j + k) * kernel_xz( + shift_east, shift_north, shift_upward, radius + ) + result_yz += (-1) ** (i + j + k) * kernel_yz( + shift_east, shift_north, shift_upward, radius + ) + return result_xx, result_yy, result_zz, result_xy, result_xz, result_yz diff --git a/simpeg/potential_fields/base.py b/simpeg/potential_fields/base.py index b31c8865bf..c2f311de08 100644 --- a/simpeg/potential_fields/base.py +++ b/simpeg/potential_fields/base.py @@ -1,13 +1,9 @@ import os -import warnings from multiprocessing.pool import Pool import discretize import numpy as np from discretize import TensorMesh, TreeMesh -from scipy.sparse import csr_matrix as csr - -from simpeg.utils import mkvc from ..simulation import LinearSimulation from ..utils import validate_active_indices, validate_integer, validate_string @@ -56,12 +52,6 @@ class BasePFSimulation(LinearSimulation): If True, the simulation will run in parallel. If False, it will run in serial. If ``engine`` is not ``"choclo"`` this argument will be ignored. - ind_active : np.ndarray of int or bool - - .. deprecated:: 0.23.0 - - Argument ``ind_active`` is deprecated in favor of - ``active_cells`` and will be removed in SimPEG v0.24.0. Notes ----- @@ -88,28 +78,13 @@ def __init__( sensitivity_dtype=np.float32, engine="geoana", numba_parallel=True, - ind_active=None, **kwargs, ): - # Deprecate ind_active argument - if ind_active is not None: - if active_cells is not None: - raise TypeError( - "Cannot pass both 'active_cells' and 'ind_active'." 
- "'ind_active' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - ) - warnings.warn( - "'ind_active' has been deprecated and will be removed in " - " SimPEG v0.24.0, please use 'active_cells' instead.", - FutureWarning, - stacklevel=2, - ) - active_cells = ind_active - - if "forwardOnly" in kwargs: - raise AttributeError( - "forwardOnly was removed in SimPEG 0.17.0, please set store_sensitivities=None" + # removed ind_active argument + if kwargs.pop("ind_active", None) is not None: + raise TypeError( + "'ind_active' has been removed in " + "SimPEG v0.24.0, please use 'active_cells' instead.", ) self.mesh = mesh @@ -310,8 +285,7 @@ def active_cells(self): "ind_active", "active_cells", removal_version="0.24.0", - future_warn=True, - error=False, + error=True, ) def linear_operator(self): @@ -523,110 +497,7 @@ def progress(iteration, prog, final): return prog -def get_dist_wgt(mesh, receiver_locations, actv, R, R0): - """Compute distance weights for potential field simulations. - - Parameters - ---------- - mesh : discretize.BaseMesh - A discretize mesh - receiver_locations : (n, 3) numpy.ndarray - Observation locations [x, y, z] - actv : (n_cell) numpy.ndarray of bool - Active cells vector [0:air , 1: ground] - R : float - Decay factor (mag=3, grav =2) - R0 : float - Stabilization factor. Usually a fraction of the minimum cell size - - Returns - ------- - wr : (n_cell) numpy.ndarray - Distance weighting model; 0 for all inactive cells - """ - warnings.warn( - "The get_dist_wgt function has been deprecated, please import " - "simpeg.utils.distance_weighting. This will be removed in SimPEG 0.24.0", - FutureWarning, - stacklevel=2, - ) - # Find non-zero cells - if actv.dtype == "bool": - inds = ( - np.asarray([inds for inds, elem in enumerate(actv, 1) if elem], dtype=int) - - 1 - ) - else: - inds = actv - - nC = len(inds) - - # Create active cell projector - P = csr((np.ones(nC), (inds, range(nC))), shape=(mesh.nC, nC)) - - # Geometrical constant - p = 1 / np.sqrt(3) - - # Create cell center location - Ym, Xm, Zm = np.meshgrid( - mesh.cell_centers_y, mesh.cell_centers_x, mesh.cell_centers_z +def get_dist_wgt(*args, **kwargs): + raise NotImplementedError( + "The get_dist_wgt function has been removed in SimPEG 0.24.0, please import simpeg.utils.distance_weighting." 
) - hY, hX, hZ = np.meshgrid(mesh.h[1], mesh.h[0], mesh.h[2]) - - # Remove air cells - Xm = P.T * mkvc(Xm) - Ym = P.T * mkvc(Ym) - Zm = P.T * mkvc(Zm) - - hX = P.T * mkvc(hX) - hY = P.T * mkvc(hY) - hZ = P.T * mkvc(hZ) - - V = P.T * mkvc(mesh.cell_volumes) - wr = np.zeros(nC) - - ndata = receiver_locations.shape[0] - count = -1 - print("Begin calculation of distance weighting for R= " + str(R)) - - for dd in range(ndata): - nx1 = (Xm - hX * p - receiver_locations[dd, 0]) ** 2 - nx2 = (Xm + hX * p - receiver_locations[dd, 0]) ** 2 - - ny1 = (Ym - hY * p - receiver_locations[dd, 1]) ** 2 - ny2 = (Ym + hY * p - receiver_locations[dd, 1]) ** 2 - - nz1 = (Zm - hZ * p - receiver_locations[dd, 2]) ** 2 - nz2 = (Zm + hZ * p - receiver_locations[dd, 2]) ** 2 - - R1 = np.sqrt(nx1 + ny1 + nz1) - R2 = np.sqrt(nx1 + ny1 + nz2) - R3 = np.sqrt(nx2 + ny1 + nz1) - R4 = np.sqrt(nx2 + ny1 + nz2) - R5 = np.sqrt(nx1 + ny2 + nz1) - R6 = np.sqrt(nx1 + ny2 + nz2) - R7 = np.sqrt(nx2 + ny2 + nz1) - R8 = np.sqrt(nx2 + ny2 + nz2) - - temp = ( - (R1 + R0) ** -R - + (R2 + R0) ** -R - + (R3 + R0) ** -R - + (R4 + R0) ** -R - + (R5 + R0) ** -R - + (R6 + R0) ** -R - + (R7 + R0) ** -R - + (R8 + R0) ** -R - ) - - wr = wr + (V * temp / 8.0) ** 2.0 - - count = progress(dd, count, ndata) - - wr = np.sqrt(wr) / V - wr = mkvc(wr) - wr = np.sqrt(wr / (np.max(wr))) - - print("Done 100% ...distance weighting completed!!\n") - - return wr diff --git a/simpeg/potential_fields/gravity/_numba/_2d_mesh.py b/simpeg/potential_fields/gravity/_numba/_2d_mesh.py new file mode 100644 index 0000000000..06cb489a37 --- /dev/null +++ b/simpeg/potential_fields/gravity/_numba/_2d_mesh.py @@ -0,0 +1,455 @@ +""" +Numba functions for gravity simulation on 2D meshes. + +These functions assumes 3D prisms formed by a 2D mesh plus top and bottom boundaries for +each prism. +""" + +import numpy as np + +try: + import choclo +except ImportError: + # Define dummy jit decorator + def jit(*args, **kwargs): + return lambda f: f + + choclo = None +else: + from numba import jit, prange + + +def _forward_gravity( + receivers, + cells_bounds, + top, + bottom, + densities, + fields, + forward_func, + constant_factor, +): + """ + Forward gravity fields of 2D meshes. + + This function is designed to be used with equivalent sources, where the + mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell + are passed through the ``top`` and ``bottom`` arrays. + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_function = jit(nopython=True, parallel=True)(_forward_gravity) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + densities : (n_active_cells) numpy.ndarray + Array with densities of each active cell in the mesh. + fields : (n_receivers) numpy.ndarray + Array full of zeros where the gravity fields on each receiver will be + stored. This could be a preallocated array or a slice of it. + forward_func : callable + Forward function that will be evaluated on each node of the mesh. 
Choose
+ one of the forward functions in ``choclo.prism``.
+ constant_factor : float
+ Constant factor that will be used to multiply each element of the
+ ``fields`` array.
+
+ Notes
+ -----
+ The constant factor is applied here to each element of fields because
+ it's more efficient than doing it afterwards: it would require
+ indexing the elements that correspond to each component.
+ """
+ n_receivers = receivers.shape[0]
+ n_cells = cells_bounds.shape[0]
+ # Forward model the gravity field of each cell on each receiver location
+ for i in prange(n_receivers):
+ for j in range(n_cells):
+ fields[i] += constant_factor * forward_func(
+ receivers[i, 0],
+ receivers[i, 1],
+ receivers[i, 2],
+ cells_bounds[j, 0],
+ cells_bounds[j, 1],
+ cells_bounds[j, 2],
+ cells_bounds[j, 3],
+ bottom[j],
+ top[j],
+ densities[j],
+ )
+
+
+def _sensitivity_gravity(
+ receivers,
+ cells_bounds,
+ top,
+ bottom,
+ sensitivity_matrix,
+ forward_func,
+ constant_factor,
+):
+ """
+ Fill the sensitivity matrix.
+
+ This function should be used with a `numba.jit` decorator, for example:
+
+ .. code::
+
+ from numba import jit
+
+ jit_function = jit(nopython=True, parallel=True)(_sensitivity_gravity)
+
+ Parameters
+ ----------
+ receivers : (n_receivers, 3) numpy.ndarray
+ Array with the locations of the receivers
+ cells_bounds : (n_active_cells, 4) numpy.ndarray
+ Array with the bounds of each active cell in the 2D mesh. For each row, the
+ bounds should be passed in the following order: ``x_min``, ``x_max``,
+ ``y_min``, ``y_max``.
+ top : (n_active_cells) np.ndarray
+ Array with the top boundaries of each active cell in the 2D mesh.
+ bottom : (n_active_cells) np.ndarray
+ Array with the bottom boundaries of each active cell in the 2D mesh.
+ sensitivity_matrix : (n_receivers, n_active_nodes) array
+ Empty 2d array where the sensitivity matrix elements will be filled.
+ This could be a preallocated empty array or a slice of it.
+ forward_func : callable
+ Forward function that will be evaluated on each node of the mesh. Choose
+ one of the forward functions in ``choclo.prism``.
+ constant_factor : float
+ Constant factor that will be used to multiply each element of the
+ sensitivity matrix.
+
+ Notes
+ -----
+ The constant factor is applied here to each row of the sensitivity matrix
+ because it's more efficient than doing it afterwards: it would require
+ indexing the rows that correspond to each component.
+ """
+ n_receivers = receivers.shape[0]
+ n_cells = cells_bounds.shape[0]
+ # Evaluate kernel function on each node, for each receiver location
+ for i in prange(n_receivers):
+ for j in range(n_cells):
+ sensitivity_matrix[i, j] = constant_factor * forward_func(
+ receivers[i, 0],
+ receivers[i, 1],
+ receivers[i, 2],
+ cells_bounds[j, 0],
+ cells_bounds[j, 1],
+ cells_bounds[j, 2],
+ cells_bounds[j, 3],
+ bottom[j],
+ top[j],
+ 1.0,  # use unitary density to get sensitivities
+ )
+
+
+@jit(nopython=True, parallel=False)
+def _g_t_dot_v_serial(
+ receivers,
+ cells_bounds,
+ top,
+ bottom,
+ forward_func,
+ constant_factor,
+ vector,
+ result,
+):
+ """
+ Compute ``G.T @ v`` in serial, without building G, for a 2D mesh.
+
+ Parameters
+ ----------
+ receivers : (n_receivers, 3) numpy.ndarray
+ Array with the locations of the receivers
+ cells_bounds : (n_active_cells, 4) numpy.ndarray
+ Array with the bounds of each active cell in the 2D mesh. For each row, the
+ bounds should be passed in the following order: ``x_min``, ``x_max``,
+ ``y_min``, ``y_max``.
+ top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + forward_func : callable + Forward function that will be evaluated on each node of the mesh. Choose + one of the forward functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + + Notes + ----- + This function is meant to be run in serial. Writing to the ``result`` array + inside a parallel loop over the receivers generates a race condition that + leads to corrupted outputs. + """ + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + for i in range(n_receivers): + for j in range(n_cells): + # Compute the i-th row of the sensitivity matrix and multiply it by the + # i-th element of the vector. + result[j] += constant_factor * forward_func( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + vector[i], + ) + + +@jit(nopython=True, parallel=True) +def _g_t_dot_v_parallel( + receivers, + cells_bounds, + top, + bottom, + forward_func, + constant_factor, + vector, + result, +): + """ + Compute ``G.T @ v`` in parallel, without building G, for a 2D mesh. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + forward_func : callable + Forward function that will be evaluated on each node of the mesh. Choose + one of the forward functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + + Notes + ----- + This function is meant to be run in parallel. + This implementation instructs each thread to allocate their own array for + the current row of the sensitivity matrix. After computing the elements of + that row, it gets added to the running ``result`` array through a reduction + operation handled by Numba. + """ + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + for i in prange(n_receivers): + # Allocate array for the current row of the sensitivity matrix + local_row = np.empty(n_cells) + for j in range(n_cells): + # Compute the i-th row of the sensitivity matrix and multiply it by the + # i-th element of the vector. 
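+ # Accumulation identity being used: (G.T @ v)[j] == sum_i v[i] * G[i, j],
+ # so adding each receiver's scaled row into the running result
+ # reproduces the full transposed product without ever holding G.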
+ local_row[j] = constant_factor * forward_func( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + vector[i], + ) + # Apply reduction operation to add the values of the row to the running + # result. Avoid slicing the `result` array when updating it to avoid + # racing conditions, just add the `local_row` to the `results` + # variable. + result += local_row + + +@jit(nopython=True, parallel=False) +def _diagonal_G_T_dot_G_serial( + receivers, + cells_bounds, + top, + bottom, + forward_func, + constant_factor, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` without storing ``G``, in serial. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + forward_func : callable + Forward function that will be evaluated on each node of the mesh. Choose + one of the forward functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + weights : (n_receivers,) numpy.ndarray + Array with data weights. It should be the diagonal of the ``W`` matrix, + squared. + diagonal : (n_active_cells,) numpy.ndarray + Array where the diagonal of ``G.T @ G`` will be added to. + + Notes + ----- + This function is meant to be run in serial. Use the + ``_diagonal_G_T_dot_G_parallel`` one for parallelized computations. + """ + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in range(n_receivers): + for j in range(n_cells): + g_element = constant_factor * forward_func( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + 1.0, # use unitary density to get sensitivities + ) + diagonal[j] += weights[i] * g_element**2 + + +@jit(nopython=True, parallel=True) +def _diagonal_G_T_dot_G_parallel( + receivers, + cells_bounds, + top, + bottom, + forward_func, + constant_factor, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` without storing ``G``, in parallel. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + forward_func : callable + Forward function that will be evaluated on each node of the mesh. Choose + one of the forward functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. 
+ weights : (n_receivers,) numpy.ndarray + Array with data weights. It should be the diagonal of the ``W`` matrix, + squared. + diagonal : (n_active_cells,) numpy.ndarray + Array where the diagonal of ``G.T @ G`` will be added to. + + Notes + ----- + This function is meant to be run in parallel. Use the + ``_diagonal_G_T_dot_G_serial`` one for serialized computations. + + This implementation instructs each thread to allocate their own array for + the diagonal elements of ``G.T @ G`` that correspond to a single receiver. + After computing them, the ``local_diagonal`` array gets added to the running + ``diagonal`` array through a reduction operation handled by Numba. + """ + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate array for the diagonal elements for the current receiver. + local_diagonal = np.empty(n_cells) + for j in range(n_cells): + g_element = constant_factor * forward_func( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + 1.0, # use unitary density to get sensitivities + ) + local_diagonal[j] = weights[i] * g_element**2 + # Add the result to the diagonal. + # Apply reduction operation to add the values of the local diagonal to + # the running diagonal array. Avoid slicing the `diagonal` array when + # updating it to avoid racing conditions, just add the `local_diagonal` + # to the `diagonal` variable. + diagonal += local_diagonal + + +# Define a dictionary with decorated versions of the Numba functions. +NUMBA_FUNCTIONS_2D = { + "sensitivity": { + parallel: jit(nopython=True, parallel=parallel)(_sensitivity_gravity) + for parallel in (True, False) + }, + "forward": { + parallel: jit(nopython=True, parallel=parallel)(_forward_gravity) + for parallel in (True, False) + }, + "gt_dot_v": { + False: _g_t_dot_v_serial, + True: _g_t_dot_v_parallel, + }, + "diagonal_gtg": { + False: _diagonal_G_T_dot_G_serial, + True: _diagonal_G_T_dot_G_parallel, + }, +} diff --git a/simpeg/potential_fields/gravity/_numba/_3d_mesh.py b/simpeg/potential_fields/gravity/_numba/_3d_mesh.py new file mode 100644 index 0000000000..996455cfe9 --- /dev/null +++ b/simpeg/potential_fields/gravity/_numba/_3d_mesh.py @@ -0,0 +1,519 @@ +""" +Numba functions for gravity simulation on 3D meshes. +""" + +import numpy as np + +try: + import choclo +except ImportError: + # Define dummy jit decorator + def jit(*args, **kwargs): + return lambda f: f + + choclo = None +else: + from numba import jit, prange + +from ..._numba_utils import kernels_in_nodes_to_cell + + +def _forward_gravity( + receivers, + nodes, + densities, + fields, + cell_nodes, + kernel_func, + constant_factor, +): + """ + Forward model the gravity field of active cells on receivers + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_forward_gravity = jit(nopython=True, parallel=True)(_forward_gravity) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + densities : (n_active_cells) numpy.ndarray + Array with densities of each active cell in the mesh. + fields : (n_receivers) numpy.ndarray + Array full of zeros where the gravity fields on each receiver will be + stored. 
This could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + ``fields`` array. + + Notes + ----- + The constant factor is applied here to each element of fields because + it's more efficient than doing it afterwards: it would require to + index the elements that corresponds to each component. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + kernels[j] = _evaluate_kernel( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + nodes[j, 0], + nodes[j, 1], + nodes[j, 2], + kernel_func, + ) + # Compute fields from the kernel values + for k in range(n_cells): + fields[i] += ( + constant_factor + * densities[k] + * kernels_in_nodes_to_cell( + kernels, + cell_nodes[k, :], + ) + ) + + +def _sensitivity_gravity( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + kernel_func, + constant_factor, +): + """ + Fill the sensitivity matrix + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)(_sensitivity_gravity) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + sensitivity_matrix : (n_receivers, n_active_nodes) array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + + Notes + ----- + The constant factor is applied here to each row of the sensitivity matrix + because it's more efficient than doing it afterwards: it would require to + index the rows that corresponds to each component. 
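+
+ As a rough illustration of why kernels are evaluated per node rather
+ than per cell: neighboring cells share vertices, so evaluating once
+ per node cuts the kernel calls by roughly a factor of eight. A
+ back-of-the-envelope sketch (regular tensor mesh, all cells active;
+ the mesh dimensions below are made up):
+
+ .. code::
+
+ nx, ny, nz = 32, 32, 16
+ naive = 8 * nx * ny * nz # one call per cell vertex
+ shared = (nx + 1) * (ny + 1) * (nz + 1) # one call per node
+ print(naive, shared) # 131072 vs 18513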
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + kernels[j] = _evaluate_kernel( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + nodes[j, 0], + nodes[j, 1], + nodes[j, 2], + kernel_func, + ) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + sensitivity_matrix[i, k] = constant_factor * kernels_in_nodes_to_cell( + kernels, + cell_nodes[k, :], + ) + + +@jit(nopython=True, parallel=False) +def _diagonal_G_T_dot_G_serial( + receivers, + nodes, + cell_nodes, + kernel_func, + constant_factor, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` without storing ``G``, in serial. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + weights : (n_receivers,) numpy.ndarray + Array with data weights. It should be the diagonal of the ``W`` matrix, + squared. + diagonal : (n_active_cells,) numpy.ndarray + Array where the diagonal of ``G.T @ G`` will be added to. + + Notes + ----- + This function is meant to be run in serial. Use the + ``_diagonal_G_T_dot_G_parallel`` one for parallelized computations. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + kernels[j] = _evaluate_kernel( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + nodes[j, 0], + nodes[j, 1], + nodes[j, 2], + kernel_func, + ) + # Add diagonal components in the running result. + for k in range(n_cells): + diagonal[k] += ( + weights[i] + * ( + constant_factor + * kernels_in_nodes_to_cell( + kernels, + cell_nodes[k, :], + ) + ) + ** 2 + ) + + +@jit(nopython=True, parallel=True) +def _diagonal_G_T_dot_G_parallel( + receivers, + nodes, + cell_nodes, + kernel_func, + constant_factor, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` without storing ``G``, in parallel. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + weights : (n_receivers,) numpy.ndarray + Array with data weights. 
It should be the diagonal of the ``W`` matrix, + squared. + diagonal : (n_active_cells,) numpy.ndarray + Array where the diagonal of ``G.T @ G`` will be added to. + + Notes + ----- + This function is meant to be run in parallel. Use the + ``_diagonal_G_T_dot_G_serial`` one for serialized computations. + + This implementation instructs each thread to allocate their own array for + the current row of the sensitivity matrix. After computing the elements of + that row, it gets added to the running ``result`` array through a reduction + operation handled by Numba. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + kernels[j] = _evaluate_kernel( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + nodes[j, 0], + nodes[j, 1], + nodes[j, 2], + kernel_func, + ) + # Allocate array for the diagonal elements for the current receiver. + local_diagonal = np.empty(n_cells) + for k in range(n_cells): + local_diagonal[k] = ( + weights[i] + * ( + constant_factor + * kernels_in_nodes_to_cell( + kernels, + cell_nodes[k, :], + ) + ) + ** 2 + ) + # Add the result to the diagonal. + # Apply reduction operation to add the values of the local diagonal to + # the running diagonal array. Avoid slicing the `diagonal` array when + # updating it to avoid racing conditions, just add the `local_diagonal` + # to the `diagonal` variable. + diagonal += local_diagonal + + +@jit(nopython=True, parallel=False) +def _sensitivity_gravity_t_dot_v_serial( + receivers, + nodes, + cell_nodes, + kernel_func, + constant_factor, + vector, + result, +): + """ + Compute ``G.T @ v`` in serial, without building G. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + + Notes + ----- + This function is meant to be run in serial. Writing to the ``result`` array + inside a parallel loop over the receivers generates a race condition that + leads to corrupted outputs. + + A parallel implementation of this function is available in + ``_sensitivity_gravity_t_dot_v_parallel``. 
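+
+ The identity being exploited is ``(G.T @ v)[j] == sum_i v[i] * G[i, j]``.
+ A quick check with a toy dense matrix (not the simulation's operators):
+
+ .. code::
+
+ import numpy as np
+
+ rng = np.random.default_rng(42)
+ G = rng.normal(size=(6, 4))
+ v = rng.normal(size=6)
+ result = np.zeros(4)
+ for i in range(6): # accumulate one scaled row at a time
+ result += v[i] * G[i, :]
+ assert np.allclose(result, G.T @ v)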
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in range(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + for j in range(n_nodes): + kernels[j] = _evaluate_kernel( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + nodes[j, 0], + nodes[j, 1], + nodes[j, 2], + kernel_func, + ) + # Compute the i-th row of the sensitivity matrix and multiply it by the + # i-th element of the vector. + for k in range(n_cells): + result[k] += ( + constant_factor + * vector[i] + * kernels_in_nodes_to_cell( + kernels, + cell_nodes[k, :], + ) + ) + + +@jit(nopython=True, parallel=True) +def _sensitivity_gravity_t_dot_v_parallel( + receivers, + nodes, + cell_nodes, + kernel_func, + constant_factor, + vector, + result, +): + """ + Compute ``G.T @ v`` in parallel without building G. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + kernel_func : callable + Kernel function that will be evaluated on each node of the mesh. Choose + one of the kernel functions in ``choclo.prism``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + + Notes + ----- + This function is meant to be run in parallel. + This implementation instructs each thread to allocate their own array for + the current row of the sensitivity matrix. After computing the elements of + that row, it gets added to the running ``result`` array through a reduction + operation handled by Numba. + + A serialized implementation of this function is available in + ``_sensitivity_gravity_t_dot_v_serial``. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vector for kernels evaluated on mesh nodes + kernels = np.empty(n_nodes) + # Allocate array for the current row of the sensitivity matrix + local_row = np.empty(n_cells) + for j in range(n_nodes): + kernels[j] = _evaluate_kernel( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + nodes[j, 0], + nodes[j, 1], + nodes[j, 2], + kernel_func, + ) + # Compute fields from the kernel values + for k in range(n_cells): + local_row[k] = ( + constant_factor + * vector[i] + * kernels_in_nodes_to_cell( + kernels, + cell_nodes[k, :], + ) + ) + # Apply reduction operation to add the values of the row to the running + # result. Avoid slicing the `result` array when updating it to avoid + # racing conditions, just add the `local_row` to the `results` + # variable. + result += local_row + + +@jit(nopython=True) +def _evaluate_kernel( + receiver_x, receiver_y, receiver_z, node_x, node_y, node_z, kernel_func +): + """ + Evaluate a kernel function for a single node and receiver + + Parameters + ---------- + receiver_x, receiver_y, receiver_z : floats + Coordinates of the receiver. 
+ node_x, node_y, node_z : floats + Coordinates of the node. + kernel_func : callable + Kernel function that should be evaluated. For example, use one of the + kernel functions in ``choclo.prism``. + + Returns + ------- + float + Kernel evaluated on the given node and receiver. + """ + dx = node_x - receiver_x + dy = node_y - receiver_y + dz = node_z - receiver_z + distance = np.sqrt(dx**2 + dy**2 + dz**2) + return kernel_func(dx, dy, dz, distance) + + +# Define a dictionary with decorated versions of the Numba functions. +NUMBA_FUNCTIONS_3D = { + "sensitivity": { + parallel: jit(nopython=True, parallel=parallel)(_sensitivity_gravity) + for parallel in (True, False) + }, + "forward": { + parallel: jit(nopython=True, parallel=parallel)(_forward_gravity) + for parallel in (True, False) + }, + "diagonal_gtg": { + False: _diagonal_G_T_dot_G_serial, + True: _diagonal_G_T_dot_G_parallel, + }, + "gt_dot_v": { + False: _sensitivity_gravity_t_dot_v_serial, + True: _sensitivity_gravity_t_dot_v_parallel, + }, +} diff --git a/simpeg/potential_fields/gravity/_numba/__init__.py b/simpeg/potential_fields/gravity/_numba/__init__.py new file mode 100644 index 0000000000..8f9cac68a9 --- /dev/null +++ b/simpeg/potential_fields/gravity/_numba/__init__.py @@ -0,0 +1,13 @@ +""" +Numba functions for gravity simulations. +""" + +from ._2d_mesh import NUMBA_FUNCTIONS_2D +from ._3d_mesh import NUMBA_FUNCTIONS_3D + +try: + import choclo +except ImportError: + choclo = None + +__all__ = ["choclo", "NUMBA_FUNCTIONS_3D", "NUMBA_FUNCTIONS_2D"] diff --git a/simpeg/potential_fields/gravity/_numba_functions.py b/simpeg/potential_fields/gravity/_numba_functions.py deleted file mode 100644 index fe2e69e202..0000000000 --- a/simpeg/potential_fields/gravity/_numba_functions.py +++ /dev/null @@ -1,352 +0,0 @@ -""" -Numba functions for gravity simulation using Choclo. -""" - -import numpy as np - -try: - import choclo -except ImportError: - # Define dummy jit decorator - def jit(*args, **kwargs): - return lambda f: f - - choclo = None -else: - from numba import jit, prange - -from .._numba_utils import kernels_in_nodes_to_cell - - -def _forward_gravity( - receivers, - nodes, - densities, - fields, - cell_nodes, - kernel_func, - constant_factor, -): - """ - Forward model the gravity field of active cells on receivers - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_forward_gravity = jit(nopython=True, parallel=True)(_forward_gravity) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - nodes : (n_active_nodes, 3) numpy.ndarray - Array with the location of the mesh nodes. - densities : (n_active_cells) numpy.ndarray - Array with densities of each active cell in the mesh. - fields : (n_receivers) numpy.ndarray - Array full of zeros where the gravity fields on each receiver will be - stored. This could be a preallocated array or a slice of it. - cell_nodes : (n_active_cells, 8) numpy.ndarray - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - kernel_func : callable - Kernel function that will be evaluated on each node of the mesh. Choose - one of the kernel functions in ``choclo.prism``. - constant_factor : float - Constant factor that will be used to multiply each element of the - ``fields`` array. 
- - Notes - ----- - The constant factor is applied here to each element of fields because - it's more efficient than doing it afterwards: it would require to - index the elements that corresponds to each component. - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vector for kernels evaluated on mesh nodes - kernels = np.empty(n_nodes) - for j in range(n_nodes): - kernels[j] = _evaluate_kernel( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - nodes[j, 0], - nodes[j, 1], - nodes[j, 2], - kernel_func, - ) - # Compute fields from the kernel values - for k in range(n_cells): - fields[i] += ( - constant_factor - * densities[k] - * kernels_in_nodes_to_cell( - kernels, - cell_nodes[k, :], - ) - ) - - -def _sensitivity_gravity( - receivers, - nodes, - sensitivity_matrix, - cell_nodes, - kernel_func, - constant_factor, -): - """ - Fill the sensitivity matrix - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_sensitivity = jit(nopython=True, parallel=True)(_sensitivity_gravity) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - nodes : (n_active_nodes, 3) numpy.ndarray - Array with the location of the mesh nodes. - sensitivity_matrix : (n_receivers, n_active_nodes) array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - cell_nodes : (n_active_cells, 8) numpy.ndarray - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - kernel_func : callable - Kernel function that will be evaluated on each node of the mesh. Choose - one of the kernel functions in ``choclo.prism``. - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - - Notes - ----- - The constant factor is applied here to each row of the sensitivity matrix - because it's more efficient than doing it afterwards: it would require to - index the rows that corresponds to each component. - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vector for kernels evaluated on mesh nodes - kernels = np.empty(n_nodes) - for j in range(n_nodes): - kernels[j] = _evaluate_kernel( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - nodes[j, 0], - nodes[j, 1], - nodes[j, 2], - kernel_func, - ) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - sensitivity_matrix[i, k] = constant_factor * kernels_in_nodes_to_cell( - kernels, - cell_nodes[k, :], - ) - - -@jit(nopython=True) -def _evaluate_kernel( - receiver_x, receiver_y, receiver_z, node_x, node_y, node_z, kernel_func -): - """ - Evaluate a kernel function for a single node and receiver - - Parameters - ---------- - receiver_x, receiver_y, receiver_z : floats - Coordinates of the receiver. - node_x, node_y, node_z : floats - Coordinates of the node. - kernel_func : callable - Kernel function that should be evaluated. For example, use one of the - kernel functions in ``choclo.prism``. - - Returns - ------- - float - Kernel evaluated on the given node and receiver. 
- """ - dx = node_x - receiver_x - dy = node_y - receiver_y - dz = node_z - receiver_z - distance = np.sqrt(dx**2 + dy**2 + dz**2) - return kernel_func(dx, dy, dz, distance) - - -def _forward_gravity_2d_mesh( - receivers, - cells_bounds, - top, - bottom, - densities, - fields, - forward_func, - constant_factor, -): - """ - Forward gravity fields of 2D meshes. - - This function is designed to be used with equivalent sources, where the - mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell - are passed through the ``top`` and ``bottom`` arrays. - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_function = jit(nopython=True, parallel=True)(_forward_gravity_2d_mesh) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - cells_bounds : (n_active_cells, 4) numpy.ndarray - Array with the bounds of each active cell in the 2D mesh. For each row, the - bounds should be passed in the following order: ``x_min``, ``x_max``, - ``y_min``, ``y_max``. - top : (n_active_cells) np.ndarray - Array with the top boundaries of each active cell in the 2D mesh. - bottom : (n_active_cells) np.ndarray - Array with the bottom boundaries of each active cell in the 2D mesh. - densities : (n_active_cells) numpy.ndarray - Array with densities of each active cell in the mesh. - fields : (n_receivers) numpy.ndarray - Array full of zeros where the gravity fields on each receiver will be - stored. This could be a preallocated array or a slice of it. - forward_func : callable - Forward function that will be evaluated on each node of the mesh. Choose - one of the forward functions in ``choclo.prism``. - constant_factor : float - Constant factor that will be used to multiply each element of the - ``fields`` array. - - Notes - ----- - The constant factor is applied here to each element of fields because - it's more efficient than doing it afterwards: it would require to - index the elements that corresponds to each component. - """ - n_receivers = receivers.shape[0] - n_cells = cells_bounds.shape[0] - # Forward model the gravity field of each cell on each receiver location - for i in prange(n_receivers): - for j in range(n_cells): - fields[i] += constant_factor * forward_func( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - densities[j], - ) - - -def _sensitivity_gravity_2d_mesh( - receivers, - cells_bounds, - top, - bottom, - sensitivity_matrix, - forward_func, - constant_factor, -): - """ - Fill the sensitivity matrix - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_function = jit(nopython=True, parallel=True)(_sensitivity_gravity_2d_mesh) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - cells_bounds : (n_active_cells, 4) numpy.ndarray - Array with the bounds of each active cell in the 2D mesh. For each row, the - bounds should be passed in the following order: ``x_min``, ``x_max``, - ``y_min``, ``y_max``. - top : (n_active_cells) np.ndarray - Array with the top boundaries of each active cell in the 2D mesh. - bottom : (n_active_cells) np.ndarray - Array with the bottom boundaries of each active cell in the 2D mesh. 
- sensitivity_matrix : (n_receivers, n_active_nodes) array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - forward_func : callable - Forward function that will be evaluated on each node of the mesh. Choose - one of the forward functions in ``choclo.prism``. - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - - Notes - ----- - The constant factor is applied here to each row of the sensitivity matrix - because it's more efficient than doing it afterwards: it would require to - index the rows that corresponds to each component. - """ - n_receivers = receivers.shape[0] - n_cells = cells_bounds.shape[0] - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - for j in range(n_cells): - sensitivity_matrix[i, j] = constant_factor * forward_func( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - 1.0, # use unitary density to get sensitivities - ) - - -# Define decorated versions of these functions -_sensitivity_gravity_parallel = jit(nopython=True, parallel=True)(_sensitivity_gravity) -_sensitivity_gravity_serial = jit(nopython=True, parallel=False)(_sensitivity_gravity) -_forward_gravity_parallel = jit(nopython=True, parallel=True)(_forward_gravity) -_forward_gravity_serial = jit(nopython=True, parallel=False)(_forward_gravity) -_forward_gravity_2d_mesh_serial = jit(nopython=True, parallel=False)( - _forward_gravity_2d_mesh -) -_forward_gravity_2d_mesh_parallel = jit(nopython=True, parallel=True)( - _forward_gravity_2d_mesh -) -_sensitivity_gravity_2d_mesh_serial = jit(nopython=True, parallel=False)( - _sensitivity_gravity_2d_mesh -) -_sensitivity_gravity_2d_mesh_parallel = jit(nopython=True, parallel=True)( - _sensitivity_gravity_2d_mesh -) diff --git a/simpeg/potential_fields/gravity/receivers.py b/simpeg/potential_fields/gravity/receivers.py index de54848cf5..c6318b021a 100644 --- a/simpeg/potential_fields/gravity/receivers.py +++ b/simpeg/potential_fields/gravity/receivers.py @@ -20,7 +20,7 @@ class Point(survey.BaseRx): .. important:: - Gradient components ("gxx", "gyy", "gzz", "gxy", "gxz", "gyz") are + Gradient components ("gxx", "gyy", "gzz", "gxy", "gxz", "gyz", "guv") are returned in Eotvos (:math:`10^{-9} s^{-2}`). 
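 A short usage sketch (the two station locations below are made up for
 illustration):

 .. code::

 import numpy as np
 from simpeg.potential_fields import gravity

 locations = np.array([[0.0, 0.0, 10.0], [50.0, 0.0, 10.0]])
 rx = gravity.receivers.Point(locations, components=["gz", "guv"])
 # "gz" is returned in mgal; "guv" = (gyy - gxx) / 2, in Eotvos.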
Parameters
@@ -41,7 +41,7 @@ class Point(survey.BaseRx):
 - "gyy" --> y-derivative of the y-component
 - "gyz" --> z-derivative of the y-component (and vice versa)
 - "gzz" --> z-derivative of the z-component
- - "guv" --> UV component
+ - "guv" --> UV component, i.e., (gyy - gxx) / 2

 See also
 --------
diff --git a/simpeg/potential_fields/gravity/simulation.py b/simpeg/potential_fields/gravity/simulation.py
index 7c03c00319..3d49e3cade 100644
--- a/simpeg/potential_fields/gravity/simulation.py
+++ b/simpeg/potential_fields/gravity/simulation.py
@@ -1,9 +1,12 @@
 from __future__ import annotations
+import hashlib
 import warnings
 import numpy as np
+from numpy.typing import NDArray
 import scipy.constants as constants
 from geoana.kernels import prism_fz, prism_fzx, prism_fzy, prism_fzz
 from scipy.constants import G as NewtG
+from scipy.sparse.linalg import LinearOperator, aslinearoperator

 from simpeg import props
 from simpeg.utils import mkvc, sdiag
@@ -11,17 +14,15 @@
 from ...base import BasePDESimulation
 from ..base import BaseEquivalentSourceLayerSimulation, BasePFSimulation

-from ._numba_functions import (
- choclo,
- _sensitivity_gravity_serial,
- _sensitivity_gravity_parallel,
- _forward_gravity_serial,
- _forward_gravity_parallel,
- _forward_gravity_2d_mesh_serial,
- _forward_gravity_2d_mesh_parallel,
- _sensitivity_gravity_2d_mesh_serial,
- _sensitivity_gravity_2d_mesh_parallel,
-)
+from ._numba import choclo, NUMBA_FUNCTIONS_3D, NUMBA_FUNCTIONS_2D
+
+try:
+ from warnings import deprecated
+except ImportError:
+ # Use the deprecated decorator provided by typing_extensions (which
+ # supports older versions of Python) if it cannot be imported from
+ # warnings.
+ from typing_extensions import deprecated

 if choclo is not None:
 from numba import jit
@@ -118,22 +119,25 @@ def _get_conversion_factor(component):
 class Simulation3DIntegral(BasePFSimulation):
- """
+ r"""
 Gravity simulation in integral form.

- .. important::
-
- Density model is assumed to be in g/cc.
+ .. note::

- .. important::
+ The gravity simulation assumes the following units for its inputs and outputs:

- Acceleration components ("gx", "gy", "gz") are returned in mgal
- (:math:`10^{-5} m/s^2`).
+ - Density model is assumed to be in gram per cubic centimeter (g/cc).
+ - Acceleration components (``"gx"``, ``"gy"``, ``"gz"``) are returned in mgal
+ (:math:`10^{-5} \text{m}/\text{s}^2`).
+ - Gradient components (``"gxx"``, ``"gyy"``, ``"gzz"``, ``"gxy"``, ``"gxz"``,
+ ``"gyz"``, ``"guv"``) are returned in Eotvos (:math:`10^{-9} s^{-2}`).

 .. important::

- Gradient components ("gxx", "gyy", "gzz", "gxy", "gxz", "gyz") are
- returned in Eotvos (:math:`10^{-9} s^{-2}`).
+ Following SimPEG convention for the right-handed xyz coordinate system, the
+ z axis points *upwards*. Therefore, the ``"gz"`` component corresponds to the
+ **upward** component of the gravity acceleration vector.
+

 Parameters
 ----------
@@ -155,7 +159,9 @@ class Simulation3DIntegral(BasePFSimulation):
 - 'ram': sensitivities are stored in the computer's RAM
 - 'disk': sensitivities are written to a directory
 - 'forward_only': you intend only to perform a forward simulation and
- sensitivities do not need to be stored
+ sensitivities do not need to be stored. The sensitivity matrix ``G``
+ is never created, but it will be defined as
+ a :class:`~scipy.sparse.linalg.LinearOperator`.
sensitivity_path : str, optional

 Path to store the sensitivity matrix if ``store_sensitivities`` is set
@@ -166,12 +172,6 @@ class Simulation3DIntegral(BasePFSimulation):
 If True, the simulation will run in parallel. If False, it will
 run in serial. If ``engine`` is not ``"choclo"`` this argument will be
 ignored.
- ind_active : np.ndarray of int or bool
-
- .. deprecated:: 0.23.0
-
- Argument ``ind_active`` is deprecated in favor of
- ``active_cells`` and will be removed in SimPEG v0.24.0.
 """

 rho, rhoMap, rhoDeriv = props.Invertible("Density")
@@ -188,8 +188,6 @@ def __init__(
 super().__init__(mesh, engine=engine, numba_parallel=numba_parallel, **kwargs)
 self.rho = rho
 self.rhoMap = rhoMap
- self._G = None
- self._gtg_diagonal = None
 self.modelMap = self.rhoMap

 # Warn if n_processes has been passed
@@ -202,23 +200,14 @@ def __init__(
 )
 self.n_processes = None

- # Define jit functions
- if self.engine == "choclo":
- if self.numba_parallel:
- self._sensitivity_gravity = _sensitivity_gravity_parallel
- self._forward_gravity = _forward_gravity_parallel
- else:
- self._sensitivity_gravity = _sensitivity_gravity_serial
- self._forward_gravity = _forward_gravity_serial
-
- def fields(self, m=None):
+ def fields(self, m):
 """
 Forward model the gravity field of the mesh on the receivers in the survey

 Parameters
 ----------
- m : (n_active_cells,) numpy.ndarray
- Array with values for the model.
+ m : (n_param,) numpy.ndarray
+ The model parameters.

 Returns
 -------
@@ -226,8 +215,8 @@ def fields(self, m=None):
 Gravity fields generated by the given model on every receiver
 location.
 """
- if m is not None:
- self.model = m
+ # Need to assign the model, so the rho property can be accessed.
+ self.model = m

 if self.store_sensitivities == "forward_only":
 # Compute the linear operation without forming the full dense G
@@ -240,65 +229,238 @@ def fields(self, m=None):
 return fields

 def getJtJdiag(self, m, W=None, f=None):
+ r"""
+ Compute the diagonal of :math:`\mathbf{J}^T \mathbf{J}`.
+
+ Parameters
+ ----------
+ m : (n_param,) numpy.ndarray
+ The model parameters.
+ W : (nD, nD) np.ndarray or scipy.sparse.sparray, optional
+ Diagonal matrix with the square root of the weights. If not None,
+ the function returns the diagonal of
+ :math:`\mathbf{J}^T \mathbf{W}^T \mathbf{W} \mathbf{J}`.
+ f : Ignored
+ Not used, present here for API consistency by convention.
+
+ Returns
+ -------
+ (n_active_cells) np.ndarray
+ Array with the diagonal of ``J.T @ J``.
+
+ Notes
+ -----
+ If ``store_sensitivities`` is ``"forward_only"``, the ``G`` matrix is
+ never allocated in memory, and the diagonal is obtained by
+ accumulation, computing each element of the ``G`` matrix on the fly.
+
+ This method caches the diagonal ``G.T @ W.T @ W @ G`` and the sha256
+ hash of the diagonal of the ``W`` matrix. This way, if the same weights
+ are passed to it, it reuses the cached diagonal rather than recomputing
+ it.
+ If new weights are passed, the cache is updated with the latest
+ diagonal of ``G.T @ W.T @ W @ G``.
 """
- Return the diagonal of JtJ
- """
+ # Need to assign the model, so the rhoDeriv can be computed (if the
+ # model is None, the rhoDeriv is going to be Zero).
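+ # (hashlib.sha256 accepts any buffer-protocol object, so hashing the
+ # weights array directly fingerprints its raw bytes: equal values with
+ # the same dtype and layout give equal digests, which is what the
+ # cache comparison below relies on.)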
self.model = m - if W is None: - W = np.ones(self.survey.nD) - else: - W = W.diagonal() ** 2 - if getattr(self, "_gtg_diagonal", None) is None: - diag = np.zeros(self.G.shape[1]) - for i in range(len(W)): - diag += W[i] * (self.G[i] * self.G[i]) - self._gtg_diagonal = diag - else: - diag = self._gtg_diagonal - return mkvc((sdiag(np.sqrt(diag)) @ self.rhoDeriv).power(2).sum(axis=0)) + # We should probably check that W is diagonal. Let's assume it for now. + weights = ( + W.diagonal() ** 2 + if W is not None + else np.ones(self.survey.nD, dtype=np.float64) + ) - def getJ(self, m, f=None): + # Compute gtg (G.T @ W.T @ W @ G) if it's not cached, or if the + # weights are not the same. + weights_sha256 = hashlib.sha256(weights) + use_cached_gtg = ( + hasattr(self, "_gtg_diagonal") + and hasattr(self, "_weights_sha256") + and self._weights_sha256.digest() == weights_sha256.digest() + ) + if not use_cached_gtg: + self._gtg_diagonal = self._get_gtg_diagonal(weights) + self._weights_sha256 = weights_sha256 + + # Multiply the gtg_diagonal by the derivative of the mapping + diagonal = mkvc( + (sdiag(np.sqrt(self._gtg_diagonal)) @ self.rhoDeriv).power(2).sum(axis=0) + ) + return diagonal + + def _get_gtg_diagonal(self, weights: NDArray) -> NDArray: """ - Sensitivity matrix + Compute the diagonal of ``G.T @ W.T @ W @ G``. + + Parameters + ---------- + weights : np.ndarray + Weights array: diagonal of ``W.T @ W``. + + Returns + ------- + np.ndarray """ - return self.G.dot(self.rhoDeriv) + match self.store_sensitivities, self.engine: + case ("forward_only", "geoana"): + msg = ( + "Computing the diagonal of G.T @ G with " + 'store_sensitivities="forward_only" and engine="geoana" ' + "hasn't been implemented yet. " + 'Choose store_sensitivities="ram" or "disk", ' + 'or another engine, like "choclo".' + ) + raise NotImplementedError(msg) + case ("forward_only", "choclo"): + gtg_diagonal = self._gtg_diagonal_without_building_g(weights) + case (_, _): + # In Einstein notation, the j-th element of the diagonal is: + # d_j = w_i * G_{ij} * G_{ij} + gtg_diagonal = np.asarray( + np.einsum("i,ij,ij->j", weights, self.G, self.G) + ) + return gtg_diagonal + + def getJ(self, m, f=None) -> NDArray[np.float64 | np.float32] | LinearOperator: + r""" + Sensitivity matrix :math:`\mathbf{J}`. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + f : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + (nD, n_active_cells) np.ndarray or scipy.sparse.linalg.LinearOperator. + Array or :class:`~scipy.sparse.linalg.LinearOperator` for the + :math:`\mathbf{J}` matrix. + A :class:`~scipy.sparse.linalg.LinearOperator` will be returned if + ``store_sensitivities`` is ``"forward_only"``, otherwise a dense + array will be returned. + + Notes + ----- + If ``store_sensitivities`` is ``"ram"`` or ``"disk"``, a dense array + for the ``J`` matrix is returned. + A :class:`~scipy.sparse.linalg.LinearOperator` is returned if + ``store_sensitivities`` is ``"forward_only"``. This object can perform + operations like ``J @ m`` or ``J.T @ v`` without allocating the full + ``J`` matrix in memory. + """ + # Need to assign the model, so the rhoDeriv can be computed (if the + # model is None, the rhoDeriv is going to be Zero). 
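+ # (`@` composes LinearOperators lazily: `G @ rhoDeriv` returns a
+ # product operator whose matvec/rmatvec chain the two factors, so
+ # `J` is never materialized when `G` is matrix-free.)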
+ self.model = m + rhoDeriv = ( + self.rhoDeriv + if not isinstance(self.G, LinearOperator) + else aslinearoperator(self.rhoDeriv) + ) + return self.G @ rhoDeriv def Jvec(self, m, v, f=None): """ - Sensitivity times a vector + Dot product between sensitivity matrix and a vector. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. This array is used to compute the ``J`` + matrix. + v : (n_param,) numpy.ndarray + Vector used in the matrix-vector multiplication. + f : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + (nD,) numpy.ndarray + + Notes + ----- + If ``store_sensitivities`` is set to ``"forward_only"``, then the + matrix `G` is never fully constructed, and the dot product is computed + by accumulation, computing the matrix elements on the fly. Otherwise, + the full matrix ``G`` is constructed and stored either in memory or + disk. """ + # Need to assign the model, so the rhoDeriv can be computed (if the + # model is None, the rhoDeriv is going to be Zero). + self.model = m dmu_dm_v = self.rhoDeriv @ v return np.asarray(self.G @ dmu_dm_v.astype(self.sensitivity_dtype, copy=False)) def Jtvec(self, m, v, f=None): """ - Sensitivity transposed times a vector + Dot product between transposed sensitivity matrix and a vector. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. This array is used to compute the ``J`` + matrix. + v : (nD,) numpy.ndarray + Vector used in the matrix-vector multiplication. + f : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + (nD,) numpy.ndarray + + Notes + ----- + If ``store_sensitivities`` is set to ``"forward_only"``, then the + matrix `G` is never fully constructed, and the dot product is computed + by accumulation, computing the matrix elements on the fly. Otherwise, + the full matrix ``G`` is constructed and stored either in memory or + disk. """ + # Need to assign the model, so the rhoDeriv can be computed (if the + # model is None, the rhoDeriv is going to be Zero). + self.model = m Jtvec = self.G.T @ v.astype(self.sensitivity_dtype, copy=False) return np.asarray(self.rhoDeriv.T @ Jtvec) @property - def G(self): + def G(self) -> NDArray | np.memmap | LinearOperator: """ - Gravity forward operator + Gravity forward operator. """ - if getattr(self, "_G", None) is None: - if self.engine == "choclo": - self._G = self._sensitivity_matrix() - else: - self._G = self.linear_operator() + if not hasattr(self, "_G"): + match self.engine, self.store_sensitivities: + case ("choclo", "forward_only"): + self._G = self._sensitivity_matrix_as_operator() + case ("choclo", _): + self._G = self._sensitivity_matrix() + case ("geoana", "forward_only"): + msg = ( + "Accessing matrix G with " + 'store_sensitivities="forward_only" and engine="geoana" ' + "hasn't been implemented yet. " + 'Choose store_sensitivities="ram" or "disk", ' + 'or another engine, like "choclo".' + ) + raise NotImplementedError(msg) + case ("geoana", _): + self._G = self.linear_operator() return self._G @property + @deprecated( + "The `gtg_diagonal` property has been deprecated. 
" + "It will be removed in SimPEG v0.25.0.", + category=FutureWarning, + ) def gtg_diagonal(self): """ Diagonal of GtG """ - if getattr(self, "_gtg_diagonal", None) is None: - return None - - return self._gtg_diagonal + return getattr(self, "_gtg_diagonal", None) def evaluate_integral(self, receiver_location, components): """ @@ -411,6 +573,8 @@ def _forward(self, densities): (nD,) numpy.ndarray Always return a ``np.float64`` array. """ + # Get Numba function + forward_func = NUMBA_FUNCTIONS_3D["forward"][self.numba_parallel] # Gather active nodes and the indices of the nodes for each active cell active_nodes, active_cell_nodes = self._get_active_nodes() # Allocate fields array @@ -426,7 +590,7 @@ def _forward(self, densities): vector_slice = slice( index_offset + i, index_offset + n_elements, n_components ) - self._forward_gravity( + forward_func( receivers, active_nodes, densities, @@ -440,12 +604,14 @@ def _forward(self, densities): def _sensitivity_matrix(self): """ - Compute the sensitivity matrix G + Compute the sensitivity matrix ``G``. Returns ------- (nD, n_active_cells) numpy.ndarray """ + # Get Numba function + sensitivity_func = NUMBA_FUNCTIONS_3D["sensitivity"][self.numba_parallel] # Gather active nodes and the indices of the nodes for each active cell active_nodes, active_cell_nodes = self._get_active_nodes() # Allocate sensitivity matrix @@ -471,7 +637,7 @@ def _sensitivity_matrix(self): matrix_slice = slice( index_offset + i, index_offset + n_rows, n_components ) - self._sensitivity_gravity( + sensitivity_func( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], @@ -482,6 +648,101 @@ def _sensitivity_matrix(self): index_offset += n_rows return sensitivity_matrix + def _sensitivity_matrix_transpose_dot_vec(self, vector): + """ + Compute ``G.T @ v`` without building ``G``. + + Parameters + ---------- + vector : (nD) numpy.ndarray + Vector used in the dot product. + + Returns + ------- + (n_active_cells) numpy.ndarray + """ + # Get Numba function + sensitivity_t_dot_v_func = NUMBA_FUNCTIONS_3D["gt_dot_v"][self.numba_parallel] + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Allocate resulting array + result = np.zeros(self.nC) + # Start filling the result array + index_offset = 0 + for components, receivers in self._get_components_and_receivers(): + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + kernel_func = CHOCLO_KERNELS[component] + conversion_factor = _get_conversion_factor(component) + vector_slice = slice( + index_offset + i, index_offset + n_rows, n_components + ) + sensitivity_t_dot_v_func( + receivers, + active_nodes, + active_cell_nodes, + kernel_func, + constants.G * conversion_factor, + vector[vector_slice], + result, + ) + index_offset += n_rows + return result + + def _sensitivity_matrix_as_operator(self): + """ + Create a LinearOperator for the sensitivity matrix G. + + Returns + ------- + scipy.sparse.linalg.LinearOperator + """ + shape = (self.survey.nD, self.nC) + linear_op = LinearOperator( + shape=shape, + matvec=self._forward, + rmatvec=self._sensitivity_matrix_transpose_dot_vec, + dtype=np.float64, + ) + return linear_op + + def _gtg_diagonal_without_building_g(self, weights): + """ + Compute the diagonal of ``G.T @ G`` without building the ``G`` matrix. + + Parameters + ----------- + weights : (nD,) array + Array with data weights. 
It should be the diagonal of the ``W`` + matrix, squared. + + Returns + ------- + (n_active_cells) numpy.ndarray + """ + # Get Numba function + diagonal_gtg_func = NUMBA_FUNCTIONS_3D["diagonal_gtg"][self.numba_parallel] + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Allocate array for the diagonal of G.T @ G + diagonal = np.zeros(self.nC, dtype=np.float64) + # Start filling the diagonal array + for components, receivers in self._get_components_and_receivers(): + for component in components: + kernel_func = CHOCLO_KERNELS[component] + conversion_factor = _get_conversion_factor(component) + diagonal_gtg_func( + receivers, + active_nodes, + active_cell_nodes, + kernel_func, + constants.G * conversion_factor, + weights, + diagonal, + ) + return diagonal + class SimulationEquivalentSourceLayer( BaseEquivalentSourceLayerSimulation, Simulation3DIntegral @@ -525,14 +786,6 @@ def __init__( **kwargs, ) - if self.engine == "choclo": - if self.numba_parallel: - self._sensitivity_gravity = _sensitivity_gravity_2d_mesh_parallel - self._forward_gravity = _forward_gravity_2d_mesh_parallel - else: - self._sensitivity_gravity = _sensitivity_gravity_2d_mesh_serial - self._forward_gravity = _forward_gravity_2d_mesh_serial - def _forward(self, densities): """ Forward model the fields of active cells in the mesh on receivers. @@ -548,6 +801,8 @@ def _forward(self, densities): (nD,) numpy.ndarray Always return a ``np.float64`` array. """ + # Get Numba function + forward_func = NUMBA_FUNCTIONS_2D["forward"][self.numba_parallel] # Get cells in the 2D mesh and keep only active cells cells_bounds_active = self.mesh.cell_bounds[self.active_cells] # Allocate fields array @@ -558,19 +813,19 @@ def _forward(self, densities): n_components = len(components) n_elements = n_components * receivers.shape[0] for i, component in enumerate(components): - forward_func = CHOCLO_FORWARD_FUNCS[component] + choclo_forward_func = CHOCLO_FORWARD_FUNCS[component] conversion_factor = _get_conversion_factor(component) vector_slice = slice( index_offset + i, index_offset + n_elements, n_components ) - self._forward_gravity( + forward_func( receivers, cells_bounds_active, self.cell_z_top, self.cell_z_bottom, densities, fields[vector_slice], - forward_func, + choclo_forward_func, conversion_factor, ) index_offset += n_elements @@ -584,6 +839,8 @@ def _sensitivity_matrix(self): ------- (nD, n_active_cells) numpy.ndarray """ + # Get Numba function + sensitivity_func = NUMBA_FUNCTIONS_2D["sensitivity"][self.numba_parallel] # Get cells in the 2D mesh and keep only active cells cells_bounds_active = self.mesh.cell_bounds[self.active_cells] # Allocate sensitivity matrix @@ -604,23 +861,103 @@ def _sensitivity_matrix(self): n_components = len(components) n_rows = n_components * receivers.shape[0] for i, component in enumerate(components): - forward_func = CHOCLO_FORWARD_FUNCS[component] + choclo_forward_func = CHOCLO_FORWARD_FUNCS[component] conversion_factor = _get_conversion_factor(component) matrix_slice = slice( index_offset + i, index_offset + n_rows, n_components ) - self._sensitivity_gravity( + sensitivity_func( receivers, cells_bounds_active, self.cell_z_top, self.cell_z_bottom, sensitivity_matrix[matrix_slice, :], - forward_func, + choclo_forward_func, conversion_factor, ) index_offset += n_rows return sensitivity_matrix + def _sensitivity_matrix_transpose_dot_vec(self, vector): + """ + Compute ``G.T @ v`` without building ``G``. 
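+
+        This method backs the ``rmatvec`` of the
+        :class:`~scipy.sparse.linalg.LinearOperator` returned by
+        ``_sensitivity_matrix_as_operator``. A minimal sketch of its effect,
+        assuming a ``simulation`` configured with ``engine="choclo"`` and
+        ``store_sensitivities="forward_only"``:
+
+        .. code::
+
+            import numpy as np
+
+            v = np.random.default_rng(42).normal(size=simulation.survey.nD)
+            result = simulation.G.T @ v  # G is never allocated in memory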
+ + Parameters + ---------- + vector : (nD) numpy.ndarray + Vector used in the dot product. + + Returns + ------- + (n_active_cells) numpy.ndarray + """ + # Get Numba function + g_t_dot_v_func = NUMBA_FUNCTIONS_2D["gt_dot_v"][self.numba_parallel] + # Get cells in the 2D mesh and keep only active cells + cells_bounds_active = self.mesh.cell_bounds[self.active_cells] + # Allocate resulting array + result = np.zeros(self.nC) + # Start filling the result array + index_offset = 0 + for components, receivers in self._get_components_and_receivers(): + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + choclo_forward_func = CHOCLO_FORWARD_FUNCS[component] + conversion_factor = _get_conversion_factor(component) + vector_slice = slice( + index_offset + i, index_offset + n_rows, n_components + ) + g_t_dot_v_func( + receivers, + cells_bounds_active, + self.cell_z_top, + self.cell_z_bottom, + choclo_forward_func, + conversion_factor, + vector[vector_slice], + result, + ) + index_offset += n_rows + return result + + def _gtg_diagonal_without_building_g(self, weights): + """ + Compute the diagonal of ``G.T @ G`` without building the ``G`` matrix. + + Parameters + ----------- + weights : (nD,) array + Array with data weights. It should be the diagonal of the ``W`` + matrix, squared. + + Returns + ------- + (n_active_cells) numpy.ndarray + """ + # Get Numba function + diagonal_gtg_func = NUMBA_FUNCTIONS_2D["diagonal_gtg"][self.numba_parallel] + # Get cells in the 2D mesh and keep only active cells + cells_bounds_active = self.mesh.cell_bounds[self.active_cells] + # Allocate array for the diagonal of G.T @ G + diagonal = np.zeros(self.nC, dtype=np.float64) + # Start filling the diagonal array + for components, receivers in self._get_components_and_receivers(): + for component in components: + choclo_forward_func = CHOCLO_FORWARD_FUNCS[component] + conversion_factor = _get_conversion_factor(component) + diagonal_gtg_func( + receivers, + cells_bounds_active, + self.cell_z_top, + self.cell_z_bottom, + choclo_forward_func, + conversion_factor, + weights, + diagonal, + ) + return diagonal + class Simulation3DDifferential(BasePDESimulation): r"""Finite volume simulation class for gravity. diff --git a/simpeg/potential_fields/gravity/survey.py b/simpeg/potential_fields/gravity/survey.py index 2808e2489a..a14f61296a 100644 --- a/simpeg/potential_fields/gravity/survey.py +++ b/simpeg/potential_fields/gravity/survey.py @@ -1,7 +1,15 @@ from ...survey import BaseSurvey -from ...utils.code_utils import validate_type +from ...utils.code_utils import validate_list_of_types from .sources import SourceField +try: + from warnings import deprecated +except ImportError: + # Use the deprecated decorator provided by typing_extensions (which + # supports older versions of Python) if it cannot be imported from + # warnings. + from typing_extensions import deprecated + class Survey(BaseSurvey): """Base Gravity Survey @@ -13,10 +21,34 @@ class Survey(BaseSurvey): """ def __init__(self, source_field, **kwargs): - self.source_field = validate_type( - "source_field", source_field, SourceField, cast=False + if "source_list" in kwargs: + msg = ( + "source_list is not a valid argument to gravity.Survey. " + "Use source_field instead." 
+ ) + raise TypeError(msg) + super().__init__(source_list=source_field, **kwargs) + + @BaseSurvey.source_list.setter + def source_list(self, new_list): + new_list = validate_list_of_types( + "source_list", new_list, SourceField, ensure_unique=True, min_n=1, max_n=1 ) - super().__init__(source_list=None, **kwargs) + self._source_list = new_list + + @property + def source_field(self): + """A source object that contains the gravity receivers. + + Returns + ------- + simpeg.potential_fields.gravity.sources.SourceField + """ + return self.source_list[0] + + @source_field.setter + def source_field(self, new_src): + self.source_list = new_src def eval(self, fields): # noqa: A003 """Evaluate the field @@ -75,9 +107,24 @@ def nD(self): return sum(receiver.nD for receiver in self.source_field.receiver_list) @property + @deprecated( + "The `components` property is deprecated, " + "and will be removed in SimPEG v0.25.0. " + "Within a gravity survey, receivers can contain different components. " + "Iterate over the sources and receivers in the survey to get " + "information about their components.", + category=FutureWarning, + ) def components(self): """Number of components measured at each receiver. + .. deprecated:: 0.24.0 + + The `components` property is deprecated, and will be removed in + SimPEG v0.25.0. Within a gravity survey, receivers can contain + different components. Iterate over the sources and receivers in the + survey to get information about their components. + Returns ------- int diff --git a/simpeg/potential_fields/magnetics/__init__.py b/simpeg/potential_fields/magnetics/__init__.py index 52612898b8..b757d912ed 100644 --- a/simpeg/potential_fields/magnetics/__init__.py +++ b/simpeg/potential_fields/magnetics/__init__.py @@ -48,5 +48,5 @@ Simulation3DDifferential, ) from .survey import Survey -from .sources import SourceField, UniformBackgroundField +from .sources import UniformBackgroundField from .receivers import Point diff --git a/simpeg/potential_fields/magnetics/_numba/_2d_mesh.py b/simpeg/potential_fields/magnetics/_numba/_2d_mesh.py new file mode 100644 index 0000000000..cb75bc8285 --- /dev/null +++ b/simpeg/potential_fields/magnetics/_numba/_2d_mesh.py @@ -0,0 +1,2279 @@ +""" +Numba functions for magnetic simulation of rectangular prisms on 2D meshes. + +These functions assumes 3D prisms formed by a 2D mesh plus top and bottom boundaries for +each prism. +""" + +import numpy as np + +try: + import choclo +except ImportError: + # Define dummy jit decorator + def jit(*args, **kwargs): + return lambda f: f + + choclo = None +else: + from numba import jit, prange + +from ..._numba_utils import evaluate_kernels_on_cell, evaluate_six_kernels_on_cell + + +def _forward_mag( + receivers, + cells_bounds, + top, + bottom, + model, + fields, + regional_field, + forward_func, + scalar_model, +): + """ + Forward model single magnetic component for 2D meshes. + + This function is designed to be used with equivalent sources, where the + mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell + are passed through the ``top`` and ``bottom`` arrays. + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_mag) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. 
For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + model : (n_active_cells) or (3 * n_active_cells) array + Array containing the susceptibilities (scalar) or effective + susceptibilities (vector) of the active cells in the mesh, in SI + units. + Susceptibilities are expected if ``scalar_model`` is True, + and the array should have ``n_active_cells`` elements. + Effective susceptibilities are expected if ``scalar_model`` is False, + and the array should have ``3 * n_active_cells`` elements. + fields : (n_receivers) array + Array full of zeros where the magnetic component on each receiver will + be stored. This could be a preallocated array or a slice of it. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + forward_func : callable + Forward function that will be evaluated on each node of the mesh. Choose + one of the forward functions in ``choclo.prism``. + scalar_model : bool + If True, the forward will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the forward will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + + Notes + ----- + + About the model array + ^^^^^^^^^^^^^^^^^^^^^ + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. + * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array + with a number of elements equal to three times the active cells in the + mesh. It should store the components of the magnetization vector of each + active cell in :math:`Am^{-1}`. The order in which the components should + be passed are: + * every _easting_ component of each active cell, + * then every _northing_ component of each active cell, + * and finally every _upward_ component of each active cell. 
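+
+    For instance, a vector model could be assembled as follows (a sketch,
+    where ``chi_e``, ``chi_n`` and ``chi_u`` are hypothetical arrays holding
+    the easting, northing and upward components of every active cell):
+
+    .. code::
+
+        import numpy as np
+
+        model = np.concatenate([chi_e, chi_n, chi_u])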
+ + """ + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Forward model the magnetic component of each cell on each receiver location + for i in prange(n_receivers): + for j in range(n_cells): + # Define magnetization vector of the cell + # (we we'll divide by mu_0 when adding the forward modelled field) + if scalar_model: + # model is susceptibility, so the vector is parallel to the + # regional field + magnetization_x = model[j] * fx + magnetization_y = model[j] * fy + magnetization_z = model[j] * fz + else: + # model is effective susceptibility (vector) + magnetization_x = model[j] + magnetization_y = model[j + n_cells] + magnetization_z = model[j + 2 * n_cells] + # Forward the magnetic component + fields[i] += ( + regional_field_amplitude + * forward_func( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + magnetization_x, + magnetization_y, + magnetization_z, + ) + / choclo.constants.VACUUM_MAGNETIC_PERMEABILITY + ) + + +def _forward_tmi( + receivers, + cells_bounds, + top, + bottom, + model, + fields, + regional_field, + scalar_model, +): + """ + Forward model the TMI for 2D meshes. + + This function is designed to be used with equivalent sources, where the + mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell + are passed through the ``top`` and ``bottom`` arrays. + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + model : (n_active_cells) or (3 * n_active_cells) + Array with the susceptibility (scalar model) or the effective + susceptibility (vector model) of each active cell in the mesh. + If the model is scalar, the ``model`` array should have + ``n_active_cells`` elements and ``scalar_model`` should be True. + If the model is vector, the ``model`` array should have + ``3 * n_active_cells`` elements and ``scalar_model`` should be False. + fields : (n_receivers) array + Array full of zeros where the TMI on each receiver will be stored. This + could be a preallocated array or a slice of it. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + scalar_model : bool + If True, the forward will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the forward will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. 
+ + Notes + ----- + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. + * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array + with a number of elements equal to three times the active cells in the + mesh. It should store the components of the magnetization vector of each + active cell in :math:`Am^{-1}`. The order in which the components should + be passed are: + * every _easting_ component of each active cell, + * then every _northing_ component of each active cell, + * and finally every _upward_ component of each active cell. + + """ + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Forward model the magnetic component of each cell on each receiver location + for i in prange(n_receivers): + for j in range(n_cells): + # Define magnetization vector of the cell + # (we we'll divide by mu_0 when adding the forward modelled field) + if scalar_model: + # model is susceptibility, so the vector is parallel to the + # regional field + magnetization_x = model[j] * fx + magnetization_y = model[j] * fy + magnetization_z = model[j] * fz + else: + # model is effective susceptibility (vector) + magnetization_x = model[j] + magnetization_y = model[j + n_cells] + magnetization_z = model[j + 2 * n_cells] + # Forward the magnetic field vector and compute tmi + bx, by, bz = choclo.prism.magnetic_field( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + magnetization_x, + magnetization_y, + magnetization_z, + ) + fields[i] += ( + regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + / choclo.constants.VACUUM_MAGNETIC_PERMEABILITY + ) + + +def _forward_tmi_derivative( + receivers, + cells_bounds, + top, + bottom, + model, + fields, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + scalar_model, +): + r""" + Forward model a TMI derivative for 2D meshes. + + This function is designed to be used with equivalent sources, where the + mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell + are passed through the ``top`` and ``bottom`` arrays. + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_derivative) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + model : (n_active_cells) or (3 * n_active_cells) + Array with the susceptibility (scalar model) or the effective + susceptibility (vector model) of each active cell in the mesh. 
+ If the model is scalar, the ``model`` array should have + ``n_active_cells`` elements and ``scalar_model`` should be True. + If the model is vector, the ``model`` array should have + ``3 * n_active_cells`` elements and ``scalar_model`` should be False. + fields : (n_receivers) array + Array full of zeros where the TMI on each receiver will be stored. This + could be a preallocated array or a slice of it. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables + Kernel functions used for computing the desired TMI derivative. + scalar_model : bool + If True, the forward will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the forward will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + + Notes + ----- + + About the kernel functions + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + + To compute the :math:`\alpha` derivative of the TMI :math:`\Delta T` (with + :math:`\alpha \in \{x, y, z\}` we need to evaluate third order kernels + functions for the prism. The kernels we need to evaluate can be obtained by + fixing one of the subindices to the direction of the derivative + (:math:`\alpha`) and cycle through combinations of the other two. + + For ``tmi_x`` we need to pass: + + .. code:: + + kernel_xx=kernel_eee, kernel_yy=kernel_enn, kernel_zz=kernel_euu, + kernel_xy=kernel_een, kernel_xz=kernel_eeu, kernel_yz=kernel_enu + + For ``tmi_y`` we need to pass: + + .. code:: + + kernel_xx=kernel_een, kernel_yy=kernel_nnn, kernel_zz=kernel_nuu, + kernel_xy=kernel_enn, kernel_xz=kernel_enu, kernel_yz=kernel_nnu + + For ``tmi_z`` we need to pass: + + .. code:: + + kernel_xx=kernel_eeu, kernel_yy=kernel_nnu, kernel_zz=kernel_uuu, + kernel_xy=kernel_enu, kernel_xz=kernel_euu, kernel_yz=kernel_nuu + + + About the model array + ^^^^^^^^^^^^^^^^^^^^^ + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. + * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array + with a number of elements equal to three times the active cells in the + mesh. It should store the components of the magnetization vector of each + active cell in :math:`Am^{-1}`. The order in which the components should + be passed are: + * every _easting_ component of each active cell, + * then every _northing_ component of each active cell, + * and finally every _upward_ component of each active cell. 
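+
+    Putting both together, a compiled forward for the easting derivative of
+    the TMI could be built as follows (a sketch; the argument names are
+    illustrative and ``choclo`` must be installed):
+
+    .. code::
+
+        from numba import jit
+        from choclo.prism import (
+            kernel_eee, kernel_enn, kernel_euu, kernel_een, kernel_eeu, kernel_enu,
+        )
+
+        jit_tmi_x = jit(nopython=True, parallel=True)(_forward_tmi_derivative)
+        jit_tmi_x(
+            receivers, cells_bounds, top, bottom, model, fields, regional_field,
+            kernel_eee, kernel_enn, kernel_euu, kernel_een, kernel_eeu, kernel_enu,
+            scalar_model,
+        )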
+ + """ + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Forward model the magnetic component of each cell on each receiver location + for i in prange(n_receivers): + for j in range(n_cells): + uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + ) + if scalar_model: + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + fields[i] += ( + model[j] + * regional_field_amplitude + * (fx * bx + fy * by + fz * bz) + / (4 * np.pi) + ) + else: + model_x = model[j] + model_y = model[j + n_cells] + model_z = model[j + 2 * n_cells] + bx = uxx * model_x + uxy * model_y + uxz * model_z + by = uxy * model_x + uyy * model_y + uyz * model_z + bz = uxz * model_x + uyz * model_y + uzz * model_z + fields[i] += ( + regional_field_amplitude * (bx * fx + by * fy + bz * fz) / 4 / np.pi + ) + + +def _sensitivity_mag( + receivers, + cells_bounds, + top, + bottom, + sensitivity_matrix, + regional_field, + kernel_x, + kernel_y, + kernel_z, + scalar_model, +): + r""" + Fill the sensitivity matrix for single mag component for 2d meshes. + + This function is designed to be used with equivalent sources, where the + mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell + are passed through the ``top`` and ``bottom`` arrays. + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_sensitivity = jit(nopython=True, parallel=True)(_sensitivity_mag) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + sensitivity_matrix : array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + The array should have a shape of ``(n_receivers, n_active_cells)`` + if ``scalar_model`` is True. + The array should have a shape of ``(n_receivers, 3 * n_active_cells)`` + if ``scalar_model`` is False. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + scalar_model : bool + If True, the sensitivity matrix is built to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is built to work with vector models + (effective susceptibilities). 
+ + Notes + ----- + + About the kernel functions + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + + For computing the ``bx`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu + + + For computing the ``by`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu + + For computing the ``bz`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu + + + About the model array + ^^^^^^^^^^^^^^^^^^^^^ + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. + * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array + with a number of elements equal to three times the active cells in the + mesh. It should store the components of the magnetization vector of each + active cell in :math:`Am^{-1}`. The order in which the components should + be passed are: + * every _easting_ component of each active cell, + * then every _northing_ component of each active cell, + * and finally every _upward_ component of each active cell. + + About the sensitivity matrix + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Each row of the sensitivity matrix corresponds to a single receiver + location. + + If ``scalar_model`` is True, then each element of the row will + correspond to the partial derivative of the selected magnetic component + with respect to the susceptibility of each cell in the mesh. + + If ``scalar_model`` is False, then each row can be split in three sections + containing: + + * the partial derivatives of the selected magnetic component with respect + to the _x_ component of the effective susceptibility of each cell; then + * the partial derivatives of the selected magnetic component with respect + to the _y_ component of the effective susceptibility of each cell; and then + * the partial derivatives of the selected magnetic component with respect + to the _z_ component of the effective susceptibility of each cell. + + So, if we call :math:`B_j` the magnetic field component on the receiver + :math:`j`, and :math:`\bar{\chi}^{(i)} = (\chi_x^{(i)}, \chi_y^{(i)}, + \chi_z^{(i)})` the effective susceptibility of the active cell :math:`i`, + then each row of the sensitivity matrix will be: + + .. math:: + + \left[ + \frac{\partial B_j}{\partial \chi_x^{(1)}}, + \dots, + \frac{\partial B_j}{\partial \chi_x^{(N)}}, + \frac{\partial B_j}{\partial \chi_y^{(1)}}, + \dots, + \frac{\partial B_j}{\partial \chi_y^{(N)}}, + \frac{\partial B_j}{\partial \chi_z^{(1)}}, + \dots, + \frac{\partial B_j}{\partial \chi_z^{(N)}} + \right] + + where :math:`N` is the total number of active cells. 
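+
+    A row of the vector-model sensitivity matrix can therefore be split back
+    into its three component blocks (a sketch, with ``n_cells`` active cells):
+
+    .. code::
+
+        row = sensitivity_matrix[i, :]
+        d_chi_x = row[:n_cells]
+        d_chi_y = row[n_cells : 2 * n_cells]
+        d_chi_z = row[2 * n_cells :]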
+ """ + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + + constant_factor = 1 / 4 / np.pi + + # Fill the sensitivity matrix + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + for i in prange(n_receivers): + for j in range(n_cells): + # Evaluate kernels for the current cell and receiver + ux, uy, uz = evaluate_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + kernel_x, + kernel_y, + kernel_z, + ) + if scalar_model: + sensitivity_matrix[i, j] = ( + constant_factor + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + sensitivity_matrix[i, j] = ( + constant_factor * regional_field_amplitude * ux + ) + sensitivity_matrix[i, j + n_cells] = ( + constant_factor * regional_field_amplitude * uy + ) + sensitivity_matrix[i, j + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * uz + ) + + +def _sensitivity_tmi( + receivers, + cells_bounds, + top, + bottom, + sensitivity_matrix, + regional_field, + scalar_model, +): + r""" + Fill the sensitivity matrix TMI for 2d meshes. + + This function is designed to be used with equivalent sources, where the + mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell + are passed through the ``top`` and ``bottom`` arrays. + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + sensitivity_matrix : array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + The array should have a shape of ``(n_receivers, n_active_cells)`` + if ``scalar_model`` is True. + The array should have a shape of ``(n_receivers, 3 * n_active_cells)`` + if ``scalar_model`` is False. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). + + Notes + ----- + + About the model array + ^^^^^^^^^^^^^^^^^^^^^ + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. + * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array + with a number of elements equal to three times the active cells in the + mesh. 
It should store the components of the magnetization vector of each + active cell in :math:`Am^{-1}`. The order in which the components should + be passed are: + * every _easting_ component of each active cell, + * then every _northing_ component of each active cell, + * and finally every _upward_ component of each active cell. + + About the sensitivity matrix + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Each row of the sensitivity matrix corresponds to a single receiver + location. + + If ``scalar_model`` is True, then each element of the row will + correspond to the partial derivative of the tmi + with respect to the susceptibility of each cell in the mesh. + + If ``scalar_model`` is False, then each row can be split in three sections + containing: + + * the partial derivatives of the tmi with respect + to the _x_ component of the effective susceptibility of each cell; then + * the partial derivatives of the tmi with respect + to the _y_ component of the effective susceptibility of each cell; and then + * the partial derivatives of the tmi with respect + to the _z_ component of the effective susceptibility of each cell. + + So, if we call :math:`T_j` the tmi on the receiver + :math:`j`, and :math:`\bar{\chi}^{(i)} = (\chi_x^{(i)}, \chi_y^{(i)}, + \chi_z^{(i)})` the effective susceptibility of the active cell :math:`i`, + then each row of the sensitivity matrix will be: + + .. math:: + + \left[ + \frac{\partial T_j}{\partial \chi_x^{(1)}}, + \dots, + \frac{\partial T_j}{\partial \chi_x^{(N)}}, + \frac{\partial T_j}{\partial \chi_y^{(1)}}, + \dots, + \frac{\partial T_j}{\partial \chi_y^{(N)}}, + \frac{\partial T_j}{\partial \chi_z^{(1)}}, + \dots, + \frac{\partial T_j}{\partial \chi_z^{(N)}} + \right] + + where :math:`N` is the total number of active cells. + """ + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + + constant_factor = 1 / 4 / np.pi + + # Fill the sensitivity matrix + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + for i in prange(n_receivers): + for j in range(n_cells): + # Evaluate kernels for the current cell and receiver + uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + choclo.prism.kernel_ee, + choclo.prism.kernel_nn, + choclo.prism.kernel_uu, + choclo.prism.kernel_en, + choclo.prism.kernel_eu, + choclo.prism.kernel_nu, + ) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + if scalar_model: + sensitivity_matrix[i, j] = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + sensitivity_matrix[i, j] = ( + constant_factor * regional_field_amplitude * bx + ) + sensitivity_matrix[i, j + n_cells] = ( + constant_factor * regional_field_amplitude * by + ) + sensitivity_matrix[i, j + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * bz + ) + + +def _sensitivity_tmi_derivative( + receivers, + cells_bounds, + top, + bottom, + sensitivity_matrix, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + scalar_model, +): + r""" + Fill the sensitivity matrix TMI for 2d meshes. + + This function is designed to be used with equivalent sources, where the + mesh is a 2D mesh (prism layer). 
The top and bottom boundaries of each cell + are passed through the ``top`` and ``bottom`` arrays. + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi_derivative) + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + sensitivity_matrix : array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + The array should have a shape of ``(n_receivers, n_active_cells)`` + if ``scalar_model`` is True. + The array should have a shape of ``(n_receivers, 3 * n_active_cells)`` + if ``scalar_model`` is False. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables + Kernel functions used for computing the desired TMI derivative. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). + + Notes + ----- + + About the model array + ^^^^^^^^^^^^^^^^^^^^^ + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. + * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array + with a number of elements equal to three times the active cells in the + mesh. It should store the components of the magnetization vector of each + active cell in :math:`Am^{-1}`. The order in which the components should + be passed are: + * every _easting_ component of each active cell, + * then every _northing_ component of each active cell, + * and finally every _upward_ component of each active cell. 
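+
+    The expected shape of ``sensitivity_matrix`` follows the same layout (a
+    sketch, assuming ``n_receivers`` and ``n_active_cells`` are known):
+
+    .. code::
+
+        import numpy as np
+
+        n_columns = n_active_cells if scalar_model else 3 * n_active_cells
+        sensitivity_matrix = np.empty((n_receivers, n_columns), dtype=np.float64)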
+ """ + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + + constant_factor = 1 / 4 / np.pi + + # Fill the sensitivity matrix + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + for i in prange(n_receivers): + for j in range(n_cells): + # Evaluate kernels for the current cell and receiver + uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + ) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + if scalar_model: + sensitivity_matrix[i, j] = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + sensitivity_matrix[i, j] = ( + constant_factor * regional_field_amplitude * bx + ) + sensitivity_matrix[i, j + n_cells] = ( + constant_factor * regional_field_amplitude * by + ) + sensitivity_matrix[i, j + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * bz + ) + + +@jit(nopython=True, parallel=False) +def _tmi_sensitivity_t_dot_v_serial( + receivers, + cells_bounds, + top, + bottom, + regional_field, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` for TMI on 2d meshes, in serial. + + This function doesn't allocates the ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in serial. Writing to the ``result`` array + inside a parallel loop over the receivers generates a race condition that + leads to corrupted outputs. + + A parallel implementation of this function is available in + ``_tmi_sensitivity_t_dot_v_parallel``. 
+ """ + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + + constant_factor = 1 / 4 / np.pi + + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in range(n_receivers): + for j in range(n_cells): + # Evaluate kernels for the current cell and receiver + uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + choclo.prism.kernel_ee, + choclo.prism.kernel_nn, + choclo.prism.kernel_uu, + choclo.prism.kernel_en, + choclo.prism.kernel_eu, + choclo.prism.kernel_nu, + ) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + if scalar_model: + result[j] += ( + constant_factor + * vector[i] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + result[j] += constant_factor * vector[i] * regional_field_amplitude * bx + result[j + n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * by + ) + result[j + 2 * n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * bz + ) + + +@jit(nopython=True, parallel=True) +def _tmi_sensitivity_t_dot_v_parallel( + receivers, + cells_bounds, + top, + bottom, + regional_field, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` for TMI on 2d meshes, in parallel. + + This function doesn't allocates the ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in parallel. + This implementation instructs each thread to allocate their own array for + the current row of the sensitivity matrix. After computing the elements of + that row, it gets added to the running ``result`` array through a reduction + operation handled by Numba. + + A parallel implementation of this function is available in + ``_tmi_sensitivity_t_dot_v_serial``. 
+ """ + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + + constant_factor = 1 / 4 / np.pi + + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + result_size = result.size + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate array for the current row of the sensitivity matrix + local_row = np.empty(result_size) + for j in range(n_cells): + # Evaluate kernels for the current cell and receiver + uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + choclo.prism.kernel_ee, + choclo.prism.kernel_nn, + choclo.prism.kernel_uu, + choclo.prism.kernel_en, + choclo.prism.kernel_eu, + choclo.prism.kernel_nu, + ) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + if scalar_model: + local_row[j] = ( + constant_factor + * vector[i] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + local_row[j] = ( + constant_factor * vector[i] * regional_field_amplitude * bx + ) + local_row[j + n_cells] = ( + constant_factor * vector[i] * regional_field_amplitude * by + ) + local_row[j + 2 * n_cells] = ( + constant_factor * vector[i] * regional_field_amplitude * bz + ) + # Apply reduction operation to add the values of the row to the running + # result. Avoid slicing the `result` array when updating it to avoid + # racing conditions, just add the `local_row` to the `results` + # variable. + result += local_row + + +@jit(nopython=True, parallel=False) +def _mag_sensitivity_t_dot_v_serial( + receivers, + cells_bounds, + top, + bottom, + regional_field, + kernel_x, + kernel_y, + kernel_z, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` for a single magnetic component on 2d meshes, in serial. + + This function doesn't allocates the ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. 
+ result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in serial. Writing to the ``result`` array + inside a parallel loop over the receivers generates a race condition that + leads to corrupted outputs. + + A parallel implementation of this function is available in + ``_mag_sensitivity_t_dot_v_parallel``. + """ + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + + constant_factor = 1 / 4 / np.pi + + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in range(n_receivers): + for j in range(n_cells): + # Evaluate kernels for the current cell and receiver + ux, uy, uz = evaluate_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + kernel_x, + kernel_y, + kernel_z, + ) + if scalar_model: + result[j] += ( + constant_factor + * vector[i] + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + result[j] += constant_factor * vector[i] * regional_field_amplitude * ux + result[j + n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * uy + ) + result[j + 2 * n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * uz + ) + + +@jit(nopython=True, parallel=True) +def _mag_sensitivity_t_dot_v_parallel( + receivers, + cells_bounds, + top, + bottom, + regional_field, + kernel_x, + kernel_y, + kernel_z, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` for a single magnetic component on 2d meshes, in parallel. + + This function doesn't allocates the ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. 
+        The array should have ``n_active_cells`` elements if ``scalar_model``
+        is True, or ``3 * n_active_cells`` otherwise.
+
+    Notes
+    -----
+    This function is meant to be run in parallel.
+    This implementation instructs each thread to allocate its own array for
+    the current row of the sensitivity matrix. After computing the elements of
+    that row, it gets added to the running ``result`` array through a reduction
+    operation handled by Numba.
+
+    A serial implementation of this function is available in
+    ``_mag_sensitivity_t_dot_v_serial``.
+    """
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+
+    constant_factor = 1 / 4 / np.pi
+
+    n_receivers = receivers.shape[0]
+    n_cells = cells_bounds.shape[0]
+    result_size = result.size
+    # Evaluate kernel function on each node, for each receiver location
+    for i in prange(n_receivers):
+        # Allocate array for the current row of the sensitivity matrix
+        local_row = np.empty(result_size)
+        for j in range(n_cells):
+            # Evaluate kernels for the current cell and receiver
+            ux, uy, uz = evaluate_kernels_on_cell(
+                receivers[i, 0],
+                receivers[i, 1],
+                receivers[i, 2],
+                cells_bounds[j, 0],
+                cells_bounds[j, 1],
+                cells_bounds[j, 2],
+                cells_bounds[j, 3],
+                bottom[j],
+                top[j],
+                kernel_x,
+                kernel_y,
+                kernel_z,
+            )
+            if scalar_model:
+                local_row[j] = (
+                    constant_factor
+                    * vector[i]
+                    * regional_field_amplitude
+                    * (ux * fx + uy * fy + uz * fz)
+                )
+            else:
+                local_row[j] = (
+                    constant_factor * vector[i] * regional_field_amplitude * ux
+                )
+                local_row[j + n_cells] = (
+                    constant_factor * vector[i] * regional_field_amplitude * uy
+                )
+                local_row[j + 2 * n_cells] = (
+                    constant_factor * vector[i] * regional_field_amplitude * uz
+                )
+        # Apply reduction operation to add the values of the row to the running
+        # result. Avoid slicing the `result` array when updating it to avoid
+        # race conditions, just add the `local_row` to the `result`
+        # variable.
+        result += local_row
+
+
+@jit(nopython=True, parallel=False)
+def _tmi_derivative_sensitivity_t_dot_v_serial(
+    receivers,
+    cells_bounds,
+    top,
+    bottom,
+    regional_field,
+    kernel_xx,
+    kernel_yy,
+    kernel_zz,
+    kernel_xy,
+    kernel_xz,
+    kernel_yz,
+    scalar_model,
+    vector,
+    result,
+):
+    r"""
+    Compute ``G.T @ v`` for a TMI derivative on 2d meshes, in serial.
+
+    This function doesn't allocate the ``G`` matrix in memory.
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) numpy.ndarray
+        Array with the locations of the receivers
+    cells_bounds : (n_active_cells, 4) numpy.ndarray
+        Array with the bounds of each active cell in the 2D mesh. For each row, the
+        bounds should be passed in the following order: ``x_min``, ``x_max``,
+        ``y_min``, ``y_max``.
+    top : (n_active_cells) np.ndarray
+        Array with the top boundaries of each active cell in the 2D mesh.
+    bottom : (n_active_cells) np.ndarray
+        Array with the bottom boundaries of each active cell in the 2D mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables
+        Kernel functions used for computing the desired TMI derivative.
+    scalar_model : bool
+        If True, the result will be computed assuming that the ``model`` has
+        susceptibilities (scalar model) for each active cell.
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    vector : (n_receivers) numpy.ndarray
+        Array that represents the vector used in the dot product.
+    result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray
+        Running result array where the output of the dot product will be added to.
+        The array should have ``n_active_cells`` elements if ``scalar_model``
+        is True, or ``3 * n_active_cells`` otherwise.
+
+    Notes
+    -----
+    This function is meant to be run in serial. Writing to the ``result`` array
+    inside a parallel loop over the receivers generates a race condition that
+    leads to corrupted outputs.
+
+    A parallel implementation of this function is available in
+    ``_tmi_derivative_sensitivity_t_dot_v_parallel``.
+    """
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+
+    constant_factor = 1 / 4 / np.pi
+
+    n_receivers = receivers.shape[0]
+    n_cells = cells_bounds.shape[0]
+    # Evaluate kernel function on each node, for each receiver location
+    for i in range(n_receivers):
+        for j in range(n_cells):
+            # Evaluate kernels for the current cell and receiver
+            uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell(
+                receivers[i, 0],
+                receivers[i, 1],
+                receivers[i, 2],
+                cells_bounds[j, 0],
+                cells_bounds[j, 1],
+                cells_bounds[j, 2],
+                cells_bounds[j, 3],
+                bottom[j],
+                top[j],
+                kernel_xx,
+                kernel_yy,
+                kernel_zz,
+                kernel_xy,
+                kernel_xz,
+                kernel_yz,
+            )
+            bx = uxx * fx + uxy * fy + uxz * fz
+            by = uxy * fx + uyy * fy + uyz * fz
+            bz = uxz * fx + uyz * fy + uzz * fz
+            if scalar_model:
+                result[j] += (
+                    constant_factor
+                    * vector[i]
+                    * regional_field_amplitude
+                    * (bx * fx + by * fy + bz * fz)
+                )
+            else:
+                result[j] += constant_factor * vector[i] * regional_field_amplitude * bx
+                result[j + n_cells] += (
+                    constant_factor * vector[i] * regional_field_amplitude * by
+                )
+                result[j + 2 * n_cells] += (
+                    constant_factor * vector[i] * regional_field_amplitude * bz
+                )
+
+
+@jit(nopython=True, parallel=True)
+def _tmi_derivative_sensitivity_t_dot_v_parallel(
+    receivers,
+    cells_bounds,
+    top,
+    bottom,
+    regional_field,
+    kernel_xx,
+    kernel_yy,
+    kernel_zz,
+    kernel_xy,
+    kernel_xz,
+    kernel_yz,
+    scalar_model,
+    vector,
+    result,
+):
+    r"""
+    Compute ``G.T @ v`` for a TMI derivative on 2d meshes, in parallel.
+
+    This function doesn't allocate the ``G`` matrix in memory.
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) numpy.ndarray
+        Array with the locations of the receivers
+    cells_bounds : (n_active_cells, 4) numpy.ndarray
+        Array with the bounds of each active cell in the 2D mesh. For each row, the
+        bounds should be passed in the following order: ``x_min``, ``x_max``,
+        ``y_min``, ``y_max``.
+    top : (n_active_cells) np.ndarray
+        Array with the top boundaries of each active cell in the 2D mesh.
+    bottom : (n_active_cells) np.ndarray
+        Array with the bottom boundaries of each active cell in the 2D mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables
+        Kernel functions used for computing the desired TMI derivative.
+    scalar_model : bool
+        If True, the result will be computed assuming that the ``model`` has
+        susceptibilities (scalar model) for each active cell.
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    vector : (n_receivers) numpy.ndarray
+        Array that represents the vector used in the dot product.
+    result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray
+        Running result array where the output of the dot product will be added to.
+ If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in parallel. + This implementation instructs each thread to allocate their own array for + the current row of the sensitivity matrix. After computing the elements of + that row, it gets added to the running ``result`` array through a reduction + operation handled by Numba. + + A parallel implementation of this function is available in + ``_tmi_derivative_sensitivity_t_dot_v_parallel``. + """ + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + + constant_factor = 1 / 4 / np.pi + + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + result_size = result.size + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate array for the current row of the sensitivity matrix + local_row = np.empty(result_size) + for j in range(n_cells): + # Evaluate kernels for the current cell and receiver + uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + ) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + if scalar_model: + local_row[j] = ( + constant_factor + * vector[i] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + local_row[j] = ( + constant_factor * vector[i] * regional_field_amplitude * bx + ) + local_row[j + n_cells] = ( + constant_factor * vector[i] * regional_field_amplitude * by + ) + local_row[j + 2 * n_cells] = ( + constant_factor * vector[i] * regional_field_amplitude * bz + ) + # Apply reduction operation to add the values of the row to the running + # result. Avoid slicing the `result` array when updating it to avoid + # racing conditions, just add the `local_row` to the `results` + # variable. + result += local_row + + +@jit(nopython=True, parallel=False) +def _diagonal_G_T_dot_G_tmi_serial( + receivers, + cells_bounds, + top, + bottom, + regional_field, + constant_factor, + scalar_model, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` for TMI without storing ``G``, in serial. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. 
+ regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + weights : (n_receivers,) numpy.ndarray + Array with data weights. It should be the diagonal of the ``W`` matrix, + squared. + diagonal : (n_active_cells,) numpy.ndarray + Array where the diagonal of ``G.T @ G`` will be added to. + + Notes + ----- + This function is meant to be run in serial. Use the + ``_diagonal_G_T_dot_G_tmi_parallel`` one for parallelized computations. + """ + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + + constant_factor = 1 / 4 / np.pi + + n_receivers = receivers.shape[0] + n_cells = cells_bounds.shape[0] + # Evaluate kernel function on each node, for each receiver location + for i in range(n_receivers): + for j in range(n_cells): + # Evaluate kernels for the current cell and receiver + uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell( + receivers[i, 0], + receivers[i, 1], + receivers[i, 2], + cells_bounds[j, 0], + cells_bounds[j, 1], + cells_bounds[j, 2], + cells_bounds[j, 3], + bottom[j], + top[j], + choclo.prism.kernel_ee, + choclo.prism.kernel_nn, + choclo.prism.kernel_uu, + choclo.prism.kernel_en, + choclo.prism.kernel_eu, + choclo.prism.kernel_nu, + ) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + + if scalar_model: + g_element = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + diagonal[j] += weights[i] * g_element**2 + else: + const = constant_factor * regional_field_amplitude + diagonal[j] += weights[i] * (const * bx) ** 2 + diagonal[j + n_cells] += weights[i] * (const * by) ** 2 + diagonal[j + 2 * n_cells] += weights[i] * (const * bz) ** 2 + + +@jit(nopython=True, parallel=True) +def _diagonal_G_T_dot_G_tmi_parallel( + receivers, + cells_bounds, + top, + bottom, + regional_field, + constant_factor, + scalar_model, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` for TMI without storing ``G``, in parallel. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + cells_bounds : (n_active_cells, 4) numpy.ndarray + Array with the bounds of each active cell in the 2D mesh. For each row, the + bounds should be passed in the following order: ``x_min``, ``x_max``, + ``y_min``, ``y_max``. + top : (n_active_cells) np.ndarray + Array with the top boundaries of each active cell in the 2D mesh. + bottom : (n_active_cells) np.ndarray + Array with the bottom boundaries of each active cell in the 2D mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. 
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    weights : (n_receivers,) numpy.ndarray
+        Array with data weights. It should be the diagonal of the ``W`` matrix,
+        squared.
+    diagonal : (n_active_cells,) numpy.ndarray
+        Array where the diagonal of ``G.T @ G`` will be added to.
+
+    Notes
+    -----
+    This function is meant to be run in parallel. Use the
+    ``_diagonal_G_T_dot_G_tmi_serial`` one for serial computations.
+    """
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+    diagonal_size = diagonal.size
+    constant_factor = 1 / 4 / np.pi
+    n_receivers = receivers.shape[0]
+    n_cells = cells_bounds.shape[0]
+    # Evaluate kernel function on each node, for each receiver location
+    for i in prange(n_receivers):
+        # Allocate array for the diagonal elements for the current receiver.
+        local_diagonal = np.empty(diagonal_size)
+        for j in range(n_cells):
+            # Evaluate kernels for the current cell and receiver
+            uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell(
+                receivers[i, 0],
+                receivers[i, 1],
+                receivers[i, 2],
+                cells_bounds[j, 0],
+                cells_bounds[j, 1],
+                cells_bounds[j, 2],
+                cells_bounds[j, 3],
+                bottom[j],
+                top[j],
+                choclo.prism.kernel_ee,
+                choclo.prism.kernel_nn,
+                choclo.prism.kernel_uu,
+                choclo.prism.kernel_en,
+                choclo.prism.kernel_eu,
+                choclo.prism.kernel_nu,
+            )
+            bx = uxx * fx + uxy * fy + uxz * fz
+            by = uxy * fx + uyy * fy + uyz * fz
+            bz = uxz * fx + uyz * fy + uzz * fz
+
+            if scalar_model:
+                g_element = (
+                    constant_factor
+                    * regional_field_amplitude
+                    * (bx * fx + by * fy + bz * fz)
+                )
+                local_diagonal[j] = weights[i] * g_element**2
+            else:
+                const = constant_factor * regional_field_amplitude
+                local_diagonal[j] = weights[i] * (const * bx) ** 2
+                local_diagonal[j + n_cells] = weights[i] * (const * by) ** 2
+                local_diagonal[j + 2 * n_cells] = weights[i] * (const * bz) ** 2
+        # Add the result to the diagonal.
+        # Apply reduction operation to add the values of the local diagonal to
+        # the running diagonal array. Avoid slicing the `diagonal` array when
+        # updating it to avoid race conditions, just add the `local_diagonal`
+        # to the `diagonal` variable.
+        diagonal += local_diagonal
+
+
+@jit(nopython=True, parallel=False)
+def _diagonal_G_T_dot_G_mag_serial(
+    receivers,
+    cells_bounds,
+    top,
+    bottom,
+    regional_field,
+    kernel_x,
+    kernel_y,
+    kernel_z,
+    constant_factor,
+    scalar_model,
+    weights,
+    diagonal,
+):
+    """
+    Diagonal of ``G.T @ W.T @ W @ G`` for a magnetic component without
+    storing ``G``, in serial.
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) numpy.ndarray
+        Array with the locations of the receivers
+    cells_bounds : (n_active_cells, 4) numpy.ndarray
+        Array with the bounds of each active cell in the 2D mesh. For each row, the
+        bounds should be passed in the following order: ``x_min``, ``x_max``,
+        ``y_min``, ``y_max``.
+    top : (n_active_cells) np.ndarray
+        Array with the top boundaries of each active cell in the 2D mesh.
+    bottom : (n_active_cells) np.ndarray
+        Array with the bottom boundaries of each active cell in the 2D mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_x, kernel_y, kernel_z : callable
+        Kernels used to compute the desired magnetic component. For example,
+        for computing bx we need to use ``kernel_x=kernel_ee``,
+        ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``.
+    constant_factor : float
+        Constant factor that will be used to multiply each element of the
+        sensitivity matrix.
+    scalar_model : bool
+        If True, the result will be computed assuming that the ``model`` has
+        susceptibilities (scalar model) for each active cell.
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    weights : (n_receivers,) numpy.ndarray
+        Array with data weights. It should be the diagonal of the ``W`` matrix,
+        squared.
+    diagonal : (n_active_cells,) numpy.ndarray
+        Array where the diagonal of ``G.T @ G`` will be added to.
+
+    Notes
+    -----
+    This function is meant to be run in serial. Use the
+    ``_diagonal_G_T_dot_G_mag_parallel`` one for parallelized computations.
+    """
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+
+    constant_factor = 1 / 4 / np.pi
+
+    n_receivers = receivers.shape[0]
+    n_cells = cells_bounds.shape[0]
+    # Evaluate kernel function on each node, for each receiver location
+    for i in range(n_receivers):
+        for j in range(n_cells):
+            # Evaluate kernels for the current cell and receiver
+            ux, uy, uz = evaluate_kernels_on_cell(
+                receivers[i, 0],
+                receivers[i, 1],
+                receivers[i, 2],
+                cells_bounds[j, 0],
+                cells_bounds[j, 1],
+                cells_bounds[j, 2],
+                cells_bounds[j, 3],
+                bottom[j],
+                top[j],
+                kernel_x,
+                kernel_y,
+                kernel_z,
+            )
+            if scalar_model:
+                g_element = (
+                    constant_factor
+                    * regional_field_amplitude
+                    * (ux * fx + uy * fy + uz * fz)
+                )
+                diagonal[j] += weights[i] * g_element**2
+            else:
+                const = constant_factor * regional_field_amplitude
+                diagonal[j] += weights[i] * (const * ux) ** 2
+                diagonal[j + n_cells] += weights[i] * (const * uy) ** 2
+                diagonal[j + 2 * n_cells] += weights[i] * (const * uz) ** 2
+
+
+@jit(nopython=True, parallel=True)
+def _diagonal_G_T_dot_G_mag_parallel(
+    receivers,
+    cells_bounds,
+    top,
+    bottom,
+    regional_field,
+    kernel_x,
+    kernel_y,
+    kernel_z,
+    constant_factor,
+    scalar_model,
+    weights,
+    diagonal,
+):
+    """
+    Diagonal of ``G.T @ W.T @ W @ G`` for a magnetic component without
+    storing ``G``, in parallel.
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) numpy.ndarray
+        Array with the locations of the receivers
+    cells_bounds : (n_active_cells, 4) numpy.ndarray
+        Array with the bounds of each active cell in the 2D mesh. For each row, the
+        bounds should be passed in the following order: ``x_min``, ``x_max``,
+        ``y_min``, ``y_max``.
+    top : (n_active_cells) np.ndarray
+        Array with the top boundaries of each active cell in the 2D mesh.
+    bottom : (n_active_cells) np.ndarray
+        Array with the bottom boundaries of each active cell in the 2D mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_x, kernel_y, kernel_z : callable
+        Kernels used to compute the desired magnetic component. For example,
+        for computing bx we need to use ``kernel_x=kernel_ee``,
+        ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``.
+    constant_factor : float
+        Constant factor that will be used to multiply each element of the
+        sensitivity matrix.
+    scalar_model : bool
+        If True, the result will be computed assuming that the ``model`` has
+        susceptibilities (scalar model) for each active cell.
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    weights : (n_receivers,) numpy.ndarray
+        Array with data weights. It should be the diagonal of the ``W`` matrix,
+        squared.
+    diagonal : (n_active_cells,) numpy.ndarray
+        Array where the diagonal of ``G.T @ G`` will be added to.
+
+    Notes
+    -----
+    This function is meant to be run in parallel. Use the
+    ``_diagonal_G_T_dot_G_mag_serial`` one for serial computations.
+    """
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+    diagonal_size = diagonal.size
+    constant_factor = 1 / 4 / np.pi
+    n_receivers = receivers.shape[0]
+    n_cells = cells_bounds.shape[0]
+    # Evaluate kernel function on each node, for each receiver location
+    for i in prange(n_receivers):
+        # Allocate array for the diagonal elements for the current receiver.
+        local_diagonal = np.empty(diagonal_size)
+        for j in range(n_cells):
+            # Evaluate kernels for the current cell and receiver
+            ux, uy, uz = evaluate_kernels_on_cell(
+                receivers[i, 0],
+                receivers[i, 1],
+                receivers[i, 2],
+                cells_bounds[j, 0],
+                cells_bounds[j, 1],
+                cells_bounds[j, 2],
+                cells_bounds[j, 3],
+                bottom[j],
+                top[j],
+                kernel_x,
+                kernel_y,
+                kernel_z,
+            )
+            if scalar_model:
+                g_element = (
+                    constant_factor
+                    * regional_field_amplitude
+                    * (ux * fx + uy * fy + uz * fz)
+                )
+                local_diagonal[j] = weights[i] * g_element**2
+            else:
+                const = constant_factor * regional_field_amplitude
+                local_diagonal[j] = weights[i] * (const * ux) ** 2
+                local_diagonal[j + n_cells] = weights[i] * (const * uy) ** 2
+                local_diagonal[j + 2 * n_cells] = weights[i] * (const * uz) ** 2
+        # Add the result to the diagonal.
+        # Apply reduction operation to add the values of the local diagonal to
+        # the running diagonal array. Avoid slicing the `diagonal` array when
+        # updating it to avoid race conditions, just add the `local_diagonal`
+        # to the `diagonal` variable.
+        diagonal += local_diagonal
+
+
+@jit(nopython=True, parallel=False)
+def _diagonal_G_T_dot_G_tmi_deriv_serial(
+    receivers,
+    cells_bounds,
+    top,
+    bottom,
+    regional_field,
+    kernel_xx,
+    kernel_yy,
+    kernel_zz,
+    kernel_xy,
+    kernel_xz,
+    kernel_yz,
+    constant_factor,
+    scalar_model,
+    weights,
+    diagonal,
+):
+    """
+    Diagonal of ``G.T @ W.T @ W @ G`` for TMI derivative, in serial.
+
+    This function doesn't need to store the ``G`` matrix in memory.
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) numpy.ndarray
+        Array with the locations of the receivers
+    cells_bounds : (n_active_cells, 4) numpy.ndarray
+        Array with the bounds of each active cell in the 2D mesh. For each row, the
+        bounds should be passed in the following order: ``x_min``, ``x_max``,
+        ``y_min``, ``y_max``.
+    top : (n_active_cells) np.ndarray
+        Array with the top boundaries of each active cell in the 2D mesh.
+    bottom : (n_active_cells) np.ndarray
+        Array with the bottom boundaries of each active cell in the 2D mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables
+        Kernel functions used for computing the desired TMI derivative.
+    constant_factor : float
+        Constant factor that will be used to multiply each element of the
+        sensitivity matrix.
+    scalar_model : bool
+        If True, the result will be computed assuming that the ``model`` has
+        susceptibilities (scalar model) for each active cell.
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    weights : (n_receivers,) numpy.ndarray
+        Array with data weights. It should be the diagonal of the ``W`` matrix,
+        squared.
+    diagonal : (n_active_cells,) numpy.ndarray
+        Array where the diagonal of ``G.T @ G`` will be added to.
+
+    Notes
+    -----
+    This function is meant to be run in serial. Use the
+    ``_diagonal_G_T_dot_G_tmi_deriv_parallel`` one for parallelized computations.
+    """
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+
+    constant_factor = 1 / 4 / np.pi
+
+    n_receivers = receivers.shape[0]
+    n_cells = cells_bounds.shape[0]
+    # Evaluate kernel function on each node, for each receiver location
+    for i in range(n_receivers):
+        for j in range(n_cells):
+            # Evaluate kernels for the current cell and receiver
+            uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell(
+                receivers[i, 0],
+                receivers[i, 1],
+                receivers[i, 2],
+                cells_bounds[j, 0],
+                cells_bounds[j, 1],
+                cells_bounds[j, 2],
+                cells_bounds[j, 3],
+                bottom[j],
+                top[j],
+                kernel_xx,
+                kernel_yy,
+                kernel_zz,
+                kernel_xy,
+                kernel_xz,
+                kernel_yz,
+            )
+            bx = uxx * fx + uxy * fy + uxz * fz
+            by = uxy * fx + uyy * fy + uyz * fz
+            bz = uxz * fx + uyz * fy + uzz * fz
+
+            if scalar_model:
+                g_element = (
+                    constant_factor
+                    * regional_field_amplitude
+                    * (bx * fx + by * fy + bz * fz)
+                )
+                diagonal[j] += weights[i] * g_element**2
+            else:
+                const = constant_factor * regional_field_amplitude
+                diagonal[j] += weights[i] * (const * bx) ** 2
+                diagonal[j + n_cells] += weights[i] * (const * by) ** 2
+                diagonal[j + 2 * n_cells] += weights[i] * (const * bz) ** 2
+
+
+@jit(nopython=True, parallel=True)
+def _diagonal_G_T_dot_G_tmi_deriv_parallel(
+    receivers,
+    cells_bounds,
+    top,
+    bottom,
+    regional_field,
+    kernel_xx,
+    kernel_yy,
+    kernel_zz,
+    kernel_xy,
+    kernel_xz,
+    kernel_yz,
+    constant_factor,
+    scalar_model,
+    weights,
+    diagonal,
+):
+    """
+    Diagonal of ``G.T @ W.T @ W @ G`` for TMI derivative without storing ``G``,
+    in parallel.
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) numpy.ndarray
+        Array with the locations of the receivers
+    cells_bounds : (n_active_cells, 4) numpy.ndarray
+        Array with the bounds of each active cell in the 2D mesh. For each row, the
+        bounds should be passed in the following order: ``x_min``, ``x_max``,
+        ``y_min``, ``y_max``.
+    top : (n_active_cells) np.ndarray
+        Array with the top boundaries of each active cell in the 2D mesh.
+    bottom : (n_active_cells) np.ndarray
+        Array with the bottom boundaries of each active cell in the 2D mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables
+        Kernel functions used for computing the desired TMI derivative.
+    constant_factor : float
+        Constant factor that will be used to multiply each element of the
+        sensitivity matrix.
+    scalar_model : bool
+        If True, the result will be computed assuming that the ``model`` has
+        susceptibilities (scalar model) for each active cell.
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    weights : (n_receivers,) numpy.ndarray
+        Array with data weights. It should be the diagonal of the ``W`` matrix,
+        squared.
+    diagonal : (n_active_cells,) numpy.ndarray
+        Array where the diagonal of ``G.T @ G`` will be added to.
+
+    Notes
+    -----
+    This function is meant to be run in parallel. Use the
+    ``_diagonal_G_T_dot_G_tmi_deriv_serial`` one for serial computations.
+    """
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+    diagonal_size = diagonal.size
+    constant_factor = 1 / 4 / np.pi
+    n_receivers = receivers.shape[0]
+    n_cells = cells_bounds.shape[0]
+    # Evaluate kernel function on each node, for each receiver location
+    for i in prange(n_receivers):
+        # Allocate array for the diagonal elements for the current receiver.
+        local_diagonal = np.empty(diagonal_size)
+        for j in range(n_cells):
+            # Evaluate kernels for the current cell and receiver
+            uxx, uyy, uzz, uxy, uxz, uyz = evaluate_six_kernels_on_cell(
+                receivers[i, 0],
+                receivers[i, 1],
+                receivers[i, 2],
+                cells_bounds[j, 0],
+                cells_bounds[j, 1],
+                cells_bounds[j, 2],
+                cells_bounds[j, 3],
+                bottom[j],
+                top[j],
+                kernel_xx,
+                kernel_yy,
+                kernel_zz,
+                kernel_xy,
+                kernel_xz,
+                kernel_yz,
+            )
+            bx = uxx * fx + uxy * fy + uxz * fz
+            by = uxy * fx + uyy * fy + uyz * fz
+            bz = uxz * fx + uyz * fy + uzz * fz
+
+            if scalar_model:
+                g_element = (
+                    constant_factor
+                    * regional_field_amplitude
+                    * (bx * fx + by * fy + bz * fz)
+                )
+                local_diagonal[j] = weights[i] * g_element**2
+            else:
+                const = constant_factor * regional_field_amplitude
+                local_diagonal[j] = weights[i] * (const * bx) ** 2
+                local_diagonal[j + n_cells] = weights[i] * (const * by) ** 2
+                local_diagonal[j + 2 * n_cells] = weights[i] * (const * bz) ** 2
+        # Add the result to the diagonal.
+        # Apply reduction operation to add the values of the local diagonal to
+        # the running diagonal array. Avoid slicing the `diagonal` array when
+        # updating it to avoid race conditions, just add the `local_diagonal`
+        # to the `diagonal` variable.
+        diagonal += local_diagonal
+
+
+NUMBA_FUNCTIONS_2D = {
+    "forward": {
+        "tmi": {
+            parallel: jit(nopython=True, parallel=parallel)(_forward_tmi)
+            for parallel in (True, False)
+        },
+        "magnetic_component": {
+            parallel: jit(nopython=True, parallel=parallel)(_forward_mag)
+            for parallel in (True, False)
+        },
+        "tmi_derivative": {
+            parallel: jit(nopython=True, parallel=parallel)(_forward_tmi_derivative)
+            for parallel in (True, False)
+        },
+    },
+    "sensitivity": {
+        "tmi": {
+            parallel: jit(nopython=True, parallel=parallel)(_sensitivity_tmi)
+            for parallel in (True, False)
+        },
+        "magnetic_component": {
+            parallel: jit(nopython=True, parallel=parallel)(_sensitivity_mag)
+            for parallel in (True, False)
+        },
+        "tmi_derivative": {
+            parallel: jit(nopython=True, parallel=parallel)(_sensitivity_tmi_derivative)
+            for parallel in (True, False)
+        },
+    },
+    "gt_dot_v": {
+        "tmi": {
+            False: _tmi_sensitivity_t_dot_v_serial,
+            True: _tmi_sensitivity_t_dot_v_parallel,
+        },
+        "magnetic_component": {
+            False: _mag_sensitivity_t_dot_v_serial,
+            True: _mag_sensitivity_t_dot_v_parallel,
+        },
+        "tmi_derivative": {
+            False: _tmi_derivative_sensitivity_t_dot_v_serial,
+            True: _tmi_derivative_sensitivity_t_dot_v_parallel,
+        },
+    },
+    "diagonal_gtg": {
+        "tmi": {
+            False: _diagonal_G_T_dot_G_tmi_serial,
+            True: _diagonal_G_T_dot_G_tmi_parallel,
+        },
+        "magnetic_component": {
+            False: _diagonal_G_T_dot_G_mag_serial,
+            True: _diagonal_G_T_dot_G_mag_parallel,
+        },
+        "tmi_derivative": {
+            False: _diagonal_G_T_dot_G_tmi_deriv_serial,
+            True: _diagonal_G_T_dot_G_tmi_deriv_parallel,
+        },
+    },
+}
diff --git a/simpeg/potential_fields/magnetics/_numba/_3d_mesh.py b/simpeg/potential_fields/magnetics/_numba/_3d_mesh.py
new file mode 100644
index 0000000000..57ba260371
--- /dev/null
+++ b/simpeg/potential_fields/magnetics/_numba/_3d_mesh.py
@@ -0,0 +1,2420 @@
+"""
+Numba functions for magnetic simulation of rectangular prisms
+"""
+
+import numpy as np
+
+try:
+    import choclo
+except ImportError:
+    # Define dummy jit decorator
+    def jit(*args, **kwargs):
+        return lambda f: f
+
+    # Define a dummy prange so the functions below stay importable (and
+    # callable as plain Python) when numba and choclo are not installed.
+    prange = range
+
+    choclo = None
+else:
+    from numba import jit, prange
+
+from ..._numba_utils import kernels_in_nodes_to_cell
+
+
+def _sensitivity_mag(
+    receivers,
+    nodes,
+    sensitivity_matrix,
+    cell_nodes,
+    regional_field,
+    kernel_x,
+    kernel_y,
+    kernel_z,
+    constant_factor,
+    scalar_model,
+):
+    r"""
+    Fill the sensitivity matrix for a single magnetic component
+
+    This function should be used with a `numba.jit` decorator, for example:
+
+    .. code::
+
+        from numba import jit
+
+        jit_sensitivity_mag = jit(nopython=True, parallel=True)(_sensitivity_mag)
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) array
+        Array with the locations of the receivers
+    nodes : (n_active_nodes, 3) array
+        Array with the location of the mesh nodes.
+    sensitivity_matrix : array
+        Empty 2d array where the sensitivity matrix elements will be filled.
+        This could be a preallocated empty array or a slice of it.
+        The array should have a shape of ``(n_receivers, n_active_cells)``
+        if ``scalar_model`` is True.
+        The array should have a shape of ``(n_receivers, 3 * n_active_cells)``
+        if ``scalar_model`` is False.
+    cell_nodes : (n_active_cells, 8) array
+        Array of integers, where each row contains the indices of the nodes for
+        each active cell in the mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+ kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is built to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is built to work with vector models + (effective susceptibilities). + + Notes + ----- + + About the kernel functions + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + + For computing the ``bx`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu + + + For computing the ``by`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu + + For computing the ``bz`` component of the magnetic field we need to use the + following kernels: + + .. code:: + + kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu + + About the model array + ^^^^^^^^^^^^^^^^^^^^^ + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. + * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array + with a number of elements equal to three times the active cells in the + mesh. It should store the components of the magnetization vector of each + active cell in :math:`Am^{-1}`. The order in which the components should + be passed are: + * every _easting_ component of each active cell, + * then every _northing_ component of each active cell, + * and finally every _upward_ component of each active cell. + + About the sensitivity matrix + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Each row of the sensitivity matrix corresponds to a single receiver + location. + + If ``scalar_model`` is True, then each element of the row will + correspond to the partial derivative of the selected magnetic component + with respect to the susceptibility of each cell in the mesh. + + If ``scalar_model`` is False, then each row can be split in three sections + containing: + + * the partial derivatives of the selected magnetic component with respect + to the _x_ component of the effective susceptibility of each cell; then + * the partial derivatives of the selected magnetic component with respect + to the _y_ component of the effective susceptibility of each cell; and then + * the partial derivatives of the selected magnetic component with respect + to the _z_ component of the effective susceptibility of each cell. + + So, if we call :math:`B_j` the magnetic field component on the receiver + :math:`j`, and :math:`\bar{\chi}^{(i)} = (\chi_x^{(i)}, \chi_y^{(i)}, + \chi_z^{(i)})` the effective susceptibility of the active cell :math:`i`, + then each row of the sensitivity matrix will be: + + .. math:: + + \left[ + \frac{\partial B_j}{\partial \chi_x^{(1)}}, + \dots, + \frac{\partial B_j}{\partial \chi_x^{(N)}}, + \frac{\partial B_j}{\partial \chi_y^{(1)}}, + \dots, + \frac{\partial B_j}{\partial \chi_y^{(N)}}, + \frac{\partial B_j}{\partial \chi_z^{(1)}}, + \dots, + \frac{\partial B_j}{\partial \chi_z^{(N)}} + \right] + + where :math:`N` is the total number of active cells. 
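+
+    Examples
+    --------
+    A minimal sketch of how this function could be compiled and called. The
+    ``receivers``, ``nodes``, ``cell_nodes`` and ``regional_field`` arrays are
+    assumed to be defined elsewhere (e.g. built from the mesh and the survey),
+    and the constant factor shown here is only illustrative:
+
+    .. code::
+
+        import choclo
+        import numpy as np
+        from numba import jit
+
+        sensitivity_mag = jit(nopython=True, parallel=True)(_sensitivity_mag)
+
+        # Preallocate the matrix for a scalar (susceptibility) model.
+        n_receivers, n_active_cells = receivers.shape[0], cell_nodes.shape[0]
+        sensitivity_matrix = np.empty((n_receivers, n_active_cells))
+
+        # Kernels for the bx component (see the kernel notes above).
+        sensitivity_mag(
+            receivers,
+            nodes,
+            sensitivity_matrix,
+            cell_nodes,
+            regional_field,
+            choclo.prism.kernel_ee,
+            choclo.prism.kernel_en,
+            choclo.prism.kernel_eu,
+            1 / (4 * np.pi),
+            True,
+        )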
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = kernels_in_nodes_to_cell(kx, nodes_indices) + uy = kernels_in_nodes_to_cell(ky, nodes_indices) + uz = kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + sensitivity_matrix[i, k] = ( + constant_factor * regional_field_amplitude * ux + ) + sensitivity_matrix[i, k + n_cells] = ( + constant_factor * regional_field_amplitude * uy + ) + sensitivity_matrix[i, k + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * uz + ) + + +def _sensitivity_tmi( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + constant_factor, + scalar_model, +): + r""" + Fill the sensitivity matrix for TMI + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_sensitivity_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + The array should have a shape of ``(n_receivers, n_active_cells)`` + if ``scalar_model`` is True. + The array should have a shape of ``(n_receivers, 3 * n_active_cells)`` + if ``scalar_model`` is False. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). + + Notes + ----- + + About the model array + ^^^^^^^^^^^^^^^^^^^^^ + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. 
+ * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array + with a number of elements equal to three times the active cells in the + mesh. It should store the components of the magnetization vector of each + active cell in :math:`Am^{-1}`. The order in which the components should + be passed are: + * every _easting_ component of each active cell, + * then every _northing_ component of each active cell, + * and finally every _upward_ component of each active cell. + + About the sensitivity matrix + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Each row of the sensitivity matrix corresponds to a single receiver + location. + + If ``scalar_model`` is True, then each element of the row will + correspond to the partial derivative of the tmi + with respect to the susceptibility of each cell in the mesh. + + If ``scalar_model`` is False, then each row can be split in three sections + containing: + + * the partial derivatives of the tmi with respect + to the _x_ component of the effective susceptibility of each cell; then + * the partial derivatives of the tmi with respect + to the _y_ component of the effective susceptibility of each cell; and then + * the partial derivatives of the tmi with respect + to the _z_ component of the effective susceptibility of each cell. + + So, if we call :math:`T_j` the tmi on the receiver + :math:`j`, and :math:`\bar{\chi}^{(i)} = (\chi_x^{(i)}, \chi_y^{(i)}, + \chi_z^{(i)})` the effective susceptibility of the active cell :math:`i`, + then each row of the sensitivity matrix will be: + + .. math:: + + \left[ + \frac{\partial T_j}{\partial \chi_x^{(1)}}, + \dots, + \frac{\partial T_j}{\partial \chi_x^{(N)}}, + \frac{\partial T_j}{\partial \chi_y^{(1)}}, + \dots, + \frac{\partial T_j}{\partial \chi_y^{(N)}}, + \frac{\partial T_j}{\partial \chi_z^{(1)}}, + \dots, + \frac{\partial T_j}{\partial \chi_z^{(N)}} + \right] + + where :math:`N` is the total number of active cells. 
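+
+    Examples
+    --------
+    A minimal sketch of how this function could be compiled and called. The
+    ``receivers``, ``nodes``, ``cell_nodes`` and ``regional_field`` arrays are
+    assumed to be defined elsewhere, and the constant factor shown here is
+    only illustrative:
+
+    .. code::
+
+        import numpy as np
+        from numba import jit
+
+        sensitivity_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi)
+
+        # Preallocate the matrix for a vector (effective susceptibility)
+        # model: three columns per active cell.
+        n_receivers, n_active_cells = receivers.shape[0], cell_nodes.shape[0]
+        sensitivity_matrix = np.empty((n_receivers, 3 * n_active_cells))
+
+        sensitivity_tmi(
+            receivers,
+            nodes,
+            sensitivity_matrix,
+            cell_nodes,
+            regional_field,
+            1 / (4 * np.pi),
+            False,
+        )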
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + # Fill the sensitivity matrix element(s) that correspond to the + # current active cell + if scalar_model: + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + sensitivity_matrix[i, k] = ( + constant_factor * regional_field_amplitude * bx + ) + sensitivity_matrix[i, k + n_cells] = ( + constant_factor * regional_field_amplitude * by + ) + sensitivity_matrix[i, k + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * bz + ) + + +def _sensitivity_tmi_derivative( + receivers, + nodes, + sensitivity_matrix, + cell_nodes, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + constant_factor, + scalar_model, +): + r""" + Fill the sensitivity matrix for a TMI derivative. + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_sens = jit(nopython=True, parallel=True)(_sensitivity_tmi_derivative) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + sensitivity_matrix : array + Empty 2d array where the sensitivity matrix elements will be filled. + This could be a preallocated empty array or a slice of it. + The array should have a shape of ``(n_receivers, n_active_cells)`` + if ``scalar_model`` is True. + The array should have a shape of ``(n_receivers, 3 * n_active_cells)`` + if ``scalar_model`` is False. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. 
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables
+        Kernel functions used for computing the desired TMI derivative.
+    constant_factor : float
+        Constant factor that will be used to multiply each element of the
+        sensitivity matrix.
+    scalar_model : bool
+        If True, the sensitivity matrix is built to work with scalar models
+        (susceptibilities).
+        If False, the sensitivity matrix is built to work with vector models
+        (effective susceptibilities).
+
+    Notes
+    -----
+
+    About the kernel functions
+    ^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+    To compute the :math:`\alpha` derivative of the TMI :math:`\Delta T` (with
+    :math:`\alpha \in \{x, y, z\}`) we need to evaluate third order kernel
+    functions for the prism. The kernels we need to evaluate can be obtained by
+    fixing one of the subindices to the direction of the derivative
+    (:math:`\alpha`) and cycle through combinations of the other two.
+
+    For ``tmi_x`` we need to pass:
+
+    .. code::
+
+        kernel_xx=kernel_eee, kernel_yy=kernel_enn, kernel_zz=kernel_euu,
+        kernel_xy=kernel_een, kernel_xz=kernel_eeu, kernel_yz=kernel_enu
+
+    For ``tmi_y`` we need to pass:
+
+    .. code::
+
+        kernel_xx=kernel_een, kernel_yy=kernel_nnn, kernel_zz=kernel_nuu,
+        kernel_xy=kernel_enn, kernel_xz=kernel_enu, kernel_yz=kernel_nnu
+
+    For ``tmi_z`` we need to pass:
+
+    .. code::
+
+        kernel_xx=kernel_eeu, kernel_yy=kernel_nnu, kernel_zz=kernel_uuu,
+        kernel_xy=kernel_enu, kernel_xz=kernel_euu, kernel_yz=kernel_nuu
+
+
+    About the model array
+    ^^^^^^^^^^^^^^^^^^^^^
+
+    The ``model`` must always be a 1d array:
+
+    * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with
+      the same number of elements as active cells in the mesh. It should store
+      the magnetic susceptibilities of each active cell in SI units.
+    * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array
+      with a number of elements equal to three times the active cells in the
+      mesh. It should store the components of the magnetization vector of each
+      active cell in :math:`Am^{-1}`. The order in which the components should
+      be passed are:
+        * every _easting_ component of each active cell,
+        * then every _northing_ component of each active cell,
+        * and finally every _upward_ component of each active cell.
+
+    About the sensitivity matrix
+    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+    Each row of the sensitivity matrix corresponds to a single receiver
+    location.
+
+    If ``scalar_model`` is True, then each element of the row will
+    correspond to the partial derivative of the tmi derivative (spatial)
+    with respect to the susceptibility of each cell in the mesh.
+
+    If ``scalar_model`` is False, then each row can be split in three sections
+    containing:
+
+    * the partial derivatives of the tmi derivative with respect
+      to the _x_ component of the effective susceptibility of each cell; then
+    * the partial derivatives of the tmi derivative with respect
+      to the _y_ component of the effective susceptibility of each cell; and then
+    * the partial derivatives of the tmi derivative with respect
+      to the _z_ component of the effective susceptibility of each cell.
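+
+    Examples
+    --------
+    A minimal sketch of how this function could be compiled and called for the
+    easting derivative of the TMI (``tmi_x``), using the kernel choice listed
+    above. The ``receivers``, ``nodes``, ``cell_nodes`` and ``regional_field``
+    arrays are assumed to be defined elsewhere, and the constant factor is
+    only illustrative:
+
+    .. code::
+
+        import choclo
+        import numpy as np
+        from numba import jit
+
+        sens_tmi_x = jit(nopython=True, parallel=True)(_sensitivity_tmi_derivative)
+
+        n_receivers, n_active_cells = receivers.shape[0], cell_nodes.shape[0]
+        sensitivity_matrix = np.empty((n_receivers, n_active_cells))
+
+        sens_tmi_x(
+            receivers,
+            nodes,
+            sensitivity_matrix,
+            cell_nodes,
+            regional_field,
+            choclo.prism.kernel_eee,
+            choclo.prism.kernel_enn,
+            choclo.prism.kernel_euu,
+            choclo.prism.kernel_een,
+            choclo.prism.kernel_eeu,
+            choclo.prism.kernel_enu,
+            1 / (4 * np.pi),
+            True,
+        )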
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = kernel_xx(dx, dy, dz, distance) + kyy[j] = kernel_yy(dx, dy, dz, distance) + kzz[j] = kernel_zz(dx, dy, dz, distance) + kxy[j] = kernel_xy(dx, dy, dz, distance) + kxz[j] = kernel_xz(dx, dy, dz, distance) + kyz[j] = kernel_yz(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + # Fill the sensitivity matrix element(s) that correspond to the + # current active cell + if scalar_model: + sensitivity_matrix[i, k] = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + sensitivity_matrix[i, k] = ( + constant_factor * regional_field_amplitude * bx + ) + sensitivity_matrix[i, k + n_cells] = ( + constant_factor * regional_field_amplitude * by + ) + sensitivity_matrix[i, k + 2 * n_cells] = ( + constant_factor * regional_field_amplitude * bz + ) + + +@jit(nopython=True, parallel=False) +def _mag_sensitivity_t_dot_v_serial( + receivers, + nodes, + cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` in serial, without building G, for a single magnetic component. + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. 
+ If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in serial. Writing to the ``result`` array + inside a parallel loop over the receivers generates a race condition that + leads to corrupted outputs. + + A parallel implementation of this function is available in + ``_mag_sensitivity_t_dot_v_parallel``. + + See also + -------- + _sensitivity_mag + Compute the sensitivity matrix for a single magnetic component by + allocating it in memory. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = kernels_in_nodes_to_cell(kx, nodes_indices) + uy = kernels_in_nodes_to_cell(ky, nodes_indices) + uz = kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + result[k] += ( + constant_factor + * vector[i] + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + result[k] += constant_factor * vector[i] * regional_field_amplitude * ux + result[k + n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * uy + ) + result[k + 2 * n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * uz + ) + + +@jit(nopython=True, parallel=True) +def _mag_sensitivity_t_dot_v_parallel( + receivers, + nodes, + cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` in parallel, without building G, for a single magnetic component + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. 
For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in parallel. + This implementation instructs each thread to allocate their own array for + the current row of the sensitivity matrix. After computing the elements of + that row, it gets added to the running ``result`` array through a reduction + operation handled by Numba. + + A serialized implementation of this function is available in + ``_mag_sensitivity_t_dot_v_serial``. + + See also + -------- + _sensitivity_mag + Compute the sensitivity matrix for a single magnetic component by + allocating it in memory. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + result_size = result.size + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate array for the current row of the sensitivity matrix + local_row = np.empty(result_size) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = kernels_in_nodes_to_cell(kx, nodes_indices) + uy = kernels_in_nodes_to_cell(ky, nodes_indices) + uz = kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + local_row[k] = ( + constant_factor + * vector[i] + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + local_row[k] = ( + constant_factor * vector[i] * regional_field_amplitude * ux + ) + local_row[k + n_cells] = ( + constant_factor * vector[i] * regional_field_amplitude * uy + ) + local_row[k + 2 * n_cells] = ( + constant_factor * vector[i] * regional_field_amplitude * uz + ) + # Apply reduction operation to add the values of the row to the running + # result. Avoid slicing the `result` array when updating it to avoid + # racing conditions, just add the `local_row` to the `results` + # variable. 
+ result += local_row + + +@jit(nopython=True, parallel=False) +def _tmi_sensitivity_t_dot_v_serial( + receivers, + nodes, + cell_nodes, + regional_field, + constant_factor, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` in serial, without building G, for TMI. + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in serial. Writing to the ``result`` array + inside a parallel loop over the receivers generates a race condition that + leads to corrupted outputs. + + A parallel implementation of this function is available in + ``_tmi_sensitivity_t_dot_v_parallel``. + + See also + -------- + _sensitivity_tmi + Compute the sensitivity matrix for TMI by allocating it in memory. 
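+
+    Examples
+    --------
+    A minimal sketch of a call (placeholder array names, assuming
+    ``receivers``, ``nodes``, ``cell_nodes`` and ``regional_field`` were
+    already built for the active cells of a mesh, and ``v`` holds the
+    ``(n_receivers,)`` vector of the dot product):
+
+    .. code::
+
+        result = np.zeros(n_active_cells)
+        _tmi_sensitivity_t_dot_v_serial(
+            receivers, nodes, cell_nodes, regional_field,
+            constant_factor, True, v, result,
+        )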
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + # Fill the sensitivity matrix element(s) that correspond to the + # current active cell + if scalar_model: + result[k] += ( + constant_factor + * vector[i] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + result[k] += constant_factor * vector[i] * regional_field_amplitude * bx + result[k + n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * by + ) + result[k + 2 * n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * bz + ) + + +@jit(nopython=True, parallel=True) +def _tmi_sensitivity_t_dot_v_parallel( + receivers, + nodes, + cell_nodes, + regional_field, + constant_factor, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` in parallel, without building G, for TMI. + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. 
+    result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray
+        Running result array where the output of the dot product will be added to.
+        The array should have ``n_active_cells`` elements if ``scalar_model``
+        is True, or ``3 * n_active_cells`` otherwise.
+
+    Notes
+    -----
+    This function is meant to be run in parallel.
+    This implementation instructs each thread to allocate their own array for
+    the current row of the sensitivity matrix. After computing the elements of
+    that row, it gets added to the running ``result`` array through a reduction
+    operation handled by Numba.
+
+    A serialized implementation of this function is available in
+    ``_tmi_sensitivity_t_dot_v_serial``.
+
+    See also
+    --------
+    _sensitivity_tmi
+        Compute the sensitivity matrix for TMI by allocating it in memory.
+    """
+    n_receivers = receivers.shape[0]
+    n_nodes = nodes.shape[0]
+    n_cells = cell_nodes.shape[0]
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+    result_size = result.size
+    # Evaluate kernel function on each node, for each receiver location
+    for i in prange(n_receivers):
+        # Allocate vectors for kernels evaluated on mesh nodes
+        kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes)
+        kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes)
+        # Allocate array for the current row of the sensitivity matrix
+        local_row = np.empty(result_size)
+        # Allocate small vector for the nodes indices for a given cell
+        nodes_indices = np.empty(8, dtype=cell_nodes.dtype)
+        for j in range(n_nodes):
+            dx = nodes[j, 0] - receivers[i, 0]
+            dy = nodes[j, 1] - receivers[i, 1]
+            dz = nodes[j, 2] - receivers[i, 2]
+            distance = np.sqrt(dx**2 + dy**2 + dz**2)
+            kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance)
+            kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance)
+            kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance)
+            kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance)
+            kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance)
+            kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance)
+        # Compute sensitivity matrix elements from the kernel values
+        for k in range(n_cells):
+            nodes_indices = cell_nodes[k, :]
+            uxx = kernels_in_nodes_to_cell(kxx, nodes_indices)
+            uyy = kernels_in_nodes_to_cell(kyy, nodes_indices)
+            uzz = kernels_in_nodes_to_cell(kzz, nodes_indices)
+            uxy = kernels_in_nodes_to_cell(kxy, nodes_indices)
+            uxz = kernels_in_nodes_to_cell(kxz, nodes_indices)
+            uyz = kernels_in_nodes_to_cell(kyz, nodes_indices)
+            bx = uxx * fx + uxy * fy + uxz * fz
+            by = uxy * fx + uyy * fy + uyz * fz
+            bz = uxz * fx + uyz * fy + uzz * fz
+            # Fill the sensitivity matrix element(s) that correspond to the
+            # current active cell
+            if scalar_model:
+                local_row[k] = (
+                    constant_factor
+                    * vector[i]
+                    * regional_field_amplitude
+                    * (bx * fx + by * fy + bz * fz)
+                )
+            else:
+                local_row[k] = (
+                    constant_factor * vector[i] * regional_field_amplitude * bx
+                )
+                local_row[k + n_cells] = (
+                    constant_factor * vector[i] * regional_field_amplitude * by
+                )
+                local_row[k + 2 * n_cells] = (
+                    constant_factor * vector[i] * regional_field_amplitude * bz
+                )
+        # Apply reduction operation to add the values of the row to the running
+        # result. Avoid slicing the `result` array when updating it to avoid
+        # race conditions; just add the `local_row` to the `result`
+        # variable.
+ result += local_row + + +@jit(nopython=True, parallel=False) +def _tmi_derivative_sensitivity_t_dot_v_serial( + receivers, + nodes, + cell_nodes, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + constant_factor, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` in serial, without building G, for a spatial TMI derivative. + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables + Kernel functions used for computing the desired TMI derivative. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in serial. Writing to the ``result`` array + inside a parallel loop over the receivers generates a race condition that + leads to corrupted outputs. + + A parallel implementation of this function is available in + ``_tmi_derivative_sensitivity_t_dot_v_parallel``. + + See also + -------- + _sensitivity_tmi_derivative + Compute the sensitivity matrix for a TMI derivative by allocating it in memory. 
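+
+    Examples
+    --------
+    A sketch of accumulating ``G.T @ v`` for the easting derivative of the
+    TMI, assuming the third-order prism kernels are importable from
+    ``choclo.prism`` and that the remaining arrays (placeholder names) were
+    built beforehand, with ``v`` holding the ``(n_receivers,)`` vector:
+
+    .. code::
+
+        from choclo.prism import (
+            kernel_eee, kernel_enn, kernel_euu,
+            kernel_een, kernel_eeu, kernel_enu,
+        )
+
+        result = np.zeros(n_active_cells)
+        _tmi_derivative_sensitivity_t_dot_v_serial(
+            receivers, nodes, cell_nodes, regional_field,
+            kernel_eee, kernel_enn, kernel_euu,
+            kernel_een, kernel_eeu, kernel_enu,
+            constant_factor, True, v, result,
+        )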
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = kernel_xx(dx, dy, dz, distance) + kyy[j] = kernel_yy(dx, dy, dz, distance) + kzz[j] = kernel_zz(dx, dy, dz, distance) + kxy[j] = kernel_xy(dx, dy, dz, distance) + kxz[j] = kernel_xz(dx, dy, dz, distance) + kyz[j] = kernel_yz(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + # Fill the sensitivity matrix element(s) that correspond to the + # current active cell + if scalar_model: + result[k] += ( + constant_factor + * vector[i] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + result[k] += constant_factor * vector[i] * regional_field_amplitude * bx + result[k + n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * by + ) + result[k + 2 * n_cells] += ( + constant_factor * vector[i] * regional_field_amplitude * bz + ) + + +@jit(nopython=True, parallel=True) +def _tmi_derivative_sensitivity_t_dot_v_parallel( + receivers, + nodes, + cell_nodes, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + constant_factor, + scalar_model, + vector, + result, +): + r""" + Compute ``G.T @ v`` in parallel, without building G, for a spatial TMI derivative. + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables + Kernel functions used for computing the desired TMI derivative. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. 
+ If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + vector : (n_receivers) numpy.ndarray + Array that represents the vector used in the dot product. + result : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Running result array where the output of the dot product will be added to. + The array should have ``n_active_cells`` elements if ``scalar_model`` + is True, or ``3 * n_active_cells`` otherwise. + + Notes + ----- + This function is meant to be run in parallel. + This implementation instructs each thread to allocate their own array for + the current row of the sensitivity matrix. After computing the elements of + that row, it gets added to the running ``result`` array through a reduction + operation handled by Numba. + + A serialized implementation of this function is available in + ``_tmi_derivative_sensitivity_t_dot_v_serial``. + + See also + -------- + _sensitivity_tmi_derivative + Compute the sensitivity matrix for a TMI derivative by allocating it in memory. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + result_size = result.size + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate array for the current row of the sensitivity matrix + local_row = np.empty(result_size) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = kernel_xx(dx, dy, dz, distance) + kyy[j] = kernel_yy(dx, dy, dz, distance) + kzz[j] = kernel_zz(dx, dy, dz, distance) + kxy[j] = kernel_xy(dx, dy, dz, distance) + kxz[j] = kernel_xz(dx, dy, dz, distance) + kyz[j] = kernel_yz(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + # Fill the sensitivity matrix element(s) that correspond to the + # current active cell + if scalar_model: + local_row[k] = ( + constant_factor + * vector[i] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + local_row[k] = ( + constant_factor * vector[i] * regional_field_amplitude * bx + ) + local_row[k + n_cells] = ( + constant_factor * vector[i] * regional_field_amplitude * by + ) + local_row[k + 2 * n_cells] = ( + constant_factor * vector[i] * regional_field_amplitude * bz + ) + # Apply reduction operation to add the values of the row to the running + # result. 
Avoid slicing the `result` array when updating it to avoid
+        # race conditions; just add the `local_row` to the `result`
+        # variable.
+        result += local_row
+
+
+@jit(nopython=True, parallel=False)
+def _diagonal_G_T_dot_G_mag_serial(
+    receivers,
+    nodes,
+    cell_nodes,
+    regional_field,
+    kernel_x,
+    kernel_y,
+    kernel_z,
+    constant_factor,
+    scalar_model,
+    weights,
+    diagonal,
+):
+    """
+    Diagonal of ``G.T @ W.T @ W @ G`` for single magnetic component, in serial.
+
+    This function doesn't store the full ``G`` matrix in memory.
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) numpy.ndarray
+        Array with the locations of the receivers
+    nodes : (n_active_nodes, 3) numpy.ndarray
+        Array with the location of the mesh nodes.
+    cell_nodes : (n_active_cells, 8) numpy.ndarray
+        Array of integers, where each row contains the indices of the nodes for
+        each active cell in the mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_x, kernel_y, kernel_z : callable
+        Kernels used to compute the desired magnetic component. For example,
+        for computing bx we need to use ``kernel_x=kernel_ee``,
+        ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``.
+    constant_factor : float
+        Constant factor that will be used to multiply each element of the
+        sensitivity matrix.
+    scalar_model : bool
+        If True, the result will be computed assuming that the ``model`` has
+        susceptibilities (scalar model) for each active cell.
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    weights : (n_receivers,) numpy.ndarray
+        Array with data weights. It should be the diagonal of the ``W`` matrix,
+        squared.
+    diagonal : (n_active_cells) or (3 * n_active_cells) numpy.ndarray
+        Array where the diagonal of ``G.T @ G`` will be added to.
+
+    Notes
+    -----
+    This function is meant to be run in serial. Use the
+    ``_diagonal_G_T_dot_G_mag_parallel`` one for parallelized computations.
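+
+    In index notation, the quantity accumulated here is
+    ``diagonal[k] += weights[i] * G[i, k]**2``, summed over all receivers
+    ``i``, i.e. the k-th diagonal element of ``G.T @ W.T @ W @ G`` when
+    ``weights`` stores the squared diagonal of ``W``.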
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = kernels_in_nodes_to_cell(kx, nodes_indices) + uy = kernels_in_nodes_to_cell(ky, nodes_indices) + uz = kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + g_element = ( + constant_factor + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + diagonal[k] += weights[i] * g_element**2 + else: + const = constant_factor * regional_field_amplitude + diagonal[k] += weights[i] * (const * ux) ** 2 + diagonal[k + n_cells] += weights[i] * (const * uy) ** 2 + diagonal[k + 2 * n_cells] += weights[i] * (const * uz) ** 2 + + +@jit(nopython=True, parallel=True) +def _diagonal_G_T_dot_G_mag_parallel( + receivers, + nodes, + cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` for single magnetic component, in parallel. + + This function doesn't store the full ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_x, kernel_y, kernel_z : callable + Kernels used to compute the desired magnetic component. For example, + for computing bx we need to use ``kernel_x=kernel_ee``, + ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + weights : (n_receivers,) numpy.ndarray + Array with data weights. It should be the diagonal of the ``W`` matrix, + squared. + diagonal : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Array where the diagonal of ``G.T @ G`` will be added to. + + Notes + ----- + This function is meant to be run in parallel. Use the + ``_diagonal_G_T_dot_G_mag_serial`` one for serialized computations. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + diagonal_size = diagonal.size + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate array for the diagonal elements for the current receiver. + local_diagonal = np.empty(diagonal_size) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = kernels_in_nodes_to_cell(kx, nodes_indices) + uy = kernels_in_nodes_to_cell(ky, nodes_indices) + uz = kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + g_element = ( + constant_factor + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + local_diagonal[k] = weights[i] * g_element**2 + else: + const = constant_factor * regional_field_amplitude + local_diagonal[k] = weights[i] * (const * ux) ** 2 + local_diagonal[k + n_cells] = weights[i] * (const * uy) ** 2 + local_diagonal[k + 2 * n_cells] = weights[i] * (const * uz) ** 2 + # Add the result to the diagonal. + # Apply reduction operation to add the values of the local diagonal to + # the running diagonal array. Avoid slicing the `diagonal` array when + # updating it to avoid racing conditions, just add the `local_diagonal` + # to the `diagonal` variable. + diagonal += local_diagonal + + +@jit(nopython=True, parallel=False) +def _diagonal_G_T_dot_G_tmi_serial( + receivers, + nodes, + cell_nodes, + regional_field, + constant_factor, + scalar_model, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` for TMI, in serial. + + This function doesn't store the full ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + weights : (n_receivers,) numpy.ndarray + Array with data weights. It should be the diagonal of the ``W`` matrix, + squared. 
+ diagonal : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Array where the diagonal of ``G.T @ G`` will be added to. + + Notes + ----- + This function is meant to be run in serial. Use the + ``_diagonal_G_T_dot_G_tmi_parallel`` one for parallelized computations. + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) + kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) + kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) + kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) + kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) + kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + + if scalar_model: + g_element = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + diagonal[k] += weights[i] * g_element**2 + else: + const = constant_factor * regional_field_amplitude + diagonal[k] += weights[i] * (const * bx) ** 2 + diagonal[k + n_cells] += weights[i] * (const * by) ** 2 + diagonal[k + 2 * n_cells] += weights[i] * (const * bz) ** 2 + + +@jit(nopython=True, parallel=True) +def _diagonal_G_T_dot_G_tmi_parallel( + receivers, + nodes, + cell_nodes, + regional_field, + constant_factor, + scalar_model, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` for TMI, in parallel. + + This function doesn't store the full ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. 
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    weights : (n_receivers,) numpy.ndarray
+        Array with data weights. It should be the diagonal of the ``W`` matrix,
+        squared.
+    diagonal : (n_active_cells) or (3 * n_active_cells) numpy.ndarray
+        Array where the diagonal of ``G.T @ G`` will be added to.
+
+    Notes
+    -----
+    This function is meant to be run in parallel. Use the
+    ``_diagonal_G_T_dot_G_tmi_serial`` one for serialized computations.
+    """
+    n_receivers = receivers.shape[0]
+    n_nodes = nodes.shape[0]
+    n_cells = cell_nodes.shape[0]
+    diagonal_size = diagonal.size
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+    # Evaluate kernel function on each node, for each receiver location
+    for i in prange(n_receivers):
+        # Allocate vectors for kernels evaluated on mesh nodes
+        kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes)
+        kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes)
+        # Allocate array for the diagonal elements for the current receiver.
+        local_diagonal = np.empty(diagonal_size)
+        # Allocate small vector for the nodes indices for a given cell
+        nodes_indices = np.empty(8, dtype=cell_nodes.dtype)
+        for j in range(n_nodes):
+            dx = nodes[j, 0] - receivers[i, 0]
+            dy = nodes[j, 1] - receivers[i, 1]
+            dz = nodes[j, 2] - receivers[i, 2]
+            distance = np.sqrt(dx**2 + dy**2 + dz**2)
+            kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance)
+            kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance)
+            kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance)
+            kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance)
+            kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance)
+            kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance)
+        # Compute sensitivity matrix elements from the kernel values
+        for k in range(n_cells):
+            nodes_indices = cell_nodes[k, :]
+            uxx = kernels_in_nodes_to_cell(kxx, nodes_indices)
+            uyy = kernels_in_nodes_to_cell(kyy, nodes_indices)
+            uzz = kernels_in_nodes_to_cell(kzz, nodes_indices)
+            uxy = kernels_in_nodes_to_cell(kxy, nodes_indices)
+            uxz = kernels_in_nodes_to_cell(kxz, nodes_indices)
+            uyz = kernels_in_nodes_to_cell(kyz, nodes_indices)
+            bx = uxx * fx + uxy * fy + uxz * fz
+            by = uxy * fx + uyy * fy + uyz * fz
+            bz = uxz * fx + uyz * fy + uzz * fz
+
+            if scalar_model:
+                g_element = (
+                    constant_factor
+                    * regional_field_amplitude
+                    * (bx * fx + by * fy + bz * fz)
+                )
+                local_diagonal[k] = weights[i] * g_element**2
+            else:
+                const = constant_factor * regional_field_amplitude
+                local_diagonal[k] = weights[i] * (const * bx) ** 2
+                local_diagonal[k + n_cells] = weights[i] * (const * by) ** 2
+                local_diagonal[k + 2 * n_cells] = weights[i] * (const * bz) ** 2
+
+        # Add the result to the diagonal.
+        # Apply reduction operation to add the values of the local diagonal to
+        # the running diagonal array. Avoid slicing the `diagonal` array when
+        # updating it to avoid race conditions; just add the `local_diagonal`
+        # to the `diagonal` variable.
+ diagonal += local_diagonal + + +@jit(nopython=True, parallel=False) +def _diagonal_G_T_dot_G_tmi_deriv_serial( + receivers, + nodes, + cell_nodes, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + constant_factor, + scalar_model, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` for TMI derivatives, in serial. + + This function doesn't store the full ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables + Kernel functions used for computing the desired TMI derivative. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. + If False, the result will be computed assuming that the ``model`` has + effective susceptibilities (vector model) for each active cell. + weights : (n_receivers,) numpy.ndarray + Array with data weights. It should be the diagonal of the ``W`` matrix, + squared. + diagonal : (n_active_cells) or (3 * n_active_cells) numpy.ndarray + Array where the diagonal of ``G.T @ G`` will be added to. + + Notes + ----- + This function is meant to be run in serial. Use the + ``_diagonal_G_T_dot_G_tmi_deriv_parallel`` one for parallelized computations. 
+ """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = kernel_xx(dx, dy, dz, distance) + kyy[j] = kernel_yy(dx, dy, dz, distance) + kzz[j] = kernel_zz(dx, dy, dz, distance) + kxy[j] = kernel_xy(dx, dy, dz, distance) + kxz[j] = kernel_xz(dx, dy, dz, distance) + kyz[j] = kernel_yz(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + + if scalar_model: + g_element = ( + constant_factor + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + diagonal[k] += weights[i] * g_element**2 + else: + const = constant_factor * regional_field_amplitude + diagonal[k] += weights[i] * (const * bx) ** 2 + diagonal[k + n_cells] += weights[i] * (const * by) ** 2 + diagonal[k + 2 * n_cells] += weights[i] * (const * bz) ** 2 + + +@jit(nopython=True, parallel=True) +def _diagonal_G_T_dot_G_tmi_deriv_parallel( + receivers, + nodes, + cell_nodes, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + constant_factor, + scalar_model, + weights, + diagonal, +): + """ + Diagonal of ``G.T @ W.T @ W @ G`` for TMI derivatives, in parallel. + + This function doesn't store the full ``G`` matrix in memory. + + Parameters + ---------- + receivers : (n_receivers, 3) numpy.ndarray + Array with the locations of the receivers + nodes : (n_active_nodes, 3) numpy.ndarray + Array with the location of the mesh nodes. + cell_nodes : (n_active_cells, 8) numpy.ndarray + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables + Kernel functions used for computing the desired TMI derivative. + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the result will be computed assuming that the ``model`` has + susceptibilities (scalar model) for each active cell. 
+        If False, the result will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+    weights : (n_receivers,) numpy.ndarray
+        Array with data weights. It should be the diagonal of the ``W`` matrix,
+        squared.
+    diagonal : (n_active_cells) or (3 * n_active_cells) numpy.ndarray
+        Array where the diagonal of ``G.T @ G`` will be added to.
+
+    Notes
+    -----
+    This function is meant to be run in parallel. Use the
+    ``_diagonal_G_T_dot_G_tmi_deriv_serial`` one for serialized computations.
+    """
+    n_receivers = receivers.shape[0]
+    n_nodes = nodes.shape[0]
+    n_cells = cell_nodes.shape[0]
+    diagonal_size = diagonal.size
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+    # Evaluate kernel function on each node, for each receiver location
+    for i in prange(n_receivers):
+        # Allocate vectors for kernels evaluated on mesh nodes
+        kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes)
+        kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes)
+        # Allocate array for the diagonal elements for the current receiver.
+        local_diagonal = np.empty(diagonal_size)
+        # Allocate small vector for the nodes indices for a given cell
+        nodes_indices = np.empty(8, dtype=cell_nodes.dtype)
+        for j in range(n_nodes):
+            dx = nodes[j, 0] - receivers[i, 0]
+            dy = nodes[j, 1] - receivers[i, 1]
+            dz = nodes[j, 2] - receivers[i, 2]
+            distance = np.sqrt(dx**2 + dy**2 + dz**2)
+            kxx[j] = kernel_xx(dx, dy, dz, distance)
+            kyy[j] = kernel_yy(dx, dy, dz, distance)
+            kzz[j] = kernel_zz(dx, dy, dz, distance)
+            kxy[j] = kernel_xy(dx, dy, dz, distance)
+            kxz[j] = kernel_xz(dx, dy, dz, distance)
+            kyz[j] = kernel_yz(dx, dy, dz, distance)
+        # Compute sensitivity matrix elements from the kernel values
+        for k in range(n_cells):
+            nodes_indices = cell_nodes[k, :]
+            uxx = kernels_in_nodes_to_cell(kxx, nodes_indices)
+            uyy = kernels_in_nodes_to_cell(kyy, nodes_indices)
+            uzz = kernels_in_nodes_to_cell(kzz, nodes_indices)
+            uxy = kernels_in_nodes_to_cell(kxy, nodes_indices)
+            uxz = kernels_in_nodes_to_cell(kxz, nodes_indices)
+            uyz = kernels_in_nodes_to_cell(kyz, nodes_indices)
+            bx = uxx * fx + uxy * fy + uxz * fz
+            by = uxy * fx + uyy * fy + uyz * fz
+            bz = uxz * fx + uyz * fy + uzz * fz
+
+            if scalar_model:
+                g_element = (
+                    constant_factor
+                    * regional_field_amplitude
+                    * (bx * fx + by * fy + bz * fz)
+                )
+                local_diagonal[k] = weights[i] * g_element**2
+            else:
+                const = constant_factor * regional_field_amplitude
+                local_diagonal[k] = weights[i] * (const * bx) ** 2
+                local_diagonal[k + n_cells] = weights[i] * (const * by) ** 2
+                local_diagonal[k + 2 * n_cells] = weights[i] * (const * bz) ** 2
+
+        # Add the result to the diagonal.
+        # Apply reduction operation to add the values of the local diagonal to
+        # the running diagonal array. Avoid slicing the `diagonal` array when
+        # updating it to avoid race conditions; just add the `local_diagonal`
+        # to the `diagonal` variable.
+        diagonal += local_diagonal
+
+
+def _forward_mag(
+    receivers,
+    nodes,
+    model,
+    fields,
+    cell_nodes,
+    regional_field,
+    kernel_x,
+    kernel_y,
+    kernel_z,
+    constant_factor,
+    scalar_model,
+):
+    """
+    Forward model single magnetic component
+
+    This function should be used with a `numba.jit` decorator, for example:
+
+    .. code::
+
+        from numba import jit
+
+        jit_forward = jit(nopython=True, parallel=True)(_forward_mag)
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) array
+        Array with the locations of the receivers
+    nodes : (n_active_nodes, 3) array
+        Array with the location of the mesh nodes.
+    model : (n_active_cells) or (3 * n_active_cells) array
+        Array containing the susceptibilities (scalar) or effective
+        susceptibilities (vector) of the active cells in the mesh, in SI
+        units.
+        Susceptibilities are expected if ``scalar_model`` is True,
+        and the array should have ``n_active_cells`` elements.
+        Effective susceptibilities are expected if ``scalar_model`` is False,
+        and the array should have ``3 * n_active_cells`` elements.
+    fields : (n_receivers) array
+        Array full of zeros where the magnetic component on each receiver will
+        be stored. This could be a preallocated array or a slice of it.
+    cell_nodes : (n_active_cells, 8) array
+        Array of integers, where each row contains the indices of the nodes for
+        each active cell in the mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_x, kernel_y, kernel_z : callable
+        Kernels used to compute the desired magnetic component. For example,
+        for computing bx we need to use ``kernel_x=kernel_ee``,
+        ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``.
+    constant_factor : float
+        Constant factor that will be used to multiply each element of the
+        sensitivity matrix.
+    scalar_model : bool
+        If True, the forward will be computed assuming that the ``model`` has
+        susceptibilities (scalar model) for each active cell.
+        If False, the forward will be computed assuming that the ``model`` has
+        effective susceptibilities (vector model) for each active cell.
+
+    Notes
+    -----
+
+    About the kernel functions
+    ^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+    For computing the ``bx`` component of the magnetic field we need to use the
+    following kernels:
+
+    .. code::
+
+        kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu
+
+
+    For computing the ``by`` component of the magnetic field we need to use the
+    following kernels:
+
+    .. code::
+
+        kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu
+
+    For computing the ``bz`` component of the magnetic field we need to use the
+    following kernels:
+
+    .. code::
+
+        kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu
+
+
+    About the model array
+    ^^^^^^^^^^^^^^^^^^^^^
+
+    The ``model`` must always be a 1d array:
+
+    * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with
+      the same number of elements as active cells in the mesh. It should store
+      the magnetic susceptibilities of each active cell in SI units.
+    * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array
+      with a number of elements equal to three times the active cells in the
+      mesh. It should store the components of the magnetization vector of each
+      active cell in :math:`Am^{-1}`. The order in which the components should
+      be passed is (see the sketch below):
+        * every _easting_ component of each active cell,
+        * then every _northing_ component of each active cell,
+        * and finally every _upward_ component of each active cell.
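+
+    For instance, a vector model for two active cells with (hypothetical)
+    magnetization vectors ``(mx1, my1, mz1)`` and ``(mx2, my2, mz2)`` would
+    be laid out as:
+
+    .. code::
+
+        model = np.array([mx1, mx2, my1, my2, mz1, mz2])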
+ + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kx[j] = kernel_x(dx, dy, dz, distance) + ky[j] = kernel_y(dx, dy, dz, distance) + kz[j] = kernel_z(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + ux = kernels_in_nodes_to_cell(kx, nodes_indices) + uy = kernels_in_nodes_to_cell(ky, nodes_indices) + uz = kernels_in_nodes_to_cell(kz, nodes_indices) + if scalar_model: + fields[i] += ( + constant_factor + * model[k] + * regional_field_amplitude + * (ux * fx + uy * fy + uz * fz) + ) + else: + fields[i] += ( + constant_factor + * regional_field_amplitude + * ( + ux * model[k] + + uy * model[k + n_cells] + + uz * model[k + 2 * n_cells] + ) + ) + + +def _forward_tmi( + receivers, + nodes, + model, + fields, + cell_nodes, + regional_field, + constant_factor, + scalar_model, +): + """ + Forward model the TMI + + This function should be used with a `numba.jit` decorator, for example: + + .. code:: + + from numba import jit + + jit_forward = jit(nopython=True, parallel=True)(_forward_tmi) + + Parameters + ---------- + receivers : (n_receivers, 3) array + Array with the locations of the receivers + nodes : (n_active_nodes, 3) array + Array with the location of the mesh nodes. + model : (n_active_cells) or (3 * n_active_cells) + Array with the susceptibility (scalar model) or the effective + susceptibility (vector model) of each active cell in the mesh. + If the model is scalar, the ``model`` array should have + ``n_active_cells`` elements and ``scalar_model`` should be True. + If the model is vector, the ``model`` array should have + ``3 * n_active_cells`` elements and ``scalar_model`` should be False. + fields : (n_receivers) array + Array full of zeros where the TMI on each receiver will be stored. This + could be a preallocated array or a slice of it. + cell_nodes : (n_active_cells, 8) array + Array of integers, where each row contains the indices of the nodes for + each active cell in the mesh. + regional_field : (3,) array + Array containing the x, y and z components of the regional magnetic + field (uniform background field). + constant_factor : float + Constant factor that will be used to multiply each element of the + sensitivity matrix. + scalar_model : bool + If True, the sensitivity matrix is build to work with scalar models + (susceptibilities). + If False, the sensitivity matrix is build to work with vector models + (effective susceptibilities). + + Notes + ----- + + The ``model`` must always be a 1d array: + + * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with + the same number of elements as active cells in the mesh. It should store + the magnetic susceptibilities of each active cell in SI units. 
+    * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array
+      with a number of elements equal to three times the active cells in the
+      mesh. It should store the components of the magnetization vector of each
+      active cell in :math:`Am^{-1}`. The order in which the components should
+      be passed is:
+        * every _easting_ component of each active cell,
+        * then every _northing_ component of each active cell,
+        * and finally every _upward_ component of each active cell.
+
+    """
+    n_receivers = receivers.shape[0]
+    n_nodes = nodes.shape[0]
+    n_cells = cell_nodes.shape[0]
+    fx, fy, fz = regional_field
+    regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2)
+    fx /= regional_field_amplitude
+    fy /= regional_field_amplitude
+    fz /= regional_field_amplitude
+    # Evaluate kernel function on each node, for each receiver location
+    for i in prange(n_receivers):
+        # Allocate vectors for kernels evaluated on mesh nodes
+        kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes)
+        kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes)
+        # Allocate small vector for the nodes indices for a given cell
+        nodes_indices = np.empty(8, dtype=cell_nodes.dtype)
+        for j in range(n_nodes):
+            dx = nodes[j, 0] - receivers[i, 0]
+            dy = nodes[j, 1] - receivers[i, 1]
+            dz = nodes[j, 2] - receivers[i, 2]
+            distance = np.sqrt(dx**2 + dy**2 + dz**2)
+            kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance)
+            kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance)
+            kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance)
+            kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance)
+            kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance)
+            kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance)
+        # Compute sensitivity matrix elements from the kernel values
+        for k in range(n_cells):
+            nodes_indices = cell_nodes[k, :]
+            uxx = kernels_in_nodes_to_cell(kxx, nodes_indices)
+            uyy = kernels_in_nodes_to_cell(kyy, nodes_indices)
+            uzz = kernels_in_nodes_to_cell(kzz, nodes_indices)
+            uxy = kernels_in_nodes_to_cell(kxy, nodes_indices)
+            uxz = kernels_in_nodes_to_cell(kxz, nodes_indices)
+            uyz = kernels_in_nodes_to_cell(kyz, nodes_indices)
+            bx = uxx * fx + uxy * fy + uxz * fz
+            by = uxy * fx + uyy * fy + uyz * fz
+            bz = uxz * fx + uyz * fy + uzz * fz
+            if scalar_model:
+                fields[i] += (
+                    constant_factor
+                    * model[k]
+                    * regional_field_amplitude
+                    * (bx * fx + by * fy + bz * fz)
+                )
+            else:
+                fields[i] += (
+                    constant_factor
+                    * regional_field_amplitude
+                    * (
+                        bx * model[k]
+                        + by * model[k + n_cells]
+                        + bz * model[k + 2 * n_cells]
+                    )
+                )
+
+
+def _forward_tmi_derivative(
+    receivers,
+    nodes,
+    model,
+    fields,
+    cell_nodes,
+    regional_field,
+    kernel_xx,
+    kernel_yy,
+    kernel_zz,
+    kernel_xy,
+    kernel_xz,
+    kernel_yz,
+    constant_factor,
+    scalar_model,
+):
+    r"""
+    Forward model a TMI derivative.
+
+    This function should be used with a `numba.jit` decorator, for example:
+
+    .. code::
+
+        from numba import jit
+
+        jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_derivative)
+
+    Parameters
+    ----------
+    receivers : (n_receivers, 3) array
+        Array with the locations of the receivers
+    nodes : (n_active_nodes, 3) array
+        Array with the location of the mesh nodes.
+    model : (n_active_cells) or (3 * n_active_cells)
+        Array with the susceptibility (scalar model) or the effective
+        susceptibility (vector model) of each active cell in the mesh.
+        If the model is scalar, the ``model`` array should have
+        ``n_active_cells`` elements and ``scalar_model`` should be True.
+        If the model is vector, the ``model`` array should have
+        ``3 * n_active_cells`` elements and ``scalar_model`` should be False.
+    fields : (n_receivers) array
+        Array full of zeros where the TMI derivative on each receiver will be
+        stored. This could be a preallocated array or a slice of it.
+    cell_nodes : (n_active_cells, 8) array
+        Array of integers, where each row contains the indices of the nodes for
+        each active cell in the mesh.
+    regional_field : (3,) array
+        Array containing the x, y and z components of the regional magnetic
+        field (uniform background field).
+    kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables
+        Kernel functions used for computing the desired TMI derivative.
+    constant_factor : float
+        Constant factor that will be used to multiply each element of the
+        sensitivity matrix.
+    scalar_model : bool
+        If True, the sensitivity matrix is built to work with scalar models
+        (susceptibilities).
+        If False, the sensitivity matrix is built to work with vector models
+        (effective susceptibilities).
+
+    Notes
+    -----
+
+    About the kernel functions
+    ^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+    To compute the :math:`\alpha` derivative of the TMI :math:`\Delta T` (with
+    :math:`\alpha \in \{x, y, z\}`) we need to evaluate third-order kernel
+    functions for the prism. The kernels we need to evaluate can be obtained by
+    fixing one of the subindices to the direction of the derivative
+    (:math:`\alpha`) and cycling through combinations of the other two (see the
+    usage sketch at the end of these notes).
+
+    For ``tmi_x`` we need to pass:
+
+    .. code::
+
+        kernel_xx=kernel_eee, kernel_yy=kernel_enn, kernel_zz=kernel_euu,
+        kernel_xy=kernel_een, kernel_xz=kernel_eeu, kernel_yz=kernel_enu
+
+    For ``tmi_y`` we need to pass:
+
+    .. code::
+
+        kernel_xx=kernel_een, kernel_yy=kernel_nnn, kernel_zz=kernel_nuu,
+        kernel_xy=kernel_enn, kernel_xz=kernel_enu, kernel_yz=kernel_nnu
+
+    For ``tmi_z`` we need to pass:
+
+    .. code::
+
+        kernel_xx=kernel_eeu, kernel_yy=kernel_nnu, kernel_zz=kernel_uuu,
+        kernel_xy=kernel_enu, kernel_xz=kernel_euu, kernel_yz=kernel_nuu
+
+
+    About the model array
+    ^^^^^^^^^^^^^^^^^^^^^
+
+    The ``model`` must always be a 1d array:
+
+    * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with
+      the same number of elements as active cells in the mesh. It should store
+      the magnetic susceptibilities of each active cell in SI units.
+    * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array
+      with a number of elements equal to three times the active cells in the
+      mesh. It should store the components of the magnetization vector of each
+      active cell in :math:`Am^{-1}`. The order in which the components should
+      be passed is:
+        * every _easting_ component of each active cell,
+        * then every _northing_ component of each active cell,
+        * and finally every _upward_ component of each active cell.
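+
+    For example, a jit-compiled forward for the easting derivative of the
+    TMI (``tmi_x``) could be set up as in this sketch, assuming the
+    third-order prism kernels are importable from ``choclo.prism`` and that
+    the remaining arrays (placeholder names) were built beforehand:
+
+    .. code::
+
+        from numba import jit
+        from choclo.prism import (
+            kernel_eee, kernel_enn, kernel_euu,
+            kernel_een, kernel_eeu, kernel_enu,
+        )
+
+        forward_tmi_x = jit(nopython=True, parallel=True)(_forward_tmi_derivative)
+        forward_tmi_x(
+            receivers, nodes, model, fields, cell_nodes, regional_field,
+            kernel_eee, kernel_enn, kernel_euu,
+            kernel_een, kernel_eeu, kernel_enu,
+            constant_factor, True,
+        )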
+ + """ + n_receivers = receivers.shape[0] + n_nodes = nodes.shape[0] + n_cells = cell_nodes.shape[0] + fx, fy, fz = regional_field + regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) + fx /= regional_field_amplitude + fy /= regional_field_amplitude + fz /= regional_field_amplitude + # Evaluate kernel function on each node, for each receiver location + for i in prange(n_receivers): + # Allocate vectors for kernels evaluated on mesh nodes + kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) + # Allocate small vector for the nodes indices for a given cell + nodes_indices = np.empty(8, dtype=cell_nodes.dtype) + for j in range(n_nodes): + dx = nodes[j, 0] - receivers[i, 0] + dy = nodes[j, 1] - receivers[i, 1] + dz = nodes[j, 2] - receivers[i, 2] + distance = np.sqrt(dx**2 + dy**2 + dz**2) + kxx[j] = kernel_xx(dx, dy, dz, distance) + kyy[j] = kernel_yy(dx, dy, dz, distance) + kzz[j] = kernel_zz(dx, dy, dz, distance) + kxy[j] = kernel_xy(dx, dy, dz, distance) + kxz[j] = kernel_xz(dx, dy, dz, distance) + kyz[j] = kernel_yz(dx, dy, dz, distance) + # Compute sensitivity matrix elements from the kernel values + for k in range(n_cells): + nodes_indices = cell_nodes[k, :] + uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) + uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) + uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) + uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) + uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) + uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) + bx = uxx * fx + uxy * fy + uxz * fz + by = uxy * fx + uyy * fy + uyz * fz + bz = uxz * fx + uyz * fy + uzz * fz + if scalar_model: + fields[i] += ( + constant_factor + * model[k] + * regional_field_amplitude + * (bx * fx + by * fy + bz * fz) + ) + else: + fields[i] += ( + constant_factor + * regional_field_amplitude + * ( + bx * model[k] + + by * model[k + n_cells] + + bz * model[k + 2 * n_cells] + ) + ) + + +NUMBA_FUNCTIONS_3D = { + "forward": { + "tmi": { + parallel: jit(nopython=True, parallel=parallel)(_forward_tmi) + for parallel in (True, False) + }, + "magnetic_component": { + parallel: jit(nopython=True, parallel=parallel)(_forward_mag) + for parallel in (True, False) + }, + "tmi_derivative": { + parallel: jit(nopython=True, parallel=parallel)(_forward_tmi_derivative) + for parallel in (True, False) + }, + }, + "sensitivity": { + "tmi": { + parallel: jit(nopython=True, parallel=parallel)(_sensitivity_tmi) + for parallel in (True, False) + }, + "magnetic_component": { + parallel: jit(nopython=True, parallel=parallel)(_sensitivity_mag) + for parallel in (True, False) + }, + "tmi_derivative": { + parallel: jit(nopython=True, parallel=parallel)(_sensitivity_tmi_derivative) + for parallel in (True, False) + }, + }, + "gt_dot_v": { + "tmi": { + False: _tmi_sensitivity_t_dot_v_serial, + True: _tmi_sensitivity_t_dot_v_parallel, + }, + "magnetic_component": { + False: _mag_sensitivity_t_dot_v_serial, + True: _mag_sensitivity_t_dot_v_parallel, + }, + "tmi_derivative": { + False: _tmi_derivative_sensitivity_t_dot_v_serial, + True: _tmi_derivative_sensitivity_t_dot_v_parallel, + }, + }, + "diagonal_gtg": { + "tmi": { + False: _diagonal_G_T_dot_G_tmi_serial, + True: _diagonal_G_T_dot_G_tmi_parallel, + }, + "magnetic_component": { + False: _diagonal_G_T_dot_G_mag_serial, + True: _diagonal_G_T_dot_G_mag_parallel, + }, + "tmi_derivative": { + False: _diagonal_G_T_dot_G_tmi_deriv_serial, + True: 
_diagonal_G_T_dot_G_tmi_deriv_parallel,
+        },
+    },
+}
diff --git a/simpeg/potential_fields/magnetics/_numba/__init__.py b/simpeg/potential_fields/magnetics/_numba/__init__.py
new file mode 100644
index 0000000000..b0acae3ea3
--- /dev/null
+++ b/simpeg/potential_fields/magnetics/_numba/__init__.py
@@ -0,0 +1,14 @@
+"""
+Numba functions for magnetic simulations.
+"""
+
+from ._2d_mesh import NUMBA_FUNCTIONS_2D
+from ._3d_mesh import NUMBA_FUNCTIONS_3D
+
+try:
+    import choclo
+except ImportError:
+    choclo = None
+
+
+__all__ = ["choclo", "NUMBA_FUNCTIONS_3D", "NUMBA_FUNCTIONS_2D"]
diff --git a/simpeg/potential_fields/magnetics/_numba_functions.py b/simpeg/potential_fields/magnetics/_numba_functions.py
deleted file mode 100644
index dfda671459..0000000000
--- a/simpeg/potential_fields/magnetics/_numba_functions.py
+++ /dev/null
@@ -1,1981 +0,0 @@
-"""
-Numba functions for magnetic simulation of rectangular prisms
-"""
-
-import numpy as np
-
-try:
-    import choclo
-except ImportError:
-    # Define dummy jit decorator
-    def jit(*args, **kwargs):
-        return lambda f: f
-
-    choclo = None
-else:
-    from numba import jit, prange
-
-from .._numba_utils import kernels_in_nodes_to_cell, evaluate_kernels_on_cell
-
-
-def _sensitivity_mag(
-    receivers,
-    nodes,
-    sensitivity_matrix,
-    cell_nodes,
-    regional_field,
-    kernel_x,
-    kernel_y,
-    kernel_z,
-    constant_factor,
-    scalar_model,
-):
-    r"""
-    Fill the sensitivity matrix for single mag component
-
-    This function should be used with a `numba.jit` decorator, for example:
-
-    .. code::
-
-        from numba import jit
-
-        jit_sensitivity_mag = jit(nopython=True, parallel=True)(_sensitivity_mag)
-
-    Parameters
-    ----------
-    receivers : (n_receivers, 3) array
-        Array with the locations of the receivers
-    nodes : (n_active_nodes, 3) array
-        Array with the location of the mesh nodes.
-    sensitivity_matrix : (n_receivers, n_active_nodes) array
-        Empty 2d array where the sensitivity matrix elements will be filled.
-        This could be a preallocated empty array or a slice of it.
-    cell_nodes : (n_active_cells, 8) array
-        Array of integers, where each row contains the indices of the nodes for
-        each active cell in the mesh.
-    regional_field : (3,) array
-        Array containing the x, y and z components of the regional magnetic
-        field (uniform background field).
-    kernel_x, kernel_y, kernel_z : callable
-        Kernels used to compute the desired magnetic component. For example,
-        for computing bx we need to use ``kernel_x=kernel_ee``,
-        ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``.
-    constant_factor : float
-        Constant factor that will be used to multiply each element of the
-        sensitivity matrix.
-    scalar_model : bool
-        If True, the sensitivity matrix is built to work with scalar models
-        (susceptibilities).
-        If False, the sensitivity matrix is built to work with vector models
-        (effective susceptibilities).
-
-    Notes
-    -----
-
-    About the kernel functions
-    ^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-    For computing the ``bx`` component of the magnetic field we need to use the
-    following kernels:
-
-    .. code::
-
-        kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu
-
-
-    For computing the ``by`` component of the magnetic field we need to use the
-    following kernels:
-
-    .. code::
-
-        kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu
-
-    For computing the ``bz`` component of the magnetic field we need to use the
-    following kernels:
-
-    ..
code:: - - kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. - - About the sensitivity matrix - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Each row of the sensitivity matrix corresponds to a single receiver - location. - - If ``scalar_model`` is True, then each element of the row will - correspond to the partial derivative of the selected magnetic component - with respect to the susceptibility of each cell in the mesh. - - If ``scalar_model`` is False, then each row can be split in three sections - containing: - - * the partial derivatives of the selected magnetic component with respect - to the _x_ component of the effective susceptibility of each cell; then - * the partial derivatives of the selected magnetic component with respect - to the _y_ component of the effective susceptibility of each cell; and then - * the partial derivatives of the selected magnetic component with respect - to the _z_ component of the effective susceptibility of each cell. - - So, if we call :math:`B_j` the magnetic field component on the receiver - :math:`j`, and :math:`\bar{\chi}^{(i)} = (\chi_x^{(i)}, \chi_y^{(i)}, - \chi_z^{(i)})` the effective susceptibility of the active cell :math:`i`, - then each row of the sensitivity matrix will be: - - .. math:: - - \left[ - \frac{\partial B_j}{\partial \chi_x^{(1)}}, - \dots, - \frac{\partial B_j}{\partial \chi_x^{(N)}}, - \frac{\partial B_j}{\partial \chi_y^{(1)}}, - \dots, - \frac{\partial B_j}{\partial \chi_y^{(N)}}, - \frac{\partial B_j}{\partial \chi_z^{(1)}}, - \dots, - \frac{\partial B_j}{\partial \chi_z^{(N)}} - \right] - - where :math:`N` is the total number of active cells. 
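In either case the forward-modelled component is then a plain matrix-vector product with a model ordered as described above; a small sketch with hypothetical shapes:

.. code::

    import numpy as np

    n_receivers, n_cells = 3, 5
    rng = np.random.default_rng(42)
    # Hypothetical dense sensitivity matrices for scalar and vector models.
    G_scalar = rng.normal(size=(n_receivers, n_cells))
    G_vector = rng.normal(size=(n_receivers, 3 * n_cells))
    susceptibilities = rng.uniform(size=n_cells)
    effective_susceptibilities = rng.normal(size=3 * n_cells)
    # Forward-modelled magnetic component at every receiver.
    b_scalar = G_scalar @ susceptibilities
    b_vector = G_vector @ effective_susceptibilities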
- """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kx[j] = kernel_x(dx, dy, dz, distance) - ky[j] = kernel_y(dx, dy, dz, distance) - kz[j] = kernel_z(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - ux = kernels_in_nodes_to_cell(kx, nodes_indices) - uy = kernels_in_nodes_to_cell(ky, nodes_indices) - uz = kernels_in_nodes_to_cell(kz, nodes_indices) - if scalar_model: - sensitivity_matrix[i, k] = ( - constant_factor - * regional_field_amplitude - * (ux * fx + uy * fy + uz * fz) - ) - else: - sensitivity_matrix[i, k] = ( - constant_factor * regional_field_amplitude * ux - ) - sensitivity_matrix[i, k + n_cells] = ( - constant_factor * regional_field_amplitude * uy - ) - sensitivity_matrix[i, k + 2 * n_cells] = ( - constant_factor * regional_field_amplitude * uz - ) - - -def _sensitivity_tmi( - receivers, - nodes, - sensitivity_matrix, - cell_nodes, - regional_field, - constant_factor, - scalar_model, -): - r""" - Fill the sensitivity matrix for TMI - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_sensitivity_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - sensitivity_matrix : array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - The array should have a shape of ``(n_receivers, n_active_nodes)`` - if ``scalar_model`` is True. - The array should have a shape of ``(n_receivers, 3 * n_active_nodes)`` - if ``scalar_model`` is False. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - scalar_model : bool - If True, the sensitivity matrix is build to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is build to work with vector models - (effective susceptibilities). - - Notes - ----- - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. 
- * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. - - About the sensitivity matrix - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Each row of the sensitivity matrix corresponds to a single receiver - location. - - If ``scalar_model`` is True, then each element of the row will - correspond to the partial derivative of the tmi - with respect to the susceptibility of each cell in the mesh. - - If ``scalar_model`` is False, then each row can be split in three sections - containing: - - * the partial derivatives of the tmi with respect - to the _x_ component of the effective susceptibility of each cell; then - * the partial derivatives of the tmi with respect - to the _y_ component of the effective susceptibility of each cell; and then - * the partial derivatives of the tmi with respect - to the _z_ component of the effective susceptibility of each cell. - - So, if we call :math:`T_j` the tmi on the receiver - :math:`j`, and :math:`\bar{\chi}^{(i)} = (\chi_x^{(i)}, \chi_y^{(i)}, - \chi_z^{(i)})` the effective susceptibility of the active cell :math:`i`, - then each row of the sensitivity matrix will be: - - .. math:: - - \left[ - \frac{\partial T_j}{\partial \chi_x^{(1)}}, - \dots, - \frac{\partial T_j}{\partial \chi_x^{(N)}}, - \frac{\partial T_j}{\partial \chi_y^{(1)}}, - \dots, - \frac{\partial T_j}{\partial \chi_y^{(N)}}, - \frac{\partial T_j}{\partial \chi_z^{(1)}}, - \dots, - \frac{\partial T_j}{\partial \chi_z^{(N)}} - \right] - - where :math:`N` is the total number of active cells. 
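Behind these matrix entries is the projection of the anomalous field onto the unit regional field; a minimal numpy sketch of that projection, with hypothetical field values:

.. code::

    import numpy as np

    # Hypothetical regional field (nT) and anomalous field at one receiver.
    regional_field = np.array([22000.0, 3000.0, -45000.0])
    b = np.array([1.2, -0.4, 3.1])
    f_hat = regional_field / np.linalg.norm(regional_field)
    tmi = b @ f_hat  # projection onto the regional field direction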
- """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) - kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) - kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) - kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) - kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) - kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - # Fill the sensitivity matrix element(s) that correspond to the - # current active cell - if scalar_model: - sensitivity_matrix[i, k] = ( - constant_factor - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - else: - sensitivity_matrix[i, k] = ( - constant_factor * regional_field_amplitude * bx - ) - sensitivity_matrix[i, k + n_cells] = ( - constant_factor * regional_field_amplitude * by - ) - sensitivity_matrix[i, k + 2 * n_cells] = ( - constant_factor * regional_field_amplitude * bz - ) - - -def _sensitivity_tmi_derivative( - receivers, - nodes, - sensitivity_matrix, - cell_nodes, - regional_field, - kernel_xx, - kernel_yy, - kernel_zz, - kernel_xy, - kernel_xz, - kernel_yz, - constant_factor, - scalar_model, -): - r""" - Fill the sensitivity matrix for a TMI derivative. - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_sens = jit(nopython=True, parallel=True)(_sensitivity_tmi_derivative) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - sensitivity_matrix : array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - The array should have a shape of ``(n_receivers, n_active_nodes)`` - if ``scalar_model`` is True. - The array should have a shape of ``(n_receivers, 3 * n_active_nodes)`` - if ``scalar_model`` is False. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. 
- regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables - Kernel functions used for computing the desired TMI derivative. - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - scalar_model : bool - If True, the sensitivity matrix is build to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is build to work with vector models - (effective susceptibilities). - - Notes - ----- - - About the kernel functions - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - - To compute the :math:`\alpha` derivative of the TMI :math:`\Delta T` (with - :math:`\alpha \in \{x, y, z\}` we need to evaluate third order kernels - functions for the prism. The kernels we need to evaluate can be obtained by - fixing one of the subindices to the direction of the derivative - (:math:`\alpha`) and cycle through combinations of the other two. - - For ``tmi_x`` we need to pass: - - .. code:: - - kernel_xx=kernel_eee, kernel_yy=kernel_enn, kernel_zz=kernel_euu, - kernel_xy=kernel_een, kernel_xz=kernel_eeu, kernel_yz=kernel_enu - - For ``tmi_y`` we need to pass: - - .. code:: - - kernel_xx=kernel_een, kernel_yy=kernel_nnn, kernel_zz=kernel_nuu, - kernel_xy=kernel_enn, kernel_xz=kernel_enu, kernel_yz=kernel_nnu - - For ``tmi_z`` we need to pass: - - .. code:: - - kernel_xx=kernel_eeu, kernel_yy=kernel_nnu, kernel_zz=kernel_uuu, - kernel_xy=kernel_enu, kernel_xz=kernel_euu, kernel_yz=kernel_nuu - - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. - - About the sensitivity matrix - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Each row of the sensitivity matrix corresponds to a single receiver - location. - - If ``scalar_model`` is True, then each element of the row will - correspond to the partial derivative of the tmi derivative (spatial) - with respect to the susceptibility of each cell in the mesh. - - If ``scalar_model`` is False, then each row can be split in three sections - containing: - - * the partial derivatives of the tmi derivative with respect - to the _x_ component of the effective susceptibility of each cell; then - * the partial derivatives of the tmi derivative with respect - to the _y_ component of the effective susceptibility of each cell; and then - * the partial derivatives of the tmi derivative with respect - to the _z_ component of the effective susceptibility of each cell. 
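One way to spell out the kernel selection described above, assuming choclo's third-order prism kernels are importable under the names used in the mapping (the dictionary name is illustrative):

.. code::

    from choclo.prism import (
        kernel_eee, kernel_een, kernel_eeu, kernel_enn, kernel_enu,
        kernel_euu, kernel_nnn, kernel_nnu, kernel_nuu, kernel_uuu,
    )

    # (kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz)
    TMI_DERIVATIVE_KERNELS = {
        "tmi_x": (kernel_eee, kernel_enn, kernel_euu, kernel_een, kernel_eeu, kernel_enu),
        "tmi_y": (kernel_een, kernel_nnn, kernel_nuu, kernel_enn, kernel_enu, kernel_nnu),
        "tmi_z": (kernel_eeu, kernel_nnu, kernel_uuu, kernel_enu, kernel_euu, kernel_nuu),
    }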
- """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = kernel_xx(dx, dy, dz, distance) - kyy[j] = kernel_yy(dx, dy, dz, distance) - kzz[j] = kernel_zz(dx, dy, dz, distance) - kxy[j] = kernel_xy(dx, dy, dz, distance) - kxz[j] = kernel_xz(dx, dy, dz, distance) - kyz[j] = kernel_yz(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - # Fill the sensitivity matrix element(s) that correspond to the - # current active cell - if scalar_model: - sensitivity_matrix[i, k] = ( - constant_factor - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - else: - sensitivity_matrix[i, k] = ( - constant_factor * regional_field_amplitude * bx - ) - sensitivity_matrix[i, k + n_cells] = ( - constant_factor * regional_field_amplitude * by - ) - sensitivity_matrix[i, k + 2 * n_cells] = ( - constant_factor * regional_field_amplitude * bz - ) - - -def _forward_mag( - receivers, - nodes, - model, - fields, - cell_nodes, - regional_field, - kernel_x, - kernel_y, - kernel_z, - constant_factor, - scalar_model, -): - """ - Forward model single magnetic component - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_mag) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - model : (n_active_cells) or (3 * n_active_cells) array - Array containing the susceptibilities (scalar) or effective - susceptibilities (vector) of the active cells in the mesh, in SI - units. - Susceptibilities are expected if ``scalar_model`` is True, - and the array should have ``n_active_cells`` elements. - Effective susceptibilities are expected if ``scalar_model`` is False, - and the array should have ``3 * n_active_cells`` elements. - fields : (n_receivers) array - Array full of zeros where the magnetic component on each receiver will - be stored. This could be a preallocated array or a slice of it. 
- cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - kernel_x, kernel_y, kernel_z : callable - Kernels used to compute the desired magnetic component. For example, - for computing bx we need to use ``kernel_x=kernel_ee``, - ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - scalar_model : bool - If True, the forward will be computing assuming that the ``model`` has - susceptibilities (scalar model) for each active cell. - If False, the forward will be computing assuming that the ``model`` has - effective susceptibilities (vector model) for each active cell. - - Notes - ----- - - About the kernel functions - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - - For computing the ``bx`` component of the magnetic field we need to use the - following kernels: - - .. code:: - - kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu - - - For computing the ``by`` component of the magnetic field we need to use the - following kernels: - - .. code:: - - kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu - - For computing the ``bz`` component of the magnetic field we need to use the - following kernels: - - .. code:: - - kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu - - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. 
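Putting those pieces together for the ``bx`` component, a sketch assuming ``_forward_mag`` is in scope (the compiled name is hypothetical):

.. code::

    from numba import jit
    from choclo.prism import kernel_ee, kernel_en, kernel_eu

    # Compile once and reuse; the argument order follows the signature above:
    # (receivers, nodes, model, fields, cell_nodes, regional_field,
    #  kernel_x, kernel_y, kernel_z, constant_factor, scalar_model)
    jit_forward_bx = jit(nopython=True, parallel=True)(_forward_mag)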
- - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kx, ky, kz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kx[j] = kernel_x(dx, dy, dz, distance) - ky[j] = kernel_y(dx, dy, dz, distance) - kz[j] = kernel_z(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - ux = kernels_in_nodes_to_cell(kx, nodes_indices) - uy = kernels_in_nodes_to_cell(ky, nodes_indices) - uz = kernels_in_nodes_to_cell(kz, nodes_indices) - if scalar_model: - fields[i] += ( - constant_factor - * model[k] - * regional_field_amplitude - * (ux * fx + uy * fy + uz * fz) - ) - else: - fields[i] += ( - constant_factor - * regional_field_amplitude - * ( - ux * model[k] - + uy * model[k + n_cells] - + uz * model[k + 2 * n_cells] - ) - ) - - -def _forward_tmi( - receivers, - nodes, - model, - fields, - cell_nodes, - regional_field, - constant_factor, - scalar_model, -): - """ - Forward model the TMI - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_tmi) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - model : (n_active_cells) or (3 * n_active_cells) - Array with the susceptibility (scalar model) or the effective - susceptibility (vector model) of each active cell in the mesh. - If the model is scalar, the ``model`` array should have - ``n_active_cells`` elements and ``scalar_model`` should be True. - If the model is vector, the ``model`` array should have - ``3 * n_active_cells`` elements and ``scalar_model`` should be False. - fields : (n_receivers) array - Array full of zeros where the TMI on each receiver will be stored. This - could be a preallocated array or a slice of it. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - scalar_model : bool - If True, the sensitivity matrix is build to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is build to work with vector models - (effective susceptibilities). - - Notes - ----- - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. 
- * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. - - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = choclo.prism.kernel_ee(dx, dy, dz, distance) - kyy[j] = choclo.prism.kernel_nn(dx, dy, dz, distance) - kzz[j] = choclo.prism.kernel_uu(dx, dy, dz, distance) - kxy[j] = choclo.prism.kernel_en(dx, dy, dz, distance) - kxz[j] = choclo.prism.kernel_eu(dx, dy, dz, distance) - kyz[j] = choclo.prism.kernel_nu(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - if scalar_model: - fields[i] += ( - constant_factor - * model[k] - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - else: - fields[i] += ( - constant_factor - * regional_field_amplitude - * ( - bx * model[k] - + by * model[k + n_cells] - + bz * model[k + 2 * n_cells] - ) - ) - - -def _forward_tmi_derivative( - receivers, - nodes, - model, - fields, - cell_nodes, - regional_field, - kernel_xx, - kernel_yy, - kernel_zz, - kernel_xy, - kernel_xz, - kernel_yz, - constant_factor, - scalar_model, -): - r""" - Forward model a TMI derivative. - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_derivative) - - Parameters - ---------- - receivers : (n_receivers, 3) array - Array with the locations of the receivers - nodes : (n_active_nodes, 3) array - Array with the location of the mesh nodes. - model : (n_active_cells) or (3 * n_active_cells) - Array with the susceptibility (scalar model) or the effective - susceptibility (vector model) of each active cell in the mesh. - If the model is scalar, the ``model`` array should have - ``n_active_cells`` elements and ``scalar_model`` should be True. 
- If the model is vector, the ``model`` array should have - ``3 * n_active_cells`` elements and ``scalar_model`` should be False. - fields : (n_receivers) array - Array full of zeros where the TMI derivative on each receiver will be - stored. This could be a preallocated array or a slice of it. - cell_nodes : (n_active_cells, 8) array - Array of integers, where each row contains the indices of the nodes for - each active cell in the mesh. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables - Kernel functions used for computing the desired TMI derivative. - constant_factor : float - Constant factor that will be used to multiply each element of the - sensitivity matrix. - scalar_model : bool - If True, the sensitivity matrix is build to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is build to work with vector models - (effective susceptibilities). - - Notes - ----- - - About the kernel functions - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - - To compute the :math:`\alpha` derivative of the TMI :math:`\Delta T` (with - :math:`\alpha \in \{x, y, z\}` we need to evaluate third order kernels - functions for the prism. The kernels we need to evaluate can be obtained by - fixing one of the subindices to the direction of the derivative - (:math:`\alpha`) and cycle through combinations of the other two. - - For ``tmi_x`` we need to pass: - - .. code:: - - kernel_xx=kernel_eee, kernel_yy=kernel_enn, kernel_zz=kernel_euu, - kernel_xy=kernel_een, kernel_xz=kernel_eeu, kernel_yz=kernel_enu - - For ``tmi_y`` we need to pass: - - .. code:: - - kernel_xx=kernel_een, kernel_yy=kernel_nnn, kernel_zz=kernel_nuu, - kernel_xy=kernel_enn, kernel_xz=kernel_enu, kernel_yz=kernel_nnu - - For ``tmi_z`` we need to pass: - - .. code:: - - kernel_xx=kernel_eeu, kernel_yy=kernel_nnu, kernel_zz=kernel_uuu, - kernel_xy=kernel_enu, kernel_xz=kernel_euu, kernel_yz=kernel_nuu - - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. 
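The replacement module collects the jitted variants of these functions in the ``NUMBA_FUNCTIONS_3D`` dictionary shown earlier in this diff, so selecting an implementation is a plain lookup (sketch; ``forward_func`` is a hypothetical name):

.. code::

    # Pick the jit-compiled TMI-derivative forward, serial or parallel.
    parallel = True
    forward_func = NUMBA_FUNCTIONS_3D["forward"]["tmi_derivative"][parallel]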
- - """ - n_receivers = receivers.shape[0] - n_nodes = nodes.shape[0] - n_cells = cell_nodes.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Evaluate kernel function on each node, for each receiver location - for i in prange(n_receivers): - # Allocate vectors for kernels evaluated on mesh nodes - kxx, kyy, kzz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - kxy, kxz, kyz = np.empty(n_nodes), np.empty(n_nodes), np.empty(n_nodes) - # Allocate small vector for the nodes indices for a given cell - nodes_indices = np.empty(8, dtype=cell_nodes.dtype) - for j in range(n_nodes): - dx = nodes[j, 0] - receivers[i, 0] - dy = nodes[j, 1] - receivers[i, 1] - dz = nodes[j, 2] - receivers[i, 2] - distance = np.sqrt(dx**2 + dy**2 + dz**2) - kxx[j] = kernel_xx(dx, dy, dz, distance) - kyy[j] = kernel_yy(dx, dy, dz, distance) - kzz[j] = kernel_zz(dx, dy, dz, distance) - kxy[j] = kernel_xy(dx, dy, dz, distance) - kxz[j] = kernel_xz(dx, dy, dz, distance) - kyz[j] = kernel_yz(dx, dy, dz, distance) - # Compute sensitivity matrix elements from the kernel values - for k in range(n_cells): - nodes_indices = cell_nodes[k, :] - uxx = kernels_in_nodes_to_cell(kxx, nodes_indices) - uyy = kernels_in_nodes_to_cell(kyy, nodes_indices) - uzz = kernels_in_nodes_to_cell(kzz, nodes_indices) - uxy = kernels_in_nodes_to_cell(kxy, nodes_indices) - uxz = kernels_in_nodes_to_cell(kxz, nodes_indices) - uyz = kernels_in_nodes_to_cell(kyz, nodes_indices) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - if scalar_model: - fields[i] += ( - constant_factor - * model[k] - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - else: - fields[i] += ( - constant_factor - * regional_field_amplitude - * ( - bx * model[k] - + by * model[k + n_cells] - + bz * model[k + 2 * n_cells] - ) - ) - - -def _forward_mag_2d_mesh( - receivers, - cells_bounds, - top, - bottom, - model, - fields, - regional_field, - forward_func, - scalar_model, -): - """ - Forward model single magnetic component for 2D meshes. - - This function is designed to be used with equivalent sources, where the - mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell - are passed through the ``top`` and ``bottom`` arrays. - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_mag_2d_mesh) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - cells_bounds : (n_active_cells, 4) numpy.ndarray - Array with the bounds of each active cell in the 2D mesh. For each row, the - bounds should be passed in the following order: ``x_min``, ``x_max``, - ``y_min``, ``y_max``. - top : (n_active_cells) np.ndarray - Array with the top boundaries of each active cell in the 2D mesh. - bottom : (n_active_cells) np.ndarray - Array with the bottom boundaries of each active cell in the 2D mesh. - model : (n_active_cells) or (3 * n_active_cells) array - Array containing the susceptibilities (scalar) or effective - susceptibilities (vector) of the active cells in the mesh, in SI - units. - Susceptibilities are expected if ``scalar_model`` is True, - and the array should have ``n_active_cells`` elements. 
- Effective susceptibilities are expected if ``scalar_model`` is False, - and the array should have ``3 * n_active_cells`` elements. - fields : (n_receivers) array - Array full of zeros where the magnetic component on each receiver will - be stored. This could be a preallocated array or a slice of it. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - forward_func : callable - Forward function that will be evaluated on each node of the mesh. Choose - one of the forward functions in ``choclo.prism``. - scalar_model : bool - If True, the forward will be computing assuming that the ``model`` has - susceptibilities (scalar model) for each active cell. - If False, the forward will be computing assuming that the ``model`` has - effective susceptibilities (vector model) for each active cell. - - Notes - ----- - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. - - """ - n_receivers = receivers.shape[0] - n_cells = cells_bounds.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Forward model the magnetic component of each cell on each receiver location - for i in prange(n_receivers): - for j in range(n_cells): - # Define magnetization vector of the cell - # (we we'll divide by mu_0 when adding the forward modelled field) - if scalar_model: - # model is susceptibility, so the vector is parallel to the - # regional field - magnetization_x = model[j] * fx - magnetization_y = model[j] * fy - magnetization_z = model[j] * fz - else: - # model is effective susceptibility (vector) - magnetization_x = model[j] - magnetization_y = model[j + n_cells] - magnetization_z = model[j + 2 * n_cells] - # Forward the magnetic component - fields[i] += ( - regional_field_amplitude - * forward_func( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - magnetization_x, - magnetization_y, - magnetization_z, - ) - / choclo.constants.VACUUM_MAGNETIC_PERMEABILITY - ) - - -def _forward_tmi_2d_mesh( - receivers, - cells_bounds, - top, - bottom, - model, - fields, - regional_field, - scalar_model, -): - """ - Forward model the TMI for 2D meshes. - - This function is designed to be used with equivalent sources, where the - mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell - are passed through the ``top`` and ``bottom`` arrays. - - This function should be used with a `numba.jit` decorator, for example: - - .. 
code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_2d_mesh) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - cells_bounds : (n_active_cells, 4) numpy.ndarray - Array with the bounds of each active cell in the 2D mesh. For each row, the - bounds should be passed in the following order: ``x_min``, ``x_max``, - ``y_min``, ``y_max``. - top : (n_active_cells) np.ndarray - Array with the top boundaries of each active cell in the 2D mesh. - bottom : (n_active_cells) np.ndarray - Array with the bottom boundaries of each active cell in the 2D mesh. - model : (n_active_cells) or (3 * n_active_cells) - Array with the susceptibility (scalar model) or the effective - susceptibility (vector model) of each active cell in the mesh. - If the model is scalar, the ``model`` array should have - ``n_active_cells`` elements and ``scalar_model`` should be True. - If the model is vector, the ``model`` array should have - ``3 * n_active_cells`` elements and ``scalar_model`` should be False. - fields : (n_receivers) array - Array full of zeros where the TMI on each receiver will be stored. This - could be a preallocated array or a slice of it. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - scalar_model : bool - If True, the sensitivity matrix is build to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is build to work with vector models - (effective susceptibilities). - - Notes - ----- - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. 
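The 2D-mesh geometry arrays referenced above can be as simple as the following sketch of a hypothetical two-cell prism layer:

.. code::

    import numpy as np

    # Two hypothetical active cells; each row is x_min, x_max, y_min, y_max.
    cells_bounds = np.array(
        [
            [0.0, 10.0, 0.0, 10.0],
            [10.0, 20.0, 0.0, 10.0],
        ]
    )
    top = np.array([0.0, 0.0])       # top boundary of each cell
    bottom = np.array([-5.0, -5.0])  # bottom boundary of each cell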
- - """ - n_receivers = receivers.shape[0] - n_cells = cells_bounds.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Forward model the magnetic component of each cell on each receiver location - for i in prange(n_receivers): - for j in range(n_cells): - # Define magnetization vector of the cell - # (we we'll divide by mu_0 when adding the forward modelled field) - if scalar_model: - # model is susceptibility, so the vector is parallel to the - # regional field - magnetization_x = model[j] * fx - magnetization_y = model[j] * fy - magnetization_z = model[j] * fz - else: - # model is effective susceptibility (vector) - magnetization_x = model[j] - magnetization_y = model[j + n_cells] - magnetization_z = model[j + 2 * n_cells] - # Forward the magnetic field vector and compute tmi - bx, by, bz = choclo.prism.magnetic_field( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - magnetization_x, - magnetization_y, - magnetization_z, - ) - fields[i] += ( - regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - / choclo.constants.VACUUM_MAGNETIC_PERMEABILITY - ) - - -def _forward_tmi_derivative_2d_mesh( - receivers, - cells_bounds, - top, - bottom, - model, - fields, - regional_field, - kernel_xx, - kernel_yy, - kernel_zz, - kernel_xy, - kernel_xz, - kernel_yz, - scalar_model, -): - r""" - Forward model a TMI derivative for 2D meshes. - - This function is designed to be used with equivalent sources, where the - mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell - are passed through the ``top`` and ``bottom`` arrays. - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_forward = jit(nopython=True, parallel=True)(_forward_tmi_2d_mesh) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - cells_bounds : (n_active_cells, 4) numpy.ndarray - Array with the bounds of each active cell in the 2D mesh. For each row, the - bounds should be passed in the following order: ``x_min``, ``x_max``, - ``y_min``, ``y_max``. - top : (n_active_cells) np.ndarray - Array with the top boundaries of each active cell in the 2D mesh. - bottom : (n_active_cells) np.ndarray - Array with the bottom boundaries of each active cell in the 2D mesh. - model : (n_active_cells) or (3 * n_active_cells) - Array with the susceptibility (scalar model) or the effective - susceptibility (vector model) of each active cell in the mesh. - If the model is scalar, the ``model`` array should have - ``n_active_cells`` elements and ``scalar_model`` should be True. - If the model is vector, the ``model`` array should have - ``3 * n_active_cells`` elements and ``scalar_model`` should be False. - fields : (n_receivers) array - Array full of zeros where the TMI on each receiver will be stored. This - could be a preallocated array or a slice of it. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables - Kernel functions used for computing the desired TMI derivative. 
- scalar_model : bool - If True, the sensitivity matrix is build to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is build to work with vector models - (effective susceptibilities). - - Notes - ----- - - About the kernel functions - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - - To compute the :math:`\alpha` derivative of the TMI :math:`\Delta T` (with - :math:`\alpha \in \{x, y, z\}` we need to evaluate third order kernels - functions for the prism. The kernels we need to evaluate can be obtained by - fixing one of the subindices to the direction of the derivative - (:math:`\alpha`) and cycle through combinations of the other two. - - For ``tmi_x`` we need to pass: - - .. code:: - - kernel_xx=kernel_eee, kernel_yy=kernel_enn, kernel_zz=kernel_euu, - kernel_xy=kernel_een, kernel_xz=kernel_eeu, kernel_yz=kernel_enu - - For ``tmi_y`` we need to pass: - - .. code:: - - kernel_xx=kernel_een, kernel_yy=kernel_nnn, kernel_zz=kernel_nuu, - kernel_xy=kernel_enn, kernel_xz=kernel_enu, kernel_yz=kernel_nnu - - For ``tmi_z`` we need to pass: - - .. code:: - - kernel_xx=kernel_eeu, kernel_yy=kernel_nnu, kernel_zz=kernel_uuu, - kernel_xy=kernel_enu, kernel_xz=kernel_euu, kernel_yz=kernel_nuu - - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. 
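The body below evaluates three kernels per prism through ``evaluate_kernels_on_cell`` (imported at the top of this module); a hedged sketch of a single call, here with the ``tmi_z`` kernel triple and hypothetical geometry:

.. code::

    from choclo.prism import kernel_eeu, kernel_nnu, kernel_uuu
    from simpeg.potential_fields._numba_utils import evaluate_kernels_on_cell

    # Hypothetical receiver location and prism bounds.
    easting, northing, upward = 5.0, 5.0, 1.0
    x_min, x_max, y_min, y_max = 0.0, 10.0, 0.0, 10.0
    cell_bottom, cell_top = -5.0, 0.0
    # Evaluate three kernels over one prism for one receiver.
    uxx, uyy, uzz = evaluate_kernels_on_cell(
        easting, northing, upward,
        x_min, x_max, y_min, y_max,
        cell_bottom, cell_top,
        kernel_eeu, kernel_nnu, kernel_uuu,
    )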
- - """ - n_receivers = receivers.shape[0] - n_cells = cells_bounds.shape[0] - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - # Forward model the magnetic component of each cell on each receiver location - for i in prange(n_receivers): - for j in range(n_cells): - uxx, uyy, uzz = evaluate_kernels_on_cell( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - kernel_xx, - kernel_yy, - kernel_zz, - ) - uxy, uxz, uyz = evaluate_kernels_on_cell( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - kernel_xy, - kernel_xz, - kernel_yz, - ) - if scalar_model: - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - fields[i] += ( - model[j] - * regional_field_amplitude - * (fx * bx + fy * by + fz * bz) - / (4 * np.pi) - ) - else: - model_x = model[j] - model_y = model[j + n_cells] - model_z = model[j + 2 * n_cells] - bx = uxx * model_x + uxy * model_y + uxz * model_z - by = uxy * model_x + uyy * model_y + uyz * model_z - bz = uxz * model_x + uyz * model_y + uzz * model_z - fields[i] += ( - regional_field_amplitude * (bx * fx + by * fy + bz * fz) / 4 / np.pi - ) - - -def _sensitivity_mag_2d_mesh( - receivers, - cells_bounds, - top, - bottom, - sensitivity_matrix, - regional_field, - kernel_x, - kernel_y, - kernel_z, - scalar_model, -): - r""" - Fill the sensitivity matrix for single mag component for 2d meshes. - - This function is designed to be used with equivalent sources, where the - mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell - are passed through the ``top`` and ``bottom`` arrays. - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_sensitivity = jit(nopython=True, parallel=True)(_sensitivity_mag_2d_mesh) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - cells_bounds : (n_active_cells, 4) numpy.ndarray - Array with the bounds of each active cell in the 2D mesh. For each row, the - bounds should be passed in the following order: ``x_min``, ``x_max``, - ``y_min``, ``y_max``. - top : (n_active_cells) np.ndarray - Array with the top boundaries of each active cell in the 2D mesh. - bottom : (n_active_cells) np.ndarray - Array with the bottom boundaries of each active cell in the 2D mesh. - sensitivity_matrix : (n_receivers, n_active_nodes) array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - kernel_x, kernel_y, kernel_z : callable - Kernels used to compute the desired magnetic component. For example, - for computing bx we need to use ``kernel_x=kernel_ee``, - ``kernel_y=kernel_en``, ``kernel_z=kernel_eu``. - scalar_model : bool - If True, the sensitivity matrix is built to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is built to work with vector models - (effective susceptibilities). 
- - Notes - ----- - - About the kernel functions - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - - For computing the ``bx`` component of the magnetic field we need to use the - following kernels: - - .. code:: - - kernel_x=kernel_ee, kernel_y=kernel_en, kernel_z=kernel_eu - - - For computing the ``by`` component of the magnetic field we need to use the - following kernels: - - .. code:: - - kernel_x=kernel_en, kernel_y=kernel_nn, kernel_z=kernel_nu - - For computing the ``bz`` component of the magnetic field we need to use the - following kernels: - - .. code:: - - kernel_x=kernel_eu, kernel_y=kernel_nu, kernel_z=kernel_uu - - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. - - About the sensitivity matrix - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Each row of the sensitivity matrix corresponds to a single receiver - location. - - If ``scalar_model`` is True, then each element of the row will - correspond to the partial derivative of the selected magnetic component - with respect to the susceptibility of each cell in the mesh. - - If ``scalar_model`` is False, then each row can be split in three sections - containing: - - * the partial derivatives of the selected magnetic component with respect - to the _x_ component of the effective susceptibility of each cell; then - * the partial derivatives of the selected magnetic component with respect - to the _y_ component of the effective susceptibility of each cell; and then - * the partial derivatives of the selected magnetic component with respect - to the _z_ component of the effective susceptibility of each cell. - - So, if we call :math:`B_j` the magnetic field component on the receiver - :math:`j`, and :math:`\bar{\chi}^{(i)} = (\chi_x^{(i)}, \chi_y^{(i)}, - \chi_z^{(i)})` the effective susceptibility of the active cell :math:`i`, - then each row of the sensitivity matrix will be: - - .. math:: - - \left[ - \frac{\partial B_j}{\partial \chi_x^{(1)}}, - \dots, - \frac{\partial B_j}{\partial \chi_x^{(N)}}, - \frac{\partial B_j}{\partial \chi_y^{(1)}}, - \dots, - \frac{\partial B_j}{\partial \chi_y^{(N)}}, - \frac{\partial B_j}{\partial \chi_z^{(1)}}, - \dots, - \frac{\partial B_j}{\partial \chi_z^{(N)}} - \right] - - where :math:`N` is the total number of active cells. 
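Given that row layout, the three per-component blocks of a vector-model row can be recovered with a reshape (sketch with hypothetical sizes):

.. code::

    import numpy as np

    n_receivers, n_cells = 2, 4
    G = np.arange(n_receivers * 3 * n_cells, dtype=float).reshape(
        n_receivers, 3 * n_cells
    )
    # Derivatives w.r.t. chi_x, chi_y and chi_z for the first receiver.
    d_chi_x, d_chi_y, d_chi_z = G[0].reshape(3, n_cells)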
- """ - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - - constant_factor = 1 / 4 / np.pi - - # Fill the sensitivity matrix - n_receivers = receivers.shape[0] - n_cells = cells_bounds.shape[0] - for i in prange(n_receivers): - for j in range(n_cells): - # Evaluate kernels for the current cell and receiver - ux, uy, uz = evaluate_kernels_on_cell( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - kernel_x, - kernel_y, - kernel_z, - ) - if scalar_model: - sensitivity_matrix[i, j] = ( - constant_factor - * regional_field_amplitude - * (ux * fx + uy * fy + uz * fz) - ) - else: - sensitivity_matrix[i, j] = ( - constant_factor * regional_field_amplitude * ux - ) - sensitivity_matrix[i, j + n_cells] = ( - constant_factor * regional_field_amplitude * uy - ) - sensitivity_matrix[i, j + 2 * n_cells] = ( - constant_factor * regional_field_amplitude * uz - ) - - -def _sensitivity_tmi_2d_mesh( - receivers, - cells_bounds, - top, - bottom, - sensitivity_matrix, - regional_field, - scalar_model, -): - r""" - Fill the sensitivity matrix TMI for 2d meshes. - - This function is designed to be used with equivalent sources, where the - mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell - are passed through the ``top`` and ``bottom`` arrays. - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi_2d_mesh) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - cells_bounds : (n_active_cells, 4) numpy.ndarray - Array with the bounds of each active cell in the 2D mesh. For each row, the - bounds should be passed in the following order: ``x_min``, ``x_max``, - ``y_min``, ``y_max``. - top : (n_active_cells) np.ndarray - Array with the top boundaries of each active cell in the 2D mesh. - bottom : (n_active_cells) np.ndarray - Array with the bottom boundaries of each active cell in the 2D mesh. - sensitivity_matrix : array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - The array should have a shape of ``(n_receivers, n_active_nodes)`` - if ``scalar_model`` is True. - The array should have a shape of ``(n_receivers, 3 * n_active_nodes)`` - if ``scalar_model`` is False. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - scalar_model : bool - If True, the sensitivity matrix is build to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is build to work with vector models - (effective susceptibilities). - - Notes - ----- - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. 
It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. - - About the sensitivity matrix - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Each row of the sensitivity matrix corresponds to a single receiver - location. - - If ``scalar_model`` is True, then each element of the row will - correspond to the partial derivative of the tmi - with respect to the susceptibility of each cell in the mesh. - - If ``scalar_model`` is False, then each row can be split in three sections - containing: - - * the partial derivatives of the tmi with respect - to the _x_ component of the effective susceptibility of each cell; then - * the partial derivatives of the tmi with respect - to the _y_ component of the effective susceptibility of each cell; and then - * the partial derivatives of the tmi with respect - to the _z_ component of the effective susceptibility of each cell. - - So, if we call :math:`T_j` the tmi on the receiver - :math:`j`, and :math:`\bar{\chi}^{(i)} = (\chi_x^{(i)}, \chi_y^{(i)}, - \chi_z^{(i)})` the effective susceptibility of the active cell :math:`i`, - then each row of the sensitivity matrix will be: - - .. math:: - - \left[ - \frac{\partial T_j}{\partial \chi_x^{(1)}}, - \dots, - \frac{\partial T_j}{\partial \chi_x^{(N)}}, - \frac{\partial T_j}{\partial \chi_y^{(1)}}, - \dots, - \frac{\partial T_j}{\partial \chi_y^{(N)}}, - \frac{\partial T_j}{\partial \chi_z^{(1)}}, - \dots, - \frac{\partial T_j}{\partial \chi_z^{(N)}} - \right] - - where :math:`N` is the total number of active cells. - """ - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - - constant_factor = 1 / 4 / np.pi - - # Fill the sensitivity matrix - n_receivers = receivers.shape[0] - n_cells = cells_bounds.shape[0] - for i in prange(n_receivers): - for j in range(n_cells): - # Evaluate kernels for the current cell and receiver - uxx, uyy, uzz = evaluate_kernels_on_cell( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - choclo.prism.kernel_ee, - choclo.prism.kernel_nn, - choclo.prism.kernel_uu, - ) - uxy, uxz, uyz = evaluate_kernels_on_cell( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - choclo.prism.kernel_en, - choclo.prism.kernel_eu, - choclo.prism.kernel_nu, - ) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - if scalar_model: - sensitivity_matrix[i, j] = ( - constant_factor - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - else: - sensitivity_matrix[i, j] = ( - constant_factor * regional_field_amplitude * bx - ) - sensitivity_matrix[i, j + n_cells] = ( - constant_factor * regional_field_amplitude * by - ) - sensitivity_matrix[i, j + 2 * n_cells] = ( - constant_factor * regional_field_amplitude * bz - ) - - -def _sensitivity_tmi_derivative_2d_mesh( - receivers, - cells_bounds, - top, - bottom, - sensitivity_matrix, - regional_field, - kernel_xx, - kernel_yy, - kernel_zz, - kernel_xy, - kernel_xz, - 
kernel_yz, - scalar_model, -): - r""" - Fill the sensitivity matrix TMI for 2d meshes. - - This function is designed to be used with equivalent sources, where the - mesh is a 2D mesh (prism layer). The top and bottom boundaries of each cell - are passed through the ``top`` and ``bottom`` arrays. - - This function should be used with a `numba.jit` decorator, for example: - - .. code:: - - from numba import jit - - jit_tmi = jit(nopython=True, parallel=True)(_sensitivity_tmi_2d_mesh) - - Parameters - ---------- - receivers : (n_receivers, 3) numpy.ndarray - Array with the locations of the receivers - cells_bounds : (n_active_cells, 4) numpy.ndarray - Array with the bounds of each active cell in the 2D mesh. For each row, the - bounds should be passed in the following order: ``x_min``, ``x_max``, - ``y_min``, ``y_max``. - top : (n_active_cells) np.ndarray - Array with the top boundaries of each active cell in the 2D mesh. - bottom : (n_active_cells) np.ndarray - Array with the bottom boundaries of each active cell in the 2D mesh. - sensitivity_matrix : array - Empty 2d array where the sensitivity matrix elements will be filled. - This could be a preallocated empty array or a slice of it. - The array should have a shape of ``(n_receivers, n_active_nodes)`` - if ``scalar_model`` is True. - The array should have a shape of ``(n_receivers, 3 * n_active_nodes)`` - if ``scalar_model`` is False. - regional_field : (3,) array - Array containing the x, y and z components of the regional magnetic - field (uniform background field). - kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz : callables - Kernel functions used for computing the desired TMI derivative. - scalar_model : bool - If True, the sensitivity matrix is build to work with scalar models - (susceptibilities). - If False, the sensitivity matrix is build to work with vector models - (effective susceptibilities). - - Notes - ----- - - About the model array - ^^^^^^^^^^^^^^^^^^^^^ - - The ``model`` must always be a 1d array: - - * If ``scalar_model`` is ``True``, then ``model`` should be a 1d array with - the same number of elements as active cells in the mesh. It should store - the magnetic susceptibilities of each active cell in SI units. - * If ``scalar_model`` is ``False``, then ``model`` should be a 1d array - with a number of elements equal to three times the active cells in the - mesh. It should store the components of the magnetization vector of each - active cell in :math:`Am^{-1}`. The order in which the components should - be passed are: - * every _easting_ component of each active cell, - * then every _northing_ component of each active cell, - * and finally every _upward_ component of each active cell. 
- """ - fx, fy, fz = regional_field - regional_field_amplitude = np.sqrt(fx**2 + fy**2 + fz**2) - fx /= regional_field_amplitude - fy /= regional_field_amplitude - fz /= regional_field_amplitude - - constant_factor = 1 / 4 / np.pi - - # Fill the sensitivity matrix - n_receivers = receivers.shape[0] - n_cells = cells_bounds.shape[0] - for i in prange(n_receivers): - for j in range(n_cells): - # Evaluate kernels for the current cell and receiver - uxx, uyy, uzz = evaluate_kernels_on_cell( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - kernel_xx, - kernel_yy, - kernel_zz, - ) - uxy, uxz, uyz = evaluate_kernels_on_cell( - receivers[i, 0], - receivers[i, 1], - receivers[i, 2], - cells_bounds[j, 0], - cells_bounds[j, 1], - cells_bounds[j, 2], - cells_bounds[j, 3], - bottom[j], - top[j], - kernel_xy, - kernel_xz, - kernel_yz, - ) - bx = uxx * fx + uxy * fy + uxz * fz - by = uxy * fx + uyy * fy + uyz * fz - bz = uxz * fx + uyz * fy + uzz * fz - if scalar_model: - sensitivity_matrix[i, j] = ( - constant_factor - * regional_field_amplitude - * (bx * fx + by * fy + bz * fz) - ) - else: - sensitivity_matrix[i, j] = ( - constant_factor * regional_field_amplitude * bx - ) - sensitivity_matrix[i, j + n_cells] = ( - constant_factor * regional_field_amplitude * by - ) - sensitivity_matrix[i, j + 2 * n_cells] = ( - constant_factor * regional_field_amplitude * bz - ) - - -_sensitivity_tmi_serial = jit(nopython=True, parallel=False)(_sensitivity_tmi) -_sensitivity_tmi_parallel = jit(nopython=True, parallel=True)(_sensitivity_tmi) -_forward_tmi_serial = jit(nopython=True, parallel=False)(_forward_tmi) -_forward_tmi_parallel = jit(nopython=True, parallel=True)(_forward_tmi) -_forward_mag_serial = jit(nopython=True, parallel=False)(_forward_mag) -_forward_mag_parallel = jit(nopython=True, parallel=True)(_forward_mag) -_sensitivity_mag_serial = jit(nopython=True, parallel=False)(_sensitivity_mag) -_sensitivity_mag_parallel = jit(nopython=True, parallel=True)(_sensitivity_mag) -_forward_tmi_derivative_parallel = jit(nopython=True, parallel=True)( - _forward_tmi_derivative -) -_forward_tmi_derivative_serial = jit(nopython=True, parallel=False)( - _forward_tmi_derivative -) -_sensitivity_tmi_derivative_parallel = jit(nopython=True, parallel=True)( - _sensitivity_tmi_derivative -) -_sensitivity_tmi_derivative_serial = jit(nopython=True, parallel=False)( - _sensitivity_tmi_derivative -) -_forward_tmi_2d_mesh_serial = jit(nopython=True, parallel=False)(_forward_tmi_2d_mesh) -_forward_tmi_2d_mesh_parallel = jit(nopython=True, parallel=True)(_forward_tmi_2d_mesh) -_forward_mag_2d_mesh_serial = jit(nopython=True, parallel=False)(_forward_mag_2d_mesh) -_forward_mag_2d_mesh_parallel = jit(nopython=True, parallel=True)(_forward_mag_2d_mesh) -_forward_tmi_derivative_2d_mesh_serial = jit(nopython=True, parallel=False)( - _forward_tmi_derivative_2d_mesh -) -_forward_tmi_derivative_2d_mesh_parallel = jit(nopython=True, parallel=True)( - _forward_tmi_derivative_2d_mesh -) -_sensitivity_mag_2d_mesh_serial = jit(nopython=True, parallel=False)( - _sensitivity_mag_2d_mesh -) -_sensitivity_mag_2d_mesh_parallel = jit(nopython=True, parallel=True)( - _sensitivity_mag_2d_mesh -) -_sensitivity_tmi_2d_mesh_serial = jit(nopython=True, parallel=False)( - _sensitivity_tmi_2d_mesh -) -_sensitivity_tmi_2d_mesh_parallel = jit(nopython=True, parallel=True)( - _sensitivity_tmi_2d_mesh -) 
-_sensitivity_tmi_derivative_2d_mesh_serial = jit(nopython=True, parallel=False)( - _sensitivity_tmi_derivative_2d_mesh -) -_sensitivity_tmi_derivative_2d_mesh_parallel = jit(nopython=True, parallel=True)( - _sensitivity_tmi_derivative_2d_mesh -) diff --git a/simpeg/potential_fields/magnetics/simulation.py b/simpeg/potential_fields/magnetics/simulation.py index 02204b49e9..5786635b36 100644 --- a/simpeg/potential_fields/magnetics/simulation.py +++ b/simpeg/potential_fields/magnetics/simulation.py @@ -1,6 +1,9 @@ +import hashlib import warnings import numpy as np +from numpy.typing import NDArray import scipy.sparse as sp +from functools import cached_property from geoana.kernels import ( prism_fxxy, prism_fxxz, @@ -11,44 +14,17 @@ prism_fzzz, ) from scipy.constants import mu_0 +from scipy.sparse.linalg import LinearOperator, aslinearoperator from simpeg import props, utils from simpeg.utils import mat_utils, mkvc, sdiag -from simpeg.utils.code_utils import deprecate_property, validate_string, validate_type -from simpeg.utils.solver_utils import get_default_solver +from simpeg.utils.code_utils import validate_string, validate_type from ...base import BaseMagneticPDESimulation from ..base import BaseEquivalentSourceLayerSimulation, BasePFSimulation -from .analytics import CongruousMagBC from .survey import Survey -from ._numba_functions import ( - choclo, - _sensitivity_tmi_parallel, - _sensitivity_tmi_serial, - _sensitivity_mag_parallel, - _sensitivity_mag_serial, - _forward_tmi_parallel, - _forward_tmi_serial, - _forward_mag_parallel, - _forward_mag_serial, - _forward_tmi_2d_mesh_serial, - _forward_tmi_2d_mesh_parallel, - _forward_mag_2d_mesh_serial, - _forward_mag_2d_mesh_parallel, - _forward_tmi_derivative_2d_mesh_serial, - _forward_tmi_derivative_2d_mesh_parallel, - _sensitivity_mag_2d_mesh_serial, - _sensitivity_mag_2d_mesh_parallel, - _sensitivity_tmi_2d_mesh_serial, - _sensitivity_tmi_2d_mesh_parallel, - _forward_tmi_derivative_parallel, - _forward_tmi_derivative_serial, - _sensitivity_tmi_derivative_parallel, - _sensitivity_tmi_derivative_serial, - _sensitivity_tmi_derivative_2d_mesh_serial, - _sensitivity_tmi_derivative_2d_mesh_parallel, -) +from ._numba import choclo, NUMBA_FUNCTIONS_3D, NUMBA_FUNCTIONS_2D if choclo is not None: CHOCLO_SUPPORTED_COMPONENTS = { @@ -179,12 +155,6 @@ class Simulation3DIntegral(BasePFSimulation): If True, the simulation will run in parallel. If False, it will run in serial. If ``engine`` is not ``"choclo"`` this argument will be ignored. - ind_active : np.ndarray of int or bool - - .. deprecated:: 0.23.0 - - Argument ``ind_active`` is deprecated in favor of - ``active_cells`` and will be removed in SimPEG v0.24.0. 
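+
+    Examples
+    --------
+    A minimal sketch of setting up the simulation with the ``"choclo"``
+    engine (the ``mesh``, ``survey``, ``chi_map`` mapping and the boolean
+    ``active_cells`` array are assumed to already exist; they are not
+    prescribed by this class):
+
+    .. code::
+
+        simulation = Simulation3DIntegral(
+            mesh,
+            survey=survey,
+            chiMap=chi_map,
+            active_cells=active_cells,
+            engine="choclo",
+            numba_parallel=True,
+        )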
""" chi, chiMap, chiDeriv = props.Invertible("Magnetic Susceptibility (SI)") @@ -218,22 +188,6 @@ def __init__( ) self.n_processes = None - if self.engine == "choclo": - if self.numba_parallel: - self._sensitivity_tmi = _sensitivity_tmi_parallel - self._sensitivity_mag = _sensitivity_mag_parallel - self._forward_tmi = _forward_tmi_parallel - self._forward_mag = _forward_mag_parallel - self._forward_tmi_derivative = _forward_tmi_derivative_parallel - self._sensitivity_tmi_derivative = _sensitivity_tmi_derivative_parallel - else: - self._sensitivity_tmi = _sensitivity_tmi_serial - self._sensitivity_mag = _sensitivity_mag_serial - self._forward_tmi = _forward_tmi_serial - self._forward_mag = _forward_mag_serial - self._forward_tmi_derivative = _forward_tmi_derivative_serial - self._sensitivity_tmi_derivative = _sensitivity_tmi_derivative_serial - @property def model_type(self): """Type of magnetization model @@ -304,28 +258,32 @@ def fields(self, m=None): return fields @property - def G(self): - """ - Gravity forward operator - """ - if getattr(self, "_G", None) is None: - if self.engine == "choclo": - self._G = self._sensitivity_matrix() - else: - self._G = self.linear_operator() + def G(self) -> NDArray | np.memmap | LinearOperator: + if not hasattr(self, "_G"): + match self.engine, self.store_sensitivities: + case ("choclo", "forward_only"): + self._G = self._sensitivity_matrix_as_operator() + case ("choclo", _): + self._G = self._sensitivity_matrix() + case ("geoana", "forward_only"): + msg = ( + "Accessing matrix G with " + 'store_sensitivities="forward_only" and engine="geoana" ' + "hasn't been implemented yet. " + 'Choose store_sensitivities="ram" or "disk", ' + 'or another engine, like "choclo".' + ) + raise NotImplementedError(msg) + case ("geoana", _): + self._G = self.linear_operator() return self._G - modelType = deprecate_property( - model_type, "modelType", "model_type", removal_version="0.18.0", error=True - ) - @property def nD(self): """ Number of data """ - self._nD = self.survey.receiver_locations.shape[0] - + self._nD = self.survey.nD if not self.is_amplitude_data else self.survey.nD // 3 return self._nD @property @@ -339,41 +297,194 @@ def tmi_projection(self): return self._tmi_projection - def getJtJdiag(self, m, W=None, f=None): + def getJ(self, m, f=None) -> NDArray[np.float64 | np.float32] | LinearOperator: + r""" + Sensitivity matrix :math:`\mathbf{J}`. + + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. + f : Ignored + Not used, present here for API consistency by convention. + + Returns + ------- + (nD, n_params) np.ndarray or scipy.sparse.linalg.LinearOperator. + Array or :class:`~scipy.sparse.linalg.LinearOperator` for the + :math:`\mathbf{J}` matrix. + A :class:`~scipy.sparse.linalg.LinearOperator` will be returned if + ``store_sensitivities`` is ``"forward_only"``, otherwise a dense + array will be returned. + + Notes + ----- + If ``store_sensitivities`` is ``"ram"`` or ``"disk"``, a dense array + for the ``J`` matrix is returned. + A :class:`~scipy.sparse.linalg.LinearOperator` is returned if + ``store_sensitivities`` is ``"forward_only"``. This object can perform + operations like ``J @ m`` or ``J.T @ v`` without allocating the full + ``J`` matrix in memory. """ - Return the diagonal of JtJ + if self.is_amplitude_data: + msg = ( + "The `getJ` method is not yet implemented to work with " + "`is_amplitude_data`." 
+            )
+            raise NotImplementedError(msg)
+
+        # Need to assign the model, so the chiDeriv can be computed (if the
+        # model is None, the chiDeriv is going to be Zero).
+        self.model = m
+        chiDeriv = (
+            self.chiDeriv
+            if not isinstance(self.G, LinearOperator)
+            else aslinearoperator(self.chiDeriv)
+        )
+        return self.G @ chiDeriv
+
+    def getJtJdiag(self, m, W=None, f=None):
+        r"""
+        Compute diagonal of :math:`\mathbf{J}^T \mathbf{J}`.
+
+        Parameters
+        ----------
+        m : (n_param,) numpy.ndarray
+            The model parameters.
+        W : (nD, nD) np.ndarray or scipy.sparse.sparray, optional
+            Diagonal matrix with the square root of the weights. If not None,
+            the function returns the diagonal of
+            :math:`\mathbf{J}^T \mathbf{W}^T \mathbf{W} \mathbf{J}`.
+        f : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        (n_param,) np.ndarray
+            Array with the diagonal of ``J.T @ J``.
+
+        Notes
+        -----
+        If ``store_sensitivities`` is ``"forward_only"``, the ``G`` matrix is
+        never allocated in memory, and the diagonal is obtained by
+        accumulation, computing each element of the ``G`` matrix on the fly.
+
+        This method caches the diagonal of ``G.T @ W.T @ W @ G`` and the
+        sha256 hash of the diagonal of the ``W`` matrix. This way, if the
+        same weights are passed to it, it reuses the cached diagonal so it
+        doesn't need to be recomputed.
+        If new weights are passed, the cache is updated with the latest
+        diagonal of ``G.T @ W.T @ W @ G``.
         """
+        # Need to assign the model, so the chiDeriv can be computed (if the
+        # model is None, the chiDeriv is going to be Zero).
         self.model = m
+        # We should probably check that W is diagonal. Let's assume it for now.
+        weights = (
+            W.diagonal() ** 2
+            if W is not None
+            else np.ones(self.survey.nD, dtype=np.float64)
+        )
 
-        if W is None:
-            W = np.ones(self.survey.nD)
-        else:
-            W = W.diagonal() ** 2
-        if getattr(self, "_gtg_diagonal", None) is None:
-            diag = np.zeros(self.Jmatrix.shape[1])
-            if not self.is_amplitude_data:
-                diag = np.einsum("i,ij,ij->j", W, self.Jmatrix, self.Jmatrix)
-            else:
+        # Compute gtg (G.T @ W.T @ W @ G) if it's not cached, or if the
+        # weights are not the same.
+        weights_sha256 = hashlib.sha256(weights)
+        use_cached_gtg = (
+            hasattr(self, "_gtg_diagonal")
+            and hasattr(self, "_weights_sha256")
+            and self._weights_sha256.digest() == weights_sha256.digest()
+        )
+        if not use_cached_gtg:
+            self._gtg_diagonal = self._get_gtg_diagonal(weights)
+            self._weights_sha256 = weights_sha256
+
+        # Multiply the gtg_diagonal by the derivative of the mapping
+        diagonal = mkvc(
+            (sdiag(np.sqrt(self._gtg_diagonal)) @ self.chiDeriv).power(2).sum(axis=0)
+        )
+        return diagonal
+
+    def _get_gtg_diagonal(self, weights: NDArray) -> NDArray:
+        """
+        Compute the diagonal of ``G.T @ W.T @ W @ G``.
+
+        Parameters
+        ----------
+        weights : np.ndarray
+            Weights array: diagonal of ``W.T @ W``.
+
+        Returns
+        -------
+        np.ndarray
+        """
+        match (self.engine, self.store_sensitivities, self.is_amplitude_data):
+            case ("geoana", "forward_only", _):
+                msg = (
+                    "Computing the diagonal of `G.T @ G` using "
+                    "`'forward_only'` and `'geoana'` as engine hasn't been "
+                    "implemented yet."
+                )
+                raise NotImplementedError(msg)
+            case ("choclo", "forward_only", True):
+                msg = (
+                    "Computing the diagonal of `G.T @ G` using "
+                    "`'forward_only'` and `is_amplitude_data` hasn't been "
+                    "implemented yet."
+                )
+                raise NotImplementedError(msg)
+            case ("choclo", "forward_only", False):
+                gtg_diagonal = self._gtg_diagonal_without_building_g(weights)
+            case (_, _, False):
+                # In Einstein notation, the j-th element of the diagonal is:
+                # d_j = w_i * G_{ij} * G_{ij}
+                gtg_diagonal = np.asarray(
+                    np.einsum("i,ij,ij->j", weights, self.G, self.G)
+                )
+            case (_, _, True):
                 ampDeriv = self.ampDeriv
-                Gx = self.Jmatrix[::3]
-                Gy = self.Jmatrix[1::3]
-                Gz = self.Jmatrix[2::3]
-                for i in range(len(W)):
+                Gx = self.G[::3]
+                Gy = self.G[1::3]
+                Gz = self.G[2::3]
+                gtg_diagonal = np.zeros(self.G.shape[1])
+                for i in range(weights.size):
                     row = (
                         ampDeriv[0, i] * Gx[i]
                         + ampDeriv[1, i] * Gy[i]
                         + ampDeriv[2, i] * Gz[i]
                     )
-                    diag += W[i] * (row * row)
-            self._gtg_diagonal = diag
-        else:
-            diag = self._gtg_diagonal
-        return mkvc((sdiag(np.sqrt(diag)) @ self.chiDeriv).power(2).sum(axis=0))
+                    gtg_diagonal += weights[i] * (row * row)
+        return gtg_diagonal
 
     def Jvec(self, m, v, f=None):
+        """
+        Dot product between sensitivity matrix and a vector.
+
+        Parameters
+        ----------
+        m : (n_param,) numpy.ndarray
+            The model parameters. This array is used to compute the ``J``
+            matrix.
+        v : (n_param,) numpy.ndarray
+            Vector used in the matrix-vector multiplication.
+        f : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        (nD,) numpy.ndarray
+
+        Notes
+        -----
+        If ``store_sensitivities`` is set to ``"forward_only"``, then the
+        matrix ``G`` is never fully constructed, and the dot product is
+        computed by accumulation, computing the matrix elements on the fly.
+        Otherwise, the full matrix ``G`` is constructed and stored either in
+        memory or on disk.
+        """
+        # Need to assign the model, so the chiDeriv can be computed (if the
+        # model is None, the chiDeriv is going to be Zero).
         self.model = m
         dmu_dm_v = self.chiDeriv @ v
-
         Jvec = self.G @ dmu_dm_v.astype(self.sensitivity_dtype, copy=False)
 
         if self.is_amplitude_data:
@@ -381,10 +492,37 @@
             Jvec = Jvec.reshape((-1, 3)).T  # reshape((3, -1), order="F")
             ampDeriv_Jvec = self.ampDeriv * Jvec
             return ampDeriv_Jvec[0] + ampDeriv_Jvec[1] + ampDeriv_Jvec[2]
-        else:
-            return Jvec
+
+        return Jvec
 
     def Jtvec(self, m, v, f=None):
+        """
+        Dot product between transposed sensitivity matrix and a vector.
+
+        Parameters
+        ----------
+        m : (n_param,) numpy.ndarray
+            The model parameters. This array is used to compute the ``J``
+            matrix.
+        v : (nD,) numpy.ndarray
+            Vector used in the matrix-vector multiplication.
+        f : Ignored
+            Not used, present here for API consistency by convention.
+
+        Returns
+        -------
+        (n_param,) numpy.ndarray
+
+        Notes
+        -----
+        If ``store_sensitivities`` is set to ``"forward_only"``, then the
+        matrix ``G`` is never fully constructed, and the dot product is
+        computed by accumulation, computing the matrix elements on the fly.
+        Otherwise, the full matrix ``G`` is constructed and stored either in
+        memory or on disk.
+        """
+        # Need to assign the model, so the chiDeriv can be computed (if the
+        # model is None, the chiDeriv is going to be Zero).
self.model = m if self.is_amplitude_data: @@ -735,7 +873,10 @@ def _forward(self, model): index_offset + i, index_offset + n_rows, n_components ) if component == "tmi": - self._forward_tmi( + forward_func = NUMBA_FUNCTIONS_3D["forward"]["tmi"][ + self.numba_parallel + ] + forward_func( receivers, active_nodes, model, @@ -749,7 +890,10 @@ def _forward(self, model): kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz = ( CHOCLO_KERNELS[component] ) - self._forward_tmi_derivative( + forward_func = NUMBA_FUNCTIONS_3D["forward"]["tmi_derivative"][ + self.numba_parallel + ] + forward_func( receivers, active_nodes, model, @@ -767,7 +911,10 @@ def _forward(self, model): ) else: kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] - self._forward_mag( + forward_func = NUMBA_FUNCTIONS_3D["forward"]["magnetic_component"][ + self.numba_parallel + ] + forward_func( receivers, active_nodes, model, @@ -796,10 +943,8 @@ def _sensitivity_matrix(self): # Get regional field regional_field = self.survey.source_field.b0 # Allocate sensitivity matrix - if self.model_type == "scalar": - n_columns = self.nC - else: - n_columns = 3 * self.nC + scalar_model = self.model_type == "scalar" + n_columns = self.nC if scalar_model else 3 * self.nC shape = (self.survey.nD, n_columns) if self.store_sensitivities == "disk": sensitivity_matrix = np.memmap( @@ -815,7 +960,6 @@ def _sensitivity_matrix(self): constant_factor = 1 / 4 / np.pi # Start filling the sensitivity matrix index_offset = 0 - scalar_model = self.model_type == "scalar" for components, receivers in self._get_components_and_receivers(): if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): raise NotImplementedError( @@ -829,7 +973,10 @@ def _sensitivity_matrix(self): index_offset + i, index_offset + n_rows, n_components ) if component == "tmi": - self._sensitivity_tmi( + sensitivity_func = NUMBA_FUNCTIONS_3D["sensitivity"]["tmi"][ + self.numba_parallel + ] + sensitivity_func( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], @@ -839,10 +986,13 @@ def _sensitivity_matrix(self): scalar_model, ) elif component in ("tmi_x", "tmi_y", "tmi_z"): + sensitivity_func = NUMBA_FUNCTIONS_3D["sensitivity"][ + "tmi_derivative" + ][self.numba_parallel] kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz = ( CHOCLO_KERNELS[component] ) - self._sensitivity_tmi_derivative( + sensitivity_func( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], @@ -858,8 +1008,11 @@ def _sensitivity_matrix(self): scalar_model, ) else: + sensitivity_func = NUMBA_FUNCTIONS_3D["sensitivity"][ + "magnetic_component" + ][self.numba_parallel] kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] - self._sensitivity_mag( + sensitivity_func( receivers, active_nodes, sensitivity_matrix[matrix_slice, :], @@ -874,6 +1027,212 @@ def _sensitivity_matrix(self): index_offset += n_rows return sensitivity_matrix + def _sensitivity_matrix_as_operator(self): + """ + Create a LinearOperator for the sensitivity matrix G. + + Returns + ------- + scipy.sparse.linalg.LinearOperator + """ + n_columns = self.nC if self.model_type == "scalar" else self.nC * 3 + shape = (self.survey.nD, n_columns) + linear_op = LinearOperator( + shape=shape, + matvec=self._forward, + rmatvec=self._sensitivity_matrix_transpose_dot_vec, + dtype=np.float64, + ) + return linear_op + + def _sensitivity_matrix_transpose_dot_vec(self, vector): + """ + Compute ``G.T @ v`` without building ``G``. + + Parameters + ---------- + vector : (nD) numpy.ndarray + Vector used in the dot product. 
+ + Returns + ------- + (n_active_cells) or (3 * n_active_cells) numpy.ndarray + """ + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Get regional field + regional_field = self.survey.source_field.b0 + + # Allocate resulting array. + scalar_model = self.model_type == "scalar" + result = np.zeros(self.nC if scalar_model else 3 * self.nC) + + # Define the constant factor + constant_factor = 1 / 4 / np.pi + + # Fill the result array + index_offset = 0 + for components, receivers in self._get_components_and_receivers(): + if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): + raise NotImplementedError( + f"Other components besides {CHOCLO_SUPPORTED_COMPONENTS} " + "aren't implemented yet." + ) + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + vector_slice = slice( + index_offset + i, index_offset + n_rows, n_components + ) + if component == "tmi": + gt_dot_v_func = NUMBA_FUNCTIONS_3D["gt_dot_v"]["tmi"][ + self.numba_parallel + ] + gt_dot_v_func( + receivers, + active_nodes, + active_cell_nodes, + regional_field, + constant_factor, + scalar_model, + vector[vector_slice], + result, + ) + elif component in ("tmi_x", "tmi_y", "tmi_z"): + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz = ( + CHOCLO_KERNELS[component] + ) + gt_dot_v_func = NUMBA_FUNCTIONS_3D["gt_dot_v"]["tmi_derivative"][ + self.numba_parallel + ] + gt_dot_v_func( + receivers, + active_nodes, + active_cell_nodes, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + constant_factor, + scalar_model, + vector[vector_slice], + result, + ) + else: + kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] + gt_dot_v_func = NUMBA_FUNCTIONS_3D["gt_dot_v"][ + "magnetic_component" + ][self.numba_parallel] + gt_dot_v_func( + receivers, + active_nodes, + active_cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, + vector[vector_slice], + result, + ) + index_offset += n_rows + return result + + def _gtg_diagonal_without_building_g(self, weights): + """ + Compute the diagonal of ``G.T @ G`` without building the ``G`` matrix. + + Parameters + ----------- + weights : (nD,) array + Array with data weights. It should be the diagonal of the ``W`` + matrix, squared. + + Returns + ------- + (n_active_cells) numpy.ndarray + """ + # Gather active nodes and the indices of the nodes for each active cell + active_nodes, active_cell_nodes = self._get_active_nodes() + # Get regional field + regional_field = self.survey.source_field.b0 + # Define the constant factor + constant_factor = 1 / 4 / np.pi + + # Allocate array for the diagonal + scalar_model = self.model_type == "scalar" + n_columns = self.nC if scalar_model else 3 * self.nC + diagonal = np.zeros(n_columns, dtype=np.float64) + + # Start filling the diagonal array + for components, receivers in self._get_components_and_receivers(): + if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): + raise NotImplementedError( + f"Other components besides {CHOCLO_SUPPORTED_COMPONENTS} " + "aren't implemented yet." 
+ ) + for component in components: + if component == "tmi": + diagonal_gtg_func = NUMBA_FUNCTIONS_3D["diagonal_gtg"]["tmi"][ + self.numba_parallel + ] + diagonal_gtg_func( + receivers, + active_nodes, + active_cell_nodes, + regional_field, + constant_factor, + scalar_model, + weights, + diagonal, + ) + elif component in ("tmi_x", "tmi_y", "tmi_z"): + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz = ( + CHOCLO_KERNELS[component] + ) + diagonal_gtg_func = NUMBA_FUNCTIONS_3D["diagonal_gtg"][ + "tmi_derivative" + ][self.numba_parallel] + diagonal_gtg_func( + receivers, + active_nodes, + active_cell_nodes, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + constant_factor, + scalar_model, + weights, + diagonal, + ) + else: + kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] + diagonal_gtg_func = NUMBA_FUNCTIONS_3D["diagonal_gtg"][ + "magnetic_component" + ][self.numba_parallel] + diagonal_gtg_func( + receivers, + active_nodes, + active_cell_nodes, + regional_field, + kernel_x, + kernel_y, + kernel_z, + constant_factor, + scalar_model, + weights, + diagonal, + ) + return diagonal + class SimulationEquivalentSourceLayer( BaseEquivalentSourceLayerSimulation, Simulation3DIntegral @@ -918,26 +1277,6 @@ def __init__( **kwargs, ) - if self.engine == "choclo": - if self.numba_parallel: - self._sensitivity_tmi = _sensitivity_tmi_2d_mesh_parallel - self._sensitivity_mag = _sensitivity_mag_2d_mesh_parallel - self._forward_tmi = _forward_tmi_2d_mesh_parallel - self._forward_mag = _forward_mag_2d_mesh_parallel - self._forward_tmi_derivative = _forward_tmi_derivative_2d_mesh_parallel - self._sensitivity_tmi_derivative = ( - _sensitivity_tmi_derivative_2d_mesh_parallel - ) - else: - self._sensitivity_tmi = _sensitivity_tmi_2d_mesh_serial - self._sensitivity_mag = _sensitivity_mag_2d_mesh_serial - self._forward_tmi = _forward_tmi_2d_mesh_serial - self._forward_mag = _forward_mag_2d_mesh_serial - self._forward_tmi_derivative = _forward_tmi_derivative_2d_mesh_serial - self._sensitivity_tmi_derivative = ( - _sensitivity_tmi_derivative_2d_mesh_serial - ) - def _forward(self, model): """ Forward model the fields of active cells in the mesh on receivers. 
@@ -981,7 +1320,10 @@ def _forward(self, model): index_offset + i, index_offset + n_rows, n_components ) if component == "tmi": - self._forward_tmi( + forward_func = NUMBA_FUNCTIONS_2D["forward"]["tmi"][ + self.numba_parallel + ] + forward_func( receivers, cells_bounds_active, self.cell_z_top, @@ -995,7 +1337,10 @@ def _forward(self, model): kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz = ( CHOCLO_KERNELS[component] ) - self._forward_tmi_derivative( + forward_func = NUMBA_FUNCTIONS_2D["forward"]["tmi_derivative"][ + self.numba_parallel + ] + forward_func( receivers, cells_bounds_active, self.cell_z_top, @@ -1012,8 +1357,11 @@ def _forward(self, model): scalar_model, ) else: - forward_func = CHOCLO_FORWARD_FUNCS[component] - self._forward_mag( + choclo_forward_func = CHOCLO_FORWARD_FUNCS[component] + forward_func = NUMBA_FUNCTIONS_2D["forward"]["magnetic_component"][ + self.numba_parallel + ] + forward_func( receivers, cells_bounds_active, self.cell_z_top, @@ -1021,7 +1369,7 @@ def _forward(self, model): model, fields[vector_slice], regional_field, - forward_func, + choclo_forward_func, scalar_model, ) index_offset += n_rows @@ -1040,10 +1388,8 @@ def _sensitivity_matrix(self): # Get regional field regional_field = self.survey.source_field.b0 # Allocate sensitivity matrix - if self.model_type == "scalar": - n_columns = self.nC - else: - n_columns = 3 * self.nC + scalar_model = self.model_type == "scalar" + n_columns = self.nC if scalar_model else 3 * self.nC shape = (self.survey.nD, n_columns) if self.store_sensitivities == "disk": sensitivity_matrix = np.memmap( @@ -1057,7 +1403,6 @@ def _sensitivity_matrix(self): sensitivity_matrix = np.empty(shape, dtype=self.sensitivity_dtype) # Start filling the sensitivity matrix index_offset = 0 - scalar_model = self.model_type == "scalar" for components, receivers in self._get_components_and_receivers(): if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): raise NotImplementedError( @@ -1071,7 +1416,10 @@ def _sensitivity_matrix(self): index_offset + i, index_offset + n_rows, n_components ) if component == "tmi": - self._sensitivity_tmi( + sensitivity_func = NUMBA_FUNCTIONS_2D["sensitivity"]["tmi"][ + self.numba_parallel + ] + sensitivity_func( receivers, cells_bounds_active, self.cell_z_top, @@ -1084,7 +1432,10 @@ def _sensitivity_matrix(self): kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz = ( CHOCLO_KERNELS[component] ) - self._sensitivity_tmi_derivative( + sensitivity_func = NUMBA_FUNCTIONS_2D["sensitivity"][ + "tmi_derivative" + ][self.numba_parallel] + sensitivity_func( receivers, cells_bounds_active, self.cell_z_top, @@ -1101,7 +1452,10 @@ def _sensitivity_matrix(self): ) else: kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] - self._sensitivity_mag( + sensitivity_func = NUMBA_FUNCTIONS_2D["sensitivity"][ + "magnetic_component" + ][self.numba_parallel] + sensitivity_func( receivers, cells_bounds_active, self.cell_z_top, @@ -1116,511 +1470,731 @@ def _sensitivity_matrix(self): index_offset += n_rows return sensitivity_matrix + def _sensitivity_matrix_transpose_dot_vec(self, vector): + """ + Compute ``G.T @ v`` without building ``G``. + + Parameters + ---------- + vector : (nD) numpy.ndarray + Vector used in the dot product. 
+ + Returns + ------- + (n_active_cells) or (3 * n_active_cells) numpy.ndarray + """ + # Get regional field + regional_field = self.survey.source_field.b0 + # Get cells in the 2D mesh and keep only active cells + cells_bounds_active = self.mesh.cell_bounds[self.active_cells] + # Allocate resulting array + scalar_model = self.model_type == "scalar" + result = np.zeros(self.nC if scalar_model else 3 * self.nC) + # Start filling the result array + index_offset = 0 + for components, receivers in self._get_components_and_receivers(): + if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): + raise NotImplementedError( + f"Other components besides {CHOCLO_SUPPORTED_COMPONENTS} " + "aren't implemented yet." + ) + n_components = len(components) + n_rows = n_components * receivers.shape[0] + for i, component in enumerate(components): + vector_slice = slice( + index_offset + i, index_offset + n_rows, n_components + ) + if component == "tmi": + gt_dot_v_func = NUMBA_FUNCTIONS_2D["gt_dot_v"]["tmi"][ + self.numba_parallel + ] + gt_dot_v_func( + receivers, + cells_bounds_active, + self.cell_z_top, + self.cell_z_bottom, + regional_field, + scalar_model, + vector[vector_slice], + result, + ) + elif component in ("tmi_x", "tmi_y", "tmi_z"): + kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz = ( + CHOCLO_KERNELS[component] + ) + gt_dot_v_func = NUMBA_FUNCTIONS_2D["gt_dot_v"]["tmi_derivative"][ + self.numba_parallel + ] + gt_dot_v_func( + receivers, + cells_bounds_active, + self.cell_z_top, + self.cell_z_bottom, + regional_field, + kernel_xx, + kernel_yy, + kernel_zz, + kernel_xy, + kernel_xz, + kernel_yz, + scalar_model, + vector[vector_slice], + result, + ) + else: + kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component] + gt_dot_v_func = NUMBA_FUNCTIONS_2D["gt_dot_v"][ + "magnetic_component" + ][self.numba_parallel] + gt_dot_v_func( + receivers, + cells_bounds_active, + self.cell_z_top, + self.cell_z_bottom, + regional_field, + kernel_x, + kernel_y, + kernel_z, + scalar_model, + vector[vector_slice], + result, + ) + index_offset += n_rows + return result + + def _gtg_diagonal_without_building_g(self, weights): + """ + Compute the diagonal of ``G.T @ G`` without building the ``G`` matrix. + + Parameters + ----------- + weights : (nD,) array + Array with data weights. It should be the diagonal of the ``W`` + matrix, squared. + + Returns + ------- + (n_active_cells) numpy.ndarray + """ + # Get regional field + regional_field = self.survey.source_field.b0 + # Get cells in the 2D mesh and keep only active cells + cells_bounds_active = self.mesh.cell_bounds[self.active_cells] + # Define the constant factor + constant_factor = 1 / 4 / np.pi + # Allocate array for the diagonal + scalar_model = self.model_type == "scalar" + n_columns = self.nC if scalar_model else 3 * self.nC + diagonal = np.zeros(n_columns, dtype=np.float64) + # Start filling the diagonal array + for components, receivers in self._get_components_and_receivers(): + if not CHOCLO_SUPPORTED_COMPONENTS.issuperset(components): + raise NotImplementedError( + f"Other components besides {CHOCLO_SUPPORTED_COMPONENTS} " + "aren't implemented yet." 
+                )
+            for component in components:
+                if component == "tmi":
+                    diagonal_gtg_func = NUMBA_FUNCTIONS_2D["diagonal_gtg"]["tmi"][
+                        self.numba_parallel
+                    ]
+                    diagonal_gtg_func(
+                        receivers,
+                        cells_bounds_active,
+                        self.cell_z_top,
+                        self.cell_z_bottom,
+                        regional_field,
+                        constant_factor,
+                        scalar_model,
+                        weights,
+                        diagonal,
+                    )
+                elif component in ("tmi_x", "tmi_y", "tmi_z"):
+                    kernel_xx, kernel_yy, kernel_zz, kernel_xy, kernel_xz, kernel_yz = (
+                        CHOCLO_KERNELS[component]
+                    )
+                    diagonal_gtg_func = NUMBA_FUNCTIONS_2D["diagonal_gtg"][
+                        "tmi_derivative"
+                    ][self.numba_parallel]
+                    diagonal_gtg_func(
+                        receivers,
+                        cells_bounds_active,
+                        self.cell_z_top,
+                        self.cell_z_bottom,
+                        regional_field,
+                        kernel_xx,
+                        kernel_yy,
+                        kernel_zz,
+                        kernel_xy,
+                        kernel_xz,
+                        kernel_yz,
+                        constant_factor,
+                        scalar_model,
+                        weights,
+                        diagonal,
+                    )
+                else:
+                    kernel_x, kernel_y, kernel_z = CHOCLO_KERNELS[component]
+                    diagonal_gtg_func = NUMBA_FUNCTIONS_2D["diagonal_gtg"][
+                        "magnetic_component"
+                    ][self.numba_parallel]
+                    diagonal_gtg_func(
+                        receivers,
+                        cells_bounds_active,
+                        self.cell_z_top,
+                        self.cell_z_bottom,
+                        regional_field,
+                        kernel_x,
+                        kernel_y,
+                        kernel_z,
+                        constant_factor,
+                        scalar_model,
+                        weights,
+                        diagonal,
+                    )
+        return diagonal
+
 
 class Simulation3DDifferential(BaseMagneticPDESimulation):
-    """
-    Secondary field approach using differential equations!
+    r"""A secondary field simulation for magnetic data.
+
+    Parameters
+    ----------
+    mesh : discretize.base.BaseMesh
+        The mesh used for the simulation.
+    survey : magnetics.survey.Survey
+        The magnetic survey.
+    mu : float, array_like
+        Magnetic permeability model (H/m). Set this for forward
+        modeling or to fix while inverting for remanence. This is used if
+        ``muMap`` is None.
+    muMap : simpeg.maps.IdentityMap, optional
+        The mapping used to go from the simulation model to ``mu``. Set this
+        to invert for ``mu``.
+    rem : float, array_like
+        Magnetic polarization :math:`\mu_0 \mathbf{M}` (nT). Set this for forward
+        modeling or to fix remanent magnetization while inverting for permeability.
+        This is used if ``remMap`` is None.
+    remMap : simpeg.maps.IdentityMap, optional
+        The mapping used to go from the simulation model to :math:`\mu_0 \mathbf{M}`.
+        Set this to invert for :math:`\mu_0 \mathbf{M}`.
+    storeJ : bool
+        Whether to store the sensitivity matrix. If set to True, the
+        sensitivity matrix is computed and cached on the first call to
+        ``getJ``, and reused by ``Jvec`` and ``Jtvec``.
+    solver_dtype : numpy.dtype, optional
+        Data type to use for the matrix that gets passed to the ``solver``.
+        Defaults to ``numpy.float64``.
+
+    Notes
+    -----
+    This simulation solves the magnetostatic PDE:
+
+    .. math::
+        \nabla \cdot \Vec{B} = 0
+
+    where the constitutive relation is specified as:
+
+    .. math::
+        \Vec{B} = \mu\Vec{H} + \mu_0\Vec{M_r}
+
+    where :math:`\Vec{M_r}` is a fixed magnetization unaffected by the inducing field
+    and :math:`\mu\Vec{H}` is the induced magnetization.
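+
+    Examples
+    --------
+    A minimal sketch of forward modeling with a remanent magnetization
+    model (the ``mesh``, ``survey`` and ``model`` objects are assumed to
+    already exist; the identity mapping is illustrative only):
+
+    .. code::
+
+        from simpeg import maps
+
+        # Map the model directly to the three-component magnetic
+        # polarization on each cell, keeping the permeability at mu_0.
+        rem_map = maps.IdentityMap(nP=3 * mesh.n_cells)
+        simulation = Simulation3DDifferential(
+            mesh, survey=survey, remMap=rem_map
+        )
+        dpred = simulation.dpred(model)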
""" - def __init__(self, mesh, survey=None, **kwargs): - super().__init__(mesh, survey=survey, **kwargs) + _Ainv = None - Pbc, Pin, self._Pout = self.mesh.get_BC_projections( - "neumann", discretization="CC" - ) + rem, remMap, remDeriv = props.Invertible( + "Magnetic Polarization (nT)", optional=True + ) + + _supported_components = ("tmi", "bx", "by", "bz") + + def __init__( + self, + mesh, + survey=None, + mu=None, + muMap=None, + rem=None, + remMap=None, + storeJ=False, + solver_dtype=np.float64, + **kwargs, + ): + if mu is None: + mu = mu_0 + + super().__init__(mesh=mesh, survey=survey, mu=mu, muMap=muMap, **kwargs) + + self.rem = rem + self.remMap = remMap + + self.storeJ = storeJ + self.solver_dtype = solver_dtype - Dface = self.mesh.face_divergence - Mc = sdiag(self.mesh.cell_volumes) - self._Div = Mc * Dface * Pin.T.tocsr() * Pin + self._MfMu0i = self.mesh.get_face_inner_product(1.0 / mu_0) + self._Div = self.Mcc * self.mesh.face_divergence + self._DivT = self._Div.T.tocsr() + self._Mf_vec_deriv = self.mesh.get_face_inner_product_deriv( + np.ones(self.mesh.n_cells * 3) + )(np.ones(self.mesh.n_faces)) + + self.solver_opts = {"is_symmetric": True, "is_positive_definite": True} + + self._Jmatrix = None + self._stored_fields = None @property def survey(self): - """The survey for this simulation. + """The magnetic survey object. Returns ------- - simpeg.potential_fields.magnetics.survey.Survey + simpeg.potential_fields.magnetics.Survey """ if self._survey is None: raise AttributeError("Simulation must have a survey") return self._survey @survey.setter - def survey(self, obj): - if obj is not None: - obj = validate_type("survey", obj, Survey, cast=False) - self._survey = obj + def survey(self, value): + if value is not None: + value = validate_type("survey", value, Survey, cast=False) + unsupported_components = { + component + for source in value.source_list + for receiver in source.receiver_list + for component in receiver.components + if component not in self._supported_components + } + if unsupported_components: + msg = ( + f"Found unsupported magnetic components " + f"'{', '.join(c for c in unsupported_components)}' in the survey." + f"The {type(self).__name__} currently supports the following " + f"components: {', '.join(c for c in self._supported_components)}" + ) + raise NotImplementedError(msg) + self._survey = value @property - def MfMuI(self): - return self._MfMuI + def storeJ(self): + """Whether to store the sensitivity matrix - @property - def MfMui(self): - return self._MfMui + Returns + ------- + bool + """ + return self._storeJ + + @storeJ.setter + def storeJ(self, value): + self._storeJ = validate_type("storeJ", value, bool) @property - def MfMu0(self): - return self._MfMu0 - - def makeMassMatrices(self, m): - mu = self.muMap * m - self._MfMui = self.mesh.get_face_inner_product(1.0 / mu) / self.mesh.dim - # self._MfMui = self.mesh.get_face_inner_product(1./mu) - # TODO: this will break if tensor mu - self._MfMuI = sdiag(1.0 / self._MfMui.diagonal()) - self._MfMu0 = self.mesh.get_face_inner_product(1.0 / mu_0) / self.mesh.dim - # self._MfMu0 = self.mesh.get_face_inner_product(1/mu_0) + def solver_dtype(self): + """ + Data type used by the solver. + + Returns + ------- + numpy.dtype + Either np.float32 or np.float64 + """ + return self._solver_dtype + + @solver_dtype.setter + def solver_dtype(self, value): + """ + Set the solver dtype. Must be np.float32 or np.float64. + """ + if value not in (np.float32, np.float64): + msg = ( + f"Invalid `solver_dtype` '{value}'. 
" + "It must be np.float32 or np.float64." + ) + raise ValueError(msg) + self._solver_dtype = value + @cached_property @utils.requires("survey") - def getB0(self): + def _b0(self): + # Todo: Experiment with avoiding array of constants b0 = self.survey.source_field.b0 - B0 = np.r_[ + b0 = np.r_[ b0[0] * np.ones(self.mesh.nFx), b0[1] * np.ones(self.mesh.nFy), b0[2] * np.ones(self.mesh.nFz), ] - return B0 + return b0 - def getRHS(self, m): - r""" + @property + def _stored_fields(self): + return self.__stored_fields - .. math :: + @_stored_fields.setter + def _stored_fields(self, value): + self.__stored_fields = value - \mathbf{rhs} = - \Div(\MfMui)^{-1}\mathbf{M}^f_{\mu_0^{-1}}\mathbf{B}_0 - - \Div\mathbf{B}_0 - +\diag(v)\mathbf{D} \mathbf{P}_{out}^T \mathbf{B}_{sBC} + @_stored_fields.deleter + def _stored_fields(self): + self.__stored_fields = None - """ - B0 = self.getB0() + def _getRHS(self, m): + self.model = m - mu = self.muMap * m - chi = mu / mu_0 - 1 + rhs = 0 - # Temporary fix - Bbc, Bbc_const = CongruousMagBC(self.mesh, self.survey.source_field.b0, chi) - self.Bbc = Bbc - self.Bbc_const = Bbc_const - # return self._Div*self.MfMuI*self.MfMu0*B0 - self._Div*B0 + - # Mc*Dface*self._Pout.T*Bbc - return self._Div * self.MfMuI * self.MfMu0 * B0 - self._Div * B0 + if not np.isscalar(self.mu) or not np.allclose(self.mu, mu_0): + rhs += ( + self._Div * self.MfMuiI * self._MfMu0i * self._b0 - self._Div * self._b0 + ) - def getA(self, m): - r""" - GetA creates and returns the A matrix for the Magnetics problem + if self.rem is not None: + rhs += ( + self._Div + * ( + self.MfMuiI + * self.mesh.get_face_inner_product( + self.rem + / np.tile(self.mu * np.ones(self.mesh.n_cells), self.mesh.dim) + ) + ).diagonal() + ) - The A matrix has the form: + return rhs - .. math :: + def _getA(self): + A = self._Div * self.MfMuiI * self._DivT - \mathbf{A} = \Div(\MfMui)^{-1}\Div^{T} + A = A.astype(self.solver_dtype) - """ - return self._Div * self.MfMuI * self._Div.T.tocsr() + return A def fields(self, m): - r""" - Return magnetic potential (u) and flux (B) - - u: defined on the cell center [nC x 1] - B: defined on the cell center [nG x 1] - - After we compute u, then we update B. - - .. math :: - - \mathbf{B}_s = - (\MfMui)^{-1}\mathbf{M}^f_{\mu_0^{-1}}\mathbf{B}_0 - - \mathbf{B}_0 - - (\MfMui)^{-1}\Div^T \mathbf{u} - - """ - self.makeMassMatrices(m) - A = self.getA(m) - rhs = self.getRHS(m) - Ainv = self.solver(A, **self.solver_opts) - u = Ainv * rhs - B0 = self.getB0() - B = self.MfMuI * self.MfMu0 * B0 - B0 - self.MfMuI * self._Div.T * u - Ainv.clean() - - return {"B": B, "u": u} - - @utils.timeIt - def Jvec(self, m, v, u=None): - r""" - Computing Jacobian multiplied by vector - - By setting our problem as - - .. math :: - - \mathbf{C}(\mathbf{m}, \mathbf{u}) = \mathbf{A}\mathbf{u} - \mathbf{rhs} = 0 - - And taking derivative w.r.t m - - .. math :: - - \nabla \mathbf{C}(\mathbf{m}, \mathbf{u}) = - \nabla_m \mathbf{C}(\mathbf{m}) \delta \mathbf{m} + - \nabla_u \mathbf{C}(\mathbf{u}) \delta \mathbf{u} = 0 - - \frac{\delta \mathbf{u}}{\delta \mathbf{m}} = - - [\nabla_u \mathbf{C}(\mathbf{u})]^{-1}\nabla_m \mathbf{C}(\mathbf{m}) - - With some linear algebra we can have - - .. math :: - - \nabla_u \mathbf{C}(\mathbf{u}) = \mathbf{A} - - \nabla_m \mathbf{C}(\mathbf{m}) = - \frac{\partial \mathbf{A}} {\partial \mathbf{m}} (\mathbf{m}) \mathbf{u} - - \frac{\partial \mathbf{rhs}(\mathbf{m})}{\partial \mathbf{m}} + self.model = m - .. 
math :: + if self._stored_fields is None: - \frac{\partial \mathbf{A}}{\partial \mathbf{m}}(\mathbf{m})\mathbf{u} = - \frac{\partial \mathbf{\mu}}{\partial \mathbf{m}} - \left[\Div \diag (\Div^T \mathbf{u}) \dMfMuI \right] + if self._Ainv is None: + self._Ainv = self.solver(self._getA(), **self.solver_opts) - \dMfMuI = - \diag(\MfMui)^{-1}_{vec} - \mathbf{Av}_{F2CC}^T\diag(\mathbf{v})\diag(\frac{1}{\mu^2}) + rhs = self._getRHS(m) - \frac{\partial \mathbf{rhs}(\mathbf{m})}{\partial \mathbf{m}} = - \frac{\partial \mathbf{\mu}}{\partial \mathbf{m}} - \left[ - \Div \diag(\M^f_{\mu_{0}^{-1}}\mathbf{B}_0) \dMfMuI - \right] - - \diag(\mathbf{v}) \mathbf{D} \mathbf{P}_{out}^T - \frac{\partial B_{sBC}}{\partial \mathbf{m}} + u = self._Ainv * rhs + b_field = -self.MfMuiI * self._DivT * u - In the end, + if not np.isscalar(self.mu) or not np.allclose(self.mu, mu_0): + b_field += self._MfMu0i * self.MfMuiI * self._b0 - self._b0 - .. math :: + if self.rem is not None: + b_field += ( + self.MfMuiI + * self.mesh.get_face_inner_product( + self.rem + / np.tile(self.mu * np.ones(self.mesh.n_cells), self.mesh.dim) + ) + ).diagonal() - \frac{\delta \mathbf{u}}{\delta \mathbf{m}} = - - [ \mathbf{A} ]^{-1} - \left[ - \frac{\partial \mathbf{A}}{\partial \mathbf{m}}(\mathbf{m})\mathbf{u} - - \frac{\partial \mathbf{rhs}(\mathbf{m})}{\partial \mathbf{m}} - \right] + fields = {"b": b_field, "u": u} + self._stored_fields = fields - A little tricky point here is we are not interested in potential (u), but interested in magnetic flux (B). - Thus, we need sensitivity for B. Now we take derivative of B w.r.t m and have + else: + fields = self._stored_fields - .. math :: + return fields - \frac{\delta \mathbf{B}} {\delta \mathbf{m}} = - \frac{\partial \mathbf{\mu} } {\partial \mathbf{m} } - \left[ - \diag(\M^f_{\mu_{0}^{-1} } \mathbf{B}_0) \dMfMuI \ - - \diag (\Div^T\mathbf{u})\dMfMuI - \right ] + def dpred(self, m=None, f=None): + self.model = m + if f is not None: + return self._projectFields(f) - - (\MfMui)^{-1}\Div^T\frac{\delta\mathbf{u}}{\delta \mathbf{m}} + if f is None: + f = self.fields(m) - Finally we evaluate the above, but we should remember that + dpred = self._projectFields(f) - .. note :: + return dpred - We only want to evaluate + def magnetic_polarization(self, m=None): + r""" + Computes the total magnetic polarization :math:`\mu_0\mathbf{M}`. - .. math :: + Parameters + ---------- + m : (n_param,) numpy.ndarray + The model parameters. - \mathbf{J}\mathbf{v} = - \frac{\delta \mathbf{P}\mathbf{B}} {\delta \mathbf{m}}\mathbf{v} + Returns + ------- + mu0_m : np.ndarray + The magnetic polarization :math:`\mu_0 \mathbf{M}` in nanoteslas (nT), defined on the mesh faces. + The result is ordered as a concatenation of the x, y, and z face components + (i.e., ``[Mx_faces, My_faces, Mz_faces]``). - Since forming sensitivity matrix is very expensive in that this - monster is "big" and "dense" matrix!! 
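+
+        For example, a short sketch of splitting the result back into its
+        face components (the ``simulation``, ``mesh`` and ``model`` objects
+        are assumed to already exist):
+
+        .. code::
+
+            mu0_m = simulation.magnetic_polarization(model)
+            mu0_mx = mu0_m[: mesh.nFx]
+            mu0_my = mu0_m[mesh.nFx : mesh.nFx + mesh.nFy]
+            mu0_mz = mu0_m[mesh.nFx + mesh.nFy :]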
""" - if u is None: - u = self.fields(m) - - B, u = u["B"], u["u"] - mu = self.muMap * (m) - dmu_dm = self.muDeriv - # dchidmu = sdiag(1 / mu_0 * np.ones(self.mesh.nC)) - - vol = self.mesh.cell_volumes - Div = self._Div - P = self.survey.projectFieldsDeriv(B) # Projection matrix - B0 = self.getB0() - - MfMuIvec = 1 / self.MfMui.diagonal() - dMfMuI = sdiag(MfMuIvec**2) * self.mesh.aveF2CC.T * sdiag(vol * 1.0 / mu**2) - - # A = self._Div*self.MfMuI*self._Div.T - # RHS = Div*MfMuI*MfMu0*B0 - Div*B0 + Mc*Dface*Pout.T*Bbc - # C(m,u) = A*m-rhs - # dudm = -(dCdu)^(-1)dCdm - - dCdu = self.getA(m) # = A - dCdm_A = Div * (sdiag(Div.T * u) * dMfMuI * dmu_dm) - dCdm_RHS1 = Div * (sdiag(self.MfMu0 * B0) * dMfMuI) - # temp1 = (Dface * (self._Pout.T * self.Bbc_const * self.Bbc)) - # dCdm_RHS2v = (sdiag(vol) * temp1) * \ - # np.inner(vol, dchidmu * dmu_dm * v) - - # dCdm_RHSv = dCdm_RHS1*(dmu_dm*v) + dCdm_RHS2v - dCdm_RHSv = dCdm_RHS1 * (dmu_dm * v) - dCdm_v = dCdm_A * v - dCdm_RHSv - - Ainv = self.solver(dCdu, **self.solver_opts) - sol = Ainv * dCdm_v - - dudm = -sol - dBdmv = ( - sdiag(self.MfMu0 * B0) * (dMfMuI * (dmu_dm * v)) - - sdiag(Div.T * u) * (dMfMuI * (dmu_dm * v)) - - self.MfMuI * (Div.T * (dudm)) - ) + self.model = m + f = self.fields(m) + b_field, u = f["b"], f["u"] + MfMu0iI = self.mesh.get_face_inner_product(1.0 / mu_0, invert_matrix=True) - Ainv.clean() + mu0_h = -MfMu0iI * self._DivT * u + mu0_m = b_field - mu0_h - return mkvc(P * dBdmv) + return mu0_m - @utils.timeIt - def Jtvec(self, m, v, u=None): - r""" - Computing Jacobian^T multiplied by vector. + def Jvec(self, m, v, f=None): + self.model = m - .. math :: + if f is None: + f = self.fields(m) - (\frac{\delta \mathbf{P}\mathbf{B}} {\delta \mathbf{m}})^{T} = - \left[ - \mathbf{P}_{deriv}\frac{\partial \mathbf{\mu} } {\partial \mathbf{m} } - \left[ - \diag(\M^f_{\mu_{0}^{-1} } \mathbf{B}_0) \dMfMuI - - \diag (\Div^T\mathbf{u})\dMfMuI - \right ] - \right]^{T} - - - \left[ - \mathbf{P}_{deriv}(\MfMui)^{-1} \Div^T - \frac{\delta\mathbf{u}}{\delta \mathbf{m}} - \right]^{T} + if self.storeJ: + J = self.getJ(m, f=f) + return J.dot(v) - where + return self._Jvec(m, v, f) - .. math :: + def Jtvec(self, m, v, f=None): + self.model = m - \mathbf{P}_{derv} = \frac{\partial \mathbf{P}}{\partial\mathbf{B}} + if f is None: + f = self.fields(m) - .. note :: + if self.storeJ: + J = self.getJ(m, f=f) + return np.asarray(J.T.dot(v)) - Here we only want to compute + return self._Jtvec(m, v, f) - .. 
math :: + def getJ(self, m, f=None): + self.model = m + if self._Jmatrix: + return self._Jmatrix + if f is None: + f = self.fields(m) + if m.size < self.survey.nD: + J = self._Jvec(m, v=None, f=f) + else: + J = self._Jtvec(m, v=None, f=f).T - \mathbf{J}^{T}\mathbf{v} = - (\frac{\delta \mathbf{P}\mathbf{B}} {\delta \mathbf{m}})^{T} \mathbf{v} + if self.storeJ: + self._Jmatrix = J + return J - """ - if u is None: - u = self.fields(m) + def _Jtvec(self, m, v, f): + b_field, u = f["b"], f["u"] - B, u = u["B"], u["u"] - mu = self.mapping * (m) - dmu_dm = self.mapping.deriv(m) - # dchidmu = sdiag(1 / mu_0 * np.ones(self.mesh.nC)) + Q = self._projectFieldsDeriv(b_field) - vol = self.mesh.cell_volumes - Div = self._Div - P = self.survey.projectFieldsDeriv(B) # Projection matrix - B0 = self.getB0() + if v is None: + v = np.eye(Q.shape[0]) + divt_solve_q = ( + self._DivT * (self._Ainv * ((Q * self.MfMuiI * -self._DivT).T * v)) + + Q.T * v + ) + del v + else: + divt_solve_q = ( + self._DivT * (self._Ainv * ((-self._Div * (self.MfMuiI.T * (Q.T * v))))) + + Q.T * v + ) - MfMuIvec = 1 / self.MfMui.diagonal() - dMfMuI = sdiag(MfMuIvec**2) * self.mesh.aveF2CC.T * sdiag(vol * 1.0 / mu**2) + mu_vec = np.tile(self.mu * np.ones(self.mesh.n_cells), self.mesh.dim) - # A = self._Div*self.MfMuI*self._Div.T - # RHS = Div*MfMuI*MfMu0*B0 - Div*B0 + Mc*Dface*Pout.T*Bbc - # C(m,u) = A*m-rhs - # dudm = -(dCdu)^(-1)dCdm + Jtv = 0 - dCdu = self.getA(m) - s = Div * (self.MfMuI.T * (P.T * v)) + if self.remMap is not None: + Mf_rem_deriv = self._Mf_vec_deriv * sp.diags(1 / mu_vec) * self.remDeriv + Jtv += (self.MfMuiI * Mf_rem_deriv).T * (divt_solve_q) - Ainv = self.solver(dCdu.T, **self.solver_opts) - sol = Ainv * s + if self.muMap is not None: + Jtv += self.MfMuiIDeriv(self._DivT * u, -divt_solve_q, adjoint=True) + Jtv += self.MfMuiIDeriv( + self._b0, self._MfMu0i.T * (divt_solve_q), adjoint=True + ) - Ainv.clean() + if self.rem is not None: + Mf_r_mui = self.mesh.get_face_inner_product( + self.rem / mu_vec + ).diagonal() + mu_vec_i_deriv = sp.vstack( + (self.muiDeriv, self.muiDeriv, self.muiDeriv) + ) - # dCdm_A = Div * ( sdiag( Div.T * u )* dMfMuI *dmu_dm ) - # dCdm_Atsol = ( dMfMuI.T*( sdiag( Div.T * u ) * (Div.T * dmu_dm)) ) * sol - dCdm_Atsol = (dmu_dm.T * dMfMuI.T * (sdiag(Div.T * u) * Div.T)) * sol + Mf_r_mui_deriv = ( + self._Mf_vec_deriv * sp.diags(self.rem) * mu_vec_i_deriv + ) - # dCdm_RHS1 = Div * (sdiag( self.MfMu0*B0 ) * dMfMuI) - # dCdm_RHS1tsol = (dMfMuI.T*( sdiag( self.MfMu0*B0 ) ) * Div.T * dmu_dm) * sol - dCdm_RHS1tsol = (dmu_dm.T * dMfMuI.T * (sdiag(self.MfMu0 * B0)) * Div.T) * sol + Jtv += ( + self.MfMuiIDeriv(Mf_r_mui, divt_solve_q, adjoint=True) + + (Mf_r_mui_deriv.T * self.MfMuiI.T) * divt_solve_q + ) - # temp1 = (Dface*(self._Pout.T*self.Bbc_const*self.Bbc)) - # temp1sol = (Dface.T * (sdiag(vol) * sol)) - # temp2 = self.Bbc_const * (self._Pout.T * self.Bbc).T - # dCdm_RHS2v = (sdiag(vol)*temp1)*np.inner(vol, dchidmu*dmu_dm*v) - # dCdm_RHS2tsol = (dmu_dm.T * dchidmu.T * vol) * np.inner(temp2, temp1sol) + return Jtv - # dCdm_RHSv = dCdm_RHS1*(dmu_dm*v) + dCdm_RHS2v + def _Jvec(self, m, v, f): - # temporary fix - # dCdm_RHStsol = dCdm_RHS1tsol - dCdm_RHS2tsol - dCdm_RHStsol = dCdm_RHS1tsol + if v is None: + v = np.eye(m.shape[0]) - # dCdm_RHSv = dCdm_RHS1*(dmu_dm*v) + dCdm_RHS2v - # dCdm_v = dCdm_A*v - dCdm_RHSv + b_field, u = f["b"], f["u"] - Ctv = dCdm_Atsol - dCdm_RHStsol + Q = self._projectFieldsDeriv(b_field) + C = -self.MfMuiI * self._DivT - # B = 
self.MfMuI*self.MfMu0*B0-B0-self.MfMuI*self._Div.T*u - # dBdm = d\mudm*dBd\mu - # dPBdm^T*v = Atemp^T*P^T*v - Btemp^T*P^T*v - Ctv + db_dm = 0 + dCmu_dm = 0 - Atemp = sdiag(self.MfMu0 * B0) * (dMfMuI * (dmu_dm)) - Btemp = sdiag(Div.T * u) * (dMfMuI * (dmu_dm)) - Jtv = Atemp.T * (P.T * v) - Btemp.T * (P.T * v) - Ctv + mu_vec = np.tile(self.mu * np.ones(self.mesh.n_cells), self.mesh.dim) - return mkvc(Jtv) + if self.remMap is not None: + Mf_rem_deriv = self._Mf_vec_deriv * sp.diags(1 / mu_vec) * self.remDeriv + db_dm += self.MfMuiI * Mf_rem_deriv * v - @property - def Qfx(self): - if getattr(self, "_Qfx", None) is None: - self._Qfx = self.mesh.get_interpolation_matrix( - self.survey.receiver_locations, "Fx" - ) - return self._Qfx + if self.muMap is not None: + dCmu_dm += self.MfMuiIDeriv(self._DivT @ u, v, adjoint=False) + db_dm += self._MfMu0i * self.MfMuiIDeriv(self._b0, v, adjoint=False) - @property - def Qfy(self): - if getattr(self, "_Qfy", None) is None: - self._Qfy = self.mesh.get_interpolation_matrix( - self.survey.receiver_locations, "Fy" - ) - return self._Qfy + if self.rem is not None: + Mf_r_mui = self.mesh.get_face_inner_product( + self.rem / mu_vec + ).diagonal() + mu_vec_i_deriv = sp.vstack( + (self.muiDeriv, self.muiDeriv, self.muiDeriv) + ) + Mf_r_mui_deriv = ( + self._Mf_vec_deriv * sp.diags(self.rem) * mu_vec_i_deriv + ) + db_dm += self.MfMuiIDeriv(Mf_r_mui, v, adjoint=False) + ( + self.MfMuiI * Mf_r_mui_deriv * v + ) - @property - def Qfz(self): - if getattr(self, "_Qfz", None) is None: - self._Qfz = self.mesh.get_interpolation_matrix( - self.survey.receiver_locations, "Fz" - ) - return self._Qfz + Ainv_Ddm = self._Ainv * (self._Div * (-dCmu_dm + db_dm)) - def projectFields(self, u): - r""" - This function projects the fields onto the data space. - Especially, here for we use total magnetic intensity (TMI) data, - which is common in practice. - First we project our B on to data location + Jv = Q * (C * Ainv_Ddm + (-dCmu_dm + db_dm)) - .. math:: + return Jv - \mathbf{B}_{rec} = \mathbf{P} \mathbf{B} + @cached_property + def _Qfx(self): + Qfx = self.mesh.get_interpolation_matrix(self.survey.receiver_locations, "Fx") + return Qfx - then we take the dot product between B and b_0 + @cached_property + def _Qfy(self): + Qfy = self.mesh.get_interpolation_matrix(self.survey.receiver_locations, "Fy") + return Qfy - .. 
math :: + @cached_property + def _Qfz(self): + Qfz = self.mesh.get_interpolation_matrix(self.survey.receiver_locations, "Fz") + return Qfz - \text{TMI} = \vec{B}_s \cdot \hat{B}_0 + def _projectFields(self, f): - """ - # TODO: There can be some different tyes of data like |B| or B - components = self.survey.components + rx_list = self.survey.source_field.receiver_list + components = [] + for rx in rx_list: + components.extend(rx.components) + components = set(components) - fields = {} if "bx" in components or "tmi" in components: - fields["bx"] = self.Qfx * u["B"] + bx = self._Qfx * f["b"] if "by" in components or "tmi" in components: - fields["by"] = self.Qfy * u["B"] + by = self._Qfy * f["b"] if "bz" in components or "tmi" in components: - fields["bz"] = self.Qfz * u["B"] + bz = self._Qfz * f["b"] if "tmi" in components: - bx = fields["bx"] - by = fields["by"] - bz = fields["bz"] - # Generate unit vector - B0 = self.survey.source_field.b0 - Bot = np.sqrt(B0[0] ** 2 + B0[1] ** 2 + B0[2] ** 2) - box = B0[0] / Bot - boy = B0[1] / Bot - boz = B0[2] / Bot - fields["tmi"] = bx * box + by * boy + bz * boz - - return np.concatenate([fields[comp] for comp in components]) - - @utils.count - def projectFieldsDeriv(self, B): - r""" - This function projects the fields onto the data space. - - .. math:: - - \frac{\partial d_\text{pred}}{\partial \mathbf{B}} = \mathbf{P} + b0 = self.survey.source_field.b0 + tmi = np.sqrt( + (bx + b0[0]) ** 2 + (by + b0[1]) ** 2 + (bz + b0[2]) ** 2 + ) - np.sqrt(b0[0] ** 2 + b0[1] ** 2 + b0[2] ** 2) + + n_total = 0 + total_data_list = [] + for rx in rx_list: + data = {} + rx_n_locs = rx.locations.shape[0] + if "bx" in rx.components: + data["bx"] = bx[n_total : n_total + rx_n_locs] + if "by" in rx.components: + data["by"] = by[n_total : n_total + rx_n_locs] + if "bz" in rx.components: + data["bz"] = bz[n_total : n_total + rx_n_locs] + if "tmi" in rx.components: + data["tmi"] = tmi[n_total : n_total + rx_n_locs] + + n_total += rx_n_locs + + total_data_list.append( + np.concatenate([data[comp] for comp in rx.components]) + ) - Especially, this function is for TMI data type - """ + if len(total_data_list) == 1: + return total_data_list[0] - components = self.survey.components + return np.concatenate(total_data_list, axis=0) - fields = {} - if "bx" in components or "tmi" in components: - fields["bx"] = self.Qfx - if "by" in components or "tmi" in components: - fields["by"] = self.Qfy - if "bz" in components or "tmi" in components: - fields["bz"] = self.Qfz + @utils.count + def _projectFieldsDeriv(self, bs): + rx_list = self.survey.source_field.receiver_list + components = [] + for rx in rx_list: + components.extend(rx.components) + components = set(components) if "tmi" in components: - bx = fields["bx"] - by = fields["by"] - bz = fields["bz"] - # Generate unit vector - B0 = self.survey.source_field.b0 - Bot = np.sqrt(B0[0] ** 2 + B0[1] ** 2 + B0[2] ** 2) - box = B0[0] / Bot - boy = B0[1] / Bot - boz = B0[2] / Bot - fields["tmi"] = bx * box + by * boy + bz * boz + b0 = self.survey.source_field.b0 + bot = np.sqrt(b0[0] ** 2 + b0[1] ** 2 + b0[2] ** 2) - return sp.vstack([fields[comp] for comp in components]) + bx = self._Qfx * bs + by = self._Qfy * bs + bz = self._Qfz * bs - def projectFieldsAsVector(self, B): - bfx = self.Qfx * B - bfy = self.Qfy * B - bfz = self.Qfz * B + dpred = ( + np.sqrt((bx + b0[0]) ** 2 + (by + b0[1]) ** 2 + (bz + b0[2]) ** 2) - bot + ) - return np.r_[bfx, bfy, bfz] + dDhalf_dD = sdiag(1 / (dpred + bot)) + xterm = sdiag(b0[0] + bx) * self._Qfx + yterm = 
sdiag(b0[1] + by) * self._Qfy + zterm = sdiag(b0[2] + bz) * self._Qfz -def MagneticsDiffSecondaryInv(mesh, model, data, **kwargs): - """ - Inversion module for MagneticsDiffSecondary + Qtmi = dDhalf_dD * (xterm + yterm + zterm) - """ - from simpeg import ( - directives, - inversion, - objective_function, - optimization, - regularization, - ) + n_total = 0 + total_data_list = [] + for rx in rx_list: + data = {} + rx_n_locs = rx.locations.shape[0] + if "bx" in rx.components: + data["bx"] = self._Qfx[n_total : n_total + rx_n_locs][:] + if "by" in rx.components: + data["by"] = self._Qfy[n_total : n_total + rx_n_locs][:] + if "bz" in rx.components: + data["bz"] = self._Qfz[n_total : n_total + rx_n_locs][:] + if "tmi" in rx.components: + data["tmi"] = Qtmi[n_total : n_total + rx_n_locs][:] - prob = Simulation3DDifferential(mesh, survey=data, mu=model) + n_total += rx_n_locs - miter = kwargs.get("maxIter", 10) + total_data_list.append(sp.vstack([data[comp] for comp in rx.components])) - # Create an optimization program - opt = optimization.InexactGaussNewton(maxIter=miter) - opt.bfgsH0 = get_default_solver(warn=True)(sp.identity(model.nP), flag="D") - # Create a regularization program - reg = regularization.WeightedLeastSquares(model) - # Create an objective function - beta = directives.BetaSchedule(beta0=1e0) - obj = objective_function.BaseObjFunction(prob, reg, beta=beta) - # Create an inversion object - inv = inversion.BaseInversion(obj, opt) + if len(total_data_list) == 1: + return total_data_list[0] - return inv, reg + return sp.vstack(total_data_list) + + @property + def _delete_on_model_update(self): + toDelete = super()._delete_on_model_update + if self._stored_fields is not None: + toDelete = toDelete + ["_stored_fields"] + if self.muMap is not None: + if self._Ainv is not None: + toDelete = toDelete + ["_Ainv"] + if self._Jmatrix is not None: + toDelete = toDelete + ["_Jmatrix"] + return toDelete diff --git a/simpeg/potential_fields/magnetics/sources.py b/simpeg/potential_fields/magnetics/sources.py index d63797a99a..56c9e8662c 100644 --- a/simpeg/potential_fields/magnetics/sources.py +++ b/simpeg/potential_fields/magnetics/sources.py @@ -1,6 +1,6 @@ from ...survey import BaseSrc from ...utils.mat_utils import dip_azimuth2cartesian -from ...utils.code_utils import deprecate_class, validate_float, validate_list_of_types +from ...utils.code_utils import validate_float, validate_list_of_types from .receivers import Point @@ -107,29 +107,3 @@ def b0(self): self.amplitude * dip_azimuth2cartesian(self.inclination, self.declination).squeeze() ) - - -@deprecate_class(removal_version="0.19.0", error=True) -class SourceField(UniformBackgroundField): - """Source field for magnetics integral formulation - - Parameters - ---------- - receivers_list : list of simpeg.potential_fields.receivers.Point - List of magnetics receivers - parameters : (3) array_like of float - Define the Earth's inducing field according to - [*amplitude*, *inclination*, *declination*] where: - - - *amplitude* is the field intensity in nT - - *inclination* is the inclination of the Earth's field in degrees - - *declination* is the declination of the Earth's field in degrees - """ - - def __init__(self, receiver_list=None, parameters=(50000, 90, 0)): - super().__init__( - receiver_list=receiver_list, - amplitude=parameters[0], - inclination=parameters[1], - declination=parameters[2], - ) diff --git a/simpeg/potential_fields/magnetics/survey.py b/simpeg/potential_fields/magnetics/survey.py index 56a2c3296c..c55bfcf653 100644 --- 
a/simpeg/potential_fields/magnetics/survey.py
+++ b/simpeg/potential_fields/magnetics/survey.py
@@ -1,8 +1,16 @@
 import numpy as np
 from ...survey import BaseSurvey
-from ...utils.code_utils import validate_type
+from ...utils.code_utils import validate_list_of_types
 from .sources import UniformBackgroundField
+try:
+    from warnings import deprecated
+except ImportError:
+    # Use the deprecated decorator provided by typing_extensions (which
+    # supports older versions of Python) if it cannot be imported from
+    # warnings.
+    from typing_extensions import deprecated
+
 
 class Survey(BaseSurvey):
     """Base Magnetics Survey
@@ -14,10 +22,39 @@ class Survey(BaseSurvey):
 
     def __init__(self, source_field, **kwargs):
-        self.source_field = validate_type(
-            "source_field", source_field, UniformBackgroundField, cast=False
+        if "source_list" in kwargs:
+            msg = (
+                "source_list is not a valid argument to magnetics.Survey. "
+                "Use source_field instead."
+            )
+            raise TypeError(msg)
+        super().__init__(source_list=source_field, **kwargs)
+
+    @BaseSurvey.source_list.setter
+    def source_list(self, new_list):
+        # mag simulations only support 1 source... for now...
+        self._source_list = validate_list_of_types(
+            "source_list",
+            new_list,
+            UniformBackgroundField,
+            ensure_unique=True,
+            min_n=1,
+            max_n=1,
         )
-        super().__init__(source_list=None, **kwargs)
+
+    @property
+    def source_field(self):
+        """A source defining the Earth's inducing field and containing the magnetic receivers.
+
+        Returns
+        -------
+        simpeg.potential_fields.magnetics.sources.UniformBackgroundField
+        """
+        return self.source_list[0]
+
+    @source_field.setter
+    def source_field(self, new_src):
+        self.source_list = new_src
 
     def eval(self, fields):  # noqa: A003
         """Compute the fields
@@ -69,9 +106,24 @@ def nD(self):
         return sum(rx.nD for rx in self.source_field.receiver_list)
 
     @property
+    @deprecated(
+        "The `components` property is deprecated, "
+        "and will be removed in SimPEG v0.25.0. "
+        "Within a magnetic survey, receivers can contain different components. "
+        "Iterate over the sources and receivers in the survey to get "
+        "information about their components.",
+        category=FutureWarning,
+    )
     def components(self):
         """Field components
 
+        .. deprecated:: 0.24.0
+
+            The `components` property is deprecated, and will be removed in
+            SimPEG v0.25.0. Within a magnetic survey, receivers can contain
+            different components. Iterate over the sources and receivers in the
+            survey to get information about their components.
+ Returns ------- list of str diff --git a/simpeg/regularization/__init__.py b/simpeg/regularization/__init__.py index e28f24ca0a..cd8b64b821 100644 --- a/simpeg/regularization/__init__.py +++ b/simpeg/regularization/__init__.py @@ -148,7 +148,6 @@ """ -from ..utils.code_utils import deprecate_class from .base import ( BaseRegularization, WeightedLeastSquares, @@ -172,87 +171,3 @@ AmplitudeSmoothnessFirstOrder, ) from ._gradient import SmoothnessFullGradient - - -@deprecate_class(removal_version="0.19.0", error=True) -class SimpleSmall(Smallness): - """Deprecated class, replaced by Smallness.""" - - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class SimpleSmoothDeriv(SmoothnessFirstOrder): - """Deprecated class, replaced by SmoothnessFirstOrder.""" - - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class Simple(WeightedLeastSquares): - """Deprecated class, replaced by WeightedLeastSquares.""" - - def __init__(self, mesh=None, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs): - # These alphas are now refered to as length_scalse in the - # new WeightedLeastSquares regularization - super().__init__( - mesh=mesh, - length_scale_x=alpha_x, - length_scale_y=alpha_y, - length_scale_z=alpha_z, - **kwargs, - ) - - -@deprecate_class(removal_version="0.19.0", error=True) -class Tikhonov(WeightedLeastSquares): - """Deprecated class, replaced by WeightedLeastSquares.""" - - def __init__( - self, mesh=None, alpha_s=1e-6, alpha_x=1.0, alpha_y=1.0, alpha_z=1.0, **kwargs - ): - super().__init__( - mesh=mesh, - alpha_s=alpha_s, - alpha_x=alpha_x, - alpha_y=alpha_y, - alpha_z=alpha_z, - **kwargs, - ) - - -@deprecate_class(removal_version="0.19.0", error=True) -class Small(Smallness): - """Deprecated class, replaced by Smallness.""" - - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class SmoothDeriv(SmoothnessFirstOrder): - """Deprecated class, replaced by SmoothnessFirstOrder.""" - - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class SmoothDeriv2(SmoothnessSecondOrder): - """Deprecated class, replaced by SmoothnessSecondOrder.""" - - pass - - -@deprecate_class(removal_version="0.19.0", error=True) -class PGIwithNonlinearRelationshipsSmallness(PGIsmallness): - """Deprecated class, replaced by PGIsmallness.""" - - def __init__(self, gmm, **kwargs): - super().__init__(gmm, non_linear_relationships=True, **kwargs) - - -@deprecate_class(removal_version="0.19.0", error=True) -class PGIwithRelationships(PGI): - """Deprecated class, replaced by PGI.""" - - def __init__(self, mesh, gmmref, **kwargs): - super().__init__(mesh, gmmref, non_linear_relationships=True, **kwargs) diff --git a/simpeg/regularization/base.py b/simpeg/regularization/base.py index 3aa3a9cb86..dd643a52eb 100644 --- a/simpeg/regularization/base.py +++ b/simpeg/regularization/base.py @@ -5,7 +5,7 @@ from .. import utils from .regularization_mesh import RegularizationMesh -from simpeg.utils.code_utils import deprecate_property, validate_ndarray_with_shape +from simpeg.utils.code_utils import validate_ndarray_with_shape from scipy.sparse import csr_matrix @@ -67,18 +67,6 @@ def __init__( "It must be a dictionary with strings as keys and arrays as values." ) - # Raise errors on deprecated arguments: avoid old code that still uses - # them to silently fail - if (key := "indActive") in kwargs: - raise TypeError( - f"'{key}' argument has been removed. " - "Please use 'active_cells' instead." 
- ) - if (key := "cell_weights") in kwargs: - raise TypeError( - f"'{key}' argument has been removed. Please use 'weights' instead." - ) - super().__init__(nP=None, mapping=None, **kwargs) self._regularization_mesh = mesh self._weights = {} @@ -121,14 +109,6 @@ def active_cells(self, values: np.ndarray | None): if volume_term: self.set_weights(volume=self.regularization_mesh.vol) - indActive = deprecate_property( - active_cells, - "indActive", - "active_cells", - "0.19.0", - error=True, - ) - @property def model(self) -> np.ndarray: """The model parameters. @@ -256,14 +236,6 @@ def reference_model(self, values: np.ndarray | float): ) self._reference_model = values - mref = deprecate_property( - reference_model, - "mref", - "reference_model", - "0.19.0", - error=True, - ) - @property def regularization_mesh(self) -> RegularizationMesh: """Regularization mesh. @@ -278,31 +250,6 @@ def regularization_mesh(self) -> RegularizationMesh: """ return self._regularization_mesh - regmesh = deprecate_property( - regularization_mesh, - "regmesh", - "regularization_mesh", - "0.19.0", - error=True, - ) - - @property - def cell_weights(self) -> np.ndarray: - """Deprecated property for 'volume' and user defined weights.""" - raise AttributeError( - "'cell_weights' has been removed. " - "Please access weights using the `set_weights`, `get_weights`, and " - "`remove_weights` methods." - ) - - @cell_weights.setter - def cell_weights(self, value): - raise AttributeError( - "'cell_weights' has been removed. " - "Please access weights using the `set_weights`, `get_weights`, and " - "`remove_weights` methods." - ) - def get_weights(self, key) -> np.ndarray: """Cell weights for a given key. @@ -1562,19 +1509,6 @@ def __init__( ) self._regularization_mesh = mesh - # Raise errors on deprecated arguments: avoid old code that still uses - # them to silently fail - if (key := "indActive") in kwargs: - raise TypeError( - f"'{key}' argument has been removed. " - "Please use 'active_cells' instead." - ) - - if (key := "cell_weights") in kwargs: - raise TypeError( - f"'{key}' argument has been removed. Please use 'weights' instead." - ) - self.alpha_s = alpha_s if alpha_x is not None: if length_scale_x is not None: @@ -2082,14 +2016,6 @@ def active_cells(self, values: np.ndarray): for objfct in self.objfcts: objfct.active_cells = active_cells - indActive = deprecate_property( - active_cells, - "indActive", - "active_cells", - "0.19.0", - error=True, - ) - @property def reference_model(self) -> np.ndarray: """Reference model. @@ -2112,14 +2038,6 @@ def reference_model(self, values: np.ndarray | float): self._reference_model = values - mref = deprecate_property( - reference_model, - "mref", - "reference_model", - "0.19.0", - error=True, - ) - @property def model(self) -> np.ndarray: """The model associated with regularization. 
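
A minimal migration sketch for downstream code affected by the removals above (illustrative only, not part of the patch; the weight key ``user_weights`` is hypothetical, and any ``BaseRegularization`` subclass behaves the same):

    import numpy as np
    import discretize
    from simpeg import regularization

    mesh = discretize.TensorMesh([8, 8, 8])
    active = np.ones(mesh.n_cells, dtype=bool)

    # pass active cells at construction (formerly the removed `indActive`)
    reg = regularization.Smallness(mesh, active_cells=active)
    reg.reference_model = np.zeros(mesh.n_cells)  # formerly `mref`
    # weights are now managed through set/get/remove_weights (formerly `cell_weights`)
    reg.set_weights(user_weights=np.ones(mesh.n_cells))
    reg_mesh = reg.regularization_mesh  # formerly `regmesh`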
diff --git a/simpeg/regularization/pgi.py b/simpeg/regularization/pgi.py index dbc059eaa6..0193647251 100644 --- a/simpeg/regularization/pgi.py +++ b/simpeg/regularization/pgi.py @@ -8,7 +8,6 @@ from ..objective_function import ComboObjectiveFunction from ..utils import ( Identity, - deprecate_property, mkvc, sdiag, timeIt, @@ -1365,11 +1364,3 @@ def reference_model(self, values: np.ndarray | float): for fct in self.objfcts: fct.reference_model = values - - mref = deprecate_property( - reference_model, - "mref", - "reference_model", - "0.19.0", - error=True, - ) diff --git a/simpeg/regularization/regularization_mesh.py b/simpeg/regularization/regularization_mesh.py index dea11bb2f1..38b97db5e3 100644 --- a/simpeg/regularization/regularization_mesh.py +++ b/simpeg/regularization/regularization_mesh.py @@ -1,7 +1,7 @@ import numpy as np import scipy.sparse as sp -from simpeg.utils.code_utils import deprecate_property, validate_active_indices +from simpeg.utils.code_utils import validate_active_indices from .. import props, utils @@ -518,28 +518,6 @@ def cell_gradient_z(self) -> sp.csr_matrix: ) return self._cell_gradient_z - cellDiffx = deprecate_property( - cell_gradient_x, - "cellDiffx", - "cell_gradient_x", - "0.19.0", - error=True, - ) - cellDiffy = deprecate_property( - cell_gradient_y, - "cellDiffy", - "cell_gradient_y", - "0.19.0", - error=True, - ) - cellDiffz = deprecate_property( - cell_gradient_z, - "cellDiffz", - "cell_gradient_z", - "0.19.0", - error=True, - ) - @property def cell_distances_x(self) -> np.ndarray: """Cell center distance array along the x-direction. diff --git a/simpeg/regularization/sparse.py b/simpeg/regularization/sparse.py index 7bf6e23c5d..29b22229e7 100644 --- a/simpeg/regularization/sparse.py +++ b/simpeg/regularization/sparse.py @@ -9,7 +9,6 @@ Smallness, SmoothnessFirstOrder, ) -from .. import utils from ..utils import ( validate_ndarray_with_shape, validate_float, @@ -575,12 +574,6 @@ class SparseSmoothness(BaseSparse, SmoothnessFirstOrder): """ def __init__(self, mesh, orientation="x", gradient_type="total", **kwargs): - # Raise error if removed arguments were passed - if (key := "gradientType") in kwargs: - raise TypeError( - f"'{key}' argument has been removed. " - "Please use 'gradient_type' instead." - ) self.gradient_type = gradient_type super().__init__(mesh=mesh, orientation=orientation, **kwargs) @@ -691,14 +684,6 @@ def gradient_type(self, value: str): "gradient_type", value, ["total", "components"] ) - gradientType = utils.code_utils.deprecate_property( - gradient_type, - "gradientType", - new_name="gradient_type", - removal_version="0.19.0", - error=True, - ) - class Sparse(WeightedLeastSquares): r"""Sparse norm weighted least squares regularization. @@ -931,13 +916,6 @@ def __init__( f"Value of type {type(mesh)} provided." ) - # Raise error if removed arguments were passed - if (key := "gradientType") in kwargs: - raise TypeError( - f"'{key}' argument has been removed. " - "Please use 'gradient_type' instead." - ) - self._regularization_mesh = mesh if active_cells is not None: self._regularization_mesh.active_cells = active_cells @@ -995,10 +973,6 @@ def gradient_type(self, value: str): self._gradient_type = value - gradientType = utils.code_utils.deprecate_property( - gradient_type, "gradientType", "0.19.0", error=True - ) - @property def norms(self): """Norms for the child regularization classes. 
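
A minimal sketch of the updated ``Sparse`` usage (illustrative; the norm values are arbitrary). ``gradient_type`` replaces the removed ``gradientType``, which now fails with a standard "unexpected keyword argument" ``TypeError``:

    import discretize
    from simpeg import regularization

    mesh = discretize.TensorMesh([8, 8])
    # gradient_type is validated against {"total", "components"}
    reg = regularization.Sparse(mesh, gradient_type="total")
    reg.norms = [0.0, 1.0, 1.0]  # p-norms for smallness and x/y smoothness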
diff --git a/simpeg/simulation.py b/simpeg/simulation.py index 49e27949fc..1a3d39fc8d 100644 --- a/simpeg/simulation.py +++ b/simpeg/simulation.py @@ -11,7 +11,7 @@ from . import props from .typing import RandomSeed -from .data import SyntheticData, Data +from .data import SyntheticData from .survey import BaseSurvey from .utils import ( Counter, @@ -188,11 +188,13 @@ def dpred(self, m=None, f=None): f = self.fields(m) - data = Data(self.survey) + survey_slices = self.survey.get_all_slices() + dpred = np.full(self.survey.nD, np.nan) for src in self.survey.source_list: for rx in src.receiver_list: - data[src, rx] = rx.eval(src, self.mesh, f) - return mkvc(data) + src_rx_slice = survey_slices[src, rx] + dpred[src_rx_slice] = mkvc(rx.eval(src, self.mesh, f)) + return mkvc(dpred) @timeIt def Jvec(self, m, v, f=None): @@ -622,11 +624,13 @@ def dpred(self, m=None, f=None): if f is None: f = self.fields(m) - data = Data(self.survey) + survey_slices = self.survey.get_all_slices() + dpred = np.full(self.survey.nD, np.nan) for src in self.survey.source_list: for rx in src.receiver_list: - data[src, rx] = rx.eval(src, self.mesh, self.time_mesh, f) - return data.dobs + src_rx_slice = survey_slices[src, rx] + dpred[src_rx_slice] = mkvc(rx.eval(src, self.mesh, self.time_mesh, f)) + return dpred ############################################################################## diff --git a/simpeg/survey.py b/simpeg/survey.py index 65335b43a5..d71e858780 100644 --- a/simpeg/survey.py +++ b/simpeg/survey.py @@ -432,7 +432,7 @@ def __init__(self, source_list, counter=None, **kwargs): self.source_list = source_list if counter is not None: - self.counter = counter + self._counter = validate_type("counter", counter, Counter, cast=False) self._uid = uuid.uuid4() super().__init__(**kwargs) @@ -482,21 +482,6 @@ def uid(self): """ return self._uid - @property - def counter(self): - """A SimPEG counter object for counting iterations and operations - - Returns - ------- - simpeg.utils.counter_utils.Counter - A SimPEG counter object - """ - return self._counter - - @counter.setter - def counter(self, new_obj): - self._counter = validate_type("counter", new_obj, Counter, cast=False) - # TODO: this should be private def get_source_indices(self, sources): if not isinstance(sources, list): @@ -555,6 +540,92 @@ def _n_fields(self): """number of fields required for solution""" return sum(src._fields_per_source for src in self.source_list) + def get_slice(self, source, receiver): + """ + Get slice to index a flat array for a given source-receiver pair. + + Use this method to index a data or uncertainty array for + a source-receiver pair of this survey. + + Parameters + ---------- + source : .BaseSrc + Source object. + receiver : .BaseRx + Receiver object. + + Returns + ------- + slice + + Raises + ------ + KeyError + If the given ``source`` or ``receiver`` do not belong to this survey. 
+
+        See also
+        --------
+        .get_all_slices
+        """
+        # Create generator for source and receiver pairs
+        source_receiver_pairs = (
+            (src, rx) for src in self.source_list for rx in src.receiver_list
+        )
+        # Get the start and end offsets for the given source and receiver, and
+        # build the slice
+        src_rx_slice = None
+        end_offset = 0
+        for src, rx in source_receiver_pairs:
+            start_offset = end_offset
+            end_offset += rx.nD
+            if src is source and rx is receiver:
+                src_rx_slice = slice(start_offset, end_offset)
+                break
+        # Raise error if the source-receiver pair is not in the survey
+        if src_rx_slice is None:
+            msg = (
+                f"Source '{source}' and receiver '{receiver}' pair "
+                "is not part of the survey."
+            )
+            raise KeyError(msg)
+        return src_rx_slice
+
+    def get_all_slices(self):
+        """
+        Get slices to index a flat array for all source-receiver pairs.
+
+        .. warning::
+
+            Survey objects are mutable objects. If the sources or receivers in
+            it get modified, slices generated with this method will not match
+            the arrays linked to the modified survey.
+
+        Returns
+        -------
+        dict[tuple[.BaseSrc, .BaseRx], slice]
+            Dictionary with flat array slices for every pair of source and
+            receiver in the survey. The keys are tuples of a single source and
+            a single receiver, and the values are the corresponding slice for
+            each one of them.
+
+        See also
+        --------
+        .get_slice
+        """
+        # Create generator for source and receiver pairs
+        source_receiver_pairs = (
+            (src, rx) for src in self.source_list for rx in src.receiver_list
+        )
+        # Get the start and end offsets for all source-receiver pairs, and
+        # build the slices.
+        slices = {}
+        end_offset = 0
+        for src, rx in source_receiver_pairs:
+            start_offset = end_offset
+            end_offset += rx.nD
+            slices[(src, rx)] = slice(start_offset, end_offset)
+        return slices
+
 
 class BaseTimeSurvey(BaseSurvey):
     """Base SimPEG survey class for time-dependent simulations."""
diff --git a/simpeg/typing/__init__.py b/simpeg/typing/__init__.py
index f0d4d57b78..9f68652973 100644
--- a/simpeg/typing/__init__.py
+++ b/simpeg/typing/__init__.py
@@ -13,12 +13,15 @@
     :toctree: generated/
 
     RandomSeed
+    MinimizeCallable
 """
 
 import numpy as np
 import numpy.typing as npt
 from typing import Union, TypeAlias
+from collections.abc import Callable
+from scipy.sparse.linalg import LinearOperator
 
 RandomSeed: TypeAlias = Union[
     int,
@@ -27,8 +30,7 @@
     np.random.BitGenerator,
     np.random.Generator,
 ]
-
-RandomSeed.__doc__ = """
+"""
 A ``typing.Union`` for random seeds and Numpy's random number generators.
 
 These type of variables can be used throughout ``simpeg`` to control random
@@ -46,3 +48,28 @@
 ...     rng = np.random.default_rng(seed=seed)
 ...     ...
 """
+
+MinimizeCallable: TypeAlias = Callable[
+    [np.ndarray, bool, bool],
+    float
+    | tuple[float, np.ndarray | LinearOperator]
+    | tuple[float, np.ndarray, LinearOperator],
+]
+"""
+The callable expected for the minimization operations.
+
+The function's signature should look like::
+
+    func(x: numpy.ndarray, return_g: bool, return_H: bool)
+
+It should output up to three values ordered as::
+
+    f_val : float
+    gradient : numpy.ndarray
+    H : LinearOperator
+
+`f_val` is always returned, `gradient` is returned if `return_g`, and `H` is returned if `return_H`.
+`f_val` should always be the first value returned, `gradient` will always be the second, and `H` will
+always be the last. If `return_g == return_H == False`, then only the single value `f_val` is
+returned.
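+
+A minimal sketch of a conforming callable (a hypothetical quadratic objective,
+shown only for illustration)::
+
+    from scipy.sparse.linalg import LinearOperator
+
+    def func(x, return_g, return_H):
+        f_val = 0.5 * x @ x
+        out = (f_val,)
+        if return_g:
+            out += (x,)  # gradient of 0.5 * x @ x is x
+        if return_H:
+            # the Hessian here is the identity, wrapped as a LinearOperator
+            H = LinearOperator((x.size, x.size), matvec=lambda v: v, dtype=float)
+            out += (H,)
+        return out[0] if len(out) == 1 else out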
+""" diff --git a/simpeg/utils/__init__.py b/simpeg/utils/__init__.py index 6e0895fd45..bb3afdd470 100644 --- a/simpeg/utils/__init__.py +++ b/simpeg/utils/__init__.py @@ -11,6 +11,17 @@ documentation for many details on items. +Logger +====== +Function to fetch the SimPEG logger. It can be used to stream messages to the logger, +and to temporarily adjust its configuration (e.g. change log level). + +.. autosummary:: + :toctree: generated/ + + get_logger + + Counter Utility Functions ========================= @@ -144,18 +155,27 @@ Solver utilities ---------------- -This module contains utilities to get and set the default solver -used by SimPEG simulations. +Functions to get and set the default solver meant to be used in PDE simulations. .. autosummary:: :toctree: generated/ - solver_utils.get_default_solver - solver_utils.set_default_solver + get_default_solver + set_default_solver + +Custom warnings +--------------- +List of custom warnings used in SimPEG. + +.. autosummary:: + :toctree: generated/ + + PerformanceWarning """ from discretize.utils.interpolation_utils import interpolation_matrix +from .logger import get_logger from .code_utils import ( mem_profile_class, hook, @@ -246,46 +266,5 @@ GaussianMixtureWithNonlinearRelationships, GaussianMixtureWithNonlinearRelationshipsWithPrior, ) - -# Deprecated imports -interpmat = deprecate_function( - interpolation_matrix, "interpmat", removal_version="0.19.0", error=True -) - -from .code_utils import ( - memProfileWrapper, - setKwargs, - printTitles, - printLine, - checkStoppers, - printStoppers, - printDone, - callHooks, - dependentProperty, - asArray_N_x_Dim, -) -from .mat_utils import ( - sdInv, - getSubArray, - inv3X3BlockDiagonal, - inv2X2BlockDiagonal, - makePropertyTensor, - invPropertyTensor, - diagEst, - uniqueRows, -) -from .mesh_utils import ( - meshTensor, - closestPoints, - ExtractCoreMesh, -) -from .curv_utils import ( - volTetra, - faceInfo, - indexCube, - exampleLrmGrid, -) -from .coord_utils import ( - rotatePointsFromNormals, - rotationMatrixFromNormals, -) +from .solver_utils import get_default_solver, set_default_solver +from .warnings import PerformanceWarning diff --git a/simpeg/utils/code_utils.py b/simpeg/utils/code_utils.py index e0aef08c5a..ceb8ac68e7 100644 --- a/simpeg/utils/code_utils.py +++ b/simpeg/utils/code_utils.py @@ -1,4 +1,5 @@ import types + import numpy as np from functools import wraps import warnings @@ -256,9 +257,13 @@ def print_line(obj, printers, pad=""): """ values = "" for printer in printers: - values += ("{{:^{0:d}}}".format(printer["width"])).format( - printer["format"] % printer["value"](obj) - ) + value = printer["value"](obj) + format_string = f"^{printer['width']}s" + if value is not None: + formatted_val = printer["format"](value) + else: + formatted_val = "" + values += f"{formatted_val:{format_string}}" print(pad + values) @@ -315,12 +320,12 @@ def print_stoppers(obj, stoppers, pad="", stop="STOP!", done="DONE!"): done : str, default: "DONE!" 
String for statement when stopping criterian not encountered """ - print(pad + "{0!s}{1!s}{2!s}".format("-" * 25, stop, "-" * 25)) + print(pad + "-" * 25 + stop + "-" * 25) for stopper in stoppers: l = stopper["left"](obj) r = stopper["right"](obj) print(pad + stopper["str"] % (l <= r, l, r)) - print(pad + "{0!s}{1!s}{2!s}".format("-" * 25, done, "-" * 25)) + print(pad + "-" * 25 + done + "-" * 25) def call_hooks(match, mainFirst=False): @@ -373,17 +378,15 @@ def wrapper(self, *args, **kwargs): return out - extra = """ - If you have things that also need to run in the method {0!s}, you can create a method:: + extra = f""" + If you have things that also need to run in the method {match}, you can create a method:: - def _{1!s}*(self, ... ): + def _{match}*(self, ... ): pass - Where the * can be any string. If present, _{2!s}* will be called at the start of the default {3!s} call. + Where the * can be any string. If present, _{match}* will be called at the start of the default {match} call. You may also completely overwrite this function. - """.format( - match, match, match, match - ) + """ doc = wrapper.__doc__ wrapper.__doc__ = ("" if doc is None else doc) + extra return wrapper @@ -933,7 +936,9 @@ def validate_float( return var -def validate_list_of_types(property_name, var, class_type, ensure_unique=False): +def validate_list_of_types( + property_name, var, class_type, ensure_unique=False, min_n=0, max_n=None +): """Validate list of instances of a certain class Parameters @@ -946,6 +951,8 @@ def validate_list_of_types(property_name, var, class_type, ensure_unique=False): Class type(s) that are allowed in the list ensure_unique : bool, optional Checks if all items in the var are unique items. + min_n, max_n : int, optional + Minimum and maximum supported list length. Defaults accept any length list. Returns ------- @@ -959,8 +966,20 @@ def validate_list_of_types(property_name, var, class_type, ensure_unique=False): else: raise TypeError(f"{property_name!r} must be a list of {class_type}") - is_true = [isinstance(x, class_type) for x in var] - if np.all(is_true): + if max_n is not None: + if min_n == max_n and len(var) != max_n: + raise ValueError( + f"{property_name!r} must have exactly {min_n} item{'s' if min_n != 1 else ''}." + ) + elif len(var) > max_n: + raise ValueError( + f"{property_name!r} must have at most {max_n} item{'s' if max_n != 1 else ''}." + ) + if len(var) < min_n: + raise ValueError( + f"{property_name!r} must have at least {min_n} item{'s' if min_n != 1 else ''}." + ) + if all(isinstance(x, class_type) for x in var): if ensure_unique and len(set(var)) != len(var): raise ValueError( f"The {property_name!r} list must be unique. 
Cannot re-use items" @@ -1262,38 +1281,3 @@ def validate_active_indices(property_name, index_arr, n_cells): if index_arr.shape != (n_cells,): raise ValueError(f"Input 'active_cells' must have shape {(n_cells,)}") return index_arr - - -############################################################### -# DEPRECATIONS -############################################################### -memProfileWrapper = deprecate_function( - mem_profile_class, "memProfileWrapper", removal_version="0.18.0", error=True -) -setKwargs = deprecate_function( - set_kwargs, "setKwargs", removal_version="0.18.0", error=True -) -printTitles = deprecate_function( - print_titles, "printTitles", removal_version="0.18.0", error=True -) -printLine = deprecate_function( - print_line, "printLine", removal_version="0.18.0", error=True -) -printStoppers = deprecate_function( - print_stoppers, "printStoppers", removal_version="0.18.0", error=True -) -checkStoppers = deprecate_function( - check_stoppers, "checkStoppers", removal_version="0.18.0", error=True -) -printDone = deprecate_function( - print_done, "printDone", removal_version="0.18.0", error=True -) -callHooks = deprecate_function( - call_hooks, "callHooks", removal_version="0.18.0", error=True -) -dependentProperty = deprecate_function( - dependent_property, "dependentProperty", removal_version="0.18.0", error=True -) -asArray_N_x_Dim = deprecate_function( - as_array_n_by_dim, "asArray_N_x_Dim", removal_version="0.19.0", error=True -) diff --git a/simpeg/utils/coord_utils.py b/simpeg/utils/coord_utils.py index e1d17c5dbf..84491b42f7 100644 --- a/simpeg/utils/coord_utils.py +++ b/simpeg/utils/coord_utils.py @@ -2,18 +2,3 @@ rotation_matrix_from_normals, rotate_points_from_normals, ) -from .code_utils import deprecate_function - -# deprecated functions -rotationMatrixFromNormals = deprecate_function( - rotation_matrix_from_normals, - "rotationMatrixFromNormals", - removal_version="0.19.0", - error=True, -) -rotatePointsFromNormals = deprecate_function( - rotate_points_from_normals, - "rotatePointsFromNormals", - removal_version="0.19.0", - error=True, -) diff --git a/simpeg/utils/curv_utils.py b/simpeg/utils/curv_utils.py index 71e764ce60..93b4e393ec 100644 --- a/simpeg/utils/curv_utils.py +++ b/simpeg/utils/curv_utils.py @@ -4,21 +4,3 @@ face_info, example_curvilinear_grid, ) -from .code_utils import deprecate_function - -# deprecated functions -volTetra = deprecate_function( - volume_tetrahedron, "volTetra", removal_version="0.19.0", error=True -) -indexCube = deprecate_function( - index_cube, "indexCube", removal_version="0.19.0", error=True -) -faceInfo = deprecate_function( - face_info, "faceInfo", removal_version="0.19.0", error=True -) -exampleLrmGrid = deprecate_function( - example_curvilinear_grid, - "exampleLrmGrid", - removal_version="0.19.0", - error=True, -) diff --git a/simpeg/utils/logger.py b/simpeg/utils/logger.py new file mode 100644 index 0000000000..16936abeb7 --- /dev/null +++ b/simpeg/utils/logger.py @@ -0,0 +1,38 @@ +""" +Define logger for SimPEG. +""" + +import logging + +__all__ = ["get_logger"] + + +def _create_logger(): + """ + Create logger for SimPEG. + """ + logger = logging.getLogger("SimPEG") + logger.setLevel(logging.INFO) + handler = logging.StreamHandler() + formatter = logging.Formatter("{levelname}: {message}", style="{") + handler.setFormatter(formatter) + logger.addHandler(handler) + return logger + + +LOGGER = _create_logger() + + +def get_logger(): + r""" + Get the default event logger. 
+
+    The logger records events and relevant information while setting up simulations and
+    inversions. By default the logger streams to stderr using the INFO level.
+
+    Returns
+    -------
+    logger : :class:`logging.Logger`
+        The logger object for SimPEG.
+    """
+    return LOGGER
diff --git a/simpeg/utils/mat_utils.py b/simpeg/utils/mat_utils.py
index d47b7d3df6..9dbafbcdce 100644
--- a/simpeg/utils/mat_utils.py
+++ b/simpeg/utils/mat_utils.py
@@ -1,6 +1,4 @@
-import warnings
 import numpy as np
-from .code_utils import deprecate_function
 from ..typing import RandomSeed
 from discretize.utils import (  # noqa: F401
     Zero,
@@ -132,7 +130,6 @@ def eigenvalue_by_power_iteration(
     n_pw_iter=4,
     fields_list=None,
     random_seed: RandomSeed | None = None,
-    seed: RandomSeed | None = None,
 ):
     r"""Estimate largest eigenvalue in absolute value using power iteration.
@@ -157,12 +154,6 @@
         Random seed for the initial random guess of eigenvector. It can either be
         an int, a predefined Numpy random number generator, or any valid input to
         ``numpy.random.default_rng``.
-    seed : None or :class:`~simpeg.typing.RandomSeed`, optional
-
-        .. deprecated:: 0.23.0
-
-            Argument ``seed`` is deprecated in favor of ``random_seed`` and will
-            be removed in SimPEG v0.24.0.
 
     Returns
     -------
@@ -189,21 +180,6 @@
        selected from a uniform distribution.
     """
-    # Deprecate seed argument
-    if seed is not None:
-        if random_seed is not None:
-            raise TypeError(
-                "Cannot pass both 'random_seed' and 'seed'."
-                "'seed' has been deprecated and will be removed in "
-                " SimPEG v0.24.0, please use 'random_seed' instead.",
-            )
-        warnings.warn(
-            "'seed' has been deprecated and will be removed in "
-            " SimPEG v0.24.0, please use 'random_seed' instead.",
-            FutureWarning,
-            stacklevel=2,
-        )
-        random_seed = seed
     rng = np.random.default_rng(seed=random_seed)
 
     # Initial guess for eigen-vector
@@ -391,9 +367,11 @@ def dip_azimuth2cartesian(dip, azm):
 
     Parameters
     ----------
     dip : float or 1D numpy.ndarray
-        Dip angle in degrees. Values in range [0, 90]
+        Dip angle in degrees. Values in range [-90, 90]. Positive values correspond to
+        a vector pointing downwards (negative z component).
     azm : float or 1D numpy.ndarray
-        Asimuthal angle (strike) in degrees. Defined clockwise from Northing. Values is range [0, 360]
+        Azimuthal angle (strike) in degrees. Defined clockwise from Northing.
+        Values in range [0, 360] or [-180, 180].
Returns ------- @@ -401,6 +379,42 @@ def dip_azimuth2cartesian(dip, azm): Numpy array whose columns represent the x, y and z components of the vector(s) in Cartesian coordinates + Examples + -------- + >>> vector = dip_azimuth2cartesian(0, 45) + >>> x, y, z = vector[0].tolist() + >>> x, y, z # doctest: +NUMBER + (0.707, 0.707, 0.0) + + >>> vector = dip_azimuth2cartesian(0, -45) + >>> x, y, z = vector[0].tolist() + >>> x, y, z # doctest: +NUMBER + (-0.707, 0.707, 0.0) + + >>> vector = dip_azimuth2cartesian(60, 0) + >>> x, y, z = vector[0].tolist() + >>> x, y, z # doctest: +NUMBER + (0.0, 0.5, -0.866) + + >>> vector = dip_azimuth2cartesian(-30, 0) + >>> x, y, z = vector[0].tolist() + >>> x, y, z # doctest: +NUMBER + (0.0, 0.866, 0.5) + + >>> vector = dip_azimuth2cartesian(90, 0) + >>> x, y, z = vector[0].tolist() + >>> x, y, z # doctest: +NUMBER + (0.0, 0.0, -1.0) + + >>> vector = dip_azimuth2cartesian(-90, 0) + >>> x, y, z = vector[0].tolist() + >>> x, y, z # doctest: +NUMBER + (0.0, 0.0, 1.0) + + >>> vector = dip_azimuth2cartesian(30, 60) + >>> x, y, z = vector[0].tolist() + >>> x, y, z # doctest: +NUMBER + (0.75, 0.433, -0.5) """ azm = np.asarray(azm) @@ -483,44 +497,3 @@ def define_plane_from_points(xyz1, xyz2, xyz3): d = -(a * xyz1[0] + b * xyz1[1] + c * xyz1[2]) return a, b, c, d - - -################################################ -# DEPRECATED FUNCTIONS -################################################ - - -diagEst = deprecate_function( - estimate_diagonal, "diagEst", removal_version="0.19.0", error=True -) -uniqueRows = deprecate_function( - unique_rows, "uniqueRows", removal_version="0.19.0", error=True -) -sdInv = deprecate_function(sdinv, "sdInv", removal_version="0.19.0", error=True) -getSubArray = deprecate_function( - get_subarray, "getSubArray", removal_version="0.19.0", error=True -) -inv3X3BlockDiagonal = deprecate_function( - inverse_3x3_block_diagonal, - "inv3X3BlockDiagonal", - removal_version="0.19.0", - error=True, -) -inv2X2BlockDiagonal = deprecate_function( - inverse_2x2_block_diagonal, - "inv2X2BlockDiagonal", - removal_version="0.19.0", - error=True, -) -makePropertyTensor = deprecate_function( - make_property_tensor, - "makePropertyTensor", - removal_version="0.19.0", - error=True, -) -invPropertyTensor = deprecate_function( - inverse_property_tensor, - "invPropertyTensor", - removal_version="0.19.0", - error=True, -) diff --git a/simpeg/utils/mesh_utils.py b/simpeg/utils/mesh_utils.py index 1fc3a8d580..3161859288 100644 --- a/simpeg/utils/mesh_utils.py +++ b/simpeg/utils/mesh_utils.py @@ -1,5 +1,4 @@ import numpy as np -from .code_utils import deprecate_function from discretize.utils import ( # noqa: F401 unpack_widths, @@ -95,17 +94,3 @@ def surface2inds(vrtx, trgl, mesh, boundaries=True, internal=True): # Return the indexes inside return insideGrid - - -################################################ -# DEPRECATED FUNCTIONS -################################################ -meshTensor = deprecate_function( - unpack_widths, "meshTensor", removal_version="0.19.0", error=True -) -closestPoints = deprecate_function( - closest_points_index, "closestPoints", removal_version="0.19.0", error=True -) -ExtractCoreMesh = deprecate_function( - extract_core_mesh, "ExtractCoreMesh", removal_version="0.19.0", error=True -) diff --git a/simpeg/utils/model_builder.py b/simpeg/utils/model_builder.py index 285a440a8a..97f785fedb 100644 --- a/simpeg/utils/model_builder.py +++ b/simpeg/utils/model_builder.py @@ -1,4 +1,3 @@ -import warnings import numpy as np import 
scipy.ndimage as ndi
 import scipy.sparse as sp
@@ -443,12 +442,6 @@ def create_random_model(
         Number of smoothing iterations after convolutions
     bounds : list of float
         Lower and upper bound for the model values
-    seed : None or :class:`~simpeg.typing.RandomSeed`, optional
-
-        .. deprecated:: 0.23.0
-
-            Argument ``seed`` is deprecated in favor of ``random_seed`` and will
-            be removed in SimPEG v0.24.0.
 
     Returns
     -------
@@ -467,21 +460,6 @@
     >>> plt.show()
     """
-    # Deprecate seed argument
-    if "seed" in kwargs:
-        if random_seed != 1000:
-            raise TypeError(
-                "Cannot pass both 'random_seed' and 'seed'."
-                "'seed' has been deprecated and will be removed in "
-                " SimPEG v0.24.0, please use 'random_seed' instead.",
-            )
-        warnings.warn(
-            "'seed' has been deprecated and will be removed in "
-            " SimPEG v0.24.0, please use 'random_seed' instead.",
-            FutureWarning,
-            stacklevel=2,
-        )
-        random_seed = kwargs.pop("seed")
     if kwargs:
         args = ", ".join([f"'{key}'" for key in kwargs])
         raise TypeError(f"Invalid arguments {args}.")
diff --git a/simpeg/utils/pgi_utils.py b/simpeg/utils/pgi_utils.py
index 1c5c9c8f8a..9fa0d02bef 100644
--- a/simpeg/utils/pgi_utils.py
+++ b/simpeg/utils/pgi_utils.py
@@ -30,6 +30,14 @@
 except ImportError:
     GaussianMixture = None
     sklearn = False
+else:
+    # Try to import `validate_data` (added in sklearn 1.6).
+    # We should remove these bits when we set sklearn>=1.6 as the minimum version, and
+    # just import `validate_data`.
+    try:
+        from sklearn.utils.validation import validate_data
+    except ImportError:
+        validate_data = None
 
 
 ###############################################################################
@@ -541,7 +549,13 @@ def score_samples_with_sensW(self, X, sensW):
             Log probabilities of each data point in X.
         """
         check_is_fitted(self)
-        X = self._validate_data(X, reset=False)
+        # TODO: Ditch self._validate_data when setting sklearn>=1.6 as the minimum
+        # required version.
+        X = (
+            validate_data(self, X, reset=False)
+            if validate_data is not None
+            else self._validate_data(X, reset=False)
+        )
 
         return logsumexp(self._estimate_weighted_log_prob_with_sensW(X, sensW), axis=1)
@@ -1126,7 +1140,15 @@ def fit_predict(self, X, y=None, debug=False):
         if self.verbose:
             print("modified from scikit-learn")
 
-        X = self._validate_data(X, dtype=[np.float64, np.float32], ensure_min_samples=2)
+        # TODO: Ditch self._validate_data when setting sklearn>=1.6 as the minimum
+        # required version.
+        X = (
+            validate_data(self, X, dtype=[np.float64, np.float32], ensure_min_samples=2)
+            if validate_data is not None
+            else self._validate_data(
+                X, dtype=[np.float64, np.float32], ensure_min_samples=2
+            )
+        )
         if X.shape[0] < self.n_components:
             raise ValueError(
                 "Expected n_samples >= n_components "
diff --git a/simpeg/utils/solver_utils.py b/simpeg/utils/solver_utils.py
index 55ffab0f1a..70af2dae68 100644
--- a/simpeg/utils/solver_utils.py
+++ b/simpeg/utils/solver_utils.py
@@ -44,19 +44,17 @@
 _DEFAULT_SOLVER = SolverLU
 
-# Create a specific warning allowing users to silence this if they so choose.
-class DefaultSolverWarning(UserWarning):
-    pass
-
-
 def get_default_solver(warn=False) -> Type[Base]:
     """Return the default solver used by simpeg.
 
     Parameters
     ----------
     warn : bool, optional
-        If True, a warning will be raised to let users know that the default
-        solver is being chosen depending on their system.
+
+        .. deprecated:: 0.25.0
+
+            Argument ``warn`` is deprecated and will be removed in
+            SimPEG v0.26.0.
Returns ------- @@ -65,12 +63,9 @@ def get_default_solver(warn=False) -> Type[Base]: """ if warn: warnings.warn( - f"Using the default solver: {_DEFAULT_SOLVER.__name__}. \n\n" - f"If you would like to suppress this notification, add \n" - f"warnings.filterwarnings(" - "'ignore', simpeg.utils.solver_utils.DefaultSolverWarning)\n" - f" to your script.", - DefaultSolverWarning, + "The `warn` argument has been deprecated and will be " + "removed in SimPEG v0.26.0.", + FutureWarning, stacklevel=2, ) return _DEFAULT_SOLVER @@ -99,16 +94,19 @@ def set_default_solver(solver_class: Type[Base]): old_name="SolverWrapD", removal_version="0.24.0", new_location="pymatsolver", + error=True, ) SolverWrapI = deprecate_function( wrap_iterative, old_name="SolverWrapI", removal_version="0.24.0", new_location="pymatsolver", + error=True, ) SolverDiag = deprecate_function( Diagonal, old_name="SolverDiag", removal_version="0.24.0", new_location="pymatsolver", + error=True, ) diff --git a/simpeg/utils/warnings.py b/simpeg/utils/warnings.py new file mode 100644 index 0000000000..c6ead5491d --- /dev/null +++ b/simpeg/utils/warnings.py @@ -0,0 +1,11 @@ +""" +Custom warnings that can be used across SimPEG. +""" + +__all__ = ["PerformanceWarning"] + + +class PerformanceWarning(Warning): + """ + Warning raised when there is a possible performance impact. + """ diff --git a/tests/base/regularizations/test_pgi_regularization.py b/tests/base/regularizations/test_pgi_regularization.py index c69bf9e051..98bfa5fb69 100644 --- a/tests/base/regularizations/test_pgi_regularization.py +++ b/tests/base/regularizations/test_pgi_regularization.py @@ -8,7 +8,7 @@ from simpeg import regularization from simpeg.maps import Wires from simpeg.utils import WeightedGaussianMixture, mkvc -from simpeg.utils.solver_utils import get_default_solver +from simpeg.utils import get_default_solver Solver = get_default_solver() @@ -472,20 +472,6 @@ def test_spherical_covariances(self): plt.show() -def test_removed_mref(): - """Test if PGI raises error when accessing removed mref property.""" - h = [[(2, 2)], [(2, 2)], [(2, 2)]] - mesh = discretize.TensorMesh(h) - n_components = 1 - gmm = WeightedGaussianMixture(mesh=mesh, n_components=n_components) - samples = np.random.default_rng(seed=42).normal(size=(mesh.n_cells, 2)) - gmm.fit(samples) - pgi = regularization.PGI(mesh=mesh, gmmref=gmm) - message = "mref has been removed, please use reference_model." - with pytest.raises(NotImplementedError, match=message): - pgi.mref - - class TestCheckWeights: """Test the ``WeightedGaussianMixture._check_weights`` method.""" diff --git a/tests/base/regularizations/test_regularization.py b/tests/base/regularizations/test_regularization.py index 776c07ed65..cf0c0a33d5 100644 --- a/tests/base/regularizations/test_regularization.py +++ b/tests/base/regularizations/test_regularization.py @@ -791,25 +791,18 @@ def mesh(self, request): ) def test_mref_property(self, mesh, regularization_class): """Test mref property.""" - msg = "mref has been removed, please use reference_model." reg = regularization_class(mesh) - with pytest.raises(NotImplementedError, match=msg): - reg.mref + assert not hasattr(reg, "mref") def test_regmesh_property(self, mesh): """Test regmesh property.""" - msg = "regmesh has been removed, please use regularization_mesh." 
reg = BaseRegularization(mesh) - with pytest.raises(NotImplementedError, match=msg): - reg.regmesh + assert not hasattr(reg, "regmesh") @pytest.mark.parametrize("regularization_class", (Sparse, SparseSmoothness)) def test_gradient_type(self, mesh, regularization_class): """Test gradientType argument.""" - msg = ( - "'gradientType' argument has been removed. " - "Please use 'gradient_type' instead." - ) + msg = "got an unexpected keyword argument" with pytest.raises(TypeError, match=msg): regularization_class(mesh, gradientType="total") @@ -820,10 +813,7 @@ def test_gradient_type(self, mesh, regularization_class): def test_ind_active(self, mesh, regularization_class): """Test if error is raised when passing the indActive argument.""" active_cells = np.ones(len(mesh), dtype=bool) - msg = ( - "'indActive' argument has been removed. " - "Please use 'active_cells' instead." - ) + msg = "got an unexpected keyword argument" with pytest.raises(TypeError, match=msg): regularization_class(mesh, indActive=active_cells) @@ -835,9 +825,7 @@ def test_ind_active_property(self, mesh, regularization_class): """Test if error is raised when trying to access the indActive property.""" active_cells = np.ones(len(mesh), dtype=bool) reg = regularization_class(mesh, active_cells=active_cells) - msg = "indActive has been removed, please use active_cells." - with pytest.raises(NotImplementedError, match=msg): - reg.indActive + assert not hasattr(reg, "indActive") @pytest.mark.parametrize( "regularization_class", @@ -846,7 +834,7 @@ def test_ind_active_property(self, mesh, regularization_class): def test_cell_weights_argument(self, mesh, regularization_class): """Test if error is raised when passing the cell_weights argument.""" weights = np.ones(len(mesh)) - msg = "'cell_weights' argument has been removed. Please use 'weights' instead." + msg = "got an unexpected keyword argument" with pytest.raises(TypeError, match=msg): regularization_class(mesh, cell_weights=weights) @@ -856,54 +844,8 @@ def test_cell_weights_argument(self, mesh, regularization_class): def test_cell_weights_property(self, mesh, regularization_class): """Test if error is raised when trying to access the cell_weights property.""" weights = {"weights": np.ones(len(mesh))} - msg = ( - "'cell_weights' has been removed. " - "Please access weights using the `set_weights`, `get_weights`, and " - "`remove_weights` methods." - ) reg = regularization_class(mesh, weights=weights) - with pytest.raises(AttributeError, match=msg): - reg.cell_weights - - @pytest.mark.parametrize( - "regularization_class", (BaseRegularization, WeightedLeastSquares) - ) - def test_cell_weights_setter(self, mesh, regularization_class): - """Test if error is raised when trying to set the cell_weights property.""" - msg = ( - "'cell_weights' has been removed. " - "Please access weights using the `set_weights`, `get_weights`, and " - "`remove_weights` methods." - ) - reg = regularization_class(mesh) - with pytest.raises(AttributeError, match=msg): - reg.cell_weights = "dummy variable" - - -class TestRemovedRegularizations: - """ - Test if errors are raised after creating removed regularization classes. 
- """ - - @pytest.mark.parametrize( - "regularization_class", - ( - regularization.PGIwithNonlinearRelationshipsSmallness, - regularization.PGIwithRelationships, - regularization.Simple, - regularization.SimpleSmall, - regularization.SimpleSmoothDeriv, - regularization.Small, - regularization.SmoothDeriv, - regularization.SmoothDeriv2, - regularization.Tikhonov, - ), - ) - def test_removed_class(self, regularization_class): - class_name = regularization_class.__name__ - msg = f"{class_name} has been removed, please use." - with pytest.raises(NotImplementedError, match=msg): - regularization_class() + assert not hasattr(reg, "cell_weights") @pytest.mark.parametrize( diff --git a/tests/base/test_base_pde_sim.py b/tests/base/test_base_pde_sim.py index 20501849ec..e9406d1339 100644 --- a/tests/base/test_base_pde_sim.py +++ b/tests/base/test_base_pde_sim.py @@ -1,7 +1,9 @@ import re +import pymatsolver from simpeg.base import with_property_mass_matrices, BasePDESimulation from simpeg import props, maps +from simpeg.utils import PerformanceWarning import unittest import discretize import numpy as np @@ -11,7 +13,7 @@ import scipy.sparse as sp import pytest -from simpeg.utils.solver_utils import get_default_solver +from simpeg.utils import get_default_solver # define a very simple class... @@ -812,13 +814,27 @@ def test_bad_derivative_stash(): sim.MeSigmaDeriv(u, v) -def test_solver_defaults(): +def test_solver_defaults(caplog, info_logging): mesh = discretize.TensorMesh([2, 2, 2]) sim = BasePDESimulation(mesh) - with pytest.warns(UserWarning, match="Using the default solver.*"): - solver_class = sim.solver + # Check that logging.info was created + assert "Setting the default solver" in caplog.text + # Test if default solver was properly set + assert sim.solver is get_default_solver() - assert solver_class is get_default_solver() + +@pytest.mark.parametrize("solver_class", [pymatsolver.SolverLU, pymatsolver.Solver]) +def test_performance_warning_on_solver(solver_class): + """ + Test PerformanceWarning when setting an inefficient solver. + """ + mesh = discretize.TensorMesh([2, 2, 2]) + regex = re.escape( + f"The 'pymatsolver.{solver_class.__name__}' solver might lead to high " + "computation times." 
+ ) + with pytest.warns(PerformanceWarning, match=regex): + BasePDESimulation(mesh, solver=solver_class) def test_bad_solver(): diff --git a/tests/base/test_data_misfit.py b/tests/base/test_data_misfit.py index bf1d0cc088..76b0905e51 100644 --- a/tests/base/test_data_misfit.py +++ b/tests/base/test_data_misfit.py @@ -1,3 +1,5 @@ +import re +import pytest import unittest import numpy as np @@ -5,6 +7,7 @@ from simpeg import maps from simpeg import data_misfit, simulation, survey +from simpeg import Data class DataMisfitTest(unittest.TestCase): @@ -68,6 +71,73 @@ def test_DataMisfitOrder(self): self.data.noise_floor = self.noise_floor self.dmis.test(x=self.model, random_seed=17) + def test_real_valued(self): + # Change model + new_model = self.model + 1 + + # Misfit to data + misfit_original = self.dmis(new_model) + + # Test pseudo-complex, with 0 imaginary part; misfit must be the same + d_pseudo = Data(self.sim.survey, dobs=self.data.dobs + 0j * self.data.dobs) + d_pseudo.relative_error = self.relative + d_pseudo.noise_floor = self.noise_floor + dmis_pseudo = data_misfit.L2DataMisfit(simulation=self.sim, data=d_pseudo) + misfit_pseudo = dmis_pseudo(new_model) + # assert_array_equal with strict also checks dtype + np.testing.assert_array_equal(misfit_original, misfit_pseudo, strict=True) + + # Test actually complex; misfit must be different + data_imag = self.sim.make_synthetic_data(self.model, random_seed=17) + d_complex = Data(self.sim.survey, dobs=self.data.dobs + 1j * data_imag.dobs) + d_complex.relative_error = self.relative + d_complex.noise_floor = self.noise_floor + dmis_complex = data_misfit.L2DataMisfit(simulation=self.sim, data=d_complex) + misfit_complex = dmis_complex(new_model) + assert misfit_original != misfit_complex + assert misfit_complex.dtype == np.float64 + + +class MockSimulation(simulation.BaseSimulation): + """ + Mock simulation class that returns nans or infs in the dpred array. + """ + + def __init__(self, invalid_value=np.nan): + self.invalid_value = invalid_value + super().__init__() + + def dpred(self, m=None, f=None): + a = np.arange(4, dtype=np.float64) + a[1] = self.invalid_value + return a + + +class TestNanOrInfInResidual: + """Test errors if the simulation return dpred with nans or infs.""" + + @pytest.fixture + def n_data(self): + return 4 + + @pytest.fixture + def sample_survey(self, n_data): + receivers = survey.BaseRx(np.zeros(n_data)[:, np.newaxis]) + source = survey.BaseSrc([receivers]) + return survey.BaseSurvey([source]) + + @pytest.mark.parametrize("invalid_value", [np.nan, np.inf]) + def test_error(self, sample_survey, invalid_value): + mock_simulation = MockSimulation(invalid_value) + data = Data(sample_survey) + dmisfit = data_misfit.BaseDataMisfit(data, mock_simulation) + msg = re.escape( + "The `MockSimulation.dpred()` method returned an array that contains " + "`nan`s and/or `inf`s." 
+ ) + with pytest.raises(ValueError, match=msg): + dmisfit.residual(m=None) + if __name__ == "__main__": unittest.main() diff --git a/tests/base/test_directives.py b/tests/base/test_directives.py index 65fbb0f7f2..ea39e2413f 100644 --- a/tests/base/test_directives.py +++ b/tests/base/test_directives.py @@ -1,8 +1,16 @@ +import re +from collections import namedtuple +from datetime import datetime +import pathlib import unittest +import warnings +from statistics import harmonic_mean + import pytest import numpy as np import discretize +import simpeg from simpeg import ( maps, directives, @@ -13,6 +21,7 @@ simulation, ) from simpeg.data_misfit import L2DataMisfit +from simpeg.potential_fields import gravity from simpeg.potential_fields import magnetics as mag import shutil @@ -21,14 +30,26 @@ class directivesValidation(unittest.TestCase): + + def test_error_irls_and_beta_scheduling(self): + """ + Test if validation error when ``UpdateIRLS`` and ``BetaSchedule`` are present. + """ + directives_list = directives.DirectiveList( + directives.UpdateIRLS(), + directives.BetaSchedule(coolingFactor=2, coolingRate=1), + ) + msg = "Beta scheduling is handled by the" + with pytest.raises(AssertionError, match=msg): + directives_list.validate() + def test_validation_pass(self): betaest = directives.BetaEstimate_ByEig() - IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=3, beta_tol=1e-2) - beta_schedule = directives.BetaSchedule(coolingFactor=2, coolingRate=1) + IRLS = directives.UpdateIRLS() update_Jacobi = directives.UpdatePreconditioner() - dList = [betaest, IRLS, beta_schedule, update_Jacobi] + dList = [betaest, IRLS, update_Jacobi] directiveList = directives.DirectiveList(*dList) self.assertTrue(directiveList.validate()) @@ -36,11 +57,10 @@ def test_validation_pass(self): def test_validation_fail(self): betaest = directives.BetaEstimate_ByEig() - IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=3, beta_tol=1e-2) + IRLS = directives.UpdateIRLS() update_Jacobi = directives.UpdatePreconditioner() - beta_schedule = directives.BetaSchedule(coolingFactor=2, coolingRate=1) - dList = [betaest, update_Jacobi, IRLS, beta_schedule] + dList = [betaest, update_Jacobi, IRLS] directiveList = directives.DirectiveList(*dList) with self.assertRaises(AssertionError): @@ -58,15 +78,24 @@ def test_validation_initial_beta_fail(self): def test_validation_warning(self): betaest = directives.BetaEstimate_ByEig() - IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=3, beta_tol=1e-2) - beta_schedule = directives.BetaSchedule(coolingFactor=2, coolingRate=1) - dList = [betaest, IRLS, beta_schedule] + IRLS = directives.UpdateIRLS() + dList = [betaest, IRLS] directiveList = directives.DirectiveList(*dList) with pytest.warns(UserWarning): self.assertTrue(directiveList.validate()) +def test_directive_list_iterable(): + directs = [ + directives.UpdateIRLS(), + directives.BetaSchedule(coolingFactor=2, coolingRate=1), + ] + directives_list = directives.DirectiveList(*directs) + for item1, item2 in zip(directives_list, directs): + assert item1 is item2 + + class ValidationInInversion(unittest.TestCase): def setUp(self): mesh = discretize.TensorMesh([4, 4, 4]) @@ -117,7 +146,7 @@ def test_validation_in_inversion(self): betaest = directives.BetaEstimate_ByEig() # Here is where the norms are applied - IRLS = directives.Update_IRLS(f_min_change=1e-4, minGNiter=3, beta_tol=1e-2) + IRLS = directives.UpdateIRLS(f_min_change=1e-4) update_Jacobi = directives.UpdatePreconditioner() sensitivity_weights = 
directives.UpdateSensitivityWeights() with self.assertRaises(AssertionError): @@ -342,16 +371,34 @@ def test_irls_directive(self): irls_directive.max_irls_iterations = 2 assert irls_directive.stopping_criteria() + expected_target = self.dmis.nD # Test beta re-adjustment down invProb.phi_d = 4.0 irls_directive.misfit_tolerance = 0.1 irls_directive.adjust_cooling_schedule() - assert irls_directive.cooling_factor == 2.0 + + ratio = invProb.phi_d / expected_target + expected_factor = harmonic_mean([4 / 3, ratio]) + np.testing.assert_allclose(irls_directive.cooling_factor, expected_factor) # Test beta re-adjustment up - invProb.phi_d = 0.5 + invProb.phi_d = 1 / 2 + ratio = invProb.phi_d / expected_target + expected_factor = harmonic_mean([1 / 2, ratio]) + + irls_directive.adjust_cooling_schedule() + np.testing.assert_allclose(irls_directive.cooling_factor, expected_factor) + + # Test beta no-adjustment + irls_directive.cooling_factor = ( + 2.0 # set this to something not 1 to make sure it changes to 1. + ) + + invProb.phi_d = expected_target * ( + 1 + irls_directive.misfit_tolerance * 0.5 + ) # something within the relative tolerance irls_directive.adjust_cooling_schedule() - assert irls_directive.cooling_factor == 0.5 + assert irls_directive.cooling_factor == 1 def test_spherical_weights(self): reg = regularization.Sparse(self.mesh) @@ -426,53 +473,6 @@ def test_save_output_dict(RegClass): assert "x SparseSmoothness.norm" in out_dict -class TestDeprecatedArguments: - """ - Test if directives raise errors after passing deprecated arguments. - """ - - def test_debug(self): - """ - Test if InversionDirective raises error after passing 'debug'. - """ - msg = "'debug' property has been removed. Please use 'verbose'." - with pytest.raises(TypeError, match=msg): - directives.InversionDirective(debug=True) - - -class TestUpdateSensitivityWeightsRemovedArgs: - """ - Test if `UpdateSensitivityWeights` raises errors after passing removed arguments. - """ - - def test_every_iter(self): - """ - Test if `UpdateSensitivityWeights` raises error after passing `everyIter`. - """ - msg = "'everyIter' property has been removed. Please use 'every_iteration'." - with pytest.raises(TypeError, match=msg): - directives.UpdateSensitivityWeights(everyIter=True) - - def test_threshold(self): - """ - Test if `UpdateSensitivityWeights` raises error after passing `threshold`. - """ - msg = "'threshold' property has been removed. Please use 'threshold_value'." - with pytest.raises(TypeError, match=msg): - directives.UpdateSensitivityWeights(threshold=True) - - def test_normalization(self): - """ - Test if `UpdateSensitivityWeights` raises error after passing `normalization`. - """ - msg = ( - "'normalization' property has been removed. " - "Please define normalization using 'normalization_method'." - ) - with pytest.raises(TypeError, match=msg): - directives.UpdateSensitivityWeights(normalization=True) - - class TestUpdateSensitivityNormalization: """ Test the `normalization` property and setter in `UpdateSensitivityWeights` @@ -578,9 +578,9 @@ def test_beta_estimate_max_derivative(self): assert directive.random_seed == random_seed -class TestDeprecateSeedProperty: +class TestRemovedSeedProperty: """ - Test deprecation of seed property. + Test removal of seed property. 
""" CLASSES = ( @@ -590,63 +590,31 @@ class TestDeprecateSeedProperty: directives.ScalingMultipleDataMisfits_ByEig, ) - def get_message_duplicated_error(self, old_name, new_name, version="v0.24.0"): + def get_message_removed_error(self, old_name, new_name, version="v0.24.0"): msg = ( - f"Cannot pass both '{new_name}' and '{old_name}'." - f"'{old_name}' has been deprecated and will be removed in " + f"'{old_name}' has been removed in " f" SimPEG {version}, please use '{new_name}' instead." ) return msg - def get_message_deprecated_warning(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - - @pytest.mark.parametrize("directive", CLASSES) - def test_warning_argument(self, directive): - """ - Test if warning is raised after passing ``seed`` to the constructor. - """ - msg = self.get_message_deprecated_warning("seed", "random_seed") - seed = 42135 - with pytest.warns(FutureWarning, match=msg): - directive_instance = directive(seed=42135) - assert directive_instance.random_seed == seed - @pytest.mark.parametrize("directive", CLASSES) - def test_error_duplicated_argument(self, directive): + def test_error_argument(self, directive): """ - Test error after passing ``seed`` and ``random_seed`` to the constructor. + Test if error is raised after passing ``seed`` to the constructor. """ - msg = self.get_message_duplicated_error("seed", "random_seed") + msg = self.get_message_removed_error("seed", "random_seed") with pytest.raises(TypeError, match=msg): - directive(seed=42, random_seed=42) + directive(seed=42135) @pytest.mark.parametrize("directive", CLASSES) - def test_warning_accessing_property(self, directive): + def test_error_accessing_property(self, directive): """ - Test warning when trying to access the ``seed`` property. + Test error when trying to access the ``seed`` property. """ directive_obj = directive(random_seed=42) - msg = "seed has been deprecated, please use random_seed" - with pytest.warns(FutureWarning, match=msg): - seed = directive_obj.seed - np.testing.assert_allclose(seed, directive_obj.random_seed) - - @pytest.mark.parametrize("directive", CLASSES) - def test_warning_setter(self, directive): - """ - Test warning when trying to set the ``seed`` property. - """ - directive_obj = directive(random_seed=42) - msg = "seed has been deprecated, please use random_seed" - new_seed = 35 - with pytest.warns(FutureWarning, match=msg): - directive_obj.seed = new_seed - np.testing.assert_allclose(directive_obj.random_seed, new_seed) + msg = "seed has been removed, please use random_seed" + with pytest.raises(NotImplementedError, match=msg): + directive_obj.seed class TestUpdateIRLS: @@ -711,5 +679,625 @@ def test_end_iter_irls_threshold(self, mesh, data_misfit): assert sparse_regularization.irls_threshold == irls_threshold +class DummySaveEveryIteration(directives.SaveEveryIteration): + """ + Dummy non-abstract class to test SaveEveryIteration. + """ + + @property + def file_abs_path(self) -> pathlib.Path: + """ + Simple implementation of abstract property file_abs_path. 
+ """ + return self.directory / self.name + + +class MockOpt: + """Mock Opt object.""" + + def __init__(self, xc=None, maxIter=100): + if xc is None: + xc = np.random.default_rng(seed=42).uniform(size=23) + self.xc = xc + self.maxIter = maxIter + + +class MockInvProb: + """Mock InvProb object.""" + + def __init__(self, opt): + self.opt = opt + + +class MockInversion: + """Mock Inversion object.""" + + def __init__(self, xc=None, maxIter=100): + opt = MockOpt(xc=xc, maxIter=maxIter) + inv_prob = MockInvProb(opt) + self.invProb = inv_prob + + +class TestSaveEveryIteration: + """Test the SaveEveryIteration directive.""" + + @pytest.mark.parametrize("directory", ["dummy/path", "../dummy/path"]) + def test_directory(self, directory): + """Test the directory property.""" + directive = DummySaveEveryIteration(directory=directory) + assert directive.directory == pathlib.Path(directory).resolve() + + def test_no_directory(self): + """Test if the directory property is None when on_disk is False""" + directive = DummySaveEveryIteration(directory="blah", on_disk=False) + assert directive._directory is None + + # accessing the directive property should raise error when on_disk is False + msg = re.escape("directory' is only available") + with pytest.raises(AttributeError, match=msg): + directive.directory + + # using the directive setter should raise error when on_disk is False + + @pytest.mark.parametrize("directory", ["dummy/path", "../dummy/path"]) + def test_directory_setter(self, directory): + """Test the directory setter.""" + directive = DummySaveEveryIteration() + directive.directory = directory + assert directive.directory == pathlib.Path(directory).resolve() + + def test_directory_setter_error_none(self): + """Test error when trying to set directory=None if on_disk is True.""" + directive = DummySaveEveryIteration() + msg = re.escape("Directory is not optional if 'on_disk==True'") + with pytest.raises(ValueError, match=msg): + directive.directory = None + + def test_name(self): + """Test the name property.""" + name = "blah" + directive = DummySaveEveryIteration(name=name) + assert directive.name == name + + def test_name_setter(self): + """Test the name setter.""" + directive = DummySaveEveryIteration() + name = "blah" + directive.name = name + assert directive.name == name + + def test_mkdir(self, tmp_path): + """Test _mkdir_and_check_output_file.""" + directory = tmp_path / "blah" + directive = DummySaveEveryIteration(directory=directory) + directive._mkdir_and_check_output_file() + assert directory.exists() + fname = directory / directive.name + assert not fname.exists() + + @pytest.mark.parametrize( + "should_exist", [True, False], ids=["should_exist", "should_not_exist"] + ) + def test_check_output_file_exists(self, tmp_path, should_exist): + """Test _mkdir_and_check_output_file when file exists.""" + directory = tmp_path / "blah" + directory.mkdir(parents=True) + directive = DummySaveEveryIteration(directory=directory) + fname = directive.file_abs_path + fname.touch() + assert fname.exists() + + if should_exist: + # No warning should be raised if exists and should exist + with warnings.catch_warnings(): + warnings.simplefilter("error") + directive._mkdir_and_check_output_file(should_exist=should_exist) + else: + # Warning should be raised if exists and should not exist + with pytest.warns(UserWarning, match="Overwriting file"): + directive._mkdir_and_check_output_file(should_exist=should_exist) + + @pytest.mark.parametrize( + "should_exist", [True, False], ids=["should_exist", 
"should_not_exist"] + ) + def test_check_output_file_doesnt_exist(self, tmp_path, should_exist): + """Test _mkdir_and_check_output_file when file doesn't exist.""" + directory = tmp_path / "blah" + directory.mkdir(parents=True) + directive = DummySaveEveryIteration(directory=directory) + fname = directive.file_abs_path + + if should_exist: + # Warning should be raised if doesn't exist and should exist + with pytest.warns( + UserWarning, match=re.escape(f"File {fname} was not found") + ): + directive._mkdir_and_check_output_file(should_exist=should_exist) + else: + # No warning should be raised if doesn't exist and should not exist + with warnings.catch_warnings(): + warnings.simplefilter("error") + directive._mkdir_and_check_output_file(should_exist=should_exist) + + @pytest.mark.parametrize("opt", [True, False], ids=["with-opt", "without-opt"]) + def test_initialize(self, opt): + """ + Test the initialize method. + """ + directive = DummySaveEveryIteration() + if opt: + directive.inversion = MockInversion(maxIter=10000) + + expected_start_time = datetime.now().strftime("%Y-%m-%d-%H-%M") + directive.initialize() + assert directive._start_time == expected_start_time + + if opt: + # maxIter was set to 10000, so the _iter_format should be "05d" + assert directive._iter_format == "05d" + + def test_time_iter_no_opt(self): + directive = DummySaveEveryIteration(name="dummy") + time_name = directive._time_file_name.name + assert directive._time_iter_file_name.name == time_name + "_###" + + def test_deprecated_fileName(self): + directive = DummySaveEveryIteration(name="dummy") + + with pytest.warns(FutureWarning, match=r"'fileName' has been deprecated .*"): + f_name = directive.fileName + + assert f_name == "dummy" + + +class TestSaveModelEveryIteration: + """Test the SaveModelEveryIteration directive.""" + + def test_on_disk(self): + """ + Test on_disk is always True. + """ + directive = directives.SaveModelEveryIteration() + assert directive.on_disk + + def test_on_disk_argument(self): + """ + Test warning after passing on_disk as argument. + """ + msg = re.escape("The 'on_disk' argument is ignored") + with pytest.warns(UserWarning, match=msg): + directive = directives.SaveModelEveryIteration(on_disk=False) + assert directive.on_disk + + def test_on_disk_setter(self): + """ + Test error after trying to modify value of on_disk. + """ + directive = directives.SaveModelEveryIteration() + msg = re.escape("Cannot modify value of 'on_disk'") + with pytest.raises(AttributeError, match=msg): + directive.on_disk = False + + def test_end_iter(self, tmp_path): + """ + Test if endIter saves the model to a file. + """ + directory = tmp_path / "dummy_dir" + directive = directives.SaveModelEveryIteration(directory=directory) + + # Add a mock inversion to the directive + mock_inversion = MockInversion() + directive.inversion = mock_inversion + + # Initialize and call endIter + directive.initialize() + directive.endIter() + + # Check if file exists + assert directory.exists() + assert directive.file_abs_path.exists() + array = np.load(directive.file_abs_path) + + np.testing.assert_equal(array, mock_inversion.invProb.opt.xc) + + +class BaseTestOutputDirective: + """ + Base class to test directives that need a full inversion. + """ + + def get_inversion_problem(self): + """ + Simple gravity inversion problem to test the directive. 
+ """ + # Mesh + # ---- + h = [(1.0, 6)] + mesh = discretize.TensorMesh([h, h, h], origin="CCN") + + # Survey + # ------ + x = np.linspace(-2.0, 2.0, 5) + xx, yy = np.meshgrid(x, x) + zz = 1.0 * np.ones_like(xx) + receiver_locations = np.vstack([c.ravel() for c in (xx, yy, zz)]).T + receivers = gravity.Point(locations=receiver_locations, components="gz") + source_field = gravity.SourceField([receivers]) + survey = gravity.Survey(source_field) + + # Simulation + # ---------- + mapping = simpeg.maps.IdentityMap(mesh=mesh) + simulation = gravity.Simulation3DIntegral( + mesh=mesh, survey=survey, rhoMap=mapping, engine="choclo" + ) + + # Synthetic data + # -------------- + model = np.zeros(mesh.n_cells) + model = simpeg.utils.model_builder.add_block( + mesh.cell_centers, + model, + p0=[-1.0, -1.0, -2.0], + p1=[1.0, 1.0, -1.0], + prop_value=200, + ) + synthetic_data = simulation.make_synthetic_data( + model, + relative_error=0.1, + random_seed=4, + add_noise=True, + ) + + # Inversion problem + # ----------------- + data_misfit = simpeg.data_misfit.L2DataMisfit( + data=synthetic_data, simulation=simulation + ) + regularization = simpeg.regularization.WeightedLeastSquares(mesh) + optimizer = optimization.ProjectedGNCG() + inv_prob = simpeg.inverse_problem.BaseInvProblem( + data_misfit, regularization, optimizer + ) + + return inv_prob + + def get_directives(self, save_output_directive: directives.SaveEveryIteration): + """ + Get list of directives to use in the sample gravity inversion. + + Include the save_output_directive passed as argument in the list. + """ + sensitivity_weights = simpeg.directives.UpdateSensitivityWeights( + every_iteration=False + ) + update_jacobi = simpeg.directives.UpdatePreconditioner( + update_every_iteration=True + ) + starting_beta = simpeg.directives.BetaEstimate_ByEig(beta0_ratio=10) + beta_schedule = simpeg.directives.BetaSchedule(coolingFactor=2.0, coolingRate=1) + target_misfit = simpeg.directives.TargetMisfit(chifact=1.0) + + directives_list = [ + sensitivity_weights, + starting_beta, + update_jacobi, + beta_schedule, + save_output_directive, + target_misfit, + ] + return directives_list + + +class TestSaveOutputEveryIteration(BaseTestOutputDirective): + """ + Test the SaveOutputEveryIteration directive. 
+ """ + + @pytest.mark.parametrize("on_disk", [True, False]) + def test_initialize(self, tmp_path, on_disk): + """Test the initialize method.""" + directory = tmp_path / "dummy" + directive = directives.SaveOutputEveryIteration( + on_disk=on_disk, directory=directory + ) + directive.initialize() + + # Check directory was created + if on_disk: + assert directory.exists() + + # Check that the file was created + assert directive.file_abs_path is not None + assert directive.file_abs_path.exists() + + # Check header in file + with directive.file_abs_path.open(mode="r") as f: + lines = f.readlines() + assert len(lines) == 1 + assert "beta" in lines[0] + assert "phi_d" in lines[0] + assert "phi_m" in lines[0] + else: + assert directive.file_abs_path is None + + assert directive.beta == [] + assert directive.phi_d == [] + assert directive.phi_m == [] + assert directive.phi_m_smooth_z == [] + assert directive.phi == [] + + @pytest.mark.parametrize( + ("on_disk", "test_load_results"), + [ + pytest.param( + True, + True, + marks=pytest.mark.xfail( + reason="bug in load_results", raises=AttributeError + ), + ), + (True, False), + (False, None), + ], + ids=["on_disk-test_load_results", "on_disk", "not_on_disk"], + ) + def test_end_iter(self, tmp_path, on_disk, test_load_results): + """Test the endIter method.""" + inv_prob = self.get_inversion_problem() + + directory = tmp_path / "dummy" + directive = directives.SaveOutputEveryIteration( + directory=directory, on_disk=on_disk + ) + directives_list = self.get_directives(directive) + inversion = simpeg.inversion.BaseInversion(inv_prob, directives_list) + + initial_model = np.zeros(inv_prob.dmisfit.nP) + inversion.run(initial_model) + + # Check that lists are not empty + lists = [ + "beta", + "phi_d", + "phi_m", + "phi_m_small", + "phi_m_smooth_x", + "phi_m_smooth_y", + "phi_m_smooth_z", + "phi", + ] + for attribute in lists: + assert getattr(directive, attribute) + + # Just exit the test if on_disk is False + if not on_disk: + return + + # Check that the file was created if on_disk + assert directive.file_abs_path is not None + assert directive.file_abs_path.exists() + + # Check content of file + with directive.file_abs_path.open(mode="r") as f: + lines = f.readlines() + assert "beta" in lines[0] + assert "phi_d" in lines[0] + assert "phi_m" in lines[0] + assert len(lines) > 1 + + # Test load_results + if test_load_results: + original_values = {attr: getattr(directive, attr) for attr in lists} + for attribute in lists: + # Clean the lists + setattr(directive, attribute, []) + + # Load results and check if they are the same as the original ones + directive.load_results() + + # Check that the original values were recovered + for attribute in lists: + np.testing.assert_equal( + getattr(directive, attribute), + original_values[attribute], + ) + + def test_load_results_error(self, tmp_path): + """ + Test error when no file_name is passed to load_results. + """ + directory = tmp_path / "dummy" + directive = directives.SaveOutputEveryIteration( + directory=directory, on_disk=False + ) + msg = re.escape("'file_name' is a required argument") + with pytest.raises(TypeError, match=msg): + directive.load_results() + + +class TestSaveOutputDictEveryIteration(BaseTestOutputDirective): + """ + Test the SaveOutputDictEveryIteration directive. 
+ """ + + def test_initialize(self): + """Test the initialize method.""" + directive = directives.SaveOutputDictEveryIteration() + directive.initialize() + + # Check outDict was created and is empty + assert hasattr(directive, "outDict") + assert not directive.outDict + + @pytest.mark.parametrize("on_disk", [True, False], ids=["on_disk", "not_on_disk"]) + def test_end_iter(self, tmp_path, on_disk): + """Test the endIter method.""" + inv_prob = self.get_inversion_problem() + + directory = tmp_path / "dummy" + directive = directives.SaveOutputDictEveryIteration( + directory=directory, on_disk=on_disk + ) + directives_list = self.get_directives(directive) + inversion = simpeg.inversion.BaseInversion(inv_prob, directives_list) + + initial_model = np.zeros(inv_prob.dmisfit.nP) + inversion.run(initial_model) + + # Check if the outDict is not empty + assert directive.outDict + fields = [ + "iter", + "beta", + "phi_d", + "phi_m", + "f", + "m", + "dpred", + ] + for iteration in directive.outDict: + for field in fields: + assert field in directive.outDict[iteration] + + # Check if output files were created + if on_disk: + assert directory.exists() + assert directory.is_dir() + files = list(directory.iterdir()) + assert len(files) == len(directive.outDict) + + def test_deprecated(self): + with pytest.warns(FutureWarning, match=".*saveOnDisk has been deprecated.*"): + directive = directives.SaveOutputDictEveryIteration(saveOnDisk=True) + + assert directive.on_disk + + @pytest.mark.parametrize("on_disk", [True, False], ids=["on_disk", "not_on_disk"]) + def test_file_abs_path_optional(self, on_disk): + directive = directives.SaveOutputDictEveryIteration(on_disk=on_disk) + if on_disk: + assert directive.file_abs_path is not None + else: + assert directive.file_abs_path is None + + +class MockJointInvProb: + + def __init__(self): + self.opt = namedtuple("Opt", "f iter cg_count")(0.1, 10, 200) + self.betas = [1, 2, 3] + self.phi_d_list = [0.1, 0.2, 0.3] + self.phi_m_list = [0.2, 0.3, 0.4] + self.lambd = 1e5 + self.phi_sim = 10 + + +class TestSimMeasureSaveOutputEveryIteration: + + @pytest.mark.parametrize("on_disk", [True, False]) + def test_initialize(self, tmp_path, on_disk): + """Test the initialize method.""" + directory = tmp_path / "dummy" + directive = directives.SimilarityMeasureSaveOutputEveryIteration( + on_disk=on_disk, directory=directory + ) + directive.initialize() + + # Check directory was created + if on_disk: + assert directory.exists() + + # Check that the file was created + assert directive.file_abs_path is not None + assert directive.file_abs_path.exists() + + # Check header in file + with directive.file_abs_path.open(mode="r") as f: + lines = f.readlines() + assert len(lines) == 1 + assert "betas" in lines[0] + assert "joint_phi_d" in lines[0] + assert "joint_phi_m" in lines[0] + assert "phi_sim" in lines[0] + else: + assert directive.file_abs_path is None + + assert directive.betas == [] + assert directive.lambd == [] + assert directive.phi_d == [] + assert directive.phi_m == [] + assert directive.phi_sim == [] + assert directive.phi == [] + + @pytest.mark.parametrize("on_disk", [True, False]) + def test_end_iter(self, tmp_path, on_disk): + directory = tmp_path / "dummy" + directive = directives.SimilarityMeasureSaveOutputEveryIteration( + on_disk=on_disk, directory=directory + ) + directive.initialize() + + joint_problem = MockJointInvProb() + joint_inv = namedtuple("JointInversion", "invProb")(joint_problem) + directive.inversion = joint_inv + + assert directive.invProb is 
joint_inv.invProb + + directive.endIter() + + assert directive.betas == [joint_problem.betas] + assert directive.phi_d == [joint_problem.phi_d_list] + assert directive.phi_m == [joint_problem.phi_m_list] + assert directive.lambd == [joint_problem.lambd] + assert directive.phi_sim == [joint_problem.phi_sim] + assert directive.phi == [joint_problem.opt.f] + + if on_disk: + out_file = directive.file_abs_path + assert out_file.exists() + + n_lines = 0 + with open(out_file) as f: + while f.readline(): + n_lines += 1 + assert n_lines == 2 # header plus one line + + @pytest.mark.xfail( + reason="np.loadtxt will not work to read in log file that has nested lists." + ) + @pytest.mark.parametrize("pass_file_name", [True, False]) + def test_load_results(self, tmp_path, pass_file_name): + directory = tmp_path / "dummy" + directive = directives.SimilarityMeasureSaveOutputEveryIteration( + directory=directory, on_disk=True + ) + directive.initialize() + + joint_problem = MockJointInvProb() + joint_inv = namedtuple("JointInversion", "invProb")(joint_problem) + directive.inversion = joint_inv + + directive.endIter() + + if pass_file_name: + log_file = directive.file_abs_path + directive.load_results(log_file) + else: + directive.load_results() + + assert directive.betas == [joint_problem.betas] + assert directive.phi_d == [joint_problem.phi_d_list] + assert directive.phi_m == [joint_problem.phi_m_list] + assert directive.lambd == [joint_problem.lambd] + assert directive.phi_sim == [joint_problem.phi_sim] + assert directive.phi == [joint_problem.opt.f] + + def test_load_results_error(self): + directive = directives.SimilarityMeasureSaveOutputEveryIteration(on_disk=False) + with pytest.raises(TypeError, match=r"'file_name' is a required argument.*"): + directive.load_results() + + if __name__ == "__main__": unittest.main() diff --git a/tests/base/test_directives_deprecations.py b/tests/base/test_directives_deprecations.py new file mode 100644 index 0000000000..0c930b6b1d --- /dev/null +++ b/tests/base/test_directives_deprecations.py @@ -0,0 +1,18 @@ +""" +Test deprecation of public directives submodules. +""" + +import pytest +import importlib + +REGEX = r"The `simpeg\.directives\.[a-z_]+` submodule has been deprecated, " +DEPRECATED_SUBMODULES = ("directives", "pgi_directives", "sim_directives") + + +@pytest.mark.parametrize("submodule", DEPRECATED_SUBMODULES) +def test_deprecations(submodule): + """ + Test FutureWarning when trying to import the deprecated modules. 
+    """
+    with pytest.warns(FutureWarning, match=REGEX):
+        importlib.import_module(f"simpeg.directives.{submodule}")
diff --git a/tests/base/test_inversion.py b/tests/base/test_inversion.py
new file mode 100644
index 0000000000..6e19703e9f
--- /dev/null
+++ b/tests/base/test_inversion.py
@@ -0,0 +1,69 @@
+import discretize
+import pytest
+import numpy as np
+
+import simpeg.optimization as smp_opt
+import simpeg.inversion as smp_inv
+import simpeg.directives as smp_drcs
+import simpeg.simulation
+
+from simpeg.inverse_problem import BaseInvProblem
+
+SIMPEG_OPTIMIZERS = [
+    smp_opt.ProjectedGradient,
+    smp_opt.BFGS,
+    smp_opt.InexactGaussNewton,
+    smp_opt.SteepestDescent,
+    smp_opt.ProjectedGNCG,
+]
+
+
+@pytest.fixture(params=SIMPEG_OPTIMIZERS)
+def inversion(request):
+    opt = request.param(maxIter=0)
+
+    mesh = discretize.TensorMesh([10])
+    n_d = 5
+    sim = simpeg.simulation.ExponentialSinusoidSimulation(
+        mesh=mesh,
+        n_kernels=n_d,
+        model_map=simpeg.maps.IdentityMap(mesh),
+    )
+    m0 = np.zeros(mesh.n_cells)
+    data = sim.make_synthetic_data(
+        m0, add_noise=True, relative_error=0, noise_floor=0.1, random_seed=0
+    )
+    dmis = simpeg.data_misfit.L2DataMisfit(data, sim)
+    reg = simpeg.regularization.Smallness(mesh)
+
+    prob = BaseInvProblem(dmis, reg, opt)
+    return smp_inv.BaseInversion(prob)
+
+
+@pytest.mark.parametrize("dlist", [[], [smp_drcs.UpdatePreconditioner()]])
+def test_bfgs_init_logic(inversion, dlist, caplog, info_logging):
+    dlist = smp_drcs.DirectiveList(*dlist, inversion=inversion)
+    inversion.directiveList = dlist
+
+    inv_prb = inversion.invProb
+
+    # Always defaults to trying to initialize bfgs with reg.deriv2
+    assert inv_prb.init_bfgs
+
+    m0 = np.zeros(10)
+    inversion.run(m0)
+    captured = caplog.text
+
+    if isinstance(inv_prb.opt, smp_opt.InexactGaussNewton) and any(
+        isinstance(dr, smp_drcs.UpdatePreconditioner) for dr in dlist
+    ):
+        assert not inv_prb.init_bfgs
+        assert "bfgsH0" not in captured
+    elif isinstance(inv_prb.opt, (smp_opt.BFGS, smp_opt.InexactGaussNewton)):
+        assert inv_prb.init_bfgs
+        assert "bfgsH0" in captured
+    else:
+        assert inv_prb.init_bfgs  # defaults to True even if opt would not use it.
+        assert (
+            "bfgsH0" not in captured
+        )  # But shouldn't say anything if it doesn't use it.
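
Taken together, the directive and RNG changes above amount to a small user-facing migration: the `Update_IRLS` + `BetaSchedule` pair becomes a single `UpdateIRLS` directive that owns the beta cooling itself, and the removed `seed` keyword becomes `random_seed`. The sketch below is illustrative only and is not part of the patch; it reuses the toy `ExponentialSinusoidSimulation` setup from the new tests/base/test_inversion.py, so every call it makes appears in the tests in this changeset:

    import discretize
    import numpy as np

    import simpeg
    from simpeg import directives

    # Toy 1D simulation, mirroring the new tests/base/test_inversion.py fixture.
    mesh = discretize.TensorMesh([10])
    sim = simpeg.simulation.ExponentialSinusoidSimulation(
        mesh=mesh, n_kernels=5, model_map=simpeg.maps.IdentityMap(mesh)
    )
    model = np.zeros(mesh.n_cells)

    # RNG state is now passed as `random_seed`; the old `seed` keyword is gone.
    data = sim.make_synthetic_data(
        model, add_noise=True, relative_error=0, noise_floor=0.1, random_seed=0
    )

    # UpdateIRLS now handles beta scheduling on its own: adding a separate
    # BetaSchedule next to it makes DirectiveList.validate() raise.
    directive_list = directives.DirectiveList(
        directives.BetaEstimate_ByEig(),
        directives.UpdateIRLS(f_min_change=1e-4),
        directives.UpdatePreconditioner(),
    )
    directive_list.validate()
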
diff --git a/tests/base/test_maps.py b/tests/base/test_maps.py index ba7ac19581..966ef8a819 100644 --- a/tests/base/test_maps.py +++ b/tests/base/test_maps.py @@ -32,6 +32,7 @@ MAPS_TO_EXCLUDE_2D = [ "ComboMap", "ActiveCells", + "EffectiveSusceptibilityMap", "InjectActiveCells", "LogMap", "LinearMap", @@ -59,6 +60,7 @@ MAPS_TO_EXCLUDE_3D = [ "ComboMap", "ActiveCells", + "EffectiveSusceptibilityMap", "InjectActiveCells", "LogMap", "LinearMap", @@ -213,6 +215,10 @@ def test_transforms_logMap_reciprocalMap(self): mapping = maps.ReciprocalMap(self.mesh3) self.assertTrue(mapping.test(random_seed=42)) + def test_EffectiveSusceptibilityMap(self): + mapping = maps.EffectiveSusceptibilityMap(50000.0, mesh=self.mesh3) + self.assertTrue(mapping.test(random_seed=42)) + def test_Mesh2MeshMap(self): mapping = maps.Mesh2Mesh([self.mesh22, self.mesh2]) self.assertTrue(mapping.test(random_seed=42)) @@ -748,8 +754,8 @@ def test_linearity(): assert all(not m.is_linear for m in non_linear_maps) -class DeprecatedIndActive: - """Base class to test deprecated ``actInd`` and ``indActive`` arguments in maps.""" +class RemovedIndActive: + """Base class to test removed ``actInd`` and ``indActive`` arguments in maps.""" @pytest.fixture def mesh(self): @@ -763,267 +769,174 @@ def active_cells(self, mesh): active_cells[0] = False return active_cells - def get_message_duplicated_error(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"Cannot pass both '{new_name}' and '{old_name}'." - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - - def get_message_deprecated_warning(self, old_name, new_name, version="v0.24.0"): + def get_message_removed_error(self, old_name, new_name, version="v0.24.0"): msg = ( - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." + f"'{old_name}' was removed in " + f"SimPEG {version}, please use '{new_name}' instead." ) return msg -class TestParametricPolyMap(DeprecatedIndActive): - """Test deprecated ``actInd`` in ParametricPolyMap.""" +class TestParametricPolyMap(RemovedIndActive): + """Test removed ``actInd`` in ParametricPolyMap.""" - def test_warning_argument(self, mesh, active_cells): + def test_error_argument(self, mesh, active_cells): """ - Test if warning is raised after passing ``actInd`` to the constructor. - """ - msg = self.get_message_deprecated_warning("actInd", "active_cells") - with pytest.warns(FutureWarning, match=msg): - map_instance = maps.ParametricPolyMap(mesh, 2, actInd=active_cells) - np.testing.assert_allclose(map_instance.active_cells, active_cells) - - def test_error_duplicated_argument(self, mesh, active_cells): + Test if error is raised after passing ``actInd`` to the constructor. """ - Test error after passing ``actInd`` and ``active_cells`` to the constructor. - """ - msg = self.get_message_duplicated_error("actInd", "active_cells") + msg = "Unsupported keyword argument actInd" with pytest.raises(TypeError, match=msg): - maps.ParametricPolyMap( - mesh, 2, active_cells=active_cells, actInd=active_cells - ) + maps.ParametricPolyMap(mesh, 2, actInd=active_cells) - def test_warning_accessing_property(self, mesh, active_cells): + def test_error_accessing_property(self, mesh, active_cells): """ - Test warning when trying to access the ``actInd`` property. + Test error when trying to access the ``actInd`` property. 
""" mapping = maps.ParametricPolyMap(mesh, 2, active_cells=active_cells) - msg = "actInd has been deprecated, please use active_cells" - with pytest.warns(FutureWarning, match=msg): - old_act_ind = mapping.actInd - np.testing.assert_allclose(mapping.active_cells, old_act_ind) + msg = "actInd has been removed, please use active_cells" + with pytest.raises(NotImplementedError, match=msg): + mapping.actInd - def test_warning_setter(self, mesh, active_cells): + def test_error_setter(self, mesh, active_cells): """ - Test warning when trying to set the ``actInd`` property. + Test error when trying to set the ``actInd`` property. """ mapping = maps.ParametricPolyMap(mesh, 2, active_cells=active_cells) - # Define new active cells to pass to the setter - new_active_cells = active_cells.copy() - new_active_cells[-4:] = False - msg = "actInd has been deprecated, please use active_cells" - with pytest.warns(FutureWarning, match=msg): - mapping.actInd = new_active_cells - np.testing.assert_allclose(mapping.active_cells, new_active_cells) + msg = "actInd has been removed, please use active_cells" + with pytest.raises(NotImplementedError, match=msg): + mapping.actInd = active_cells -class TestMesh2Mesh(DeprecatedIndActive): - """Test deprecated ``indActive`` in ``Mesh2Mesh``.""" +class TestMesh2Mesh(RemovedIndActive): + """Test removed ``indActive`` in ``Mesh2Mesh``.""" @pytest.fixture def meshes(self, mesh): return [mesh, deepcopy(mesh)] - def test_warning_argument(self, meshes, active_cells): + def test_error_argument(self, meshes, active_cells): """ - Test if warning is raised after passing ``indActive`` to the constructor. - """ - msg = self.get_message_deprecated_warning("indActive", "active_cells") - with pytest.warns(FutureWarning, match=msg): - mapping_instance = maps.Mesh2Mesh(meshes, indActive=active_cells) - np.testing.assert_allclose(mapping_instance.active_cells, active_cells) - - def test_error_duplicated_argument(self, meshes, active_cells): + Test if error is raised after passing ``indActive`` to the constructor. """ - Test error after passing ``indActive`` and ``active_cells`` to the constructor. - """ - msg = self.get_message_duplicated_error("indActive", "active_cells") + msg = self.get_message_removed_error("indActive", "active_cells") with pytest.raises(TypeError, match=msg): - maps.Mesh2Mesh(meshes, active_cells=active_cells, indActive=active_cells) + maps.Mesh2Mesh(meshes, indActive=active_cells) - def test_warning_accessing_property(self, meshes, active_cells): + def test_error_accessing_property(self, meshes, active_cells): """ - Test warning when trying to access the ``indActive`` property. + Test error when trying to access the ``indActive`` property. """ mapping = maps.Mesh2Mesh(meshes, active_cells=active_cells) - msg = "indActive has been deprecated, please use active_cells" - with pytest.warns(FutureWarning, match=msg): - old_act_ind = mapping.indActive - np.testing.assert_allclose(mapping.active_cells, old_act_ind) + msg = "indActive has been removed, please use active_cells" + with pytest.raises(NotImplementedError, match=msg): + mapping.indActive def test_warning_setter(self, meshes, active_cells): """ Test warning when trying to set the ``indActive`` property. 
""" mapping = maps.Mesh2Mesh(meshes, active_cells=active_cells) - # Define new active cells to pass to the setter - new_active_cells = active_cells.copy() - new_active_cells[-4:] = False - msg = "indActive has been deprecated, please use active_cells" - with pytest.warns(FutureWarning, match=msg): - mapping.indActive = new_active_cells - np.testing.assert_allclose(mapping.active_cells, new_active_cells) - + msg = "indActive has been removed, please use active_cells" + with pytest.raises(NotImplementedError, match=msg): + mapping.indActive = active_cells -class TestInjectActiveCells(DeprecatedIndActive): - """Test deprecated ``indActive`` and ``valInactive`` in ``InjectActiveCells``.""" - def test_indactive_warning_argument(self, mesh, active_cells): - """ - Test if warning is raised after passing ``indActive`` to the constructor. - """ - msg = self.get_message_deprecated_warning("indActive", "active_cells") - with pytest.warns(FutureWarning, match=msg): - mapping_instance = maps.InjectActiveCells(mesh, indActive=active_cells) - np.testing.assert_allclose(mapping_instance.active_cells, active_cells) +class TestInjectActiveCells(RemovedIndActive): + """Test removed ``indActive`` and ``valInactive`` in ``InjectActiveCells``.""" - def test_indactive_error_duplicated_argument(self, mesh, active_cells): + def test_indactive_error_argument(self, mesh, active_cells): """ - Test error after passing ``indActive`` and ``active_cells`` to the constructor. + Test if error is raised after passing ``indActive`` to the constructor. """ - msg = self.get_message_duplicated_error("indActive", "active_cells") + msg = self.get_message_removed_error("indActive", "active_cells") with pytest.raises(TypeError, match=msg): - maps.InjectActiveCells( - mesh, active_cells=active_cells, indActive=active_cells - ) + maps.InjectActiveCells(mesh, indActive=active_cells) - def test_indactive_warning_accessing_property(self, mesh, active_cells): + def test_indactive_error_accessing_property(self, mesh, active_cells): """ - Test warning when trying to access the ``indActive`` property. + Test error when trying to access the ``indActive`` property. """ mapping = maps.InjectActiveCells(mesh, active_cells=active_cells) - msg = "indActive has been deprecated, please use active_cells" - with pytest.warns(FutureWarning, match=msg): - old_act_ind = mapping.indActive - np.testing.assert_allclose(mapping.active_cells, old_act_ind) + msg = "indActive has been removed, please use active_cells" + with pytest.raises(NotImplementedError, match=msg): + mapping.indActive - def test_indactive_warning_setter(self, mesh, active_cells): + def test_indactive_error_setter(self, mesh, active_cells): """ - Test warning when trying to set the ``indActive`` property. + Test error when trying to set the ``indActive`` property. 
""" mapping = maps.InjectActiveCells(mesh, active_cells=active_cells) - # Define new active cells to pass to the setter - new_active_cells = active_cells.copy() - new_active_cells[-4:] = False - msg = "indActive has been deprecated, please use active_cells" - with pytest.warns(FutureWarning, match=msg): - mapping.indActive = new_active_cells - np.testing.assert_allclose(mapping.active_cells, new_active_cells) + msg = "indActive has been removed, please use active_cells" + with pytest.raises(NotImplementedError, match=msg): + mapping.indActive = active_cells @pytest.mark.parametrize("value_inactive", (3.14, np.array([1]))) - def test_valinactive_warning_argument(self, mesh, active_cells, value_inactive): - """ - Test if warning is raised after passing ``valInactive`` to the constructor. - """ - msg = self.get_message_deprecated_warning("valInactive", "value_inactive") - with pytest.warns(FutureWarning, match=msg): - mapping_instance = maps.InjectActiveCells( - mesh, active_cells=active_cells, valInactive=value_inactive - ) - # Ensure that the value passed to valInactive was correctly used - expected = np.zeros_like(active_cells, dtype=np.float64) - expected[~active_cells] = value_inactive - np.testing.assert_allclose(mapping_instance.value_inactive, expected) - - @pytest.mark.parametrize("valInactive", (3.14, np.array([3.14]))) - @pytest.mark.parametrize("value_inactive", (3.14, np.array([3.14]))) - def test_valinactive_error_duplicated_argument( - self, mesh, active_cells, valInactive, value_inactive - ): + def test_valinactive_error_argument(self, mesh, active_cells, value_inactive): """ - Test error after passing ``valInactive`` and ``value_inactive`` to the - constructor. + Test if error is raised after passing ``valInactive`` to the constructor. """ - msg = self.get_message_duplicated_error("valInactive", "value_inactive") + msg = self.get_message_removed_error("valInactive", "value_inactive") with pytest.raises(TypeError, match=msg): maps.InjectActiveCells( - mesh, - active_cells=active_cells, - value_inactive=value_inactive, - valInactive=valInactive, + mesh, active_cells=active_cells, valInactive=value_inactive ) - def test_valinactive_warning_accessing_property(self, mesh, active_cells): + def test_valinactive_error_accessing_property(self, mesh, active_cells): """ - Test warning when trying to access the ``valInactive`` property. + Test error when trying to access the ``valInactive`` property. """ mapping = maps.InjectActiveCells( mesh, active_cells=active_cells, value_inactive=3.14 ) - msg = "valInactive has been deprecated, please use value_inactive" - with pytest.warns(FutureWarning, match=msg): - old_value = mapping.valInactive - np.testing.assert_allclose(mapping.value_inactive, old_value) + msg = "valInactive has been removed, please use value_inactive" + with pytest.raises(NotImplementedError, match=msg): + mapping.valInactive - def test_valinactive_warning_setter(self, mesh, active_cells): + def test_valinactive_error_setter(self, mesh, active_cells): """ - Test warning when trying to set the ``valInactive`` property. + Test error when trying to set the ``valInactive`` property. 
""" mapping = maps.InjectActiveCells( mesh, active_cells=active_cells, value_inactive=3.14 ) - msg = "valInactive has been deprecated, please use value_inactive" - with pytest.warns(FutureWarning, match=msg): + msg = "valInactive has been removed, please use value_inactive" + with pytest.raises(NotImplementedError, match=msg): mapping.valInactive = 4.5 - np.testing.assert_allclose(mapping.value_inactive[~mapping.active_cells], 4.5) -class TestParametric(DeprecatedIndActive): - """Test deprecated ``indActive`` in parametric mappings.""" +class TestParametric(RemovedIndActive): + """Test removed ``indActive`` in parametric mappings.""" CLASSES = (BaseParametric, ParametricLayer, ParametricBlock, ParametricEllipsoid) @pytest.mark.parametrize("map_class", CLASSES) - def test_indactive_warning_argument(self, mesh, active_cells, map_class): + def test_indactive_error_argument(self, mesh, active_cells, map_class): """ - Test if warning is raised after passing ``indActive`` to the constructor. + Test if error is raised after passing ``indActive`` to the constructor. """ - msg = self.get_message_deprecated_warning("indActive", "active_cells") - with pytest.warns(FutureWarning, match=msg): - mapping_instance = map_class(mesh, indActive=active_cells) - np.testing.assert_allclose(mapping_instance.active_cells, active_cells) - - @pytest.mark.parametrize("map_class", CLASSES) - def test_indactive_error_duplicated_argument(self, mesh, active_cells, map_class): - """ - Test error after passing ``indActive`` and ``active_cells`` to the constructor. - """ - msg = self.get_message_duplicated_error("indActive", "active_cells") + msg = self.get_message_removed_error("indActive", "active_cells") with pytest.raises(TypeError, match=msg): - map_class(mesh, active_cells=active_cells, indActive=active_cells) + map_class(mesh, indActive=active_cells) @pytest.mark.parametrize("map_class", CLASSES) - def test_indactive_warning_accessing_property(self, mesh, active_cells, map_class): + def test_indactive_error_accessing_property(self, mesh, active_cells, map_class): """ - Test warning when trying to access the ``indActive`` property. + Test error when trying to access the ``indActive`` property. """ mapping = map_class(mesh, active_cells=active_cells) - msg = "indActive has been deprecated, please use active_cells" - with pytest.warns(FutureWarning, match=msg): - old_act_ind = mapping.indActive - np.testing.assert_allclose(mapping.active_cells, old_act_ind) + msg = "indActive has been removed, please use active_cells" + with pytest.raises(NotImplementedError, match=msg): + mapping.indActive @pytest.mark.parametrize("map_class", CLASSES) - def test_indactive_warning_setter(self, mesh, active_cells, map_class): + def test_indactive_error_setter(self, mesh, active_cells, map_class): """ - Test warning when trying to set the ``indActive`` property. + Test error when trying to set the ``indActive`` property. 
""" mapping = map_class(mesh, active_cells=active_cells) - # Define new active cells to pass to the setter - new_active_cells = active_cells.copy() - new_active_cells[-4:] = False - msg = "indActive has been deprecated, please use active_cells" - with pytest.warns(FutureWarning, match=msg): - mapping.indActive = new_active_cells - np.testing.assert_allclose(mapping.active_cells, new_active_cells) + msg = "indActive has been removed, please use active_cells" + with pytest.raises(NotImplementedError, match=msg): + mapping.indActive = active_cells if __name__ == "__main__": diff --git a/tests/base/test_model_utils.py b/tests/base/test_model_utils.py index 4cc34beaf1..c77d71fe6f 100644 --- a/tests/base/test_model_utils.py +++ b/tests/base/test_model_utils.py @@ -1,5 +1,3 @@ -import unittest - import numpy as np import pytest from discretize import TensorMesh @@ -7,7 +5,7 @@ from simpeg import utils -class DepthWeightingTest(unittest.TestCase): +class TestDepthWeighting: def test_depth_weighting_3D(self): # Mesh dh = 5.0 @@ -16,19 +14,20 @@ def test_depth_weighting_3D(self): hz = [(dh, 15)] mesh = TensorMesh([hx, hy, hz], "CCN") - actv = np.random.randint(0, 2, mesh.n_cells) == 1 + rng = np.random.default_rng(seed=42) + actv = rng.integers(low=0, high=2, size=mesh.n_cells, dtype=bool) - r_loc = 0.1 # Depth weighting + r_loc = 0.1 wz = utils.depth_weighting( mesh, r_loc, active_cells=actv, exponent=5, threshold=0 ) - reference_locs = ( - np.random.rand(1000, 3) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) - + mesh.origin + # Define reference locs at random locations + reference_locs = rng.uniform( + low=mesh.nodes.min(axis=0), high=mesh.nodes.max(axis=0), size=(1000, 3) ) - reference_locs[:, -1] = r_loc + reference_locs[:, -1] = r_loc # set them all at the same elevation wz2 = utils.depth_weighting( mesh, reference_locs, active_cells=actv, exponent=5, threshold=0 @@ -44,8 +43,8 @@ def test_depth_weighting_3D(self): np.testing.assert_allclose(wz, wz2) - with self.assertRaises(ValueError): - wz2 = utils.depth_weighting(mesh, np.random.rand(10, 3, 3)) + with pytest.raises(ValueError): + utils.depth_weighting(mesh, rng.random(size=(10, 3, 3))) def test_depth_weighting_2D(self): # Mesh @@ -54,7 +53,8 @@ def test_depth_weighting_2D(self): hz = [(dh, 15)] mesh = TensorMesh([hx, hz], "CN") - actv = np.random.randint(0, 2, mesh.n_cells) == 1 + rng = np.random.default_rng(seed=42) + actv = rng.integers(low=0, high=2, size=mesh.n_cells, dtype=bool) r_loc = 0.1 # Depth weighting @@ -62,11 +62,11 @@ def test_depth_weighting_2D(self): mesh, r_loc, active_cells=actv, exponent=5, threshold=0 ) - reference_locs = ( - np.random.rand(1000, 2) * (mesh.nodes.max(axis=0) - mesh.nodes.min(axis=0)) - + mesh.origin + # Define reference locs at random locations + reference_locs = rng.uniform( + low=mesh.nodes.min(axis=0), high=mesh.nodes.max(axis=0), size=(1000, 2) ) - reference_locs[:, -1] = r_loc + reference_locs[:, -1] = r_loc # set them all at the same elevation wz2 = utils.depth_weighting( mesh, reference_locs, active_cells=actv, exponent=5, threshold=0 @@ -195,7 +195,3 @@ def test_removed_indactive(mesh): msg = "'indActive' argument has been removed. " "Please use 'active_cells' instead." 
with pytest.raises(TypeError, match=msg): utils.depth_weighting(mesh, 0, indActive=active_cells) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/base/test_optimizers.py b/tests/base/test_optimizers.py index 45ef588f25..f8c80bb3c8 100644 --- a/tests/base/test_optimizers.py +++ b/tests/base/test_optimizers.py @@ -1,67 +1,505 @@ -import unittest +import re +import pytest + from simpeg.utils import sdiag import numpy as np +import numpy.testing as npt import scipy.sparse as sp from simpeg import optimization from discretize.tests import get_quadratic, rosenbrock TOL = 1e-2 +OPTIMIZERS = [ + optimization.GaussNewton, + optimization.InexactGaussNewton, + optimization.BFGS, + optimization.ProjectedGradient, + optimization.SteepestDescent, + optimization.ProjectedGNCG, +] + +OPT_KWARGS = { + optimization.GaussNewton: {}, + optimization.InexactGaussNewton: dict(cg_rtol=1e-6, cg_maxiter=100), + optimization.BFGS: dict(maxIter=100, tolG=1e-2, maxIterLS=20), + optimization.ProjectedGradient: dict(maxIter=100, cg_rtol=1e-6, cg_maxiter=100), + optimization.SteepestDescent: dict(maxIter=10000, tolG=1e-5, tolX=1e-8, eps=1e-8), + optimization.ProjectedGNCG: dict(cg_rtol=1e-6, cg_maxiter=100), +} + + +@pytest.mark.parametrize("optimizer", OPTIMIZERS) +@pytest.mark.parametrize( + ("func", "x_true", "x0"), + [ + (rosenbrock, np.array([1.0, 1.0]), np.array([0, 0])), + ( + get_quadratic(sp.identity(2).tocsr(), np.array([-5, 5])), + np.array([5, -5]), + np.zeros(2), + ), + ], + ids=["rosenbrock", "quadratic"], +) +class TestUnboundOptimizers: + + def test_minimizer(self, optimizer, func, x_true, x0): + opt = optimizer(**OPT_KWARGS[optimizer]) + xopt = opt.minimize(func, x0) + npt.assert_allclose(xopt, x_true, rtol=TOL) + + +@pytest.mark.parametrize("optimizer", OPTIMIZERS) +class TestNanInit: + + def test_nan(self, optimizer): + opt = optimizer(maxIter=0) + with pytest.raises(ValueError, match=re.escape("x0 has a nan.")): + opt.minimize(rosenbrock, np.array([np.nan, 0.0])) + + def test_no_nan(self, optimizer): + opt = optimizer(maxIter=0) + opt.minimize(rosenbrock, np.array([0.0, 0.0])) + + +def test_NewtonRoot(): + def fun(x, return_g=True): + if return_g: + return np.sin(x), sdiag(np.cos(x)) + return np.sin(x) + + x = np.array([np.pi - 0.3, np.pi + 0.1, 0]) + xopt = optimization.NewtonRoot(comments=False).root(fun, x) + x_true = np.array([np.pi, np.pi, 0]) + npt.assert_allclose(xopt, x_true, rtol=0, atol=TOL) + + +@pytest.mark.parametrize( + "optimizer", filter(lambda x: issubclass(x, optimization.Bounded), OPTIMIZERS) +) +@pytest.mark.parametrize( + ("lower", "upper", "x_true", "x0"), + [ + (-2, 2, np.array([2.0, -2.0]), np.zeros(2)), + (-2, 8, np.array([5, -2]), np.zeros(2)), + (-8, 2, np.array([2, -5]), np.zeros(2)), + ], + ids=["both active", "lower active", "upper active"], +) +class TestBoundedOptimizers: + def test_minimizer(self, optimizer, lower, upper, x_true, x0): + func = get_quadratic(sp.identity(2).tocsr(), np.array([-5, 5])) + opt = optimizer(lower=lower, upper=upper) + xopt = opt.minimize(func, x0) + npt.assert_allclose(xopt, x_true, rtol=TOL) + + +@pytest.mark.parametrize( + ("x0", "bounded"), + [(np.array([8, 2]), False), (np.array([4, 0]), True)], + ids=["active not bound", "active and bound"], +) +def test_projected_gncg_active_not_bound_branch(x0, bounded): + # tests designed to test the branches of the + # projected gncg when a point is in the active set but not in the binding set. 
+    func = get_quadratic(sp.identity(2).tocsr(), np.array([-5, 5]))
+    opt = optimization.ProjectedGNCG(upper=8, lower=0)
+    _, g = func(x0, return_g=True, return_H=False)
+
+    opt.g = g
+    active = opt.activeSet(x0)
+    bound = opt.bindingSet(x0)
+
+    # Check that the starting point exercises the intended branch in the
+    # minimizer: `bounded` says whether every active entry is also binding.
+    assert np.any(active & ~bound) == (not bounded)
+
+    xopt = opt.minimize(func, x0)
+    x_true = np.array([5, 0])
+    npt.assert_allclose(xopt, x_true, rtol=TOL)
+
+
+@pytest.mark.parametrize("lower", [None, 0.0, np.zeros(10)])
+@pytest.mark.parametrize("upper", [None, 1.0, np.ones(10)])
+class TestBounded:
+
+    def test_project(self, lower, upper):
+        x = np.linspace(-9.5, 8.2, 10)
+        bnd = optimization.Bounded(lower=lower, upper=upper)
+
+        x_proj = bnd.projection(x)
+        if lower is not None:
+            assert x_proj.min() == 0.0
+        else:
+            assert x_proj.min() == x.min()
+
+        if upper is not None:
+            assert x_proj.max() == 1.0
+        else:
+            assert x_proj.max() == x.max()
+
+    def test_active_set(self, lower, upper):
+        x = np.linspace(-9.5, 8.2, 10)
+        bnd = optimization.Bounded(lower=lower, upper=upper)
+
+        active_set = bnd.activeSet(x)
+
+        if lower is not None:
+            assert all(active_set[x <= lower])
+        else:
+            assert not any(active_set[x <= 0])
+
+        if upper is not None:
+            assert all(active_set[x >= upper])
+        else:
+            assert not any(active_set[x >= 1])
+
+    def test_inactive_set(self, lower, upper):
+        x = np.linspace(-9.5, 8.2, 10)
+        bnd = optimization.Bounded(lower=lower, upper=upper)
+
+        inactive_set = bnd.inactiveSet(x)
+
+        if lower is not None:
+            assert not any(inactive_set[x <= lower])
+        else:
+            assert all(inactive_set[x <= 0])
+
+        if upper is not None:
+            assert not any(inactive_set[x >= upper])
+        else:
+            assert all(inactive_set[x >= 1])
+
+    def test_binding_set(self, lower, upper):
+        x = np.linspace(-9.5, 8.2, 10)
+        g = (np.ones(5)[:, None] * np.array([-1, 1])).reshape(-1)
+        assert len(x) == len(g)
+        assert g[0] == -1 and g[1] == 1 and g[2] == -1  # and so on
+        bnd = optimization.Bounded(lower=lower, upper=upper)
+        bnd.g = g
+
+        bnd_set = bnd.bindingSet(x)
+
+        if lower is not None:
+            assert all(bnd_set[(x <= lower) & (g >= 0)])
+        else:
+            assert not any(bnd_set[(x <= 0) & (g >= 0)])
+
+        if upper is not None:
+            assert all(bnd_set[(x >= upper) & (g <= 0)])
+        else:
+            assert not any(bnd_set[(x >= 1) & (g <= 0)])
+
+
+def test_bounded_kwargs_only():
+    with pytest.raises(
+        TypeError,
+        match=re.escape(
+            "Bounded.__init__() takes 1 positional argument but 2 were given"
+        ),
+    ):
+        optimization.Bounded(None)
+
+
+@pytest.mark.parametrize(
+    ("lower", "upper"),
+    [
+        (np.zeros(11), None),
+        (None, np.ones(11)),
+        (np.zeros(11), np.ones(10)),
+        (np.zeros(10), np.ones(11)),
+        (np.zeros(11), np.ones(11)),
+    ],
+    ids=["only_lower", "only_upper", "bad_lower", "bad_upper", "both_bad"],
+)
+@pytest.mark.parametrize(
+    "opt_class", [optimization.ProjectedGradient, optimization.ProjectedGNCG]
+)
+def test_bad_bounds(lower, upper, opt_class):
+    m = np.linspace(-9.5, 8.2, 10)
+    opt = opt_class(lower=lower, upper=upper)
+    with pytest.raises(RuntimeError, match="Initial model is not projectable"):
+        opt.startup(m)
+
+
+class TestInexactCGParams:
+
+    def test_defaults(self):
+        cg_pars = optimization.InexactCG()
+        assert cg_pars.cg_atol == 0.0
+        assert cg_pars.cg_rtol == 1e-1
+        assert cg_pars.cg_maxiter == 5
+
+    def test_init(self):
+        cg_pars = optimization.InexactCG(cg_rtol=1e-3, cg_atol=1e-5, cg_maxiter=10)
+        assert cg_pars.cg_atol == 1e-5
+        assert cg_pars.cg_rtol == 1e-3
+        assert cg_pars.cg_maxiter == 
10 + + def test_kwargs_only(self): + with pytest.raises( + TypeError, + match=re.escape( + "InexactCG.__init__() takes 1 positional argument but 2 were given" + ), + ): + optimization.InexactCG(1e-3) + + def test_deprecated(self): + with pytest.warns(FutureWarning, match=".*tolCG has been deprecated.*"): + cg_pars = optimization.InexactCG(tolCG=1e-3) + assert cg_pars.cg_atol == 0.0 + assert cg_pars.cg_rtol == 1e-3 + + with pytest.warns(FutureWarning, match=".*maxIterCG has been deprecated.*"): + cg_pars = optimization.InexactCG(maxIterCG=3) + assert cg_pars.cg_atol == 0.0 + assert cg_pars.cg_rtol == 1e-1 + assert cg_pars.cg_maxiter == 3 + + +class TestProjectedGradient: + + def test_defaults(self): + opt = optimization.ProjectedGradient() + assert opt.cg_rtol == 1e-1 + assert opt.cg_atol == 0.0 + assert opt.cg_maxiter == 5 + assert np.isneginf(opt.lower) + assert np.isposinf(opt.upper) + + def test_init(self): + opt = optimization.ProjectedGradient( + cg_rtol=1e-3, cg_atol=1e-5, cg_maxiter=10, lower=0.0, upper=1.0 + ) + assert opt.cg_rtol == 1e-3 + assert opt.cg_atol == 1e-5 + assert opt.cg_maxiter == 10 + assert opt.lower == 0.0 + assert opt.upper == 1.0 + + def test_kwargs_only(self): + with pytest.raises( + TypeError, + match=re.escape( + "ProjectedGradient.__init__() takes 1 positional argument but 2 were given" + ), + ): + optimization.ProjectedGradient(10) + + @pytest.mark.parametrize("on_init", [True, False], ids=["init", "attribute setter"]) + def test_deprecated_tolCG(self, on_init): + match = ".*tolCG has been deprecated.*cg_rtol.*" + if on_init: + with pytest.warns(FutureWarning, match=match): + opt = optimization.ProjectedGradient(tolCG=1e-3) + else: + opt = optimization.ProjectedGradient() + with pytest.warns(FutureWarning, match=match): + opt.tolCG = 1e-3 + + with pytest.warns(FutureWarning, match=match): + assert opt.tolCG == 1e-3 + assert opt.cg_atol == 0.0 + assert opt.cg_rtol == 1e-3 + + # test setting new changes old + opt.cg_rtol = 1e-4 + + with pytest.warns(FutureWarning, match=match): + assert opt.tolCG == 1e-4 + + @pytest.mark.parametrize("on_init", [True, False], ids=["init", "attribute setter"]) + def test_deprecated_maxIterCG(self, on_init): + + match = ".*maxIterCG has been deprecated.*" + if on_init: + with pytest.warns(FutureWarning, match=match): + opt = optimization.ProjectedGradient(maxIterCG=3) + else: + opt = optimization.ProjectedGradient() + with pytest.warns(FutureWarning, match=match): + opt.maxIterCG = 3 + + with pytest.warns(FutureWarning, match=match): + assert opt.maxIterCG == 3 + + assert opt.cg_maxiter == 3 + + # test setting new changes old + opt.cg_maxiter = 8 + with pytest.warns(FutureWarning, match=match): + assert opt.maxIterCG == 8 + + +class TestInexactGaussNewton: + + def test_defaults(self): + opt = optimization.InexactGaussNewton() + assert opt.cg_rtol == 1e-1 + assert opt.cg_atol == 0.0 + assert opt.cg_maxiter == 5 + + def test_init(self): + opt = optimization.InexactGaussNewton(cg_rtol=1e-3, cg_atol=1e-5, cg_maxiter=10) + assert opt.cg_rtol == 1e-3 + assert opt.cg_atol == 1e-5 + assert opt.cg_maxiter == 10 + + def test_kwargs_only(self): + with pytest.raises( + TypeError, + match=re.escape( + "InexactGaussNewton.__init__() takes 1 positional argument but 2 were given" + ), + ): + optimization.InexactGaussNewton(10) + + @pytest.mark.parametrize("on_init", [True, False], ids=["init", "attribute setter"]) + def test_deprecated_tolCG(self, on_init): + match = ".*tolCG has been deprecated.*cg_rtol.*" + if on_init: + with 
pytest.warns(FutureWarning, match=match): + opt = optimization.InexactGaussNewton(tolCG=1e-3) + else: + opt = optimization.InexactGaussNewton() + with pytest.warns(FutureWarning, match=match): + opt.tolCG = 1e-3 + + with pytest.warns(FutureWarning, match=match): + assert opt.tolCG == 1e-3 + assert opt.cg_atol == 0.0 + assert opt.cg_rtol == 1e-3 + + # test setting new changes old + opt.cg_rtol = 1e-4 + + with pytest.warns(FutureWarning, match=match): + assert opt.tolCG == 1e-4 + + @pytest.mark.parametrize("on_init", [True, False], ids=["init", "attribute setter"]) + def test_deprecated_maxIterCG(self, on_init): + + match = ".*maxIterCG has been deprecated.*" + if on_init: + with pytest.warns(FutureWarning, match=match): + opt = optimization.InexactGaussNewton(maxIterCG=3) + else: + opt = optimization.InexactGaussNewton() + with pytest.warns(FutureWarning, match=match): + opt.maxIterCG = 3 + + with pytest.warns(FutureWarning, match=match): + assert opt.maxIterCG == 3 + + assert opt.cg_maxiter == 3 + + # test setting new changes old + opt.cg_maxiter = 8 + with pytest.warns(FutureWarning, match=match): + assert opt.maxIterCG == 8 + + +class TestProjectedGNCG: + + @pytest.mark.parametrize("cg_tol_defaults", ["atol", "rtol", "both"]) + def test_defaults(self, cg_tol_defaults): + # testing setting the new default value of rtol if only atol is passed + if cg_tol_defaults == "rtol": + opt = optimization.ProjectedGNCG(cg_atol=1e-5) + assert opt.cg_atol == 1e-5 + assert opt.cg_rtol == 1e-3 + # testing setting the new default value of atol if only rtol is passed + elif cg_tol_defaults == "atol": + opt = optimization.ProjectedGNCG(cg_rtol=1e-4) + assert opt.cg_atol == 0.0 + assert opt.cg_rtol == 1e-4 + # test the old defaults + else: + with pytest.warns( + FutureWarning, match="The defaults for ProjectedGNCG will change.*" + ): + opt = optimization.ProjectedGNCG() + assert opt.cg_rtol == 0.0 + assert opt.cg_atol == 1e-3 + assert opt.cg_maxiter == 5 + assert np.isneginf(opt.lower) + assert np.isposinf(opt.upper) + + def test_init(self): + opt = optimization.ProjectedGNCG( + cg_rtol=1e-3, cg_atol=1e-5, cg_maxiter=10, lower=0.0, upper=1.0 + ) + assert opt.cg_rtol == 1e-3 + assert opt.cg_atol == 1e-5 + assert opt.cg_maxiter == 10 + assert opt.lower == 0.0 + assert opt.upper == 1.0 + + def test_kwargs_only(self): + with pytest.raises( + TypeError, + match=re.escape( + "ProjectedGNCG.__init__() takes 1 positional argument but 2 were given" + ), + ): + optimization.ProjectedGNCG(10) + + @pytest.mark.parametrize("on_init", [True, False], ids=["init", "attribute setter"]) + def test_deprecated_tolCG(self, on_init): + if on_init: + with pytest.warns( + FutureWarning, match=".*tolCG has been deprecated.*cg_atol.*" + ): + opt = optimization.ProjectedGNCG(tolCG=1e-5) + else: + opt = optimization.ProjectedGNCG() + with pytest.warns( + FutureWarning, match=".*tolCG has been deprecated.*cg_atol.*" + ): + opt.tolCG = 1e-5 + + with pytest.warns(FutureWarning, match=".*tolCG has been deprecated.*"): + assert opt.tolCG == 1e-5 + + assert opt.cg_atol == 1e-5 + assert opt.cg_rtol == 0.0 + + # test setting new changes old + opt.cg_atol = 1e-4 + + with pytest.warns(FutureWarning, match=".*tolCG has been deprecated.*"): + assert opt.tolCG == 1e-4 + + @pytest.mark.parametrize("on_init", [True, False], ids=["init", "attribute setter"]) + @pytest.mark.parametrize( + ("old_name", "new_name", "val1", "val2"), + [ + ("maxIterCG", "cg_maxiter", 3, 8), + ("stepActiveSet", "step_active_set", True, False), + ("stepOffBoundsFact", 
"active_set_grad_scale", 1.2, 1.4), + ], + ids=["maxIterCG", "stepActiveSet", "stepOffBoundsFact"], + ) + def test_deprecated_maxIterCG(self, on_init, old_name, new_name, val1, val2): + + match = f".*{old_name} has been deprecated.*" + if on_init: + with pytest.warns(FutureWarning, match=match): + opt = optimization.ProjectedGNCG(**{old_name: val1}) + else: + opt = optimization.ProjectedGNCG() + with pytest.warns(FutureWarning, match=match): + setattr(opt, old_name, val1) + opt.maxIterCG = 3 + + with pytest.warns(FutureWarning, match=match): + assert getattr(opt, old_name) == val1 + + assert getattr(opt, old_name) == val1 + + setattr(opt, new_name, val2) -class TestOptimizers(unittest.TestCase): - def setUp(self): - self.A = sp.identity(2).tocsr() - self.b = np.array([-5, -5]) - - def test_GN_rosenbrock(self): - GN = optimization.GaussNewton() - xopt = GN.minimize(rosenbrock, np.array([0, 0])) - x_true = np.array([1.0, 1.0]) - print("xopt: ", xopt) - print("x_true: ", x_true) - self.assertTrue(np.linalg.norm(xopt - x_true, 2) < TOL, True) - - def test_GN_quadratic(self): - GN = optimization.GaussNewton() - xopt = GN.minimize(get_quadratic(self.A, self.b), np.array([0, 0])) - x_true = np.array([5.0, 5.0]) - print("xopt: ", xopt) - print("x_true: ", x_true) - self.assertTrue(np.linalg.norm(xopt - x_true, 2) < TOL, True) - - def test_ProjGradient_quadraticBounded(self): - PG = optimization.ProjectedGradient(debug=True) - PG.lower, PG.upper = -2, 2 - xopt = PG.minimize(get_quadratic(self.A, self.b), np.array([0, 0])) - x_true = np.array([2.0, 2.0]) - print("xopt: ", xopt) - print("x_true: ", x_true) - self.assertTrue(np.linalg.norm(xopt - x_true, 2) < TOL, True) - - def test_ProjGradient_quadratic1Bound(self): - myB = np.array([-5, 1]) - PG = optimization.ProjectedGradient() - PG.lower, PG.upper = -2, 2 - xopt = PG.minimize(get_quadratic(self.A, myB), np.array([0, 0])) - x_true = np.array([2.0, -1.0]) - print("xopt: ", xopt) - print("x_true: ", x_true) - self.assertTrue(np.linalg.norm(xopt - x_true, 2) < TOL, True) - - def test_NewtonRoot(self): - def fun(x, return_g=True): - if return_g: - return np.sin(x), sdiag(np.cos(x)) - return np.sin(x) - - x = np.array([np.pi - 0.3, np.pi + 0.1, 0]) - xopt = optimization.NewtonRoot(comments=False).root(fun, x) - x_true = np.array([np.pi, np.pi, 0]) - print("Newton Root Finding") - print("xopt: ", xopt) - print("x_true: ", x_true) - self.assertTrue(np.linalg.norm(xopt - x_true, 2) < TOL, True) - - -if __name__ == "__main__": - unittest.main() + with pytest.warns(FutureWarning, match=match): + assert getattr(opt, old_name) == val2 diff --git a/tests/base/test_survey.py b/tests/base/test_survey.py new file mode 100644 index 0000000000..83ddc908c2 --- /dev/null +++ b/tests/base/test_survey.py @@ -0,0 +1,34 @@ +""" +Tests for BaseSurvey class. 
+""" + +import pytest +import numpy as np + +from simpeg.utils import Counter +from simpeg.survey import BaseSurvey, BaseRx, BaseSrc + + +class TestCounterValidation: + + @pytest.fixture + def sample_source(self): + locations = np.array([1.0, 2.0, 3.0]) + receiver = BaseRx(locations=locations) + source = BaseSrc(receiver_list=[receiver]) + return source + + def test_valid_counter(self, sample_source): + """No error should be raise after passing a valid Counter object to Survey.""" + counter = Counter() + BaseSurvey(source_list=[sample_source], counter=counter) + + def test_invalid_counter(self, sample_source): + """Test error upon invalid Counter.""" + + class InvalidCounter: + pass + + invalid_counter = InvalidCounter() + with pytest.raises(TypeError): + BaseSurvey(source_list=[sample_source], counter=invalid_counter) diff --git a/tests/base/test_survey_data.py b/tests/base/test_survey_data.py index 9b06649e07..8b67a3795d 100644 --- a/tests/base/test_survey_data.py +++ b/tests/base/test_survey_data.py @@ -1,7 +1,11 @@ +import re +import pytest import unittest import numpy as np from simpeg import survey, utils, data +from simpeg.survey import BaseRx, BaseSrc, BaseSurvey + np.random.seed(100) @@ -26,35 +30,6 @@ def setUp(self): mysurvey = survey.BaseSurvey(source_list=source_list) self.D = data.Data(mysurvey) - def test_data(self): - V = [] - for src in self.D.survey.source_list: - for rx in src.receiver_list: - v = np.random.rand(rx.nD) - V += [v] - index = self.D.index_dictionary[src][rx] - self.D.dobs[index] = v - V = np.concatenate(V) - self.assertTrue(np.all(V == self.D.dobs)) - - D2 = data.Data(self.D.survey, V) - self.assertTrue(np.all(D2.dobs == self.D.dobs)) - - def test_standard_dev(self): - V = [] - for src in self.D.survey.source_list: - for rx in src.receiver_list: - v = np.random.rand(rx.nD) - V += [v] - index = self.D.index_dictionary[src][rx] - self.D.relative_error[index] = v - self.assertTrue(np.all(v == self.D.relative_error[index])) - V = np.concatenate(V) - self.assertTrue(np.all(V == self.D.relative_error)) - - D2 = data.Data(self.D.survey, relative_error=V) - self.assertTrue(np.all(D2.relative_error == self.D.relative_error)) - def test_uniqueSrcs(self): srcs = self.D.survey.source_list srcs += [srcs[0]] @@ -72,5 +47,197 @@ def test_sourceIndex(self): ) +class BaseFixtures: + @pytest.fixture + def sample_survey(self): + """Create sample Survey object.""" + x = np.linspace(5, 10, 3) + coordinates = utils.ndgrid(x, x, np.r_[0.0]) + source_location = np.r_[0, 0, 0.0] + receivers = [survey.BaseRx(coordinates) for i in range(4)] + sources = [survey.BaseSrc([rx], location=source_location) for rx in receivers] + sources.append(survey.BaseSrc(receivers, location=source_location)) + return survey.BaseSurvey(source_list=sources) + + @pytest.fixture + def sample_data(self, sample_survey): + """Create sample Data object.""" + return data.Data(sample_survey) + + +class TestDataIndexing(BaseFixtures): + """Test indexing of Data object.""" + + def get_source_receiver_pairs(self, survey): + """Return generator for each source-receiver pair in the survey.""" + source_receiver_pairs = ( + (src, rx) for src in survey.source_list for rx in src.receiver_list + ) + return source_receiver_pairs + + def test_getitem(self, sample_data): + """Test the ``Data.__getitem__`` method.""" + # Assign dobs to the data object + dobs = np.random.default_rng(seed=42).uniform(size=sample_data.survey.nD) + sample_data.dobs = dobs + + # Iterate over source-receiver pairs + survey_slices = 
sample_data.survey.get_all_slices() + for src, rx in self.get_source_receiver_pairs(sample_data.survey): + # Check that __getitem__ returns the correct slice of the dobs + expected = dobs[survey_slices[src, rx]] + np.testing.assert_allclose(sample_data[src, rx], expected) + + def test_setitem(self, sample_data): + """Test the ``Data.__setitem__`` method.""" + # Assign dobs to the data object + rng = np.random.default_rng(seed=42) + dobs = rng.uniform(size=sample_data.survey.nD) + sample_data.dobs = dobs + + # Override the dobs array for each source-receiver pair + dobs_new = [] + for src, rx in self.get_source_receiver_pairs(sample_data.survey): + _dobs_new_piece = rng.uniform(size=rx.nD) + sample_data[src, rx] = _dobs_new_piece + dobs_new.append(_dobs_new_piece) + + # Check that the dobs in the data matches the new one + dobs_new = np.hstack(dobs_new) + np.testing.assert_allclose(dobs_new, sample_data.dobs) + + @pytest.mark.filterwarnings( + "ignore:The `index_dictionary` property has been deprecated." + ) + def test_index_dictionary(self, sample_data): + """Test the ``index_dictionary`` property.""" + # Assign dobs to the data object + dobs = np.random.default_rng(seed=42).uniform(size=sample_data.survey.nD) + sample_data.dobs = dobs + + # Check indices in index_dictionary for each source-receiver pair + survey_slices = sample_data.survey.get_all_slices() + for src, rx in self.get_source_receiver_pairs(sample_data.survey): + expected_slice_ = survey_slices[src, rx] + indices = sample_data.index_dictionary[src][rx] + np.testing.assert_allclose(dobs[indices], dobs[expected_slice_]) + + def test_deprecated_index_dictionary(self, sample_data): + """Test deprecation warning in ``index_dictionary``.""" + source = sample_data.survey.source_list[0] + receiver = source.receiver_list[0] + with pytest.warns( + FutureWarning, + match=re.escape("The `index_dictionary` property has been deprecated."), + ): + sample_data.index_dictionary[source][receiver] + + +class TestSurveySlice: + """ + Test BaseSurvey's slices for flat arrays. + """ + + def build_receiver(self, n_locs: int): + locs = np.ones(n_locs)[:, np.newaxis] + return BaseRx(locs) + + @pytest.mark.parametrize( + "all_slices", [True, False], ids=["all_slices", "single_slice"] + ) + def test_single_source(self, all_slices): + """ + Test slicing a survey with a single source. + """ + n_locs = (4, 7) + receivers = [self.build_receiver(n_locs=i) for i in n_locs] + source = BaseSrc(receivers) + test_survey = BaseSurvey([source]) + if all_slices: + expected = { + (source, receivers[0]): slice(0, 4), + (source, receivers[1]): slice(4, 4 + 7), + } + slices = test_survey.get_all_slices() + assert slices == expected + else: + assert test_survey.get_slice(source, receivers[0]) == slice(0, 4) + assert test_survey.get_slice(source, receivers[1]) == slice(4, 4 + 7) + + @pytest.mark.parametrize( + "all_slices", [True, False], ids=["all_slices", "single_slices"] + ) + def test_multiple_sources_shared_receivers(self, all_slices): + """ + Test slicing a survey with multiple sources and shared receivers. 
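+    The slices for the second source are offset by the total number of data of the first.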
+ """ + n_locs = (4, 7) + receivers = [self.build_receiver(n_locs=i) for i in n_locs] + sources = [BaseSrc(receivers), BaseSrc(receivers)] + test_survey = BaseSurvey(sources) + if all_slices: + expected = { + (sources[0], receivers[0]): slice(0, 4), + (sources[0], receivers[1]): slice(4, 4 + 7), + (sources[1], receivers[0]): slice(11, 11 + 4), + (sources[1], receivers[1]): slice(15, 15 + 7), + } + slices = test_survey.get_all_slices() + assert slices == expected + else: + assert test_survey.get_slice(sources[0], receivers[0]) == slice(0, 4) + assert test_survey.get_slice(sources[0], receivers[1]) == slice(4, 4 + 7) + assert test_survey.get_slice(sources[1], receivers[0]) == slice(11, 11 + 4) + assert test_survey.get_slice(sources[1], receivers[1]) == slice(15, 15 + 7) + + @pytest.mark.parametrize( + "all_slices", [True, False], ids=["all_slices", "single_slices"] + ) + def test_multiple_sources(self, all_slices): + """ + Test slicing a survey with multiple sources. + """ + receivers_a = [self.build_receiver(n_locs=i) for i in (4, 7)] + receivers_b = [self.build_receiver(n_locs=i) for i in (8, 3)] + srcs = [BaseSrc(receivers_a), BaseSrc(receivers_b)] + test_survey = BaseSurvey(srcs) + if all_slices: + expected = { + (srcs[0], receivers_a[0]): slice(0, 4), + (srcs[0], receivers_a[1]): slice(4, 4 + 7), + (srcs[1], receivers_b[0]): slice(11, 11 + 8), + (srcs[1], receivers_b[1]): slice(19, 19 + 3), + } + slices = test_survey.get_all_slices() + assert slices == expected + else: + assert test_survey.get_slice(srcs[0], receivers_a[0]) == slice(0, 4) + assert test_survey.get_slice(srcs[0], receivers_a[1]) == slice(4, 4 + 7) + assert test_survey.get_slice(srcs[1], receivers_b[0]) == slice(11, 11 + 8) + assert test_survey.get_slice(srcs[1], receivers_b[1]) == slice(19, 19 + 3) + + @pytest.mark.parametrize("missing", ["source", "receiver", "both"]) + def test_missing_source_receiver(self, missing): + """ + Test error on missing source-receiver pair. + """ + # Generate a survey + receivers_a = [self.build_receiver(n_locs=i) for i in (4, 7)] + receivers_b = [self.build_receiver(n_locs=i) for i in (8, 3)] + sources = [BaseSrc(receivers_a), BaseSrc(receivers_b)] + test_survey = BaseSurvey(sources) + # Try to slice with missing source-receiver pair + src, rx = sources[0], receivers_a[1] + if missing in ("source", "both"): + src = BaseSrc() # new src not in the survey + if missing in ("receiver", "both"): + rx = self.build_receiver(1) # new rx not in the survey + msg = re.escape( + f"Source '{src}' and receiver '{rx}' pair " "is not part of the survey." + ) + with pytest.raises(KeyError, match=msg): + test_survey.get_slice(src, rx) + + if __name__ == "__main__": unittest.main() diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000000..6947cdffd6 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,30 @@ +import pytest +from simpeg.utils import get_logger +import logging + + +@pytest.fixture(scope="session", autouse=True) +def quiet_logger_for_tests(request): + logger = get_logger() + + init_level = logger.level + # default solver log is issued at the INFO level. + # set the logger to the higher WARNING level to + # ignore the default solver messages. 
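+    # The original level is restored in the teardown after the yield.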
+ logger.setLevel(logging.WARNING) + + yield + + logger.setLevel(init_level) + + +@pytest.fixture() +def info_logging(): + # provide a fixture to temporarily set the logging level to info + logger = get_logger() + init_level = logger.level + logger.setLevel(logging.INFO) + + yield + + logger.setLevel(init_level) diff --git a/tests/em/em1d/test_EM1D_FD_fwd.py b/tests/em/em1d/test_EM1D_FD_fwd.py index 8a62a1398f..88a7498d5a 100644 --- a/tests/em/em1d/test_EM1D_FD_fwd.py +++ b/tests/em/em1d/test_EM1D_FD_fwd.py @@ -9,6 +9,7 @@ vertical_magnetic_field_horizontal_loop as mag_field, ) import empymod +import pytest class EM1D_FD_test_failures(unittest.TestCase): @@ -548,5 +549,62 @@ def solution(res): self.assertLess(err, 1e-4) +@pytest.mark.parametrize( + "rx_class", + [fdem.receivers.PointMagneticField, fdem.receivers.PointMagneticFieldSecondary], +) +@pytest.mark.parametrize("n_locs1", [1, 4]) +@pytest.mark.parametrize("n_locs2", [1, 4]) +@pytest.mark.parametrize("orientation", ["x", "y", "z"]) +@pytest.mark.parametrize("component", ["real", "imag", "both"]) +def test_rx_loc_shapes(rx_class, n_locs1, n_locs2, orientation, component): + offsets = np.full(n_locs1, 100.0) + rx1_locs = np.pad(offsets[:, None], ((0, 0), (0, 2)), constant_values=0) + offsets = np.full(n_locs2, 100.0) + rx2_locs = np.pad(offsets[:, None], ((0, 0), (0, 2)), constant_values=0) + + rx_list = [ + rx_class(rx1_locs, orientation=orientation, component=component), + rx_class(rx2_locs, orientation=orientation, component=component), + ] + n_d = n_locs1 + n_locs2 + if component == "both": + n_d *= 2 + + src = fdem.sources.MagDipole(rx_list, frequency=0.1) + srv = fdem.Survey(src) + + sim = fdem.Simulation1DLayered(survey=srv, sigma=[1]) + d = sim.dpred(None) + + # assert the shape is correct + assert d.shape == (n_d,) + + # every value should be the same... + d1 = d[srv.get_slice(src, rx_list[0])] + d2 = d[srv.get_slice(src, rx_list[1])] + + if component == "both": + d1 = d1[::2] + 1j * d1[1::2] + d2 = d2[::2] + 1j * d2[1::2] + d = np.r_[d1, d2] + np.testing.assert_allclose(d, d[0], rtol=1e-12) + + sim.sigmaMap = maps.IdentityMap(nP=1) + # make sure forming J works + J = sim.getJ(np.ones(1)) + assert J.shape == (n_d, 1) + + # and all of its values are the same too: + j1 = J[srv.get_slice(src, rx_list[0]), 0] + j2 = J[srv.get_slice(src, rx_list[1]), 0] + + if component == "both": + j1 = j1[::2] + 1j * j1[1::2] + j2 = j2[::2] + 1j * j2[1::2] + J = np.r_[j1, j2] + np.testing.assert_allclose(J, J[0], rtol=1e-12) + + if __name__ == "__main__": unittest.main() diff --git a/tests/em/em1d/test_EM1D_FD_getJ.py b/tests/em/em1d/test_EM1D_FD_getJ.py new file mode 100644 index 0000000000..cf18744e2f --- /dev/null +++ b/tests/em/em1d/test_EM1D_FD_getJ.py @@ -0,0 +1,107 @@ +""" +Test the getJ method of FDEM 1D simulation. 
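+Checks that passing different maps leads to different J matrices.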
+""" + +import numpy as np +import simpeg.electromagnetics.frequency_domain as fdem +from simpeg import maps + + +def create_simulation_and_conductivities(identity_mapping: bool): + # Create Survey + # ------------- + # Source properties + frequencies = np.r_[382, 1822, 7970, 35920, 130100] # frequencies in Hz + source_location = np.array([0.0, 0.0, 30.0]) # (3, ) numpy.array_like + source_orientation = "z" # "x", "y" or "z" + moment = 1.0 # dipole moment in Am^2 + + # Receiver properties + receiver_locations = np.array([10.0, 0.0, 30.0]) # or (N, 3) numpy.ndarray + receiver_orientation = "z" # "x", "y" or "z" + data_type = "ppm" # "secondary", "total" or "ppm" + + source_list = [] # create empty list for source objects + + # loop over all sources + for freq in frequencies: + # Define receivers that measure real and imaginary component + # magnetic field data in ppm. + receiver_list = [] + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + receiver_locations, + orientation=receiver_orientation, + data_type=data_type, + component="real", + ) + ) + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + receiver_locations, + orientation=receiver_orientation, + data_type=data_type, + component="imag", + ) + ) + + # Define a magnetic dipole source at each frequency + source_list.append( + fdem.sources.MagDipole( + receiver_list=receiver_list, + frequency=freq, + location=source_location, + orientation=source_orientation, + moment=moment, + ) + ) + + # Define the survey + survey = fdem.survey.Survey(source_list) + + # Defining a 1D Layered Earth Model + # --------------------------------- + # Define layer thicknesses (m) + thicknesses = np.array([20.0, 40.0]) + + # Define layer conductivities (S/m) + conductivities = np.r_[0.1, 1.0, 0.1] + + # Define a mapping + n_layers = len(conductivities) + model_mapping = ( + maps.IdentityMap(nP=n_layers) if identity_mapping else maps.ExpMap(nP=n_layers) + ) + + # Define the Forward Simulation, Predict Data and Plot + # ---------------------------------------------------- + simulation = fdem.Simulation1DLayered( + survey=survey, + thicknesses=thicknesses, + sigmaMap=model_mapping, + ) + + return simulation, conductivities + + +def test_getJ(): + """ + Test if getJ returns different J matrices after passing different maps. + """ + dpreds, jacobians = [], [] + + # Compute dpred and J using an identity map and an exp map + for identity_mapping in (True, False): + simulation, conductivities = create_simulation_and_conductivities( + identity_mapping + ) + model = conductivities if identity_mapping else np.log(conductivities) + dpreds.append(simulation.dpred(model)) + jac = simulation.getJ(model) + jacobians.append(jac) + + # The two dpreds should be equal + assert np.allclose(*dpreds) + + # The two J matrices should not be equal + assert not np.allclose(*jacobians, atol=0.0) diff --git a/tests/em/em1d/test_EM1D_FD_jac_layers2.py b/tests/em/em1d/test_EM1D_FD_jac_layers2.py new file mode 100644 index 0000000000..dbbe2154b8 --- /dev/null +++ b/tests/em/em1d/test_EM1D_FD_jac_layers2.py @@ -0,0 +1,490 @@ +from simpeg import maps +from discretize import tests, TensorMesh +import simpeg.electromagnetics.frequency_domain as fdem +import numpy as np +from scipy.constants import mu_0 +from scipy.sparse import diags + + +class TestEM1D_FD_Jacobian_MagDipole: + + # Tests 2nd order convergence of Jvec and Jtvec for magnetic dipole sources. 
+ # - All src and rx orientations + # - All rx components + # - Span many frequencies + # - Tests derivatives wrt sigma, mu, thicknesses and h + def setup_class(self): + # Layers and topography + nearthick = np.logspace(-1, 1, 5) + deepthick = np.logspace(1, 2, 10) + thicknesses = np.r_[nearthick, deepthick] + topo = np.r_[0.0, 0.0, 100.0] + + # Survey Geometry + height = 1e-5 + src_location = np.array([0.0, 0.0, 100.0 + height]) + rx_location = np.array([5.0, 5.0, 100.0 + height]) + frequencies = np.logspace(1, 8, 9) + orientations = ["x", "y", "z"] + components = ["real", "imag", "both"] + + # Define sources and receivers + source_list = [] + for f in frequencies: + for tx_orientation in orientations: + receiver_list = [] + + for rx_orientation in orientations: + for comp in components: + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + rx_location, orientation=rx_orientation, component=comp + ) + ) + + source_list.append( + fdem.sources.MagDipole( + receiver_list, + frequency=f, + location=src_location, + orientation=tx_orientation, + ) + ) + + # Survey + survey = fdem.Survey(source_list) + + self.topo = topo + self.survey = survey + self.showIt = False + self.height = height + self.frequencies = frequencies + self.thicknesses = thicknesses + self.nlayers = len(thicknesses) + 1 + + wire_map = maps.Wires( + ("mu", self.nlayers), + ("sigma", self.nlayers), + ("h", 1), + ("thicknesses", self.nlayers - 1), + ) + self.sigma_map = maps.ExpMap(nP=self.nlayers) * wire_map.sigma + self.mu_map = maps.ExpMap(nP=self.nlayers) * wire_map.mu + self.thicknesses_map = maps.ExpMap(nP=self.nlayers - 1) * wire_map.thicknesses + nP = len(source_list) + surject_mesh = TensorMesh([np.ones(nP)]) + self.h_map = maps.SurjectFull(surject_mesh) * maps.ExpMap(nP=1) * wire_map.h + + sim = fdem.Simulation1DLayered( + survey=self.survey, + sigmaMap=self.sigma_map, + muMap=self.mu_map, + thicknessesMap=self.thicknesses_map, + hMap=self.h_map, + topo=self.topo, + ) + + self.sim = sim + + def test_EM1DFDJvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 2 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[3] = mu_blk + + # General model + m_1D = np.r_[ + np.log(mu), np.log(sig), np.log(self.height), np.log(self.thicknesses) + ] + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + # return Hz + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + dm = m_1D * 0.5 + + passed = tests.check_derivative( + derChk, m_1D, num=4, dx=dm, plotIt=False, eps=1e-15, random_seed=9186724 + ) + assert passed + + def test_EM1DFDJtvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 2 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[3] = mu_blk + + # General model + m_true = np.r_[ + np.log(mu), np.log(sig), np.log(self.height), np.log(self.thicknesses) + ] + + dobs = self.sim.dpred(m_true) + + m_ini = np.r_[ + np.log(np.ones(self.nlayers) * 1.5 * mu_half), + np.log(np.ones(self.nlayers) * sigma_half), + np.log(0.5 * self.height), + np.log(self.thicknesses) * 0.9, + ] + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2.0 * self.sim.Jtvec( + m, dr + ) # 
derivative of ||dpred - dobs||^2 gives factor of 2 + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative( + derChk, m_ini, num=4, plotIt=False, eps=1e-27, random_seed=2345 + ) + assert passed + + def test_jtjdiag(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 2 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[3] = mu_blk + + # General model + model = np.r_[ + np.log(mu), np.log(sig), np.log(self.height), np.log(self.thicknesses) + ] + + rng = np.random.default_rng(seed=42) + weights_matrix = diags(rng.random(size=self.sim.survey.nD)) + jtj_diag = self.sim.getJtJdiag(model, W=weights_matrix) + + J = self.sim.getJ(model) + expected = np.diag(J.T @ weights_matrix.T @ weights_matrix @ J) + np.testing.assert_allclose(expected, jtj_diag) + + +class TestEM1D_FD_Jacobian_CircularLoop: + # Tests 2nd order convergence of Jvec and Jtvec for horizontal loop sources. + # - All rx orientations + # - All rx components + # - Span many frequencies + # - Tests derivatives wrt sigma, mu, thicknesses and h + def setup_class(self): + nearthick = np.logspace(-1, 1, 5) + deepthick = np.logspace(1, 2, 10) + thicknesses = np.r_[nearthick, deepthick] + topo = np.r_[0.0, 0.0, 100.0] + height = 1e-5 + + src_location = np.array([0.0, 0.0, 100.0 + height]) + rx_location = np.array([0.0, 0.0, 100.0 + height]) + frequencies = np.logspace(1, 8, 9) + orientations = ["x", "y", "z"] + components = ["real", "imag", "both"] + I = 1.0 + a = 10.0 + + # Define sources and receivers + source_list = [] + for f in frequencies: + receiver_list = [] + + for rx_orientation in orientations: + for comp in components: + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + rx_location, orientation=rx_orientation, component=comp + ) + ) + + source_list.append( + fdem.sources.CircularLoop( + receiver_list, f, src_location, radius=a, current=I + ) + ) + + # Survey + survey = fdem.Survey(source_list) + + self.topo = topo + self.survey = survey + self.showIt = False + self.height = height + self.frequencies = frequencies + self.thicknesses = thicknesses + self.nlayers = len(thicknesses) + 1 + + nP = len(source_list) + + wire_map = maps.Wires( + ("sigma", self.nlayers), + ("mu", self.nlayers), + ("thicknesses", self.nlayers - 1), + ("h", 1), + ) + self.sigma_map = maps.ExpMap(nP=self.nlayers) * wire_map.sigma + self.mu_map = maps.ExpMap(nP=self.nlayers) * wire_map.mu + self.thicknesses_map = maps.ExpMap(nP=self.nlayers - 1) * wire_map.thicknesses + surject_mesh = TensorMesh([np.ones(nP)]) + self.h_map = maps.SurjectFull(surject_mesh) * maps.ExpMap(nP=1) * wire_map.h + + sim = fdem.Simulation1DLayered( + survey=self.survey, + sigmaMap=self.sigma_map, + muMap=self.mu_map, + thicknessesMap=self.thicknesses_map, + hMap=self.h_map, + topo=self.topo, + ) + + self.sim = sim + + def test_EM1DFDJvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 2 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[3] = mu_blk + + # General model + m_1D = np.r_[ + np.log(sig), np.log(mu), np.log(self.thicknesses), np.log(self.height) + ] + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + # return Hz + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + dm = 
m_1D * 0.5 + + passed = tests.check_derivative( + derChk, m_1D, num=4, dx=dm, plotIt=False, eps=1e-15, random_seed=664 + ) + assert passed + + def test_EM1DFDJtvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 2 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[3] = mu_blk + + # General model + m_true = np.r_[ + np.log(sig), np.log(mu), np.log(self.thicknesses), np.log(self.height) + ] + + dobs = self.sim.dpred(m_true) + + m_ini = np.r_[ + np.log(np.ones(self.nlayers) * sigma_half), + np.log(np.ones(self.nlayers) * 1.5 * mu_half), + np.log(self.thicknesses) * 0.9, + np.log(0.5 * self.height), + ] + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative( + derChk, m_ini, num=4, plotIt=False, eps=1e-27, random_seed=42 + ) + assert passed + + +class TestEM1D_FD_Jacobian_LineCurrent: + # Tests 2nd order convergence of Jvec and Jtvec for piecewise linear loop. + # - All rx orientations + # - All rx components + # - Span many frequencies + # - Tests derivatives wrt sigma, mu and thicknesses + def setup_class(self): + x_path = np.array([-2, -2, 2, 2, -2]) + y_path = np.array([-1, 1, 1, -1, -1]) + frequencies = np.logspace(0, 4) + + wire_paths = np.c_[x_path, y_path, np.ones(5) * 0.5] + receiver_location = np.array([9.28, 0.0, 0.45]) + orientations = ["x", "y", "z"] + components = ["real", "imag", "both"] + + # Define sources and receivers + source_list = [] + for f in frequencies: + receiver_list = [] + + for rx_orientation in orientations: + for comp in components: + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + receiver_location, + orientation=rx_orientation, + component=comp, + ) + ) + + source_list.append(fdem.sources.LineCurrent(receiver_list, f, wire_paths)) + + # Survey + survey = fdem.Survey(source_list) + self.thicknesses = np.array([20.0, 40.0]) + + self.nlayers = len(self.thicknesses) + 1 + wire_map = maps.Wires( + ("sigma", self.nlayers), + ("mu", self.nlayers), + ("thicknesses", self.nlayers - 1), + ) + self.sigma_map = maps.ExpMap(nP=self.nlayers) * wire_map.sigma + self.mu_map = maps.ExpMap(nP=self.nlayers) * wire_map.mu + self.thicknesses_map = maps.ExpMap(nP=self.nlayers - 1) * wire_map.thicknesses + + sim = fdem.Simulation1DLayered( + survey=survey, + sigmaMap=self.sigma_map, + muMap=self.mu_map, + thicknessesMap=self.thicknesses_map, + ) + + self.sim = sim + + def test_EM1DFDJvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[1] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 1.1 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[1] = mu_blk + + # General model + m_1D = np.r_[np.log(sig), np.log(mu), np.log(self.thicknesses)] + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + # return Hz + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + dm = m_1D * 0.5 + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + passed = tests.check_derivative( + derChk, m_1D, num=4, dx=dm, plotIt=False, eps=1e-15, random_seed=1123 + ) + assert passed + + def 
test_EM1DFDJtvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[1] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 1.1 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[1] = mu_blk + + # General model + m_true = np.r_[np.log(sig), np.log(mu), np.log(self.thicknesses)] + + dobs = self.sim.dpred(m_true) + + m_ini = np.r_[ + np.log(np.ones(self.nlayers) * sigma_half), + np.log(np.ones(self.nlayers) * mu_half), + np.log(self.thicknesses) * 0.9, + ] + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative( + derChk, m_ini, num=4, plotIt=False, eps=1e-27, random_seed=124 + ) + assert passed diff --git a/tests/em/em1d/test_EM1D_FD_jac_layers3.py b/tests/em/em1d/test_EM1D_FD_jac_layers3.py new file mode 100644 index 0000000000..bc49ac3432 --- /dev/null +++ b/tests/em/em1d/test_EM1D_FD_jac_layers3.py @@ -0,0 +1,461 @@ +from simpeg import maps +from discretize import tests, TensorMesh +import simpeg.electromagnetics.frequency_domain as fdem +import numpy as np +from scipy.constants import mu_0 +from scipy.sparse import diags + + +class TestEM1D_FD_Jacobian_MagDipole: + # Tests 2nd order convergence of Jvec and Jtvec for magnetic dipole sources. + # - All src and rx orientations + # - All rx components + # - Span many frequencies + # - Tests derivatives wrt sigma, thicknesses and h + def setup_class(self): + # Layers and topography + nearthick = np.logspace(-1, 1, 5) + deepthick = np.logspace(1, 2, 10) + thicknesses = np.r_[nearthick, deepthick] + topo = np.r_[0.0, 0.0, 100.0] + + # Survey Geometry + height = 1e-5 + src_location = np.array([0.0, 0.0, 100.0 + height]) + rx_location = np.array([5.0, 5.0, 100.0 + height]) + frequencies = np.logspace(1, 8, 9) + orientations = ["x", "y", "z"] + components = ["real", "imag", "both"] + + # Define sources and receivers + source_list = [] + for f in frequencies: + for tx_orientation in orientations: + receiver_list = [] + + for rx_orientation in orientations: + for comp in components: + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + rx_location, orientation=rx_orientation, component=comp + ) + ) + + source_list.append( + fdem.sources.MagDipole( + receiver_list, + frequency=f, + location=src_location, + orientation=tx_orientation, + ) + ) + + # Survey + survey = fdem.Survey(source_list) + + self.topo = topo + self.survey = survey + self.showIt = False + self.height = height + self.frequencies = frequencies + self.thicknesses = thicknesses + self.nlayers = len(thicknesses) + 1 + + wire_map = maps.Wires( + ("sigma", self.nlayers), + ("h", 1), + ("thicknesses", self.nlayers - 1), + ) + self.sigma_map = maps.ExpMap(nP=self.nlayers) * wire_map.sigma + self.thicknesses_map = maps.ExpMap(nP=self.nlayers - 1) * wire_map.thicknesses + nP = len(source_list) + surject_mesh = TensorMesh([np.ones(nP)]) + self.h_map = maps.SurjectFull(surject_mesh) * maps.ExpMap(nP=1) * wire_map.h + + sim = fdem.Simulation1DLayered( + survey=self.survey, + sigmaMap=self.sigma_map, + thicknessesMap=self.thicknesses_map, + hMap=self.h_map, + topo=self.topo, + ) + + self.sim = sim + + def test_EM1DFDJvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = 
np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # General model + m_1D = np.r_[np.log(sig), np.log(self.height), np.log(self.thicknesses)] + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + # return Hz + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + dm = m_1D * 0.5 + + passed = tests.check_derivative( + derChk, m_1D, num=4, dx=dm, plotIt=False, eps=1e-15, random_seed=9186724 + ) + assert passed + + def test_EM1DFDJtvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # General model + m_true = np.r_[np.log(sig), np.log(self.height), np.log(self.thicknesses)] + + dobs = self.sim.dpred(m_true) + + m_ini = np.r_[ + np.log(np.ones(self.nlayers) * sigma_half), + np.log(0.5 * self.height), + np.log(self.thicknesses) * 0.9, + ] + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2.0 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative( + derChk, m_ini, num=4, plotIt=False, eps=1e-27, random_seed=2345 + ) + assert passed + + def test_jtjdiag(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # General model + model = np.r_[np.log(sig), np.log(self.height), np.log(self.thicknesses)] + + rng = np.random.default_rng(seed=42) + weights_matrix = diags(rng.random(size=self.sim.survey.nD)) + jtj_diag = self.sim.getJtJdiag(model, W=weights_matrix) + + J = self.sim.getJ(model) + expected = np.diag(J.T @ weights_matrix.T @ weights_matrix @ J) + np.testing.assert_allclose(expected, jtj_diag) + + +class TestEM1D_FD_Jacobian_CircularLoop: + # Tests 2nd order convergence of Jvec and Jtvec for horizontal loop sources. 
+ # - All rx orientations + # - All rx components + # - Span many frequencies + # - Tests derivatives wrt sigma, mu, thicknesses and h + def setup_class(self): + nearthick = np.logspace(-1, 1, 5) + deepthick = np.logspace(1, 2, 10) + thicknesses = np.r_[nearthick, deepthick] + topo = np.r_[0.0, 0.0, 100.0] + height = 1e-5 + + src_location = np.array([0.0, 0.0, 100.0 + height]) + rx_location = np.array([0.0, 0.0, 100.0 + height]) + frequencies = np.logspace(1, 8, 9) + orientations = ["x", "y", "z"] + components = ["real", "imag", "both"] + I = 1.0 + a = 10.0 + + # Define sources and receivers + source_list = [] + for f in frequencies: + receiver_list = [] + + for rx_orientation in orientations: + for comp in components: + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + rx_location, orientation=rx_orientation, component=comp + ) + ) + + source_list.append( + fdem.sources.CircularLoop( + receiver_list, f, src_location, radius=a, current=I + ) + ) + + # Survey + survey = fdem.Survey(source_list) + + self.topo = topo + self.survey = survey + self.showIt = False + self.height = height + self.frequencies = frequencies + self.thicknesses = thicknesses + self.nlayers = len(thicknesses) + 1 + + nP = len(source_list) + + wire_map = maps.Wires( + ("sigma", self.nlayers), + ("mu", self.nlayers), + ("thicknesses", self.nlayers - 1), + ("h", 1), + ) + self.sigma_map = maps.ExpMap(nP=self.nlayers) * wire_map.sigma + self.mu_map = maps.ExpMap(nP=self.nlayers) * wire_map.mu + self.thicknesses_map = maps.ExpMap(nP=self.nlayers - 1) * wire_map.thicknesses + surject_mesh = TensorMesh([np.ones(nP)]) + self.h_map = maps.SurjectFull(surject_mesh) * maps.ExpMap(nP=1) * wire_map.h + + sim = fdem.Simulation1DLayered( + survey=self.survey, + sigmaMap=self.sigma_map, + muMap=self.mu_map, + thicknessesMap=self.thicknesses_map, + hMap=self.h_map, + topo=self.topo, + ) + + self.sim = sim + + def test_EM1DFDJvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 2 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[3] = mu_blk + + # General model + m_1D = np.r_[ + np.log(sig), np.log(mu), np.log(self.thicknesses), np.log(self.height) + ] + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + # return Hz + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + dm = m_1D * 0.5 + + passed = tests.check_derivative( + derChk, m_1D, num=4, dx=dm, plotIt=False, eps=1e-15, random_seed=664 + ) + assert passed + + def test_EM1DFDJtvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[3] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 2 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[3] = mu_blk + + # General model + m_true = np.r_[ + np.log(sig), np.log(mu), np.log(self.thicknesses), np.log(self.height) + ] + + dobs = self.sim.dpred(m_true) + + m_ini = np.r_[ + np.log(np.ones(self.nlayers) * sigma_half), + np.log(np.ones(self.nlayers) * 1.5 * mu_half), + np.log(self.thicknesses) * 0.9, + np.log(0.5 * self.height), + ] + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 + return misfit, dmisfit + + def derChk(m): + return 
misfit(m, dobs) + + passed = tests.check_derivative( + derChk, m_ini, num=4, plotIt=False, eps=1e-27, random_seed=42 + ) + assert passed + + +class TestEM1D_FD_Jacobian_LineCurrent: + # Tests 2nd order convergence of Jvec and Jtvec for piecewise linear loop. + # - All rx orientations + # - All rx components + # - Span many frequencies + # - Tests derivatives wrt sigma, mu and thicknesses + def setup_class(self): + x_path = np.array([-2, -2, 2, 2, -2]) + y_path = np.array([-1, 1, 1, -1, -1]) + frequencies = np.logspace(0, 4) + + wire_paths = np.c_[x_path, y_path, np.ones(5) * 0.5] + receiver_location = np.array([9.28, 0.0, 0.45]) + orientations = ["x", "y", "z"] + components = ["real", "imag", "both"] + + # Define sources and receivers + source_list = [] + for f in frequencies: + receiver_list = [] + + for rx_orientation in orientations: + for comp in components: + receiver_list.append( + fdem.receivers.PointMagneticFieldSecondary( + receiver_location, + orientation=rx_orientation, + component=comp, + ) + ) + + source_list.append(fdem.sources.LineCurrent(receiver_list, f, wire_paths)) + + # Survey + survey = fdem.Survey(source_list) + self.thicknesses = np.array([20.0, 40.0]) + + self.nlayers = len(self.thicknesses) + 1 + wire_map = maps.Wires( + ("sigma", self.nlayers), + ("mu", self.nlayers), + ("thicknesses", self.nlayers - 1), + ) + self.sigma_map = maps.ExpMap(nP=self.nlayers) * wire_map.sigma + self.mu_map = maps.ExpMap(nP=self.nlayers) * wire_map.mu + self.thicknesses_map = maps.ExpMap(nP=self.nlayers - 1) * wire_map.thicknesses + + sim = fdem.Simulation1DLayered( + survey=survey, + sigmaMap=self.sigma_map, + muMap=self.mu_map, + thicknessesMap=self.thicknesses_map, + ) + + self.sim = sim + + def test_EM1DFDJvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[1] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 1.1 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[1] = mu_blk + + # General model + m_1D = np.r_[np.log(sig), np.log(mu), np.log(self.thicknesses)] + + def fwdfun(m): + resp = self.sim.dpred(m) + return resp + # return Hz + + def jacfun(m, dm): + Jvec = self.sim.Jvec(m, dm) + return Jvec + + dm = m_1D * 0.5 + + def derChk(m): + return [fwdfun(m), lambda mx: jacfun(m, mx)] + + passed = tests.check_derivative( + derChk, m_1D, num=4, dx=dm, plotIt=False, eps=1e-15, random_seed=1123 + ) + assert passed + + def test_EM1DFDJtvec_Layers(self): + # Conductivity + sigma_half = 0.01 + sigma_blk = 0.1 + sig = np.ones(self.nlayers) * sigma_half + sig[1] = sigma_blk + + # Permeability + mu_half = mu_0 + mu_blk = 1.1 * mu_0 + mu = np.ones(self.nlayers) * mu_half + mu[1] = mu_blk + + # General model + m_true = np.r_[np.log(sig), np.log(mu), np.log(self.thicknesses)] + + dobs = self.sim.dpred(m_true) + + m_ini = np.r_[ + np.log(np.ones(self.nlayers) * sigma_half), + np.log(np.ones(self.nlayers) * mu_half), + np.log(self.thicknesses) * 0.9, + ] + resp_ini = self.sim.dpred(m_ini) + dr = resp_ini - dobs + + def misfit(m, dobs): + dpred = self.sim.dpred(m) + misfit = np.linalg.norm(dpred - dobs) ** 2 + dmisfit = 2 * self.sim.Jtvec( + m, dr + ) # derivative of ||dpred - dobs||^2 gives factor of 2 + return misfit, dmisfit + + def derChk(m): + return misfit(m, dobs) + + passed = tests.check_derivative( + derChk, m_ini, num=4, plotIt=False, eps=1e-27, random_seed=124 + ) + assert passed diff --git a/tests/em/em1d/test_EM1D_TD_general_jac_layers.py 
b/tests/em/em1d/test_EM1D_TD_general_jac_layers.py index f6818ccf81..cc9cfddda9 100644 --- a/tests/em/em1d/test_EM1D_TD_general_jac_layers.py +++ b/tests/em/em1d/test_EM1D_TD_general_jac_layers.py @@ -3,6 +3,7 @@ from discretize import tests import numpy as np import simpeg.electromagnetics.time_domain as tdem +import pytest @@ -305,5 +306,60 @@ def derChk(m): self.assertTrue(passed) + +@pytest.mark.parametrize( + "rx_class", + [ + tdem.receivers.PointMagneticField, + tdem.receivers.PointMagneticFluxDensity, + tdem.receivers.PointMagneticFluxTimeDerivative, + ], +) +@pytest.mark.parametrize("n_locs1", [1, 4]) +@pytest.mark.parametrize("n_locs2", [1, 4]) +@pytest.mark.parametrize("orientation", ["x", "y", "z"]) +@pytest.mark.parametrize( + "waveform", [tdem.sources.StepOffWaveform(), tdem.sources.RampOffWaveform(1e-6)] +) +@pytest.mark.parametrize("comparison", ["dpred", "J"]) +def test_rx_loc_shapes(rx_class, n_locs1, n_locs2, orientation, waveform, comparison): + offsets = np.full(n_locs1, 100.0) + rx1_locs = np.pad(offsets[:, None], ((0, 0), (0, 2)), constant_values=0) + offsets = np.full(n_locs2, 100.0) + rx2_locs = np.pad(offsets[:, None], ((0, 0), (0, 2)), constant_values=0) + + times = [1e-5, 1e-4] + rx_list = [ + rx_class(rx1_locs, times=times, orientation=orientation), + rx_class(rx2_locs, times=times, orientation=orientation), + ] + n_d = (n_locs1 + n_locs2) * len(times) + + src = tdem.sources.MagDipole(rx_list, waveform=waveform) + srv = tdem.Survey(src) + + sim = tdem.Simulation1DLayered(survey=srv, sigma=[1]) + if comparison == "dpred": + d = sim.dpred(None) + else: + sim.sigmaMap = maps.IdentityMap(nP=1) + J = sim.getJ(np.ones(1)) + d = J[:, 0] + + # assert the shape is correct + assert d.shape == (n_d,) + + # every pair of values should be the same... + d1 = d[srv.get_slice(src, rx_list[0])] + d2 = d[srv.get_slice(src, rx_list[1])] + + # get the data into an n_loc * n_times shape + d = np.r_[ + d1.reshape(2, -1).T, + d2.reshape(2, -1).T, + ] + d_compare = d[0] * np.ones_like(d) + np.testing.assert_equal(d, d_compare) + + if __name__ == "__main__": unittest.main() diff --git a/tests/em/em1d/test_EM1D_TD_getJ.py b/tests/em/em1d/test_EM1D_TD_getJ.py new file mode 100644 index 0000000000..42273fbedf --- /dev/null +++ b/tests/em/em1d/test_EM1D_TD_getJ.py @@ -0,0 +1,161 @@ +""" +Test the getJ method of TDEM 1D simulation. +""" + +import pytest +import numpy as np +import simpeg.electromagnetics.time_domain as tdem +from simpeg import maps +from scipy.sparse import diags + + +def create_simulation_and_conductivities(identity_mapping: bool): + # Create Survey + # ------------- + # Source properties + source_location = np.array([0.0, 0.0, 20.0]) + source_orientation = "z" # "x", "y" or "z" + source_current = 1.0 # maximum on-time current + source_radius = 6.0 # source loop radius + + # Receiver properties + receiver_location = np.array([0.0, 0.0, 20.0]) + receiver_orientation = "z" # "x", "y" or "z" + times = np.logspace(-5, -2, 31) # time channels (s) + + # Define receiver list. In our case, we have only a single receiver for each source. + # When simulating the response for multiple components and/or field orientations, + # the list consists of multiple receiver objects. + receiver_list = [] + receiver_list.append( + tdem.receivers.PointMagneticFluxDensity( + receiver_location, times, orientation=receiver_orientation + ) + ) + + # Define the source waveform. Here we define a unit step-off. 
+    waveform = tdem.sources.StepOffWaveform() + + # Define source list. In our case, we have only a single source. + source_list = [ + tdem.sources.CircularLoop( + receiver_list=receiver_list, + location=source_location, + orientation=source_orientation, + waveform=waveform, + current=source_current, + radius=source_radius, + ) + ] + + # Define the survey + survey = tdem.Survey(source_list) + + # Defining a 1D Layered Earth Model + # --------------------------------- + # Physical properties + background_conductivity = 1e-1 + layer_conductivity = 1e0 + + # Layer thicknesses + thicknesses = np.array([40.0, 40.0]) + n_layer = len(thicknesses) + 1 + + # Conductivities + conductivities = background_conductivity * np.ones(n_layer) + conductivities[1] = layer_conductivity + + # Define a mapping + model_mapping = ( + maps.IdentityMap(nP=n_layer) if identity_mapping else maps.ExpMap(nP=n_layer) + ) + + # Define the Forward Simulation + # ----------------------------- + simulation = tdem.Simulation1DLayered( + survey=survey, + thicknesses=thicknesses, + sigmaMap=model_mapping, + ) + + return simulation, conductivities + + +def test_getJ(): + """ + Test if getJ returns different J matrices after passing different maps. + """ + dpreds, jacobians = [], [] + + # Compute dpred and J using an identity map and an exp map + for identity_mapping in (True, False): + simulation, conductivities = create_simulation_and_conductivities( + identity_mapping + ) + model = conductivities if identity_mapping else np.log(conductivities) + dpreds.append(simulation.dpred(model)) + jac = simulation.getJ(model) + jacobians.append(jac) + + # The two dpreds should be equal + assert np.allclose(*dpreds) + + # The two J matrices should not be equal + assert not np.allclose(*jacobians, atol=0.0) + + +@pytest.mark.parametrize("mapping", ["identity", "expmap"]) +def test_JtJdiag(mapping): + """ + Test the getJtJdiag method of the simulation. + """ + identity_mapping = mapping == "identity" + simulation, conductivities = create_simulation_and_conductivities(identity_mapping) + + model = conductivities if identity_mapping else np.log(conductivities) + rng = np.random.default_rng(seed=42) + weights_matrix = diags(rng.random(size=simulation.survey.nD)) + jtj_diag = simulation.getJtJdiag(model, W=weights_matrix) + + J = simulation.getJ(model) + expected = np.diag(J.T @ weights_matrix.T @ weights_matrix @ J) + np.testing.assert_allclose(expected, jtj_diag) + + +@pytest.mark.parametrize("mapping", ["identity", "expmap"]) +def test_Jvec(mapping): + """ + Test the Jvec method of the simulation. + """ + identity_mapping = mapping == "identity" + simulation, conductivities = create_simulation_and_conductivities(identity_mapping) + + model = conductivities if identity_mapping else np.log(conductivities) + rng = np.random.default_rng(seed=42) + vector = rng.random(size=model.size) + jvec = simulation.Jvec(model, vector) + + J = simulation.getJ(model) + expected = J @ vector + + np.testing.assert_allclose(expected, jvec) + + +@pytest.mark.parametrize("mapping", ["identity", "expmap"]) +def test_Jtvec(mapping): + """ + Test the Jtvec method of the simulation. 
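+    The result is checked against an explicit ``J.T @ vector`` product.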
+ """ + identity_mapping = mapping == "identity" + simulation, conductivities = create_simulation_and_conductivities(identity_mapping) + + model = conductivities if identity_mapping else np.log(conductivities) + rng = np.random.default_rng(seed=42) + vector = rng.random(size=simulation.survey.nD) + jtvec = simulation.Jtvec(model, vector) + + J = simulation.getJ(model) + expected = J.T @ vector + + np.testing.assert_allclose(expected, jtvec) diff --git a/tests/em/fdem/forward/test_FDEM_dipolar_sources.py b/tests/em/fdem/forward/test_FDEM_dipolar_sources.py new file mode 100644 index 0000000000..f06843cf89 --- /dev/null +++ b/tests/em/fdem/forward/test_FDEM_dipolar_sources.py @@ -0,0 +1,107 @@ +from scipy.constants import mu_0 +import numpy as np +import pytest + +from discretize import TensorMesh +from geoana.em.static import MagneticDipoleWholeSpace +import simpeg.electromagnetics.frequency_domain as fdem +from simpeg import maps + +from simpeg.utils.solver_utils import get_default_solver + +Solver = get_default_solver() + +TOL = 5e-2 # relative tolerance + +# Defining transmitter locations +source_location = np.r_[0, 0, 0] + + +def create_survey(source_type="MagDipole", mu=mu_0, orientation="Z"): + + freq = 10 + + # Must define the transmitter properties and associated receivers + source_list = [ + getattr(fdem.sources, source_type)( + [], + location=source_location, + frequency=freq, + moment=1.0, + orientation=orientation, + mu=mu, + ) + ] + + survey = fdem.Survey(source_list) + return survey + + +def create_mesh_model(): + cell_size = 20 + n_core = 10 + padding_factor = 1.3 + n_padding = 10 + + h = [ + (cell_size, n_padding, -padding_factor), + (cell_size, n_core), + (cell_size, n_padding, padding_factor), + ] + mesh = TensorMesh([h, h, h], origin="CCC") + + # Conductivity in S/m + air_conductivity = 1e-8 + background_conductivity = 1e-1 + + model = air_conductivity * np.ones(mesh.n_cells) + model[mesh.cell_centers[:, 2] < 0] = background_conductivity + + return mesh, model + + +@pytest.mark.parametrize("simulation_type", ["e", "b", "h", "j"]) +@pytest.mark.parametrize("field_test", ["bPrimary", "hPrimary"]) +@pytest.mark.parametrize("mur", [1, 50]) +def test_dipolar_fields(simulation_type, field_test, mur, orientation="Z"): + + mesh, model = create_mesh_model() + survey = create_survey("MagDipole", mu=mur * mu_0, orientation="Z") + + if simulation_type in ["e", "b"]: + grid = mesh.faces + projection = mesh.project_face_vector + if simulation_type == "e": + sim = fdem.simulation.Simulation3DElectricField( + mesh, survey=survey, sigmaMap=maps.IdentityMap(), solver=Solver + ) + elif simulation_type == "b": + sim = fdem.simulation.Simulation3DMagneticFluxDensity( + mesh, survey=survey, sigmaMap=maps.IdentityMap(), solver=Solver + ) + + elif simulation_type in ["h", "j"]: + grid = mesh.edges + projection = mesh.project_edge_vector + if simulation_type == "h": + sim = fdem.simulation.Simulation3DMagneticField( + mesh, survey=survey, sigmaMap=maps.IdentityMap(), solver=Solver + ) + elif simulation_type == "j": + sim = fdem.simulation.Simulation3DCurrentDensity( + mesh, survey=survey, sigmaMap=maps.IdentityMap(), solver=Solver + ) + + # get numeric solution + src = survey.source_list[0] + numeric = getattr(src, field_test)(sim) + + # get analytic + dipole = MagneticDipoleWholeSpace(orientation=orientation, mu=mur * mu_0) + + if field_test == "bPrimary": + analytic = projection(dipole.magnetic_flux_density(grid)) + elif field_test == "hPrimary": + analytic = 
projection(dipole.magnetic_field(grid)) + + assert np.abs(np.mean((numeric / analytic)) - 1) < TOL diff --git a/tests/em/fdem/forward/test_FDEM_sources.py b/tests/em/fdem/forward/test_FDEM_sources.py index 7ba8de8170..640790e534 100644 --- a/tests/em/fdem/forward/test_FDEM_sources.py +++ b/tests/em/fdem/forward/test_FDEM_sources.py @@ -376,24 +376,6 @@ def test_CircularLoop_bPrimaryMu50_h(self): assert self.bPrimaryTest(src, "j") -def test_removal_circular_loop_n(): - """ - Test if passing the N argument to CircularLoop raises an error - """ - msg = "'N' property has been removed. Please use 'n_turns'." - with pytest.raises(TypeError, match=msg): - fdem.sources.CircularLoop( - [], - frequency=1e-3, - radius=np.sqrt(1 / np.pi), - location=[0, 0, 0], - orientation="Z", - mu=mu_0, - current=0.5, - N=2, - ) - - def test_line_current_failures(): rx_locs = [[0.5, 0.5, 0]] tx_locs = [[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0], [0, 0, 0]] diff --git a/tests/em/fdem/forward/test_fields_crosscheck.py b/tests/em/fdem/forward/test_fields_crosscheck.py new file mode 100644 index 0000000000..627407b792 --- /dev/null +++ b/tests/em/fdem/forward/test_fields_crosscheck.py @@ -0,0 +1,142 @@ +import numpy as np +import pytest + +import discretize + +from simpeg import maps + +from simpeg.electromagnetics import frequency_domain as fdem +from simpeg.utils.solver_utils import get_default_solver + +SOLVER = get_default_solver() + +# relative tolerances +RELTOL = 1e-2 +MINTOL = 1e-20 # minimum tolerance we test, anything below this is "ZERO" + +FREQUENCY = 5e-1 +SIMULATION_TYPES = ["e", "b", "h", "j"] +FIELDS_TEST = ["e", "b", "h", "j", "charge", "charge_density"] + +VERBOSE = True + + +def get_fdem_simulation(mesh, fdem_type, frequency): + mapping = maps.ExpMap(mesh) + + source_list = [ + fdem.sources.MagDipole([], frequency=frequency, location=np.r_[0.0, 0.0, 10.0]) + ] + survey = fdem.Survey(source_list) + + if fdem_type == "e": + sim = fdem.Simulation3DElectricField( + mesh, survey=survey, sigmaMap=mapping, solver=SOLVER + ) + + elif fdem_type == "b": + sim = fdem.Simulation3DMagneticFluxDensity( + mesh, survey=survey, sigmaMap=mapping, solver=SOLVER + ) + + elif fdem_type == "j": + sim = fdem.Simulation3DCurrentDensity( + mesh, survey=survey, sigmaMap=mapping, solver=SOLVER + ) + + elif fdem_type == "h": + sim = fdem.Simulation3DMagneticField( + mesh, survey=survey, sigmaMap=mapping, solver=SOLVER + ) + + return sim + + +class TestFieldsCrosscheck: + + @property + def mesh(self): + if getattr(self, "_mesh", None) is None: + cs = 10.0 + ncx, ncy, ncz = 4, 4, 4 + npad = 4 + pf = 1.3 + hx = [(cs, npad, -pf), (cs, ncx), (cs, npad, pf)] + hy = [(cs, npad, -pf), (cs, ncy), (cs, npad, pf)] + hz = [(cs, npad, -pf), (cs, ncz), (cs, npad, pf)] + self._mesh = discretize.TensorMesh([hx, hy, hz], ["C", "C", "C"]) + return self._mesh + + @property + def model(self): + if getattr(self, "_model", None) is None: + sigma_background = 10 + sigma_target = 1e-2 + sigma_air = 1e-8 + + target_width = 40 + target_depth = -20 + + inds_target = ( + (self.mesh.cell_centers[:, 0] > -target_width / 2) + & (self.mesh.cell_centers[:, 0] < target_width / 2) + & (self.mesh.cell_centers[:, 1] > -target_width / 2) + & (self.mesh.cell_centers[:, 1] < target_width / 2) + & (self.mesh.cell_centers[:, 2] > -target_width / 2 + target_depth) + & (self.mesh.cell_centers[:, 2] < target_width / 2 + target_depth) + ) + + sigma_model = sigma_background * np.ones(self.mesh.n_cells) + sigma_model[self.mesh.cell_centers[:, 2] > 0] = sigma_air + + 
sigma_model[inds_target] = sigma_target + + self._model = np.log(sigma_model) + return self._model + + @property + def simulation_dict(self): + if getattr(self, "_simulation_dict", None) is None: + self._simulation_dict = { + key: get_fdem_simulation(self.mesh, key, FREQUENCY) + for key in SIMULATION_TYPES + } + return self._simulation_dict + + @property + def fields_dict(self): + if getattr(self, "_fields_dict", None) is None: + self._fields_dict = { + key: sim.fields(self.model) for key, sim in self.simulation_dict.items() + } + return self._fields_dict + + def compare_fields(self, field1, field2, relative_tolerance, verbose=False): + norm_diff = np.linalg.norm(field1 - field2) + abs_tol = np.max( + [ + relative_tolerance + * (np.linalg.norm(field1) + np.linalg.norm(field2)) + / 2, + MINTOL, + ] + ) + test = norm_diff < abs_tol + + if verbose is True: + print(f"||diff||: {norm_diff:1.2e} < TOL: {abs_tol:1.2e} ? {test}") + + return test + + @pytest.mark.parametrize("sim_pairs", [("e", "b"), ("h", "j")], ids=["eb", "hj"]) + @pytest.mark.parametrize("field_test", FIELDS_TEST) + def test_fields_cross_check_EBHJ( + self, sim_pairs, field_test, relative_tolerance=RELTOL, verbose=VERBOSE + ): + field1 = self.fields_dict[sim_pairs[0]][:, field_test] + field2 = self.fields_dict[sim_pairs[1]][:, field_test] + + if verbose is True: + print(f"Testing simulations {sim_pairs} for field {field_test}") + + assert self.compare_fields(field1, field2, relative_tolerance, verbose) diff --git a/tests/em/nsem/forward/test_1D_finite_volume.py b/tests/em/nsem/forward/test_1D_finite_volume.py index 5a836908d4..07b8ecd8b5 100644 --- a/tests/em/nsem/forward/test_1D_finite_volume.py +++ b/tests/em/nsem/forward/test_1D_finite_volume.py @@ -26,18 +26,14 @@ def setUp(self): self.frequencies = np.logspace(-2, 1, 30) rx_list = [ - nsem.receivers.PointNaturalSource( + nsem.receivers.Impedance( [[0]], orientation="xy", component="apparent_resistivity" ), - nsem.receivers.PointNaturalSource( - [[0]], orientation="xy", component="phase" - ), - nsem.receivers.PointNaturalSource( + nsem.receivers.Impedance([[0]], orientation="xy", component="phase"), + nsem.receivers.Impedance( [[0]], orientation="yx", component="apparent_resistivity" ), - nsem.receivers.PointNaturalSource( - [[0]], orientation="yx", component="phase" - ), + nsem.receivers.Impedance([[0]], orientation="yx", component="phase"), ] # simulation src_list = [ diff --git a/tests/em/nsem/forward/test_Problem1D_AnalyticVsNumeric.py b/tests/em/nsem/forward/test_Problem1D_AnalyticVsNumeric.py index 4a0ef2ee3e..7aa36f6144 100644 --- a/tests/em/nsem/forward/test_Problem1D_AnalyticVsNumeric.py +++ b/tests/em/nsem/forward/test_Problem1D_AnalyticVsNumeric.py @@ -16,6 +16,7 @@ def appResPhs(freq, z): return app_res, app_phs zList = [] + survey_slices = nsemdata.survey.get_all_slices() for src in nsemdata.survey.source_list: zc = [src.frequency] for rx in src.receiver_list: @@ -23,7 +24,8 @@ def appResPhs(freq, z): m = 1j else: m = 1 - zc.append(m * nsemdata[src, rx]) + src_rx_slice = survey_slices[src, rx] + zc.append(m * nsemdata.dobs[src_rx_slice]) zList.append(zc) return [ appResPhs(zList[i][0], np.sum(zList[i][1:3])) for i in np.arange(len(zList)) @@ -32,7 +34,8 @@ def appResPhs(freq, z): def calculateAnalyticSolution(source_list, mesh, model): surveyAna = nsem.Survey(source_list) - data1D = nsem.Data(surveyAna) + survey_slices = surveyAna.get_all_slices() + data1D = np.full(surveyAna.nD, np.nan) for src in surveyAna.source_list: elev = 
src.receiver_list[0].locations_e[0] anaEd, anaEu, anaHd, anaHu = nsem.utils.analytic_1d.getEHfields( @@ -45,7 +48,9 @@ def calculateAnalyticSolution(source_list, mesh, model): # anaH = (anaHtemp/anaEtemp[-1])#.conj() anaZ = anaE / anaH for rx in src.receiver_list: - data1D[src, rx] = getattr(anaZ, rx.component) + src_rx_slice = survey_slices[src, rx] + data1D[src_rx_slice] = getattr(anaZ, rx.component) + data1D = nsem.Data(surveyAna, data1D) return data1D diff --git a/tests/em/nsem/forward/test_Recursive1D_VsAnalyticHalfspace.py b/tests/em/nsem/forward/test_Recursive1D_VsAnalyticHalfspace.py index 3242e6f5aa..be624bb19f 100644 --- a/tests/em/nsem/forward/test_Recursive1D_VsAnalyticHalfspace.py +++ b/tests/em/nsem/forward/test_Recursive1D_VsAnalyticHalfspace.py @@ -13,10 +13,10 @@ def create_survey(freq): receivers_list = [ - nsem.receivers.PointNaturalSource(component="real"), - nsem.receivers.PointNaturalSource(component="imag"), - nsem.receivers.PointNaturalSource(component="app_res"), - nsem.receivers.PointNaturalSource(component="phase"), + nsem.receivers.Impedance([[]], component="real"), + nsem.receivers.Impedance([[]], component="imag"), + nsem.receivers.Impedance([[]], component="app_res"), + nsem.receivers.Impedance([[]], component="phase"), ] source_list = [nsem.sources.Planewave(receivers_list, f) for f in freq] @@ -30,7 +30,7 @@ def true_solution(freq, sigma_half): -np.sqrt(np.pi * freq * mu_0 / sigma_half), -np.sqrt(np.pi * freq * mu_0 / sigma_half), 1 / sigma_half, - 45.0, + -135.0, ] return soln @@ -71,10 +71,8 @@ def test_4(self): "rx_class", [ ns_rx.Impedance, - ns_rx.PointNaturalSource, ns_rx.Admittance, ns_rx.Tipper, - ns_rx.Point3DTipper, ns_rx.ApparentConductivity, ], ) @@ -84,7 +82,7 @@ def test_incorrect_rx_types(rx_class): source = nsem.sources.Planewave(rx, frequency=10) survey = nsem.Survey(source) # make sure that only these exact classes do not issue warnings. - if rx_class in [ns_rx.Impedance, ns_rx.PointNaturalSource]: + if rx_class is ns_rx.Impedance: with warnings.catch_warnings(): warnings.simplefilter("error") nsem.Simulation1DRecursive(survey=survey) diff --git a/tests/em/nsem/forward/test_getJ_not_implemented.py b/tests/em/nsem/forward/test_getJ_not_implemented.py new file mode 100644 index 0000000000..0da2a3ee95 --- /dev/null +++ b/tests/em/nsem/forward/test_getJ_not_implemented.py @@ -0,0 +1,50 @@ +""" +Test NotImplementedError on getJ for NSEM 1D finite volume simulations. +""" + +import pytest +import numpy as np +import discretize +from simpeg import maps +from simpeg.electromagnetics import natural_source as nsem + + +@pytest.fixture +def mesh(): + csz = 100 + nc = 300 + npad = 30 + pf = 1.2 + mesh = discretize.TensorMesh([[(csz, npad, -pf), (csz, nc), (csz, npad)]], "N") + mesh.x0 = np.r_[-mesh.h[0][:-npad].sum()] + return mesh + + +@pytest.fixture +def survey(): + frequencies = np.logspace(-2, 1, 30) + receiver = nsem.receivers.Impedance( + [[0]], orientation="xy", component="apparent_resistivity" + ) + sources = [nsem.sources.Planewave([receiver], frequency=f) for f in frequencies] + survey = nsem.survey.Survey(sources) + return survey + + +@pytest.mark.parametrize( + "simulation_class", [nsem.Simulation1DElectricField, nsem.Simulation1DMagneticField] +) +def test_getJ_not_implemented(mesh, survey, simulation_class): + """ + Test NotImplementedError on getJ for NSEM 1D simulations. 
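+
+    The ``simulation_class`` parametrization covers both
+    ``Simulation1DElectricField`` and ``Simulation1DMagneticField``.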
+ """ + mapping = maps.IdentityMap() + simulation = simulation_class( + mesh=mesh, + survey=survey, + sigmaMap=mapping, + ) + model = np.ones(survey.nD) + msg = "The getJ method hasn't been implemented" + with pytest.raises(NotImplementedError, match=msg): + simulation.getJ(model) diff --git a/tests/em/nsem/forward/test_receiver_eval.py b/tests/em/nsem/forward/test_receiver_eval.py new file mode 100644 index 0000000000..1f953198e9 --- /dev/null +++ b/tests/em/nsem/forward/test_receiver_eval.py @@ -0,0 +1,36 @@ +""" +Test receiver's ``eval`` method. +""" + +import numpy as np +import pytest +from simpeg.electromagnetics import natural_source as nsem +from simpeg.electromagnetics.natural_source.utils.test_utils import setup1DSurvey +from simpeg.utils.solver_utils import get_default_solver + + +@pytest.mark.parametrize("orientation", ["xx", "yy"]) +def test_zero_value(orientation): + """ + Test if ``Impedance.eval()`` returns an array of zeros on 1D problem + when orientation is ``"xx"`` or ``"yy"``. + + Test bugfix introduced in #1692. + """ + survey, sigma, _, mesh = setup1DSurvey(sigmaHalf=1e-2, rx_orientation=orientation) + + # Define simulation and precompute fields + solver = get_default_solver() + simulation = nsem.Simulation1DPrimarySecondary( + mesh, sigmaPrimary=sigma, sigma=sigma, survey=survey, solver=solver + ) + fields = simulation.fields() + + # Check if calling eval on each receiver returns the expected result + sources_and_receivers = ( + (src, rx) for src in survey.source_list for rx in src.receiver_list + ) + for source, receiver in sources_and_receivers: + result = receiver.eval(source, mesh, fields) + np.testing.assert_allclose(result, 0) + assert result.shape == (receiver.nD, 1) diff --git a/tests/em/nsem/inversion/test_BC_Sims.py b/tests/em/nsem/inversion/test_BC_Sims.py index 8962b76272..84eecaacf8 100644 --- a/tests/em/nsem/inversion/test_BC_Sims.py +++ b/tests/em/nsem/inversion/test_BC_Sims.py @@ -53,18 +53,18 @@ def create_simulation_1d(sim_type, deriv_type): frequencies = np.logspace(-2, 1, 30) rx_list = [ - nsem.receivers.PointNaturalSource([[0]], orientation="xy", component="real"), - nsem.receivers.PointNaturalSource([[0]], orientation="xy", component="imag"), - nsem.receivers.PointNaturalSource( + nsem.receivers.Impedance([[0]], orientation="xy", component="real"), + nsem.receivers.Impedance([[0]], orientation="xy", component="imag"), + nsem.receivers.Impedance( [[0]], orientation="xy", component="apparent_resistivity" ), - nsem.receivers.PointNaturalSource([[0]], orientation="xy", component="phase"), - nsem.receivers.PointNaturalSource([[0]], orientation="yx", component="real"), - nsem.receivers.PointNaturalSource([[0]], orientation="yx", component="imag"), - nsem.receivers.PointNaturalSource( + nsem.receivers.Impedance([[0]], orientation="xy", component="phase"), + nsem.receivers.Impedance([[0]], orientation="yx", component="real"), + nsem.receivers.Impedance([[0]], orientation="yx", component="imag"), + nsem.receivers.Impedance( [[0]], orientation="yx", component="apparent_resistivity" ), - nsem.receivers.PointNaturalSource([[0]], orientation="yx", component="phase"), + nsem.receivers.Impedance([[0]], orientation="yx", component="phase"), ] src_list = [nsem.sources.Planewave(rx_list, frequency=f) for f in frequencies] survey = nsem.Survey(src_list) @@ -169,18 +169,12 @@ def create_simulation_2d(sim_type, deriv_type, mesh_type, fixed_boundary=False): sim_kwargs["h_bc"] = h_bc rx_list = [ - nsem.receivers.PointNaturalSource( - rx_locs, orientation="xy", 
component="real" - ), - nsem.receivers.PointNaturalSource( - rx_locs, orientation="xy", component="imag" - ), - nsem.receivers.PointNaturalSource( + nsem.receivers.Impedance(rx_locs, orientation="xy", component="real"), + nsem.receivers.Impedance(rx_locs, orientation="xy", component="imag"), + nsem.receivers.Impedance( rx_locs, orientation="xy", component="apparent_resistivity" ), - nsem.receivers.PointNaturalSource( - rx_locs, orientation="xy", component="phase" - ), + nsem.receivers.Impedance(rx_locs, orientation="xy", component="phase"), ] src_list = [nsem.sources.Planewave(rx_list, frequency=f) for f in frequencies] survey = nsem.Survey(src_list) @@ -219,18 +213,12 @@ def create_simulation_2d(sim_type, deriv_type, mesh_type, fixed_boundary=False): sim_kwargs["e_bc"] = e_bc rx_list = [ - nsem.receivers.PointNaturalSource( - rx_locs, orientation="yx", component="real" - ), - nsem.receivers.PointNaturalSource( - rx_locs, orientation="yx", component="imag" - ), - nsem.receivers.PointNaturalSource( + nsem.receivers.Impedance(rx_locs, orientation="yx", component="real"), + nsem.receivers.Impedance(rx_locs, orientation="yx", component="imag"), + nsem.receivers.Impedance( rx_locs, orientation="yx", component="apparent_resistivity" ), - nsem.receivers.PointNaturalSource( - rx_locs, orientation="yx", component="phase" - ), + nsem.receivers.Impedance(rx_locs, orientation="yx", component="phase"), ] src_list = [nsem.sources.Planewave(rx_list, frequency=f) for f in frequencies] survey = nsem.Survey(src_list) @@ -291,10 +279,10 @@ def test_errors(self): rx_locs = np.c_[np.linspace(-8000, 8000, 3), np.zeros(3)] mesh_1d = TensorMesh([5]) mesh_2d = TensorMesh([5, 5]) - r_xy = nsem.receivers.PointNaturalSource( + r_xy = nsem.receivers.Impedance( rx_locs, orientation="xy", component="apparent_resistivity" ) - r_yx = nsem.receivers.PointNaturalSource( + r_yx = nsem.receivers.Impedance( rx_locs, orientation="yx", component="apparent_resistivity" ) survey_xy = nsem.Survey([nsem.sources.Planewave([r_xy], frequency=10)]) diff --git a/tests/em/nsem/inversion/test_Problem1D_Adjoint.py b/tests/em/nsem/inversion/test_Problem1D_Adjoint.py index b81cda78b6..28449e0395 100644 --- a/tests/em/nsem/inversion/test_Problem1D_Adjoint.py +++ b/tests/em/nsem/inversion/test_Problem1D_Adjoint.py @@ -18,10 +18,10 @@ def JvecAdjointTest_1D(sigmaHalf, formulation="PrimSec"): # Define a receiver for each data type as a list receivers_list = [ - nsem.receivers.PointNaturalSource(component="real"), - nsem.receivers.PointNaturalSource(component="imag"), - nsem.receivers.PointNaturalSource(component="app_res"), - nsem.receivers.PointNaturalSource(component="phase"), + nsem.receivers.Impedance([[]], component="real"), + nsem.receivers.Impedance([[]], component="imag"), + nsem.receivers.Impedance([[]], component="app_res"), + nsem.receivers.Impedance([[]], component="phase"), ] # Use a list to define the planewave source at each frequency and assign receivers diff --git a/tests/em/nsem/inversion/test_Problem1D_Derivs.py b/tests/em/nsem/inversion/test_Problem1D_Derivs.py index 310a7ec4dc..de11c736b4 100644 --- a/tests/em/nsem/inversion/test_Problem1D_Derivs.py +++ b/tests/em/nsem/inversion/test_Problem1D_Derivs.py @@ -16,10 +16,10 @@ def DerivJvecTest_1D(halfspace_value, freq=False, expMap=True): # Define a receiver for each data type as a list receivers_list = [ - nsem.receivers.PointNaturalSource(component="real"), - nsem.receivers.PointNaturalSource(component="imag"), - nsem.receivers.PointNaturalSource(component="app_res"), - 
nsem.receivers.PointNaturalSource(component="phase"), + nsem.receivers.Impedance([[]], component="real"), + nsem.receivers.Impedance([[]], component="imag"), + nsem.receivers.Impedance([[]], component="app_res"), + nsem.receivers.Impedance([[]], component="phase"), ] # Use a list to define the planewave source at each frequency and assign receivers diff --git a/tests/em/nsem/inversion/test_Problem3D_Derivs.py b/tests/em/nsem/inversion/test_Problem3D_Derivs.py index 8540e9f3bc..41a99d5ce4 100644 --- a/tests/em/nsem/inversion/test_Problem3D_Derivs.py +++ b/tests/em/nsem/inversion/test_Problem3D_Derivs.py @@ -51,7 +51,7 @@ def test_Jtjdiag_clearing(model_simulation_tuple): def test_Jmatrix(model_simulation_tuple): model, simulation = model_simulation_tuple - rng = np.random.default_rng(4421) + rng = np.random.default_rng(4422) # create random vector vec = rng.standard_normal(simulation.survey.nD) diff --git a/tests/em/nsem/inversion/test_complex_resistivity.py b/tests/em/nsem/inversion/test_complex_resistivity.py index 3c98b3d80c..ad3955dfd0 100644 --- a/tests/em/nsem/inversion/test_complex_resistivity.py +++ b/tests/em/nsem/inversion/test_complex_resistivity.py @@ -68,7 +68,9 @@ def create_simulation(self, rx_type="apparent_resistivity", rx_orientation="xy") rx_loc[:, 2] = -50 # Make a receiver list - rxList = [ns.Rx.PointNaturalSource(rx_loc, rx_orientation, rx_type)] + rxList = [ + ns.Rx.Impedance(rx_loc, orientation=rx_orientation, component=rx_type) + ] # Source list freqs = [10, 50, 200] @@ -103,11 +105,11 @@ def create_simulation_rx(self, rx_type="apparent_resistivity", rx_orientation="x # Make a receiver list rxList = [ - ns.Rx.PointNaturalSource( - orientation=rx_orientation, - component=rx_type, + ns.Rx.Impedance( locations_e=rx_loc, locations_h=rx_loc, + orientation=rx_orientation, + component=rx_type, ) ] @@ -145,7 +147,9 @@ def create_simulation_1dprimary_assign_mesh1d( rx_loc[:, 2] = -50 # Make a receiver list - rxList = [ns.Rx.PointNaturalSource(rx_loc, rx_orientation, rx_type)] + rxList = [ + ns.Rx.Impedance(rx_loc, orientation=rx_orientation, component=rx_type) + ] # give background a value x0 = self.mesh.x0 @@ -195,7 +199,9 @@ def create_simulation_1dprimary_assign( rx_loc[:, 2] = -50 # Make a receiver list - rxList = [ns.Rx.PointNaturalSource(rx_loc, rx_orientation, rx_type)] + rxList = [ + ns.Rx.Impedance(rx_loc, orientation=rx_orientation, component=rx_type) + ] # Source list freqs = [10, 50, 200] diff --git a/tests/em/nsem/test_nsem_point_deprecations.py b/tests/em/nsem/test_nsem_point_deprecations.py deleted file mode 100644 index 35128d890e..0000000000 --- a/tests/em/nsem/test_nsem_point_deprecations.py +++ /dev/null @@ -1,215 +0,0 @@ -import inspect -import re - -import pytest -import simpeg.electromagnetics.natural_source as nsem -import numpy as np -import discretize -import numpy.testing as npt - - -@pytest.fixture( - params=[ - "same_location", - "diff_location", - ] -) -def impedance_pairs(request): - test_e_locs = np.array([[0.2, 0.1, 0.3], [-0.1, 0.2, -0.3]]) - test_h_locs = np.array([[-0.2, 0.24, 0.1], [0.5, 0.2, -0.2]]) - - rx_point_type = request.param - if rx_point_type == "same": - rx1 = nsem.receivers.PointNaturalSource(test_e_locs) - rx2 = nsem.receivers.Impedance(test_e_locs, orientation="xy") - else: - rx1 = nsem.receivers.PointNaturalSource( - locations_e=test_e_locs, locations_h=test_h_locs - ) - rx2 = nsem.receivers.Impedance( - locations_e=test_e_locs, locations_h=test_h_locs, orientation="xy" - ) - return rx1, rx2 - - -@pytest.fixture() -def 
tipper_pairs(): - test_e_locs = np.array([[0.2, 0.1, 0.3], [-0.1, 0.2, -0.3]]) - - rx1 = nsem.receivers.Point3DTipper(test_e_locs) - rx2 = nsem.receivers.Tipper(test_e_locs, orientation="zx") - return rx1, rx2 - - -def test_deprecation(): - test_loc = np.array([10.0, 11.0, 12.0]) - with pytest.warns(FutureWarning, match="PointNaturalSource has been deprecated.*"): - nsem.receivers.PointNaturalSource(test_loc) - - with pytest.warns(FutureWarning, match="Using the default for locations.*"): - nsem.receivers.PointNaturalSource() - - with pytest.warns(FutureWarning, match="Point3DTipper has been deprecated.*"): - nsem.receivers.Point3DTipper(test_loc) - - -def test_imp_consistent_attributes(impedance_pairs): - rx1, rx2 = impedance_pairs - - for item_name in dir(rx1): - is_dunder = re.match(r"__\w+__", item_name) is not None - # skip a few things related to the wrapping, and dunder methods - if not (item_name in ["locations", "_uid", "uid", "_old__init__"] or is_dunder): - item1 = getattr(rx1, item_name) - item2 = getattr(rx2, item_name) - if not (inspect.isfunction(item1) or inspect.ismethod(item1)): - if isinstance(item1, np.ndarray): - npt.assert_array_equal(item1, item2) - else: - assert item1 == item2 - - npt.assert_array_equal(rx1.locations, rx2.locations_e) - - -def test_tip_consistent_attributes(tipper_pairs): - rx1, rx2 = tipper_pairs - - for item_name in dir(rx1): - is_dunder = re.match(r"__\w+__", item_name) is not None - # skip a few things related to the wrapping, and dunder methods - if not ( - item_name in ["locations", "locations_e", "_uid", "uid", "_old__init__"] - or is_dunder - ): - item1 = getattr(rx1, item_name) - item2 = getattr(rx2, item_name) - if not (inspect.isfunction(item1) or inspect.ismethod(item1)): - print(item_name, item1, item2) - if isinstance(item1, np.ndarray): - npt.assert_array_equal(item1, item2) - else: - assert item1 == item2 - - npt.assert_array_equal(rx1.locations, rx2.locations_h) - npt.assert_array_equal(rx1.locations, rx2.locations_base) - - -@pytest.mark.parametrize( - "rx_component", ["real", "imag", "apparent_resistivity", "phase", "complex"] -) -def test_imp_consistent_eval(impedance_pairs, rx_component): - rx1, rx2 = impedance_pairs - rx1.component = rx_component - rx2.component = rx_component - # test that the output of the function eval returns the same thing, - # since it was updated... 
- mesh = discretize.TensorMesh([3, 4, 5], origin="CCC") - - # create a mock simulation - src = nsem.sources.PlanewaveXYPrimary( - [rx1, rx2], frequency=10, sigma_primary=np.ones(mesh.n_cells) - ) - survey = nsem.Survey(src) - sim_temp = nsem.Simulation3DPrimarySecondary(survey=survey, mesh=mesh, sigma=1) - - # Create a mock field, - f = sim_temp.fieldsPair(sim_temp) - test_u = np.linspace(1, 2, 2 * mesh.n_edges) + 1j * np.linspace( - -1, 1, 2 * mesh.n_edges - ) - f[src, sim_temp._solutionType] = test_u.reshape(mesh.n_edges, 2) - - v1 = rx1.eval(src, mesh, f) - v2 = rx2.eval(src, mesh, f) - - npt.assert_equal(v1, v2) - - if rx_component == "real": - # do a quick test here that calling eval on rx1 is the same as calling - # eval on rx2 with a complex component - rx2.component = "complex" - with pytest.warns(FutureWarning, match="Calling with return_complex=True.*"): - v1 = rx1.eval(src, mesh, f, return_complex=True) - v2 = rx2.eval(src, mesh, f) - - # assert it reset - assert rx1.component == "real" - # assert the outputs are the same - npt.assert_equal(v1, v2) - - -@pytest.mark.parametrize("rx_component", ["real", "imag", "complex"]) -def test_tip_consistent_eval(tipper_pairs, rx_component): - rx1, rx2 = tipper_pairs - rx1.component = rx_component - rx2.component = rx_component - # test that the output of the function eval returns the same thing, - # since it was updated... - mesh = discretize.TensorMesh([3, 4, 5], origin="CCC") - - # create a mock simulation - src = nsem.sources.PlanewaveXYPrimary( - [rx1, rx2], frequency=10, sigma_primary=np.ones(mesh.n_cells) - ) - survey = nsem.Survey(src) - sim_temp = nsem.Simulation3DPrimarySecondary(survey=survey, mesh=mesh, sigma=1) - - # Create a mock field, - f = sim_temp.fieldsPair(sim_temp) - test_u = np.linspace(1, 2, 2 * mesh.n_edges) + 1j * np.linspace( - -1, 1, 2 * mesh.n_edges - ) - f[src, sim_temp._solutionType] = test_u.reshape(mesh.n_edges, 2) - - v1 = rx1.eval(src, mesh, f) - v2 = rx2.eval(src, mesh, f) - - npt.assert_equal(v1, v2) - - if rx_component == "real": - # do a quick test here that calling eval on rx1 is the same as calling - # eval on rx2 with a complex component - rx2.component = "complex" - with pytest.warns(FutureWarning, match="Calling with return_complex=True.*"): - v1 = rx1.eval(src, mesh, f, return_complex=True) - v2 = rx2.eval(src, mesh, f) - - # assert it reset - assert rx1.component == "real" - # assert the outputs are the same - npt.assert_equal(v1, v2) - - -def test_imp_location_initialization(): - loc_1 = np.empty((2, 3)) - loc_2 = np.empty((2, 3)) - with pytest.raises(TypeError, match="Cannot pass both locations and .*"): - nsem.receivers.PointNaturalSource(locations=loc_1, locations_h=loc_2) - - with pytest.raises(TypeError, match="Either locations or both locations_e.*"): - nsem.receivers.PointNaturalSource(locations_e=loc_1) - - rx1 = nsem.receivers.PointNaturalSource(locations=[loc_1]) - rx2 = nsem.receivers.Impedance(loc_1) - npt.assert_equal(rx1.locations, rx2.locations_e) - npt.assert_equal(rx1.locations, rx2.locations_h) - - rx1 = nsem.receivers.PointNaturalSource(locations=[loc_1, loc_2]) - rx2 = nsem.receivers.Impedance(loc_1, loc_2) - npt.assert_equal(rx1.locations_e, rx2.locations_e) - npt.assert_equal(rx1.locations_h, rx2.locations_h) - - with pytest.raises(ValueError, match="incorrect size of list, must be length .*"): - nsem.receivers.PointNaturalSource(locations=[loc_1, loc_2, loc_1]) - - -def test_tip_location_initialization(): - loc_1 = np.empty((2, 3)) - loc_2 = np.empty((2, 3)) - with 
pytest.warns(UserWarning, match="locations_e and locations_h are unused.*"): - nsem.receivers.Point3DTipper(locations=loc_1, locations_e=loc_2) - - with pytest.raises( - ValueError, match="incorrect size of list, must be length of 1 or 2" - ): - nsem.receivers.Point3DTipper([loc_1, loc_1, loc_1]) diff --git a/tests/em/static/test_SPjvecjtvecadj.py b/tests/em/static/test_SPjvecjtvecadj.py index 94fb2b86a4..583d530c6d 100644 --- a/tests/em/static/test_SPjvecjtvecadj.py +++ b/tests/em/static/test_SPjvecjtvecadj.py @@ -123,25 +123,8 @@ def test_clears(): def test_deprecations(): """ - Test warning after importing deprecated `spontaneous_potential` module + Test error after importing deprecated `spontaneous_potential` module """ - msg = ( - "The 'spontaneous_potential' module has been renamed to 'self_potential'. " - "Please use the 'self_potential' module instead. " - "The 'spontaneous_potential' module will be removed in SimPEG 0.23." - ) - with pytest.warns(FutureWarning, match=msg): + msg = "The 'spontaneous_potential' module has been moved to 'self_potential'" + with pytest.raises(ImportError, match=msg): import simpeg.electromagnetics.static.spontaneous_potential # noqa: F401 - - -def test_imported_objects_on_deprecated_module(): - """ - Test if the new `self_potential` module and the deprecated `spontaneous - potential` have the same members. - """ - import simpeg.electromagnetics.static.spontaneous_potential as spontaneous - - members_self = set([m for m in dir(sp) if not m.startswith("_")]) - members_spontaneous = set([m for m in dir(spontaneous) if not m.startswith("_")]) - difference = members_self - members_spontaneous - assert not difference diff --git a/tests/em/static/test_dc_survey.py b/tests/em/static/test_dc_survey.py index f7b88754db..7087cce873 100644 --- a/tests/em/static/test_dc_survey.py +++ b/tests/em/static/test_dc_survey.py @@ -16,25 +16,23 @@ class TestRemovedSourceType: Tests after removing the source_type argument and property. """ - def test_warning_after_argument(self): + def test_error_after_argument(self): """ - Test warning after passing source_type as argument to the constructor. + Test error after passing ``source_type`` as argument to the constructor. """ - msg = "Argument 'survey_type' is ignored and will be removed in future" - with pytest.warns(FutureWarning, match=msg): - survey = Survey(source_list=[], survey_type="dipole-dipole") - # Check if the object doesn't have a `_survey_type` attribute - assert not hasattr(survey, "_survey_type") + msg = "Argument 'survey_type' has been removed" + with pytest.raises(TypeError, match=msg): + Survey(source_list=[], survey_type="dipole-dipole") - def test_warning_removed_property(self): + def test_error_removed_property(self): """ - Test if warning is raised when accessing the survey_type property. + Test if error is raised when accessing the ``survey_type`` property. """ survey = Survey(source_list=[]) - msg = "Property 'survey_type' has been removed." - with pytest.warns(FutureWarning, match=msg): + msg = "'survey_type' has been removed." + with pytest.raises(AttributeError, match=msg): survey.survey_type - with pytest.warns(FutureWarning, match=msg): + with pytest.raises(AttributeError, match=msg): survey.survey_type = "dipole-dipole" @@ -52,7 +50,7 @@ def test_error(self, mesh): Test if error is raised after passing ``ind_active`` as argument. 
""" survey = Survey(source_list=[]) - msg = "'ind_active' has been deprecated and will be removed in " + msg = "got an unexpected keyword argument 'ind_active'" active_cells = np.ones(mesh.n_cells, dtype=bool) with pytest.raises(TypeError, match=msg): survey.drape_electrodes_on_topography( diff --git a/tests/em/static/test_model_assignment.py b/tests/em/static/test_model_assignment.py new file mode 100644 index 0000000000..cc21f59bb8 --- /dev/null +++ b/tests/em/static/test_model_assignment.py @@ -0,0 +1,180 @@ +""" +Test model assignment to simulation classes + +Test if the `getJ` method of a few static EM simulations updates the `model`. +These tests have been added as part of the bugfix in #1361. +""" + +import pytest +import numpy as np + +from discretize import TensorMesh +from simpeg import utils +from simpeg.maps import IdentityMap, Wires +from simpeg.electromagnetics import resistivity as dc +from simpeg.electromagnetics import spectral_induced_polarization as sip +from simpeg.electromagnetics.static.utils import generate_dcip_sources_line + + +class TestDCSimulations: + @pytest.fixture + def mesh_3d(self): + """Sample mesh.""" + cell_size = 0.5 + npad = 2 + hx = [(cell_size, npad, -1.5), (cell_size, 10), (cell_size, npad, 1.5)] + hy = [(cell_size, npad, -1.5), (cell_size, 10), (cell_size, npad, 1.5)] + hz = [(cell_size, npad, -1.5), (cell_size, 10), (cell_size, npad, 1.5)] + mesh = TensorMesh([hx, hy, hz], x0="CCC") + return mesh + + @pytest.fixture + def survey_3d(self, mesh_3d): + """Sample survey.""" + xmin, xmax = mesh_3d.nodes_x.min(), mesh_3d.nodes_x.max() + ymin, ymax = mesh_3d.nodes_y.min(), mesh_3d.nodes_y.max() + x = mesh_3d.nodes_x[(mesh_3d.nodes_x > xmin) & (mesh_3d.nodes_x < xmax)] + y = mesh_3d.nodes_y[(mesh_3d.nodes_y > ymin) & (mesh_3d.nodes_y < ymax)] + + Aloc = np.r_[1.25, 0.0, 0.0] + Bloc = np.r_[-1.25, 0.0, 0.0] + M = utils.ndgrid(x - 1.0, y, np.r_[0.0]) + N = utils.ndgrid(x + 1.0, y, np.r_[0.0]) + rx = dc.receivers.Dipole(M, N) + src = dc.sources.Dipole([rx], Aloc, Bloc) + survey = dc.survey.Survey([src]) + return survey + + @pytest.fixture + def mesh_2d(self): + """Sample mesh.""" + cell_size = 0.5 + width = 10.0 + hx = [ + (cell_size, 10, -1.3), + (cell_size, width / cell_size), + (cell_size, 10, 1.3), + ] + hy = [(cell_size, 3, -1.3), (cell_size, 3, 1.3)] + mesh = TensorMesh([hx, hy], "CN") + return mesh + + @pytest.fixture + def survey_2d(self, mesh_2d): + """Sample survey.""" + survey_end_points = np.array([-5.0, 5.0, 0, 0]) + + source_list = generate_dcip_sources_line( + "dipole-dipole", "volt", "2D", survey_end_points, 0.0, 5, 2.5 + ) + survey = dc.survey.Survey(source_list) + return survey + + @pytest.mark.parametrize( + "simulation_class", + (dc.simulation.Simulation3DNodal, dc.simulation.Simulation3DCellCentered), + ) + @pytest.mark.parametrize("storeJ", [True, False]) + def test_simulation_3d(self, mesh_3d, survey_3d, simulation_class, storeJ): + """ + Test model assignment on the ``getJ`` method of 3d simulations + """ + mapping = IdentityMap(mesh_3d) + simulation = simulation_class( + mesh=mesh_3d, survey=survey_3d, sigmaMap=mapping, storeJ=storeJ + ) + model_1 = np.ones(mesh_3d.nC) * 1e-2 + model_2 = np.ones(mesh_3d.nC) * 1e-1 + # Call `getJ` passing a model and check if it was correctly assigned + j_1 = simulation.getJ(model_1) + assert model_1 is simulation.model + # Call `getJ` passing a different model and check if it was correctly assigned + j_2 = simulation.getJ(model_2) + assert model_2 is simulation.model + # Check if the two Js are 
different + assert not np.allclose(j_1, j_2) + + @pytest.mark.parametrize( + "simulation_class", + (dc.simulation_2d.Simulation2DNodal, dc.simulation_2d.Simulation2DCellCentered), + ) + @pytest.mark.parametrize("storeJ", [True, False]) + def test_simulation_2d(self, mesh_2d, survey_2d, simulation_class, storeJ): + """ + Test model assignment on the ``getJ`` method of 2d simulations + """ + mapping = IdentityMap(mesh_2d) + simulation = simulation_class( + mesh=mesh_2d, survey=survey_2d, sigmaMap=mapping, storeJ=storeJ + ) + model_1 = np.ones(mesh_2d.nC) * 1e-2 + model_2 = np.ones(mesh_2d.nC) * 1e-1 + # Call `getJ` passing a model and check if it was correctly assigned + j_1 = simulation.getJ(model_1) + assert model_1 is simulation.model + # Call `getJ` passing a different model and check if it was correctly assigned + j_2 = simulation.getJ(model_2) + assert model_2 is simulation.model + # Check if the two Js are different + assert not np.allclose(j_1, j_2) + + +class TestSIPSimulations: + @pytest.fixture + def mesh_3d(self): + """Sample mesh.""" + cs = 25.0 + hx = [(cs, 0, -1.3), (cs, 21), (cs, 0, 1.3)] + hy = [(cs, 0, -1.3), (cs, 21), (cs, 0, 1.3)] + hz = [(cs, 0, -1.3), (cs, 20)] + mesh = TensorMesh([hx, hy, hz], x0="CCN") + return mesh + + @pytest.fixture + def survey_3d(self, mesh_3d): + """Sample survey.""" + x = mesh_3d.cell_centers_x[ + (mesh_3d.cell_centers_x > -155.0) & (mesh_3d.cell_centers_x < 155.0) + ] + y = mesh_3d.cell_centers_y[ + (mesh_3d.cell_centers_y > -155.0) & (mesh_3d.cell_centers_y < 155.0) + ] + Aloc = np.r_[-200.0, 0.0, 0.0] + Bloc = np.r_[200.0, 0.0, 0.0] + M = utils.ndgrid(x - 25.0, y, np.r_[0.0]) + + times = np.arange(10) * 1e-3 + 1e-3 + rx = sip.receivers.Pole(M, times) + src = sip.sources.Dipole([rx], Aloc, Bloc) + survey = sip.Survey([src]) + return survey + + @pytest.mark.xfail( + reason=( + "SIP simulation requires some care to pass this test. " + "See #1361 for more details." + ) + ) + def test_simulation_3d(self, mesh_3d, survey_3d): + """ + Test model assignment on the ``getJ`` method of 3d simulations + """ + wires = Wires(("eta", mesh_3d.nC), ("taui", mesh_3d.nC)) + sigma = np.ones(mesh_3d.nC) * 1e-2 + simulation = sip.Simulation3DNodal( + mesh_3d, + sigma=sigma, + survey=survey_3d, + etaMap=wires.eta, + tauiMap=wires.taui, + ) + model_1 = np.r_[sigma, 1.0 / sigma] + model_2 = np.r_[sigma * 2, 1.0 / sigma] + # Call `getJ` passing a model and check if it was correctly assigned + j_1 = simulation.getJ(model_1) + assert model_1 is simulation.model + # Call `getJ` passing a different model and check if it was correctly assigned + j_2 = simulation.getJ(model_2) + assert model_2 is simulation.model + # Check if the two Js are different + assert not np.allclose(j_1, j_2) diff --git a/tests/em/static/test_sip_survey.py b/tests/em/static/test_sip_survey.py index ff3ecc3b51..c39523c0de 100644 --- a/tests/em/static/test_sip_survey.py +++ b/tests/em/static/test_sip_survey.py @@ -12,23 +12,21 @@ class TestRemovedSourceType: Tests after removing the source_type argument and property. """ - def test_warning_after_argument(self): + def test_error_after_argument(self): """ - Test warning after passing source_type as argument to the constructor. + Test error after passing ``source_type`` as argument to the constructor. 
""" - msg = "Argument 'survey_type' is ignored and will be removed in future" - with pytest.warns(FutureWarning, match=msg): - survey = Survey(source_list=[], survey_type="dipole-dipole") - # Check if the object doesn't have a `_survey_type` attribute - assert not hasattr(survey, "_survey_type") + msg = "Argument 'survey_type' has been removed" + with pytest.raises(TypeError, match=msg): + Survey(source_list=[], survey_type="dipole-dipole") - def test_warning_removed_property(self): + def test_error_removed_property(self): """ - Test if warning is raised when accessing the survey_type property. + Test if error is raised when accessing the ``survey_type`` property. """ survey = Survey(source_list=[]) - msg = "Property 'survey_type' has been removed." - with pytest.warns(FutureWarning, match=msg): + msg = "'survey_type' has been removed." + with pytest.raises(AttributeError, match=msg): survey.survey_type - with pytest.warns(FutureWarning, match=msg): + with pytest.raises(AttributeError, match=msg): survey.survey_type = "dipole-dipole" diff --git a/tests/em/static/test_spectral_ip_mappings.py b/tests/em/static/test_spectral_ip_mappings.py index 99f244a2c0..f82379c90c 100644 --- a/tests/em/static/test_spectral_ip_mappings.py +++ b/tests/em/static/test_spectral_ip_mappings.py @@ -29,35 +29,13 @@ def active_cells(self, mesh): active_cells[0] = False return active_cells - def get_message_duplicated_error(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"Cannot pass both '{new_name}' and '{old_name}'." - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - - def get_message_deprecated_warning(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - - def test_warning_argument(self, mesh, active_cells): + def test_error_argument(self, mesh, active_cells): """ - Test if warning is raised after passing ``indActive`` as argument. + Test if error is raised after passing ``indActive`` as argument. """ - msg = self.get_message_deprecated_warning(self.OLD_NAME, self.NEW_NAME) - with pytest.warns(FutureWarning, match=msg): - spectral_ip_mappings(mesh, indActive=active_cells) - - def test_error_duplicated_argument(self, mesh, active_cells): - """ - Test error after passing ``indActive`` and ``active_cells`` as arguments. - """ - msg = self.get_message_duplicated_error(self.OLD_NAME, self.NEW_NAME) + msg = ( + "'indActive' was removed in SimPEG v0.24.0, " + "please use 'active_cells' instead." + ) with pytest.raises(TypeError, match=msg): - spectral_ip_mappings( - mesh, active_cells=active_cells, indActive=active_cells - ) + spectral_ip_mappings(mesh, indActive=active_cells) diff --git a/tests/em/static/test_static_utils.py b/tests/em/static/test_static_utils.py index b02489fac6..c7213d6236 100644 --- a/tests/em/static/test_static_utils.py +++ b/tests/em/static/test_static_utils.py @@ -32,35 +32,10 @@ def active_cells(self, mesh): active_cells[0] = False return active_cells - def get_message_duplicated_error(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"Cannot pass both '{new_name}' and '{old_name}'." - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." 
- ) - return msg - - def get_message_deprecated_warning(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - - def test_warning_argument(self, mesh, points, active_cells): + def test_error_argument(self, mesh, points, active_cells): """ - Test if warning is raised after passing ``ind_active`` as argument. + Test if error is raised after passing ``ind_active`` as argument. """ - msg = self.get_message_deprecated_warning(self.OLD_NAME, self.NEW_NAME) - with pytest.warns(FutureWarning, match=msg): - drapeTopotoLoc(mesh, points, ind_active=active_cells) - - def test_error_duplicated_argument(self, mesh, points, active_cells): - """ - Test error after passing ``ind_active`` and ``active_cells`` as arguments. - """ - msg = self.get_message_duplicated_error(self.OLD_NAME, self.NEW_NAME) + msg = "Unsupported keyword argument ind_active" with pytest.raises(TypeError, match=msg): - drapeTopotoLoc( - mesh, points, active_cells=active_cells, ind_active=active_cells - ) + drapeTopotoLoc(mesh, points, ind_active=active_cells) diff --git a/tests/em/tdem/test_TDEM_dipolar_sources.py b/tests/em/tdem/test_TDEM_dipolar_sources.py new file mode 100644 index 0000000000..58dfd00981 --- /dev/null +++ b/tests/em/tdem/test_TDEM_dipolar_sources.py @@ -0,0 +1,110 @@ +from discretize import TensorMesh + +from simpeg import maps +import simpeg.electromagnetics.time_domain as tdem + +import numpy as np + +from simpeg.utils.solver_utils import get_default_solver + +Solver = get_default_solver() + +TOL = 1e-2 # relative tolerance + +# Observation times for response (time channels) +n_times = 30 +time_channels = np.logspace(-4, -1, n_times) + +# Defining transmitter locations +source_locations = np.r_[0, 0, 15.5] +receiver_locations = np.atleast_2d(np.r_[0, 0, 15.5]) + + +def create_survey(src_type="MagDipole"): + + bz_receiver = tdem.receivers.PointMagneticFluxDensity( + receiver_locations, time_channels, "z" + ) + dbdtz_receiver = tdem.receivers.PointMagneticFluxTimeDerivative( + receiver_locations, time_channels, "z" + ) + receivers_list = [bz_receiver, dbdtz_receiver] + + source_list = [ + getattr(tdem.sources, src_type)( + receivers_list, + location=source_locations, + waveform=tdem.sources.StepOffWaveform(), + moment=1.0, + orientation="z", + ) + ] + survey = tdem.Survey(source_list) + return survey + + +def test_BH_dipole(): + survey_b = create_survey() + survey_h = create_survey() + + cell_size = 20 + n_core = 10 + padding_factor = 1.3 + n_padding = 15 + + h = [ + (cell_size, n_padding, -padding_factor), + (cell_size, n_core), + (cell_size, n_padding, padding_factor), + ] + mesh = TensorMesh([h, h, h], origin="CCC") + + air_conductivity = 1e-8 + background_conductivity = 1e-1 + + model = air_conductivity * np.ones(mesh.n_cells) + model[mesh.cell_centers[:, 2] < 0] = background_conductivity + + nsteps = 10 + time_steps = [ + (1e-5, nsteps), + (3e-5, nsteps), + (1e-4, nsteps), + (3e-4, nsteps), + (1e-3, nsteps), + (3e-3, nsteps), + (1e-2, nsteps - 4), + ] + + simulation_b = tdem.simulation.Simulation3DMagneticFluxDensity( + mesh, + survey=survey_b, + sigmaMap=maps.IdentityMap(), + solver=Solver, + time_steps=time_steps, + ) + + simulation_h = tdem.simulation.Simulation3DMagneticField( + mesh, + survey=survey_h, + sigmaMap=maps.IdentityMap(), + solver=Solver, + time_steps=time_steps, + ) + + fields_b = simulation_b.fields(model) + dpred_b = simulation_b.dpred(model, f=fields_b) 
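+
+    # Repeat the forward computation with the magnetic field (H) formulation.
+    # Both discretizations should agree on b and db/dt to within the relative
+    # tolerance TOL.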
+
+    fields_h = simulation_h.fields(model)
+    dpred_h = simulation_h.dpred(model, f=fields_h)
+
+    assert (
+        np.abs(
+            np.mean(dpred_b[: len(time_channels)] / dpred_h[: len(time_channels)]) - 1
+        )
+        < TOL
+    )
+    assert (
+        np.abs(np.mean(dpred_b[len(time_channels) :] / dpred_h[len(time_channels) :]) - 1)
+        < TOL
+    )
diff --git a/tests/em/tdem/test_TDEM_sources.py b/tests/em/tdem/test_TDEM_sources.py
index e46dbdc9e2..6c7d248543 100644
--- a/tests/em/tdem/test_TDEM_sources.py
+++ b/tests/em/tdem/test_TDEM_sources.py
@@ -1,4 +1,3 @@
-import pytest
 import unittest
 
 import numpy as np
@@ -6,7 +5,6 @@
 from discretize.tests import check_derivative
 from numpy.testing import assert_array_almost_equal
 from simpeg.electromagnetics.time_domain.sources import (
-    CircularLoop,
     ExponentialWaveform,
     HalfSineWaveform,
     PiecewiseLinearWaveform,
@@ -526,19 +524,3 @@ def f(t):
 def test_simple_source():
     waveform = StepOffWaveform()
     assert waveform.eval(0.0) == 1.0
-
-
-def test_removal_circular_loop_n():
-    """
-    Test if passing the N argument to CircularLoop raises an error
-    """
-    msg = "'N' property has been removed. Please use 'n_turns'."
-    with pytest.raises(TypeError, match=msg):
-        CircularLoop(
-            [],
-            waveform=StepOffWaveform(),
-            location=np.array([0.0, 0.0, 0.0]),
-            radius=1.0,
-            current=0.5,
-            N=2,
-        )
diff --git a/tests/em/vrm/test_vrmfwd.py b/tests/em/vrm/test_vrmfwd.py
index 772867faa6..70756dfaa3 100644
--- a/tests/em/vrm/test_vrmfwd.py
+++ b/tests/em/vrm/test_vrmfwd.py
@@ -547,64 +547,40 @@ def active_cells(self, mesh):
         active_cells[0] = False
         return active_cells
 
-    def get_message_duplicated_error(self, old_name, new_name, version="v0.24.0"):
-        msg = (
-            f"Cannot pass both '{new_name}' and '{old_name}'."
-            f"'{old_name}' has been deprecated and will be removed in "
-            f" SimPEG {version}, please use '{new_name}' instead."
-        )
-        return msg
-
-    def get_message_deprecated_warning(self, old_name, new_name, version="v0.24.0"):
-        msg = (
-            f"'{old_name}' has been deprecated and will be removed in "
-            f" SimPEG {version}, please use '{new_name}' instead."
-        )
-        return msg
-
     @pytest.mark.parametrize("simulation", CLASSES)
-    def test_warning_argument(self, mesh, active_cells, simulation):
+    def test_error_argument(self, mesh, active_cells, simulation):
         """
-        Test if warning is raised after passing ``indActive`` to the constructor.
+        Test if error is raised after passing ``indActive`` to the constructor.
         """
-        msg = self.get_message_deprecated_warning(self.OLD_NAME, self.NEW_NAME)
-        with pytest.warns(FutureWarning, match=msg):
-            sim = simulation(mesh, indActive=active_cells)
-        np.testing.assert_allclose(sim.active_cells, active_cells)
-
-    @pytest.mark.parametrize("simulation", CLASSES)
-    def test_error_duplicated_argument(self, mesh, active_cells, simulation):
-        """
-        Test error after passing ``indActive`` and ``active_cells`` to the constructor.
-        """
-        msg = self.get_message_duplicated_error(self.OLD_NAME, self.NEW_NAME)
+        msg = (
+            "'indActive' was removed in SimPEG v0.24.0, "
+            "please use 'active_cells' instead."
+        )
         with pytest.raises(TypeError, match=msg):
-            simulation(mesh, active_cells=active_cells, indActive=active_cells)
+            simulation(mesh, indActive=active_cells)
 
     @pytest.mark.parametrize("simulation", CLASSES)
-    def test_warning_accessing_property(self, mesh, active_cells, simulation):
+    def test_error_accessing_property(self, mesh, active_cells, simulation):
         """
-        Test warning when trying to access the ``indActive`` property.
+        Test error when trying to access the ``indActive`` property.
""" sim = simulation(mesh, active_cells=active_cells) - msg = f"{self.OLD_NAME} has been deprecated, please use {self.NEW_NAME}" - with pytest.warns(FutureWarning, match=msg): - old_ind_active = sim.indActive - np.testing.assert_allclose(sim.active_cells, old_ind_active) + msg = f"{self.OLD_NAME} has been removed, please use {self.NEW_NAME}" + with pytest.raises(NotImplementedError, match=msg): + sim.indActive @pytest.mark.parametrize("simulation", CLASSES) - def test_warning_setter(self, mesh, active_cells, simulation): + def test_error_setter(self, mesh, active_cells, simulation): """ - Test warning when trying to set the ``indActive`` property. + Test error when trying to set the ``indActive`` property. """ sim = simulation(mesh, active_cells=active_cells) # Define new active cells to pass to the setter new_active_cells = active_cells.copy() new_active_cells[-4:] = False - msg = f"{self.OLD_NAME} has been deprecated, please use {self.NEW_NAME}" - with pytest.warns(FutureWarning, match=msg): + msg = f"{self.OLD_NAME} has been removed, please use {self.NEW_NAME}" + with pytest.raises(NotImplementedError, match=msg): sim.indActive = new_active_cells - np.testing.assert_allclose(sim.active_cells, new_active_cells) if __name__ == "__main__": diff --git a/tests/flow/test_Richards.py b/tests/flow/test_Richards.py index 5cab334437..d234cc82f4 100644 --- a/tests/flow/test_Richards.py +++ b/tests/flow/test_Richards.py @@ -34,7 +34,6 @@ def setUp(self): hydraulic_conductivity=k_fun, water_retention=theta_fun, root_finder_tol=1e-6, - debug=False, boundary_conditions=bc, initial_conditions=h, do_newton=False, diff --git a/tests/pf/test_base_pf_simulation.py b/tests/pf/test_base_pf_simulation.py index fc05eafdca..94f3a0efe1 100644 --- a/tests/pf/test_base_pf_simulation.py +++ b/tests/pf/test_base_pf_simulation.py @@ -309,40 +309,25 @@ def test_invalid_mesh_type(self, mock_simulation_class): mock_simulation_class(CylindricalMesh(h)) -class TestDeprecationIndActive: +class TestRemovedIndActive: """ - Test if using the deprecated ind_active argument/property raise warnings/errors + Test if using the removed ``ind_active`` argument/property raise errors. """ - def test_deprecated_argument(self, tensor_mesh, mock_simulation_class): - """Test if passing ind_active argument raises warning.""" + def test_removed_argument(self, tensor_mesh, mock_simulation_class): + """Test if passing ind_active argument raises error.""" ind_active = np.ones(tensor_mesh.n_cells, dtype=bool) - version_regex = "v[0-9]+.[0-9]+.[0-9]+" msg = ( - "'ind_active' has been deprecated and will be removed in " - f" SimPEG {version_regex}, please use 'active_cells' instead." - ) - with pytest.warns(FutureWarning, match=msg): - sim = mock_simulation_class(tensor_mesh, ind_active=ind_active) - np.testing.assert_allclose(sim.active_cells, ind_active) - - def test_error_both_args(self, tensor_mesh, mock_simulation_class): - """Test if passing both ind_active and active_cells raises error.""" - ind_active = np.ones(tensor_mesh.n_cells, dtype=bool) - version_regex = "v[0-9]+.[0-9]+.[0-9]+" - msg = ( - f"Cannot pass both 'active_cells' and 'ind_active'." - "'ind_active' has been deprecated and will be removed in " - f" SimPEG {version_regex}, please use 'active_cells' instead." + "'ind_active' has been removed in " + "SimPEG v0.24.0, please use 'active_cells' instead." 
         )
         with pytest.raises(TypeError, match=msg):
-            mock_simulation_class(
-                tensor_mesh, active_cells=ind_active, ind_active=ind_active
-            )
+            mock_simulation_class(tensor_mesh, ind_active=ind_active)
 
-    def test_deprecated_property(self, tensor_mesh, mock_simulation_class):
-        """Test if passing both ind_active and active_cells raises error."""
+    def test_removed_property(self, tensor_mesh, mock_simulation_class):
+        """Test if accessing the ind_active property raises an error."""
         ind_active = np.ones(tensor_mesh.n_cells, dtype=bool)
         simulation = mock_simulation_class(tensor_mesh, active_cells=ind_active)
-        with pytest.warns(FutureWarning):
+        msg = "ind_active has been removed, please use active_cells."
+        with pytest.raises(NotImplementedError, match=msg):
             simulation.ind_active
diff --git a/tests/pf/test_components.py b/tests/pf/test_components.py
new file mode 100644
index 0000000000..30c66202ee
--- /dev/null
+++ b/tests/pf/test_components.py
@@ -0,0 +1,55 @@
+"""
+Test how potential field surveys and simulations access receiver components.
+"""
+
+import re
+import pytest
+import numpy as np
+
+import discretize
+from simpeg.potential_fields import gravity, magnetics
+
+
+@pytest.fixture
+def receiver_locations():
+    x = np.linspace(-20.0, 20.0, 4)
+    x, y = np.meshgrid(x, x)
+    z = 5.0 * np.ones_like(x)
+    return np.vstack((x.ravel(), y.ravel(), z.ravel())).T
+
+
+@pytest.fixture
+def mesh():
+    dh = 5.0
+    hx = [(dh, 10)]
+    return discretize.TensorMesh([hx, hx, hx], "CCN")
+
+
+class TestComponentsGravitySurvey:
+
+    def test_deprecated_components(self, receiver_locations):
+        """
+        Test FutureWarning after deprecated ``components`` property.
+        """
+        receivers = gravity.receivers.Point(receiver_locations, components="gz")
+        source_field = gravity.sources.SourceField(receiver_list=[receivers])
+        survey = gravity.survey.Survey(source_field)
+        msg = re.escape("The `components` property is deprecated")
+        with pytest.warns(FutureWarning, match=msg):
+            survey.components
+
+
+class TestComponentsMagneticSurvey:
+
+    def test_deprecated_components(self, receiver_locations):
+        """
+        Test FutureWarning after deprecated ``components`` property.
+        """
+        receivers = magnetics.receivers.Point(receiver_locations, components="tmi")
+        source_field = magnetics.sources.UniformBackgroundField(
+            receiver_list=[receivers], amplitude=55_000, inclination=12, declination=35
+        )
+        survey = magnetics.survey.Survey(source_field)
+        msg = re.escape("The `components` property is deprecated")
+        with pytest.warns(FutureWarning, match=msg):
+            survey.components
diff --git a/tests/pf/test_equivalent_sources.py b/tests/pf/test_equivalent_sources.py
index 84339d8db4..0454baedcf 100644
--- a/tests/pf/test_equivalent_sources.py
+++ b/tests/pf/test_equivalent_sources.py
@@ -25,6 +25,13 @@
     "tmi_z",
 ]
 
+# Define a pytest.mark.xfail to use for engine parametrizations when the method
+# being tested is not implemented for the geoana engine.
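+# Example: @pytest.mark.parametrize("engine", ["choclo", XFAIL_GEOANA])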
+XFAIL_GEOANA = pytest.param( + "geoana", + marks=pytest.mark.xfail(reason="not implemented", raises=NotImplementedError), +) + def create_grid(x_range, y_range, size): """Create a 2D horizontal coordinates grid.""" @@ -457,15 +464,149 @@ def test_forward_choclo_serial_parallel( np.testing.assert_allclose(sim_parallel.dpred(model), sim_serial.dpred(model)) +@pytest.mark.parametrize("parallel", [True, False], ids=["parallel", "serial"]) +@pytest.mark.parametrize("components", [*GRAVITY_COMPONENTS, ["gz", "gzz"]]) +@pytest.mark.parametrize("engine", ["choclo", XFAIL_GEOANA]) +class TestGravityEquivalentSourcesForwardOnly: + """ + Test gravity equivalent sources methods without building the sensitivity matrix. + """ + + def test_Jvec( + self, + coordinates, + tensor_mesh, + mesh_bottom, + mesh_top, + components, + engine, + parallel, + ): + """ + Test Jvec with "forward_only" vs. J @ v with J stored in ram. + """ + # Build survey + gravity_survey = build_gravity_survey(coordinates, components=components) + # Build simulations + mapping = get_mapping(tensor_mesh) + eqs_ram, eqs_forward_only = ( + gravity.SimulationEquivalentSourceLayer( + mesh=tensor_mesh, + cell_z_top=mesh_top, + cell_z_bottom=mesh_bottom, + survey=gravity_survey, + rhoMap=mapping, + engine=engine, + store_sensitivities=store, + numba_parallel=parallel, + ) + for store in ("ram", "forward_only") + ) + # Compare predictions of both simulations + model = get_block_model(tensor_mesh, 2.67) + vector = np.random.default_rng(seed=42).uniform(size=model.size) + expected = eqs_ram.getJ(model) @ vector + atol = np.max(np.abs(expected)) * 1e-7 + # Test Jvec + np.testing.assert_allclose( + expected, eqs_forward_only.Jvec(model, vector), atol=atol + ) + # Test getJ + np.testing.assert_allclose( + expected, eqs_forward_only.getJ(model) @ vector, atol=atol + ) + + def test_Jtvec( + self, + coordinates, + tensor_mesh, + mesh_bottom, + mesh_top, + components, + engine, + parallel, + ): + """ + Test Jtvec with "forward_only" vs. J.T @ v with J stored in ram. + """ + # Build survey + gravity_survey = build_gravity_survey(coordinates, components=components) + # Build simulations + mapping = get_mapping(tensor_mesh) + eqs_ram, eqs_forward_only = ( + gravity.SimulationEquivalentSourceLayer( + mesh=tensor_mesh, + cell_z_top=mesh_top, + cell_z_bottom=mesh_bottom, + survey=gravity_survey, + rhoMap=mapping, + engine=engine, + store_sensitivities=store, + numba_parallel=parallel, + ) + for store in ("ram", "forward_only") + ) + # Compare predictions of both simulations + model = get_block_model(tensor_mesh, 2.67) + vector = np.random.default_rng(seed=42).uniform(size=gravity_survey.nD) + expected = eqs_ram.getJ(model).T @ vector + atol = np.max(np.abs(expected)) * 1e-7 + # Test Jtvec + np.testing.assert_allclose( + expected, eqs_forward_only.Jtvec(model, vector), atol=atol + ) + # Test getJ + np.testing.assert_allclose( + expected, eqs_forward_only.getJ(model).T @ vector, atol=atol + ) + + def test_getJtJdiag( + self, + coordinates, + tensor_mesh, + mesh_bottom, + mesh_top, + components, + engine, + parallel, + ): + """ + Test the ``getJtJdiag`` method, comparing forward_only with storing J in memory. 
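+
+        Both storage modes should yield the same diagonal of ``J.T @ J``.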
+ """ + # Build survey + gravity_survey = build_gravity_survey(coordinates, components=components) + # Build simulations + mapping = get_mapping(tensor_mesh) + eqs_ram, eqs_forward_only = ( + gravity.SimulationEquivalentSourceLayer( + mesh=tensor_mesh, + cell_z_top=mesh_top, + cell_z_bottom=mesh_bottom, + survey=gravity_survey, + rhoMap=mapping, + engine=engine, + store_sensitivities=store, + numba_parallel=parallel, + ) + for store in ("ram", "forward_only") + ) + # Compare predictions of both simulations + model = get_block_model(tensor_mesh, 2.67) + gtgdiag_ram = eqs_ram.getJtJdiag(model) + gtgdiag_linop = eqs_forward_only.getJtJdiag(model) + atol = np.max(np.abs(gtgdiag_ram)) * 1e-7 + np.testing.assert_allclose(gtgdiag_ram, gtgdiag_linop, atol=atol) + + class TestMagneticEquivalentSourcesForward: """ Test the forward capabilities of the magnetic equivalent sources. """ - @pytest.mark.parametrize("engine", ("geoana", "choclo")) - @pytest.mark.parametrize("store_sensitivities", ("ram", "forward_only")) - @pytest.mark.parametrize("model_type", ("scalar", "vector")) - @pytest.mark.parametrize("components", MAGNETIC_COMPONENTS + [["tmi", "bx"]]) + @pytest.mark.parametrize("engine", ["geoana", "choclo"]) + @pytest.mark.parametrize("store_sensitivities", ["ram", "forward_only"]) + @pytest.mark.parametrize("model_type", ["scalar", "vector"]) + @pytest.mark.parametrize("components", [*MAGNETIC_COMPONENTS, ["tmi", "bx"]]) def test_forward_vs_simulation( self, coordinates, @@ -690,7 +831,7 @@ def build_synthetic_data(self, simulation, model): ) return data - def build_inversion(self, mesh, simulation, synthetic_data): + def build_inversion(self, mesh, simulation, synthetic_data, max_iterations=20): """Build inversion problem.""" # Build data misfit and regularization terms data_misfit = simpeg.data_misfit.L2DataMisfit( @@ -699,6 +840,7 @@ def build_inversion(self, mesh, simulation, synthetic_data): regularization = simpeg.regularization.WeightedLeastSquares(mesh=mesh) # Choose optimization optimization = ProjectedGNCG( + maxIter=max_iterations, maxIterLS=5, maxIterCG=20, tolCG=1e-4, @@ -826,7 +968,9 @@ def test_predictions_on_data_points( model = get_block_model(tree_mesh, 1e-3) synthetic_data = self.build_synthetic_data(simulation, model) # Build inversion - inversion = self.build_inversion(tree_mesh, simulation, synthetic_data) + inversion = self.build_inversion( + tree_mesh, simulation, synthetic_data, max_iterations=40 + ) # Run inversion starting_model = np.zeros(tree_mesh.n_cells) recovered_model = inversion.run(starting_model) @@ -837,3 +981,162 @@ def test_predictions_on_data_points( np.testing.assert_allclose( prediction, synthetic_data.dobs, atol=atol, rtol=rtol ) + + +@pytest.mark.parametrize("parallel", [True, False], ids=["parallel", "serial"]) +@pytest.mark.parametrize("components", [*MAGNETIC_COMPONENTS, ["tmi", "bx"]]) +@pytest.mark.parametrize("engine", ["choclo", XFAIL_GEOANA]) +@pytest.mark.parametrize("model_type", ["scalar", "vector"]) +class TestMagneticEquivalentSourcesForwardOnly: + """ + Test magnetic equivalent sources methods without building the sensitivity matrix. + """ + + def test_Jvec( + self, + coordinates, + tensor_mesh, + mesh_bottom, + mesh_top, + components, + engine, + parallel, + model_type, + ): + """ + Test Jvec with "forward_only" vs. J @ v with J stored in ram. 
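+
+        Covers scalar and vector models through the ``model_type``
+        parametrization.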
+ """ + # Build survey + magnetic_survey = build_magnetic_survey(coordinates, components) + # Define model + model = ( + get_block_model(tensor_mesh, 0.2e-3) + if model_type == "scalar" + else get_block_model(tensor_mesh, (0.2e-3, -0.1e-3, 0.5e-3)) + ) + # Build simulations + mapping = simpeg.maps.IdentityMap(nP=model.size) + eqs_ram, eqs_forward_only = ( + magnetics.SimulationEquivalentSourceLayer( + tensor_mesh, + mesh_top, + mesh_bottom, + survey=magnetic_survey, + chiMap=mapping, + engine=engine, + store_sensitivities=store, + numba_parallel=parallel, + model_type=model_type, + ) + for store in ("ram", "forward_only") + ) + # Compare predictions of both simulations + vector = np.random.default_rng(seed=42).uniform(size=model.size) + expected = eqs_ram.getJ(model) @ vector + atol = np.max(np.abs(expected)) * 1e-7 + # Test Jvec + np.testing.assert_allclose( + expected, eqs_forward_only.Jvec(model, vector), atol=atol + ) + # Test getJ() @ v + jacobian = eqs_forward_only.getJ(model) + np.testing.assert_allclose(expected, jacobian @ vector, atol=atol) + + def test_Jtvec( + self, + coordinates, + tensor_mesh, + mesh_bottom, + mesh_top, + components, + engine, + parallel, + model_type, + ): + """ + Test Jtvec with "forward_only" vs. J.T @ v with J stored in ram. + """ + # Build survey + magnetic_survey = build_magnetic_survey(coordinates, components) + # Define model + model = ( + get_block_model(tensor_mesh, 0.2e-3) + if model_type == "scalar" + else get_block_model(tensor_mesh, (0.2e-3, -0.1e-3, 0.5e-3)) + ) + # Build simulations + mapping = simpeg.maps.IdentityMap(nP=model.size) + eqs_ram, eqs_forward_only = ( + magnetics.SimulationEquivalentSourceLayer( + tensor_mesh, + mesh_top, + mesh_bottom, + survey=magnetic_survey, + chiMap=mapping, + engine=engine, + store_sensitivities=store, + numba_parallel=parallel, + model_type=model_type, + ) + for store in ("ram", "forward_only") + ) + # Compare predictions of both simulations + vector = np.random.default_rng(seed=42).uniform(size=magnetic_survey.nD) + expected = eqs_ram.getJ(model).T @ vector + atol = np.max(np.abs(expected)) * 1e-7 + # Test Jtvec + np.testing.assert_allclose( + expected, eqs_forward_only.Jtvec(model, vector), atol=atol + ) + # Test getJ().T @ v + jacobian = eqs_forward_only.getJ(model) + np.testing.assert_allclose(expected, jacobian.T @ vector, atol=atol) + + def test_getJtJdiag( + self, + coordinates, + tensor_mesh, + mesh_bottom, + mesh_top, + components, + engine, + parallel, + model_type, + ): + """ + Test the ``getJtJdiag`` method, comparing forward_only with storing J in memory. 
+ """ + # Build survey + magnetic_survey = build_magnetic_survey(coordinates, components) + # Define model + model = ( + get_block_model(tensor_mesh, 0.2e-3) + if model_type == "scalar" + else get_block_model(tensor_mesh, (0.2e-3, -0.1e-3, 0.5e-3)) + ) + # Build simulations + mapping = simpeg.maps.IdentityMap(nP=model.size) + eqs_ram, eqs_forward_only = ( + magnetics.SimulationEquivalentSourceLayer( + tensor_mesh, + mesh_top, + mesh_bottom, + survey=magnetic_survey, + chiMap=mapping, + engine=engine, + store_sensitivities=store, + numba_parallel=parallel, + model_type=model_type, + ) + for store in ("ram", "forward_only") + ) + # Compare methods for both simulations + model = ( + get_block_model(tensor_mesh, 0.2e-3) + if model_type == "scalar" + else get_block_model(tensor_mesh, (0.2e-3, -0.1e-3, 0.5e-3)) + ) + gtgdiag_ram = eqs_ram.getJtJdiag(model) + gtgdiag_linop = eqs_forward_only.getJtJdiag(model) + atol = np.max(np.abs(gtgdiag_ram)) * 1e-7 + np.testing.assert_allclose(gtgdiag_ram, gtgdiag_linop, atol=atol) diff --git a/tests/pf/test_forward_Grav_Linear.py b/tests/pf/test_forward_Grav_Linear.py index 063fff9904..e66279e4fc 100644 --- a/tests/pf/test_forward_Grav_Linear.py +++ b/tests/pf/test_forward_Grav_Linear.py @@ -1,10 +1,13 @@ import re import pytest +from scipy.sparse import diags +from scipy.sparse.linalg import LinearOperator, aslinearoperator import discretize import simpeg from simpeg import maps from simpeg.potential_fields import gravity +from simpeg.utils import model_builder from geoana.gravity import Prism import numpy as np @@ -417,6 +420,472 @@ def test_choclo_missing(self, simple_mesh, monkeypatch): gravity.Simulation3DIntegral(simple_mesh, engine="choclo") +class BaseFixtures: + """ + Base test class with some fixtures. + """ + + @pytest.fixture + def survey(self): + # Observation points + x = np.linspace(-20.0, 20.0, 4) + x, y = np.meshgrid(x, x) + z = 5.0 * np.ones_like(x) + coordinates = np.vstack((x.ravel(), y.ravel(), z.ravel())).T + receivers = gravity.receivers.Point(coordinates, components="gz") + source_field = gravity.sources.SourceField(receiver_list=[receivers]) + survey = gravity.survey.Survey(source_field) + return survey + + @pytest.fixture + def mesh(self): + # Mesh + dh = 5.0 + hx = [(dh, 4)] + mesh = discretize.TensorMesh([hx, hx, hx], "CCN") + return mesh + + @pytest.fixture + def densities(self, mesh): + # Define densities + densities = 1e-10 * np.ones(mesh.n_cells) + ind_sphere = model_builder.get_indices_sphere( + np.r_[0.0, 0.0, -20.0], 10.0, mesh.cell_centers + ) + densities[ind_sphere] = 0.2 + return densities + + +class TestJacobianGravity(BaseFixtures): + """ + Test methods related to Jacobian matrix in gravity simulation. + """ + + atol_ratio = 1e-7 + + @pytest.fixture(params=["identity_map", "exp_map"]) + def mapping(self, mesh, request): + mapping = ( + maps.IdentityMap(nP=mesh.n_cells) + if request.param == "identity_map" + else maps.ExpMap(nP=mesh.n_cells) + ) + return mapping + + @pytest.mark.parametrize("engine", ["choclo", "geoana"]) + def test_getJ_as_array(self, survey, mesh, densities, mapping, engine): + """ + Test the getJ method when J is an array in memory. 
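+
+        With an identity mapping, ``getJ`` should return ``G`` itself; with an
+        exp mapping, ``G`` times the mapping derivative.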
+ """ + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities="ram", + engine=engine, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + jac = simulation.getJ(model) + assert isinstance(jac, np.ndarray) + # With an identity mapping, the jacobian should be the same as G. + # With an exp mapping, the jacobian should be G @ the mapping derivative. + expected_jac = ( + simulation.G if is_identity_map else simulation.G @ mapping.deriv(model) + ) + np.testing.assert_allclose(jac, expected_jac) + + @pytest.mark.parametrize("transpose", [False, True], ids=["J @ m", "J.T @ v"]) + def test_getJ_as_linear_operator(self, survey, mesh, densities, mapping, transpose): + """ + Test the getJ method when J is a linear operator. + """ + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities="forward_only", + engine="choclo", + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + jac = simulation.getJ(model) + assert isinstance(jac, LinearOperator) + + if transpose: + vector = np.random.default_rng(seed=42).uniform(size=survey.nD) + result = jac.T @ vector + expected_result = mapping.deriv(model).T @ (simulation.G.T @ vector) + else: + result = jac @ model + expected_result = simulation.G @ (mapping.deriv(model).diagonal() * model) + np.testing.assert_allclose(result, expected_result) + + def test_getJ_as_linear_operator_not_implemented( + self, survey, mesh, densities, mapping + ): + """ + Test getJ raises NotImplementedError when forward only with geoana. + """ + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities="forward_only", + engine="geoana", + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + msg = re.escape( + "Accessing matrix G with " + 'store_sensitivities="forward_only" and engine="geoana" ' + "hasn't been implemented yet." + ) + with pytest.raises(NotImplementedError, match=msg): + simulation.getJ(model) + + @pytest.mark.parametrize( + ("engine", "store_sensitivities"), + [ + ("choclo", "ram"), + ("choclo", "forward_only"), + ("geoana", "ram"), + pytest.param( + "geoana", + "forward_only", + marks=pytest.mark.xfail( + raises=NotImplementedError, reason="not implemented" + ), + ), + ], + ) + def test_Jvec(self, survey, mesh, densities, mapping, engine, store_sensitivities): + """ + Test the Jvec method. 
+ """ + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities=store_sensitivities, + engine=engine, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + vector = np.random.default_rng(seed=42).uniform(size=densities.size) + result = simulation.Jvec(model, vector) + + expected_jac = ( + simulation.G + if is_identity_map + else simulation.G @ aslinearoperator(mapping.deriv(model)) + ) + expected = expected_jac @ vector + + atol = np.max(np.abs(expected)) * self.atol_ratio + np.testing.assert_allclose(result, expected, atol=atol) + + @pytest.mark.parametrize( + ("engine", "store_sensitivities"), + [ + ("choclo", "ram"), + ("choclo", "forward_only"), + ("geoana", "ram"), + pytest.param( + "geoana", + "forward_only", + marks=pytest.mark.xfail( + raises=NotImplementedError, reason="not implemented" + ), + ), + ], + ) + def test_Jtvec(self, survey, mesh, densities, mapping, engine, store_sensitivities): + """ + Test the Jtvec method. + """ + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities=store_sensitivities, + engine=engine, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + vector = np.random.default_rng(seed=42).uniform(size=survey.nD) + result = simulation.Jtvec(model, vector) + + expected_jac = ( + simulation.G + if is_identity_map + else simulation.G @ aslinearoperator(mapping.deriv(model)) + ) + expected = expected_jac.T @ vector + + atol = np.max(np.abs(result)) * self.atol_ratio + np.testing.assert_allclose(result, expected, atol=atol) + + @pytest.mark.parametrize( + "engine", + [ + "choclo", + pytest.param( + "geoana", + marks=pytest.mark.xfail( + raises=NotImplementedError, reason="not implemented" + ), + ), + ], + ) + @pytest.mark.parametrize("method", ["Jvec", "Jtvec"]) + def test_array_vs_linear_operator( + self, survey, mesh, densities, mapping, engine, method + ): + """ + Test methods when using "ram" and "forward_only". + + They should give the same results. + """ + simulation_lo, simulation_ram = ( + gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities=store, + engine=engine, + ) + for store in ("forward_only", "ram") + ) + match method: + case "Jvec": + vector_size = densities.size + case "Jtvec": + vector_size = survey.nD + case _: + raise ValueError(f"Invalid method '{method}'") + + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + vector = np.random.default_rng(seed=42).uniform(size=vector_size) + result_lo = getattr(simulation_lo, method)(model, vector) + result_ram = getattr(simulation_ram, method)(model, vector) + atol = np.max(np.abs(result_ram)) * self.atol_ratio + np.testing.assert_allclose(result_lo, result_ram, atol=atol) + + @pytest.mark.parametrize("engine", ["choclo", "geoana"]) + @pytest.mark.parametrize("weights", [True, False]) + def test_getJtJdiag(self, survey, mesh, densities, mapping, engine, weights): + """ + Test the ``getJtJdiag`` method with G as an array in memory. 
+ """ + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities="ram", + engine=engine, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + kwargs = {} + if weights: + w_matrix = diags(np.random.default_rng(seed=42).uniform(size=survey.nD)) + kwargs = {"W": w_matrix} + jtj_diag = simulation.getJtJdiag(model, **kwargs) + + expected_jac = ( + simulation.G if is_identity_map else simulation.G @ mapping.deriv(model) + ) + if weights: + expected = np.diag(expected_jac.T @ w_matrix.T @ w_matrix @ expected_jac) + else: + expected = np.diag(expected_jac.T @ expected_jac) + + atol = np.max(np.abs(jtj_diag)) * self.atol_ratio + np.testing.assert_allclose(jtj_diag, expected, atol=atol) + + @pytest.mark.parametrize( + "engine", + [ + "choclo", + pytest.param( + "geoana", + marks=pytest.mark.xfail( + raises=NotImplementedError, reason="not implemented" + ), + ), + ], + ) + @pytest.mark.parametrize("weights", [True, False]) + def test_getJtJdiag_forward_only( + self, survey, mesh, densities, mapping, engine, weights + ): + """ + Test the ``getJtJdiag`` method without building G. + """ + simulation, simulation_ram = ( + gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities=store, + engine=engine, + ) + for store in ("forward_only", "ram") + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + kwargs = {} + if weights: + weights = np.random.default_rng(seed=42).uniform(size=survey.nD) + kwargs = {"W": diags(np.sqrt(weights))} + jtj_diag = simulation.getJtJdiag(model, **kwargs) + jtj_diag_ram = simulation_ram.getJtJdiag(model, **kwargs) + + atol = np.max(np.abs(jtj_diag)) * self.atol_ratio + np.testing.assert_allclose(jtj_diag, jtj_diag_ram, atol=atol) + + @pytest.mark.parametrize("engine", ("choclo", "geoana")) + def test_getJtJdiag_caching(self, survey, mesh, densities, mapping, engine): + """ + Test the caching behaviour of the ``getJtJdiag`` method. + """ + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities="ram", + engine=engine, + ) + # Get diagonal of J.T @ J without any weight + is_identity_map = type(mapping) is maps.IdentityMap + model = densities if is_identity_map else np.log(densities) + + jtj_diagonal_1 = simulation.getJtJdiag(model) + assert hasattr(simulation, "_gtg_diagonal") + assert hasattr(simulation, "_weights_sha256") + gtg_diagonal_1 = simulation._gtg_diagonal + weights_sha256_1 = simulation._weights_sha256 + + # Compute it again and make sure we get the same result + np.testing.assert_allclose(jtj_diagonal_1, simulation.getJtJdiag(model)) + + # Get a new diagonal with weights + weights_matrix = diags( + np.random.default_rng(seed=42).uniform(size=simulation.survey.nD) + ) + jtj_diagonal_2 = simulation.getJtJdiag(model, W=weights_matrix) + assert hasattr(simulation, "_gtg_diagonal") + assert hasattr(simulation, "_weights_sha256") + gtg_diagonal_2 = simulation._gtg_diagonal + weights_sha256_2 = simulation._weights_sha256 + + # The two results should be different + assert not np.array_equal(jtj_diagonal_1, jtj_diagonal_2) + assert not np.array_equal(gtg_diagonal_1, gtg_diagonal_2) + assert weights_sha256_1.digest() != weights_sha256_2.digest() + + +class TestGLinearOperator(BaseFixtures): + """ + Test G as a linear operator. 
+ """ + + @pytest.fixture + def mapping(self, mesh): + return maps.IdentityMap(nP=mesh.n_cells) + + def test_not_implemented(self, survey, mesh, mapping): + """ + Test NotImplementedError when using geoana as engine. + """ + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities="forward_only", + engine="geoana", + ) + msg = re.escape( + "Accessing matrix G with " + 'store_sensitivities="forward_only" and engine="geoana" ' + "hasn't been implemented yet." + ) + with pytest.raises(NotImplementedError, match=msg): + simulation.G + + @pytest.mark.parametrize("parallel", [True, False]) + def test_G_dot_m(self, survey, mesh, mapping, densities, parallel): + """Test G @ m.""" + simulation, simulation_ram = ( + gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities=store, + engine="choclo", + numba_parallel=parallel, + ) + for store in ("forward_only", "ram") + ) + assert isinstance(simulation.G, LinearOperator) + assert isinstance(simulation_ram.G, np.ndarray) + np.testing.assert_allclose( + simulation.G @ densities, simulation_ram.G @ densities + ) + + @pytest.mark.parametrize("parallel", [True, False]) + def test_G_t_dot_v(self, survey, mesh, mapping, parallel): + """Test G.T @ v.""" + simulation, simulation_ram = ( + gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities=store, + engine="choclo", + numba_parallel=parallel, + ) + for store in ("forward_only", "ram") + ) + assert isinstance(simulation.G, LinearOperator) + assert isinstance(simulation_ram.G, np.ndarray) + vector = np.random.default_rng(seed=42).uniform(size=survey.nD) + np.testing.assert_allclose(simulation.G.T @ vector, simulation_ram.G.T @ vector) + + +class TestDeprecationWarning(BaseFixtures): + """ + Test warnings after deprecated properties or methods of the simulation class. + """ + + def test_gtg_diagonal(self, survey, mesh): + """Test deprecation warning on gtg_diagonal property.""" + mapping = maps.IdentityMap(nP=mesh.n_cells) + simulation = gravity.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + rhoMap=mapping, + store_sensitivities="ram", + engine="choclo", + ) + msg = re.escape( + "The `gtg_diagonal` property has been deprecated. 
" + "It will be removed in SimPEG v0.25.0.", + ) + with pytest.warns(FutureWarning, match=msg): + simulation.gtg_diagonal + + class TestConversionFactor: """Test _get_conversion_factor function.""" diff --git a/tests/pf/test_forward_Mag_Linear.py b/tests/pf/test_forward_Mag_Linear.py index 596813ff09..45a523cb97 100644 --- a/tests/pf/test_forward_Mag_Linear.py +++ b/tests/pf/test_forward_Mag_Linear.py @@ -5,11 +5,14 @@ import discretize import numpy as np import pytest +from scipy.sparse import diags from geoana.em.static import MagneticPrism from scipy.constants import mu_0 +from scipy.sparse.linalg import LinearOperator, aslinearoperator import simpeg from simpeg import maps, utils +from simpeg.utils import model_builder from simpeg.potential_fields import magnetics as mag @@ -893,7 +896,7 @@ def test_choclo_missing(self, mag_mesh, monkeypatch): def test_removed_modeltype(): - """Test if accesing removed modelType property raises error.""" + """Test if accessing removed modelType property raises error.""" h = [[(2, 2)], [(2, 2)], [(2, 2)]] mesh = discretize.TensorMesh(h) receiver_location = np.array([[0, 0, 100]]) @@ -904,6 +907,848 @@ def test_removed_modeltype(): survey = mag.Survey(background_field) mapping = maps.IdentityMap(mesh, nP=mesh.n_cells) sim = mag.Simulation3DIntegral(mesh, survey=survey, chiMap=mapping) - message = "modelType has been removed, please use model_type." - with pytest.raises(NotImplementedError, match=message): + message = "has no attribute 'modelType'" + with pytest.raises(AttributeError, match=message): sim.modelType + + +class BaseFixtures: + """ + Base test class with some fixtures. + + Requires that any child class implements a ``scalar_model`` boolean fixture. + It can be a standalone fixture, or it can be a class parametrization. + """ + + def build_survey(self, *, components): + # Observation points + x = np.linspace(-20.0, 20.0, 4) + x, y = np.meshgrid(x, x) + z = 5.0 * np.ones_like(x) + coordinates = np.vstack((x.ravel(), y.ravel(), z.ravel())).T + receivers = mag.receivers.Point(coordinates, components=components) + source_field = mag.UniformBackgroundField( + receiver_list=[receivers], + amplitude=55_000, + inclination=12, + declination=-35, + ) + survey = mag.survey.Survey(source_field) + return survey + + @pytest.fixture( + params=[ + "tmi", + ["bx", "by", "bz"], + ["tmi", "bx"], + ["tmi_x", "tmi_y", "tmi_z"], + ], + ids=["tmi", "mag_components", "tmi_and_mag", "tmi_derivs"], + ) + def survey(self, request): + """ + Return sample magnetic survey. + """ + return self.build_survey(components=request.param) + + @pytest.fixture + def mesh(self): + # Mesh + dh = 5.0 + hx = [(dh, 4)] + mesh = discretize.TensorMesh([hx, hx, hx], "CCN") + return mesh + + @pytest.fixture + def susceptibilities(self, mesh, scalar_model: bool): + """Create sample susceptibilities.""" + susceptibilities = 1e-10 * np.ones( + mesh.n_cells if scalar_model else 3 * mesh.n_cells + ) + ind_sphere = model_builder.get_indices_sphere( + np.r_[0.0, 0.0, -20.0], 10.0, mesh.cell_centers + ) + if scalar_model: + susceptibilities[ind_sphere] = 0.2 + else: + susceptibilities[: mesh.n_cells][ind_sphere] = 0.2 + susceptibilities[mesh.n_cells : 2 * mesh.n_cells][ind_sphere] = 0.3 + susceptibilities[2 * mesh.n_cells : 3 * mesh.n_cells][ind_sphere] = 0.5 + return susceptibilities + + +@pytest.mark.parametrize( + "scalar_model", [True, False], ids=["scalar_model", "vector_model"] +) +class TestnD(BaseFixtures): + """ + Test the ``nD`` property. 
+ """ + + @pytest.fixture + def survey_b_norm(self): + return self.build_survey(components=["bx", "by", "bz"]) + + @pytest.fixture + def mapping(self, mesh, scalar_model): + nparams = mesh.n_cells if scalar_model else 3 * mesh.n_cells + return maps.IdentityMap(nP=nparams) + + def test_nD(self, mesh, survey, mapping, susceptibilities, scalar_model): + """ + Test nD on tmi data. + """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities="ram", + engine="choclo", + model_type=model_type, + ) + receivers = survey.source_field.receiver_list + n_data = sum(rx.locations.shape[0] * len(rx.components) for rx in receivers) + assert simulation.nD == n_data + dpred = simulation.dpred(susceptibilities) + assert dpred.size == simulation.nD + + def test_nD_amplitude_data( + self, mesh, survey_b_norm, mapping, susceptibilities, scalar_model + ): + """ + Test nD on amplitude data. + """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey_b_norm, + mesh=mesh, + chiMap=mapping, + store_sensitivities="ram", + engine="choclo", + model_type=model_type, + is_amplitude_data=True, + ) + receivers = survey_b_norm.source_field.receiver_list + n_data = ( + sum(rx.locations.shape[0] * len(rx.components) for rx in receivers) // 3 + ) + assert simulation.nD == n_data + dpred = simulation.dpred(susceptibilities) + assert dpred.size == simulation.nD + + +@pytest.mark.parametrize( + "scalar_model", [True, False], ids=["scalar_model", "vector_model"] +) +class TestGLinearOperator(BaseFixtures): + """ + Test G as a linear operator. + """ + + @pytest.fixture + def mapping(self, mesh, scalar_model): + nparams = mesh.n_cells if scalar_model else 3 * mesh.n_cells + return maps.IdentityMap(nP=nparams) + + @pytest.mark.parametrize("parallel", [True, False], ids=["parallel", "serial"]) + def test_G_dot_m( + self, survey, mesh, mapping, susceptibilities, scalar_model, parallel + ): + """Test G @ m.""" + model_type = "scalar" if scalar_model else "vector" + simulation, simulation_ram = ( + mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store, + engine="choclo", + numba_parallel=parallel, + model_type=model_type, + ) + for store in ("forward_only", "ram") + ) + assert isinstance(simulation.G, LinearOperator) + assert isinstance(simulation_ram.G, np.ndarray) + + expected = simulation_ram.G @ susceptibilities + + atol = np.max(np.abs(expected)) * 1e-8 + np.testing.assert_allclose(simulation.G @ susceptibilities, expected, atol=atol) + + @pytest.mark.parametrize("parallel", [True, False], ids=["parallel", "serial"]) + def test_G_t_dot_v(self, survey, mesh, mapping, scalar_model, parallel): + """Test G.T @ v.""" + model_type = "scalar" if scalar_model else "vector" + simulation, simulation_ram = ( + mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store, + engine="choclo", + numba_parallel=parallel, + model_type=model_type, + ) + for store in ("forward_only", "ram") + ) + assert isinstance(simulation.G, LinearOperator) + assert isinstance(simulation_ram.G, np.ndarray) + + vector = np.random.default_rng(seed=42).uniform(size=survey.nD) + expected = simulation_ram.G.T @ vector + + atol = np.max(np.abs(expected)) * 1e-7 + np.testing.assert_allclose(simulation.G.T @ vector, expected, atol=atol) + + def test_not_implemented(self, survey, 
mesh, mapping, scalar_model): + """ + Test NotImplementedError when forward_only and geoana as engine. + """ + engine, store = "geoana", "forward_only" + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store, + engine=engine, + model_type=model_type, + ) + msg = re.escape( + "Accessing matrix G with " + 'store_sensitivities="forward_only" and engine="geoana" ' + "hasn't been implemented yet." + ) + with pytest.raises(NotImplementedError, match=msg): + simulation.G + + +@pytest.mark.parametrize( + "scalar_model", [True, False], ids=["scalar_model", "vector_model"] +) +class TestJacobian(BaseFixtures): + """ + Test methods related to Jacobian matrix in magnetic simulation. + """ + + atol_ratio = 1e-7 + + @pytest.fixture(params=["identity_map", "exp_map"]) + def mapping(self, mesh, scalar_model: bool, request): + nparams = mesh.n_cells if scalar_model else 3 * mesh.n_cells + mapping = ( + maps.IdentityMap(nP=nparams) + if request.param == "identity_map" + else maps.ExpMap(nP=nparams) + ) + return mapping + + @pytest.mark.parametrize("engine", ["choclo", "geoana"]) + def test_getJ_as_array( + self, survey, mesh, mapping, susceptibilities, scalar_model, engine + ): + """ + Test the getJ method when J is an array in memory. + """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities="ram", + engine=engine, + model_type=model_type, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + + jac = simulation.getJ(model) + assert isinstance(jac, np.ndarray) + # With an identity mapping, the jacobian should be the same as G. + # With an exp mapping, the jacobian should be G @ the mapping derivative. + expected_jac = ( + simulation.G if is_identity_map else simulation.G @ mapping.deriv(model) + ) + np.testing.assert_allclose(jac, expected_jac) + + @pytest.mark.parametrize( + "engine", + [ + "choclo", + pytest.param( + "geoana", + marks=pytest.mark.xfail( + reason="not implemented", raises=NotImplementedError + ), + ), + ], + ) + @pytest.mark.parametrize("transpose", [False, True], ids=["J @ m", "J.T @ v"]) + def test_getJ_as_linear_operator( + self, survey, mesh, mapping, susceptibilities, scalar_model, engine, transpose + ): + """ + Test the getJ method when J is a linear operator. 
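+
+ Both ``J @ m`` and ``J.T @ v`` should match the equivalent products
+ computed from ``G`` and the mapping derivative.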
+ """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities="forward_only", + engine=engine, + model_type=model_type, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + + jac = simulation.getJ(model) + assert isinstance(jac, LinearOperator) + if transpose: + vector = np.random.default_rng(seed=42).uniform(size=survey.nD) + result = jac.T @ vector + expected_result = mapping.deriv(model).T @ (simulation.G.T @ vector) + else: + result = jac @ model + expected_result = simulation.G @ (mapping.deriv(model).diagonal() * model) + np.testing.assert_allclose(result, expected_result) + + @pytest.mark.parametrize("engine", ["choclo", "geoana"]) + def test_getJ_not_implemented( + self, survey, mesh, mapping, susceptibilities, scalar_model, engine + ): + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities="ram", + engine=engine, + model_type=model_type, + is_amplitude_data=True, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + with pytest.raises(NotImplementedError): + simulation.getJ(model) + + @pytest.mark.parametrize( + ("engine", "store_sensitivities"), + [ + ("choclo", "ram"), + ("choclo", "forward_only"), + ("geoana", "ram"), + pytest.param( + "geoana", + "forward_only", + marks=pytest.mark.xfail( + reason="not implemented", raises=NotImplementedError + ), + ), + ], + ) + def test_Jvec( + self, + survey, + mesh, + mapping, + susceptibilities, + scalar_model, + engine, + store_sensitivities, + ): + """ + Test the Jvec method. + """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store_sensitivities, + engine=engine, + model_type=model_type, + sensitivity_dtype=np.float64, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + + vector = np.random.default_rng(seed=42).uniform(size=susceptibilities.size) + result = simulation.Jvec(model, vector) + + expected_jac = ( + simulation.G + if is_identity_map + else simulation.G @ aslinearoperator(mapping.deriv(model)) + ) + expected = expected_jac @ vector + + atol = np.max(np.abs(expected)) * self.atol_ratio + np.testing.assert_allclose(result, expected, atol=atol) + + @pytest.mark.parametrize( + ("engine", "store_sensitivities"), + [ + ("choclo", "ram"), + ("choclo", "forward_only"), + ("geoana", "ram"), + pytest.param( + "geoana", + "forward_only", + marks=pytest.mark.xfail( + reason="not implemented", raises=NotImplementedError + ), + ), + ], + ) + def test_Jtvec( + self, + survey, + mesh, + mapping, + susceptibilities, + scalar_model, + engine, + store_sensitivities, + ): + """ + Test the Jtvec method. 
+ """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store_sensitivities, + engine=engine, + model_type=model_type, + sensitivity_dtype=np.float64, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + + vector = np.random.default_rng(seed=42).uniform(size=survey.nD) + result = simulation.Jtvec(model, vector) + + expected_jac = ( + simulation.G + if is_identity_map + else simulation.G @ aslinearoperator(mapping.deriv(model)) + ) + expected = expected_jac.T @ vector + + atol = np.max(np.abs(result)) * self.atol_ratio + np.testing.assert_allclose(result, expected, atol=atol) + + @pytest.mark.parametrize( + "engine", + [ + "choclo", + pytest.param( + "geoana", + marks=pytest.mark.xfail( + reason="not implemented", raises=NotImplementedError + ), + ), + ], + ) + @pytest.mark.parametrize("method", ["Jvec", "Jtvec"]) + def test_array_vs_linear_operator( + self, survey, mesh, mapping, susceptibilities, scalar_model, engine, method + ): + """ + Test methods when using "ram" and "forward_only". + + They should give the same results. + """ + model_type = "scalar" if scalar_model else "vector" + simulation_lo, simulation_ram = ( + mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store, + engine=engine, + model_type=model_type, + sensitivity_dtype=np.float64, + ) + for store in ("forward_only", "ram") + ) + match method: + case "Jvec": + vector_size = susceptibilities.size + case "Jtvec": + vector_size = survey.nD + case _: # pragma: no cover + raise ValueError(f"Invalid method '{method}'") # pragma: no cover + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + + vector = np.random.default_rng(seed=42).uniform(size=vector_size) + result_lo = getattr(simulation_lo, method)(model, vector) + result_ram = getattr(simulation_ram, method)(model, vector) + atol = np.max(np.abs(result_ram)) * self.atol_ratio + np.testing.assert_allclose(result_lo, result_ram, atol=atol) + + @pytest.mark.parametrize("engine", ["choclo", "geoana"]) + @pytest.mark.parametrize("weights", [True, False]) + def test_getJtJdiag( + self, survey, mesh, mapping, susceptibilities, scalar_model, engine, weights + ): + """ + Test the ``getJtJdiag`` method with G as an array in memory. 
+ """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities="ram", + engine=engine, + model_type=model_type, + sensitivity_dtype=np.float64, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + + kwargs = {} + if weights: + w_matrix = diags(np.random.default_rng(seed=42).uniform(size=survey.nD)) + kwargs = {"W": w_matrix} + jtj_diag = simulation.getJtJdiag(model, **kwargs) + + expected_jac = ( + simulation.G if is_identity_map else simulation.G @ mapping.deriv(model) + ) + if weights: + expected = np.diag(expected_jac.T @ w_matrix.T @ w_matrix @ expected_jac) + else: + expected = np.diag(expected_jac.T @ expected_jac) + + atol = np.max(np.abs(jtj_diag)) * self.atol_ratio + np.testing.assert_allclose(jtj_diag, expected, atol=atol) + + @pytest.mark.parametrize( + ("engine", "is_amplitude_data"), + [("geoana", True), ("geoana", False), ("choclo", True)], + ids=("geoana-amplitude_data", "geoana-regular_data", "choclo-amplitude_data"), + ) + def test_getJtJdiag_not_implemented( + self, + survey, + mesh, + mapping, + susceptibilities, + scalar_model, + engine, + is_amplitude_data, + ): + """ + Test NotImplementedErrors on ``getJtJdiag``. + """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities="forward_only", + engine=engine, + is_amplitude_data=is_amplitude_data, + model_type=model_type, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + with pytest.raises(NotImplementedError): + simulation.getJtJdiag(model) + + @pytest.mark.parametrize("parallel", [True, False], ids=("parallel", "serial")) + def test_getJtJdiag_forward_only( + self, survey, mesh, mapping, susceptibilities, scalar_model, parallel + ): + """ + Test the ``getJtJdiag`` method with ``"forward_only"`` and choclo. + """ + model_type = "scalar" if scalar_model else "vector" + simulation, simulation_ram = ( + mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store, + engine="choclo", + numba_parallel=parallel, + model_type=model_type, + ) + for store in ("forward_only", "ram") + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + + expected = simulation_ram.getJtJdiag(model) + result = simulation.getJtJdiag(model) + + atol = np.max(np.abs(expected)) * 1e-8 + np.testing.assert_allclose(result, expected, atol=atol) + + @pytest.mark.parametrize("engine", ["choclo", "geoana"]) + def test_getJtJdiag_caching( + self, survey, mesh, mapping, susceptibilities, scalar_model, engine + ): + """ + Test the caching behaviour of the ``getJtJdiag`` method. 
+ """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities="ram", + engine=engine, + model_type=model_type, + ) + + # Get diagonal of J.T @ J without any weight + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + jtj_diagonal_1 = simulation.getJtJdiag(model) + assert hasattr(simulation, "_gtg_diagonal") + assert hasattr(simulation, "_weights_sha256") + gtg_diagonal_1 = simulation._gtg_diagonal + weights_sha256_1 = simulation._weights_sha256 + + # Compute it again and make sure we get the same result + np.testing.assert_allclose(jtj_diagonal_1, simulation.getJtJdiag(model)) + + # Get a new diagonal with weights + weights_matrix = diags( + np.random.default_rng(seed=42).uniform(size=simulation.survey.nD) + ) + jtj_diagonal_2 = simulation.getJtJdiag(model, W=weights_matrix) + assert hasattr(simulation, "_gtg_diagonal") + assert hasattr(simulation, "_weights_sha256") + gtg_diagonal_2 = simulation._gtg_diagonal + weights_sha256_2 = simulation._weights_sha256 + + # The two results should be different + assert not np.array_equal(jtj_diagonal_1, jtj_diagonal_2) + assert not np.array_equal(gtg_diagonal_1, gtg_diagonal_2) + assert weights_sha256_1.digest() != weights_sha256_2.digest() + + +@pytest.mark.parametrize( + "scalar_model", [True, False], ids=["scalar_model", "vector_model"] +) +class TestJacobianAmplitudeData(BaseFixtures): + """ + Test Jacobian related methods with ``is_amplitude_data``. + """ + + atol_ratio = 1e-7 + + @pytest.fixture + def survey(self): + """ + Sample survey with fixed components bx, by, bz. + + These components are assumed when working with ``is_amplitude_data=True``. + """ + # Observation points + x = np.linspace(-20.0, 20.0, 4) + x, y = np.meshgrid(x, x) + z = 5.0 * np.ones_like(x) + coordinates = np.vstack((x.ravel(), y.ravel(), z.ravel())).T + receivers = mag.receivers.Point(coordinates, components=["bx", "by", "bz"]) + source_field = mag.UniformBackgroundField( + receiver_list=[receivers], + amplitude=55_000, + inclination=12, + declination=-35, + ) + survey = mag.survey.Survey(source_field) + return survey + + @pytest.fixture(params=["identity_map", "exp_map"]) + def mapping(self, mesh, scalar_model: bool, request): + nparams = mesh.n_cells if scalar_model else 3 * mesh.n_cells + mapping = ( + maps.IdentityMap(nP=nparams) + if request.param == "identity_map" + else maps.ExpMap(nP=nparams) + ) + return mapping + + @pytest.mark.parametrize("engine", ["choclo", "geoana"]) + def test_getJ_not_implemented( + self, survey, mesh, mapping, susceptibilities, scalar_model, engine + ): + """ + Test the getJ method when J is an array in memory. 
+ """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities="ram", + engine=engine, + model_type=model_type, + is_amplitude_data=True, + sensitivity_dtype=np.float64, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + with pytest.raises(NotImplementedError): + simulation.getJ(model) + + @pytest.mark.parametrize( + ("engine", "store_sensitivities"), + [ + ("choclo", "ram"), + ("choclo", "forward_only"), + ("geoana", "ram"), + pytest.param( + "geoana", + "forward_only", + marks=pytest.mark.xfail( + reason="not implemented", raises=NotImplementedError + ), + ), + ], + ) + def test_Jvec( + self, + survey, + mesh, + mapping, + susceptibilities, + scalar_model, + engine, + store_sensitivities, + ): + r""" + Test the Jvec method. + + Test the Jvec method through an alternative implementation. + Define a :math:`f(\chi)` forward model function that returns the norm of the + magnetic field given the susceptibility values of :math:`\chi`: + + .. math:: + + f(\chi) + = \lvert \mathbf{B} \rvert + = \sqrt{B_x^2(\chi) + B_y^2(\chi) + B_z^2(\chi)} + + The gradient of :math:`f(\chi)` (jacobian matrix :math:`\mathbf{J}`) can be + written as: + + .. math:: + + \mathbf{J} = \mathbf{J}_x + \mathbf{J}_y + \mathbf{J}_z + + where: + + .. math:: + + \mathbf{J}_x = + \frac{1}{\lvert \mathbf{B} \rvert} + B_x(\chi) + \frac{\partial B_x}{\partial \chi} + \frac{\partial \chi}{\partial \mathbf{m}} + + and :math:`\mathbf{m}` is the model. + """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store_sensitivities, + engine=engine, + model_type=model_type, + is_amplitude_data=True, + sensitivity_dtype=np.float64, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + vector = np.random.default_rng(seed=42).uniform(size=susceptibilities.size) + result = simulation.Jvec(model, vector) + + magnetic_field = simulation.G @ susceptibilities + bx, by, bz = (magnetic_field[i::3] for i in (0, 1, 2)) + inv_amplitude = 1 / np.sqrt(bx**2 + by**2 + bz**2) + + g_dot_chideriv_v = ( + simulation.G @ aslinearoperator(mapping.deriv(model)) @ vector + ) + jac_x_dot_v = diags(inv_amplitude) @ diags(bx) @ g_dot_chideriv_v[0::3] + jac_y_dot_v = diags(inv_amplitude) @ diags(by) @ g_dot_chideriv_v[1::3] + jac_z_dot_v = diags(inv_amplitude) @ diags(bz) @ g_dot_chideriv_v[2::3] + expected = jac_x_dot_v + jac_y_dot_v + jac_z_dot_v + + atol = np.max(np.abs(expected)) * self.atol_ratio + np.testing.assert_allclose(result, expected, atol=atol) + + @pytest.mark.parametrize( + ("engine", "store_sensitivities"), + [ + ("choclo", "ram"), + ("choclo", "forward_only"), + ("geoana", "ram"), + pytest.param( + "geoana", + "forward_only", + marks=pytest.mark.xfail( + reason="not implemented", raises=NotImplementedError + ), + ), + ], + ) + def test_Jtvec( + self, + survey, + mesh, + mapping, + susceptibilities, + scalar_model, + engine, + store_sensitivities, + ): + """ + Test the Jtvec method. + + Test it similarly to Jvec, but computing the transpose of the matrices. 
+ """ + model_type = "scalar" if scalar_model else "vector" + simulation = mag.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chiMap=mapping, + store_sensitivities=store_sensitivities, + engine=engine, + model_type=model_type, + is_amplitude_data=True, + sensitivity_dtype=np.float64, + ) + is_identity_map = type(mapping) is maps.IdentityMap + model = susceptibilities if is_identity_map else np.log(susceptibilities) + + # Need to set size as survey.nD / 3 because there's a bug in simulation.nD. + vector = np.random.default_rng(seed=42).uniform(size=survey.nD // 3) + result = simulation.Jtvec(model, vector) + + magnetic_field = simulation.G @ susceptibilities + bx, by, bz = (magnetic_field[i::3] for i in (0, 1, 2)) + inv_amplitude = 1 / np.sqrt(bx**2 + by**2 + bz**2) + v = np.array( + ( + bx * inv_amplitude * vector, + by * inv_amplitude * vector, + bz * inv_amplitude * vector, + ) + ).T.ravel() # interleave the values for bx, by, bz + expected = mapping.deriv(model).T @ (simulation.G.T @ v) + + atol = np.max(np.abs(result)) * self.atol_ratio + np.testing.assert_allclose(result, expected, atol=atol) diff --git a/tests/pf/test_forward_PFproblem.py b/tests/pf/test_forward_PFproblem.py deleted file mode 100644 index 2c54300d7a..0000000000 --- a/tests/pf/test_forward_PFproblem.py +++ /dev/null @@ -1,86 +0,0 @@ -import unittest -import discretize -from simpeg import utils, maps -from simpeg.utils.model_builder import get_indices_sphere -from simpeg.potential_fields import magnetics as mag -import numpy as np - - -class MagFwdProblemTests(unittest.TestCase): - def setUp(self): - Inc = 45.0 - Dec = 45.0 - Btot = 51000 - - self.b0 = mag.analytics.IDTtoxyz(-Inc, Dec, Btot) - - cs = 25.0 - hxind = [(cs, 5, -1.3), (cs / 2.0, 41), (cs, 5, 1.3)] - hyind = [(cs, 5, -1.3), (cs / 2.0, 41), (cs, 5, 1.3)] - hzind = [(cs, 5, -1.3), (cs / 2.0, 40), (cs, 5, 1.3)] - M = discretize.TensorMesh([hxind, hyind, hzind], "CCC") - - chibkg = 0.0 - self.chiblk = 0.01 - chi = np.ones(M.nC) * chibkg - - self.rad = 100 - self.sphere_center = [0.0, 0.0, 0.0] - sph_ind = get_indices_sphere(self.sphere_center, self.rad, M.gridCC) - chi[sph_ind] = self.chiblk - - xr = np.linspace(-300, 300, 41) - yr = np.linspace(-300, 300, 41) - X, Y = np.meshgrid(xr, yr) - Z = np.ones((xr.size, yr.size)) * 150 - components = ["bx", "by", "bz"] - self.xr = xr - self.yr = yr - self.rxLoc = np.c_[utils.mkvc(X), utils.mkvc(Y), utils.mkvc(Z)] - receivers = mag.Point(self.rxLoc, components=components) - srcField = mag.UniformBackgroundField( - receiver_list=[receivers], - amplitude=Btot, - inclination=Inc, - declination=Dec, - ) - - self.survey = mag.Survey(srcField) - - self.sim = mag.simulation.Simulation3DDifferential( - M, - survey=self.survey, - muMap=maps.ChiMap(M), - ) - self.M = M - self.chi = chi - - def test_ana_forward(self): - u = self.sim.fields(self.chi) - dpred = self.sim.projectFields(u) - - bxa, bya, bza = mag.analytics.MagSphereAnaFunA( - self.rxLoc[:, 0], - self.rxLoc[:, 1], - self.rxLoc[:, 2], - self.rad, - *self.sphere_center, - self.chiblk, - self.b0, - "secondary", - ) - - n_obs, n_comp = self.rxLoc.shape[0], len(self.survey.components) - dx, dy, dz = dpred.reshape(n_comp, n_obs) - - err_x = np.linalg.norm(dx - bxa) / np.linalg.norm(bxa) - err_y = np.linalg.norm(dy - bya) / np.linalg.norm(bya) - err_z = np.linalg.norm(dz - bza) / np.linalg.norm(bza) - - self.assertLess(err_x, 0.08) - self.assertLess(err_y, 0.08) - self.assertLess(err_z, 0.08) - - -if __name__ == "__main__": - unittest.main() diff --git 
a/tests/pf/test_forward_mag_differential.py b/tests/pf/test_forward_mag_differential.py
new file mode 100644
index 0000000000..f6571073fe
--- /dev/null
+++ b/tests/pf/test_forward_mag_differential.py
@@ -0,0 +1,414 @@
+import re
+import pytest
+import discretize
+import simpeg.potential_fields as PF
+from simpeg import utils, maps
+from discretize.utils import mkvc, refine_tree_xyz
+import numpy as np
+from tests.utils.ellipsoid import ProlateEllipsoid
+
+
+@pytest.fixture
+def mesh():
+
+ dhx, dhy, dhz = 50.0, 50.0, 50.0 # minimum cell width (base mesh cell width)
+ nbcx = 512 # number of base mesh cells in x
+ nbcy = 512
+ nbcz = 512
+
+ # Define base mesh (domain and finest discretization)
+ hx = dhx * np.ones(nbcx)
+ hy = dhy * np.ones(nbcy)
+ hz = dhz * np.ones(nbcz)
+ _mesh = discretize.TreeMesh([hx, hy, hz], x0="CCC")
+
+ xp, yp, zp = np.meshgrid([-1400.0, 1400.0], [-1400.0, 1400.0], [-1000.0, 200.0])
+ xy = np.c_[mkvc(xp), mkvc(yp), mkvc(zp)]
+ _mesh = refine_tree_xyz(
+ _mesh,
+ xy,
+ method="box",
+ finalize=False,
+ octree_levels=[1, 1, 1, 1],
+ )
+ _mesh.finalize()
+
+ return _mesh
+
+
+def get_survey(components=("bx", "by", "bz")):
+ ccx = np.linspace(-1400, 1400, num=57)
+ ccy = np.linspace(-1400, 1400, num=57)
+ ccx, ccy = np.meshgrid(ccx, ccy)
+ ccz = 50.0 * np.ones_like(ccx)
+ rxLoc = PF.magnetics.receivers.Point(
+ np.c_[utils.mkvc(ccy.T), utils.mkvc(ccx.T), utils.mkvc(ccz.T)],
+ components=components,
+ )
+ inducing_field = [55000.0, 60.0, 90.0]
+ srcField = PF.magnetics.sources.UniformBackgroundField(
+ [rxLoc], inducing_field[0], inducing_field[1], inducing_field[2]
+ )
+ _survey = PF.magnetics.survey.Survey(srcField)
+
+ return _survey
+
+
+@pytest.mark.parametrize("model_type", ("mu_rem", "mu", "rem"))
+def test_forward(model_type, mesh):
+ """
+ Test against the analytic solution for an ellipsoid with
+ uniform intrinsic remanence and susceptibility in a
+ uniform ambient geomagnetic field.
+ """
+ tol = 0.1
+
+ survey = get_survey()
+
+ amplitude = survey.source_field.amplitude
+ inclination = survey.source_field.inclination
+ declination = survey.source_field.declination
+ inducing_field = [amplitude, inclination, declination]
+
+ if model_type == "mu_rem":
+ susceptibility = 5
+ MrX = 150000
+ MrY = 150000
+ MrZ = 150000
+ elif model_type == "mu":
+ susceptibility = 5
+ MrX = 0
+ MrY = 0
+ MrZ = 0
+ elif model_type == "rem":
+ susceptibility = 0
+ MrX = 150000
+ MrY = 150000
+ MrZ = 150000
+
+ center = np.array([0.0, 0.0, -400.0])
+ axes = [600.0, 200.0]
+ strike_dip_rake = [0, 0, 90]
+
+ ellipsoid = ProlateEllipsoid(
+ center,
+ axes,
+ strike_dip_rake,
+ susceptibility=susceptibility,
+ Mr=(MrX, MrY, MrZ),
+ inducing_field=inducing_field,
+ )
+ ind_ellipsoid = ellipsoid.get_indices(mesh.cell_centers)
+
+ sus_model = np.zeros(mesh.n_cells)
+ sus_model[ind_ellipsoid] = susceptibility
+ mu_model = maps.ChiMap() * sus_model
+
+ Rx = np.zeros(mesh.n_cells)
+ Ry = np.zeros(mesh.n_cells)
+ Rz = np.zeros(mesh.n_cells)
+
+ Rx[ind_ellipsoid] = MrX
+ Ry[ind_ellipsoid] = MrY
+ Rz[ind_ellipsoid] = MrZ
+
+ u0_Mr_model = mkvc(np.array([Rx, Ry, Rz]).T)
+
+ if model_type == "mu":
+ u0_Mr_model = None
+ elif model_type == "rem":
+ mu_model = None
+
+ simulation = PF.magnetics.simulation.Simulation3DDifferential(
+ survey=survey, mesh=mesh, mu=mu_model, rem=u0_Mr_model, solver_dtype=np.float32
+ )
+
+ dpred_numeric = simulation.dpred()
+ dpred_analytic = mkvc(ellipsoid.anomalous_bfield(survey.receiver_locations))
+
+ assert np.allclose(
+ dpred_numeric,
+ dpred_analytic,
+ rtol=0.1,
+ 
atol=0.05 * np.max(np.abs(dpred_analytic)),
+ )
+
+ err = np.linalg.norm(dpred_numeric - dpred_analytic) / np.linalg.norm(
+ dpred_analytic
+ )
+
+ print(
+ "\n||dpred_analytic-dpred_numeric||/||dpred_analytic|| = "
+ + "{:.{}f}".format(err, 2)
+ + ", tol = "
+ + str(tol)
+ )
+
+ assert err < tol
+
+ u0_M_analytic = ellipsoid.Magnetization()
+ u0_M_numeric = mesh.average_face_to_cell_vector * simulation.magnetic_polarization()
+ u0_M_numeric = u0_M_numeric.reshape((mesh.n_cells, 3), order="F")
+ u0_M_numeric = np.mean(u0_M_numeric[ind_ellipsoid, :], axis=0)
+
+ assert np.allclose(
+ u0_M_numeric,
+ u0_M_analytic,
+ rtol=0.1,
+ atol=0.01 * np.max(np.abs(u0_M_analytic)),
+ )
+
+
+def test_exact_tmi(mesh):
+ """
+ Test that the predicted "tmi" component matches the exact TMI
+ computed from the predicted bx, by, bz components and the
+ background field.
+ """
+ tol = 1e-8
+
+ survey = get_survey(components=["bx", "by", "bz", "tmi"])
+
+ amplitude = survey.source_field.amplitude
+ inclination = survey.source_field.inclination
+ declination = survey.source_field.declination
+ inducing_field = [amplitude, inclination, declination]
+
+ susceptibility = 5
+ MrX = 150000
+ MrY = 150000
+ MrZ = 150000
+
+ center = np.array([0.0, 0.0, -400.0])
+ axes = [600.0, 200.0]
+ strike_dip_rake = [0, 0, 90]
+
+ ellipsoid = ProlateEllipsoid(
+ center,
+ axes,
+ strike_dip_rake,
+ susceptibility=susceptibility,
+ Mr=(MrX, MrY, MrZ),
+ inducing_field=inducing_field,
+ )
+ ind_ellipsoid = ellipsoid.get_indices(mesh.cell_centers)
+
+ sus_model = np.zeros(mesh.n_cells)
+ sus_model[ind_ellipsoid] = susceptibility
+ mu_model = maps.ChiMap() * sus_model
+
+ Rx = np.zeros(mesh.n_cells)
+ Ry = np.zeros(mesh.n_cells)
+ Rz = np.zeros(mesh.n_cells)
+
+ Rx[ind_ellipsoid] = MrX
+ Ry[ind_ellipsoid] = MrY
+ Rz[ind_ellipsoid] = MrZ
+
+ u0_Mr_model = mkvc(np.array([Rx, Ry, Rz]).T)
+
+ simulation = PF.magnetics.simulation.Simulation3DDifferential(
+ survey=survey,
+ mesh=mesh,
+ mu=mu_model,
+ rem=u0_Mr_model,
+ )
+
+ dpred_numeric = simulation.dpred()
+
+ dpred_fields = np.reshape(dpred_numeric[: survey.nRx * 4], (4, survey.nRx)).T
+
+ B0 = survey.source_field.b0
+
+ TMI_exact_analytic = np.linalg.norm(dpred_fields[:, :3] + B0, axis=1) - amplitude
+ dpred_TMI_exact = dpred_fields[:, 3]
+
+ TMI_exact_err = np.max(np.abs(dpred_TMI_exact - TMI_exact_analytic))
+
+ print(
+ "max(TMI_exact_err) = "
+ + "{:.{}e}".format(TMI_exact_err, 2)
+ + ", tol = "
+ + str(tol)
+ )
+ assert TMI_exact_err < tol
+
+
+def test_differential_magnetization_against_integral(mesh):
+ """
+ Test that the differential and integral simulations agree for a purely
+ remanent ellipsoid, and that both are close to the analytic solution.
+ """
+
+ survey = get_survey()
+
+ amplitude = survey.source_field.amplitude
+ inclination = survey.source_field.inclination
+ declination = survey.source_field.declination
+ inducing_field = [amplitude, inclination, declination]
+
+ MrX = 150000
+ MrY = 150000
+ MrZ = 150000
+
+ center = np.array([0.0, 0.0, -400.0])
+ axes = [600.0, 200.0]
+ strike_dip_rake = [0, 0, 90]
+
+ ellipsoid = ProlateEllipsoid(
+ center,
+ axes,
+ strike_dip_rake,
+ Mr=np.array([MrX, MrY, MrZ]),
+ inducing_field=inducing_field,
+ )
+ ind_ellipsoid = ellipsoid.get_indices(mesh.cell_centers)
+
+ Rx = np.zeros(mesh.n_cells)
+ Ry = np.zeros(mesh.n_cells)
+ Rz = np.zeros(mesh.n_cells)
+
+ Rx[ind_ellipsoid] = MrX
+ Ry[ind_ellipsoid] = MrY
+ Rz[ind_ellipsoid] = MrZ
+
+ u0_Mr_model = mkvc(np.array([Rx, Ry, Rz]).T)
+ eff_sus_model = (u0_Mr_model / amplitude)[
+ np.hstack((ind_ellipsoid, ind_ellipsoid, ind_ellipsoid))
+ ]
+
+ simulation_differential = PF.magnetics.simulation.Simulation3DDifferential(
+ survey=survey,
+ 
mesh=mesh, + rem=u0_Mr_model, + ) + + simulation_integral = PF.magnetics.simulation.Simulation3DIntegral( + survey=survey, + mesh=mesh, + chi=eff_sus_model, + model_type="vector", + store_sensitivities="forward_only", + active_cells=ind_ellipsoid, + ) + + dpred_numeric_differential = simulation_differential.dpred() + dni = simulation_integral.dpred() + dpred_numeric_integral = np.hstack((dni[0::3], dni[1::3], dni[2::3])) + dpred_analytic = mkvc(ellipsoid.anomalous_bfield(survey.receiver_locations)) + + diff_numeric = np.linalg.norm( + dpred_numeric_differential - dpred_numeric_integral + ) / np.linalg.norm(dpred_numeric_integral) + diff_differential = np.linalg.norm( + dpred_numeric_differential - dpred_analytic + ) / np.linalg.norm(dpred_analytic) + diff_integral = np.linalg.norm( + dpred_numeric_integral - dpred_analytic + ) / np.linalg.norm(dpred_analytic) + + # Check both discretized solutions are closer to each other than to the analytic + assert diff_numeric < diff_differential + assert diff_numeric < diff_integral + + print( + "\n||dpred_integral-dpred_pde||/||dpred_integral|| = " + + "{:.{}f}".format(diff_numeric, 2) + ) + print( + "||dpred_integral-dpred_analytic||/||dpred_analytic|| = " + + "{:.{}f}".format(diff_integral, 2) + ) + print( + "||dpred_pde-dpred_analytic||/||dpred_analytic|| = " + + "{:.{}f}".format(diff_differential, 2) + ) + + +def test_invalid_solver_dtype(mesh): + """ + Test error upon invalid `solver_dtype`. + """ + survey = get_survey() + invalid_dtype = np.int64 + msg = re.escape( + f"Invalid `solver_dtype` '{invalid_dtype}'. " + "It must be np.float32 or np.float64." + ) + with pytest.raises(ValueError, match=msg): + PF.magnetics.simulation.Simulation3DDifferential( + survey=survey, mesh=mesh, solver_dtype=invalid_dtype + ) + + +@pytest.mark.parametrize( + "components", + ["bx", "by", "bz", "tmi", ["bx", "by", "bz"]], + ids=["bx", "by", "bz", "tmi", "b_field"], +) +class TestGetJ: + + @pytest.fixture + def mesh_small(self): + """ + Define a small mesh that would generate a J matrix small enough to fit in memory + """ + h = [(10.0, 8)] + mesh = discretize.TreeMesh([h, h, h], x0="CCC", diagonal_balance=True) + mesh.refine_points((0, 0, 0), level=-1) + mesh.finalize() + return mesh + + @pytest.fixture + def survey_small(self, components): + """ + Define a small survey. + """ + x = np.linspace(-20, 20, 11) + x, y = tuple(c.ravel() for c in np.meshgrid(x, x)) + z = np.ones_like(x) + locations = np.vstack((x, y, z)).T + receiver = PF.magnetics.receivers.Point( + locations, + components=components, + ) + inducing_field = (55_000, -71, 12) + source = PF.magnetics.sources.UniformBackgroundField( + [receiver], *inducing_field + ) + survey = PF.magnetics.survey.Survey(source) + return survey + + def test_getJ_vs_Jvec(self, mesh_small, survey_small): + """ + Test the getJ method against Jvec. + """ + rng = np.random.default_rng(seed=41) + model = rng.uniform(0, 1e-1, size=mesh_small.n_cells) + mapping = maps.IdentityMap(nP=model.size) + simulation = PF.magnetics.simulation.Simulation3DDifferential( + survey=survey_small, + mesh=mesh_small, + muMap=mapping, + storeJ=False, # explicitly not storing J + ) + + vector = rng.uniform(0, 1e-1, size=mesh_small.n_cells) + result = simulation.getJ(model) @ vector + expected = simulation.Jvec(model, vector) + np.testing.assert_allclose(result, expected) + + def test_getJ_vs_Jtvec(self, mesh_small, survey_small): + """ + Test the getJ method against Jtvec. 
+ """ + rng = np.random.default_rng(seed=41) + model = rng.uniform(0, 1e-1, size=mesh_small.n_cells) + mapping = maps.IdentityMap(nP=model.size) + simulation = PF.magnetics.simulation.Simulation3DDifferential( + survey=survey_small, + mesh=mesh_small, + muMap=mapping, + storeJ=False, # explicitly not storing J + ) + + vector = rng.uniform(0, 1e-1, size=survey_small.nD) + result = simulation.getJ(model).T @ vector + expected = simulation.Jtvec(model, vector) + np.testing.assert_allclose(result, expected) diff --git a/tests/pf/test_mag_differential_functionality.py b/tests/pf/test_mag_differential_functionality.py new file mode 100644 index 0000000000..f682b39fe6 --- /dev/null +++ b/tests/pf/test_mag_differential_functionality.py @@ -0,0 +1,177 @@ +import pytest +import discretize +import simpeg.potential_fields as PF +from simpeg import utils, maps +from discretize.utils import mkvc, refine_tree_xyz +import numpy as np +from tests.utils.ellipsoid import ProlateEllipsoid + + +@pytest.fixture +def mesh(): + + dhx, dhy, dhz = 75.0, 75.0, 75.0 # minimum cell width (base mesh cell width) + nbcx = 512 # number of base mesh cells in x + nbcy = 512 + nbcz = 512 + + # Define base mesh (domain and finest discretization) + hx = dhx * np.ones(nbcx) + hy = dhy * np.ones(nbcy) + hz = dhz * np.ones(nbcz) + _mesh = discretize.TreeMesh([hx, hy, hz], x0="CCC") + + xp, yp, zp = np.meshgrid([-1400.0, 1400.0], [-1400.0, 1400.0], [-1000.0, 200.0]) + xy = np.c_[mkvc(xp), mkvc(yp), mkvc(zp)] + _mesh = refine_tree_xyz( + _mesh, + xy, + method="box", + finalize=False, + octree_levels=[1, 1, 1, 1], + ) + _mesh.finalize() + + return _mesh + + +def test_recievers(mesh): + """ + Test that multiple point recievers with different components work. + """ + + ccx = np.linspace(-1400, 1400, num=57) + ccy = np.linspace(-1400, 1400, num=57) + ccx, ccy = np.meshgrid(ccx, ccy) + ccz = 50.0 * np.ones_like(ccx) + components_1 = ["bx", "by", "bz", "tmi"] + components_2 = ["by", "tmi"] + rxLoc_1 = PF.magnetics.receivers.Point( + np.c_[utils.mkvc(ccy.T), utils.mkvc(ccx.T), utils.mkvc(ccz.T)], + components=components_1, + ) + rxLoc_2 = PF.magnetics.receivers.Point( + np.c_[utils.mkvc(ccy.T), utils.mkvc(ccx.T), utils.mkvc(ccz.T + 20)], + components=components_2, + ) + inducing_field = [55000.0, 60.0, 90.0] + + srcField_1 = PF.magnetics.sources.UniformBackgroundField( + [rxLoc_1], inducing_field[0], inducing_field[1], inducing_field[2] + ) + survey_1 = PF.magnetics.survey.Survey(srcField_1) + + srcField_2 = PF.magnetics.sources.UniformBackgroundField( + [rxLoc_2], inducing_field[0], inducing_field[1], inducing_field[2] + ) + survey_2 = PF.magnetics.survey.Survey(srcField_2) + + srcField_all = PF.magnetics.sources.UniformBackgroundField( + [rxLoc_1, rxLoc_2], inducing_field[0], inducing_field[1], inducing_field[2] + ) + survey_all = PF.magnetics.survey.Survey(srcField_all) + + amplitude = survey_1.source_field.amplitude + inclination = survey_1.source_field.inclination + declination = survey_1.source_field.declination + inducing_field = [amplitude, inclination, declination] + + susceptibility = 5 + MrX = 150000 + MrY = 150000 + MrZ = 150000 + + center = np.array([00, 0, -400.0]) + axes = [600.0, 200.0] + strike_dip_rake = [0, 0, 90] + + ellipsoid = ProlateEllipsoid( + center, + axes, + strike_dip_rake, + susceptibility=susceptibility, + Mr=(MrX, MrY, MrZ), + inducing_field=inducing_field, + ) + + ind_ellipsoid = ellipsoid.get_indices(mesh.cell_centers) + + sus_model = np.zeros(mesh.n_cells) + sus_model[ind_ellipsoid] = susceptibility + + 
Rx = np.zeros(mesh.n_cells) + Ry = np.zeros(mesh.n_cells) + Rz = np.zeros(mesh.n_cells) + + Rx[ind_ellipsoid] = MrX / 55000 + Ry[ind_ellipsoid] = MrY / 55000 + Rz[ind_ellipsoid] = MrZ / 55000 + + EsusRem = mkvc(np.array([Rx, Ry, Rz]).T) + + chimap = maps.ChiMap(mesh) + eff_sus_map = maps.EffectiveSusceptibilityMap( + nP=mesh.n_cells * 3, ambient_field_magnitude=survey_1.source_field.amplitude + ) + + wire_map = maps.Wires(("mu", mesh.n_cells), ("rem", mesh.n_cells * 3)) + mu_map = chimap * wire_map.mu + rem_map = eff_sus_map * wire_map.rem + m = np.r_[sus_model, EsusRem] + + simulation_1 = PF.magnetics.simulation.Simulation3DDifferential( + mesh=mesh, + survey=survey_1, + muMap=mu_map, + remMap=rem_map, + ) + + simulation_2 = PF.magnetics.simulation.Simulation3DDifferential( + survey=survey_2, + mesh=mesh, + muMap=mu_map, + remMap=rem_map, + ) + + simulation_all = PF.magnetics.simulation.Simulation3DDifferential( + survey=survey_all, + mesh=mesh, + muMap=mu_map, + remMap=rem_map, + ) + dpred_numeric_all = simulation_all.dpred(m) + dpred_numeric_1 = simulation_1.dpred(m) + dpred_numeric_2 = simulation_2.dpred(m) + dpred_stack = np.hstack((dpred_numeric_1, dpred_numeric_2)) + + rvec = np.random.randn(mesh.n_cells * 4) * 0.001 + + jv_all = simulation_all.Jvec(m, v=rvec) + jv_1 = simulation_1.Jvec(m, v=rvec) + jv_2 = simulation_2.Jvec(m, v=rvec) + jv_stack = np.hstack((jv_1, jv_2)) + + assert np.allclose(dpred_numeric_all, dpred_stack, atol=1e-8) + assert np.allclose(jv_all, jv_stack, atol=1e-8) + + +def test_unsupported_components(mesh): + """ + Test error when survey has unsupported components. + """ + supported_components = ["tmi", "bx", "by", "bz"] + unsupported_components = ["bxx", "byy", "bzz"] + receivers = [ + PF.magnetics.Point( + np.array([[0, 0, 0], [1, 2, 3]]), + components=components, + ) + for components in (*supported_components, *unsupported_components) + ] + inducing_field = [55000.0, 60.0, 90.0] + source = PF.magnetics.sources.UniformBackgroundField(receivers, *inducing_field) + survey = PF.magnetics.survey.Survey(source) + + msg = "Found unsupported magnetic components " + with pytest.raises(NotImplementedError, match=msg): + PF.magnetics.simulation.Simulation3DDifferential(survey=survey, mesh=mesh) diff --git a/tests/pf/test_mag_differential_jvecjtvec.py b/tests/pf/test_mag_differential_jvecjtvec.py new file mode 100644 index 0000000000..91daa643d0 --- /dev/null +++ b/tests/pf/test_mag_differential_jvecjtvec.py @@ -0,0 +1,190 @@ +from discretize.tests import check_derivative, assert_isadjoint +import numpy as np +import pytest +from simpeg import maps, utils +from discretize.utils import mkvc, refine_tree_xyz +import discretize +import simpeg.potential_fields as PF + + +@pytest.fixture +def mesh(): + dhx, dhy, dhz = 400.0, 400.0, 400.0 # minimum cell width (base mesh cell width) + nbcx = 512 # number of base mesh cells in x + nbcy = 512 + nbcz = 512 + + # Define base mesh (domain and finest discretization) + hx = dhx * np.ones(nbcx) + hy = dhy * np.ones(nbcy) + hz = dhz * np.ones(nbcz) + _mesh = discretize.TreeMesh([hx, hy, hz], x0="CCC") + + xp, yp, zp = np.meshgrid([-1400.0, 1400.0], [-1400.0, 1400.0], [-1000.0, 200.0]) + xy = np.c_[mkvc(xp), mkvc(yp), mkvc(zp)] + _mesh = refine_tree_xyz( + _mesh, + xy, + method="box", + finalize=False, + octree_levels=[1, 1, 1, 1], + ) + _mesh.finalize() + return _mesh + + +@pytest.fixture +def survey(): + ccx = np.linspace(-1400, 1400, num=57) + ccy = np.copy(ccx) + + ccx, ccy = np.meshgrid(ccx, ccy) + + ccz = 50.0 * np.ones_like(ccx) + 
+ components = ["bx", "by", "bz", "tmi"] + rxLoc = PF.magnetics.receivers.Point( + np.c_[utils.mkvc(ccy.T), utils.mkvc(ccx.T), utils.mkvc(ccz.T)], + components=components, + ) + inducing_field = [55000.0, 60.0, 90.0] + srcField = PF.magnetics.sources.UniformBackgroundField( + [rxLoc], inducing_field[0], inducing_field[1], inducing_field[2] + ) + _survey = PF.magnetics.survey.Survey(srcField) + + return _survey + + +@pytest.mark.parametrize( + "deriv_type", ("mu", "rem", "mu_fix_rem", "rem_fix_mu", "both") +) +def test_derivative(deriv_type, mesh, survey): + np.random.seed(40) + + chimap = maps.ChiMap(mesh) + eff_sus_map = maps.EffectiveSusceptibilityMap( + ambient_field_magnitude=survey.source_field.amplitude, nP=mesh.n_cells * 3 + ) + + sus_model = np.abs(np.random.randn(mesh.n_cells)) + mu_model = chimap * sus_model + + Rx = np.random.randn(mesh.n_cells) + Ry = np.random.randn(mesh.n_cells) + Rz = np.random.randn(mesh.n_cells) + EsusRem = mkvc(np.array([Rx, Ry, Rz]).T) + + u0_Mr_model = eff_sus_map * EsusRem + + if deriv_type == "mu": + mu_map = chimap + mu = None + rem_map = None + rem = None + m = sus_model + if deriv_type == "rem": + mu_map = None + mu = None + rem_map = eff_sus_map + rem = None + m = EsusRem + if deriv_type == "mu_fix_rem": + mu_map = chimap + mu = None + rem_map = None + rem = u0_Mr_model + m = sus_model + if deriv_type == "rem_fix_mu": + mu_map = None + mu = mu_model + rem_map = eff_sus_map + rem = None + m = EsusRem + if deriv_type == "both": + wire_map = maps.Wires(("mu", mesh.n_cells), ("rem", mesh.n_cells * 3)) + mu_map = chimap * wire_map.mu + rem_map = eff_sus_map * wire_map.rem + m = np.r_[sus_model, EsusRem] + mu = None + rem = None + + simulation = PF.magnetics.simulation.Simulation3DDifferential( + survey=survey, mesh=mesh, mu=mu, rem=rem, muMap=mu_map, remMap=rem_map + ) + + def sim_func(m): + d = simulation.dpred(m) + + def J(v): + return simulation.Jvec(m, v) + + return d, J + + assert check_derivative(sim_func, m, plotIt=False, num=6, eps=1e-8, random_seed=40) + + +@pytest.mark.parametrize( + "deriv_type", ("mu", "rem", "mu_fix_rem", "rem_fix_mu", "both") +) +def test_adjoint(deriv_type, mesh, survey): + np.random.seed(40) + + chimap = maps.ChiMap(mesh) + eff_sus_map = maps.EffectiveSusceptibilityMap( + ambient_field_magnitude=survey.source_field.amplitude, nP=mesh.n_cells * 3 + ) + + sus_model = np.abs(np.random.randn(mesh.n_cells)) + mu_model = chimap * sus_model + + Rx = np.random.randn(mesh.n_cells) + Ry = np.random.randn(mesh.n_cells) + Rz = np.random.randn(mesh.n_cells) + EsusRem = mkvc(np.array([Rx, Ry, Rz]).T) + + u0_Mr_model = eff_sus_map * EsusRem + + if deriv_type == "mu": + mu_map = chimap + mu = None + rem_map = None + rem = None + m = sus_model + if deriv_type == "rem": + mu_map = None + mu = None + rem_map = eff_sus_map + rem = None + m = EsusRem + if deriv_type == "mu_fix_rem": + mu_map = chimap + mu = None + rem_map = None + rem = u0_Mr_model + m = sus_model + if deriv_type == "rem_fix_mu": + mu_map = None + mu = mu_model + rem_map = eff_sus_map + rem = None + m = EsusRem + if deriv_type == "both": + wire_map = maps.Wires(("mu", mesh.n_cells), ("rem", mesh.n_cells * 3)) + mu_map = chimap * wire_map.mu + rem_map = eff_sus_map * wire_map.rem + m = np.r_[sus_model, EsusRem] + mu = None + rem = None + + simulation = PF.magnetics.simulation.Simulation3DDifferential( + survey=survey, mesh=mesh, mu=mu, rem=rem, muMap=mu_map, remMap=rem_map + ) + + def J(v): + return simulation.Jvec(m, v) + + def JT(v): + return simulation.Jtvec(m, v) + + 
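+    # assert_isadjoint draws random vectors v and w and checks the dot-product
+    # identity that defines an adjoint pair. Roughly (illustrative sketch
+    # only, not the exact discretize implementation):
+    #
+    #     rng = np.random.default_rng(40)
+    #     v = rng.standard_normal(len(m))
+    #     w = rng.standard_normal(survey.nD)
+    #     assert np.isclose(J(v) @ w, v @ JT(w))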
assert_isadjoint(J, JT, len(m), survey.nD, random_seed=40) diff --git a/tests/pf/test_mag_uniform_background_field.py b/tests/pf/test_mag_uniform_background_field.py index feeb65e909..3f8164aa1c 100644 --- a/tests/pf/test_mag_uniform_background_field.py +++ b/tests/pf/test_mag_uniform_background_field.py @@ -4,7 +4,7 @@ import pytest import numpy as np -from simpeg.potential_fields.magnetics import UniformBackgroundField, SourceField, Point +from simpeg.potential_fields.magnetics import UniformBackgroundField, Point def test_invalid_parameters_argument(): @@ -17,15 +17,6 @@ def test_invalid_parameters_argument(): UniformBackgroundField(parameters=parameters) -def test_deprecated_source_field(): - """ - Test if instantiating a magnetics.source.SourceField object raises an error - """ - msg = "SourceField has been removed, please use UniformBackgroundField." - with pytest.raises(NotImplementedError, match=msg): - SourceField() - - @pytest.mark.parametrize("receiver_as_list", (True, False)) def test_invalid_receiver_type(receiver_as_list): """ diff --git a/tests/pf/test_pf_survey.py b/tests/pf/test_pf_survey.py new file mode 100644 index 0000000000..0cefa8dd88 --- /dev/null +++ b/tests/pf/test_pf_survey.py @@ -0,0 +1,91 @@ +import functools +import numpy as np +import pytest +from simpeg.potential_fields import gravity as grav +from simpeg.potential_fields import magnetics as mag +from simpeg.data import Data + + +@pytest.fixture(params=["gravity", "magnetics"]) +def survey(request): + rx_locs = np.random.rand(20, 3) + if request.param == "gravity": + rx1_components = ["gx", "gz"] + rx2_components = "gzz" + mod = grav + Source = functools.partial(grav.SourceField) + else: # request.param == "magnetics": + rx1_components = ["bx", "by"] + rx2_components = "tmi" + + mod = mag + Source = functools.partial( + mag.UniformBackgroundField, amplitude=50_000, inclination=90, declination=0 + ) + + rx1 = mod.Point(rx_locs, components=rx1_components) + rx2 = mod.Point(rx_locs, components=rx2_components) + src = Source(receiver_list=[rx1, rx2]) + return mod.Survey(src) + + +def test_survey_counts(survey): + src = survey.source_field + rx1, rx2 = src.receiver_list + + assert rx1.nD == 40 + assert rx2.nD == 20 + assert src.nD == 60 + assert survey.nRx == 40 + np.testing.assert_equal(src.vnD, [40, 20]) + assert survey.nD == 60 + np.testing.assert_equal(survey.vnD, [40, 20]) + + +def test_survey_indexing(survey): + src = survey.source_field + rx1, rx2 = src.receiver_list + d1 = -10 * np.arange(rx1.nD) + d2 = 10 + np.arange(rx2.nD) + data_vec = np.r_[d1, d2] + + data = Data(survey=survey, dobs=data_vec) + + np.testing.assert_equal(data[src, rx1], d1) + np.testing.assert_equal(data[src, rx2], d2) + + +@pytest.mark.parametrize("survey_cls", [grav.Survey, mag.Survey]) +def test_source_list_kwarg(survey_cls): + # cannot pass anything to source list for these classes. 
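+    # "placeholder" stands in for the positional source_field argument; the
+    # TypeError about source_list is expected to be raised before that
+    # argument is validated.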
+    with pytest.raises(TypeError, match=r"source_list is not a valid argument to .*"):
+        survey_cls("placeholder", source_list=None)
+
+
+@pytest.mark.parametrize(
+    "survey_cls, source_cls",
+    [
+        (grav.Survey, grav.SourceField),
+        (
+            mag.Survey,
+            functools.partial(
+                mag.UniformBackgroundField,
+                amplitude=50_000,
+                inclination=90,
+                declination=0,
+            ),
+        ),
+    ],
+)
+def test_setting_sourcefield(survey_cls, source_cls):
+    src1 = source_cls(receiver_list=[])
+    survey = survey_cls(source_field=src1)
+    assert survey.source_field is src1
+    assert survey.source_list[0] is src1
+
+    src2 = source_cls(receiver_list=[])
+    survey.source_field = src2
+    assert survey.source_field is not src1
+    assert survey.source_field is src2
+    assert survey.source_list[0] is not src1
+    assert survey.source_list[0] is src2
diff --git a/tests/pf/test_survey_counting.py b/tests/pf/test_survey_counting.py
deleted file mode 100644
index d0e0d71002..0000000000
--- a/tests/pf/test_survey_counting.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import numpy as np
-from simpeg.potential_fields import gravity as grav
-from simpeg.potential_fields import magnetics as mag
-
-
-def test_gravity_survey():
-    rx_locs = np.random.rand(20, 3)
-    rx_components = ["gx", "gz"]
-
-    rx1 = grav.Point(rx_locs, components=rx_components)
-    rx2 = grav.Point(rx_locs, components="gzz")
-    src = grav.SourceField([rx1, rx2])
-    survey = grav.Survey(src)
-
-    assert rx1.nD == 40
-    assert rx2.nD == 20
-    assert src.nD == 60
-    assert survey.nRx == 40
-    np.testing.assert_equal(src.vnD, [40, 20])
-    assert survey.nD == 60
-    np.testing.assert_equal(survey.vnD, [40, 20])
-
-
-def test_magnetics_survey():
-    rx_locs = np.random.rand(20, 3)
-    rx_components = ["bx", "by", "bz"]
-
-    rx1 = mag.Point(rx_locs, components=rx_components)
-    rx2 = mag.Point(rx_locs, components="tmi")
-    src = mag.UniformBackgroundField(
-        receiver_list=[rx1, rx2], amplitude=50_000, inclination=90, declination=0
-    )
-    survey = mag.Survey(src)
-
-    assert rx1.nD == 60
-    assert rx2.nD == 20
-    assert src.nD == 80
-    np.testing.assert_equal(src.vnD, [60, 20])
-    assert survey.nRx == 40
-    assert survey.nD == 80
-    np.testing.assert_equal(survey.vnD, [60, 20])
diff --git a/tests/utils/ellipsoid.py b/tests/utils/ellipsoid.py
new file mode 100644
index 0000000000..7b97dfb210
--- /dev/null
+++ b/tests/utils/ellipsoid.py
@@ -0,0 +1,553 @@
+from simpeg import utils
+import numpy as np
+
+"""
+The code in this file for forward modelling the magnetic field of ellipsoids
+is based on the implementation by Diego Takahashi Tomazella and
+Vanderlei C. Oliveira Jr., which is available at
+https://github.com/pinga-lab/magnetic-ellipsoid, and has been released under
+the BSD 3-Clause license:
+
+> Copyright (c) Diego Takahashi Tomazella and Vanderlei C. Oliveira Jr.
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions are met:
+>
+> * Redistributions of source code must retain the above copyright notice, this
+>   list of conditions and the following disclaimer.
+> * Redistributions in binary form must reproduce the above copyright notice,
+>   this list of conditions and the following disclaimer in the documentation
+>   and/or other materials provided with the distribution.
+> * Neither the names of the copyright holders nor the names of any
+>   contributors may be used to endorse or promote products derived from this
+>   software without specific prior written permission.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+> AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+> IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+> ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+> LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+> CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+> SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+> INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+> CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+> ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
+"""
+
+
+class ProlateEllipsoid:
+    r"""Class for the magnetostatic solution for a permeable and remanently
+    magnetized prolate ellipsoid in a uniform magnetostatic field,
+    based on: https://github.com/pinga-lab/magnetic-ellipsoid
+
+    The ``ProlateEllipsoid`` class is used to analytically compute the
+    external and internal secondary magnetic flux density.
+
+    Parameters
+    ----------
+    center : (3) array_like, optional
+        Center of the ellipsoid (m).
+    axes : (2) array_like, optional
+        Major axis and (shared) minor axis of the prolate ellipsoid (m).
+    strike_dip_rake : (3) array_like, optional
+        Strike, dip, and rake of the ellipsoid, as defined in the paper
+        (degrees). Sets the property ``V`` (rotation matrix).
+    susceptibility : float
+        Magnetic susceptibility of the ellipsoid (SI).
+    Mr : (3) array_like, optional
+        Intrinsic remanent magnetic polarization (\mu_0 M) of the ellipsoid.
+        If susceptibility is 0, this is equivalent to the total resultant
+        magnetization (nT).
+    inducing_field : (3) array_like, optional
+        Ambient geomagnetic field: (strength (nT), inclination (degrees),
+        declination (degrees)).
+    """
+
+    def __init__(
+        self,
+        center=(0, 0, 0),
+        axes=(100.1, 100),
+        strike_dip_rake=(0, 0, 0),
+        susceptibility=0.0,
+        Mr=(0, 0, 0),
+        inducing_field=(50000, 0, 90),
+        **kwargs,
+    ):
+        self.center = self.__redefine_coords(center)
+        self.axes = axes
+        self.susceptibility = susceptibility
+        self.V = strike_dip_rake
+        Mr = np.array(Mr)
+        self.Mr = Mr.T
+        self.B_0 = inducing_field
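+    # Editorial note: the reference implementation appears to use a different
+    # coordinate convention than SimPEG (x north, y east, z down versus
+    # x east, y north, z up). The private ``__redefine_coords`` helper below
+    # translates between the two by swapping the first two components and
+    # negating the third, i.e. (x, y, z) -> (y, x, -z).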
+    @property
+    def center(self):
+        """Center of the ellipsoid
+
+        Returns
+        -------
+        (3) numpy.ndarray of float
+            Center of the ellipsoid. Default = np.r_[0, 0, 0]
+        """
+        return self._center
+
+    @center.setter
+    def center(self, vec):
+
+        try:
+            vec = np.atleast_1d(vec).astype(float)
+        except (TypeError, AttributeError, ValueError):
+            raise TypeError(f"center must be array_like, got {type(vec)}")
+
+        if len(vec) != 3:
+            raise ValueError(
+                f"center must be array_like with shape (3,), got {len(vec)}"
+            )
+
+        self._center = vec
+
+    @property
+    def axes(self):
+        """The major axis and shared minor axes of the prolate ellipsoid
+
+        Returns
+        -------
+        (3) numpy.ndarray of float
+            Axes (a, b, b) of the prolate ellipsoid. Default stores
+            np.r_[100.1, 100, 100]
+        """
+        return self._axes
+
+    @axes.setter
+    def axes(self, vec):
+
+        try:
+            vec = np.atleast_1d(vec).astype(float)
+        except (TypeError, AttributeError, ValueError):
+            raise TypeError(f"axes must be array_like, got {type(vec)}")
+
+        if len(vec) != 2:
+            raise ValueError(
+                f"axes must be array_like with shape (2,), got {len(vec)}"
+            )
+
+        if vec[0] <= vec[1]:
+            raise ValueError(
+                "The major axis of the ellipsoid must be greater than the minor axes"
+            )
+
+        if np.any(np.less(vec, 0)):
+            raise ValueError("The axes must be positive")
+        axes = np.zeros(3)
+        axes[:2] = vec
+        axes[2] = vec[1]
+        self._axes = axes
+
+    @property
+    def V(self):
+        """Rotation Matrix of Ellipsoid
+
+        Returns
+        -------
+        (3, 3) numpy.ndarray of float
+            Rotation Matrix of Ellipsoid
+        """
+        return self._V
+
+    @V.setter
+    def V(self, vec):
+
+        try:
+            vec = np.atleast_1d(vec).astype(float)
+        except (TypeError, AttributeError, ValueError):
+            raise TypeError(f"strike_dip_rake must be array_like, got {type(vec)}")
+
+        if len(vec) != 3:
+            raise ValueError(
+                f"strike_dip_rake must be array_like with shape (3,), got {len(vec)}"
+            )
+
+        self._V = self.__rotation_matrix(np.radians(vec))
+
+    @property
+    def susceptibility(self):
+        """Magnetic susceptibility (SI)
+
+        Returns
+        -------
+        float
+            Magnetic susceptibility (SI)
+        """
+        return self._susceptibility
+
+    @susceptibility.setter
+    def susceptibility(self, item):
+        item = float(item)
+        if item < 0.0:
+            raise ValueError("Susceptibility must be non-negative")
+        self._susceptibility = item
+
+    @property
+    def Mr(self):
+        r"""The remanent polarization (\mu_0 M), (nT)
+
+        Returns
+        -------
+        (3) numpy.ndarray of float
+            Remanent polarization (nT)
+        """
+        return self._Mr
+
+    @Mr.setter
+    def Mr(self, vec):
+
+        try:
+            vec = np.atleast_1d(vec).astype(float)
+        except (TypeError, AttributeError, ValueError):
+            raise TypeError(f"Mr must be array_like, got {type(vec)}")
+
+        if len(vec) != 3:
+            raise ValueError(
+                f"Mr must be array_like with shape (3,), got {len(vec)}"
+            )
+        self._Mr = self.__redefine_coords(vec)
+
+    @property
+    def B_0(self):
+        """The inducing field vector (nT).
+
+        Returns
+        -------
+        (3) numpy.ndarray of float
+            The inducing field in Cartesian components (nT).
+        """
+        return self._B_0
+
+    @B_0.setter
+    def B_0(self, vec):
+
+        try:
+            vec = np.atleast_1d(vec).astype(float)
+        except (TypeError, AttributeError, ValueError):
+            raise TypeError(f"inducing_field must be array_like, got {type(vec)}")
+
+        if len(vec) != 3:
+            raise ValueError(
+                f"inducing_field must be array_like with shape (3,), got {len(vec)}"
+            )
+
+        mag = utils.mat_utils.dip_azimuth2cartesian(
+            vec[1],
+            vec[2],
+        )
+
+        B_0 = np.array([mag[:, 0] * vec[0], mag[:, 1] * vec[0], mag[:, 2] * vec[0]])[
+            :, 0
+        ]
+
+        B_0 = self.__redefine_coords(B_0)
+
+        self._B_0 = B_0
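+    # Editorial sketch (hypothetical values): typical use of this class to
+    # evaluate the analytic TMI anomaly of a purely induced prolate body,
+    #
+    #     ell = ProlateEllipsoid(
+    #         center=(0.0, 0.0, -200.0),
+    #         axes=(300.0, 100.0),
+    #         susceptibility=0.1,
+    #         inducing_field=(55000.0, 60.0, 90.0),
+    #     )
+    #     tmi = ell.TMI(np.array([[0.0, 0.0, 50.0]]))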
+    def get_indices(self, xyz):
+        """Return a boolean array flagging the points that lie inside the
+        ellipsoid
+
+        Parameters
+        ----------
+        xyz : (..., 3) numpy.ndarray
+            Locations to evaluate at in units m.
+
+        Returns
+        -------
+        ind : (...) numpy.ndarray of bool
+            True where the point is internal to the ellipsoid
+
+        """
+
+        V = self.V
+        a = self.axes[0]
+        b = self.axes[1]
+        c = self.axes[1]
+        A = np.identity(3)
+        A[0, 0] = a**-2
+        A[1, 1] = b**-2
+        A[2, 2] = c**-2
+        A = V @ A @ V.T
+        center = self.center
+
+        t1 = xyz[:, 1] - center[0]
+        t2 = xyz[:, 0] - center[1]
+        t3 = -xyz[:, 2] - center[2]
+
+        r_m_rc = np.array([t1, t2, t3])
+        b = A @ r_m_rc
+
+        values = np.sum(r_m_rc * b, axis=0)
+
+        ind = values < 1
+
+        return ind
+
+    def Magnetization(self):
+        """Returns the resultant magnetization of the ellipsoid as a function
+        of susceptibility and remanent magnetization
+
+        Returns
+        -------
+        M : (3) numpy.ndarray of float
+            Resultant magnetization (nT)
+
+        """
+
+        V = self.V
+
+        K = self.susceptibility * np.identity(3)  # /(4*np.pi)
+
+        N1 = self.__depolarization_prolate()
+
+        I = np.identity(3)
+
+        inv = np.linalg.inv(I + K @ N1)
+
+        M = V @ inv @ V.T @ (K @ self.B_0.T + self.Mr.T)
+
+        M = self.__redefine_coords(M.T)
+
+        return M
+
+    def anomalous_bfield(self, xyz):
+        """Returns the internal and external secondary magnetic field B_s
+
+        Parameters
+        ----------
+        xyz : (..., 3) numpy.ndarray
+            Locations to evaluate at in units m.
+
+        Returns
+        -------
+        B_s : (..., 3) np.ndarray
+            Units of nT
+
+        """
+        a = self.axes[0]
+        b = self.axes[1]
+        axes_array = np.array([a, b, b])
+
+        internal_indices = self.get_indices(xyz)
+        xyz = self.__redefine_coords(xyz)
+        xyz_m_center = xyz - self.center
+
+        body_axis_coords = (self.V.T @ xyz_m_center.T).T
+
+        x1 = body_axis_coords[:, 0]
+        x2 = body_axis_coords[:, 1]
+        x3 = body_axis_coords[:, 2]
+
+        xyz = [x1, x2, x3]
+
+        M = self.__redefine_coords(self.Magnetization())
+
+        lam = self.__get_lam(x1, x2, x3)
+
+        dlam = self.__d_lam(x1, x2, x3, lam)
+
+        R = np.sqrt((a**2 + lam) * (b**2 + lam) * (b**2 + lam))
+
+        h = []
+        for i in range(len(axes_array)):
+            h.append(-1 / ((axes_array[i] ** 2 + lam) * R))
+
+        g = self.__g(lam)
+
+        N2 = self.__N2(h, g, dlam, xyz)
+
+        B_s = self.V @ N2 @ self.V.T @ M
+
+        N1 = self.__depolarization_prolate()
+
+        M_norotate = self.Magnetization()
+
+        B_s = self.__redefine_coords(B_s)
+
+        B_s[internal_indices, :] = M_norotate - N1 @ M_norotate
+
+        return B_s
+
+    def TMI(self, xyz):
+        """Returns the internal and external exact TMI data
+
+        Parameters
+        ----------
+        xyz : (..., 3) numpy.ndarray
+            Locations to evaluate at in units m.
+
+        Returns
+        -------
+        TMI : (...,) np.ndarray
+            Units of nT
+
+        """
+
+        B_0 = self.__redefine_coords(self.B_0)
+
+        B = self.anomalous_bfield(xyz)
+
+        TMI = np.linalg.norm(B_0 + B, axis=1) - np.linalg.norm(self.B_0)
+
+        return TMI
+
+    def TMI_approx(self, xyz):
+        """Returns the internal and external approximate TMI data
+
+        Parameters
+        ----------
+        xyz : (..., 3) numpy.ndarray
+            Locations to evaluate at in units m.
+ + Returns + ------- + TMI_approx : (...,) np.ndarray + Units of nT + + """ + + B = self.anomalous_bfield(xyz) + B0 = self.__redefine_coords(self.B_0) + + TMI_approx = (B @ B0.T) / np.linalg.norm(B0) + + return TMI_approx + + def __redefine_coords(self, coords): + coords_copy = np.copy(coords) + if len(np.shape(coords)) == 1: + + temp = np.copy(coords[0]) + coords_copy[0] = coords[1] + coords_copy[1] = temp + coords_copy[2] *= -1 + else: + temp = np.copy(coords[:, 0]) + coords_copy[:, 0] = coords[:, 1] + coords_copy[:, 1] = temp + coords_copy[:, 2] *= -1 + + return coords_copy + + def __rotation_matrix(self, strike_dip_rake): + strike = strike_dip_rake[0] + dip = strike_dip_rake[1] + rake = strike_dip_rake[2] + + def R1(theta): + return np.array( + [ + [1, 0, 0], + [0, np.cos(theta), np.sin(theta)], + [0, -np.sin(theta), np.cos(theta)], + ] + ) + + def R2(theta): + return np.array( + [ + [np.cos(theta), 0, -np.sin(theta)], + [0, 1, 0], + [np.sin(theta), 0, np.cos(theta)], + ] + ) + + def R3(theta): + return np.array( + [ + [np.cos(theta), np.sin(theta), 0], + [-np.sin(theta), np.cos(theta), 0], + [0, 0, 1], + ] + ) + + V = R1(np.pi / 2) @ R2(strike) @ R1(np.pi / 2 - dip) @ R3(rake) + + return V + + def __depolarization_prolate(self): + a = self.axes[0] + b = self.axes[1] + + m = a / b + + t11 = 1 / (m**2 - 1) + t22 = m / (m**2 - 1) ** 0.5 + t33 = np.log(m + (m**2 - 1) ** 0.5) + + n11 = t11 * (t22 * t33 - 1) + n22 = 0.5 * (1 - n11) + n33 = n22 + + N1 = np.zeros((3, 3)) + N1[0, 0] = n11 + N1[1, 1] = n22 + N1[2, 2] = n33 + + return N1 + + def __N2(self, h, g, dlam, xyz): + size = np.shape(g[0])[0] + N2 = np.zeros((size, 3, 3)) + abc_2 = self.axes[0] * self.axes[1] * self.axes[2] / 2 + for i in range(3): + for j in range(3): + if i == j: + N2[:, i, j] = -abc_2 * (dlam[i] * h[i] * xyz[i] + g[i]) + else: + N2[:, i, j] = -abc_2 * (dlam[i] * h[j] * xyz[j]) + + return N2 + + def __get_lam(self, x1, x2, x3): + a = self.axes[0] + b = self.axes[1] + p1 = a**2 + b**2 - x1**2 - x2**2 - x3**2 + p0 = a**2 * b**2 - b**2 * x1**2 - a**2 * (x2**2 + x3**2) + lam = (-p1 + np.sqrt(p1**2 - 4 * p0)) / 2 + + return lam + + def __d_lam(self, x1, x2, x3, lam): + + dlam = [] + xyz = [x1, x2, x3] + + den = ( + (x1 / (self.axes[0] ** 2 + lam)) ** 2 + + (x2 / (self.axes[1] ** 2 + lam)) ** 2 + + (x3 / (self.axes[1] ** 2 + lam)) ** 2 + ) + + for i in range(3): + num = (2 * xyz[i]) / (self.axes[i] ** 2 + lam) + dlam.append(num / den) + + return dlam + + def __g(self, lam): + a = self.axes[0] + b = self.axes[1] + a2lam = a**2 + lam + b2lam = b**2 + lam + a2mb2 = a**2 - b**2 + + gmul = 1 / (a2mb2**1.5) + g1t1 = np.log((a2mb2**0.5 + a2lam**0.5) / b2lam**0.5) + g1t2 = (a2mb2 / a2lam) ** 0.5 + + g2t2 = (a2mb2 * a2lam) ** 0.5 / b2lam + + g1 = 2 * gmul * (g1t1 - g1t2) + g2 = gmul * (g2t2 - g1t1) + g3 = g2 + + g = [g1, g2, g3] + + return g diff --git a/tests/utils/test_default_solver.py b/tests/utils/test_default_solver.py index e8e2fc3ba1..4c86b47333 100644 --- a/tests/utils/test_default_solver.py +++ b/tests/utils/test_default_solver.py @@ -1,12 +1,9 @@ +import re import warnings import pytest from pymatsolver import SolverCG -from simpeg.utils.solver_utils import ( - get_default_solver, - set_default_solver, - DefaultSolverWarning, -) +from simpeg.utils import get_default_solver, set_default_solver @pytest.fixture(autouse=True) @@ -27,30 +24,27 @@ def test_default_error(): class Temp: pass - with pytest.warns(DefaultSolverWarning): - initial_default = get_default_solver(warn=True) + initial_default = get_default_solver() - with 
pytest.raises( - TypeError, - match="Default solver must be a subclass of pymatsolver.solvers.Base.", - ): + regex = re.escape("Default solver must be a subclass of pymatsolver.solvers.Base.") + with pytest.raises(TypeError, match=regex): set_default_solver(Temp) - with pytest.warns(DefaultSolverWarning): - after_default = get_default_solver(warn=True) + after_default = get_default_solver() # make sure we didn't accidentally set the default. - assert initial_default == after_default + assert initial_default is after_default -def test_warning(): - """Test if warning is raised when warn=True.""" - with pytest.warns(DefaultSolverWarning, match="Using the default solver"): +def test_deprecation_warning(): + """Test deprecation warning for the warn argument.""" + regex = re.escape("The `warn` argument has been deprecated and will be removed in") + with pytest.warns(FutureWarning, match=regex): get_default_solver(warn=True) -def test_no_warning(): - """Test if no warning is issued with default parameters.""" +def test_no_deprecation_warning(): + """Test if no deprecation warning is issued with default parameters.""" with warnings.catch_warnings(): warnings.simplefilter("error") # raise error if warning was raised get_default_solver() diff --git a/tests/utils/test_mat_utils.py b/tests/utils/test_mat_utils.py index c194cfc61b..f7e30402f0 100644 --- a/tests/utils/test_mat_utils.py +++ b/tests/utils/test_mat_utils.py @@ -124,8 +124,8 @@ def test_combo_eigenvalue_by_power_iteration(self): print("Eigenvalue Utils for a mixed ComboObjectiveFunction is validated.") -class TestDeprecatedSeed: - """Test deprecation of ``seed`` argument.""" +class TestRemovedSeed: + """Test removed ``seed`` argument.""" @pytest.fixture def mock_objfun(self): @@ -140,48 +140,16 @@ def deriv2(self, m, v=None, **kwargs): return MockObjectiveFunction - def get_message_duplicated_error(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"Cannot pass both '{new_name}' and '{old_name}'." - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - - def get_message_deprecated_warning(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - - def test_warning_argument(self, mock_objfun): + def test_error_argument(self, mock_objfun): """ - Test if warning is raised after passing ``seed``. + Test if error is raised after passing ``seed``. """ - msg = self.get_message_deprecated_warning("seed", "random_seed") + msg = "got an unexpected keyword argument 'seed'" n_params = 5 combo = mock_objfun(nP=n_params) + 3.0 * mock_objfun(nP=n_params) model = np.ones(n_params) - with pytest.warns(FutureWarning, match=msg): - result_seed = eigenvalue_by_power_iteration( - combo_objfct=combo, model=model, seed=42 - ) - # Ensure that using `seed` and `random_seed` generate the same output - result_random_seed = eigenvalue_by_power_iteration( - combo_objfct=combo, model=model, random_seed=42 - ) - np.testing.assert_allclose(result_seed, result_random_seed) - - def test_error_duplicated_argument(self): - """ - Test error after passing ``seed`` and ``random_seed``. 
- """ - msg = self.get_message_duplicated_error("seed", "random_seed") with pytest.raises(TypeError, match=msg): - eigenvalue_by_power_iteration( - combo_objfct=None, model=None, random_seed=42, seed=42 - ) + eigenvalue_by_power_iteration(combo_objfct=combo, model=model, seed=42) if __name__ == "__main__": diff --git a/tests/utils/test_model_builder.py b/tests/utils/test_model_builder.py index d35d5901dc..6530b815d3 100644 --- a/tests/utils/test_model_builder.py +++ b/tests/utils/test_model_builder.py @@ -3,51 +3,26 @@ """ import pytest -import numpy as np from simpeg.utils.model_builder import create_random_model -class TestDeprecateSeedProperty: +class TestRemovalSeedProperty: """ - Test deprecation of seed property. + Test removed seed property. """ - def get_message_duplicated_error(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"Cannot pass both '{new_name}' and '{old_name}'." - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - - def get_message_deprecated_warning(self, old_name, new_name, version="v0.24.0"): - msg = ( - f"'{old_name}' has been deprecated and will be removed in " - f" SimPEG {version}, please use '{new_name}' instead." - ) - return msg - @pytest.fixture def shape(self): return (5, 5) - def test_warning_argument(self, shape): + def test_error_argument(self, shape): """ - Test if warning is raised after passing ``seed`` as argument. + Test if error is raised after passing ``seed`` as argument. """ - msg = self.get_message_deprecated_warning("seed", "random_seed") + msg = "Invalid arguments 'seed'" seed = 42135 - with pytest.warns(FutureWarning, match=msg): - result = create_random_model(shape, seed=seed) - np.testing.assert_allclose(result, create_random_model(shape, random_seed=seed)) - - def test_error_duplicated_argument(self, shape): - """ - Test error after passing ``seed`` and ``random_seed`` as arguments. - """ - msg = self.get_message_duplicated_error("seed", "random_seed") with pytest.raises(TypeError, match=msg): - create_random_model(shape, seed=42, random_seed=42) + create_random_model(shape, seed=seed) def test_error_invalid_kwarg(self, shape): """ @@ -56,5 +31,4 @@ def test_error_invalid_kwarg(self, shape): kwargs = {"foo": 1, "bar": 2} msg = "Invalid arguments 'foo', 'bar'." 
        with pytest.raises(TypeError, match=msg):
-            with pytest.warns(FutureWarning):
-                create_random_model(shape, seed=10, **kwargs)
+            create_random_model(shape, **kwargs)
diff --git a/tests/utils/test_validators.py b/tests/utils/test_validators.py
index 2a6d1bc0dd..ad86256ab7 100644
--- a/tests/utils/test_validators.py
+++ b/tests/utils/test_validators.py
@@ -150,6 +150,18 @@ def test_list_validation():
         "ListProperty", ["Hello", "Hello", "Hello"], str, ensure_unique=True
     )
 
+    # list is not long enough:
+    with pytest.raises(ValueError, match=r"'ListProperty' must have at least.*"):
+        validate_list_of_types("ListProperty", [1, 2, 3, 4], int, min_n=5)
+
+    # list is too long:
+    with pytest.raises(ValueError, match=r"'ListProperty' must have at most.*"):
+        validate_list_of_types("ListProperty", [1, 2, 3, 4], int, max_n=2)
+
+    # list is not an exact length:
+    with pytest.raises(ValueError, match=r"'ListProperty' must have exactly.*"):
+        validate_list_of_types("ListProperty", [1, 2, 3, 4], int, min_n=3, max_n=3)
+
 
 def test_location_validation():
     # simple valid location
diff --git a/tutorials/03-gravity/plot_1a_gravity_anomaly.py b/tutorials/03-gravity/plot_1a_gravity_anomaly.py
index 25e442ed45..8665fc8189 100644
--- a/tutorials/03-gravity/plot_1a_gravity_anomaly.py
+++ b/tutorials/03-gravity/plot_1a_gravity_anomaly.py
@@ -2,262 +2,13 @@
 Forward Simulation of Gravity Anomaly Data on a Tensor Mesh
 ===========================================================
 
-Here we use the module *simpeg.potential_fields.gravity* to predict gravity
-anomaly data for a synthetic density contrast model. The simulation is
-carried out on a tensor mesh. For this tutorial, we focus on the following:
+.. important::
 
-    - How to create gravity surveys
-    - How to predict gravity anomaly data for a density contrast model
-    - How to include surface topography
-    - The units of the density contrast model and resulting data
+    This tutorial has been moved to `User Tutorials
+    `_.
+    Check out the `3D Forward Simulation of Gravity Anomaly Data
+    `_ tutorial.
 
-"""
-
-#########################################################################
-# Import Modules
-# --------------
-#
-
-import numpy as np
-from scipy.interpolate import LinearNDInterpolator
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import os
-
-from discretize import TensorMesh
-from discretize.utils import mkvc, active_from_xyz
-
-from simpeg.utils import plot2Ddata, model_builder
-from simpeg import maps
-from simpeg.potential_fields import gravity
-
-save_output = False
-
-# sphinx_gallery_thumbnail_number = 2
-
-#############################################
-# Defining Topography
-# -------------------
-#
-# Surface topography is defined as an (N, 3) numpy array. We create it here but
-# the topography could also be loaded from a file.
-#
-
-[x_topo, y_topo] = np.meshgrid(np.linspace(-200, 200, 41), np.linspace(-200, 200, 41))
-z_topo = -15 * np.exp(-(x_topo**2 + y_topo**2) / 80**2)
-x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
-topo_xyz = np.c_[x_topo, y_topo, z_topo]
-
-
-#############################################
-# Defining the Survey
-# -------------------
-#
-# Here, we define survey that will be used for the forward simulation. Gravity
-# surveys are simple to create. The user only needs an (N, 3) array to define
-# the xyz locations of the observation locations, and a list of field components
-# which are to be measured.
-#
-
-# Define the observation locations as an (N, 3) numpy array or load them.
-x = np.linspace(-80.0, 80.0, 17) -y = np.linspace(-80.0, 80.0, 17) -x, y = np.meshgrid(x, y) -x, y = mkvc(x.T), mkvc(y.T) -fun_interp = LinearNDInterpolator(np.c_[x_topo, y_topo], z_topo) -z = fun_interp(np.c_[x, y]) + 5.0 -receiver_locations = np.c_[x, y, z] - -# Define the component(s) of the field we want to simulate as strings within -# a list. Here we simulate only the vertical component of gravity anomaly. -components = ["gz"] - -# Use the observation locations and components to define the receivers. To -# simulate data, the receivers must be defined as a list. -receiver_list = gravity.receivers.Point(receiver_locations, components=components) - -receiver_list = [receiver_list] - -# Defining the source field. -source_field = gravity.sources.SourceField(receiver_list=receiver_list) - -# Defining the survey -survey = gravity.survey.Survey(source_field) - - -############################################# -# Defining a Tensor Mesh -# ---------------------- -# -# Here, we create the tensor mesh that will be used to predict gravity anomaly -# data. -# - -dh = 5.0 -hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] -hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] -hz = [(dh, 5, -1.3), (dh, 15)] -mesh = TensorMesh([hx, hy, hz], "CCN") - -######################################################## -# Density Contrast Model and Mapping on Tensor Mesh -# ------------------------------------------------- -# -# Here, we create the density contrast model that will be used to predict -# gravity anomaly data and the mapping from the model to the mesh. The model -# consists of a less dense block and a more dense sphere. -# - -# Define density contrast values for each unit in g/cc -background_density = 0.0 -block_density = -0.2 -sphere_density = 0.2 - -# Find the indices for the active mesh cells (e.g. cells below surface) -ind_active = active_from_xyz(mesh, topo_xyz) - -# Define mapping from model to active cells. The model consists of a value for -# each cell below the Earth's surface. -nC = int(ind_active.sum()) -model_map = maps.IdentityMap(nP=nC) - -# Define model. Models in SimPEG are vector arrays. -model = background_density * np.ones(nC) -# You could find the indicies of specific cells within the model and change their -# value to add structures. 
-ind_block = ( - (mesh.gridCC[ind_active, 0] > -50.0) - & (mesh.gridCC[ind_active, 0] < -20.0) - & (mesh.gridCC[ind_active, 1] > -15.0) - & (mesh.gridCC[ind_active, 1] < 15.0) - & (mesh.gridCC[ind_active, 2] > -50.0) - & (mesh.gridCC[ind_active, 2] < -30.0) -) -model[ind_block] = block_density - -# You can also use SimPEG utilities to add structures to the model more concisely -ind_sphere = model_builder.get_indices_sphere( - np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC -) -ind_sphere = ind_sphere[ind_active] -model[ind_sphere] = sphere_density - -# Plot Density Contrast Model -fig = plt.figure(figsize=(9, 4)) -plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan) - -ax1 = fig.add_axes([0.1, 0.12, 0.73, 0.78]) -mesh.plot_slice( - plotting_map * model, - normal="Y", - ax=ax1, - ind=int(mesh.shape_cells[1] / 2), - grid=True, - clim=(np.min(model), np.max(model)), - pcolor_opts={"cmap": "viridis"}, -) -ax1.set_title("Model slice at y = 0 m") -ax1.set_xlabel("x (m)") -ax1.set_ylabel("z (m)") - -ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78]) -norm = mpl.colors.Normalize(vmin=np.min(model), vmax=np.max(model)) -cbar = mpl.colorbar.ColorbarBase( - ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis -) -cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12) - -plt.show() - - -####################################################### -# Simulation: Gravity Anomaly Data on Tensor Mesh -# ----------------------------------------------- -# -# Here we demonstrate how to predict gravity anomaly data using the integral -# formulation. -# - -############################################################################### -# Define the forward simulation. By setting the ``store_sensitivities`` keyword -# argument to ``"forward_only"``, we simulate the data without storing the -# sensitivities. -# - -simulation = gravity.simulation.Simulation3DIntegral( - survey=survey, - mesh=mesh, - rhoMap=model_map, - active_cells=ind_active, - store_sensitivities="forward_only", - engine="choclo", -) - -############################################################################### -# .. tip:: -# -# Since SimPEG v0.21.0 we can use `Choclo -# `_ as the engine for running the gravity -# simulations, which results in faster and more memory efficient runs. Just -# pass ``engine="choclo"`` when constructing the simulation. -# - -############################################################################### -# Compute predicted data for some model -# SimPEG uses right handed coordinate where Z is positive upward. -# This causes gravity signals look "inconsistent" with density values in visualization. 
-dpred = simulation.dpred(model)
-
-# Plot
-fig = plt.figure(figsize=(7, 5))
-
-v_max = np.max(np.abs(dpred))
-
-ax1 = fig.add_axes([0.1, 0.1, 0.75, 0.85])
-plot2Ddata(
-    receiver_list[0].locations,
-    dpred,
-    clim=(-v_max, v_max),
-    ax=ax1,
-    contourOpts={"cmap": "bwr"},
-)
-ax1.set_title("Gravity Anomaly (Z-component)")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("y (m)")
-
-ax2 = fig.add_axes([0.82, 0.1, 0.03, 0.85])
-norm = mpl.colors.Normalize(vmin=-v_max, vmax=v_max)
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e"
-)
-cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
-
-#######################################################
-# Optional: Exporting Results
-# ---------------------------
-#
-# Write the data, topography and true model
-#
-
-if save_output:
-    dir_path = os.path.dirname(__file__).split(os.path.sep)
-    dir_path.extend(["outputs"])
-    dir_path = os.path.sep.join(dir_path) + os.path.sep
-
-    if not os.path.exists(dir_path):
-        os.mkdir(dir_path)
-
-    fname = dir_path + "gravity_topo.txt"
-    np.savetxt(fname, np.c_[topo_xyz], fmt="%.4e")
-
-    np.random.seed(737)
-    maximum_anomaly = np.max(np.abs(dpred))
-    noise = 0.01 * maximum_anomaly * np.random.randn(len(dpred))
-    fname = dir_path + "gravity_data.obs"
-    np.savetxt(fname, np.c_[receiver_locations, dpred + noise], fmt="%.4e")
+"""
diff --git a/tutorials/03-gravity/plot_1b_gravity_gradiometry.py b/tutorials/03-gravity/plot_1b_gravity_gradiometry.py
index 8bb79ffb17..1c26443b9d 100644
--- a/tutorials/03-gravity/plot_1b_gravity_gradiometry.py
+++ b/tutorials/03-gravity/plot_1b_gravity_gradiometry.py
@@ -2,287 +2,12 @@
 Forward Simulation of Gradiometry Data on a Tree Mesh
 =====================================================
 
-Here we use the module *simpeg.potential_fields.gravity* to predict gravity
-gradiometry data for a synthetic density contrast model. The simulation is
-carried out on a tree mesh. For this tutorial, we focus on the following:
+.. important::
 
-    - How to define the survey when we want multiple field components
-    - How to predict gravity gradiometry data for a density contrast model
-    - How to construct tree meshes based on topography and survey geometry
-    - The units of the density contrast model and resulting data
+    This tutorial has been moved to `User Tutorials
+    `_.
+    Check out the `3D Forward Simulation of Gravity Gradiometry Data
+    `_ tutorial.
 
 """
-
-#########################################################################
-# Import Modules
-# --------------
-#
-
-import numpy as np
-from scipy.interpolate import LinearNDInterpolator
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-
-from discretize import TreeMesh
-from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz
-from simpeg.utils import plot2Ddata, model_builder
-from simpeg import maps
-from simpeg.potential_fields import gravity
-
-# sphinx_gallery_thumbnail_number = 2
-
-#############################################
-# Defining Topography
-# -------------------
-#
-# Surface topography is defined as an (N, 3) numpy array. We create it here but
-# the topography could also be loaded from a file.
-# - -[x_topo, y_topo] = np.meshgrid(np.linspace(-200, 200, 41), np.linspace(-200, 200, 41)) -z_topo = -15 * np.exp(-(x_topo**2 + y_topo**2) / 80**2) -x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo) -xyz_topo = np.c_[x_topo, y_topo, z_topo] - - -############################################# -# Defining the Survey -# ------------------- -# -# Here, we define survey that will be used for the forward simulation. Gravity -# surveys are simple to create. The user only needs an (N, 3) array to define -# the xyz locations of the observation locations, and a list of field components -# which are to be measured. -# - -# Define the observation locations as an (N, 3) numpy array or load them -x = np.linspace(-80.0, 80.0, 17) -y = np.linspace(-80.0, 80.0, 17) -x, y = np.meshgrid(x, y) -x, y = mkvc(x.T), mkvc(y.T) -fun_interp = LinearNDInterpolator(np.c_[x_topo, y_topo], z_topo) -z = fun_interp(np.c_[x, y]) + 5 -receiver_locations = np.c_[x, y, z] - -# Define the component(s) of the field we want to simulate as strings within -# a list. Here we measure the x, y and z components of the gravity anomaly at -# each observation location. -components = ["gxz", "gyz", "gzz"] - -# Use the observation locations and components to define the receivers. To -# simulate data, the receivers must be defined as a list. -receiver_list = gravity.receivers.Point(receiver_locations, components=components) - -receiver_list = [receiver_list] - -# Defining the source field. -source_field = gravity.sources.SourceField(receiver_list=receiver_list) - -# Defining the survey -survey = gravity.survey.Survey(source_field) - - -########################################################## -# Defining an OcTree Mesh -# ----------------------- -# -# Here, we create the OcTree mesh that will be used in the forward simulation. -# - -dx = 5 # minimum cell width (base mesh cell width) in x -dy = 5 # minimum cell width (base mesh cell width) in y -dz = 5 # minimum cell width (base mesh cell width) in z - -x_length = 240.0 # domain width in x -y_length = 240.0 # domain width in y -z_length = 120.0 # domain width in z - -# Compute number of base mesh cells required in x and y -nbcx = 2 ** int(np.round(np.log(x_length / dx) / np.log(2.0))) -nbcy = 2 ** int(np.round(np.log(y_length / dy) / np.log(2.0))) -nbcz = 2 ** int(np.round(np.log(z_length / dz) / np.log(2.0))) - -# Define the base mesh -hx = [(dx, nbcx)] -hy = [(dy, nbcy)] -hz = [(dz, nbcz)] -mesh = TreeMesh([hx, hy, hz], x0="CCN") - -# Refine based on surface topography -mesh = refine_tree_xyz( - mesh, xyz_topo, octree_levels=[2, 2], method="surface", finalize=False -) - -# Refine box based on region of interest -xp, yp, zp = np.meshgrid([-100.0, 100.0], [-100.0, 100.0], [-80.0, 0.0]) -xyz = np.c_[mkvc(xp), mkvc(yp), mkvc(zp)] - -mesh = refine_tree_xyz(mesh, xyz, octree_levels=[2, 2], method="box", finalize=False) - -mesh.finalize() - -####################################################### -# Density Contrast Model and Mapping on OcTree Mesh -# ------------------------------------------------- -# -# Here, we create the density contrast model that will be used to simulate gravity -# gradiometry data and the mapping from the model to the mesh. The model -# consists of a less dense block and a more dense sphere. -# - -# Define density contrast values for each unit in g/cc -background_density = 0.0 -block_density = -0.1 -sphere_density = 0.1 - -# Find the indecies for the active mesh cells (e.g. 
cells below surface) -ind_active = active_from_xyz(mesh, xyz_topo) - -# Define mapping from model to active cells. The model consists of a value for -# each cell below the Earth's surface. -nC = int(ind_active.sum()) -model_map = maps.IdentityMap(nP=nC) # model will be value of active cells - -# Define model. Models in SimPEG are vector arrays. -model = background_density * np.ones(nC) - -# You could find the indicies of specific cells within the model and change their -# value to add structures. -ind_block = ( - (mesh.gridCC[ind_active, 0] > -50.0) - & (mesh.gridCC[ind_active, 0] < -20.0) - & (mesh.gridCC[ind_active, 1] > -15.0) - & (mesh.gridCC[ind_active, 1] < 15.0) - & (mesh.gridCC[ind_active, 2] > -50.0) - & (mesh.gridCC[ind_active, 2] < -30.0) -) -model[ind_block] = block_density - -# You can also use SimPEG utilities to add structures to the model more concisely -ind_sphere = model_builder.get_indices_sphere( - np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC -) -ind_sphere = ind_sphere[ind_active] -model[ind_sphere] = sphere_density - -# Plot Density Contrast Model -fig = plt.figure(figsize=(9, 4)) -plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan) - -ax1 = fig.add_axes([0.1, 0.12, 0.73, 0.78]) -mesh.plot_slice( - plotting_map * model, - normal="Y", - ax=ax1, - ind=int(mesh.h[1].size / 2), - grid=True, - clim=(np.min(model), np.max(model)), - pcolor_opts={"cmap": "viridis"}, -) -ax1.set_title("Model slice at y = 0 m") -ax1.set_xlabel("x (m)") -ax1.set_ylabel("z (m)") - -ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78]) -norm = mpl.colors.Normalize(vmin=np.min(model), vmax=np.max(model)) -cbar = mpl.colorbar.ColorbarBase( - ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis -) -cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12) - -plt.show() - -############################################################## -# Simulation: Gravity Gradiometry Data on an OcTree Mesh -# ------------------------------------------------------ -# -# Here we demonstrate how to predict gravity anomaly data using the integral -# formulation. -# - -############################################################################### -# Define the forward simulation. By setting the ``store_sensitivities`` keyword -# argument to ``"forward_only"``, we simulate the data without storing the -# sensitivities -# - -simulation = gravity.simulation.Simulation3DIntegral( - survey=survey, - mesh=mesh, - rhoMap=model_map, - active_cells=ind_active, - store_sensitivities="forward_only", - engine="choclo", -) - -############################################################################### -# .. tip:: -# -# Since SimPEG v0.21.0 we can use `Choclo -# `_ as the engine for running the gravity -# simulations, which results in faster and more memory efficient runs. Just -# pass ``engine="choclo"`` when constructing the simulation. 
-#
-
-###############################################################################
-# Compute predicted data for some model
-
-dpred = simulation.dpred(model)
-n_data = len(dpred)
-
-# Plot
-fig = plt.figure(figsize=(10, 3))
-n_locations = receiver_locations.shape[0]
-v_max = np.max(np.abs(dpred))
-
-ax1 = fig.add_axes([0.1, 0.15, 0.25, 0.78])
-cplot1 = plot2Ddata(
-    receiver_locations,
-    dpred[0:n_data:3],
-    ax=ax1,
-    ncontour=30,
-    clim=(-v_max, v_max),
-    contourOpts={"cmap": "bwr"},
-)
-cplot1[0].set_clim((-v_max, v_max))
-ax1.set_title(r"$\partial g /\partial x$")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("y (m)")
-
-ax2 = fig.add_axes([0.36, 0.15, 0.25, 0.78])
-cplot2 = plot2Ddata(
-    receiver_locations,
-    dpred[1:n_data:3],
-    ax=ax2,
-    ncontour=30,
-    clim=(-v_max, v_max),
-    contourOpts={"cmap": "bwr"},
-)
-cplot2[0].set_clim((-v_max, v_max))
-ax2.set_title(r"$\partial g /\partial y$")
-ax2.set_xlabel("x (m)")
-ax2.set_yticks([])
-
-ax3 = fig.add_axes([0.62, 0.15, 0.25, 0.78])
-cplot3 = plot2Ddata(
-    receiver_locations,
-    dpred[2:n_data:3],
-    ax=ax3,
-    ncontour=30,
-    clim=(-v_max, v_max),
-    contourOpts={"cmap": "bwr"},
-)
-cplot3[0].set_clim((-v_max, v_max))
-ax3.set_title(r"$\partial g /\partial z$")
-ax3.set_xlabel("x (m)")
-ax3.set_yticks([])
-
-ax4 = fig.add_axes([0.89, 0.13, 0.02, 0.79])
-norm = mpl.colors.Normalize(vmin=-v_max, vmax=v_max)
-cbar = mpl.colorbar.ColorbarBase(
-    ax4, norm=norm, orientation="vertical", cmap=mpl.cm.bwr
-)
-cbar.set_label("Eotvos", rotation=270, labelpad=15, size=12)
-
-plt.show()
diff --git a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py
index 27e679db43..3f3b9cda85 100644
--- a/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py
+++ b/tutorials/03-gravity/plot_inv_1a_gravity_anomaly.py
@@ -2,444 +2,12 @@
 Least-Squares Inversion of Gravity Anomaly Data
 ===============================================
 
-Here we invert gravity anomaly data to recover a density contrast model. We
-formulate the inverse problem as a least-squares optimization problem. For
-this tutorial, we focus on the following:
+.. important::
 
-    - Defining the survey from xyz formatted data
-    - Generating a mesh based on survey geometry
-    - Including surface topography
-    - Defining the inverse problem (data misfit, regularization, optimization)
-    - Specifying directives for the inversion
-    - Plotting the recovered model and data misfit
-
-Although we consider gravity anomaly data in this tutorial, the same approach
-can be used to invert gradiometry and other types of geophysical data.
+    This tutorial has been moved to `User Tutorials
+    `_.
+    Check out the `3D Inversion of Gravity Anomaly Data
+    `_ tutorial.
 
 """
-
-#########################################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import tarfile
-
-from discretize import TensorMesh
-from discretize.utils import active_from_xyz
-from simpeg.utils import plot2Ddata, model_builder
-from simpeg.potential_fields import gravity
-from simpeg import (
-    maps,
-    data,
-    data_misfit,
-    inverse_problem,
-    regularization,
-    optimization,
-    directives,
-    inversion,
-    utils,
-)
-
-# sphinx_gallery_thumbnail_number = 3
-
-#############################################
-# Define File Names
-# -----------------
-#
-# File paths for assets we are loading. To set up the inversion, we require
-# topography and field observations.
The true model defined on the whole mesh -# is loaded to compare with the inversion result. These files are stored as a -# tar-file on our google cloud bucket: -# "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz" -# - -# storage bucket where we have the data -data_source = "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz" - -# download the data -downloaded_data = utils.download(data_source, overwrite=True) - -# unzip the tarfile -tar = tarfile.open(downloaded_data, "r") -tar.extractall() -tar.close() - -# path to the directory containing our data -dir_path = downloaded_data.split(".")[0] + os.path.sep - -# files to work with -topo_filename = dir_path + "gravity_topo.txt" -data_filename = dir_path + "gravity_data.obs" - - -############################################# -# Load Data and Plot -# ------------------ -# -# Here we load and plot synthetic gravity anomaly data. Topography is generally -# defined as an (N, 3) array. Gravity data is generally defined with 4 columns: -# x, y, z and data. -# - -# Load topography -xyz_topo = np.loadtxt(str(topo_filename)) - -# Load field data -dobs = np.loadtxt(str(data_filename)) - -# Define receiver locations and observed data -receiver_locations = dobs[:, 0:3] -dobs = dobs[:, -1] - -# Plot -mpl.rcParams.update({"font.size": 12}) -fig = plt.figure(figsize=(7, 5)) - -ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85]) -plot2Ddata(receiver_locations, dobs, ax=ax1, contourOpts={"cmap": "bwr"}) -ax1.set_title("Gravity Anomaly") -ax1.set_xlabel("x (m)") -ax1.set_ylabel("y (m)") - -ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85]) -norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dobs)), vmax=np.max(np.abs(dobs))) -cbar = mpl.colorbar.ColorbarBase( - ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e" -) -cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12) - -plt.show() - -############################################# -# Assign Uncertainties -# -------------------- -# -# Inversion with SimPEG requires that we define the standard deviation of our data. -# This represents our estimate of the noise in our data. For a gravity inversion, -# a constant floor value is generally applied to all data. For this tutorial, -# the standard deviation on each datum will be 1% of the maximum observed -# gravity anomaly value. -# - -maximum_anomaly = np.max(np.abs(dobs)) - -uncertainties = 0.01 * maximum_anomaly * np.ones(np.shape(dobs)) - -############################################# -# Defining the Survey -# ------------------- -# -# Here, we define the survey that will be used for this tutorial. Gravity -# surveys are simple to create. The user only needs an (N, 3) array to define -# the xyz locations of the observation locations. From this, the user can -# define the receivers and the source field. -# - -# Define the receivers. The data consists of vertical gravity anomaly measurements. -# The set of receivers must be defined as a list. -receiver_list = gravity.receivers.Point(receiver_locations, components="gz") - -receiver_list = [receiver_list] - -# Define the source field -source_field = gravity.sources.SourceField(receiver_list=receiver_list) - -# Define the survey -survey = gravity.survey.Survey(source_field) - -############################################# -# Defining the Data -# ----------------- -# -# Here is where we define the data that is inverted. The data is defined by -# the survey, the observation values and the standard deviation. 
-# - -data_object = data.Data(survey, dobs=dobs, standard_deviation=uncertainties) - - -############################################# -# Defining a Tensor Mesh -# ---------------------- -# -# Here, we create the tensor mesh that will be used to invert gravity anomaly -# data. If desired, we could define an OcTree mesh. -# - -dh = 5.0 -hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] -hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)] -hz = [(dh, 5, -1.3), (dh, 15)] -mesh = TensorMesh([hx, hy, hz], "CCN") - -######################################################## -# Starting/Reference Model and Mapping on Tensor Mesh -# --------------------------------------------------- -# -# Here, we create starting and/or reference models for the inversion as -# well as the mapping from the model space to the active cells. Starting and -# reference models can be a constant background value or contain a-priori -# structures. -# - -# Find the indices of the active cells in forward model (ones below surface) -ind_active = active_from_xyz(mesh, xyz_topo) - -# Define mapping from model to active cells -nC = int(ind_active.sum()) -model_map = maps.IdentityMap(nP=nC) # model consists of a value for each active cell - -# Define and plot starting model -starting_model = np.zeros(nC) - - -############################################## -# Define the Physics -# ------------------ -# -# Here, we define the physics of the gravity problem by using the simulation -# class. -# -# .. tip:: -# -# Since SimPEG v0.21.0 we can use `Choclo -# `_ as the engine for running the gravity -# simulations, which results in faster and more memory efficient runs. Just -# pass ``engine="choclo"`` when constructing the simulation. -# - -simulation = gravity.simulation.Simulation3DIntegral( - survey=survey, - mesh=mesh, - rhoMap=model_map, - active_cells=ind_active, - engine="choclo", -) - - -####################################################################### -# Define the Inverse Problem -# -------------------------- -# -# The inverse problem is defined by 3 things: -# -# 1) Data Misfit: a measure of how well our recovered model explains the field data -# 2) Regularization: constraints placed on the recovered model and a priori information -# 3) Optimization: the numerical approach used to solve the inverse problem -# - -# Define the data misfit. Here the data misfit is the L2 norm of the weighted -# residual between the observed data and the data predicted for a given model. -# Within the data misfit, the residual between predicted and observed data are -# normalized by the data's standard deviation. -dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation) - -# Define the regularization (model objective function). -reg = regularization.WeightedLeastSquares( - mesh, active_cells=ind_active, mapping=model_map -) - -# Define how the optimization problem is solved. Here we will use a projected -# Gauss-Newton approach that employs the conjugate gradient solver. -opt = optimization.ProjectedGNCG( - maxIter=10, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3 -) - -# Here we define the inverse problem that is to be solved -inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) - -####################################################################### -# Define Inversion Directives -# --------------------------- -# -# Here we define any directiveas that are carried out during the inversion. 
This -# includes the cooling schedule for the trade-off parameter (beta), stopping -# criteria for the inversion and saving inversion results at each iteration. -# - -# Defining a starting value for the trade-off parameter (beta) between the data -# misfit and the regularization. -starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e1) - -# Defining the fractional decrease in beta and the number of Gauss-Newton solves -# for each beta value. -beta_schedule = directives.BetaSchedule(coolingFactor=5, coolingRate=1) - -# Options for outputting recovered models and predicted data for each beta. -save_iteration = directives.SaveOutputEveryIteration(save_txt=False) - -# Updating the preconditionner if it is model dependent. -update_jacobi = directives.UpdatePreconditioner() - -# Setting a stopping criteria for the inversion. -target_misfit = directives.TargetMisfit(chifact=1) - -# Add sensitivity weights -sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False) - -# The directives are defined as a list. -directives_list = [ - sensitivity_weights, - starting_beta, - beta_schedule, - save_iteration, - update_jacobi, - target_misfit, -] - -##################################################################### -# Running the Inversion -# --------------------- -# -# To define the inversion object, we need to define the inversion problem and -# the set of directives. We can then run the inversion. -# - -# Here we combine the inverse problem and the set of directives -inv = inversion.BaseInversion(inv_prob, directives_list) - -# Run inversion -recovered_model = inv.run(starting_model) - - -############################################################ -# Recreate True Model -# ------------------- -# - -# Define density contrast values for each unit in g/cc -background_density = 0.0 -block_density = -0.2 -sphere_density = 0.2 - -# Define model. Models in SimPEG are vector arrays. -true_model = background_density * np.ones(nC) - -# You could find the indicies of specific cells within the model and change their -# value to add structures. 
-#####################################################################
-# Running the Inversion
-# ---------------------
-#
-# To define the inversion object, we need to define the inversion problem and
-# the set of directives. We can then run the inversion.
-#
-
-# Here we combine the inverse problem and the set of directives
-inv = inversion.BaseInversion(inv_prob, directives_list)
-
-# Run inversion
-recovered_model = inv.run(starting_model)
-
-
-############################################################
-# Recreate True Model
-# -------------------
-#
-
-# Define density contrast values for each unit in g/cc
-background_density = 0.0
-block_density = -0.2
-sphere_density = 0.2
-
-# Define model. Models in SimPEG are vector arrays.
-true_model = background_density * np.ones(nC)
-
-# You could find the indices of specific cells within the model and change their
-# value to add structures.
-ind_block = (
-    (mesh.gridCC[ind_active, 0] > -50.0)
-    & (mesh.gridCC[ind_active, 0] < -20.0)
-    & (mesh.gridCC[ind_active, 1] > -15.0)
-    & (mesh.gridCC[ind_active, 1] < 15.0)
-    & (mesh.gridCC[ind_active, 2] > -50.0)
-    & (mesh.gridCC[ind_active, 2] < -30.0)
-)
-true_model[ind_block] = block_density
-
-# You can also use SimPEG utilities to add structures to the model more concisely
-ind_sphere = model_builder.get_indices_sphere(
-    np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC
-)
-ind_sphere = ind_sphere[ind_active]
-true_model[ind_sphere] = sphere_density
-
-
-############################################################
-# Plotting True Model and Recovered Model
-# ---------------------------------------
-#
-
-# Plot True Model
-fig = plt.figure(figsize=(9, 4))
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-
-ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.8])
-mesh.plot_slice(
-    plotting_map * true_model,
-    normal="Y",
-    ax=ax1,
-    ind=int(mesh.shape_cells[1] / 2),
-    grid=True,
-    clim=(np.min(true_model), np.max(true_model)),
-    pcolor_opts={"cmap": "viridis"},
-)
-ax1.set_title("Model slice at y = 0 m")
-
-
-ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8])
-norm = mpl.colors.Normalize(vmin=np.min(true_model), vmax=np.max(true_model))
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis, format="%.1e"
-)
-cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
-# Plot Recovered Model
-fig = plt.figure(figsize=(9, 4))
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-
-ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.8])
-mesh.plot_slice(
-    plotting_map * recovered_model,
-    normal="Y",
-    ax=ax1,
-    ind=int(mesh.shape_cells[1] / 2),
-    grid=True,
-    clim=(np.min(recovered_model), np.max(recovered_model)),
-    pcolor_opts={"cmap": "viridis"},
-)
-ax1.set_title("Model slice at y = 0 m")
-
-ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8])
-norm = mpl.colors.Normalize(vmin=np.min(recovered_model), vmax=np.max(recovered_model))
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis
-)
-cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
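-# Optional sanity check (a hedged sketch using names defined above): the final
-# data misfit should be near the target set by TargetMisfit, which for
-# chifact=1 is on the order of the number of data (conventions differ by a
-# factor of 1/2).
-print("final data misfit:", inv_prob.dmisfit(recovered_model))
-print("number of data:", survey.nD)
-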
-###################################################################
-# Plotting Predicted Data and Normalized Misfit
-# ---------------------------------------------
-#
-
-# Predicted data with final recovered model
-# SimPEG uses a right-handed coordinate system where Z is positive upward.
-# This can cause gravity signals to look "inconsistent" with the density
-# values in the visualization.
-dpred = inv_prob.dpred
-
-# Observed data | Predicted data | Normalized data misfit
-data_array = np.c_[dobs, dpred, (dobs - dpred) / uncertainties]
-
-fig = plt.figure(figsize=(17, 4))
-plot_title = ["Observed", "Predicted", "Normalized Misfit"]
-plot_units = ["mgal", "mgal", ""]
-
-ax1 = 3 * [None]
-ax2 = 3 * [None]
-norm = 3 * [None]
-cbar = 3 * [None]
-cplot = 3 * [None]
-v_lim = [np.max(np.abs(dobs)), np.max(np.abs(dobs)), np.max(np.abs(data_array[:, 2]))]
-
-for ii in range(0, 3):
-    ax1[ii] = fig.add_axes([0.33 * ii + 0.03, 0.11, 0.23, 0.84])
-    cplot[ii] = plot2Ddata(
-        receiver_list[0].locations,
-        data_array[:, ii],
-        ax=ax1[ii],
-        ncontour=30,
-        clim=(-v_lim[ii], v_lim[ii]),
-        contourOpts={"cmap": "bwr"},
-    )
-    ax1[ii].set_title(plot_title[ii])
-    ax1[ii].set_xlabel("x (m)")
-    ax1[ii].set_ylabel("y (m)")
-
-    ax2[ii] = fig.add_axes([0.33 * ii + 0.25, 0.11, 0.01, 0.85])
-    norm[ii] = mpl.colors.Normalize(vmin=-v_lim[ii], vmax=v_lim[ii])
-    cbar[ii] = mpl.colorbar.ColorbarBase(
-        ax2[ii], norm=norm[ii], orientation="vertical", cmap=mpl.cm.bwr
-    )
-    cbar[ii].set_label(plot_units[ii], rotation=270, labelpad=15, size=12)
-
-plt.show()
diff --git a/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py b/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py
index 322afbd08e..9b4808a4cf 100644
--- a/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py
+++ b/tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py
@@ -2,446 +2,16 @@
 Sparse Norm Inversion of Gravity Anomaly Data
 =============================================
 
-Here we invert gravity anomaly data to recover a density contrast model. We formulate the inverse problem as an iteratively
-re-weighted least-squares (IRLS) optimization problem. For this tutorial, we
-focus on the following:
+.. important::
 
-    - Defining the survey from xyz formatted data
-    - Generating a mesh based on survey geometry
-    - Including surface topography
-    - Defining the inverse problem (data misfit, regularization, optimization)
-    - Specifying directives for the inversion
-    - Setting sparse and blocky norms
-    - Plotting the recovered model and data misfit
-
-Although we consider gravity anomaly data in this tutorial, the same approach
-can be used to invert gradiometry and other types of geophysical data.
+    This tutorial has been moved to `User Tutorials
+    `_.
+    Check out the
+    `Iteratively Re-weighted Least-Squares (IRLS) Inversion on a Tree Mesh
+    `_
+    section in the
+    `3D Inversion of Gravity Anomaly Data
+    `_ tutorial.
 
 """
-
-#########################################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import tarfile
-
-from discretize import TensorMesh
-from discretize.utils import active_from_xyz
-from simpeg.utils import plot2Ddata, model_builder
-from simpeg.potential_fields import gravity
-from simpeg import (
-    maps,
-    data,
-    data_misfit,
-    inverse_problem,
-    regularization,
-    optimization,
-    directives,
-    inversion,
-    utils,
-)
-
-# sphinx_gallery_thumbnail_number = 3
-
-#############################################
-# Define File Names
-# -----------------
-#
-# File paths for assets we are loading. To set up the inversion, we require
-# topography and field observations. The true model defined on the whole mesh
-# is loaded to compare with the inversion result. These files are stored as a
-# tar-file on our google cloud bucket:
-# "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz"
-#
-
-# storage bucket where we have the data
-data_source = "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz"
-
-# download the data
-downloaded_data = utils.download(data_source, overwrite=True)
-
-# unzip the tarfile
-tar = tarfile.open(downloaded_data, "r")
-tar.extractall()
-tar.close()
-
-# path to the directory containing our data
-dir_path = downloaded_data.split(".")[0] + os.path.sep
-
-# files to work with
-topo_filename = dir_path + "gravity_topo.txt"
-data_filename = dir_path + "gravity_data.obs"
-model_filename = dir_path + "true_model.txt"
-
-
-#############################################
-# Load Data and Plot
-# ------------------
-#
-# Here we load and plot synthetic gravity anomaly data. Topography is generally
-# defined as an (N, 3) array. Gravity data is generally defined with 4 columns:
-# x, y, z and data.
-#
-
-# Load topography
-xyz_topo = np.loadtxt(str(topo_filename))
-
-# Load field data
-dobs = np.loadtxt(str(data_filename))
-
-# Define receiver locations and observed data
-receiver_locations = dobs[:, 0:3]
-dobs = dobs[:, -1]
-
-# Plot
-mpl.rcParams.update({"font.size": 12})
-fig = plt.figure(figsize=(7, 5))
-
-ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85])
-plot2Ddata(receiver_locations, dobs, ax=ax1, contourOpts={"cmap": "bwr"})
-ax1.set_title("Gravity Anomaly")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("y (m)")
-
-ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85])
-norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dobs)), vmax=np.max(np.abs(dobs)))
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e"
-)
-cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
-#############################################
-# Assign Uncertainties
-# --------------------
-#
-# Inversion with SimPEG requires that we define the standard deviation of our
-# data. This represents our estimate of the noise in our data. For gravity
-# inversion, a constant floor value is generally applied to all data. For this
-# tutorial, the standard deviation on each datum will be 1% of the maximum
-# observed gravity anomaly value.
-#
-
-maximum_anomaly = np.max(np.abs(dobs))
-
-uncertainties = 0.01 * maximum_anomaly * np.ones(np.shape(dobs))
-
-#############################################
-# Defining the Survey
-# -------------------
-#
-# Here, we define the survey that will be used for this tutorial. Gravity
-# surveys are simple to create. The user only needs an (N, 3) array to define
-# the xyz locations of the observation locations. From this, the user can
-# define the receivers and the source field.
-#
-
-# Define the receivers. The data consist of vertical gravity anomaly measurements.
-# The set of receivers must be defined as a list.
-receiver_list = gravity.receivers.Point(receiver_locations, components="gz")
-
-receiver_list = [receiver_list]
-
-# Define the source field
-source_field = gravity.sources.SourceField(receiver_list=receiver_list)
-
-# Define the survey
-survey = gravity.survey.Survey(source_field)
-
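-# Illustrative check (names as defined above): with a single "gz" component,
-# the survey should report one datum per receiver location.
-print(survey.nD, receiver_locations.shape[0])  # these two numbers should match
-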
-#############################################
-# Defining the Data
-# -----------------
-#
-# Here is where we define the data that are inverted. The data are defined by
-# the survey, the observation values and the standard deviation.
-#
-
-data_object = data.Data(survey, dobs=dobs, standard_deviation=uncertainties)
-
-
-#############################################
-# Defining a Tensor Mesh
-# ----------------------
-#
-# Here, we create the tensor mesh that will be used to invert gravity anomaly
-# data. If desired, we could define an OcTree mesh.
-#
-
-dh = 5.0
-hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
-hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
-hz = [(dh, 5, -1.3), (dh, 15)]
-mesh = TensorMesh([hx, hy, hz], "CCN")
-
-########################################################
-# Starting/Reference Model and Mapping on Tensor Mesh
-# ---------------------------------------------------
-#
-# Here, we create starting and/or reference models for the inversion as
-# well as the mapping from the model space to the active cells. Starting and
-# reference models can be a constant background value or contain a priori
-# structures.
-#
-
-# Find the indices of the active cells in forward model (ones below surface)
-ind_active = active_from_xyz(mesh, xyz_topo)
-
-# Define mapping from model to active cells
-nC = int(ind_active.sum())
-model_map = maps.IdentityMap(nP=nC)  # model consists of a value for each active cell
-
-# Define and plot starting model
-starting_model = np.zeros(nC)
-
-
-##############################################
-# Define the Physics
-# ------------------
-#
-# Here, we define the physics of the gravity problem by using the simulation
-# class.
-#
-# .. tip::
-#
-#    Since SimPEG v0.21.0 we can use `Choclo
-#    `_ as the engine for running the gravity
-#    simulations, which results in faster and more memory-efficient runs. Just
-#    pass ``engine="choclo"`` when constructing the simulation.
-#
-
-simulation = gravity.simulation.Simulation3DIntegral(
-    survey=survey,
-    mesh=mesh,
-    rhoMap=model_map,
-    active_cells=ind_active,
-    engine="choclo",
-)
-
-
-#######################################################################
-# Define the Inverse Problem
-# --------------------------
-#
-# The inverse problem is defined by 3 things:
-#
-# 1) Data Misfit: a measure of how well our recovered model explains the field data
-# 2) Regularization: constraints placed on the recovered model and a priori information
-# 3) Optimization: the numerical approach used to solve the inverse problem
-#
-
-# Define the data misfit. Here the data misfit is the L2 norm of the weighted
-# residual between the observed data and the data predicted for a given model.
-# Within the data misfit, the residual between predicted and observed data is
-# normalized by the data's standard deviation.
-dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation)
-dmis.W = utils.sdiag(1 / uncertainties)
-
-# Define the regularization (model objective function).
-reg = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map)
-reg.norms = [0, 2, 2, 2]
-
-# Define how the optimization problem is solved. Here we will use a projected
-# Gauss-Newton approach that employs the conjugate gradient solver.
-opt = optimization.ProjectedGNCG(
-    maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
-)
-
-# Here we define the inverse problem that is to be solved
-inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
-
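-# A hedged, standalone sketch of why the p=0 norm on smallness (first entry of
-# ``reg.norms`` above) favours compact, blocky models: as p decreases, a small
-# model value costs nearly as much as a large one, so spread-out small values
-# are discouraged (hypothetical values below).
-for p in (2.0, 1.0, 0.5):
-    print(p, abs(0.01) ** p, abs(1.0) ** p)
-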
-#######################################################################
-# Define Inversion Directives
-# ---------------------------
-#
-# Here we define any directives that are carried out during the inversion. This
-# includes the cooling schedule for the trade-off parameter (beta), stopping
-# criteria for the inversion and saving inversion results at each iteration.
-#
-
-# Defining a starting value for the trade-off parameter (beta) between the data
-# misfit and the regularization.
-starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
-
-# Defines the directives for the IRLS regularization. This includes setting
-# the cooling schedule for the trade-off parameter.
-update_IRLS = directives.UpdateIRLS(
-    f_min_change=1e-4,
-    max_irls_iterations=30,
-    irls_cooling_factor=1.5,
-    misfit_tolerance=1e-2,
-)
-# Options for outputting recovered models and predicted data for each beta.
-save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
-
-# Updating the preconditioner if it is model dependent.
-update_jacobi = directives.UpdatePreconditioner()
-
-# Add sensitivity weights
-sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False)
-
-# The directives are defined as a list.
-directives_list = [
-    update_IRLS,
-    sensitivity_weights,
-    starting_beta,
-    save_iteration,
-    update_jacobi,
-]
-
-#####################################################################
-# Running the Inversion
-# ---------------------
-#
-# To define the inversion object, we need to define the inversion problem and
-# the set of directives. We can then run the inversion.
-#
-
-# Here we combine the inverse problem and the set of directives
-inv = inversion.BaseInversion(inv_prob, directives_list)
-
-# Run inversion
-recovered_model = inv.run(starting_model)
-
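-# Hedged sketch of the IRLS cooling configured above (plain arithmetic, with a
-# hypothetical starting threshold): the IRLS threshold is reduced by
-# irls_cooling_factor=1.5 at each IRLS iteration.
-eps_example = 0.1
-print([eps_example / 1.5**k for k in range(4)])
-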
-
-############################################################
-# Recreate True Model
-# -------------------
-#
-
-# Define density contrast values for each unit in g/cc
-background_density = 0.0
-block_density = -0.2
-sphere_density = 0.2
-
-# Define model. Models in SimPEG are vector arrays.
-true_model = background_density * np.ones(nC)
-
-# You could find the indices of specific cells within the model and change their
-# value to add structures.
-ind_block = (
-    (mesh.gridCC[ind_active, 0] > -50.0)
-    & (mesh.gridCC[ind_active, 0] < -20.0)
-    & (mesh.gridCC[ind_active, 1] > -15.0)
-    & (mesh.gridCC[ind_active, 1] < 15.0)
-    & (mesh.gridCC[ind_active, 2] > -50.0)
-    & (mesh.gridCC[ind_active, 2] < -30.0)
-)
-true_model[ind_block] = block_density
-
-# You can also use SimPEG utilities to add structures to the model more concisely
-ind_sphere = model_builder.get_indices_sphere(
-    np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC
-)
-ind_sphere = ind_sphere[ind_active]
-true_model[ind_sphere] = sphere_density
-
-
-############################################################
-# Plotting True Model and Recovered Model
-# ---------------------------------------
-#
-
-# Plot True Model
-fig = plt.figure(figsize=(9, 4))
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-
-ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.8])
-mesh.plot_slice(
-    plotting_map * true_model,
-    normal="Y",
-    ax=ax1,
-    ind=int(mesh.shape_cells[1] / 2),
-    grid=True,
-    clim=(np.min(true_model), np.max(true_model)),
-    pcolor_opts={"cmap": "viridis"},
-)
-ax1.set_title("Model slice at y = 0 m")
-
-
-ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8])
-norm = mpl.colors.Normalize(vmin=np.min(true_model), vmax=np.max(true_model))
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis, format="%.1e"
-)
-cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
-# Plot Recovered Model
-fig = plt.figure(figsize=(9, 4))
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-
-ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.8])
-mesh.plot_slice(
-    plotting_map * recovered_model,
-    normal="Y",
-    ax=ax1,
-    ind=int(mesh.shape_cells[1] / 2),
-    grid=True,
-    clim=(np.min(recovered_model), np.max(recovered_model)),
-    pcolor_opts={"cmap": "viridis"},
-)
-ax1.set_title("Model slice at y = 0 m")
-
-ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8])
-norm = mpl.colors.Normalize(vmin=np.min(recovered_model), vmax=np.max(recovered_model))
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis
-)
-cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
-###################################################################
-# Plotting Predicted Data and Normalized Misfit
-# ---------------------------------------------
-#
-
-# Predicted data with final recovered model
-# SimPEG uses a right-handed coordinate system where Z is positive upward.
-# This can cause gravity signals to look "inconsistent" with the density
-# values in the visualization.
-dpred = inv_prob.dpred - -# Observed data | Predicted data | Normalized data misfit -data_array = np.c_[dobs, dpred, (dobs - dpred) / uncertainties] - -fig = plt.figure(figsize=(17, 4)) -plot_title = ["Observed", "Predicted", "Normalized Misfit"] -plot_units = ["mgal", "mgal", ""] - -ax1 = 3 * [None] -ax2 = 3 * [None] -norm = 3 * [None] -cbar = 3 * [None] -cplot = 3 * [None] -v_lim = [np.max(np.abs(dobs)), np.max(np.abs(dobs)), np.max(np.abs(data_array[:, 2]))] - -for ii in range(0, 3): - ax1[ii] = fig.add_axes([0.33 * ii + 0.03, 0.11, 0.23, 0.84]) - cplot[ii] = plot2Ddata( - receiver_list[0].locations, - data_array[:, ii], - ax=ax1[ii], - ncontour=30, - clim=(-v_lim[ii], v_lim[ii]), - contourOpts={"cmap": "bwr"}, - ) - ax1[ii].set_title(plot_title[ii]) - ax1[ii].set_xlabel("x (m)") - ax1[ii].set_ylabel("y (m)") - - ax2[ii] = fig.add_axes([0.33 * ii + 0.25, 0.11, 0.01, 0.85]) - norm[ii] = mpl.colors.Normalize(vmin=-v_lim[ii], vmax=v_lim[ii]) - cbar[ii] = mpl.colorbar.ColorbarBase( - ax2[ii], norm=norm[ii], orientation="vertical", cmap=mpl.cm.bwr - ) - cbar[ii].set_label(plot_units[ii], rotation=270, labelpad=15, size=12) - -plt.show() diff --git a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py index 2fc2d9210d..8e61754a98 100644 --- a/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py +++ b/tutorials/03-gravity/plot_inv_1c_gravity_anomaly_irls_compare_weighting.py @@ -2,575 +2,12 @@ Compare weighting strategy with Inversion of surface Gravity Anomaly Data ========================================================================= -Here we invert gravity anomaly data to recover a density contrast model. We formulate the inverse problem as an iteratively -re-weighted least-squares (IRLS) optimization problem. For this tutorial, we -focus on the following: +.. important:: - - Setting regularization weights - - Defining the survey from xyz formatted data - - Generating a mesh based on survey geometry - - Including surface topography - - Defining the inverse problem (data misfit, regularization, optimization) - - Specifying directives for the inversion - - Setting sparse and blocky norms - - Plotting the recovered model and data misfit + This tutorial has been moved to `User Tutorials + `_. -Although we consider gravity anomaly data in this tutorial, the same approach -can be used to invert gradiometry and other types of geophysical data. -""" - -######################################################################### -# Import modules -# -------------- -# - -import os -import tarfile - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np -from discretize import TensorMesh -from discretize.utils import active_from_xyz - -from simpeg import ( - data, - data_misfit, - directives, - inverse_problem, - inversion, - maps, - optimization, - regularization, - utils, -) -from simpeg.potential_fields import gravity -from simpeg.utils import model_builder, plot2Ddata - -# sphinx_gallery_thumbnail_number = 3 - -############################################# -# Define File Names -# ----------------- -# -# File paths for assets we are loading. To set up the inversion, we require -# topography and field observations. The true model defined on the whole mesh -# is loaded to compare with the inversion result. 
These files are stored as a -# tar-file on our google cloud bucket: -# "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz" -# - -# storage bucket where we have the data -data_source = "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz" - -# download the data -downloaded_data = utils.download(data_source, overwrite=True) - -# unzip the tarfile -tar = tarfile.open(downloaded_data, "r") -tar.extractall() -tar.close() - -# path to the directory containing our data -dir_path = downloaded_data.split(".")[0] + os.path.sep - -# files to work with -topo_filename = dir_path + "gravity_topo.txt" -data_filename = dir_path + "gravity_data.obs" - - -############################################# -# Load Data and Plot -# ------------------ -# -# Here we load and plot synthetic gravity anomaly data. Topography is generally -# defined as an (N, 3) array. Gravity data is generally defined with 4 columns: -# x, y, z and data. -# - -# Load topography -xyz_topo = np.loadtxt(str(topo_filename)) - -# Load field data -dobs = np.loadtxt(str(data_filename)) - -# Define receiver locations and observed data -receiver_locations = dobs[:, 0:3] -dobs = dobs[:, -1] - -# Plot -mpl.rcParams.update({"font.size": 12}) -fig = plt.figure(figsize=(7, 5)) - -ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85]) -plot2Ddata( - receiver_locations, - dobs, - ax=ax1, - contourOpts={"cmap": "bwr"}, - shade=True, - nx=20, - ny=20, - dataloc=True, -) -ax1.set_title("Gravity Anomaly") -ax1.set_xlabel("x (m)") -ax1.set_ylabel("y (m)") - -ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85]) -norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dobs)), vmax=np.max(np.abs(dobs))) -cbar = mpl.colorbar.ColorbarBase( - ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e" -) -cbar.set_label("$mGal$", rotation=270, labelpad=15, size=12) - -plt.show() - -############################################# -# Assign Uncertainties -# -------------------- -# -# Inversion with simpeg requires that we define the standard deviation of our data. -# This represents our estimate of the noise in our data. For a gravity inversion, -# a constant floor value is generally applied to all data. For this tutorial, -# the standard deviation on each datum will be 1% of the maximum observed -# gravity anomaly value. -# - -maximum_anomaly = np.max(np.abs(dobs)) - -uncertainties = 0.01 * maximum_anomaly * np.ones(np.shape(dobs)) - -############################################# -# Defining the Survey -# ------------------- -# -# Here, we define the survey that will be used for this tutorial. Gravity -# surveys are simple to create. The user only needs an (N, 3) array to define -# the xyz locations of the observation locations. From this, the user can -# define the receivers and the source field. -# - -# Define the receivers. The data consists of vertical gravity anomaly measurements. -# The set of receivers must be defined as a list. -receiver_list = gravity.receivers.Point(receiver_locations, components="gz") - -receiver_list = [receiver_list] - -# Define the source field -source_field = gravity.sources.SourceField(receiver_list=receiver_list) - -# Define the survey -survey = gravity.survey.Survey(source_field) - -############################################# -# Defining the Data -# ----------------- -# -# Here is where we define the data that is inverted. The data is defined by -# the survey, the observation values and the standard deviation. 
-#
-
-data_object = data.Data(survey, dobs=dobs, standard_deviation=uncertainties)
-
-
-#############################################
-# Defining a Tensor Mesh
-# ----------------------
-#
-# Here, we create the tensor mesh that will be used to invert gravity anomaly
-# data. If desired, we could define an OcTree mesh.
-#
-
-dh = 5.0
-hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
-hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
-hz = [(dh, 5, -1.3), (dh, 15)]
-mesh = TensorMesh([hx, hy, hz], "CCN")
-
-########################################################
-# Starting/Reference Model and Mapping on Tensor Mesh
-# ---------------------------------------------------
-#
-# Here, we create starting and/or reference models for the inversion as
-# well as the mapping from the model space to the active cells. Starting and
-# reference models can be a constant background value or contain a priori
-# structures.
-#
-
-# Find the indices of the active cells in forward model (ones below surface)
-ind_active = active_from_xyz(mesh, xyz_topo)
-
-# Define mapping from model to active cells
-nC = int(ind_active.sum())
-model_map = maps.IdentityMap(nP=nC)  # model consists of a value for each active cell
-
-# Define and plot starting model
-starting_model = np.zeros(nC)
-
-
-##############################################
-# Define the Physics and data misfit
-# ----------------------------------
-#
-# Here, we define the physics of the gravity problem by using the simulation
-# class.
-#
-
-simulation = gravity.simulation.Simulation3DIntegral(
-    survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active
-)
-
-# Define the data misfit. Here the data misfit is the L2 norm of the weighted
-# residual between the observed data and the data predicted for a given model.
-# Within the data misfit, the residual between predicted and observed data is
-# normalized by the data's standard deviation.
-dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation)
-
-
-#######################################################################
-# Running the Depth Weighted inversion
-# ------------------------------------
-#
-# Here we define the directives, weights, regularization, and optimization
-# for a depth-weighted inversion
-#
-
-# inversion directives
-# Defining a starting value for the trade-off parameter (beta) between the data
-# misfit and the regularization.
-starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
-
-# Defines the directives for the IRLS regularization. This includes setting
-# the cooling schedule for the trade-off parameter.
-update_IRLS = directives.Update_IRLS(
-    f_min_change=1e-4,
-    max_irls_iterations=30,
-    coolEpsFact=1.5,
-    beta_tol=1e-2,
-)
-
-# Options for outputting recovered models and predicted data for each beta.
-save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
-
-# Updating the preconditioner if it is model dependent.
-update_jacobi = directives.UpdatePreconditioner()
-
-# The directives are defined as a list
-directives_list = [
-    update_IRLS,
-    starting_beta,
-    save_iteration,
-    update_jacobi,
-]
+    Check out the `Compare weighting strategy with Inversion of surface Gravity Anomaly
+    Data `_ tutorial.
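-# Hedged sketch of the depth weighting used below (Li & Oldenburg-style
-# convention; plain numpy with hypothetical cell depths): weights decay
-# roughly as 1 / depth**(exponent / 2), counteracting the rapid decay of
-# gravity sensitivities with depth.
-depths_example = np.array([5.0, 20.0, 50.0, 100.0])
-print(1.0 / depths_example ** (2.0 / 2))
-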
-# Define the regularization (model objective function) with depth weighting.
-reg_dpth = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map)
-reg_dpth.norms = [0, 2, 2, 2]
-depth_weights = utils.depth_weighting(
-    mesh, receiver_locations, active_cells=ind_active, exponent=2
-)
-reg_dpth.set_weights(depth_weights=depth_weights)
-
-# Define how the optimization problem is solved. Here we will use a projected
-# Gauss-Newton approach that employs the conjugate gradient solver.
-opt = optimization.ProjectedGNCG(
-    maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
-)
-
-# Here we define the inverse problem that is to be solved
-inv_prob = inverse_problem.BaseInvProblem(dmis, reg_dpth, opt)
-
-# Here we combine the inverse problem and the set of directives
-inv = inversion.BaseInversion(inv_prob, directives_list)
-
-# Run inversion
-recovered_model_dpth = inv.run(starting_model)
-
-#######################################################################
-# Running the Distance Weighted inversion
-# ---------------------------------------
-#
-# Here we define the directives, weights, regularization, and optimization
-# for a distance-weighted inversion
-#
-
-# inversion directives
-# Defining a starting value for the trade-off parameter (beta) between the data
-# misfit and the regularization.
-starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
-
-# Defines the directives for the IRLS regularization. This includes setting
-# the cooling schedule for the trade-off parameter.
-update_IRLS = directives.Update_IRLS(
-    f_min_change=1e-4,
-    max_irls_iterations=30,
-    coolEpsFact=1.5,
-    beta_tol=1e-2,
-)
-
-# Options for outputting recovered models and predicted data for each beta.
-save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
-
-# Updating the preconditioner if it is model dependent.
-update_jacobi = directives.UpdatePreconditioner()
-
-# The directives are defined as a list
-directives_list = [
-    update_IRLS,
-    starting_beta,
-    save_iteration,
-    update_jacobi,
-]
-
-# Define the regularization (model objective function) with distance weighting.
-reg_dist = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map)
-reg_dist.norms = [0, 2, 2, 2]
-distance_weights = utils.distance_weighting(
-    mesh, receiver_locations, active_cells=ind_active, exponent=2
-)
-reg_dist.set_weights(distance_weights=distance_weights)
-
-# Define how the optimization problem is solved. Here we will use a projected
-# Gauss-Newton approach that employs the conjugate gradient solver.
-opt = optimization.ProjectedGNCG(
-    maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
-)
-
-# Here we define the inverse problem that is to be solved
-inv_prob = inverse_problem.BaseInvProblem(dmis, reg_dist, opt)
-
-# Here we combine the inverse problem and the set of directives
-inv = inversion.BaseInversion(inv_prob, directives_list)
-
-# Run inversion
-recovered_model_dist = inv.run(starting_model)
-
-#######################################################################
-# Running the Sensitivity Weighted inversion
-# ------------------------------------------
-#
-# Here we define the directives, weights, regularization, and optimization
-# for a sensitivity weighted inversion
-#
-
-# inversion directives
-# Defining a starting value for the trade-off parameter (beta) between the data
-# misfit and the regularization.
-starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
-
-# Defines the directives for the IRLS regularization. This includes setting
-# the cooling schedule for the trade-off parameter.
-update_IRLS = directives.Update_IRLS(
-    f_min_change=1e-4,
-    max_irls_iterations=30,
-    coolEpsFact=1.5,
-    beta_tol=1e-2,
-)
-
-# Options for outputting recovered models and predicted data for each beta.
-save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
-
-# Updating the preconditioner if it is model dependent.
-update_jacobi = directives.UpdatePreconditioner()
-
-# Add sensitivity weights
-sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False)
-
-# The directives are defined as a list
-directives_list = [
-    update_IRLS,
-    sensitivity_weights,
-    starting_beta,
-    save_iteration,
-    update_jacobi,
-]
-
-# Define the regularization (model objective function) for sensitivity weighting.
-reg_sensw = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map)
-reg_sensw.norms = [0, 2, 2, 2]
-
-# Define how the optimization problem is solved. Here we will use a projected
-# Gauss-Newton approach that employs the conjugate gradient solver.
-opt = optimization.ProjectedGNCG(
-    maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
-)
-
-# Here we define the inverse problem that is to be solved
-inv_prob = inverse_problem.BaseInvProblem(dmis, reg_sensw, opt)
-
-# Here we combine the inverse problem and the set of directives
-inv = inversion.BaseInversion(inv_prob, directives_list)
-
-# Run inversion
-recovered_model_sensw = inv.run(starting_model)
-
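-# Hedged numpy sketch of what sensitivity weighting does (hypothetical 2-data,
-# 3-cell Jacobian): each cell gets a regularization weight proportional to the
-# root-sum-of-squares of its Jacobian column, so highly sensitive (typically
-# shallow) cells are penalized more and structure is not forced to the surface.
-J_example = np.array([[2.0, 0.5, 0.1], [1.0, 0.4, 0.05]])
-print(np.sqrt(np.sum(J_example**2, axis=0)))
-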
-############################################################
-# Recreate True Model
-# -------------------
-#
-
-# Define density contrast values for each unit in g/cc
-background_density = 0.0
-block_density = -0.2
-sphere_density = 0.2
-
-# Define model. Models in simpeg are vector arrays.
-true_model = background_density * np.ones(nC)
-
-# You could find the indices of specific cells within the model and change their
-# value to add structures.
-ind_block = (
-    (mesh.gridCC[ind_active, 0] > -50.0)
-    & (mesh.gridCC[ind_active, 0] < -20.0)
-    & (mesh.gridCC[ind_active, 1] > -15.0)
-    & (mesh.gridCC[ind_active, 1] < 15.0)
-    & (mesh.gridCC[ind_active, 2] > -50.0)
-    & (mesh.gridCC[ind_active, 2] < -30.0)
-)
-true_model[ind_block] = block_density
-
-# You can also use simpeg utilities to add structures to the model more concisely
-ind_sphere = model_builder.get_indices_sphere(
-    np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC
-)
-ind_sphere = ind_sphere[ind_active]
-true_model[ind_sphere] = sphere_density
-
-
-############################################################
-# Plotting True Model and Recovered Models
-# ----------------------------------------
-#
-
-# Plot Models
-fig, ax = plt.subplots(2, 2, figsize=(20, 10), sharex=True, sharey=True)
-ax = ax.flatten()
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-cmap = "coolwarm"
-slice_y_loc = 0.0
-
-# Use a symmetric colour scale about zero for the true model
-vmax = np.abs(true_model).max()
-norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax)
-mm = mesh.plot_slice(
-    plotting_map * true_model,
-    normal="Y",
-    ax=ax[0],
-    grid=False,
-    slice_loc=slice_y_loc,
-    pcolor_opts={"cmap": cmap, "norm": norm},
-)
-ax[0].set_title(f"True model slice at y = {slice_y_loc} m")
-plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[0])
-
-# plot depth weighting result
-vmax = np.abs(recovered_model_dpth).max()
-norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax)
-mm = mesh.plot_slice(
-    plotting_map * recovered_model_dpth,
-    normal="Y",
-    ax=ax[1],
-    grid=False,
-    slice_loc=slice_y_loc,
-    pcolor_opts={"cmap": cmap, "norm": norm},
-)
-ax[1].set_title(f"Depth weighting Model slice at y = {slice_y_loc} m")
-plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[1])
-
-# plot distance weighting result
-vmax = np.abs(recovered_model_dist).max()
-norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax)
-mm = mesh.plot_slice(
-    plotting_map * recovered_model_dist,
-    normal="Y",
-    ax=ax[2],
-    grid=False,
-    slice_loc=slice_y_loc,
-    pcolor_opts={"cmap": cmap, "norm": norm},
-)
-ax[2].set_title(f"Distance weighting Model slice at y = {slice_y_loc} m")
-plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[2])
-
-# plot sensitivity weighting result
-vmax = np.abs(recovered_model_sensw).max()
-norm = mpl.colors.TwoSlopeNorm(vcenter=0, vmin=-vmax, vmax=vmax)
-mm = mesh.plot_slice(
-    plotting_map * recovered_model_sensw,
-    normal="Y",
-    ax=ax[3],
-    grid=False,
-    slice_loc=slice_y_loc,
-    pcolor_opts={"cmap": cmap, "norm": norm},
-)
-ax[3].set_title(f"Sensitivity weighting Model slice at y = {slice_y_loc} m")
-plt.colorbar(mm[0], label="$g/cm^3$", ax=ax[3])
-
-# shared plotting
-plotting_map = maps.InjectActiveCells(mesh, ind_active, 0.0)
-slice_y_ind = (
-    mesh.cell_centers[:, 1] == np.abs(mesh.cell_centers[:, 1] - slice_y_loc).min()
-)
-for axx in ax:
-    utils.plot2Ddata(
-        mesh.cell_centers[slice_y_ind][:, [0, 2]],
-        (plotting_map * true_model)[slice_y_ind],
-        contourOpts={"alpha": 0},
-        level=True,
-        ncontour=2,
-        levelOpts={"colors": "grey", "linewidths": 2, "linestyles": "--"},
-        method="nearest",
-        ax=axx,
-    )
-    axx.set_aspect(1)
-
-plt.tight_layout()
-
-############################################################
-# Visualize weights
-# -----------------
-#
-# Plot Weights
-fig, ax = plt.subplots(1, 3, figsize=(20, 4), sharex=True, sharey=True)
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-cmap = "magma"
-slice_y_loc = 0.0
-
-# plot depth weights
-mm = mesh.plot_slice(
-    plotting_map * np.log10(depth_weights),
-    normal="Y",
-    ax=ax[0],
-    grid=False,
-    slice_loc=slice_y_loc,
-    pcolor_opts={"cmap": cmap},
-)
-ax[0].set_title(f"log10(depth weights) slice at y = {slice_y_loc} m")
-plt.colorbar(mm[0], label="log10(depth weights)", ax=ax[0])
-
-# plot distance weights
-mm = mesh.plot_slice(
-    plotting_map * np.log10(distance_weights),
-    normal="Y",
-    ax=ax[1],
-    grid=False,
-    slice_loc=slice_y_loc,
-    pcolor_opts={"cmap": cmap},
-)
-ax[1].set_title(f"log10(distance weights) slice at y = {slice_y_loc} m")
-plt.colorbar(mm[0], label="log10(distance weights)", ax=ax[1])
-
-# plot sensitivity weights
-mm = mesh.plot_slice(
-    plotting_map * np.log10(reg_sensw.objfcts[0].get_weights(key="sensitivity")),
-    normal="Y",
-    ax=ax[2],
-    grid=False,
-    slice_loc=slice_y_loc,
-    pcolor_opts={"cmap": cmap},
-)
-ax[2].set_title(f"log10(sensitivity weights) slice at y = {slice_y_loc} m")
-plt.colorbar(mm[0], label="log10(sensitivity weights)", ax=ax[2])
-
-# shared plotting
-for axx in ax:
-    axx.set_aspect(1)
-
-plt.tight_layout()
+"""
diff --git a/tutorials/04-magnetics/plot_2a_magnetics_induced.py b/tutorials/04-magnetics/plot_2a_magnetics_induced.py
index 9f3ece6ae3..ead80fc2a9 100644
--- a/tutorials/04-magnetics/plot_2a_magnetics_induced.py
+++ b/tutorials/04-magnetics/plot_2a_magnetics_induced.py
@@ -2,248 +2,12 @@
 Forward Simulation of Total Magnetic Intensity Data
 ===================================================
 
-Here we use the module *simpeg.potential_fields.magnetics* to predict magnetic
-data for a magnetic susceptibility model. We simulate the data on a tensor mesh.
-For this tutorial, we focus on the following:
+.. important::
 
-    - How to define the survey
-    - How to predict magnetic data for a susceptibility model
-    - How to include surface topography
-    - The units of the physical property model and resulting data
+    This tutorial has been moved to `User Tutorials
+    `_.
+    Check out the `3D Forward Simulation of TMI Data
+    `_ tutorial.
 
 """
-
-#########################################################################
-# Import Modules
-# --------------
-#
-
-import numpy as np
-from scipy.interpolate import LinearNDInterpolator
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import os
-
-from discretize import TensorMesh
-from discretize.utils import mkvc, active_from_xyz
-from simpeg.utils import plot2Ddata, model_builder
-from simpeg import maps
-from simpeg.potential_fields import magnetics
-
-write_output = False
-
-# sphinx_gallery_thumbnail_number = 2
-
-
-#############################################
-# Topography
-# ----------
-#
-# Surface topography is defined as an (N, 3) numpy array. We create it here but
-# topography could also be loaded from a file.
-#
-
-[x_topo, y_topo] = np.meshgrid(np.linspace(-200, 200, 41), np.linspace(-200, 200, 41))
-z_topo = -15 * np.exp(-(x_topo**2 + y_topo**2) / 80**2)
-x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
-xyz_topo = np.c_[x_topo, y_topo, z_topo]
-
-#############################################
-# Defining the Survey
-# -------------------
-#
-# Here, we define the survey that will be used for the simulation. Magnetic
-# surveys are simple to create. The user only needs an (N, 3) array to define
-# the xyz locations of the observation locations, the list of field components
-# which are to be modeled and the properties of the Earth's field.
-#
-
-# Define the observation locations as an (N, 3) numpy array or load them.
-x = np.linspace(-80.0, 80.0, 17)
-y = np.linspace(-80.0, 80.0, 17)
-x, y = np.meshgrid(x, y)
-x, y = mkvc(x.T), mkvc(y.T)
-fun_interp = LinearNDInterpolator(np.c_[x_topo, y_topo], z_topo)
-z = fun_interp(np.c_[x, y]) + 10  # Flight height 10 m above surface.
-receiver_locations = np.c_[x, y, z]
-
-# Define the component(s) of the field we want to simulate as a list of strings.
-# Here we simulate total magnetic intensity data.
-components = ["tmi"]
-
-# Use the observation locations and components to define the receivers. To
-# simulate data, the receivers must be defined as a list.
-receiver_list = magnetics.receivers.Point(receiver_locations, components=components)
-
-receiver_list = [receiver_list]
-
-# Define the inducing field H0 = (intensity [nT], inclination [deg], declination [deg])
-inclination = 90
-declination = 0
-strength = 50000
-
-source_field = magnetics.sources.UniformBackgroundField(
-    receiver_list=receiver_list,
-    amplitude=strength,
-    inclination=inclination,
-    declination=declination,
-)
-
-# Define the survey
-survey = magnetics.survey.Survey(source_field)
-
-
-#############################################
-# Defining a Tensor Mesh
-# ----------------------
-#
-# Here, we create the tensor mesh that will be used for the forward simulation.
-#
-
-dh = 5.0
-hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
-hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
-hz = [(dh, 5, -1.3), (dh, 15)]
-mesh = TensorMesh([hx, hy, hz], "CCN")
-
-
-#############################################
-# Defining a Susceptibility Model
-# -------------------------------
-#
-# Here, we create the model that will be used to predict magnetic data
-# and the mapping from the model to the mesh. The model
-# consists of a susceptible sphere in a less susceptible host.
-#
-
-# Define susceptibility values for each unit in SI
-background_susceptibility = 0.0001
-sphere_susceptibility = 0.01
-
-# Find cells that are active in the forward modeling (cells below surface)
-ind_active = active_from_xyz(mesh, xyz_topo)
-
-# Define mapping from model to active cells
-nC = int(ind_active.sum())
-model_map = maps.IdentityMap(nP=nC)  # model is a value for each active cell
-
-# Define model. Models in SimPEG are vector arrays
-model = background_susceptibility * np.ones(ind_active.sum())
-ind_sphere = model_builder.get_indices_sphere(
-    np.r_[0.0, 0.0, -45.0], 15.0, mesh.cell_centers
-)
-ind_sphere = ind_sphere[ind_active]
-model[ind_sphere] = sphere_susceptibility
-
-# Plot Model
-fig = plt.figure(figsize=(9, 4))
-
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-ax1 = fig.add_axes([0.1, 0.12, 0.73, 0.78])
-mesh.plot_slice(
-    plotting_map * model,
-    normal="Y",
-    ax=ax1,
-    ind=int(mesh.shape_cells[1] / 2),
-    grid=True,
-    clim=(np.min(model), np.max(model)),
-)
-ax1.set_title("Model slice at y = 0 m")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("z (m)")
-
-ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78])
-norm = mpl.colors.Normalize(vmin=np.min(model), vmax=np.max(model))
-cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical")
-cbar.set_label("Magnetic Susceptibility (SI)", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
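-# Hedged unit check for the susceptibility model above: the induced
-# magnetization is M = chi * H0 = chi * B0 / mu_0, so chi = 0.01 SI in the
-# 50,000 nT inducing field gives roughly 0.4 A/m.
-mu_0 = 4e-7 * np.pi
-print(0.01 * 50000e-9 / mu_0)  # ~0.398 A/m
-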
-
-###################################################################
-# Simulation: TMI Data for a Susceptibility Model
-# -----------------------------------------------
-#
-# Here we demonstrate how to predict magnetic data for a magnetic
-# susceptibility model using the integral formulation.
-#
-
-###############################################################################
-# Define the forward simulation. By setting the 'store_sensitivities' keyword
-# argument to "forward_only", we simulate the data without storing the sensitivities
-
-simulation = magnetics.simulation.Simulation3DIntegral(
-    survey=survey,
-    mesh=mesh,
-    model_type="scalar",
-    chiMap=model_map,
-    active_cells=ind_active,
-    store_sensitivities="forward_only",
-    engine="choclo",
-)
-
-###############################################################################
-# .. tip::
-#
-#    Since SimPEG v0.22.0 we can use `Choclo
-#    `_ as the engine for running the magnetic
-#    simulations, which results in faster and more memory-efficient runs. Just
-#    pass ``engine="choclo"`` when constructing the simulation.
-#
-
-###############################################################################
-# Compute predicted data for a susceptibility model
-
-dpred = simulation.dpred(model)
-
-# Plot
-fig = plt.figure(figsize=(6, 5))
-v_max = np.max(np.abs(dpred))
-
-ax1 = fig.add_axes([0.1, 0.1, 0.8, 0.85])
-plot2Ddata(
-    receiver_list[0].locations,
-    dpred,
-    ax=ax1,
-    ncontour=30,
-    clim=(-v_max, v_max),
-    contourOpts={"cmap": "bwr"},
-)
-ax1.set_title("TMI Anomaly")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("y (m)")
-
-ax2 = fig.add_axes([0.87, 0.1, 0.03, 0.85])
-norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dpred)), vmax=np.max(np.abs(dpred)))
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr
-)
-cbar.set_label("$nT$", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
-
-#######################################################
-# Optional: Export Data
-# ---------------------
-#
-# Write the data and topography
-#
-
-if write_output:
-    dir_path = os.path.dirname(__file__).split(os.path.sep)
-    dir_path.extend(["outputs"])
-    dir_path = os.path.sep.join(dir_path) + os.path.sep
-
-    if not os.path.exists(dir_path):
-        os.mkdir(dir_path)
-
-    fname = dir_path + "magnetics_topo.txt"
-    np.savetxt(fname, np.c_[xyz_topo], fmt="%.4e")
-
-    np.random.seed(211)
-    maximum_anomaly = np.max(np.abs(dpred))
-    noise = 0.02 * maximum_anomaly * np.random.randn(len(dpred))
-    fname = dir_path + "magnetics_data.obs"
-    np.savetxt(fname, np.c_[receiver_locations, dpred + noise], fmt="%.4e")
diff --git a/tutorials/04-magnetics/plot_2b_magnetics_mvi.py b/tutorials/04-magnetics/plot_2b_magnetics_mvi.py
index 51c5610f5b..95cbad69b4 100644
--- a/tutorials/04-magnetics/plot_2b_magnetics_mvi.py
+++ b/tutorials/04-magnetics/plot_2b_magnetics_mvi.py
@@ -2,299 +2,12 @@
 Forward Simulation of Gradiometry Data for Magnetic Vector Models
 =================================================================
 
-Here we use the module *simpeg.potential_fields.magnetics* to predict magnetic
-gradiometry data for magnetic vector models. The simulation is performed on a
-Tree mesh. For this tutorial, we focus on the following:
+.. important::
 
-    - How to define the survey when we want to measure multiple field components
-    - How to predict magnetic data in the case of remanence
-    - How to include surface topography
-    - How to construct tree meshes based on topography and survey geometry
-    - The units of the physical property model and resulting data
+    This tutorial has been moved to `User Tutorials
+    `_.
+    Check out the `3D Forward Simulation of Magnetic Gradiometry Data for Magnetic Vector
+    Models `_ tutorial.
""" - -######################################################################### -# Import Modules -# -------------- -# - -import numpy as np -from scipy.interpolate import LinearNDInterpolator -import matplotlib as mpl -import matplotlib.pyplot as plt - -from discretize import TreeMesh -from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz -from simpeg.utils import plot2Ddata, model_builder, mat_utils -from simpeg import maps -from simpeg.potential_fields import magnetics - -# sphinx_gallery_thumbnail_number = 2 - - -############################################# -# Topography -# ---------- -# -# Here we define surface topography as an (N, 3) numpy array. Topography could -# also be loaded from a file. -# - -[x_topo, y_topo] = np.meshgrid(np.linspace(-200, 200, 41), np.linspace(-200, 200, 41)) -z_topo = -15 * np.exp(-(x_topo**2 + y_topo**2) / 80**2) -x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo) -xyz_topo = np.c_[x_topo, y_topo, z_topo] - -############################################# -# Defining the Survey -# ------------------- -# -# Here, we define survey that will be used for the simulation. Magnetic -# surveys are simple to create. The user only needs an (N, 3) array to define -# the xyz locations of the observation locations, the list of field components -# which are to be modeled and the properties of the Earth's field. -# - -# Define the observation locations as an (N, 3) numpy array or load them. -x = np.linspace(-80.0, 80.0, 17) -y = np.linspace(-80.0, 80.0, 17) -x, y = np.meshgrid(x, y) -x, y = mkvc(x.T), mkvc(y.T) -fun_interp = LinearNDInterpolator(np.c_[x_topo, y_topo], z_topo) -z = fun_interp(np.c_[x, y]) + 10 # Flight height 10 m above surface. -receiver_locations = np.c_[x, y, z] - -# Define the component(s) of the field we want to simulate as strings within -# a list. Here we measure the x, y and z derivatives of the Bz anomaly at -# each observation location. -components = ["bxz", "byz", "bzz"] - -# Use the observation locations and components to define the receivers. To -# simulate data, the receivers must be defined as a list. -receiver_list = magnetics.receivers.Point(receiver_locations, components=components) - -receiver_list = [receiver_list] - -# Define the inducing field H0 = (intensity [nT], inclination [deg], declination [deg]) -field_inclination = 60 -field_declination = 30 -field_strength = 50000 - -source_field = magnetics.sources.UniformBackgroundField( - receiver_list=receiver_list, - amplitude=field_strength, - inclination=field_inclination, - declination=field_declination, -) - -# Define the survey -survey = magnetics.survey.Survey(source_field) - - -########################################################## -# Defining an OcTree Mesh -# ----------------------- -# -# Here, we create the OcTree mesh that will be used to predict magnetic -# gradiometry data for the forward simuulation. 
-
-##########################################################
-# Defining an OcTree Mesh
-# -----------------------
-#
-# Here, we create the OcTree mesh that will be used to predict magnetic
-# gradiometry data for the forward simulation.
-#
-
-dx = 5  # minimum cell width (base mesh cell width) in x
-dy = 5  # minimum cell width (base mesh cell width) in y
-dz = 5  # minimum cell width (base mesh cell width) in z
-
-x_length = 240.0  # domain width in x
-y_length = 240.0  # domain width in y
-z_length = 120.0  # domain width in z
-
-# Compute number of base mesh cells required in x, y and z
-nbcx = 2 ** int(np.round(np.log(x_length / dx) / np.log(2.0)))
-nbcy = 2 ** int(np.round(np.log(y_length / dy) / np.log(2.0)))
-nbcz = 2 ** int(np.round(np.log(z_length / dz) / np.log(2.0)))
-
-# Define the base mesh
-hx = [(dx, nbcx)]
-hy = [(dy, nbcy)]
-hz = [(dz, nbcz)]
-mesh = TreeMesh([hx, hy, hz], x0="CCN")
-
-# Refine based on surface topography
-mesh = refine_tree_xyz(
-    mesh, xyz_topo, octree_levels=[2, 2], method="surface", finalize=False
-)
-
-# Refine box based on region of interest
-xp, yp, zp = np.meshgrid([-100.0, 100.0], [-100.0, 100.0], [-80.0, 0.0])
-xyz = np.c_[mkvc(xp), mkvc(yp), mkvc(zp)]
-
-mesh = refine_tree_xyz(mesh, xyz, octree_levels=[2, 2], method="box", finalize=False)
-
-mesh.finalize()
-
-##########################################################
-# Create Magnetic Vector Intensity Model (MVI)
-# --------------------------------------------
-#
-# Magnetic vector models are defined by three-component effective
-# susceptibilities. To create a magnetic vector
-# model, we must
-#
-#     1) Define the magnetic susceptibility for each cell. Then multiply by the
-#     unit vector direction of the inducing field. (induced contribution)
-#     2) Define the remanent magnetization vector for each cell and normalize
-#     by the magnitude of the Earth's field (remanent contribution)
-#     3) Sum the induced and remanent contributions
-#     4) Define as a vector np.r_[chi_1, chi_2, chi_3]
-#
-#
-
-# Define susceptibility values for each unit in SI
-background_susceptibility = 0.0001
-sphere_susceptibility = 0.01
-
-# Find cells active in the forward modeling (cells below surface)
-ind_active = active_from_xyz(mesh, xyz_topo)
-
-# Define mapping from model to active cells
-nC = int(ind_active.sum())
-model_map = maps.IdentityMap(nP=3 * nC)  # model has 3 parameters for each cell
-
-# Define susceptibility for each cell
-susceptibility_model = background_susceptibility * np.ones(ind_active.sum())
-ind_sphere = model_builder.get_indices_sphere(np.r_[0.0, 0.0, -45.0], 15.0, mesh.gridCC)
-ind_sphere = ind_sphere[ind_active]
-susceptibility_model[ind_sphere] = sphere_susceptibility
-
-# Compute the unit direction of the inducing field in Cartesian coordinates
-field_direction = mat_utils.dip_azimuth2cartesian(field_inclination, field_declination)
-
-# Multiply the susceptibility model to obtain the x, y, z components of the
-# effective susceptibility contribution from induced magnetization.
-susceptibility_model = np.outer(susceptibility_model, field_direction)
-
-# Define the effective susceptibility contribution for remanent magnetization to have a
-# magnitude of 0.01 SI, with inclination -45 and declination 90
-remanence_inclination = -45.0
-remanence_declination = 90.0
-remanence_susceptibility = 0.01
-
-remanence_model = np.zeros(np.shape(susceptibility_model))
-effective_susceptibility_sphere = (
-    remanence_susceptibility
-    * mat_utils.dip_azimuth2cartesian(remanence_inclination, remanence_declination)
-)
-remanence_model[ind_sphere, :] = effective_susceptibility_sphere
-
-# Define effective susceptibility model as a vector np.r_[chi_x, chi_y, chi_z]
-plotting_model = susceptibility_model + remanence_model
-model = mkvc(plotting_model)
-
-# Plot Effective Susceptibility Model
-fig = plt.figure(figsize=(9, 4))
-
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-# Amplitude of the three-component effective susceptibility in each cell
-plotting_model = np.sqrt(np.sum(plotting_model**2, axis=1))
-ax1 = fig.add_axes([0.1, 0.12, 0.73, 0.78])
-mesh.plot_slice(
-    plotting_map * plotting_model,
-    normal="Y",
-    ax=ax1,
-    ind=int(mesh.h[1].size / 2),
-    grid=True,
-    clim=(np.min(plotting_model), np.max(plotting_model)),
-)
-ax1.set_title("MVI Model at y = 0 m")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("z (m)")
-
-ax2 = fig.add_axes([0.85, 0.12, 0.05, 0.78])
-norm = mpl.colors.Normalize(vmin=np.min(plotting_model), vmax=np.max(plotting_model))
-cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical")
-cbar.set_label(
-    "Effective Susceptibility Amplitude (SI)", rotation=270, labelpad=15, size=12
-)
-
-
-###################################################################
-# Simulation: Gradiometry Data for an MVI Model
-# ---------------------------------------------
-#
-# Here we predict magnetic gradiometry data for an effective susceptibility model
-# in the case of remanent magnetization.
-#
-
-###############################################################################
-# Define the forward simulation. By setting the 'store_sensitivities' keyword
-# argument to "forward_only", we simulate the data without storing the sensitivities
-
-simulation = magnetics.simulation.Simulation3DIntegral(
-    survey=survey,
-    mesh=mesh,
-    chiMap=model_map,
-    active_cells=ind_active,
-    model_type="vector",
-    store_sensitivities="forward_only",
-)
-
-
-###############################################################################
-# Compute predicted data for some model
-
-dpred = simulation.dpred(model)
-n_data = len(dpred)
-
-# Plot
-fig = plt.figure(figsize=(13, 4))
-v_max = np.max(np.abs(dpred))
-
-ax1 = fig.add_axes([0.1, 0.15, 0.25, 0.78])
-plot2Ddata(
-    receiver_list[0].locations,
-    dpred[0:n_data:3],
-    ax=ax1,
-    ncontour=30,
-    clim=(-v_max, v_max),
-    contourOpts={"cmap": "bwr"},
-)
-ax1.set_title("$dBz/dx$")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("y (m)")
-
-ax2 = fig.add_axes([0.36, 0.15, 0.25, 0.78])
-cplot2 = plot2Ddata(
-    receiver_list[0].locations,
-    dpred[1:n_data:3],
-    ax=ax2,
-    ncontour=30,
-    clim=(-v_max, v_max),
-    contourOpts={"cmap": "bwr"},
-)
-cplot2[0].set_clim((-v_max, v_max))
-ax2.set_title("$dBz/dy$")
-ax2.set_xlabel("x (m)")
-ax2.set_yticks([])
-
-ax3 = fig.add_axes([0.62, 0.15, 0.25, 0.78])
-cplot3 = plot2Ddata(
-    receiver_list[0].locations,
-    dpred[2:n_data:3],
-    ax=ax3,
-    ncontour=30,
-    clim=(-v_max, v_max),
-    contourOpts={"cmap": "bwr"},
-)
-cplot3[0].set_clim((-v_max, v_max))
-ax3.set_title("$dBz/dz$")
-ax3.set_xlabel("x (m)")
-ax3.set_yticks([])
-
-ax4 = fig.add_axes([0.88, 0.15, 0.02, 0.79])
-norm = mpl.colors.Normalize(vmin=-v_max, vmax=v_max)
-cbar = mpl.colorbar.ColorbarBase(
-    ax4, norm=norm, orientation="vertical", cmap=mpl.cm.bwr
-)
-cbar.set_label("$nT/m$", rotation=270, labelpad=15, size=12)
-
-plt.show()
diff --git a/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py b/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py
index 61eeae9497..403f9c9598 100644
--- a/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py
+++ b/tutorials/04-magnetics/plot_inv_2a_magnetics_induced.py
@@ -2,466 +2,14 @@
 Sparse Norm Inversion for Total Magnetic Intensity Data on a Tensor Mesh
 ========================================================================
 
-Here we invert total magnetic intensity (TMI) data to recover a magnetic
-susceptibility model. We formulate the inverse problem as an iteratively
-re-weighted least-squares (IRLS) optimization problem. For this tutorial, we
-focus on the following:
+.. important::
 
-    - Defining the survey from xyz formatted data
-    - Generating a mesh based on survey geometry
-    - Including surface topography
-    - Defining the inverse problem (data misfit, regularization, optimization)
-    - Specifying directives for the inversion
-    - Setting sparse and blocky norms
-    - Plotting the recovered model and data misfit
-
-Although we consider TMI data in this tutorial, the same approach
-can be used to invert other types of geophysical data.
+    This tutorial has been moved to `User Tutorials
+    `_.
+    Check out the `Iteratively Re-weighted Least-Squares Inversion
+    `_
+    section of the `3D Inversion of TMI Data to Recover a Susceptibility Model
+    `_ tutorial.
""" - -######################################################################### -# Import modules -# -------------- -# - -import os -import numpy as np -import matplotlib as mpl -import matplotlib.pyplot as plt -import tarfile - -from discretize import TensorMesh -from discretize.utils import active_from_xyz -from simpeg.potential_fields import magnetics -from simpeg.utils import plot2Ddata, model_builder -from simpeg import ( - maps, - data, - inverse_problem, - data_misfit, - regularization, - optimization, - directives, - inversion, - utils, -) - -# sphinx_gallery_thumbnail_number = 3 - -############################################# -# Load Data and Plot -# ------------------ -# -# File paths for assets we are loading. To set up the inversion, we require -# topography and field observations. The true model defined on the whole mesh -# is loaded to compare with the inversion result. These files are stored as a -# tar-file on our google cloud bucket: -# "https://storage.googleapis.com/simpeg/doc-assets/magnetics.tar.gz" -# - -# storage bucket where we have the data -data_source = "https://storage.googleapis.com/simpeg/doc-assets/magnetics.tar.gz" - -# download the data -downloaded_data = utils.download(data_source, overwrite=True) - -# unzip the tarfile -tar = tarfile.open(downloaded_data, "r") -tar.extractall() -tar.close() - -# path to the directory containing our data -dir_path = downloaded_data.split(".")[0] + os.path.sep - -# files to work with -topo_filename = dir_path + "magnetics_topo.txt" -data_filename = dir_path + "magnetics_data.obs" - - -############################################# -# Load Data and Plot -# ------------------ -# -# Here we load and plot synthetic TMI data. Topography is generally -# defined as an (N, 3) array. TMI data is generally defined with 4 columns: -# x, y, z and data. -# - -topo_xyz = np.loadtxt(str(topo_filename)) -dobs = np.loadtxt(str(data_filename)) - -receiver_locations = dobs[:, 0:3] -dobs = dobs[:, -1] - -# Plot -fig = plt.figure(figsize=(6, 5)) -v_max = np.max(np.abs(dobs)) - -ax1 = fig.add_axes([0.1, 0.1, 0.75, 0.85]) -plot2Ddata( - receiver_locations, - dobs, - ax=ax1, - ncontour=30, - clim=(-v_max, v_max), - contourOpts={"cmap": "bwr"}, -) -ax1.set_title("TMI Anomaly") -ax1.set_xlabel("x (m)") -ax1.set_ylabel("y (m)") - -ax2 = fig.add_axes([0.85, 0.05, 0.05, 0.9]) -norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dobs)), vmax=np.max(np.abs(dobs))) -cbar = mpl.colorbar.ColorbarBase( - ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr -) -cbar.set_label("$nT$", rotation=270, labelpad=15, size=12) - -plt.show() - -############################################# -# Assign Uncertainty -# ------------------ -# -# Inversion with SimPEG requires that we define standard deviation on our data. -# This represents our estimate of the noise in our data. For magnetic inversions, -# a constant floor value is generall applied to all data. For this tutorial, the -# standard deviation on each datum will be 2% of the maximum observed magnetics -# anomaly value. -# - -maximum_anomaly = np.max(np.abs(dobs)) - -std = 0.02 * maximum_anomaly * np.ones(len(dobs)) - -############################################# -# Defining the Survey -# ------------------- -# -# Here, we define survey that will be used for the simulation. Magnetic -# surveys are simple to create. The user only needs an (N, 3) array to define -# the xyz locations of the observation locations, the list of field components -# which are to be modeled and the properties of the Earth's field. 
-#
-
-# Define the component(s) of the field we are inverting as a list. Here we will
-# invert total magnetic intensity data.
-components = ["tmi"]
-
-# Use the observation locations and components to define the receivers. To
-# simulate data, the receivers must be defined as a list.
-receiver_list = magnetics.receivers.Point(receiver_locations, components=components)
-
-receiver_list = [receiver_list]
-
-# Define the inducing field H0 = (intensity [nT], inclination [deg], declination [deg])
-inclination = 90
-declination = 0
-strength = 50000
-
-source_field = magnetics.sources.UniformBackgroundField(
-    receiver_list=receiver_list,
-    amplitude=strength,
-    inclination=inclination,
-    declination=declination,
-)
-
-# Define the survey
-survey = magnetics.survey.Survey(source_field)
-
-#############################################
-# Defining the Data
-# -----------------
-#
-# Here is where we define the data that is inverted. The data is defined by
-# the survey, the observation values and the standard deviations.
-#
-
-data_object = data.Data(survey, dobs=dobs, standard_deviation=std)
-
-
-#############################################
-# Defining a Tensor Mesh
-# ----------------------
-#
-# Here, we create the tensor mesh that will be used to invert TMI data.
-# If desired, we could define an OcTree mesh.
-#
-
-dh = 5.0
-hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
-hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
-hz = [(dh, 5, -1.3), (dh, 15)]
-mesh = TensorMesh([hx, hy, hz], "CCN")
-
-########################################################
-# Starting/Reference Model and Mapping on Tensor Mesh
-# ---------------------------------------------------
-#
-# Here, we create starting and/or reference models for the inversion as
-# well as the mapping from the model space to the active cells. Starting and
-# reference models can be a constant background value or contain a-priori
-# structures. Here, the background is 1e-4 SI.
-#
-
-# Define background susceptibility model in SI. Don't make this 0!
-# Otherwise the gradient for the 1st iteration is zero and the inversion will
-# not converge.
-background_susceptibility = 1e-4
-
-# Find the indices of the active cells in the forward model (ones below surface)
-active_cells = active_from_xyz(mesh, topo_xyz)
-
-# Define mapping from model to active cells
-nC = int(active_cells.sum())
-model_map = maps.IdentityMap(nP=nC)  # model consists of a value for each cell
-
-# Define starting model
-starting_model = background_susceptibility * np.ones(nC)
-
-##############################################
-# Define the Physics
-# ------------------
-#
-# Here, we define the physics of the magnetics problem by using the simulation
-# class.
-#
-
-###############################################################################
-# Define the problem. Define the cells below topography and the mapping
-
-simulation = magnetics.simulation.Simulation3DIntegral(
-    survey=survey,
-    mesh=mesh,
-    model_type="scalar",
-    chiMap=model_map,
-    active_cells=active_cells,
-    engine="choclo",
-)
-
-###############################################################################
-# .. tip::
-#
-#     Since SimPEG v0.22.0 we can use `Choclo
-#     `_ as the engine for running the magnetic
-#     simulations, which results in faster and more memory efficient runs. Just
-#     pass ``engine="choclo"`` when constructing the simulation.
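The tip above assumes the optional Choclo dependency is installed. A defensive sketch (assuming only that the import may fail, and that ``"geoana"`` remains the default engine) selects the engine at run time:

.. code:: python

    # Fall back to the default engine when choclo is not installed.
    try:
        import choclo  # noqa: F401  -- only checking availability

        engine = "choclo"
    except ImportError:
        engine = "geoana"

    simulation = magnetics.simulation.Simulation3DIntegral(
        survey=survey,
        mesh=mesh,
        model_type="scalar",
        chiMap=model_map,
        active_cells=active_cells,
        engine=engine,
    )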
-#
-
-
-#######################################################################
-# Define Inverse Problem
-# ----------------------
-#
-# The inverse problem is defined by 3 things:
-#
-# 1) Data Misfit: a measure of how well our recovered model explains the field data
-# 2) Regularization: constraints placed on the recovered model and a priori information
-# 3) Optimization: the numerical approach used to solve the inverse problem
-#
-
-# Define the data misfit. Here the data misfit is the L2 norm of the weighted
-# residual between the observed data and the data predicted for a given model.
-# Within the data misfit, the residual between predicted and observed data are
-# normalized by the data's standard deviation.
-dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation)
-
-# Define the regularization (model objective function)
-reg = regularization.Sparse(
-    mesh,
-    active_cells=active_cells,
-    mapping=model_map,
-    reference_model=starting_model,
-    gradient_type="total",
-)
-
-# Define sparse and blocky norms p, qx, qy, qz
-reg.norms = [0, 0, 0, 0]
-
-# Define how the optimization problem is solved. Here we will use a projected
-# Gauss-Newton approach that employs the conjugate gradient solver.
-opt = optimization.ProjectedGNCG(
-    maxIter=20, lower=0.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
-)
-
-# Here we define the inverse problem that is to be solved
-inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
-
-#######################################################################
-# Define Inversion Directives
-# ---------------------------
-#
-# Here we define any directives that are carried out during the inversion. This
-# includes the cooling schedule for the trade-off parameter (beta), stopping
-# criteria for the inversion and saving inversion results at each iteration.
-#
-
-# Defining a starting value for the trade-off parameter (beta) between the data
-# misfit and the regularization.
-starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=5)
-
-# Options for outputting recovered models and predicted data for each beta.
-save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
-
-# Defines the directives for the IRLS regularization. This includes setting
-# the cooling schedule for the trade-off parameter.
-update_IRLS = directives.UpdateIRLS(
-    f_min_change=1e-4,
-    max_irls_iterations=30,
-    cooling_factor=1.5,
-    misfit_tolerance=1e-2,
-)
-
-# Updating the preconditioner if it is model dependent.
-update_jacobi = directives.UpdatePreconditioner()
-
-# Setting a stopping criterion for the inversion.
-target_misfit = directives.TargetMisfit(chifact=1)
-
-# Add sensitivity weights
-sensitivity_weights = directives.UpdateSensitivityWeights(every_iteration=False)
-
-# The directives are defined as a list.
-directives_list = [
-    sensitivity_weights,
-    starting_beta,
-    save_iteration,
-    update_IRLS,
-    update_jacobi,
-]
-
-#####################################################################
-# Running the Inversion
-# ---------------------
-#
-# To define the inversion object, we need to define the inversion problem and
-# the set of directives. We can then run the inversion.
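Note that ``target_misfit`` is instantiated above but left out of ``directives_list``: the ``UpdateIRLS`` directive manages the transition to the sparse norms once the target misfit is reached, so adding ``TargetMisfit`` as well could terminate the inversion before the IRLS iterations begin. A sketch of how the list would look for a purely least-squares (non-IRLS) run, where the stopping criterion does belong in the list:

.. code:: python

    # Hypothetical directive list for a plain L2 inversion (no UpdateIRLS);
    # here TargetMisfit halts the inversion at a chi-factor of 1.
    l2_directives_list = [
        sensitivity_weights,
        starting_beta,
        save_iteration,
        update_jacobi,
        target_misfit,
    ]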
-# - -# Here we combine the inverse problem and the set of directives -inv = inversion.BaseInversion(inv_prob, directives_list) - -# Print target misfit to compare with convergence -# print("Target misfit is " + str(target_misfit.target)) - -# Run the inversion -recovered_model = inv.run(starting_model) - -############################################################## -# Recreate True Model -# ------------------- -# - - -background_susceptibility = 0.0001 -sphere_susceptibility = 0.01 - -true_model = background_susceptibility * np.ones(nC) -ind_sphere = model_builder.get_indices_sphere( - np.r_[0.0, 0.0, -45.0], 15.0, mesh.cell_centers -) -ind_sphere = ind_sphere[active_cells] -true_model[ind_sphere] = sphere_susceptibility - - -############################################################ -# Plotting True Model and Recovered Model -# --------------------------------------- -# - -# Plot True Model -fig = plt.figure(figsize=(9, 4)) -plotting_map = maps.InjectActiveCells(mesh, active_cells, np.nan) - -ax1 = fig.add_axes([0.08, 0.1, 0.75, 0.8]) -mesh.plot_slice( - plotting_map * true_model, - normal="Y", - ax=ax1, - ind=int(mesh.shape_cells[1] / 2), - grid=True, - clim=(np.min(true_model), np.max(true_model)), - pcolor_opts={"cmap": "viridis"}, -) -ax1.set_title("Model slice at y = 0 m") - -ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8]) -norm = mpl.colors.Normalize(vmin=np.min(true_model), vmax=np.max(true_model)) -cbar = mpl.colorbar.ColorbarBase( - ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis, format="%.1e" -) -cbar.set_label("SI", rotation=270, labelpad=15, size=12) - -plt.show() - -# Plot Recovered Model -fig = plt.figure(figsize=(9, 4)) -plotting_map = maps.InjectActiveCells(mesh, active_cells, np.nan) - -ax1 = fig.add_axes([0.08, 0.1, 0.75, 0.8]) -mesh.plot_slice( - plotting_map * recovered_model, - normal="Y", - ax=ax1, - ind=int(mesh.shape_cells[1] / 2), - grid=True, - clim=(np.min(recovered_model), np.max(recovered_model)), - pcolor_opts={"cmap": "viridis"}, -) -ax1.set_title("Model slice at y = 0 m") - -ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8]) -norm = mpl.colors.Normalize(vmin=np.min(recovered_model), vmax=np.max(recovered_model)) -cbar = mpl.colorbar.ColorbarBase( - ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis, format="%.1e" -) -cbar.set_label("SI", rotation=270, labelpad=15, size=12) - -plt.show() - -################################################################### -# Plotting Predicted Data and Misfit -# ---------------------------------- -# - -# Predicted data with final recovered model -dpred = inv_prob.dpred - -# Observed data | Predicted data | Normalized data misfit -data_array = np.c_[dobs, dpred, (dobs - dpred) / std] - -fig = plt.figure(figsize=(17, 4)) -plot_title = ["Observed", "Predicted", "Normalized Misfit"] -plot_units = ["nT", "nT", ""] - -ax1 = 3 * [None] -ax2 = 3 * [None] -norm = 3 * [None] -cbar = 3 * [None] -cplot = 3 * [None] -v_lim = [np.max(np.abs(dobs)), np.max(np.abs(dobs)), np.max(np.abs(data_array[:, 2]))] - -for ii in range(0, 3): - ax1[ii] = fig.add_axes([0.33 * ii + 0.03, 0.11, 0.25, 0.84]) - cplot[ii] = plot2Ddata( - receiver_list[0].locations, - data_array[:, ii], - ax=ax1[ii], - ncontour=30, - clim=(-v_lim[ii], v_lim[ii]), - contourOpts={"cmap": "bwr"}, - ) - ax1[ii].set_title(plot_title[ii]) - ax1[ii].set_xlabel("x (m)") - ax1[ii].set_ylabel("y (m)") - - ax2[ii] = fig.add_axes([0.33 * ii + 0.27, 0.11, 0.01, 0.84]) - norm[ii] = mpl.colors.Normalize(vmin=-v_lim[ii], vmax=v_lim[ii]) - cbar[ii] = mpl.colorbar.ColorbarBase( 
-        ax2[ii], norm=norm[ii], orientation="vertical", cmap=mpl.cm.bwr
-    )
-    cbar[ii].set_label(plot_units[ii], rotation=270, labelpad=15, size=12)
-
-plt.show()
diff --git a/tutorials/05-dcr/plot_fwd_1_dcr_sounding.py b/tutorials/05-dcr/plot_fwd_1_dcr_sounding.py
index 7ea20813c7..f4c1c78023 100644
--- a/tutorials/05-dcr/plot_fwd_1_dcr_sounding.py
+++ b/tutorials/05-dcr/plot_fwd_1_dcr_sounding.py
@@ -3,169 +3,13 @@
 Simulate a 1D Sounding over a Layered Earth
 ===========================================
 
-Here we use the module *simpeg.electromagnetics.static.resistivity* to predict
-sounding data over a 1D layered Earth. In this tutorial, we focus on the following:
+.. important::
 
-    - General definition of sources and receivers
-    - How to define the survey
-    - How to predict voltage or apparent resistivity data
-    - The units of the model and resulting data
+    This tutorial has been moved to `User Tutorials
+    `_.
-
-For this tutorial, we will simulate sounding data over a layered Earth using
-a Wenner array. The end product is a sounding curve which tells us how the
-electrical resistivity changes with depth.
+    Check out the `1D Forward Simulation for a Single Sounding
+    `_ tutorial.
 """
-
-#########################################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-
-from simpeg import maps
-from simpeg.electromagnetics.static import resistivity as dc
-from simpeg.utils import plot_1d_layer_model
-
-mpl.rcParams.update({"font.size": 16})
-
-write_output = False
-
-# sphinx_gallery_thumbnail_number = 2
-
-
-#####################################################################
-# Create Survey
-# -------------
-#
-# Here we demonstrate a general way to define sources and receivers.
-# For pole and dipole sources, we must define the A or AB electrode locations,
-# respectively. For the pole and dipole receivers, we must define the M or
-# MN electrode locations, respectively.
-#
-
-a_min = 20.0
-a_max = 500.0
-n_stations = 25
-
-# Define the 'a' spacing for Wenner array measurements for each reading
-electrode_separations = np.linspace(a_min, a_max, n_stations)
-
-source_list = []  # create empty list for sources to live
-
-for ii in range(0, len(electrode_separations)):
-    # Extract separation parameter for sources and receivers
-    a = electrode_separations[ii]
-
-    # AB electrode locations for source. Each is a (1, 3) numpy array
-    A_location = np.r_[-1.5 * a, 0.0, 0.0]
-    B_location = np.r_[1.5 * a, 0.0, 0.0]
-
-    # MN electrode locations for receivers. Each is an (N, 3) numpy array
-    M_location = np.r_[-0.5 * a, 0.0, 0.0]
-    N_location = np.r_[0.5 * a, 0.0, 0.0]
-
-    # Create receivers list. Define as pole or dipole.
-    receiver_list = dc.receivers.Dipole(
-        M_location, N_location, data_type="apparent_resistivity"
-    )
-    receiver_list = [receiver_list]
-
-    # Define the source properties and associated receivers
-    source_list.append(dc.sources.Dipole(receiver_list, A_location, B_location))
-
-# Define survey
-survey = dc.Survey(source_list)
-
-
-###############################################
-# Defining a 1D Layered Earth Model
-# ---------------------------------
-#
-# Here, we define the layer thicknesses and electrical resistivities for our
-# 1D simulation. If we have N layers, we define N electrical resistivity
-# values and N-1 layer thicknesses. The lowest layer is assumed to extend to
-# infinity.
-# In the case of a halfspace, the layer thicknesses would be an empty array.
-#
-
-# Define layer thicknesses.
-layer_thicknesses = np.r_[100.0, 100.0]
-
-# Define layer resistivities.
-model = np.r_[1e3, 4e3, 2e2]
-
-# Define mapping from model to 1D layers.
-model_map = maps.IdentityMap(nP=len(model))
-
-###############################################################
-# Plot Resistivity Model
-# ----------------------
-#
-# Here we plot the 1D resistivity model.
-#
-
-# Plot the 1D model
-ax = plot_1d_layer_model(layer_thicknesses, model_map * model)
-ax.set_xlabel(r"Resistivity ($\Omega m$)")
-
-#######################################################################
-# Define the Forward Simulation and Predict DC Resistivity Data
-# -------------------------------------------------------------
-#
-# Here we predict DC resistivity data. If the keyword argument *rhoMap* is
-# defined, the simulation will expect a resistivity model. If the keyword
-# argument *sigmaMap* is defined, the simulation will expect a conductivity model.
-#
-
-simulation = dc.simulation_1d.Simulation1DLayers(
-    survey=survey,
-    rhoMap=model_map,
-    thicknesses=layer_thicknesses,
-)
-
-# Predict data for a given model
-dpred = simulation.dpred(model)
-
-# Plot apparent resistivities on sounding curve
-fig = plt.figure(figsize=(11, 5))
-ax1 = fig.add_axes([0.1, 0.1, 0.75, 0.85])
-ax1.semilogy(1.5 * electrode_separations, dpred, "b")
-ax1.set_xlabel("AB/2 (m)")
-ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
-plt.show()
-
-
-#########################################################################
-# Optional: Export Data
-# ---------------------
-#
-# Export data and true model
-#
-
-if write_output:
-    dir_path = os.path.dirname(__file__).split(os.path.sep)
-    dir_path.extend(["outputs"])
-    dir_path = os.path.sep.join(dir_path) + os.path.sep
-
-    if not os.path.exists(dir_path):
-        os.mkdir(dir_path)
-
-    np.random.seed(145)
-    noise = 0.025 * dpred * np.random.randn(len(dpred))
-
-    data_array = np.c_[
-        survey.locations_a,
-        survey.locations_b,
-        survey.locations_m,
-        survey.locations_n,
-        dpred + noise,
-    ]
-
-    fname = dir_path + "app_res_1d_data.dobs"
-    np.savetxt(fname, data_array, fmt="%.4e")
diff --git a/tutorials/05-dcr/plot_fwd_2_dcr2d.py b/tutorials/05-dcr/plot_fwd_2_dcr2d.py
index 89f0556cb8..c1ca6be0f1 100644
--- a/tutorials/05-dcr/plot_fwd_2_dcr2d.py
+++ b/tutorials/05-dcr/plot_fwd_2_dcr2d.py
@@ -3,323 +3,13 @@
 DC Resistivity Forward Simulation in 2.5D
 =========================================
 
-Here we use the module *simpeg.electromagnetics.static.resistivity* to predict
-DC resistivity data and plot using a pseudosection. In this tutorial, we focus
-on the following:
+.. important::
 
-    - How to define the survey
-    - How to define the forward simulation
-    - How to predict normalized voltage data for a synthetic conductivity model
-    - How to include surface topography
-    - The units of the model and resulting data
+    This tutorial has been moved to `User Tutorials
+    `_.
+    Check out the `2.5D Forward Simulation
+    `_ tutorial.
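Returning briefly to the 1D forward simulation above: the same data can be predicted from a conductivity parameterization, as noted in that section. A minimal sketch, assuming the ``survey``, ``model`` and ``layer_thicknesses`` defined there, and using the fact that conductivity is the reciprocal of resistivity:

.. code:: python

    # Equivalent 1D simulation parameterized by conductivity (S/m).
    simulation_sigma = dc.simulation_1d.Simulation1DLayers(
        survey=survey,
        sigmaMap=maps.IdentityMap(nP=len(model)),
        thicknesses=layer_thicknesses,
    )

    # Predicting with the reciprocal model should reproduce dpred above.
    dpred_sigma = simulation_sigma.dpred(1.0 / model)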
-""" - -######################################################################### -# Import modules -# -------------- -# - -from discretize import TreeMesh -from discretize.utils import mkvc, active_from_xyz - -from simpeg.utils import model_builder -from simpeg.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc -from simpeg import maps, data -from simpeg.electromagnetics.static import resistivity as dc -from simpeg.electromagnetics.static.utils.static_utils import ( - generate_dcip_sources_line, - apparent_resistivity_from_voltage, - plot_pseudosection, -) - -import os -import numpy as np -import matplotlib as mpl -import matplotlib.pyplot as plt -from matplotlib.colors import LogNorm - - -write_output = False -mpl.rcParams.update({"font.size": 16}) -# sphinx_gallery_thumbnail_number = 3 - - -############################################################### -# Defining Topography -# ------------------- -# -# Here we define surface topography as an (N, 3) numpy array. Topography could -# also be loaded from a file. In our case, our survey takes place within a set -# of valleys that run North-South. -# - -x_topo, y_topo = np.meshgrid( - np.linspace(-3000, 3000, 601), np.linspace(-3000, 3000, 101) -) -z_topo = 40.0 * np.sin(2 * np.pi * x_topo / 800) - 40.0 -x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo) -topo_xyz = np.c_[x_topo, y_topo, z_topo] - -# Create 2D topography. Since our 3D topography only changes in the x direction, -# it is easy to define the 2D topography projected along the survey line. For -# arbitrary topography and for an arbitrary survey orientation, the user must -# define the 2D topography along the survey line. -topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0) - -##################################################################### -# Create Dipole-Dipole Survey -# --------------------------- -# -# Here we define a single EW survey line that uses a dipole-dipole configuration. -# For the source, we must define the AB electrode locations. For the receivers -# we must define the MN electrode locations. Instead of creating the survey -# from scratch (see 1D example), we will use the *generat_dcip_survey_line* utility. -# - -# Define survey line parameters -survey_type = "dipole-dipole" -dimension_type = "2D" -data_type = "volt" -end_locations = np.r_[-400.0, 400.0] -station_separation = 40.0 -num_rx_per_src = 10 - -# Generate source list for DC survey line -source_list = generate_dcip_sources_line( - survey_type, - data_type, - dimension_type, - end_locations, - topo_2d, - num_rx_per_src, - station_separation, -) - -# Define survey -survey = dc.survey.Survey(source_list) - -############################################################### -# Create Tree Mesh -# ------------------ -# -# Here, we create the Tree mesh that will be used to predict DC data. -# - -dh = 4 # base cell width -dom_width_x = 3200.0 # domain width x -dom_width_z = 2400.0 # domain width z -nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0))) # num. base cells x -nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0))) # num. base cells z - -# Define the base mesh -hx = [(dh, nbcx)] -hz = [(dh, nbcz)] -mesh = TreeMesh([hx, hz], x0="CN") - -# Mesh refinement based on topography -mesh.refine_surface( - topo_xyz[:, [0, 2]], - padding_cells_by_level=[0, 0, 4, 4], - finalize=False, -) - -# Mesh refinement near transmitters and receivers. First we need to obtain the -# set of unique electrode locations. 
-electrode_locations = np.c_[
-    survey.locations_a,
-    survey.locations_b,
-    survey.locations_m,
-    survey.locations_n,
-]
-
-unique_locations = np.unique(
-    np.reshape(electrode_locations, (4 * survey.nD, 2)), axis=0
-)
-
-mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False)
-
-# Refine core mesh region
-xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0])
-xyz = np.c_[mkvc(xp), mkvc(zp)]
-mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False)
-
-mesh.finalize()
-
-###############################################################
-# Create Conductivity Model and Mapping for Tree Mesh
-# -----------------------------------------------------
-#
-# Here we define the conductivity model that will be used to predict DC
-# resistivity data, and the mapping from the model to the active cells.
-# The model consists of a conductive sphere and a resistive sphere within
-# a moderately conductive background. Air cells are assigned their value
-# through the mapping rather than the model.
-#
-
-# Define conductivity model in S/m (or resistivity model in Ohm m)
-air_conductivity = 1e-8
-background_conductivity = 1e-2
-conductor_conductivity = 1e-1
-resistor_conductivity = 1e-3
-
-# Find active cells in forward modeling (cells below surface)
-ind_active = active_from_xyz(mesh, topo_xyz[:, [0, 2]])
-# Define mapping from model to active cells
-nC = int(ind_active.sum())
-conductivity_map = maps.InjectActiveCells(mesh, ind_active, air_conductivity)
-
-# Define model
-conductivity_model = background_conductivity * np.ones(nC)
-
-ind_conductor = model_builder.get_indices_sphere(
-    np.r_[-120.0, -160.0], 60.0, mesh.gridCC
-)
-ind_conductor = ind_conductor[ind_active]
-conductivity_model[ind_conductor] = conductor_conductivity
-
-ind_resistor = model_builder.get_indices_sphere(np.r_[120.0, -100.0], 60.0, mesh.gridCC)
-ind_resistor = ind_resistor[ind_active]
-conductivity_model[ind_resistor] = resistor_conductivity
-
-# Plot Conductivity Model
-fig = plt.figure(figsize=(9, 4))
-
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-norm = LogNorm(vmin=1e-3, vmax=1e-1)
-
-ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7])
-mesh.plot_image(
-    plotting_map * conductivity_model, ax=ax1, grid=False, pcolor_opts={"norm": norm}
-)
-ax1.set_xlim(-600, 600)
-ax1.set_ylim(-600, 0)
-ax1.set_title("Conductivity Model")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("z (m)")
-
-ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7])
-cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical")
-cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
-
-###############################################################
-# Project Survey to Discretized Topography
-# ----------------------------------------
-#
-# It is important that electrodes are not modeled as being in the air. Even if the
-# electrodes are properly located along surface topography, they may lie above
-# the discretized topography. This step is carried out to ensure all electrodes
-# lie on the discretized surface.
-#
-
-survey.drape_electrodes_on_topography(mesh, ind_active, option="top")
-
-
-#######################################################################
-# Predict DC Resistivity Data
-# ---------------------------
-#
-# Here we predict DC resistivity data. If the keyword argument *sigmaMap* is
-# defined, the simulation will expect a conductivity model. If the keyword
-# argument *rhoMap* is defined, the simulation will expect a resistivity model.
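To illustrate the *rhoMap* alternative just mentioned (a sketch only; the tutorial itself continues with the conductivity parameterization in the next block):

.. code:: python

    # Resistivity-model variant of the same simulation. Air cells are
    # injected with the reciprocal of the air conductivity used above.
    resistivity_map = maps.InjectActiveCells(mesh, ind_active, 1.0 / air_conductivity)
    resistivity_model = 1.0 / conductivity_model  # Ohm m

    simulation_rho = dc.simulation_2d.Simulation2DNodal(
        mesh, survey=survey, rhoMap=resistivity_map
    )
    # simulation_rho.dpred(resistivity_model) matches the dpred computed below.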
-#
-
-simulation = dc.simulation_2d.Simulation2DNodal(
-    mesh, survey=survey, sigmaMap=conductivity_map
-)
-
-# Predict the data by running the simulation. The data are the raw voltage in
-# units of volts.
-dpred = simulation.dpred(conductivity_model)
-
-#######################################################################
-# Plotting in Pseudo-Section
-# --------------------------
-#
-# Here, we demonstrate how to plot 2D data in pseudo-section.
-# First, we plot the voltages in pseudo-section as a scatter plot. This
-# allows us to visualize the pseudo-sensitivity locations for our survey.
-# Next, we plot the apparent conductivities in pseudo-section as a filled
-# contour plot.
-#
-
-# Plot voltages pseudo-section
-fig = plt.figure(figsize=(12, 5))
-ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
-plot_pseudosection(
-    survey,
-    dobs=np.abs(dpred),
-    plot_type="scatter",
-    ax=ax1,
-    scale="log",
-    cbar_label="V/A",
-    scatter_opts={"cmap": mpl.cm.viridis},
-)
-ax1.set_title("Normalized Voltages")
-plt.show()
-
-# Get apparent conductivities from volts and survey geometry
-apparent_conductivities = 1 / apparent_resistivity_from_voltage(survey, dpred)
-
-# Plot apparent conductivity pseudo-section
-fig = plt.figure(figsize=(12, 5))
-ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
-plot_pseudosection(
-    survey,
-    dobs=apparent_conductivities,
-    plot_type="contourf",
-    ax=ax1,
-    scale="log",
-    cbar_label="S/m",
-    mask_topography=True,
-    contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
-)
-ax1.set_title("Apparent Conductivity")
-plt.show()
-
-#######################################################################
-# Optional: Write out dpred
-# -------------------------
-#
-# Write DC resistivity data, topography and true model
-#
-
-if write_output:
-    dir_path = os.path.dirname(__file__).split(os.path.sep)
-    dir_path.extend(["outputs"])
-    dir_path = os.path.sep.join(dir_path) + os.path.sep
-
-    if not os.path.exists(dir_path):
-        os.mkdir(dir_path)
-
-    # Add 5% Gaussian noise to each datum
-    np.random.seed(225)
-    std = 0.05 * np.abs(dpred)
-    dc_noise = std * np.random.randn(len(dpred))
-    dobs = dpred + dc_noise
-
-    # Create a survey with the original electrode locations
-    # and not the shifted ones
-    # Generate source list for DC survey line
-    source_list = generate_dcip_sources_line(
-        survey_type,
-        data_type,
-        dimension_type,
-        end_locations,
-        topo_xyz,
-        num_rx_per_src,
-        station_separation,
-    )
-    survey_original = dc.survey.Survey(source_list)
-
-    # Write out data at their original electrode locations (not shifted)
-    data_obj = data.Data(survey_original, dobs=dobs, standard_deviation=std)
-    fname = dir_path + "dc_data.obs"
-    write_dcip2d_ubc(fname, data_obj, "volt", "dobs")
-
-    fname = dir_path + "topo_xyz.txt"
-    np.savetxt(fname, topo_xyz, fmt="%.4e")
+"""
diff --git a/tutorials/05-dcr/plot_fwd_3_dcr3d.py b/tutorials/05-dcr/plot_fwd_3_dcr3d.py
index b6ee498bde..d731fbc8fd 100644
--- a/tutorials/05-dcr/plot_fwd_3_dcr3d.py
+++ b/tutorials/05-dcr/plot_fwd_3_dcr3d.py
@@ -3,382 +3,13 @@
 DC Resistivity Forward Simulation in 3D
 =======================================
 
-Here we use the module *simpeg.electromagnetics.static.resistivity* to predict
-DC resistivity data on an OcTree mesh. In this tutorial, we focus on the following:
+.. important::
 
-    - How to define the survey
-    - How to define a tree mesh based on the survey geometry
-    - How to define the forward simulations
-    - How to predict DC data for a synthetic conductivity model
-    - How to include surface topography
-    - The units of the model and resulting data
-    - Plotting DC data in 3D
+    This tutorial has been moved to `User Tutorials
+    `_.
-
-In this case, we simulate dipole-dipole data for one East-West line and two
-North-South lines.
+    Check out the `3D Forward Simulation
+    `_ tutorial.
 """
-
-##############################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-
-from discretize import TreeMesh
-from discretize.utils import mkvc, refine_tree_xyz, active_from_xyz
-
-from simpeg import maps, data
-from simpeg.utils import model_builder
-from simpeg.utils.io_utils.io_utils_electromagnetics import write_dcip_xyz
-from simpeg.electromagnetics.static import resistivity as dc
-from simpeg.electromagnetics.static.utils.static_utils import (
-    generate_dcip_sources_line,
-    apparent_resistivity_from_voltage,
-)
-
-# To plot DC data in 3D, the user must have the plotly package
-try:
-    import plotly
-    from simpeg.electromagnetics.static.utils.static_utils import plot_3d_pseudosection
-
-    has_plotly = True
-except ImportError:
-    has_plotly = False
-    pass
-
-
-mpl.rcParams.update({"font.size": 16})
-write_output = False
-
-# sphinx_gallery_thumbnail_number = 2
-
-#########################################################################
-# Defining Topography
-# -------------------
-#
-# Here we define surface topography as an (N, 3) numpy array. Topography could
-# also be loaded from a file. In our case, our survey takes place within a circular
-# depression.
-#
-
-x_topo, y_topo = np.meshgrid(
-    np.linspace(-2100, 2100, 141), np.linspace(-2000, 2000, 141)
-)
-s = np.sqrt(x_topo**2 + y_topo**2)
-z_topo = 10 + (1 / np.pi) * 140 * (-np.pi / 2 + np.arctan((s - 600.0) / 160.0))
-x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
-topo_xyz = np.c_[x_topo, y_topo, z_topo]
-
-#########################################################################
-# Construct the DC Survey
-# -----------------------
-#
-# Here we define three DC lines that use a dipole-dipole electrode configuration;
-# one line along the East-West direction and two lines along the North-South direction.
-# For each source, we must define the AB electrode locations. For each receiver
-# we must define the MN electrode locations. Instead of creating the survey
-# from scratch (see 1D example), we will use the *generate_dcip_sources_line* utility.
-# This utility will give us the source list for a given DC/IP line. We can append
-# the sources for multiple lines to create the survey.
-#
-
-# Define the parameters for each survey line
-survey_type = "dipole-dipole"
-data_type = "volt"
-dimension_type = "3D"
-end_locations_list = [
-    np.r_[-1000.0, 1000.0, 0.0, 0.0],
-    np.r_[-350.0, -350.0, -1000.0, 1000.0],
-    np.r_[350.0, 350.0, -1000.0, 1000.0],
-]
-station_separation = 100.0
-num_rx_per_src = 8
-
-# The source lists for each line can be appended to create the source
-# list for the whole survey.
-source_list = []
-for ii in range(0, len(end_locations_list)):
-    source_list += generate_dcip_sources_line(
-        survey_type,
-        data_type,
-        dimension_type,
-        end_locations_list[ii],
-        topo_xyz,
-        num_rx_per_src,
-        station_separation,
-    )
-
-# Define the survey
-survey = dc.survey.Survey(source_list)
-
-#################################################################
-# Create OcTree Mesh
-# ------------------
-#
-# Here, we create the OcTree mesh that will be used to predict DC data.
-#
-
-# Define the domain size and minimum cell size
-dh = 25.0  # base cell width
-dom_width_x = 6000.0  # domain width x
-dom_width_y = 6000.0  # domain width y
-dom_width_z = 4000.0  # domain width z
-nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0)))  # num. base cells x
-nbcy = 2 ** int(np.round(np.log(dom_width_y / dh) / np.log(2.0)))  # num. base cells y
-nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0)))  # num. base cells z
-
-# Define the base mesh
-hx = [(dh, nbcx)]
-hy = [(dh, nbcy)]
-hz = [(dh, nbcz)]
-mesh = TreeMesh([hx, hy, hz], x0="CCN")
-
-# Mesh refinement based on topography
-k = np.sqrt(np.sum(topo_xyz[:, 0:2] ** 2, axis=1)) < 1200
-mesh = refine_tree_xyz(
-    mesh, topo_xyz[k, :], octree_levels=[0, 6, 8], method="surface", finalize=False
-)
-
-# Mesh refinement near sources and receivers.
-electrode_locations = np.r_[
-    survey.locations_a, survey.locations_b, survey.locations_m, survey.locations_n
-]
-unique_locations = np.unique(electrode_locations, axis=0)
-mesh = refine_tree_xyz(
-    mesh, unique_locations, octree_levels=[4, 6, 4], method="radial", finalize=False
-)
-
-# Finalize the mesh
-mesh.finalize()
-
-################################################################
-# Create Conductivity Model and Mapping for OcTree Mesh
-# -----------------------------------------------------
-#
-# Here we define the conductivity model that will be used to predict DC
-# resistivity data. The model consists of a conductive sphere and a
-# resistive sphere within a moderately conductive background. Note that
-# you can carry through this workflow with a resistivity model if desired.
-#
-
-# Define conductivity model in S/m (or resistivity model in Ohm m)
-air_value = 1e-8
-background_value = 1e-2
-conductor_value = 1e-1
-resistor_value = 1e-3
-
-# Find active cells in forward modeling (cells below surface)
-ind_active = active_from_xyz(mesh, topo_xyz)
-
-# Define mapping from model to active cells
-nC = int(ind_active.sum())
-conductivity_map = maps.InjectActiveCells(mesh, ind_active, air_value)
-
-# Define model
-conductivity_model = background_value * np.ones(nC)
-
-ind_conductor = model_builder.get_indices_sphere(
-    np.r_[-350.0, 0.0, -300.0], 160.0, mesh.cell_centers[ind_active, :]
-)
-conductivity_model[ind_conductor] = conductor_value
-
-ind_resistor = model_builder.get_indices_sphere(
-    np.r_[350.0, 0.0, -300.0], 160.0, mesh.cell_centers[ind_active, :]
-)
-conductivity_model[ind_resistor] = resistor_value
-
-# Plot Conductivity Model
-fig = plt.figure(figsize=(10, 4))
-
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-log_mod = np.log10(conductivity_model)
-
-ax1 = fig.add_axes([0.15, 0.15, 0.68, 0.75])
-mesh.plot_slice(
-    plotting_map * log_mod,
-    ax=ax1,
-    normal="Y",
-    ind=int(len(mesh.h[1]) / 2),
-    grid=True,
-    clim=(np.log10(resistor_value), np.log10(conductor_value)),
-    pcolor_opts={"cmap": mpl.cm.viridis},
-)
-ax1.set_title("Conductivity Model")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("z (m)")
-ax1.set_xlim([-1000, 1000])
-ax1.set_ylim([-1000, 0])
-
-ax2 = fig.add_axes([0.84, 0.15, 0.03, 0.75])
-norm = mpl.colors.Normalize(
-    vmin=np.log10(resistor_value), vmax=np.log10(conductor_value)
-)
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, cmap=mpl.cm.viridis, norm=norm, orientation="vertical", format="$10^{%.1f}$"
-)
-cbar.set_label("Conductivity [S/m]", rotation=270, labelpad=15, size=12)
-
-##########################################################
-# Project Survey to Discretized Topography
-# ----------------------------------------
-#
-# It is important that electrodes are not modeled as being in the air. Even if the
-# electrodes are properly located along surface topography, they may lie above
-# the *discretized* topography. This step is carried out to ensure all electrodes
-# lie on the discretized surface.
-#
-
-survey.drape_electrodes_on_topography(mesh, ind_active, option="top")
-
-############################################################
-# Predict DC Resistivity Data
-# ---------------------------
-#
-# Here we predict DC resistivity data. If the keyword argument *sigmaMap* is
-# defined, the simulation will expect a conductivity model. If the keyword
-# argument *rhoMap* is defined, the simulation will expect a resistivity model.
-#
-
-# Define the DC simulation
-simulation = dc.simulation.Simulation3DNodal(
-    mesh,
-    survey=survey,
-    sigmaMap=conductivity_map,
-)
-
-# Predict the data by running the simulation. The data are the measured voltage
-# normalized by the source current in units of V/A.
-dpred = simulation.dpred(conductivity_model)
-
-#########################################################
-# Plot DC Data in 3D Pseudosection
-# --------------------------------
-#
-# Here we demonstrate how 3D DC resistivity data can be represented on a 3D
-# pseudosection plot. To use this utility, you must have Python's *plotly*
-# package. Here, we represent the data as apparent conductivities.
-#
-# The *plot_3d_pseudosection* utility allows the user to plot all pseudosection
-# points, or only those that lie within some distance of one or more planes.
-#
-
-# Since the data are normalized voltage, we must convert predicted
-# to apparent conductivities.
-apparent_conductivity = 1 / apparent_resistivity_from_voltage(
-    survey,
-    dpred,
-)
-
-# For large datasets or for surveys with unconventional electrode geometry,
-# interpretation can be challenging if we plot every datum. Here, we plot
-# the data along each survey line by defining planes that coincide with
-# those lines. To plot ALL of the data, simply remove the keyword argument
-# *plane_points* when calling *plot_3d_pseudosection*.
-plane_points = []
-p1, p2, p3 = np.array([-1000, 0, 0]), np.array([1000, 0, 0]), np.array([0, 0, -1000])
-plane_points.append([p1, p2, p3])
-p1, p2, p3 = (
-    np.array([-350, -1000, 0]),
-    np.array([-350, 1000, 0]),
-    np.array([-350, 0, -1000]),
-)
-plane_points.append([p1, p2, p3])
-p1, p2, p3 = (
-    np.array([350, -1000, 0]),
-    np.array([350, 1000, 0]),
-    np.array([350, 0, -1000]),
-)
-plane_points.append([p1, p2, p3])
-
-if has_plotly:
-    fig = plot_3d_pseudosection(
-        survey,
-        apparent_conductivity,
-        scale="log",
-        units="S/m",
-        plane_points=plane_points,
-        plane_distance=15,
-    )
-
-    fig.update_layout(
-        title_text="Apparent Conductivity",
-        title_x=0.5,
-        title_font_size=24,
-        width=650,
-        height=500,
-        scene_camera=dict(center=dict(x=0.05, y=0, z=-0.4)),
-    )
-
-    plotly.io.show(fig)
-
-else:
-    print("INSTALL 'PLOTLY' TO VISUALIZE 3D PSEUDOSECTIONS")
-
-########################################################
-# Optional: Write Predicted DC Data
-# ---------------------------------
-#
-# Write DC resistivity data, topography and true model
-#
-
-if write_output:
-    dir_path = os.path.dirname(__file__).split(os.path.sep)
-    dir_path.extend(["outputs"])
-    dir_path = os.path.sep.join(dir_path) + os.path.sep
-
-    if not os.path.exists(dir_path):
-        os.mkdir(dir_path)
-
-    # Add 10% Gaussian noise to each datum
-    np.random.seed(433)
-    std = 0.1 * np.abs(dpred)
-    noise = std * np.random.randn(len(dpred))
-    dobs = dpred + noise
-
-    # Create dictionary that stores line IDs
-    N = int(survey.nD / len(end_locations_list))
-    lineID = np.r_[np.ones(N), 2 * np.ones(N), 3 * np.ones(N)]
-    out_dict = {"LINEID": lineID}
-
-    # Create a survey with the original electrode locations
-    # and not the shifted ones
-    source_list = []
-    for ii in range(0, len(end_locations_list)):
-        source_list += generate_dcip_sources_line(
-            survey_type,
-            data_type,
-            dimension_type,
-            end_locations_list[ii],
-            topo_xyz,
-            num_rx_per_src,
-            station_separation,
-        )
-    survey_original = dc.survey.Survey(source_list)
-
-    # Write out data at their original electrode locations (not shifted)
-    data_obj = data.Data(survey_original, dobs=dobs, standard_deviation=std)
-
-    fname = dir_path + "dc_data.xyz"
-    write_dcip_xyz(
-        fname,
-        data_obj,
-        data_header="V/A",
-        uncertainties_header="UNCERT",
-        out_dict=out_dict,
-    )
-
-    fname = dir_path + "topo_xyz.txt"
-    np.savetxt(fname, topo_xyz, fmt="%.4e")
diff --git a/tutorials/05-dcr/plot_inv_1_dcr_sounding.py b/tutorials/05-dcr/plot_inv_1_dcr_sounding.py
index 75deb01d18..54159c7a65 100644
--- a/tutorials/05-dcr/plot_inv_1_dcr_sounding.py
+++ b/tutorials/05-dcr/plot_inv_1_dcr_sounding.py
@@ -3,324 +3,16 @@
 Least-Squares 1D Inversion of Sounding Data
 ===========================================
 
-Here we use the module *simpeg.electromagnetics.static.resistivity* to invert
-DC resistivity sounding data and recover a 1D electrical resistivity model.
-In this tutorial, we focus on the following:
+.. important::
 
-    - How to define sources and receivers from a survey file
-    - How to define the survey
-    - 1D inversion of DC resistivity data
+    This tutorial has been moved to `User Tutorials
+    `_.
-
-For this tutorial, we will invert sounding data collected over a layered Earth using
-a Wenner array. The end product is a layered Earth model which explains the data.
+    Check out the `Weighted Least-Squares Inversion
+    `_
+    section in the
+    `1D Inversion for a Single Sounding
+    `_ tutorial.
 """
-
-#########################################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import tarfile
-
-from discretize import TensorMesh
-
-from simpeg import (
-    maps,
-    data,
-    data_misfit,
-    regularization,
-    optimization,
-    inverse_problem,
-    inversion,
-    directives,
-    utils,
-)
-from simpeg.electromagnetics.static import resistivity as dc
-from simpeg.utils import plot_1d_layer_model
-
-mpl.rcParams.update({"font.size": 16})
-
-# sphinx_gallery_thumbnail_number = 2
-
-#############################################
-# Define File Names
-# -----------------
-#
-# Here we provide the file paths to assets we need to run the inversion. The
-# path to the true model is also provided for comparison with the inversion
-# results. These files are stored as a tar-file on our google cloud bucket:
-# "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz"
-#
-
-# storage bucket where we have the data
-data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz"
-
-# download the data
-downloaded_data = utils.download(data_source, overwrite=True)
-
-# unzip the tarfile
-tar = tarfile.open(downloaded_data, "r")
-tar.extractall()
-tar.close()
-
-# path to the directory containing our data
-dir_path = downloaded_data.split(".")[0] + os.path.sep
-
-# files to work with
-data_filename = dir_path + "app_res_1d_data.dobs"
-
-
-#############################################
-# Load Data, Define Survey and Plot
-# ---------------------------------
-#
-# Here we load the observed data, define the DC survey geometry and plot the
-# data values.
-#
-
-# Load data
-dobs = np.loadtxt(str(data_filename))
-
-# Extract source and receiver electrode locations and the observed data
-A_electrodes = dobs[:, 0:3]
-B_electrodes = dobs[:, 3:6]
-M_electrodes = dobs[:, 6:9]
-N_electrodes = dobs[:, 9:12]
-dobs = dobs[:, -1]
-
-# Define survey
-unique_tx, k = np.unique(np.c_[A_electrodes, B_electrodes], axis=0, return_index=True)
-n_sources = len(k)
-k = np.sort(k)
-k = np.r_[k, len(k) + 1]
-
-source_list = []
-for ii in range(0, n_sources):
-    # MN electrode locations for receivers. Each is an (N, 3) numpy array
-    M_locations = M_electrodes[k[ii] : k[ii + 1], :]
-    N_locations = N_electrodes[k[ii] : k[ii + 1], :]
-    receiver_list = [
-        dc.receivers.Dipole(
-            M_locations,
-            N_locations,
-            data_type="apparent_resistivity",
-        )
-    ]
-
-    # AB electrode locations for source. Each is a (1, 3) numpy array
-    A_location = A_electrodes[k[ii], :]
-    B_location = B_electrodes[k[ii], :]
-    source_list.append(dc.sources.Dipole(receiver_list, A_location, B_location))
-
-# Define survey
-survey = dc.Survey(source_list)
-
-# Plot apparent resistivities on sounding curve as a function of Wenner separation
-# parameter.
-electrode_separations = 0.5 * np.sqrt(
-    np.sum((survey.locations_a - survey.locations_b) ** 2, axis=1)
-)
-
-fig = plt.figure(figsize=(11, 5))
-mpl.rcParams.update({"font.size": 14})
-ax1 = fig.add_axes([0.15, 0.1, 0.7, 0.85])
-ax1.semilogy(electrode_separations, dobs, "b")
-ax1.set_xlabel("AB/2 (m)")
-ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
-plt.show()
-
-###############################################
-# Assign Uncertainties
-# --------------------
-#
-# Inversion with SimPEG requires that we define standard deviation on our data.
-# This represents our estimate of the noise in our data. For DC sounding data,
-# a relative error is applied to each datum. For this tutorial, the relative
-# error on each datum will be 2%.
-
-std = 0.02 * np.abs(dobs)
-
-
-###############################################
-# Define Data
-# --------------------
-#
-# Here is where we define the data that are inverted. The data are defined by
-# the survey, the observation values and the standard deviation.
-#
-
-data_object = data.Data(survey, dobs=dobs, standard_deviation=std)
-
-
-###############################################
-# Defining a 1D Layered Earth (1D Tensor Mesh)
-# --------------------------------------------
-#
-# Here, we define the layer thicknesses for our 1D simulation. To do this, we use
-# the TensorMesh class.
-#
-
-# Define layer thicknesses
-layer_thicknesses = 5 * np.logspace(0, 1, 25)
-
-# Define a mesh for plotting and regularization.
-mesh = TensorMesh([(np.r_[layer_thicknesses, layer_thicknesses[-1]])], "0")
-
-print(mesh)
-
-###############################################################
-# Define a Starting and Reference Model
-# -------------------------------------
-#
-# Here, we create starting and/or reference models for the inversion as
-# well as the mapping from the model space to the active cells. Starting and
-# reference models can be a constant background value or contain a-priori
-# structures. Here, the starting model corresponds to a 200 Ohm meter
-# halfspace.
-#
-# Define log-resistivity values for each layer since our model is the
-# log-resistivity. Don't make the values 0!
-# Otherwise the gradient for the 1st iteration is zero and the inversion will
-# not converge.
-
-# Define model. A resistivity (Ohm meters) or conductivity (S/m) for each layer.
-starting_model = np.log(2e2 * np.ones((len(layer_thicknesses) + 1)))
-
-# Define mapping from model to active cells.
-model_map = maps.IdentityMap(nP=len(starting_model)) * maps.ExpMap()
-
-#######################################################################
-# Define the Physics
-# ------------------
-#
-# Here we define the physics of the problem using the Simulation1DLayers class.
-#
-
-simulation = dc.simulation_1d.Simulation1DLayers(
-    survey=survey,
-    rhoMap=model_map,
-    thicknesses=layer_thicknesses,
-)
-
-
-#######################################################################
-# Define Inverse Problem
-# ----------------------
-#
-# The inverse problem is defined by 3 things:
-#
-# 1) Data Misfit: a measure of how well our recovered model explains the field data
-# 2) Regularization: constraints placed on the recovered model and a priori information
-# 3) Optimization: the numerical approach used to solve the inverse problem
-#
-
-# Define the data misfit. Here the data misfit is the L2 norm of the weighted
-# residual between the observed data and the data predicted for a given model.
-# Within the data misfit, the residual between predicted and observed data are
-# normalized by the data's standard deviation.
-dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object)
-
-# Define the regularization (model objective function)
-reg = regularization.WeightedLeastSquares(
-    mesh, alpha_s=1.0, alpha_x=1.0, reference_model=starting_model
-)
-
-# Define how the optimization problem is solved. Here we will use an inexact
-# Gauss-Newton approach that employs the conjugate gradient solver.
-opt = optimization.InexactGaussNewton(maxIter=30, maxIterCG=20)
-
-# Define the inverse problem
-inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)
-
-#######################################################################
-# Define Inversion Directives
-# ---------------------------
-#
-# Here we define any directives that are carried out during the inversion. This
-# includes the cooling schedule for the trade-off parameter (beta), stopping
-# criteria for the inversion and saving inversion results at each iteration.
-#
-
-# Defining a starting value for the trade-off parameter (beta) between the data
-# misfit and the regularization.
-starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)
-
-# Set the rate of reduction in trade-off parameter (beta) each time the
-# inverse problem is solved. And set the number of Gauss-Newton iterations
-# for each trade-off parameter value.
-beta_schedule = directives.BetaSchedule(coolingFactor=5.0, coolingRate=3.0)
-
-# Apply and update sensitivity weighting as the model updates
-update_sensitivity_weights = directives.UpdateSensitivityWeights()
-
-# Options for outputting recovered models and predicted data for each beta.
-save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
-
-# Setting a stopping criterion for the inversion.
-target_misfit = directives.TargetMisfit(chifact=1)
-
-# The directives are defined as a list.
-directives_list = [
-    update_sensitivity_weights,
-    starting_beta,
-    beta_schedule,
-    save_iteration,
-    target_misfit,
-]
-
-#####################################################################
-# Running the Inversion
-# ---------------------
-#
-# To define the inversion object, we need to define the inversion problem and
-# the set of directives. We can then run the inversion.
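Since ``save_iteration`` records the inversion history, the convergence behaviour can be examined once the inversion in the next block has run. A sketch, under the assumption that ``SaveOutputEveryIteration`` keeps its results in memory when ``save_txt=False``:

.. code:: python

    # After inv.run(...) below has finished, plot beta, phi_d and phi_m
    # versus iteration, and the data misfit against the target.
    save_iteration.plot_tikhonov_curves()
    save_iteration.plot_misfit_curves()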
-#
-
-# Here we combine the inverse problem and the set of directives
-inv = inversion.BaseInversion(inv_prob, directives_list)
-
-# Run the inversion
-recovered_model = inv.run(starting_model)
-
-############################################################
-# Examining the Results
-# ---------------------
-#
-
-# Define true model and layer thicknesses
-true_model = np.r_[1e3, 4e3, 2e2]
-true_layers = np.r_[100.0, 100.0]
-
-# Plot true model and recovered model
-fig = plt.figure(figsize=(6, 4))
-x_min = np.min([np.min(model_map * recovered_model), np.min(true_model)])
-x_max = np.max([np.max(model_map * recovered_model), np.max(true_model)])
-
-ax1 = fig.add_axes([0.2, 0.15, 0.7, 0.7])
-plot_1d_layer_model(true_layers, true_model, ax=ax1, plot_elevation=True, color="b")
-plot_1d_layer_model(
-    layer_thicknesses,
-    model_map * recovered_model,
-    ax=ax1,
-    plot_elevation=True,
-    color="r",
-)
-ax1.set_xlabel(r"Resistivity ($\Omega m$)")
-ax1.set_xlim(0.9 * x_min, 1.1 * x_max)
-ax1.legend(["True Model", "Recovered Model"])
-
-# Plot the true and apparent resistivities on a sounding curve
-fig = plt.figure(figsize=(11, 5))
-ax1 = fig.add_axes([0.2, 0.1, 0.6, 0.8])
-ax1.semilogy(electrode_separations, dobs, "b")
-ax1.semilogy(electrode_separations, inv_prob.dpred, "r")
-ax1.set_xlabel("AB/2 (m)")
-ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
-ax1.legend(["True Sounding Curve", "Predicted Sounding Curve"])
-plt.show()
diff --git a/tutorials/05-dcr/plot_inv_1_dcr_sounding_irls.py b/tutorials/05-dcr/plot_inv_1_dcr_sounding_irls.py
index a9e751bbb4..8d203ad958 100644
--- a/tutorials/05-dcr/plot_inv_1_dcr_sounding_irls.py
+++ b/tutorials/05-dcr/plot_inv_1_dcr_sounding_irls.py
@@ -3,325 +3,16 @@
 Sparse 1D Inversion of Sounding Data
 ====================================
 
-Here we use the module *simpeg.electromagnetics.static.resistivity* to invert
-DC resistivity sounding data and recover a 1D electrical resistivity model.
-In this tutorial, we focus on the following:
+.. important::
 
-    - How to define sources and receivers from a survey file
-    - How to define the survey
-    - 1D inversion of DC resistivity data with iteratively re-weighted least-squares
+    This tutorial has been moved to `User Tutorials
+    `_.
-
-For this tutorial, we will invert sounding data collected over a layered Earth using
-a Wenner array. The end product is a layered Earth model which explains the data.
+    Check out the `Iteratively Re-weighted Least-Squares Inversion
+    `_
+    section in the
+    `1D Inversion for a Single Sounding
+    `_ tutorial.
 """
-
-#########################################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import tarfile
-
-from discretize import TensorMesh
-
-from simpeg import (
-    maps,
-    data,
-    data_misfit,
-    regularization,
-    optimization,
-    inverse_problem,
-    inversion,
-    directives,
-    utils,
-)
-from simpeg.electromagnetics.static import resistivity as dc
-from simpeg.utils import plot_1d_layer_model
-
-mpl.rcParams.update({"font.size": 16})
-
-# sphinx_gallery_thumbnail_number = 2
-
-#############################################
-# Define File Names
-# -----------------
-#
-# Here we provide the file paths to assets we need to run the inversion. The
-# path to the true model is also provided for comparison with the inversion
-# results.
These files are stored as a tar-file on our google cloud bucket: -# "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz" -# - -# storage bucket where we have the data -data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz" - -# download the data -downloaded_data = utils.download(data_source, overwrite=True) - -# unzip the tarfile -tar = tarfile.open(downloaded_data, "r") -tar.extractall() -tar.close() - -# path to the directory containing our data -dir_path = downloaded_data.split(".")[0] + os.path.sep - -# files to work with -data_filename = dir_path + "app_res_1d_data.dobs" - - -############################################# -# Load Data, Define Survey and Plot -# --------------------------------- -# -# Here we load the observed data, define the DC survey geometry and plot the -# data values. -# - -# Load data -dobs = np.loadtxt(str(data_filename)) - -# Extract source and receiver electrode locations and the observed data -A_electrodes = dobs[:, 0:3] -B_electrodes = dobs[:, 3:6] -M_electrodes = dobs[:, 6:9] -N_electrodes = dobs[:, 9:12] -dobs = dobs[:, -1] - -# Define survey -unique_tx, k = np.unique(np.c_[A_electrodes, B_electrodes], axis=0, return_index=True) -n_sources = len(k) -k = np.sort(k) -k = np.r_[k, len(k) + 1] - -source_list = [] -for ii in range(0, n_sources): - # MN electrode locations for receivers. Each is an (N, 3) numpy array - M_locations = M_electrodes[k[ii] : k[ii + 1], :] - N_locations = N_electrodes[k[ii] : k[ii + 1], :] - receiver_list = [ - dc.receivers.Dipole( - M_locations, - N_locations, - data_type="apparent_resistivity", - ) - ] - - # AB electrode locations for source. Each is a (1, 3) numpy array - A_location = A_electrodes[k[ii], :] - B_location = B_electrodes[k[ii], :] - source_list.append(dc.sources.Dipole(receiver_list, A_location, B_location)) - -# Define survey -survey = dc.Survey(source_list) - -# Plot apparent resistivities on sounding curve as a function of Wenner separation -# parameter. -electrode_separations = 0.5 * np.sqrt( - np.sum((survey.locations_a - survey.locations_b) ** 2, axis=1) -) - -fig = plt.figure(figsize=(11, 5)) -mpl.rcParams.update({"font.size": 14}) -ax1 = fig.add_axes([0.15, 0.1, 0.7, 0.85]) -ax1.semilogy(electrode_separations, dobs, "b") -ax1.set_xlabel("AB/2 (m)") -ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)") -plt.show() - -############################################### -# Assign Uncertainties -# -------------------- -# -# Inversion with SimPEG requires that we define standard deviation on our data. -# This represents our estimate of the noise in our data. For DC sounding data, -# a relative error is applied to each datum. For this tutorial, the relative -# error on each datum will be 2%. - -std = 0.02 * np.abs(dobs) - - -############################################### -# Define Data -# -------------------- -# -# Here is where we define the data that are inverted. The data are defined by -# the survey, the observation values and the standard deviation. -# - -data_object = data.Data(survey, dobs=dobs, standard_deviation=std) - - -############################################### -# Defining a 1D Layered Earth (1D Tensor Mesh) -# -------------------------------------------- -# -# Here, we define the layer thicknesses for our 1D simulation. To do this, we use -# the TensorMesh class. -# - -# Define layer thicknesses -layer_thicknesses = 5 * np.logspace(0, 1, 25) - -# Define a mesh for plotting and regularization. 
-mesh = TensorMesh([(np.r_[layer_thicknesses, layer_thicknesses[-1]])], "0") - -print(mesh) - -############################################################### -# Define a Starting and Reference Model -# ------------------------------------- -# -# Here, we create starting and/or reference models for the inversion as -# well as the mapping from the model space to the active cells. Starting and -# reference models can be a constant background value or contain a-priori -# structures. Here, the starting model is log(1000) Ohm meters. -# -# Define log-resistivity values for each layer since our model is the -# log-resistivity. Don't make the values 0! -# Otherwise the gradient for the 1st iteration is zero and the inversion will -# not converge. - -# Define model. A resistivity (Ohm meters) or conductivity (S/m) for each layer. -starting_model = np.log(2e2 * np.ones((len(layer_thicknesses) + 1))) - -# Define mapping from model to active cells. -model_map = maps.IdentityMap(nP=len(starting_model)) * maps.ExpMap() - -####################################################################### -# Define the Physics -# ------------------ -# -# Here we define the physics of the problem using the Simulation1DLayers class. -# - -simulation = dc.simulation_1d.Simulation1DLayers( - survey=survey, - rhoMap=model_map, - thicknesses=layer_thicknesses, -) - - -####################################################################### -# Define Inverse Problem -# ---------------------- -# -# The inverse problem is defined by 3 things: -# -# 1) Data Misfit: a measure of how well our recovered model explains the field data -# 2) Regularization: constraints placed on the recovered model and a priori information -# 3) Optimization: the numerical approach used to solve the inverse problem -# -# - -# Define the data misfit. Here the data misfit is the L2 norm of the weighted -# residual between the observed data and the data predicted for a given model. -# Within the data misfit, the residual between predicted and observed data are -# normalized by the data's standard deviation. -dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object) - -# Define the regularization (model objective function). Here, 'p' defines the -# the norm of the smallness term and 'q' defines the norm of the smoothness -# term. -reg = regularization.Sparse(mesh, mapping=model_map) -reg.reference_model = starting_model -p = 0 -q = 0 -reg.norms = [p, q] - -# Define how the optimization problem is solved. Here we will use an inexact -# Gauss-Newton approach that employs the conjugate gradient solver. -opt = optimization.ProjectedGNCG(maxIter=100, maxIterLS=20, maxIterCG=20, tolCG=1e-3) - -# Define the inverse problem -inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) - -####################################################################### -# Define Inversion Directives -# --------------------------- -# -# Here we define any directives that are carried out during the inversion. This -# includes the cooling schedule for the trade-off parameter (beta), stopping -# criteria for the inversion and saving inversion results at each iteration. -# - -# Apply and update sensitivity weighting as the model updates -update_sensitivity_weights = directives.UpdateSensitivityWeights() - -# Reach target misfit for L2 solution, then use IRLS until model stops changing. 
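For intuition on what the p = q = 0 norms chosen above ask of the UpdateIRLS directive defined next: each IRLS cycle replaces the sparse norm with a weighted least-squares problem whose weights grow as model values shrink. The following is a minimal standalone sketch of the standard IRLS weighting idea, simplified for illustration and not copied from SimPEG's implementation:

.. code:: python

    import numpy as np

    def irls_weights(m, p, epsilon=1e-2):
        # Weighted-L2 surrogate for sum(|m_i|**p):
        # r_i = (m_i**2 + epsilon**2) ** (p / 2 - 1)
        return (m**2 + epsilon**2) ** (p / 2.0 - 1.0)

    m = np.r_[0.0, 0.01, 0.1, 1.0]
    print(irls_weights(m, p=0))  # small entries get large weights -> driven to zero
    print(irls_weights(m, p=2))  # uniform weights -> ordinary least squares

With p = 0, elements near zero are penalized ever more strongly, which is what produces the compact, blocky models this tutorial targets.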
-IRLS = directives.UpdateIRLS(max_irls_iterations=40, f_min_change=1e-5) - -# Defining a starting value for the trade-off parameter (beta) between the data -# misfit and the regularization. -starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=20) - -# Update the preconditionner -update_Jacobi = directives.UpdatePreconditioner() - -# Options for outputting recovered models and predicted data for each beta. -save_iteration = directives.SaveOutputEveryIteration(save_txt=False) - -# The directives are defined as a list. -directives_list = [ - update_sensitivity_weights, - IRLS, - starting_beta, - update_Jacobi, - save_iteration, -] - -##################################################################### -# Running the Inversion -# --------------------- -# -# To define the inversion object, we need to define the inversion problem and -# the set of directives. We can then run the inversion. -# - -# Here we combine the inverse problem and the set of directives -inv = inversion.BaseInversion(inv_prob, directives_list) - -# Run the inversion -recovered_model = inv.run(starting_model) - -############################################################ -# Examining the Results -# --------------------- -# - -# Define true model and layer thicknesses -true_model = np.r_[1e3, 4e3, 2e2] -true_layers = np.r_[100.0, 100.0] - -# Extract Least-Squares model -l2_model = inv_prob.l2model - -# Plot true model and recovered model -fig = plt.figure(figsize=(6, 4)) -x_min = np.min(np.r_[model_map * recovered_model, model_map * l2_model, true_model]) -x_max = np.max(np.r_[model_map * recovered_model, model_map * l2_model, true_model]) - -ax1 = fig.add_axes([0.2, 0.15, 0.7, 0.7]) -plot_1d_layer_model(true_layers, true_model, ax=ax1, color="k") -plot_1d_layer_model(layer_thicknesses, model_map * l2_model, ax=ax1, color="b") -plot_1d_layer_model(layer_thicknesses, model_map * recovered_model, ax=ax1, color="r") -ax1.set_xlabel(r"Resistivity ($\Omega m$)") -ax1.set_xlim(0.9 * x_min, 1.1 * x_max) -ax1.legend(["True Model", "L2-Model", "Sparse Model"]) - -# Plot the true and apparent resistivities on a sounding curve -fig = plt.figure(figsize=(11, 5)) -ax1 = fig.add_axes([0.2, 0.1, 0.6, 0.8]) -ax1.semilogy(electrode_separations, dobs, "k") -ax1.semilogy(electrode_separations, simulation.dpred(l2_model), "b") -ax1.semilogy(electrode_separations, simulation.dpred(recovered_model), "r") -ax1.set_xlabel("AB/2 (m)") -ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)") -ax1.legend(["True Sounding Curve", "Predicted (L2-Model)", "Predicted (Sparse)"]) -plt.show() diff --git a/tutorials/05-dcr/plot_inv_1_dcr_sounding_parametric.py b/tutorials/05-dcr/plot_inv_1_dcr_sounding_parametric.py index 63936d4e9d..b0686b6662 100644 --- a/tutorials/05-dcr/plot_inv_1_dcr_sounding_parametric.py +++ b/tutorials/05-dcr/plot_inv_1_dcr_sounding_parametric.py @@ -3,326 +3,17 @@ Parametric 1D Inversion of Sounding Data ======================================== -Here we use the module *simpeg.electromangetics.static.resistivity* to invert -DC resistivity sounding data and recover the resistivities and layer thicknesses -for a 1D layered Earth. In this tutorial, we focus on the following: +.. important:: - - How to define sources and receivers from a survey file - - How to define the survey - - Defining a model that consists of resistivities and layer thicknesses + This tutorial has been moved to `User Tutorials + `_. -For this tutorial, we will invert sounding data collected over a layered Earth using -a Wenner array. 
The end product is a layered Earth model which explains the data.
+    Check out the `Parametric Inversion
+    `_
+    section in the
+    `1D Inversion for a Single Sounding
+    `_ tutorial.
 
 """
-
-#########################################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import tarfile
-
-from discretize import TensorMesh
-
-from simpeg import (
-    maps,
-    data,
-    data_misfit,
-    regularization,
-    optimization,
-    inverse_problem,
-    inversion,
-    directives,
-    utils,
-)
-from simpeg.electromagnetics.static import resistivity as dc
-from simpeg.utils import plot_1d_layer_model
-
-mpl.rcParams.update({"font.size": 16})
-
-# sphinx_gallery_thumbnail_number = 2
-
-
-#############################################
-# Define File Names
-# -----------------
-#
-# Here we provide the file paths to assets we need to run the inversion. The
-# path to the true model is also provided for comparison with the inversion
-# results. These files are stored as a tar-file on our Google Cloud bucket:
-# "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz"
-#
-
-# storage bucket where we have the data
-data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr1d.tar.gz"
-
-# download the data
-downloaded_data = utils.download(data_source, overwrite=True)
-
-# unzip the tarfile
-tar = tarfile.open(downloaded_data, "r")
-tar.extractall()
-tar.close()
-
-# path to the directory containing our data
-dir_path = downloaded_data.split(".")[0] + os.path.sep
-
-# files to work with
-data_filename = dir_path + "app_res_1d_data.dobs"
-
-
-#############################################
-# Load Data, Define Survey and Plot
-# ---------------------------------
-#
-# Here we load the observed data, define the DC survey geometry and plot the
-# data values.
-#
-
-# Load data
-dobs = np.loadtxt(str(data_filename))
-
-A_electrodes = dobs[:, 0:3]
-B_electrodes = dobs[:, 3:6]
-M_electrodes = dobs[:, 6:9]
-N_electrodes = dobs[:, 9:12]
-dobs = dobs[:, -1]
-
-# Define survey
-unique_tx, k = np.unique(np.c_[A_electrodes, B_electrodes], axis=0, return_index=True)
-n_sources = len(k)
-k = np.sort(k)
-k = np.r_[k, len(k) + 1]
-
-source_list = []
-for ii in range(0, n_sources):
-    # MN electrode locations for receivers. Each is an (N, 3) numpy array
-    M_locations = M_electrodes[k[ii] : k[ii + 1], :]
-    N_locations = N_electrodes[k[ii] : k[ii + 1], :]
-    receiver_list = [
-        dc.receivers.Dipole(
-            M_locations,
-            N_locations,
-            data_type="apparent_resistivity",
-        )
-    ]
-
-    # AB electrode locations for source. Each is a (1, 3) numpy array
-    A_location = A_electrodes[k[ii], :]
-    B_location = B_electrodes[k[ii], :]
-    source_list.append(dc.sources.Dipole(receiver_list, A_location, B_location))
-
-# Define survey
-survey = dc.Survey(source_list)
-
-# Plot apparent resistivities on sounding curve as a function of Wenner separation
-# parameter.
-electrode_separations = 0.5 * np.sqrt(
-    np.sum((survey.locations_a - survey.locations_b) ** 2, axis=1)
-)
-
-fig = plt.figure(figsize=(11, 5))
-mpl.rcParams.update({"font.size": 14})
-ax1 = fig.add_axes([0.15, 0.1, 0.7, 0.85])
-ax1.semilogy(electrode_separations, dobs, "b")
-ax1.set_xlabel("AB/2 (m)")
-ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
-plt.show()
-
-###############################################
-# Assign Uncertainties
-# --------------------
-#
-# Inversion with SimPEG requires that we define standard deviation on our data.
-# This represents our estimate of the noise in our data. For DC sounding data, -# a relative error is applied to each datum. For this tutorial, the relative -# error on each datum will be 2.5%. -# - -std = 0.025 * dobs - - -############################################### -# Define Data -# -------------------- -# -# Here is where we define the data that are inverted. The data are defined by -# the survey, the observation values and the standard deviation. -# - -data_object = data.Data(survey, dobs=dobs, standard_deviation=std) - -############################################################### -# Defining the Starting Model and Mapping -# --------------------------------------- -# -# In this case, the model consists of parameters which define the respective -# resistivities and thickness for a set of horizontal layer. Here, we choose to -# define a model consisting of 3 layers. -# - -# Define the resistivities and thicknesses for the starting model. The thickness -# of the bottom layer is assumed to extend downward to infinity so we don't -# need to define it. -resistivities = np.r_[1e3, 1e3, 1e3] -layer_thicknesses = np.r_[50.0, 50.0] - -# Define a mesh for plotting and regularization. -mesh = TensorMesh([(np.r_[layer_thicknesses, layer_thicknesses[-1]])], "0") -print(mesh) - -# Define model. We are inverting for the layer resistivities and layer thicknesses. -# Since the bottom layer extends to infinity, it is not a model parameter for -# which we need to invert. For a 3 layer model, there is a total of 5 parameters. -# For stability, our model is the log-resistivity and log-thickness. -starting_model = np.r_[np.log(resistivities), np.log(layer_thicknesses)] - -# Since the model contains two different properties for each layer, we use -# wire maps to distinguish the properties. -wire_map = maps.Wires(("rho", mesh.nC), ("t", mesh.nC - 1)) -resistivity_map = maps.ExpMap(nP=mesh.nC) * wire_map.rho -layer_map = maps.ExpMap(nP=mesh.nC - 1) * wire_map.t - -####################################################################### -# Define the Physics -# ------------------ -# -# Here we define the physics of the problem. The data consists of apparent -# resistivity values. This is defined here. -# - -simulation = dc.simulation_1d.Simulation1DLayers( - survey=survey, - rhoMap=resistivity_map, - thicknessesMap=layer_map, -) - -####################################################################### -# Define Inverse Problem -# ---------------------- -# -# The inverse problem is defined by 3 things: -# -# 1) Data Misfit: a measure of how well our recovered model explains the field data -# 2) Regularization: constraints placed on the recovered model and a priori information -# 3) Optimization: the numerical approach used to solve the inverse problem -# -# - -# Define the data misfit. Here the data misfit is the L2 norm of the weighted -# residual between the observed data and the data predicted for a given model. -# Within the data misfit, the residual between predicted and observed data are -# normalized by the data's standard deviation. 
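Concretely, the misfit object assembled in the next line measures the following quantity (standard notation, up to a constant factor; this is a paraphrase for orientation, not a quote from the SimPEG documentation):

.. math::

    \phi_d(\mathbf{m}) =
    \big\| \mathbf{W}_d \, (\mathbf{d}_\text{pred}(\mathbf{m}) - \mathbf{d}_\text{obs}) \big\|_2^2,
    \qquad
    \mathbf{W}_d = \operatorname{diag}(1/\sigma_1, \ldots, 1/\sigma_N),

where the :math:`\sigma_i` are the standard deviations assigned above.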
-dmis = data_misfit.L2DataMisfit(simulation=simulation, data=data_object) - -# Define the regularization on the parameters related to resistivity -mesh_rho = TensorMesh([mesh.h[0].size]) -reg_rho = regularization.WeightedLeastSquares( - mesh_rho, alpha_s=0.01, alpha_x=1, mapping=wire_map.rho -) - -# Define the regularization on the parameters related to layer thickness -mesh_t = TensorMesh([mesh.h[0].size - 1]) -reg_t = regularization.WeightedLeastSquares( - mesh_t, alpha_s=0.01, alpha_x=1, mapping=wire_map.t -) - -# Combine to make regularization for the inversion problem -reg = reg_rho + reg_t - -# Define how the optimization problem is solved. Here we will use an inexact -# Gauss-Newton approach that employs the conjugate gradient solver. -opt = optimization.InexactGaussNewton(maxIter=50, maxIterCG=30) - -# Define the inverse problem -inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) - -####################################################################### -# Define Inversion Directives -# --------------------------- -# -# Here we define any directives that are carried out during the inversion. This -# includes the cooling schedule for the trade-off parameter (beta), stopping -# criteria for the inversion and saving inversion results at each iteration. -# - -# Defining a starting value for the trade-off parameter (beta) between the data -# misfit and the regularization. -starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e1) - -# Set the rate of reduction in trade-off parameter (beta) each time the -# the inverse problem is solved. And set the number of Gauss-Newton iterations -# for each trade-off paramter value. -beta_schedule = directives.BetaSchedule(coolingFactor=5.0, coolingRate=3.0) - -# Options for outputting recovered models and predicted data for each beta. -save_iteration = directives.SaveOutputEveryIteration(save_txt=False) - -# Setting a stopping criteria for the inversion. -target_misfit = directives.TargetMisfit(chifact=0.1) - -# The directives are defined in a list -directives_list = [ - starting_beta, - beta_schedule, - target_misfit, -] - -##################################################################### -# Running the Inversion -# --------------------- -# -# To define the inversion object, we need to define the inversion problem and -# the set of directives. We can then run the inversion. 
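Before running the inversion, it can help to verify how the wire maps defined above carve the model vector into resistivities and thicknesses. Below is a minimal sketch with made-up values; it assumes only the simpeg.maps behaviour already used in this tutorial (three log-resistivities followed by two log-thicknesses):

.. code:: python

    import numpy as np
    from simpeg import maps

    # Hypothetical 3-layer model: log-resistivities, then log-thicknesses.
    test_model = np.r_[np.log([1e3, 1e3, 1e3]), np.log([50.0, 50.0])]

    # Same wiring as above: 3 resistivity parameters, 2 thickness parameters.
    wires = maps.Wires(("rho", 3), ("t", 2))
    rho_map = maps.ExpMap(nP=3) * wires.rho
    t_map = maps.ExpMap(nP=2) * wires.t

    print(rho_map * test_model)  # [1000. 1000. 1000.]  resistivities (Ohm m)
    print(t_map * test_model)    # [50. 50.]            thicknesses (m)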
-#
-
-# Here we combine the inverse problem and the set of directives
-inv = inversion.BaseInversion(inv_prob, directiveList=directives_list)
-
-# Run the inversion
-recovered_model = inv.run(starting_model)
-
-############################################################
-# Examining the Results
-# ---------------------
-#
-
-# Define true model and layer thicknesses
-true_model = np.r_[1e3, 4e3, 2e2]
-true_layers = np.r_[100.0, 100.0]
-
-# Plot true model and recovered model
-fig = plt.figure(figsize=(5, 5))
-
-x_min = np.min([np.min(resistivity_map * recovered_model), np.min(true_model)])
-x_max = np.max([np.max(resistivity_map * recovered_model), np.max(true_model)])
-
-ax1 = fig.add_axes([0.2, 0.15, 0.7, 0.7])
-plot_1d_layer_model(true_layers, true_model, ax=ax1, plot_elevation=True, color="b")
-plot_1d_layer_model(
-    layer_map * recovered_model,
-    resistivity_map * recovered_model,
-    ax=ax1,
-    plot_elevation=True,
-    color="r",
-)
-ax1.set_xlabel(r"Resistivity ($\Omega m$)")
-ax1.set_xlim(0.9 * x_min, 1.1 * x_max)
-ax1.legend(["True Model", "Recovered Model"])
-
-# Plot the true and apparent resistivities on a sounding curve
-fig = plt.figure(figsize=(11, 5))
-ax1 = fig.add_axes([0.2, 0.05, 0.6, 0.8])
-ax1.semilogy(electrode_separations, dobs, "b")
-ax1.semilogy(electrode_separations, inv_prob.dpred, "r")
-ax1.set_xlabel("AB/2 (m)")
-ax1.set_ylabel(r"Apparent Resistivity ($\Omega m$)")
-ax1.legend(["True Sounding Curve", "Predicted Sounding Curve"])
-plt.show()
diff --git a/tutorials/05-dcr/plot_inv_2_dcr2d.py b/tutorials/05-dcr/plot_inv_2_dcr2d.py
index 063ccedc70..e0766f3be1 100644
--- a/tutorials/05-dcr/plot_inv_2_dcr2d.py
+++ b/tutorials/05-dcr/plot_inv_2_dcr2d.py
@@ -2,456 +2,15 @@
 2.5D DC Resistivity Least-Squares Inversion
 ===========================================
 
-Here we invert a line of DC resistivity data to recover an electrical
-conductivity model. We formulate the inverse problem as a least-squares
-optimization problem. For this tutorial, we focus on the following:
+.. important::
 
-    - Defining the survey
-    - Generating a mesh based on survey geometry
-    - Including surface topography
-    - Defining the inverse problem (data misfit, regularization, directives)
-    - Applying sensitivity weighting
-    - Plotting the recovered model and data misfit
+    This tutorial has been moved to `User Tutorials
+    `_.
 
+    Check out the `Weighted Least-Squares Inversion
+    `_
+    section in the
+    `2.5D DC Resistivity Inversion
+    `_ tutorial.
 """
-
-#########################################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-from matplotlib.colors import LogNorm
-import tarfile
-
-from discretize import TreeMesh
-from discretize.utils import mkvc, active_from_xyz
-
-from simpeg.utils import model_builder
-from simpeg import (
-    maps,
-    data_misfit,
-    regularization,
-    optimization,
-    inverse_problem,
-    inversion,
-    directives,
-    utils,
-)
-from simpeg.electromagnetics.static import resistivity as dc
-from simpeg.electromagnetics.static.utils.static_utils import (
-    plot_pseudosection,
-)
-from simpeg.utils.io_utils.io_utils_electromagnetics import read_dcip2d_ubc
-
-
-mpl.rcParams.update({"font.size": 16})
-# sphinx_gallery_thumbnail_number = 4
-
-
-#############################################
-# Download Assets
-# ---------------
-#
-# Here we provide the file paths to assets we need to run the inversion. The
-# path to the true model conductivity and chargeability models are also
-# provided for comparison with the inversion results. These files are stored as a
-# tar-file on our Google Cloud bucket:
-# "https://storage.googleapis.com/simpeg/doc-assets/dcr2d.tar.gz"
-#
-
-# storage bucket where we have the data
-data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr2d.tar.gz"
-
-# download the data
-downloaded_data = utils.download(data_source, overwrite=True)
-
-# unzip the tarfile
-tar = tarfile.open(downloaded_data, "r")
-tar.extractall()
-tar.close()
-
-# path to the directory containing our data
-dir_path = downloaded_data.split(".")[0] + os.path.sep
-
-# files to work with
-topo_filename = dir_path + "topo_xyz.txt"
-data_filename = dir_path + "dc_data.obs"
-
-
-#############################################
-# Load Data, Define Survey and Plot
-# ---------------------------------
-#
-# Here we load the observed data, define the DC and IP survey geometry and
-# plot the data values using pseudo-sections.
-# **Warning**: In the following example, the observations file is assumed to be
-# sorted by sources.
-#
-
-# Load data
-topo_xyz = np.loadtxt(str(topo_filename))
-dc_data = read_dcip2d_ubc(data_filename, "volt", "general")
-
-#######################################################################
-# Plot Observed Data in Pseudo-Section
-# ------------------------------------
-#
-# Here, we demonstrate how to plot 2D data in pseudo-section.
-# First, we plot the actual data (voltages) in pseudo-section as a scatter plot.
-# This allows us to visualize the pseudo-sensitivity locations for our survey.
-# Next, we plot the data as apparent conductivities in pseudo-section with a filled
-# contour plot.
-#
-
-# Plot voltages pseudo-section
-fig = plt.figure(figsize=(12, 5))
-ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
-plot_pseudosection(
-    dc_data,
-    plot_type="scatter",
-    ax=ax1,
-    scale="log",
-    cbar_label="V/A",
-    scatter_opts={"cmap": mpl.cm.viridis},
-)
-ax1.set_title("Normalized Voltages")
-plt.show()
-
-# Plot apparent conductivity pseudo-section
-fig = plt.figure(figsize=(12, 5))
-ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
-plot_pseudosection(
-    dc_data,
-    plot_type="contourf",
-    ax=ax1,
-    scale="log",
-    data_type="apparent conductivity",
-    cbar_label="S/m",
-    mask_topography=True,
-    contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
-)
-ax1.set_title("Apparent Conductivity")
-plt.show()
-
-####################################################
-# Assign Uncertainties
-# --------------------
-#
-# Inversion with SimPEG requires that we define the uncertainties on our data.
-# This represents our estimate of the standard deviation of the
-# noise in our data. For DC data, the uncertainties are 5% of the absolute value.
-#
-
-dc_data.standard_deviation = 0.05 * np.abs(dc_data.dobs)
-
-########################################################
-# Create Tree Mesh
-# ------------------
-#
-# Here, we create the Tree mesh that will be used to invert DC data.
-#
-
-dh = 4  # base cell width
-dom_width_x = 3200.0  # domain width x
-dom_width_z = 2400.0  # domain width z
-nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0)))  # num. base cells x
-nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0)))  # num.
base cells z - -# Define the base mesh -hx = [(dh, nbcx)] -hz = [(dh, nbcz)] -mesh = TreeMesh([hx, hz], x0="CN") - -# Mesh refinement based on topography -mesh.refine_surface( - topo_xyz[:, [0, 2]], - padding_cells_by_level=[0, 0, 4, 4], - finalize=False, -) - -# Mesh refinement near transmitters and receivers. First we need to obtain the -# set of unique electrode locations. -electrode_locations = np.c_[ - dc_data.survey.locations_a, - dc_data.survey.locations_b, - dc_data.survey.locations_m, - dc_data.survey.locations_n, -] - -unique_locations = np.unique( - np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0 -) - -mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False) - -# Refine core mesh region -xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0]) -xyz = np.c_[mkvc(xp), mkvc(zp)] -mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False) - -mesh.finalize() - - -############################################################### -# Project Surveys to Discretized Topography -# ----------------------------------------- -# -# It is important that electrodes are not model as being in the air. Even if the -# electrodes are properly located along surface topography, they may lie above -# the discretized topography. This step is carried out to ensure all electrodes -# like on the discretized surface. -# - -# Create 2D topography. Since our 3D topography only changes in the x direction, -# it is easy to define the 2D topography projected along the survey line. For -# arbitrary topography and for an arbitrary survey orientation, the user must -# define the 2D topography along the survey line. -topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0) - -# Find cells that lie below surface topography -ind_active = active_from_xyz(mesh, topo_2d) - -# Extract survey from data object -survey = dc_data.survey - -# Shift electrodes to the surface of discretized topography -survey.drape_electrodes_on_topography(mesh, ind_active, option="top") - -# Reset survey in data object -dc_data.survey = survey - - -######################################################## -# Starting/Reference Model and Mapping on Tree Mesh -# --------------------------------------------------- -# -# Here, we would create starting and/or reference models for the DC inversion as -# well as the mapping from the model space to the active cells. Starting and -# reference models can be a constant background value or contain a-priori -# structures. Here, the starting model is the natural log of 0.01 S/m. -# - -# Define conductivity model in S/m (or resistivity model in Ohm m) -air_conductivity = np.log(1e-8) -background_conductivity = np.log(1e-2) - -active_map = maps.InjectActiveCells(mesh, ind_active, np.exp(air_conductivity)) -nC = int(ind_active.sum()) - -conductivity_map = active_map * maps.ExpMap() - -# Define model -starting_conductivity_model = background_conductivity * np.ones(nC) - -############################################## -# Define the Physics of the DC Simulation -# --------------------------------------- -# -# Here, we define the physics of the DC resistivity problem. -# - -# Define the problem. 
Define the cells below topography and the mapping -simulation = dc.simulation_2d.Simulation2DNodal( - mesh, survey=survey, sigmaMap=conductivity_map, storeJ=True -) - -####################################################################### -# Define DC Inverse Problem -# ------------------------- -# -# The inverse problem is defined by 3 things: -# -# 1) Data Misfit: a measure of how well our recovered model explains the field data -# 2) Regularization: constraints placed on the recovered model and a priori information -# 3) Optimization: the numerical approach used to solve the inverse problem -# -# - -# Define the data misfit. Here the data misfit is the L2 norm of the weighted -# residual between the observed data and the data predicted for a given model. -# Within the data misfit, the residual between predicted and observed data are -# normalized by the data's standard deviation. -dmis = data_misfit.L2DataMisfit(data=dc_data, simulation=simulation) - -# Define the regularization (model objective function) -reg = regularization.WeightedLeastSquares( - mesh, - active_cells=ind_active, - reference_model=starting_conductivity_model, -) - -reg.reference_model_in_smooth = True # Reference model in smoothness term - -# Define how the optimization problem is solved. Here we will use an -# Inexact Gauss Newton approach. -opt = optimization.InexactGaussNewton(maxIter=40) - -# Here we define the inverse problem that is to be solved -inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) - -####################################################################### -# Define DC Inversion Directives -# ------------------------------ -# -# Here we define any directives that are carried out during the inversion. This -# includes the cooling schedule for the trade-off parameter (beta), stopping -# criteria for the inversion and saving inversion results at each iteration. -# - -# Apply and update sensitivity weighting as the model updates -update_sensitivity_weighting = directives.UpdateSensitivityWeights() - -# Defining a starting value for the trade-off parameter (beta) between the data -# misfit and the regularization. -starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e1) - -# Set the rate of reduction in trade-off parameter (beta) each time the -# the inverse problem is solved. And set the number of Gauss-Newton iterations -# for each trade-off paramter value. -beta_schedule = directives.BetaSchedule(coolingFactor=3, coolingRate=2) - -# Options for outputting recovered models and predicted data for each beta. -save_iteration = directives.SaveOutputEveryIteration(save_txt=False) - -# Setting a stopping criteria for the inversion. -target_misfit = directives.TargetMisfit(chifact=1) - -# Update preconditioner -update_jacobi = directives.UpdatePreconditioner() - -directives_list = [ - update_sensitivity_weighting, - starting_beta, - beta_schedule, - save_iteration, - target_misfit, - update_jacobi, -] - -##################################################################### -# Running the DC Inversion -# ------------------------ -# -# To define the inversion object, we need to define the inversion problem and -# the set of directives. We can then run the inversion. 
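As a sanity check on the directives above, the effect of BetaSchedule(coolingFactor=3, coolingRate=2) is simply that beta is divided by 3 once every 2 Gauss-Newton iterations. A plain-Python illustration with a made-up starting value (in the actual run, BetaEstimate_ByEig supplies the starting beta):

.. code:: python

    # Hypothetical starting beta; the directive estimates the real one.
    beta, cooling_factor, cooling_rate = 1e4, 3.0, 2

    betas = []
    for iteration in range(8):
        betas.append(beta)
        # Cool beta once every `cooling_rate` iterations.
        if (iteration + 1) % cooling_rate == 0:
            beta /= cooling_factor

    print(betas)  # [10000.0, 10000.0, 3333.3, 3333.3, 1111.1, 1111.1, 370.4, 370.4]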
-# - -# Here we combine the inverse problem and the set of directives -dc_inversion = inversion.BaseInversion(inv_prob, directiveList=directives_list) - -# Run inversion -recovered_conductivity_model = dc_inversion.run(starting_conductivity_model) - -############################################################ -# Recreate True Conductivity Model -# -------------------------------- -# - -true_background_conductivity = 1e-2 -true_conductor_conductivity = 1e-1 -true_resistor_conductivity = 1e-3 - -true_conductivity_model = true_background_conductivity * np.ones(len(mesh)) - -ind_conductor = model_builder.get_indices_sphere( - np.r_[-120.0, -180.0], 60.0, mesh.gridCC -) -true_conductivity_model[ind_conductor] = true_conductor_conductivity - -ind_resistor = model_builder.get_indices_sphere(np.r_[120.0, -180.0], 60.0, mesh.gridCC) -true_conductivity_model[ind_resistor] = true_resistor_conductivity - -true_conductivity_model[~ind_active] = np.nan - -############################################################ -# Plotting True and Recovered Conductivity Model -# ---------------------------------------------- -# - -# Plot True Model -norm = LogNorm(vmin=1e-3, vmax=1e-1) - -fig = plt.figure(figsize=(9, 4)) -ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7]) -im = mesh.plot_image( - true_conductivity_model, ax=ax1, grid=False, pcolor_opts={"norm": norm} -) -ax1.set_xlim(-600, 600) -ax1.set_ylim(-600, 0) -ax1.set_title("True Conductivity Model") -ax1.set_xlabel("x (m)") -ax1.set_ylabel("z (m)") - -ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7]) -cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical") -cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12) - -plt.show() - -# # Plot Recovered Model -fig = plt.figure(figsize=(9, 4)) - -recovered_conductivity = conductivity_map * recovered_conductivity_model -recovered_conductivity[~ind_active] = np.nan - -ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7]) -mesh.plot_image( - recovered_conductivity, normal="Y", ax=ax1, grid=False, pcolor_opts={"norm": norm} -) -ax1.set_xlim(-600, 600) -ax1.set_ylim(-600, 0) -ax1.set_title("Recovered Conductivity Model") -ax1.set_xlabel("x (m)") -ax1.set_ylabel("z (m)") - -ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7]) -cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical") -cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12) - -plt.show() - -################################################################### -# Plotting Predicted DC Data and Misfit -# ------------------------------------- -# - -# Predicted data from recovered model -dpred = inv_prob.dpred -dobs = dc_data.dobs -std = dc_data.standard_deviation - -# Plot -fig = plt.figure(figsize=(9, 13)) -data_array = [np.abs(dobs), np.abs(dpred), (dobs - dpred) / std] -plot_title = ["Observed Voltage", "Predicted Voltage", "Normalized Misfit"] -plot_units = ["V/A", "V/A", ""] -scale = ["log", "log", "linear"] - -ax1 = 3 * [None] -cax1 = 3 * [None] -cbar = 3 * [None] -cplot = 3 * [None] - -for ii in range(0, 3): - ax1[ii] = fig.add_axes([0.15, 0.72 - 0.33 * ii, 0.65, 0.21]) - cax1[ii] = fig.add_axes([0.81, 0.72 - 0.33 * ii, 0.03, 0.21]) - cplot[ii] = plot_pseudosection( - survey, - data_array[ii], - "contourf", - ax=ax1[ii], - cax=cax1[ii], - scale=scale[ii], - cbar_label=plot_units[ii], - mask_topography=True, - contourf_opts={"levels": 25, "cmap": mpl.cm.viridis}, - ) - ax1[ii].set_title(plot_title[ii]) - -plt.show() diff --git a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py index 
d030b52a0f..7eedf1d219 100644
--- a/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py
+++ b/tutorials/05-dcr/plot_inv_2_dcr2d_irls.py
@@ -2,473 +2,16 @@
 2.5D DC Resistivity Inversion with Sparse Norms
 ===============================================
 
-Here we invert a line of DC resistivity data to recover an electrical
-conductivity model. We formulate the inverse problem as a least-squares
-optimization problem. For this tutorial, we focus on the following:
+.. important::
 
-    - Defining the survey
-    - Generating a mesh based on survey geometry
-    - Including surface topography
-    - Defining the inverse problem (data misfit, regularization, directives)
-    - Applying sensitivity weighting
-    - Plotting the recovered model and data misfit
+    This tutorial has been moved to `User Tutorials
+    `_.
 
+    Check out the `Iteratively Re-weighted Least-Squares Inversion
+    `_
+    section in the
+    `2.5D DC Resistivity Inversion
+    `_ tutorial.
-"""
-
-#########################################################################
-# Import modules
-# --------------
-#
-
-import os
-import numpy as np
-import matplotlib as mpl
-import matplotlib.pyplot as plt
-from matplotlib.colors import LogNorm
-import tarfile
-
-from discretize import TreeMesh
-from discretize.utils import mkvc, active_from_xyz
-
-from simpeg.utils import model_builder
-from simpeg import (
-    maps,
-    data_misfit,
-    regularization,
-    optimization,
-    inverse_problem,
-    inversion,
-    directives,
-    utils,
-)
-from simpeg.electromagnetics.static import resistivity as dc
-from simpeg.electromagnetics.static.utils.static_utils import (
-    plot_pseudosection,
-    apparent_resistivity_from_voltage,
-)
-from simpeg.utils.io_utils.io_utils_electromagnetics import read_dcip2d_ubc
-
-mpl.rcParams.update({"font.size": 16})
-# sphinx_gallery_thumbnail_number = 3
-
-
-#############################################
-# Define File Names
-# -----------------
-#
-# Here we provide the file paths to assets we need to run the inversion. The
-# path to the true model conductivity and chargeability models are also
-# provided for comparison with the inversion results. These files are stored as a
-# tar-file on our Google Cloud bucket:
-# "https://storage.googleapis.com/simpeg/doc-assets/dcr2d.tar.gz"
-#
-
-# storage bucket where we have the data
-data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr2d.tar.gz"
-
-# download the data
-downloaded_data = utils.download(data_source, overwrite=True)
-
-# unzip the tarfile
-tar = tarfile.open(downloaded_data, "r")
-tar.extractall()
-tar.close()
-
-# path to the directory containing our data
-dir_path = downloaded_data.split(".")[0] + os.path.sep
-
-# files to work with
-topo_filename = dir_path + "topo_xyz.txt"
-data_filename = dir_path + "dc_data.obs"
-
-
-#############################################
-# Load Data, Define Survey and Plot
-# ---------------------------------
-#
-# Here we load the observed data, define the DC and IP survey geometry and
-# plot the data values using pseudo-sections.
-# **Warning**: In the following example, the observations file is assumed to be
-# sorted by sources.
-#
-
-# Load data
-topo_xyz = np.loadtxt(str(topo_filename))
-dc_data = read_dcip2d_ubc(data_filename, "volt", "general")
-
-#######################################################################
-# Plot Observed Data in Pseudo-Section
-# ------------------------------------
-#
-# Here, we demonstrate how to plot 2D data in pseudo-section.
-# First, we plot the actual data (voltages) in pseudo-section as a scatter plot.
-# This allows us to visualize the pseudo-sensitivity locations for our survey.
-# Next, we plot the data as apparent conductivities in pseudo-section with a filled
-# contour plot.
-#
-
-# Plot voltages pseudo-section
-fig = plt.figure(figsize=(12, 5))
-ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
-plot_pseudosection(
-    dc_data,
-    plot_type="scatter",
-    ax=ax1,
-    scale="log",
-    cbar_label="V/A",
-    scatter_opts={"cmap": mpl.cm.viridis},
-)
-ax1.set_title("Normalized Voltages")
-plt.show()
-
-# Get apparent conductivities from volts and survey geometry
-apparent_conductivities = 1 / apparent_resistivity_from_voltage(
-    dc_data.survey, dc_data.dobs
-)
-
-# Plot apparent conductivity pseudo-section
-fig = plt.figure(figsize=(12, 5))
-ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
-plot_pseudosection(
-    dc_data.survey,
-    apparent_conductivities,
-    plot_type="contourf",
-    ax=ax1,
-    scale="log",
-    cbar_label="S/m",
-    mask_topography=True,
-    contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
-)
-ax1.set_title("Apparent Conductivity")
-plt.show()
-
-####################################################
-# Assign Uncertainties
-# --------------------
-#
-# Inversion with SimPEG requires that we define the uncertainties on our data.
-# This represents our estimate of the standard deviation of the
-# noise in our data. For DC data, the uncertainties are 5% of the absolute value.
-#
-
-dc_data.standard_deviation = 0.05 * np.abs(dc_data.dobs)
-
-########################################################
-# Create Tree Mesh
-# ------------------
-#
-# Here, we create the Tree mesh that will be used to invert the DC data.
-#
-
-dh = 4  # base cell width
-dom_width_x = 3200.0  # domain width x
-dom_width_z = 2400.0  # domain width z
-nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0)))  # num. base cells x
-nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0)))  # num. base cells z
-
-# Define the base mesh
-hx = [(dh, nbcx)]
-hz = [(dh, nbcz)]
-mesh = TreeMesh([hx, hz], x0="CN")
-
-# Mesh refinement based on topography
-mesh.refine_surface(
-    topo_xyz[:, [0, 2]],
-    padding_cells_by_level=[0, 0, 4, 4],
-    finalize=False,
-)
-
-# Mesh refinement near transmitters and receivers. First we need to obtain the
-# set of unique electrode locations.
-electrode_locations = np.c_[
-    dc_data.survey.locations_a,
-    dc_data.survey.locations_b,
-    dc_data.survey.locations_m,
-    dc_data.survey.locations_n,
-]
-
-unique_locations = np.unique(
-    np.reshape(electrode_locations, (4 * dc_data.survey.nD, 2)), axis=0
-)
-
-mesh.refine_points(unique_locations, padding_cells_by_level=[4, 4], finalize=False)
-
-# Refine core mesh region
-xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0])
-xyz = np.c_[mkvc(xp), mkvc(zp)]
-mesh.refine_bounding_box(xyz, padding_cells_by_level=[0, 0, 2, 8], finalize=False)
-
-mesh.finalize()
-
-
-###############################################################
-# Project Surveys to Discretized Topography
-# -----------------------------------------
-#
-# It is important that electrodes are not modeled as being in the air. Even if the
-# electrodes are properly located along surface topography, they may lie above
-# the discretized topography. This step is carried out to ensure all electrodes
-# lie on the discretized surface.
-#
-
-# Create 2D topography. Since our 3D topography only changes in the x direction,
-# it is easy to define the 2D topography projected along the survey line.
For -# arbitrary topography and for an arbitrary survey orientation, the user must -# define the 2D topography along the survey line. -topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0) - -# Find cells that lie below surface topography -ind_active = active_from_xyz(mesh, topo_2d) - -# Extract survey from data object -survey = dc_data.survey - -# Shift electrodes to the surface of discretized topography -survey.drape_electrodes_on_topography(mesh, ind_active, option="top") - -# Reset survey in data object -dc_data.survey = survey - - -######################################################## -# Starting/Reference Model and Mapping on Tree Mesh -# --------------------------------------------------- -# -# Here, we would create starting and/or reference models for the DC inversion as -# well as the mapping from the model space to the active cells. Starting and -# reference models can be a constant background value or contain a-priori -# structures. Here, the starting model is the natural log of 0.01 S/m. -# - -# Define conductivity model in S/m (or resistivity model in Ohm m) -air_conductivity = np.log(1e-8) -background_conductivity = np.log(1e-2) - -active_map = maps.InjectActiveCells(mesh, ind_active, np.exp(air_conductivity)) -nC = int(ind_active.sum()) -conductivity_map = active_map * maps.ExpMap() - -# Define model -starting_conductivity_model = background_conductivity * np.ones(nC) - -############################################## -# Define the Physics of the DC Simulation -# --------------------------------------- -# -# Here, we define the physics of the DC resistivity problem. -# - -# Define the problem. Define the cells below topography and the mapping -simulation = dc.simulation_2d.Simulation2DNodal( - mesh, survey=survey, sigmaMap=conductivity_map, storeJ=True -) - -####################################################################### -# Define DC Inverse Problem -# ------------------------- -# -# The inverse problem is defined by 3 things: -# -# 1) Data Misfit: a measure of how well our recovered model explains the field data -# 2) Regularization: constraints placed on the recovered model and a priori information -# 3) Optimization: the numerical approach used to solve the inverse problem -# -# - -# Define the data misfit. Here the data misfit is the L2 norm of the weighted -# residual between the observed data and the data predicted for a given model. -# Within the data misfit, the residual between predicted and observed data are -# normalized by the data's standard deviation. -dmis = data_misfit.L2DataMisfit(data=dc_data, simulation=simulation) - -# Define the regularization (model objective function). Here, 'p' defines the -# the norm of the smallness term, 'qx' defines the norm of the smoothness -# in x and 'qz' defines the norm of the smoothness in z. -regmap = maps.IdentityMap(nP=int(ind_active.sum())) - -reg = regularization.Sparse( - mesh, - active_cells=ind_active, - reference_model=starting_conductivity_model, - mapping=regmap, - gradient_type="total", - alpha_s=0.01, - alpha_x=1, - alpha_y=1, -) - -reg.reference_model_in_smooth = True # Include reference model in smoothness - -p = 0 -qx = 1 -qz = 1 -reg.norms = [p, qx, qz] - -# Define how the optimization problem is solved. Here we will use an inexact -# Gauss-Newton approach. 
-opt = optimization.InexactGaussNewton(maxIter=40) - -# Here we define the inverse problem that is to be solved -inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt) - -####################################################################### -# Define DC Inversion Directives -# ------------------------------ -# -# Here we define any directives that are carried out during the inversion. This -# includes the cooling schedule for the trade-off parameter (beta), stopping -# criteria for the inversion and saving inversion results at each iteration. -# - -# Apply and update sensitivity weighting as the model updates -update_sensitivity_weighting = directives.UpdateSensitivityWeights() - -# Reach target misfit for L2 solution, then use IRLS until model stops changing. -update_IRLS = directives.UpdateIRLS(max_irls_iterations=25, chifact_start=1.0) - -# Defining a starting value for the trade-off parameter (beta) between the data -# misfit and the regularization. -starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e1) - -# Options for outputting recovered models and predicted data for each beta. -save_iteration = directives.SaveOutputEveryIteration(save_txt=False) - -# Update preconditioner -update_jacobi = directives.UpdatePreconditioner() - -directives_list = [ - update_sensitivity_weighting, - update_IRLS, - starting_beta, - save_iteration, - update_jacobi, -] - -##################################################################### -# Running the DC Inversion -# ------------------------ -# -# To define the inversion object, we need to define the inversion problem and -# the set of directives. We can then run the inversion. -# - -# Here we combine the inverse problem and the set of directives -dc_inversion = inversion.BaseInversion(inv_prob, directiveList=directives_list) - -# Run inversion -recovered_conductivity_model = dc_inversion.run(starting_conductivity_model) - -############################################################ -# Recreate True Conductivity Model -# -------------------------------- -# - -true_background_conductivity = 1e-2 -true_conductor_conductivity = 1e-1 -true_resistor_conductivity = 1e-3 - -true_conductivity_model = true_background_conductivity * np.ones(len(mesh)) - -ind_conductor = model_builder.get_indices_sphere( - np.r_[-120.0, -180.0], 60.0, mesh.gridCC -) -true_conductivity_model[ind_conductor] = true_conductor_conductivity - -ind_resistor = model_builder.get_indices_sphere(np.r_[120.0, -180.0], 60.0, mesh.gridCC) -true_conductivity_model[ind_resistor] = true_resistor_conductivity - -true_conductivity_model[~ind_active] = np.nan - -############################################################ -# Plotting True and Recovered Conductivity Model -# ---------------------------------------------- -# - -# Get L2 and sparse recovered model in base 10 -l2_conductivity = conductivity_map * inv_prob.l2model -l2_conductivity[~ind_active] = np.nan - -recovered_conductivity = conductivity_map * recovered_conductivity_model -recovered_conductivity[~ind_active] = np.nan - -# Plot True Model -norm = LogNorm(vmin=1e-3, vmax=1e-1) - -fig = plt.figure(figsize=(9, 15)) -ax1 = 3 * [None] -ax2 = 3 * [None] -title_str = [ - "True Conductivity Model", - "Smooth Recovered Model", - "Sparse Recovered Model", -] -plotting_model = [ - true_conductivity_model, - l2_conductivity, - recovered_conductivity, -] - -for ii in range(0, 3): - ax1[ii] = fig.add_axes([0.14, 0.75 - 0.3 * ii, 0.68, 0.2]) - mesh.plot_image( - plotting_model[ii], - ax=ax1[ii], - grid=False, - range_x=[-700, 700], - 
range_y=[-600, 0],
-        pcolor_opts={"norm": norm},
-    )
-    ax1[ii].set_xlim(-600, 600)
-    ax1[ii].set_ylim(-600, 0)
-    ax1[ii].set_title(title_str[ii])
-    ax1[ii].set_xlabel("x (m)")
-    ax1[ii].set_ylabel("z (m)")
-
-    ax2[ii] = fig.add_axes([0.84, 0.75 - 0.3 * ii, 0.03, 0.2])
-    cbar = mpl.colorbar.ColorbarBase(ax2[ii], norm=norm, orientation="vertical")
-    cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12)
-
-plt.show()
-
-###################################################################
-# Plotting Predicted DC Data and Misfit
-# -------------------------------------
-#
-
-# Predicted data from recovered model
-dpred = inv_prob.dpred
-dobs = dc_data.dobs
-std = dc_data.standard_deviation
-
-# Plot
-fig = plt.figure(figsize=(9, 13))
-data_array = [np.abs(dobs), np.abs(dpred), (dobs - dpred) / std]
-plot_title = ["Observed Voltage", "Predicted Voltage", "Normalized Misfit"]
-plot_units = ["V/A", "V/A", ""]
-scale = ["log", "log", "linear"]
-
-ax1 = 3 * [None]
-cax1 = 3 * [None]
-cbar = 3 * [None]
-cplot = 3 * [None]
-
-for ii in range(0, 3):
-    ax1[ii] = fig.add_axes([0.15, 0.72 - 0.33 * ii, 0.65, 0.21])
-    cax1[ii] = fig.add_axes([0.81, 0.72 - 0.33 * ii, 0.03, 0.21])
-    cplot[ii] = plot_pseudosection(
-        survey,
-        data_array[ii],
-        "contourf",
-        ax=ax1[ii],
-        cax=cax1[ii],
-        scale=scale[ii],
-        cbar_label=plot_units[ii],
-        mask_topography=True,
-        contourf_opts={"levels": 25, "cmap": mpl.cm.viridis},
-    )
-    ax1[ii].set_title(plot_title[ii])
-
-plt.show()
+"""
diff --git a/tutorials/05-dcr/plot_inv_3_dcr3d.py b/tutorials/05-dcr/plot_inv_3_dcr3d.py
index 60675957c9..fb156a3d32 100644
--- a/tutorials/05-dcr/plot_inv_3_dcr3d.py
+++ b/tutorials/05-dcr/plot_inv_3_dcr3d.py
@@ -3,504 +3,13 @@
 3D Least-Squares Inversion of DC Resistivity Data
 =================================================
 
-Here we invert 5 lines of DC data to recover an electrical
-conductivity model. We formulate the corresponding
-inverse problem as a least-squares optimization problem.
-For this tutorial, we focus on the following:
+.. important::
 
-    - Generating a mesh based on survey geometry
-    - Including surface topography
-    - Defining the inverse problem (data misfit, regularization, directives)
-    - Applying sensitivity weighting
-    - Plotting the recovered model and data misfit
+    This tutorial has been moved to `User Tutorials
+    `_.
 
-The DC data are measured voltages normalized by the source current in V/A.
+    Check out the `3D DC Resistivity Inversion
+    `_ tutorial.
""" - -################################################################# -# Import Modules -# -------------- -# - - -import os -import numpy as np -import matplotlib as mpl -import matplotlib.pyplot as plt -import tarfile - -from discretize import TreeMesh -from discretize.utils import refine_tree_xyz, active_from_xyz - -from simpeg.utils import model_builder -from simpeg.utils.io_utils.io_utils_electromagnetics import read_dcip_xyz -from simpeg import ( - maps, - data_misfit, - regularization, - optimization, - inverse_problem, - inversion, - directives, - utils, -) -from simpeg.electromagnetics.static import resistivity as dc -from simpeg.electromagnetics.static.utils.static_utils import ( - apparent_resistivity_from_voltage, -) - -# To plot DC/IP data in 3D, the user must have the plotly package -try: - import plotly - from simpeg.electromagnetics.static.utils.static_utils import plot_3d_pseudosection - - has_plotly = True -except ImportError: - has_plotly = False - pass - - -mpl.rcParams.update({"font.size": 16}) - -# sphinx_gallery_thumbnail_number = 3 - -########################################################## -# Download Assets -# --------------- -# -# Here we provide the file paths to assets we need to run the inversion. The -# path to the true model conductivity and chargeability models are also -# provided for comparison with the inversion results. These files are stored as a -# tar-file on our google cloud bucket: -# "https://storage.googleapis.com/simpeg/doc-assets/dcr3d.tar.gz" -# -# -# - -# storage bucket where we have the data -data_source = "https://storage.googleapis.com/simpeg/doc-assets/dcr3d.tar.gz" - -# download the data -downloaded_data = utils.download(data_source, overwrite=True) - -# unzip the tarfile -tar = tarfile.open(downloaded_data, "r") -tar.extractall() -tar.close() - -# path to the directory containing our data -dir_path = downloaded_data.split(".")[0] + os.path.sep - -# files to work with -topo_filename = dir_path + "topo_xyz.txt" -dc_data_filename = dir_path + "dc_data.xyz" - -######################################################## -# Load Data and Topography -# ------------------------ -# -# Here we load the observed data and topography. -# -# - -topo_xyz = np.loadtxt(str(topo_filename)) - -dc_data = read_dcip_xyz( - dc_data_filename, - "volt", - data_header="V/A", - uncertainties_header="UNCERT", - is_surface_data=False, -) - - -########################################################## -# Plot Observed Data in Pseudosection -# ----------------------------------- -# -# Here we plot the observed DC and IP data in 3D pseudosections. -# To use this utility, you must have Python's *plotly* package. -# Here, we represent the DC data as apparent conductivities -# and the IP data as apparent chargeabilities. 
-# - -# Convert predicted data to apparent conductivities -apparent_conductivity = 1 / apparent_resistivity_from_voltage( - dc_data.survey, - dc_data.dobs, -) - -if has_plotly: - fig = plot_3d_pseudosection( - dc_data.survey, - apparent_conductivity, - scale="log", - units="S/m", - plane_distance=15, - ) - - fig.update_layout( - title_text="Apparent Conductivity", - title_x=0.5, - title_font_size=24, - width=650, - height=500, - scene_camera=dict( - center=dict(x=0, y=0, z=-0.4), eye=dict(x=1.5, y=-1.5, z=1.8) - ), - ) - - plotly.io.show(fig) - -else: - print("INSTALL 'PLOTLY' TO VISUALIZE 3D PSEUDOSECTIONS") - - -#################################################### -# Assign Uncertainties -# -------------------- -# -# Inversion with SimPEG requires that we define the uncertainties on our data. -# This represents our estimate of the standard deviation of the -# noise in our data. For DC data, the uncertainties are 10% of the absolute value. -# -# - -dc_data.standard_deviation = 0.1 * np.abs(dc_data.dobs) - - -################################################################ -# Create Tree Mesh -# ---------------- -# -# Here, we create the Tree mesh that will be used to invert -# DC data. -# - - -dh = 25.0 # base cell width -dom_width_x = 6000.0 # domain width x -dom_width_y = 6000.0 # domain width y -dom_width_z = 4000.0 # domain width z -nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0))) # num. base cells x -nbcy = 2 ** int(np.round(np.log(dom_width_y / dh) / np.log(2.0))) # num. base cells y -nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0))) # num. base cells z - -# Define the base mesh -hx = [(dh, nbcx)] -hy = [(dh, nbcy)] -hz = [(dh, nbcz)] -mesh = TreeMesh([hx, hy, hz], x0="CCN") - -# Mesh refinement based on topography -k = np.sqrt(np.sum(topo_xyz[:, 0:2] ** 2, axis=1)) < 1200 -mesh = refine_tree_xyz( - mesh, topo_xyz[k, :], octree_levels=[0, 6, 8], method="surface", finalize=False -) - -# Mesh refinement near sources and receivers. -electrode_locations = np.r_[ - dc_data.survey.locations_a, - dc_data.survey.locations_b, - dc_data.survey.locations_m, - dc_data.survey.locations_n, -] -unique_locations = np.unique(electrode_locations, axis=0) -mesh = refine_tree_xyz( - mesh, unique_locations, octree_levels=[4, 6, 4], method="radial", finalize=False -) - -# Finalize the mesh -mesh.finalize() - -####################################################### -# Project Electrodes to Discretized Topography -# -------------------------------------------- -# -# It is important that electrodes are not modeled as being in the air. Even if the -# electrodes are properly located along surface topography, they may lie above -# the discretized topography. This step is carried out to ensure all electrodes -# lie on the discretized surface. -# - -# Find cells that lie below surface topography -ind_active = active_from_xyz(mesh, topo_xyz) - -# Extract survey from data object -dc_survey = dc_data.survey - -# Shift electrodes to the surface of discretized topography -dc_survey.drape_electrodes_on_topography(mesh, ind_active, option="top") - -# Reset survey in data object -dc_data.survey = dc_survey - -################################################################# -# Starting/Reference Model and Mapping on OcTree Mesh -# --------------------------------------------------- -# -# Here, we create starting and/or reference models for the DC inversion as -# well as the mapping from the model space to the active cells. 
Starting and -# reference models can be a constant background value or contain a-priori -# structures. Here, the starting model is the natural log of 0.01 S/m. -# -# - -# Define conductivity model in S/m (or resistivity model in Ohm m) -air_conductivity = np.log(1e-8) -background_conductivity = np.log(1e-2) - -# Define the mapping from active cells to the entire domain -active_map = maps.InjectActiveCells(mesh, ind_active, np.exp(air_conductivity)) -nC = int(ind_active.sum()) - -# Define the mapping from the model to the conductivity of the entire domain -conductivity_map = active_map * maps.ExpMap() - -# Define starting model -starting_conductivity_model = background_conductivity * np.ones(nC) - -############################################################### -# Define the Physics of the DC Simulation -# --------------------------------------- -# -# Here, we define the physics of the DC resistivity simulation. -# -# - -dc_simulation = dc.simulation.Simulation3DNodal( - mesh, survey=dc_survey, sigmaMap=conductivity_map, storeJ=True -) - -################################################################# -# Define DC Inverse Problem -# ------------------------- -# -# The inverse problem is defined by 3 things: -# -# 1) Data Misfit: a measure of how well our recovered model explains the field data -# 2) Regularization: constraints placed on the recovered model and a priori information -# 3) Optimization: the numerical approach used to solve the inverse problem -# -# - - -# Define the data misfit. Here the data misfit is the L2 norm of the weighted -# residual between the observed data and the data predicted for a given model. -# Within the data misfit, the residual between predicted and observed data are -# normalized by the data's standard deviation. -dc_data_misfit = data_misfit.L2DataMisfit(data=dc_data, simulation=dc_simulation) - -# Define the regularization (model objective function) -dc_regularization = regularization.WeightedLeastSquares( - mesh, - active_cells=ind_active, - reference_model=starting_conductivity_model, -) - -dc_regularization.reference_model_in_smooth = ( - True # Include reference model in smoothness -) - -# Define how the optimization problem is solved. -dc_optimization = optimization.InexactGaussNewton( - maxIter=15, maxIterLS=20, maxIterCG=30, tolCG=1e-2 -) - -# Here we define the inverse problem that is to be solved -dc_inverse_problem = inverse_problem.BaseInvProblem( - dc_data_misfit, dc_regularization, dc_optimization -) - -################################################# -# Define DC Inversion Directives -# ------------------------------ -# -# Here we define any directives that are carried out during the inversion. This -# includes the cooling schedule for the trade-off parameter (beta), stopping -# criteria for the inversion and saving inversion results at each iteration. -# -# - -# Apply and update sensitivity weighting as the model updates -update_sensitivity_weighting = directives.UpdateSensitivityWeights() - -# Defining a starting value for the trade-off parameter (beta) between the data -# misfit and the regularization. -starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e1) - -# Set the rate of reduction in trade-off parameter (beta) each time the -# the inverse problem is solved. And set the number of Gauss-Newton iterations -# for each trade-off paramter value. -beta_schedule = directives.BetaSchedule(coolingFactor=2.5, coolingRate=2) - -# Options for outputting recovered models and predicted data for each beta. 
-# Options for outputting recovered models and predicted data for each beta
-save_iteration = directives.SaveOutputEveryIteration(save_txt=False)
-
-# Set a stopping criterion for the inversion
-target_misfit = directives.TargetMisfit(chifact=1)
-
-# Apply and update preconditioner as the model updates
-update_jacobi = directives.UpdatePreconditioner()
-
-directives_list = [
-    update_sensitivity_weighting,
-    starting_beta,
-    beta_schedule,
-    save_iteration,
-    target_misfit,
-    update_jacobi,
-]
-
-#########################################################
-# Running the DC Inversion
-# ------------------------
-#
-# To define the inversion object, we combine the inverse problem with the set
-# of directives. We can then run the inversion.
-#
-
-# Here we combine the inverse problem and the set of directives
-dc_inversion = inversion.BaseInversion(
-    dc_inverse_problem, directiveList=directives_list
-)
-
-# Run inversion
-recovered_conductivity_model = dc_inversion.run(starting_conductivity_model)
-
-
-###############################################################
-# Recreate True Conductivity Model
-# --------------------------------
-#
-
-# Define conductivity values in S/m for the background, conductor and resistor
-background_value = 1e-2
-conductor_value = 1e-1
-resistor_value = 1e-3
-
-# Define model
-true_conductivity_model = background_value * np.ones(nC)
-
-ind_conductor = model_builder.get_indices_sphere(
-    np.r_[-350.0, 0.0, -300.0], 160.0, mesh.cell_centers[ind_active, :]
-)
-true_conductivity_model[ind_conductor] = conductor_value
-
-ind_resistor = model_builder.get_indices_sphere(
-    np.r_[350.0, 0.0, -300.0], 160.0, mesh.cell_centers[ind_active, :]
-)
-true_conductivity_model[ind_resistor] = resistor_value
-true_conductivity_model_log10 = np.log10(true_conductivity_model)
-
-
-###############################################################
-# Plotting True and Recovered Conductivity Models
-# -----------------------------------------------
-#
-
-# Plot true model
-fig = plt.figure(figsize=(10, 4))
-
-plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
-
-ax1 = fig.add_axes([0.15, 0.15, 0.67, 0.75])
-mesh.plot_slice(
-    plotting_map * true_conductivity_model_log10,
-    ax=ax1,
-    normal="Y",
-    ind=int(len(mesh.h[1]) / 2),
-    grid=False,
-    clim=(true_conductivity_model_log10.min(), true_conductivity_model_log10.max()),
-    pcolor_opts={"cmap": mpl.cm.viridis},
-)
-ax1.set_title("True Conductivity Model")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("z (m)")
-ax1.set_xlim([-1000, 1000])
-ax1.set_ylim([-1000, 0])
-
-ax2 = fig.add_axes([0.84, 0.15, 0.03, 0.75])
-norm = mpl.colors.Normalize(
-    vmin=true_conductivity_model_log10.min(), vmax=true_conductivity_model_log10.max()
-)
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, cmap=mpl.cm.viridis, norm=norm, orientation="vertical", format="$10^{%.1f}$"
-)
-cbar.set_label("Conductivity [S/m]", rotation=270, labelpad=15, size=12)
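-# (Editorial aside, not part of the original tutorial.) The recovered model is
-# natural-log conductivity, so the conversion to log10 for plotting below is
-# simply a change of logarithm base: np.log10(np.exp(m)) is equivalent to
-# m / np.log(10).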
-# Plot recovered model
-recovered_conductivity_model_log10 = np.log10(np.exp(recovered_conductivity_model))
-
-fig = plt.figure(figsize=(10, 4))
-
-ax1 = fig.add_axes([0.15, 0.15, 0.67, 0.75])
-mesh.plot_slice(
-    plotting_map * recovered_conductivity_model_log10,
-    ax=ax1,
-    normal="Y",
-    ind=int(len(mesh.h[1]) / 2),
-    grid=False,
-    clim=(true_conductivity_model_log10.min(), true_conductivity_model_log10.max()),
-    pcolor_opts={"cmap": mpl.cm.viridis},
-)
-ax1.set_title("Recovered Conductivity Model")
-ax1.set_xlabel("x (m)")
-ax1.set_ylabel("z (m)")
-ax1.set_xlim([-1000, 1000])
-ax1.set_ylim([-1000, 0])
-
-ax2 = fig.add_axes([0.84, 0.15, 0.03, 0.75])
-norm = mpl.colors.Normalize(
-    vmin=true_conductivity_model_log10.min(), vmax=true_conductivity_model_log10.max()
-)
-cbar = mpl.colorbar.ColorbarBase(
-    ax2, cmap=mpl.cm.viridis, norm=norm, orientation="vertical", format="$10^{%.1f}$"
-)
-cbar.set_label("Conductivity [S/m]", rotation=270, labelpad=15, size=12)
-plt.show()
-
-#######################################################################
-# Plotting Normalized Data Misfit
-# -------------------------------
-#
-# To see how well the recovered model reproduces the observed data, it is a
-# good idea to compare the predicted and observed data. Here, we accomplish
-# this by plotting the normalized misfit.
-#
-
-# Predicted data from recovered model
-dpred_dc = dc_inverse_problem.dpred
-
-# Compute the normalized data misfit
-dc_normalized_misfit = (dc_data.dobs - dpred_dc) / dc_data.standard_deviation
-
-if has_plotly:
-    # Plot the normalized data misfit
-    fig = plot_3d_pseudosection(
-        dc_data.survey,
-        dc_normalized_misfit,
-        scale="linear",
-        units="",
-        vlim=[-2, 2],
-        plane_distance=15,
-    )
-
-    fig.update_layout(
-        title_text="Normalized Data Misfit",
-        title_x=0.5,
-        title_font_size=24,
-        width=650,
-        height=500,
-        scene_camera=dict(
-            center=dict(x=0, y=0, z=-0.4), eye=dict(x=1.5, y=-1.5, z=1.8)
-        ),
-    )
-
-    plotly.io.show(fig)
-
-else:
-    print("INSTALL 'PLOTLY' TO VISUALIZE 3D PSEUDOSECTIONS")
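-# (Editorial aside, not part of the original tutorial.) A scalar summary of the
-# fit is also useful. The target misfit directive above stops the inversion at
-# a chi factor of roughly 1, i.e. a mean squared normalized residual of 1.
-# Assuming survey.nD gives the number of data:
-chi_factor = np.sum(dc_normalized_misfit**2) / dc_data.survey.nD
-print(f"Chi factor at the recovered model: {chi_factor:.2f}")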