diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index fbde31b285..94bfa8e0a8 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -27,44 +27,44 @@ jobs: # Comment on the staled issues. stale-issue-message: | It has been 500 days since last activity on this issue, so it has been given the https://github.com/SciTools/iris/labels/needs-checkin label. This reminder prompts us to identify issues which: - + 1. Have been resolved since their last update. 2. Are no longer relevant. 3. Are still outstanding and should be assigned resource. 4. Are still outstanding but for which there is not enough resource. - + If this issue is still important to you, then please comment on this issue. The https://github.com/SciTools/iris/labels/needs-checkin label will be automatically removed. - + If there is no activity on this issue for 28 days, it will be closed and given the https://github.com/SciTools/iris/labels/not-resourced label. # Comment on the staled prs. stale-pr-message: | It has been 500 days since last activity on this PR, so it has been given the https://github.com/SciTools/iris/labels/needs-checkin label. This reminder prompts us to identify PRs which: - + 1. Have now been addressed by other PR(s). 2. Are no longer relevant. 3. Are still outstanding and should be assigned resource. 4. Are still outstanding but for which there is not enough resource. - + If this PR is still important to you, then please comment on this PR. The https://github.com/SciTools/iris/labels/needs-checkin label will be automatically removed. - + If there is no activity on this PR for 28 days, it will be closed and given the https://github.com/SciTools/iris/labels/not-resourced label. # Comment on the staled issues while closed. close-issue-message: | This issue has been automatically closed due to inactivity since the recent https://github.com/SciTools/iris/labels/needs-checkin request. - + If you still want this issue to get attention, then please either: - + - Re-open this issue, if you have sufficient permissions, or ... - Add a comment pinging `@SciTools/iris-devs` who will re-open on your behalf. # Comment on the staled prs while closed. close-pr-message: | This PR has been automatically closed due to inactivity since the recent https://github.com/SciTools/iris/labels/needs-checkin request. - + If you still want this PR to get attention, then please either: - + - Re-open this PR, if you have sufficient permissions, or ... - Add a comment pinging `@SciTools/iris-devs` who will re-open on your behalf. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7a05b75f80..ea2792a221 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,5 +1,7 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks +# See https://pre-commit.ci/#configuration +# See https://github.com/scientific-python/cookie#sp-repo-review ci: autofix_prs: false @@ -7,18 +9,23 @@ ci: autoupdate_commit_msg: "chore: update pre-commit hooks" autoupdate_schedule: "monthly" + +# Alphabetised, for lack of a better order. 
files: |
    (?x)(
-        noxfile\.py|
-        setup\.py|
+        benchmarks\/.+\.py|
         docs\/.+\.py|
         lib\/.+\.py|
-        benchmarks\/.+\.py|
-        tools\/.+\.py
+        noxfile\.py|
+        pyproject\.toml|
+        setup\.py|
+        tools\/.+\.py
     )

 minimum_pre_commit_version: 1.21.0

 repos:
+
+# Hooks for pre-commit's built-in checks
 - repo: https://github.com/pre-commit/pre-commit-hooks
   rev: v6.0.0
   hooks:
@@ -26,23 +33,36 @@ repos:
     - id: check-added-large-files
     # Check whether files parse as valid Python.
     - id: check-ast
-    # Check for file name conflicts on case-insensitive filesytems.
+    # Check for file name conflicts on case-insensitive filesystems.
     - id: check-case-conflict
     # Check for files that contain merge conflict strings.
     - id: check-merge-conflict
     # Check for debugger imports and py37+ `breakpoint()` calls in Python source.
     - id: debug-statements
+    # Check TOML file syntax.
+    - id: check-toml
+    # Check YAML file syntax.
+    - id: check-yaml
+    # Makes sure files end in a newline and only a newline.
+    # Duplicates Ruff W292 but also works on non-Python files.
+    - id: end-of-file-fixer
+    # Replaces or checks mixed line ending.
+    - id: mixed-line-ending
     # Don't commit to main branch.
     - id: no-commit-to-branch
+    # Trims trailing whitespace.
+    # Duplicates Ruff W291 but also works on non-Python files.
+    - id: trailing-whitespace

-- repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: "v0.15.12"
+# Hooks from all other repos
+# NOTE: keep these in hook-name (aka 'id') order
+
+- repo: https://github.com/adamchainz/blacken-docs
+  # This hook does not keep up-to-date with versions; visit the repo to see the most recent release.
+  rev: 1.20.0
   hooks:
-  - id: ruff
-    types: [file, python]
-    args: [--fix, --show-fixes]
-  - id: ruff-format
-    types: [file, python]
+  - id: blacken-docs
+    types: [file, rst]

 - repo: https://github.com/codespell-project/codespell
   rev: "v2.4.2"
@@ -51,18 +71,6 @@ repos:
     types_or: [asciidoc, python, markdown, rst]
     additional_dependencies: [tomli]

-- repo: https://github.com/asottile/blacken-docs
-  rev: 1.20.0
-  hooks:
-  - id: blacken-docs
-    types: [file, rst]
-
-- repo: https://github.com/aio-libs/sort-all
-  rev: v1.3.0
-  hooks:
-  - id: sort-all
-    types: [file, python]
-
 - repo: https://github.com/pre-commit/mirrors-mypy
   rev: 'v1.20.2'
   hooks:
@@ -77,3 +85,31 @@ repos:
   - id: numpydoc-validation
     exclude: "^lib/iris/tests/|docs/gallery_code/"
     types: [file, python]
+
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  rev: "v0.15.12"
+  hooks:
+  - id: ruff-check
+    types: [file, python]
+    args: [--fix, --show-fixes]
+  - id: ruff-format
+    types: [file, python]
+
+- repo: https://github.com/aio-libs/sort-all
+  rev: v1.3.0
+  hooks:
+  - id: sort-all
+    types: [file, python]
+
+- repo: https://github.com/scientific-python/cookie
+  rev: 2025.11.21
+  hooks:
+  - id: sp-repo-review
+    additional_dependencies: ["repo-review[cli]"]
+    args: ["--show=errskip"]
+
+- repo: https://github.com/abravalheri/validate-pyproject
+  # More exhaustive than Ruff RUF200.
+ rev: "v0.24.1" + hooks: + - id: validate-pyproject diff --git a/.ruff.toml b/.ruff.toml deleted file mode 100644 index 37f2cb7498..0000000000 --- a/.ruff.toml +++ /dev/null @@ -1,176 +0,0 @@ -extend = "pyproject.toml" - -lint.ignore = [ - # NOTE: To find a rule code to fix, run: - # ruff --select="ALL" --statistics lib/iris/ - - # Pyflakes (F) - # https://docs.astral.sh/ruff/rules/#pyflakes-f - "F", - - # pycodestyle (E, W) - # https://docs.astral.sh/ruff/rules/#pycodestyle-e-w - "E", - - # mccabe (C90) - # https://docs.astral.sh/ruff/rules/#mccabe-c90 - "C90", - - # pep8-naming (N) - # https://docs.astral.sh/ruff/rules/#pep8-naming-n - "N", - - # pydocstyle (D) - # https://docs.astral.sh/ruff/rules/#pydocstyle-d - # (D-1) Permanent - "D105", # Missing docstring in magic method - # (D-2) Temporary, to be removed when we are more compliant. Rare cases mmove to (1). - "D101", # Missing docstring in public class - "D102", # Missing docstring in public method - # (D-3) Temporary, before an initial review, either fix ocurrences or move to (2). - "D103", # Missing docstring in public function - - # pyupgrade (UP) - # https://docs.astral.sh/ruff/rules/#pyupgrade-up - "UP", - - # flake8-annotations (ANN) - # https://docs.astral.sh/ruff/rules/#flake8-annotations-ann - "ANN", - - # flake8-bandit (S) - # https://docs.astral.sh/ruff/rules/#flake8-bandit-s - "S", - - # flake8-blind-except (BLE) - # https://docs.astral.sh/ruff/rules/#flake8-blind-except-ble - "BLE", - - # flake8-boolean-trap (FBT) - # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt - "FBT", - - # flake8-bugbear (B) - # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b - "B", - - # flake8-builtins (A) - # https://docs.astral.sh/ruff/rules/#flake8-builtins-a - "A", - - # flake8-comprehensions (C4) - # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 - "C4", - - # flake8-datetimez (DTZ) - # https://docs.astral.sh/ruff/rules/#flake8-datetimez-dtz - "DTZ", - - # flake8-errmsg (EM) - # https://docs.astral.sh/ruff/rules/#flake8-errmsg-em - "EM", - - # flake8-future-annotations (FA) - # https://docs.astral.sh/ruff/rules/#flake8-future-annotations-fa - "FA", - - # flake8-logging-format (G) - # https://docs.astral.sh/ruff/rules/#flake8-logging-format-g - "G", - - # flake8-no-pep420 (INP) - # https://docs.astral.sh/ruff/rules/#flake8-no-pep420-inp - "INP", - - # flake8-pie (PIE) - # https://docs.astral.sh/ruff/rules/#flake8-pie-pie - "PIE", - - # flake8-print (T20) - # https://docs.astral.sh/ruff/rules/#flake8-print-t20 - "T20", - - # flake8-pyi (PYI) - # https://docs.astral.sh/ruff/rules/#flake8-pyi-pyi - "PYI", - - # flake8-pytest-style (PT) - # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt - "PT019", - - # flake8-raise (RSE) - # https://docs.astral.sh/ruff/rules/#flake8-raise-rse - "RSE", - - # flake8-return (RET) - # https://docs.astral.sh/ruff/rules/#flake8-return-ret - "RET", - - # flake8-self (SLF) - # https://docs.astral.sh/ruff/rules/#flake8-self-slf - "SLF", - - # flake8-slots (SLOT) - # https://docs.astral.sh/ruff/rules/#flake8-slots-slot - "SLOT", - - # flake8-simplify (SIM) - # https://docs.astral.sh/ruff/rules/#flake8-simplify-sim - "SIM", - - # flake8-tidy-imports (TID) - # https://docs.astral.sh/ruff/rules/#flake8-tidy-imports-tid - "TID", - - # flake8-type-checking (TCH) - # https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch - "TCH", - - # flake8-unused-arguments (ARG) - # https://docs.astral.sh/ruff/rules/#flake8-unused-arguments-arg - "ARG", - - # flake8-use-pathlib (PTH) - # 
https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth - "PTH", - - # flake8-todos (TD) - # https://docs.astral.sh/ruff/rules/#flake8-todos-td - "TD", - - # flake8-fixme (FIX) - # https://docs.astral.sh/ruff/rules/#flake8-fixme-fix - "FIX", - - # eradicate (ERA) - # https://docs.astral.sh/ruff/rules/#eradicate-era - "ERA", - - # pandas-vet (PD) - # https://docs.astral.sh/ruff/rules/#pandas-vet-pd - "PD", - - # pygrep-hooks (PGH) - # https://docs.astral.sh/ruff/rules/#pygrep-hooks-pgh - "PGH", - - # Pylint (PL) - # https://docs.astral.sh/ruff/rules/#pylint-pl - "PL", - - # tryceratops (TRY) - # https://docs.astral.sh/ruff/rules/#tryceratops-try - "TRY", - - # flynt (FLY) - # https://docs.astral.sh/ruff/rules/#flynt-fly - "FLY", - - # Perflint (PERF) - # https://docs.astral.sh/ruff/rules/#perflint-perf - "PERF", - - # Ruff-specific rules (RUF) - # https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf - "RUF", -] diff --git a/docs/src/_static/theme_override.css b/docs/src/_static/theme_override.css index d7ac2af3fc..e524f119af 100644 --- a/docs/src/_static/theme_override.css +++ b/docs/src/_static/theme_override.css @@ -49,4 +49,4 @@ ul.squarelist { .center { text-align: center; -} +} \ No newline at end of file diff --git a/docs/src/developers_guide/contributing_documentation.rst b/docs/src/developers_guide/contributing_documentation.rst index e289b1548d..631e479f62 100644 --- a/docs/src/developers_guide/contributing_documentation.rst +++ b/docs/src/developers_guide/contributing_documentation.rst @@ -18,4 +18,3 @@ If you're not then we've got a step-by-step guide here to walk you through it: contributing_documentation_easy contributing_documentation_full - \ No newline at end of file diff --git a/docs/src/developers_guide/contributing_pytest_conversions.rst b/docs/src/developers_guide/contributing_pytest_conversions.rst index 842b57b205..35724033bf 100644 --- a/docs/src/developers_guide/contributing_pytest_conversions.rst +++ b/docs/src/developers_guide/contributing_pytest_conversions.rst @@ -36,8 +36,7 @@ Conversion Checklist .. code-block:: python @pytest.fixture(autouse=True) - def _setup(self): - ... + def _setup(self): ... #. Check for references to ``super()``. Most test classes used to inherit from :class:`iris.tests.IrisTest`, so references to this should be removed. Any @@ -48,19 +47,18 @@ Conversion Checklist class TestFoo: @pytest.fixture(autouse=True) - def _setup_foo(self): - ... + def _setup_foo(self): ... + class TestBar(TestFoo): @pytest.fixture(autouse=True) - def _setup(self, _setup_foo): - ... + def _setup(self, _setup_foo): ... #. Check for references to ``@tests``. These should be changed to ``@_shared_utils``. #. Check for ``mock.patch("warnings.warn")``. This can be replaced with ``pytest.warns(match=message)``. #. Check for references to ``mock`` or ``self.patch``. These should be changed to use - the ``mocker`` fixture - see the `pytest-mock docs`_. Note that pytest-mock's + the ``mocker`` fixture - see the `pytest-mock docs`_. Note that pytest-mock's ``patch`` does not support the context-manager syntax; in most cases this is made unnecessary (see `Usage as context manager`_), in advanced cases consider using the `monkeypatch`_ fixture to provide a context-manager. @@ -73,8 +71,8 @@ Conversion Checklist fixture. #. Check for ``if __name__ == 'main'``. This is no longer needed with pytest. #. Remove the top-level import of :mod:`iris.tests` (usually ``import iris.tests as tests``). - Having followed the above steps, any remaining calls - (e.g. 
:func:`iris.tests.get_data_path`) should be easily replacable with calls to + Having followed the above steps, any remaining calls + (e.g. :func:`iris.tests.get_data_path`) should be easily replaceable with calls to :mod:`iris.tests._shared_utils` (e.g. :func:`iris.tests._shared_utils.get_data_path`). #. Ensure that all test classes start with ``Test``. Tests will not run in pytest without it. #. Check the file against https://github.com/astral-sh/ruff , using ``pip install ruff`` -> diff --git a/docs/src/developers_guide/contributing_tests.rst b/docs/src/developers_guide/contributing_tests.rst index 896d8e05ac..2ca6e4d8d8 100644 --- a/docs/src/developers_guide/contributing_tests.rst +++ b/docs/src/developers_guide/contributing_tests.rst @@ -147,23 +147,19 @@ Within that file the tests might look something like: .. code-block:: python # A single test for the Cube.xml() method. - def test_xml_some_general_stuff(self): - ... + def test_xml_some_general_stuff(self): ... # A single test for the Cube.xml() method, focussing on the behaviour of # the checksums. - def test_xml_checksum_ignores_masked_values(self): - ... + def test_xml_checksum_ignores_masked_values(self): ... # Tests for the Cube.add_dim_coord() method. class Test_add_dim_coord: - def test_normal_usage(self): - ... + def test_normal_usage(self): ... - def test_coord_already_present(self): - ... + def test_coord_already_present(self): ... When testing functions, within the test module there may be test classes, for example: @@ -187,16 +183,13 @@ Within that file the tests might look something like: # Tests focussing on the handling of different data types. class TestDtypeAndValues: - def test_int16(self): - ... + def test_int16(self): ... - def test_int16_big_endian(self): - ... + def test_int16_big_endian(self): ... # Tests focussing on the handling of different projections. - def test_no_ellipsoid(self): - ... + def test_no_ellipsoid(self): ... There is no fixed naming scheme for integration tests. @@ -213,15 +206,15 @@ module, please include the module in each call, i.e. from iris.tests import _shared_utils - _shared_utils.assert_...() + _shared_utils.assert_CDL() as opposed to: .. code-block:: python - from iris.tests._shared_utils import assert_... + from iris.tests._shared_utils import assert_CDL - assert_...() + assert_CDL() .. note:: diff --git a/docs/src/developers_guide/documenting/whats_new_contributions.rst b/docs/src/developers_guide/documenting/whats_new_contributions.rst index a6b7d13148..10fa72cb8d 100644 --- a/docs/src/developers_guide/documenting/whats_new_contributions.rst +++ b/docs/src/developers_guide/documenting/whats_new_contributions.rst @@ -155,4 +155,3 @@ users. To achieve this several categories may be used. **💼 Internal** Changes to any internal or development related topics, such as testing, environment dependencies etc. - diff --git a/docs/src/user_manual/explanation/iris_cubes.rst b/docs/src/user_manual/explanation/iris_cubes.rst index 4b615ba21e..b4708abcd3 100644 --- a/docs/src/user_manual/explanation/iris_cubes.rst +++ b/docs/src/user_manual/explanation/iris_cubes.rst @@ -183,9 +183,10 @@ printing a real life cube: :hide: import iris - filename = iris.sample_data_path('uk_hires.pp') + + filename = iris.sample_data_path("uk_hires.pp") # NOTE: Every time the output of this cube changes, the full list of deductions below should be re-assessed. - print(iris.load_cube(filename, 'air_potential_temperature')) + print(iris.load_cube(filename, "air_potential_temperature")) .. 
testoutput:: diff --git a/docs/src/user_manual/explanation/iris_xarray.rst b/docs/src/user_manual/explanation/iris_xarray.rst index 886c556757..aebd65288c 100644 --- a/docs/src/user_manual/explanation/iris_xarray.rst +++ b/docs/src/user_manual/explanation/iris_xarray.rst @@ -55,6 +55,7 @@ For example : .. code-block:: python from ncdata.iris_xarray import cubes_from_xarray, cubes_to_xarray + cubes = cubes_from_xarray(dataset) xrds = cubes_to_xarray(cubes) diff --git a/docs/src/user_manual/explanation/lenient_maths.rst b/docs/src/user_manual/explanation/lenient_maths.rst index bf297e7e58..1dd96519ae 100644 --- a/docs/src/user_manual/explanation/lenient_maths.rst +++ b/docs/src/user_manual/explanation/lenient_maths.rst @@ -58,10 +58,19 @@ Lenient Example import iris from iris.common import LENIENT - experiment = iris.load_cube(iris.sample_data_path("hybrid_height.nc"), "air_potential_temperature") + + experiment = iris.load_cube( + iris.sample_data_path("hybrid_height.nc"), "air_potential_temperature" + ) control = experiment[0] control.remove_aux_factory(control.aux_factory()) - for coord in ["sigma", "forecast_reference_time", "forecast_period", "atmosphere_hybrid_height_coordinate", "surface_altitude"]: + for coord in [ + "sigma", + "forecast_reference_time", + "forecast_period", + "atmosphere_hybrid_height_coordinate", + "surface_altitude", + ]: control.remove_coord(coord) control.attributes["Conventions"] = "CF-1.7" experiment.attributes["experiment-id"] = "RT3 50" diff --git a/docs/src/user_manual/explanation/lenient_metadata.rst b/docs/src/user_manual/explanation/lenient_metadata.rst index 7e1b6b26e9..f1bcb7e07f 100644 --- a/docs/src/user_manual/explanation/lenient_metadata.rst +++ b/docs/src/user_manual/explanation/lenient_metadata.rst @@ -38,6 +38,7 @@ Strict Behaviour .. testsetup:: strict-behaviour import iris + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) latitude = cube.coord("latitude") @@ -148,6 +149,7 @@ Lenient Behaviour .. testsetup:: lenient-behaviour import iris + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) latitude = cube.coord("latitude") diff --git a/docs/src/user_manual/explanation/mesh_data_model.rst b/docs/src/user_manual/explanation/mesh_data_model.rst index bbcfd05f64..a31751fa01 100644 --- a/docs/src/user_manual/explanation/mesh_data_model.rst +++ b/docs/src/user_manual/explanation/mesh_data_model.rst @@ -341,15 +341,15 @@ the :class:`~iris.cube.Cube`\'s unstructured dimension. from iris.mesh import Connectivity, MeshXY node_x = AuxCoord( - points=[0.0, 5.0, 0.0, 5.0, 8.0], - standard_name="longitude", - units="degrees_east", - ) + points=[0.0, 5.0, 0.0, 5.0, 8.0], + standard_name="longitude", + units="degrees_east", + ) node_y = AuxCoord( - points=[3.0, 3.0, 0.0, 0.0, 0.0], - standard_name="latitude", - units="degrees_north", - ) + points=[3.0, 3.0, 0.0, 0.0, 0.0], + standard_name="latitude", + units="degrees_north", + ) edge_node_c = Connectivity( indices=[[0, 1], [0, 2], [1, 3], [1, 4], [2, 3], [3, 4]], @@ -357,9 +357,8 @@ the :class:`~iris.cube.Cube`\'s unstructured dimension. ) face_indices = np.ma.masked_equal([[0, 1, 3, 2], [1, 4, 3, 999]], 999) - face_node_c = Connectivity( - indices=face_indices, cf_role="face_node_connectivity" - ) + face_node_c = Connectivity(indices=face_indices, cf_role="face_node_connectivity") + def centre_coords(conn): indexing = np.ma.filled(conn.indices, 0) @@ -373,6 +372,7 @@ the :class:`~iris.cube.Cube`\'s unstructured dimension. 
] return [(x, "x"), (y, "y")] + my_mesh = MeshXY( long_name="my_mesh", topology_dimension=2, @@ -384,19 +384,21 @@ the :class:`~iris.cube.Cube`\'s unstructured dimension. vertical_levels = DimCoord([0, 1, 2], "height") + def location_cube(conn): - location = conn.location - mesh_coord_x, mesh_coord_y = my_mesh.to_MeshCoords(location) - data_shape = (conn.shape[conn.location_axis], len(vertical_levels.points)) - data_array = np.arange(np.prod(data_shape)).reshape(data_shape) - - return Cube( - data=data_array, - long_name=f"{location}_data", - units="K", - dim_coords_and_dims=[(vertical_levels, 1)], - aux_coords_and_dims=[(mesh_coord_x, 0), (mesh_coord_y, 0)], - ) + location = conn.location + mesh_coord_x, mesh_coord_y = my_mesh.to_MeshCoords(location) + data_shape = (conn.shape[conn.location_axis], len(vertical_levels.points)) + data_array = np.arange(np.prod(data_shape)).reshape(data_shape) + + return Cube( + data=data_array, + long_name=f"{location}_data", + units="K", + dim_coords_and_dims=[(vertical_levels, 1)], + aux_coords_and_dims=[(mesh_coord_x, 0), (mesh_coord_y, 0)], + ) + edge_cube = location_cube(edge_node_c) face_cube = location_cube(face_node_c) @@ -539,6 +541,7 @@ given only the ``location`` argument >>> for coord in edge_cube.coords(mesh_coords=True): ... print(coord) + ... MeshCoord : latitude / (degrees_north) mesh: location: 'edge' @@ -571,4 +574,4 @@ given only the ``location`` argument axis: 'x' -__ CF-UGRID_ \ No newline at end of file +__ CF-UGRID_ diff --git a/docs/src/user_manual/explanation/metadata.rst b/docs/src/user_manual/explanation/metadata.rst index 589df672b4..476bde3f3a 100644 --- a/docs/src/user_manual/explanation/metadata.rst +++ b/docs/src/user_manual/explanation/metadata.rst @@ -112,6 +112,7 @@ Common Metadata API .. testsetup:: import iris + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) As of Iris ``3.0.0``, a unified treatment of metadata has been applied @@ -327,6 +328,7 @@ Richer Metadata Behaviour import iris import numpy as np from iris.common import CoordMetadata + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) longitude = cube.coord("longitude") @@ -393,7 +395,7 @@ different value, then the result of the operation will be ``False``. For example .. doctest:: richer-metadata >>> longitude.attributes = {"grinning face": "🙂"} - >>> other = longitude.metadata._replace(attributes={"grinning face": "🙃"}) + >>> other = longitude.metadata._replace(attributes={"grinning face": "🙃"}) >>> other DimCoordMetadata(standard_name='longitude', long_name=None, var_name='longitude', units=Unit('degrees'), attributes={'grinning face': '🙃'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False) >>> longitude.metadata == other @@ -435,7 +437,9 @@ However, metadata class equality is rich enough to handle this eventuality, >>> metadata1 CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes={'one': np.int32(1), 'two': array([1., 2.])}, cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),)) - >>> metadata2 = cube.metadata._replace(attributes={"one": np.int32(1), "two": np.array([1000.0, 2000.0])}) + >>> metadata2 = cube.metadata._replace( + ... attributes={"one": np.int32(1), "two": np.array([1000.0, 2000.0])} + ... 
) >>> metadata2 CubeMetadata(standard_name='air_temperature', long_name=None, var_name='air_temperature', units=Unit('K'), attributes={'one': np.int32(1), 'two': array([1000., 2000.])}, cell_methods=(CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),)) >>> metadata1 == metadata2 @@ -543,7 +547,9 @@ different values, .. doctest:: richer-metadata >>> from cf_units import Unit - >>> metadata = longitude.metadata._replace(long_name="lon", var_name="lon", units=Unit("radians")) + >>> metadata = longitude.metadata._replace( + ... long_name="lon", var_name="lon", units=Unit("radians") + ... ) >>> metadata DimCoordMetadata(standard_name='longitude', long_name='lon', var_name='lon', units=Unit('radians'), attributes={'grinning face': '🙂'}, coord_system=GeogCS(6371229.0), climatological=False, circular=False) @@ -679,6 +685,7 @@ Metadata Combination .. testsetup:: metadata-combine import iris + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) longitude = cube.coord("longitude") @@ -735,7 +742,7 @@ Let's reinforce this behaviour, but this time by combining metadata where the .. doctest:: metadata-combine - >>> attributes = {"Model scenario": "A1B", "Conventions": "CF-1.8", "grinning face": "🙂" } + >>> attributes = {"Model scenario": "A1B", "Conventions": "CF-1.8", "grinning face": "🙂"} >>> metadata = cube.metadata._replace(attributes=attributes) >>> metadata != cube.metadata True @@ -813,6 +820,7 @@ Metadata Conversion import iris from iris.common import DimCoordMetadata + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) longitude = cube.coord("longitude") @@ -878,6 +886,7 @@ Metadata Assignment .. testsetup:: metadata-assign import iris + cube = iris.load_cube(iris.sample_data_path("A1B_north_america.nc")) longitude = cube.coord("longitude") original = longitude.copy() @@ -932,7 +941,19 @@ namedtuple class, .. doctest:: metadata-assign >>> from collections import namedtuple - >>> Metadata = namedtuple("Metadata", ["standard_name", "long_name", "var_name", "units", "attributes", "coord_system", "climatological", "circular"]) + >>> Metadata = namedtuple( + ... "Metadata", + ... [ + ... "standard_name", + ... "long_name", + ... "var_name", + ... "units", + ... "attributes", + ... "coord_system", + ... "climatological", + ... "circular", + ... ], + ... ) Now create an instance of this custom namedtuple class, and populate it, diff --git a/docs/src/user_manual/explanation/netcdf_io.rst b/docs/src/user_manual/explanation/netcdf_io.rst index d3fbf00b10..bffc16bc2e 100644 --- a/docs/src/user_manual/explanation/netcdf_io.rst +++ b/docs/src/user_manual/explanation/netcdf_io.rst @@ -19,12 +19,12 @@ cube = iris.load(iris.sample_data_path("E1_north_america.nc"))[0] iris.save(cube, tmp_filepath, chunksizes=(120, 37, 49)) old_dask = dask.config.get("array.chunk-size") - dask.config.set({'array.chunk-size': '500KiB'}) + dask.config.set({"array.chunk-size": "500KiB"}) .. testcleanup:: chunk_control - dask.config.set({'array.chunk-size': old_dask}) + dask.config.set({"array.chunk-size": old_dask}) shutil.rmtree(tmp_dir) .. _netcdf_io: @@ -82,6 +82,7 @@ as the shape, i.e. no optimisation occurs on that dimension. >>> with CHUNK_CONTROL.set("air_temperature", time=180, latitude=-1, longitude=25): ... cube = iris.load_cube(tmp_filepath) + ... >>> >>> print(cube.core_data().chunksize) (180, 37, 25) @@ -93,6 +94,7 @@ specify only one dimension, the rest will be optimised using Iris' default behav >>> with CHUNK_CONTROL.set(longitude=25): ... 
cube = iris.load_cube(tmp_filepath) + ... >>> >>> print(cube.core_data().chunksize) (120, 37, 25) @@ -108,6 +110,7 @@ will default to Iris optimisation. >>> with CHUNK_CONTROL.from_file(): ... cube = iris.load_cube(tmp_filepath) + ... >>> >>> print(cube.core_data().chunksize) (120, 37, 49) @@ -121,7 +124,8 @@ Iris' optimisation all together, and will take its chunksizes from Dask's behavi .. doctest:: chunk_control >>> with CHUNK_CONTROL.as_dask(): - ... cube = iris.load_cube(tmp_filepath) + ... cube = iris.load_cube(tmp_filepath) + ... >>> >>> print(cube.core_data().chunksize) (70, 37, 49) @@ -160,7 +164,7 @@ This allows Iris to make a more informed decision on whether to load the data lazily. For example, consider a netCDF file with an auxiliary coordinate -``experiment_version`` that is stored as a variable-length string type. By +``experiment_version`` that is stored as a variable-length string type. By default, Iris will attempt to guess the total array size based on the known dimension sizes (``time=150`` in this example) and load the data lazily. However, if it is known prior to loading the file that the strings are all no @@ -174,11 +178,12 @@ loader so it can be make a more informed decision on lazy loading: >>> >>> sample_file = iris.sample_data_path("vlstr_type.nc") >>> cube = iris.load_cube(sample_file) - >>> print(cube.coord('experiment_version').has_lazy_points()) + >>> print(cube.coord("experiment_version").has_lazy_points()) True >>> with CHUNK_CONTROL.set("expver", _vl_hint=5): ... cube = iris.load_cube(sample_file) - >>> print(cube.coord('experiment_version').has_lazy_points()) + ... + >>> print(cube.coord("experiment_version").has_lazy_points()) False @@ -243,9 +248,9 @@ Worked example: >>> from iris.coords import DimCoord >>> from iris.util import guess_coord_axis >>> my_coord = DimCoord( - ... points=[1000, 1010, 1020], - ... long_name="pressure_threshold", - ... units="hPa", + ... points=[1000, 1010, 1020], + ... long_name="pressure_threshold", + ... units="hPa", ... ) >>> print(guess_coord_axis(my_coord)) Z @@ -320,11 +325,11 @@ defined on an *OSGB Transverse Mercator grid*: .. code-block:: text - float pres(y, x) ; - pres:standard_name = "air_pressure" ; - pres:units = "Pa" ; - pres:coordinates = "lat lon" ; - pres:grid_mapping = "crsOSGB: x y crsWGS84: lat lon" ; + float press(y, x) ; + press:standard_name = "air_pressure" ; + press:units = "Pa" ; + press:coordinates = "lat lon" ; + press:grid_mapping = "crsOSGB: x y crsWGS84: lat lon" ; double x(x) ; x:standard_name = "projection_x_coordinate" ; @@ -363,7 +368,7 @@ and associate it with the auxiliary ``lat`` and ``lon`` coordinates: :: - pres:grid_mapping = "crsOSGB: x y crsWGS84: lat lon" ; + press:grid_mapping = "crsOSGB: x y crsWGS84: lat lon" ; Note, the *order* of the axes in the extended grid mapping specification is @@ -408,4 +413,4 @@ extended grid mapping is also written, i.e. when ``Cube.extended_grid_mapping=True``. -.. _CRS Well Known Text (WKT): https://cfconventions.org/Data/cf-conventions/cf-conventions-1.12/cf-conventions.html#use-of-the-crs-well-known-text-format \ No newline at end of file +.. 
_CRS Well Known Text (WKT): https://cfconventions.org/Data/cf-conventions/cf-conventions-1.12/cf-conventions.html#use-of-the-crs-well-known-text-format diff --git a/docs/src/user_manual/explanation/real_and_lazy_data.rst b/docs/src/user_manual/explanation/real_and_lazy_data.rst index 275c870252..52ea04ae3a 100644 --- a/docs/src/user_manual/explanation/real_and_lazy_data.rst +++ b/docs/src/user_manual/explanation/real_and_lazy_data.rst @@ -193,9 +193,9 @@ coordinates' lazy points and bounds: .. doctest:: - >>> cube = iris.load_cube(iris.sample_data_path('orca2_votemper.nc'),'votemper') + >>> cube = iris.load_cube(iris.sample_data_path("orca2_votemper.nc"), "votemper") - >>> dim_coord = cube.coord('depth') + >>> dim_coord = cube.coord("depth") >>> print(dim_coord.has_lazy_points()) False >>> print(dim_coord.has_bounds()) @@ -203,7 +203,7 @@ coordinates' lazy points and bounds: >>> print(dim_coord.has_lazy_bounds()) False - >>> aux_coord = cube.coord('longitude') + >>> aux_coord = cube.coord("longitude") >>> print(aux_coord.has_lazy_points()) True >>> print(aux_coord.has_bounds()) @@ -219,8 +219,10 @@ coordinates' lazy points and bounds: True # Fetch a derived coordinate, from a different file: These can also have lazy data. - >>> cube2 = iris.load_cube(iris.sample_data_path('hybrid_height.nc'), 'air_potential_temperature') - >>> derived_coord = cube2.coord('altitude') + >>> cube2 = iris.load_cube( + ... iris.sample_data_path("hybrid_height.nc"), "air_potential_temperature" + ... ) + >>> derived_coord = cube2.coord("altitude") >>> print(derived_coord.has_lazy_points()) True >>> print(derived_coord.has_bounds()) diff --git a/docs/src/user_manual/explanation/um_files_loading.rst b/docs/src/user_manual/explanation/um_files_loading.rst index 8c6718805a..703ff1218b 100644 --- a/docs/src/user_manual/explanation/um_files_loading.rst +++ b/docs/src/user_manual/explanation/um_files_loading.rst @@ -8,6 +8,7 @@ import numpy as np import iris import iris.fileformats.pp + np.set_printoptions(precision=2) @@ -151,14 +152,14 @@ For example: ... fname = iris.sample_data_path('air_temp.pp') >>> fields_iter = iris.fileformats.pp.load(fname) >>> field = next(fields_iter) - >>> + >>> >>> # Show grid details and first 5 longitude values. >>> print(' '.join(str(_) for _ in (field.lbcode, field.lbnpt, field.bzx, ... field.bdx))) 1 96 -3.749999 3.749999 >>> print(field.bzx + field.bdx * np.arange(1, 6)) [ 0. 3.75 7.5 11.25 15. ] - >>> + >>> >>> # Show Iris equivalent information. ... cube = iris.load_cube(fname) >>> print(cube.coord('longitude').points[:5]) @@ -207,8 +208,8 @@ For example: 16203 >>> print(field.lbuser[6]) 1 - >>> - >>> + >>> + >>> >>> # Show Iris equivalents. >>> print(cube.standard_name) air_temperature @@ -309,7 +310,7 @@ For hybrid height levels (LBVC=65): multidimensional or non-monotonic. See an example printout of a hybrid height cube, -:ref:`here `. Notice that this contains all of the +:ref:`here `. Notice that this contains all of the above coordinates -- ``model_level_number``, ``sigma``, ``level_height`` and the derived ``altitude``. @@ -396,7 +397,7 @@ See an example printout of a forecast data cube, :ref:`here `. Notice that this example contains all of the above coordinates -- ``time``, ``forecast_period`` and ``forecast_reference_time``. In this case the data are forecasts, so ``time`` -is a dimension, ``forecast_period``` varies with time and +is a dimension, ``forecast_period``` varies with time and ``forecast_reference_time`` is a constant. 
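The relationship just described can be checked directly on a loaded cube. A minimal sketch, assuming the ``uk_hires.pp`` sample file used elsewhere in this guide:

.. code-block:: python

    import iris

    # uk_hires.pp holds forecast data with several validity times
    # from a single model run.
    cube = iris.load_cube(
        iris.sample_data_path("uk_hires.pp"), "air_potential_temperature"
    )

    # 'time' spans a cube dimension, 'forecast_period' varies along it,
    # and 'forecast_reference_time' is a single scalar coordinate.
    print(cube.coord("time").points)
    print(cube.coord("forecast_period").points)
    print(cube.coord("forecast_reference_time").points)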
@@ -441,7 +442,7 @@ For example: 622 >>> print(eg_field.lbproc) 128 - >>> + >>> >>> # Print out the Iris equivalent information. >>> print(iris.load_cube(fname).cell_methods) (CellMethod(method='mean', coord_names=('time',), intervals=('6 hour',), comments=()),) diff --git a/docs/src/user_manual/how_to/filtering_warnings.rst b/docs/src/user_manual/how_to/filtering_warnings.rst index d2217f326e..57720ed34d 100644 --- a/docs/src/user_manual/how_to/filtering_warnings.rst +++ b/docs/src/user_manual/how_to/filtering_warnings.rst @@ -29,10 +29,14 @@ Find out more about *why* we chose this approach: :ref:`filtering-warnings-expla # they have a relative path (so a test pass is not machine-dependent). warnings.filterwarnings("default") IRIS_FILE = Path(iris.__file__) + + def custom_warn(message, category, filename, lineno, file=None, line=None): filepath = Path(filename) filename = str(filepath.relative_to(IRIS_FILE.parents[1])) sys.stdout.write(warnings.formatwarning(message, category, filename, lineno)) + + warnings.showwarning = custom_warn geog_cs_globe = iris.coord_systems.GeogCS(6400000) @@ -53,7 +57,6 @@ Warnings: .. doctest:: filtering_warnings >>> my_operation() - ... iris/coord_systems.py:451: IrisUserWarning: Setting inverse_flattening does not affect other properties of the GeogCS object. To change other properties set them explicitly or create a new GeogCS instance. warnings.warn(wmsg, category=iris.warnings.IrisUserWarning) iris/coord_systems.py:777: IrisDefaultingWarning: Discarding false_easting and false_northing that are not used by Cartopy. @@ -156,6 +159,7 @@ E.g. filtering the ``coord_systems`` module: >>> with warnings.catch_warnings(): ... warnings.filterwarnings("ignore", module="iris.coord_systems") ... my_operation() + ... :: @@ -173,6 +177,7 @@ whole. >>> with warnings.catch_warnings(): ... warnings.filterwarnings("ignore", module="iris") ... my_operation() + ... The above 'partial' filter is not available with the command line approaches. @@ -189,10 +194,7 @@ module during execution: .. doctest:: filtering_warnings >>> with warnings.catch_warnings(): - ... warnings.filterwarnings( - ... "ignore", - ... category=iris.warnings.IrisDefaultingWarning - ... ) + ... warnings.filterwarnings("ignore", category=iris.warnings.IrisDefaultingWarning) ... my_operation() ... iris/coord_systems.py:451: IrisUserWarning: Setting inverse_flattening does not affect other properties of the GeogCS object. To change other properties set them explicitly or create a new GeogCS instance. @@ -207,11 +209,9 @@ both Warnings, since :class:`~iris.warnings.IrisDefaultingWarning` subclasses .. doctest:: filtering_warnings >>> with warnings.catch_warnings(): - ... warnings.filterwarnings( - ... "ignore", - ... category=iris.warnings.IrisUserWarning - ... ) + ... warnings.filterwarnings("ignore", category=iris.warnings.IrisUserWarning) ... my_operation() + ... ---- diff --git a/docs/src/user_manual/how_to/mesh_conversions.rst b/docs/src/user_manual/how_to/mesh_conversions.rst index c465f9e9da..46afb1b5bd 100644 --- a/docs/src/user_manual/how_to/mesh_conversions.rst +++ b/docs/src/user_manual/how_to/mesh_conversions.rst @@ -35,6 +35,8 @@ as the **nodes** when creating the Iris .. dropdown:: Code :icon: code + .. blacken-docs:off + .. code-block:: python >>> import iris @@ -104,6 +106,8 @@ as the **nodes** when creating the Iris shape(126859,)> shape(126859,)> + .. blacken-docs:on + `WAVEWATCH III`_ Spherical Multi-Cell (SMC) WAVE Quad Grid ---------------------------------------------------------- .. 
figure:: images/smc_mesh.png @@ -124,6 +128,8 @@ as the **nodes** when creating the Iris .. dropdown:: Code :icon: code + .. blacken-docs:off + .. code-block:: python >>> import iris @@ -228,6 +234,8 @@ as the **nodes** when creating the Iris + .. blacken-docs:on + .. _ORCA_example: @@ -264,6 +272,8 @@ dimensions into a single mesh dimension. Since Iris cubes don't support a "resh .. dropdown:: Code :icon: code + .. blacken-docs:off + .. code-block:: python >>> import numpy as np @@ -363,6 +373,8 @@ dimensions into a single mesh dimension. Since Iris cubes don't support a "resh Attributes: Conventions 'CF-1.5' + .. blacken-docs:on + .. _WAVEWATCH III: https://github.com/NOAA-EMC/WW3 .. _FESOM 1.4: https://fesom.de/models/fesom14/ diff --git a/docs/src/user_manual/how_to/mesh_operations.rst b/docs/src/user_manual/how_to/mesh_operations.rst index 34cd650b91..28150fd769 100644 --- a/docs/src/user_manual/how_to/mesh_operations.rst +++ b/docs/src/user_manual/how_to/mesh_operations.rst @@ -103,9 +103,7 @@ subsequent example operations on this page. ... ] >>> face_indices = np.ma.masked_equal([[0, 1, 3, 2], [1, 4, 3, 999]], 999) - >>> face_node_c = Connectivity( - ... indices=face_indices, cf_role="face_node_connectivity" - ... ) + >>> face_node_c = Connectivity(indices=face_indices, cf_role="face_node_connectivity") >>> my_mesh = MeshXY( ... long_name="my_mesh", @@ -165,20 +163,20 @@ Creating a :class:`~iris.cube.Cube` is unchanged; the >>> my_cubelist = CubeList() >>> for conn in (edge_node_c, face_node_c): - ... location = conn.location - ... mesh_coord_x, mesh_coord_y = my_mesh.to_MeshCoords(location) - ... data_shape = (len(conn.indices_by_location()), len(vertical_levels.points)) - ... data_array = np.arange(np.prod(data_shape)).reshape(data_shape) + ... location = conn.location + ... mesh_coord_x, mesh_coord_y = my_mesh.to_MeshCoords(location) + ... data_shape = (len(conn.indices_by_location()), len(vertical_levels.points)) + ... data_array = np.arange(np.prod(data_shape)).reshape(data_shape) + ... my_cubelist.append( + ... Cube( + ... data=data_array, + ... long_name=f"{location}_data", + ... units="K", + ... dim_coords_and_dims=[(vertical_levels, 1)], + ... aux_coords_and_dims=[(mesh_coord_x, 0), (mesh_coord_y, 0)], + ... ) + ... ) ... - ... my_cubelist.append( - ... Cube( - ... data=data_array, - ... long_name=f"{location}_data", - ... units="K", - ... dim_coords_and_dims=[(vertical_levels, 1)], - ... aux_coords_and_dims=[(mesh_coord_x, 0), (mesh_coord_y, 0)], - ... ) - ... ) >>> print(my_cubelist) 0: edge_data / (K) (-- : 6; height: 3) @@ -186,6 +184,7 @@ Creating a :class:`~iris.cube.Cube` is unchanged; the >>> for cube in my_cubelist: ... print(f"{cube.name()}: {cube.mesh.name()}, {cube.location}") + ... edge_data: my_mesh, edge face_data: my_mesh, face @@ -484,6 +483,8 @@ GeoVista :external+geovista:doc:`generated/gallery/index`. .. dropdown:: Code :icon: code + .. blacken-docs:off + .. code-block:: python >>> from geovista import GeoPlotter, Transform @@ -525,6 +526,8 @@ GeoVista :external+geovista:doc:`generated/gallery/index`. >>> my_plotter.add_mesh(face_polydata) >>> my_plotter.show() + .. blacken-docs:on + .. image:: images/plotting.png :alt: A GeoVista plot of low-res sample data. @@ -659,6 +662,8 @@ with the .. dropdown:: Code :icon: code + .. blacken-docs:off + .. code-block:: python >>> from esmf_regrid.experimental.unstructured_scheme import MeshToGridESMFRegridder @@ -735,6 +740,8 @@ with the title Created by xios uuid 489bcef5-3d1c-4529-be42-4ab5f8c8497b + .. 
blacken-docs:on + .. note:: **All** :class:`~iris.cube.Cube` :attr:`~iris.cube.Cube.attributes` are @@ -754,6 +761,8 @@ previously initialised regridder: .. dropdown:: Code :icon: code + .. blacken-docs:off + .. code-block:: python # Extract a different cube defined on the same Mesh. @@ -802,6 +811,8 @@ previously initialised regridder: title Created by xios uuid 489bcef5-3d1c-4529-be42-4ab5f8c8497b + .. blacken-docs:on + Support also exists for saving and loading previously initialised regridders - :func:`esmf_regrid.experimental.io.save_regridder` and :func:`~esmf_regrid.experimental.io.load_regridder` - so that they can be diff --git a/docs/src/user_manual/how_to/navigating_a_cube.rst b/docs/src/user_manual/how_to/navigating_a_cube.rst index 2e4f3c0ca9..026766aea9 100644 --- a/docs/src/user_manual/how_to/navigating_a_cube.rst +++ b/docs/src/user_manual/how_to/navigating_a_cube.rst @@ -10,14 +10,15 @@ Navigating a Cube .. testsetup:: import iris - filename = iris.sample_data_path('rotated_pole.nc') + + filename = iris.sample_data_path("rotated_pole.nc") # pot_temp = iris.load_cube(filename, 'air_potential_temperature') cube = iris.load_cube(filename) coord_names = [coord.name() for coord in cube.coords()] - coord = cube.coord('grid_latitude') + coord = cube.coord("grid_latitude") -After loading any cube, you will want to investigate precisely what it contains. This section is all about accessing +After loading any cube, you will want to investigate precisely what it contains. This section is all about accessing and manipulating the metadata contained within a cube. Cube String Representations @@ -43,15 +44,15 @@ We have already seen a basic string representation of a cube when printing: source 'Data from Met Office Unified Model 6.01' -This representation is equivalent to passing the cube to the :func:`str` function. This function can be used on -any Python variable to get a string representation of that variable. +This representation is equivalent to passing the cube to the :func:`str` function. This function can be used on +any Python variable to get a string representation of that variable. Similarly there exist other standard functions for interrogating your variable: :func:`repr`, :func:`type` for example:: print(str(cube)) print(repr(cube)) print(type(cube)) -Other, more verbose, functions also exist which give information on **what** you can do with *any* given +Other, more verbose, functions also exist which give information on **what** you can do with *any* given variable. In most cases it is reasonable to ignore anything starting with a "``_``" (underscore) or a "``__``" (double underscore):: dir(cube) @@ -60,39 +61,39 @@ variable. In most cases it is reasonable to ignore anything starting with a "``_ Working With Cubes ------------------ -Every cube has a standard name, long name and units which are accessed with +Every cube has a standard name, long name and units which are accessed with :attr:`Cube.standard_name `, -:attr:`Cube.long_name ` +:attr:`Cube.long_name ` and :attr:`Cube.units ` respectively:: print(cube.standard_name) print(cube.long_name) print(cube.units) - -Interrogating these with the standard :func:`type` function will tell you that ``standard_name`` and ``long_name`` + +Interrogating these with the standard :func:`type` function will tell you that ``standard_name`` and ``long_name`` are either a string or ``None``, and ``units`` is an instance of :class:`iris.unit.Unit`. 
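For instance, a quick way to confirm those types on the cube loaded above (a minimal sketch; ``cube`` is the rotated-pole cube from the examples on this page, and the exact classes reported may vary between Iris versions)::

    # standard_name and long_name are plain strings (or None when unset).
    print(type(cube.standard_name))
    print(type(cube.long_name))
    # units is a Unit instance, supporting conversion and comparison.
    print(type(cube.units))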
A more in depth discussion on the cube units and their functional effects can be found at the end of :doc:`../tutorial/cube_maths`. You can access a string representing the "name" of a cube with the :meth:`Cube.name() ` method:: print(cube.name()) - + The result of which is **always** a string. -Each cube also has a :mod:`numpy` array which represents the phenomenon of the cube which can be accessed with the +Each cube also has a :mod:`numpy` array which represents the phenomenon of the cube which can be accessed with the :attr:`Cube.data ` attribute. As you can see the type is a :class:`numpy n-dimensional array `:: print(type(cube.data)) .. note:: - When loading from most file formats in Iris, the data itself is not loaded until the **first** time that the data is requested. - Hence you may have noticed that running the previous command for the first time takes a little longer than it does for + When loading from most file formats in Iris, the data itself is not loaded until the **first** time that the data is requested. + Hence you may have noticed that running the previous command for the first time takes a little longer than it does for subsequent calls. - For this reason, when you have a large cube it is strongly recommended that you do not access the cube's data unless - you need to. - For convenience :attr:`~iris.cube.Cube.shape` and :attr:`~iris.cube.Cube.ndim` attributes exists on a cube, which + For this reason, when you have a large cube it is strongly recommended that you do not access the cube's data unless + you need to. + For convenience :attr:`~iris.cube.Cube.shape` and :attr:`~iris.cube.Cube.ndim` attributes exists on a cube, which can tell you the shape of the cube's data without loading it:: print(cube.shape) @@ -110,7 +111,7 @@ As well as changing the value of the :attr:`~iris.cube.Cube.units` attribute thi :attr:`~iris.cube.Cube.data`. To replace the units without modifying the data values one can change the :attr:`~iris.cube.Cube.units` attribute directly. -Some cubes represent a processed phenomenon which are represented with cell methods, these can be accessed on a +Some cubes represent a processed phenomenon which are represented with cell methods, these can be accessed on a cube with the :attr:`Cube.cell_methods ` attribute:: print(cube.cell_methods) @@ -123,7 +124,7 @@ cube with the :attr:`Cube.cell_methods ` attribute: Accessing Coordinates on the Cube --------------------------------- -A cube's coordinates can be retrieved via :meth:`Cube.coords `. +A cube's coordinates can be retrieved via :meth:`Cube.coords `. A simple for loop over the coords can print a coordinate's :meth:`~iris.coords.Coord.name`:: for coord in cube.coords(): @@ -143,14 +144,14 @@ To get an individual coordinate given its name, the :meth:`Cube.coord `, +Every coordinate has a :attr:`Coord.standard_name `, :attr:`Coord.long_name `, and :attr:`Coord.units ` attribute:: print(coord.standard_name) print(coord.long_name) print(coord.units) -Additionally every coordinate can provide its :attr:`~iris.coords.Coord.points` and :attr:`~iris.coords.Coord.bounds` +Additionally every coordinate can provide its :attr:`~iris.coords.Coord.points` and :attr:`~iris.coords.Coord.bounds` numpy array. If the coordinate has no bounds ``None`` will be returned:: print(type(coord.points)) @@ -160,7 +161,7 @@ numpy array. 
If the coordinate has no bounds ``None`` will be returned:: Adding Metadata to a Cube ------------------------- -We can add and remove coordinates via :func:`Cube.add_dim_coord`, +We can add and remove coordinates via :func:`Cube.add_dim_coord`, :func:`Cube.add_aux_coord`, and :meth:`Cube.remove_coord `. @@ -198,17 +199,17 @@ This is often caused by one of the following: * The file does not contain enough metadata, and therefore the cube cannot know everything about the file. * Some of the metadata of the file is contained in the filename, but is not part of the actual file. -* There is not enough metadata loaded from the original file as Iris has not handled the format fully. *(in which case, +* There is not enough metadata loaded from the original file as Iris has not handled the format fully. *(in which case, please let us know about it)* -To solve this, all of :func:`iris.load`, :func:`iris.load_cube`, and :func:`iris.load_cubes` support a callback keyword. +To solve this, all of :func:`iris.load`, :func:`iris.load_cube`, and :func:`iris.load_cubes` support a callback keyword. -The callback is a user defined function which must have the calling sequence ``function(cube, field, filename)`` +The callback is a user defined function which must have the calling sequence ``function(cube, field, filename)`` which can make any modifications to the cube in-place, or alternatively return a completely new cube instance. -Suppose we wish to load a lagged ensemble dataset from the Met Office's GloSea4 model. -The data for this example represents 13 ensemble members of 6 one month timesteps; the logistics of the -model mean that the run is spread over several days. +Suppose we wish to load a lagged ensemble dataset from the Met Office's GloSea4 model. +The data for this example represents 13 ensemble members of 6 one month timesteps; the logistics of the +model mean that the run is spread over several days. If we try to load the data directly for ``surface_temperature``: @@ -221,7 +222,7 @@ If we try to load the data directly for ``surface_temperature``: -We get multiple cubes some with more dimensions than expected, some without a ``realization`` (i.e. ensemble member) dimension. +We get multiple cubes some with more dimensions than expected, some without a ``realization`` (i.e. ensemble member) dimension. In this case, two of the PP files have been encoded without the appropriate ``realization`` number attribute, which means that the appropriate coordinate cannot be added to the resultant cube. Fortunately, the missing attribute has been encoded in the filename which, given the filename, we could extract:: @@ -239,16 +240,20 @@ by field basis *before* they are automatically merged together: import iris import iris.coords as icoords + def lagged_ensemble_callback(cube, field, filename): # Add our own realization coordinate if it doesn't already exist. 
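        # The callback runs once per PP field, *before* the automatic merge,
        # so the coordinate is attached while each field is still a separate
        # cube and can then drive the merge into a single result cube.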
- if not cube.coords('realization'): + if not cube.coords("realization"): realization = np.int32(filename[-6:-3]) - ensemble_coord = icoords.AuxCoord(realization, standard_name='realization', units="1") + ensemble_coord = icoords.AuxCoord( + realization, standard_name="realization", units="1" + ) cube.add_aux_coord(ensemble_coord) - filename = iris.sample_data_path('GloSea4', '*.pp') - print(iris.load(filename, 'surface_temperature', callback=lagged_ensemble_callback)) + filename = iris.sample_data_path("GloSea4", "*.pp") + + print(iris.load(filename, "surface_temperature", callback=lagged_ensemble_callback)) The result is a single cube which represents the data in a form that was expected: diff --git a/docs/src/user_manual/index.rst b/docs/src/user_manual/index.rst index 0d57721609..d151675627 100644 --- a/docs/src/user_manual/index.rst +++ b/docs/src/user_manual/index.rst @@ -132,7 +132,7 @@ data, e.g. computing means, differences, etc. topic: ``regrid`` ^^^^^^^^^^^^^^^^^ -Pages about regridding (2D to 2D) and interpolation (ND to 1D) of data from one +Pages about regridding (2D to 2D) and interpolation (N-D to 1D) of data from one set of coordinates to another. Commonly used to move between different XY grids. .. diataxis-page-list:: topic_regrid diff --git a/docs/src/user_manual/section_indexes/userguide.rst b/docs/src/user_manual/section_indexes/userguide.rst index 799e751e59..0b8f6ab914 100644 --- a/docs/src/user_manual/section_indexes/userguide.rst +++ b/docs/src/user_manual/section_indexes/userguide.rst @@ -22,8 +22,8 @@ they may serve as a useful reference for future exploration. sequentially using the ``next`` and ``previous`` links at the bottom of each page. -.. note:: - +.. note:: + There is also useful learning material held in the https://github.com/scitools-classroom repo, including tutorials, courses and presentations. @@ -43,4 +43,3 @@ they may serve as a useful reference for future exploration. ../tutorial/merge_and_concat ../tutorial/cube_statistics ../tutorial/cube_maths - diff --git a/docs/src/user_manual/tutorial/cube_maths.rst b/docs/src/user_manual/tutorial/cube_maths.rst index 817b496686..65ef49bf13 100644 --- a/docs/src/user_manual/tutorial/cube_maths.rst +++ b/docs/src/user_manual/tutorial/cube_maths.rst @@ -50,8 +50,8 @@ We can now get the first and last time slices using indexing .. testsetup:: - filename = iris.sample_data_path('E1_north_america.nc') - air_temp = iris.load_cube(filename, 'air_temperature') + filename = iris.sample_data_path("E1_north_america.nc") + air_temp = iris.load_cube(filename, "air_temperature") t_first = air_temp[0, :, :] t_last = air_temp[-1, :, :] @@ -266,4 +266,3 @@ with a unit of ``'unknown'``, but the resulting cube will always have a unit of ``'unknown'``. If a calculation is prevented because it would result in inappropriate units, it may be forced by setting the units of the original cubes to be ``'unknown'``. - diff --git a/docs/src/user_manual/tutorial/cube_statistics.rst b/docs/src/user_manual/tutorial/cube_statistics.rst index e980fea407..e5764aa86e 100644 --- a/docs/src/user_manual/tutorial/cube_statistics.rst +++ b/docs/src/user_manual/tutorial/cube_statistics.rst @@ -22,12 +22,14 @@ Collapsing Entire Data Dimensions .. 
testsetup:: collapsing import iris - filename = iris.sample_data_path('uk_hires.pp') - cube = iris.load_cube(filename, 'air_potential_temperature') + + filename = iris.sample_data_path("uk_hires.pp") + cube = iris.load_cube(filename, "air_potential_temperature") import iris.analysis.cartography - cube.coord('grid_latitude').guess_bounds() - cube.coord('grid_longitude').guess_bounds() + + cube.coord("grid_latitude").guess_bounds() + cube.coord("grid_longitude").guess_bounds() grid_areas = iris.analysis.cartography.area_weights(cube) @@ -132,7 +134,9 @@ These areas can now be passed to the ``collapsed`` method as weights: .. doctest:: collapsing - >>> new_cube = cube.collapsed(['grid_longitude', 'grid_latitude'], iris.analysis.MEAN, weights=grid_areas) + >>> new_cube = cube.collapsed( + ... ["grid_longitude", "grid_latitude"], iris.analysis.MEAN, weights=grid_areas + ... ) >>> print(new_cube) air_potential_temperature / (K) (time: 3; model_level_number: 7) Dimension coordinates: @@ -171,15 +175,13 @@ the units of the resulting cube are multiplied by an area unit: >>> from iris.coords import CellMeasure >>> cell_areas = CellMeasure( ... grid_areas, - ... standard_name='cell_area', - ... units='m2', - ... measure='area', + ... standard_name="cell_area", + ... units="m2", + ... measure="area", ... ) >>> cube.add_cell_measure(cell_areas, (0, 1, 2, 3)) >>> area_weighted_sum = cube.collapsed( - ... ['grid_longitude', 'grid_latitude'], - ... iris.analysis.SUM, - ... weights='cell_area' + ... ["grid_longitude", "grid_latitude"], iris.analysis.SUM, weights="cell_area" ... ) >>> print(area_weighted_sum) air_potential_temperature / (m2.K) (time: 3; model_level_number: 7) @@ -252,16 +254,17 @@ to represent the climatological seasons and the season year respectively:: import datetime import iris - filename = iris.sample_data_path('ostia_monthly.nc') - cube = iris.load_cube(filename, 'surface_temperature') + filename = iris.sample_data_path("ostia_monthly.nc") + cube = iris.load_cube(filename, "surface_temperature") import iris.coord_categorisation - iris.coord_categorisation.add_season(cube, 'time', name='clim_season') - iris.coord_categorisation.add_season_year(cube, 'time', name='season_year') + + iris.coord_categorisation.add_season(cube, "time", name="clim_season") + iris.coord_categorisation.add_season_year(cube, "time", name="season_year") annual_seasonal_mean = cube.aggregated_by( - ['clim_season', 'season_year'], - iris.analysis.MEAN) + ["clim_season", "season_year"], iris.analysis.MEAN + ) Printing this cube now shows that two extra coordinates exist on the cube: @@ -292,8 +295,8 @@ These two coordinates can now be used to aggregate by season and climate-year: .. doctest:: aggregation >>> annual_seasonal_mean = cube.aggregated_by( - ... ['clim_season', 'season_year'], - ... iris.analysis.MEAN) + ... ["clim_season", "season_year"], iris.analysis.MEAN + ... ) >>> print(repr(annual_seasonal_mean)) @@ -311,9 +314,11 @@ so adjacent ones are often in the same season: .. doctest:: aggregation :options: +NORMALIZE_WHITESPACE - >>> for season, year in zip(cube.coord('clim_season')[:10].points, - ... cube.coord('season_year')[:10].points): - ... print(season + ' ' + str(year)) + >>> for season, year in zip( + ... cube.coord("clim_season")[:10].points, cube.coord("season_year")[:10].points + ... ): + ... print(season + " " + str(year)) + ... mam 2006 mam 2006 jja 2006 @@ -332,9 +337,11 @@ All the points now have distinct season+year values: :options: +NORMALIZE_WHITESPACE >>> for season, year in zip( - ... 
annual_seasonal_mean.coord('clim_season')[:10].points, - ... annual_seasonal_mean.coord('season_year')[:10].points): - ... print(season + ' ' + str(year)) + ... annual_seasonal_mean.coord("clim_season")[:10].points, + ... annual_seasonal_mean.coord("season_year")[:10].points, + ... ): + ... print(season + " " + str(year)) + ... mam 2006 jja 2006 son 2006 @@ -353,7 +360,7 @@ do not cover a three month period (note: judged here as > 3*28 days): .. doctest:: aggregation - >>> tdelta_3mth = datetime.timedelta(hours=3*28*24.0) + >>> tdelta_3mth = datetime.timedelta(hours=3 * 28 * 24.0) >>> spans_three_months = lambda t: (t.bound[1] - t.bound[0]) > tdelta_3mth >>> three_months_bound = iris.Constraint(time=spans_three_months) >>> full_season_means = annual_seasonal_mean.extract(three_months_bound) @@ -366,9 +373,12 @@ from jja-2006 to jja-2010: .. doctest:: aggregation :options: +NORMALIZE_WHITESPACE - >>> for season, year in zip(full_season_means.coord('clim_season').points, - ... full_season_means.coord('season_year').points): - ... print(season + ' ' + str(year)) + >>> for season, year in zip( + ... full_season_means.coord("clim_season").points, + ... full_season_means.coord("season_year").points, + ... ): + ... print(season + " " + str(year)) + ... jja 2006 son 2006 djf 2007 @@ -409,7 +419,9 @@ The following example shows a weighted sum (notice the change of the units): ... units="hours", ... ) >>> cube.add_ancillary_variable(time_weights, 0) - >>> seasonal_sum = cube.aggregated_by("clim_season", iris.analysis.SUM, weights="Time Weights") + >>> seasonal_sum = cube.aggregated_by( + ... "clim_season", iris.analysis.SUM, weights="Time Weights" + ... ) >>> print(seasonal_sum) surface_temperature / (3600 s.K) (-- : 4; latitude: 18; longitude: 432) Dimension coordinates: diff --git a/docs/src/user_manual/tutorial/interpolation_and_regridding.rst b/docs/src/user_manual/tutorial/interpolation_and_regridding.rst index 6a888d7549..75f95e0295 100644 --- a/docs/src/user_manual/tutorial/interpolation_and_regridding.rst +++ b/docs/src/user_manual/tutorial/interpolation_and_regridding.rst @@ -10,7 +10,8 @@ import numpy as np import iris import warnings - warnings.simplefilter('ignore') + + warnings.simplefilter("ignore") ================================= Cube Interpolation and Regridding diff --git a/docs/src/user_manual/tutorial/loading_iris_cubes.rst b/docs/src/user_manual/tutorial/loading_iris_cubes.rst index e54dbc9ebd..4e290f9f81 100644 --- a/docs/src/user_manual/tutorial/loading_iris_cubes.rst +++ b/docs/src/user_manual/tutorial/loading_iris_cubes.rst @@ -362,21 +362,25 @@ API documentation for:** :class:`iris.loading.LoadProblems`. 
showwarning_original = warnings.showwarning warnings.filterwarnings("default") IRIS_FILE = Path(iris.__file__) + + def custom_warn(message, category, filename, lineno, file=None, line=None): filepath = Path(filename) filename = str(filepath.relative_to(IRIS_FILE.parents[1])) sys.stdout.write(warnings.formatwarning(message, category, filename, lineno)) + + warnings.showwarning = custom_warn get_names_original = helpers.get_names + def raise_example_error_names(cf_coord_var, coord_name, attributes): if cf_coord_var.cf_name == "time": raise ValueError("Example coordinate error") else: - return get_names_original( - cf_coord_var, coord_name, attributes - ) + return get_names_original(cf_coord_var, coord_name, attributes) + helpers.get_names = raise_example_error_names air_temperature = std_names.STD_NAMES.pop("air_temperature") @@ -396,9 +400,7 @@ API documentation for:** :class:`iris.loading.LoadProblems`. unknown / (unknown) (-- : 240) Attributes:... IRIS_RAW {'axis': 'T', ...} - >>> attributes = last_problem.loaded.attributes[ - ... iris.common.LimitedAttributeDict.IRIS_RAW - ... ] + >>> attributes = last_problem.loaded.attributes[iris.common.LimitedAttributeDict.IRIS_RAW] >>> pprint(attributes) {'axis': 'T', 'bounds': 'time_bnds', diff --git a/docs/src/user_manual/tutorial/merge_and_concat.rst b/docs/src/user_manual/tutorial/merge_and_concat.rst index 3f717f064e..569b47f0f0 100644 --- a/docs/src/user_manual/tutorial/merge_and_concat.rst +++ b/docs/src/user_manual/tutorial/merge_and_concat.rst @@ -87,12 +87,18 @@ that have been merged. import numpy as np import iris + + def _xy_cube(z): - cube = iris.cube.Cube(np.arange(20).reshape(4, 5), 'air_temperature', units='kelvin') - cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name='y'), 0) - cube.add_dim_coord(iris.coords.DimCoord(range(5), long_name='x'), 1) - cube.add_aux_coord(iris.coords.DimCoord(z, long_name='z', units='meters')) + cube = iris.cube.Cube( + np.arange(20).reshape(4, 5), "air_temperature", units="kelvin" + ) + cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name="y"), 0) + cube.add_dim_coord(iris.coords.DimCoord(range(5), long_name="x"), 1) + cube.add_aux_coord(iris.coords.DimCoord(z, long_name="z", units="meters")) return cube + + cubes = iris.cube.CubeList([_xy_cube(1), _xy_cube(2), _xy_cube(3)]) @@ -165,14 +171,20 @@ into a single cube: import numpy as np import iris + + def _xy_cube(z): - cube = iris.cube.Cube(np.arange(20).reshape(4, 5), 'air_temperature', units='kelvin') - cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name='y'), 0) - cube.add_dim_coord(iris.coords.DimCoord(range(5), long_name='x'), 1) - cube.add_aux_coord(iris.coords.DimCoord(z, long_name='z', units='meters')) + cube = iris.cube.Cube( + np.arange(20).reshape(4, 5), "air_temperature", units="kelvin" + ) + cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name="y"), 0) + cube.add_dim_coord(iris.coords.DimCoord(range(5), long_name="x"), 1) + cube.add_aux_coord(iris.coords.DimCoord(z, long_name="z", units="meters")) return cube + + cubes = iris.cube.CubeList([_xy_cube(1), _xy_cube(2), _xy_cube(3)]) - cubes[0].attributes['Conventions'] = 'CF-1.5' + cubes[0].attributes["Conventions"] = "CF-1.5" .. 
doctest:: merge_vs_merge_cube :options: +ELLIPSIS, +NORMALIZE_WHITESPACE @@ -277,13 +289,25 @@ cubes to form a new cube with an extended ``t`` coordinate: import numpy as np import iris + + def _xyt_cube(t): - cube = iris.cube.Cube(np.arange(12 * len(t)).reshape(-1, 3, 4), 'air_temperature', units='kelvin') - cube.add_dim_coord(iris.coords.DimCoord(range(3), long_name='y'), 1) - cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name='x'), 2) - cube.add_dim_coord(iris.coords.DimCoord(t, long_name='t'), 0) + cube = iris.cube.Cube( + np.arange(12 * len(t)).reshape(-1, 3, 4), "air_temperature", units="kelvin" + ) + cube.add_dim_coord(iris.coords.DimCoord(range(3), long_name="y"), 1) + cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name="x"), 2) + cube.add_dim_coord(iris.coords.DimCoord(t, long_name="t"), 0) return cube - cubes = iris.cube.CubeList([_xyt_cube(np.arange(31)), _xyt_cube(np.arange(28) + 31), _xyt_cube(np.arange(31) + 59)]) + + + cubes = iris.cube.CubeList( + [ + _xyt_cube(np.arange(31)), + _xyt_cube(np.arange(28) + 31), + _xyt_cube(np.arange(31) + 59), + ] + ) .. doctest:: concatenate :options: +ELLIPSIS, +NORMALIZE_WHITESPACE @@ -332,14 +356,26 @@ concatenate into a single cube: import numpy as np import iris + + def _xyt_cube(t): - cube = iris.cube.Cube(np.arange(12 * len(t)).reshape(-1, 3, 4), 'air_temperature', units='kelvin') - cube.add_dim_coord(iris.coords.DimCoord(range(3), long_name='y'), 1) - cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name='x'), 2) - cube.add_dim_coord(iris.coords.DimCoord(t, long_name='t'), 0) + cube = iris.cube.Cube( + np.arange(12 * len(t)).reshape(-1, 3, 4), "air_temperature", units="kelvin" + ) + cube.add_dim_coord(iris.coords.DimCoord(range(3), long_name="y"), 1) + cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name="x"), 2) + cube.add_dim_coord(iris.coords.DimCoord(t, long_name="t"), 0) return cube - cubes = iris.cube.CubeList([_xyt_cube(np.arange(31)), _xyt_cube(np.arange(28) + 31), _xyt_cube(np.arange(31) + 59)]) - cubes[0].attributes['History'] = 'Created 2010-06-30' + + + cubes = iris.cube.CubeList( + [ + _xyt_cube(np.arange(31)), + _xyt_cube(np.arange(28) + 31), + _xyt_cube(np.arange(31) + 59), + ] + ) + cubes[0].attributes["History"] = "Created 2010-06-30" .. doctest:: concatenate_vs_concatenate_cube :options: +ELLIPSIS, +NORMALIZE_WHITESPACE @@ -505,12 +541,18 @@ is the default behaviour): import numpy as np import iris + + def _xy_cube(z): - cube = iris.cube.Cube(np.arange(20).reshape(4, 5), 'air_temperature', units='kelvin') - cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name='y'), 0) - cube.add_dim_coord(iris.coords.DimCoord(range(5), long_name='x'), 1) - cube.add_aux_coord(iris.coords.DimCoord(z, long_name='z', units='meters')) + cube = iris.cube.Cube( + np.arange(20).reshape(4, 5), "air_temperature", units="kelvin" + ) + cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name="y"), 0) + cube.add_dim_coord(iris.coords.DimCoord(range(5), long_name="x"), 1) + cube.add_aux_coord(iris.coords.DimCoord(z, long_name="z", units="meters")) return cube + + cubes = iris.cube.CubeList([_xy_cube(1), _xy_cube(2), _xy_cube(1)]) .. 
doctest:: merge_duplicate @@ -608,18 +650,28 @@ the input cubes using :meth:`~iris.cube.CubeList.concatenate_cube`: import numpy as np import iris + + def _xyt_cube(t): - cube = iris.cube.Cube(np.arange(12 * len(t)).reshape(-1, 3, 4), 'air_temperature', units='kelvin') - cube.add_dim_coord(iris.coords.DimCoord(range(3), long_name='y'), 1) - cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name='x'), 2) - cube.add_dim_coord(iris.coords.DimCoord(t, long_name='t'), 0) + cube = iris.cube.Cube( + np.arange(12 * len(t)).reshape(-1, 3, 4), "air_temperature", units="kelvin" + ) + cube.add_dim_coord(iris.coords.DimCoord(range(3), long_name="y"), 1) + cube.add_dim_coord(iris.coords.DimCoord(range(4), long_name="x"), 2) + cube.add_dim_coord(iris.coords.DimCoord(t, long_name="t"), 0) return cube - cubes = iris.cube.CubeList([_xyt_cube(np.arange(31).astype(np.float64)), - _xyt_cube(np.arange(28).astype(np.float64) + 31), - _xyt_cube(np.arange(31).astype(np.float64) + 59)]) - cubes[0].coord('t').units = 'days since 1990-02-15' - cubes[1].coord('t').units = 'days since 1970-01-01' - cubes[2].coord('t').units = 'days since 1970-01-01' + + + cubes = iris.cube.CubeList( + [ + _xyt_cube(np.arange(31).astype(np.float64)), + _xyt_cube(np.arange(28).astype(np.float64) + 31), + _xyt_cube(np.arange(31).astype(np.float64) + 59), + ] + ) + cubes[0].coord("t").units = "days since 1990-02-15" + cubes[1].coord("t").units = "days since 1970-01-01" + cubes[2].coord("t").units = "days since 1970-01-01" .. doctest:: concatenate_time_units :options: +ELLIPSIS, +NORMALIZE_WHITESPACE @@ -630,9 +682,9 @@ the input cubes using :meth:`~iris.cube.CubeList.concatenate_cube`: 1: air_temperature / (kelvin) (t: 28; y: 3; x: 4) 2: air_temperature / (kelvin) (t: 31; y: 3; x: 4) - >>> print(cubes[0].coord('t').units) + >>> print(cubes[0].coord("t").units) days since 1990-02-15 - >>> print(cubes[1].coord('t').units) + >>> print(cubes[1].coord("t").units) days since 1970-01-01 >>> print(cubes.concatenate_cube()) @@ -643,7 +695,7 @@ the input cubes using :meth:`~iris.cube.CubeList.concatenate_cube`: >>> unify_time_units(cubes) - >>> print(cubes[1].coord('t').units) + >>> print(cubes[1].coord("t").units) days since 1990-02-15 >>> print(cubes.concatenate_cube()) diff --git a/docs/src/user_manual/tutorial/s3_io.rst b/docs/src/user_manual/tutorial/s3_io.rst index 5c0cd89a2a..9610cac18c 100644 --- a/docs/src/user_manual/tutorial/s3_io.rst +++ b/docs/src/user_manual/tutorial/s3_io.rst @@ -76,7 +76,7 @@ Though not suggested, this appears to work on Unix systems where we have tried i For this, you can use conda -- e.g. -.. code-block:: bash +.. code-block:: text $ conda install s3-fuse @@ -95,7 +95,7 @@ Create an empty mount directory You need an empty directory in your existing filesystem tree, that you will map your S3 bucket **onto** -- e.g. -.. code-block:: bash +.. code-block:: text $ mkdir /home/self.me/s3_root/testbucket_mountpoint @@ -117,7 +117,7 @@ command "s3fs". Map your S3 bucket "into" the chosen empty directory -- e.g. -.. code-block:: bash +.. code-block:: text $ s3fs my-test-bucket /home/self.me/s3_root/testbucket_mountpoint @@ -146,7 +146,7 @@ Within Python code You can now access objects at the remote S3 URL via the mount point on your local file system you just created with `s3fs`, e.g. -.. code-block:: python +.. 
code-block:: text

       >>> path = "/home/self.me/s3_root/testbucket_mountpoint/sub_dir/a_file.nc"
       >>> cubes = iris.load(path)
@@ -158,7 +158,7 @@ When you have finished accessing the S3 objects in the mounted virtual filesyste
 is a good idea to **unmount** it.  Before doing this, make sure that all file handles to
 the objects have been closed and there are no terminals open in that directory.

-.. code-block:: bash
+.. code-block:: text

     $ umount /home/self.me/s3_root/testbucket_mountpoint

diff --git a/docs/src/user_manual/tutorial/saving_iris_cubes.rst b/docs/src/user_manual/tutorial/saving_iris_cubes.rst
index 50466f8261..7a54f756ac 100644
--- a/docs/src/user_manual/tutorial/saving_iris_cubes.rst
+++ b/docs/src/user_manual/tutorial/saving_iris_cubes.rst
@@ -56,7 +56,7 @@ The :py:func:`iris.save` function passes all other keywords through to the saver
     >>> # Save a cube list to a PP file, appending to the contents of the file
     >>> # if it already exists
     >>> iris.save(cubes, "myfile.pp", append=True)
-
+
     >>> # Save a cube to netCDF, defaults to NETCDF4 file format
     >>> iris.save(cubes[0], "myfile.nc")
     >>> # Save a cube list to netCDF, using the NETCDF3_CLASSIC storage option
@@ -65,6 +65,7 @@ The :py:func:`iris.save` function passes all other keywords through to the saver
 .. testcleanup::

     import pathlib
+
     p = pathlib.Path("myfile.pp")
     if p.exists():
         p.unlink()
@@ -72,7 +73,7 @@ The :py:func:`iris.save` function passes all other keywords through to the saver
     if p.exists():
         p.unlink()

-See
+See

 * :py:func:`iris.fileformats.netcdf.save`
 * :py:func:`iris.fileformats.pp.save`
@@ -126,7 +127,6 @@ Bespoke Saver
 -------------
 A bespoke saver may be written to support an alternative file format. This can be provided to the :py:func:`iris.save` function, enabling Iris to write to a different file format.
-Such a custom saver will need be written to meet the needs of the file format and to handle the metadata translation from cube metadata effectively.
+Such a custom saver will need to be written to meet the needs of the file format and to handle the metadata translation from cube metadata effectively.
 Implementing a bespoke saver is out of scope for the user guide.
-
diff --git a/docs/src/user_manual/tutorial/subsetting_a_cube.rst b/docs/src/user_manual/tutorial/subsetting_a_cube.rst
index 53fe027243..cf2b231575 100644
--- a/docs/src/user_manual/tutorial/subsetting_a_cube.rst
+++ b/docs/src/user_manual/tutorial/subsetting_a_cube.rst
@@ -240,15 +240,18 @@ day of every week for many years:
     import datetime
     import numpy as np
     from iris.time import PartialDateTime
-    long_ts = iris.cube.Cube(np.arange(150), long_name='data', units='1')
-    _mondays = iris.coords.DimCoord(7 * np.arange(150), standard_name='time', units='days since 2007-04-09')
+
+    long_ts = iris.cube.Cube(np.arange(150), long_name="data", units="1")
+    _mondays = iris.coords.DimCoord(
+        7 * np.arange(150), standard_name="time", units="days since 2007-04-09"
+    )
     long_ts.add_dim_coord(_mondays, 0)

.. doctest:: timeseries_range
    :options: +NORMALIZE_WHITESPACE, +ELLIPSIS

-    >>> print(long_ts.coord('time'))
+    >>> print(long_ts.coord("time"))
     DimCoord :  time / (days since 2007-04-09, standard calendar)
         points: [
             2007-04-09 00:00:00, 2007-04-16 00:00:00, ...,
@@ -264,12 +267,11 @@ we constrain that coord using :class:`iris.cube.Cube.extract`

.. 
doctest:: timeseries_range :options: +NORMALIZE_WHITESPACE, +ELLIPSIS - >>> d1 = datetime.datetime.strptime('20070715T0000Z', '%Y%m%dT%H%MZ') - >>> d2 = datetime.datetime.strptime('20070825T0000Z', '%Y%m%dT%H%MZ') - >>> st_swithuns_daterange_07 = iris.Constraint( - ... time=lambda cell: d1 <= cell.point < d2) + >>> d1 = datetime.datetime.strptime("20070715T0000Z", "%Y%m%dT%H%MZ") + >>> d2 = datetime.datetime.strptime("20070825T0000Z", "%Y%m%dT%H%MZ") + >>> st_swithuns_daterange_07 = iris.Constraint(time=lambda cell: d1 <= cell.point < d2) >>> within_st_swithuns_07 = long_ts.extract(st_swithuns_daterange_07) - >>> print(within_st_swithuns_07.coord('time')) + >>> print(within_st_swithuns_07.coord("time")) DimCoord : time / (days since 2007-04-09, standard calendar) points: [ 2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00, @@ -286,10 +288,9 @@ objects. >>> pdt1 = PartialDateTime(year=2007, month=7, day=15) >>> pdt2 = PartialDateTime(year=2007, month=8, day=25) - >>> st_swithuns_daterange_07 = iris.Constraint( - ... time=lambda cell: pdt1 <= cell.point < pdt2) + >>> st_swithuns_daterange_07 = iris.Constraint(time=lambda cell: pdt1 <= cell.point < pdt2) >>> within_st_swithuns_07 = long_ts.extract(st_swithuns_daterange_07) - >>> print(within_st_swithuns_07.coord('time')) + >>> print(within_st_swithuns_07.coord("time")) DimCoord : time / (days since 2007-04-09, standard calendar) points: [ 2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00, @@ -306,11 +307,13 @@ PartialDateTime this becomes simple: .. doctest:: timeseries_range >>> st_swithuns_daterange = iris.Constraint( - ... time=lambda cell: PartialDateTime(month=7, day=15) <= cell.point < PartialDateTime(month=8, day=25)) + ... time=lambda cell: PartialDateTime(month=7, day=15) + ... <= cell.point + ... < PartialDateTime(month=8, day=25) + ... ) >>> within_st_swithuns = long_ts.extract(st_swithuns_daterange) - ... >>> # Note: using summary(max_values) to show more of the points - >>> print(within_st_swithuns.coord('time').summary(max_values=100)) + >>> print(within_st_swithuns.coord("time").summary(max_values=100)) DimCoord : time / (days since 2007-04-09, standard calendar) points: [ 2007-07-16 00:00:00, 2007-07-23 00:00:00, 2007-07-30 00:00:00, @@ -342,7 +345,7 @@ after 1st January 2008. Cube Masking -------------- -Masking a cube allows you to hide unwanted data points without changing the +Masking a cube allows you to hide unwanted data points without changing the shape or size of the cube. This can be achieved by two methods: 1. Masking a cube using a boolean mask array via :func:`iris.util.mask_cube`. @@ -400,7 +403,7 @@ Often we want to perform some kind of analysis over a complex geographical featu - extract data along the trajectory of a storm track - extract data at specific points of interest such as cities or weather stations -These geographical features can often be described by `ESRI Shapefiles`_. +These geographical features can often be described by `ESRI Shapefiles`_. Shapefiles are a file format first developed for GIS software in the 1990s, and `Natural Earth`_ maintain a large freely usable database of shapefiles of many geographical and political divisions, accessible via `cartopy`_. Users may also @@ -417,8 +420,8 @@ the function, any data outside the bounds of the shape geometry is masked. should have a coordinate reference system (CRS) defined. 
Note that the CRS of the masking geometry must be provided explicitly to :func:`iris.util.mask_cube_from_shape` (via the ``shape_crs`` keyword argument), whereas the :class:`iris.cube.Cube` - CRS is read from the cube itself. - + CRS is read from the cube itself. + The cube **must** have a :attr:`iris.coords.Coord.coord_system` defined otherwise an error will be raised. @@ -427,8 +430,8 @@ the function, any data outside the bounds of the shape geometry is masked. inherent understanding of the spherical geometry underpinning geographic coordinate systems. For this reason, **shapefiles or shape vectors that cross the antimeridian or poles are not supported by this function** to - avoid unexpected masking behaviour. - + avoid unexpected masking behaviour. + For shapes that do cross these boundaries, this function expects the user to undertake fixes upstream of Iris, using tools like `GDAL`_ or `antimeridian`_ to ensure correct geometry wrapping. @@ -450,18 +453,18 @@ a global extent. But only the data over Brazil is plotted - the rest has been masked out. .. important:: - Because we do not explicitly pass a CRS for the shape geometry to + Because we do not explicitly pass a CRS for the shape geometry to :func:`iris.util.mask_cube_from_shape`, the function assumes the geometry has the same CRS as the cube. However, a :class:`iris.cube.Cube` and `Shapely`_ geometry do not need to have -the same CRS, as long as both have a CRS defined. Where the CRS of the +the same CRS, as long as both have a CRS defined. Where the CRS of the :class:`iris.cube.Cube` and geometry differ, :func:`iris.util.mask_cube_from_shape` will reproject the geometry (via `GDAL`_) onto the cube's CRS prior to masking. The masked cube will be returned in the same CRS as the input cube. In the following example, we load a cube containing satellite derived temperature -data in a stereographic projection (with projected coordinates with units of +data in a stereographic projection (with projected coordinates with units of metres), and mask it to only show data over the United Kingdom, based on a shapefile of the UK boundary defined in WGS84 lat-lon coordinates. @@ -589,5 +592,3 @@ Similarly, Iris cubes have indexing capability:: .. _GDAL: https://gdal.org/en/stable/programs/ogr2ogr.html .. _Natural Earth: https://www.naturalearthdata.com/ .. _shapely.Geometry: https://shapely.readthedocs.io/en/stable/geometry.html - - diff --git a/docs/src/whatsnew/1.5.rst b/docs/src/whatsnew/1.5.rst index 72bdbac480..f2b16b1f7c 100644 --- a/docs/src/whatsnew/1.5.rst +++ b/docs/src/whatsnew/1.5.rst @@ -18,7 +18,7 @@ Features 1D plotting capability. .. code-block:: python - + # plot a 1d cube against a given 1d coordinate, with the cube # values on the x-axis and the coordinate on the y-axis iris.plot.plot(cube, coord) @@ -34,18 +34,18 @@ Features types is also supported). .. code-block:: python - + # Get cube slices corresponding to the dimension associated with longitude # and the first dimension from a multi-dimensional cube. - for sub_cube in cube.slices(['longitude', 0]): + for sub_cube in cube.slices(["longitude", 0]): print(sub_cube) * :mod:`iris.experimental.animate` now provides experimental animation support. .. code-block:: python - + # Create an iterable of cubes (generator, lists etc.) 
- cube_iter = cubes.slices(('grid_longitude', 'grid_latitude')) + cube_iter = cubes.slices(("grid_longitude", "grid_latitude")) ani = animate(cube_iter, qplt.contourf) plt.show() @@ -54,22 +54,21 @@ Features * Complete support for Transverse Mercator with saving to NetCDF also. .. code-block:: python - + import cartopy.crs as ccrs import iris import iris.quickplot as qplt import matplotlib.pyplot as plt - - - fname = iris.sample_data_path('air_temp.pp') + + fname = iris.sample_data_path("air_temp.pp") temperature = iris.load_cube(fname) - + plt.axes(projection=ccrs.TransverseMercator()) qplt.contourf(temperature) plt.gca().coastlines() plt.gca().gridlines() plt.show() - + .. image:: images/transverse_merc.png * Support for loading NAME files (gridded and trajectory data). @@ -96,13 +95,13 @@ Features * Added support for bool array indexing on a cube. .. code-block:: python - - fname = iris.sample_data_path('air_temp.pp') + + fname = iris.sample_data_path("air_temp.pp") temperature = iris.load_cube(fname) - temperature[temperature.coord('latitude').points > 0] - + temperature[temperature.coord("latitude").points > 0] + # The constraints mechanism is still the preferred means to do such a query. - temperature.extract(iris.Constraint(latitude=lambda v: v>0))) + temperature.extract(iris.Constraint(latitude=lambda v: v > 0)) * Added support for loading fields defined on regular Gaussian grids from GRIB files. @@ -197,4 +196,3 @@ Deprecations * :func:`iris.fileformats.pp.add_load_rules` and :func:`iris.fileformats.grib.add_load_rules` are now deprecated. - diff --git a/docs/src/whatsnew/3.11.rst b/docs/src/whatsnew/3.11.rst index 1ecbb3c68d..12a0bfaa85 100644 --- a/docs/src/whatsnew/3.11.rst +++ b/docs/src/whatsnew/3.11.rst @@ -124,7 +124,7 @@ v3.11.1 (19 Dec 2024) :mod:`~iris.coord_categorisation` faster. Anyone using :func:`~iris.coord_categorisation.add_categorised_coord` with cftime :class:`~cftime.datetime` objects can benefit from the same - improvement by adding a type hint to their category funcion. (:pull:`5999`) + improvement by adding a type hint to their category function. (:pull:`5999`) #. `@bouweandela`_ made :meth:`iris.cube.CubeList.concatenate` faster if more than two cubes are concatenated with equality checks on the values of diff --git a/docs/src/whatsnew/3.12.rst b/docs/src/whatsnew/3.12.rst index 9ed4a4b60f..9c2b5e0c3a 100644 --- a/docs/src/whatsnew/3.12.rst +++ b/docs/src/whatsnew/3.12.rst @@ -227,7 +227,7 @@ v3.12.3 (22 Aug 2025) as per the `SPEC 0`_ schedule. (:pull:`6195`) #. `@stephenworsley`_ and `@valeriupredoi`_ removed the pin from dask since newer - versions of dask fix the bug casuing the pin. Introduced a minimum pin (2025.1.0) + versions of dask fix the bug causing the pin. Introduced a minimum pin (2025.1.0) to avoid this bug. (:pull:`6342`) #. `@trexfeathers`_ refactored Iris loading and saving to make it compatible diff --git a/docs/src/whatsnew/3.14.rst b/docs/src/whatsnew/3.14.rst index b5784d03b3..8217f5af52 100644 --- a/docs/src/whatsnew/3.14.rst +++ b/docs/src/whatsnew/3.14.rst @@ -25,7 +25,7 @@ This document explains the changes made to Iris for this release :func:`~iris.cube.Cube.convert_units`, :func:`~iris.cube.Cube.subset` and :func:`~iris.cube.Cube.slices`. 
- * Added the utility :func:`~iris.util.mask_cube_from_shape`, superceding + * Added the utility :func:`~iris.util.mask_cube_from_shape`, superseding :func:`~iris.util.mask_cube_from_shapefile` and adding the ability to handle shapefiles that use different coordinate systems to the cube they are being applied to, adding the ability to handle additional Point and Line shape types @@ -159,7 +159,7 @@ v3.14.1 (05 Dec 2025) of parallel operation; so a dataset containing ~100 chunks could be around 0.5 seconds slower to load or save. This regression will NOT be fixed within Iris - doing so would introduce unacceptable complexity and potential - concurrency problems. The regession has been reported to the NetCDF team; it + concurrency problems. The regression has been reported to the NetCDF team; it is hoped that a future ``libnetcdf`` release will recover the original performance. See `netcdf-c#3183`_ for more details. (:pull:`6747`) diff --git a/docs/src/whatsnew/3.15.rst b/docs/src/whatsnew/3.15.rst index 005bca33de..c3a6483bf6 100644 --- a/docs/src/whatsnew/3.15.rst +++ b/docs/src/whatsnew/3.15.rst @@ -168,12 +168,12 @@ This document explains the changes made to Iris for this release Python \<3.14. (:pull:`6816`, :issue:`6775`) #. `@ESadek-MO`_, `@trexfeathers`_, `@bjlittle`_, `@HGWright`_, `@pp-mo`_, - `@stephenworsley`_ and `@ukmo-ccbunney`_ converted the entirity of the tests + `@stephenworsley`_ and `@ukmo-ccbunney`_ converted the entirety of the tests from unittest to pytest. Iris is now also ruff-PT compliant, save for PT019. (:issue:`6212`, :pull:`6939`) -#. `@hsteptoe`_ and `@ESadek-MO`_ (reviewer) updated chained assignment useage within the tests - associated with :mod:`iris.pandas` to reflect changes in pandas v3 `New pandas v3 copy behaviour`_. +#. `@hsteptoe`_ and `@ESadek-MO`_ (reviewer) updated chained assignment usage within the tests + associated with :mod:`iris.pandas` to reflect changes in pandas v3 `New pandas v3 copy behaviour`_. (:pull:`6948`, :issue:`6761`) #. `@hsteptoe`_ and `@ESadek-MO`_ (reviewer) added static type hinting to :mod:`iris.pandas`. (:pull:`6948`) diff --git a/docs/src/whatsnew/latest.rst b/docs/src/whatsnew/latest.rst index 0a9f1114b9..3e6d53fa52 100644 --- a/docs/src/whatsnew/latest.rst +++ b/docs/src/whatsnew/latest.rst @@ -77,7 +77,7 @@ This document explains the changes made to Iris for this release appears once you click on a link away from the landing page. Also moved the search box to the top navigation bar. (:pull:`7060`) -#. `@trexfeathers`_ switched to using the offical URL of the `cf-checker`_, after +#. `@trexfeathers`_ switched to using the official URL of the `cf-checker`_, after our previous URL of choice was taken down. (:pull:`7072`) #. `@tkknight`_ updated the voted table that uses datatables to not highlight the diff --git a/lib/iris/fileformats/netcdf/loader.py b/lib/iris/fileformats/netcdf/loader.py index 6557f4aebc..85cb147796 100644 --- a/lib/iris/fileformats/netcdf/loader.py +++ b/lib/iris/fileformats/netcdf/loader.py @@ -892,7 +892,7 @@ def set( raise ValueError(msg) dim_chunks = self.var_dim_chunksizes.setdefault(var_name, {}) for dim_name, chunksize in dimension_chunksizes.items(): - if not (isinstance(dim_name, str) and isinstance(chunksize, int)): + if not (isinstance(dim_name, str) and isinstance(chunksize, int)): # type: ignore[redundant-expr] msg = ( "'dimension_chunksizes' kwargs should be a dict " f"of `str: int` pairs, not {dimension_chunksizes!r}." 
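A note on the `# type: ignore[redundant-expr]` comment added above (the same pattern appears in `lib/iris/util.py` below): it pairs with the `"redundant-expr"` entry added to `enable_error_code` in the `[tool.mypy]` section of `pyproject.toml` later in this patch. Once mypy trusts the declared parameter types, it can report an `isinstance` check on those parameters as an always-true expression, even though the check still guards against untyped callers at runtime. A minimal standalone sketch of the pattern; the names here are illustrative, not taken from the Iris codebase:

    # Sketch only: why a runtime isinstance() guard can trip mypy's
    # "redundant-expr" error code. `set_chunksizes` is a hypothetical function.


    def set_chunksizes(dimension_chunksizes: dict[str, int]) -> None:
        for dim_name, chunksize in dimension_chunksizes.items():
            # To mypy, dim_name is always str and chunksize always int here, so
            # the test below looks "redundant" -- but untyped callers can still
            # pass anything at runtime, which is what the check defends against.
            if not (isinstance(dim_name, str) and isinstance(chunksize, int)):  # type: ignore[redundant-expr]
                msg = f"expected `str: int` pairs, not {dimension_chunksizes!r}"
                raise ValueError(msg)


    set_chunksizes({"time": 10})  # OK
    # set_chunksizes({"time": "10"})  # would raise ValueError at runtime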
diff --git a/lib/iris/tests/integration/netcdf/derived_bounds/test_bounds_files.py b/lib/iris/tests/integration/netcdf/derived_bounds/test_bounds_files.py index 4d4b4e5c79..c7d3564fd2 100644 --- a/lib/iris/tests/integration/netcdf/derived_bounds/test_bounds_files.py +++ b/lib/iris/tests/integration/netcdf/derived_bounds/test_bounds_files.py @@ -71,7 +71,7 @@ def cf_primary_sample_path(tmp_path_factory): float temp(eta, lat, lon) ; temp:standard_name = "air_temperature" ; temp:units = "K" ; - + data: eta = 1 ; eta_bnds = 0.5, 1.5 ; diff --git a/lib/iris/util.py b/lib/iris/util.py index 551b5aeb68..0812b4c9c4 100644 --- a/lib/iris/util.py +++ b/lib/iris/util.py @@ -2898,8 +2898,8 @@ def dimco( else: # points is None : interpret n? / ?lims - if not isinstance(num, int) or num < 1: - msg = f"Bad value for 'n{axis}' arg : {num}. Must be an integer >= 1." # type: ignore[unreachable] + if not isinstance(num, int) or num < 1: # type: ignore[redundant-expr] + msg = f"Bad value for 'n{axis}' arg : {num}. Must be an integer >= 1." raise ValueError(msg) ok = isinstance(lims, Iterable) diff --git a/noxfile.py b/noxfile.py old mode 100644 new mode 100755 index 2d4f78b35c..a1d9907b02 --- a/noxfile.py +++ b/noxfile.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """Perform test automation with nox. For further details, see https://nox.thea.codes/en/stable/# @@ -11,8 +12,13 @@ import nox from nox.logger import logger +nox.options.default_venv_backend = "conda" +nox.needs_version = ">=2022.1.7" #: Default to reusing any pre-existing nox environments. nox.options.reuse_existing_virtualenvs = True +# /// script +# dependencies = ["nox"] +# /// #: Python versions we can run sessions under _PY_VERSIONS_ALL = ["3.12", "3.13", "3.14"] @@ -291,3 +297,7 @@ def benchmarks(session: nox.sessions.Session): session.install("asv", "nox") bm_runner_path = Path(__file__).parent / "benchmarks" / "bm_runner.py" session.run("python", bm_runner_path, *session.posargs) + + +if __name__ == "__main__": + nox.main() diff --git a/pyproject.toml b/pyproject.toml index 95124f5710..023a20b02d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,6 @@ requires = [ "setuptools>=77.0.3", "setuptools_scm[toml]>=8", - "wheel", ] # Defined by PEP 517 build-backend = "setuptools.build_meta" @@ -58,96 +57,22 @@ Discussions = "https://github.com/SciTools/iris/discussions" Documentation = "https://scitools-iris.readthedocs.io/en/stable/" Issues = "https://github.com/SciTools/iris/issues" -[tool.ruff] -# Exclude the following, in addition to the standard set of exclusions. -# https://docs.astral.sh/ruff/settings/#exclude -extend-exclude = [ - "_ff_cross_references.py", - "um_cf_map.py", - "docs/src/sphinxext/api_rst_formatting.py", -] -line-length = 88 -src = [ - "benchmarks", - "lib", - "docs/src", - "tools", -] - -[tool.ruff.format] -preview = false - -[tool.ruff.lint] +[tool.check-manifest] ignore = [ - # NOTE: Non-permanent exclusions should be added to the ".ruff.toml" file. - - # flake8-commas (COM) - # https://docs.astral.sh/ruff/rules/#flake8-commas-com - "COM812", # Trailing comma missing. - "COM819", # Trailing comma prohibited. - - # flake8-implicit-str-concat (ISC) - # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/ - # NOTE: This rule may cause conflicts when used with "ruff format". - "ISC001", # Implicitly concatenate string literals on one line. - ] - preview = false - select = [ - "ALL", - # list specific rules to include that is skipped using numpy convention. 
- "D212", # Multi-line docstring summary should start at the first line - ] - -[tool.ruff.lint.isort] -force-sort-within-sections = true -known-first-party = ["iris"] - -[tool.ruff.lint.per-file-ignores] -# All test scripts - -"lib/iris/tests/*.py" = [ - # https://docs.astral.sh/ruff/rules/undocumented-public-module/ - "D100", # Missing docstring in public module - "D106", # Missing docstring in public nested class - "D205", # 1 blank line required between summary line and description - "D401", # 1 First line of docstring should be in imperative mood -] -"tools/test_*.py" = [ - "D100", - "D106", - "D205", - "D401", + "lib/iris/_version.py", + "lib/iris/std_names.py", ] -# Deprecated unittest tests - -"lib/iris/tests/__init__.py" = ["PT"] -"lib/iris/tests/unit/tests/test_IrisTest.py" = ["PT"] - -[tool.ruff.lint.pydocstyle] -convention = "numpy" - -[tool.setuptools] -zip-safe = false - -[tool.setuptools.dynamic] -dependencies = {file = "requirements/pypi-core.txt"} -readme = {file = "README.md", content-type = "text/markdown"} - -[tool.setuptools.packages.find] -include = ["iris*"] -where = ["lib"] - -[tool.setuptools_scm] -write_to = "lib/iris/_version.py" -local_scheme = "dirty-tag" -version_scheme = "release-branch-semver" +[tool.codespell] +ignore-words-list = "alpha-numeric,assertIn,degreee,discontiguities,lazyness,meaned,nin,re-use,re-uses,re-using,re-used,anc,abl,ND,Nd" # This is the full list of words that we ignore in all repositories. +skip = "./CODE_OF_CONDUCT.md,_build,*.css,*.ipynb,*.js,*.html,*.svg,*.xml,.git,generated" -[tool.pytest.ini_options] -addopts = "-ra --durations=25" -required_plugins = "pytest-mock" -testpaths = "lib/iris" -log_level = "INFO" +[tool.coverage.report] +exclude_lines = [ + "pragma: no cover", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] [tool.coverage.run] branch = true @@ -159,33 +84,18 @@ omit = [ "lib/iris/etc/*", ] -[tool.coverage.report] -exclude_lines = [ - "pragma: no cover", - "if __name__ == .__main__.:", - "if TYPE_CHECKING:", -] - -[tool.codespell] -ignore-words-list = "alpha-numeric,assertIn,degreee,discontiguities,lazyness,meaned,nin,re-use,re-uses,re-using,re-used,anc,abl" -skip = "./CODE_OF_CONDUCT.md,_build,*.css,*.ipynb,*.js,*.html,*.svg,*.xml,.git,generated" - -[tool.check-manifest] -ignore = [ - "lib/iris/_version.py", - "lib/iris/std_names.py", -] - [tool.mypy] # See https://mypy.readthedocs.io/en/stable/config_file.html +# https://mypy.readthedocs.io/en/stable/error_code_list2.html # TODO: remove when MyPy fixed (see https://github.com/python/mypy/issues/17166) disable_error_code = ["call-arg"] -enable_error_code = ["ignore-without-code", "truthy-bool"] +enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] exclude = [ 'noxfile\.py', 'docs/src/conf\.py' ] ignore_missing_imports = true +strict = false warn_unreachable = true warn_unused_configs = true @@ -196,17 +106,20 @@ checks = [ # -> Docstring text (summary) should start in the line immediately # after the opening quotes (not in the same line, or leaving a # blank line in between) + # Reason for ignore: A chosen house-style "GL01", # Permit summary line on same line as docstring opening quotes. # -> Closing quotes should be placed in the line after the last text # in the docstring (do not close the quotes in the same line as # the text, or leave a blank line between the last text and the # quotes) + # Reason for ignore: A chosen house-style "GL02", # Permit a blank line before docstring closing quotes. 
# -> Double line break found; please use only one blank line to
# separate sections or paragraphs, and do not leave blank lines
# at the end of docstrings
+ # Reason for ignore: We benefit from more flexibility in formatting
"GL03", # Ignoring.

# -> See Also section not found
@@ -221,7 +134,7 @@ checks = [
# -> No Yields section found
"YD01", # Not all docstrings require a "Yields" section.

- # Temporary checks to ignore, will be reviewed at a later date.
+ # TODO: Temporary checks to ignore, will be reviewed at a later date.
"GL08", # The object does not have a docstring
"PR01", # Parameters ... not documented
"PR02", # Unknown parameters {....}
@@ -235,3 +148,294 @@ exclude = [
'\.__ne__$',
'\.__repr__$',
]
+
+[tool.pytest.ini_options]
+addopts = [
+ "-ra",
+ "--durations=25",
+ "--strict-config",
+ "--strict-markers",
+]
+filterwarnings = ["default"]
+log_level = "INFO"
+minversion = "6.0"
+required_plugins = "pytest-mock"
+testpaths = "lib/iris"
+xfail_strict = "True"
+
+[tool.repo-review]
+# A list of the currently failing repo-review checks.
+ignore = [
+ # PERMANENT IGNORES --------------------------------------------------------
+ # https://learn.scientific-python.org/development/guides/style/#RF003
+ "RF003", # So that we can list non-src directories for linting.
+
+ # TODO: TEMPORARY IGNORES, for resolving over time -------------------------
+ # https://learn.scientific-python.org/development/guides/style/#PC170
+ "PC170", # pre-commit/pygrep-hooks for RST linting.
+ # https://learn.scientific-python.org/development/guides/style/#PC180
+ # Not possible to run on the hardware used by the majority of our developers. Might change in future!
+ "PC180", # Uses prettier.
+ # https://learn.scientific-python.org/development/guides/packaging-simple#PP006
+ "PP006", # Uses dev dependency group
+ # https://learn.scientific-python.org/development/guides/packaging-simple/#PY005
+ "PY005", # tests/ or src/tests/ directory.
+]
+
+[tool.ruff]
+# Exclude the following, in addition to the standard set of exclusions.
+# https://docs.astral.sh/ruff/settings/#exclude
+extend-exclude = [
+ "_ff_cross_references.py",
+ "um_cf_map.py",
+ "docs/src/sphinxext/api_rst_formatting.py",
+]
+line-length = 88
+
+[tool.ruff.format]
+preview = false
+
+[tool.ruff.lint]
+ignore = [
+ # NOTE: To find a rule code to fix, run:
+ # ruff --select="ALL" --statistics lib/iris/
+
+ # flake8-commas (COM)
+ # https://docs.astral.sh/ruff/rules/#flake8-commas-com
+ "COM812", # Trailing comma missing.
+ "COM819", # Trailing comma prohibited.
+
+ # flake8-implicit-str-concat (ISC)
+ # https://docs.astral.sh/ruff/rules/single-line-implicit-string-concatenation/
+ # NOTE: This rule may cause conflicts when used with "ruff format".
+ "ISC001", # Implicitly concatenate string literals on one line.
+
+ # TODO: exceptions that still need investigating are below. Might be fixable, or might become permanent (above):
+ # Pyflakes (F)
+ # https://docs.astral.sh/ruff/rules/#pyflakes-f
+ "F",
+
+ # pycodestyle (E, W)
+ # https://docs.astral.sh/ruff/rules/#pycodestyle-e-w
+ "E",
+
+ # mccabe (C90)
+ # https://docs.astral.sh/ruff/rules/#mccabe-c90
+ "C90",
+
+ # pep8-naming (N)
+ # https://docs.astral.sh/ruff/rules/#pep8-naming-n
+ "N",
+
+ # pydocstyle (D)
+ # https://docs.astral.sh/ruff/rules/#pydocstyle-d
+ # (D-1) Permanent
+ "D105", # Missing docstring in magic method
+ # (D-2) Temporary, to be removed when we are more compliant. Rare cases move to (1). 
+ "D101", # Missing docstring in public class + "D102", # Missing docstring in public method + # (D-3) Temporary, before an initial review, either fix ocurrences or move to (2). + "D103", # Missing docstring in public function + + # pyupgrade (UP) + # https://docs.astral.sh/ruff/rules/#pyupgrade-up + "UP", + + # flake8-annotations (ANN) + # https://docs.astral.sh/ruff/rules/#flake8-annotations-ann + "ANN", + + # flake8-bandit (S) + # https://docs.astral.sh/ruff/rules/#flake8-bandit-s + "S", + + # flake8-blind-except (BLE) + # https://docs.astral.sh/ruff/rules/#flake8-blind-except-ble + "BLE", + + # flake8-boolean-trap (FBT) + # https://docs.astral.sh/ruff/rules/#flake8-boolean-trap-fbt + "FBT", + + # flake8-bugbear (B) + # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b + "B", + + # flake8-builtins (A) + # https://docs.astral.sh/ruff/rules/#flake8-builtins-a + "A", + + # flake8-comprehensions (C4) + # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 + "C4", + + # flake8-datetimez (DTZ) + # https://docs.astral.sh/ruff/rules/#flake8-datetimez-dtz + "DTZ", + + # flake8-errmsg (EM) + # https://docs.astral.sh/ruff/rules/#flake8-errmsg-em + "EM", + + # flake8-future-annotations (FA) + # https://docs.astral.sh/ruff/rules/#flake8-future-annotations-fa + "FA", + + # flake8-logging-format (G) + # https://docs.astral.sh/ruff/rules/#flake8-logging-format-g + "G", + + # flake8-no-pep420 (INP) + # https://docs.astral.sh/ruff/rules/#flake8-no-pep420-inp + "INP", + + # flake8-pie (PIE) + # https://docs.astral.sh/ruff/rules/#flake8-pie-pie + "PIE", + + # flake8-print (T20) + # https://docs.astral.sh/ruff/rules/#flake8-print-t20 + "T20", + + # flake8-pyi (PYI) + # https://docs.astral.sh/ruff/rules/#flake8-pyi-pyi + "PYI", + + # flake8-pytest-style (PT) + # https://docs.astral.sh/ruff/rules/#flake8-pytest-style-pt + "PT019", + + # flake8-raise (RSE) + # https://docs.astral.sh/ruff/rules/#flake8-raise-rse + "RSE", + + # flake8-return (RET) + # https://docs.astral.sh/ruff/rules/#flake8-return-ret + "RET", + + # flake8-self (SLF) + # https://docs.astral.sh/ruff/rules/#flake8-self-slf + "SLF", + + # flake8-slots (SLOT) + # https://docs.astral.sh/ruff/rules/#flake8-slots-slot + "SLOT", + + # flake8-simplify (SIM) + # https://docs.astral.sh/ruff/rules/#flake8-simplify-sim + "SIM", + + # flake8-tidy-imports (TID) + # https://docs.astral.sh/ruff/rules/#flake8-tidy-imports-tid + "TID", + + # flake8-type-checking (TCH) + # https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch + "TCH", + + # flake8-unused-arguments (ARG) + # https://docs.astral.sh/ruff/rules/#flake8-unused-arguments-arg + "ARG", + + # flake8-use-pathlib (PTH) + # https://docs.astral.sh/ruff/rules/#flake8-use-pathlib-pth + "PTH", + + # flake8-todos (TD) + # https://docs.astral.sh/ruff/rules/#flake8-todos-td + "TD", + + # flake8-fixme (FIX) + # https://docs.astral.sh/ruff/rules/#flake8-fixme-fix + "FIX", + + # eradicate (ERA) + # https://docs.astral.sh/ruff/rules/#eradicate-era + "ERA", + + # pandas-vet (PD) + # https://docs.astral.sh/ruff/rules/#pandas-vet-pd + "PD", + + # pygrep-hooks (PGH) + # https://docs.astral.sh/ruff/rules/#pygrep-hooks-pgh + "PGH", + + # Pylint (PL) + # https://docs.astral.sh/ruff/rules/#pylint-pl + "PL", + + # tryceratops (TRY) + # https://docs.astral.sh/ruff/rules/#tryceratops-try + "TRY", + + # flynt (FLY) + # https://docs.astral.sh/ruff/rules/#flynt-fly + "FLY", + + # Perflint (PERF) + # https://docs.astral.sh/ruff/rules/#perflint-perf + "PERF", + + # Ruff-specific rules (RUF) + # 
https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf + "RUF", +] +preview = false +select = [ + "ALL", + # Note: the above "all" disables conflicting rules, if you want that + # rule it needs to be explicitly enabled below: + "D212", # Multi-line docstring summary should start at the first line +] + +[tool.ruff.lint.isort] +force-sort-within-sections = true +known-first-party = ["iris"] +known-local-folder = ["_asv_delegated_abc"] + +[tool.ruff.lint.mccabe] +max-complexity = 22 + +[tool.ruff.lint.per-file-ignores] +# https://docs.astral.sh/ruff/rules +# All test scripts + +"lib/iris/tests/*.py" = [ + # https://docs.astral.sh/ruff/rules/undocumented-public-module/ + "D100", # Missing docstring in public module + "D106", # Missing docstring in public nested class + "D205", # 1 blank line required between summary line and description + "D401", # 1 First line of docstring should be in imperative mood +] +"tools/test_*.py" = [ + "D100", + "D106", + "D205", + "D401", +] + +# Deprecated unittest tests + +"lib/iris/tests/__init__.py" = ["PT"] +"lib/iris/tests/unit/tests/test_IrisTest.py" = ["PT"] + +[tool.ruff.lint.pydocstyle] +convention = "numpy" + +[tool.setuptools] +zip-safe = false + +[tool.setuptools.dynamic] +dependencies = {file = "requirements/pypi-core.txt"} +readme = {file = "README.md", content-type = "text/markdown"} + +[tool.setuptools.packages.find] +include = ["iris*"] +where = ["lib"] + +[tool.setuptools_scm] +local_scheme = "dirty-tag" +# https://setuptools-scm.readthedocs.io/en/latest/extending/#version-number-construction +version_scheme = "release-branch-semver" +write_to = "lib/iris/_version.py" diff --git a/tools/test_release_do_nothing.py b/tools/test_release_do_nothing.py index ed7240c6ef..a7c940f5a5 100644 --- a/tools/test_release_do_nothing.py +++ b/tools/test_release_do_nothing.py @@ -12,7 +12,6 @@ import nothing import pytest from pytest_mock import MockType - from release_do_nothing import IrisRelease, IrisVersion
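A closing note on the `noxfile.py` hunk earlier in this patch: the new shebang, the PEP 723 `# /// script` metadata block, and the `if __name__ == "__main__": nox.main()` guard combine to make the noxfile directly executable. A minimal sketch of that pattern in isolation; the `tests` session body below is illustrative and not taken from this patch:

    #!/usr/bin/env python3
    # /// script
    # dependencies = ["nox"]
    # ///
    """Minimal self-running noxfile, mirroring the pattern added in this patch."""

    import nox

    nox.options.reuse_existing_virtualenvs = True


    @nox.session(python=["3.12", "3.13", "3.14"])
    def tests(session: nox.sessions.Session) -> None:
        """Illustrative placeholder session: install and run pytest."""
        session.install("pytest")
        session.run("pytest")


    if __name__ == "__main__":
        # Delegates to the nox CLI, so `./noxfile.py -s tests` behaves like
        # `nox -s tests`; PEP 723-aware runners (e.g. `uv run noxfile.py`)
        # can also resolve the inline `dependencies` block.
        nox.main()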