Merge pull request #165 from sbesson/write_multiscales_datasets_dict
Multiscales metadata API: only support datasets as lists of dictionaries
sbesson authored Feb 7, 2022
2 parents 3e3327c + f2e1341 · commit b5f07e4
Showing 3 changed files with 105 additions and 26 deletions.
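
In short, write_multiscales_metadata now expects each dataset entry to be a dictionary with at least a "path" key. Below is a minimal sketch of the new call style, mirroring the tests added in this commit; the in-memory zarr group and the two-axis example values are illustrative assumptions, not part of the change itself.

import zarr

from ome_zarr.writer import write_multiscales_metadata

root = zarr.group()  # illustrative in-memory group; the tests use a fixture group

axes = [{"name": "y", "type": "space"}, {"name": "x", "type": "space"}]
datasets = [
    {
        "path": "0",
        "coordinateTransformations": [{"type": "scale", "scale": [1, 1]}],
    }
]

# Plain path strings such as ["0"] are no longer coerced and now raise ValueError.
write_multiscales_metadata(root, datasets, axes=axes)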
ome_zarr/format.py: 35 changes (19 additions, 16 deletions)
@@ -280,18 +280,20 @@ def validate_coordinate_transformations(
             )
         for transformations in coordinate_transformations:
             assert isinstance(transformations, list)
+            types = [t.get("type", None) for t in transformations]
+            if any([t is None for t in types]):
+                raise ValueError("Missing type in: %s" % transformations)
             # validate scales...
-            scale_transfs = [
-                trans for trans in transformations if trans["type"] == "scale"
-            ]
-            if len(scale_transfs) != 1:
+            if sum(t == "scale" for t in types) != 1:
                 raise ValueError(
                     "Must supply 1 'scale' item in coordinate_transformations"
                 )
             # first transformation must be scale
-            first = transformations[0]
-            if first["type"] != "scale":
+            if types[0] != "scale":
                 raise ValueError("First coordinate_transformations must be 'scale'")
+            first = transformations[0]
+            if "scale" not in transformations[0]:
+                raise ValueError("Missing scale argument in: %s" % first)
             scale = first["scale"]
             if len(scale) != ndim:
                 raise ValueError(
@@ -303,25 +305,26 @@ def validate_coordinate_transformations(
                     raise ValueError(f"'scale' values must all be numbers: {scale}")

             # validate translations...
-            translates = [
-                trans for trans in transformations if trans["type"] == "translation"
-            ]
-            if len(translates) > 1:
+            translation_types = [t == "translation" for t in types]
+            if sum(translation_types) > 1:
                 raise ValueError(
                     "Must supply 0 or 1 'translation' item in"
                     "coordinate_transformations"
                 )
-            elif len(translates) == 1:
-                translate = translates[0]["translation"]
-                if len(translate) != ndim:
+            elif sum(translation_types) == 1:
+                transformation = transformations[types.index("translation")]
+                if "translation" not in transformation:
+                    raise ValueError("Missing scale argument in: %s" % first)
+                translation = transformation["translation"]
+                if len(translation) != ndim:
                     raise ValueError(
                         "'translation' list %s must match image dimensions count: %s"
-                        % (translate, ndim)
+                        % (translation, ndim)
                     )
-                for value in translate:
+                for value in translation:
                     if not isinstance(value, (float, int)):
                         raise ValueError(
-                            f"'translation' values must all be numbers: {translate}"
+                            f"'translation' values must all be numbers: {translation}"
                         )


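For reference, a short sketch of the tightened per-dataset validation above, invoked the same way writer.py calls it below; the dimension count and transformation values are illustrative, and CurrentFormat is assumed to resolve to the 0.4 format.

from ome_zarr.format import CurrentFormat

fmt = CurrentFormat()

# Each dataset level supplies one list containing exactly one "scale" entry
# (listed first) and at most one "translation" entry, each of length ndim.
fmt.validate_coordinate_transformations(
    2,  # ndim: number of image dimensions (illustrative)
    1,  # number of dataset levels
    [
        [
            {"type": "scale", "scale": [1, 1]},
            {"type": "translation", "translation": [0, 0]},
        ]
    ],
)

# A missing "type" key, a "translation" listed before the "scale", or values of
# the wrong length or type raise ValueError, as exercised by the new tests.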
ome_zarr/writer.py: 8 changes (2 additions, 6 deletions)
@@ -126,17 +126,13 @@ def _validate_datasets(
     datasets: List[dict], dims: int, fmt: Format = CurrentFormat()
 ) -> List[Dict]:

-    validated_datasets = []
     if datasets is None or len(datasets) == 0:
         raise ValueError("Empty datasets list")
     transformations = []
     for dataset in datasets:
-        if isinstance(dataset, str):
-            validated_datasets.append({"path": dataset})
-        elif isinstance(dataset, dict):
+        if isinstance(dataset, dict):
             if not dataset.get("path"):
                 raise ValueError("no 'path' in dataset")
-            validated_datasets.append(dataset)
             transformation = dataset.get("coordinateTransformations")
             # transformation may be None for < 0.4 - validated below
             if transformation is not None:
@@ -145,7 +141,7 @@ def _validate_datasets(
             raise ValueError(f"Unrecognized type for {dataset}")

     fmt.validate_coordinate_transformations(dims, len(datasets), transformations)
-    return validated_datasets
+    return datasets


 def _validate_plate_wells(
tests/test_writer.py: 88 changes (84 additions, 4 deletions)
@@ -297,7 +297,7 @@ def test_multi_levels_transformations(self):

     @pytest.mark.parametrize("fmt", (FormatV01(), FormatV02(), FormatV03()))
     def test_version(self, fmt):
-        write_multiscales_metadata(self.root, ["0"], fmt=fmt)
+        write_multiscales_metadata(self.root, [{"path": "0"}], fmt=fmt)
         assert "multiscales" in self.root.attrs
         assert self.root.attrs["multiscales"][0]["version"] == fmt.version
         assert self.root.attrs["multiscales"][0]["datasets"] == [{"path": "0"}]
@@ -315,8 +315,10 @@ def test_version(self, fmt):
             ["t", "c", "z", "y", "x"],
         ),
     )
-    def test_axes(self, axes):
-        write_multiscales_metadata(self.root, ["0"], fmt=FormatV03(), axes=axes)
+    def test_axes_V03(self, axes):
+        write_multiscales_metadata(
+            self.root, [{"path": "0"}], fmt=FormatV03(), axes=axes
+        )
         assert "multiscales" in self.root.attrs
         # for v0.3, axes is a list of names
         assert self.root.attrs["multiscales"][0]["axes"] == axes
@@ -327,7 +329,7 @@ def test_axes(self, axes):
     @pytest.mark.parametrize("fmt", (FormatV01(), FormatV02()))
     def test_axes_ignored(self, fmt):
         write_multiscales_metadata(
-            self.root, ["0"], fmt=fmt, axes=["t", "c", "z", "y", "x"]
+            self.root, [{"path": "0"}], fmt=fmt, axes=["t", "c", "z", "y", "x"]
         )
         assert "multiscales" in self.root.attrs
         assert "axes" not in self.root.attrs["multiscales"][0]
@@ -346,6 +348,84 @@ def test_invalid_0_3_axes(self, axes):
         with pytest.raises(ValueError):
             write_multiscales_metadata(self.root, ["0"], fmt=FormatV03(), axes=axes)

+    @pytest.mark.parametrize("datasets", ([], None, "0", ["0"], [{"key": 1}]))
+    def test_invalid_datasets(self, datasets):
+        with pytest.raises(ValueError):
+            write_multiscales_metadata(
+                self.root, datasets, axes=["t", "c", "z", "y", "x"]
+            )
+
+    @pytest.mark.parametrize(
+        "coordinateTransformations",
+        (
+            [{"type": "scale", "scale": [1, 1]}],
+            [
+                {"type": "scale", "scale": [1, 1]},
+                {"type": "translation", "translation": [0, 0]},
+            ],
+        ),
+    )
+    def test_valid_transformations(self, coordinateTransformations):
+        axes = [{"name": "y", "type": "space"}, {"name": "x", "type": "space"}]
+        datasets = [
+            {
+                "path": "0",
+                "coordinateTransformations": coordinateTransformations,
+            }
+        ]
+        write_multiscales_metadata(self.root, datasets, axes=axes)
+        assert "multiscales" in self.root.attrs
+        assert self.root.attrs["multiscales"][0]["axes"] == axes
+        assert self.root.attrs["multiscales"][0]["datasets"] == datasets
+
+    @pytest.mark.parametrize(
+        "coordinateTransformations",
+        (
+            [],
+            None,
+            [{"type": "scale"}],
+            [{"scale": [1, 1]}],
+            [{"type": "scale", "scale": ["1", 1]}],
+            [{"type": "scale", "scale": [1, 1, 1]}],
+            [{"type": "scale", "scale": [1, 1]}, {"type": "scale", "scale": [1, 1]}],
+            [
+                {"type": "scale", "scale": [1, 1]},
+                {"type": "translation", "translation": ["0", 0]},
+            ],
+            [
+                {"type": "translation", "translation": [0, 0]},
+            ],
+            [
+                {"type": "scale", "scale": [1, 1]},
+                {"type": "translation", "translation": [0, 0, 0]},
+            ],
+            [
+                {"type": "translation", "translation": [0, 0]},
+                {"type": "scale", "scale": [1, 1]},
+            ],
+            [
+                {"type": "scale", "scale": [1, 1]},
+                {"type": "translation", "translation": [0, 0]},
+                {"type": "translation", "translation": [1, 0]},
+            ],
+            [
+                {"type": "scale", "scale": [1, 1]},
+                {"translation": [0, 0]},
+            ],
+            [
+                {"type": "scale", "scale": [1, 1]},
+                {"type": "translation", "translate": [0, 0]},
+            ],
+        ),
+    )
+    def test_invalid_transformations(self, coordinateTransformations):
+        axes = [{"name": "y", "type": "space"}, {"name": "x", "type": "space"}]
+        datasets = [
+            {"path": "0", "coordinateTransformations": coordinateTransformations}
+        ]
+        with pytest.raises(ValueError):
+            write_multiscales_metadata(self.root, datasets, axes=axes)
+

 class TestPlateMetadata:
     @pytest.fixture(autouse=True)