Skip to content

Commit

Permalink
Merge branch '402-global-disparity-interval-for-ambiguity' into 'rele…
Browse files Browse the repository at this point in the history
…ase'

Resolve "Normalisation de l'ambiguité par intervalle global externe"

See merge request 3d/PandoraBox/pandora!349
  • Loading branch information
lecontm committed Jun 13, 2024
2 parents c9d5826 + 906f645 commit 0c16384
Show file tree
Hide file tree
Showing 5 changed files with 144 additions and 3 deletions.
4 changes: 4 additions & 0 deletions docs/source/userguide/step_by_step/cost_volume_confidence.rst
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ The available methods are :
`Sarrazin, E., Cournet, M., Dumas, L., Defonte, V., Fardet, Q., Steux, Y., Jimenez Diaz, N., Dubois, E., Youssefi, D., Buffe, F., 2021. Ambiguity concept in stereo matching pipeline.
ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences. <https://isprs-archives.copernicus.org/articles/XLIII-B2-2021/383/2021/>`_

.. note ::
If Pandora is run on tiled data, we recommend adding the global disparity interval to the image dataset with the ``add_global_disparity`` function to avoid tiling effects. In this case, the ambiguity
is normalized with this global disparity interval instead of the percentile-based normalization.
- Risk. This metric consists in evaluating a risk interval associated with the correlation measure, and ultimately the selected disparity, for each point on the disparity map :

Expand Down
35 changes: 32 additions & 3 deletions pandora/cost_volume_confidence/ambiguity.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
"""
This module contains functions for estimating confidence from ambiguity.
"""

import logging
import warnings
import os
from typing import Dict, Tuple, Union
Expand Down Expand Up @@ -145,7 +145,17 @@ def confidence_prediction(

# If activated, ambiguity normalization with percentile
if self._normalization:
ambiguity = self.normalize_with_percentile(ambiguity)
if "global_disparity" in img_left.attrs:
ambiguity = self.normalize_with_extremum(ambiguity, img_left)
logging.info(
"You are not using ambiguity normalization by percentile; \n"
"you are in a specific case with the instantiation of global_disparity."
)
# in case of cross correlation
elif "global_disparity" in img_right.attrs:
ambiguity = self.normalize_with_extremum(ambiguity, img_right)
else:
ambiguity = self.normalize_with_percentile(ambiguity)

# Conversion of ambiguity into a confidence measure
ambiguity = 1 - ambiguity
Expand All @@ -154,7 +164,7 @@ def confidence_prediction(

return disp, cv

def normalize_with_percentile(self, ambiguity: np.ndarray) -> np.ndarray:
    """
    Normalize ambiguity with percentile

    Values below/above the chosen percentiles are clipped, then the result
    is min-max rescaled to [0, 1].

    :param ambiguity: ambiguity
    :type ambiguity: 2D np.ndarray (row, col) dtype = float32
    :return: the normalized ambiguity
    :rtype: 2D np.ndarray (row, col) dtype = float32
    """
    norm_amb = np.copy(ambiguity)
    perc_min = np.percentile(norm_amb, self._percentile)
    perc_max = np.percentile(norm_amb, 100 - self._percentile)
    np.clip(norm_amb, perc_min, perc_max, out=norm_amb)

    # Hoist the extrema: np.min was previously evaluated twice in the return expression
    amb_min = np.min(norm_amb)
    amb_max = np.max(norm_amb)
    return (norm_amb - amb_min) / (amb_max - amb_min)

def normalize_with_extremum(self, ambiguity: np.ndarray, dataset: "xr.Dataset") -> np.ndarray:
    """
    Normalize ambiguity with the global disparity extremum

    Used instead of the percentile normalization when a "global_disparity"
    attribute is present on the image dataset (tiling use case), so every
    tile is normalized against the same external interval.

    :param ambiguity: ambiguity
    :type ambiguity: 2D np.ndarray (row, col) dtype = float32
    :param dataset: Dataset image carrying the "global_disparity" attribute
    :type dataset: xarray.Dataset
    :return: the normalized ambiguity
    :rtype: 2D np.ndarray (row, col) dtype = float32
    """
    global_disp_min, global_disp_max = dataset.attrs["global_disparity"]
    # Largest reachable raw ambiguity: full disparity interval counted for every eta
    max_norm = (global_disp_max - global_disp_min) * self._nbr_etas

    # Division returns a new array, so no defensive copy of `ambiguity` is needed
    return ambiguity / max_norm

@staticmethod
@njit(
"f4[:, :](f4[:, :, :], f8[:], i8, i8[:, :, :],f4[:])",
Expand Down
26 changes: 26 additions & 0 deletions pandora/img_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,6 +289,32 @@ def add_mask(
return dataset


def add_global_disparity(dataset: "xr.Dataset", global_disp_min: int, global_disp_max: int) -> "xr.Dataset":
    """
    Add global disparity information to dataset

    Used when Pandora processes one tile of a larger image: the global
    interval lets the ambiguity step normalize every tile identically.

    :param dataset: image dataset carrying "disp_min" and "disp_max" attributes
    :type dataset: xr.Dataset
    :param global_disp_min: global minimum disparity
    :type global_disp_min: int
    :param global_disp_max: global maximum disparity
    :type global_disp_max: int
    :return: dataset : updated dataset
    :rtype: xr.Dataset
    :raises ValueError: if [global_disp_min, global_disp_max] does not enclose
        the dataset's [disp_min, disp_max] interval
    """

    img_grid_min = dataset.attrs["disp_min"]
    img_grid_max = dataset.attrs["disp_max"]

    # The global interval must enclose the tile's own disparity interval
    if global_disp_min > img_grid_min or global_disp_max < img_grid_max:
        raise ValueError("For ambiguity step, the global disparity must be outside the range of the grid disparity")

    # Add global disparity to dataset in case of tiling ambiguity
    dataset.attrs.update({"global_disparity": [global_disp_min, global_disp_max]})

    return dataset


def create_dataset_from_inputs(input_config: dict, roi: dict = None) -> xr.Dataset:
"""
Read image and mask, and return the corresponding xarray.DataSet
Expand Down
31 changes: 31 additions & 0 deletions tests/test_confidence/test_ambiguity.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import xarray as xr

import pandora.cost_volume_confidence as confidence
from pandora import img_tools


def test_ambiguity(create_img_for_confidence):
Expand Down Expand Up @@ -194,3 +195,33 @@ def test_compute_compute_ambiguity_and_sampled_ambiguity_with_variable_disparity
# Check if the calculated ambiguity is equal to the ground truth (same shape and all elements equals)
np.testing.assert_allclose(amb, gt_amb_int, rtol=1e-06)
np.testing.assert_allclose(amb_sampl, gt_sampl_amb, rtol=1e-06)


def test_normalize_with_extremum(create_img_for_confidence):
    """
    test normalize_with_extremum function
    """

    # Build input data from the fixture
    left_im, _ = create_img_for_confidence

    # Tile-level disparity bounds
    left_im.attrs["disp_min"] = 0
    left_im.attrs["disp_max"] = 1

    # Global (external) disparity interval enclosing the tile bounds
    left_im = img_tools.add_global_disparity(left_im, -2, 2)

    amb_instance = confidence.AbstractCostVolumeConfidence(
        **{"confidence_method": "ambiguity", "eta_max": 0.2, "eta_step": 0.1}
    )
    raw_ambiguity = np.ones((4, 4))

    # Function under test
    result = amb_instance.normalize_with_extremum(raw_ambiguity, left_im)

    # Expected result: ambiguity divided by (global interval length * number of etas)
    etas_count = len(np.arange(0.0, 0.2, 0.1))
    expected = np.copy(raw_ambiguity) / ((2 - (-2)) * etas_count)

    np.testing.assert_array_equal(result, expected)
51 changes: 51 additions & 0 deletions tests/test_pandora_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -1108,3 +1108,54 @@ def test_fail_with_wrong_classification_param(self, input_cfg, classif, expected
img_tools.get_metadata(
img=input_cfg["input"]["left"]["img"], disparity=input_cfg["input"]["left"]["disp"], classif=classif
)


def test_add_global_disparity(monoband_image):
    """
    Test add_global_disparity function
    """

    dataset = monoband_image

    # Tile disparity bounds (CARS tiling use case)
    dataset.attrs.update({"disp_min": -2, "disp_max": 2})

    result = img_tools.add_global_disparity(dataset, -2, 2)

    # The global interval must be stored as a [min, max] attribute
    assert result.attrs["global_disparity"] == [-2, 2]


@pytest.mark.parametrize(
    ["disparities", "expected_error"],
    [
        # Global minimum inside the grid interval -> rejected
        pytest.param(
            [0, 2],
            "For ambiguity step, the global disparity must be outside the range of the grid disparity",
            id="global_min error",
        ),
        # Global maximum inside the grid interval -> rejected
        pytest.param(
            [-2, 1],
            "For ambiguity step, the global disparity must be outside the range of the grid disparity",
            id="global_max error",
        ),
        # Both bounds inside the grid interval -> rejected
        pytest.param(
            [0, 1],
            "For ambiguity step, the global disparity must be outside the range of the grid disparity",
            id="global_extremum error",
        ),
    ],
)
def test_add_global_disparity_failed(monoband_image, disparities, expected_error):
    """
    Test add_global_disparity function

    Checks that a ValueError is raised when the global disparity interval
    does not enclose the dataset's grid interval [-2, 2].
    """

    dataset = monoband_image

    # add disparity for CARS tiling
    dataset.attrs["disp_min"] = -2
    dataset.attrs["disp_max"] = 2

    with pytest.raises(ValueError, match=expected_error):
        _ = img_tools.add_global_disparity(dataset, disparities[0], disparities[1])

0 comments on commit 0c16384

Please sign in to comment.