
Commit

Merge branch '400-add-ambiguity-invalid-disp-processing' into 'release'
Resolve "Bug: ambiguity and risk computation on local disparity intervals"

See merge request 3d/PandoraBox/pandora!347
lecontm committed Jun 13, 2024
2 parents ec66e88 + 015f98d commit c9d5826
Showing 12 changed files with 1,471 additions and 955 deletions.
5 changes: 0 additions & 5 deletions pandora/cost_volume_confidence/__init__.py
@@ -1,6 +1,3 @@
-# pylint: disable=missing-module-docstring
-#
-# coding: utf8
 # Copyright (c) 2024 Centre National d'Etudes Spatiales (CNES).
 #
 # This file is part of PANDORA
@@ -27,5 +24,3 @@
 from . import interval_bounds
 from . import std_intensity
 from . import risk
-
-# pylint: disable=missing-module-docstring
144 changes: 93 additions & 51 deletions pandora/cost_volume_confidence/ambiguity.py
@@ -1,6 +1,3 @@
-#!/usr/bin/env python
-# coding: utf8
-#
 # Copyright (c) 2024 Centre National d'Etudes Spatiales (CNES).
 #
 # This file is part of PANDORA
@@ -68,6 +65,8 @@ def __init__(self, **cfg: str) -> None:
         self._eta_max = float(self.cfg["eta_max"])
         self._eta_step = float(self.cfg["eta_step"])
         self._indicator = self._method + str(self.cfg["indicator"])
+        self._etas = np.arange(self._eta_min, self._eta_max, self._eta_step)
+        self._nbr_etas = self._etas.shape[0]

     def check_conf(self, **cfg: Union[str, float]) -> Dict[str, Union[str, float]]:
         """
@@ -130,11 +129,19 @@ def confidence_prediction(
             - confidence_measure 3D xarray.DataArray (row, col, indicator)
         """

+        grids = np.array(
+            [img_left["disparity"].sel(band_disp="min"), img_left["disparity"].sel(band_disp="max")], dtype=np.int64
+        )
+        # Get disparity intervals parameters
+        disparity_range = cv["disp"].data.astype(np.float32)
         # This silences numba's TBB threading layer warning
         with warnings.catch_warnings():
             warnings.filterwarnings("ignore")
             # Computes ambiguity using numba in parallel for memory and computation time optimization
-            ambiguity = self.compute_ambiguity(cv["cost_volume"].data, self._eta_min, self._eta_max, self._eta_step)
+            ambiguity = self.compute_ambiguity(
+                cv["cost_volume"].data, self._etas, self._nbr_etas, grids, disparity_range
+            )

         # If activated, ambiguity normalization with percentile
         if self._normalization:
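
The kernel now receives the per-pixel disparity interval (grids) alongside the full disparity axis of the cost volume (disparity_range). A small sketch with made-up sizes of what these inputs look like, and how searchsorted maps disparity bounds to slice indices the same way the kernel does further down in this diff:

```python
import numpy as np

# Toy 2x3 image: every pixel allows disparities in [-2, 2] here,
# though in general the bounds vary per pixel.
disp_min = np.full((2, 3), -2, dtype=np.int64)  # band_disp="min"
disp_max = np.full((2, 3), 2, dtype=np.int64)   # band_disp="max"
grids = np.array([disp_min, disp_max])          # shape (2, row, col)

# Full disparity axis of the cost volume (cv["disp"]).
disparity_range = np.arange(-2, 3, dtype=np.float32)  # [-2. -1. 0. 1. 2.]

# Per pixel, searchsorted turns disparity bounds into slice indices.
row, col = 0, 1
idx_disp_min = np.searchsorted(disparity_range, grids[0][row, col])
idx_disp_max = np.searchsorted(disparity_range, grids[1][row, col]) + 1
print(idx_disp_min, idx_disp_max)  # 0 5 -> the whole axis is valid here
```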
@@ -152,9 +159,9 @@ def normalize_with_percentile(self, ambiguity):
         Normalize ambiguity with percentile

         :param ambiguity: ambiguity
-        :type ambiguity: 2D np.array (row, col) dtype = float32
+        :type ambiguity: 2D np.ndarray (row, col) dtype = float32
         :return: the normalized ambiguity
-        :rtype: 2D np.array (row, col) dtype = float32
+        :rtype: 2D np.ndarray (row, col) dtype = float32
         """
         norm_amb = np.copy(ambiguity)
         perc_min = np.percentile(norm_amb, self._percentile)
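
The method body is truncated at this point in the diff. The following is a hedged sketch of a percentile-based min-max rescaling; this is assumed behaviour, not the exact code hidden behind the fold:

```python
import numpy as np

# Assumed rescaling between the low and high percentile bounds.
ambiguity = np.random.rand(4, 5).astype(np.float32)
percentile = 1.0  # assumed value of self._percentile
perc_min = np.percentile(ambiguity, percentile)
perc_max = np.percentile(ambiguity, 100 - percentile)
norm_amb = np.clip((ambiguity - perc_min) / (perc_max - perc_min), 0, 1)
```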
@@ -165,121 +172,156 @@

     @staticmethod
     @njit(
-        "f4[:, :](f4[:, :, :], f4, f4, f4)",
-        parallel=literal_eval(os.environ.get("PANDORA_NUMBA_PARALLEL", "True")),
+        "f4[:, :](f4[:, :, :], f8[:], i8, i8[:, :, :], f4[:])",
+        parallel=literal_eval(os.environ.get("PANDORA_NUMBA_PARALLEL", "False")),
         cache=True,
     )
-    def compute_ambiguity(cv: np.ndarray, _eta_min: float, _eta_max: float, _eta_step: float) -> np.ndarray:
+    def compute_ambiguity(
+        cv: np.ndarray,
+        etas: np.ndarray,
+        nbr_etas: int,
+        grids: np.ndarray,
+        disparity_range: np.ndarray,
+    ) -> np.ndarray:
         """
         Computes ambiguity.

         :param cv: cost volume
-        :type cv: 3D np.array (row, col, disp)
-        :param _eta_min: minimal eta
-        :type _eta_min: float
-        :param _eta_max: maximal eta
-        :type _eta_max: float
-        :param _eta_step: eta step
-        :type _eta_step: float
+        :type cv: 3D np.ndarray (row, col, disp)
+        :param etas: range between eta_min and eta_max with step eta_step
+        :type etas: np.ndarray
+        :param nbr_etas: number of etas
+        :type nbr_etas: int
+        :param grids: array containing min and max disparity grids
+        :type grids: 2D np.ndarray (min, max)
+        :param disparity_range: array containing disparity range
+        :type disparity_range: np.ndarray
         :return: the normalized ambiguity
-        :rtype: 2D np.array (row, col) dtype = float32
+        :rtype: 2D np.ndarray (row, col) dtype = float32
         """
-        # Miniumum and maximum of all costs, useful to normalize the cost volume
+        # Minimum and maximum of all costs, useful to normalize the cost volume
         min_cost = np.nanmin(cv)
         max_cost = np.nanmax(cv)

         n_row, n_col, nb_disps = cv.shape

-        etas = np.arange(_eta_min, _eta_max, _eta_step)
-
         # Numba does not support the np.tile operation
         two_dim_etas = np.repeat(etas, nb_disps).reshape((-1, nb_disps)).T.flatten()

         # integral of ambiguity
         ambiguity = np.zeros((n_row, n_col), dtype=np.float32)

+        diff_cost = max_cost - min_cost
+
         for row in prange(n_row):  # pylint: disable=not-an-iterable
             for col in prange(n_col):  # pylint: disable=not-an-iterable
                 # Normalized minimum cost for one point
-                normalized_min_cost = (np.nanmin(cv[row, col, :]) - min_cost) / (max_cost - min_cost)
+                normalized_min_cost = (np.nanmin(cv[row, col, :]) - min_cost) / diff_cost

                 # If all costs are at nan, set the maximum value of the ambiguity for this point
                 if np.isnan(normalized_min_cost):
-                    ambiguity[row, col] = etas.shape[0] * nb_disps
+                    ambiguity[row, col] = nbr_etas * nb_disps
                 else:
-                    normalized_min_cost = np.repeat(normalized_min_cost, nb_disps * etas.shape[0])
+                    idx_disp_min = np.searchsorted(disparity_range, grids[0][row, col])
+                    idx_disp_max = np.searchsorted(disparity_range, grids[1][row, col]) + 1
+
+                    normalized_min_cost = np.repeat(normalized_min_cost, nb_disps * nbr_etas)
                     # Normalized cost volume for one point
-                    normalized_cv = (cv[row, col, :] - min_cost) / (max_cost - min_cost)
-                    # Mask nan to -inf to increase the value of the ambiguity if a point contains nan costs
-                    normalized_cv[np.isnan(normalized_cv)] = -np.inf
-                    normalized_cv = np.repeat(normalized_cv, etas.shape[0])
-
-                    ambiguity[row, col] += np.sum(normalized_cv <= (normalized_min_cost + two_dim_etas))
+                    normalized_cv = (cv[row, col, :] - min_cost) / diff_cost
+
+                    # Mask nan to -inf to increase the value of the ambiguity if a point contains nan costs
+                    normalized_cv[idx_disp_min:idx_disp_max][
+                        np.isnan(normalized_cv[idx_disp_min:idx_disp_max])
+                    ] = -np.inf
+
+                    normalized_cv[:idx_disp_min][np.isnan(normalized_cv[:idx_disp_min])] = np.inf
+                    normalized_cv[idx_disp_max:][np.isnan(normalized_cv[idx_disp_max:])] = np.inf
+
+                    normalized_cv = np.repeat(normalized_cv, nbr_etas)
+
+                    ambiguity[row, col] += np.nansum(normalized_cv <= (normalized_min_cost + two_dim_etas))

         return ambiguity

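For intuition about what this kernel accumulates: for each eta, it counts the disparities whose normalized cost lies within eta of the per-pixel normalized minimum, then sums those counts over all etas, so flat cost curves yield larger values. A single-pixel, pure-NumPy illustration with toy numbers (no numba; the global extrema are assumed):

```python
import numpy as np

# One pixel's cost curve over 4 disparities (toy values).
costs = np.array([0.9, 0.2, 0.25, 0.8], dtype=np.float32)
min_cost, max_cost = 0.0, 1.0      # assumed global extrema of the cost volume
norm = (costs - min_cost) / (max_cost - min_cost)

etas = np.arange(0.0, 0.2, 0.1)    # two thresholds for brevity: 0.0 and 0.1

# eta=0.0 keeps only the minimum (1 disparity); eta=0.1 also keeps 0.25 (2).
ambiguity = sum(np.sum(norm <= norm.min() + eta) for eta in etas)
print(ambiguity)  # 3
```
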
     @staticmethod
     @njit(
-        "Tuple((f4[:, :],f4[:, :, :]))(f4[:, :, :], f4, f4, f4)",
-        parallel=literal_eval(os.environ.get("PANDORA_NUMBA_PARALLEL", "True")),
+        "Tuple((f4[:, :],f4[:, :, :]))(f4[:, :, :], f8[:], i8, i8[:, :, :], f4[:])",
+        parallel=literal_eval(os.environ.get("PANDORA_NUMBA_PARALLEL", "False")),
         cache=True,
     )
-    def compute_ambiguity_and_sampled_ambiguity(cv: np.ndarray, _eta_min: float, _eta_max: float, _eta_step: float):
+    def compute_ambiguity_and_sampled_ambiguity(
+        cv: np.ndarray,
+        etas: np.ndarray,
+        nbr_etas: int,
+        grids: np.ndarray,
+        disparity_range: np.ndarray,
+    ):
         """
         Return the ambiguity and sampled ambiguity, useful for evaluating ambiguity in notebooks

         :param cv: cost volume
-        :type cv: 3D np.array (row, col, disp)
-        :param _eta_min: minimal eta
-        :type _eta_min: float
-        :param _eta_max: maximal eta
-        :type _eta_max: float
-        :param _eta_step: eta step
-        :type _eta_step: float
+        :type cv: 3D np.ndarray (row, col, disp)
+        :param etas: range between eta_min and eta_max with step eta_step
+        :type etas: np.ndarray
+        :param nbr_etas: number of etas
+        :type nbr_etas: int
+        :param grids: array containing min and max disparity grids
+        :type grids: 2D np.ndarray (min, max)
+        :param disparity_range: array containing disparity range
+        :type disparity_range: np.ndarray
         :return: the normalized ambiguity and sampled ambiguity
-        :rtype: Tuple(2D np.array (row, col) dtype = float32, 3D np.array (row, col) dtype = float32)
+        :rtype: Tuple(2D np.ndarray (row, col) dtype = float32, 3D np.ndarray (row, col) dtype = float32)
         """
-        # Miniumum and maximum of all costs, useful to normalize the cost volume
+        # Minimum and maximum of all costs, useful to normalize the cost volume
         min_cost = np.nanmin(cv)
         max_cost = np.nanmax(cv)

         n_row, n_col, nb_disps = cv.shape

-        etas = np.arange(_eta_min, _eta_max, _eta_step)
-
         # Numba does not support the np.tile operation
         two_dim_etas = np.repeat(etas, nb_disps).reshape((-1, nb_disps)).T.flatten()

         # integral of ambiguity
         ambiguity = np.zeros((n_row, n_col), dtype=np.float32)
-        sampled_ambiguity = np.zeros((n_row, n_col, etas.shape[0]), dtype=np.float32)
+        sampled_ambiguity = np.zeros((n_row, n_col, nbr_etas), dtype=np.float32)

+        diff_cost = max_cost - min_cost
+
         for row in prange(n_row):  # pylint: disable=not-an-iterable
             for col in prange(n_col):  # pylint: disable=not-an-iterable
                 # Normalized minimum cost for one point
-                normalized_min_cost = (np.nanmin(cv[row, col, :]) - min_cost) / (max_cost - min_cost)
+                normalized_min_cost = (np.nanmin(cv[row, col, :]) - min_cost) / diff_cost

                 # If all costs are at nan, set the maximum value of the ambiguity for this point
                 if np.isnan(normalized_min_cost):
-                    ambiguity[row, col] = etas.shape[0] * nb_disps
+                    ambiguity[row, col] = nbr_etas * nb_disps
                     sampled_ambiguity[row, col, :] = nb_disps
                 else:
-                    normalized_min_cost = np.repeat(normalized_min_cost, nb_disps * etas.shape[0])
+                    normalized_min_cost = np.repeat(normalized_min_cost, nb_disps * nbr_etas)

                     # Normalized cost volume for one point
                     normalized_cv = (cv[row, col, :] - min_cost) / (max_cost - min_cost)

-                    # Mask nan to -inf to increase the value of the ambiguity if a point contains nan costs
-                    normalized_cv[np.isnan(normalized_cv)] = -np.inf
-                    normalized_cv = np.repeat(normalized_cv, etas.shape[0])
+                    idx_disp_min = np.searchsorted(disparity_range, grids[0][row, col])
+                    idx_disp_max = np.searchsorted(disparity_range, grids[1][row, col]) + 1
+
+                    # Mask nan to -inf to increase the value of the ambiguity if a point contains nan costs
+                    normalized_cv[idx_disp_min:idx_disp_max][
+                        np.isnan(normalized_cv[idx_disp_min:idx_disp_max])
+                    ] = -np.inf
+                    normalized_cv[:idx_disp_min][np.isnan(normalized_cv[:idx_disp_min])] = np.inf
+                    normalized_cv[idx_disp_max:][np.isnan(normalized_cv[idx_disp_max:])] = np.inf
+
+                    normalized_cv = np.repeat(normalized_cv, nbr_etas)

                     # fill integral ambiguity
-                    ambiguity[row, col] += np.sum(normalized_cv <= (normalized_min_cost + two_dim_etas))
+                    ambiguity[row, col] += np.nansum(normalized_cv <= (normalized_min_cost + two_dim_etas))

                     # fill sampled ambiguity
                     costs_comparison = normalized_cv <= (normalized_min_cost + two_dim_etas)
-                    costs_comparison = costs_comparison.reshape((nb_disps, etas.shape[0]))
+                    costs_comparison = costs_comparison.reshape((nb_disps, nbr_etas))
                     sampled_ambiguity[row, col, :] = np.sum(costs_comparison, axis=0)

         return ambiguity, sampled_ambiguity
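
The sampled variant evaluates the same comparison but keeps one count per eta instead of collapsing everything into a single integral. A single-pixel sketch of the repeat/reshape bookkeeping used above, with toy values:

```python
import numpy as np

norm = np.array([0.2, 0.25, 0.9], dtype=np.float32)  # normalized costs, one pixel
etas = np.array([0.0, 0.1])
nb_disps, nbr_etas = norm.shape[0], etas.shape[0]

# Same layout trick as the kernel (np.tile is unsupported in numba).
two_dim_etas = np.repeat(etas, nb_disps).reshape((-1, nb_disps)).T.flatten()
normalized_min_cost = np.repeat(norm.min(), nb_disps * nbr_etas)
normalized_cv = np.repeat(norm, nbr_etas)

costs_comparison = normalized_cv <= (normalized_min_cost + two_dim_etas)
sampled = costs_comparison.reshape((nb_disps, nbr_etas)).sum(axis=0)
print(sampled)  # [1 2]: one disparity within eta=0.0, two within eta=0.1
```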
8 changes: 4 additions & 4 deletions pandora/cost_volume_confidence/cost_volume_confidence.py
@@ -119,12 +119,12 @@ def allocate_confidence_map(
     ) -> Tuple[xr.Dataset, xr.Dataset]:
         """
         Create or update the confidence measure : confidence_measure (xarray.DataArray of the cost volume and the
-        disparity map) by adding a the indicator
+        disparity map) by adding an indicator

         :param name_confidence_measure: the name of the new confidence indicator
         :type name_confidence_measure: string
-        :param confidence_map: the condidence map
-        :type confidence_map: 2D np.array (row, col) dtype=np.float32
+        :param confidence_map: the confidence map
+        :type confidence_map: 2D np.ndarray (row, col) dtype=np.float32
         :param disp: the disparity map dataset or None
         :type disp: xarray.Dataset or None
         :param cv: cost volume dataset
@@ -147,7 +147,7 @@ def allocate_confidence_map(
         conf_measure = np.full((nb_row, nb_col, nb_indicator + 1), np.nan, dtype=np.float32)
         # old confidence measures
         conf_measure[:, :, :-1] = cv["confidence_measure"].data
-        #  new confidence measure
+        # new confidence measure
         conf_measure[:, :, -1] = confidence_map

         indicator = np.copy(cv.coords["indicator"])
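
As a quick illustration of the allocation above: one new indicator band is appended to an existing (row, col, indicator) stack, keeping the old indicators in place (sizes are made up):

```python
import numpy as np

nb_row, nb_col, nb_indicator = 2, 3, 1
previous = np.zeros((nb_row, nb_col, nb_indicator), dtype=np.float32)
confidence_map = np.ones((nb_row, nb_col), dtype=np.float32)

# Allocate one extra band, copy old indicators, append the new map last.
conf_measure = np.full((nb_row, nb_col, nb_indicator + 1), np.nan, dtype=np.float32)
conf_measure[:, :, :-1] = previous
conf_measure[:, :, -1] = confidence_map
print(conf_measure.shape)  # (2, 3, 2)
```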
4 changes: 2 additions & 2 deletions pandora/cost_volume_confidence/interval_bounds.py
@@ -138,7 +138,7 @@ def confidence_prediction(
         cv: xr.Dataset = None,
     ) -> Tuple[xr.Dataset, xr.Dataset]:
         """
-        Computes a confidence measure that evaluates the minimum and maximim disparity at
+        Computes a confidence measure that evaluates the minimum and maximum disparity at
         each point with a confidence of possibility_threshold %

         :param disp: the disparity map dataset
@@ -216,7 +216,7 @@ def compute_interval_bounds(
         :param type_factor: Either 1 or -1. Used to adapt the possibility computation to max or min measures
         :type type_factor: float
         :return: the infimum and supremum (not regularized) of the set containing the true disparity
-        :rtype: Tuple(2D np.array (row, col) dtype = float32, 2D np.array (row, col) dtype = float32)
+        :rtype: Tuple(2D np.ndarray (row, col) dtype = float32, 2D np.ndarray (row, col) dtype = float32)
         """
         # IDEA: instead of transforming the cost curve into a possibility, we can compute the cost
         # threshold T to apply directly to the cost curve to obtain the same result