ENH: Upgrade generate mesh #138

Merged 1 commit on Feb 2, 2024
14 changes: 9 additions & 5 deletions doc/gen_dataset.py
@@ -4,12 +4,12 @@
DATASET = ["Cance", "France", "Lez"]
DATASET_PATH = f"{os.path.dirname(os.path.realpath(__file__))}/../smash/factory/dataset"

# TODO: Refactorize this when it gets more complicated. Pass to each dataset the files or directories
# TODO: Refactorize this when it gets more complicated. Pass to each dataset the files or directories
# that we want to be uploaded and used in the documentation.
if __name__ == "__main__":

parser = argparse.ArgumentParser()

parser.add_argument(
"-d",
"-dataset",
@@ -40,11 +40,15 @@
os.system(f"cp -r {DATASET_PATH}/{ds}/pet {DATASET_PATH}/{ds}/prcp {ds_dir}/.")

if ds == "Cance":
os.system(f"cp -r {DATASET_PATH}/{ds}/qobs {DATASET_PATH}/{ds}/gauge_attributes.csv {ds_dir}/.")
os.system(
f"cp -r {DATASET_PATH}/{ds}/qobs {DATASET_PATH}/{ds}/gauge_attributes.csv {ds_dir}/."
)

elif ds == "Lez":
os.system(f"cp -r {DATASET_PATH}/{ds}/qobs {DATASET_PATH}/{ds}/gauge_attributes.csv {DATASET_PATH}/{ds}/descriptor {ds_dir}/.")

os.system(
f"cp -r {DATASET_PATH}/{ds}/qobs {DATASET_PATH}/{ds}/gauge_attributes.csv {DATASET_PATH}/{ds}/descriptor {ds_dir}/."
)

if args.tar:
os.system(f"tar cf {ds_dir}.tar {ds_dir}")
os.system(f"rm -r {ds_dir}")
1 change: 0 additions & 1 deletion smash/core/model/model.py
@@ -96,7 +96,6 @@


class Model:

"""
Primary data structure of the hydrological model `smash`.

1 change: 0 additions & 1 deletion smash/core/signal_analysis/prcp_indices/prcp_indices.py
@@ -17,7 +17,6 @@


class PrecipitationIndices:

"""
Represents precipitation indices computation result.

39 changes: 30 additions & 9 deletions smash/factory/mesh/_tools.py
@@ -27,9 +27,30 @@ def _rowcol_to_xy(
return x, y


def _trim_zeros_2d(array: np.ndarray, shift_value: bool = False) -> np.ndarray:
def _get_catchment_slice_window(
nrow: int,
ncol: int,
row: int,
col: int,
area: float,
dx: float,
dy: float,
max_depth: int,
) -> tuple[slice, slice]:
n = np.ceil(area / (dx * dy)).astype(np.int32)
srow = np.maximum(0, row - max_depth - n)
erow = np.minimum(nrow - 1, row + max_depth + n)
scol = np.maximum(0, col - max_depth - n)
ecol = np.minimum(ncol - 1, col + max_depth + n)

return (slice(srow, erow), slice(scol, ecol))


def _trim_mask_2d(
array: np.ndarray, slice_win: bool = False
) -> (np.ndarray | np.ndarray, tuple[slice, slice]):
for ax in [0, 1]:
mask = ~(array == 0).all(axis=ax)
mask = ~(array.mask).all(axis=ax)

inv_mask = mask[::-1]

@@ -38,15 +59,15 @@ def _trim_zeros_2d(array: np.ndarray, shift_value: bool = False) -> np.ndarray:
end_ind = len(inv_mask) - np.argmax(inv_mask)

if ax == 0:
scol, ecol = start_ind, end_ind
array = array[:, start_ind:end_ind]
slice_col = slice(start_ind, end_ind)
array = array[:, slice_col]

else:
srow, erow = start_ind, end_ind
array = array[start_ind:end_ind, :]
slice_row = slice(start_ind, end_ind)
array = array[slice_row, :]

if shift_value:
return array, srow, erow, scol, ecol
if slice_win:
return array, (slice_row, slice_col)

else:
return array
@@ -56,7 +77,7 @@ def _get_array(
flwdir_dataset: gdal.Dataset, bbox: np.ndarray | None = None
) -> np.ndarray:
if bbox is not None:
xmin, xmax, xres, ymin, ymax, yres = _get_transform(flwdir_dataset)
xmin, _, xres, _, ymax, yres = _get_transform(flwdir_dataset)

col_off = int((bbox[0] - xmin) / xres)
row_off = int((ymax - bbox[3]) / yres)
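For reviewers, here is a minimal standalone sketch of what the new `_get_catchment_slice_window` helper computes (a reimplementation for illustration, not the module itself; the grid size, outlet position, and catchment area in the usage example are hypothetical). A catchment of a given area can cover at most `ceil(area / (dx * dy))` cells, so in the worst case (a one-cell-wide rectangular catchment aligned with x or y) it extends that many cells from the outlet in a single direction; `max_depth` accounts for the outlet-search tolerance, and the bounds are clamped to the grid.

```python
import numpy as np


def catchment_slice_window(nrow, ncol, row, col, area, dx, dy, max_depth):
    # Upper bound on the number of cells the catchment can occupy.
    n = int(np.ceil(area / (dx * dy)))
    # Worst case: all n cells line up in a single row or column starting at the
    # outlet, plus max_depth cells of tolerance around the outlet location.
    srow = max(0, row - max_depth - n)
    erow = min(nrow - 1, row + max_depth + n)
    scol = max(0, col - max_depth - n)
    ecol = min(ncol - 1, col + max_depth + n)
    return slice(srow, erow), slice(scol, ecol)


# Hypothetical example: a 50 km2 catchment on a 1 km grid, outlet at (200, 350).
win = catchment_slice_window(1000, 1000, 200, 350, 50e6, 1000.0, 1000.0, 1)
print(win)  # (slice(149, 251, None), slice(299, 401, None)): ~100x100 cells instead of 1000x1000
```

The companion change renames `_trim_zeros_2d` to `_trim_mask_2d`: it now trims fully masked rows and columns of a masked array and optionally returns the corresponding row/column slices instead of raw start/end indices.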
50 changes: 33 additions & 17 deletions smash/factory/mesh/mesh.py
@@ -8,7 +8,8 @@
_get_srs,
_get_array,
_xy_to_rowcol,
_trim_zeros_2d,
_trim_mask_2d,
_get_catchment_slice_window,
)
from smash.factory.mesh._libmesh import mw_mesh

@@ -252,7 +253,7 @@ def _generate_mesh_from_xy(
max_depth: int,
epsg: int,
) -> dict:
(xmin, xmax, xres, ymin, ymax, yres) = _get_transform(flwdir_dataset)
(xmin, _, xres, _, ymax, yres) = _get_transform(flwdir_dataset)

srs = _get_srs(flwdir_dataset, epsg)

@@ -274,24 +275,42 @@
for ind in range(x.size):
row, col = _xy_to_rowcol(x[ind], y[ind], xmin, ymax, xres, yres)

mask_dln_imd, row_dln[ind], col_dln[ind] = mw_mesh.catchment_dln(
flwdir, dx, dy, row, col, area[ind], max_depth
# % Take a window of flow directions around the outlet. This avoids filling and
# % cleaning overly large matrices. The window boundaries are obtained by assuming
# % the worst-case spatial pattern of draining area (i.e. a rectangular catchment,
# % one cell wide, oriented along x or y).
slice_win = _get_catchment_slice_window(
*flwdir.shape, row, col, area[ind], dx[row, col], dy[row, col], max_depth
)

area_dln[ind] = np.sum(mask_dln_imd * dx * dy)
row_win = row - slice_win[0].start # % srow
col_win = col - slice_win[1].start # % scol

mask_dln = np.where(mask_dln_imd == 1, 1, mask_dln)
dx_win = dx[slice_win]
dy_win = dy[slice_win]
flwdir_win = flwdir[slice_win]

mask_dln_win, row_dln_win, col_dln_win = mw_mesh.catchment_dln(
flwdir_win, dx_win, dy_win, row_win, col_win, area[ind], max_depth
)

row_dln[ind] = row_dln_win + slice_win[0].start # % srow
col_dln[ind] = col_dln_win + slice_win[1].start # % scol

area_dln[ind] = np.sum(mask_dln_win * dx_win * dy_win)

mask_dln[slice_win] = np.where(mask_dln_win == 1, 1, mask_dln[slice_win])

flwdir = np.ma.masked_array(flwdir, mask=(1 - mask_dln))
flwdir, srow, erow, scol, ecol = _trim_zeros_2d(flwdir, shift_value=True)
dx = dx[srow:erow, scol:ecol]
dy = dy[srow:erow, scol:ecol]
flwdir, slice_win = _trim_mask_2d(flwdir, slice_win=True)
dx = dx[slice_win]
dy = dy[slice_win]

xmin_shifted = xmin + scol * xres
ymax_shifted = ymax - srow * yres
ymax_shifted = ymax - slice_win[0].start * yres # % srow
xmin_shifted = xmin + slice_win[1].start * xres # % scol

row_dln = row_dln - srow
col_dln = col_dln - scol
row_dln = row_dln - slice_win[0].start # % srow
col_dln = col_dln - slice_win[1].start # % scol

flwacc, flwpar = mw_mesh.flow_accumulation_partition(flwdir, dx, dy)

@@ -343,15 +362,12 @@ def _generate_mesh_from_xy(
def _generate_mesh_from_bbox(
flwdir_dataset: gdal.Dataset, bbox: np.ndarray, epsg: int
) -> dict:
(xmin, xmax, xres, ymin, ymax, yres) = _get_transform(flwdir_dataset)
(_, _, xres, _, ymax, yres) = _get_transform(flwdir_dataset)

srs = _get_srs(flwdir_dataset, epsg)

flwdir = _get_array(flwdir_dataset, bbox)

if np.any(~np.isin(flwdir, D8_VALUE), where=(flwdir > 0)):
raise ValueError(f"Flow direction data is invalid. Value must be in {D8_VALUE}")

flwdir = np.ma.masked_array(flwdir, mask=(flwdir < 1))

# % Accepting arrays for dx and dy in case of unstructured meshing
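The rest of the loop is index bookkeeping: the gauge position is translated from global grid indices to window-local indices before calling `mw_mesh.catchment_dln`, and the delineated outlet is translated back afterwards; the same origin shift applies when the trimmed mask moves the raster origin. A small illustrative sketch (all numeric values are hypothetical):

```python
# slice_win as returned by _get_catchment_slice_window for one gauge (hypothetical)
slice_win = (slice(149, 251), slice(299, 401))
row, col = 200, 350  # gauge position in the full grid (hypothetical)

# Global -> window-local: subtract the window origin before working on the window.
row_win = row - slice_win[0].start  # 51
col_win = col - slice_win[1].start  # 51

# mw_mesh.catchment_dln operates on flwdir[slice_win] and returns window-local
# results; assume it moved the outlet to this window-local cell (hypothetical).
row_dln_win, col_dln_win = 50, 52

# Window-local -> global: add the window origin back.
row_dln = row_dln_win + slice_win[0].start  # 199
col_dln = col_dln_win + slice_win[1].start  # 351

# After trimming the masked flow directions, the raster origin shifts by the
# start of the trim slices (hypothetical geotransform values).
xres, yres, xmin, ymax = 1000.0, 1000.0, 600_000.0, 6_500_000.0
trim_win = (slice(120, 260), slice(280, 420))
ymax_shifted = ymax - trim_win[0].start * yres  # 6_380_000.0
xmin_shifted = xmin + trim_win[1].start * xres  # 880_000.0
print(row_dln, col_dln, xmin_shifted, ymax_shifted)
```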
2 changes: 1 addition & 1 deletion smash/factory/mesh/mw_mesh.f90
@@ -86,7 +86,7 @@ subroutine catchment_dln(nrow, ncol, flwdir, dx, dy, row, col, area, &
row = row + 1
col = col + 1

min_tol = 1._4
min_tol = huge(0._4)

do i = -max_depth, max_depth

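The single change in the Fortran routine replaces the hard-coded initial tolerance `1._4` with `huge(0._4)`, the largest representable single-precision value. Presumably `catchment_dln` scans the `max_depth` neighbourhood for the cell whose drained area best matches the gauge area and keeps the candidate with the smallest error; starting the running minimum at 1.0 silently rejects every candidate whose error exceeds 1.0, while starting it at the largest value guarantees the best candidate is always kept. A Python illustration of the pattern (not the Fortran code, and the error metric here is an assumption):

```python
def best_match(candidate_areas, target_area):
    # Running minimum of the relative area error; starting at +inf means the
    # first candidate always becomes the current best, which is the effect of
    # min_tol = huge(0._4) in mw_mesh.f90.
    min_tol = float("inf")
    best = None
    for idx, area in enumerate(candidate_areas):
        tol = abs(area - target_area) / target_area
        if tol < min_tol:
            min_tol = tol
            best = idx
    return best


# With min_tol initialized to 1.0 instead, a gauge whose candidates all have a
# relative error above 1.0 would end up with no match at all.
print(best_match([12.0, 30.0, 45.0], 4.0))  # 0, even though its relative error is 2.0
```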
4 changes: 0 additions & 4 deletions smash/factory/net/_layers.py
@@ -27,7 +27,6 @@ def output_shape(self):


class Activation(Layer):

"""
Activation layer that applies a specified activation function to the input.

@@ -69,7 +68,6 @@ def output_shape(self):


class Scale(Layer):

"""
Scale layer that applies the min-max scaling function to the outputs.

@@ -146,7 +144,6 @@ def _wb_initialization(layer: Layer, attr: str):


class Dense(Layer):

"""
Fully-connected (dense) layer.

@@ -241,7 +238,6 @@ def output_shape(self):


class Dropout(Layer):

"""
Dropout layer that randomly sets the output of the previous layer to zero with a specified probability.

4 changes: 0 additions & 4 deletions smash/factory/net/_optimizers.py
@@ -4,7 +4,6 @@


class SGD:

"""
Compile the neural network with Stochastic Gradient Descent (SGD) optimizer.

@@ -36,7 +35,6 @@ def update(self, w, grad_wrt_w):


class Adam:

"""
Compile the neural network with Adaptive Moment Estimation (Adam) optimizer.

@@ -88,7 +86,6 @@ def update(self, w: np.ndarray, grad_wrt_w: np.ndarray):


class Adagrad:

"""
Compile the neural network with Adaptive Gradient (Adagrad) optimizer.

@@ -118,7 +115,6 @@ def update(self, w: np.ndarray, grad_wrt_w: np.ndarray):


class RMSprop:

"""
Compile the neural network with Root Mean Square Propagation (RMSprop) optimizer.

1 change: 0 additions & 1 deletion smash/factory/samples/samples.py
@@ -18,7 +18,6 @@


class Samples:

"""
Represents the generated samples result.

1 change: 0 additions & 1 deletion smash/io/_error.py
@@ -2,7 +2,6 @@


class ReadHDF5MethodError(OSError):

"""
Raise error when using wrong read method for hdf5 files.
"""
6 changes: 3 additions & 3 deletions smash/tests/core/simulation/test_optimize.py
@@ -57,9 +57,9 @@ def generic_optimize(model_structure: list[smash.Model], **kwargs) -> dict:
)

res[f"optimize.{model.setup.structure}.{mp}.iter_cost"] = ret.iter_cost
res[
f"optimize.{model.setup.structure}.{mp}.control_vector"
] = ret.control_vector
res[f"optimize.{model.setup.structure}.{mp}.control_vector"] = (
ret.control_vector
)

qsim = instance.response.q[:].flatten()
qsim = qsim[::10] # extract values at every 10th position