diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 0dfa0879..304ef869 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -24,10 +24,7 @@ jobs:
     strategy:
       matrix:
         os: ["ubuntu-latest", "macos-13", "macos-latest"]  # macos-13 is x86_64, macos-latest is arm64
-        python-version: [8, 9, 10, 11, 12]  # sub-versions of Python
-        exclude:
-          - os: macos-14
-            python-version: 8
+        python-version: [9, 10, 11, 12]  # sub-versions of Python

     steps:
       - uses: actions/checkout@v4
@@ -36,7 +33,7 @@
         run: "brew install hdf5"

       - name: Build wheels
-        uses: pypa/cibuildwheel@v2.20.0
+        uses: pypa/cibuildwheel@v2.21.3
         env:
           CIBW_SKIP: "pp* *-musllinux*"
           CIBW_BUILD: "cp3${{ matrix.python-version }}-*"
@@ -156,3 +153,21 @@
       - uses: pypa/gh-action-pypi-publish@release/v1
         # To test:
         #repository_url: https://test.pypi.org/legacy/
+
+  update-tags:
+    name: Update develop branch tags
+    needs: [upload_pypi]
+    runs-on: ubuntu-latest
+    # Assuming that, since it has successfully uploaded to PyPI, this release is safe.
+    # We will now automatically update the tags for develop.
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: pulling tags
+        run: |
+          git checkout develop
+          git pull --tags origin westpa2
+          git push origin develop --tags
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/mirror.yaml b/.github/workflows/mirror.yaml
new file mode 100644
index 00000000..a3b06bb9
--- /dev/null
+++ b/.github/workflows/mirror.yaml
@@ -0,0 +1,22 @@
+name: Mirror develop to westpa2
+
+on:
+  schedule:
+    # Once a month on the 28th day at 4 pm UTC, which is ~11 am EST.
+    - cron: "00 16 28 * *"
+
+jobs:
+  mirror-develop-to-westpa2:
+    runs-on: ubuntu-latest
+    name: Mirror develop branch to westpa2 branch
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: mirroring step
+        run: |
+          git checkout westpa2
+          git pull --ff-only origin develop
+          git push origin westpa2
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 799f6507..206f93d5 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -35,7 +35,7 @@ jobs:
     strategy:
       matrix:
         os: ["ubuntu-latest", "macos-13", "macos-latest"]  # macos-13 is x86-64, macos-latest is arm64
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]

     steps:
       - uses: actions/checkout@v4
@@ -53,7 +53,6 @@
       - uses: conda-incubator/setup-miniconda@v3
         with:
           python-version: ${{ matrix.python-version }}
-          miniforge-variant: Mambaforge
           environment-file: devtools/conda-envs/test_env.yaml
           activate-environment: test_env
           channel-priority: true
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 55685610..66798992 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.6.0
+    rev: v5.0.0
     hooks:
       - id: end-of-file-fixer
         types: [python]
@@ -8,7 +8,7 @@
         types: [python]

   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 24.8.0
+    rev: 24.10.0
     hooks:
       - id: black
         language_version: python3
diff --git a/AUTHORS b/AUTHORS
index e09a816f..9285afe2 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -4,18 +4,18 @@
 in collaboration with Daniel Zuckerman (zuckermd@ohsu.edu)

 The original version of WESTPA was written by Matthew Zwier (matthew.zwier@drake.edu)
 as part of his Ph.D. dissertation with Lillian Chong.
-Other contributors are the following (In alphabetical order)
+Other core contributors are the following (in alphabetical order)

 Joshua Adelman
-Alex DeGrave
-Page Harrison
-Joseph Kaus
-Patrick Pisciuneri
-A. J. Pratt
-Nicholas Rego
+Anthony Bogetti
+Jeremy Leung
+AJ Pratt
+John Russo
 Ali Saglam
-David Wang
+Jeff Thompson
 Kim Wong
+Darian Yang
+She Zhang

 The work manager interface is derived from the ``concurrent.futures`` module of
 Python 3.2 by Brian Quinlan, (C) 2011 the Python Software Foundation.
diff --git a/doc/doc_env.yaml b/doc/doc_env.yaml
index 345de842..3f6619fe 100644
--- a/doc/doc_env.yaml
+++ b/doc/doc_env.yaml
@@ -10,6 +10,8 @@ dependencies:
   - h5py
  - pyyaml
  - pyzmq
+  - pytables
+  - mdtraj
  - matplotlib-base
  - blessings
  - ipykernel
diff --git a/src/westpa/cli/tools/w_fluxanl.py b/src/westpa/cli/tools/w_fluxanl.py
index 9cc47b14..a393e80f 100644
--- a/src/westpa/cli/tools/w_fluxanl.py
+++ b/src/westpa/cli/tools/w_fluxanl.py
@@ -1,7 +1,6 @@
 import h5py
 import numpy as np
 from scipy.signal import fftconvolve
-from warnings import warn

 import westpa

@@ -370,7 +369,6 @@ def go(self):


 def entry_point():
-    warn('w_fluxanl is being deprecated. Please use w_assign and w_direct instead.')
     WFluxanlTool().main()
diff --git a/src/westpa/core/binning/mab.py b/src/westpa/core/binning/mab.py
index 42caccf6..56a78cb6 100644
--- a/src/westpa/core/binning/mab.py
+++ b/src/westpa/core/binning/mab.py
@@ -1,73 +1,67 @@
 import logging
+from typing import List, Optional

 import numpy as np

 import westpa
 from westpa.core.binning import FuncBinMapper
 from os.path import expandvars

-
 log = logging.getLogger(__name__)


 class MABBinMapper(FuncBinMapper):
     """
-    Adaptively place bins in between minimum and maximum segments along
-    the progress coordinte. Extrema and bottleneck segments are assigned
+    Adaptively place bins between minimum and maximum segments along
+    the progress coordinate. Extrema and bottleneck segments are assigned
     to their own bins.
-
     """

     def __init__(
         self,
-        nbins,
-        direction=None,
-        skip=None,
-        bottleneck=True,
-        pca=False,
-        mab_log=False,
-        bin_log=False,
-        bin_log_path="$WEST_SIM_ROOT/binbounds.log",
+        nbins: List[int],
+        direction: Optional[List[int]] = None,
+        skip: Optional[List[int]] = None,
+        bottleneck: bool = True,
+        pca: bool = False,
+        mab_log: bool = False,
+        bin_log: bool = False,
+        bin_log_path: str = "$WEST_SIM_ROOT/binbounds.log",
    ):
        """
        Parameters
        ----------
        nbins : list of int
-            List of int for nbins in each dimension.
-        direction : Union(list of int, None), default: None
-            List of int for 'direction' in each dimension.
-            Direction options are as follows:
+            List of number of bins in each dimension.
+        direction : Optional[list of int], default: None
+            List of directions in each dimension. Direction options:
                0   : default split at leading and lagging boundaries
                1   : split at leading boundary only
                -1  : split at lagging boundary only
-                86  : no splitting at either leading or lagging boundary
-        skip : Union(list of int, None), default: None
-            List of int for each dimension. Default None for skip=0.
-            Set to 1 to 'skip' running mab in a dimension.
+                86  : no splitting at either leading or lagging boundary (both bottlenecks included)
+        skip : Optional[list of int], default: None
+            List of skip flags for each dimension. Default None (no skipping).
        bottleneck : bool, default: True
-            Whether to turn on or off bottleneck walker splitting.
+            Whether to enable bottleneck walker splitting.
        pca : bool, default: False
-            Can be True or False (default) to run PCA on pcoords before bin assignment.
+            Whether to perform PCA on progress coordinates before bin assignment.
        mab_log : bool, default: False
-            Whether to output mab info to west.log.
+            Whether to output MAB info to west.log.
        bin_log : bool, default: False
-            Whether to output mab bin boundaries to bin_log_path file.
+            Whether to output MAB bin boundaries to a log file.
        bin_log_path : str, default: "$WEST_SIM_ROOT/binbounds.log"
            Path to output bin boundaries.
-
        """

        # Verifying parameters
        if nbins is None:
-            raise ValueError("nbins_per_dim is missing")
+            raise ValueError("nbins is missing")
        ndim = len(nbins)

-        if direction is None:
-            direction = [0] * ndim
-        elif len(direction) != ndim:
+        direction = direction or [0] * ndim
+        if len(direction) != ndim:
            direction = [0] * ndim
            log.warning("Direction list is not the correct dimensions, setting to defaults.")

-        if skip is None:
-            skip = [0] * ndim
-        elif len(skip) != ndim:
+        skip = skip or [0] * ndim
+        if len(skip) != ndim:
            skip = [0] * ndim
            log.warning("Skip list is not the correct dimensions, setting to defaults.")

@@ -86,9 +80,12 @@ def __init__(

        super().__init__(map_mab, n_total_bins, kwargs=kwargs)

-    def determine_total_bins(self, nbins_per_dim, direction, skip, bottleneck, **kwargs):
+    def determine_total_bins(
+        self, nbins_per_dim: List[int], direction: List[int], skip: List[int], bottleneck: bool, **kwargs
+    ) -> int:
        """
-        The following is neccessary because functional bin mappers need to "reserve"
+        Calculate the total number of bins needed, taking direction and skipping into account.
+        This function is necessary because functional bin mappers need to "reserve"
        bins and tell the sim manager how many bins they will need to use, this
        is determined by taking all direction/skipping info into account.

@@ -97,65 +94,66 @@ def determine_total_bins(self, nbins_per_dim, direction, skip, bottleneck, **kwa
        nbins_per_dim : list of int
            Number of total bins in each dimension within the linear portion.
        direction : list of int
-            Direction in each dimension. See __init__ for more information.
+            Direction in each dimension.
        skip : list of int
-            List of 0s and 1s indicating whether to skip each dimension.
+            List indicating whether to skip each dimension.
        bottleneck : bool
-            Whether to include separate bin for bottleneck walker(s).
+            Whether to include a separate bin for bottleneck walker(s).
        **kwargs : dict
-            Arbitary keyword arguments. Contains unneeded MAB parameters.
+            Additional MAB parameters (unused).

        Returns
        -------
        n_total_bins : int
            Number of total bins.
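+
+        Examples
+        --------
+        A worked count, mirroring the test suite in ``tests/test_binning.py``: with
+        ``nbins_per_dim=[5, 1]``, ``direction=[1, 86]``, ``skip=[0, 0]``, and
+        ``bottleneck=True``, the linear portion contributes 5 * 1 = 5 bins,
+        dimension 0 (direction 1) adds one leading bin plus one bottleneck bin,
+        and dimension 1 (direction 86) adds two bottleneck bins, for 9 total bins.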
- """ - n_total_bins = np.prod(nbins_per_dim) # Number of Bins in the linear portion - ndim = len(nbins_per_dim) - for i in range(ndim): - if skip[i] == 0: - if direction[i] == 0: - # Both directions (leading/trailing) + bottlenecks if enabled + # Update nbins_per_dim with any skipped dimensions, setting number of bins along skipped dimensions to 1 + skip = np.array([bool(s) for s in skip]) + nbins_per_dim = np.array(nbins_per_dim) + nbins_per_dim[skip] = 1 + + # Total bins is product of all linear bins plus and special bins + n_total_bins = nbins_per_dim.prod() + for direct, skip_dim in zip(direction, skip): + if not skip_dim: + if direct in [-1, 1]: + # 1 lead or lag bin + 1 bottleneck bin + n_total_bins += 1 + 1 * bottleneck + elif direct == 0: + # 2 lead/lag bins + 2 bottleneck bins n_total_bins += 2 + 2 * bottleneck - elif direction[i] == 86: - # No leading/trailing, 2 bottlenecks if enabled + elif direct == 86: + # 0 lead/lag + 2 bottleneck bins n_total_bins += 2 * bottleneck - else: - # direction[i] == -1 or 1, Just one leading/trailing + 1 bottleneck - n_total_bins += 1 + 1 * bottleneck - else: - n_total_bins -= nbins_per_dim[i] - 1 - n_total_bins += 1 * ndim # or else it will be one bin short return n_total_bins -def map_mab(coords, mask, output, *args, **kwargs): +def map_mab(coords: np.ndarray, mask: np.ndarray, output: List[int], *args, **kwargs) -> List[int]: """ - Binning which adaptively places bins based on the positions of extrema segments and - bottleneck segments, which are where the difference in probability is the greatest - along the progress coordinate. Operates per dimension and places a fixed number of + Adaptively place bins based on extrema and bottleneck segments along the progress coordinate. + + Bottleneck segments are where the difference in probability is the greatest + along the progress coordinate. Operates per dimension (unless skipped) and places a fixed number of evenly spaced bins between the segments with the min and max pcoord values. Extrema and bottleneck segments are assigned their own bins. Parameters ---------- - coords : ndarray + coords : np.ndarray An array with pcoord and weight info. - mask : ndarray - Array of 1 (True) and 0 (False), to filter out unwanted segment info. + mask : np.ndarray + Boolean array to filter out unwanted segments. output : list The main list that, for each segment, holds the bin assignment. *args : list - Variable length arguments. + Additional arguments. **kwargs : dict - Arbitary keyword arguments. Contains most of the MAB-needed parameters. + Additional keyword arguments. Contains most of the MAB-needed parameters. Returns ------ output : list - The main list that, for each segment, holds the bin assignment. - + List with bin assignments for each segment. 
""" # Argument Processing @@ -163,8 +161,8 @@ def map_mab(coords, mask, output, *args, **kwargs): ndim = len(nbins_per_dim) pca = kwargs.get("pca", False) bottleneck = kwargs.get("bottleneck", True) - direction = kwargs.get("direction", ([0] * ndim)) - skip = kwargs.get("skip", ([0] * ndim)) + direction = kwargs.get("direction", [0] * ndim) + skip = kwargs.get("skip", [0] * ndim) mab_log = kwargs.get("mab_log", False) bin_log = kwargs.get("bin_log", False) bin_log_path = kwargs.get("bin_log_path", "$WEST_SIM_ROOT/binbounds.log") @@ -190,11 +188,11 @@ def map_mab(coords, mask, output, *args, **kwargs): if coords[0, -1] == 0: report = True if coords.shape[1] > ndim + 1: - isfinal = allcoords[:, ndim + 1].astype(np.bool_) + isfinal = allcoords[:, ndim + 1].astype(bool) else: - isfinal = np.ones(coords.shape[0], dtype=np.bool_) + isfinal = np.ones(coords.shape[0], dtype=bool) coords = coords[isfinal, :ndim] - weights = allcoords[isfinal, ndim + 0] + weights = allcoords[isfinal, ndim] mask = mask[isfinal] splitting = True @@ -204,229 +202,301 @@ def map_mab(coords, mask, output, *args, **kwargs): weights = None splitting = False - varcoords = np.copy(coords) originalcoords = np.copy(coords) if pca and len(output) > 1: - colavg = np.mean(coords, axis=0) - for i in range(len(coords)): - for j in range(len(coords[i])): - varcoords[i][j] = coords[i][j] - colavg[j] - covcoords = np.cov(np.transpose(varcoords), aweights=weights) - eigval, eigvec = np.linalg.eigh(covcoords) - eigvec = eigvec[:, np.argmax(np.absolute(eigvec), axis=1)] - for i in range(len(eigvec)): - if eigvec[i, i] < 0: - eigvec[:, i] = -1 * eigvec[:, i] - for i in range(ndim): - for j in range(len(output)): - coords[j][i] = np.dot(varcoords[j], eigvec[:, i]) - - maxlist = [] - minlist = [] - difflist = [] - flipdifflist = [] - for n in range(ndim): - # identify the boundary segments - maxcoord = np.max(coords[mask, n]) - mincoord = np.min(coords[mask, n]) - maxlist.append(maxcoord) - minlist.append(mincoord) - - # detect the bottleneck segments, this uses the weights - if splitting: - temp = np.column_stack((originalcoords[mask, n], weights[mask])) - sorted_indices = temp[:, 0].argsort() - temp = temp[sorted_indices] - for p in range(len(temp)): - if temp[p][1] == 0: - temp[p][1] = 10**-323 - fliptemp = np.flipud(temp) - - difflist.append(None) - flipdifflist.append(None) - maxdiff = 0 - flipmaxdiff = 0 - for i in range(1, len(temp) - 1): - comprob = 0 - flipcomprob = 0 - j = i + 1 - while j < len(temp): - comprob = comprob + temp[j][1] - flipcomprob = flipcomprob + fliptemp[j][1] - j = j + 1 - diff = -np.log(comprob) + np.log(temp[i][1]) - if diff > maxdiff: - difflist[n] = temp[i][0] - maxdiff = diff - flipdiff = -np.log(flipcomprob) + np.log(fliptemp[i][1]) - if flipdiff > flipmaxdiff: - flipdifflist[n] = fliptemp[i][0] - flipmaxdiff = flipdiff + coords = apply_pca(coords, weights) + + # Computing special bins (bottleneck and boundary bins) + minlist, maxlist, bottlenecks_forward, bottlenecks_reverse = calculate_bin_boundaries( + originalcoords, weights, mask, skip, splitting, bottleneck + ) if mab_log and report: - westpa.rc.pstatus("################ MAB stats ################") - westpa.rc.pstatus("minima in each dimension: {}".format(minlist)) - westpa.rc.pstatus("maxima in each dimension: {}".format(maxlist)) - westpa.rc.pstatus("direction in each dimension: {}".format(direction)) - westpa.rc.pstatus("skip in each dimension: {}".format(skip)) - westpa.rc.pstatus("###########################################") - 
-        westpa.rc.pflush()
-
-    # assign segments to bins
-    # the total number of linear bins is the boundary base
-    boundary_base = np.prod(nbins_per_dim)
-
-    # the bottleneck base is offset by the number of boundary walkers,
-    # which is two per dimension unless there is a direction specified
-    # in a particluar dimension, then it's just one
-    bottleneck_base = boundary_base
+        log_mab_stats(minlist, maxlist, direction, skip)
+
+    # Assign segments to bins
+    n_bottleneck_filled = bin_assignment(
+        allcoords,
+        allmask,
+        minlist,
+        maxlist,
+        bottlenecks_forward,
+        bottlenecks_reverse,
+        nbins_per_dim,
+        direction,
+        skip,
+        splitting,
+        bottleneck,
+        output,
+    )
+
+    # Report MAB bin statistics
+    if bin_log and report and westpa.rc.sim_manager.n_iter:
+        log_bin_boundaries(
+            skip,
+            bottleneck,
+            direction,
+            bin_log_path,
+            minlist,
+            maxlist,
+            nbins_per_dim,
+            n_bottleneck_filled,
+            bottlenecks_forward,
+            bottlenecks_reverse,
+        )
+
+    return output
+
+
+def apply_pca(coords, weights):
+    colavg = np.mean(coords, axis=0)
+    varcoords = coords - colavg
+    covcoords = np.cov(varcoords.T, aweights=weights)
+    eigval, eigvec = np.linalg.eigh(covcoords)
+    eigvec = eigvec[:, np.argmax(np.abs(eigvec), axis=1)]
+    eigvec[:, np.diag(eigvec) < 0] *= -1
+    return np.dot(varcoords, eigvec)
+
+
+def calculate_bin_boundaries(coords, weights, mask, skip, splitting, bottleneck):
+    """
+    This function calculates minima, maxima, and bottleneck segments.
+    """
+    skip = np.array([bool(s) for s in skip])
+
+    # Initialize lists to hold minima and maxima along each dimension
+    minlist, maxlist = [], []
+    # Initialize lists to hold bottleneck segments along each dimension
+    bottlenecks_forward, bottlenecks_reverse = [None] * len(coords[0]), [None] * len(coords[0])
+    # Number of unmasked coords
+    n_coords = mask.sum()
+    # Grabbing all unmasked coords and weights
+    unmasked_coords = coords[mask, :]
+    unmasked_weights = weights[mask] if weights is not None else None
+    # Replace any zero weights with non-zero values so that log(weight) is well-defined
+    if unmasked_weights is not None:
+        unmasked_weights[unmasked_weights == 0] = 10**-323
+    # Looping over each dimension of progress coordinate, even those being skipped
+    for n in range(len(coords[0])):
+        # We calculate the min and max pcoord along each dimension (boundary segments) even if skipping
+        maxlist.append(np.max(coords[mask, n]))
+        minlist.append(np.min(coords[mask, n]))
+        # Now we calculate the bottleneck segments
+        if splitting and bottleneck and not skip[n]:
+            bottlenecks_forward[n], bottlenecks_reverse[n] = detect_bottlenecks(unmasked_coords, unmasked_weights, n_coords, n)
+
+    return minlist, maxlist, bottlenecks_forward, bottlenecks_reverse
+
+
+def detect_bottlenecks(unmasked_coords, unmasked_weights, n_coords, n):
+    """
+    Detect the bottleneck segments along the given coordinate n; this uses the weights.
+    """
+    # Grabbing all unmasked coords in current dimension, plus corresponding weights
+    # Sort by current dimension in coord, smallest to largest
+    sorted_indices = unmasked_coords[:, n].argsort()
+    # Grab sorted coords and weights
+    coords_srt = unmasked_coords[sorted_indices, :]
+    weights_srt = unmasked_weights[sorted_indices]
+    # Also sort in reverse order for opposite direction
+    coords_srt_flip = np.flipud(coords_srt)
+    weights_srt_flip = np.flipud(weights_srt)
+    # Initialize the bottleneck coords to None and the max directional differences to -inf (these may not be updated)
+    bottleneck_coords, bottleneck_coords_flip = None, None
+    maxdiff, maxdiff_flip = -np.inf, -np.inf
+    # Looping through all non-boundary coords
+    # Compute the cumulative weight on either side of each non-boundary walker
+    for i in range(1, n_coords - 1):
+        # Summing up weights of all walkers ahead of current walker along current dim in both directions
+        cumulative_prob = np.sum(weights_srt[i + 1 :])
+        cumulative_prob_flip = np.sum(weights_srt_flip[i + 1 :])
+        # Compute the difference between the log weight of the current walker and the log cumulative
+        # weight of all walkers ahead of it (Z in the MAB paper)
+        # We use the log as weights vary over many orders of magnitude
+        # Note a negative Z indicates the cumulative weight ahead of the current walker is larger than the weight of the current walker,
+        # while a positive Z indicates the cumulative weight ahead of the current walker is smaller, indicating a barrier
+        Z = np.log(weights_srt[i]) - np.log(cumulative_prob)
+        Z_flip = np.log(weights_srt_flip[i]) - np.log(cumulative_prob_flip)
+        # Update ALL coords of the current walker into bottlenecks_forward if it is largest
+        # This way we uniquely identify a walker by its full set of coordinates
+        if Z > maxdiff:
+            bottleneck_coords = coords_srt[i, :]
+            maxdiff = Z
+        if Z_flip > maxdiff_flip:
+            bottleneck_coords_flip = coords_srt_flip[i, :]
+            maxdiff_flip = Z_flip
+    return bottleneck_coords, bottleneck_coords_flip
+
+
+def log_mab_stats(minlist, maxlist, direction, skip):
+    westpa.rc.pstatus("################ MAB stats ################")
+    westpa.rc.pstatus(f"minima in each dimension: {minlist}")
+    westpa.rc.pstatus(f"maxima in each dimension: {maxlist}")
+    westpa.rc.pstatus(f"direction in each dimension: {direction}")
+    westpa.rc.pstatus(f"skip in each dimension: {skip}")
+    westpa.rc.pstatus("###########################################")
+    westpa.rc.pflush()
+
+
+def bin_assignment(
+    coords,
+    mask,
+    minlist,
+    maxlist,
+    bottlenecks_forward,
+    bottlenecks_reverse,
+    nbins_per_dim,
+    direction,
+    skip,
+    splitting,
+    bottleneck,
+    output,
+):
+    """
+    Assign segments to bins based on the minima, maxima, and
+    bottleneck segments along the progress coordinate.
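+
+    Notes
+    -----
+    Bin IDs are laid out in consecutive blocks: linear bins first, then leading
+    (forward) boundary bins, lagging (reverse) boundary bins, forward bottleneck
+    bins, and finally reverse bottleneck bins, with skipped entries compacted out
+    of each block. As a worked example (assuming no skipping), with
+    ``nbins_per_dim=[2, 2]``, ``direction=[0, 0]``, and ``bottleneck=True``:
+    bins 0-3 are linear, 4-5 leading, 6-7 lagging, 8-9 forward bottleneck, and
+    10-11 reverse bottleneck (12 total, matching ``determine_total_bins``).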
+ """ + # Update nbins_per_dim with any skipped dimensions, setting number of bins along skipped dimensions to 1 + skip = np.array([bool(s) for s in skip]) + nbins_per_dim = np.array(nbins_per_dim) + nbins_per_dim[skip] = 1 + direction = np.array(direction) + + ndim = len(nbins_per_dim) n_bottleneck_filled = 0 - for i in range(0, ndim): - # for single direction, 1 boundary walker - if direction[i] == 1 or direction[i] == -1: - bottleneck_base += 1 - # 2 boundary walkers with 0 direction - elif direction[i] == 0: - bottleneck_base += 2 - # for 86 direction, no boundary walkers so offset of 0 - elif direction[i] == 86: - bottleneck_base += 0 - - # if a dimension is being "skipped", leave only one bin total as - # the offset - for i in range(0, ndim): - if skip[i] != 0: - boundary_base -= nbins_per_dim[i] - 1 + # Boolean arrays that track use of special bins along each dimension + skip_bneck_fwd = np.array([d == -1 if bottleneck else True for d in direction]) + skip + skip_bneck_rev = np.array([d == 1 if bottleneck else True for d in direction]) + skip + skip_lead = np.array([d in [86, -1] for d in direction]) + skip + skip_lag = np.array([d in [86, 1] for d in direction]) + skip - for i in range(len(output)): - if not allmask[i]: - continue + # List of dimensions that are not skipped + active_dims = np.array([n for n in range(ndim) if not skip[n]]) - # special means either a boundary or bottleneck walker (not a walker in the linear space) - special = False - # this holder is the bin number, which only needs to be unique for different walker groups - holder = 0 - if splitting: - for n in range(ndim): - coord = allcoords[i][n] + # Compute the boundary bin ID offsets + # In forward direction, this is all the linear bins + boundary_bin_id_offset_fwd = nbins_per_dim.prod() + # In reverse, we add the number of forward boundary bins to the offset + boundary_bin_id_offset_rev = boundary_bin_id_offset_fwd + (~skip_lead).sum() - # if skipped, just assign the walkers to the same bin (offset of boundary base) - if skip[n] != 0: - holder = boundary_base + n + # Compute the bottleneck bin ID offsets + # In forward direction, bin IDs are offset by all linear and boundary bins + bneck_bin_id_offset_fwd = boundary_bin_id_offset_rev + (~skip_lag).sum() + # In reverse, we add the number of forward bottleneck bins to the offset + bneck_bin_id_offset_rev = bneck_bin_id_offset_fwd + (~skip_bneck_fwd).sum() + + # Bin assignment loop over all walkers + for i in range(len(output)): + # Skip masked walkers, these walkers bin IDs are unchanged + if not mask[i]: + continue + # Initialize bin ID and special tracker for current coord + # The special variable indicates a boundary or bottleneck walker (not assigned to the linear space) + bin_id, special = 0, False + + # Searching for bottleneck bins first + if splitting and bottleneck: + for n in active_dims: + # Grab coord(s) of current walker + coord = coords[i][:ndim] + # Assign bottlenecks, taking directionality into account + # Check both directions when using 0 or 86 + # Note: 86 implies no leading or lagging bins, but does add bottlenecks for *both* directions when bottleneck is enabled + # Note: All bottleneck bins will typically be filled unless a walker is simultaneously in bottleneck bins along multiple dimensions + # or there are too few walkers to compute free energy barriers + if (coord == bottlenecks_forward[n]).all() and not skip_bneck_fwd[n]: + bin_id = bneck_bin_id_offset_fwd + n - skip_bneck_fwd[:n].sum() + special = True + n_bottleneck_filled += 1 + 
+                    break
+                elif (coord == bottlenecks_reverse[n]).all() and not skip_bneck_rev[n]:
+                    bin_id = bneck_bin_id_offset_rev + n - skip_bneck_rev[:n].sum()
+                    special = True
+                    n_bottleneck_filled += 1
                    break
-                # assign bottlenecks, taking directionality into account
-                if bottleneck:
-                    if direction[n] == -1:
-                        if coord == flipdifflist[n]:
-                            holder = bottleneck_base + n
-                            special = True
-                            n_bottleneck_filled += 1
-                            break
-
-                    if direction[n] == 1:
-                        if coord == difflist[n]:
-                            holder = bottleneck_base + n
-                            special = True
-                            n_bottleneck_filled += 1
-                            break
-
-                    # both directions when using 0 or with
-                    # special value of 86 for no lead/lag split
-                    if direction[n] == 0 or direction[n] == 86:
-                        if coord == difflist[n]:
-                            holder = bottleneck_base + n
-                            special = True
-                            n_bottleneck_filled += 1
-                            break
-                        elif coord == flipdifflist[n]:
-                            holder = bottleneck_base + n + 1
-                            special = True
-                            n_bottleneck_filled += 1
-                            break
-
-                # assign boundary walkers, taking directionality into account
-                if direction[n] == -1:
-                    if coord == minlist[n]:
-                        holder = boundary_base + n
-                        special = True
-                        break
-
-                elif direction[n] == 1:
-                    if coord == maxlist[n]:
-                        holder = boundary_base + n
-                        special = True
-                        break
-
-                elif direction[n] == 0:
-                    if coord == minlist[n]:
-                        holder = boundary_base + n
-                        special = True
-                        break
-                    elif coord == maxlist[n]:
-                        holder = boundary_base + n + 1
-                        special = True
-                        break
-
-                # special value for direction with no lead/lag split
-                elif direction[n] == 86:
-                    # westpa.rc.pstatus(f"No lead/lag split for dim {n}")
-                    # westpa.rc.pflush()
-                    # nornmally adds to special bin but here just leaving it forever empty
-                    # holder = boundary_base + n
+        # Now check for boundary walkers, taking directionality into account
+        # This should only be done after fully checking for bottleneck walkers
+        if splitting and not special:
+            for n in active_dims:
+                # Grab coord of current walker along current dimension
+                coord = coords[i, n]
+                if (coord == maxlist[n]) and not skip_lead[n]:
+                    bin_id = boundary_bin_id_offset_fwd + n - skip_lead[:n].sum()
+                    special = True
+                    break
+                elif (coord == minlist[n]) and not skip_lag[n]:
+                    bin_id = boundary_bin_id_offset_rev + n - skip_lag[:n].sum()
+                    special = True
                    break

-        # the following are for the "linear" portion
+        # Now check for linear bin walkers
        if not special:
+            # Again we loop over the dimensions
+            # Note: no need to worry about skipping as we've already set all skipped dimensions to 1 bin
            for n in range(ndim):
-                # if skipped, it's added to the same bin as the special walkers above
-                if skip[n] != 0:
-                    holder = boundary_base + n
-                    break
-
-                coord = allcoords[i][n]
+                coord = coords[i][n]
                nbins = nbins_per_dim[n]
                minp = minlist[n]
                maxp = maxlist[n]
+                # Generate the bins along this dimension
                bins = np.linspace(minp, maxp, nbins + 1)
-                bin_number = np.digitize(coord, bins) - 1
-
-                if isfinal is None or not isfinal[i]:
-                    if bin_number >= nbins:
-                        bin_number = nbins - 1
-                    elif bin_number < 0:
-                        bin_number = 0
-                elif bin_number >= nbins or bin_number < 0:
-                    if np.isclose(bins[-1], coord):
-                        bin_number = nbins - 1
-                    elif np.isclose(bins[0], coord):
-                        bin_number = 0
-                    else:
-                        raise ValueError("Walker out of boundary")
-
-                holder += bin_number * np.prod(nbins_per_dim[:n])
-
-            # output is the main list that, for each segment, holds the bin assignment
-            output[i] = holder
-
-    if bin_log and report:
-        if westpa.rc.sim_manager.n_iter:
-            with open(expandvars(bin_log_path), 'a') as bb_file:
-                # Iteration Number
-                bb_file.write(f'iteration: {westpa.rc.sim_manager.n_iter}\n')
-                bb_file.write('bin boundaries: ')
-                for n in range(ndim):
-                    # Write binbounds per dim
-                    bb_file.write(f'{np.linspace(minlist[n], maxlist[n], nbins_per_dim[n] + 1)}\t')
-                # Min/Max pcoord
-                bb_file.write(f'\nmin/max pcoord: {minlist} {maxlist}\n')
-                bb_file.write(f'bottleneck bins: {n_bottleneck_filled}\n')
-                if n_bottleneck_filled > 0:
-                    # Bottlenecks bins exist (passes any of the if bottleneck: checks)
-                    bb_file.write(f'bottleneck pcoord: {flipdifflist} {difflist}\n\n')
-                else:
-                    bb_file.write('\n')
-    return output
+                # Assign walker to a bin along this dimension
+                bin_number = np.digitize(coord, bins) - 1  # note np.digitize is 1-indexed
+
+                # Sometimes the walker is exactly at the max/min value,
+                # which would put it in the next bin
+                if bin_number == nbins:
+                    bin_number -= 1
+                elif bin_number == -1:
+                    bin_number = 0
+                elif bin_number > nbins or bin_number < -1:
+                    raise ValueError("Walker out of boundary.")
+
+                # Assign to bin within the full dimensional space
+                bin_id += bin_number * np.prod(nbins_per_dim[:n])
+
+        # Output is the main list that, for each segment, holds the bin assignment
+        output[i] = bin_id
+    return n_bottleneck_filled
+
+
+def log_bin_boundaries(
+    skip,
+    bottleneck,
+    direction,
+    bin_log_path,
+    minlist,
+    maxlist,
+    nbins_per_dim,
+    n_bottleneck_filled,
+    bottlenecks_forward,
+    bottlenecks_reverse,
+):
+    ndim = len(nbins_per_dim)
+    skip = np.array([bool(s) for s in skip])
+    active_dims = np.array([n for n in range(ndim) if not skip[n]])
+    max_bottleneck = np.sum([1 if direction[n] in [-1, 1] else 2 for n in active_dims]) if bottleneck else 0
+    with open(expandvars(bin_log_path), 'a') as bb_file:
+        # Iteration Number
+        bb_file.write(f'Iteration: {westpa.rc.sim_manager.n_iter}\n')
+        bb_file.write('MAB linear bin boundaries: ')
+        for n in range(ndim):
+            # Write binbounds per dim
+            bb_file.write(f'{np.linspace(minlist[n], maxlist[n], nbins_per_dim[n] + 1)}\t')
+        # Min/Max pcoord
+        bb_file.write(f'\nLagging pcoord in each dimension: {minlist}\n')
+        bb_file.write(f'Leading pcoord in each dimension: {maxlist}\n')
+        # Bottleneck bins exist
+        if bottleneck:
+            bb_file.write(f'Number of bottleneck bins filled: {n_bottleneck_filled} / {max_bottleneck}\n')
+            for n in active_dims:
+                if direction[n] in [0, 1, 86]:
+                    bb_file.write(f'Dimension {n} forward bottleneck walker at: {list(bottlenecks_forward[n])}\n')
+                if direction[n] in [0, -1, 86]:
+                    bb_file.write(f'Dimension {n} backward bottleneck walker at: {list(bottlenecks_reverse[n])}\n')
+            bb_file.write('\n')
+        else:
+            bb_file.write('\n')
diff --git a/src/westpa/core/sim_manager.py b/src/westpa/core/sim_manager.py
index d56a3daa..d005aa89 100644
--- a/src/westpa/core/sim_manager.py
+++ b/src/westpa/core/sim_manager.py
@@ -229,13 +229,15 @@ def report_bin_statistics(self, bins, target_states, save_summary=False):

    def get_bstate_pcoords(self, basis_states, label='basis'):
        '''For each of the given ``basis_states``, calculate progress coordinate values
-        as necessary. The HDF5 file is not updated.'''
+        as necessary. The HDF5 file is not updated. The BasisState objects are explicitly
+        copied from the futures in order to retain auxdata/restart files (under BasisState.data)
+        from certain work managers (e.g., the ``processes`` work manager).'''

        self.rc.pstatus('Calculating progress coordinate values for {} states.'.format(label))
        futures = [self.work_manager.submit(wm_ops.get_pcoord, args=(basis_state,)) for basis_state in basis_states]
        fmap = {future: i for (i, future) in enumerate(futures)}
        for future in self.work_manager.as_completed(futures):
-            basis_states[fmap[future]].pcoord = future.get_result().pcoord
+            basis_states[fmap[future]] = future.get_result()

    def report_basis_states(self, basis_states, label='basis'):
        pstatus = self.rc.pstatus
diff --git a/src/westpa/westext/weed/weed_driver.py b/src/westpa/westext/weed/weed_driver.py
index 41ac6483..d0c10fc3 100644
--- a/src/westpa/westext/weed/weed_driver.py
+++ b/src/westpa/westext/weed/weed_driver.py
@@ -95,7 +95,15 @@ def prepare_new_iteration(self):

        with self.data_manager.lock:
            weed_global_group = self.data_manager.we_h5file.require_group('weed')
+            reweighting_history_dataset = weed_global_group.require_dataset(
+                'reweighting_history', (1,), maxshape=(None,), dtype=int
+            )
            last_reweighting = int(weed_global_group.attrs.get('last_reweighting', 0))
+            if last_reweighting > n_iter:
+                last_reweighting = n_iter - 1
+                reweighting_history = reweighting_history_dataset[:]
+                reweighting_history = reweighting_history[reweighting_history < n_iter]
+                reweighting_history_dataset.resize((reweighting_history.size), axis=0)

        if n_iter - last_reweighting < self.reweight_period:
            # Not time to reweight yet
@@ -172,6 +180,8 @@ def prepare_new_iteration(self):
            for bin, newprob in zip(bins, binprobs):
                bin.reweight(newprob)

+            reweighting_history_dataset.resize((reweighting_history_dataset.shape[0] + 1), axis=0)
+            reweighting_history_dataset[-1] = n_iter
            weed_global_group.attrs['last_reweighting'] = n_iter

        assert (
diff --git a/src/westpa/westext/wess/wess_driver.py b/src/westpa/westext/wess/wess_driver.py
index 9b523ea7..14c81c02 100644
--- a/src/westpa/westext/wess/wess_driver.py
+++ b/src/westpa/westext/wess/wess_driver.py
@@ -112,7 +112,15 @@ def prepare_new_iteration(self):

        with self.data_manager.lock:
            wess_global_group = self.data_manager.we_h5file.require_group('wess')
+            reweighting_history_dataset = wess_global_group.require_dataset(
+                'reweighting_history', (1,), maxshape=(None,), dtype=int
+            )
            last_reweighting = int(wess_global_group.attrs.get('last_reweighting', 0))
+            if last_reweighting > n_iter:
+                last_reweighting = n_iter - 1
+                reweighting_history = reweighting_history_dataset[:]
+                reweighting_history = reweighting_history[reweighting_history < n_iter]
+                reweighting_history_dataset.resize((reweighting_history.size), axis=0)

        if n_iter - last_reweighting < self.reweight_period:
            # Not time to reweight yet
@@ -197,6 +205,8 @@ def prepare_new_iteration(self):
            if len(bin):
                bin.reweight(newprob)

+            reweighting_history_dataset.resize((reweighting_history_dataset.shape[0] + 1), axis=0)
+            reweighting_history_dataset[-1] = n_iter
            wess_global_group.attrs['last_reweighting'] = n_iter

        assert (
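
# Editor's note: both drivers above use the same h5py resizable-dataset pattern
# (created with maxshape=(None,) so one entry can be appended per reweighting).
# A minimal standalone sketch of that pattern, with illustrative names only:
#
#     import h5py
#     with h5py.File('west.h5', 'a') as f:
#         group = f.require_group('weed')
#         hist = group.require_dataset('reweighting_history', (1,), maxshape=(None,), dtype=int)
#         hist.resize((hist.shape[0] + 1), axis=0)  # grow by one entry
#         hist[-1] = 42                             # record the reweighting iteration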
diff --git a/tests/conftest.py b/tests/conftest.py
index 7aff9291..57d6be85 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,9 +1,8 @@
 import pytest
 import os
 import glob
-
-from shutil import copyfile, copy
 import tempfile
+from shutil import copyfile, copy

 import westpa

@@ -24,6 +23,12 @@
        copy(filename, dest_dir)


+def clear_state():
+    os.chdir(STARTING_PATH)
+    del os.environ['WEST_SIM_ROOT']
+    westpa.rc = westpa.core._rc.WESTRC()
+
+
 @pytest.fixture
 def ref_3iter(request):
    """
@@ -224,9 +229,3 @@ def ref_executable(request, tmpdir):
    westpa.rc = westpa.core._rc.WESTRC()

    request.addfinalizer(clear_state)
-
-
-def clear_state():
-    os.chdir(STARTING_PATH)
-    del os.environ['WEST_SIM_ROOT']
-    westpa.rc = westpa.core._rc.WESTRC()
diff --git a/tests/refs/mab_assignments_ref.h5 b/tests/refs/mab_assignments_ref.h5
new file mode 100644
index 00000000..97682ba9
Binary files /dev/null and b/tests/refs/mab_assignments_ref.h5 differ
diff --git a/tests/test_binning.py b/tests/test_binning.py
index 77712205..533f9823 100644
--- a/tests/test_binning.py
+++ b/tests/test_binning.py
@@ -1,5 +1,7 @@
+import os

 import pytest
+import h5py
 import numpy as np

 from scipy.spatial.distance import cdist
@@ -12,7 +14,10 @@
    RecursiveBinMapper,
 )
 from westpa.core.binning.assign import coord_dtype
-from westpa.core.binning.mab import MABBinMapper
+from westpa.core.binning.mab import MABBinMapper, map_mab
+
+
+REFERENCE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'refs')


 class TestRectilinearBinMapper:
@@ -327,11 +332,369 @@ def test2dRectilinearRecursion(self):
        assert (assignments == expected).all()


+# Following section is for MAB Testing
+@pytest.fixture(scope='class')
+def gen_input_mab_data_fixture(request):
+    '''A fixture to assign `gen_input_mab_data`'s return as a class attribute for tests.'''
+    request.cls.input_mab_data = gen_input_mab_data()
+
+
+def gen_input_mab_data():
+    '''Function to generate test data for MABBinMapper testing.'''
+    # Create synthetic test data: a 2D grid of points with Gaussian weights distribution
+    N_point_2d = 15
+    n_dim_2d = 2
+    N_total_2d = N_point_2d**n_dim_2d
+    coords_2d = np.zeros((N_total_2d, 2))
+    for i in range(N_total_2d):
+        coords_2d[i, 0] = i % N_point_2d
+        coords_2d[i, 1] = i // N_point_2d
+    coords_2d /= N_point_2d
+
+    # Generate weights as a 2D gaussian where the max is at the center of the 2D space
+    weights_2d = np.zeros(N_total_2d)
+    for i in range(N_total_2d):
+        weights_2d[i] = np.exp(-((coords_2d[i, 0] - 1 / 2) ** 2 + (coords_2d[i, 1] - 1 / 2) ** 2) / (1 / 2) ** 2)
+    weights_2d /= np.sum(weights_2d)
+
+    # Tile coords
+    allcoords_2d_grid = np.ones((N_total_2d, 4))
+    allcoords_2d_grid[:, :2] = coords_2d
+    allcoords_2d_grid[:, 2] = weights_2d
+    allcoords_2d_grid = np.tile(allcoords_2d_grid, (2, 1))
+    allcoords_2d_grid[0:N_total_2d, 3] = 0
+
+    # Create deterministic 3D array of grid points, again with 3D Gaussian distribution of weights
+    N_point_3d = 15
+    n_dim_3d = 3
+    N_total_3d = N_point_3d**n_dim_3d
+    coords_3d = np.zeros((N_total_3d, 3))
+    for i in range(N_total_3d):
+        coords_3d[i, 0] = i % N_point_3d
+        coords_3d[i, 1] = (i // N_point_3d) % N_point_3d
+        coords_3d[i, 2] = i // (N_point_3d**2)
+    coords_3d /= N_point_3d
+
+    # Generate weights as a n_dim gaussian where the max is at the center of the 3D space
+    weights_3d = np.zeros(N_total_3d)
+    for i in range(N_total_3d):
+        weights_3d[i] = np.exp(
+            -((coords_3d[i, 0] - 1 / 2) ** 2 + (coords_3d[i, 1] - 1 / 2) ** 2 + (coords_3d[i, 2] - 1 / 2) ** 2) / (1 / 2) ** 2
+        )
+    weights_3d /= np.sum(weights_3d)
+
+    # Tile coords
+    allcoords_3d_grid = np.ones((N_total_3d, n_dim_3d + 2))
+    allcoords_3d_grid[:, :n_dim_3d] = coords_3d
+    allcoords_3d_grid[:, n_dim_3d] = weights_3d
+    allcoords_3d_grid = np.tile(allcoords_3d_grid, (2, 1))
+    allcoords_3d_grid[0:N_total_3d, n_dim_3d + 1] = 0
+
+    # Lastly, test bin assignment with 2D coordinates and weights on pseudorandom deterministic Gaussian points
+    N_total_random = 300
+    rng = np.random.default_rng(seed=0)
+    coords_random = rng.normal(loc=[0.5, 0.5], scale=0.25, size=(N_total_random, n_dim_2d))
+
+    # Generate weights as a n_dim sine curve with given wavelength plus some deterministic noise
+    weights_random = np.zeros(N_total_random)
+    wavelength = 0.25
+    noise_level = 0.1
+    for i in range(n_dim_2d):
+        weights_random += np.sin(2 * np.pi * coords_random[:, i] / wavelength) + noise_level * np.cos(
+            4 * 2 * np.pi * coords_random[:, i] / wavelength
+        )
+    weights_random = np.abs(weights_random) / np.sum(np.abs(weights_random))
+
+    # Tile coords
+    allcoords_2d_gauss = np.ones((N_total_random, 4))
+    allcoords_2d_gauss[:, :2] = coords_random
+    allcoords_2d_gauss[:, 2] = weights_random
+    allcoords_2d_gauss = np.tile(allcoords_2d_gauss, (2, 1))
+    allcoords_2d_gauss[0:N_total_random, 3] = 0
+
+    return {
+        'allcoords_2d_grid': allcoords_2d_grid,
+        'allcoords_3d_grid': allcoords_3d_grid,
+        'allcoords_2d_gauss': allcoords_2d_gauss,
+    }
+
+
+@pytest.fixture(scope='class')
+def ref_mab_results(request):
+    '''Function for reading the reference datasets from `refs/mab_assignments_ref.h5`.'''
+    # Setting up empty things dictionary with corresponding keys
+    request.cls.ref_mab_results = {}
+    test_keys = ['2d_grid', '3d_grid', '2d_gauss']
+
+    # Loading in the three sets of reference data from the reference file into a dictionary
+    with h5py.File(os.path.join(REFERENCE_PATH, 'mab_assignments_ref.h5'), 'r') as f:
+        for key in test_keys:
+            request.cls.ref_mab_results[key] = [f[f'{key}/test_result_{i:d}'][...] for i in range(len(f[key]))]
+
+
 class TestMABBinMapper:
-    def test_init(self):
-        mab = MABBinMapper([5])
-        assert mab.nbins == 9
+    '''Test class for MABBinMapper'''
+
+    @pytest.mark.parametrize(
+        'nbins, direction, skip, bottleneck, ref_value',
+        [
+            ([5, 1], [1, 86], [0, 0], True, 9),
+            ([5, 5], [0, 0], [0, 0], True, 33),
+            ([5, 5, 5], [0, 0, 0], [0, 0, 0], True, 137),
+            ([5, 5], [0, 0], [1, 0], True, 9),
+            ([5, 1], [1, 86], [0, 0], False, 6),
+        ],
+        ids=['5x1, direction=[1,86]', '5x5', '5x5x5', '5x5, skip=[1,0]', '5x1, direction=[1,86], no bottleneck'],
+    )
+    def test_determine_total_bins(self, nbins, direction, skip, bottleneck, ref_value):
+        '''Runs through different configurations to see if MAB reports the correct number of total bins'''
+        mab = MABBinMapper([5])  # Creating a dummy MABBinMapper
+
+        assert mab.determine_total_bins(nbins_per_dim=nbins, direction=direction, skip=skip, bottleneck=bottleneck) == ref_value
+
+    @pytest.mark.parametrize(
+        "nbins_per_dim, direction, bottleneck, skip, ref_index",
+        [
+            ([2, 2], [1, 1], False, [0, 0], 0),
+            ([2, 2], [0, 0], False, [0, 0], 1),
+            ([2, 2], [-1, -1], True, [0, 0], 2),
+            ([2, 2], [0, 0], True, [0, 0], 3),
+            ([2, 2], [0, 0], True, [0, 1], 4),
+            ([2, 2], [0, 0], True, [1, 1], 5),
+            ([2, 2], [86, 0], True, [0, 0], 6),
+            ([2, 2], [86, 86], False, [0, 0], 7),
+        ],
+        ids=[
+            'direction=[1,1], no bottleneck',
+            'direction=[0,0], no bottleneck',
+            'direction=[-1,-1]',
+            'direction=[0,0]',
+            'direction=[0,0], skip=[0,1]',
+            'direction=[0,0], skip=[1,1]',
+            'direction=[86,0]',
+            'direction=[86,86], no bottleneck',
+        ],
+    )
+    def test_2x2_2d_grid_mab_bin_assignments(
+        self, gen_input_mab_data_fixture, ref_mab_results, nbins_per_dim, direction, bottleneck, skip, ref_index
+    ):
+        '''Test MABBinMapper with 2x2 linear section on 2D space'''
+        allcoords = self.input_mab_data['allcoords_2d_grid']
+        N_total = allcoords.shape[0] // 2
+        mask = np.full((N_total * 2), True)
+        output = np.zeros((N_total * 2), dtype=int)
+        output = map_mab(
+            coords=allcoords,
+            mask=mask,
+            output=output,
+            nbins_per_dim=nbins_per_dim,
+            direction=direction,
+            bottleneck=bottleneck,
+            skip=skip,
+        )
+        assert np.all(output[:N_total] == output[N_total:]), "Expected first half of bin assignments to equal second half"
+        assert np.all(
+            output == self.ref_mab_results['2d_grid'][ref_index]
+        ), f"Unexpected 2D grid MAB bin assignments with direction={direction}, bottleneck={bottleneck}, and skip={skip}"
+
+    @pytest.mark.parametrize(
+        "nbins_per_dim, direction, bottleneck, skip, ref_index",
+        [
+            ([2, 2, 2], [0, 0, 0], False, [0, 0, 0], 0),
+            ([2, 2, 2], [0, 0, 0], True, [0, 0, 0], 1),
+        ],
+        ids=[
+            'no bottleneck',
+            'with bottleneck',
+        ],
+    )
+    def test_2x2x2_3d_grid_mab_bin_assignments(
+        self, gen_input_mab_data_fixture, ref_mab_results, nbins_per_dim, direction, bottleneck, skip, ref_index
+    ):
+        '''Test MABBinMapper with 2x2x2 linear section on 3D space'''
+        allcoords = self.input_mab_data['allcoords_3d_grid']
+        N_total = allcoords.shape[0] // 2
+        mask = np.full((N_total * 2), True)
+        output = list(np.zeros((N_total * 2), dtype=int))
+        output = map_mab(
+            coords=allcoords,
+            mask=mask,
+            output=output,
+            nbins_per_dim=nbins_per_dim,
+            direction=direction,
+            bottleneck=bottleneck,
+            skip=skip,
+        )
+        assert output[:N_total] == output[N_total:], "Expected first half of bin assignments to equal second half"
+        assert output == list(
+            self.ref_mab_results['3d_grid'][ref_index]
+        ), f"Unexpected 3D grid MAB bin assignments with direction={direction}, bottleneck={bottleneck}, and skip={skip}"
+
+    @pytest.mark.parametrize(
+        "nbins_per_dim, direction, bottleneck, skip, ref_index",
+        [
+            ([2, 2], [0, 0], True, [0, 0], 0),
+            ([2, 2], [0, 0], True, [0, 1], 1),
+            ([2, 2], [86, -1], True, [0, 0], 2),
+        ],
+        ids=[
+            'direction=[0,0], no skip',
+            'direction=[0,0], skip=[0,1]',
+            'direction=[86,-1], no skip',
+        ],
+    )
+    def test_2x2_2d_gaussian_mab_bin_assignments(
+        self, gen_input_mab_data_fixture, ref_mab_results, nbins_per_dim, direction, bottleneck, skip, ref_index
+    ):
+        '''Test MABBinMapper with 2x2 linear section on a 2D Gaussian space'''
+        allcoords = self.input_mab_data['allcoords_2d_gauss']
+        N_total = allcoords.shape[0] // 2
+        mask = np.full((N_total * 2), True)
+        output = np.zeros((N_total * 2), dtype=int)
+        output = map_mab(
+            coords=allcoords,
+            mask=mask,
+            output=output,
+            nbins_per_dim=nbins_per_dim,
+            direction=direction,
+            bottleneck=bottleneck,
+            skip=skip,
+        )
+        assert np.all(output[:N_total] == output[N_total:]), "Expected first half of bin assignments to equal second half"
+        assert np.all(
+            output == self.ref_mab_results['2d_gauss'][ref_index]
+        ), f"Unexpected 2D Gaussian MAB bin assignments with direction={direction}, bottleneck={bottleneck}, and skip={skip}"
+
+
+def output_mab_reference():
+    '''
+    Function to generate the reference test files for the MAB tests.
+
+    To run this, run `python -c 'from test_binning import output_mab_reference; output_mab_reference()'`
+    in the command line (assuming you're in the `tests/` folder).
-
-    def test_determine_total_bins(self):
-        mab = MABBinMapper([5])
-        assert mab.determine_total_bins(nbins_per_dim=[5, 1], direction=[1, 86], skip=[0, 0], bottleneck=True) == 9
+
+    It will overwrite the file 'tests/refs/mab_assignments_ref.h5' and create 11 png files in `tests/mab_tests/` visualizing the binning.
+    '''
+    import matplotlib.pyplot as plt
+
+    try:
+        os.mkdir('mab_tests')
+    except FileExistsError:
+        pass
+
+    input_data = gen_input_mab_data()
+    with h5py.File(f'{REFERENCE_PATH}/mab_assignments_ref.h5', 'w') as f:
+        # 2D Grid
+        for i, (nbins_per_dim, direction, bottleneck, skip) in enumerate(
+            [
+                ([2, 2], [1, 1], False, [0, 0]),
+                ([2, 2], [0, 0], False, [0, 0]),
+                ([2, 2], [-1, -1], True, [0, 0]),
+                ([2, 2], [0, 0], True, [0, 0]),
+                ([2, 2], [0, 0], True, [0, 1]),
+                ([2, 2], [0, 0], True, [1, 1]),
+                ([2, 2], [86, 0], True, [0, 0]),
+                ([2, 2], [86, 86], False, [0, 0]),
+            ]
+        ):
+            allcoords = input_data['allcoords_2d_grid']
+            N_total = allcoords.shape[0] // 2
+            mask = np.full((N_total * 2), True)
+            output = np.zeros((N_total * 2), dtype=int)
+            output = map_mab(
+                coords=allcoords,
+                mask=mask,
+                output=output,
+                nbins_per_dim=nbins_per_dim,
+                direction=direction,
+                bottleneck=bottleneck,
+                skip=skip,
+            )
+
+            f.create_dataset(f'2d_grid/test_result_{i:d}', data=output)
+
+            # Create a cmap with the same number of colors as the number of bins
+            cmap = plt.cm.get_cmap('tab20', int(np.max(output) + 1))
+
+            # Plot the synthetic data in 2D using a scatter plot
+            # Include a cbar to show the bin assignments
+            plt.scatter(
+                allcoords[:N_total, 0],
+                allcoords[:N_total, 1],
+                s=allcoords[:N_total, 2] * 10000,
+                c=output[:N_total],
+                cmap=cmap,
+                vmin=-0.5,
+                vmax=int(np.max(output)) + 0.5,
+            )
+            plt.colorbar(label='Bin ID', ticks=range(int(np.max(output) + 1)))
+            plt.title(f'nbins_per_dim={nbins_per_dim}, direction={direction},\n bottleneck={bottleneck}, skip={skip}')
+            plt.savefig(f'mab_tests/2d_grid_ref_result_{i}.png')
+            plt.clf()
+
+        # 3D grid
+        for i, (nbins_per_dim, direction, bottleneck, skip) in enumerate(
+            [
+                ([2, 2, 2], [0, 0, 0], False, [0, 0, 0]),
+                ([2, 2, 2], [0, 0, 0], True, [0, 0, 0]),
+            ]
+        ):
+            allcoords = input_data['allcoords_3d_grid']
+            N_total = allcoords.shape[0] // 2
+            mask = np.full((N_total * 2), True)
+            output = np.zeros((N_total * 2), dtype=int)
+            output = map_mab(
+                coords=allcoords,
+                mask=mask,
+                output=output,
+                nbins_per_dim=nbins_per_dim,
+                direction=direction,
+                bottleneck=bottleneck,
+                skip=skip,
+            )
+
+            f.create_dataset(f'3d_grid/test_result_{i:d}', data=output)
+
+        # 2D Gaussian
+        for i, (nbins_per_dim, direction, bottleneck, skip) in enumerate(
+            [
+                ([2, 2], [0, 0], True, [0, 0]),
+                ([2, 2], [0, 0], True, [0, 1]),
+                ([2, 2], [86, -1], True, [0, 0]),
+            ]
+        ):
+            allcoords = input_data['allcoords_2d_gauss']
+            N_total = allcoords.shape[0] // 2
+            mask = np.full((N_total * 2), True)
+            output = np.zeros((N_total * 2), dtype=int)
+            output = map_mab(
+                coords=allcoords,
+                mask=mask,
+                output=output,
+                nbins_per_dim=nbins_per_dim,
+                direction=direction,
+                bottleneck=bottleneck,
+                skip=skip,
+            )
+
+            f.create_dataset(f'2d_gauss/test_result_{i:d}', data=output)
+
+            # Create a cmap with the same number of colors as the number of bins
+            cmap = plt.cm.get_cmap('tab20', int(np.max(output) + 1))
+
+            # Plot the synthetic data in 2D using a scatter plot
+            # Include a cbar to show the bin assignments
+            plt.scatter(
+                allcoords[:N_total, 0],
+                allcoords[:N_total, 1],
+                s=allcoords[:N_total, 2] * 10000,
+                c=output[:N_total],
+                cmap=cmap,
+                vmin=-0.5,
+                vmax=int(np.max(output)) + 0.5,
+            )
+            plt.colorbar(label='Bin ID', ticks=range(int(np.max(output) + 1)))
+            plt.title(f'nbins_per_dim={nbins_per_dim}, direction={direction},\n bottleneck={bottleneck}, skip={skip}')
+            plt.savefig(f'mab_tests/2d_gauss_ref_result_{i}.png')
+            plt.clf()
+    print("Reference data generated and saved to file.")
file.") diff --git a/tests/test_tools/common.py b/tests/test_tools/common.py index bb73340e..c73e9b57 100755 --- a/tests/test_tools/common.py +++ b/tests/test_tools/common.py @@ -1,5 +1,12 @@ '''Module of common tool test class''' +import sys +import pytest + +flaky_on_macos = pytest.mark.flaky(condition=sys.platform.startswith('darwin'), reruns=5, reason='flaky on macos') + +flaky_on_all = pytest.mark.flaky(reruns=5, reason='flaky on all platforms') + # Placeholder class that will set all kwargs as attributes class MockArgs: diff --git a/tests/test_tools/test_w_fluxanl.py b/tests/test_tools/test_w_fluxanl.py index d28a9f24..60c46241 100755 --- a/tests/test_tools/test_w_fluxanl.py +++ b/tests/test_tools/test_w_fluxanl.py @@ -3,6 +3,7 @@ import h5py import numpy +from common import flaky_on_all from westpa.cli.tools.w_fluxanl import WFluxanlTool @@ -90,6 +91,7 @@ def check_output(self, outfile, **kwargs): class Test_W_Fluxanl_System: '''System level tests for w_fluxanl''' + @flaky_on_all def test_alpha(self, ref_50iter): '''Confidence interval size decreases as alpha increases'''