diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2ae80c5..f7d3f61 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,31 +15,44 @@ on: jobs: build: + runs-on: ${{ matrix.os }} strategy: - fail-fast: false matrix: - branch: [version-1-6, devel] - target: [linux, macos, windows] - include: - - target: linux - builder: ubuntu-latest - - target: macos - builder: macos-latest - - target: windows - builder: windows-latest - name: '${{ matrix.target }} (${{ matrix.branch }})' - runs-on: ${{ matrix.builder }} + nim: + - '1.6.x' + - '2.0.x' + - 'stable' + os: + - ubuntu-latest + - windows-latest + - macOS-latest + name: '${{ matrix.nim }} (${{ matrix.os }})' steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: path: impulse - - name: Setup Nim - uses: alaviss/setup-nim@0.1.1 + - name: Setup nim + uses: jiro4989/setup-nim-action@v1 with: - path: nim - version: ${{ matrix.branch }} + nim-version: ${{ matrix.nim }} + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup MSYS2 (Windows) + if: ${{matrix.os == 'windows-latest'}} + uses: msys2/setup-msys2@v2 + with: + path-type: inherit + update: true + install: base-devel git mingw-w64-x86_64-toolchain + + - name: Install dependencies (Windows) + if: ${{matrix.os == 'windows-latest'}} + shell: msys2 {0} + run: | + pacman -Syu --noconfirm + pacman -S --needed --noconfirm mingw-w64-x86_64-lapack - name: Setup nimble & deps shell: bash @@ -48,12 +61,20 @@ jobs: nimble refresh -y nimble install -y - - name: Run tests + - name: Run tests (Linux & OSX) + if: ${{matrix.os != 'windows-latest'}} shell: bash run: | cd impulse nimble -y test + - name: Run tests (Windows) + if: ${{matrix.os == 'windows-latest'}} + shell: msys2 {0} + run: | + cd impulse + nimble -y test + - name: Build docs if: > github.event_name == 'push' && github.ref == 'refs/heads/master' && diff --git a/README.md b/README.md index e7f504e..731f94b 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 
@@ Impulse will be a collection of signal processing primitives for Nim. ## FFT -Currently this library only consists of an FFT module, which wraps +The FFT part of this library wraps [PocketFFT](https://gitlab.mpcdf.mpg.de/mtr/pocketfft). For the C++ backend an optimized version of PocketFFT is used, in the @@ -189,6 +189,10 @@ As a convenience, a `tap_examples` function is provided. This function takes a The LFSR module implementation is favors simplicity over speed. As of 2024, it is able to generate 2^24 values in less than 1 minute on a mid-range laptop. +## Signal + +The Signal module implements several signal processing and related functions, such as a Kaiser window and a firls FIR filter design function. See the documentation of those functions for more details. + ## License Licensed and distributed under either of diff --git a/impulse.nimble b/impulse.nimble index 843dfd2..bd6c99f 100644 --- a/impulse.nimble +++ b/impulse.nimble @@ -2,15 +2,16 @@ version = "0.1.1" author = "SciNim" -description = "Signal processing primitives (FFT, ...)" +description = "Signal processing primitives (FFT, filtering, ...)" license = "MIT" # Dependencies requires "nim >= 1.6.0" -requires "arraymancer >= 0.7.28" +requires "arraymancer >= 0.7.30" task test, "Run standard tests": exec "nim c -r tests/test_fft.nim" exec "nim c -r tests/test_fft2.nim" + exec "nim c -r tests/test_signal.nim" diff --git a/impulse/signal.nim b/impulse/signal.nim new file mode 100644 index 0000000..6699730 --- /dev/null +++ b/impulse/signal.nim @@ -0,0 +1,809 @@ +## Module that implements several signal processing and related functions. +## +## The implementations of many of these functions have been based on the +## corresponding functions in the Numpy and Scipy libraries. 
+ +import arraymancer +import std / strformat +import std / strutils + +# Constants used to implement the Modified Bessel function of the first kind +let i0A_values = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +let i0B_values = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + +proc chbevl[T: SomeFloat](x: T, vals: openArray[T]): T = + var b0 = 0.0 + var b1 = 0.0 + var b2 = 0.0 + + for it in vals: + b2 = b1 + b1 = b0 + b0 = x 
* b1 - b2 + it + + return 0.5 * (b0 - b2) + +proc i0_1[T: SomeFloat](x: T): T = + ## "Inner" function for the Modified Bessel function of the first kind + return exp(x) * chbevl(x / 2.0 - 2.0, i0A_values) + +proc i0_2[T: SomeFloat](x: T): T = + ## "Outer" function for the Modified Bessel function of the first kind + return exp(x) * chbevl(32.0 / x - 2.0, i0B_values) / sqrt(x) + +proc i0*[T: SomeFloat](x: T): T = + ## Modified Bessel function of the first kind, order 0. + ## + ## Inputs: + ## - x: Tensor of floats + ## + ## Result: + ## The modified Bessel function evaluated at each of the elements of `x`. + ## + ## Notes: + ## + ## This implementation is a port of scipy's `i0` function, which in turn + ## uses the algorithm published by Clenshaw [1]_ and referenced by + ## Abramowitz and Stegun [2]_, for which the function domain is + ## partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + ## polynomial expansions are employed in each interval. Relative error on + ## the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + ## peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + ## + ## References: + ## - [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + ## *National Physical Laboratory Mathematical Tables*, vol. 5, London: + ## Her Majesty's Stationery Office, 1962. + ## - [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + ## Functions*, 10th printing, New York: Dover, 1964, pp. 379. 
+ ## https://personal.math.ubc.ca/~cbm/aands/page_379.htm + ## - [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero + ## + ## Examples: + ## ```nim + ## i0(0) + ## # 1.0 + ## >>> i0([0, 1, 2, 3].toTensor) + ## # Tensor[system.float] of shape "[4]" on backend "Cpu" + ## # 1 1.26607 2.27959 4.88079 + ## ``` + if abs(x) <= 8.0: + return i0_1(x) + else: + return i0_2(x) + +makeUniversal(i0) + +proc kaiser*[T: SomeFloat](size: int = 10, beta: T = 5.0): Tensor[T] = + ## Return the Kaiser window of the given `size` and `beta` parameter + ## + ## The Kaiser window is a "taper" function formed by using a Bessel function. + ## + ## Input: + ## - size: Number of points (or samples) in the output window. If zero, + ## an empty Tensor is returned. If negative an assertion is raised. + ## - beta: "Shape" parameter for the window (as a float). Defaults to 5.0, + ## which generates a window similar to a Hamming. + ## + ## Result: + ## - The generated Kaiser window, as a float Tensor with the maximum value + ## normalized to 1.0 (the value 1.0 only appears if the number of samples is odd). + ## + ## Notes: + ## 1. The Kaiser window is defined as + ## + ## > w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + ## \\right)/I_0(\\beta) + ## + ## with + ## + ## > \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + ## + ## where `I_0` is the modified zeroth-order Bessel function. + ## + ## 2. The Kaiser window was named after Jim Kaiser, who discovered a simple + ## approximation to the DPSS window based on Bessel functions. The Kaiser + ## window is a very good approximation to the Digital Prolate Spheroidal + ## Sequence, or Slepian window, which is the transform which maximizes the + ## energy in the main lobe of the window relative to total energy. + ## + ## 3. The Kaiser window can approximate many other windows by varying the beta + ## parameter. 
+ ## + ## ==== ======================= + ## beta Window shape + ## ==== ======================= + ## 0.0 Rectangular + ## 5.0 Similar to a Hamming + ## 6.0 Similar to a Hanning + ## 8.6 Similar to a Blackman + ## ==== ======================= + ## + ## A `beta` value of 14.0 is probably a good starting point if you do not + ## want to use the default or one of the values in the list above. + ## + ## 4. As `beta` gets large, the window narrows, and so the number of samples + ## needs to be large enough to sample the increasingly narrow spike, + ## otherwise some `nan` values will be returned. + ## + ## 5. Most references to the Kaiser window come from the signal processing + ## literature, where it is used as one of many windowing functions for + ## smoothing values. It is also known as an apodization (which means + ## "removing the foot", i.e. smoothing discontinuities at the beginning + ## and end of the sampled signal) or tapering function. + ## + ## 5. This implementation is a port of scipy's `kaiser` function. + ## + ## References: + ## - [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + ## digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + ## John Wiley and Sons, New York, (1966). + ## - [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + ## University of Alberta Press, 1975, pp. 177-178. 
+ ## - [3] Wikipedia, "Window function", + ## https://en.wikipedia.org/wiki/Window_function + ## - [4] Code heavily inspired by (but much faster than) Numpy's + ## implementation, + ## https://github.com/numpy/numpy/blob/v1.26.0/numpy/lib/function_base.py#L3493 + ## + ## Examples: + ## ```nim + ## import impulse / signal + ## echo kaiser(4) + ## # Tensor[system.float] of shape "[4]" on backend "Cpu" + ## # 0.0367109 0.775322 0.775322 0.0367109 + ## + ## echo kaiser(12, 14.0) + ## # Tensor[system.float] of shape "[12]" on backend "Cpu" + ## # 7.72687e-06 0.00346009 0.04652 0.229737 + ## # 0.599885 0.945675 0.945675 0.599885 + ## # 0.229737 0.04652 0.00346009 7.72687e-06 + ## ``` + + doAssert size >= 0, "The size of the Kaiser window must be non-negative" + if size == 1: + return [T(1)].toTensor + + let n_vals = arange(T(0.0), T(size)) + let alpha = (T(size) - 1.0) / 2.0 + return i0(beta * sqrt(1.0 -. ((n_vals -. alpha) / alpha) ^. 2.0)) /. i0(beta) + +proc firls*(fir_order: int, + bands, desired: Tensor[float], + weights = newTensor[float](), + symmetric = true, + fs = 2.0): Tensor[float] = + ## Design a linear phase FIR filter using least-squares error criterion + ## + ## Returns the coefficients of the linear phase FIR filter of the requested + ## order and type which best fullfills the desired frequency response + ## "constraints" described by the `bands` (i.e. frequency), `desired` (i.e. gain) + ## and `weights` (e.i. constraint weights) input tensors. + ## + ## Depending on the order and the value of the `symmetric` argument, the + ## output filter coefficients will correspond to a Type I, II, III or IV + ## FIR filter. + ## + ## The constraints are specified as a pair of tensors describing "constrained + ## frequency bands" indicating the start and end frequencies of each band `k`, + ## as well as the gain of the filter at those edge frequencies. 
+ ## + ## Inputs: + ## - fir_order: The "order" of the filter (which is 1 less than the length + ## of the output tensor). + ## - bands: 2-column matrix of non-overlapping, normalized frequency band + ## edges in ascending order. Each row in the matrix corresponds to + ## the edges of a constrained frequency band (the column contains + ## the start frequency of the band and the second column contains + ## the end frequency). + ## Frequencies between the specified bands are "unconstrained" + ## (i.e. ignored during the error minimization process). + ## Frequency values must be in the [0.0, fs/2] range (i.e. they + ## cannot be negative or exceed the Nyquist frequency). + ## - desired: 2-column matrix that specifies the gains of the desired + ## frequency response at the band "edges". Thus the lengths of + ## the `bands` and `desired` tensors must match. If they do not, + ## an exception is raised. + ## For each band `k` the desired filter frequency + ## response is such that its gain linearly changes from the + ## `desired[k, 0]` value at the start of the band (i.e. at the + ## `bands[k, 0]` frequency) to the value `desired[k, 1]` at the + ## end of the band (i.e. at `bands[k, 1]`). + ## - weights: Optional rank-1 Tensor of weights. Controls which frequency + ## response "contraints" are given more "weight" during the + ## least-squares error minimization process. The default is that + ## all constraints are given the same weight. If provided, its + ## length must be half the length of `bands` (i.e. there must be + ## one constraint per band). An exception is raised otherwise. + ## - symmetric: When `true` (the default), the result will be a symmetric + ## FIR filter (Type I when `fir_order` is even and Type II + ## when `fir_order` is odd). When `false`, the result will be + ## an anti-symmetric FIR filter (Type III when `fir_order` is + ## even and Type IV when `fir_order` is odd). + ## - fs: The sampling frequency of the signal (as a float). 
Each frequency + ## in `bands` must be between 0.0 and `fs/2` (inclusive). + ## Default is 2.0, which means that by default the band frequencies + ## are expected to be on the 0.0 to 1.0 range. + ## + ## Result: + ## - A Tensor containing the `fir_order + 1` taps of the FIR filter that + ## best approximates the desired frequency response (i.e. the filter that + ## minimizes the least-squares error vs the given constraints). + ## + ## Notes: + ## - Contrary to numpy's firls, the first argument is the FIR filter order, + ## not the FIR length. The filter length is `filter_order + 1`. + ## - Frequencies between "constrained bands" are considered "don't care" + ## regions for which the error is not minimized. + ## - When designing a filter with a gain other than zero at the Nyquist + ## frequency (i.e. when `bands[^1] != 0.0`), such as high-pass and + ## band-stop filters, the filter order must be even. An exception is + ## raised otherwise. + ## - The `bands` and `desired` can also be flat rank-1 tensors, as long as + ## their length is even (so that they can be reshaped into 2 column + ## matrices. 
+ ## + ## Examples: + ## ```nim + ## # Example of an order 4, Type I, low-pass FIR filter which targets being + ## # a pass-through in the 0.0 to 0.3 times Nyquist frequency range, while + ## # filtering frequencies in the 0.4 to 1.0 Nyquist frequency range + ## # Note that the range 0.3 to 0.4 is left unconstrained + ## # Also note how the result length is 6 and the filter is symmetric + ## # around the middle value: + ## echo firls(4, [[0.0, 0.3], [0.4, 1.0]].toTensor, [[1.0, 1.0], [0.0, 0.0]].toTensor) + ## # Tensor[system.float] of shape "[5]" on backend "Cpu" + ## # 0.126496 0.278552 0.345068 0.278552 0.126496 + ## + ## # Same filter as above, but using rank-1, even length tensors as inputs + ## echo firls(4, [0.0, 0.3, 0.4, 1.0].toTensor, [1.0, 1.0, 0.0, 0.0].toTensor) + ## # Tensor[system.float] of shape "[5]" on backend "Cpu" + ## # 0.126496 0.278552 0.345068 0.278552 0.126496 + ## + ## # Same filter as above, but using unnormalized frequencies and a sampling + ## # frequency of 10.0 Hz (note how all the frequencies are 5 times greater + ## # because the default fs is 2.0): + ## echo firls(4, + ## [[0.0, 1.5], [2.0, 5.0]].toTensor, + ## [[1.0, 1.0], [0.0, 0.0]].toTensor, + ## fs = 10.0) + ## # Tensor[system.float] of shape "[5]" on backend "Cpu" + ## # 0.126496 0.278552 0.345068 0.278552 0.126496 + ## + ## # Same kind of filter, but give more weight to the pass-through constraint + ## echo firls(4, + ## [[0.0, 0.3], [0.4, 1.0]].toTensor, + ## [[1.0, 1.0], [0.0, 0.0]].toTensor, + ## weights = [1.0, 0.5].toTensor) + ## # Tensor[system.float] of shape "[5]" on backend "Cpu" + ## # 0.110353 0.284473 0.360868 0.284473 0.110353 + ## + ## # A more complex, order 5, Type II, low-pass filter + ## echo firls(5, + ## [[0.0, 0.3], [0.3, 0.6], [0.6, 1.0]].toTensor, + ## [[1.0, 1.0], [1.0, 0.2], [0.0, 0.0]].toTensor) + ## # Tensor[system.float] of shape "[6]" on backend "Cpu" + ## # -0.0560333 0.146107 0.430716 0.430716 0.146107 -0.0560333 + ## + ## # Example of an order 
6 Type IV high-pass FIR filter: + ## echo firls(6, + ## [[0.0, 0.4], [0.6, 1.0]].toTensor, [[0.0, 0.0], [0.9, 1.0]].toTensor, + ## symmetric = false) + ## # Tensor[system.float] of shape "[7]" on backend "Cpu" + ## # -0.13945 0.285186 -0.258596 0 0.258596 -0.285186 0.13945 + ## + ## Trying to design a high-pass filter with odd order generates an exception: + ## echo firls(5, + ## [0.0, 0.5, 0.6, 1.0].toTensor, [0.0, 0.0, 1.0, 1.0].toTensor, + ## symmetric = false) + ## # Filter order (5) must be even when the last + ## # frequency is 1.0 and its gain is not 0.0 [ValueError] + ## ``` + if fs <= 0: + raise newException(ValueError, + "Sampling frequency fs must be positive but got " & $fs) + + var bands = bands.flatten() + let desired = desired.flatten() + if bands.rank > 1: + raise newException(ValueError, + "Frequency band tensor rank (" & $bands.rank & ") is not 1") + if desired.rank > 1: + raise newException(ValueError, + "Desired gain pair tensor rank (" & $desired.rank & ") is not 1") + + let nyquist_freq = fs / 2.0 + if max(bands) > nyquist_freq or min(bands) < 0.0: + raise newException(ValueError, + "Some frequency values are outside of the valid [0.0, " & + $nyquist_freq & "] range:\n" & + $bands) + + if (bands.len mod 2) != 0: + raise newException(ValueError, "Frequency band tensor length (" & + $bands.len & ") is not even") + + if bands.len != desired.len: + raise newException(ValueError, + "Frequency and desired gain tensors must have the same length (" & + $bands.len & "!=" & $desired.len & ")") + + # Check whether the filter order is valid + let even_order = (fir_order mod 2) == 0 + + if not even_order and bands[^1].item == 1.0 and desired[^1].item != 0.0: + raise newException(ValueError, + "Filter order (" & $fir_order & + ") must be even when the last frequency is 1.0 and its desired gain is not 0.0") + + let weights = if weights.len == 0: + # When weights are not provided, make them constant + # (i.e. 
all bands have the same weight) + ones[float](bands.len div 2) + else: + weights + if bands.len != 2 * weights.len: + raise newException(ValueError, + "Weigth tensor length (" & $weights.len & + ") must be half the length of the frequency band and desired gain pair tensor lenghts (" & + $bands.len & ")") + + # Note that df is only used + let df = diff_discrete(bands, axis=0) + if any(df <. 0.0): + raise newException(ValueError, + "Frequency band tensor values must increasing monotonically") + + # We must normalize the band frequencies to the Nyquist frequency, but we + # must _also_ multiply them by 2.0. The result is that (internally to this + # function) we normalize them by the sampling frequency, but externally + # (i.e. in the function documentation) we say that we normalize to the + # Nyquist frequency + bands /= fs # Equivalent to `bands /= (0.5 * fs) * 2.0`! + let constant_weights = all((weights -. weights[0]) ==. 0.0) + # If there are no gaps between the constrained bands we consider that the + # constraints are "continuous" + let continuous_constraints = df.len == 1 or all(df[1 ..< df.len | 2] ==. 0.0) + # Only use the matrix inversion method when needed, since it is much slower + let requires_matrix_inversion = not (constant_weights and continuous_constraints) + + # The half "length" is the length of the "half" of the filter that is + # symmetric or anti-symmetric with the other "half". "Half" is in quotes + # because when fir_order is even an additional middle coefficient must + # be added between the two "halfs" + let half_length = fir_order / 2 + + if symmetric: + # Symmetric (i.e. 
Type I or Type II) linear phase FIR + # Type I when fir_order is even, Type II otherwise + var k = arange(floor(half_length) + 1.0) + if not even_order: + k +.= 0.5 + + var sk_pos, sk_neg, q: Tensor[float] + if requires_matrix_inversion: + let k_right_tile = 2.0 * k.reshape_infer(-1, 1).tile(1, k.len) + let k_down_tile = 2.0 * k.tile(k.len, 1) + sk_pos = k_right_tile + k_down_tile + sk_neg = k_right_tile - k_down_tile + q = zeros[float](sk_pos.shape) + if even_order: + k = k[1.._] + + # Note that we order the operations and make some pre-calculations + # to minimize the number of tensor ops + let trig_base = 2.0 * PI * k + let k_square_inv = 1.0 /. (k *. k) + var b0 = 0.0 + var b = zeros[float](k.shape) + for idx in countup(0, bands.len - 1, 2): + let weight_pow = abs(weights[(idx+1) div 2]) + if requires_matrix_inversion: + q += + weight_pow * 0.5 * bands[idx+1] * (sinc(sk_pos * bands[idx+1]) + sinc(sk_neg * bands[idx+1])) - + weight_pow * 0.5 * bands[idx] * (sinc(sk_pos * bands[idx]) + sinc(sk_neg * bands[idx])) + let slope = (desired[idx+1] - desired[idx]) / (bands[idx+1] - bands[idx]) + let b1 = desired[idx] - slope * bands[idx] + if even_order: + b0 += weight_pow * + (b1 * (bands[idx+1] - bands[idx]) + slope / 2.0 * (bands[idx+1]^2 - bands[idx]^2)) + b += weight_pow * + (slope / (4.0 * PI^2) * (cos(trig_base * bands[idx+1]) - cos(trig_base * bands[idx])) *. k_square_inv) + b += + weight_pow * bands[idx+1] * (slope * bands[idx+1] + b1) * sinc(2.0 * k * bands[idx+1]) - + weight_pow * bands[idx] * (slope * bands[idx] + b1) * sinc(2.0 * k * bands[idx]) + + if even_order: + b = concat([[b0].toTensor, b], axis = 0) + + var a: Tensor[float] + if requires_matrix_inversion: + a = solve(q, b) + else: + a = 4.0 * weights[0] * b + if even_order: + a[0] /= 2.0 + + result = if even_order: + # Type I FIR filter: symmetric, even order, odd length + # Note that half_length, despite being a float, is an exact number when + # fir_order is even! + concat([a[half_length.int .. 
1 | -1] * 0.5, + [a[0]].toTensor, + a[1 .. half_length.int] * 0.5], axis = 0) + else: + # Type II FIR filter: symmetric, odd order, even length + 0.5 * concat([a[_|-1], a], axis = 0) + else: + # Anti-symmetric (i.e. Type III or Type IV) linear phase FIR + # Type III when fir_order is even, Type IV otherwise + var k = if even_order: + # Note that half_order is float but it is an exact number! + arange(1.0, floor(half_length) + 1.0) + else: + arange(floor(half_length) + 1.0) +. 0.5 + + var sk_pos, sk_neg, q: Tensor[float] + if requires_matrix_inversion: + let k_right_tile = 2.0 * k.reshape_infer(-1, 1).tile(1, k.len) + let k_down_tile = 2.0 * k.tile(k.len, 1) + # sk_pos is the base argument for the "positive" sincs + sk_pos = k_right_tile + k_down_tile + # sk_neg is the base argument for the "negative" sincs + sk_neg = k_right_tile - k_down_tile + q = zeros[float](sk_pos.shape) + + # Note that we order the operations and make some pre-calculations + # to minimize the number of tensor ops + let trig_base = 2.0 * PI * k + let k_square_inv = 1.0 /. (k *. k) + var b = zeros[float](k.shape) + for idx in countup(0, bands.len - 1, 2): + let weight_pow = abs(weights[(idx+1) div 2]) + if requires_matrix_inversion: + q += + weight_pow * 0.5 * bands[idx+1] * (sinc(sk_pos * bands[idx+1]) - sinc(sk_neg * bands[idx+1])) - + weight_pow * 0.5 * bands[idx] * (sinc(sk_pos * bands[idx]) - sinc(sk_neg * bands[idx])) + let slope = (desired[idx+1] - desired[idx]) / (bands[idx+1] - bands[idx]) + let b1 = desired[idx] - slope * bands[idx] + b += weight_pow * + (slope / (4.0 * PI^2) * (sin(trig_base * bands[idx+1]) - sin(trig_base * bands[idx])) *. k_square_inv) + b += + (weight_pow * (slope * bands[idx] + b1) * cos(trig_base * bands[idx]) - + weight_pow * (slope * bands[idx+1] + b1) * cos(trig_base * bands[idx+1])) /. 
trig_base + + let a = if requires_matrix_inversion: + solve(q, b) + else: + -4.0 * weights[0] * b + + result = if even_order: + # Type III FIR filter: anti-symmetric, with a zero middle value + 0.5 * concat([a[_|-1], [0.0].toTensor, -a], axis = 0) + else: + # Type IV FIR filter: anti-symmetric, no middle value + 0.5 * concat([a[_|-1], -a], axis = 0) + +proc upfirdn*[T](t, h: Tensor[T], up = 1, down = 1): Tensor[T] {.noinit.} = + ## Upsample, apply a FIR filter, and downsample a rank-1 Tensor + ## + ## Inputs: + ## - t: Rank-1 input signal Tensor that will be filtered + ## - h: Rank-1 FIR (finite-impulse response) filter coeeficient tensor + ## - up: Upsampling rate (applied before the FIR filter step) + ## - down: Downsampling rate (applied after the FIR filter step) + ## + ## Result: + ## - The filtered signal Rank-1 tensor + ## + ## Note: + ## - The order of the input and filter tensors follows the `Matlab` + ## convention, rather than the `scipy.signal` convention, which puts the + ## filter first, followed by the input tensor). If you are translating + ## `scipy` code you must swap the order of the input and filter tensors. 
+ ## + ## Examples: + ## ```nim + ## # FIR filter + ## echo upfirdn([1, 1, 1].toTensor, [1, 1, 1].toTensor) + ## # Tensor[system.int] of shape "[5]" on backend "Cpu" + ## # 1 2 3 2 1 + ## + ## # Upsampling with zeros insertion + ## echo upfirdn([1, 2, 3].toTensor, [1].toTensor, 3) + ## # Tensor[system.int] of shape "[7]" on backend "Cpu" + ## # 1 0 0 2 0 0 3 + ## + ## # Upsampling with sample-and-hold + ## echo upfirdn([1, 2, 3].toTensor, [1, 1, 1].toTensor, 3) + ## # Tensor[system.int] of shape "[9]" on backend "Cpu" + ## # 1 1 1 2 2 2 3 3 3 + ## + ## # Linear interpolation + ## echo upfirdn([1.0, 1.0, 1.0].toTensor, [0.5, 1.0, 0.5].toTensor, 2) + ## # Tensor[system.float] of shape "[7]" on backend "Cpu" + ## # 0.5 1 1 1 1 1 0.5 + ## + ## # Decimation by 3 + ## echo upfirdn(arange(10.0), [1.0].toTensor, 1, 3) + ## # Tensor[system.float] of shape "[4]" on backend "Cpu" + ## # 0 3 6 9 + ## + ## # Linear interpoloation (rate 2/3) + ## echo upfirdn(arange(10.0), [0.5, 1.0, 0.5].toTensor, 2, 3) + ## # Tensor[system.float] of shape "[7]" on backend "Cpu" + ## # 0 1 2.5 4 5.5 7 8.5 + t.upsample(up).convolve(h, mode = ConvolveMode.full, down = down) + +func reduce_resampling_rates*(up, down: int): (int, int) = + ## Reduce resampling rates to the minimum values that give the same up/down ratio + ## + ## This function reduces a pair of `up` / `down` sampling rates to the + ## minimum equivalent values (by dividing them by their greatest common + ## divisor). This is useful to reduce the number of operations required to + ## resample a tensor (since what really matters is the `up / down` ratio, not + ## the actual values of `up` and `down`). + ## + ## Note that there are 2 versions of the `resample` function. The version that + ## does _not_ take a filter tensor automatically calls this function. However, + ## the version that takes a filter tensor as its second argument does not. 
+ let g = gcd(up, down) + let up = floorDiv(up, g) + let down = floorDiv(down, g) + result = (up, down) + +proc generate_resampling_filter*[T]( + up: int, down: int, fir_order_factor = 10, beta = 5.0 + ): Tensor[T] = + ## Generate a resampling filter + ## + ## Generate a resampling filter that is appropriate given an `up` / `down` + ## sampling pair, a `fir_order_factor` and a kaiser window `beta` value. + ## + ## Inputs: + ## - up: Upsampling rate + ## - down: Downsampling rate + ## - fir_order_factor: A factor used to calculate the generated filter order, + ## which will be `2 * fir_order_factor * max(up, down)`. + ## If set to 0, the generated filter will be a simple + ## rectangular signal which can perform a step upsample + ## - beta: The beta value used to generate the kaiser window that is + ## applied to the filter + ## + ## Result: + ## - Tensor of filter coefficients + if fir_order_factor == 0: + # Step upsample + return ones[T](up) + + # Calculate the cut-off frequency and the filter order + let max_ratio = max(up, down) + let fc = 1.0 / max_ratio.float + let filter_order = 2 * fir_order_factor * max_ratio + # Design the base filter + result = firls(filter_order, + [0.0, fc, fc, 1.0].toTensor, + [1.0, 1.0, 0.0, 0.0].toTensor) + # Apply the kaiser window (note that the filter length is one more than + # its order, so the window length must also be the filter order plus 1) + result *.= kaiser(filter_order + 1, beta) + # And finally normalize the filter + result *.= up.float / result.sum + +proc adjust_filter_position[T]( + h: Tensor[T], input_len, result_len, up, down: int): + tuple[h: Tensor[T], total_delay: int] = + ## Internal function which is used to adjust the position of the filter + ## (by adding leading and trailing zeros) so that: + ## - The downsampling procedure takes samples that correspond to the filter + ## middle point + ## - The result length matches what is expected + ## This function also calculates the total delay (of the upsample, 
filter + ## and downsample operation), which is the number of samples that we will + ## need to remove (after downsampling) to align the output with the input + + # When we downsample, we want to take those samples that "correspond" to the + # filter middle point. To do so we add some leading zeros to the filter: + var filter_middle_point = (h.len.float - 1.0) / 2.0 + let num_leading_zeros = floor(down.float - (filter_middle_point.floorMod(down.float))).int + result.h = concat(zeros[T](num_leading_zeros), h, axis = 0) + # Obviously, this shifts the actual position of the filter middle point + filter_middle_point = filter_middle_point + num_leading_zeros.float + + # Calculate the total (post downsampling) filter delay, which we will have + # to remove from the result in order to compensate for it + result.total_delay = floor(ceil(filter_middle_point) / down.float).int + + # Add trailing zeros to the filter so that the actual output length + # matches our target result length + # This can only be done iteratively due to the ceil operations involved + # To do so: + # 1. Calculate the length that the output of the filter, before downsampling + # (i.e. after upsampling and the FIR) would have _before_ adding any zeros + let upsampled_filtered_tensor_len = (input_len - 1) * up + result.h.len + # 2. Keep adding zeros until the result length would exceed the target + var num_trailing_zeros = 0 + while result.total_delay + result_len >= + ceil((upsampled_filtered_tensor_len + num_trailing_zeros) / down).int: + num_trailing_zeros += 1 + # 3. Finally add the zeros that we calculated + result.h = concat(result.h, zeros[T](num_trailing_zeros), axis = 0) + +proc resample*[T](t, h: Tensor[T], up = 1, down = 1): Tensor[T] = + ## Resample a Tensor at `up / down` times using a particular FIR filter + ## + ## This is basically a wrapper around `upfirdn`. The main difference + ## with that procedure is that resample only keeps the `up * down * t.len` + ## "central" samples (i.e. 
it takes into account the delay introduced by + ## the filter). + ## + ## Inputs: + ## - t: Rank-1 input tensor + ## - h: Rank-1 tensor of resampling filter coefficients + ## - up: Upsampling rate + ## - down: Downsampling rate + ## + ## Result: + ## - Rank-1 resampled tensor + ## + ## Notes: + ## - Contrary to the version of this function which doesn't take an + ## explicit filter, this version does not automatically reduce the + ## upsampling and downsampling ratios. If you want to reduce them + ## you must manually use `reduce_resampling_rates` before calling + ## this function. + ## + ## Example: + ## ```nim + ## let t = [1.0 , 1.2, -0.3, -1.0, 2.0].toTensor + ## let (up, down) = reduce_resampling_rates(6, 4) + ## let h = generate_resampling_filter[float](up = up, down = down) + ## echo resample(t, up = up, down = down) + ## # Tensor[system.float] of shape "[8]" on backend "Cpu" + ## # 1.00061 1.22699 1.00038 -0.300182 -1.44448 0.114778 2.00121 0.917579 + ## ``` + if up == 1 and down == 1: + return t.clone() + + var h = h.squeeze + if h.rank != 1: + raise newException(ValueError, + "Squeezed filter rank (" & $h.rank & ") must be 1") + + # Calculate the _target_ result length + let result_len = ceil(t.len * up / down).int + + # Adjust the filter position to align the input with the output and + # calculate the total delay _after_ downsampling + var total_delay = 0 + (h, total_delay) = adjust_filter_position(h, t.len, result_len, up, down) + + # Perform the actual upsampling, FIR filtering and downsampling + result = t.upfirdn(h, up = up, down = down) + + # Remove the trailing and leading samples + # to align the result with the input signal + result = result[total_delay.int ..< (total_delay.int + result_len)] + +proc resample*[T](t: Tensor[T], up = 1, down = 1, fir_order_factor = 10, beta = 5.0): Tensor[T] = + ## Resample a Tensor at `up / down` times its original sampling rate + ## + ## The resampling is performed using a polyphase anti-aliasing FIR filter + ## 
that is designed according to the selected `fir_order_factor` and a Kaiser
+  ## window of the given `beta`.
+  ##
+  ## Inputs:
+  ## - t: Rank-1 input tensor
+  ## - up: Upsampling rate
+  ## - down: Downsampling rate
+  ## - fir_order_factor: A factor used to calculate the generated filter order,
+  ##   which will be `2 * fir_order_factor * max(up, down)`.
+  ##   If set to 0, the generated filter will be a simple
+  ##   rectangular signal which can perform a step upsample
+  ## - beta: The beta value used to generate the Kaiser window that is
+  ##   applied to the filter
+  ##
+  ## Result:
+  ## - Rank-1 resampled tensor
+  ##
+  ## Note:
+  ## - The `up` and `down` rates are automatically "reduced" (by calling
+  ##   `reduce_resampling_rates`, which divides them by their greatest
+  ##   common divisor) before the resampling operation (and the filter
+  ##   generation) is performed. This can reduce the number of calculations
+  ##   significantly. If you want to avoid that "reduction", generate the
+  ##   filter using `generate_resampling_filter` first, and then call the
+  ##   `resample` procedure that takes the filter as its input (since that
+  ##   version does not reduce the ratios automatically). You can find an
+  ##   example of how to do this on the examples section of that procedure's
+  ##   documentation.
+ ## + ## Example: + ## ```nim + ## let t = [1.0 , 1.2, -0.3, -1.0, 2.0].toTensor + ## echo resample(t, up = 3, down = 2) + ## # Tensor[system.float] of shape "[8]" on backend "Cpu" + ## # 1.00061 1.22699 1.00038 -0.300182 -1.44448 0.114778 2.00121 0.917579 + ## + ## # Any `up` and `down` values that have the same ratio give the same result + ## echo resample(t, up = 6, down = 4) + ## # Tensor[system.float] of shape "[8]" on backend "Cpu" + ## # 1.00061 1.22699 1.00038 -0.300182 -1.44448 0.114778 2.00121 0.917579 + ## ``` + if up == down: + # No need to filter if the upsampling and downsampling ratios match + # Note that this is only true when no specific filter is provided + return t.clone() + + # What matters is the _ratio_ of the uplink and downlink factors, so we can + # reduce the up and down factors before calculating the anti-aliasing filter + # and performing the actual resampling operation in order to save computation + # time on really long signals + let (up, down) = reduce_resampling_rates(up, down) + + # Generate the filter coefficients + when T is Complex: + let h = complex(generate_resampling_filter[typeof(T.re)](up, down, fir_order_factor, beta)) + else: + let h = generate_resampling_filter[T](up, down, fir_order_factor, beta) + + # And resample the input signal + resample(t, h, up = up, down = down) diff --git a/tests/test_signal.nim b/tests/test_signal.nim new file mode 100644 index 0000000..369f357 --- /dev/null +++ b/tests/test_signal.nim @@ -0,0 +1,254 @@ +import ../impulse/signal +import arraymancer +import std / unittest + +proc test_kaiser() = + ## Test the `kaiser` window function + test "Kaiser window": + let kw1 = kaiser(4) + let kw2 = kaiser(4, 5.0) + let kw3 = kaiser(4, 8.0) + let expected_kw1 = [0.03671089, 0.7753221 , 0.7753221 , 0.03671089].toTensor + let expected_kw3 = [0.00233883, 0.65247867, 0.65247867, 0.00233883].toTensor + + check: kw1.mean_absolute_error(expected_kw1) < 1e-8 + check: kw1.mean_absolute_error(kw2) < 1e-8 + check: 
kw3.mean_absolute_error(expected_kw3) < 1e-8 + +proc test_firls() = + ## Test the `firls` function + test "firls": + block: + let expected = [0.1264964, 0.278552488, 0.34506779765, 0.278552488, 0.1264964].toTensor + check: expected.mean_absolute_error( + firls(4, + [[0.0, 0.3], [0.4, 1.0]].toTensor, + [[1.0, 1.0], [0.0, 0.0]].toTensor)) < 1e-8 + + # Same filter as above, but using rank-1, even length tensors as inputs + check: expected.mean_absolute_error( + firls(4, + [0.0, 0.3, 0.4, 1.0].toTensor, + [1.0, 1.0, 0.0, 0.0].toTensor)) < 1e-8 + + # Same filter as above, but using unnormalized frequencies and a sampling + # frequency of 10.0 Hz (note how all the frequencies are 5 times greater + # because the default fs is 2.0): + check: expected.mean_absolute_error( + firls(4, + [[0.0, 1.5], [2.0, 5.0]].toTensor, + [[1.0, 1.0], [0.0, 0.0]].toTensor, + fs = 10.0)) < 1e-8 + + block: + # Same kind of filter, but give more weight to the pass-through constraint + let expected = [0.110353444, 0.28447325, 0.36086805, 0.28447325, 0.110353444].toTensor + check: expected.mean_absolute_error( + firls(4, + [[0.0, 0.3], [0.4, 1.0]].toTensor, + [[1.0, 1.0], [0.0, 0.0]].toTensor, + weights = [1.0, 0.5].toTensor)) < 1e-8 + + block: + # A more complex, order 5, Type II, low-pass filter + let expected = [-0.05603328, 0.146107441, 0.43071645, 0.43071645, 0.146107441, -0.05603328].toTensor + check: expected.mean_absolute_error( + firls(5, + [[0.0, 0.3], [0.3, 0.6], [0.6, 1.0]].toTensor, + [[1.0, 1.0], [1.0, 0.2], [0.0, 0.0]].toTensor)) < 1e-8 + + block: + # Example of an order 6 Type IV high-pass FIR filter: + let expected = [-0.13944975, 0.2851858, -0.25859575, 0.0, 0.25859575, -0.2851858, 0.13944975].toTensor + check: expected.mean_absolute_error(firls(6, + [[0.0, 0.4], [0.6, 1.0]].toTensor, [[0.0, 0.0], [0.9, 1.0]].toTensor, + symmetric = false)) < 1e-8 + +proc test_upfirdn() = + test "upfirdn": + # FIR filter + check: upfirdn([1, 1, 1].toTensor, [1, 1, 1].toTensor) == [1, 2, 3, 2, 
1].toTensor + + # Upsampling with zero insertion + check: upfirdn([1, 2, 3].toTensor, [1].toTensor, 3) == [1, 0, 0, 2, 0, 0, 3].toTensor + + # Upsampling with sample-and-hold + check: upfirdn([1, 2, 3].toTensor, [1, 1, 1].toTensor, 3) == [1, 1, 1, 2, 2, 2, 3, 3, 3].toTensor + + # Linear interpolation + check: upfirdn([1.0, 1.0, 1.0].toTensor, [0.5, 1.0, 0.5].toTensor, 2) == [0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5].toTensor + + # Decimation by 3 + check: upfirdn(arange(10), [1].toTensor, 1, 3) == [0, 3, 6, 9].toTensor + + # Linear interp, rate 2/3 + check: upfirdn(arange(10.0), [0.5, 1.0, 0.5].toTensor, 2, 3) == [0.0, 1.0, 2.5, 4, 5.5, 7.0, 8.5].toTensor + +proc test_resample() = + test "resample": + let t = [1.0 , 1.2, -0.3, -1.0, 2.0].toTensor + + block: # Resample with upsampling only + let resampled = resample(t, up = 3) + let expected = [1.0006061736, + 1.1652623795, 1.2269863677, 1.2007274083, 1.0003755977, + 0.4963084977, -0.3001818521, -1.1022817902, -1.4444776098, + -1.0006061736, 0.1147783685, 1.3383072463, 2.0012123471, + 1.7824018717, 0.9175789829].toTensor + check: resampled.mean_absolute_error(expected) < 1e-8 + + block: # Resample with downsampling only + let resampled = resample(t, down = 2) + let expected = [0.9812293475, -0.0867375515, 0.5627008880].toTensor + check: resampled.mean_absolute_error(expected) < 1e-8 + + block: # Resample with upsample and downsample + let resampled = resample(t, up = 3, down = 2) + let expected = [1.0006061736, 1.2269863677, 1.0003755977, -0.3001818521, + -1.4444776098, 0.1147783685, 2.0012123471, 0.9175789829].toTensor + check: resampled.mean_absolute_error(expected) < 1e-8 + + # Doubling _both_ sampling rates should give the same result + let filtered_double_ratios = resample(t, up = 6, down = 4) + check: resampled == filtered_double_ratios + + block: # Resample with specific filter coefficients + var (up, down) = (6, 4) + (up, down) = reduce_resampling_rates(up, down) + check: (up, down) == (3, 2) + + let h = 
generate_resampling_filter[float](up, down) + let resampled = resample(t, h, up = up, down = down) + let expected = [1.0006061736, 1.2269863677, 1.0003755977, -0.3001818521, + -1.4444776098, 0.1147783685, 2.0012123471, 0.9175789829].toTensor + check: resampled.mean_absolute_error(expected) < 1e-8 + + block: # Resample with a specific FIR order factor and beta + let resampled = resample(t, up = 3, down = 2, + fir_order_factor = 30, beta=6.0) + let expected = [1.0000890770, 1.1907035514, 1.0289575738, -0.3000267231, + -1.4576402433, 0.1203579771, 2.0001781539, 0.9282917306].toTensor + check: resampled.mean_absolute_error(expected) < 1e-8 + + block: # Resample with a specific 0 window length (i.e. step upsampling) + let resampled = resample(t, up = 3, down = 2, fir_order_factor = 0) + let expected = [1.0, 1.2, 1.2, -0.3, -1, -1, 2, 0].toTensor + check: resampled.mean_absolute_error(expected) < 1e-8 + + block: # Resample a complex signal + let tc = complex(t, 2.0 *. t) + let resampled = resample(tc, up = 3, down = 2) + let expected = complex( + [1.0006061736, 1.2269863677, 1.0003755977, -0.3001818521, + -1.4444776098, 0.1147783685, 2.0012123471, 0.9175789829].toTensor, + [2.0012123471, 2.4539727354, 2.0007511954, -0.6003637041, + -2.8889552195, 0.2295567369, 4.0024246942, 1.8351579659].toTensor + ) + check: resampled.mean_absolute_error(expected) < 1e-8 + + block: + # Check that a wide set of combinations of input lengths and up and down + # ratios work fine (i.e. 
no exceptions raised and the output lenghts are + # what is expected) + let expected_result_lengths = @[ + 100, 67, 50, 40, 34, 29, 25, 23, 150, 100, 75, 60, 50, 43, 38, 34, + 200, 134, 100, 80, 67, 58, 50, 45, 250, 167, 125, 100, 84, 72, 63, 56, + 300, 200, 150, 120, 100, 86, 75, 67, 350, 234, 175, 140, 117, 100, 88, + 78, 400, 267, 200, 160, 134, 115, 100, 89, 450, 300, 225, 180, 150, + 129, 113, 100, 101, 68, 51, 41, 34, 29, 26, 23, 152, 101, 76, 61, 51, + 44, 38, 34, 202, 135, 101, 81, 68, 58, 51, 45, 253, 169, 127, 101, 85, + 73, 64, 57, 303, 202, 152, 122, 101, 87, 76, 68, 354, 236, 177, 142, + 118, 101, 89, 79, 404, 270, 202, 162, 135, 116, 101, 90, 455, 303, 228, + 182, 152, 130, 114, 101, 102, 68, 51, 41, 34, 30, 26, 23, 153, 102, + 77, 62, 51, 44, 39, 34, 204, 136, 102, 82, 68, 59, 51, 46, 255, 170, + 128, 102, 85, 73, 64, 57, 306, 204, 153, 123, 102, 88, 77, 68, 357, + 238, 179, 143, 119, 102, 90, 80, 408, 272, 204, 164, 136, 117, 102, + 91, 459, 306, 230, 184, 153, 132, 115, 102, 103, 69, 52, 42, 35, 30, + 26, 23, 155, 103, 78, 62, 52, 45, 39, 35, 206, 138, 103, 83, 69, 59, + 52, 46, 258, 172, 129, 103, 86, 74, 65, 58, 309, 206, 155, 124, 103, + 89, 78, 69, 361, 241, 181, 145, 121, 103, 91, 81, 412, 275, 206, 165, + 138, 118, 103, 92, 464, 309, 232, 186, 155, 133, 116, 103, 104, 70, + 52, 42, 35, 30, 26, 24, 156, 104, 78, 63, 52, 45, 39, 35, 208, 139, + 104, 84, 70, 60, 52, 47, 260, 174, 130, 104, 87, 75, 65, 58, 312, 208, + 156, 125, 104, 90, 78, 70, 364, 243, 182, 146, 122, 104, 91, 81, 416, + 278, 208, 167, 139, 119, 104, 93, 468, 312, 234, 188, 156, 134, 117, + 104, 105, 70, 53, 42, 35, 30, 27, 24, 158, 105, 79, 63, 53, 45, 40, + 35, 210, 140, 105, 84, 70, 60, 53, 47, 263, 175, 132, 105, 88, 75, 66, + 59, 315, 210, 158, 126, 105, 90, 79, 70, 368, 245, 184, 147, 123, 105, + 92, 82, 420, 280, 210, 168, 140, 120, 105, 94, 473, 315, 237, 189, 158, + 135, 119, 105, 106, 71, 53, 43, 36, 31, 27, 24, 159, 106, 80, 64, 53, + 46, 40, 36, 212, 142, 106, 85, 
71, 61, 53, 48, 265, 177, 133, 106, 89, + 76, 67, 59, 318, 212, 159, 128, 106, 91, 80, 71, 371, 248, 186, 149, + 124, 106, 93, 83, 424, 283, 212, 170, 142, 122, 106, 95, 477, 318, 239, + 191, 159, 137, 120, 106, 107, 72, 54, 43, 36, 31, 27, 24, 161, 107, + 81, 65, 54, 46, 41, 36, 214, 143, 107, 86, 72, 62, 54, 48, 268, 179, + 134, 107, 90, 77, 67, 60, 321, 214, 161, 129, 107, 92, 81, 72, 375, + 250, 188, 150, 125, 107, 94, 84, 428, 286, 214, 172, 143, 123, 107, + 96, 482, 321, 241, 193, 161, 138, 121, 107, 108, 72, 54, 44, 36, 31, + 27, 24, 162, 108, 81, 65, 54, 47, 41, 36, 216, 144, 108, 87, 72, 62, + 54, 48, 270, 180, 135, 108, 90, 78, 68, 60, 324, 216, 162, 130, 108, + 93, 81, 72, 378, 252, 189, 152, 126, 108, 95, 84, 432, 288, 216, 173, + 144, 124, 108, 96, 486, 324, 243, 195, 162, 139, 122, 108, 109, 73, + 55, 44, 37, 32, 28, 25, 164, 109, 82, 66, 55, 47, 41, 37, 218, 146, + 109, 88, 73, 63, 55, 49, 273, 182, 137, 109, 91, 78, 69, 61, 327, 218, + 164, 131, 109, 94, 82, 73, 382, 255, 191, 153, 128, 109, 96, 85, 436, + 291, 218, 175, 146, 125, 109, 97, 491, 327, 246, 197, 164, 141, 123, + 109, 110, 74, 55, 44, 37, 32, 28, 25, 165, 110, 83, 66, 55, 48, 42, + 37, 220, 147, 110, 88, 74, 63, 55, 49, 275, 184, 138, 110, 92, 79, 69, + 62, 330, 220, 165, 132, 110, 95, 83, 74, 385, 257, 193, 154, 129, 110, + 97, 86, 440, 294, 220, 176, 147, 126, 110, 98, 495, 330, 248, 198, 165, + 142, 124, 110, 111, 74, 56, 45, 37, 32, 28, 25, 167, 111, 84, 67, 56, + 48, 42, 37, 222, 148, 111, 89, 74, 64, 56, 50, 278, 185, 139, 111, 93, + 80, 70, 62, 333, 222, 167, 134, 111, 96, 84, 74, 389, 259, 195, 156, + 130, 111, 98, 87, 444, 296, 222, 178, 148, 127, 111, 99, 500, 333, 250, + 200, 167, 143, 125, 111, 112, 75, 56, 45, 38, 32, 28, 25, 168, 112, + 84, 68, 56, 48, 42, 38, 224, 150, 112, 90, 75, 64, 56, 50, 280, 187, + 140, 112, 94, 80, 70, 63, 336, 224, 168, 135, 112, 96, 84, 75, 392, + 262, 196, 157, 131, 112, 98, 88, 448, 299, 224, 180, 150, 128, 112, + 100, 504, 336, 252, 202, 168, 
144, 126, 112, 113, 76, 57, 46, 38, 33, + 29, 26, 170, 113, 85, 68, 57, 49, 43, 38, 226, 151, 113, 91, 76, 65, + 57, 51, 283, 189, 142, 113, 95, 81, 71, 63, 339, 226, 170, 136, 113, + 97, 85, 76, 396, 264, 198, 159, 132, 113, 99, 88, 452, 302, 226, 181, + 151, 130, 113, 101, 509, 339, 255, 204, 170, 146, 128, 113, 114, 76, + 57, 46, 38, 33, 29, 26, 171, 114, 86, 69, 57, 49, 43, 38, 228, 152, + 114, 92, 76, 66, 57, 51, 285, 190, 143, 114, 95, 82, 72, 64, 342, 228, + 171, 137, 114, 98, 86, 76, 399, 266, 200, 160, 133, 114, 100, 89, 456, + 304, 228, 183, 152, 131, 114, 102, 513, 342, 257, 206, 171, 147, 129, + 114, 115, 77, 58, 46, 39, 33, 29, 26, 173, 115, 87, 69, 58, 50, 44, + 39, 230, 154, 115, 92, 77, 66, 58, 52, 288, 192, 144, 115, 96, 83, 72, + 64, 345, 230, 173, 138, 115, 99, 87, 77, 403, 269, 202, 161, 135, 115, + 101, 90, 460, 307, 230, 184, 154, 132, 115, 103, 518, 345, 259, 207, + 173, 148, 130, 115, 116, 78, 58, 47, 39, 34, 29, 26, 174, 116, 87, 70, + 58, 50, 44, 39, 232, 155, 116, 93, 78, 67, 58, 52, 290, 194, 145, 116, + 97, 83, 73, 65, 348, 232, 174, 140, 116, 100, 87, 78, 406, 271, 203, + 163, 136, 116, 102, 91, 464, 310, 232, 186, 155, 133, 116, 104, 522, + 348, 261, 209, 174, 150, 131, 116, 117, 78, 59, 47, 39, 34, 30, 26, + 176, 117, 88, 71, 59, 51, 44, 39, 234, 156, 117, 94, 78, 67, 59, 52, + 293, 195, 147, 117, 98, 84, 74, 65, 351, 234, 176, 141, 117, 101, 88, + 78, 410, 273, 205, 164, 137, 117, 103, 91, 468, 312, 234, 188, 156, + 134, 117, 104, 527, 351, 264, 211, 176, 151, 132, 117, 118, 79, 59, + 48, 40, 34, 30, 27, 177, 118, 89, 71, 59, 51, 45, 40, 236, 158, 118, + 95, 79, 68, 59, 53, 295, 197, 148, 118, 99, 85, 74, 66, 354, 236, 177, + 142, 118, 102, 89, 79, 413, 276, 207, 166, 138, 118, 104, 92, 472, 315, + 236, 189, 158, 135, 118, 105, 531, 354, 266, 213, 177, 152, 133, 118, + 119, 80, 60, 48, 40, 34, 30, 27, 179, 119, 90, 72, 60, 51, 45, 40, 238, + 159, 119, 96, 80, 68, 60, 53, 298, 199, 149, 119, 100, 85, 75, 67, 357, + 238, 179, 143, 
119, 102, 90, 80, 417, 278, 209, 167, 139, 119, 105, + 93, 476, 318, 238, 191, 159, 136, 119, 106, 536, 357, 268, 215, 179, + 153, 134, 119] + + let t = arange(-50.0, 100.0) + var lengths = newSeq[int]() + for n in arange(100, 120): + for up in arange(2, 10): + for down in arange(2, 10): + let outp = resample(t[_..