diff --git a/.github/workflows/docs_pages.yaml b/.github/workflows/docs_pages.yaml index 967b67b..32a0c4f 100644 --- a/.github/workflows/docs_pages.yaml +++ b/.github/workflows/docs_pages.yaml @@ -1,6 +1,5 @@ -name: Build python packages +name: Build Documentation -on: [push, pull_request] on: workflow_dispatch: branches: diff --git a/.github/workflows/python_packages.yaml b/.github/workflows/python_packages.yaml index c14c125..784b021 100644 --- a/.github/workflows/python_packages.yaml +++ b/.github/workflows/python_packages.yaml @@ -1,17 +1,6 @@ -# Found here: https://tomasfarias.dev/posts/sphinx-docs-with-poetry-and-github-pages/ +name: Build Python packages -name: Docs2Pages -on: - workflow_dispatch: - branches: - - main - push: - # tags: '*' - branches: - - main - pull_request: - branches: - - main +on: [push, pull_request] jobs: build-wheels: @@ -23,6 +12,10 @@ jobs: uses: actions/checkout@master with: fetch-depth: 0 + - name: 🐍 Install Python + uses: actions/setup-python@v4 + with: + python-version: 3.10.2 - name: 🦀 Install Rust uses: actions-rs/toolchain@v1 with: diff --git a/Readme.md b/Readme.md index 41239ef..b038a3b 100644 --- a/Readme.md +++ b/Readme.md @@ -46,50 +46,71 @@ efficiency and simplicity (both in terms of implementation and comprehensibility). Plus, it's my favorite and I'm doing active research with it. -* Numerical python extensions written in Rust -* Seamless integration via [PyO3](https://pyo3.rs/v0.16.1/) and [rust-numpy](https://docs.rs/numpy/0.7.0/numpy/) -* High-performance via [ndarray](https://github.com/rust-ndarray/ndarray)/[rayon](https://docs.rs/ndarray/0.13.1/ndarray/parallel/index.html), [tch-rs](https://github.com/LaurentMazare/tch-rs) ([PyTorch](https://pytorch.org/) bindings) -* Dependency management / publishing with [Poetry](https://python-poetry.org/docs/) and [Maturin](https://github.com/PyO3/maturin) -* [Monorepo](https://en.wikipedia.org/wiki/Monorepo) with [Cargo workspaces][(]()) -* Technical documentation / GitHub page with [Sphinx](https://www.sphinx-doc.org/en/master/) and [MyST](https://myst-parser.readthedocs.io/en/latest/sphinx/intro.html) -* Eventually, distributed computation (e.g., [actor model](https://en.wikipedia.org/wiki/Actor_model) or [timely - dataflow](https://timelydataflow.github.io/timely-dataflow/)) -* Hopefully, GUI application using [Tauri](https://tauri.studio/), [Angular](https://angular.io/) and [ThreeJS](https://threejs.org/) - -Feel free use as a basis for your own projects (MIT licensed). +The development explores: -## Development - -For the monorepo, there is a top-level python project defined using poetry. This project is -mainly for dependency locking, integration testing and the overall project's documentation. -The python module -build with Maturin is in a nested folder and it's project file is also created with poetry. -This is necessary as the top-level project can only add local packages that are either -created with Poetry or contain a `setup.py` file. +* Numerical python extensions written in Rust: Seamless integration via [PyO3](https://pyo3.rs/v0.16.1/) and [rust-numpy](https://docs.rs/numpy/0.7.0/numpy/). Dependency management / publishing with [Poetry](https://python-poetry.org/docs/) and [Maturin](https://github.com/PyO3/maturin) +* High-performance via [ndarray](https://github.com/rust-ndarray/ndarray)/[rayon](https://docs.rs/ndarray/0.13.1/ndarray/parallel/index.html). 
-While this setup supports a monorepo setup (and should support integration -testing on the imported local packages), there is another caveat. Building the python extension with -`pip` creates a temp directory which does not copy the local rust dependencies. Newer versions of `pip` -build within the tree, so this limitation can be avoided easily. Use the following commands to -setup the environment. +## The cerebral and potpourri Python packages -> :warning: We want to avoid creating a virtual environment for the nested packages. Work in -> a top-level shell instead. +The`potpourri` and `citrate` sub folders contain the Python bindings for +the two crates in `potpourri-rs` and `citrate-rs`. You can build them in a +similar fashion. ```sh -cd self-organization -pyenv shell 3.10.2 -poetry env use 3.10.2 -poetry run pip install -U pip # only required until pip>=22.0 becomes the default +cd citrate +# skip the next line if you are not using pyenv +pyenv shell 3.10.2 # replace with desired/installed version +# skip if you only have one version of python installed +poetry env use 3.10.2 # replace with desired/installed version +# Install dependencies poetry install -# to debug / develop the extension +# Or optionally, with jupyter +poetry install --with jupyter +# activate the virtual environment poetry shell -cd pysom +# install into the current environment maturin develop +# with optimizations (recommended) +maturin develop --release +# build an installable package +maturin build --release ``` To install the virtual environment as a kernel for jupyter: ```sh -python -m ipykernel install --user --name py310_selforganization --display-name "Python3.10 (self-organization)" +poetry add ipykernel +# Adjust names as suitable +python -m ipykernel install --user --name py320_citrate --display-name "Python3.10 (Citrate)" ``` + +## Development + +* Bulding documentation. We need extra headers to have formulas in the rustdoc + + ```sh + RUSTDOCFLAGS="--html-in-header ./static/header.html" cargo doc -p cerebral --no-deps + RUSTDOCFLAGS="--html-in-header ./static/header.html" cargo doc -F ndarray -p potpourri --no-deps + # Open (e.g. with Firefox) + firefox target/doc/citrate/index.html + firefox target/doc/potpourri/index.html + ``` + +* Benchmarks (currently only in Potpourri) + + ```sh + cd potpourri-rs + cargo bench -F ndarray + ``` + +* Running individual tests + + ```sh + cd potpourri-rs + cargo test -F ndarray -p potpourri test_multi_pass + ``` + +## License + +Published under the MIT license. diff --git a/cerebral-rs/src/adaptable/kohonen.rs b/cerebral-rs/src/adaptable/kohonen.rs index be1b943..e0ea57b 100644 --- a/cerebral-rs/src/adaptable/kohonen.rs +++ b/cerebral-rs/src/adaptable/kohonen.rs @@ -1,4 +1,7 @@ +//! Classical Kohonen networks #[derive(Clone)] + +/// Rrepresents the adaptivity as defined by Kohonen pub struct KohonenAdaptivity {} use ndarray::{prelude::*, Data}; @@ -45,12 +48,3 @@ where Box::new(self.clone()) // Forward to the derive(Clone) impl } } } -// #[cfg(test)] -// mod tests { - -// #[test] -// fn it_works() { -// let result = 2 + 2; -// assert_eq!(result, 4); -// } -// } diff --git a/cerebral-rs/src/adaptable/mod.rs b/cerebral-rs/src/adaptable/mod.rs index 771d9bc..3037785 100644 --- a/cerebral-rs/src/adaptable/mod.rs +++ b/cerebral-rs/src/adaptable/mod.rs @@ -1,3 +1,5 @@ +//! 
Properties of an adaptative self-organizing network + pub mod kohonen; pub use kohonen::KohonenAdaptivity; @@ -7,12 +9,13 @@ use crate::{Neural, Responsive}; pub type BoxedAdaptable = Box + Send>; -/// Interface for structures encapsulating algorithms for self-organization +/// Trait that update rules / adaptation to new data. pub trait Adaptable where R: Responsive, N: Neural, { + /// Adapt a self-organizing network to a single pattern / stimuus fn adapt( &mut self, neurons: &mut N, diff --git a/cerebral-rs/src/lib.rs b/cerebral-rs/src/lib.rs index f5128a1..34499e4 100644 --- a/cerebral-rs/src/lib.rs +++ b/cerebral-rs/src/lib.rs @@ -2,10 +2,12 @@ //! //! //! +//! ## About //! -//! Naming convenctions -//! * traits: Start with capital letter and are adjectives -//! * structs: Start with capital letter and are substantives +//! This crate provides a library for creating highly customizable +//! self-organizing networks. +//! +//! ## Example //! //! ```rust //! use som_rs::default::*; @@ -42,6 +44,74 @@ //! som.adapt(&training.row(0), 0.7, 0.7); //! } //! ``` +//! +//! ## (Naming) convenctions +//! +//! * traits: Start with capital letter and are adjectives +//! * structs: Start with capital letter and are substantives +//! +//! * All aspects of a model (update rules, training, etc.) are +//! represented by a struct that holds it parameters and provides +//! an implementation for the respective trait +//! +//! +//! ## Glossary / Synonyms / Definitions +//! +//! +//! * *Stimulus* (pattern, data point, feature), see [neural::Neural], [neural::NeuralLayer] +//! +//! Stimuli are data points that trigger a response from the neurons +//! of the self organizing network. In silico, this is simply a data +//! point from a data set. +//! +//! * *codebook* (neural weights / weight vector, feature, tuning), see [neural::Neural], [neural::NeuralLayer] +//! +//! Each neuron has a stimulus that it reacts stronger to than all other neurons. This +//! pattern is sometimes called weight (vector). +//! One can say that a neuron is tuned to its weight vector +//! All weights together form a matrix +//! which is called codebook. +//! +//! * *lateral space* (neural coordinate/space/lattice, latent space, hidden space), +//! see [topological::Topological], [topological::CartesianTopology], [neural::Neural], [neural::NeuralLayer] +//! +//! In a simplified reality, each neuron has a 2D coordinate marking its physcal +//! position on the cortex (the neural space). In a self-organizing network, the +//! topology and dimension of this space plays an important role +//! +//! * *Response*, see [responsive::Responsive], [responsive::CartesianResponsiveness] +//! +//! Presented a stimuli, each neuron of a self-organizing network +//! can produce a response. Within the network, exactly one neuron +//! can be triggered from an individual stimulus. In vivo, this neuron +//! suppresses possible responses from other neurons. +//! +//! +//! * *Best-matching unit* (BMU, winning neuron, competive learning), see [responsive::Responsive], [responsive::CartesianResponsiveness] +//! +//! The neuron the creates the stongest (and in vivo, fastest) response to +//! a stimulus. Being the fastest and strongest, it is often referred to as the +//! winning neuron and it suppresses the response of all other neurons. In silico +//! this is the neuron which neural weights resembles most the input given a +//! metric. +//! +//! +//! * *Adaptation* (update, update rule, tuning), see [adaptable::Adaptable], [adaptable::KohonenAdaptivity] +//! +//! 
The ability of a network to respond and tune to a stimuli. The BMU tunes itself further +//! to the stimulus but so do its adjacent neighbors but with less intensity diminishing with +//! the distance in the neural space +//! +//! * *Training* (learning), see [trainable::Trainable], [trainable::IncrementalLearning] +//! +//! In this context, learning is a process of adapting to multiple stimuli +//! and over an extended period of time possibly with multiple repetitions (i.e., epochs) +//! +//! * Self-organization, see [selforganizing::Selforganizing], [selforganizing::SelforganizingNetwork] +//! +//! A trait of a neural network that emerges when competive learning is implemented. +//! The network starts to map similar input stimuli to adjacent regions in the network +//! thus mapping the topology of input space onto its own neural lattice. pub mod neural; pub mod selforganizing; @@ -59,6 +129,7 @@ pub use responsive::{BoxedResponsive, Responsive}; pub use topological::{BoxedTopological, Topological}; pub use trainable::{BoxedTrainable, Trainable}; +/// Default exports. TODO: rename in prelude pub mod default { pub use crate::adaptable::KohonenAdaptivity; pub use crate::responsive::CartesianResponsiveness; @@ -68,13 +139,3 @@ pub mod default { // #[cfg(feature = "ndarray")] pub mod nd_tools; - -#[cfg(test)] -mod tests { - - #[test] - fn it_works() { - let result = 2 + 2; - assert_eq!(result, 4); - } -} diff --git a/cerebral-rs/src/main.rs b/cerebral-rs/src/main.rs deleted file mode 100644 index e740811..0000000 --- a/cerebral-rs/src/main.rs +++ /dev/null @@ -1,33 +0,0 @@ -// use ndarray::prelude::*; -// use ndarray_rand::rand::SeedableRng; -// use ndarray_rand::rand_distr::Uniform; -// use ndarray_rand::RandomExt; -// use rand_isaac::isaac64::Isaac64Rng; -// use som_rs::som::cartesian::CartesianGrid; -// use som_rs::som::SelfOrganizingMap; - -fn main() { - // let seed = 42; - // let mut rng = Isaac64Rng::seed_from_u64(seed); - - // let mut som = CartesianGrid::new((10, 10), 2, Uniform::new(0., 9.), &mut rng); - // // println!("{:?}", som); - - // let training = Array::random_using((5000, 2), Uniform::new(0., 9.), &mut rng); - // println!("{:?}", training); - - // som.batch(&training, None, None, None); - - // let trained = som.get_feature().view().into_shape((10, 10, 2)); - - // println!("{:?}", trained.unwrap().sum_axis(Axis(1))); - - // // Use this to test! - // // x = a[1:,:,:] - a[:-1,:,:] - // // y = a[:, 1:,:] - a[:, :-1,:] - // // x.mean(axis=0).mean(axis=0), x.std(axis=0).std(axis=0), y.mean(axis=0).mean(axis=0), y.std(axis=0).std(axis=0) - // // (array([-0.03560061, -0.90005994]), - // // array([0.05568317, 0.02590234]), - // // array([ 0.90100552, -0.01948181]), - // // array([0.01654651, 0.04664077])) -} diff --git a/cerebral-rs/src/nd_tools/mod.rs b/cerebral-rs/src/nd_tools/mod.rs index 610aaf0..a9a78ba 100644 --- a/cerebral-rs/src/nd_tools/mod.rs +++ b/cerebral-rs/src/nd_tools/mod.rs @@ -1,4 +1,4 @@ -//! This module defines extensions to the ndarray crate. General functions are defined in the top-level +//! Extensions to the ndarray crate. General functions are defined in the top-level pub mod ndindex; pub mod point_set; diff --git a/cerebral-rs/src/nd_tools/ndindex.rs b/cerebral-rs/src/nd_tools/ndindex.rs index 21a52e0..621268f 100644 --- a/cerebral-rs/src/nd_tools/ndindex.rs +++ b/cerebral-rs/src/nd_tools/ndindex.rs @@ -1,5 +1,8 @@ +//! 
Indices for ndarrays + use ndarray::{prelude::*, Shape}; +/// An iterator over the indices of an ndarray pub struct NdIndexIterator { shape: Shape, counter: usize, @@ -66,13 +69,3 @@ where result } - -#[cfg(test)] -mod tests { - - #[test] - fn it_works() { - let result = 2 + 2; - assert_eq!(result, 4); - } -} diff --git a/cerebral-rs/src/nd_tools/point_set.rs b/cerebral-rs/src/nd_tools/point_set.rs index 41e7b7f..2e9df73 100644 --- a/cerebral-rs/src/nd_tools/point_set.rs +++ b/cerebral-rs/src/nd_tools/point_set.rs @@ -65,13 +65,3 @@ where row_norm_l2(&self.get_differences(point)) } } - -#[cfg(test)] -mod tests { - - #[test] - fn it_works() { - let result = 2 + 2; - assert_eq!(result, 4); - } -} diff --git a/cerebral-rs/src/neural.rs b/cerebral-rs/src/neural.rs index 921b9f2..0f21fc6 100644 --- a/cerebral-rs/src/neural.rs +++ b/cerebral-rs/src/neural.rs @@ -1,3 +1,5 @@ +//! Properties of lateral and pattern spaces of a network + use ndarray::prelude::*; /// Provides access to the neurons of a neural network. @@ -8,7 +10,6 @@ use ndarray::prelude::*; /// * tuning patterns: List of patterns of the feature space /// each individual neural is tuned to /// Provides read-only, modifying and consuming access. - pub trait Neural { fn get_lateral(&self) -> &Array2; fn get_lateral_mut(&mut self) -> &mut Array2; @@ -18,9 +19,13 @@ pub trait Neural { fn set_patterns(&mut self, patterns: Array2); } +/// The default struct implementing [Neural]. +/// It represents a neural network with the patterns each neuron is tuned to +/// and the coordinates in the neural space to determine lateral connections #[derive(Default)] pub struct NeuralLayer { - /// Lateral layer that defines the topology. Can be coordinates or connections (depending on method). Row matrix. + /// Lateral layer that defines the topology. Can be coordinates or + /// connections (depending on method). Row matrix. pub lateral: Array2, /// Tuning Patterns the neurons. This is the codebook. Row matrix. pub patterns: Array2, @@ -59,13 +64,3 @@ impl Neural for NeuralLayer { self.patterns = patterns; } } - -#[cfg(test)] -mod tests { - - #[test] - fn it_works() { - let result = 2 + 2; - assert_eq!(result, 4); - } -} diff --git a/cerebral-rs/src/responsive.rs b/cerebral-rs/src/responsive.rs index 8fc818b..1f09d29 100644 --- a/cerebral-rs/src/responsive.rs +++ b/cerebral-rs/src/responsive.rs @@ -1,8 +1,10 @@ +//! Properties of a network of responsive neurons + use crate::Neural; +/// Boxed [Responsive] for dynamic creation (in a Python module for instance) pub type BoxedResponsive = Box + Send>; -// Tunable? /// Interface for structures encapsulating representations input patterns. See /// [neural tuning](https://en.wikipedia.org/wiki/Neuronal_tuning) pub trait Responsive @@ -42,6 +44,10 @@ where } } +// TODO consider renaming ot Euclidean + +/// Determines the Responsivenes in a Cartesian space with the +/// standard Euclidean distance. #[derive(Clone)] pub struct CartesianResponsiveness { // usize seed, @@ -69,13 +75,3 @@ where Box::new(self.clone()) // Forward to the derive(Clone) impl } } } - -#[cfg(test)] -mod tests { - - #[test] - fn it_works() { - let result = 2 + 2; - assert_eq!(result, 4); - } -} diff --git a/cerebral-rs/src/selforganizing.rs b/cerebral-rs/src/selforganizing.rs index 9d78baf..d347a18 100644 --- a/cerebral-rs/src/selforganizing.rs +++ b/cerebral-rs/src/selforganizing.rs @@ -1,8 +1,17 @@ +//! 
The model for a self-organizing neural network + use ndarray::{prelude::*, Data}; use crate::{Adaptable, Neural, NeuralLayer, Responsive, Topological, Trainable}; -/// Public trait that defines the concept of self organization +// TODO why a trait and not on the struct itself? + +/// Public trait for a model of self-organization. +/// It combines the methods of [Neural] (which it extends, i.e., +/// its implementations need to implement it as well), +/// [Adaptable], [Responsive], [Topological] and [Trainable] +/// but with different parameters. Implementations +/// are supposed to delegate calls to instances of said traits. pub trait Selforganizing: Neural { // Associated to topology @@ -46,7 +55,7 @@ pub trait Selforganizing: Neural { //-> Self } -/// Struct that implements structural composition +/// Default struct for self-organization pub struct SelforganizingNetwork where A: Adaptable, @@ -67,6 +76,8 @@ where pub training: L, // Box, } +// Implementation of neural + impl Selforganizing for SelforganizingNetwork where A: Adaptable, @@ -74,12 +85,11 @@ where R: Responsive, B: Trainable, { - fn init_lateral(&mut self) //-> Self - { + fn init_lateral(&mut self) { self.topology.init_lateral(&mut self.neurons); - // self } + // TODO think about removing if unused fn get_lateral_distance(&mut self, index: usize) -> Array2 { todo!() } @@ -97,7 +107,6 @@ where influence, rate, ); - //self } fn train(&mut self, patterns: &ArrayView2) { @@ -107,11 +116,11 @@ where &mut self.responsiveness, patterns, ); - // self } } -// #[cfg(feature = "ndarray")] +// Big TODO: why fullfilling [Neural]? Returning a reference to self.neural would do +// we would need a get_neural_mut and get_neural .. probably doesn't make much of a difference impl Neural for SelforganizingNetwork where @@ -120,6 +129,7 @@ where R: Responsive, B: Trainable, { + // TODO fill these fn get_lateral(&self) -> &Array2 { &self.neurons.lateral } @@ -144,8 +154,11 @@ where } } +// TODO do I need to implement the traits for this type? + pub type BoxedSelforganizing = Box; +// TODO what about this comment block // pub trait SelforganizingNeural: SelfOrganizing + Neural {} // impl SelforganizingNeural for NeuralLayer // where @@ -156,15 +169,3 @@ pub type BoxedSelforganizing = Box; // { // } // pub type BoxedSelforganizingNeural = Box; - -// #[cfg(not(feature = "ndarray"))] - -// #[cfg(test)] -// mod tests { - -// #[test] -// fn it_works() { -// let result = 2 + 2; -// assert_eq!(result, 4); -// } -// } diff --git a/cerebral-rs/src/topological.rs b/cerebral-rs/src/topological.rs index 6d1d42f..3a68f10 100644 --- a/cerebral-rs/src/topological.rs +++ b/cerebral-rs/src/topological.rs @@ -1,3 +1,5 @@ +//! Properties of the topology of a lateral space + use crate::nd_tools::ndindex::get_ndindex_array; use crate::Neural; use ndarray::{prelude::*, Shape}; @@ -44,6 +46,9 @@ where pub shape: Shape, } +// TODO maybe better grid? / lattice? + +/// Organizes neurons in a regular lattice with $n^d$ Neurons impl CartesianTopology where D: Dimension, @@ -77,13 +82,3 @@ where }); } } - -#[cfg(test)] -mod tests { - - #[test] - fn it_works() { - let result = 2 + 2; - assert_eq!(result, 4); - } -} diff --git a/cerebral-rs/src/trainable.rs b/cerebral-rs/src/trainable.rs index c7909ba..ba37262 100644 --- a/cerebral-rs/src/trainable.rs +++ b/cerebral-rs/src/trainable.rs @@ -1,3 +1,5 @@ +//! 
Properties of a training procedure + use core::panic; use crate::{Adaptable, Neural, Responsive}; diff --git a/cerebral/src/lib.rs b/cerebral/src/lib.rs index 00381dc..49ea2f5 100644 --- a/cerebral/src/lib.rs +++ b/cerebral/src/lib.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use std::sync::Mutex; +use numpy::{PyArray2, PyReadonlyArray1, PyReadonlyArray2, ToPyArray}; use pyo3::prelude::*; use pyo3::Python; @@ -143,74 +144,74 @@ impl PySelforganizingNetwork { }), }; } -} - -// #[pymethods] -// impl PySelforganizingNetwork { -// #[new] -// fn new(shape: (usize, usize), output_dim: usize /*, string->parameters */) -> Self { -// let seed = 42; -// let mut rng = Isaac64Rng::seed_from_u64(seed); -// let mut som = NeuralLayer { -// neurons: Neurons { -// lateral: Array::random_using(shape, Uniform::new(0., 10.), &mut rng), -// patterns: Array::random_using( -// (shape.0 * shape.1, output_dim), -// Uniform::new(0., 10.), -// &mut rng, -// ), -// ..Default::default() -// }, -// adaptivity: KohonenAdaptivity {}, -// topology: CartesianTopology::new((10, 10)), -// responsiveness: CartesianResponsiveness {}, -// training: BatchTraining { -// radii: (2.0, 0.2), -// rates: (0.7, 0.1), -// epochs: 1, -// }, -// }; - -// // println!("{}", som.neurons.lateral); - -// som.init_lateral(); -// PyNeuralLayer { -// __som: Box::new(som), -// } -// } - -// #[getter] -// fn get_feature<'py>(&self, py: Python<'py>) -> &'py PyArray2 { -// self.__som.get_patterns().to_pyarray(py) -// } + // } + // #[pymethods] + // impl PySelforganizingNetwork { + + // #[new] + // fn new(shape: (usize, usize), output_dim: usize /*, string->parameters */) -> Self { + // let seed = 42; + // let mut rng = Isaac64Rng::seed_from_u64(seed); + // let mut som = NeuralLayer { + // neurons: Neurons { + // lateral: Array::random_using(shape, Uniform::new(0., 10.), &mut rng), + // patterns: Array::random_using( + // (shape.0 * shape.1, output_dim), + // Uniform::new(0., 10.), + // &mut rng, + // ), + // ..Default::default() + // }, + // adaptivity: KohonenAdaptivity {}, + // topology: CartesianTopology::new((10, 10)), + // responsiveness: CartesianResponsiveness {}, + // training: BatchTraining { + // radii: (2.0, 0.2), + // rates: (0.7, 0.1), + // epochs: 1, + // }, + // }; + + // // println!("{}", som.neurons.lateral); + + // som.init_lateral(); + // PyNeuralLayer { + // __som: Box::new(som), + // } + // } + + #[getter] + fn get_feature<'py>(&self, py: Python<'py>) -> &'py PyArray2 { + self.__som.get_patterns().to_pyarray(py) + } -// fn get_best_matching(&mut self, feature: PyReadonlyArray1) -> usize { -// self.__som.get_best_matching(&feature.as_array()) -// } + fn get_best_matching(&mut self, feature: PyReadonlyArray1) -> usize { + self.__som.get_best_matching(&feature.as_array()) + } -// fn adapt(&mut self, feature: PyReadonlyArray1, influence: f64, rate: f64) { -// self.__som.adapt(&feature.as_array(), influence, rate) -// } -// fn batch( -// &mut self, -// features: PyReadonlyArray2, -// radii: Option<(f64, f64)>, -// rates: Option<(f64, f64)>, -// epochs: Option, -// ) { -// // if let Some(r) = rates { -// // self.__som.training.rates = r; -// // } -// // if let Some(e) = epochs { -// // self.__som.training.epochs = e; -// // } -// // if let Some(r) = radii { -// // self.__som.training.radii = r; -// // } -// self.__som.train(&features.as_array()) -// } -// } + fn adapt(&mut self, feature: PyReadonlyArray1, influence: f64, rate: f64) { + self.__som.adapt(&feature.as_array(), influence, rate) + } + fn batch( + &mut self, + features: 
PyReadonlyArray2, + radii: Option<(f64, f64)>, + rates: Option<(f64, f64)>, + epochs: Option, + ) { + // if let Some(r) = rates { + // self.__som.training.rates = r; + // } + // if let Some(e) = epochs { + // self.__som.training.epochs = e; + // } + // if let Some(r) = radii { + // self.__som.training.radii = r; + // } + self.__som.train(&features.as_array()) + } +} #[pymodule] #[pyo3(name = "cerebral")] diff --git a/potpourri-rs/Readme.md b/potpourri-rs/Readme.md index 3d77eca..f158d0f 100644 --- a/potpourri-rs/Readme.md +++ b/potpourri-rs/Readme.md @@ -20,4 +20,11 @@ the Expectation [Maximization (EM) algorithm](https://en.wikipedia.org/wiki/Expe ```sh sudo apt install libfontconfig-dev libopenblas-dev # on ubuntu cargo run --package potpourri --example generate_data --features ndarray -``` \ No newline at end of file +``` + +Building the documentation is a bit more complicated as we need a custom +HTML header in order to support mathematical formulas. + +```sh +RUSTDOCFLAGS="--html-in-header ./static/header.html" cargo doc -F ndarray -p potpourri --no-deps +``` diff --git a/potpourri-rs/src/backend/mod.rs b/potpourri-rs/src/backend/mod.rs index e68f3e3..6bd0eaa 100644 --- a/potpourri-rs/src/backend/mod.rs +++ b/potpourri-rs/src/backend/mod.rs @@ -1,3 +1,7 @@ +//! Computation backends such as [ndrray +//! ](https://docs.rs/ndarray/latest/ndarray/) or [ractor +//! ](https://github.com/slawlor/ractor) (more to follow) + #[cfg(feature = "ndarray")] pub mod ndarray; #[cfg(feature = "ractor")] diff --git a/potpourri-rs/src/backend/ndarray/finite.rs b/potpourri-rs/src/backend/ndarray/finite.rs index 4385dcd..85bdbe7 100644 --- a/potpourri-rs/src/backend/ndarray/finite.rs +++ b/potpourri-rs/src/backend/ndarray/finite.rs @@ -1,3 +1,6 @@ +//! Contains the implementation of the model for hidden states +//! of a finite mixture. + use ndarray::prelude::*; use crate::{AvgLLH, Error, Latent, Parametrizable}; @@ -6,6 +9,8 @@ use super::utils::generate_random_expections; #[derive(Clone, Debug)] + +/// Represents a finite set of hiddenstates (or components in a mixture model) pub struct Finite { // pub dimension: i32, pub prior: Option, @@ -14,6 +19,7 @@ pub struct Finite { } impl Finite { + /// Convenience function pub fn new(prior: Option) -> Finite { // let prior = prior.unwrap_or(1.0); Finite { @@ -67,15 +73,17 @@ impl Parametrizable for Finite { Err(Error::ForbiddenCode) } - fn update( - &mut self, - sufficient_statistics: &Self::SufficientStatistics, - weight: f64, - ) -> Result<(), Error> { - self.sufficient_statistics = - &self.sufficient_statistics * (1.0 - weight) + sufficient_statistics * weight; - Ok(()) - } + + // FIXME remove + // fn update( + // &mut self, + // sufficient_statistics: &Self::SufficientStatistics, + // weight: f64, + // ) -> Result<(), Error> { + // self.sufficient_statistics = + // &self.sufficient_statistics * (1.0 - weight) + sufficient_statistics * weight; + // Ok(()) + // } fn merge( sufficient_statistics: &[&Self::SufficientStatistics], @@ -95,23 +103,18 @@ impl Parametrizable for Finite { } use tracing::info; -/// FIXME compute the likelihood.. is it just the sum of all? 
impl Latent for Finite { - // fn join( - // likelihood_a: &::Likelihood, - // likelihood_b: &::Likelihood, - // ) -> Result<(::Likelihood, f64), Error> { - // } + fn expect( &self, data: &::DataIn<'_>, - likelihood_b: &::Likelihood, + log_likelihood_b: &::Likelihood, ) -> Result<(::Likelihood, AvgLLH), Error> { - let likelihood_a = Parametrizable::expect(self, data)?.0; - info!(%likelihood_a); - let log_weighted = likelihood_a + likelihood_b; // n x k - info!(%likelihood_b); + let log_likelihood_a = Parametrizable::expect(self, data)?.0; + info!(%log_likelihood_a); + let log_weighted = log_likelihood_a + log_likelihood_b; // n x k + info!(%log_likelihood_b); let weighted = log_weighted.mapv(|x| x.exp()); // sum? let s = weighted.shape(); info!("{}x{}", s[0], s[1]); @@ -127,14 +130,3 @@ impl Latent for Finite { )) } } - -// #[cfg(test)] -// mod tests { -// use super::*; - -// #[test] -// fn it_works() { -// // let result = add(2, 2); -// // assert_eq!(result, 4); -// } -// } diff --git a/potpourri-rs/src/backend/ndarray/gaussian.rs b/potpourri-rs/src/backend/ndarray/gaussian.rs index aa06e64..a284032 100644 --- a/potpourri-rs/src/backend/ndarray/gaussian.rs +++ b/potpourri-rs/src/backend/ndarray/gaussian.rs @@ -19,11 +19,11 @@ use super::utils::{ /// the data to maximize the parameters. It is a triple of /// arrays (names as in [Kimura et al.](https://link.springer.com/article/10.1007/s10044-011-0256-4)): /// -/// $$ \begin{aligned} +/// \[ \begin{aligned} /// a_j &= \sum_i^n r_{ij},&& (k) \\\\ /// b_j &= \sum_i^n r_{ij} \cdot x_i ,&& (k \times d) \\\\ /// c_j &= \sum_i^n r_{ij} \cdot x_i^T\cdot x_i, &&(k \times d \times d) \\\\ -/// \end{aligned} $$ +/// \end{aligned} \] #[derive(Default, Debug, Clone)] pub struct Gaussian { /// The mean values, $ k\times d $ @@ -32,11 +32,13 @@ pub struct Gaussian { pub covariances: Array3, /// The precision matrices (inverted coariances), $(k\times d\times d)$ pub precisions: Array3, + /// Cached values for computing the likelihoods of the Gaussians pub summands: Array1, - sufficient_statistics: ::SufficientStatistics, + // sufficient_statistics: ::SufficientStatistics, } impl Gaussian { + /// Convenience method pub fn new() -> Gaussian { Gaussian { ..Default::default() @@ -162,20 +164,21 @@ impl Parametrizable for Gaussian { Ok(()) } - fn update( - &mut self, - sufficient_statistics: &Self::SufficientStatistics, - weight: f64, - ) -> Result<(), Error> { - // check values of weight - self.sufficient_statistics.0 = - &self.sufficient_statistics.0 * (1.0 - weight) + &sufficient_statistics.0 * weight; - self.sufficient_statistics.1 = - &self.sufficient_statistics.1 * (1.0 - weight) + &sufficient_statistics.1 * weight; - self.sufficient_statistics.2 = - &self.sufficient_statistics.2 * (1.0 - weight) + &sufficient_statistics.2 * weight; - Ok(()) - } + // FIXME: remove + // fn update( + // &mut self, + // sufficient_statistics: &Self::SufficientStatistics, + // weight: f64, + // ) -> Result<(), Error> { + // // check values of weight + // self.sufficient_statistics.0 = + // &self.sufficient_statistics.0 * (1.0 - weight) + &sufficient_statistics.0 * weight; + // self.sufficient_statistics.1 = + // &self.sufficient_statistics.1 * (1.0 - weight) + &sufficient_statistics.1 * weight; + // self.sufficient_statistics.2 = + // &self.sufficient_statistics.2 * (1.0 - weight) + &sufficient_statistics.2 * weight; + // Ok(()) + // } fn merge( sufficient_statistics: &[&Self::SufficientStatistics], @@ -325,13 +328,4 @@ mod tests { // This should fail--we ignored much of the 
data assert!(covariances.abs_diff_eq(&gaussian.covariances, 1e-3)); } - - // #[traced_test] - // #[test] - // fn how_to_deal_with_zero_dim() { - // let mut x = arr0(0.0); - // x.assign(&arr0(1.2)); - // let y = x.get(()).unwrap(); - // x[()] = 4.0; - // } } diff --git a/potpourri-rs/src/errors.rs b/potpourri-rs/src/errors.rs index b124b8a..6924412 100644 --- a/potpourri-rs/src/errors.rs +++ b/potpourri-rs/src/errors.rs @@ -1,4 +1,9 @@ +//! Logic for error handling. + #[derive(thiserror::Error, Debug, Clone)] + +/// The one error type of this crate. +/// Errors from the backends are mapped in the respective modules pub enum Error { #[error("Multiple iterations overwrite a fitted model: {n_init:?}, {fitted:?}")] ParameterError { n_init: usize, fitted: bool }, diff --git a/potpourri-rs/src/lib.rs b/potpourri-rs/src/lib.rs index ffba46e..c56d49d 100644 --- a/potpourri-rs/src/lib.rs +++ b/potpourri-rs/src/lib.rs @@ -8,9 +8,20 @@ //! such as adding parallelization on clusters and exploring new models //! //! Conventions: -//! * Traits: Capital letters and CamelCase, adjectives used as nouns that indicate a cabability. -//! * Structs: Capital letters and CamelCase, nouns describing things and concepts -//! * methods/functions: snake_case and imperatives or short, discriptive imperative clauses +//! * Traits: Capital letters and CamelCase, adjectives used as nouns that +//! indicate a cabability. +//! * Structs: Capital letters and CamelCase, nouns describing things and +//! concepts +//! * methods/functions: snake_case and imperatives or short, discriptive +//! imperative clauses +//! +//! Concepts: +//! * Avoid making assumptions about the chosen numerical framework early. All +//! calculations are reserved for the actual models or implementations of +//! distributions (in case of the mixture model). This is achieved by +//! extensively using +//! [Generic Assotiate Types +//! (GAT)](https://blog.rust-lang.org/2022/10/28/gats-stabilization.html) pub mod backend; pub mod errors; @@ -21,81 +32,105 @@ use errors::Error; pub use mixture::{Latent, Mixable, Mixture}; pub use model::Model; -/// Average log-likelihood. Used to meature convergence +/// Average log-likelihood used to meature convergence. This is a new type for the inbuilt +/// `f64` datatype +/// ([new type idiom](https://doc.rust-lang.org/rust-by-example/generics/new_types.html)). #[derive(Debug, Clone)] pub struct AvgLLH(f64); +/// The main trait all models (such as the basic Mixture Models or Hidden Markov +/// Models) need to implement. It represents only the logic and parameters of +/// a machine learning model (which it forms together with an implementation of [Learning]). +/// A parametrizable model implements the bare +/// mathematics and must be associated with a struct implements the [Learning] +/// trait and orchestrates the EM algorithm. pub trait Parametrizable { + + /// Sufficient statistics contain all relevant information of a dataset + /// (or a part thereof) to compute all model parameters. Models are required + /// to be able to join a pair of sufficient statistics into a single one + /// for distributed and incremental learning. type SufficientStatistics: Send + Sync; + + /// The likelihoods of the hidden states for all data points. type Likelihood; + + /// The type of input data. A + /// [GAT]((https://blog.rust-lang.org/2022/10/28/gats-stabilization.html)) + /// to allow for references and array views. type DataIn<'a>: Sync; + + /// The data type of predictions. 
Typically (but not necessarily), + /// these are unsigned integer arrays + /// for classification tasks and floats for regressions. type DataOut; - // weights: Self::DataIn<'_>, + // TODO check whether this is clear later /// Note that for `Mixables`, this is the log-likelihood - /// The E-Step. Computes the likelihood for each component in the mixture - /// Note that for `Mixables`, this is the log-likelihood - fn expect(&self, data: &Self::DataIn<'_>) -> Result<(Self::Likelihood, AvgLLH), Error>; - // Consider combining `compute` and `maximize` – no that is a bad idea - // &mut self, - // store: Option, // consider removing. The parent class should take care of that + /// The Expectation or E-Step of the EM algorithm. The model computes + /// the likelihood of the hidden states for each data point. The result + /// is often called *Responsibility Matrix* + fn expect(&self, data: &Self::DataIn<'_>) -> Result<(Self::Likelihood, AvgLLH), Error>; - /// Computes the sufficient statistics from the responsibility matrix. The - /// Optionally, stores the - /// sufficient statistics (for incremental learning and store.restore functionality) - /// can be disabled for performance (defaults to `True`) + /// Computes the sufficient statistics from the responsibility matrix (the result of the + /// [E-step](Parametrizable::expect)). fn compute( &self, data: &Self::DataIn<'_>, responsibilities: &Self::Likelihood, ) -> Result; - /// Maximize the model parameters from + /// Maximize the model parameters from a sufficient statistics (the return + /// of the [compute](Parametrizable::compute) method. fn maximize(&mut self, sufficient_statistics: &Self::SufficientStatistics) -> Result<(), Error>; + /// The predict method produces a response to a data set once the model + /// has been trained. Typically, the response depends on the application + /// as either a regression or a classification task (see [DataOut](Parametrizable::DataOut)) fn predict( &self, // responsibilities: &Self::DataIn<'_>, data: &Self::DataIn<'_>, ) -> Result; + // FIMXE: Check whether we need update at all (very unlikely as the Parametrizables and Mixables don't expose their sufficient statistics). /// Update the stored sufficient statistics (for incremental learning) /// Weights is a tuple (a float should suffice, if summing to one) - fn update( - &mut self, - sufficient_statistics: &Self::SufficientStatistics, - weight: f64, - ) -> Result<(), Error>; + // fn update( + // &mut self, + // sufficient_statistics: &Self::SufficientStatistics, + // weight: f64, + // ) -> Result<(), Error>; - /// merge multiple sufficient statistics into one. + /// Merge multiple [sufficient statistics](Parametrizable::SufficientStatistics) into one. fn merge( sufficient_statistics: &[&Self::SufficientStatistics], weights: &[f64], ) -> Result; - /// Generate a random expectation. Used as an initalization. It is recommended - /// to draw the expectations from a univorm Dirichlet distribution. - /// Note: This works better than an initialization method, because the layers - /// such as the `Probabilistic` trait don't need to implement backend-specific - /// random samplers. + // TODO: find citation! + /// Generate a random expectation / responsibility matrix (see [E-step](Parametrizable::expect)). + /// Used as an initalization. It is recommended + /// to draw the expectations from a uniform Dirichlet distribution. 
+ /// + /// This kind of initialization is very effective (citation needed) despite its + /// simplicity and easy to implement (typically just calling a Dirichlet RNG from + /// the computation backend). fn expect_rand(&self, _data: &Self::DataIn<'_>, _k: usize) -> Result { todo!() } } -/// Probabilistic mixables should implement this trait - -/// A mixture model has a discrete and unobservable variable (i.e., latent) variable -/// associated with each data point. It can be interpreted as a pointer to the component -/// of a mixture generated the sample. This component computes weights the components -/// in the mixture, that is, the probability for each component that the next sample will -/// be drawn from it. In case of non-probabilistic models (k-mm and SOM) this is irrelevant. +/// Simple trait for an implementation that orchestrates +/// learning a [parametrizable model](Parametrizable) pub trait Learning { type DataIn<'a>; type DataOut; + /// Starts a training fn fit(&mut self, data: &Self::DataIn<'_>) -> Result<(), Error>; + /// Generate a response to a data set after training fn predict(&self, data: &Self::DataIn<'_>) -> Result; } diff --git a/potpourri-rs/src/mixture.rs b/potpourri-rs/src/mixture.rs index b98878f..3d62544 100644 --- a/potpourri-rs/src/mixture.rs +++ b/potpourri-rs/src/mixture.rs @@ -1,27 +1,60 @@ -// Todo: move outside of the backend! +//! Traits and implementation required for +//! [Mixture Models](https://en.wikipedia.org/wiki/Mixture_model) such +//! as the famous [Gaussian Mixture Model +//! ](https://en.wikipedia.org/wiki/Mixture_model#Gaussian_mixture_model) use crate::{AvgLLH, Error, Parametrizable}; -/// An additional interface for `Mixables` that can be used as latent states. -/// These can be categorical distributions, with or without finite Dirichlet -/// or infinite Dirichlet process priors. The `Mixables` are here used not -/// multiple components but only as one distribution of the latent states. - +/// Represents the hidden (aka. latent) states. +/// Implementations might be categorical distributions, with or without finite Dirichlet +/// or infinite Dirichlet process priors. pub trait Latent where T: Parametrizable, { + /// Combines the likelihood of the data for each components with + /// the likelihood of the components themselves. + /// + /// Warning: Returns *loglikelihoods* and expects *loglikelihoods* + /// from the components, see [Mixable]. + /// + /// \[ + /// \begin{aligned} + /// \forall m \in M, \bm x \in X&: \\\\ + /// \bar \gamma_{\bm x,m} &= \pi_m \cdot p(\bm x | m) \\\\ + /// \gamma_{\bm x,m} &= \frac{\bar \gamma_{\bm x, m}}{ \sum_{m \in M} \bar \gamma_{\bm x, m} } \\\\ + /// \Gamma &= ( \gamma_{\bm x,m} )_{\bm x,m} \in \mathbb R^{n \times k}, \quad n=|X| \wedge m=|M| + /// \end{aligned} + /// \] + /// + /// + /// + /// where $M$ are the components and $X$ is the data set fn expect( &self, data: &T::DataIn<'_>, - likelihood: &T::Likelihood, + log_likelihood: &T::Likelihood, ) -> Result<(T::Likelihood, AvgLLH), Error>; } + +// TODO: At the end, find out of + +/// This trait "extends" the [Parametrizable] trait. +/// Typically, implementations would not implement [Parametrizable::predict] +/// (i.e., adding a `todo` or `panic` macro). And implement this function instead. +/// This is because the likelihood is required as an additional function argument +/// (especially for regression) as mixtures are orchestrated by an implementation +/// of [Mixture]. 
+/// +/// **Warning:** `Mixables` have to compute the log-likelihood in the expectation step! pub trait Mixable where T: Parametrizable, { + /// Predict in dependence of the likelihood (*warning* not log likelihood) + /// of the latent model. Needs to be implemented for each mixable + /// to allow for different tasks such as classifications and regression. fn predict( &self, latent_likelihood: T::Likelihood, @@ -29,17 +62,21 @@ where ) -> Result; } -/// This trait represents the traditional mixture models with an underlying +/// This trait represents the traditional [mixture models +/// ](https://en.wikipedia.org/wiki/Mixture_model) with an underlying /// probability density (as opposed to k-means or SOM). They have a soft /// assignment, that is, for each sample and each component the likelihood /// is computed that the sample belongs to the component. The alternative /// is that a sample can only belong to one of the compent alone. /// +/// This trait is another level of abstraction which contains all additional logic +/// related for mixture models which is still agnostic of the computation framework. +/// The actual densities are implementations of [Parametrizable] and [Mixable]. +/// /// Warning: we don't enforce trait bounds here due to a possible /// [compiler bug](https://github.com/rust-lang/rust/issues/110136) /// -/// Warning: `Mixables` have to compute the log-likelihood in the expectation step! -/// +/// **Warning:** `Mixables` have to compute the log-likelihood in the expectation step! #[derive(Clone, Debug)] pub struct Mixture where @@ -63,6 +100,7 @@ where // for<'a> ::DataIn<'a>: Into>, L: Parametrizable + Latent, { + /// Convenience function pub fn new(mixables: T, latent: L) -> Self { Mixture { latent: latent, @@ -121,15 +159,16 @@ where Mixable::predict(&self.mixables, likelihood, data) } - fn update( - &mut self, - sufficient_statistics: &Self::SufficientStatistics, - weight: f64, - ) -> Result<(), Error> { - self.latent.update(&sufficient_statistics.0, weight)?; - self.mixables.update(&sufficient_statistics.1, weight)?; - Ok(()) - } + // FIXME: Remove + // fn update( + // &mut self, + // sufficient_statistics: &Self::SufficientStatistics, + // weight: f64, + // ) -> Result<(), Error> { + // self.latent.update(&sufficient_statistics.0, weight)?; + // self.mixables.update(&sufficient_statistics.1, weight)?; + // Ok(()) + // } fn merge( sufficient_statistics: &[&Self::SufficientStatistics], diff --git a/potpourri-rs/src/model.rs b/potpourri-rs/src/model.rs index 1ef463f..8c7016f 100644 --- a/potpourri-rs/src/model.rs +++ b/potpourri-rs/src/model.rs @@ -1,26 +1,48 @@ +//! This module defines the traits and implementation required +//! to orchestrate the learning with the Expectation maximization + use crate::{AvgLLH, Error, Learning, Parametrizable}; use rayon::prelude::*; use tracing::info; -/// The basis struct to use for models + +/// The basis structure for model. It hold +/// the configuration of the learning process +/// and a reference to the [Parametrizable]. +/// Its implementation *orchestrates* the learning, +/// that is, implements the logic that is common +/// for all methods learned with Expectation Maximization. #[derive(Debug)] pub struct Model where T: Parametrizable, { + /// Reference to a [Parametrizable] instance pub parametrizable: T, - pub n_components: usize, + /// Number of hidden states (or components in case of a mixture). + /// This value is necessary for the random initialization. 
+ pub n_hidden: usize, + /// Limits the number of iterations pub max_iterations: usize, + /// Expectation Maximization starts with a random initialization and + /// can run into local minima. To avoid this problem, multiple models + /// (with different intializations) can be trained. The best model + /// (w.r.t. the likelihood) is then chosen. pub n_init: usize, + /// TBD not supported yet. pub incremental: bool, + /// TBD not supported yet. pub incremental_weight: f64, + /// Tolerance for when the loglikelihood between iterations are considered + /// equal and the algorithm terminates. pub tol: f64, - // last_sufficient_statistics: Option, - // pub initialization: Option, + /// Information about the outcome of the last training. pub info: ModelInfo, } + +/// Information about the outcome of the last training #[derive(Debug)] pub struct ModelInfo { pub fitted: bool, @@ -34,35 +56,39 @@ impl Model where T: Parametrizable + Sync, { + /// Convenience function to generate new model. pub fn new( parametrizable: T, - n_components: usize, + n_hidden: usize, max_iterations: usize, n_init: usize, incremental: bool, ) -> Model { Model { parametrizable, - n_components, + n_hidden, max_iterations, n_init, incremental, incremental_weight: 0.8, tol: 1e-6, - // last_sufficient_statistics: None, - // initialization: None, info: ModelInfo { fitted: false, converged: false, n_iterations: 0, likelihood: AvgLLH(f64::NAN), - // initialized: false, }, } } } -/// Intermediate result from a single EM training (better than just using tuples) + + +/// Abstract data type to store intermediate results from a single EM training (better than just using tuples). +/// Depending on [Model::n_init] multiple trainings can occur and the best of them has to be chosen. +/// This struct stores the likelihood required for determining the best solution and the +/// [sufficient statistics](Parametrizable::SufficientStatistics) required to reconstruct +/// the model parameters. #[derive(Debug)] struct Intermediate { sufficient_statistics: T::SufficientStatistics, @@ -91,7 +117,7 @@ impl Model where T: Parametrizable + Sync, { - /// Single EM iteration. Consumes a copy of a parametrizable + /// Implements a Single EM training. Warning: Consumes a copy of a parametrizable fn single_fit( &self, mut parametrizable: T, @@ -102,7 +128,7 @@ where // use random sufficient statistics for variable initialization let mut sufficient_statistics = self.parametrizable.compute( &data, - ¶metrizable.expect_rand(&data, self.n_components)?, + ¶metrizable.expect_rand(&data, self.n_hidden)?, )?; // .. and optional model initialization diff --git a/static/header.html b/static/header.html index 3258549..d279970 100644 --- a/static/header.html +++ b/static/header.html @@ -14,6 +14,6 @@ + onload="renderMathInElement(document.body, {delimiters: [{left: '$', right: '$', display: false}, {left: '\[', right: '\]', display: true}]} );"> - \ No newline at end of file + \ No newline at end of file
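
The glossary added to `cerebral-rs/src/lib.rs` defines the best-matching unit as the neuron whose codebook row lies closest to the stimulus, which is what `CartesianResponsiveness` is described to compute with the Euclidean distance. A minimal, self-contained sketch of that lookup in plain `ndarray` (illustration only, not the crate's implementation; the function name is made up):

```rust
// Illustrative sketch: find the best-matching unit (BMU) from the glossary,
// i.e. the codebook row with the smallest Euclidean distance to the stimulus.
// This mirrors what `CartesianResponsiveness` is documented to do; it is not
// the crate's actual code.
use ndarray::prelude::*;

fn best_matching_unit(codebook: &Array2<f64>, stimulus: &ArrayView1<f64>) -> usize {
    let mut winner = 0;
    let mut best = f64::INFINITY;
    for (index, pattern) in codebook.outer_iter().enumerate() {
        // squared Euclidean distance between this neuron's pattern and the stimulus
        let distance = (&pattern - stimulus).mapv(|d| d * d).sum();
        if distance < best {
            best = distance;
            winner = index;
        }
    }
    winner
}
```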
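
`Latent::expect` in `potpourri-rs/src/mixture.rs` documents how the per-component log-likelihoods are combined with those of the latent states and then normalized row-wise. An illustrative `ndarray` version of that formula (a real implementation would guard against overflow with log-sum-exp; this is not the crate's code):

```rust
// Illustrative only: responsibilities gamma as documented for `Latent::expect`.
// Add component and latent log-likelihoods, exponentiate, normalize each row.
use ndarray::prelude::*;

fn responsibilities(
    component_log_likelihood: &Array2<f64>, // n x k
    latent_log_likelihood: &Array2<f64>,    // n x k (or broadcastable)
) -> Array2<f64> {
    let weighted = (component_log_likelihood + latent_log_likelihood).mapv(f64::exp);
    // row sums, kept as an n x 1 column so the division below broadcasts over k
    let row_sums = weighted.sum_axis(Axis(1)).insert_axis(Axis(1));
    &weighted / &row_sums // gamma: each row sums to one
}
```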
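
The `Parametrizable` documentation describes one EM pass as expectation (`expect`), accumulation of sufficient statistics (`compute`) and maximization (`maximize`); `Model` repeats such passes until `tol` or `max_iterations` is reached. A hedged sketch of a single pass written against only the trait added in `potpourri-rs/src/lib.rs` (the free function and its generic bound are assumptions about how the trait would be consumed, not crate code):

```rust
// Sketch of one EM pass: E-step, sufficient statistics, M-step.
use potpourri::{errors::Error, AvgLLH, Parametrizable};

fn em_step<'a, P: Parametrizable>(
    model: &mut P,
    data: &P::DataIn<'a>,
) -> Result<AvgLLH, Error> {
    // E-step: responsibilities of the hidden states plus the average log-likelihood
    let (responsibilities, avg_llh) = model.expect(data)?;
    // accumulate the sufficient statistics for the whole data set
    let statistics = model.compute(data, &responsibilities)?;
    // M-step: recompute the model parameters from the sufficient statistics
    model.maximize(&statistics)?;
    Ok(avg_llh)
}
```

As noted in `Model::single_fit` in the diff, the very first responsibilities of a training run come from `expect_rand` rather than `expect`, which provides the random (Dirichlet) initialization.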
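
The same trait requires `merge` so that sufficient statistics computed on separate data shards can be combined for distributed or incremental learning. A sketch under the assumption of two equally weighted shards (in practice the weights would reflect the shard sizes):

```rust
// Sketch of the distributed pattern described in the trait documentation:
// per-shard sufficient statistics are merged before a single M-step.
use potpourri::{errors::Error, Parametrizable};

fn fit_two_shards<'a, P: Parametrizable>(
    model: &mut P,
    shard_a: &P::DataIn<'a>,
    shard_b: &P::DataIn<'a>,
) -> Result<(), Error> {
    let (responsibilities_a, _) = model.expect(shard_a)?;
    let (responsibilities_b, _) = model.expect(shard_b)?;
    let statistics_a = model.compute(shard_a, &responsibilities_a)?;
    let statistics_b = model.compute(shard_b, &responsibilities_b)?;
    // equal weighting assumed here for illustration
    let merged = P::merge(&[&statistics_a, &statistics_b], &[0.5, 0.5])?;
    model.maximize(&merged)
}
```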