Merge branch 'learning-engine-bit-acc' of https://github.com/lava-nc/lava into learning-engine-bit-acc
gkarray committed Nov 28, 2023
2 parents 3f0ac15 + 3a0960e commit 2ad3ed1
Showing 10 changed files with 654 additions and 54 deletions.
31 changes: 8 additions & 23 deletions .github/workflows/cd.yml
@@ -134,9 +134,12 @@ jobs:
name: Upload release artifact
runs-on: ubuntu-latest
if: github.triggering_actor == 'mgkwill' || github.triggering_actor == 'PhilippPlank' || github.triggering_actor == 'tim-shea'
environment:
name: pypi
url: https://pypi.org/p/lava-nc/
permissions:
contents: write
id-token: write
contents: write
needs: [build-artifacts, test-artifact-install, test-artifact-use]

steps:
@@ -182,30 +185,12 @@ jobs:
generateReleaseNotes: true
makeLatest: true

- name: Mint Github API token
id: mint-token
run: |
# retrieve the ambient OIDC token
resp=$(curl -H "Authorization: bearer $ACTIONS_ID_TOKEN_REQUEST_TOKEN" \
"$ACTIONS_ID_TOKEN_REQUEST_URL&audience=pypi")
oidc_token=$(jq '.value' <<< "${resp}")
# exchange the OIDC token for an API token
resp=$(curl -X POST https://pypi.org/_/oidc/github/mint-token -d "{\"token\": \"${oidc_token}\"}")
api_token=$(jq '.token' <<< "${resp}")
# mask the newly minted API token, so that we don't accidentally leak it
echo "::add-mask::${api_token}"
# see the next step in the workflow for an example of using this step output
echo "api-token=${api_token}" >> "${GITHUB_OUTPUT}"
- name: Publish to PyPI
if: steps.check-version.outputs.prerelease != 'true'
env:
POETRY_HTTP_BASIC_PYPI_USERNAME: __token__
POETRY_HTTP_BASIC_PYPI_PASSWORD: ${{ steps.mint-token.outputs.api-token }}
run: |
mkdir dist
cp lava* dist/.
poetry publish
- name: Publish package distributions to PyPI
if: steps.check-version.outputs.prerelease != 'true'
uses: pypa/gh-action-pypi-publish@release/v1
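
The deleted "Mint Github API token" step is no longer needed: with the `pypi` environment and `id-token: write` permission above, the `pypa/gh-action-pypi-publish` action performs the same OIDC exchange internally (PyPI "trusted publishing"). For reference, a hedged Python sketch of what the removed shell step did (`requests` is an assumed dependency; the endpoints are the ones from the removed script):

```python
import os
import requests  # assumed available on the runner

# Exchange the runner's ambient OIDC token for a short-lived PyPI API token,
# mirroring the removed curl/jq step.
resp = requests.get(
    os.environ["ACTIONS_ID_TOKEN_REQUEST_URL"] + "&audience=pypi",
    headers={"Authorization": "bearer "
             + os.environ["ACTIONS_ID_TOKEN_REQUEST_TOKEN"]},
)
oidc_token = resp.json()["value"]

resp = requests.post("https://pypi.org/_/oidc/github/mint-token",
                     json={"token": oidc_token})
api_token = resp.json()["token"]  # use as password with username __token__
```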
3 changes: 0 additions & 3 deletions .gitmodules
@@ -1,3 +0,0 @@
[submodule "docs"]
path = docs
url = https://github.com/lava-nc/lava-docs.git
29 changes: 23 additions & 6 deletions README.md
@@ -73,7 +73,7 @@ cd $HOME
curl -sSL https://install.python-poetry.org | python3 -
git clone [email protected]:lava-nc/lava.git
cd lava
git checkout v0.8.0
git checkout v0.9.0
./utils/githook/install-hook.sh
poetry config virtualenvs.in-project true
poetry install
@@ -90,7 +90,7 @@ pytest
cd $HOME
git clone [email protected]:lava-nc/lava.git
cd lava
git checkout v0.8.0
git checkout v0.9.0
python3 -m venv .venv
.venv\Scripts\activate
pip install -U pip
@@ -154,12 +154,28 @@ conda install -n lava -c intel numpy scipy
conda install -n lava -c conda-forge lava --freeze-installed
```

## Alternative: Installing Lava from PyPI

If you would like to install Lava as a user, you can install it via PyPI binaries.
Installing in this way does not give you access to run tests.

Open a Python terminal and run:

### Windows/MacOS/Linux

```bash
python -m venv .venv
source .venv/bin/activate ## Or Windows: .venv\Scripts\activate
pip install -U pip
pip install lava-nc
```

## Alternative: Installing Lava from binaries

If you only need to install Lava as a user in your python environment, we will
publish Lava releases via
You can also install Lava as a user with published Lava releases via
[GitHub Releases](https://github.com/lava-nc/lava/releases). Please download
the package and install it.
the package and install it with the following commands. Installing in this way does not
give you access to run tests.

Open a Python terminal and run:

@@ -169,7 +185,8 @@ Open a Python terminal and run:
python -m venv .venv
source .venv/bin/activate ## Or Windows: .venv\Scripts\activate
pip install -U pip
pip install lava-nc-0.6.0.tar.gz
# Substitute lava version needed for lava-nc-<version here>.tar.gz below
pip install lava-nc-0.9.0.tar.gz
```

## Linting, testing, documentation and packaging
1 change: 0 additions & 1 deletion docs
Submodule docs deleted from 9492b5
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -10,7 +10,7 @@ packages = [
{include = "tests"}
]
include = ["tutorials"]
version = "0.8.0.dev0"
version = "0.9.0.dev0"
readme = "README.md"
description = "A Software Framework for Neuromorphic Computing"
homepage = "https://lava-nc.org/"
2 changes: 1 addition & 1 deletion src/lava/magma/compiler/mapper.py
@@ -161,7 +161,7 @@ def map_cores(self, executable: Executable,
chips = [addr.physical_chip_id for addr in src_addr]
address.update(chips)
break
if len(address) > 1:
if len(address) > 1 and hasattr(var_model, "address"):
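# The multi-chip check now applies only to var models that expose an address.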
raise ValueError("Lava Compiler doesn't support port "
"splitting currently. MultiChip "
"Not Supported ")
15 changes: 9 additions & 6 deletions src/lava/proc/conv/utils.py
@@ -7,12 +7,15 @@
from scipy import signal
from enum import IntEnum, unique

try:
import torch
import torch.nn.functional as F
TORCH_IS_AVAILABLE = True
except ModuleNotFoundError:
TORCH_IS_AVAILABLE = False
# NOTE: It is known that torch calls inside a Lava PyProcess hang.
# Disabling torch usage inside a Lava CPU process until a fix is found.
# try:
# import torch
# import torch.nn.functional as F
# TORCH_IS_AVAILABLE = True
# except ModuleNotFoundError:
# TORCH_IS_AVAILABLE = False
TORCH_IS_AVAILABLE = False


@unique
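
With `TORCH_IS_AVAILABLE` pinned to `False`, every torch path in these utilities is skipped and the scipy implementation runs unconditionally. A minimal sketch of the guard-with-fallback pattern (illustrative only; not the exact signatures in `utils.py`):

```python
import numpy as np
from scipy import signal

TORCH_IS_AVAILABLE = False  # hard-disabled, as in the change above

def conv2d(x: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """2D convolution that would prefer torch but falls back to scipy."""
    if TORCH_IS_AVAILABLE:
        # The torch path would go here; unreachable while the import is disabled.
        raise NotImplementedError
    return signal.convolve2d(x, kernel, mode="same")

out = conv2d(np.ones((8, 8)), np.ones((3, 3)) / 9.0)  # simple box blur
```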
69 changes: 59 additions & 10 deletions src/lava/proc/io/encoder.py
@@ -58,10 +58,14 @@ class DeltaEncoder(AbstractProcess):
Shape of the sigma process.
vth: int or float
Threshold of the delta encoder.
spike_exp: int
spike_exp: Optional[int]
Scaling exponent with base 2 for the spike message.
Note: This should only be used for fixed point models.
Default is 0.
num_bits: Optional[int]
Precision for spike output. It is applied before spike_exp. If None,
precision is not enforced, i.e. the spike output is unbounded.
Default is None.
compression : Compression
Data compression mode, by default DENSE compression.
"""
@@ -71,6 +75,7 @@ def __init__(self,
shape: Tuple[int, ...],
vth: Union[int, float],
spike_exp: Optional[int] = 0,
num_bits: Optional[int] = None,
compression: Compression = Compression.DENSE) -> None:
super().__init__(shape=shape, vth=vth, cum_error=False,
spike_exp=spike_exp, state_exp=0)
@@ -84,6 +89,13 @@ def __init__(self,
self.act = Var(shape=shape, init=0)
self.residue = Var(shape=shape, init=0)
self.spike_exp = Var(shape=(1,), init=spike_exp)
if num_bits is not None:
a_min = -(1 << (num_bits - 1)) << spike_exp
a_max = ((1 << (num_bits - 1)) - 1) << spike_exp
else:
a_min = a_max = -1
self.a_min = Var(shape=(1,), init=a_min)
self.a_max = Var(shape=(1,), init=a_max)
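# Hedged worked example: with num_bits=8 and spike_exp=6,
# a_min = -(1 << 7) << 6 = -8192 and a_max = ((1 << 7) - 1) << 6 = 8128,
# so the clipped spike, scaled down by 2**spike_exp, fits in 8 signed bits.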
self.proc_params['compression'] = compression

@property
@@ -101,10 +113,14 @@ class AbstractPyDeltaEncoderModel(PyLoihiProcessModel):
act: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
residue: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
spike_exp: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=3)
a_min: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)
a_max: np.ndarray = LavaPyType(np.ndarray, np.int32, precision=24)

def encode_delta(self, act_new):
delta = act_new - self.act + self.residue
s_out = np.where(np.abs(delta) >= self.vth, delta, 0)
if self.a_max > 0:
s_out = np.clip(s_out, a_min=self.a_min, a_max=self.a_max)
self.residue = delta - s_out
self.act = act_new
return s_out
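
A standalone replay of `encode_delta` (a hedged sketch; `vth=10` with assumed `num_bits=5`, `spike_exp=0`, giving `a_min=-16`, `a_max=15`) shows that clipped-off magnitude is not lost: it stays in `residue` and is re-emitted on later steps:

```python
import numpy as np

vth, a_min, a_max = 10, -16, 15          # assumed: num_bits=5, spike_exp=0
act = np.zeros(4, dtype=int)
residue = np.zeros(4, dtype=int)

for act_new in [np.array([0, 12, 40, -3]), np.array([0, 12, 40, -3])]:
    delta = act_new - act + residue      # change since last send, plus carry
    s_out = np.where(np.abs(delta) >= vth, delta, 0)
    if a_max > 0:                        # clipping active only when num_bits was set
        s_out = np.clip(s_out, a_min, a_max)
    residue = delta - s_out              # clipped / sub-threshold part carries over
    act = act_new
    print(s_out)                         # [ 0 12 15  0] then [ 0  0 15  0]
```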
@@ -188,31 +204,39 @@ def encode_delta_sparse_8(self, s_out):
if len(idx) == 0:
idx = np.array([0])
data = np.array([0])
max_idx = 0xFF
if idx[0] > max_idx:
idx = np.concatenate([np.zeros(1, dtype=idx.dtype),
idx.flatten()])
data = np.concatenate([np.zeros(1, dtype=idx.dtype),
data.flatten()])

# 8 bit index encoding
idx[1:] = idx[1:] - idx[:-1] - 1 # default increment of 1
delta_idx = []
delta_data = []
max_idx = 0xFF
start = 0
for i in np.argwhere(idx >= max_idx)[:, 0]:
delta_idx.append((idx[start:i].flatten()) % max_idx)
delta_data.append(data[start:i].flatten())
delta_idx.append(np.array([max_idx - 1] * (idx[i] // max_idx)))
delta_data.append(np.array([0] * (idx[i] // max_idx)))
start = i
repeat_data = idx[i] // max_idx
num_repeats = repeat_data // max_idx
delta_idx.append(np.array([max_idx] * (num_repeats + 1)).flatten())
delta_data.append(np.array([max_idx] * num_repeats
+ [repeat_data % max_idx]).flatten())
delta_idx.append((idx[i:i + 1].flatten()) % max_idx)
delta_data.append(data[i:i + 1].flatten())
start = i + 1
delta_idx.append(idx[start:].flatten())
delta_data.append(data[start:].flatten())

if len(delta_idx) > 0:
delta_idx = np.concatenate(delta_idx)
delta_data = np.concatenate(delta_data)
else:
delta_idx = idx.flatten()
delta_data = data.flatten()

# Decoding
# idx = delta_idx
# idx[1:] += 1
# idx = np.cumsum(idx)
# data = delta_data
padded_idx = np.zeros(int(np.ceil(len(delta_idx) / 4) * 4))
padded_data = np.zeros(int(np.ceil(len(delta_data) / 4) * 4))

@@ -232,6 +256,31 @@
+ padded_data[0::4])
return packed_data, packed_idx

def decode_encode_delta_sparse_8(self, packed_data, packed_idx):
"""Python decoding script for delta_sparse_8 encoding. It is useful for
debug and verify the encoding."""
data_list = []
idx_list = []
count = 0
data = 0
idx = 0
for p_data, p_idx in zip(packed_data, packed_idx):
for _ in range(4):
data = p_data & 0xFF
idx_1 = p_idx & 0xFF
if idx_1 == 0xFF:
idx += data * 0xFF
data = 0
else:
idx += idx_1 + int(count > 0)
if data != 0:
data_list.append(data)
idx_list.append(idx)
p_data >>= 8
p_idx >>= 8
count += 1
return np.array(data_list), np.array(idx_list)

def run_spk(self):
self.s_out.send(self.data, self.idx)
# Receive synaptic input
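
To make the index scheme concrete: each entry stores the gap to the previous index, minus the default increment of 1, in its index byte; a gap of 255 or more is escaped with an index byte of `0xFF` whose data byte counts blocks of 255 to skip, mirroring what `decode_encode_delta_sparse_8` undoes after unpacking. A simplified, self-contained sketch of the same idea, without the 32-bit word packing or the leading-entry special case of the real implementation:

```python
import numpy as np

def encode_sparse_8(indices, values):
    """Sketch of the 8-bit delta-index encoding (simplified)."""
    idx_b, data_b = [], []
    prev = -1
    for i, v in zip(indices, values):
        d = i - prev - 1                 # gap minus the default increment of 1
        while d >= 0xFF:
            k = min(d // 0xFF, 0xFF)
            idx_b.append(0xFF)           # escape marker
            data_b.append(k)             # skip k blocks of 255 positions
            d -= k * 0xFF
        idx_b.append(d)
        data_b.append(v)
        prev = i
    return np.array(idx_b), np.array(data_b)

def decode_sparse_8(idx_b, data_b):
    """Inverse of the sketch above."""
    indices, values, pos = [], [], -1
    for ib, db in zip(idx_b, data_b):
        if ib == 0xFF:
            pos += db * 0xFF             # marker: advance without emitting
        else:
            pos += ib + 1
            indices.append(pos)
            values.append(db)
    return np.array(indices), np.array(values)

# A gap of 697 between the two spikes exercises the escape path.
i, v = decode_sparse_8(*encode_sparse_8([3, 700], [5, 9]))
assert (i == [3, 700]).all() and (v == [5, 9]).all()
```

The escape byte keeps every entry at 8 bits regardless of how sparse the vector is, at the cost of one extra entry per 255-sized block of gap.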