
update package details
* update setup
* update CI, tox
* reformat version
Borda committed May 1, 2019
1 parent 02e36f8 commit f2f7e45
Showing 14 changed files with 104 additions and 54 deletions.
4 changes: 1 addition & 3 deletions .shippable.yml
@@ -1,5 +1,3 @@
# vim ft=yaml

# After changing this file, check it on:
# http://yaml-online-parser.appspot.com/

@@ -32,7 +30,7 @@ before_install:

install:
- pip install -r requirements.txt
- pip install nose coverage codecov pytest codacy-coverage
- pip install -r requirements-dev.txt
- pip --version ; pip freeze

script:
10 changes: 8 additions & 2 deletions .travis.yml
@@ -27,14 +27,20 @@ matrix:
- python: 3.7
env: TOXENV=py37

# See http://docs.travis-ci.com/user/caching/#pip-cache
cache: pip

install:
- sudo apt-get install python-opencv openslide-tools
- pip install -r requirements.txt
- pip install -r requirements-dev.txt
- pip install tox
- pip --version ; pip freeze
- pip --version ; pip list

script:
- tox
# integration
- tox --sitepackages
# sample run
- mkdir results && touch configs/sample_config.yaml
- python bm_experiments/bm_comp_perform.py -o ./results -n 1
- python birl/bm_template.py -c ./data_images/pairs-imgs-lnds_mix.csv -o ./results --visual --unique --path_sample_config configs/sample_config.yaml
16 changes: 6 additions & 10 deletions MANIFEST.in
@@ -24,26 +24,22 @@ exclude bm_ANHIR
recursive-exclude notebooks *.ipynb
exclude notebooks

# Exclude the data files
recursive-exclude data_images *
exclude data_images
# Include the data files
recursive-include data_images *

# Exclude the method config files
recursive-exclude configs *
exclude configs
# Include the method config files
recursive-include configs *

# Exclude the documentation files
recursive-exclude docs *
exclude docs

# Exclude the Rscript files
recursive-include scripts *.r

# Exclude the ImageJ macro files
recursive-include scripts *.ijm *.bsh *.py
recursive-include scripts *.r *.ijm *.bsh *.py

# Include the Requirements
include requirements.txt
exclude requirements-*.txt

# Exclude build configs
exclude *.yml
2 changes: 1 addition & 1 deletion README.md
@@ -261,7 +261,7 @@ The project is using the standard [BSD license](http://opensource.org/licenses/B
## References
For complete references see [bibtex](docs/references.bib).
1. Borovec, J., Munoz-Barrutia, A., & Kybic, J. (2018). **[Benchmarking of image registration methods for differently stained histological slides](https://www.researchgate.net/publication/325019076_Benchmarking_of_image_registration_methods_for_differently_stained_histological_slides)**. In IEEE International Conference on Image Processing (ICIP) (pp. 3368–3372). Athens. [DOI: 10.1109/ICIP.2018.8451040](https://doi.org/10.1109/ICIP.2018.8451040)
1. Borovec, J., Munoz-Barrutia, A., & Kybic, J. (2018). **[Benchmarking of image registration methods for differently stained histological slides](https://www.researchgate.net/publication/325019076_Benchmarking_of_image_registration_methods_for_differently_stained_histological_slides)**. In IEEE International Conference on Image Processing (ICIP) (pp. 3368–3372), Athens. [DOI: 10.1109/ICIP.2018.8451040](https://doi.org/10.1109/ICIP.2018.8451040)
## Appendix - Useful information
24 changes: 17 additions & 7 deletions appveyor.yml
@@ -7,7 +7,6 @@ environment:
CMD_IN_ENV: "cmd /E:ON /V:ON /C obvci_appveyor_python_build_env.cmd"

matrix:

# Pre-installed Python versions, which Appveyor may upgrade to
# a later point release.
# See: http://www.appveyor.com/docs/installed-software#python
@@ -44,25 +43,36 @@ environment:

build: off

# https://www.appveyor.com/docs/build-cache/
cache:
- C:\ProgramData\chocolatey\bin -> appveyor.yml
- C:\ProgramData\chocolatey\lib -> appveyor.yml
- '%LOCALAPPDATA%\pip\Cache -> appveyor.yml'

# scripts that run after cloning repository
install:
# If there is a newer build queued for the same PR, cancel this one.
# The AppVeyor 'rollout builds' option is supposed to serve the same
# purpose but it is problematic because it tends to cancel builds pushed
# directly to master instead of just PR builds (or the converse).
# credits: JuliaLang developers.
- choco upgrade chocolatey
- choco install -y opencv
- SET PATH=%PYTHON%;%PYTHON%\\Scripts;%path%
- python --version
- pip install -U --user pip
- pip --version
- pip install -r requirements.txt
- pip install tox coverage codecov
- pip freeze
- pip install -r requirements-dev.txt
- pip install tox

# scripts to run before tests (working directory and environment changes are persisted from the previous steps such as "before_build")
before_test:
- python --version
- pip --version
- pip list
- dir

# to run your custom scripts instead of automatic tests
test_script:
- tox
- tox --sitepackages --parallel auto
- mkdir results && touch configs/sample_config.yaml
- python bm_experiments/bm_comp_perform.py -o ./results -n 1
- python birl/bm_template.py -c ./data_images/pairs-imgs-lnds_mix.csv -o ./results --unique --visual --path_sample_config configs/sample_config.yaml
29 changes: 29 additions & 0 deletions birl/__init__.py
@@ -1,3 +1,32 @@
import birl.utilities

birl.utilities

__version__ = '0.2.2'
__author__ = 'Jiri Borovec'
__author_email__ = '[email protected]'
__license__ = 'BSD 3-clause'
__home__ = 'https://borda.github.io/BIRL',
__copyright__ = 'Copyright (c) 2014-2019, Jiri Borovec.'
__doc__ = """
# BIRL: Benchmark on Image Registration methods with Landmark validation
The project aims at automatic evaluation of state-of-the-art image registration
methods based on landmark annotations for a given image dataset. In particular,
this project is the main evaluation framework for the ANHIR challenge.
## Main Features
* **automatic** execution of image registration on a sequence of image pairs
* integrated **evaluation** of registration performances
* integrated **visualization** of performed registration
* running several image registration experiments in **parallel**
* **resuming** an unfinished sequence of registration benchmarks
* handling custom datasets and **creating own experiments**
* re-running evaluation and visualization on finished experiments
## References
Borovec, J., Munoz-Barrutia, A., & Kybic, J. (2018). Benchmarking of image
registration methods for differently stained histological slides.
In IEEE International Conference on Image Processing (ICIP) (pp. 3368-3372),
Athens. DOI: 10.1109/ICIP.2018.8451040
"""
3 changes: 2 additions & 1 deletion birl/utilities/dataset.py
@@ -687,7 +687,8 @@ def scale_large_images_landmarks(images, landmarks):
:param [ndarray] landmarks: list of landmarks
:return ([ndarray], [ndarray]): lists of images and landmarks
>>> scale_large_images_landmarks([np.zeros((8000, 500, 3), dtype=np.uint8)], [None, None]) # doctest: +ELLIPSIS
>>> scale_large_images_landmarks([np.zeros((8000, 500, 3), dtype=np.uint8)],
... [None, None]) # doctest: +ELLIPSIS
([array(...)], [None, None])
"""
if not images:
12 changes: 8 additions & 4 deletions birl/utilities/evaluate.py
@@ -23,7 +23,8 @@ def compute_tre(points_1, points_2):
:return ndarray:
>>> np.random.seed(0)
>>> compute_tre(np.random.random((6, 2)), np.random.random((9, 2))) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
>>> compute_tre(np.random.random((6, 2)),
... np.random.random((9, 2))) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
array([ 0.21..., 0.70..., 0.44..., 0.34..., 0.41..., 0.41...])
"""
nb_common = min([len(pts) for pts in [points_1, points_2]
@@ -177,8 +178,10 @@ def compute_ranking(user_cases, field, reverse=False):
... }
>>> user_cases = compute_ranking(user_cases, 'rTRE')
>>> import pandas as pd
>>> pd.DataFrame({usr: {cs: user_cases[usr][cs]['rTRE_rank'] for cs in user_cases[usr]}
... for usr in user_cases})[sorted(user_cases.keys())] # doctest: +NORMALIZE_WHITESPACE
>>> df = pd.DataFrame({usr: {cs: user_cases[usr][cs]['rTRE_rank']
... for cs in user_cases[usr]}
... for usr in user_cases})[sorted(user_cases.keys())]
>>> df # doctest: +NORMALIZE_WHITESPACE
franta karel pepa
1 3 1 2
2 1 2 3
@@ -278,7 +281,8 @@ def aggregate_user_score_timeline(df, col_aggreg, col_user, col_score,
>>> df['day'] = np.random.randint(0, 5, 50)
>>> df['user'] = np.array(list('abc'))[np.random.randint(0, 3, 50)]
>>> df['score'] = np.random.random(50)
>>> aggregate_user_score_timeline(df, 'day', 'user', 'score').round(3) # doctest: +NORMALIZE_WHITESPACE
>>> df_agg = aggregate_user_score_timeline(df, 'day', 'user', 'score')
>>> df_agg.round(3) # doctest: +NORMALIZE_WHITESPACE
b c a
4 0.447 0.132 0.567
0 0.223 0.005 0.094
6 changes: 4 additions & 2 deletions birl/utilities/experiments.py
@@ -295,7 +295,8 @@ def wrap_execute_sequence(wrap_func, iterate_vals, nb_workers=NB_THREADS,
if it is set None, bar is suppressed
:param bool ordered: whether enforce ordering in the parallelism
>>> list(wrap_execute_sequence(np.sqrt, range(5), nb_workers=1, ordered=True)) # doctest: +ELLIPSIS
>>> list(wrap_execute_sequence(np.sqrt, range(5), nb_workers=1,
... ordered=True)) # doctest: +ELLIPSIS
[0.0, 1.0, 1.41..., 1.73..., 2.0]
>>> list(wrap_execute_sequence(sum, [[0, 1]] * 5, nb_workers=2, desc=None))
[1, 1, 1, 1, 1]
@@ -373,7 +374,8 @@ def dict_deep_update(dict_base, dict_update):
>>> d = {'level1': {'level2': {'levelA': 0, 'levelB': 1}}}
>>> u = {'level1': {'level2': {'levelB': 10}}}
>>> import json
>>> print(json.dumps(dict_deep_update(d, u), sort_keys=True, indent=2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
>>> d = json.dumps(dict_deep_update(d, u), sort_keys=True, indent=2)
>>> print(d) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
{
"level1": {
"level2": {
3 changes: 2 additions & 1 deletion birl/utilities/registration.py
@@ -76,7 +76,8 @@ def get_affine_components(matrix):
>>> mtx = np.array([[ -0.95, 0.1, 65.], [ 0.1, 0.95, -60.], [ 0., 0., 1.]])
>>> import pandas as pd
>>> pd.Series(get_affine_components(mtx)).sort_index() # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
>>> aff = pd.Series(get_affine_components(mtx)).sort_index()
>>> aff # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
rotation 173.9...
scale (0.95..., 0.95...)
shear -3.14...
4 changes: 2 additions & 2 deletions circle.yml
@@ -15,10 +15,10 @@ references:
name: Install PyPI dependences
command: |
sudo pip install -U backports.functools_lru_cache # required for matplotlib @py2
sudo pip install coverage pytest pytest-cov codecov flake8
pip install -r requirements.txt --user
sudo pip install -r requirements-dev.txt
python --version ; pwd ; ls -l
pip --version ; pip freeze
pip --version ; pip list
test_coverage: &test_coverage
run:
8 changes: 8 additions & 0 deletions requirements-dev.txt
@@ -0,0 +1,8 @@
nose>=1.3.7
coverage
codecov
pytest>=3.0.5
pytest-cov
flake8
check-manifest
codacy-coverage
28 changes: 14 additions & 14 deletions setup.py
@@ -16,33 +16,33 @@
# Python 3 only projects can skip this import
from io import open

here = path.abspath(path.dirname(__file__))
import birl

with open(path.join(here, 'requirements.txt'), encoding='utf-8') as fp:
requirements = [r.rstrip() for r in fp.readlines() if not r.startswith('#')]
PATH_HERE = path.abspath(path.dirname(__file__))

with open(path.join(PATH_HERE, 'requirements.txt'), encoding='utf-8') as fp:
requirements = [rq.rstrip() for rq in fp.readlines() if not rq.startswith('#')]

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as fp:
long_description = fp.read()
# with open(path.join(PATH_HERE, 'README.md'), encoding='utf-8') as fp:
# long_description = fp.read()

# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.

setup(
name='BIRL',
version='0.2.2',
url='https://borda.github.io/BIRL',
version=birl.__version__,
url=birl.__home__,

author='Jiri Borovec',
author_email='[email protected]',
license='BSD 3-clause',
author=birl.__author__,
author_email=birl.__author_email__,
license=birl.__license__,
description='Benchmark on Image Registration methods with Landmark validation',

long_description=long_description,
long_description=birl.__doc__,
long_description_content_type='text/markdown',

packages=find_packages(
exclude=['docs', 'notebooks', 'scripts*', 'bm_*']),
packages=find_packages(exclude=['docs', 'notebooks', 'scripts*', 'bm_*']),

keywords='benchmark image registration landmarks',
install_requires=requirements,
9 changes: 2 additions & 7 deletions tox.ini
@@ -30,13 +30,8 @@ basepython =
py36: python3.6
py37: python3.7
deps =
check-manifest
# If your project uses README.rst, uncomment the following:
# readme_renderer
flake8
pytest
coverage
codecov
-r requirements.txt
-r requirements-dev.txt
commands =
check-manifest --ignore tox.ini
python setup.py check -m -s
