Commit
Merge pull request #222 from biosustain/devel
Towards 0.11.7
phantomas1234 authored Dec 12, 2018
2 parents 20ffb09 + 2ec2a49 commit 159ba25
Showing 15 changed files with 235 additions and 90 deletions.
106 changes: 106 additions & 0 deletions .gitignore
@@ -54,6 +54,112 @@ docs/_build
/.DS_Store
.cache
/coverage.xml
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/


cplex/
cplex*.tar.gz
4 changes: 3 additions & 1 deletion README.rst
@@ -4,7 +4,7 @@ Cameo—Computer Aided Metabolic Engineering and Optimization
.. summary-start
|Join the chat at https://gitter.im/biosustain/cameo| |PyPI| |License|
|Build Status| |Coverage Status| |DOI|
|Build Status| |Coverage Status| |DOI| |zenhub|

What is cameo?
~~~~~~~~~~~~~~
@@ -139,3 +139,5 @@ Contributions
:target: https://coveralls.io/r/biosustain/cameo?branch=devel
.. |DOI| image:: https://zenodo.org/badge/5031/biosustain/cameo.svg
:target: https://zenodo.org/badge/latestdoi/5031/biosustain/cameo
.. |zenhub| image:: https://img.shields.io/badge/Shipping_faster_with-ZenHub-5e60ba.svg?style=flat-square
:target: https://zenhub.com
2 changes: 1 addition & 1 deletion cameo/__init__.py
@@ -31,7 +31,7 @@
# optimize the model and print the objective value
solution = model.optimize()
print 'Objective value:', solution.f
print 'Objective value:', solution.objective_value
# Determine a set of gene deletions that will optimize the production
# of a desired compound
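The docstring change above tracks the cobrapy API, where a solution's objective is read from objective_value rather than the removed f attribute. A minimal usage sketch under that assumption (the model identifier is only illustrative):

from cameo import load_model

# Load a model (identifier is illustrative; any cobrapy-compatible model works).
model = load_model("iJO1366")

# Optimize and report the objective via the current attribute name.
solution = model.optimize()
print('Objective value:', solution.objective_value)  # replaces the old solution.f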
14 changes: 8 additions & 6 deletions cameo/core/pathway.py
@@ -102,12 +102,14 @@ def to_file(self, file_path, sep="\t"):
with open(file_path, "w") as output_file:
for reaction in self.reactions:
equation = _build_equation(reaction.metabolites)
output_file.write(reaction.id + sep +
equation + sep +
reaction.lower_bound + sep +
reaction.upper_bound + sep +
reaction.name + sep +
reaction.notes.get("pathway_note", "") + "\n")
output_file.write(sep.join(map(str, [
reaction.id,
equation,
reaction.lower_bound,
reaction.upper_bound,
reaction.name,
reaction.notes.get("pathway_note", "")
])) + "\n")

def plug_model(self, model):
"""
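The rewritten to_file body joins every field with the separator after passing it through str, so the numeric reaction bounds no longer break string concatenation. A self-contained sketch of the same pattern (file name and field values are made up):

# Write tab-separated rows when some fields are numeric; map(str, ...) converts
# the float bounds so join() does not raise a TypeError.
rows = [
    ("RXN1", "a + b --> c", -1000.0, 1000.0, "example reaction", ""),
]
sep = "\t"
with open("pathway.tsv", "w") as output_file:
    for fields in rows:
        output_file.write(sep.join(map(str, fields)) + "\n")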
14 changes: 10 additions & 4 deletions cameo/core/target.py
@@ -342,8 +342,11 @@ def __gt__(self, other):

def __eq__(self, other):
if isinstance(other, GeneModulationTarget):
return (self.id == other.id and self._value == other._value and
self._reference_value == other._reference_value)
return (
(self.id == other.id) and (
self._value == other._value) and (
self._reference_value == other._reference_value)
)
else:
return False

@@ -425,8 +428,11 @@ def __gt__(self, other):

def __eq__(self, other):
if isinstance(other, ReactionModulationTarget):
return (self.id == other.id and self._value == other._value and
self._reference_value == other._reference_value)
return (
(self.id == other.id) and (
self._value == other._value) and (
self._reference_value == other._reference_value)
)
else:
return False

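Both __eq__ rewrites above only change line wrapping; the semantics stay the same: two modulation targets compare equal when id, value and reference value all match, and comparison with any other type returns False. A minimal stand-in sketch of that pattern (not cameo's actual class):

class ModulationTargetSketch:
    """Illustrative stand-in for the Gene/ReactionModulationTarget equality rule."""

    def __init__(self, id, value, reference_value):
        self.id = id
        self._value = value
        self._reference_value = reference_value

    def __eq__(self, other):
        if isinstance(other, ModulationTargetSketch):
            return (self.id == other.id
                    and self._value == other._value
                    and self._reference_value == other._reference_value)
        return False

    def __hash__(self):
        # Keep hashing consistent with equality.
        return hash((self.id, self._value, self._reference_value))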
32 changes: 19 additions & 13 deletions cameo/flux_analysis/analysis.py
@@ -172,9 +172,13 @@ def find_blocked_reactions(model):
for exchange in model.exchanges:
exchange.bounds = (-9999, 9999)
fva_solution = flux_variability_analysis(model)
return frozenset(reaction for reaction in model.reactions
if round(fva_solution.lower_bound(reaction.id), config.ndecimals) == 0 and
round(fva_solution.upper_bound(reaction.id), config.ndecimals) == 0)
return frozenset(
reaction for reaction in model.reactions
if round(
fva_solution.lower_bound(reaction.id),
config.ndecimals) == 0 and round(
fva_solution.upper_bound(reaction.id), config.ndecimals) == 0
)


def flux_variability_analysis(model, reactions=None, fraction_of_optimum=0., pfba_factor=None,
@@ -303,14 +307,16 @@ def phenotypic_phase_plane(model, variables, objective=None, source=None, points

nice_variable_ids = [_nice_id(reaction) for reaction in variable_reactions]
variable_reactions_ids = [reaction.id for reaction in variable_reactions]
phase_plane = pandas.DataFrame(envelope,
columns=(variable_reactions_ids +
['objective_lower_bound',
'objective_upper_bound',
'c_yield_lower_bound',
'c_yield_upper_bound',
'mass_yield_lower_bound',
'mass_yield_upper_bound']))
phase_plane = pandas.DataFrame(
envelope, columns=(variable_reactions_ids + [
'objective_lower_bound',
'objective_upper_bound',
'c_yield_lower_bound',
'c_yield_upper_bound',
'mass_yield_lower_bound',
'mass_yield_upper_bound'
])
)

if objective is None:
objective = model.objective
@@ -517,7 +523,7 @@ def _cycle_free_fva(model, reactions=None, sloppy=True, sloppy_bound=666):
else:
logger.debug('Determine if {} with bound {} is a cycle'.format(reaction.id, bound))
solution = get_solution(model)
v0_fluxes = solution.x_dict
v0_fluxes = solution.fluxes
v1_cycle_free_fluxes = remove_infeasible_cycles(model, v0_fluxes)
if abs(v1_cycle_free_fluxes[reaction.id] - bound) < 10 ** -6:
fva_sol[reaction.id]['lower_bound'] = bound
@@ -556,7 +562,7 @@ def _cycle_free_fva(model, reactions=None, sloppy=True, sloppy_bound=666):
else:
logger.debug('Determine if {} with bound {} is a cycle'.format(reaction.id, bound))
solution = get_solution(model)
v0_fluxes = solution.x_dict
v0_fluxes = solution.fluxes
v1_cycle_free_fluxes = remove_infeasible_cycles(model, v0_fluxes)
if abs(v1_cycle_free_fluxes[reaction.id] - bound) < 1e-6:
fva_sol[reaction.id]['upper_bound'] = v0_fluxes[reaction.id]
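The find_blocked_reactions refactor keeps the same logic: open all exchange bounds, run FVA, and report reactions whose lower and upper FVA bounds both round to zero. A hedged sketch of that test written against cobrapy's own FVA (cameo's result object exposes lower_bound()/upper_bound() accessors, while cobrapy returns a DataFrame with 'minimum'/'maximum' columns; the ndecimals default here is an assumption standing in for cameo's config.ndecimals):

from cobra.flux_analysis import flux_variability_analysis

def find_blocked_reactions_sketch(model, ndecimals=6):
    with model:  # bound changes on the exchanges are reverted on exit
        for exchange in model.exchanges:
            exchange.bounds = (-9999, 9999)
        fva = flux_variability_analysis(model)
    return frozenset(
        reaction for reaction in model.reactions
        if round(fva.at[reaction.id, 'minimum'], ndecimals) == 0
        and round(fva.at[reaction.id, 'maximum'], ndecimals) == 0
    )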
15 changes: 9 additions & 6 deletions cameo/flux_analysis/simulation.py
@@ -178,7 +178,8 @@ def create_objective(model, variables):
solution = model.optimize(raise_error=True)

if reactions is not None:
result = FluxDistributionResult({r: solution.get_primal_by_id(r) for r in reactions}, solution.f)
result = FluxDistributionResult(
{r: solution.get_primal_by_id(r) for r in reactions}, solution.objective_value)
else:
result = FluxDistributionResult.from_solution(solution)
return result
@@ -275,7 +276,8 @@ def create_objective(model, variables):

solution = model.optimize(raise_error=True)
if reactions is not None:
result = FluxDistributionResult({r: solution.get_primal_by_id(r) for r in reactions}, solution.f)
result = FluxDistributionResult(
{r: solution.get_primal_by_id(r) for r in reactions}, solution.objective_value)
else:
result = FluxDistributionResult.from_solution(solution)
return result
@@ -369,7 +371,8 @@ def update_lower_constraint(model, constraint, reaction, variable, flux_value, e

solution = model.optimize(raise_error=True)
if reactions is not None:
result = FluxDistributionResult({r: solution.get_primal_by_id(r) for r in reactions}, solution.f)
result = FluxDistributionResult(
{r: solution.get_primal_by_id(r) for r in reactions}, solution.objective_value)
else:
result = FluxDistributionResult.from_solution(solution)
return result
@@ -392,7 +395,7 @@ class FluxDistributionResult(Result):

@classmethod
def from_solution(cls, solution, *args, **kwargs):
return cls(solution.fluxes, solution.f, *args, **kwargs)
return cls(solution.fluxes, solution.objective_value, *args, **kwargs)

def __init__(self, fluxes, objective_value, *args, **kwargs):
super(FluxDistributionResult, self).__init__(*args, **kwargs)
@@ -538,13 +541,13 @@ def display_on_map(self, map_name=None, palette="YlGnBu"):
# print("cobra fba")
# tic = time.time()
# cb_model.optimize(solver='cglpk')
# print("flux sum:", sum([abs(val) for val in list(cb_model.solution.x_dict.values())]))
# print("flux sum:", sum([abs(val) for val in list(cb_model.solution.fluxes.values())]))
# print("cobra fba runtime:", time.time() - tic)

# print("cobra pfba")
# tic = time.time()
# optimize_minimal_flux(cb_model, solver='cglpk')
# print("flux sum:", sum([abs(val) for val in list(cb_model.solution.x_dict.values())]))
# print("flux sum:", sum([abs(val) for val in list(cb_model.solution.fluxes.values())]))
# print("cobra pfba runtime:", time.time() - tic)

print("pfba")
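Each hunk above builds a FluxDistributionResult either from a subset of reaction primals or straight from the solution, reading the objective from objective_value instead of the removed f attribute. A simplified stand-in for that construction (a plain dict instead of cameo's result class; reactions are assumed to be reaction identifiers):

# Collect a flux distribution and the objective from a cobrapy Solution.
# solution.fluxes is a pandas Series indexed by reaction id, and
# solution.objective_value replaces the old solution.f.
def flux_distribution_sketch(solution, reactions=None):
    if reactions is not None:
        fluxes = {r: solution.fluxes[r] for r in reactions}
    else:
        fluxes = solution.fluxes.to_dict()
    return fluxes, solution.objective_value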
2 changes: 1 addition & 1 deletion cameo/flux_analysis/util.py
@@ -77,7 +77,7 @@ def remove_infeasible_cycles(model, fluxes, fix=()):
except OptimizationError as e:
logger.warning("Couldn't remove cycles from reference flux distribution.")
raise e
result = solution.x_dict
result = solution.fluxes
return result


52 changes: 34 additions & 18 deletions cameo/strain_design/deterministic/flux_variability_based.py
@@ -77,7 +77,7 @@


class DifferentialFVA(StrainDesignMethod):
"""Differential flux variability analysis.
r"""Differential flux variability analysis.
Compares flux ranges of a reference model to a set of models that
have been parameterized to lie on a grid of evenly spaced points in the
@@ -354,22 +354,37 @@ def run(self, surface_only=True, improvements_only=True, progress=True, view=Non
df['suddenly_essential'] = False
df['free_flux'] = False

df.loc[(df.lower_bound == 0) & (df.upper_bound == 0) &
(ref_upper_bound != 0) & (ref_lower_bound != 0), 'KO'] = True

df.loc[((ref_upper_bound < 0) & (df.lower_bound > 0) |
((ref_lower_bound > 0) & (df.upper_bound < 0))), 'flux_reversal'] = True

df.loc[((df.lower_bound <= 0) & (df.lower_bound > 0)) |
((ref_lower_bound >= 0) & (df.upper_bound <= 0)), 'suddenly_essential'] = True
df.loc[
(df.lower_bound == 0) & (
df.upper_bound == 0) & (
ref_upper_bound != 0) & (
ref_lower_bound != 0),
'KO'
] = True

df.loc[
((ref_upper_bound < 0) & (df.lower_bound > 0) | (
(ref_lower_bound > 0) & (df.upper_bound < 0))),
'flux_reversal'
] = True

df.loc[
((df.lower_bound <= 0) & (df.lower_bound > 0)) | (
(ref_lower_bound >= 0) & (df.upper_bound <= 0)),
'suddenly_essential'
] = True

is_reversible = numpy.asarray([
self.design_space_model.reactions.get_by_id(i).reversibility for i in df.index], dtype=bool)
self.design_space_model.reactions.get_by_id(i).reversibility
for i in df.index], dtype=bool)
not_reversible = numpy.logical_not(is_reversible)

df.loc[((df.lower_bound == -1000) & (df.upper_bound == 1000) & is_reversible) |
((df.lower_bound == 0) & (df.upper_bound == 1000) & not_reversible) |
((df.lower_bound == -1000) & (df.upper_bound == 0) & not_reversible), 'free_flux'] = True
df.loc[
((df.lower_bound == -1000) & (df.upper_bound == 1000) & is_reversible) | (
(df.lower_bound == 0) & (df.upper_bound == 1000) & not_reversible) | (
(df.lower_bound == -1000) & (df.upper_bound == 0) & not_reversible),
'free_flux'
] = True

df['reaction'] = df.index
df['excluded'] = df['reaction'].isin(self.exclude)
@@ -481,9 +496,9 @@ def _generate_designs(cls, solutions, reference_fva, reference_fluxes):
for _, solution in solutions.groupby(('biomass', 'production')):
targets = []
relevant_targets = solution.loc[
(numpy.abs(solution['normalized_gaps']) > non_zero_flux_threshold) &
numpy.logical_not(solution['excluded']) &
numpy.logical_not(solution['free_flux'])
(numpy.abs(solution['normalized_gaps']) > non_zero_flux_threshold) & (
numpy.logical_not(solution['excluded'])) & (
numpy.logical_not(solution['free_flux']))
]
for rid, relevant_row in relevant_targets.iterrows():
if relevant_row.KO:
@@ -648,8 +663,9 @@ def _display_on_map_static(self, index, map_name, palette="RdYlBu", **kwargs):
data = self.nth_panel(index)
# Find values above decimal precision and not NaN
data = data.loc[
~numpy.isnan(data['normalized_gaps']) &
(data['normalized_gaps'].abs() > non_zero_flux_threshold)]
~numpy.isnan(data['normalized_gaps']) & (
data['normalized_gaps'].abs() > non_zero_flux_threshold)
]
data.index = data['reaction']

reaction_data = data['normalized_gaps'].copy()
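The reformatted blocks above all use the same pandas idiom: combine element-wise comparisons with & and | into a boolean mask, then set a flag column through df.loc[mask, column]. A small self-contained sketch of the knockout flag (column names mirror the diff; the data is made up):

import pandas as pd

# Toy frame with the diff's column names; reference bounds are separate Series.
df = pd.DataFrame(
    {"lower_bound": [0.0, -1000.0, 2.5], "upper_bound": [0.0, 1000.0, 7.5]},
    index=["R1", "R2", "R3"],
)
ref_lower_bound = pd.Series([-5.0, -1000.0, 1.0], index=df.index)
ref_upper_bound = pd.Series([5.0, 1000.0, 9.0], index=df.index)

df["KO"] = False
# Flag knockouts: the design forces the flux range to zero while neither
# reference bound is zero.
df.loc[
    (df.lower_bound == 0) & (df.upper_bound == 0)
    & (ref_upper_bound != 0) & (ref_lower_bound != 0),
    "KO",
] = True
print(df)  # only R1 is flagged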