diff --git a/ctn_benchmark/benchmark.py b/ctn_benchmark/benchmark.py
index 1a63341..6eb51eb 100644
--- a/ctn_benchmark/benchmark.py
+++ b/ctn_benchmark/benchmark.py
@@ -27,7 +27,7 @@ def __init__(self):
     def default(self, description, **kwarg):
         if len(kwarg) != 1:
             raise ValueException('Must specify exactly one parameter')
-        k, v = kwarg.items()[0]
+        k, v = list(kwarg.items())[0]
         if k in self.param_names:
             raise ValueException('Cannot redefine parameter "%s"' % k)
         if v is False:
diff --git a/ctn_benchmark/spa/memory.py b/ctn_benchmark/spa/memory.py
index f57782e..733525b 100644
--- a/ctn_benchmark/spa/memory.py
+++ b/ctn_benchmark/spa/memory.py
@@ -11,7 +11,7 @@
 
 import ctn_benchmark
-import split
+from . import split
 
 
 class SemanticMemory(ctn_benchmark.Benchmark):
     def params(self):
@@ -66,7 +66,7 @@ def marker(t):
         for node in model.all_nodes:
             if node.output is None:
                 if node.size_in > p.pf_max_dim:
-                    print 'limiting', node
+                    print('limiting', node)
                     model.config[node].n_cores_per_chip = p.pf_cores
                     model.config[node].n_chips = p.pf_n_chips
         model.config[
diff --git a/ctn_benchmark/spa/memory_recall.py b/ctn_benchmark/spa/memory_recall.py
index 3f0ab62..8ca3cfa 100644
--- a/ctn_benchmark/spa/memory_recall.py
+++ b/ctn_benchmark/spa/memory_recall.py
@@ -11,7 +11,7 @@
 
 import ctn_benchmark
-import split
+from . import split
 
 
 class SemanticMemory(ctn_benchmark.Benchmark):
     def params(self):
@@ -81,7 +81,7 @@ def cue(t):
         for node in model.all_nodes:
             if node.output is None:
                 if node.size_in > p.pf_max_dim:
-                    print 'limiting', node
+                    print('limiting', node)
                     model.config[node].n_cores_per_chip = p.pf_cores
                     model.config[node].n_chips = p.pf_n_chips
         model.config[
diff --git a/ctn_benchmark/spa/parse.py b/ctn_benchmark/spa/parse.py
index ab6baad..01dab43 100644
--- a/ctn_benchmark/spa/parse.py
+++ b/ctn_benchmark/spa/parse.py
@@ -11,7 +11,7 @@
 
 import ctn_benchmark
-import split
+from . import split
 
 
 class Parsing(ctn_benchmark.Benchmark):
     def params(self):
@@ -68,7 +68,7 @@ def vision_input(t):
         for node in model.all_nodes:
             if node.output is None:
                 if node.size_in > p.pf_max_dim:
-                    print 'limiting', node
+                    print('limiting', node)
                     model.config[node].n_cores_per_chip = p.pf_cores
                     model.config[node].n_chips = p.pf_n_chips
         model.config[
diff --git a/ctn_benchmark/spa/sequence_routed.py b/ctn_benchmark/spa/sequence_routed.py
index 91445e4..f3a4ee6 100644
--- a/ctn_benchmark/spa/sequence_routed.py
+++ b/ctn_benchmark/spa/sequence_routed.py
@@ -5,7 +5,7 @@
 basal ganglia and thalamus.
 """
 
-import split
+from . import split
 
 import ctn_benchmark
 
@@ -56,7 +56,7 @@ def model(self, p):
         for node in model.all_nodes:
             if node.output is None:
                 if node.size_in > p.pf_max_dim:
-                    print 'limiting', node
+                    print('limiting', node)
                     model.config[node].n_cores_per_chip = p.pf_cores
                     model.config[node].n_chips = p.pf_n_chips
 
diff --git a/ctn_benchmark/spa/split.py b/ctn_benchmark/spa/split.py
index a80c269..037f202 100644
--- a/ctn_benchmark/spa/split.py
+++ b/ctn_benchmark/spa/split.py
@@ -1,5 +1,6 @@
 import numpy as np
 import nengo
+from six import iteritems
 
 def gather_info(network, inputs, outputs, parents):
     for c in network.connections:
@@ -183,7 +184,7 @@ def pass_ensembles(model, max_dim=16):
                 total_out[key] += c.size_out
                 conns[key].append(c)
 
-    for key, total in total_out.items():
+    for key, total in iteritems(total_out):
         if total > max_dim:
             f, slice = key
             cs = conns[key]
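
Notes on the individual fixes follow; the sample values in the sketches are hypothetical, not taken from the repository.

kwarg.items()[0] breaks on Python 3 because dict.items() now returns a view, which cannot be indexed. Wrapping it in list(), as the patch does, restores the Python 2 behaviour; next(iter(...)) is an equivalent that skips the temporary list:

    kwarg = {'dimensions': 8}            # hypothetical single-entry dict
    k, v = list(kwarg.items())[0]        # the patch's approach
    k, v = next(iter(kwarg.items()))     # equivalent, no temporary list

(Incidentally, ValueException in the surrounding context lines is not a Python builtin; unless it is defined elsewhere in benchmark.py, those raise statements would themselves fail with a NameError, but they are outside the scope of this patch.)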
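
The import fixes address PEP 328: Python 3 removed implicit relative imports, so a bare "import split" inside the ctn_benchmark.spa package would look for a top-level split module and raise ImportError. Either explicit form below works from a module in that package; the patch uses the relative one:

    # Inside a module of the ctn_benchmark.spa package (e.g. memory.py):
    from . import split                    # explicit relative import
    from ctn_benchmark.spa import split    # equivalent absolute import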
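
The print fixes are valid syntax on both major versions, with one caveat: under plain Python 2, print('limiting', node) prints a tuple repr such as ('limiting', <Node ...>) rather than two space-separated values. Since the six import elsewhere in this patch suggests Python 2 support is still intended, each module printing this way would presumably also want the future import:

    # Must precede all other statements in the module (docstring aside):
    from __future__ import print_function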
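
six.iteritems(d) resolves to d.iteritems() on Python 2 and iter(d.items()) on Python 3, so the loop in pass_ensembles avoids building an intermediate list on either version. Note that this makes six a run-time dependency, which presumably needs declaring in the project's setup files. A minimal sketch with made-up keys:

    from six import iteritems

    total_out = {('a', 0): 4, ('b', 1): 32}   # hypothetical (func, slice) keys
    # Lazy iteration on both Python 2 and 3; no intermediate list is built.
    oversized = [key for key, total in iteritems(total_out) if total > 16]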