[misc] update Python & PyTorch constraint (#371)
* [misc] update Python & PyTorch constraint

* update reqs

* update pillow

* use classic solver

* debugging

* debugging

* bump to pytorch 1.10

* update pillow

* update

* update ruff

* apply fixes for ruff

* fix style check
peterjc123 authored Oct 24, 2024
1 parent 75fc200 commit 19e5f6d
Showing 26 changed files with 143 additions and 164 deletions.
22 changes: 6 additions & 16 deletions .github/workflows/full-test.yml
@@ -12,7 +12,7 @@ jobs:
fail-fast: false
matrix:
os: ["ubuntu-latest", "macos-13", "windows-latest"]
vers: [ {pt_ver: "1.6.0", tv_ver: "0.7.0"}, {pt_ver: "1.7.0", tv_ver: "0.8.1"}, {pt_ver: "1.8.0", tv_ver: "0.9.0"}, {pt_ver: "1.9.0", tv_ver: "0.10.0"}, {pt_ver: "1.10.0", tv_ver: "0.11.1"}, {pt_ver: "1.11.0", tv_ver: "0.12.0"}, {pt_ver: "1.12.0", tv_ver: "0.13.0"} ]
vers: [ {pt_ver: "1.10.0", tv_ver: "0.11.1"}, {pt_ver: "1.11.0", tv_ver: "0.12.0"}, {pt_ver: "1.12.0", tv_ver: "0.13.0"} ]
include:
- os: macos-latest
vers:
@@ -36,7 +36,7 @@ jobs:
- uses: conda-incubator/setup-miniconda@v3
with:
auto-update-conda: true
python-version: 3.8
python-version: 3.9
- name: Install PyTorch
env:
PYTORCH_VER: ${{ matrix.vers.pt_ver }}
@@ -45,19 +45,14 @@
if [ "$RUNNER_OS" == "macOS" ]; then
if [[ "$TORCHVISION_VER" == "latest" && "$PYTORCH_VER" == "latest" ]]; then
conda install pytorch::pytorch torchvision -c pytorch
elif [[ "$TORCHVISION_VER" == "0.9."* || "$TORCHVISION_VER" == "0.10."* ]]; then
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER pillow=6 -c pytorch
else
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER -c pytorch
fi
elif [ "$RUNNER_OS" == "Windows" ]; then
if [[ "$TORCHVISION_VER" == "latest" && "$PYTORCH_VER" == "latest" ]]; then
conda install pytorch torchvision cpuonly pillow=6 -c pytorch
elif [[ "$TORCHVISION_VER" == "0.9."* || "$TORCHVISION_VER" == "0.10."* ]]; then
conda install pillow=6 -c conda-forge
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER cpuonly -c pytorch
conda install pytorch torchvision cpuonly pillow=8 -c pytorch
else
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER cpuonly pillow=6 -c pytorch
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER cpuonly pillow=8 -c pytorch
fi
else
if [[ "$TORCHVISION_VER" == "latest" && "$PYTORCH_VER" == "latest" ]]; then
@@ -72,14 +67,9 @@ jobs:
env:
PYTORCH_VER: ${{ matrix.vers.pt_ver }}
TORCHVISION_VER: ${{ matrix.vers.tv_ver }}
run: |
if [[ "$RUNNER_OS" == "Linux" && "$TORCHVISION_VER" == "latest" && "$PYTORCH_VER" == "latest" ]]; then
pip install black 'ruff<0.0.234' 'tensorflow<2.12' pytest scipy interval
else
pip install black 'ruff<0.0.234' tensorflow pytest scipy interval
fi
run: pip install black==22.3.0 ruff tensorflow pytest scipy interval
- name: Lint checks
run: python -m ruff .
run: python -m ruff check .
- name: Run tests
run: |
source activate.sh
2 changes: 1 addition & 1 deletion .github/workflows/smoke-test.yml
@@ -12,7 +12,7 @@ jobs:
fail-fast: false
matrix:
os: ["ubuntu-latest", "macos-13", "macos-latest", "windows-latest"]
python-version: ["3.8", "3.9", "3.10"]
python-version: ["3.9", "3.10", "3.11"]
defaults:
run:
shell: bash -l {0}
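The smoke-test matrix now tracks the updated support window from the README (Python >= 3.9, PyTorch >= 1.10). Below is a minimal, hypothetical runtime guard that mirrors the same constraint; it is illustrative only and not part of this commit (it assumes the `packaging` distribution is installed).

```python
import sys

import torch
from packaging import version  # assumed to be available alongside pip

MIN_PYTHON = (3, 9)   # assumed floor, per the updated README constraint
MIN_TORCH = "1.10"

if sys.version_info < MIN_PYTHON:
    raise RuntimeError(f"Python {MIN_PYTHON[0]}.{MIN_PYTHON[1]}+ is required")
if version.parse(torch.__version__.split('+')[0]) < version.parse(MIN_TORCH):
    raise RuntimeError(f"PyTorch {MIN_TORCH}+ is required")
```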
26 changes: 8 additions & 18 deletions .github/workflows/unit-test.yml
@@ -16,7 +16,7 @@ jobs:
fail-fast: false
matrix:
os: ["ubuntu-latest", "macos-13", "windows-latest"]
vers: [ {pt_ver: "1.6.0", tv_ver: "0.7.0"}, {pt_ver: "latest", tv_ver: "latest"} ]
vers: [ {pt_ver: "1.10.0", tv_ver: "0.11.1"}, {pt_ver: "latest", tv_ver: "latest"} ]
include:
- os: macos-latest
vers:
@@ -30,7 +30,7 @@ jobs:
- uses: conda-incubator/setup-miniconda@v3
with:
auto-update-conda: true
python-version: 3.8
python-version: 3.9
- name: Install PyTorch
env:
PYTORCH_VER: ${{ matrix.vers.pt_ver }}
@@ -39,19 +39,14 @@
if [ "$RUNNER_OS" == "macOS" ]; then
if [[ "$TORCHVISION_VER" == "latest" && "$PYTORCH_VER" == "latest" ]]; then
conda install pytorch::pytorch torchvision -c pytorch
elif [[ "$TORCHVISION_VER" == "0.9."* || "$TORCHVISION_VER" == "0.10."* ]]; then
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER pillow=6 -c pytorch
else
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER -c pytorch
fi
elif [ "$RUNNER_OS" == "Windows" ]; then
if [[ "$TORCHVISION_VER" == "latest" && "$PYTORCH_VER" == "latest" ]]; then
conda install pytorch torchvision cpuonly pillow=6 -c pytorch
elif [[ "$TORCHVISION_VER" == "0.9."* || "$TORCHVISION_VER" == "0.10."* ]]; then
conda install pillow=6 -c conda-forge
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER cpuonly -c pytorch
conda install pytorch torchvision cpuonly pillow=8 -c pytorch
else
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER cpuonly pillow=6 -c pytorch
conda install pytorch=$PYTORCH_VER torchvision=$TORCHVISION_VER cpuonly pillow=8 -c pytorch
fi
else
if [[ "$TORCHVISION_VER" == "latest" && "$PYTORCH_VER" == "latest" ]]; then
@@ -66,16 +61,11 @@
env:
PYTORCH_VER: ${{ matrix.vers.pt_ver }}
TORCHVISION_VER: ${{ matrix.vers.tv_ver }}
run: |
if [[ "$RUNNER_OS" == "Linux" && "$TORCHVISION_VER" == "latest" && "$PYTORCH_VER" == "latest" ]]; then
pip install black 'ruff<0.0.234' 'tensorflow<2.12' scipy interval
else
pip install black 'ruff<0.0.234' tensorflow scipy interval
fi
run: pip install black==22.3.0 ruff tensorflow scipy interval
- name: Lint checks
run: python -m ruff .
run: python -m ruff check .
- name: Style checks
run: python -m black .
run: python -m black --check .
- name: Run unit tests
run: |
cd tests
@@ -109,7 +99,7 @@ jobs:
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
python-version: 3.8
python-version: 3.9
- name: Install PyTorch
run: conda install pytorch torchvision cpuonly -c pytorch
- name: Install TinyNeuralNetwork
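Two CI commands change alongside the dependency bump: with ruff no longer pinned below 0.0.234, linting goes through the `check` subcommand (`ruff check .`), and the style step now runs black in verify-only mode (`black --check .`) so CI fails on unformatted files instead of rewriting them. A rough sketch of what those two steps amount to, assuming both tools are installed:

```python
import subprocess
import sys

# Run the same checks the workflow runs; exit non-zero if either tool complains.
lint = subprocess.run(["ruff", "check", "."])
style = subprocess.run(["black", "--check", "."])
sys.exit(lint.returncode or style.returncode)
```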
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -11,6 +11,6 @@ repos:
- id: black
exclude: ^tinynn/converter/schemas
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: v0.0.150
rev: v0.7.0
hooks:
- id: ruff
2 changes: 1 addition & 1 deletion README.md
@@ -5,7 +5,7 @@ TinyNeuralNetwork is an efficient and easy-to-use deep learning model compressio

## Installation

Python >= 3.8, PyTorch >= 1.4( PyTorch >= 1.6 if quantization-aware training is involved, see [here](docs/quantization_support.md) for details )
Python >= 3.9, PyTorch >= 1.10

```shell
# Install the TinyNeuralNetwork framework
2 changes: 1 addition & 1 deletion README_zh-CN.md
@@ -5,7 +5,7 @@ TinyNeuralNetwork是一个高效、易用的深度学习模型压缩框架。它

## 安装

python >= 3.8, pytorch >= 1.4(如果使用量化训练 pytorch >= 1.6,详细可见[这里](docs/quantization_support.md)
python >= 3.9, pytorch >= 1.10

```shell
# 安装TinyNeuralNetwork软件包
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -75,7 +75,7 @@ exclude = [
# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"

target-version = "py36"
target-version = "py39"

[tool.ruff.per-file-ignores]
"tinynn/converter/schemas/torch/*.py" = ["E501"]
@@ -86,6 +86,7 @@ target-version = "py36"
"examples/*.py" = ["E402"]
"__init__.py" = ["F401", "F403"]
"tests/import_test.py" = ["F401"]
"tutorials/quantization/basic.ipynb" = ["F811", "F401"]

[tool.ruff.mccabe]
# Unlike Flake8, default to a complexity level of 10.
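Bumping ruff's `target-version` from `py36` to `py39` lets the linter assume Python 3.9 syntax when suggesting fixes, matching the new interpreter floor; the added per-file ignore silences redefinition and unused-import warnings (F811, F401) in the quantization tutorial notebook. As a rough illustration (not code from this repository), these are the kinds of 3.9-only idioms a `py39` target permits:

```python
# Illustrative only: idioms that are valid once the lint target is Python 3.9.
def merge_options(base: dict[str, int], override: dict[str, int]) -> dict[str, int]:
    # PEP 585 builtin generics and PEP 584 dict union, both 3.9+
    return base | override

layers: list[str] = ["conv", "bn", "relu"]
print(merge_options({"epochs": 1}, {"epochs": 5}), layers)
```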
3 changes: 1 addition & 2 deletions requirements.txt
@@ -1,5 +1,4 @@
numpy>=1.18.5,<=1.24.4; python_version < '3.9'
numpy>=1.18.5; python_version >= '3.9'
numpy>=1.18.5
PyYAML>=5.3.1
ruamel.yaml>=0.16.12
igraph>=0.9
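With Python 3.8 dropped, the two numpy lines gated by `python_version` environment markers collapse into a single unconditional requirement. For reference, a small sketch of how such markers are evaluated, using the `packaging` library that pip relies on (illustrative only):

```python
from packaging.markers import Marker

# The removed constraint only applied on interpreters older than 3.9.
old_gate = Marker("python_version < '3.9'")
print(old_gate.evaluate())  # False on any interpreter the project now supports
```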
4 changes: 2 additions & 2 deletions tests/converter_op_test.py
@@ -565,7 +565,7 @@ def test_reduce_ops_single_dim(self):

def model(x):
res = func(x, dim=1)
return res if type(res) == torch.Tensor else res[0]
return res if type(res) is torch.Tensor else res[0]

model_path = get_model_path()
converter = TFLiteConverter(model, dummy_input, model_path, nchw_transpose=False)
@@ -599,7 +599,7 @@ def test_reduce_ops_single_dim_keepdim(self):

def model(x):
res = func(x, dim=1, keepdim=True)
return res if type(res) == torch.Tensor else res[0]
return res if type(res) is torch.Tensor else res[0]

model_path = get_model_path()
converter = TFLiteConverter(model, dummy_input, model_path, nchw_transpose=False)
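The test helpers switch from `type(res) == torch.Tensor` to `type(res) is torch.Tensor`, which is the form ruff's E721 rule expects: exact-type checks should use identity rather than equality. A minimal comparison of the idioms (assumes PyTorch is installed):

```python
import torch

t = torch.zeros(2)
p = torch.nn.Parameter(torch.zeros(2))  # a Tensor subclass

print(type(t) is torch.Tensor)      # True  -- exact type, identity comparison
print(type(p) is torch.Tensor)      # False -- subclasses do not match
print(isinstance(p, torch.Tensor))  # True  -- isinstance accepts subclasses
```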
6 changes: 3 additions & 3 deletions tinynn/converter/operators/optimize.py
@@ -4661,21 +4661,21 @@ def elinimate_sequences(
first_node = seq[0]
last_node = seq[-1]

if type(skip_pred) == bool:
if type(skip_pred) is bool:
skip = skip_pred
elif skip_pred is not None:
skip = skip_pred(seq)

if skip:
continue

if type(remove_first_pred) == bool:
if type(remove_first_pred) is bool:
remove_first = remove_first_pred
custom_data = None
elif remove_first_pred is not None:
remove_first, custom_data = remove_first_pred(seq)

if type(remove_last_pred) == bool:
if type(remove_last_pred) is bool:
remove_last = remove_last_pred
custom_data_last = None
elif remove_last_pred is not None:
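The same E721 cleanup applies here, and it highlights the pattern `elinimate_sequences` uses for its predicates: `skip_pred`, `remove_first_pred`, and `remove_last_pred` may each be a constant bool or a callable, so an exact-type check on `bool` separates the two cases. A simplified sketch of that dispatch (not the actual function signature):

```python
from typing import Callable, Optional, Union

Pred = Optional[Union[bool, Callable[[list], bool]]]

def should_skip(seq: list, skip_pred: Pred = None) -> bool:
    if type(skip_pred) is bool:   # a constant decision for every sequence
        return skip_pred
    elif skip_pred is not None:   # a callable that inspects the sequence
        return skip_pred(seq)
    return False

print(should_skip([1, 2], True), should_skip([1, 2], lambda s: len(s) > 3))
```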
10 changes: 5 additions & 5 deletions tinynn/converter/operators/tflite/base.py
@@ -186,7 +186,7 @@ def __init__(
self.index = 0
self.is_variable = is_variable

if type(tensor) == FakeQuantTensor:
if type(tensor) is FakeQuantTensor:
self.quantization = QuantizationParameters(tensor.scale, tensor.zero_point, tensor.dim)
tensor = tensor.tensor

@@ -195,7 +195,7 @@ def __init__(

if type(tensor).__module__ == 'numpy':
self.tensor = tensor
elif type(tensor) == torch.Tensor:
elif type(tensor) is torch.Tensor:
assert tensor.is_contiguous, "Tensor should be contiguous"
if tensor.dtype == torch.quint8:
self.tensor = torch.int_repr(tensor.detach()).numpy()
@@ -253,7 +253,7 @@ def __init__(
self.quantization = QuantizationParameters(scales, zero_points, dim)
else:
self.tensor = tensor.detach().numpy()
elif type(tensor) == torch.Size:
elif type(tensor) is torch.Size:
self.tensor = np.asarray(tensor, dtype='int32')
elif type(tensor) in (tuple, list):
self.tensor = np.asarray(tensor, dtype=dtype)
@@ -390,7 +390,7 @@ def build(self, builder: flatbuffers.Builder) -> Offset:
def create_offset_vector(builder: flatbuffers.Builder, prop: typing.Callable, vec: typing.Iterable):
if type(vec) not in (tuple, list):
assert False, "type of vec unexpected, expected: list or tuple"
elif type(vec) == tuple:
elif type(vec) is tuple:
vec = list(vec)

prop_name = prop.__name__
@@ -426,7 +426,7 @@ def create_numpy_array(builder: flatbuffers.Builder, prop: typing.Callable, vec:


def create_string(builder: flatbuffers.Builder, prop: typing.Callable, val: str):
if type(val) != str:
if type(val) is not str:
assert False, "type of val unexpected, expected: str"

prop_name = prop.__name__
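In the TFLite tensor wrapper, the constructor dispatches on the concrete type of its input (FakeQuantTensor, numpy array, torch.Tensor, torch.Size, or a plain tuple/list), so identity checks keep each branch unambiguous. A schematic, simplified version of that dispatch, not the real constructor:

```python
import numpy as np
import torch

def to_numpy(value, dtype=None):
    # Simplified mirror of the branching in tflite/base.py, using `is` per E721.
    if type(value).__module__ == 'numpy':
        return value
    elif type(value) is torch.Tensor:
        return value.detach().numpy()
    elif type(value) is torch.Size:
        return np.asarray(value, dtype='int32')
    elif type(value) in (tuple, list):
        return np.asarray(value, dtype=dtype)
    raise AssertionError(f"unexpected input type: {type(value)}")

print(to_numpy(torch.zeros(2, 3).size()))  # -> array([2, 3], dtype=int32)
```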
20 changes: 10 additions & 10 deletions tinynn/converter/operators/torch/aten.py
@@ -1561,7 +1561,7 @@ def parse(self, node, attrs, args, graph_converter):
self.run(node)

dim = self.input_tensors[1]
assert type(dim) == int
assert type(dim) is int

if dim < 0:
dim += self.input_tensors[0][0].ndim + 1
@@ -1619,7 +1619,7 @@ def parse(self, node, attrs, args, graph_converter):
self.run(node)

dim = self.input_tensors[1]
assert type(dim) == int
assert type(dim) is int

if dim < 0:
dim += self.input_tensors[0][0].ndim
@@ -2067,8 +2067,8 @@ def parse(self, node, attrs, args, graph_converter):
input_tensor = self.find_or_create_input(0, graph_converter)
dim, index = self.input_tensors[1:]

assert type(dim) == int
assert type(index) == int
assert type(dim) is int
assert type(index) is int

if dim < 0:
dim += input_tensor.tensor.ndim
@@ -2166,11 +2166,11 @@ def parse(self, node, attrs, args, graph_converter):
self.parse_common(node, attrs, args, graph_converter)

def parse_common(self, node, attrs, args, graph_converter):
if type(self) == ATenClampOperator:
if type(self) is ATenClampOperator:
min_value, max_value = self.input_tensors[1:]
elif type(self) == ATenClampMinOperator:
elif type(self) is ATenClampMinOperator:
min_value, max_value = self.input_tensors[1], None
elif type(self) == ATenClampMaxOperator:
elif type(self) is ATenClampMaxOperator:
min_value, max_value = None, self.input_tensors[1]

has_min = min_value is not None
@@ -3808,7 +3808,7 @@ def parse(self, node, attrs, args, graph_converter):
def parse_common(self, graph_converter, input_idx=0, mask_idx=1, other_idx=2, out_idx=0):
for i in (input_idx, other_idx):
t = self.input_tensors[i]
if type(t) == torch.Tensor:
if type(t) is torch.Tensor:
if t.dtype == torch.float64:
self.input_tensors[i] = t.to(dtype=torch.float32)
elif t.dtype == torch.int64:
@@ -3826,7 +3826,7 @@ def parse_common(self, graph_converter, input_idx=0, mask_idx=1, other_idx=2, ou
input_tensor, mask_tensor = [self.find_or_create_input(i, graph_converter) for i in (input_idx, mask_idx)]

ops = []
if type(other) == torch.Tensor:
if type(other) is torch.Tensor:
other_t = self.find_or_create_input(other_idx, graph_converter)
if out.dtype != other.dtype:
casted = other.clone().to(dtype=out.dtype)
@@ -4491,4 +4491,4 @@ def parse(self, node, attrs, args, graph_converter):
ops.append(tfl.TileOperator([actual_input, repeat_tensor], [outp]))

for op in ops:
graph_converter.add_operator(op)
graph_converter.add_operator(op)
6 changes: 3 additions & 3 deletions tinynn/converter/operators/torch/base.py
@@ -190,7 +190,7 @@ def to_tfl_tensors(
tfl_tensors = []
if has_buffers is None:
has_buffers = [None] * len(tensors)
elif type(has_buffers) == bool:
elif type(has_buffers) is bool:
has_buffers = [has_buffers] * len(tensors)
assert len(names) == len(tensors) == len(has_buffers)
for n, t, b in zip(names, tensors, has_buffers):
@@ -491,7 +491,7 @@ def handle_padding(self, pad_h, pad_w, pad_op_index, ops, ceil_mode=False):
input_size = [input_tensor.shape[2], input_tensor.shape[3]]

if not all((i + 2 * p - k) % s == 0 for i, p, k, s in zip(input_size, padding, kernel_size, stride)):
assert type(ops[1]) == tfl.MaxPool2dOperator, 'ceil_mode=True for AvgPool not supported'
assert type(ops[1]) is tfl.MaxPool2dOperator, 'ceil_mode=True for AvgPool not supported'
fill_nan = True
ceil_pad = get_pool_ceil_padding(input_tensor, kernel_size, stride, padding)
ceil_pad = list(np.add(ceil_pad, padding))
@@ -503,7 +503,7 @@ def handle_padding(self, pad_h, pad_w, pad_op_index, ops, ceil_mode=False):
pad_input = ops[pad_op_index - 1].outputs[0]

inputs = [pad_input, pad_tensor]
if type(ops[1]) == tfl.MaxPool2dOperator:
if type(ops[1]) is tfl.MaxPool2dOperator:
constant_tensor = self.get_minimum_constant(pad_input)
inputs.append(constant_tensor)
pad_array = np.pad(pad_input.tensor, pad, constant_values=constant_tensor.tensor[0])
2 changes: 1 addition & 1 deletion tinynn/converter/operators/torch/quantized.py
@@ -153,7 +153,7 @@ def parse(self, node, attrs, args, graph_converter):
self.run(node)

dim = self.input_tensors[1]
assert type(dim) == int
assert type(dim) is int

if dim < 0:
dim += self.input_tensors[0][0].ndim
2 changes: 1 addition & 1 deletion tinynn/graph/configs/gen_creation_funcs_yml.py
@@ -17,7 +17,7 @@
if k in block_list:
continue
c = getattr(torch, k)
if inspect.isclass(c) and k.endswith('Tensor') and c.__bases__[0] == object:
if inspect.isclass(c) and k.endswith('Tensor') and c.__bases__[0] is object:
print(k)
final_dict['torch'].append(k)
elif inspect.isbuiltin(c):
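This generator script keeps only the `*Tensor` classes in `torch`'s namespace that derive directly from `object`, and the base-class test is likewise an identity check now. A standalone approximation of that filter (output depends on the installed PyTorch version):

```python
import inspect
import torch

direct_tensor_types = [
    name for name in dir(torch)
    if inspect.isclass(getattr(torch, name))
    and name.endswith('Tensor')
    and getattr(torch, name).__bases__[0] is object  # direct subclass of object
]
print(direct_tensor_types)
```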