Merge branch 'main' into datasets/glaciers_alps
dcodrut authored Jan 20, 2025
2 parents 640f72a + 2419f64 · commit 0ca7b34
Showing 23 changed files with 42 additions and 49 deletions.
2 changes: 0 additions & 2 deletions .github/dependabot.yml
@@ -27,8 +27,6 @@ updates:
# https://github.com/pytorch/pytorch_sphinx_theme/issues/175
- dependency-name: 'sphinx'
versions: '>=6'
-# segmentation-models-pytorch pins timm, must update in unison
-- dependency-name: 'timm'
- package-ecosystem: 'npm'
directory: '/'
schedule:
2 changes: 1 addition & 1 deletion docs/api/datasets/non_geo_datasets.csv
@@ -63,4 +63,4 @@ Dataset,Task,Source,License,# Samples,# Classes,Size (px),Resolution (m),Bands
`VHR-10`_,I,"Google Earth, Vaihingen","MIT",800,10,"358--1,728",0.08--2,RGB
`Western USA Live Fuel Moisture`_,R,"Landsat8, Sentinel-1","CC-BY-NC-ND-4.0",2615,-,-,-,-
`xView2`_,CD,Maxar,"CC-BY-NC-SA-4.0","3,732",4,"1,024x1,024",0.8,RGB
-`ZueriCrop`_,"I, T",Sentinel-2,-,116K,48,24x24,10,MSI
+`ZueriCrop`_,"I, T",Sentinel-2,CC-BY-NC-4.0,116K,48,24x24,10,MSI
2 changes: 1 addition & 1 deletion experiments/ssl4eo/flops.py
@@ -17,7 +17,7 @@
for model in models:
print(f'Model: {model}')

-m = timm.create_model(model, num_classes=num_classes, in_chans=in_channels)
+m = timm.create_model(model, num_classes=num_classes, in_chans=in_channels) # type: ignore[attr-defined]

# Calculate memory requirements of model
mem_params = sum([p.nelement() * p.element_size() for p in m.parameters()])
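The mem_params line in this hunk only counts parameter storage. A self-contained sketch of the same estimate, extended to also count registered buffers; the model name and channel count are placeholders rather than values from the script, which loops over several architectures:

import timm

m = timm.create_model('resnet50', num_classes=10, in_chans=13)  # type: ignore[attr-defined]

# Static footprint only: parameter storage plus non-parameter buffers, in bytes.
mem_params = sum(p.nelement() * p.element_size() for p in m.parameters())
mem_bufs = sum(b.nelement() * b.element_size() for b in m.buffers())
print(f'{(mem_params + mem_bufs) / 1024**2:.1f} MiB')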
6 changes: 3 additions & 3 deletions requirements/required.txt
@@ -8,15 +8,15 @@ kornia==0.8.0
lightly==1.5.16
lightning[pytorch-extra]==2.5.0.post0
matplotlib==3.10.0
-numpy==2.2.1
+numpy==2.2.2
pandas==2.2.3
pillow==11.1.0
pyproj==3.7.0
rasterio==1.4.3
rtree==1.3.0
-segmentation-models-pytorch==0.3.4
+segmentation-models-pytorch==0.4.0
shapely==2.0.6
-timm==0.9.7
+timm==1.0.14
torch==2.5.1
torchmetrics==1.6.1
torchvision==0.20.1
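The timm 0.9.7 → 1.0.14 and segmentation-models-pytorch 0.3.4 → 0.4.0 bumps are what allow the dependabot pin above to be dropped, and they are presumably also why every timm.create_model call in the rest of this diff gains a # type: ignore[attr-defined] comment. A minimal sketch of the call pattern those files share; the band and class counts here are illustrative, not taken from the diff:

import timm
import torch

# timm adapts the stem convolution when in_chans differs from 3, which is how
# the trainers below feed multispectral imagery into ImageNet-style backbones.
model = timm.create_model('resnet18', in_chans=13, num_classes=10)  # type: ignore[attr-defined]

x = torch.randn(2, 13, 224, 224)
assert model(x).shape == (2, 10)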
12 changes: 3 additions & 9 deletions tests/datasets/test_zuericrop.py
@@ -18,17 +18,11 @@
class TestZueriCrop:
@pytest.fixture
def dataset(self, monkeypatch: MonkeyPatch, tmp_path: Path) -> ZueriCrop:
-data_dir = os.path.join('tests', 'data', 'zuericrop')
-urls = [
-os.path.join(data_dir, 'ZueriCrop.hdf5'),
-os.path.join(data_dir, 'labels.csv'),
-]
-md5s = ['1635231df67f3d25f4f1e62c98e221a4', '5118398c7a5bbc246f5f6bb35d8d529b']
-monkeypatch.setattr(ZueriCrop, 'urls', urls)
-monkeypatch.setattr(ZueriCrop, 'md5s', md5s)
+url = os.path.join('tests', 'data', 'zuericrop') + os.sep
+monkeypatch.setattr(ZueriCrop, 'url', url)
root = tmp_path
transforms = nn.Identity()
-return ZueriCrop(root=root, transforms=transforms, download=True, checksum=True)
+return ZueriCrop(root=root, transforms=transforms, download=True)

def test_getitem(self, dataset: ZueriCrop) -> None:
x = dataset[0]
6 changes: 3 additions & 3 deletions tests/models/test_resnet.py
@@ -34,7 +34,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model('resnet18', in_chans=weights.meta['in_chans'])
+model = timm.create_model('resnet18', in_chans=weights.meta['in_chans']) # type: ignore[attr-defined]
torch.save(model.state_dict(), path)
try:
monkeypatch.setattr(weights.value, 'url', str(path))
@@ -78,7 +78,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model('resnet50', in_chans=weights.meta['in_chans'])
+model = timm.create_model('resnet50', in_chans=weights.meta['in_chans']) # type: ignore[attr-defined]
torch.save(model.state_dict(), path)
try:
monkeypatch.setattr(weights.value, 'url', str(path))
@@ -122,7 +122,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model('resnet152', in_chans=weights.meta['in_chans'])
+model = timm.create_model('resnet152', in_chans=weights.meta['in_chans']) # type: ignore[attr-defined]
torch.save(model.state_dict(), path)
try:
monkeypatch.setattr(weights.value, 'url', str(path))
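The three hunks above follow one fixture pattern that recurs in the trainer tests below: save a randomly initialized timm model under tmp_path and monkeypatch the weights enum so the "pretrained" checkpoint is read from disk instead of the network. A condensed sketch; the enum member is chosen purely for illustration (the real fixtures receive weights as an argument), and the try fallback visible in the real code is omitted:

from pathlib import Path

import timm
import torch
from pytest import MonkeyPatch

from torchgeo.models import ResNet18_Weights


def mocked_weights(tmp_path: Path, monkeypatch: MonkeyPatch) -> ResNet18_Weights:
    weights = ResNet18_Weights.SENTINEL2_ALL_MOCO  # illustrative member
    path = tmp_path / f'{weights}.pth'
    # Random weights are fine: the tests only exercise the loading machinery.
    model = timm.create_model('resnet18', in_chans=weights.meta['in_chans'])  # type: ignore[attr-defined]
    torch.save(model.state_dict(), path)
    # Redirect the enum's url to the local checkpoint so nothing is downloaded.
    monkeypatch.setattr(weights.value, 'url', str(path))
    return weights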
2 changes: 1 addition & 1 deletion tests/models/test_vit.py
@@ -27,7 +27,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model(
+model = timm.create_model( # type: ignore[attr-defined]
weights.meta['model'], in_chans=weights.meta['in_chans']
)
torch.save(model.state_dict(), path)
2 changes: 1 addition & 1 deletion tests/trainers/test_byol.py
@@ -89,7 +89,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model(
+model = timm.create_model( # type: ignore[attr-defined]
weights.meta['model'], in_chans=weights.meta['in_chans']
)
torch.save(model.state_dict(), path)
2 changes: 1 addition & 1 deletion tests/trainers/test_classification.py
@@ -126,7 +126,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model(
+model = timm.create_model( # type: ignore[attr-defined]
weights.meta['model'], in_chans=weights.meta['in_chans']
)
torch.save(model.state_dict(), path)
2 changes: 1 addition & 1 deletion tests/trainers/test_moco.py
@@ -91,7 +91,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model(
+model = timm.create_model( # type: ignore[attr-defined]
weights.meta['model'], in_chans=weights.meta['in_chans']
)
torch.save(model.state_dict(), path)
4 changes: 2 additions & 2 deletions tests/trainers/test_regression.py
@@ -115,7 +115,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model(
+model = timm.create_model( # type: ignore[attr-defined]
weights.meta['model'], in_chans=weights.meta['in_chans']
)
torch.save(model.state_dict(), path)
@@ -273,7 +273,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model(
+model = timm.create_model( # type: ignore[attr-defined]
weights.meta['model'], in_chans=weights.meta['in_chans']
)
torch.save(model.state_dict(), path)
2 changes: 1 addition & 1 deletion tests/trainers/test_segmentation.py
@@ -138,7 +138,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model(
+model = timm.create_model( # type: ignore[attr-defined]
weights.meta['model'], in_chans=weights.meta['in_chans']
)
torch.save(model.state_dict(), path)
2 changes: 1 addition & 1 deletion tests/trainers/test_simclr.py
@@ -89,7 +89,7 @@ def mocked_weights(
load_state_dict_from_url: None,
) -> WeightsEnum:
path = tmp_path / f'{weights}.pth'
-model = timm.create_model(
+model = timm.create_model( # type: ignore[attr-defined]
weights.meta['model'], in_chans=weights.meta['in_chans']
)
torch.save(model.state_dict(), path)
2 changes: 1 addition & 1 deletion tests/trainers/test_utils.py
@@ -34,7 +34,7 @@ def test_extract_backbone_unsupported_model(tmp_path: Path) -> None:


def test_get_input_layer_name_and_module() -> None:
-key, module = _get_input_layer_name_and_module(timm.create_model('resnet18'))
+key, module = _get_input_layer_name_and_module(timm.create_model('resnet18')) # type: ignore[attr-defined]
assert key == 'conv1'
assert isinstance(module, nn.Conv2d)
assert module.in_channels == 3
9 changes: 3 additions & 6 deletions torchgeo/datasets/zuericrop.py
@@ -52,10 +52,7 @@ class ZueriCrop(NonGeoDataset):
* `h5py <https://pypi.org/project/h5py/>`_ to load the dataset
"""

-urls = (
-'https://polybox.ethz.ch/index.php/s/uXfdr2AcXE3QNB6/download',
-'https://raw.githubusercontent.com/0zgur0/multi-stage-convSTAR-network/fa92b5b3cb77f5171c5c3be740cd6e6395cc29b6/labels.csv',
-)
+url = 'https://hf.co/datasets/torchgeo/zuericrop/resolve/8ac0f416fbaab032d8670cc55f984b9f079e86b2/'
md5s = ('1635231df67f3d25f4f1e62c98e221a4', '5118398c7a5bbc246f5f6bb35d8d529b')
filenames = ('ZueriCrop.hdf5', 'labels.csv')

@@ -221,11 +218,11 @@ def _verify(self) -> None:

def _download(self) -> None:
"""Download the dataset."""
-for url, filename, md5 in zip(self.urls, self.filenames, self.md5s):
+for filename, md5 in zip(self.filenames, self.md5s):
filepath = os.path.join(self.root, filename)
if not os.path.exists(filepath):
download_url(
-url,
+self.url + filename,
self.root,
filename=filename,
md5=md5 if self.checksum else None,
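With the switch from two hard-coded urls to a single Hugging Face base url, each file's address is simply url + filename, as in the loop above; that is also what lets the test fixture point ZueriCrop.url at a local directory ending in os.sep. A small usage sketch with placeholder paths (h5py must be installed, as the class docstring notes):

from torch import nn

from torchgeo.datasets import ZueriCrop

# With download=True the two files are fetched from ZueriCrop.url + filename
# and, when checksum=True, verified against ZueriCrop.md5s.
ds = ZueriCrop(root='data/zuericrop', transforms=nn.Identity(), download=True)
sample = ds[0]  # dict of tensors, as exercised by test_getitem above
print(len(ds), sample['image'].shape)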
6 changes: 3 additions & 3 deletions torchgeo/models/resnet.py
@@ -768,7 +768,7 @@ def resnet18(
if weights:
kwargs['in_chans'] = weights.meta['in_chans']

-model: ResNet = timm.create_model('resnet18', *args, **kwargs)
+model: ResNet = timm.create_model('resnet18', *args, **kwargs) # type: ignore[attr-defined]

if weights:
missing_keys, unexpected_keys = model.load_state_dict(
@@ -803,7 +803,7 @@ def resnet50(
if weights:
kwargs['in_chans'] = weights.meta['in_chans']

-model: ResNet = timm.create_model('resnet50', *args, **kwargs)
+model: ResNet = timm.create_model('resnet50', *args, **kwargs) # type: ignore[attr-defined]

if weights:
missing_keys, unexpected_keys = model.load_state_dict(
@@ -837,7 +837,7 @@ def resnet152(
if weights:
kwargs['in_chans'] = weights.meta['in_chans']

-model: ResNet = timm.create_model('resnet152', *args, **kwargs)
+model: ResNet = timm.create_model('resnet152', *args, **kwargs) # type: ignore[attr-defined]

if weights:
missing_keys, unexpected_keys = model.load_state_dict(
14 changes: 9 additions & 5 deletions torchgeo/models/scale_mae.py
@@ -92,7 +92,7 @@ def get_1d_sincos_pos_embed_from_grid_torch(embed_dim: int, pos: Tensor) -> Tensor:
return emb


-class ScaleMAE(VisionTransformer): # type: ignore[misc]
+class ScaleMAE(VisionTransformer):
"""Custom Vision Transformer for Scale-MAE with GSD positional embeddings.
This is a ViT encoder only model of the Scale-MAE architecture with GSD positional embeddings.
@@ -117,7 +117,8 @@ def __init__(self, res: float = 1.0, *args: Any, **kwargs: Any) -> None:
self.res = res

# Scale MAE uses resolution specific positional embeddings
-self.pos_embed.requires_grad = False
+if self.pos_embed is not None:
+self.pos_embed.requires_grad = False

def _pos_embed(self, x: Tensor) -> Tensor:
"""Apply GSD positional embeddings to the input tensor."""
@@ -133,8 +134,9 @@ def _pos_embed(self, x: Tensor) -> Tensor:
.to(x.dtype)
.to(x.device)
)
-cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
-x = torch.cat((cls_tokens, x), dim=1)
+if self.cls_token is not None:
+cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
+x = torch.cat((cls_tokens, x), dim=1)
x = x + pos_embed
x = self.pos_drop(x)
return x
@@ -155,7 +157,9 @@ def interpolate_pos_embed(
pos_embed_checkpoint = state_dict['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
-num_extra_tokens = model.pos_embed.shape[-2] - num_patches
+num_extra_tokens = 0
+if model.pos_embed is not None:
+num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
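The new is-not-None guards reflect that, in recent timm releases, a VisionTransformer may be built without a class token or without learnable position embeddings, so both attributes are optional. A sketch of the case the guards defend against; the constructor flags used here are assumptions about timm's API rather than something this diff exercises:

import timm
from timm.models.vision_transformer import VisionTransformer

# Built without a class token, so cls_token is None and _pos_embed must not
# try to expand it; pooling falls back to the mean over patch tokens.
vit: VisionTransformer = timm.create_model(  # type: ignore[attr-defined]
    'vit_small_patch16_224', class_token=False, global_pool='avg'
)
assert vit.cls_token is None

# The ScaleMAE-style freeze only makes sense when the embedding exists.
if vit.pos_embed is not None:
    vit.pos_embed.requires_grad = False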
2 changes: 1 addition & 1 deletion torchgeo/models/vit.py
@@ -243,7 +243,7 @@ def vit_small_patch16_224(
if weights:
kwargs['in_chans'] = weights.meta['in_chans']

-model: VisionTransformer = timm.create_model(
+model: VisionTransformer = timm.create_model( # type: ignore[attr-defined]
'vit_small_patch16_224', *args, **kwargs
)

2 changes: 1 addition & 1 deletion torchgeo/trainers/byol.py
@@ -332,7 +332,7 @@ def configure_models(self) -> None:
in_channels: int = self.hparams['in_channels']

# Create backbone
-backbone = timm.create_model(
+backbone = timm.create_model( # type: ignore[attr-defined]
self.hparams['model'], in_chans=in_channels, pretrained=weights is True
)

2 changes: 1 addition & 1 deletion torchgeo/trainers/classification.py
@@ -80,7 +80,7 @@ def configure_models(self) -> None:
weights = self.weights

# Create model
-self.model = timm.create_model(
+self.model = timm.create_model( # type: ignore[attr-defined]
self.hparams['model'],
num_classes=self.hparams['num_classes'],
in_chans=self.hparams['in_channels'],
4 changes: 2 additions & 2 deletions torchgeo/trainers/moco.py
@@ -238,10 +238,10 @@ def configure_models(self) -> None:
output_dim: int = self.hparams['output_dim']

# Create backbone
-self.backbone = timm.create_model(
+self.backbone = timm.create_model( # type: ignore[attr-defined]
model, in_chans=in_channels, num_classes=0, pretrained=weights is True
)
-self.backbone_momentum = timm.create_model(
+self.backbone_momentum = timm.create_model( # type: ignore[attr-defined]
model, in_chans=in_channels, num_classes=0, pretrained=weights is True
)
deactivate_requires_grad(self.backbone_momentum)
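configure_models builds two identical backbones and freezes the second with lightly's deactivate_requires_grad; during training the frozen copy is nudged toward the online one rather than updated by gradients. A sketch of that bookkeeping using the lightly release pinned in requirements; the channel count and momentum value are illustrative, and the real trainer manages the momentum update itself:

import timm
from lightly.models.utils import deactivate_requires_grad, update_momentum

backbone = timm.create_model('resnet18', in_chans=13, num_classes=0)  # type: ignore[attr-defined]
backbone_momentum = timm.create_model('resnet18', in_chans=13, num_classes=0)  # type: ignore[attr-defined]

# The momentum encoder receives no gradients...
deactivate_requires_grad(backbone_momentum)

# ...and is instead moved toward the online encoder by an exponential moving average.
update_momentum(backbone, backbone_momentum, m=0.99)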
2 changes: 1 addition & 1 deletion torchgeo/trainers/regression.py
@@ -83,7 +83,7 @@ def configure_models(self) -> None:
"""Initialize the model."""
# Create model
weights = self.weights
-self.model = timm.create_model(
+self.model = timm.create_model( # type: ignore[attr-defined]
self.hparams['model'],
num_classes=self.hparams['num_outputs'],
in_chans=self.hparams['in_channels'],
2 changes: 1 addition & 1 deletion torchgeo/trainers/simclr.py
@@ -153,7 +153,7 @@ def configure_models(self) -> None:
weights = self.weights

# Create backbone
-self.backbone = timm.create_model(
+self.backbone = timm.create_model( # type: ignore[attr-defined]
self.hparams['model'],
in_chans=self.hparams['in_channels'],
num_classes=0,
