Commit

Changes of last 4 months (code, not data)
kevinmicha committed Dec 11, 2023
1 parent db79762 commit 78ea24f
Showing 20 changed files with 7,146 additions and 11,759 deletions.
24 changes: 24 additions & 0 deletions .gitignore
@@ -128,3 +128,27 @@ dmypy.json
# Pyre type checker
.pyre/
.DS_Store

# Checkpoints (too heavy to upload them all)
checkpoints/agnostic_all_modes/seed_13/
checkpoints/agnostic_all_modes/seed_23/
checkpoints/agnostic_all_modes/seed_398/
checkpoints/agnostic_all_modes/seed_447/
checkpoints/contact_maps/seed_13/
checkpoints/contact_maps/seed_23/
checkpoints/contact_maps/seed_398/
checkpoints/contact_maps/seed_447/
checkpoints/contact_maps/seed_917/
checkpoints/full_ags_all_modes/seed_13/
checkpoints/full_ags_all_modes/seed_23/
checkpoints/full_ags_all_modes/seed_398/
checkpoints/full_ags_all_modes/seed_447/
checkpoints/full_ags_all_modes/seed_917/

# Data not used
data/contact_maps_explorer/
data/contact_maps_with_antigen_explorer/
notebooks/biological_properties/

# Raw notebooks
notebooks/explainability_raw.ipynb
4 changes: 2 additions & 2 deletions AUTHORS.md
@@ -8,5 +8,5 @@ Core developer:

Supervisors:

- Barbara Bravi [[Website](https://www.imperial.ac.uk/people/b.bravi21)|[Github](https://github.com/bravib)|[Email](mailto:b.bravi21@imperial.ac.uk?subject=[GitHub]%20ANTIPASTI)]
- Mauricio Barahona [[Website](https://www.imperial.ac.uk/people/m.barahona/)|[Github](https://github.com/mauriciobarahona)|[Email](mailto:m.barahona@imperial.ac.uk?subject=[GitHub]%20ANTIPASTI)]
- Mauricio Barahona [[Website](https://www.imperial.ac.uk/people/m.barahona/)|[Github](https://github.com/mauriciobarahona)|[Email](mailto:m.barahona@imperial.ac.uk?subject=[GitHub]%20ANTIPASTI)]
- Barbara Bravi [[Website](https://www.imperial.ac.uk/people/b.bravi21)|[Github](https://github.com/bravib)|[Email](mailto:b.bravi21@imperial.ac.uk?subject=[GitHub]%20ANTIPASTI)]
2 changes: 2 additions & 0 deletions README.md
@@ -43,9 +43,11 @@ python setup.py install --user

ANTIPASTI requires the following Python packages:
* `adabelief-pytorch`
* `biopython`
* `matplotlib`
* `numpy`
* `opencv-python`
* `optuna`
* `pandas`
* `scikit-learn`
* `torch`
2 changes: 2 additions & 0 deletions antipasti-env.yml
@@ -16,5 +16,7 @@ dependencies:
- pip:
- adabelief-pytorch>=0.2.1
- beautifulsoup4>=4.12.2
- biopython>=1.79
- opencv-python>=4.7.0.68
- optuna>=3.1.1
- torch>=1.13.1
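
The pip section of antipasti-env.yml now pins biopython and optuna alongside the existing dependencies. A minimal sanity check that the pinned packages import at compatible versions (a hypothetical snippet, not part of the repository) could look like:

# Hypothetical check that the dependencies pinned in antipasti-env.yml are importable.
import Bio      # biopython>=1.79
import cv2      # opencv-python>=4.7.0.68
import optuna   # optuna>=3.1.1
import torch    # torch>=1.13.1

print(Bio.__version__, cv2.__version__, optuna.__version__, torch.__version__)
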
52 changes: 24 additions & 28 deletions antipasti/model/model.py
@@ -22,6 +22,10 @@ class ANTIPASTI(Module):
Size of the max pooling operation.
input_shape: int
Shape of the normal mode correlation maps.
l1_lambda: float
Weight of L1 regularisation.
mode: str
To use the full model, provide ``full``. Otherwise, ANTIPASTI corresponds to a linear classifier.
"""
def __init__(
@@ -31,27 +35,27 @@ def __init__(
pooling_size=1,
input_shape=281,
l1_lambda=0.002,
mode='full',
):
super(ANTIPASTI, self).__init__()
self.n_filters = n_filters
self.filter_size = filter_size
self.pooling_size = pooling_size
self.input_shape = input_shape
self.fully_connected_input = n_filters * ((input_shape-filter_size+1)//pooling_size) ** 2
self.conv1 = Conv2d(1, n_filters, filter_size)
#self.conv1_bn = torch.nn.BatchNorm2d(n_filters)
#torch.nn.init.normal_(self.conv1.weight, mean=0.0, std=1/input_shape)
#torch.nn.init.constant_(self.conv1.bias, 0)
self.pool = MaxPool2d(pooling_size, pooling_size)
self.dropit = Dropout(p=0.05)
self.relu = ReLU()
self.mode = mode
if self.mode == 'full':
self.fully_connected_input = n_filters * ((input_shape-filter_size+1)//pooling_size) ** 2
self.conv1 = Conv2d(1, n_filters, filter_size)
self.pool = MaxPool2d(pooling_size, pooling_size)
#self.dropit = Dropout(p=0.05)
self.relu = ReLU()
else:
self.fully_connected_input = self.input_shape ** 2
self.fc1 = Linear(self.fully_connected_input, 1, bias=False)
#self.fc1_bn = torch.nn.BatchNorm1d(self.fully_connected_input)
#torch.nn.init.normal_(self.fc1.weight, mean=0.0, std=1/np.sqrt(self.fully_connected_input))
#self.fc2 = Linear(self.fully_connected_input, 1, bias=False)
#self.fc2 = Linear(4, 1, bias=False)
self.l1_lambda = l1_lambda

def forward(self, input):
def forward(self, x):
r"""Model's forward pass.
Returns
@@ -62,24 +66,16 @@ def forward(self, input):
Filters before the fully-connected layer.
"""
#if torch.numel(torch.nonzero(input[0,0,-80:,-80:])) == 0:
# x = self.conv2(input) + torch.transpose(self.conv2(input), 2, 3)
#else:
# x = self.conv1(input) + torch.transpose(self.conv1(input), 2, 3)
x = self.conv1(input) + torch.transpose(self.conv1(input), 2, 3)
#x = self.conv1_bn(x)
x = self.relu(x)
x = self.pool(x)
inter = x
inter = x
if self.mode == 'full':
x = self.conv1(x) + torch.transpose(self.conv1(x), 2, 3)
x = self.relu(x)
inter = x = self.pool(x)
x = x.view(x.size(0), -1)
x = self.dropit(x)
#if torch.numel(torch.nonzero(input[0,0,-80:,-80:])) == 0:
# x = self.fc2(x)
# print('nano')
#else:
# x = self.fc1(x)
# print('paired')
#if self.mode == 'full':
# x = self.dropit(x)
x = self.fc1(x)
#x = self.fc2(x)

return x.float(), inter
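
With the new mode argument, ANTIPASTI switches between the full model (symmetrised convolution, ReLU and max pooling before the fully-connected layer) and a plain linear classifier over the flattened normal mode correlation map. A usage sketch of the updated class (the import path follows antipasti/model/model.py; the remaining constructor defaults, the dummy input size and the L1 penalty are illustrative assumptions, not code from this commit):

import torch
from antipasti.model.model import ANTIPASTI

# Full model: conv1 is applied and symmetrised, then ReLU and max pooling feed fc1.
model = ANTIPASTI(mode='full')
# Any other value of `mode` skips conv/pool and feeds the flattened map straight to fc1.
linear_model = ANTIPASTI(mode='linear')

# One normal mode correlation map of the default size (batch, channel, 281, 281); the shape is an assumption.
maps = torch.randn(1, 1, 281, 281)
pred, inter = model(maps)                # prediction and the filters before the fully-connected layer
pred_lin, inter_lin = linear_model(maps)

# Illustrative L1 penalty on the fully-connected weights, scaled by l1_lambda.
l1_penalty = model.l1_lambda * model.fc1.weight.abs().sum()
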

