Skip to content

Commit

Permalink
Standardised DNN model names
Browse files Browse the repository at this point in the history
Standardised all models to a 3-letter code. This impacts lib_common.py in mewc-flow and lib_model.py and config.yaml (comments) in mewc-train.
  • Loading branch information
BWBrook committed Nov 1, 2024
1 parent aefd370 commit aea11c0
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion src/config.yaml
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
SEED: 12345 # Random seed for reproducibility of sampled datasets and model initialisation
MODEL: 'ENB0' # EN:[B0,B2,S,M,L,XL] (e.g. ENS): CN:[P,N,T,S,B,L] (e.g. CNB); ViT:[T,S,B,L] (e.g. ViTS) or pretrained filename with model as first 3 char e.g. 'ens_pt.keras'
MODEL: 'EN0' # EN:[0,2,S,M,L,XL] (e.g. ENS): CN:[P,N,T,S,B,L] (e.g. CNB); VT:[T,S,B,L] (e.g. VTS) or pretrained filename with model as first 3 char e.g. 'ens_pt.keras'
SAVEFILE: 'case_study' # Filename to save .keras model : MODEL name appended automatically
OUTPUT_PATH: '/data/output' # Save Path for output files (model, class map, confusion matrix): SAVEFILE/MODEL added automatically to path

Expand Down
14 changes: 7 additions & 7 deletions src/lib_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,22 +51,22 @@ def import_model(img_size, mname, REPO_ID, FILENAME):
# Dictionary mapping mod strings to model constructors
# print(kimm.list_models(weights="imagenet")) # to check all available kimm models
model_constructors = {
'enb0': kimm.models.EfficientNetV2B0, # 5 M model params : 26 MB frozen file size
'enb2': kimm.models.EfficientNetV2B2, # 9 M : 37 MB
'en0': kimm.models.EfficientNetV2B0, # 5 M model params : 26 MB frozen file size
'en2': kimm.models.EfficientNetV2B2, # 9 M : 37 MB
'ens': kimm.models.EfficientNetV2S, # 21 M : 84 MB
'enm': kimm.models.EfficientNetV2M, # 54 M : 216 MB
'enl': kimm.models.EfficientNetV2L, # 119 M : 475 MB
'enxl': kimm.models.EfficientNetV2XL, # 208 M : 835 MB
'enx': kimm.models.EfficientNetV2XL, # 208 M : 835 MB
'cnp': kimm.models.ConvNeXtPico, # 9 M : 35 MB
'cnn': kimm.models.ConvNeXtNano, # 16 M : 61 MB
'cnt': kimm.models.ConvNeXtTiny, # 29 M : 112 MB
'cns': kimm.models.ConvNeXtSmall, # 50 M : 200 MB
'cnb': kimm.models.ConvNeXtBase, # 89 M : 352 MB
'cnl': kimm.models.ConvNeXtLarge, # 198 M : 787 MB
'vitt': kimm.models.VisionTransformerTiny16, # 6 M : 23 MB
'vits': kimm.models.VisionTransformerSmall16, # 22 M : 88 MB
'vitb': kimm.models.VisionTransformerBase16, # 87 M : 346 MB
'vitl': kimm.models.VisionTransformerLarge16 # 305 M : 1.22 GB
'vtt': kimm.models.VisionTransformerTiny16, # 6 M : 23 MB
'vts': kimm.models.VisionTransformerSmall16, # 22 M : 88 MB
'vtb': kimm.models.VisionTransformerBase16, # 87 M : 346 MB
'vtl': kimm.models.VisionTransformerLarge16 # 305 M : 1.22 GB
# Note: actual trained model file size (MB/GB) will depend on number of blocks unfrozen during fine-tuning
}
try:
Expand Down

0 comments on commit aea11c0

Please sign in to comment.