diff --git a/basicsr/archs/rrdbnet_arch.py b/basicsr/archs/rrdbnet_arch.py
index e1f31bcad..63d07080c 100644
--- a/basicsr/archs/rrdbnet_arch.py
+++ b/basicsr/archs/rrdbnet_arch.py
@@ -35,7 +35,7 @@ def forward(self, x):
         x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
         x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
         x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
-        # Emperically, we use 0.2 to scale the residual for better performance
+        # Empirically, we use 0.2 to scale the residual for better performance
         return x5 * 0.2 + x
@@ -59,7 +59,7 @@ def forward(self, x):
         out = self.rdb1(x)
         out = self.rdb2(out)
         out = self.rdb3(out)
-        # Emperically, we use 0.2 to scale the residual for better performance
+        # Empirically, we use 0.2 to scale the residual for better performance
         return out * 0.2 + x
diff --git a/basicsr/models/base_model.py b/basicsr/models/base_model.py
index f06f9ca2c..05c8d2e13 100644
--- a/basicsr/models/base_model.py
+++ b/basicsr/models/base_model.py
@@ -149,7 +149,7 @@ def print_network(self, net):
         logger.info(net_str)
 
     def _set_lr(self, lr_groups_l):
-        """Set learning rate for warmup.
+        """Set learning rate for warm-up.
 
         Args:
             lr_groups_l (list): List for lr_groups, each for an optimizer.
@@ -171,7 +171,7 @@ def update_learning_rate(self, current_iter, warmup_iter=-1):
 
         Args:
             current_iter (int): Current iteration.
-            warmup_iter (int): Warmup iter numbers. -1 for no warmup.
+            warmup_iter (int): Warm-up iter numbers. -1 for no warm-up.
                 Default: -1.
         """
         if current_iter > 1:
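A note on the `update_learning_rate` docstring corrected above: the warm-up it describes ramps each optimizer group's learning rate over the first `warmup_iter` iterations, after which the regular schedulers take over. Below is a minimal sketch of linear warm-up for a standard PyTorch optimizer; the helper name `apply_linear_warmup` and the `init_lrs` argument are illustrative assumptions, not code from this patch.

```python
def apply_linear_warmup(optimizer, init_lrs, current_iter, warmup_iter=-1):
    """Linearly ramp each param group's lr up to its initial value.

    A warmup_iter of -1 disables warm-up, matching the docstring above.
    This is a hedged sketch, not the patched project's implementation.
    """
    if warmup_iter > 0 and current_iter < warmup_iter:
        for group, init_lr in zip(optimizer.param_groups, init_lrs):
            # lr grows from near zero at iteration 1 to init_lr at warmup_iter
            group['lr'] = init_lr * current_iter / warmup_iter
```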
diff --git a/basicsr/utils/misc.py b/basicsr/utils/misc.py
index 728fef857..c8d4a1403 100644
--- a/basicsr/utils/misc.py
+++ b/basicsr/utils/misc.py
@@ -132,7 +132,7 @@ def sizeof_fmt(size, suffix='B'):
         suffix (str): Suffix. Default: 'B'.
 
     Return:
-        str: Formatted file siz.
+        str: Formatted file size.
     """
     for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
         if abs(size) < 1024.0:
diff --git a/docs/Config.md b/docs/Config.md
index 98aa2df76..d894407ed 100644
--- a/docs/Config.md
+++ b/docs/Config.md
@@ -19,7 +19,7 @@ Taking `001_MSRResNet_x4_f64b16_DIV2K_1000k_B16G1_wandb` as an example:
 - `DIV2K`: Training data is DIV2K
 - `1000k`: Total training iteration is 1000k
 - `B16G1`: Batch size is 16; one GPU is used for training
-- `wandb`: Use wandb logger; the training process has beed uploaded to wandb server
+- `wandb`: Use wandb logger; the training process has been uploaded to wandb server
 
 **Note**: If `debug` is in the experiment name, it will enter the debug mode. That is, the program will log and validate more intensively and will not use `tensorboard logger` and `wandb logger`.
diff --git a/docs/ModelZoo.md b/docs/ModelZoo.md
index 773baa5c1..87463aff5 100644
--- a/docs/ModelZoo.md
+++ b/docs/ModelZoo.md
@@ -18,7 +18,7 @@ You can put the downloaded models in the `experiments/pretrained_models` folder.
 
 **[Download official pre-trained models]** ([Google Drive](https://drive.google.com/drive/folders/15DgDtfaLASQ3iAPJEVHQF49g9msexECG?usp=sharing), [百度网盘](https://pan.baidu.com/s/1R6Nc4v3cl79XPAiK0Toe7g))
 
-You can use the scrip to download pre-trained models from Google Drive.
+You can use the script to download pre-trained models from Google Drive.
 
 ```python
 python scripts/download_pretrained_models.py ESRGAN
diff --git a/experiments/pretrained_models/README.md b/experiments/pretrained_models/README.md
index 9aceac1bd..d66240b97 100644
--- a/experiments/pretrained_models/README.md
+++ b/experiments/pretrained_models/README.md
@@ -2,7 +2,7 @@
 
 **[Download official pre-trained models](https://drive.google.com/drive/folders/15DgDtfaLASQ3iAPJEVHQF49g9msexECG?usp=sharing)**
 
-You can use the scrip to download pre-trained models from Google Drive.
+You can use the script to download pre-trained models from Google Drive.
 
 ```python
 python scripts/download_pretrained_models.py
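One clarification for the `basicsr/utils/misc.py` hunk above: the corrected docstring belongs to `sizeof_fmt`, whose visible loop is the standard binary-prefix formatting recipe. A plausible completion is sketched below for reference; the `return` lines are an assumption based on that common recipe and do not appear in the patch.

```python
def sizeof_fmt(size, suffix='B'):
    """Format a byte count as a human-readable string.

    The return statements are an assumed completion of the loop shown in
    the hunk above; only the first two loop lines appear in the patch.
    """
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(size) < 1024.0:
            return f'{size:3.1f} {unit}{suffix}'
        size /= 1024.0
    # anything beyond zetta falls through and is reported in yotta
    return f'{size:3.1f} Y{suffix}'
```

Under these assumptions, `sizeof_fmt(3500000)` returns `'3.3 MB'`.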