From b1c054ce9ff87b543cc4651fae7e9977b0209b08 Mon Sep 17 00:00:00 2001
From: "Alex J. Champandard"
Date: Sat, 29 Oct 2016 00:39:32 +0200
Subject: [PATCH] Improve the README for applying and training models.

---
 README.rst | 34 ++++++++++++++++++++--------------
 enhance.py |  2 +-
 2 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/README.rst b/README.rst
index 480dd07..94b3f3f 100644
--- a/README.rst
+++ b/README.rst
@@ -19,8 +19,6 @@ The catch? The neural network is hallucinating details based on its training fro
 
 |Python Version| |License Type| |Project Stars|
 
-----
-
 1. Examples & Usage
 ===================
 
@@ -36,29 +34,37 @@ The default is to use ``--device=cpu``, if you have NVIDIA card setup with CUDA
 
 .. code:: bash
 
-    # Run the super-resolution script for one or more images.
+    # Run the super-resolution script for one image.
     python3 enhance.py example.png
 
-    # Display output image that has `_enhanced.png` suffix.
-    open example_enhanced.png
+    # Also process multiple files with a single run.
+    python3 enhance.py file1.jpg file2.jpg
+
+    # Display output images that were given the `_ne4x.png` suffix.
+    open *_ne4x.png
 
 
 1.b) Training Super-Resolution
 ------------------------------
 
+Pre-trained models are provided in the GitHub releases. Training your own is a delicate process that may require you to pick parameters based on your image dataset.
+
 .. code:: bash
 
-    rm -f ne4x.pkl.bz2
+    # Remove the model file, as we don't want to reload the data to fine-tune it.
+    rm -f ne4x*.pkl.bz2
 
-    python3.4 enhance.py --train --epochs=25 \
-        --scales=2 --perceptual-layer=conv2_2 \
-        --generator-block=16 --generator-filters=128 \
-        --smoothness-weight=1e7 --adversary-weight=0.0
+    # Pre-train the model using perceptual loss from paper [1] below.
+    python3.4 enhance.py --train --scales=2 --epochs=50 \
+        --perceptual-layer=conv2_2 --smoothness-weight=1e7 --adversary-weight=0.0 \
+        --generator-blocks=4 --generator-filters=64
+
+    # Train the model using an adversarial setup based on [4] below.
+    python3.4 enhance.py --train --scales=2 --epochs=250 \
+        --perceptual-layer=conv5_2 --smoothness-weight=2e4 --adversary-weight=1e3 \
+        --generator-start=5 --discriminator-start=0 --adversarial-start=5 \
+        --discriminator-size=64
 
-    python3.4 enhance.py --train --epochs=250 \
-        --scales=2 --perceptual-layer=conv5_2 \
-        --smoothness-weight=5e4 --adversary-weight=2e2 \
-        --generator-start=1 --discriminator-start=0 --adversarial-start=1
 
 
 .. image:: docs/BankLobby_example.gif

diff --git a/enhance.py b/enhance.py
index 63ad0eb..27ba310 100644
--- a/enhance.py
+++ b/enhance.py
@@ -504,7 +504,7 @@ def process(self, image):
 for filename in args.files:
     print(filename)
     out = enhancer.process(scipy.ndimage.imread(filename, mode='RGB'))
-    out.save(os.path.splitext(filename)[0]+'_ne%ix-%s.png'%(2**args.scales, args.model))
+    out.save(os.path.splitext(filename)[0]+'_ne%ix.png'%(2**args.scales))
 
 if args.files:
     print(ansi.ENDC)
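
The following is not part of the patch itself; it is a minimal usage sketch of the renamed output convention the patch introduces, assuming the default ``--scales=2`` (4x upscaling) and a hypothetical input file ``example.png``:

.. code:: bash

    # After this patch, outputs are saved as <input>_ne4x.png; the model name
    # is no longer appended to the filename, so a default run looks like this:
    python3 enhance.py example.png
    open example_ne4x.png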