From f297e445b45da7d2a2789e24d1ab12c70984aa72 Mon Sep 17 00:00:00 2001 From: Nicolas Charles Date: Sun, 17 Jul 2022 22:49:45 +0200 Subject: [PATCH 1/8] feat: Added multiple filters for video - Filters can be automatically selected via "filters" on settings.json - Can add as many filter as you want --- .gitignore | 2 + .pylintrc | 3 +- backend/requirements.txt | 2 +- backend/src/crispy/main.py | 23 +-- backend/src/crispy/utils/arguments.py | 5 + backend/src/crispy/utils/constants.py | 8 + backend/src/crispy/utils/ffmpeg_filters.py | 100 ++++++++++ backend/src/crispy/utils/ffmpeg_utils.py | 90 +++++---- backend/src/crispy/utils/filter.py | 179 ++++-------------- backend/src/crispy/utils/test_ffmpeg_utils.py | 67 +++---- backend/src/crispy/utils/test_filter.py | 25 ++- backend/src/crispy/video/video.py | 73 +++++-- settings_template.json | 11 ++ 13 files changed, 335 insertions(+), 253 deletions(-) create mode 100644 backend/src/crispy/utils/ffmpeg_filters.py create mode 100644 settings_template.json diff --git a/.gitignore b/.gitignore index b65f2b8..2960527 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,5 @@ build/ tmp/ outputs/ issues/ + +settings.json diff --git a/.pylintrc b/.pylintrc index f75957b..3cd34c6 100644 --- a/.pylintrc +++ b/.pylintrc @@ -16,7 +16,8 @@ disable= R0914, C0200, W0614, - W1203 + W1203, + C3001 string-quote=double diff --git a/backend/requirements.txt b/backend/requirements.txt index 014933c..a5fc7ba 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -17,7 +17,7 @@ ffmpeg-python==0.2.0 yapf==0.32.0 mypy==0.961 pylint==2.14.3 +pylint-quotes==0.2.3 # Tests pytube==12.1.0 -pylint-quotes==0.2.3 diff --git a/backend/src/crispy/main.py b/backend/src/crispy/main.py index 4f333d0..cd47476 100644 --- a/backend/src/crispy/main.py +++ b/backend/src/crispy/main.py @@ -11,14 +11,6 @@ def main(videos: List[str]) -> None: - ### FIXME: should be settings in settings.json - framerate = 8 - second_before = 2.5 - second_after = 1.5 - #### FIXME: should not be parameters to function - frames_before = int(second_before * framerate) - frames_after = int(second_after * framerate) - io.generate_tmp_folder(not args.no_extract) nn = NeuralNetwork([4000, 120, 15, 2], 0.01) @@ -33,7 +25,7 @@ def main(videos: List[str]) -> None: if not args.no_extract: io.generate_folder_clip(video_clean_name) - images_path = vid.extract_frames_from_video(video, framerate) + images_path = vid.extract_frames_from_video(video) else: images_path = vid.get_saving_path(video_clean_name) @@ -41,10 +33,15 @@ def main(videos: List[str]) -> None: io.clean_cuts(video_clean_name) query_array = vid.get_query_array_from_video(nn, images_path) - kill_array = vid.get_kill_array_from_query_array( - query_array, frames_before, frames_after) - - vid.segment_video_with_kill_array(video, kill_array, framerate) + l.debug(query_array) + kill_array = vid.get_kill_array_from_query_array(query_array) + l.debug(kill_array) + kill_array = vid.post_processing_kill_array(kill_array) + l.debug(kill_array) + vid.segment_video_with_kill_array(video, kill_array) + + if not args.no_merge: + vid.merge_cuts() if __name__ == "__main__": diff --git a/backend/src/crispy/utils/arguments.py b/backend/src/crispy/utils/arguments.py index f976182..df0c13b 100644 --- a/backend/src/crispy/utils/arguments.py +++ b/backend/src/crispy/utils/arguments.py @@ -21,6 +21,11 @@ help="Do not extract frames", action="store_true") +_parser.add_argument("--no-merge", + default=False, + help="Do not merge final videos", + 
action="store_true") + args = _parser.parse_args() if args.debug: diff --git a/backend/src/crispy/utils/constants.py b/backend/src/crispy/utils/constants.py index 4e9d88e..972785e 100644 --- a/backend/src/crispy/utils/constants.py +++ b/backend/src/crispy/utils/constants.py @@ -1,4 +1,5 @@ import os +import json BACKEND = "backend" OUTPUT = "output" @@ -29,3 +30,10 @@ NEURAL_NETWORK_PATH = os.path.join(ASSETS, "trained_network_latest.npy") ### CODE_PATH ### + +### SETTINGS ### +SETTINGS_PATH = "settings.json" +_f = open(SETTINGS_PATH, "r") +SETTINGS = json.load(_f) +_f.close() +### SETTINGS ### diff --git a/backend/src/crispy/utils/ffmpeg_filters.py b/backend/src/crispy/utils/ffmpeg_filters.py new file mode 100644 index 0000000..46bd91f --- /dev/null +++ b/backend/src/crispy/utils/ffmpeg_filters.py @@ -0,0 +1,100 @@ +import ffmpeg + + +def crop(video_path: str, save_path: str, _option: str) -> None: + ( + ffmpeg + .input(video_path) + .crop(x=960, y=540) + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable + + +def blur(video_path: str, save_path: str, option: str) -> None: + ( + ffmpeg + .input(video_path) + .filter("boxblur", option) # "luma_radius=2:luma_power=1" + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable + + +def scale(video_path: str, save_path: str, option: str) -> None: + ( + ffmpeg + .input(video_path) + .filter("scale", option) # "w=1280:h=720" + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable + + +def hflip(video_path: str, save_path: str, _option: str) -> None: + ( + ffmpeg + .input(video_path) + .hflip() + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable + + +def vflip(video_path: str, save_path: str, _option: str) -> None: + ( + ffmpeg + .input(video_path) + .vflip() + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable + + +def brightness(video_path: str, save_path: str, option: int) -> None: + ( + ffmpeg + .input(video_path) + .hue(b=option) + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable + + +def saturation(video_path: str, save_path: str, option: int) -> None: + ( + ffmpeg + .input(video_path) + .hue(s=option) + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable + + +def zoom(video_path: str, save_path: str, option: int) -> None: + ( + ffmpeg + .input(video_path) + .zoompan(z=option, fps=60, d=1, x="iw/2-(iw/zoom/2)", y="ih/2-(ih/zoom/2)") + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable + + +def grayscale(video_path: str, save_path: str, _option: str) -> None: + ( + ffmpeg + .input(video_path) + .hue(s=0) + .output(save_path, start_number=0) + .overwrite_output() + .run(quiet=True) + ) # yapf: disable diff --git a/backend/src/crispy/utils/ffmpeg_utils.py b/backend/src/crispy/utils/ffmpeg_utils.py index 2c6db06..42ad174 100644 --- a/backend/src/crispy/utils/ffmpeg_utils.py +++ b/backend/src/crispy/utils/ffmpeg_utils.py @@ -1,10 +1,13 @@ import os import random import string +import shutil from typing import Optional, Any, List, Tuple +from constants import SETTINGS import ffmpeg from PIL import Image, ImageFilter, ImageOps +from filter import filters BACKEND = "backend" DOT_PATH = os.path.join(BACKEND, "assets", "dot.png") @@ -46,7 +49,7 @@ def extract_images(video_path: str, ( ffmpeg 
.input(video_path) - .filter("framerate", framerate=f"1/{round(1 / framerate, 5)}") + .filter("fps", fps=f"1/{round(1 / framerate, 5)}") .crop(x=899, y=801, width=122, height=62) # .overlay(ffmpeg.input(DOT_PATH)) .output(os.path.join(save_path, "%8d.bmp"), start_number=0) @@ -78,6 +81,26 @@ def extract_images(video_path: str, final.save(im_path) +def segment_video(video_path: str, save_path: str, + frames: List[Tuple[int, int]], frame_duration: int) -> None: + """ + Segment a video on multiple smaller video using the frames array + """ + for frame in frames: + start = frame[0] / frame_duration + end = frame[1] / frame_duration + # print(start, end, frame_duration, video_path, save_path) + ( + ffmpeg + .input(video_path) + .output(os.path.join(save_path, f"{frame[0]}-{frame[1]}.mp4"), + ss=f"{start}", + to=f"{end}") + .overwrite_output() + .run(quiet=True) + ) # yaPf: disable + + def find_available_path(video_path: str) -> str: """ Find available path to store the scaled video temporarily. @@ -99,7 +122,7 @@ def scale_video(video_path: str) -> None: ( ffmpeg .input(video_path) - .filter('scale', w=1920, h=1080) + .filter("scale", w=1920, h=1080) .output(save_path, start_number=0) .overwrite_output() .run() @@ -108,6 +131,8 @@ def scale_video(video_path: str) -> None: os.remove(video_path) os.rename(save_path, video_path) # check if image has to be upscaled or downscaled ? + else: + raise FileNotFoundError(f"{video_path} not found") def create_new_path(video_path: str) -> str: @@ -128,55 +153,42 @@ def create_new_path(video_path: str) -> str: return res -def split_video_once(video_path: str, split: tuple) -> None: +# FIXME: audio +def merge_videos(videos_path: List[str], save_path: str) -> None: """ - Split a video between 2 timestamp + Merge videos together. 
""" - save_path = create_new_path(video_path) - if split[1] - split[0] > 0: + if len(videos_path) > 1: + videos: List[Any] = [] + for video_path in videos_path: + videos.append(ffmpeg.input(video_path)) ( ffmpeg - .input(video_path) - .trim(start_frame=split[0], end_frame=split[1]) + .concat(*videos) .output(save_path) .overwrite_output() .run(quiet=True) ) # yapf: disable + else: + shutil.copyfile(videos_path[0], save_path) -def _split_video(video_path: str, splits: list) -> None: - if os.path.exists(video_path): - for split in splits: - split_video_once(video_path, split) - - -def segment_video(video_path: str, save_path: str, - frames: List[Tuple[int, int]], frame_duration: int) -> None: - """ - Segment a video on multiple smaller video using the frames array - """ - for frame in frames: - start = frame[0] / frame_duration - end = frame[1] / frame_duration - # print(start, end, frame_duration, video_path, save_path) - ( - ffmpeg - .input(video_path) - .output(os.path.join(save_path, f"{frame[0]}-{frame[1]}.mp4"), - ss=f"{start}", - to=f"{end}") - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def apply_filter(video_path: str, filter_list: list, save_path: str) -> None: +def apply_filter(video_path: str, save_path: str) -> None: """ Apply a list of filter to a video """ - if not os.path.exists(save_path): - os.makedirs(save_path) + global_filters: List[filters] = [] + for filt in SETTINGS["filters"].items(): + global_filters.append(filters(filt[0], filt[1])) + print(video_path) if os.path.exists(video_path): - for filt in filter_list: - filt.execute(video_path, save_path) + tmp_video_path = video_path + for filt in global_filters: + tmp_save_path = find_available_path(tmp_video_path) + filt(tmp_video_path, tmp_save_path) + if tmp_video_path != video_path: + os.remove(tmp_video_path) + tmp_video_path = tmp_save_path + print(tmp_save_path) + os.rename(tmp_save_path, save_path) diff --git a/backend/src/crispy/utils/filter.py b/backend/src/crispy/utils/filter.py index 21458cc..7b29239 100644 --- a/backend/src/crispy/utils/filter.py +++ b/backend/src/crispy/utils/filter.py @@ -1,6 +1,6 @@ from enum import Enum import re -import ffmpeg +import ffmpeg_filters class no_value(Enum): @@ -17,128 +17,16 @@ class filter_value(no_value): Enum class containing all possible filters """ - CROP = 'crop' # "crop" - BLUR = 'Blur' # "boxblur" - SCALE = 'Scale' # "scale" - HFLIP = 'Hflip' # "horizontal flip" - VFLIP = 'Vflip' # "vertical flip" - BRIGHTNESS = 'Brightness' # "b" - SATURATION = 'Saturation' # "s" - ZOOM = 'Zoom' # "zoom" - GRAYSCALE = 'Grayscale' # "hue=s=0" - NONE = 'none' - - -def ex(video_path: str, save_path: str, filter_name: str, - filter_option: str) -> None: - ( - ffmpeg - .input(video_path) - .filter(filter_name, filter_option) - .output(save_path) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def crop(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .crop(x=960, y=540) - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def blur(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .filter("boxblur", "luma_radius=2:luma_power=1") - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def scale(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .filter('scale', 'w=1280:h=720') - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - 
- -def hflip(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .hflip() - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def vflip(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .vflip() - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def brightness(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .hue(b=8) - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def saturation(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .hue(s=8) - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def zoom(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .zoom(zoom=8) - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def grayscale(video_path: str, save_path: str) -> None: - ( - ffmpeg - .input(video_path) - .filter('format', "format=gray") - # .filter('colorchannelmixer', '.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3') - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable + CROP = "crop" # "crop" + BLUR = "Blur" # "boxblur" + SCALE = "Scale" # "scale" + HFLIP = "Hflip" # "horizontal flip" + VFLIP = "Vflip" # "vertical flip" + BRIGHTNESS = "Brightness" # "b" + SATURATION = "Saturation" # "s" + ZOOM = "Zoom" # "zoom" + GRAYSCALE = "Grayscale" # "hue=s=0" + NONE = "none" class filters(): @@ -146,40 +34,41 @@ class filters(): Class holding all filters """ - def __init__(self, name: str) -> None: + def __init__(self, name: str, option: str) -> None: # Add other parameter for different filters later ? 
- print(name) - if re.search(r'^[\s]*crop[\s]*$', name, re.IGNORECASE): + self.option = option + if re.search(r"^[\s]*crop[\s]*$", name, re.IGNORECASE): self.filter = filter_value.CROP - self.f = {'f': crop} - elif re.search(r'^[\s]*blur[\s]*$', name, re.IGNORECASE): + self.f = "crop" + elif re.search(r"^[\s]*blur[\s]*$", name, re.IGNORECASE): self.filter = filter_value.BLUR - self.f = {'f': blur} - elif re.search(r'^[\s]*scale[\s]*$', name, re.IGNORECASE): + self.f = "blur" + elif re.search(r"^[\s]*scale[\s]*$", name, re.IGNORECASE): self.filter = filter_value.SCALE - self.f = {'f': scale} - elif re.search(r'^[\s]*hflip[\s]*$', name, re.IGNORECASE): + self.f = "scale" + elif re.search(r"^[\s]*hflip[\s]*$", name, re.IGNORECASE): self.filter = filter_value.HFLIP - self.f = {'f': hflip} - elif re.search(r'^[\s]*vflip[\s]*$', name, re.IGNORECASE): + self.f = "hflip" + elif re.search(r"^[\s]*vflip[\s]*$", name, re.IGNORECASE): self.filter = filter_value.VFLIP - self.f = {'f': vflip} - elif re.search(r'^[\s]*brightness[\s]*$', name, re.IGNORECASE): + self.f = "vflip" + elif re.search(r"^[\s]*brightness[\s]*$", name, re.IGNORECASE): self.filter = filter_value.BRIGHTNESS - self.f = {'f': brightness} - elif re.search(r'^[\s]*saturation[\s]*$', name, re.IGNORECASE): + self.f = "brightness" + elif re.search(r"^[\s]*saturation[\s]*$", name, re.IGNORECASE): self.filter = filter_value.SATURATION - self.f = {'f': saturation} - elif re.search(r'^[\s]*zoom[\s]*$', name, re.IGNORECASE): + self.f = "saturation" + elif re.search(r"^[\s]*zoom[\s]*$", name, re.IGNORECASE): self.filter = filter_value.ZOOM - self.f = {'f': zoom} - elif re.search(r'^[\s]*grayscale[\s]*$', name, re.IGNORECASE): + self.f = "zoom" + elif re.search(r"^[\s]*grayscale[\s]*$", name, re.IGNORECASE): self.filter = filter_value.GRAYSCALE - self.f = {'f': grayscale} + self.f = "grayscale" else: self.filter = filter_value.NONE - def execute(self, video_path: str, save_path: str) -> None: + def __call__(self, video_path: str, save_path: str) -> None: if self.filter == filter_value.NONE: return None - return self.f['f'](video_path, save_path) + func = getattr(ffmpeg_filters, self.f) + return func(video_path, save_path, self.option) diff --git a/backend/src/crispy/utils/test_ffmpeg_utils.py b/backend/src/crispy/utils/test_ffmpeg_utils.py index 4c28221..c555d29 100644 --- a/backend/src/crispy/utils/test_ffmpeg_utils.py +++ b/backend/src/crispy/utils/test_ffmpeg_utils.py @@ -1,48 +1,48 @@ import os import cv2 from pytube import YouTube -from ffmpeg_utils import scale_video, _split_video +from ffmpeg_utils import scale_video, segment_video def test_basic() -> None: - os.mkdir('./test_mp4') - yt = YouTube('https://www.youtube.com/watch?v=6A-hTKYBkC4') - yt.streams.order_by('resolution').desc().first().download( - filename='./test_mp4/test_basic.mp4') - vid = cv2.VideoCapture('./test_mp4/test_basic.mp4') + os.mkdir("./test_mp4") + yt = YouTube("https://www.youtube.com/watch?v=6A-hTKYBkC4") + yt.streams.order_by("resolution").desc().first().download( + filename="./test_mp4/test_basic.mp4") + vid = cv2.VideoCapture("./test_mp4/test_basic.mp4") width = vid.get(cv2.CAP_PROP_FRAME_WIDTH) height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT) - os.remove('./test_mp4/test_basic.mp4') + os.remove("./test_mp4/test_basic.mp4") assert width == 1920 and height == 1080 def test_upscale() -> None: - yt = YouTube('https://www.youtube.com/watch?v=6A-hTKYBkC4') - yt.streams.order_by('resolution').asc().first().download( - filename='./test_mp4/test_upscale.mp4') + yt = 
YouTube("https://www.youtube.com/watch?v=6A-hTKYBkC4") + yt.streams.order_by("resolution").asc().first().download( + filename="./test_mp4/test_upscale.mp4") - scale_video('./test_mp4/test_upscale.mp4') - vid = cv2.VideoCapture('./test_mp4/test_upscale.mp4') + scale_video("./test_mp4/test_upscale.mp4") + vid = cv2.VideoCapture("./test_mp4/test_upscale.mp4") width = vid.get(cv2.CAP_PROP_FRAME_WIDTH) height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT) - os.remove('./test_mp4/test_upscale.mp4') + os.remove("./test_mp4/test_upscale.mp4") assert width == 1920 and height == 1080 def test_downscale() -> None: - yt = YouTube('https://www.youtube.com/watch?v=wZI9is9Ix90') - yt.streams.order_by('resolution').desc().first().download( - filename='./test_mp4/test_downscale.mp4') + yt = YouTube("https://www.youtube.com/watch?v=wZI9is9Ix90") + yt.streams.order_by("resolution").desc().first().download( + filename="./test_mp4/test_downscale.mp4") - scale_video('./test_mp4/test_downscale.mp4') - vid = cv2.VideoCapture('./test_mp4/test_downscale.mp4') + scale_video("./test_mp4/test_downscale.mp4") + vid = cv2.VideoCapture("./test_mp4/test_downscale.mp4") width = vid.get(cv2.CAP_PROP_FRAME_WIDTH) height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT) - os.remove('./test_mp4/test_downscale.mp4') - os.rmdir('./test_mp4') + os.remove("./test_mp4/test_downscale.mp4") + os.rmdir("./test_mp4") assert width == 1920 and height == 1080 @@ -75,27 +75,28 @@ def split_check(video_path: str, frames: int) -> bool: def cut_1_100(video_path: str) -> bool: - _split_video(video_path, [(0, 100)]) + segment_video(video_path, video_path, [(0, 100)], 1) return split_check(video_path, 100) def cut_10_100(video_path: str) -> bool: - _split_video(video_path, [(0, 100), (100, 200), (200, 300), (300, 400), - (400, 500), (500, 600), (600, 700), (700, 800), - (800, 900), (900, 1000)]) + segment_video(video_path, video_path, [(0, 100), (100, 200), (200, 300), + (300, 400), (400, 500), (500, 600), + (600, 700), (700, 800), (800, 900), + (900, 1000)], 1) return split_check(video_path, 100) def test_split() -> None: - os.mkdir('test_split') - yt = YouTube('https://www.youtube.com/watch?v=6A-hTKYBkC4') - yt.streams.order_by('resolution').desc().first().download( - filename='./test_split/test_basic.mp4') + os.mkdir("test_split") + yt = YouTube("https://www.youtube.com/watch?v=6A-hTKYBkC4") + yt.streams.order_by("resolution").desc().first().download( + filename="./test_split/test_basic.mp4") - assert cut_1_100('./test_split/test_basic.mp4') + assert cut_1_100("./test_split/test_basic.mp4") - yt.streams.order_by('resolution').desc().first().download( - filename='./test_split/test_basic.mp4') - assert cut_10_100('./test_split/test_basic.mp4') + yt.streams.order_by("resolution").desc().first().download( + filename="./test_split/test_basic.mp4") + assert cut_10_100("./test_split/test_basic.mp4") - abort('./test_split/test_basic.mp4') + abort("./test_split/test_basic.mp4") diff --git a/backend/src/crispy/utils/test_filter.py b/backend/src/crispy/utils/test_filter.py index 46053ce..f917497 100644 --- a/backend/src/crispy/utils/test_filter.py +++ b/backend/src/crispy/utils/test_filter.py @@ -1,17 +1,30 @@ +import os from filter import filters +from ffmpeg_utils import apply_filter +from constants import TMP_PATH, CUT def test_class() -> None: - crop = filters(" crop ") + crop = filters(" crop ", "1") assert crop.filter.name == "CROP" - crop = filters(" ZOOM ") + crop = filters(" ZOOM ", "1") assert crop.filter.name == "ZOOM" - crop = filters(" CrOpdafd ") + crop = 
filters(" CrOpdafd ", "1") assert crop.filter.name == "NONE" - crop = filters(" c rop ") + crop = filters(" c rop ", "1") assert crop.filter.name == "NONE" - crop = filters("crop") + crop = filters("crop", "1") assert crop.filter.name == "CROP" crop = filters(" brightness \ - ") + ", "1") assert crop.filter.name == "BRIGHTNESS" + + +def test_filter() -> None: + folders = os.listdir(TMP_PATH) + folders = [f for f in folders if os.path.isdir(os.path.join(TMP_PATH, f))] + for fold in folders: + cut = os.listdir(os.path.join(TMP_PATH, fold, CUT)) + for i in range(len(cut)): + cut[i] = os.path.join(TMP_PATH, fold, CUT, cut[i]) + apply_filter(cut[i], os.path.join(TMP_PATH, "test_brightness.mp4")) diff --git a/backend/src/crispy/video/video.py b/backend/src/crispy/video/video.py index 087d142..051a9d6 100644 --- a/backend/src/crispy/video/video.py +++ b/backend/src/crispy/video/video.py @@ -4,7 +4,7 @@ from PIL import Image import numpy as np -from utils.constants import TMP_PATH, IMAGE, RESOURCE_PATH, VIDEO, CUT +from utils.constants import TMP_PATH, IMAGE, RESOURCE_PATH, VIDEO, CUT, SETTINGS import utils.ffmpeg_utils as ff from utils.IO import io from AI.network import NeuralNetwork @@ -17,7 +17,7 @@ def get_saving_path(video: str) -> str: return os.path.join(TMP_PATH, video, IMAGE) -def extract_frames_from_video(video: str, framerate: int = 6) -> str: +def extract_frames_from_video(video: str) -> str: """ Extract frames from the video return: saving location @@ -28,7 +28,7 @@ def extract_frames_from_video(video: str, framerate: int = 6) -> str: video_clean_name = io.generate_clean_name(video_no_ext) saving_path = os.path.join(TMP_PATH, video_clean_name, IMAGE) - ff.extract_images(loading_path, saving_path, framerate) + ff.extract_images(loading_path, saving_path, SETTINGS["clip"]["framerate"]) return saving_path @@ -50,6 +50,7 @@ def get_query_array_from_video(neural_network: NeuralNetwork, images = os.listdir(images_path) images.sort() query_array = [] + confidence = SETTINGS["neural-network"]["confidence"] for i, image in enumerate(images): image_path = os.path.join(images_path, image) @@ -59,19 +60,14 @@ def get_query_array_from_video(neural_network: NeuralNetwork, query_result = neural_network.query(inputs) - # FIXME: confidence is not used - # Should be used instead of np.argmax - result = np.argmax(query_result) - - if result == 1: + if query_result[1] >= confidence: query_array.append(i) return query_array def segment_video_with_kill_array(video: str, - kill_array: List[Tuple[int, int]], - frame_duration: int = 4) -> None: + kill_array: List[Tuple[int, int]]) -> None: """ Segment the video with the given kill array """ @@ -82,18 +78,44 @@ def segment_video_with_kill_array(video: str, video_clean_name = io.generate_clean_name(video_no_ext) save_path = os.path.join(TMP_PATH, video_clean_name, CUT) - ff.segment_video(loading_path, save_path, kill_array, frame_duration) + ff.segment_video(loading_path, save_path, kill_array, + SETTINGS["clip"]["framerate"]) + + +def post_processing_kill_array( + kill_array: List[Tuple[int, int]]) -> List[Tuple[int, int]]: + """ + Post processing the kill array + """ + found = True + offset = SETTINGS["clip"]["second-between-kills"] * SETTINGS["clip"][ + "framerate"] + while found: + found = False + for i in range(len(kill_array) - 1): + if kill_array[i][1] + offset >= kill_array[i + 1][0]: + found = True + kill_array[i] = (kill_array[i][0], kill_array[i + 1][1]) + kill_array.pop(i + 1) + break + + return kill_array -# FIXME: Add post processing def 
get_kill_array_from_query_array( - query_array: List[int], frames_before: int, - frames_after: int) -> List[Tuple[int, int]]: + query_array: List[int]) -> List[Tuple[int, int]]: """ Get the kill array from the query array """ kill_array: List[List[int]] = [] current_kill: List[int] = [] + + framerate = SETTINGS["clip"]["framerate"] + mul = lambda x: int(x * framerate) + + frames_before = mul(SETTINGS["clip"]["second-before"]) + frames_after = mul(SETTINGS["clip"]["second-after"]) + for q in query_array: if len(current_kill) == 0: current_kill.append(q) @@ -106,8 +128,29 @@ def get_kill_array_from_query_array( result = [] for kill in kill_array: + # Remove fake-positives + if len(kill) <= 2: + continue + start = kill[0] - frames_before end = kill[-1] + frames_after result.append((start, end)) - return result + + +def merge_cuts() -> None: + """ + Merge the cuts + """ + folders = os.listdir(TMP_PATH) + folders = [f for f in folders if os.path.isdir(os.path.join(TMP_PATH, f))] + folders.sort() + cuts: List[str] = [] + for folder in folders: + cut = os.listdir(os.path.join(TMP_PATH, folder, CUT)) + cut.sort() + for i in range(len(cut)): + cut[i] = os.path.join(TMP_PATH, folder, CUT, cut[i]) + cuts.extend(cut) + + ff.merge_videos(cuts, os.path.join(TMP_PATH, "merged.mp4")) diff --git a/settings_template.json b/settings_template.json new file mode 100644 index 0000000..2f3f055 --- /dev/null +++ b/settings_template.json @@ -0,0 +1,11 @@ +{ + "neural-network": { + "confidence": 0.8 + }, + "clip": { + "framerate": 8, + "second-before": 3, + "second-after": 2, + "second-between-kills": 1 + } +} From bcb946c60b5570e041bcf2d220f692a07acb74f4 Mon Sep 17 00:00:00 2001 From: Nicolas Charles Date: Mon, 18 Jul 2022 18:34:04 +0200 Subject: [PATCH 2/8] feat: Filters on video segmentation - Added specific filters for clips (should be in "clips" in settings.json) - Grouped filters call in one ffmpeg call to reduce time --- backend/src/crispy/{utils => }/test_filter.py | 21 ++- backend/src/crispy/utils/ffmpeg_filters.py | 162 +++++++----------- backend/src/crispy/utils/ffmpeg_utils.py | 59 ++++--- backend/src/crispy/utils/filter.py | 60 ++----- 4 files changed, 127 insertions(+), 175 deletions(-) rename backend/src/crispy/{utils => }/test_filter.py (54%) diff --git a/backend/src/crispy/utils/test_filter.py b/backend/src/crispy/test_filter.py similarity index 54% rename from backend/src/crispy/utils/test_filter.py rename to backend/src/crispy/test_filter.py index f917497..64b88cb 100644 --- a/backend/src/crispy/utils/test_filter.py +++ b/backend/src/crispy/test_filter.py @@ -1,7 +1,5 @@ -import os -from filter import filters -from ffmpeg_utils import apply_filter -from constants import TMP_PATH, CUT +from utils.filter import filters +from pytube import YouTube def test_class() -> None: @@ -21,10 +19,11 @@ def test_class() -> None: def test_filter() -> None: - folders = os.listdir(TMP_PATH) - folders = [f for f in folders if os.path.isdir(os.path.join(TMP_PATH, f))] - for fold in folders: - cut = os.listdir(os.path.join(TMP_PATH, fold, CUT)) - for i in range(len(cut)): - cut[i] = os.path.join(TMP_PATH, fold, CUT, cut[i]) - apply_filter(cut[i], os.path.join(TMP_PATH, "test_brightness.mp4")) + """ + Load youtube video to test filters. 
Needs to call main.py manually to check + """ + yt = YouTube( + "https://www.youtube.com/watch?v=vWj6NxN7PsI&feature=youtu.be") + yt.streams.order_by("resolution").desc().first().download( + filename="backend/resources/video/0.mp4") + print("Video downloaded") diff --git a/backend/src/crispy/utils/ffmpeg_filters.py b/backend/src/crispy/utils/ffmpeg_filters.py index 46bd91f..9f14536 100644 --- a/backend/src/crispy/utils/ffmpeg_filters.py +++ b/backend/src/crispy/utils/ffmpeg_filters.py @@ -1,100 +1,68 @@ import ffmpeg -def crop(video_path: str, save_path: str, _option: str) -> None: - ( - ffmpeg - .input(video_path) - .crop(x=960, y=540) - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def blur(video_path: str, save_path: str, option: str) -> None: - ( - ffmpeg - .input(video_path) - .filter("boxblur", option) # "luma_radius=2:luma_power=1" - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def scale(video_path: str, save_path: str, option: str) -> None: - ( - ffmpeg - .input(video_path) - .filter("scale", option) # "w=1280:h=720" - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def hflip(video_path: str, save_path: str, _option: str) -> None: - ( - ffmpeg - .input(video_path) - .hflip() - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def vflip(video_path: str, save_path: str, _option: str) -> None: - ( - ffmpeg - .input(video_path) - .vflip() - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def brightness(video_path: str, save_path: str, option: int) -> None: - ( - ffmpeg - .input(video_path) - .hue(b=option) - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def saturation(video_path: str, save_path: str, option: int) -> None: - ( - ffmpeg - .input(video_path) - .hue(s=option) - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def zoom(video_path: str, save_path: str, option: int) -> None: - ( - ffmpeg - .input(video_path) - .zoompan(z=option, fps=60, d=1, x="iw/2-(iw/zoom/2)", y="ih/2-(ih/zoom/2)") - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable - - -def grayscale(video_path: str, save_path: str, _option: str) -> None: - ( - ffmpeg - .input(video_path) - .hue(s=0) - .output(save_path, start_number=0) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable +def crop( + _option: str, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = video.crop(x=960, y=540) + return video + + +def blur( + option: str, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = video.filter("boxblur", option) # "luma_radius=2:luma_power=1" + return video + + +def scale( + option: str, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = video.filter("scale", option) # "w=1280:h=720" + return video + + +def hflip( + _option: str, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = video.hflip() + return video + + +def vflip( + _option: str, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = video.vflip() + return video + + +def brightness( + option: int, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = 
video.hue(b=option) + return video + + +def saturation( + option: int, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = video.hue(s=option) + return video + + +def zoom( + option: int, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = video.zoompan(z=option, + fps=60, + d=1, + x="iw/2-(iw/zoom/2)", + y="ih/2-(ih/zoom/2)") + return video + + +def grayscale( + _option: str, + video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + video = video.hue(s=0) + return video diff --git a/backend/src/crispy/utils/ffmpeg_utils.py b/backend/src/crispy/utils/ffmpeg_utils.py index 42ad174..a5db0cc 100644 --- a/backend/src/crispy/utils/ffmpeg_utils.py +++ b/backend/src/crispy/utils/ffmpeg_utils.py @@ -3,11 +3,10 @@ import string import shutil from typing import Optional, Any, List, Tuple -from constants import SETTINGS - import ffmpeg +from utils.constants import SETTINGS +from utils.filter import filters from PIL import Image, ImageFilter, ImageOps -from filter import filters BACKEND = "backend" DOT_PATH = os.path.join(BACKEND, "assets", "dot.png") @@ -84,21 +83,24 @@ def extract_images(video_path: str, def segment_video(video_path: str, save_path: str, frames: List[Tuple[int, int]], frame_duration: int) -> None: """ - Segment a video on multiple smaller video using the frames array + Segment a video on multiple smaller video using the frames array. """ for frame in frames: start = frame[0] / frame_duration end = frame[1] / frame_duration # print(start, end, frame_duration, video_path, save_path) - ( + video = ( ffmpeg .input(video_path) - .output(os.path.join(save_path, f"{frame[0]}-{frame[1]}.mp4"), - ss=f"{start}", - to=f"{end}") - .overwrite_output() - .run(quiet=True) - ) # yaPf: disable + ) # yapf: disable + video = apply_filter(video, video_path) + + video = video.overwrite_output() + video = video.output(os.path.join(save_path, + f"{frame[0]}-{frame[1]}.mp4"), + ss=f"{start}", + to=f"{end}") + video.run(quiet=True) def find_available_path(video_path: str) -> str: @@ -173,22 +175,31 @@ def merge_videos(videos_path: List[str], save_path: str) -> None: shutil.copyfile(videos_path[0], save_path) -def apply_filter(video_path: str, save_path: str) -> None: +def apply_filter(video: ffmpeg.nodes.FilterableStream, + video_path: str) -> ffmpeg.nodes.FilterableStream: """ - Apply a list of filter to a video + Apply a list of filter to a video. 
""" global_filters: List[filters] = [] for filt in SETTINGS["filters"].items(): global_filters.append(filters(filt[0], filt[1])) - print(video_path) - if os.path.exists(video_path): - tmp_video_path = video_path - for filt in global_filters: - tmp_save_path = find_available_path(tmp_video_path) - filt(tmp_video_path, tmp_save_path) - if tmp_video_path != video_path: - os.remove(tmp_video_path) - tmp_video_path = tmp_save_path - print(tmp_save_path) - os.rename(tmp_save_path, save_path) + find_specific_filters(global_filters, video_path) + for filt in global_filters: + video = filt(video) + + return video + + +def find_specific_filters(global_filters: List[filters], + video_path: str) -> None: + """ + Find specific filters for a video in Settings.json + """ + video_name = os.path.split(video_path)[1] + if "clips" in SETTINGS: + if video_name in SETTINGS["clips"]: + for filt, value in SETTINGS["clips"][video_name].items(): + for i in range(len(global_filters)): + if filt == global_filters[i].filter.value: + global_filters[i] = filters(filt, value) diff --git a/backend/src/crispy/utils/filter.py b/backend/src/crispy/utils/filter.py index 7b29239..1908d39 100644 --- a/backend/src/crispy/utils/filter.py +++ b/backend/src/crispy/utils/filter.py @@ -1,6 +1,6 @@ from enum import Enum -import re -import ffmpeg_filters +from utils import ffmpeg_filters +import ffmpeg class no_value(Enum): @@ -18,14 +18,14 @@ class filter_value(no_value): """ CROP = "crop" # "crop" - BLUR = "Blur" # "boxblur" - SCALE = "Scale" # "scale" - HFLIP = "Hflip" # "horizontal flip" - VFLIP = "Vflip" # "vertical flip" - BRIGHTNESS = "Brightness" # "b" - SATURATION = "Saturation" # "s" - ZOOM = "Zoom" # "zoom" - GRAYSCALE = "Grayscale" # "hue=s=0" + BLUR = "blur" # "boxblur" + SCALE = "scale" # "scale" + HFLIP = "hflip" # "horizontal flip" + VFLIP = "vflip" # "vertical flip" + BRIGHTNESS = "brightness" # "b" + SATURATION = "saturation" # "s" + ZOOM = "zoom" # "zoom" + GRAYSCALE = "grayscale" # "hue=s=0" NONE = "none" @@ -35,40 +35,14 @@ class filters(): """ def __init__(self, name: str, option: str) -> None: - # Add other parameter for different filters later ? 
- self.option = option - if re.search(r"^[\s]*crop[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.CROP - self.f = "crop" - elif re.search(r"^[\s]*blur[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.BLUR - self.f = "blur" - elif re.search(r"^[\s]*scale[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.SCALE - self.f = "scale" - elif re.search(r"^[\s]*hflip[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.HFLIP - self.f = "hflip" - elif re.search(r"^[\s]*vflip[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.VFLIP - self.f = "vflip" - elif re.search(r"^[\s]*brightness[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.BRIGHTNESS - self.f = "brightness" - elif re.search(r"^[\s]*saturation[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.SATURATION - self.f = "saturation" - elif re.search(r"^[\s]*zoom[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.ZOOM - self.f = "zoom" - elif re.search(r"^[\s]*grayscale[\s]*$", name, re.IGNORECASE): - self.filter = filter_value.GRAYSCALE - self.f = "grayscale" + if name in filter_value._value2member_map_: + self.filter = filter_value._value2member_map_[name] else: self.filter = filter_value.NONE + self.option = option - def __call__(self, video_path: str, save_path: str) -> None: + def __call__(self, video: ffmpeg.nodes.FilterableStream) -> None: if self.filter == filter_value.NONE: - return None - func = getattr(ffmpeg_filters, self.f) - return func(video_path, save_path, self.option) + return video + func = getattr(ffmpeg_filters, self.filter.value) + return func(self.option, video) From 2695dc8747d766f27fae3e56ee3ad7597e33baaf Mon Sep 17 00:00:00 2001 From: Nicolas Charles Date: Mon, 18 Jul 2022 22:26:25 +0200 Subject: [PATCH 3/8] feat: Fixed overwrite_output + option boolean --- backend/src/crispy/utils/ffmpeg_filters.py | 31 +++++++++++++--------- backend/src/crispy/utils/ffmpeg_utils.py | 23 +++++++++------- backend/src/crispy/utils/filter.py | 17 ++++++------ 3 files changed, 41 insertions(+), 30 deletions(-) diff --git a/backend/src/crispy/utils/ffmpeg_filters.py b/backend/src/crispy/utils/ffmpeg_filters.py index 9f14536..7468463 100644 --- a/backend/src/crispy/utils/ffmpeg_filters.py +++ b/backend/src/crispy/utils/ffmpeg_filters.py @@ -1,57 +1,61 @@ +from typing import Union import ffmpeg def crop( - _option: str, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.crop(x=960, y=540) + if option: + video = video.crop(x=960, y=540) return video def blur( - option: str, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: video = video.filter("boxblur", option) # "luma_radius=2:luma_power=1" return video def scale( - option: str, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: video = video.filter("scale", option) # "w=1280:h=720" return video def hflip( - _option: str, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.hflip() + if option: + video = video.hflip() return video def vflip( - _option: str, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.vflip() + if option: + video = video.vflip() return video def brightness( - option: int, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: video = 
video.hue(b=option) return video def saturation( - option: int, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: video = video.hue(s=option) return video def zoom( - option: int, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: video = video.zoompan(z=option, fps=60, @@ -62,7 +66,8 @@ def zoom( def grayscale( - _option: str, + option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.hue(s=0) + if option: + video = video.hue(s=0) return video diff --git a/backend/src/crispy/utils/ffmpeg_utils.py b/backend/src/crispy/utils/ffmpeg_utils.py index a5db0cc..fa52eb6 100644 --- a/backend/src/crispy/utils/ffmpeg_utils.py +++ b/backend/src/crispy/utils/ffmpeg_utils.py @@ -5,7 +5,7 @@ from typing import Optional, Any, List, Tuple import ffmpeg from utils.constants import SETTINGS -from utils.filter import filters +from utils.filter import Filters from PIL import Image, ImageFilter, ImageOps BACKEND = "backend" @@ -95,11 +95,11 @@ def segment_video(video_path: str, save_path: str, ) # yapf: disable video = apply_filter(video, video_path) - video = video.overwrite_output() video = video.output(os.path.join(save_path, f"{frame[0]}-{frame[1]}.mp4"), ss=f"{start}", to=f"{end}") + video = video.overwrite_output() video.run(quiet=True) @@ -180,9 +180,9 @@ def apply_filter(video: ffmpeg.nodes.FilterableStream, """ Apply a list of filter to a video. """ - global_filters: List[filters] = [] + global_filters: List[Filters] = [] for filt in SETTINGS["filters"].items(): - global_filters.append(filters(filt[0], filt[1])) + global_filters.append(Filters(filt[0], filt[1])) find_specific_filters(global_filters, video_path) for filt in global_filters: @@ -191,15 +191,20 @@ def apply_filter(video: ffmpeg.nodes.FilterableStream, return video -def find_specific_filters(global_filters: List[filters], +def find_specific_filters(global_filters: List[Filters], video_path: str) -> None: """ - Find specific filters for a video in Settings.json + Find specificFilters for a video in Settings.json """ - video_name = os.path.split(video_path)[1] + video_name = os.path.split(video_path) + video_name = video_name[len(video_name) - 1] if "clips" in SETTINGS: if video_name in SETTINGS["clips"]: for filt, value in SETTINGS["clips"][video_name].items(): + found = False for i in range(len(global_filters)): - if filt == global_filters[i].filter.value: - global_filters[i] = filters(filt, value) + if global_filters[i].filter.value == filt: + found = True + global_filters[i] = Filters(filt, value) + if not found: + global_filters.append(Filters(filt, value)) diff --git a/backend/src/crispy/utils/filter.py b/backend/src/crispy/utils/filter.py index 1908d39..cfc6e8b 100644 --- a/backend/src/crispy/utils/filter.py +++ b/backend/src/crispy/utils/filter.py @@ -1,9 +1,10 @@ +from typing import Union from enum import Enum from utils import ffmpeg_filters import ffmpeg -class no_value(Enum): +class NoValue(Enum): """ Super class for filtes enum """ @@ -12,7 +13,7 @@ def __repr__(self) -> str: return f"<{self.__class__.__name__}.{self.name}>" -class filter_value(no_value): +class FilterValue(NoValue): """ Enum class containing all possible filters """ @@ -29,20 +30,20 @@ class filter_value(no_value): NONE = "none" -class filters(): +class Filters(): """ Class holding all filters """ - def __init__(self, name: str, option: str) -> None: - if name in 
filter_value._value2member_map_: - self.filter = filter_value._value2member_map_[name] + def __init__(self, name: str, option: Union[str, bool, int]) -> None: + if name in FilterValue._value2member_map_: + self.filter = FilterValue._value2member_map_[name] else: - self.filter = filter_value.NONE + self.filter = FilterValue.NONE self.option = option def __call__(self, video: ffmpeg.nodes.FilterableStream) -> None: - if self.filter == filter_value.NONE: + if self.filter == FilterValue.NONE: return video func = getattr(ffmpeg_filters, self.filter.value) return func(self.option, video) From 4580aece4757a2cffacd255e395097da49f36cff Mon Sep 17 00:00:00 2001 From: Brice PARENT Date: Tue, 19 Jul 2022 00:29:42 +0200 Subject: [PATCH 4/8] fix: Small fix on PR --- backend/src/crispy/main.py | 26 ++++++++++-------------- backend/src/crispy/utils/constants.py | 5 +++++ backend/src/crispy/utils/ffmpeg_utils.py | 3 ++- backend/src/crispy/utils/filter.py | 6 +++++- settings_template.json | 13 ++++++++++++ 5 files changed, 36 insertions(+), 17 deletions(-) diff --git a/backend/src/crispy/main.py b/backend/src/crispy/main.py index cd47476..44c8f69 100644 --- a/backend/src/crispy/main.py +++ b/backend/src/crispy/main.py @@ -1,27 +1,24 @@ -import logging from typing import List from utils.arguments import args -from utils.constants import NEURAL_NETWORK_PATH +from utils.constants import NEURAL_NETWORK_PATH, L from utils.IO import io import video.video as vid from AI.network import NeuralNetwork -logging.getLogger("PIL").setLevel(logging.ERROR) - def main(videos: List[str]) -> None: io.generate_tmp_folder(not args.no_extract) nn = NeuralNetwork([4000, 120, 15, 2], 0.01) nn.load(NEURAL_NETWORK_PATH) - l.debug(f"Neural network: {nn}") + L.debug(f"Neural network: {nn}") for video in videos: - l.info(f"Currently processing {video}") + L.info(f"Currently processing {video}") video_no_ext = io.remove_extension(video) video_clean_name = io.generate_clean_name(video_no_ext) - l.debug(f"Clean name: {video_clean_name}") + L.debug(f"Clean name: {video_clean_name}") if not args.no_extract: io.generate_folder_clip(video_clean_name) @@ -33,27 +30,26 @@ def main(videos: List[str]) -> None: io.clean_cuts(video_clean_name) query_array = vid.get_query_array_from_video(nn, images_path) - l.debug(query_array) + L.debug(query_array) kill_array = vid.get_kill_array_from_query_array(query_array) - l.debug(kill_array) + L.debug(kill_array) kill_array = vid.post_processing_kill_array(kill_array) - l.debug(kill_array) + L.debug(kill_array) vid.segment_video_with_kill_array(video, kill_array) if not args.no_merge: + L.info("Merging videos") vid.merge_cuts() if __name__ == "__main__": - l = logging.getLogger() - print("Welcome to crispy!") - l.info("Starting the program crispy") + L.info("Starting the program crispy") - l.debug(f"Arguments: {args}") + L.debug(f"Arguments: {args}") - videos_path = ["0.mp4"] + videos_path = ["1.mp4"] # FIXME: should be sort with the frontend ? 
videos_path.sort() diff --git a/backend/src/crispy/utils/constants.py b/backend/src/crispy/utils/constants.py index 972785e..6965fbd 100644 --- a/backend/src/crispy/utils/constants.py +++ b/backend/src/crispy/utils/constants.py @@ -1,6 +1,11 @@ import os +import logging import json +L = logging.getLogger("crispy") + +logging.getLogger("PIL").setLevel(logging.ERROR) + BACKEND = "backend" OUTPUT = "output" diff --git a/backend/src/crispy/utils/ffmpeg_utils.py b/backend/src/crispy/utils/ffmpeg_utils.py index fa52eb6..85fdd9b 100644 --- a/backend/src/crispy/utils/ffmpeg_utils.py +++ b/backend/src/crispy/utils/ffmpeg_utils.py @@ -4,7 +4,7 @@ import shutil from typing import Optional, Any, List, Tuple import ffmpeg -from utils.constants import SETTINGS +from utils.constants import SETTINGS, L from utils.filter import Filters from PIL import Image, ImageFilter, ImageOps @@ -186,6 +186,7 @@ def apply_filter(video: ffmpeg.nodes.FilterableStream, find_specific_filters(global_filters, video_path) for filt in global_filters: + L.debug(f"Applying filter {filt.filter.name} {filt.option}") video = filt(video) return video diff --git a/backend/src/crispy/utils/filter.py b/backend/src/crispy/utils/filter.py index cfc6e8b..6f22ee3 100644 --- a/backend/src/crispy/utils/filter.py +++ b/backend/src/crispy/utils/filter.py @@ -1,8 +1,11 @@ from typing import Union from enum import Enum -from utils import ffmpeg_filters + import ffmpeg +from utils import ffmpeg_filters +from utils.constants import L + class NoValue(Enum): """ @@ -39,6 +42,7 @@ def __init__(self, name: str, option: Union[str, bool, int]) -> None: if name in FilterValue._value2member_map_: self.filter = FilterValue._value2member_map_[name] else: + L.error(f"{name} is not a valid filter") self.filter = FilterValue.NONE self.option = option diff --git a/settings_template.json b/settings_template.json index 2f3f055..b5aa3e2 100644 --- a/settings_template.json +++ b/settings_template.json @@ -7,5 +7,18 @@ "second-before": 3, "second-after": 2, "second-between-kills": 1 + }, + "filters": { + "saturation": 1.3 + }, + "clips": { + "0.mp4": { + "saturation": 0.5, + "hflip": true + }, + "1.mp4": { + "zoom": true, + "grayscale": true + } } } From 74934ef38a4dcb39996fa18ea9a6339c57453f6f Mon Sep 17 00:00:00 2001 From: Nicolas Charles Date: Tue, 19 Jul 2022 16:51:17 +0200 Subject: [PATCH 5/8] feat: Fixed type check for filters --- .pylintrc | 3 ++- README.md | 6 +++++ backend/src/crispy/main.py | 2 +- backend/src/crispy/utils/ffmpeg_filters.py | 28 +++++++++------------- 4 files changed, 20 insertions(+), 19 deletions(-) diff --git a/.pylintrc b/.pylintrc index 3cd34c6..75d05b9 100644 --- a/.pylintrc +++ b/.pylintrc @@ -17,7 +17,8 @@ disable= C0200, W0614, W1203, - C3001 + C3001, + C0123 string-quote=double diff --git a/README.md b/README.md index a5787bf..5a3c07d 100644 --- a/README.md +++ b/README.md @@ -29,3 +29,9 @@ to do so, you'll need another resource folder - `cd frontend && npm install && npm run dev` - `cd backend && pip install -r requirements.txt && uvicorn src.app:app --reload --host 127.0.0.1 --port 1337` + +# ffmpeg filters +- crop / hflip / vflip / grayscale : boolean +- blur / brightness / saturation : int (between -10 and 10) +- scale: str (ex: "w=1280:h=720") +- zoom: int (>= 1) diff --git a/backend/src/crispy/main.py b/backend/src/crispy/main.py index 44c8f69..ecbc344 100644 --- a/backend/src/crispy/main.py +++ b/backend/src/crispy/main.py @@ -49,7 +49,7 @@ def main(videos: List[str]) -> None: L.debug(f"Arguments: {args}") - videos_path = 
["1.mp4"] + videos_path = ["0.mp4"] # FIXME: should be sort with the frontend ? videos_path.sort() diff --git a/backend/src/crispy/utils/ffmpeg_filters.py b/backend/src/crispy/utils/ffmpeg_filters.py index 7468463..a929fd6 100644 --- a/backend/src/crispy/utils/ffmpeg_filters.py +++ b/backend/src/crispy/utils/ffmpeg_filters.py @@ -5,69 +5,63 @@ def crop( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - if option: - video = video.crop(x=960, y=540) + video = video.cop(960, 540) if type(option) == bool and option else video return video def blur( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.filter("boxblur", option) # "luma_radius=2:luma_power=1" + video = video.filter("boxblur", option) if type(option) == int else video return video def scale( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.filter("scale", option) # "w=1280:h=720" + video = video.filter("scale", option) if type(option) == str else video return video def hflip( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - if option: - video = video.hflip() + video = video.hflip() if type(option) == bool and option else video return video def vflip( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - if option: - video = video.vflip() + video = video.vflip() if type(option) == bool and option else video return video def brightness( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.hue(b=option) + video = video.hue(b=option) if type(option) == int else video return video def saturation( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.hue(s=option) + video = video.hue(s=option) if type(option) == int else video return video def zoom( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.zoompan(z=option, - fps=60, - d=1, - x="iw/2-(iw/zoom/2)", - y="ih/2-(ih/zoom/2)") + video = video.zoompan( + z=option, fps=60, d=1, x="iw/2-(iw/zoom/2)", + y="ih/2-(ih/zoom/2)") if type(option) == int else video return video def grayscale( option: Union[str, bool, int], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - if option: - video = video.hue(s=0) + video = video.hue(s=0) if type(option) == bool and option else video return video From bb50073f305b49600e3e746ddaf7d6e17a40dd91 Mon Sep 17 00:00:00 2001 From: Nicolas Charles Date: Tue, 19 Jul 2022 17:20:20 +0200 Subject: [PATCH 6/8] feat: Added logger for wrong type --- backend/src/crispy/utils/ffmpeg_filters.py | 84 ++++++++++++++++------ 1 file changed, 64 insertions(+), 20 deletions(-) diff --git a/backend/src/crispy/utils/ffmpeg_filters.py b/backend/src/crispy/utils/ffmpeg_filters.py index a929fd6..5eec5de 100644 --- a/backend/src/crispy/utils/ffmpeg_filters.py +++ b/backend/src/crispy/utils/ffmpeg_filters.py @@ -1,67 +1,111 @@ from typing import Union +from utils.constants import L import ffmpeg def crop( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.cop(960, 540) if type(option) == bool and option else video + if type(option) == bool and option: + video = 
video.crop(960, 540) + else: + L.error( + f"expected type({bool}) got ({type(option)}) for filter ('crop')") return video def blur( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.filter("boxblur", option) if type(option) == int else video + if type(option) == int or type(option) == float: + video = video.filter("boxblur", option) + else: + L.error( + f"expected type({int} / {float}) got ({type(option)}) for filter ('blur')" + ) return video def scale( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.filter("scale", option) if type(option) == str else video + if type(option) == str: + video = video.filter("scale", option) + else: + L.error( + f"expected type({str}) got ({type(option)}) for filter ('scale')") return video def hflip( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.hflip() if type(option) == bool and option else video + if type(option) == bool and option: + video = video.hflip() + else: + L.error( + f"expected type({bool}) got ({type(option)}) for filter ('hflip')") return video def vflip( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.vflip() if type(option) == bool and option else video + if type(option) == bool and option: + video = video.hflip() + else: + L.error( + f"expected type({bool}) got ({type(option)}) for filter ('vflip')") return video def brightness( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.hue(b=option) if type(option) == int else video + if type(option) == int or type(option) == float: + video = video.hue(b=option) + else: + L.error( + f"expected type({int} / {float}) got ({type(option)}) for filter ('brightness')" + ) return video def saturation( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.hue(s=option) if type(option) == int else video + if type(option) == int or type(option) == float: + video = video.hue(s=option) + else: + L.error( + f"expected type({int} / {float}) got ({type(option)}) for filter ('saturation')" + ) return video def zoom( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.zoompan( - z=option, fps=60, d=1, x="iw/2-(iw/zoom/2)", - y="ih/2-(ih/zoom/2)") if type(option) == int else video + if type(option) == int or type(option) == float: + video = video.zoompan(z=option, + fps=60, + d=1, + x="iw/2-(iw/zoom/2)", + y="ih/2-(ih/zoom/2)") + else: + L.error( + f"expected type({int} / {float}) got ({type(option)}) for filter ('zoom')" + ) return video def grayscale( - option: Union[str, bool, int], + option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: - video = video.hue(s=0) if type(option) == bool and option else video + if type(option) == bool and option: + video = video.hue(s=0) + else: + L.error( + f"expected type({bool}) got ({type(option)}) for filter ('grayscale')" + ) return video From 
c99f217a00424c1e03c083eb2bb60560f5da2feb Mon Sep 17 00:00:00 2001 From: Nicolas Charles Date: Tue, 19 Jul 2022 17:43:08 +0200 Subject: [PATCH 7/8] feat: Added documentation for filter functions --- backend/src/crispy/utils/ffmpeg_filters.py | 27 ++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/backend/src/crispy/utils/ffmpeg_filters.py b/backend/src/crispy/utils/ffmpeg_filters.py index 5eec5de..75b7dd6 100644 --- a/backend/src/crispy/utils/ffmpeg_filters.py +++ b/backend/src/crispy/utils/ffmpeg_filters.py @@ -6,6 +6,9 @@ def crop( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Crop a video in 960 x 540 pixels + """ if type(option) == bool and option: video = video.crop(960, 540) else: @@ -17,6 +20,9 @@ def crop( def blur( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Increase or decrease the blur of the video based on the option + """ if type(option) == int or type(option) == float: video = video.filter("boxblur", option) else: @@ -29,6 +35,9 @@ def blur( def scale( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Scale a video based on the option (e.g 'w=1920:h=1280') + """ if type(option) == str: video = video.filter("scale", option) else: @@ -40,6 +49,9 @@ def scale( def hflip( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Flip horizontally the video + """ if type(option) == bool and option: video = video.hflip() else: @@ -51,6 +63,9 @@ def hflip( def vflip( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Flip vertically the video + """ if type(option) == bool and option: video = video.hflip() else: @@ -62,6 +77,9 @@ def vflip( def brightness( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Increase or decrease the brightness of the video based on the option + """ if type(option) == int or type(option) == float: video = video.hue(b=option) else: @@ -74,6 +92,9 @@ def brightness( def saturation( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Increase or decrease the saturation of the video based on the option + """ if type(option) == int or type(option) == float: video = video.hue(s=option) else: @@ -86,6 +107,9 @@ def saturation( def zoom( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Zoom the video based on the option (can only be positive) + """ if type(option) == int or type(option) == float: video = video.zoompan(z=option, fps=60, @@ -102,6 +126,9 @@ def zoom( def grayscale( option: Union[str, bool, int, float], video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream: + """ + Turn the video into grayscale + """ if type(option) == bool and option: video = video.hue(s=0) else: From 1cbf56f4226dc389d67094c53f9113412b2cecd8 Mon Sep 17 00:00:00 2001 From: Nicolas Charles Date: Tue, 19 Jul 2022 19:03:09 +0200 Subject: [PATCH 8/8] feat: Fixed duplicated functions --- backend/src/crispy/utils/ffmpeg_utils.py | 70 ------------------------ 1 file changed, 70 deletions(-) diff --git a/backend/src/crispy/utils/ffmpeg_utils.py b/backend/src/crispy/utils/ffmpeg_utils.py index c77d0de..1f00b54 100644 --- 
a/backend/src/crispy/utils/ffmpeg_utils.py +++ b/backend/src/crispy/utils/ffmpeg_utils.py @@ -171,76 +171,6 @@ def merge_videos(videos_path: List[str], save_path: str) -> None: .overwrite_output() .run(quiet=True) ) # yaPf: disable - - -def find_available_path(video_path: str) -> str: - """ - Find available path to store the scaled video temporarily. - """ - dirname, basename = os.path.split(video_path) - h = str(hash(basename)) + ".mp4" - while (os.path.exists(os.path.join(dirname, h))): - h = random.choice(string.ascii_letters) + h - - return os.path.join(dirname, h) - - -def scale_video(video_path: str) -> None: - """ - Scale (up or down) a video. - """ - if os.path.exists(video_path): - save_path = find_available_path(video_path) - ( - ffmpeg - .input(video_path) - .filter("scale", w=1920, h=1080) - .output(save_path, start_number=0) - .overwrite_output() - .run() - ) # yapf: disable - - os.remove(video_path) - os.rename(save_path, video_path) - # check if image has to be upscaled or downscaled ? - else: - raise FileNotFoundError(f"{video_path} not found") - - -def create_new_path(video_path: str) -> str: - """ - Create new path based on the original one. - """ - drive, tail = os.path.split(video_path) - name, ext = os.path.splitext(tail) - nb = 1 - cur_name = name + "_" + str(nb) - while os.path.exists(os.path.join(drive, cur_name + ext)): - nb = nb + 1 - cur_name = name + "_" + str(nb) - - tail = cur_name + ext - res = os.path.join(drive, cur_name + ext) - - return res - - -# FIXME: audio -def merge_videos(videos_path: List[str], save_path: str) -> None: - """ - Merge videos together. - """ - if len(videos_path) > 1: - videos: List[Any] = [] - for video_path in videos_path: - videos.append(ffmpeg.input(video_path)) - ( - ffmpeg - .concat(*videos) - .output(save_path) - .overwrite_output() - .run(quiet=True) - ) # yapf: disable else: shutil.copyfile(videos_path[0], save_path)
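
For reference, below is a minimal, self-contained sketch of how the filter pipeline introduced by this series behaves once patch 8 is applied: global filters are read from the "filters" section of settings.json, per-clip overrides from the "clips" section, and the whole chain is folded into a single ffmpeg-python stream per cut. The helper name build_stream and the output path are placeholders invented for this example; the ffmpeg-python calls (input, hflip, hue, filter, output, overwrite_output, run) are the same ones the patches use, and the frame-to-seconds conversion mirrors segment_video(). This is an illustration of the design, not the repository code verbatim.

```python
import json
import os

import ffmpeg

# Global filters come from the "filters" section of settings.json; the
# "clips" section (added in patch 4) overrides them for a specific input file.
with open("settings.json", "r") as f:
    settings = json.load(f)


def build_stream(video_path: str,
                 clip_name: str) -> ffmpeg.nodes.FilterableStream:
    options = dict(settings.get("filters", {}))
    options.update(settings.get("clips", {}).get(clip_name, {}))

    stream = ffmpeg.input(video_path)
    # Only three of the nine filters are shown; per README.md, booleans
    # toggle a filter while numbers and strings carry its option.
    if options.get("hflip"):
        stream = stream.hflip()
    if "saturation" in options:
        stream = stream.hue(s=options["saturation"])
    if "scale" in options:  # e.g. "w=1280:h=720"
        stream = stream.filter("scale", options["scale"])
    return stream


# Render one cut between two frame indices, the way segment_video() does:
# frame indices divided by the extraction framerate give seconds.
framerate = settings["clip"]["framerate"]
os.makedirs("outputs", exist_ok=True)
(
    build_stream("backend/resources/video/0.mp4", "0.mp4")
    .output("outputs/0-100.mp4", ss=0 / framerate, to=100 / framerate)
    .overwrite_output()
    .run(quiet=True)
)
```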
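
The kill-array post-processing added in patch 1 can be summarised as: merge any two detected kill windows whose gap is shorter than second-between-kills seconds, expressed in frames. The worked example below uses the values from settings_template.json (framerate 8, second-between-kills 1); merge_close_kills is a compact restatement written only for this illustration, not the function from the patch.

```python
from typing import List, Tuple


def merge_close_kills(kills: List[Tuple[int, int]],
                      framerate: int = 8,
                      second_between_kills: int = 1) -> List[Tuple[int, int]]:
    # Two windows are merged when the gap between them is at most
    # second_between_kills * framerate frames.
    offset = second_between_kills * framerate
    merged = list(kills)
    i = 0
    while i < len(merged) - 1:
        if merged[i][1] + offset >= merged[i + 1][0]:
            merged[i] = (merged[i][0], merged[i + 1][1])
            merged.pop(i + 1)
        else:
            i += 1
    return merged


# (40, 60) and (65, 90) are 5 frames apart, less than 8 frames (1 s at 8 fps),
# so they collapse into one clip; (200, 230) stays separate.
print(merge_close_kills([(40, 60), (65, 90), (200, 230)]))
# [(40, 90), (200, 230)]
```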