diff --git a/.pylintrc b/.pylintrc
index 3cd34c6..75d05b9 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -17,7 +17,8 @@ disable=
     C0200,
     W0614,
     W1203,
-    C3001
+    C3001,
+    C0123
 
 string-quote=double
 
diff --git a/README.md b/README.md
index a5787bf..5a3c07d 100644
--- a/README.md
+++ b/README.md
@@ -29,3 +29,9 @@
 to do so, you'll need another resource folder
 - `cd frontend && npm install && npm run dev`
 - `cd backend && pip install -r requirements.txt && uvicorn src.app:app --reload --host 127.0.0.1 --port 1337`
+
+# ffmpeg filters
+- crop / hflip / vflip / grayscale: boolean
+- blur / brightness / saturation: int (between -10 and 10)
+- scale: str (e.g. "w=1280:h=720")
+- zoom: int (>= 1)
diff --git a/backend/src/crispy/main.py b/backend/src/crispy/main.py
index 767f454..a73a4bc 100644
--- a/backend/src/crispy/main.py
+++ b/backend/src/crispy/main.py
@@ -1,27 +1,24 @@
-import logging
 from typing import List
 
 from utils.arguments import args
-from utils.constants import NEURAL_NETWORK_PATH
+from utils.constants import NEURAL_NETWORK_PATH, L
 from utils.IO import io
 import video.video as vid
 from AI.network import NeuralNetwork
 
-logging.getLogger("PIL").setLevel(logging.ERROR)
-
 
 def main(videos: List[str]) -> None:
     io.generate_tmp_folder(not args.no_extract)
 
     nn = NeuralNetwork([4000, 120, 15, 2], 0.01)
     nn.load(NEURAL_NETWORK_PATH)
-    l.debug(f"Neural network: {nn}")
+    L.debug(f"Neural network: {nn}")
 
     for video in videos:
-        l.info(f"Currently processing {video}")
+        L.info(f"Currently processing {video}")
         video_no_ext = io.remove_extension(video)
         video_clean_name = io.generate_clean_name(video_no_ext)
-        l.debug(f"Clean name: {video_clean_name}")
+        L.debug(f"Clean name: {video_clean_name}")
 
         if not args.no_extract:
             io.generate_folder_clip(video_clean_name)
@@ -33,25 +30,24 @@ def main(videos: List[str]) -> None:
             io.clean_cuts(video_clean_name)
 
         query_array = vid.get_query_array_from_video(nn, images_path)
-        l.debug(query_array)
+        L.debug(query_array)
         kill_array = vid.get_kill_array_from_query_array(query_array)
-        l.debug(kill_array)
+        L.debug(kill_array)
         kill_array = vid.post_processing_kill_array(kill_array)
-        l.debug(kill_array)
+        L.debug(kill_array)
         vid.segment_video_with_kill_array(video, kill_array)
 
     if not args.no_merge:
+        L.info("Merging videos")
         vid.merge_cuts()
 
 
 if __name__ == "__main__":
-    l = logging.getLogger()
 
     print("Welcome to crispy!")
-    l.info("Starting the program crispy")
+    L.info("Starting the program crispy")
 
-    l.debug(f"Arguments: {args}")
+    L.debug(f"Arguments: {args}")
 
     videos_path = ["4.mp4", "quadra chrlie mice.mp4"]
 
diff --git a/backend/src/crispy/test_filter.py b/backend/src/crispy/test_filter.py
new file mode 100644
index 0000000..64b88cb
--- /dev/null
+++ b/backend/src/crispy/test_filter.py
@@ -0,0 +1,29 @@
+from utils.filter import Filters
+from pytube import YouTube
+
+
+def test_class() -> None:
+    crop = Filters(" crop ", "1")
+    assert crop.filter.name == "CROP"
+    crop = Filters(" ZOOM ", "1")
+    assert crop.filter.name == "ZOOM"
+    crop = Filters(" CrOpdafd ", "1")
+    assert crop.filter.name == "NONE"
+    crop = Filters(" c rop ", "1")
+    assert crop.filter.name == "NONE"
+    crop = Filters("crop", "1")
+    assert crop.filter.name == "CROP"
+    crop = Filters(" brightness \
+    ", "1")
+    assert crop.filter.name == "BRIGHTNESS"
+
+
+def test_filter() -> None:
+    """
+    Load a youtube video to test the filters. Run main.py manually to check
+    """
+    yt = YouTube(
+        "https://www.youtube.com/watch?v=vWj6NxN7PsI&feature=youtu.be")
+    yt.streams.order_by("resolution").desc().first().download(
+        filename="backend/resources/video/0.mp4")
+    print("Video downloaded")
diff --git a/backend/src/crispy/utils/constants.py b/backend/src/crispy/utils/constants.py
index 972785e..6965fbd 100644
--- a/backend/src/crispy/utils/constants.py
+++ b/backend/src/crispy/utils/constants.py
@@ -1,6 +1,11 @@
 import os
+import logging
 import json
 
+L = logging.getLogger("crispy")
+
+logging.getLogger("PIL").setLevel(logging.ERROR)
+
 BACKEND = "backend"
 OUTPUT = "output"
 
diff --git a/backend/src/crispy/utils/ffmpeg_filters.py b/backend/src/crispy/utils/ffmpeg_filters.py
new file mode 100644
index 0000000..75b7dd6
--- /dev/null
+++ b/backend/src/crispy/utils/ffmpeg_filters.py
@@ -0,0 +1,138 @@
+from typing import Union
+from utils.constants import L
+import ffmpeg
+
+
+def crop(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Crop a video to 960 x 540 pixels
+    """
+    if type(option) == bool and option:
+        video = video.filter("crop", w=960, h=540)
+    else:
+        L.error(
+            f"expected type({bool}) got ({type(option)}) for filter ('crop')")
+    return video
+
+
+def blur(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Increase or decrease the blur of the video based on the option
+    """
+    if type(option) == int or type(option) == float:
+        video = video.filter("boxblur", option)
+    else:
+        L.error(
+            f"expected type({int} / {float}) got ({type(option)}) for filter ('blur')"
+        )
+    return video
+
+
+def scale(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Scale a video based on the option (e.g. 'w=1920:h=1280')
+    """
+    if type(option) == str:
+        video = video.filter("scale", option)
+    else:
+        L.error(
+            f"expected type({str}) got ({type(option)}) for filter ('scale')")
+    return video
+
+
+def hflip(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Flip the video horizontally
+    """
+    if type(option) == bool and option:
+        video = video.hflip()
+    else:
+        L.error(
+            f"expected type({bool}) got ({type(option)}) for filter ('hflip')")
+    return video
+
+
+def vflip(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Flip the video vertically
+    """
+    if type(option) == bool and option:
+        video = video.vflip()
+    else:
+        L.error(
+            f"expected type({bool}) got ({type(option)}) for filter ('vflip')")
+    return video
+
+
+def brightness(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Increase or decrease the brightness of the video based on the option
+    """
+    if type(option) == int or type(option) == float:
+        video = video.hue(b=option)
+    else:
+        L.error(
+            f"expected type({int} / {float}) got ({type(option)}) for filter ('brightness')"
+        )
+    return video
+
+
+def saturation(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Increase or decrease the saturation of the video based on the option
+    """
+    if type(option) == int or type(option) == float:
+        video = video.hue(s=option)
+    else:
+        L.error(
+            f"expected type({int} / {float}) got ({type(option)}) for filter ('saturation')"
+        )
+    return video
+
+
+def zoom(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Zoom the video based on the option (can only be positive)
+    """
+    if type(option) == int or type(option) == float:
+        video = video.zoompan(z=option,
+                              fps=60,
+                              d=1,
+                              x="iw/2-(iw/zoom/2)",
+                              y="ih/2-(ih/zoom/2)")
+    else:
+        L.error(
+            f"expected type({int} / {float}) got ({type(option)}) for filter ('zoom')"
+        )
+    return video
+
+
+def grayscale(
+        option: Union[str, bool, int, float],
+        video: ffmpeg.nodes.FilterableStream) -> ffmpeg.nodes.FilterableStream:
+    """
+    Turn the video into grayscale
+    """
+    if type(option) == bool and option:
+        video = video.hue(s=0)
+    else:
+        L.error(
+            f"expected type({bool}) got ({type(option)}) for filter ('grayscale')"
+        )
+    return video
diff --git a/backend/src/crispy/utils/ffmpeg_utils.py b/backend/src/crispy/utils/ffmpeg_utils.py
index 2408aea..1f00b54 100644
--- a/backend/src/crispy/utils/ffmpeg_utils.py
+++ b/backend/src/crispy/utils/ffmpeg_utils.py
@@ -3,8 +3,9 @@
 import string
 import shutil
 from typing import Optional, Any, List, Tuple
-
 import ffmpeg
+from utils.constants import SETTINGS, L
+from utils.filter import Filters
 from PIL import Image, ImageFilter, ImageOps
 
 BACKEND = "backend"
@@ -82,21 +83,24 @@ def extract_images(video_path: str,
 def segment_video(video_path: str, save_path: str,
                   frames: List[Tuple[int, int]], frame_duration: int) -> None:
     """
-    Segment a video on multiple smaller video using the frames array
+    Segment a video into multiple smaller videos using the frames array.
     """
     for frame in frames:
         start = frame[0] / frame_duration
        end = frame[1] / frame_duration
         # print(start, end, frame_duration, video_path, save_path)
-        (
+        video = (
             ffmpeg
             .input(video_path)
-            .output(os.path.join(save_path, f"{frame[0]}-{frame[1]}.mp4"),
-                    ss=f"{start}",
-                    to=f"{end}")
-            .overwrite_output()
-            .run(quiet=True)
-        )  # yapf: disable
+        )  # yapf: disable
+        video = apply_filter(video, video_path)
+
+        video = video.output(os.path.join(save_path,
+                                          f"{frame[0]}-{frame[1]}.mp4"),
+                             ss=f"{start}",
+                             to=f"{end}")
+        video = video.overwrite_output()
+        video.run(quiet=True)
 
 
 def find_available_path(video_path: str) -> str:
@@ -166,6 +170,42 @@ def merge_videos(videos_path: List[str], save_path: str) -> None:
             .output(save_path)
             .overwrite_output()
             .run(quiet=True)
         )  # yapf: disable
     else:
         shutil.copyfile(videos_path[0], save_path)
+
+
+def apply_filter(video: ffmpeg.nodes.FilterableStream,
+                 video_path: str) -> ffmpeg.nodes.FilterableStream:
+    """
+    Apply a list of filters to a video.
+ """ + global_filters: List[Filters] = [] + for filt in SETTINGS["filters"].items(): + global_filters.append(Filters(filt[0], filt[1])) + + find_specific_filters(global_filters, video_path) + for filt in global_filters: + L.debug(f"Applying filter {filt.filter.name} {filt.option}") + video = filt(video) + + return video + + +def find_specific_filters(global_filters: List[Filters], + video_path: str) -> None: + """ + Find specificFilters for a video in Settings.json + """ + video_name = os.path.split(video_path) + video_name = video_name[len(video_name) - 1] + if "clips" in SETTINGS: + if video_name in SETTINGS["clips"]: + for filt, value in SETTINGS["clips"][video_name].items(): + found = False + for i in range(len(global_filters)): + if global_filters[i].filter.value == filt: + found = True + global_filters[i] = Filters(filt, value) + if not found: + global_filters.append(Filters(filt, value)) diff --git a/backend/src/crispy/utils/filter.py b/backend/src/crispy/utils/filter.py new file mode 100644 index 0000000..6f22ee3 --- /dev/null +++ b/backend/src/crispy/utils/filter.py @@ -0,0 +1,53 @@ +from typing import Union +from enum import Enum + +import ffmpeg + +from utils import ffmpeg_filters +from utils.constants import L + + +class NoValue(Enum): + """ + Super class for filtes enum + """ + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}.{self.name}>" + + +class FilterValue(NoValue): + """ + Enum class containing all possible filters + """ + + CROP = "crop" # "crop" + BLUR = "blur" # "boxblur" + SCALE = "scale" # "scale" + HFLIP = "hflip" # "horizontal flip" + VFLIP = "vflip" # "vertical flip" + BRIGHTNESS = "brightness" # "b" + SATURATION = "saturation" # "s" + ZOOM = "zoom" # "zoom" + GRAYSCALE = "grayscale" # "hue=s=0" + NONE = "none" + + +class Filters(): + """ + Class holding all filters + """ + + def __init__(self, name: str, option: Union[str, bool, int]) -> None: + if name in FilterValue._value2member_map_: + self.filter = FilterValue._value2member_map_[name] + else: + L.error(f"{name} is not a valid filter") + self.filter = FilterValue.NONE + self.option = option + + def __call__(self, video: ffmpeg.nodes.FilterableStream) -> None: + if self.filter == FilterValue.NONE: + return video + func = getattr(ffmpeg_filters, self.filter.value) + return func(self.option, video) diff --git a/backend/src/crispy/utils/test_ffmpeg_utils.py b/backend/src/crispy/utils/test_ffmpeg_utils.py index 956c333..c555d29 100644 --- a/backend/src/crispy/utils/test_ffmpeg_utils.py +++ b/backend/src/crispy/utils/test_ffmpeg_utils.py @@ -1,7 +1,7 @@ import os import cv2 from pytube import YouTube -from ffmpeg_utils import scale_video +from ffmpeg_utils import scale_video, segment_video def test_basic() -> None: @@ -72,3 +72,31 @@ def split_check(video_path: str, frames: int) -> bool: abort(video_path) return False return True + + +def cut_1_100(video_path: str) -> bool: + segment_video(video_path, video_path, [(0, 100)], 1) + return split_check(video_path, 100) + + +def cut_10_100(video_path: str) -> bool: + segment_video(video_path, video_path, [(0, 100), (100, 200), (200, 300), + (300, 400), (400, 500), (500, 600), + (600, 700), (700, 800), (800, 900), + (900, 1000)], 1) + return split_check(video_path, 100) + + +def test_split() -> None: + os.mkdir("test_split") + yt = YouTube("https://www.youtube.com/watch?v=6A-hTKYBkC4") + yt.streams.order_by("resolution").desc().first().download( + filename="./test_split/test_basic.mp4") + + assert cut_1_100("./test_split/test_basic.mp4") + + 
+    yt.streams.order_by("resolution").desc().first().download(
+        filename="./test_split/test_basic.mp4")
+    assert cut_10_100("./test_split/test_basic.mp4")
+
+    abort("./test_split/test_basic.mp4")
diff --git a/settings_template.json b/settings_template.json
index 2f3f055..b5aa3e2 100644
--- a/settings_template.json
+++ b/settings_template.json
@@ -7,5 +7,18 @@
     "second-before": 3,
     "second-after": 2,
     "second-between-kills": 1
+  },
+  "filters": {
+    "saturation": 1.3
+  },
+  "clips": {
+    "0.mp4": {
+      "saturation": 0.5,
+      "hflip": true
+    },
+    "1.mp4": {
+      "zoom": 2,
+      "grayscale": true
+    }
   }
 }