
Commit

fix #1882
jxxghp committed Apr 14, 2024
1 parent 8cd0dd4 commit 6939bff
Showing 4 changed files with 75 additions and 62 deletions.
app/chain/__init__.py (8 changes: 4 additions & 4 deletions)
@@ -14,7 +14,6 @@
 from app.core.context import MediaInfo, TorrentInfo
 from app.core.event import EventManager
 from app.core.meta import MetaBase
-from app.core.metainfo import MetaInfo
 from app.core.module import ModuleManager
 from app.db.message_oper import MessageOper
 from app.helper.message import MessageHelper
@@ -478,18 +477,19 @@ def post_torrents_message(self, message: Notification, torrents: List[Context])
return self.run_module("post_torrents_message", message=message, torrents=torrents)

def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str,
metainfo: MetaInfo = None, force_nfo: bool = False, force_img: bool = False) -> None:
metainfo: MetaBase = None, force_nfo: bool = False, force_img: bool = False) -> None:
"""
刮削元数据
:param path: 媒体文件路径
:param mediainfo: 识别的媒体信息
:param metainfo: 源文件的识别元数据
:param transfer_type: 转移模式
:param force_nfo: 强制刮削nfo
:param force_img: 强制刮削图片
:return: 成功或失败
"""
self.run_module("scrape_metadata", path=path, mediainfo=mediainfo,
metainfo=metainfo, transfer_type=transfer_type, force_nfo=force_nfo, force_img=force_img)
self.run_module("scrape_metadata", path=path, mediainfo=mediainfo, metainfo=metainfo,
transfer_type=transfer_type, force_nfo=force_nfo, force_img=force_img)

def register_commands(self, commands: Dict[str, dict]) -> None:
"""
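
For orientation, here is a minimal sketch of how a caller might use the updated signature: parse the source file once and hand the resulting MetaBase to scrape_metadata. The ChainBase class name and the "link" transfer mode are assumptions for illustration; only the parameter names come from the diff above.

from pathlib import Path

from app.chain import ChainBase                 # assumed: the chain class patched above
from app.core.context import MediaInfo
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfoPath


def scrape_one(chain: ChainBase, mediainfo: MediaInfo, file_path: Path) -> None:
    # Parse the source file once and reuse the result, instead of letting
    # each module re-derive it from the file name.
    meta: MetaBase = MetaInfoPath(file_path)
    chain.scrape_metadata(path=file_path,
                          mediainfo=mediainfo,
                          transfer_type="link",  # assumed transfer mode
                          metainfo=meta,
                          force_nfo=False,
                          force_img=False)
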
app/modules/douban/__init__.py (114 changes: 62 additions & 52 deletions)
@@ -7,7 +7,7 @@
 from app.core.config import settings
 from app.core.context import MediaInfo
 from app.core.meta import MetaBase
-from app.core.metainfo import MetaInfo
+from app.core.metainfo import MetaInfo, MetaInfoPath
 from app.log import logger
 from app.modules import _ModuleBase
 from app.modules.douban.apiv2 import DoubanApi
@@ -629,88 +629,98 @@ def movie_top250(self, page: int = 1, count: int = 30) -> List[dict]:
return infos.get("subject_collection_items")

def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str,
metainfo: MetaInfo = None, force_nfo: bool = False, force_img: bool = False) -> None:
metainfo: MetaBase = None, force_nfo: bool = False, force_img: bool = False) -> None:
"""
刮削元数据
:param path: 媒体文件路径
:param mediainfo: 识别的媒体信息
:param transfer_type: 传输类型
:param metainfo: 源文件的识别元数据
:param force_nfo: 是否强制刮削nfo
:param force_img: 是否强制刮削图片
:return: 成功或失败
"""

def __get_mediainfo(_meta: MetaBase, _mediainfo: MediaInfo) -> Optional[MediaInfo]:
"""
获取豆瓣媒体信息
"""
if not _meta.name:
return None
# 查询豆瓣详情
if not _mediainfo.douban_id:
# 根据TMDB名称查询豆瓣数据
_doubaninfo = self.match_doubaninfo(name=_mediainfo.title,
imdbid=_mediainfo.imdb_id,
mtype=_mediainfo.type,
year=_mediainfo.year)
if not _doubaninfo:
logger.warn(f"未找到 {_mediainfo.title} 的豆瓣信息")
return None
_doubaninfo = self.douban_info(doubanid=_doubaninfo.get("id"), mtype=_mediainfo.type)
else:
_doubaninfo = self.douban_info(doubanid=_mediainfo.douban_id,
mtype=_mediainfo.type)
if not _doubaninfo:
logger(f"未获取到 {_mediainfo.douban_id} 的豆瓣媒体信息,无法刮削!")
return None
# 豆瓣媒体信息
_doubanmedia = MediaInfo(douban_info=_doubaninfo)
# 补充图片
self.obtain_images(_doubanmedia)
return _doubanmedia

if settings.SCRAP_SOURCE != "douban":
return None
if SystemUtils.is_bluray_dir(path):
# 蓝光原盘
logger.info(f"开始刮削蓝光原盘:{path} ...")
meta = MetaInfo(path.stem)
if not meta.name:
return
# 查询豆瓣详情
if not mediainfo.douban_id:
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
imdbid=mediainfo.imdb_id,
mtype=mediainfo.type,
year=mediainfo.year)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
return
doubaninfo = self.douban_info(doubanid=doubaninfo.get("id"), mtype=mediainfo.type)
else:
doubaninfo = self.douban_info(doubanid=mediainfo.douban_id,
mtype=mediainfo.type)
if not doubaninfo:
logger(f"未获取到 {mediainfo.douban_id} 的豆瓣媒体信息,无法刮削!")
return
# 豆瓣媒体信息
mediainfo = MediaInfo(douban_info=doubaninfo)
# 补充图片
self.obtain_images(mediainfo)
# 优先使用传入metainfo
meta = metainfo or MetaInfo(path.name)
# 刮削路径
scrape_path = path / path.name
# 媒体信息
doubanmedia = __get_mediainfo(_meta=meta, _mediainfo=mediainfo)
if not doubanmedia:
return
# 刮削
self.scraper.gen_scraper_files(meta=meta,
mediainfo=mediainfo,
mediainfo=doubanmedia,
file_path=scrape_path,
transfer_type=transfer_type,
force_nfo=force_nfo,
force_img=force_img)
+        elif path.is_file():
+            # Scrape a single file
+            logger.info(f"Start scraping library file: {path} ...")
+            # Prefer the metainfo passed in
+            meta = metainfo or MetaInfoPath(path)
+            # Media info
+            doubanmedia = __get_mediainfo(_meta=meta, _mediainfo=mediainfo)
+            if not doubanmedia:
+                return
+            # Scrape
+            self.scraper.gen_scraper_files(meta=meta,
+                                           mediainfo=doubanmedia,
+                                           file_path=path,
+                                           transfer_type=transfer_type,
+                                           force_nfo=force_nfo,
+                                           force_img=force_img)
         else:
             # All files in the directory
             for file in SystemUtils.list_files(path, settings.RMT_MEDIAEXT):
                 if not file:
                     continue
                 logger.info(f"Start scraping library file: {file} ...")
                 try:
-                    meta = MetaInfo(file.stem)
-                    if not meta.name:
-                        continue
-                    if not mediainfo.douban_id:
-                        # Look up Douban data by title
-                        doubaninfo = self.match_doubaninfo(name=mediainfo.title,
-                                                           imdbid=mediainfo.imdb_id,
-                                                           mtype=mediainfo.type,
-                                                           year=mediainfo.year,
-                                                           season=meta.begin_season)
-                        if not doubaninfo:
-                            logger.warn(f"Douban info not found for {mediainfo.title}")
-                            break
-                        # Query Douban details
-                        doubaninfo = self.douban_info(doubanid=doubaninfo.get("id"), mtype=mediainfo.type)
-                    else:
-                        doubaninfo = self.douban_info(doubanid=mediainfo.douban_id,
-                                                      mtype=mediainfo.type)
-                    if not doubaninfo:
-                        logger(f"Failed to get Douban media info for {mediainfo.douban_id}, unable to scrape!")
-                        continue
+                    meta = MetaInfoPath(file)
                     # Douban media info
-                    mediainfo = MediaInfo(douban_info=doubaninfo)
-                    # Supplement images
-                    self.obtain_images(mediainfo)
+                    doubanmedia = __get_mediainfo(_meta=meta, _mediainfo=mediainfo)
+                    if not doubanmedia:
+                        return
                     # Scrape
                     self.scraper.gen_scraper_files(meta=meta,
-                                                   mediainfo=mediainfo,
+                                                   mediainfo=doubanmedia,
                                                    file_path=file,
                                                    transfer_type=transfer_type,
                                                    force_nfo=force_nfo,
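
A minimal sketch of the difference behind the MetaInfo(file.stem) to MetaInfoPath(file) switch above, assuming MetaInfoPath can also draw on parent directories (for example the Season folder) when recognizing the file; that rationale is an inference, not stated in the commit.

from pathlib import Path

from app.core.metainfo import MetaInfo, MetaInfoPath

p = Path("/media/TV/Example (2023)/Season 02/Example - S02E05.mkv")

meta_name = MetaInfo(p.stem)   # old behaviour: only "Example - S02E05" is parsed
meta_path = MetaInfoPath(p)    # new behaviour: the full path is available for recognition

print(meta_name.begin_season, meta_path.begin_season)
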
app/modules/themoviedb/__init__.py (5 changes: 2 additions & 3 deletions)
@@ -7,7 +7,6 @@
 from app.core.config import settings
 from app.core.context import MediaInfo
 from app.core.meta import MetaBase
-from app.core.metainfo import MetaInfo
 from app.log import logger
 from app.modules import _ModuleBase
 from app.modules.themoviedb.category import CategoryHelper
@@ -263,11 +262,12 @@ def search_medias(self, meta: MetaBase) -> Optional[List[MediaInfo]]:
         return []

     def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str,
-                        metainfo: MetaInfo = None, force_nfo: bool = False, force_img: bool = False) -> None:
+                        metainfo: MetaBase = None, force_nfo: bool = False, force_img: bool = False) -> None:
         """
         Scrape metadata
         :param path: media file path
         :param mediainfo: recognized media info
+        :param metainfo: metadata recognized from the source file
         :param transfer_type: transfer type
         :param force_nfo: force scraping of the NFO file
         :param force_img: force scraping of images
@@ -304,7 +304,6 @@ def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str,
                     self.scraper.gen_scraper_files(mediainfo=mediainfo,
                                                    file_path=file,
                                                    transfer_type=transfer_type,
-                                                   metainfo=metainfo,
                                                    force_nfo=force_nfo,
                                                    force_img=force_img)
         logger.info(f"{path} scraping completed")
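
The kwarg removed in the last hunk sits inside the per-file loop of a directory scrape. A plausible reading (an inference, not stated in the commit) is that a single upstream metainfo cannot describe every file in the directory, so each file keeps its own parsed meta, roughly like this sketch:

from pathlib import Path

from app.core.metainfo import MetaInfo


def iter_file_metas(directory: Path, extensions=(".mkv", ".mp4")):
    # One meta per file: the upstream metainfo describes only the transferred
    # source file, not every sibling found during a directory scrape.
    for file in sorted(directory.rglob("*")):
        if file.suffix.lower() in extensions:
            yield file, MetaInfo(file.stem)
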
app/modules/themoviedb/scraper.py (10 changes: 7 additions & 3 deletions)
@@ -7,6 +7,7 @@

 from app.core.config import settings
 from app.core.context import MediaInfo
+from app.core.meta import MetaBase
 from app.core.metainfo import MetaInfo
 from app.log import logger
 from app.schemas.types import MediaType
@@ -26,10 +27,11 @@ def __init__(self, tmdb):
         self.tmdb = tmdb

     def gen_scraper_files(self, mediainfo: MediaInfo, file_path: Path, transfer_type: str,
-                          metainfo: MetaInfo = None, force_nfo: bool = False, force_img: bool = False):
+                          metainfo: MetaBase = None, force_nfo: bool = False, force_img: bool = False):
         """
         Generate scraping files, including NFO and images; the path passed in is a file path
         :param mediainfo: media info
+        :param metainfo: metadata recognized from the source file
         :param file_path: file path or directory path
         :param transfer_type: transfer type
         :param force_nfo: whether to force generating the NFO
Expand Down Expand Up @@ -76,7 +78,9 @@ def __get_episode_detail(_seasoninfo: dict, _episode: int):
         # TV series: the path is each season's file name, i.e. Title/Season xx/Title SxxExx.xxx
         else:
             # Use the meta info passed from upstream if available, otherwise recognize from the file name
-            meta = metainfo or MetaInfo(file_path.stem)
+            meta = metainfo or MetaInfo(file_path.name)
+            if meta.begin_season is None:
+                meta.begin_season = mediainfo.season if mediainfo.season is not None else 1
             # Process only when missing from the root directory
             if self._force_nfo or not file_path.parent.with_name("tvshow.nfo").exists():
                 # Root directory description file
@@ -96,7 +100,7 @@ def __get_episode_detail(_seasoninfo: dict, _episode: int):
                         self.__save_image(url=attr_value,
                                           file_path=image_path)
             # Query season info
-            seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, meta.begin_season or mediainfo.season)
+            seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, meta.begin_season)
             if seasoninfo:
                 # Season directory NFO
                 if self._force_nfo or not file_path.with_name("season.nfo").exists():
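
The last two hunks boil down to a small season-resolution rule: prefer the season parsed into meta, fall back to the MediaInfo season, then to 1, and pass that single value to get_tv_season_detail. A self-contained restatement of that rule (illustrative values only):

from typing import Optional


def resolve_season(meta_begin_season: Optional[int], mediainfo_season: Optional[int]) -> int:
    # Mirrors the fallback added in gen_scraper_files above.
    if meta_begin_season is not None:
        return meta_begin_season
    return mediainfo_season if mediainfo_season is not None else 1


assert resolve_season(2, None) == 2     # season parsed from the file wins
assert resolve_season(None, 3) == 3     # otherwise use the recognized media's season
assert resolve_season(None, None) == 1  # default to season 1
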
