
Commit

Merge pull request #3441 from jxxghp/dev
Dev
jxxghp authored Feb 11, 2023
2 parents 71de357 + 02ae954 commit 0db383e
Showing 7 changed files with 143 additions and 130 deletions.
67 changes: 34 additions & 33 deletions app/helper/db_helper.py
@@ -15,7 +15,7 @@ class DbHelper:
     _db = MainDb()
 
     @DbPersist(_db)
-    def insert_search_results(self, media_items: list):
+    def insert_search_results(self, media_items: list, title=None, ident_flag=True):
         """
         Insert the returned search results into the database
         """
@@ -29,38 +29,39 @@ def insert_search_results(self, media_items: list):
                 mtype = "MOV"
             else:
                 mtype = "ANI"
-            data_list.append(SEARCHRESULTINFO(
-                TORRENT_NAME=media_item.org_string,
-                ENCLOSURE=media_item.enclosure,
-                DESCRIPTION=media_item.description,
-                TYPE=mtype,
-                TITLE=media_item.title or media_item.get_name(),
-                YEAR=media_item.year,
-                SEASON=media_item.get_season_string(),
-                EPISODE=media_item.get_episode_string(),
-                ES_STRING=media_item.get_season_episode_string(),
-                VOTE=media_item.vote_average or "0",
-                IMAGE=media_item.get_backdrop_image(default=False),
-                POSTER=media_item.get_poster_image(),
-                TMDBID=media_item.tmdb_id,
-                OVERVIEW=media_item.overview,
-                RES_TYPE=json.dumps({
-                    "respix": media_item.resource_pix,
-                    "restype": media_item.resource_type,
-                    "reseffect": media_item.resource_effect,
-                    "video_encode": media_item.video_encode
-                }),
-                RES_ORDER=media_item.res_order,
-                SIZE=StringUtils.str_filesize(int(media_item.size)),
-                SEEDERS=media_item.seeders,
-                PEERS=media_item.peers,
-                SITE=media_item.site,
-                SITE_ORDER=media_item.site_order,
-                PAGEURL=media_item.page_url,
-                OTHERINFO=media_item.resource_team,
-                UPLOAD_VOLUME_FACTOR=media_item.upload_volume_factor,
-                DOWNLOAD_VOLUME_FACTOR=media_item.download_volume_factor
-            ))
+            data_list.append(
+                SEARCHRESULTINFO(
+                    TORRENT_NAME=media_item.org_string,
+                    ENCLOSURE=media_item.enclosure,
+                    DESCRIPTION=media_item.description,
+                    TYPE=mtype if ident_flag else '',
+                    TITLE=media_item.title if ident_flag else title,
+                    YEAR=media_item.year if ident_flag else '',
+                    SEASON=media_item.get_season_string() if ident_flag else '',
+                    EPISODE=media_item.get_episode_string() if ident_flag else '',
+                    ES_STRING=media_item.get_season_episode_string() if ident_flag else '',
+                    VOTE=media_item.vote_average or "0",
+                    IMAGE=media_item.get_backdrop_image(default=False),
+                    POSTER=media_item.get_poster_image(),
+                    TMDBID=media_item.tmdb_id,
+                    OVERVIEW=media_item.overview,
+                    RES_TYPE=json.dumps({
+                        "respix": media_item.resource_pix,
+                        "restype": media_item.resource_type,
+                        "reseffect": media_item.resource_effect,
+                        "video_encode": media_item.video_encode
+                    }),
+                    RES_ORDER=media_item.res_order,
+                    SIZE=StringUtils.str_filesize(int(media_item.size)),
+                    SEEDERS=media_item.seeders,
+                    PEERS=media_item.peers,
+                    SITE=media_item.site,
+                    SITE_ORDER=media_item.site_order,
+                    PAGEURL=media_item.page_url,
+                    OTHERINFO=media_item.resource_team,
+                    UPLOAD_VOLUME_FACTOR=media_item.upload_volume_factor,
+                    DOWNLOAD_VOLUME_FACTOR=media_item.download_volume_factor
+                ))
         self._db.insert(data_list)
 
     def get_search_result_by_id(self, dl_id):
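The widened insert_search_results signature lets callers store results that were never identified against TMDB. A minimal, self-contained sketch of that behaviour, assuming nothing beyond the hunk above: SEARCHRESULTINFO is replaced by a plain dict, only a few columns are shown, and the example item values are made up.

def build_row(media_item, title=None, ident_flag=True):
    # Mirrors the `value if ident_flag else ''` pattern used for the identification columns
    return {
        "TYPE": media_item["type"] if ident_flag else '',
        "TITLE": media_item["title"] if ident_flag else title,
        "YEAR": media_item["year"] if ident_flag else '',
        "ES_STRING": media_item["es_string"] if ident_flag else '',
    }

item = {"type": "TV", "title": "Some Show", "year": "2022", "es_string": "S01 E01"}
print(build_row(item))                                       # identified: TMDB fields kept
print(build_row(item, title="some show", ident_flag=False))  # unidentified: raw keyword becomes TITLE, the rest stays blank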
7 changes: 5 additions & 2 deletions app/media/media.py
@@ -2088,15 +2088,18 @@ def get_detail_url(mtype, tmdbid):
         else:
             return "https://www.themoviedb.org/tv/%s" % tmdbid
 
-    def get_episode_images(self, tv_id, season_id, episode_id):
+    def get_episode_images(self, tv_id, season_id, episode_id, orginal=False):
         """
         Get the cover image of a single episode of a TV show
         """
         if not self.episode:
             return ""
         res = self.episode.images(tv_id, season_id, episode_id)
         if res:
-            return TMDB_IMAGE_W500_URL % res[0].get("file_path")
+            if orginal:
+                return TMDB_IMAGE_ORIGINAL_URL % res[0].get("file_path")
+            else:
+                return TMDB_IMAGE_W500_URL % res[0].get("file_path")
         else:
             return ""
 
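A small sketch of what the new switch does, assuming the two TMDB URL constants are simple format strings (their real values live elsewhere in the project); the parameter keeps the orginal spelling used in the source, and the file_path value is invented.

# Stand-ins for TMDB_IMAGE_ORIGINAL_URL / TMDB_IMAGE_W500_URL referenced in app/media/media.py
TMDB_IMAGE_ORIGINAL_URL = "https://image.tmdb.org/t/p/original%s"
TMDB_IMAGE_W500_URL = "https://image.tmdb.org/t/p/w500%s"

def episode_image_url(file_path, orginal=False):
    # orginal=True returns the full-resolution artwork, otherwise the 500px-wide variant
    return (TMDB_IMAGE_ORIGINAL_URL if orginal else TMDB_IMAGE_W500_URL) % file_path

print(episode_image_url("/abc123.jpg"))                # .../t/p/w500/abc123.jpg
print(episode_image_url("/abc123.jpg", orginal=True))  # .../t/p/original/abc123.jpg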
35 changes: 24 additions & 11 deletions app/media/scraper.py
@@ -257,19 +257,23 @@ def gen_tv_episode_nfo_file(self,
         self.__save_nfo(doc, os.path.join(out_path, os.path.join(out_path, "%s.nfo" % file_name)))
 
     @staticmethod
-    def __save_image(url, out_path, itype="poster"):
+    def __save_image(url, out_path, itype=''):
         """
         Download the image (poster.jpg etc.) and save it
         """
         if not url or not out_path:
             return
-        if os.path.exists(os.path.join(out_path, "%s.%s" % (itype, str(url).split('.')[-1]))):
+        if itype:
+            image_path = os.path.join(out_path, "%s.%s" % (itype, str(url).split('.')[-1]))
+        else:
+            image_path = out_path
+        if os.path.exists(image_path):
             return
         try:
             log.info(f"【Scraper】正在下载{itype}图片:{url} ...")
             r = RequestUtils().get_res(url)
             if r:
-                with open(file=os.path.join(out_path, "%s.%s" % (itype, str(url).split('.')[-1])),
+                with open(file=image_path,
                           mode="wb") as img:
                     img.write(r.content)
                 log.info(f"【Scraper】{itype}图片已保存:{out_path}")
@@ -325,7 +329,7 @@ def gen_scraper_files(self, media, scraper_nfo, scraper_pic, dir_path, file_name
                 if scraper_movie_pic.get("poster"):
                     poster_image = media.get_poster_image(original=True)
                     if poster_image:
-                        self.__save_image(poster_image, dir_path)
+                        self.__save_image(poster_image, dir_path, "poster")
                 # backdrop
                 if scraper_movie_pic.get("backdrop"):
                     backdrop_image = media.get_backdrop_image(default=False, original=True)
@@ -375,7 +379,7 @@ def gen_scraper_files(self, media, scraper_nfo, scraper_pic, dir_path, file_name
                 if scraper_tv_pic.get("poster"):
                     poster_image = media.get_poster_image(original=True)
                     if poster_image:
-                        self.__save_image(poster_image, os.path.dirname(dir_path))
+                        self.__save_image(poster_image, os.path.dirname(dir_path), "poster")
                 # backdrop
                 if scraper_tv_pic.get("backdrop"):
                     backdrop_image = media.get_backdrop_image(default=False, original=True)
@@ -457,15 +461,24 @@ def gen_scraper_files(self, media, scraper_nfo, scraper_pic, dir_path, file_name
                             self.__save_image(seasonthumb,
                                               os.path.dirname(dir_path),
                                               "season%s-landscape" % media.get_season_seq().rjust(2, '0'))
-                # Process episodes
+                # Process episode images
                 if scraper_tv_pic.get("episode_thumb"):
                     episode_thumb = os.path.join(dir_path, file_name + "-thumb.jpg")
                     if not os.path.exists(episode_thumb):
-                        video_path = os.path.join(dir_path, file_name + file_ext)
-                        log.info(f"【Scraper】正在生成缩略图:{video_path} ...")
-                        FfmpegHelper().get_thumb_image_from_video(video_path=video_path,
-                                                                  image_path=episode_thumb)
-                        log.info(f"【Scraper】缩略图生成完成:{episode_thumb}")
+                        # Query TMDB first
+                        episode_image = self.media.get_episode_images(tv_id=media.tmdb_id,
+                                                                      season_id=media.get_season_seq(),
+                                                                      episode_id=media.get_episode_seq(),
+                                                                      orginal=True)
+                        if episode_image:
+                            self.__save_image(episode_image, episode_thumb)
+                        else:
+                            # Generate a thumbnail from the video file
+                            video_path = os.path.join(dir_path, file_name + file_ext)
+                            log.info(f"【Scraper】正在生成缩略图:{video_path} ...")
+                            FfmpegHelper().get_thumb_image_from_video(video_path=video_path,
+                                                                      image_path=episode_thumb)
+                            log.info(f"【Scraper】缩略图生成完成:{episode_thumb}")
 
             except Exception as e:
                 ExceptionUtils.exception_traceback(e)
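The episode-thumbnail change keeps the old ffmpeg frame grab only as a fallback: TMDB episode artwork is tried first, and the local video is decoded only when TMDB returns nothing. A rough, self-contained sketch of that ordering; fetch_tmdb_image and capture_frame are placeholders for self.media.get_episode_images(..., orginal=True) and FfmpegHelper().get_thumb_image_from_video.

import os

def scrape_episode_thumb(episode_thumb, video_path, fetch_tmdb_image, capture_frame):
    """Save an episode thumbnail, preferring TMDB artwork over a frame grab."""
    if os.path.exists(episode_thumb):
        return
    image_url = fetch_tmdb_image()
    if image_url:
        # would be downloaded via __save_image(image_url, episode_thumb)
        print(f"download {image_url} -> {episode_thumb}")
    else:
        # no artwork on TMDB: extract a frame from the local file instead
        capture_frame(video_path, episode_thumb)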
2 changes: 1 addition & 1 deletion web/action.py
@@ -3502,7 +3502,7 @@ def get_search_result(self, data=None):
                 title_string = f"{title_string} ({item.YEAR})"
             # TV series season/episode key
             mtype = item.TYPE or ""
-            SE_key = item.ES_STRING or "TV" if mtype != "MOV" else "MOV"
+            SE_key = item.ES_STRING if item.ES_STRING and mtype != "MOV" else "MOV"
             media_type = {"MOV": "电影", "TV": "电视剧", "ANI": "动漫"}.get(mtype)
             # Torrent info
             torrent_item = {
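The old SE_key expression is parsed by Python as (item.ES_STRING or "TV") if mtype != "MOV" else "MOV", so every non-movie row without a season/episode string was grouped under "TV". The new expression sends those rows (now common, since ident_flag=False leaves ES_STRING blank) to the same "MOV" bucket as movies. A tiny worked comparison with made-up values:

def se_key_old(es_string, mtype):
    # parsed as (es_string or "TV") if mtype != "MOV" else "MOV"
    return es_string or "TV" if mtype != "MOV" else "MOV"

def se_key_new(es_string, mtype):
    return es_string if es_string and mtype != "MOV" else "MOV"

print(se_key_old("S01 E01", "TV"), se_key_new("S01 E01", "TV"))  # S01 E01 / S01 E01
print(se_key_old("", "TV"), se_key_new("", "TV"))                # TV / MOV
print(se_key_old("", "MOV"), se_key_new("", "MOV"))              # MOV / MOV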
4 changes: 3 additions & 1 deletion web/backend/search_torrents.py
@@ -161,7 +161,9 @@ def search_medias_for_web(content, ident_flag=True, filters=None, tmdbid=None, m
     media_list = sorted(media_list, key=lambda x: "%s%s%s" % (str(x.res_order).rjust(3, '0'),
                                                               str(x.site_order).rjust(3, '0'),
                                                               str(x.seeders).rjust(10, '0')), reverse=True)
-    dbhepler.insert_search_results(media_list)
+    dbhepler.insert_search_results(media_items=media_list,
+                                   ident_flag=ident_flag,
+                                   title=content)
     return 0, ""
 
 
