Skip to content

Commit

Permalink
fix: 合并冲突
Browse files Browse the repository at this point in the history
  • Loading branch information
linyuan0213 committed Mar 27, 2023
2 parents c0ae0a4 + 9e890da commit 4dcfcf8
Show file tree
Hide file tree
Showing 56 changed files with 1,192 additions and 497 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build-dev.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: NAStool Docker
name: NAStool Dev
on:
workflow_dispatch:
push:
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -53,4 +53,4 @@ jobs:
push: true
tags: |
${{ secrets.DOCKER_USERNAME }}/nas-tools:latest
${{ secrets.DOCKER_USERNAME }}/nas-tools:${{ env.app_version }}
${{ secrets.DOCKER_USERNAME }}/nas-tools:${{ env.app_version }}
15 changes: 11 additions & 4 deletions app/brushtask.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,8 @@ def get_brushtask_info(self, taskid=None):
"rss_rule": eval(task.RSS_RULE),
"remove_rule": eval(task.REMOVE_RULE),
"seed_size": task.SEED_SIZE,
"rss_url": site_info.get("rssurl"),
"rss_url": task.RSSURL if task.RSSURL else site_info.get("rssurl"),
"rss_url_show": task.RSSURL,
"cookie": site_info.get("cookie"),
"ua": site_info.get("ua"),
"download_count": task.DOWNLOAD_COUNT,
Expand Down Expand Up @@ -176,7 +177,7 @@ def check_task_rss(self, taskid):
dlcount=rss_rule.get("dlcount")):
return

rss_result = Rss.parse_rssxml(rss_url)
rss_result = Rss.parse_rssxml(rss_url, proxy=site_proxy)
if len(rss_result) == 0:
log.warn("【Brush】%s RSS未下载到数据" % site_name)
return
Expand Down Expand Up @@ -299,7 +300,10 @@ def __send_message(_task_name, _delete_type, _torrent_name):
continue
# 被手动从下载器删除的种子列表
remove_torrent_ids = list(
set(torrent_ids).difference(set([torrent.get("hash") for torrent in torrents])))
set(torrent_ids).difference(
set([(torrent.get("hash")
if downloader_type == 'qbittorrent'
else str(torrent.id)) for torrent in torrents])))
# 完成的种子
for torrent in torrents:
torrent_info = self.__get_torrent_dict(downloader_type=downloader_type,
Expand Down Expand Up @@ -338,7 +342,10 @@ def __send_message(_task_name, _delete_type, _torrent_name):
continue
# 更新手动从下载器删除的种子列表
remove_torrent_ids = list(
set(remove_torrent_ids).difference(set([torrent.get("hash") for torrent in torrents])))
set(remove_torrent_ids).difference(
set([(torrent.get("hash")
if downloader_type == 'qbittorrent'
else str(torrent.id)) for torrent in torrents])))
# 下载中的种子
for torrent in torrents:
torrent_info = self.__get_torrent_dict(downloader_type=downloader_type,
Expand Down
2 changes: 1 addition & 1 deletion app/conf/moduleconf.py
Original file line number Diff line number Diff line change
Expand Up @@ -689,7 +689,7 @@ class ModuleConf(object):
"id": "plex.token",
"required": False,
"title": "X-Plex-Token",
"tooltip": "Plex网页Cookie中的X-Plex-Token,通过浏览器F12->网络中获取,如填写将优先使用;Token与服务器名称、用户名及密码 二选一,推荐使用Token,连接速度更快",
"tooltip": "Plex网页Url中的X-Plex-Token,通过浏览器F12->网络从请求URL中获取,如填写将优先使用;Token与服务器名称、用户名及密码 二选一,推荐使用Token,连接速度更快",
"type": "text",
"placeholder": "X-Plex-Token与其它认证信息二选一"
},
Expand Down
1 change: 1 addition & 0 deletions app/db/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -378,6 +378,7 @@ class SITEBRUSHTASK(Base):
ID = Column(Integer, Sequence('ID'), primary_key=True)
NAME = Column(Text, index=True)
SITE = Column(Text)
RSSURL = Column(Text)
FREELEECH = Column(Text)
RSS_RULE = Column(Text)
REMOVE_RULE = Column(Text)
Expand Down
3 changes: 2 additions & 1 deletion app/downloader/client/qbittorrent.py
Original file line number Diff line number Diff line change
Expand Up @@ -271,10 +271,11 @@ def get_remove_torrents(self, config=None):
size = remove_torrent.get("size")
for torrent in torrents:
if torrent.name == name and torrent.size == size and torrent.hash not in remove_torrents_ids:
site = parse.urlparse(torrent.tracker).netloc.split(".") if torrent.tracker else [""]
remove_torrents_plus.append({
"id": torrent.hash,
"name": torrent.name,
"site": parse.urlparse(torrent.tracker).netloc.split(".")[-2],
"site": site[-2] if len(site) >= 2 else site[0],
"size": torrent.size
})
remove_torrents_plus += remove_torrents
Expand Down
19 changes: 19 additions & 0 deletions app/helper/db_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -301,6 +301,23 @@ def get_transfer_info_by_id(self, logid):
"""
return self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.ID == int(logid)).first()

def get_transfer_info_by(self, tmdbid, season=None, season_episode=None):
    """
    Query transfer-history records by TMDB id, optionally narrowed to a
    season or to one specific season/episode.

    :param tmdbid: TMDB id of the media (required; falsy yields no results)
    :param season: season string, matched as a substring of SEASON_EPISODE
                   (takes precedence over season_episode when both are given,
                   matching the original branch order)
    :param season_episode: exact SEASON_EPISODE value for a single episode
    :return: list of TRANSFERHISTORY rows; empty list when nothing matches
             (the original fell through to an implicit None for a falsy
             tmdbid, which crashed callers that iterate the result)
    """
    if not tmdbid:
        return []
    # Movie, or every season/episode of a TV show
    if not season and not season_episode:
        return self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.TMDBID == int(tmdbid)).all()
    # One season of a TV show: substring match on SEASON_EPISODE
    if season:
        return self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.TMDBID == int(tmdbid),
                                                      TRANSFERHISTORY.SEASON_EPISODE.like(f"%{season}%")).all()
    # One specific episode: exact match on SEASON_EPISODE
    if season_episode:
        return self._db.query(TRANSFERHISTORY).filter(TRANSFERHISTORY.TMDBID == int(tmdbid),
                                                      TRANSFERHISTORY.SEASON_EPISODE == season_episode).all()
    return []

def is_transfer_history_exists_by_source_full_path(self, source_full_path):
"""
据源文件的全路径查询识别转移记录
Expand Down Expand Up @@ -1687,6 +1704,7 @@ def insert_brushtask(self, brush_id, item):
RSS_RULE=str(item.get('rss_rule')),
REMOVE_RULE=str(item.get('remove_rule')),
SEED_SIZE=item.get('seed_size'),
RSSURL=item.get('rssurl'),
INTEVAL=item.get('interval'),
DOWNLOADER=item.get('downloader'),
LABEL=item.get('label'),
Expand All @@ -1708,6 +1726,7 @@ def insert_brushtask(self, brush_id, item):
"RSS_RULE": str(item.get('rss_rule')),
"REMOVE_RULE": str(item.get('remove_rule')),
"SEED_SIZE": item.get('seed_size'),
"RSSURL": item.get('rssurl'),
"INTEVAL": item.get('interval'),
"DOWNLOADER": item.get('downloader'),
"LABEL": item.get('label'),
Expand Down
50 changes: 47 additions & 3 deletions app/helper/ffmpeg_helper.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import json
import subprocess

from app.utils import SystemUtils
Expand All @@ -21,15 +22,58 @@ def get_thumb_image_from_video(video_path, image_path, frames="00:03:01"):
return False

@staticmethod
def extract_wav_from_video(video_path, audio_path):
def extract_wav_from_video(video_path, audio_path, audio_index=None):
"""
使用ffmpeg从视频文件中提取16000hz, 16-bit的wav格式音频
"""
if not video_path or not audio_path:
return False

command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path, '-acodec', 'pcm_s16le',
'-ac', '1', '-ar', '16000', audio_path]
# 提取指定音频流
if audio_index:
command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
'-map', f'0:a:{audio_index}',
'-acodec', 'pcm_s16le', '-ac', '1', '-ar', '16000', audio_path]
else:
command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
'-acodec', 'pcm_s16le', '-ac', '1', '-ar', '16000', audio_path]

ret = subprocess.run(command).returncode
if ret == 0:
return True
return False

@staticmethod
def get_video_metadata(video_path):
"""
获取视频元数据
"""
if not video_path:
return False

try:
command = ['ffprobe', '-v', 'quiet', '-print_format', 'json', '-show_format', '-show_streams', video_path]
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode == 0:
return json.loads(result.stdout.decode("utf-8"))
except Exception as e:
print(e)
return None

@staticmethod
def extract_subtitle_from_video(video_path, subtitle_path, subtitle_index=None):
"""
从视频中提取字幕
"""
if not video_path or not subtitle_path:
return False

if subtitle_index:
command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path,
'-map', f'0:s:{subtitle_index}',
subtitle_path]
else:
command = ['ffmpeg', "-hide_banner", "-loglevel", "warning", '-y', '-i', video_path, subtitle_path]
ret = subprocess.run(command).returncode
if ret == 0:
return True
Expand Down
2 changes: 1 addition & 1 deletion app/helper/openai_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ def translate_to_zh(self, text):
"content": user_prompt
}
])
result = completion.choices[0].message.content
result = completion.choices[0].message.content.strip()
return True, result
except Exception as e:
print(f"{str(e)}{result}")
Expand Down
60 changes: 29 additions & 31 deletions app/indexer/client/_spider.py
Original file line number Diff line number Diff line change
Expand Up @@ -295,10 +295,12 @@ def Gettitle_default(self, torrent):
for v in removelist:
title_default_item.remove(v)
if 'attribute' in title_default_selector:
render_dict.update(
{'title_default': title_default_item.attr(title_default_selector.get('attribute'))})
title_default = [item.attr(title_default_selector.get('attribute'))
for item in title_default_item.items() if item]
else:
render_dict.update({'title_default': title_default_item.text()})
title_default = [item.text() for item in title_default_item.items() if item]
if title_default:
render_dict.update({'title_default': title_default[0]})
if "title_optional" in self.fields:
title_optional_selector = self.fields.get('title_optional', {})
title_optional_item = torrent(title_optional_selector.get('selector', '')).clone()
Expand All @@ -307,10 +309,12 @@ def Gettitle_default(self, torrent):
for v in removelist:
title_optional_item.remove(v)
if 'attribute' in title_optional_selector:
render_dict.update(
{'title_optional': title_optional_item.attr(title_optional_selector.get('attribute'))})
title_optional = [item.attr(title_optional_selector.get('attribute'))
for item in title_optional_item.items() if item]
else:
render_dict.update({'title_optional': title_optional_item.text()})
title_optional = [item.text() for item in title_optional_item.items() if item]
if title_optional:
render_dict.update({'title_optional': title_optional[0]})
self.torrents_info['title'] = Template(selector.get('text')).render(fields=render_dict)
if 'filters' in selector:
self.torrents_info['title'] = self.__filter_text(self.torrents_info.get('title'),
Expand Down Expand Up @@ -352,23 +356,33 @@ def Gettitle_optional(self, torrent):
removelist = tags_selector.get('remove', '').split(', ')
for v in removelist:
tags_item.remove(v)
render_dict.update({'tags': tags_item.text()})
tags = [item.text() for item in tags_item.items() if item]
if tags:
render_dict.update({'tags': tags[0]})
if "subject" in self.fields:
subject_selector = self.fields.get('subject', {})
subject_item = torrent(subject_selector.get('selector', '')).clone()
if "remove" in subject_selector:
removelist = subject_selector.get('remove', '').split(', ')
for v in removelist:
subject_item.remove(v)
render_dict.update({'subject': subject_item.text()})
subject = [item.text() for item in subject_item.items() if item]
if subject:
render_dict.update({'subject': subject[0]})
if "description_free_forever" in self.fields:
render_dict.update({"description_free_forever": torrent(self.fields.get("description_free_forever",
{}).get("selector",
'')).text()})
description_free_forever_item = torrent(self.fields.get("description_free_forever",
{}).get("selector",
''))
description_free_forever = [item.text() for item in description_free_forever_item.items() if item]
if description_free_forever:
render_dict.update({"description_free_forever": description_free_forever[0]})
if "description_normal" in self.fields:
render_dict.update({"description_normal": torrent(self.fields.get("description_normal",
{}).get("selector",
'')).text()})
description_normal_item = torrent(self.fields.get("description_normal",
{}).get("selector",
''))
description_normal = [item.text() for item in description_normal_item.items() if item]
if description_normal:
render_dict.update({"description_normal": description_normal[0]})
self.torrents_info['description'] = Template(selector.get('text')).render(fields=render_dict)
if 'filters' in selector:
self.torrents_info['description'] = self.__filter_text(self.torrents_info.get('description'),
Expand Down Expand Up @@ -651,22 +665,6 @@ def parse(self, request, response):
html_doc = PyQuery(html_text)
# 种子筛选器
torrents_selector = self.list.get('selector', '')
str_list = list(torrents_selector)
# 兼容选择器中has()函数 部分情况下无双引号会报错
has_index = torrents_selector.find('has')
if has_index != -1 and torrents_selector.find('"') == -1:
flag = 0
str_list.insert(has_index + 4, '"')
for i in range(len(str_list)):
if i > has_index + 2:
n = str_list[i]
if n == '(':
flag = flag + 1
if n == ')':
flag = flag - 1
if flag == 0:
str_list.insert(i, '"')
torrents_selector = "".join(str_list)
# 遍历种子html列表
for torn in html_doc(torrents_selector):
self.torrents_info_array.append(copy.deepcopy(self.Getinfo(PyQuery(torn))))
Expand All @@ -676,6 +674,6 @@ def parse(self, request, response):
except Exception as err:
self.is_error = True
ExceptionUtils.exception_traceback(err)
log.warn("【Spider】错误:%s" % str(err))
log.warn(f"【Spider】错误:{self.indexername} {str(err)}")
finally:
self.is_complete = True
10 changes: 8 additions & 2 deletions app/media/fanart.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@ class Fanart:
'seasonthumb',
'tvposter',
'hdclearart']
_season_types = ['seasonposter',
'seasonthumb',
'seasonbanner']
_images = {}

def __init__(self):
Expand All @@ -47,7 +50,7 @@ def __get_fanart_images(self, media_type, queryid):
for image_type in self._tv_image_types:
images = ret.json().get(image_type)
if isinstance(images, list):
if image_type in ['seasonposter', 'seasonthumb', 'seasonbanner']:
if image_type in self._season_types:
if not self._images.get(image_type):
self._images[image_type] = {}
for image in images:
Expand All @@ -56,7 +59,10 @@ def __get_fanart_images(self, media_type, queryid):
else:
self._images[image_type] = images[0].get('url') if isinstance(images[0], dict) else ""
else:
self._images[image_type] = ""
if image_type in self._season_types:
self._images[image_type] = {}
else:
self._images[image_type] = ""
except Exception as e2:
ExceptionUtils.exception_traceback(e2)

Expand Down
7 changes: 5 additions & 2 deletions app/media/media.py
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,7 @@ def __search_chatgpt(self, file_name, mtype: MediaType):
:param mtype: 媒体类型
:return: 类型、季、集、TMDBINFO
"""

def __failed():
return mtype, None, None, None

Expand All @@ -457,6 +458,8 @@ def __failed():
file_year = str(file_info.get("year")).split("/")[0].strip()
if not file_title:
return __failed()
if not str(file_year).isdigit():
file_year = None
if mtype != MediaType.MOVIE or file_info.get("year"):
tmdb_info = self.__search_tmdb(file_media_name=file_title,
search_type=mtype,
Expand Down Expand Up @@ -1137,7 +1140,7 @@ def __get_tmdb_movie_detail(self, tmdbid, append_to_response=None):
log.info("【Meta】正在查询TMDB电影:%s ..." % tmdbid)
tmdbinfo = self.movie.details(tmdbid, append_to_response)
if tmdbinfo:
log.info("【Meta】查询结果:%s" % tmdbinfo.get("title"))
log.info(f"【Meta】{tmdbid}查询结果:{tmdbinfo.get('title')}")
return tmdbinfo or {}
except Exception as e:
print(str(e))
Expand Down Expand Up @@ -1314,7 +1317,7 @@ def __get_tmdb_tv_detail(self, tmdbid, append_to_response=None):
log.info("【Meta】正在查询TMDB电视剧:%s ..." % tmdbid)
tmdbinfo = self.tv.details(tmdbid, append_to_response)
if tmdbinfo:
log.info("【Meta】查询结果:%s" % tmdbinfo.get("name"))
log.info(f"【Meta】{tmdbid}查询结果:{tmdbinfo.get('name')}")
return tmdbinfo or {}
except Exception as e:
print(str(e))
Expand Down
Loading

0 comments on commit 4dcfcf8

Please sign in to comment.