feat | issue/38 > Indexers support multiple instances
Implementation:
Implemented by merging Jackett with the built-in indexers.
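In practice the merge removes the Jackett-specific client config (deleted from app/conf/moduleconf.py below) and has IndexerHelper keep two lists side by side: built-in private sites decoded from sites.dat, and user-defined sites read from the database, searched together. A condensed, illustrative sketch of that lookup pattern (the class and parameter names are placeholders mirroring the diff below, not the actual module):

class MergedIndexerLookup:
    def __init__(self, custom_indexers, private_indexers):
        # User-defined sites loaded from the database.
        self._indexers = custom_indexers
        # Built-in private sites decoded from sites.dat.
        self._private_indexers = private_indexers

    def get_indexer(self, url):
        # One combined list replaces the separate Jackett/built-in code paths.
        # (The real helper compares domains with StringUtils.url_equal.)
        for indexer in self._indexers + self._private_indexers:
            if indexer.get("domain") == url:
                return indexer
        return None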
xuyanling committed Aug 11, 2023
1 parent 251ced0 commit bb4c96e
Showing 14 changed files with 469 additions and 434 deletions.
32 changes: 0 additions & 32 deletions app/conf/moduleconf.py
@@ -891,38 +891,6 @@ class ModuleConf(object):

# Indexers
INDEXER_CONF = {
"jackett": {
"name": "Jackett",
"img_url": "./static/img/indexer/jackett.png",
"background": "bg-black",
"test_command": "app.indexer.client.jackett|Jackett",
"config": {
"host": {
"id": "jackett.host",
"required": True,
"title": "Jackett地址",
"tooltip": "Jackett访问地址和端口,如为https需加https://前缀。注意需要先在Jackett中添加indexer,才能正常测试通过和使用",
"type": "text",
"placeholder": "http://127.0.0.1:9117"
},
"api_key": {
"id": "jackett.api_key",
"required": True,
"title": "Api Key",
"tooltip": "Jackett管理界面右上角复制API Key",
"type": "text",
"placeholder": ""
},
"password": {
"id": "jackett.password",
"required": False,
"title": "密码",
"tooltip": "Jackett管理界面中配置的Admin password,如未配置可为空",
"type": "password",
"placeholder": ""
}
}
},
"prowlarr": {
"name": "Prowlarr",
"img_url": "../static/img/indexer/prowlarr.png",
60 changes: 34 additions & 26 deletions app/helper/indexer_helper.py
@@ -1,46 +1,51 @@
import os.path
import pickle
from os.path import join
import json
import base64
from base64 import b64decode

import log
from app.utils import StringUtils, ExceptionUtils
from app.utils.commons import singleton
from config import Config
from app.helper import DbHelper
from config import Config

@singleton
class IndexerHelper:

_private_indexers = []
_indexers = []

def __init__(self):
self.init_config()

def init_config(self):
try:
with open(os.path.join(Config().get_inner_config_path(), "sites.dat"), "r") as f:
_indexers_json = base64.b64decode(f.read())
self._indexers = json.loads(_indexers_json).get("indexer")
with open(join(Config().get_inner_config_path(), "sites.dat"), "r") as f:
_indexers_json = b64decode(f.read())
self._private_indexers = json.loads(_indexers_json).get("indexer")
except Exception as err:
ExceptionUtils.exception_traceback(err)

try:
for indexer in DbHelper().get_indexer_custom_site():
self._indexers.append(json.loads(indexer.INDEXER))
except Exception as err:
pass
ExceptionUtils.exception_traceback(err)

def get_all_indexers(self):
self.init_config()
return self._indexers

def get_indexer_info(self, url, public=False):
for indexer in self._indexers:
if not public and indexer.get("public"):
continue
if StringUtils.url_equal(indexer.get("domain"), url):
return indexer
return None
def get_public_indexers(self):
"""
Get the built-in public sites
"""
try:
with open(join(Config().get_inner_config_path(), "sites.dat"), "r") as f:
_indexers_json = b64decode(f.read())
_indexer_list = json.loads(_indexers_json).get("indexer")
return [item for item in _indexer_list if item['public'] is True]
except Exception as err:
return []

def get_indexer(self,
url,
@@ -55,9 +60,12 @@ def get_indexer(self,
render=None,
language=None,
pri=None):
self.init_config()
if not url:
return None
for indexer in self._indexers:
_all_indexers = self._indexers + self._private_indexers
for indexer in _all_indexers:
indexer_domain = indexer.get("domain")
if not indexer.get("domain"):
continue
if StringUtils.url_equal(indexer.get("domain"), url):
@@ -95,12 +103,14 @@ def __init__(self,
pri=None):
if not datas:
return
# Indexer ID
# ID
self.id = datas.get('id')
# Site ID
self.siteid = siteid
# Name
self.name = name if name else datas.get('name')
self.name = datas.get('name') if not name else name
# Whether this is a built-in site
self.builtin = builtin
self.builtin = datas.get('builtin')
# Domain
self.domain = datas.get('domain')
# Search
@@ -110,26 +120,24 @@ def __init__(self,
# Parser
self.parser = parser if parser is not None else datas.get('parser')
# Whether page rendering is enabled
self.render = render and datas.get("render")
self.render = render if render is not None else datas.get("render")
# Browse
self.browse = datas.get('browse', {})
# Torrent filtering
self.torrents = datas.get('torrents', {})
# Categories
self.category = datas.get('category', {})
# Site ID
self.siteid = siteid
# Cookie
self.cookie = cookie
# User-Agent
self.ua = ua
# Filter rules
self.rule = rule
# Whether this is a public site
self.public = public if public is not None else datas.get('public')
self.public = datas.get('public') if not public else public
# Whether to use a proxy
self.proxy = proxy if proxy is not None else datas.get('proxy')
self.proxy = datas.get('proxy') if not proxy else proxy
# Only specific supported languages
self.language = language if language else datas.get('language')
self.language = language
# Indexer priority
self.pri = pri if pri else 0
self.pri = pri if pri else 0
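
A quick usage sketch of the helper as revised above (illustrative; it assumes IndexerHelper is importable from app.helper, as DbHelper is in the imports shown, and the tracker URL is a placeholder):

from app.helper import IndexerHelper

helper = IndexerHelper()
# Custom indexers imported from the database.
custom = helper.get_all_indexers()
# Built-in public sites decoded from sites.dat.
public = helper.get_public_indexers()
# Domain lookup consults the custom and built-in private lists together.
indexer = helper.get_indexer(url="https://example-tracker.org")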
155 changes: 2 additions & 153 deletions app/indexer/client/_base.py
@@ -1,13 +1,11 @@
import datetime
import xml.dom.minidom
from abc import ABCMeta, abstractmethod

import log
from app.filter import Filter
from app.helper import ProgressHelper, DbHelper
from app.helper import ProgressHelper
from app.media import Media
from app.media.meta import MetaInfo
from app.utils import DomUtils, RequestUtils, StringUtils, ExceptionUtils
from app.utils.types import MediaType, SearchType, ProgressKey


Expand All @@ -22,13 +20,11 @@ class _IIndexClient(metaclass=ABCMeta):
media = None
progress = None
filter = None
dbhelper = None

def __init__(self):
self.media = Media()
self.filter = Filter()
self.progress = ProgressHelper()
self.dbhelper = DbHelper()

@abstractmethod
def match(self, ctype):
@@ -75,154 +71,7 @@ def search(self, order_seq,
"""
Multi-threaded search by keyword
"""
if not indexer or not key_word:
return None
if filter_args is None:
filter_args = {}
# Filter out sites outside the configured search scope
if filter_args.get("site") and indexer.name not in filter_args.get("site"):
return []
# Track elapsed time
start_time = datetime.datetime.now()
log.info(f"【{self.index_type}】开始搜索Indexer:{indexer.name} ...")
# Handle special characters
search_word = StringUtils.handler_special_chars(text=key_word,
replace_word=" ",
allow_space=True)
api_url = f"{indexer.domain}?apikey={self.api_key}&t=search&q={search_word}"
result_array = self.__parse_torznabxml(api_url)

# Time spent indexing
seconds = (datetime.datetime.now() - start_time).seconds
if len(result_array) == 0:
log.warn(f"【{self.index_type}{indexer.name} 未搜索到数据")
self.progress.update(ptype=ProgressKey.Search, text=f"{indexer.name} 未搜索到数据")

self.dbhelper.insert_indexer_statistics(indexer=indexer.name,
itype=self.client_id,
seconds=seconds,
result='N'
)
return []
else:
log.warn(f"【{self.index_type}{indexer.name} 返回数据:{len(result_array)}")
# Update progress
self.progress.update(ptype=ProgressKey.Search, text=f"{indexer.name} 返回 {len(result_array)} 条数据")
# Indexer statistics
self.dbhelper.insert_indexer_statistics(indexer=indexer.name,
itype=self.client_id,
seconds=seconds,
result='Y'
)
return self.filter_search_results(result_array=result_array,
order_seq=order_seq,
indexer=indexer,
filter_args=filter_args,
match_media=match_media,
start_time=start_time)

@staticmethod
def __parse_torznabxml(url):
"""
Parse torrent information from Torznab XML
:param url: the URL to query
:return: a list of parsed torrent entries
"""
if not url:
return []
try:
ret = RequestUtils(timeout=10).get_res(url)
except Exception as e2:
ExceptionUtils.exception_traceback(e2)
return []
if not ret:
return []
xmls = ret.text
if not xmls:
return []

torrents = []
try:
# Parse the XML
dom_tree = xml.dom.minidom.parseString(xmls)
root_node = dom_tree.documentElement
items = root_node.getElementsByTagName("item")
for item in items:
try:
# indexer id
indexer_id = DomUtils.tag_value(item, "jackettindexer", "id",
default=DomUtils.tag_value(item, "prowlarrindexer", "id", ""))
# indexer
indexer = DomUtils.tag_value(item, "jackettindexer",
default=DomUtils.tag_value(item, "prowlarrindexer", default=""))

# Title
title = DomUtils.tag_value(item, "title", default="")
if not title:
continue
# Torrent link
enclosure = DomUtils.tag_value(item, "enclosure", "url", default="")
if not enclosure:
continue
# Description
description = DomUtils.tag_value(item, "description", default="")
# Torrent size
size = DomUtils.tag_value(item, "size", default=0)
# Torrent page
page_url = DomUtils.tag_value(item, "comments", default="")

# Seeders
seeders = 0
# Peers
peers = 0
# Whether freeleech
freeleech = False
# Download volume factor
downloadvolumefactor = 1.0
# Upload volume factor
uploadvolumefactor = 1.0
# IMDb ID
imdbid = ""

torznab_attrs = item.getElementsByTagName("torznab:attr")
for torznab_attr in torznab_attrs:
name = torznab_attr.getAttribute('name')
value = torznab_attr.getAttribute('value')
if name == "seeders":
seeders = value
if name == "peers":
peers = value
if name == "downloadvolumefactor":
downloadvolumefactor = value
if float(downloadvolumefactor) == 0:
freeleech = True
if name == "uploadvolumefactor":
uploadvolumefactor = value
if name == "imdbid":
imdbid = value

tmp_dict = {'indexer_id': indexer_id,
'indexer': indexer,
'title': title,
'enclosure': enclosure,
'description': description,
'size': size,
'seeders': seeders,
'peers': peers,
'freeleech': freeleech,
'downloadvolumefactor': downloadvolumefactor,
'uploadvolumefactor': uploadvolumefactor,
'page_url': page_url,
'imdbid': imdbid}
torrents.append(tmp_dict)
except Exception as e:
ExceptionUtils.exception_traceback(e)
continue
except Exception as e2:
ExceptionUtils.exception_traceback(e2)
pass

return torrents
pass

def filter_search_results(self, result_array: list,
order_seq,
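For reference, the __parse_torznabxml helper removed above was fed by a Torznab query of the shape built in the deleted search body ({domain}?apikey=...&t=search&q=...). A self-contained sketch of such a request against Jackett's aggregate Torznab endpoint (the host and API key are placeholders):

import requests
import xml.dom.minidom

JACKETT_HOST = "http://127.0.0.1:9117"  # placeholder
API_KEY = "your-jackett-api-key"        # placeholder

def torznab_search(keyword):
    # Jackett exposes all configured indexers under one Torznab path.
    url = f"{JACKETT_HOST}/api/v2.0/indexers/all/results/torznab/api"
    params = {"apikey": API_KEY, "t": "search", "q": keyword}
    res = requests.get(url, params=params, timeout=10)
    res.raise_for_status()
    # The feed is RSS/XML; each <item> is one torrent result.
    dom = xml.dom.minidom.parseString(res.text)
    return dom.documentElement.getElementsByTagName("item")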
