Commit
Added: Download limits. Closes #247
theotherp@gmx.de authored and theotherp@gmx.de committed Feb 11, 2017
1 parent 010c167 commit 75a91c1
Showing 36 changed files with 729 additions and 330 deletions.
2 changes: 1 addition & 1 deletion bower.json
@@ -27,7 +27,7 @@
"angular-loading-bar": "~0.8.0",
"angular-growl-v2": "angular-growl-2#~0.7.5",
"angular-filter": "^0.5.11",
"angular-ui-router": "~0.2.15",
"angular-ui-router": "~0.4.2",
"angular": "~1.4.7",
"angular-route": "~1.4.7",
"angular-schema-form": "~0.8.12",
8 changes: 7 additions & 1 deletion changelog.md
@@ -2,7 +2,13 @@

----------
### 0.2.198
Fixed: Downloading NZBs was not possible or easy because of a blinking tooltip. See [#542](https://github.com/theotherp/nzbhydra/issues/542).
Added: Download limit for indexers. When the download limit is reached for an indexer, it will not be picked for searching. Please note that Hydra
will never prevent downloads from happening, even when the download limit is reached; this is to make sure that external tools do not disable Hydra
when a requested download fails. See [#247](https://github.com/theotherp/nzbhydra/issues/247).

Fixed: Some hit limit related bugs.

Fixed: Multiple UI issues, e.g. tooltip placement, header "active section" display, etc.

### 0.2.197
Fixed: Search history next to search box didn't work with authorization. See [#539](https://github.com/theotherp/nzbhydra/issues/539).
8 changes: 7 additions & 1 deletion nzbhydra/config.py
@@ -115,7 +115,7 @@
"main": {
"apikey": "ab00y7qye6u84lx4eqhwd0yh1wp423",
"branch": "master",
"configVersion": 33,
"configVersion": 34,
"dereferer": "http://www.dereferer.org/?$s",
"debug": False,
"externalUrl": None,
@@ -741,6 +741,12 @@ def migrateConfig(config):
if "drunkenslug" in indexer["host"]:
indexer["host"] = "https://api.drunkenslug.com"

if config["main"]["configVersion"] == 33:
with version_update(config, 34):
addLogMessage(20, "Adding empty download limit to all indexers")
for indexer in config["indexers"]:
indexer["downloadLimit"] = None




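Editorial illustration, not part of the diff: after this migration an indexer entry in the settings carries the limit-related fields that the search code reads. The field names come from this commit; the indexer name, host, and values below are placeholders, and the surrounding config dict is assumed to be the one passed to migrateConfig.

config["indexers"] = [
    {
        "name": "SomeIndexer",                      # placeholder
        "host": "https://api.someindexer.example",  # placeholder
        "hitLimit": 0,                              # 0 means no API hit limit
        "hitLimitResetTime": None,                  # None means a rolling 24-hour window is used
        "downloadLimit": None,                      # added by this migration; None means no download limit
    },
]
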
3 changes: 0 additions & 3 deletions nzbhydra/database.py
@@ -135,7 +135,6 @@ def save(self, *args, **kwargs):
class IndexerNzbDownload(Model):
searchResult = ForeignKeyField(SearchResult, related_name="downloads")
apiAccess = ForeignKeyField(IndexerApiAccess)
time = DateTimeField()
title = CharField() # Redundant when the search result still exists but after it's deleted we still wanna see what the title is
mode = CharField() # "serve" or "redirect"
internal = BooleanField(null=True)
@@ -144,8 +143,6 @@ class Meta(object):
database = db

def save(self, *args, **kwargs):
if self.time is None:
self.time = datetime.datetime.utcnow() # Otherwise the time at the first run of this code is taken
super(IndexerNzbDownload, self).save(*args, **kwargs)


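Editorial sketch, not part of the commit: with the time column removed from IndexerNzbDownload, a download's timestamp now comes from the joined IndexerApiAccess row, as the stats queries in this commit do. A minimal peewee example, assuming the models shown above:

from nzbhydra.database import IndexerApiAccess, IndexerNzbDownload

# Most recent download, with its timestamp taken from the related API access row
latest = (IndexerNzbDownload
          .select(IndexerNzbDownload, IndexerApiAccess.time)
          .join(IndexerApiAccess)
          .order_by(IndexerApiAccess.time.desc())
          .first())
# latest.title is stored on the download itself; latest.apiAccess.time holds when it happened
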
101 changes: 75 additions & 26 deletions nzbhydra/search.py
@@ -21,7 +21,7 @@
from retry import retry

from nzbhydra import config, indexers, infos, categories, databaseLock
from nzbhydra.database import IndexerStatus, Search, db, IndexerApiAccess, SearchResult, Indexer, InterfaceError, IntegrityError
from nzbhydra.database import IndexerStatus, Search, db, IndexerApiAccess, SearchResult, Indexer, InterfaceError, IntegrityError, IndexerNzbDownload
from nzbhydra.search_module import SearchModule

logger = logging.getLogger('root')
@@ -111,8 +111,53 @@ def add_not_picked_indexer(reasonMap, reason, indexerName):
reasonMap[reason].append(indexerName)


def checkHitOrDownloadLimit(p):
if p.settings.hitLimit > 0 or p.settings.downloadLimit > 0:
if p.settings.hitLimitResetTime:
comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
if comparisonTime > arrow.utcnow():
comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1)) # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
else:
# Use rolling time window
comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))
if p.settings.hitLimit > 0:
apiHitsQuery = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful)
apiHits = apiHitsQuery.count()
if apiHits >= p.settings.hitLimit:
if p.settings.hitLimitResetTime:
logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
else:
try:
firstHitTimeInWindow = arrow.get(list(apiHitsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit-1).dicts())[0]["time"]).to("local")
nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
except IndexerApiAccess.DoesNotExist:
logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
return False, "API limit reached"
else:
logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))

if p.settings.downloadLimit > 0:
downloadsQuery = IndexerNzbDownload().select(IndexerApiAccess, IndexerNzbDownload).join(IndexerApiAccess).where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime))
downloads = downloadsQuery.count()
if downloads >= p.settings.downloadLimit:
if p.settings.hitLimitResetTime:
logger.info("Did not pick %s because its download limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.downloadLimit, p.settings.hitLimitResetTime))
else:
try:
firstHitTimeInWindow = arrow.get(list(downloadsQuery.order_by(IndexerApiAccess.time.desc()).offset(p.settings.downloadLimit-1).limit(1).dicts())[0]["time"]).to("local")
nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
logger.info("Did not pick %s because its download limit of %d was reached. Next possible hit at %s" % (p, p.settings.downloadLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
except IndexerApiAccess.DoesNotExist:
logger.info("Did not pick %s because its download limit of %d was reached" % (p, p.settings.downloadLimit))
return False, "Download limit reached"
else:
logger.debug("%s has had %d of a maximum of %d downloads since %02d:%02d" % (p, downloads, p.settings.downloadLimit, comparisonTime.hour, comparisonTime.minute))

return True, None


def pick_indexers(search_request):
# type: (nzbhydra.search.SearchRequest, bool) -> List[nzbhydra.search_modules.SearchModule]
query_supplied = True if search_request.query else False
queryCanBeGenerated = None # Stores whether we can generate a query from IDs. Initially unknown; the first time we need it and query generation fails it is set to False
picked_indexers = []
@@ -149,30 +194,34 @@ def pick_indexers(search_request):
logger.debug("Did not pick %s because it is not enabled for category %s" % (p, search_request.category.category.pretty))
add_not_picked_indexer(notPickedReasons, "Disabled for this category %s" % search_request.category.category.pretty, p.name)
continue
if p.settings.hitLimit > 0:
if p.settings.hitLimitResetTime:
comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
if comparisonTime > arrow.utcnow():
comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1)) # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
else:
# Use rolling time window
comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))

apiHits = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful).count()
if apiHits >= p.settings.hitLimit:
if p.settings.hitLimitResetTime:
logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
else:
try:
firstHitTimeInWindow = IndexerApiAccess().select().where(IndexerApiAccess.indexer == p.indexer & IndexerApiAccess.response_successful).order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit).limit(1).get().time.datetime
nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
except IndexerApiAccess.DoesNotExist:
logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
add_not_picked_indexer(notPickedReasons, "API limit reached", p.name)
continue
else:
logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))
picked, reason = checkHitOrDownloadLimit(p)
if not picked:
add_not_picked_indexer(notPickedReasons, reason, p.name)
continue
# if p.settings.hitLimit > 0:
# if p.settings.hitLimitResetTime:
# comparisonTime = arrow.utcnow().replace(hour=p.settings.hitLimitResetTime, minute=0, second=0)
# if comparisonTime > arrow.utcnow():
# comparisonTime = arrow.get(comparisonTime.datetime - datetime.timedelta(days=1)) # Arrow is too dumb to properly subtract 1 day (throws an error on every first of the month)
# else:
# # Use rolling time window
# comparisonTime = arrow.get(arrow.utcnow().datetime - datetime.timedelta(days=1))
#
# apiHits = IndexerApiAccess().select().where((IndexerApiAccess.indexer == p.indexer) & (IndexerApiAccess.time > comparisonTime) & IndexerApiAccess.response_successful).count()
# if apiHits >= p.settings.hitLimit:
# if p.settings.hitLimitResetTime:
# logger.info("Did not pick %s because its API hit limit of %d was reached. Will pick again after %02d:00" % (p, p.settings.hitLimit, p.settings.hitLimitResetTime))
# else:
# try:
# firstHitTimeInWindow = IndexerApiAccess().select().where(IndexerApiAccess.indexer == p.indexer & IndexerApiAccess.response_successful).order_by(IndexerApiAccess.time.desc()).offset(p.settings.hitLimit).limit(1).get().time.datetime
# nextHitAfter = arrow.get(firstHitTimeInWindow + datetime.timedelta(days=1))
# logger.info("Did not pick %s because its API hit limit of %d was reached. Next possible hit at %s" % (p, p.settings.hitLimit, nextHitAfter.format('YYYY-MM-DD HH:mm')))
# except IndexerApiAccess.DoesNotExist:
# logger.info("Did not pick %s because its API hit limit of %d was reached" % (p, p.settings.hitLimit))
# add_not_picked_indexer(notPickedReasons, "API limit reached", p.name)
# continue
# else:
# logger.debug("%s has had %d of a maximum of %d API hits since %02d:%02d" % (p, apiHits, p.settings.hitLimit, comparisonTime.hour, comparisonTime.minute))

if (query_supplied or search_request.identifier_key is not None) and not p.supports_queries:
logger.debug("Did not pick %s because a query was supplied but the indexer does not support queries" % p)
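Editorial sketch, not part of the commit: the comparison time used by checkHitOrDownloadLimit() is either the most recent occurrence of the configured reset hour or the start of a rolling 24-hour window. A condensed helper illustrating that logic; the function name is invented for illustration only:

import datetime

import arrow

def limit_window_start(hit_limit_reset_hour=None):
    # Return the start of the window in which API hits and downloads are counted
    now = arrow.utcnow()
    if hit_limit_reset_hour is not None:
        start = now.replace(hour=hit_limit_reset_hour, minute=0, second=0)
        if start > now:
            # The reset hour has not passed yet today, so the current window started yesterday
            start = arrow.get(start.datetime - datetime.timedelta(days=1))
        return start
    # No reset time configured: rolling window covering the last 24 hours
    return arrow.get(now.datetime - datetime.timedelta(days=1))
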
16 changes: 8 additions & 8 deletions nzbhydra/stats.py
@@ -213,12 +213,12 @@ def get_avg_indexer_access_success(afterSql, beforeSql):


def getTimeBasedDownloadStats(after, before):
downloads = IndexerNzbDownload(). \
select(Indexer.name, IndexerApiAccess.response_successful, IndexerNzbDownload.time). \
downloads = list(IndexerNzbDownload(). \
select(Indexer.name, IndexerApiAccess.response_successful, IndexerApiAccess.time). \
where((IndexerApiAccess.time > after) & (IndexerApiAccess.time < before)). \
join(IndexerApiAccess, JOIN.LEFT_OUTER). \
join(Indexer, JOIN.LEFT_OUTER)
downloadTimes = [arrow.get(x.time).to(tz.tzlocal()) for x in downloads]
join(Indexer, JOIN.LEFT_OUTER).dicts())
downloadTimes = [arrow.get(x["time"]).to(tz.tzlocal()) for x in downloads]

perDayOfWeek, perHourOfDay = calculcateTimeBasedStats(downloadTimes)

@@ -284,7 +284,7 @@ def getIndexerBasedDownloadStats(afterSql, beforeSql):
LEFT OUTER JOIN indexerapiaccess api
ON dl.apiAccess_id = api.id
WHERE api.indexer_id IN (%(enabledIndexerIds)s)
AND dl.time > %(afterSql)s AND dl.time < %(beforeSql)s
AND api.time > %(afterSql)s AND api.time < %(beforeSql)s
)
countall
LEFT OUTER JOIN indexerapiaccess api
@@ -304,7 +304,7 @@

def get_nzb_downloads(page=0, limit=100, filterModel=None, sortModel=None):
columnNameToEntityMap = {
"time": IndexerNzbDownload.time,
"time": IndexerApiAccess.time,
"indexer": Indexer.name,
"title": IndexerNzbDownload.title,
"access": IndexerNzbDownload.internal,
@@ -313,12 +313,12 @@
}

query = IndexerNzbDownload() \
.select(Indexer.name.alias("indexerName"), IndexerNzbDownload.title, IndexerNzbDownload.time, IndexerNzbDownload.internal, SearchResult.id.alias('searchResultId'), SearchResult.details.alias('detailsLink'), IndexerApiAccess.response_successful, IndexerApiAccess.username) \
.select(Indexer.name.alias("indexerName"), IndexerNzbDownload.title, IndexerApiAccess.time, IndexerNzbDownload.internal, SearchResult.id.alias('searchResultId'), SearchResult.details.alias('detailsLink'), IndexerApiAccess.response_successful, IndexerApiAccess.username) \
.switch(IndexerNzbDownload).join(IndexerApiAccess, JOIN.LEFT_OUTER).join(Indexer, JOIN.LEFT_OUTER) \
.switch(IndexerNzbDownload).join(SearchResult, JOIN.LEFT_OUTER)

query = extendQueryWithFilter(columnNameToEntityMap, filterModel, query)
query = extendQueryWithSorting(columnNameToEntityMap, query, sortModel, IndexerNzbDownload.time.desc())
query = extendQueryWithSorting(columnNameToEntityMap, query, sortModel, IndexerApiAccess.time.desc())

total_downloads = query.count()
nzb_downloads = list(query.paginate(page, limit).dicts())
3 changes: 3 additions & 0 deletions onlinehelp/indexers.md
@@ -4,4 +4,7 @@ Most of you will want to use your newznab indexers here. The first time an index

You can define a hit limit for an indexer. When the maximum number of API hits is reached, the indexer isn't used anymore. Either define the time of day at which the indexer resets its counter, or leave it empty to use a rolling window, meaning the number of hits during the 24 hours before the search is limited.

You can also define a download limit. When the download limit is reached, the indexer will not be searched anymore until the reset time is reached (see above).
Note: Hydra will never block NZB downloads, even when the download limit is reached, so that external tools do not disable Hydra when a requested download fails.

The priority determines which indexer is used if duplicate results are found. The result from the indexer with the highest priority number is shown first in the GUI and returned for API searches.
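
Editorial sketch, not taken from the code: the pick decision described in the help text above boils down to comparing the counters for the current window against the configured limits, while downloads themselves are never blocked. The function name and parameters below are invented for illustration.

def is_pickable(api_hits, downloads, hit_limit, download_limit):
    # A limit of 0 or None means that limit is disabled
    if hit_limit and api_hits >= hit_limit:
        return False, "API limit reached"
    if download_limit and downloads >= download_limit:
        return False, "Download limit reached"
    return True, None  # the indexer may be picked for searching
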
1 change: 0 additions & 1 deletion static/css/bright.css


2 changes: 1 addition & 1 deletion static/css/bright.css.map


1 change: 0 additions & 1 deletion static/css/dark.css


2 changes: 1 addition & 1 deletion static/css/dark.css.map


1 change: 0 additions & 1 deletion static/css/grey.css


2 changes: 1 addition & 1 deletion static/css/grey.css.map


1 change: 1 addition & 0 deletions static/html/directives/log.html
@@ -1,5 +1,6 @@
<div cg-busy="{promise:logPromise,message:'Loading log file'}">
<div style="margin-bottom: 15px">
<button class="btn btn-default" ng-click="update()">Update</button>
<button class="btn btn-default" ng-click="scrollToBottom()">Scroll to bottom</button>
<label>
<input type="checkbox" ng-model="doUpdateLog" ng-change="toggleUpdate()">
2 changes: 1 addition & 1 deletion static/html/states/download-history.html
@@ -52,7 +52,7 @@
</thead>
<tbody>
<tr dir-paginate="nzbDownload in nzbDownloads | itemsPerPage: limit" total-items="totalDownloads" current-page="pagination.current" pagination-id="downloads">
<td class="narrow-row">{{ nzbDownload.time | reformatDate }}</td>
<td class="narrow-row">{{ nzbDownload.time | reformatDateEpoch }}</td>
<td class="narrow-row">{{ nzbDownload.indexerName }}</td>
<td class="narrow-row">
<addable-nzbs search-result-id="nzbDownload.searchResultId" ng-style="{'visibility':!nzbDownload.searchResultId ? 'hidden' : 'initial'}"></addable-nzbs>
6 changes: 3 additions & 3 deletions static/html/states/header.html
@@ -3,9 +3,9 @@
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav">
<li ui-sref-active="active"><a ui-sref="root.search" ui-sref-opts="{inherit: false, reload: true}">Search</a></li>
<li ui-sref-active="active" ng-if="showStats"><a ui-sref="root.stats.searches">History & Stats</a></li>
<li ui-sref-active="active" ng-if="showAdmin"><a ui-sref="root.config.main">Config</a></li>
<li ui-sref-active="active" ng-if="showAdmin"><a ui-sref="root.system.control">System</a></li>
<li ui-sref-active="{'active':'root.stats'}" ng-if="showStats"><a ui-sref="root.stats.searches">History & Stats</a></li>
<li ui-sref-active="{'active':'root.config'}" ng-if="showAdmin"><a ui-sref="root.config.main">Config</a></li>
<li ui-sref-active="{'active':'root.system'}" ng-if="showAdmin"><a ui-sref="root.system.control">System</a></li>
</ul>
<ul class="nav navbar-nav navbar-right" ng-if="showLoginout">
<li><a href="" ng-click="loginout()"
10 changes: 0 additions & 10 deletions static/html/states/search-history.html
@@ -59,16 +59,6 @@
</td>
<td class="narrow-row">{{ request.category }}</td>
<td class="narrow-row" ng-bind-html="formatAdditional(request)"></td>
<!--
<td>{{ request.identifier_key }}</td>
<td>
<a target="_blank" ng-if="request.identifier_key=='imdbid'" href="http://www.imdb.com/title/tt{{ request.identifier_value }}/">{{ request.identifier_value }}</a>
<a target="_blank" ng-if="request.identifier_key=='tvdbid'" href="http://thetvdb.com/?tab=series&id={{ request.identifier_value }}/">{{ request.identifier_value }}</a>
<a target="_blank" ng-if="request.identifier_key=='rid'" href="internalapi/redirect_rid?rid={{ request.identifier_value }}">{{ request.identifier_value }}</a>
</td>
<td>{{ request.season }}</td>
<td>{{ request.episode }}</td>
-->
<td class="narrow-row">{{ request.internal ? "Internal" : "API"}}</td>
<td class="narrow-row">{{ request.username }}</td>
</tr>
