Skip to content

Commit 89e1ffc

Browse files
committed
[Temporary] Merge-Squash of PR 6807
1 parent b35eaef commit 89e1ffc

File tree

14 files changed

+477
-100
lines changed

14 files changed

+477
-100
lines changed

dredd/api-description.yml

+3-2
Original file line numberDiff line numberDiff line change
@@ -2565,11 +2565,12 @@ parameters:
25652565
description: The configuration to retrieve
25662566
type: string
25672567
enum:
2568-
- main
2568+
- main # Keep main first, as the tests use it
25692569
- consts
25702570
- metadata
2571-
- search
25722571
- notifiers
2572+
- search
2573+
- system
25732574
log-level:
25742575
name: level
25752576
in: query

medusa/helpers/__init__.py

+32
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,16 @@
6565
except ImportError:
6666
reflink = None
6767

68+
# Detect the best available tool for reading this process' memory usage.
# Preferred: psutil (third-party, cross-platform). Fallback: the stdlib
# resource module, which is unix-only. When neither import succeeds,
# memory_usage_tool stays None and memory_usage() below reports nothing.
try:
    from psutil import Process
    memory_usage_tool = 'psutil'
except ImportError:
    try:
        import resource  # resource module is unix only
        memory_usage_tool = 'resource'
    except ImportError:
        memory_usage_tool = None
77+
6878

6979
def indent_xml(elem, level=0):
7080
"""Do our pretty printing and make Matt very happy."""
@@ -1450,6 +1460,28 @@ def get_disk_space_usage(disk_path=None, pretty=True):
14501460
return False
14511461

14521462

1463+
def memory_usage(pretty=True):
    """
    Get the current memory usage (if possible).

    Uses whichever tool module-level detection found: ``psutil`` when
    installed, otherwise the unix-only ``resource`` module.

    :param pretty: True for human readable size, False for bytes

    :return: Current memory usage as a human readable string when `pretty`
        is True, the usage in bytes otherwise, or '' when no tool is available
    """
    if memory_usage_tool == 'psutil':
        # psutil reports resident set size in bytes.
        usage = Process(os.getpid()).memory_info().rss
    elif memory_usage_tool == 'resource':
        import platform
        usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
        # getrusage reports ru_maxrss in kilobytes on Linux (and most BSDs)
        # but in bytes on macOS -- normalize to bytes so both tools feed
        # pretty_file_size() the same unit.
        if platform.system() != 'Darwin':
            usage *= 1024
    else:
        # No supported tool could be imported at module load time.
        return ''

    if pretty:
        usage = pretty_file_size(usage)

    return usage
1483+
1484+
14531485
def get_tvdb_from_id(indexer_id, indexer):
14541486

14551487
session = MedusaSafeSession()

medusa/helpers/utils.py

+17-1
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,8 @@ def strtobool(val):
8484

8585

8686
def to_timestamp(dt):
87-
"""Return POSIX timestamp corresponding to the datetime instance.
87+
"""
88+
Return POSIX timestamp corresponding to the datetime instance.
8889
8990
:param dt: datetime (possibly aware)
9091
:return: seconds since epoch as float
@@ -102,3 +103,18 @@ def to_camel_case(snake_str):
102103
"""Convert a snake formatted string to camel case."""
103104
components = snake_str.split('_')
104105
return components[0] + ''.join(x.title() for x in components[1:])
106+
107+
108+
def timedelta_in_milliseconds(td):
109+
"""
110+
Return the value of the timedelta object in milliseconds.
111+
112+
:param td: timedelta
113+
:type td: timedelta
114+
:return: the value of the timedelta in milliseconds
115+
:rtype: int
116+
"""
117+
if not td:
118+
return 0
119+
120+
return int(td.total_seconds() * 1000)

medusa/server/api/v2/config.py

+16
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
common,
1616
config,
1717
db,
18+
helpers,
1819
logger,
1920
ws,
2021
)
@@ -33,6 +34,10 @@
3334
iter_nested_items,
3435
set_nested_value,
3536
)
37+
from medusa.system.schedulers import (
38+
generate_schedulers,
39+
generate_show_queue,
40+
)
3641

3742
from six import iteritems, itervalues, text_type
3843
from six.moves import map
@@ -914,6 +919,17 @@ def data_notifiers():
914919

915920
return section_data
916921

922+
@staticmethod
923+
def data_system():
924+
"""System information."""
925+
section_data = {}
926+
927+
section_data['memoryUsage'] = helpers.memory_usage(pretty=True)
928+
section_data['schedulers'] = generate_schedulers()
929+
section_data['showQueue'] = generate_show_queue()
930+
931+
return section_data
932+
917933
@staticmethod
918934
def data_clients():
919935
"""Notifications."""

medusa/server/api/v2/stats.py

+95-83
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,9 @@
1818
WANTED
1919
)
2020
from medusa.server.api.v2.base import BaseRequestHandler
21+
from medusa.show.show import Show
22+
23+
from six.moves import map
2124

2225

2326
class StatsHandler(BaseRequestHandler):
@@ -28,93 +31,102 @@ class StatsHandler(BaseRequestHandler):
2831
#: identifier
2932
identifier = ('identifier', r'\w+')
3033
#: path param
31-
path_param = ('path_param', r'\w+')
34+
path_param = None
3235
#: allowed HTTP methods
3336
allowed_methods = ('GET', )
3437

35-
def get(self, identifier, path_param=None):
38+
def get(self, identifier):
3639
"""Query statistics.
3740
38-
:param identifier:
39-
:param path_param:
40-
:type path_param: str
41+
:param identifier: The type of statistics to query
42+
:type identifier: str
4143
"""
42-
pre_today = [SKIPPED, WANTED, FAILED]
43-
snatched = [SNATCHED, SNATCHED_PROPER, SNATCHED_BEST]
44-
downloaded = [DOWNLOADED, ARCHIVED]
45-
46-
def query_in(items):
47-
return '({0})'.format(','.join(map(str, items)))
48-
49-
query = dedent("""\
50-
SELECT indexer AS indexerId, showid AS seriesId,
51-
SUM(
52-
season > 0 AND
53-
episode > 0 AND
54-
airdate > 1 AND
55-
status IN {status_quality}
56-
) AS epSnatched,
57-
SUM(
58-
season > 0 AND
59-
episode > 0 AND
44+
if not identifier or identifier == 'overall':
45+
data = overall_stats()
46+
elif identifier == 'show':
47+
data = per_show_stats()
48+
else:
49+
return self._not_found('Statistics not found')
50+
51+
return self._ok(data=data)
52+
53+
54+
def overall_stats():
    """Generate overall library statistics.

    Thin wrapper: the aggregation itself lives in ``Show.overall_stats``.
    """
    stats = Show.overall_stats()
    return stats
57+
58+
59+
def per_show_stats():
    """Generate per-show library statistics.

    Runs one aggregate query over ``tv_episodes`` and returns a dict with:

    - ``seriesStat``: one row per (indexer, show) with snatched/downloaded/
      total episode counts, next and previous air dates, and total file size.
    - ``maxDownloadCount``: the largest ``epTotal`` seen (minimum 1000),
      multiplied by 100 -- presumably a scale factor for the frontend's
      progress display; confirm against the consumer.

    :return: statistics data
    :rtype: dict
    """
    # Status buckets used by the SUM(...) counters in the query below.
    pre_today = [SKIPPED, WANTED, FAILED]
    snatched = [SNATCHED, SNATCHED_PROPER, SNATCHED_BEST]
    downloaded = [DOWNLOADED, ARCHIVED]

    def query_in(items):
        # Render a list of status codes as a SQL IN-clause literal, e.g. '(2,9,12)'.
        # Safe to interpolate: values are internal integer constants, not user input.
        return '({0})'.format(','.join(map(str, items)))

    query = dedent("""\
        SELECT indexer AS indexerId, showid AS seriesId,
        SUM(
            season > 0 AND
            episode > 0 AND
            airdate > 1 AND
            status IN {status_quality}
        ) AS epSnatched,
        SUM(
            season > 0 AND
            episode > 0 AND
            airdate > 1 AND
            status IN {status_download}
        ) AS epDownloaded,
        SUM(
            season > 0 AND
            episode > 0 AND
            airdate > 1 AND (
                (airdate <= {today} AND status IN {status_pre_today}) OR
                status IN {status_both}
            )
        ) AS epTotal,
        (SELECT airdate FROM tv_episodes
         WHERE showid=tv_eps.showid AND
               indexer=tv_eps.indexer AND
               airdate >= {today} AND
               (status = {unaired} OR status = {wanted})
         ORDER BY airdate ASC
         LIMIT 1
        ) AS epAirsNext,
        (SELECT airdate FROM tv_episodes
         WHERE showid=tv_eps.showid AND
               indexer=tv_eps.indexer AND
               airdate > 1 AND
               status <> {unaired}
         ORDER BY airdate DESC
         LIMIT 1
        ) AS epAirsPrev,
        SUM(file_size) AS seriesSize
        FROM tv_episodes tv_eps
        GROUP BY showid, indexer
    """).format(
        status_quality=query_in(snatched),
        status_download=query_in(downloaded),
        status_both=query_in(snatched + downloaded),
        today=date.today().toordinal(),
        status_pre_today=query_in(pre_today),
        # NOTE(review): 'skipped' is supplied but no {skipped} placeholder
        # appears in the query -- harmless, but likely leftover.
        skipped=SKIPPED,
        wanted=WANTED,
        unaired=UNAIRED,
    )

    main_db_con = db.DBConnection()
    sql_result = main_db_con.select(query)

    stats_data = {}
    stats_data['seriesStat'] = []
    stats_data['maxDownloadCount'] = 1000
    for cur_result in sql_result:
        stats_data['seriesStat'].append(cur_result)
        # Track the largest per-show episode total, floored at 1000.
        if cur_result['epTotal'] > stats_data['maxDownloadCount']:
            stats_data['maxDownloadCount'] = cur_result['epTotal']

    stats_data['maxDownloadCount'] *= 100
    return stats_data

0 commit comments

Comments
 (0)