Commit 5dd4756

Merge pull request anime-dl#2 from AbdullahM0hamed/configurator-2
Rewrite
2 parents: b845c7c + 7b28743

16 files changed: +177 -46

README.md (+1)

@@ -63,6 +63,7 @@ Yeah. Me too! That's why this tool exists.
 - Watchmovie
 - Nyaa.si
 - Animedaisuki
+- Justdubs
 - twist.moe - requires Node.js
 - Kissanime - requires Node.js
 - Kisscartoon - requires Node.js

anime_downloader/commands/config.py (+31)

@@ -6,6 +6,37 @@
 import logging
 from tabulate import tabulate
 from anime_downloader.sites import ALL_ANIME_SITES
+from anime_downloader.config import Config
+from anime_downloader import config
+
+logger = logging.getLogger(__name__)
+sitenames = [*config.DEFAULT_CONFIG["siteconfig"]]
+
+data = Config._CONFIG
+
+def create_table(_list):
+    newList = [(x, y) for x, y in zip(range(1, len(_list) + 1), _list)]
+    table = tabulate(newList, tablefmt = "psql")
+    table = "\n".join(table.split("\n")[::-1])
+    return table
+
+def traverse_json(data):
+    click.clear()
+    keys = [*data.keys()]
+    click.echo(create_table(keys))
+    val = click.prompt("Select Option", type = int, default = 1) - 1
+
+    if type(data[keys[val]]) == dict:
+        traverse_json(data[keys[val]])
+    else:
+        click.echo(f"Current value: {data[keys[val]]}")
+        data[keys[val]] = click.prompt(f"Input new value for {keys[val]}", type = type(data[keys[val]]))
+    return data
+
+traverse_json(data)
+Config._CONFIG = data
+Config.write()
+=======
 sitenames = [v[1] for v in ALL_ANIME_SITES]
 def clear():
     os.system('cls||clear')
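
The rewritten config command drives everything through traverse_json: each level of the nested config dict is rendered as a numbered tabulate table, the chosen key is either recursed into (sub-section) or re-prompted (leaf), and the edited dict is written back via Config.write(). A minimal standalone sketch of the same traversal, using plain input() instead of click and a made-up settings dict:

    from tabulate import tabulate

    settings = {"dl": {"player": "mpv", "threads": 4}, "provider": "twist.moe"}

    def traverse(node):
        keys = list(node.keys())
        # number the options 1..n, as create_table does in the commit
        print(tabulate(enumerate(keys, 1), tablefmt="psql"))
        key = keys[int(input("Select Option: ")) - 1]
        if isinstance(node[key], dict):
            traverse(node[key])        # descend into a sub-section
        else:
            print(f"Current value: {node[key]}")
            # cast the reply to the existing value's type, mirroring the commit
            node[key] = type(node[key])(input(f"Input new value for {key}: "))

    traverse(settings)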

anime_downloader/config.py (+4 -1)

@@ -28,7 +28,7 @@
         'provider': 'twist.moe',
     },
     "siteconfig": {
-        "nineanime": {
+        'nineanime': {
             "server": "mp4upload",
         },
         'anistream.xyz': {
@@ -85,6 +85,9 @@
         },
         'vidstream': {
             "servers": ["vidstream","gcloud","mp4upload","cloud9","hydrax"]
+        },
+        'justdubs': {
+            "servers": ["mp4upload","gcloud"]
         }
     }
 }

anime_downloader/extractors/gcloud.py (+5)

@@ -10,6 +10,11 @@
 class Gcloud(BaseExtractor):
     def _get_data(self):
         url = self.url
+
+        """gcloud uses the same video ID as other sites"""
+        url = url.replace('fembed.com','gcloud.live')
+        url = url.replace('feurl.com','gcloud.live')
+
         url = url.replace('gcloud.live/v/','gcloud.live/api/source/')
         if url.find('#') != -1:url = url[:url.find('#')]
         url = (url[-url[::-1].find('/'):])
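
The patch relies on fembed-style mirrors sharing video IDs, so any embed URL can be collapsed onto one host before the existing API rewriting runs. The whole transformation in isolation, with a made-up video ID:

    url = "https://fembed.com/v/abc123#caption"

    # collapse known mirrors onto the canonical host
    for mirror in ("fembed.com", "feurl.com"):
        url = url.replace(mirror, "gcloud.live")

    url = url.replace("gcloud.live/v/", "gcloud.live/api/source/")
    url = url.split("#")[0]    # drop any fragment, as the extractor does
    print(url)                 # https://gcloud.live/api/source/abc123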

anime_downloader/extractors/kwik.py (+14 -7)

@@ -19,17 +19,24 @@ def _get_data(self):
         # Kwik servers don't have direct link access you need to be referred
         # from somewhere, I will just use the url itself. We then
         # have to rebuild the url. Hopefully kwik doesn't block this too
-        eval_re = re.compile(r';(eval.*\))')
-        stream_parts_re = re.compile(r'https:\/\/(.*?)\..*\/(\d+)\/(.*)\/.*token=(.*)&expires=([^\']+)')
+
+        #Necessary
+        self.url = self.url.replace(".cx/e/", ".cx/f/")
+
         title_re = re.compile(r'title>(.*)<')

-        kwik_text = helpers.get(self.url, referer=self.url).text
-        obsfucated_js = eval_re.search(kwik_text).group(1)
-        deobsfucated_js = util.deobfuscate_packed_js(obsfucated_js)
+        resp = helpers.get(self.url, headers={"referer": self.url})
+        kwik_text = resp.text
+        cookies = resp.cookies

         title = title_re.search(kwik_text).group(1)
-        cdn, digits, file, token, expires = stream_parts_re.search(deobsfucated_js).group(1, 2, 3, 4, 5)
-        stream_url = f'https://{cdn}.nextstream.org/get/{token}/{expires}/mp4/{digits}/{file}/{title}'
+        deobfuscated = helpers.soupify(util.deobfuscate_packed_js(re.search(r'<(script).*(var\s+_.*escape.*?)</\1>(?s)', kwik_text).group(2)))
+
+        post_url = deobfuscated.form["action"]
+        token = deobfuscated.input["value"]
+
+        resp = helpers.post(post_url, headers={"referer": self.url}, params={"_token": token}, cookies=cookies, allow_redirects = False)
+        stream_url = resp.headers["Location"]

         logger.debug('Stream URL: %s' % stream_url)
         return {
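
Instead of reassembling a nextstream.org CDN URL from the packed JS, the extractor now replays the page's own download form: keep the cookies from the first GET, deobfuscate the packed script to recover the form action and hidden _token, POST it without following redirects, and read the stream URL out of the Location header. A minimal sketch of that handshake, assuming a kwik /f/ page URL and a deobfuscate_packed_js helper like the project's:

    import re
    import requests
    from bs4 import BeautifulSoup

    def kwik_stream(url, deobfuscate_packed_js):
        sess = requests.Session()                          # keeps kwik's cookies
        page = sess.get(url, headers={"referer": url}).text

        packed = re.search(r"(?s)<(script).*(var\s+_.*escape.*?)</\1>", page).group(2)
        form = BeautifulSoup(deobfuscate_packed_js(packed), "html.parser")

        resp = sess.post(
            form.form["action"],
            headers={"referer": url},
            params={"_token": form.input["value"]},
            allow_redirects=False,                         # stream URL is in the redirect
        )
        return resp.headers["Location"]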

anime_downloader/extractors/vidstream.py (+21 -10)

@@ -11,7 +11,7 @@

 class VidStream(BaseExtractor):
     def _get_data(self):
-
+
         '''
         Config:
         List of servers. Will use servers in order.
@@ -28,30 +28,34 @@ def _get_data(self):

         url = self.url.replace('https:////','https://')
         url = url.replace('https://vidstreaming.io/download','https://vidstreaming.io/server.php')
-
         soup = helpers.soupify(helpers.get(url))
-
         servers = Config._read_config()['siteconfig']['vidstream']['servers']
-        sources_regex = r'sources:(\[{.*?}])'
-        sources = re.search(sources_regex,str(soup))

         linkserver = soup.select('li.linkserver')
         for a in servers:
             if a == 'vidstream':
-                return self._get_link(sources)
+                return self._get_link(soup)
             for b in linkserver:
                 if b.get('data-video').startswith(links.get(a,'None')):
-                    self.url = b.get('data-video')
-                    return extractors.get_extractor(a)._get_data(self)
+                    """
+                    Another class needs to get created instead of using self not to impact future loops
+                    If the extractor fails vidstream.py will get run again with changed self
+                    """
+                    info = self.__dict__.copy()
+                    info['url'] = b.get('data-video')
+                    _self = Extractor(info)
+                    return extractors.get_extractor(a)._get_data(_self)

-    def _get_link(self,sources):
+    def _get_link(self,soup):
         QUALITIES = {
             "360":[],
             "480":[],
             "720":[],
             "1080":[],
         }
-        sources = sources.group(1)
+
+        sources_regex = r'sources:(\[{.*?}])'
+        sources = re.search(sources_regex,str(soup)).group(1)
         sources = sources.replace("'",'"') #json only accepts ""

         regex = r"[{|,][\n]*?[ ]*?[\t]*?[A-z]*?[^\"]:"
@@ -75,3 +79,10 @@ def _get_link(self,sources):
             'stream_url': stream_url,
             'referer': self.url
         }
+
+
+"""dummy class to prevent changing self"""
+class Extractor:
+    def __init__(self, dictionary):
+        for k, v in dictionary.items():
+            setattr(self, k, v)
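
The dummy Extractor class is there to clone extractor state: copying self.__dict__ into a throwaway object lets one loop iteration hand a rewritten url to a sub-extractor without mutating self, so if that extractor fails, later iterations (and any rerun of vidstream itself) still see the original URL. The pattern in isolation, with a made-up state-holder:

    class Extractor:
        """Rebuild an object from a plain attribute dict."""
        def __init__(self, dictionary):
            for k, v in dictionary.items():
                setattr(self, k, v)

    class Source:
        def __init__(self, url):
            self.url = url
            self.quality = "720p"

    original = Source("https://vidstreaming.io/server.php?id=1")

    info = original.__dict__.copy()                  # shallow copy of the state
    info["url"] = "https://www.mp4upload.com/embed-xyz.html"
    clone = Extractor(info)

    print(clone.url)      # rewritten URL, other attributes carried over
    print(original.url)   # untouched, safe for the next loop iteration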

anime_downloader/sites/animeflix.py (+4 -1)

@@ -39,8 +39,11 @@ def _scrape_episodes(self):
         # find a way to pass some values within the class
         episodes = helpers.get(self.episodeList_url,
                                params={'slug': self.slug}).json()
+
+        if episodes.get('@type','') == 'Movie': #different response if movies
+            return [episodes['potentialAction']['target']]
         return [ self.anime_url + episode['url'] for episode in episodes['episodes'] ]
-
+
     def _scrape_metadata(self):
         self.slug = self.url.strip('/').split('/')[-1]
         meta = helpers.get(self.meta_url,
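
The episode-list endpoint apparently returns schema.org-style JSON, and for films the payload is a single Movie object instead of an episode array, so the new branch returns the movie's potentialAction target as the only "episode". A sketch of the branching against two abridged, assumed payload shapes:

    series_payload = {"episodes": [{"url": "/ep-1"}, {"url": "/ep-2"}]}
    movie_payload = {"@type": "Movie",
                     "potentialAction": {"target": "https://example.site/watch/x"}}

    def episode_urls(payload, anime_url="https://example.site"):
        if payload.get("@type", "") == "Movie":      # movies: one direct target
            return [payload["potentialAction"]["target"]]
        return [anime_url + ep["url"] for ep in payload["episodes"]]

    print(episode_urls(movie_payload))    # ['https://example.site/watch/x']
    print(episode_urls(series_payload))   # ['https://example.site/ep-1', ...]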

anime_downloader/sites/animeout.py (+2 -2)

@@ -36,6 +36,6 @@ class AnimeOutEpisode(AnimeEpisode, sitename='animeout'):
     def _get_sources(self):
         soup = helpers.soupify(helpers.get(self.url))
         link = soup.select('div.Center > p > h2 > a')[0].get('href')
-        script = helpers.soupify(helpers.get(link)).select('script')[2].text
-        url = re.search(r'http[^"]*',script).group()
+        script = helpers.soupify(helpers.get(link)).select('script')[2]
+        url = re.search(r'http[^"]*',str(script)).group()
         return [('no_extractor', url,)]

anime_downloader/sites/animepahe.py (+1 -1)

@@ -39,7 +39,7 @@ def _get_sources(self):
         sources = []

         server_list = re.findall(r'data-provider="([^"]+)', source_text)
-        episode_id, session_id = re.search(r'getEmbeds\((\d+), "([^"]+)', source_text).groups()
+        episode_id, session_id = re.search("getUrls\((\d+?), \"(.*)?\"", source_text).groups()

         for server in server_list:
             if server not in supported_servers:

anime_downloader/sites/dreamanime.py (+5 -6)

@@ -22,9 +22,8 @@ class DreamAnime(Anime, sitename='dreamanime'):

     @classmethod
     def search(cls, query):
-        results = helpers.get("https://dreamanime.fun/search", params = {"term" : query}).text
-        soup = helpers.soupify(results)
-        result_data = soup.find_all("a", {"id":"epilink"})
+        soup = helpers.soupify(helpers.get("https://dreamanime.fun/search", params = {"term" : query}))
+        result_data = soup.select("a#epilink")

         search_results = [
             SearchResult(
@@ -42,7 +41,7 @@ def _scrape_episodes(self):

         episodes = []

-        _all = soup.find_all("div", {"class":"episode-wrap"})
+        _all = soup.select("div.episode-wrap")
         for i in _all:
             ep_type = i.find("div", {"class":re.compile("ep-type type-.* dscd")}).text
             if ep_type == 'Sub':
@@ -70,8 +69,8 @@ def getLink(self, name, _id):

     def _get_sources(self):
         server = self.config.get("server", "trollvid")
-        soup = helpers.soupify(helpers.get(self.url))
-        hosts = json.loads(soup.find("div", {"class":"spatry"}).previous_sibling.previous_sibling.text[21:-2])["videos"]
+        resp = helpers.get(self.url).text
+        hosts = json.loads(re.search("var\s+episode\s+=\s+({.*})", resp).group(1))["videos"]
         _type = hosts[0]["type"]
         try:
             host = list(filter(lambda video: video["host"] == server and video["type"] == _type, hosts))[0]
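
Rather than navigating sibling nodes around div.spatry and slicing the script text, the sources are now lifted straight out of the inline var episode = {...} assignment with a regex and parsed as JSON. The same extraction in isolation, against a made-up page snippet:

    import json
    import re

    page = '<script>var episode = {"videos": [{"host": "trollvid", "type": "sub", "id": "abc"}]};</script>'

    episode = json.loads(re.search(r"var\s+episode\s+=\s+({.*})", page).group(1))
    print(episode["videos"][0]["host"])   # trollvid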

anime_downloader/sites/dubbedanime.py (+8 -3)

@@ -55,7 +55,12 @@ def _get_sources(self):
         api = json.loads(re.search(episode_regex,soup).group(1))
         slug = api['slug']
         sources = api['videos']
-        vidstream = helpers.get(f'https://vid.xngine.com/api/episode/{slug}',referer = self.url).json()
+
+        try: #Continues even if vidstream api fails
+            vidstream = helpers.get(f'https://vid.xngine.com/api/episode/{slug}',referer = self.url).json()
+        except:
+            vidstream = []
+
         for a in vidstream:
             if a['host'] == 'vidstreaming' and 'id' in a and 'type' in a:
                 sources.append(a)
@@ -70,7 +75,7 @@ def _get_sources(self):
                     provider = a[:]
                     embed = server_links.get(provider,'{}').format(b['id'],x)
                     return [(provider, embed,)]
-
+
         logger.debug('No servers found in selected language. Trying all supported servers')

         for a in servers: #trying all supported servers in order
@@ -84,11 +89,11 @@ def _get_sources(self):
                 return [(provider, embed,)]

         logger.debug('No supported servers found, trying mp4sh')
+
         if re.search(r'"trollvid","id":"([^"]*)', soup):
             token = re.search(r'"trollvid","id":"([^"]*)', soup).group(1)
             embed = server_links.get('mp4sh','{}').format(token,x)
             return [('mp4sh', embed,)]
         else:
             logger.debug('No servers found')
             return [('no_extractor', '',)]
-

anime_downloader/sites/erairaws.py (+4 -6)

@@ -2,7 +2,7 @@
 from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
 from anime_downloader.sites import helpers
 from difflib import get_close_matches
-import base64
+import re

 class EraiRaws(Anime, sitename='erai-raws'):
     sitename='erai-raws'
@@ -11,11 +11,9 @@ class EraiRaws(Anime, sitename='erai-raws'):
     #Bypass DDosGuard
     def bypass(self):
         host = "https://erai-raws.info"
-        url = "https://erai-raws.info/anime-list/"
-        u = base64.b64encode(url.encode('utf-8'))
-        h = base64.b64encode(host.encode('utf-8'))
-        bypass_link = helpers.post('https://ddgu.ddos-guard.net/ddgu/', data = {'u':u, 'h':h, 'p':''}, headers = {'Referer': url}, allow_redirects = False).headers["Location"]
-        helpers.get(bypass_link, allow_redirects = False)
+        resp = helpers.get("https://check.ddos-guard.net/check.js").text
+        ddosBypassPath = re.search("'(.*?)'", resp).groups()[0]
+        helpers.get(host + ddosBypassPath)

     def parse(self, rows, url):
         episodes = []
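
The new bypass leans on DDoS-Guard serving a per-visitor check script: fetch check.js, pull the first single-quoted path out of it, and request that path on the protected host so the clearance cookie lands on the session. The same three steps with a plain requests session standing in for the project's helpers:

    import re
    import requests

    session = requests.Session()
    host = "https://erai-raws.info"

    check_js = session.get("https://check.ddos-guard.net/check.js").text
    path = re.search(r"'(.*?)'", check_js).group(1)   # first quoted path in the script

    session.get(host + path)   # sets the DDoS-Guard clearance cookie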

anime_downloader/sites/__init__.py (+2 -1)

@@ -22,7 +22,8 @@
     ('watchmovie','watchmovie','WatchMovie'),
     ('animekisa','animekisa','AnimeKisa'),
     ('nyaa','nyaa','Nyaa'),
-    ('animedaisuki','animedaisuki','Animedaisuki')
+    ('animedaisuki','animedaisuki','Animedaisuki'),
+    ('justdubs','justdubs','JustDubs')
 ]
anime_downloader/sites/justdubs.py (+63)

@@ -0,0 +1,63 @@
+import logging
+import json
+import re
+
+from anime_downloader.sites.exceptions import AnimeDLError, NotFoundError
+from anime_downloader import util
+from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
+from anime_downloader.sites import helpers
+
+logger = logging.getLogger(__name__)
+
+class JustDubs(Anime, sitename='justdubs'):
+    sitename = 'justdubs'
+    @classmethod
+    def search(cls, query):
+        results = helpers.get(f"http://justdubs.org/search/node/{query}").text
+        soup = helpers.soupify(results)
+        results_data = soup.select("li.search-result a[href*='http://justdubs.org/watch-']")
+        logger.debug(results_data)
+        search_results = [
+            SearchResult(
+                title = result.text,
+                url = result.get("href")
+            )
+            for result in results_data
+        ]
+        return search_results
+
+    def _scrape_episodes(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        ret = [str(a['href'])
+               for a in soup.find_all('a', {'class' : 'list-group-item'})]
+        if ret == []:
+            err = 'No Episodes Found in url "{}"'.format(self.url)
+            args = [self.url]
+            raise NotFoundError(err, *args)
+        return list(reversed(ret))
+
+    def _scrape_metadata(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        self.title = soup.select('h1.page-header')[0].text
+
+class JustDubsEpisode(AnimeEpisode, sitename='justdubs'):
+    def _get_sources(self):
+        servers = self.config['servers']
+
+        """maps urls to extractors"""
+        server_links = {
+            'mp4upload':'mp4upload.com',
+            'gcloud':'gcloud.live',
+            'gcloud':'fembed.com'
+        }
+
+        soup = helpers.soupify(helpers.get(self.url)).select('iframe')
+
+        for a in servers:
+            for b in soup:
+                for c in server_links:
+                    if server_links[c] in b.get('src') and a == c:
+                        return [(c, b.get('src'))]
+
+        logger.warn("Unsupported URL")
+        return ""
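
One quirk worth flagging: server_links is a dict literal that repeats the 'gcloud' key, and Python keeps only the last value, so as written only fembed.com iframes can match the gcloud extractor. Mapping one extractor to several hostnames is more naturally a list per key; a hedged sketch of that variant, where iframes stands for the soup.select('iframe') result:

    server_links = {
        'mp4upload': ['mp4upload.com'],
        'gcloud': ['gcloud.live', 'fembed.com'],   # both hosts use the gcloud extractor
    }

    def pick_source(servers, iframes):
        # honour the configured server order, then match any known host
        for server in servers:
            for frame in iframes:
                src = frame.get('src')
                if any(host in src for host in server_links.get(server, [])):
                    return [(server, src)]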

anime_downloader/sites/nyaa.py (+4 -2)

@@ -13,8 +13,10 @@ def search(cls,query):

         search_results = [
             SearchResult(
-                title = i.select("a:not(.comments)")[1].get("title") + ' | '+ i.find_all('td',class_ = 'text-center')[1].text,
-                url = i.find_all('a',{'href':re.compile(rex)})[0].get('href'))
+                title = i.select("a:not(.comments)")[1].get("title"),
+                url = i.find_all('a',{'href':re.compile(rex)})[0].get('href'),
+                meta = {'peers':i.find_all('td',class_ = 'text-center')[3].text + ' peers','size':i.find_all('td',class_ = 'text-center')[1].text})
+
             for i in search_results.select("tr.default,tr.success")
         ]