SpeedUp Animeworld

Alhaziel
2019-06-05 18:46:40 +02:00
parent 7f2fe7940d
commit 8d74126db0
2 changed files with 20 additions and 41 deletions

View File

@@ -256,65 +256,45 @@ def video(item):
 def episodios(item):
     log()
-    itemlist = []
-    data = httptools.downloadpage(item.url, headers=headers).data.replace('\n', '')
-    block1 = scrapertoolsV2.find_single_match(data, r'<div class="widget servers".*?>(.*?)<div id="download"')
-    block = scrapertoolsV2.find_single_match(block1,r'<div class="server.*?>(.*?)<div class="server.*?>')
-    patron = r'<li><a.*?href="([^"]+)".*?>(.*?)<\/a>'
-    matches = re.compile(patron, re.DOTALL).findall(block)
-    extra = {}
-    extra['data'] = block1.replace('<strong>Attenzione!</strong> Non ci sono episodi in questa sezione, torna indietro!.','')
+    itemlist = []
+    patron_block = r'<div class="widget servers".*?>(.*?)<div id="download"'
+    patron = r'<li><a [^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+"[^=]+="[^"]+" href="([^"]+)"[^>]+>([^<]+)<'
+    matches = support.match(item, patron, patron_block)[0]
     for scrapedurl, scrapedtitle in matches:
-        extra['episode'] = scrapedtitle
-        scrapedtitle = '[B] Episodio ' + scrapedtitle + '[/B]'
         itemlist.append(
             Item(
                 channel=item.channel,
                 action="findvideos",
                 contentType="episode",
-                title=scrapedtitle,
+                title='[B] Episodio ' + scrapedtitle + '[/B]',
                 url=urlparse.urljoin(host, scrapedurl),
                 fulltitle=scrapedtitle,
                 show=scrapedtitle,
                 plot=item.plot,
                 fanart=item.thumbnail,
-                extra=extra,
                 thumbnail=item.thumbnail))
     autorenumber.renumber(itemlist, item,'bold')
     support.videolibrary(itemlist, item)
     return itemlist
 
 def findvideos(item):
     log()
     itemlist = []
-    episode = ''
-    if item.extra and item.extra['episode']:
-        data = item.extra['data']
-        episode = item.extra['episode']
-    else:
-        data = httptools.downloadpage(item.url, headers=headers).data
-    block = scrapertoolsV2.find_single_match(data,r'data-target="\.widget\.servers.*?>(.*?)</div>')
-    servers = scrapertoolsV2.find_multiple_matches(block,r'class="tab.*?data-name="([0-9]+)">([^<]+)</span')
     videolist = []
-    data = httptools.downloadpage(item.url, headers=headers).data
+    matches, data = support.match(item, r'class="tab.*?data-name="([0-9]+)">([^<]+)</span', headers=headers)
     videoData = ''
-    for serverid, servername in servers:
-        # retrieve the video id for this server
-        block = str(scrapertoolsV2.find_multiple_matches(data,r'<div class="server.*?data-id="'+serverid+'">(.*?)</ul>'))
-        id = scrapertoolsV2.find_single_match(data,r'<a\sdata-id="([^"]+)"\sdata-base="'+episode+'"')
+    for serverid, servername in matches:
+        block = scrapertoolsV2.find_multiple_matches(data,'data-id="'+serverid+'">(.*?)<div class="server')
+        id = scrapertoolsV2.find_single_match(str(block),r'<a data-id="([^"]+)" data-base="'+item.fulltitle+'"')
         dataJson = httptools.downloadpage('%s/ajax/episode/info?id=%s&server=%s&ts=%s' % (host, id, serverid, int(time.time())), headers=[['x-requested-with', 'XMLHttpRequest']]).data
         json = jsontools.load(dataJson)
         log('JSON= ',json)
         videoData +='\n'+json['grabber']
@@ -331,7 +311,5 @@ def findvideos(item):
                 contentType=item.contentType,
                 folder=False))
-    itemlist += servertools.find_video_items(item,videoData)
-    return support.server(item,itemlist=itemlist)
+    return support.server(item, videoData, itemlist)
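
The speed-up in this file comes from collapsing the hand-rolled httptools/scrapertoolsV2 chains in episodios() and findvideos() into the shared support.match helper, which downloads the page once, optionally narrows it to a block pattern, and then applies the item pattern. A minimal sketch of that pattern, assuming a simplified signature (the real support.match in this codebase accepts more options than shown here):

# Sketch only: a simplified stand-in for support.match, inferred from the
# two call sites above; the real helper takes additional parameters.
import re
from core import httptools, scrapertoolsV2

def match(item, patron, patron_block=None, headers=None):
    # Fetch the page once instead of once per scraping step.
    data = httptools.downloadpage(item.url, headers=headers).data.replace('\n', '')
    # Optionally narrow the haystack to a block before matching items.
    if patron_block:
        data = scrapertoolsV2.find_single_match(data, patron_block)
    # Return both the matches and the scraped text, so callers such as
    # findvideos() can keep working on the same page without refetching.
    return re.compile(patron, re.DOTALL).findall(data), data

episodios() unpacks only the matches half, support.match(item, patron, patron_block)[0], while findvideos() keeps both halves and reuses data to dig out the per-server video id.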

View File

@@ -552,14 +552,15 @@ def pagination(itemlist, item, page, perpage, function_level=1):
                 thumbnail=thumb()))
     return itemlist
 
-def server(item, data='', itemlist='', headers='', AutoPlay=True, CheckLinks=True):
+def server(item, data='', itemlist=[], headers='', AutoPlay=True, CheckLinks=True):
     if not data:
         data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
-    if not itemlist:
-        itemlist = servertools.find_video_items(data=str(data))
+    itemList = servertools.find_video_items(data=str(data))
+    itemlist += itemList
     for videoitem in itemlist:
         videoitem.title = "".join([item.title, ' ', typo(videoitem.title, 'color kod []'), typo(videoitem.quality, 'color kod []') if videoitem.quality else ""])
         videoitem.fulltitle = item.fulltitle
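
One caveat with the new signature: itemlist=[] is a mutable default argument. Python evaluates defaults once, at definition time, so servers appended during a call that relies on the default remain in that same list for every later call that also omits itemlist:

>>> def f(acc=[]):
...     acc.append(1)
...     return acc
...
>>> f(); f()
[1]
[1, 1]

A hedged sketch of the conventional None-default guard (illustrative only; not what this commit ships):

# Illustrative only: guards against Python's shared mutable default.
# This commit ships itemlist=[] instead.
from core import httptools, servertools

def server(item, data='', itemlist=None, headers='', AutoPlay=True, CheckLinks=True):
    if itemlist is None:
        itemlist = []  # fresh list per call, never shared between calls
    if not data:
        data = httptools.downloadpage(item.url, headers=headers, ignore_response_code=True).data
    # Always scan data and merge the findings with the caller's list,
    # matching the behaviour introduced by this commit.
    itemlist += servertools.find_video_items(data=str(data))
    return itemlist

The behavioural change itself is what enables the new findvideos() call above: instead of scanning data only when no itemlist was passed, server() now always scans and appends, so the channel can hand over both videoData and its pre-built itemlist in one call.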