Fixes

- BlogHorror: fix for a change in the site structure
- PelisPlus: fix for series
- TuPeliculas: fix for a change in the site structure
- VeSeriesOnline: fix for link detection
Alfa-beto authored 2018-11-28 10:21:40 -03:00 (committed by GitHub)
parent feb8b00e01
commit 3bd39826b6
4 changed files with 15 additions and 13 deletions

View File

@@ -47,11 +47,11 @@ def list_all(item):
     itemlist = []
     data = get_source(item.url)
-    patron = '<div class="post-thumbnail">.?<.*?href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?'
+    patron = '<article id="post-\d+".*?data-background="([^"]+)".*?href="([^"]+)".*?<h3.*?internal">([^<]+)'
     matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
+    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
         url = scrapedurl
         title = scrapertools.find_single_match(scrapedtitle, '(.*?)(?:|\(|\| )\d{4}').strip()
         year = scrapertools.find_single_match(scrapedtitle, '(\d{4})')
@@ -68,7 +68,7 @@ def list_all(item):
     if itemlist != []:
-        next_page = scrapertools.find_single_match(data, '<a class="next" href="([^"]+)"')
+        next_page = scrapertools.find_single_match(data, 'page-numbers current.*?<a class="page-numbers" href="([^"]+)"')
         if next_page != '':
             itemlist.append(Item(channel=item.channel, fanart=fanart, action="list_all", title='Siguiente >>>', url=next_page))
     else:
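
The pattern now keys off the article id="post-..." markup and captures data-background (the thumbnail) first, which is why the tuple unpacking order changes. A minimal sketch of the new extraction, with sample markup invented for illustration:

```python
# Sketch of the updated list_all() scraping; the sample markup is invented for illustration.
import re

sample = ('<article id="post-123" data-background="https://example.com/thumb.jpg">'
          '<a href="https://example.com/alguna-pelicula-2018/">'
          '<h3 class="entry-title internal">Alguna Pelicula (2018)</h3></a></article>')

patron = '<article id="post-\d+".*?data-background="([^"]+)".*?href="([^"]+)".*?<h3.*?internal">([^<]+)'
for scrapedthumbnail, scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(sample):
    print(scrapedthumbnail, scrapedurl, scrapedtitle)
```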

View File

@@ -183,7 +183,7 @@ def episodesxseasons(item):
     season = item.infoLabels['season']
     data=get_source(item.url)
     season_data = scrapertools.find_single_match(data, 'id="pills-vertical-%s">(.*?)</div>' % season)
-    patron='href="([^"]+)".*?block">Capitulo(\d+) -.?([^<]+)<'
+    patron='href="([^"]+)".*?block">Capitulo.?(\d+) -.?([^<]+)<'
     matches = re.compile(patron, re.DOTALL).findall(season_data)
     infoLabels = item.infoLabels
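
The only change is the optional character after "Capitulo", so episode headings written with a space before the number match again. A minimal comparison of the old and new pattern, with sample markup invented for illustration:

```python
# Sketch comparing the old and new episodesxseasons() patterns; the sample markup is invented for illustration.
import re

old_patron = 'href="([^"]+)".*?block">Capitulo(\d+) -.?([^<]+)<'
new_patron = 'href="([^"]+)".*?block">Capitulo.?(\d+) -.?([^<]+)<'

sample = '<a href="https://example.com/serie-1x05"><span class="d-block">Capitulo 5 - Piloto</span></a>'

print(re.compile(old_patron, re.DOTALL).findall(sample))  # [] -- the space after "Capitulo" breaks the old match
print(re.compile(new_patron, re.DOTALL).findall(sample))  # [('https://example.com/serie-1x05', '5', 'Piloto')]
```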

View File

@@ -52,9 +52,6 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Años", action="section",
thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url=host + 'search?q=',
thumbnail=get_thumb('search', auto=True)))

View File

@@ -4,6 +4,7 @@
 # -*- By the Alfa Develop Group -*-
 import re
+import base64
 from channels import autoplay
 from channels import filtertools
@@ -178,8 +179,8 @@ def findvideos(item):
     data = get_source(item.url)
     video_id = scrapertools.find_single_match(data, 'getEnlaces\((\d+)\)')
-    links_url = '%s%s%s' % (host,'/link/repro.php/',video_id)
-    online_url = '%s%s%s' % (host, '/link/enlaces_online.php/', video_id)
+    links_url = '%s%s%s' % (host,'link/repro.php/',video_id)
+    online_url = '%s%s%s' % (host, 'link/enlaces_online.php/', video_id)
     # list of options from links_url
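
Dropping the leading slash avoids a doubled slash in the request URL; this assumes the channel's host constant already ends with a trailing slash. A minimal sketch, with the host value invented for illustration:

```python
# Sketch of the URL-building fix; the host value is invented for illustration and
# assumes the channel's host constant ends with a trailing slash.
host = 'https://example.com/'
video_id = '1234'

print('%s%s%s' % (host, '/link/repro.php/', video_id))  # old: https://example.com//link/repro.php/1234
print('%s%s%s' % (host, 'link/repro.php/', video_id))   # new: https://example.com/link/repro.php/1234
```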
@@ -223,10 +224,14 @@ def findvideos(item):
         video_id = scrapertools.find_single_match(scrapedurl, 'index.php/(\d+)/')
         new_url = '%s%s%s%s' % (host, 'ext/index-include.php?id=', video_id, '&tipo=1')
         data = get_source(new_url)
-        video_url = scrapertools.find_single_match(data, '<div class=container><a href=(.*?)>')
-        video_url = video_url.replace('enlace.php', 'r')
-        data = httptools.downloadpage(video_url, follow_redirects=False)
-        url = data.headers['location']
+        video_url = scrapertools.find_single_match(data, '<div class=container><a onclick=addURL.*?href=(.*?)>')
+        video_url = video_url.replace('%3D', '&')+'status'
+        headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
+                   'Referer': item.url}
+        data = httptools.downloadpage(video_url, headers=headers, ignore_response_code=True).data
+        b64_url = scrapertools.find_single_match(data, "var string = '([^']+)';")+'=='
+        url = base64.b64decode(b64_url)
         title = '%s '+ '[%s]' % language
         if url != '':
             itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language=language,
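
Instead of following a redirect and reading the Location header, the new code requests the rewritten video_url with a Referer header and decodes a base64 token embedded in the response, re-adding the stripped padding before decoding. A minimal sketch of that decode step, with the response body invented for illustration and re.search standing in for scrapertools.find_single_match:

```python
# Sketch of the new link decoding; the response body is invented for illustration.
# re.search stands in for scrapertools.find_single_match.
import base64
import re

final_url = b'https://example.com/v/video.mp4'             # 31 bytes -> its base64 form normally ends in '=='
token = base64.b64encode(final_url).decode().rstrip('=')   # the page embeds the token without padding
sample_response = "var string = '%s';" % token

b64_url = re.search("var string = '([^']+)';", sample_response).group(1) + '=='
url = base64.b64decode(b64_url)
print(url)  # b'https://example.com/v/video.mp4'
```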