Fixes

- DoramasMP4: domain change
- PelisPlus: fix for season listings
- SeriesPapaya: playback fix
- Vi2: fix for a site structure change
Author: Alfa-beto
Date: 2019-01-23 11:00:47 -03:00
Committed by: GitHub
Parent: cff6417223
Commit: 9a28c1a5dd
4 changed files with 22 additions and 18 deletions

DoramasMP4

@@ -16,7 +16,7 @@ from core.item import Item
 from platformcode import config, logger
 from channelselector import get_thumb
-host = 'https://www2.doramasmp4.com/'
+host = 'https://www3.doramasmp4.com/'
 IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
 list_language = IDIOMAS.values()
@@ -191,7 +191,7 @@ def findvideos(item):
         video_data = httptools.downloadpage(video_url, headers=headers).data
         url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
     else:
-        video_url = 'https://www2.doramasmp4.com/api/redirect.php?token=%s' % token
+        video_url = 'https://www3.doramasmp4.com/api/redirect.php?token=%s' % token
         video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
         url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')

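Aside from the www2 to www3 domain bump, the second hunk reads the final stream URL out of the Location header of an unfollowed redirect. A minimal standalone sketch of that extraction, using only the standard library (the sample header value below is hypothetical):

import re

# Hypothetical Location header in the '<token>@@@<url>@@@' shape the pattern expects
location = '12345@@@https://example.com/stream.mp4@@@'
match = re.search(r'\d+@@@(.*?)@@@', location)
if match:
    print(match.group(1))  # https://example.com/stream.mp4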
PelisPlus

@@ -148,7 +148,7 @@ def seasons(item):
     itemlist=[]
     data=get_source(item.url)
-    patron='data-toggle="tab">TEMPORADA.?(\d+)</a>'
+    patron='data-toggle="tab">TEMPORADA\s?(\d+)</a>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     infoLabels = item.infoLabels

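The regex change matters when the season number follows TEMPORADA with no separator: a greedy .? consumes the first digit, while \s? cannot match a digit and leaves the whole number for the capture group. A small sketch (sample strings are hypothetical):

import re

for sample in ('TEMPORADA 12</a>', 'TEMPORADA12</a>'):
    old = re.findall(r'TEMPORADA.?(\d+)</a>', sample)
    new = re.findall(r'TEMPORADA\s?(\d+)</a>', sample)
    print(sample, old, new)
# TEMPORADA 12</a> ['12'] ['12']
# TEMPORADA12</a>  ['2']  ['12']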
SeriesPapaya

@@ -216,9 +216,11 @@ def play(item):
     logger.info("play: %s" % item.url)
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    item.url = scrapertools.find_single_match(data, "location.href='([^']+)")
     item.server = ""
+    new_url = scrapertools.find_single_match(data, "location.href='([^']+)")
+    if new_url != '':
+        item.url = new_url
     itemlist.append(item.clone())
     itemlist = servertools.get_servers_itemlist(itemlist)
     itemlist[0].thumbnail=item.contentThumbnail
     return itemlist

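The play() fix stops a failed scrape from wiping out the original URL: find_single_match returns an empty string when the pattern does not match, so item.url is now only replaced when a redirect target was actually found. A standalone sketch of the guard (the helper emulates scrapertools.find_single_match; the page data is hypothetical):

import re

def find_single_match(data, patron):
    match = re.search(patron, data)
    return match.group(1) if match else ''

data = '<html>no location.href redirect here</html>'  # hypothetical page
url = 'https://original/episode'
new_url = find_single_match(data, r"location.href='([^']+)")
if new_url != '':
    url = new_url
print(url)  # the original URL survives when nothing matched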
Vi2

@@ -86,8 +86,7 @@ def sub_menu(item):
     logger.info()
     itemlist = []
-    url = host + '/%s/es/' % item.type
-    search_url = host + '/search/'
+    url = host + '/%s/es/ajax/1/' % item.type
     link_type = item.title.lower()
     if link_type == 'streaming':
         link_type = 'flash'
@@ -119,8 +118,8 @@ def sub_menu(item):
                          url=url + '?q=%s+subtitulado' % link_type, action='list_all',
                          thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE',
                          link_type=link_type))
-    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=search_url + '?q=',
-                         thumbnail=get_thumb("search", auto=True), type='search', link_type=link_type))
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + '?q=',
+                         thumbnail=get_thumb("search", auto=True), type=item.type, link_type=link_type))
     return itemlist
@@ -145,7 +144,7 @@ def section(item):
     for scrapedurl, scrapedtitle in matches:
         title = scrapedtitle
-        url = host+scrapedurl
+        url = host+scrapedurl.replace('/?','/ajax/1/?')
         if (item.title=='Generos' and title.lower() not in excluded and not title.isdigit()) or (item.title=='Por Año' and title.isdigit()):
             itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type))
@@ -159,8 +158,11 @@ def list_all(item):
     listed =[]
     quality=''
     infoLabels = {}
-    data= get_source(item.url, referer='%s/%s/es/' %(host, item.type))
+    json_data= jsontools.load(get_source(item.url))
+    data = json_data['render']
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
     #if item.type == 'peliculas':
     patron = '<img class="cover".*?src="([^"]+)" data-id="\d+" '
     patron +='alt="Ver ([^\(]+)(.*?)">'
     patron += '<div class="mdl-card__menu"><a class="clean-link" href="([^"]+)">'
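The listing now goes through an ajax endpoint that, per this diff, returns JSON with the rendered HTML under 'render' and a pagination flag under 'next'; jsontools.load is assumed to behave like json.loads here. A sketch with a hypothetical payload:

import json
import re

payload = '{"render": "<div>\\n  <a href=\\"/peli/\\">Peli</a></div>", "next": true}'
json_data = json.loads(payload)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', '', json_data['render'])
print(data)               # <div><a href="/peli/">Peli</a></div>
print(json_data['next'])  # True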
@@ -222,7 +224,7 @@ def list_all(item):
                             infoLabels = infoLabels
                             )
-        if item.type == 'peliculas' or item.type == 'all' or item.type == 'search':
+        if item.type == 'peliculas' or item.type == 'all':
             new_item.contentTitle = scrapedtitle
         else:
             scrapedtitle = scrapedtitle.split(' - ')
@@ -235,12 +237,12 @@ def list_all(item):
     itemlist.sort(key=lambda it: it.title)
     # Paginación
-    # if json_data['next']:
-    #     actual_page = scrapertools.find_single_match(item.url, 'ajax/(\d+)/')
-    #     next_page =int(actual_page) + 1
-    #     url_next_page = item.url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page)
-    #     itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type,
-    #                                action='list_all', send_lang=item.send_lang))
+    if json_data['next']:
+        actual_page = scrapertools.find_single_match(item.url, 'ajax/(\d+)/')
+        next_page =int(actual_page) + 1
+        url_next_page = item.url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page)
+        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type,
+                                   action='list_all', send_lang=item.send_lang))
     return itemlist
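
The re-enabled pagination builds the next page by bumping the number embedded in the ajax path. A standalone sketch of that URL manipulation (the URL below is hypothetical):

import re

url = 'https://example.com/peliculas/es/ajax/1/?q=drama'  # hypothetical listing URL
actual_page = re.search(r'ajax/(\d+)/', url).group(1)
next_page = int(actual_page) + 1
url_next_page = url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page)
print(url_next_page)  # https://example.com/peliculas/es/ajax/2/?q=drama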