doramasmp4: fix scraping patterns, findvideos and search for the new site layout

Intel1
2018-05-19 09:16:46 -05:00
committed by GitHub
parent 3fd25a0568
commit 6e7d9be234


@@ -18,7 +18,7 @@ from channelselector import get_thumb
host = 'https://www.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE'}
IDIOMAS = {'sub': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
@@ -38,7 +38,7 @@ def mainlist(item):
itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
url=host + 'catalogue?type[]=pelicula', thumbnail=get_thumb('movies', auto=True),
url=host + 'catalogue?format=pelicula', thumbnail=get_thumb('movies', auto=True),
type='movie'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?q=',
thumbnail=get_thumb('search', auto=True)))
@@ -63,9 +63,8 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
patron = '<a class=item_episode href=(.*?) title=.*?<img src=(.*?) title=.*?title>(.*?)'
patron += '</div> <div class=options> <span>(.*?)</span>'
patron = '<div class=col-lg-2 col-md-3 col-6><a href=(.*?) title=.*?'
patron += '<img src=(.*?) alt=(.*?) class=img-fluid>.*?bg-primary text-capitalize>(.*?)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
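
As a quick sanity check, the rewritten list_all pattern can be exercised outside Kodi against a stripped-down card. The fragment below is invented, and it assumes (as the pattern itself does) that get_source() returns the page with attribute quotes already stripped:

import re

# Hypothetical card, in the quote-stripped form the pattern expects.
sample = ('<div class=col-lg-2 col-md-3 col-6>'
          '<a href=https://www.doramasmp4.com/dorama/ejemplo title=Ejemplo>'
          '<img src=https://www.doramasmp4.com/uploads/ejemplo.jpg alt=Ejemplo class=img-fluid>'
          '<span class=badge bg-primary text-capitalize>dorama</span></div>')

patron = '<div class=col-lg-2 col-md-3 col-6><a href=(.*?) title=.*?'
patron += '<img src=(.*?) alt=(.*?) class=img-fluid>.*?bg-primary text-capitalize>(.*?)</span>'

for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype in re.compile(patron, re.DOTALL).findall(sample):
    print('%s | %s | %s | %s' % (scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtype))
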
@@ -107,8 +106,8 @@ def latest_episodes(item):
itemlist = []
infoLabels = dict()
data = get_source(item.url)
patron = '<a class=episode href=(.*?) title=.*?<img src=(.*?) title=.*?title>(.*?)</div>.*?episode>(.*?)</div>'
patron = '<div class=col-lg-3 col-md-6 mb-2><a href=(.*?) title=.*?'
patron +='<img src=(.*?) alt.*?truncate-width>(.*?)<.*?mb-1>(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
@@ -116,6 +115,7 @@ def latest_episodes(item):
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
title=title, contentSerieName=contentSerieName, type='episode'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
@@ -125,8 +125,9 @@ def episodes(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<li class=link_episode><a itemprop=url href=(.*?) title=.*?itemprop=name>(.*?)'
patron += '</span></a><meta itemprop=episodeNumber content=(.*?) /></li>'
logger.debug(data)
patron = '<a itemprop=url href=(.*?) title=.*? class=media.*?truncate-width>(.*?)<.*?'
patron +='text-muted mb-1>Capítulo (.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -139,7 +140,7 @@ def episodes(item):
infoLabels['episode'] = contentEpisodeNumber
if scrapedtitle != '':
title = scrapedtitle
title = '%sx%s - %s' % ('1',scrapedep, scrapedtitle)
else:
title = 'episodio %s' % scrapedep
@@ -148,7 +149,12 @@ def episodes(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
contentEpisodeNumber=contentEpisodeNumber, type='episode', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodes", text_color='yellow'))
return itemlist
def findvideos(item):
@@ -156,51 +162,69 @@ def findvideos(item):
itemlist = []
duplicated = []
headers={'referer':item.url}
data = get_source(item.url)
logger.debug(data)
patron = 'animated pulse data-url=(.*?)>'
matches = re.compile(patron, re.DOTALL).findall(data)
if '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data:
language = IDIOMAS['vo']
else:
language = IDIOMAS['sub']
if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
item.type = 'dorama'
item.contentSerieName = item.contentTitle
item.contentTitle = ''
return episodes(item)
else:
itemlist.extend(servertools.find_video_items(data=data))
for video_item in itemlist:
if 'sgl.php' in video_item.url:
headers = {'referer': item.url}
patron_gvideo = "'file':'(.*?)','type'"
data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data
video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
duplicated.append(video_item.url)
video_item.channel = item.channel
video_item.infoLabels = item.infoLabels
video_item.language=IDIOMAS['sub']
patron = 'var item = {id: (\d+), episode: (\d+),'
matches = re.compile(patron, re.DOTALL).findall(data)
for id, episode in matches:
data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data)
sources = data_json['options']
for src in sources:
url = sources[src]
if 'sgl.php' in url:
headers = {'referer':item.url}
patron_gvideo = "'file':'(.*?)','type'"
data_gvideo = httptools.downloadpage(url, headers = headers).data
url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
infoLabels=item.infoLabels)
if url != '' and url not in duplicated:
itemlist.append(new_item)
duplicated.append(url)
try:
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
except:
pass
for video_url in matches:
video_data = httptools.downloadpage(video_url, headers=headers).data
server = ''
if 'Media player DMP4' in video_data:
url = scrapertools.find_single_match(video_data, "sources: \[\{'file':'(.*?)'")
server = 'Directo'
else:
url = scrapertools.find_single_match(video_data, '<iframe src="(.*?)".*?scrolling="no"')
new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language = language)
if server !='':
new_item.server = server
itemlist.append(new_item)
# for video_item in itemlist:
# if 'sgl.php' in video_item.url:
# headers = {'referer': item.url}
# patron_gvideo = "'file':'(.*?)','type'"
# data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data
# video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
#
# duplicated.append(video_item.url)
# video_item.channel = item.channel
# video_item.infoLabels = item.infoLabels
# video_item.language=IDIOMAS['sub']
#
# patron = 'var item = {id: (\d+), episode: (\d+),'
# matches = re.compile(patron, re.DOTALL).findall(data)
#
# for id, episode in matches:
# data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data)
# sources = data_json['options']
# for src in sources:
# url = sources[src]
#
# if 'sgl.php' in url:
# headers = {'referer':item.url}
# patron_gvideo = "'file':'(.*?)','type'"
# data_gvideo = httptools.downloadpage(url, headers = headers).data
# url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
#
# new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
# infoLabels=item.infoLabels)
# if url != '' and url not in duplicated:
# itemlist.append(new_item)
# duplicated.append(url)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
# Required for FilterTools
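
The rewritten resolver visits each button's data-url and then branches: pages that host the site's own "Media player DMP4" expose the stream directly in the jwplayer sources block, while the rest only wrap a third-party embed inside an iframe. A minimal sketch of that branch, reusing the same httptools/scrapertools calls the channel already makes (the helper name itself is illustrative, not part of the channel):

from core import httptools, scrapertools

def resolve_player(video_url, referer):
    # Illustrative helper mirroring the branch inside findvideos().
    data = httptools.downloadpage(video_url, headers={'referer': referer}).data
    if 'Media player DMP4' in data:
        # Site-hosted player: take the direct file from the jwplayer sources block.
        url = scrapertools.find_single_match(data, "sources: \[\{'file':'(.*?)'")
        return url, 'Directo'
    # Embed page: return the iframe target; the server is left empty so that
    # get_servers_itemlist() can identify it and fill the '[%s] [%s]' title later.
    url = scrapertools.find_single_match(data, '<iframe src="(.*?)".*?scrolling="no"')
    return url, ''
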
@@ -215,8 +239,13 @@ def findvideos(item):
def search(item, texto):
logger.info()
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.type = 'search'
if texto != '':
return list_all(item)
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist
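
The search rewrite delegates to list_all and turns a scrape failure into a single placeholder entry instead of an unhandled exception. A sketch of the same shape, with an explicit empty list returned for a blank query (the function name is illustrative, not part of the channel):

def safe_search(item, texto):
    # Same flow as the new search(): append the query, then fall back to a
    # placeholder item if parsing the results page fails.
    item.url += texto.replace(' ', '+')
    item.type = 'search'
    if not texto:
        return []
    try:
        return list_all(item)
    except Exception:
        return [item.clone(url='', title='No hay elementos...', action='')]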