(.*?)<\/h3>.*?(.*?)<\/span>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
- for scrapedurl, scrapedthumbnail, scrapedtitle, year, quality in matches:
+ for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
if "|" in scrapedtitle:
@@ -79,14 +79,13 @@ def list_all(item):
contentTitle = re.sub('\(.*?\)','', contentTitle)
- title = '%s [%s] [%s]'%(contentTitle, year, quality)
+ title = '%s [%s]'%(contentTitle, year)
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
- quality = quality,
infoLabels={'year':year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
@@ -132,16 +131,16 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
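+    # Decode HTML entities up front so the option/iframe patterns below match the raw markup.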
+ data = scrapertools.decodeHtmlentities(data)
patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
-
- url= scrapedurl
- opt_data = scrapertools.find_single_match(data,'%s>.*?\d+<.*?.*?('
- '.*?)'%option).split('-')
-
+        scrapedurl = scrapedurl.replace('"', '').replace('&amp;', '&')
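+        # Each option id points to its own embed page; fetch it and pull the iframe src of the real player.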
+ data_video = get_source(scrapedurl)
+ url = scrapertools.find_single_match(data_video, '.*?src=(.*?) frameborder')
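+        # The option label reads "language - quality"; split on '-' to recover both fields.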
+ opt_data = scrapertools.find_single_match(data,'%s>.*?.*?(.*?)'%option).split('-')
language = opt_data[0].strip()
quality = opt_data[1].strip()
if url != '' and 'youtube' not in url:
@@ -151,7 +150,10 @@ def findvideos(item):
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
- itemlist.append(trailer)
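+    # 'trailer' may not be defined for every item, so guard the append.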
+ try:
+ itemlist.append(trailer)
+ except:
+ pass
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
diff --git a/plugin.video.alfa/channels/doramasmp4.json b/plugin.video.alfa/channels/doramasmp4.json
new file mode 100644
index 00000000..c28e266f
--- /dev/null
+++ b/plugin.video.alfa/channels/doramasmp4.json
@@ -0,0 +1,34 @@
+{
+ "id": "doramasmp4",
+ "name": "DoramasMP4",
+ "active": true,
+ "adult": false,
+ "language": [],
+ "thumbnail": "https://s14.postimg.org/ibh4znkox/doramasmp4.png",
+ "banner": "",
+ "categories": [
+ "tvshow"
+ ],
+ "settings": [
+ {
+ "id": "include_in_global_search",
+ "type": "bool",
+ "label": "Incluir en busqueda global",
+ "default": false,
+ "enabled": false,
+ "visible": false
+ },
+ {
+ "id": "filter_languages",
+ "type": "list",
+ "label": "Mostrar enlaces en idioma...",
+ "default": 0,
+ "enabled": true,
+ "visible": true,
+ "lvalues": [
+ "No filtrar",
+ "VOSE"
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/doramasmp4.py b/plugin.video.alfa/channels/doramasmp4.py
new file mode 100644
index 00000000..5e8c7480
--- /dev/null
+++ b/plugin.video.alfa/channels/doramasmp4.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+# -*- Channel DoramasMP4 -*-
+# -*- Created for Alfa-addon -*-
+# -*- By the Alfa Develop Group -*-
+
+import re
+
+from channels import autoplay
+from channels import filtertools
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import jsontools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+from channelselector import get_thumb
+
+host = 'https://www.doramasmp4.com/'
+
+IDIOMAS = {'sub': 'VOSE'}
+list_language = IDIOMAS.values()
+list_quality = []
+list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
+
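+# Download a page and strip quotes, newlines and entities so single-line regexes can match the markup.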
+def get_source(url):
+ logger.info()
+ data = httptools.downloadpage(url).data
+ data = re.sub(r'"|\n|\r|\t| |
|\s{2,}', "", data)
+ return data
+
+def mainlist(item):
+ logger.info()
+
+ autoplay.init(item.channel, list_servers, list_quality)
+ itemlist = []
+
+ itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
+ thumbnail=get_thumb('doramas', auto=True), type='dorama'))
+ itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
+ url=host + 'catalogue?type[]=pelicula', thumbnail=get_thumb('movies', auto=True),
+ type='movie'))
+ itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?q=',
+ thumbnail=get_thumb('search', auto=True)))
+
+ autoplay.show_option(item.channel, itemlist)
+
+ return itemlist
+
+def doramas_menu(item):
+ logger.info()
+
+ itemlist =[]
+
+ itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue',
+ thumbnail=get_thumb('all', auto=True), type='dorama'))
+ itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
+ url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'))
+ return itemlist
+
+def list_all(item):
+ logger.info()
+
+ itemlist = []
+ data = get_source(item.url)
+
+ patron = '(.*?)'
+ patron += ' .*?episode>(.*?)'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
+ title = '%s %s' % (scrapedtitle, scrapedep)
+ contentSerieName = scrapedtitle
+ itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
+ title=title, contentSerieName=contentSerieName, type='episode'))
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ return itemlist
+
+
+def episodes(item):
+ logger.info()
+ itemlist = []
+ data = get_source(item.url)
+ patron = '(.*?)'
+ patron += ' '
+
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ infoLabels = item.infoLabels
+
+ for scrapedurl, scrapedtitle, scrapedep in matches:
+ url = scrapedurl
+ contentEpisodeNumber = scrapedep
+
+ infoLabels['season'] = 1
+ infoLabels['episode'] = contentEpisodeNumber
+
+ if scrapedtitle != '':
+ title = scrapedtitle
+ else:
+ title = 'episodio %s' % scrapedep
+
+ infoLabels = item.infoLabels
+
+ itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
+ contentEpisodeNumber=contentEpisodeNumber, type='episode', infoLabels=infoLabels))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+ return itemlist
+
+def findvideos(item):
+ logger.info()
+
+ itemlist = []
+ duplicated = []
+ data = get_source(item.url)
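+    # Movie pages embed players directly; dorama landing pages list episodes instead, so re-route those.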
+ if item.type !='episode' and '' not in data:
+ item.type = 'dorama'
+ item.contentSerieName = item.contentTitle
+ item.contentTitle = ''
+ return episodes(item)
+ else:
+ itemlist.extend(servertools.find_video_items(data=data))
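+        # sgl.php URLs are a local redirector; the real media URL sits in the 'file' field of the player config.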
+ for video_item in itemlist:
+ if 'sgl.php' in video_item.url:
+ headers = {'referer': item.url}
+ patron_gvideo = "'file':'(.*?)','type'"
+ data_gvideo = httptools.downloadpage(video_item.url, headers=headers).data
+ video_item.url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
+
+ duplicated.append(video_item.url)
+ video_item.channel = item.channel
+ video_item.infoLabels = item.infoLabels
+ video_item.language=IDIOMAS['sub']
+
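+    # Ids scraped from the page feed the site's /api/stream endpoint, which returns the player options as JSON.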
+ patron = 'var item = {id: (\d+), episode: (\d+),'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for id, episode in matches:
+ data_json=jsontools.load(httptools.downloadpage(host+'/api/stream/?id=%s&episode=%s' %(id, episode)).data)
+ sources = data_json['options']
+ for src in sources:
+ url = sources[src]
+
+ if 'sgl.php' in url:
+ headers = {'referer':item.url}
+ patron_gvideo = "'file':'(.*?)','type'"
+ data_gvideo = httptools.downloadpage(url, headers = headers).data
+ url = scrapertools.find_single_match(data_gvideo, patron_gvideo)
+
+ new_item = Item(channel=item.channel, title='%s', url=url, language=IDIOMAS['sub'], action='play',
+ infoLabels=item.infoLabels)
+ if url != '' and url not in duplicated:
+ itemlist.append(new_item)
+ duplicated.append(url)
+ try:
+ itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
+ except:
+ pass
+
+
+ # Requerido para FilterTools
+
+ itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ # Requerido para AutoPlay
+
+ autoplay.start(itemlist, item)
+
+ return itemlist
+
+
+def search(item, texto):
+ logger.info()
+ texto = texto.replace(" ", "+")
+ item.url = item.url + texto
+ item.type = 'search'
+ if texto != '':
+ return list_all(item)
diff --git a/plugin.video.alfa/channels/grantorrent.json b/plugin.video.alfa/channels/grantorrent.json
deleted file mode 100644
index fe8c7bd7..00000000
--- a/plugin.video.alfa/channels/grantorrent.json
+++ /dev/null
@@ -1,33 +0,0 @@
-{
- "id": "grantorrent",
- "name": "GranTorrent",
- "active": true,
- "adult": false,
- "language": ["cast"],
- "thumbnail": "grantorrent.jpg",
- "banner": "grantorrent.png",
- "fanart": "grantorrent.png",
- "categories": [
- "torrent",
- "movie",
- "tvshow"
- ],
- "settings": [
- {
- "id": "include_in_global_search",
- "type": "bool",
- "label": "Incluir en busqueda global",
- "default": true,
- "enabled": true,
- "visible": true
- },
- {
- "id": "modo_grafico",
- "type": "bool",
- "label": "Buscar información extra (TMDB)",
- "default": true,
- "enabled": true,
- "visible": true
- }
- ]
-}
diff --git a/plugin.video.alfa/channels/grantorrent.py b/plugin.video.alfa/channels/grantorrent.py
deleted file mode 100644
index 8ce6ca34..00000000
--- a/plugin.video.alfa/channels/grantorrent.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-
-from channelselector import get_thumb
-from core import httptools
-from core import scrapertools
-from core.item import Item
-from platformcode import config, logger
-
-host = "https://grantorrent.com/"
-
-dict_url_seasons = dict()
-__modo_grafico__ = config.get_setting('modo_grafico', 'grantorrent')
-
-
-def mainlist(item):
- logger.info()
-
- thumb_movie = get_thumb("movies", auto=True)
- thumb_tvshow = get_thumb("tvshows", auto=True)
-
- itemlist = list()
- itemlist.append(
- Item(channel=item.channel, title="Peliculas", action="peliculas", thumbnail=thumb_movie))
- itemlist.append(
- Item(channel=item.channel, title="Series", action="series", thumbnail=thumb_tvshow))
-
- return itemlist
-
-
-def peliculas(item):
- logger.info()
-
- thumb_search = get_thumb("search.png")
-
- itemlist = list()
- itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host))
- # itemlist.append(item.clone(channel=item.channel, title="Filtrar películas", action="listado", url=host))
- itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host, media="película",
- thumbnail=thumb_search))
-
- return itemlist
-
-
-def series(item):
- logger.info()
-
- thumb_search = get_thumb("search.png")
-
- itemlist = list()
- itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host + "series/"))
- # itemlist.append(item.clone(channel=item.channel, title="Filtrar series", action="listado", url=host))
- itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host + "series/",
- media="serie", thumbnail=thumb_search))
-
- return itemlist
-
-
-def search(item, texto):
- logger.info("texto:" + texto)
- texto = texto.replace(" ", "+")
- itemlist = []
-
- try:
- url = "%s?s=%s" % (item.url, texto)
- data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url).data)
- # logger.debug("data %s \n\n" % data)
-
- video_section = scrapertools.find_single_match(data, '(.*?)')
-
- pattern = '[^"]+)".*?class="bloque-inferior">' \
- '\s*(?P.*?)\s*'
-
- matches = re.compile(pattern, re.DOTALL).findall(video_section)
-
- for url, thumb, title in matches:
- if item.media == "serie":
- action = "episodios"
- else:
- action = "findvideos"
- itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumb,
- contentTitle=title, contentType="movie"))
-
- return itemlist
-
- # Se captura la excepción, para no interrumpir al buscador global si un canal falla
- except:
- import sys
- for line in sys.exc_info():
- logger.error("%s" % line)
- return []
-
-
-def listado(item):
- logger.info()
-
- itemlist = []
-
- data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
- # logger.debug("data %s \n\n" % data)
-
- video_section = scrapertools.find_single_match(data, '(.*?)')
- # logger.debug("data %s \n\n" % video_section)
-
- pattern = '[^"]+)".*?.*?class="bloque-superior">\s*' \
- '(?P.*?)\s*\s*\s*(?P.*?)\s*\s*(?P.*?)\s*'
-
- matches = re.compile(pattern, re.DOTALL).findall(video_section)
-
- for url, thumb, quality, lang, title, date in matches:
- title = scrapertools.htmlclean(title)
- title = re.sub(r"\s{2}", " ", title)
-
- if "/series" in item.url:
- if quality:
- title2 = "%s [%s]" % (title, quality)
-
- itemlist.append(Item(channel=item.channel, action="episodios", title=title2, url=url, thumbnail=thumb,
- quality=quality, contentTitle=title, contentType="tvshow"))
- else:
-
- if quality:
- title2 = "%s [%s]" % (title, quality)
-
- itemlist.append(Item(channel=item.channel, action="findvideos", title=title2, url=url, thumbnail=thumb,
- quality=quality, contentTitle=title, contentType="movie"))
-
- pagination = scrapertools.find_single_match(data, '')
- # logger.debug("data %s \n\n" % links_section)
-
- pattern = 'icono_.*?png" title="(?P.*?)" [^>]+>(?P.*?) (?P.*?) ' \
- '\s*Temporada.*?')
- # logger.debug("url es %s " % url_to_check)
-
- # if url doesn't exist we add it into the dict
- if url_to_check and url_to_check not in dict_url_seasons:
- dict_url_seasons[url_to_check] = False
-
- for key, value in dict_url_seasons.items():
- if not value:
- item.url = key
- dict_url_seasons[key] = True
- dict_data, item = get_episodes(item, dict_data)
-
- # logger.debug("URL_LIST es %s " % dict_url_seasons)
-
- return dict_data, item
-
-
-def findvideos(item):
- logger.info()
- itemlist = []
-
- if item.contentType == "movie":
-
- data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
- # logger.debug("data %s \n\n" % data)
-
- if item.contentTitle != "":
- title = scrapertools.find_single_match(data, '(.*?)[.]')
- year = scrapertools.find_single_match(data, '(\d+)')
- logger.debug("title es %s" % title)
- if title:
- item.contentTitle = title
- item.show = title
- if year:
- item.infoLabels['year'] = year
-
- links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)')
- # logger.debug("data %s \n\n" % data)
-
- pattern = 'icono_.*?png" title="(?P.*?)" [^>]+> (?P.*?) (?P.*?) ' \
diff --git a/plugin.video.alfa/channels/pelisplus.py b/plugin.video.alfa/channels/pelisplus.py
-patrones = ['',
- 'Sinopsis:<\/span>.([^<]+)<\/span>.<\/p>']
-
IDIOMA = {'latino': 'Latino'}
list_language = IDIOMA.values()
@@ -36,6 +32,13 @@ list_servers = [
]
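+# Download a page and collapse quotes/whitespace so regexes can match markup that HTML splits over several lines.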
+def get_source(url):
+
+ logger.info()
+ data = httptools.downloadpage(url).data
+ data = re.sub(r'"|\n|\r|\t| |
|\s{2,}', "", data)
+ return data
+
def mainlist(item):
logger.info()
@@ -44,100 +47,218 @@ def mainlist(item):
itemlist.append(
item.clone(title="Peliculas",
- action="menupeliculas",
- thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
- fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png',
- extra='peliculas/'
+ action="sub_menu",
+ thumbnail=get_thumb('movies', auto=True),
))
itemlist.append(
item.clone(title="Series",
- action="menuseries",
- thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
- fanart='https://s27.postimg.org/iahczwgrn/series.png',
- extra='peliculas/'
+ action="sub_menu",
+ thumbnail=get_thumb('tvshows', auto=True),
))
itemlist.append(
- item.clone(title="Documentales",
- action="lista",
- url=host + 'documentales/pag-1',
- thumbnail='https://s16.postimg.org/7xjj4bmol/documental.png',
- fanart='https://s16.postimg.org/7xjj4bmol/documental.png',
- extra='documentales/'
- ))
+ item.clone(title="Buscar", action="search", url=host + 'busqueda/?s=',
+ thumbnail=get_thumb('search', auto=True),
+ ))
autoplay.show_option(item.channel, itemlist)
return itemlist
-def menupeliculas(item):
+def sub_menu(item):
logger.info()
itemlist = []
+ content = item.title.lower()
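+    # The menu title ("peliculas"/"series") doubles as the URL path segment used below.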
itemlist.append(item.clone(title="Todas",
- action="lista",
- url=host + 'peliculas/pag-1',
- thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
- fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
- extra='peliculas/'
- ))
-
- itemlist.append(item.clone(title="Ultimas",
- action="lista",
- url=host + 'estrenos/pag-1',
- thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
- fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png',
- extra='estrenos/'
+ action="list_all",
+ url=host + '%s/ultimas-%s/' % (content, content),
+ thumbnail=get_thumb('all', auto=True),
))
itemlist.append(item.clone(title="Generos",
action="generos",
- url=host + 'peliculas/pag-1',
- thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
- fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
- extra='documentales/'
- ))
-
- itemlist.append(item.clone(title="Buscar",
- action="search",
- url=host + 'busqueda/?s=',
- thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
- fanart='https://s30.postimg.org/pei7txpa9/buscar.png',
- extra='peliculas/'
+ url=host + '%s/' % content,
+ thumbnail=get_thumb('genres', auto=True),
))
return itemlist
-def menuseries(item):
+def list_all(item):
+ logger.info()
+
+ itemlist=[]
+
+ data = get_source(item.url)
+    patron = '(?:|Posters>)'
+    next_page = scrapertools.find_single_match(data, '»')
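+    # '»' is the pager's next arrow; an empty match means there is no next page.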
+ if next_page != '':
+ itemlist.append(item.clone(action="list_all",
+ title='Siguiente >>>',
+ url=host+next_page,
+ thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'
+ ))
+ return itemlist
+
+
+def generos(item):
+
logger.info()
itemlist = []
+ data = get_source(item.url)
+ if 'series' not in item.url:
+ clean_genre = 'PELÍCULAS DE'
+ else:
+ clean_genre = 'SERIES DE'
- itemlist.append(item.clone(title="Todas",
- action="lista",
- url=host + "series/pag-1",
- thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
- fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
- extra='series/'
- ))
+ patron = ' %s(.*?) <' % clean_genre
+ matches = re.compile(patron, re.DOTALL).findall(data)
- itemlist.append(item.clone(title="Generos",
- action="generos",
- url=host + 'series/pag-1',
- thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
- fanart='https://s3.postimg.org/5s9jg2wtf/generos.png',
- extra='series/'
- ))
+ for scrapedtitle, scrapedurl in matches:
- itemlist.append(item.clone(title="Buscar",
- action="search",
- url=host + 'busqueda/?s=',
- thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
- fanart='https://s30.postimg.org/pei7txpa9/buscar.png',
- extra='series/'
- ))
+ url = scrapedurl
+ title = scrapedtitle
+ if 'agregadas' not in title.lower():
+ itemlist.append(
+ Item(channel=item.channel,
+ action="list_all",
+ title=title,
+ url=url,
+ ))
+ return itemlist
+
+
+def seasons(item):
+ logger.info()
+
+ itemlist = []
+ templist = []
+ data = get_source(item.url)
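+    # The page embeds a numeric serie id that the api/episodes call in episodes() expects.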
+ serie_id = scrapertools.find_single_match(data, '')
+
+ patron = 'class=js-season-item> SEASON(.*?)'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ infoLabels = item.infoLabels
+ for season in matches:
+ contentSeasonNumber = season
+ infoLabels['season']=season
+ itemlist.append(Item(channel=item.channel, action="episodes", title='Temporada %s' % season,
+ serie_id=serie_id, contentSeasonNumber=contentSeasonNumber,
+ serie_url=item.url, infoLabels=infoLabels))
+
+ if item.extra == 'seasons':
+ for tempitem in itemlist:
+ templist += episodes(tempitem)
+ else:
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ if config.get_videolibrary_support() and len(itemlist) > 0:
+ itemlist.append(
+ Item(channel=item.channel,
+ title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
+ url=item.url,
+ action="add_serie_to_library",
+ extra="seasons",
+ contentSerieName=item.contentSerieName,
+ contentSeasonNumber=contentSeasonNumber
+ ))
+ if item.extra == 'seasons':
+ return templist
+ else:
+ return itemlist
+
+
+def episodes(item):
+ logger.info()
+
+ itemlist= []
+
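+    # api/episodes returns JSON with the episode list under 'titles', keyed by serie id and season number.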
+ url = host+'api/episodes?titleId=%s&seasonNumber=%s' % (item.serie_id, item.contentSeasonNumber)
+
+ data = jsontools.load(httptools.downloadpage(url).data)
+ episode_list = data['titles']
+ infoLabels = item.infoLabels
+ for episode in episode_list:
+
+ url = item.serie_url+episode['friendlyTitle4Url']
+ thumbnail = episode['url_image']
+ plot = episode['shortDescription']
+ contentEpisodeNumber = episode['tvSeasonEpisodeNumber']
+ title = '%sx%s - %s' % (item.contentSeasonNumber, contentEpisodeNumber, episode['title'])
+ infoLabels['episode']=contentEpisodeNumber
+
+ itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail,
+ plot=plot, infoLabels=infoLabels))
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ return itemlist
+
+
+
+def findvideos(item):
+ logger.info()
+
+ itemlist = []
+
+
+ data = get_source(item.url)
+
+ itemlist.extend(servertools.find_video_items(data=data))
+
+ for videoitem in itemlist:
+ videoitem.channel = item.channel
+ videoitem.language = IDIOMA['latino']
+ videoitem.title = '[%s] [%s]' % (videoitem.server, videoitem.language)
+ videoitem.infoLabels = item.infoLabels
+
+ # Requerido para FilterTools
+
+ itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ # Requerido para AutoPlay
+
+ autoplay.start(itemlist, item)
+
+ if item.contentType == 'movie':
+ if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+ itemlist.append(
+ Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+ url=item.url, action="add_pelicula_to_library", extra="findvideos",
+ contentTitle=item.contentTitle))
return itemlist
@@ -149,7 +270,7 @@ def search(item, texto):
try:
if texto != '':
- return lista(item)
+ return list_all(item)
else:
return []
except:
@@ -159,380 +280,22 @@ def search(item, texto):
return []
-def lista(item):
- logger.info()
-
- itemlist = []
-
- if 'series/' in item.extra:
- accion = 'temporadas'
- tipo = 'tvshow'
- else:
- accion = 'findvideos'
- tipo = 'movie'
-
- data = httptools.downloadpage(item.url).data
-
- if item.action != 'search':
- patron = '.*?.<\/i>.*?..*?..*?>'
- actual = scrapertools.find_single_match(data,
- '<<\/a>')
- else:
- patron = '.*?\n<\/i>.*?\n.*?\n.*?>'
- actual = ''
-
- matches = re.compile(patron, re.DOTALL).findall(data)
-
- for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
- url = scrapedurl
- title = scrapertools.decodeHtmlentities(scrapedtitle)
- thumbnail = scrapedthumbnail
-
- filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w154", "")
- filtro_list = {"poster_path": filtro_thumb} # Nombre del campo a filtrar y valor en los resultados de la api
- # de tmdb
- filtro_list = filtro_list.items()
-
- if item.action != 'search':
-
- new_item=(
- Item(channel=item.channel,
- contentType=tipo,
- action=accion,
- title=title,
- url=scrapedurl,
- thumbnail=thumbnail,
- fulltitle=scrapedtitle,
- infoLabels={'filtro': filtro_list},
- extra=item.extra,
- context=autoplay.context
- ))
- if 'serie' in scrapedurl:
- new_item.contentSerieName=scrapedtitle
- else:
- new_item.contentTitle = scrapedtitle
- itemlist.append(new_item)
- else:
- if item.extra=='':
- item.extra = scrapertools.find_single_match(url, 'serie|pelicula')+'s/'
- if 'series/' in item.extra:
- accion = 'temporadas'
- tipo = 'tvshow'
- else:
- accion = 'findvideos'
- tipo = 'movie'
- item.extra = item.extra.rstrip('s/')
- if item.extra in url:
- new_item=(
- Item(channel=item.channel,
- contentType=tipo,
- action=accion,
- title=scrapedtitle,
- url=scrapedurl,
- thumbnail=scrapedthumbnail,
- fulltitle=scrapedtitle,
- infoLabels={'filtro': filtro_list},
- extra=item.extra,
- context=autoplay.context
- ))
- if 'serie' in scrapedurl:
- new_item.contentSerieName=scrapedtitle
- else:
- new_item.contentTitle = scrapedtitle
- itemlist.append(new_item)
-
- tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
- if item.action != 'search' and actual != '':
- if itemlist != []:
- next_page = str(int(actual) + 1)
- next_page_url = item.extra + 'pag-' + next_page
- if not next_page_url.startswith("http"):
- next_page_url = host + next_page_url
- itemlist.append(
- Item(channel=item.channel,
- action="lista",
- title='Siguiente >>>',
- url=next_page_url,
- thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png',
- extra=item.extra
- ))
- return itemlist
-
-
-def temporadas(item):
- logger.info()
- itemlist = []
- templist = []
- data = httptools.downloadpage(item.url).data
-
- patron = '<\/span>Temporada([^<]+)'
- matches = re.compile(patron, re.DOTALL).findall(data)
-
- for scrapedtitle in matches:
- infoLabels = item.infoLabels
- url = item.url
- title = 'Temporada ' + scrapedtitle.strip(' \r\n')
- thumbnail = scrapertools.find_single_match(data, '')
- plot = scrapertools.find_single_match(data,
- 'Sinopsis:<\/span>.([^<]+).<\/span>')
- fanart = scrapertools.find_single_match(data, '.*?')
- contentSeasonNumber = scrapedtitle.strip(' \r\n')
- itemlist.append(
- Item(channel=item.channel,
- action="episodios",
- title=title,
- fulltitle=item.title,
- url=url,
- thumbnail=thumbnail,
- plot=plot,
- fanart=fanart,
- extra=scrapedtitle.rstrip('\n'),
- contentSerieName=item.contentSerieName,
- contentSeasonNumber=contentSeasonNumber,
- infoLabels={'season': contentSeasonNumber},
- context=item.context
- ))
-
- if item.extra == 'temporadas':
- for tempitem in itemlist:
- templist += episodios(tempitem)
- else:
- tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
- if config.get_videolibrary_support() and len(itemlist) > 0:
- itemlist.append(
- Item(channel=item.channel,
- title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
- url=item.url,
- action="add_serie_to_library",
- extra="temporadas",
- contentSerieName=item.contentSerieName,
- contentSeasonNumber=contentSeasonNumber
- ))
- if item.extra == 'temporadas':
- return templist
- else:
- return itemlist
-
-
-def episodios(item):
- logger.info()
- itemlist = []
- data = httptools.downloadpage(item.url).data
- patron = '<\/span>([^<]+)<\/a>.'
- temporada = 'temporada/' + item.extra.strip(' ')
- matches = re.compile(patron, re.DOTALL).findall(data)
- infoLabels = item.infoLabels
-
- for scrapedtitle, scrapedurl in matches:
-
- if temporada in scrapedurl:
- url = scrapedurl
- contentSeasonNumber = re.findall(r'temporada.*?(\d+)', url)
- capitulo = re.findall(r'Capitulo \d+', scrapedtitle)
- contentEpisodeNumber = re.findall(r'\d+', capitulo[0])
- contentEpisodeNumber = contentEpisodeNumber[0]
- infoLabels['episode'] = contentEpisodeNumber
- title = contentSeasonNumber[0] + 'x' + contentEpisodeNumber + ' - ' + scrapedtitle
-
- thumbnail = scrapertools.find_single_match(data, '')
- plot = ''
- fanart = ''
- itemlist.append(
- Item(channel=item.channel,
- action="findvideos",
- title=title,
- fulltitle=item.title,
- url=url,
- thumbnail=thumbnail,
- plot=plot,
- fanart=fanart,
- extra=scrapedtitle,
- contentSeasonNumber=item.contentSeasonNumber,
- infoLabels=infoLabels,
- context=item.context
- ))
- if item.extra != 'temporadas':
- tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
- itemlist = fail_tmdb(itemlist)
- return itemlist
-
-
-def fail_tmdb(itemlist):
- logger.info()
- realplot = ''
- for item in itemlist:
- if item.infoLabels['plot'] == '':
- data = httptools.downloadpage(item.url).data
- if item.fanart == '':
- item.fanart = scrapertools.find_single_match(data, patrones[0])
- realplot = scrapertools.find_single_match(data, patrones[1])
- item.plot = scrapertools.remove_htmltags(realplot)
- return itemlist
-
-
-def generos(item):
- tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
- "Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
- "Drama": "https://s16.postimg.org/94sia332d/drama.png",
- "Accion": "https://s3.postimg.org/y6o9puflv/accion.png",
- "Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
- "Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
- "Animacion": "https://s13.postimg.org/5on877l87/animacion.png",
- "Ciencia Ficcion": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
- "Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
- "Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
- "Musica": "https://s29.postimg.org/bbxmdh9c7/musical.png",
- "Western": "https://s23.postimg.org/lzyfbjzhn/western.png",
- "Fantasia": "https://s13.postimg.org/65ylohgvb/fantasia.png",
- "Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
- "Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
- "Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
- "Historia": "https://s15.postimg.org/fmc050h1n/historia.png",
- "Pelicula De La Television": "https://s9.postimg.org/t8xb14fb3/delatv.png",
- "Foreign": "https://s29.postimg.org/jdc2m158n/extranjera.png"}
-
- logger.info()
- itemlist = []
- data = httptools.downloadpage(item.url).data
- patron = '<\/i>.([^<]+)<\/span>'
- matches = re.compile(patron, re.DOTALL).findall(data)
-
- for scrapedurl, scrapedtitle in matches:
-
- url = scrapedurl + 'pag-1'
- title = scrapedtitle
- if scrapedtitle in tgenero:
- thumbnail = tgenero[scrapedtitle]
- fanart = tgenero[scrapedtitle]
- else:
- thumbnail = ''
- fanart = ''
- extra = scrapedurl.replace('http://www.pelisplus.tv/', '')
- itemlist.append(
- Item(channel=item.channel,
- action="lista",
- title=title,
- fulltitle=item.title,
- url=url,
- thumbnail=thumbnail,
- fanart=fanart,
- extra=extra
- ))
- return itemlist
-
-def get_vip(url):
- logger.info()
- itemlist =[]
- url= url.replace('reproductor','vip')
- data = httptools.downloadpage(url).data
- video_urls = scrapertools.find_multiple_matches(data,'')
- for item in video_urls:
- if 'elreyxhd' in item:
- if 'plus'in item:
- id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
- new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang)
- datax=httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
- itemlist.append(Item(url=datax))
- else:
- id = scrapertools.find_single_match(item,'episodes\/(\d+)')
- data_vip = httptools.downloadpage(item).data
- patron = ''
- matches = re.compile(patron, re.DOTALL).findall(data_vip)
- for urls in matches:
- x = scrapertools.find_single_match(urls,r"&x=(\d)&")
- if x != '':
- new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=%s&sv=si' % (id, x)
- datax = httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
- itemlist.append(Item(url=datax))
-
- return itemlist
-
-
-def findvideos(item):
- logger.info()
- itemlist = []
- duplicados = []
- data = httptools.downloadpage(item.url).data
- video_page = scrapertools.find_single_match(data, "
|\s{2,}', "", data) + return data + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + itemlist = [] + + itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu", + thumbnail=get_thumb('doramas', auto=True), type='dorama')) + itemlist.append(Item(channel=item.channel, title="Películas", action="list_all", + url=host + 'catalogue?type[]=pelicula', thumbnail=get_thumb('movies', auto=True), + type='movie')) + itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?q=', + thumbnail=get_thumb('search', auto=True))) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + +def doramas_menu(item): + logger.info() + + itemlist =[] + + itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'catalogue', + thumbnail=get_thumb('all', auto=True), type='dorama')) + itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes", + url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama')) + return itemlist + +def list_all(item): + logger.info() + + itemlist = [] + data = get_source(item.url) + + patron = '(.*?)' + patron += '
(.*?
)')
- # logger.debug("data %s \n\n" % video_section)
-
- pattern = '\s*
\s*(?P.*?)\s*
\s*(?P.*?)\s*
'
-
- matches = re.compile(pattern, re.DOTALL).findall(video_section)
-
- for url, thumb, quality, lang, title, date in matches:
- title = scrapertools.htmlclean(title)
- title = re.sub(r"\s{2}", " ", title)
-
- if "/series" in item.url:
- if quality:
- title2 = "%s [%s]" % (title, quality)
-
- itemlist.append(Item(channel=item.channel, action="episodios", title=title2, url=url, thumbnail=thumb,
- quality=quality, contentTitle=title, contentType="tvshow"))
- else:
-
- if quality:
- title2 = "%s [%s]" % (title, quality)
-
- itemlist.append(Item(channel=item.channel, action="findvideos", title=title2, url=url, thumbnail=thumb,
- quality=quality, contentTitle=title, contentType="movie"))
-
- pagination = scrapertools.find_single_match(data, '')
- # logger.debug("data %s \n\n" % links_section)
-
- pattern = 'icono_.*?png" title="(?P(.*?)[.]
')
- year = scrapertools.find_single_match(data, '(\d+)
')
- logger.debug("title es %s" % title)
- if title:
- item.contentTitle = title
- item.show = title
- if year:
- item.infoLabels['year'] = year
-
- links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)')
- # logger.debug("data %s \n\n" % data)
-
- pattern = 'icono_.*?png" title="(?P|\s{2,}', "", data) + return data + def mainlist(item): logger.info() @@ -44,100 +47,218 @@ def mainlist(item): itemlist.append( item.clone(title="Peliculas", - action="menupeliculas", - thumbnail='https://s8.postimg.org/6wqwy2c2t/peliculas.png', - fanart='https://s8.postimg.org/6wqwy2c2t/peliculas.png', - extra='peliculas/' + action="sub_menu", + thumbnail=get_thumb('movies', auto=True), )) itemlist.append( item.clone(title="Series", - action="menuseries", - thumbnail='https://s27.postimg.org/iahczwgrn/series.png', - fanart='https://s27.postimg.org/iahczwgrn/series.png', - extra='peliculas/' + action="sub_menu", + thumbnail=get_thumb('tvshows', auto=True), )) itemlist.append( - item.clone(title="Documentales", - action="lista", - url=host + 'documentales/pag-1', - thumbnail='https://s16.postimg.org/7xjj4bmol/documental.png', - fanart='https://s16.postimg.org/7xjj4bmol/documental.png', - extra='documentales/' - )) + item.clone(title="Buscar", action="search", url=host + 'busqueda/?s=', + thumbnail=get_thumb('search', auto=True), + )) autoplay.show_option(item.channel, itemlist) return itemlist -def menupeliculas(item): +def sub_menu(item): logger.info() itemlist = [] + content = item.title.lower() itemlist.append(item.clone(title="Todas", - action="lista", - url=host + 'peliculas/pag-1', - thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', - fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', - extra='peliculas/' - )) - - itemlist.append(item.clone(title="Ultimas", - action="lista", - url=host + 'estrenos/pag-1', - thumbnail='https://s22.postimg.org/cb7nmhwv5/ultimas.png', - fanart='https://s22.postimg.org/cb7nmhwv5/ultimas.png', - extra='estrenos/' + action="list_all", + url=host + '%s/ultimas-%s/' % (content, content), + thumbnail=get_thumb('all', auto=True), )) itemlist.append(item.clone(title="Generos", action="generos", - url=host + 'peliculas/pag-1', - thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png', - fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', - extra='documentales/' - )) - - itemlist.append(item.clone(title="Buscar", - action="search", - url=host + 'busqueda/?s=', - thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png', - fanart='https://s30.postimg.org/pei7txpa9/buscar.png', - extra='peliculas/' + url=host + '%s/' % content, + thumbnail=get_thumb('genres', auto=True), )) return itemlist -def menuseries(item): +def list_all(item): + logger.info() + + itemlist=[] + + data = get_source(item.url) + patron = '(?:|Posters>)»') + if next_page != '': + itemlist.append(item.clone(action="list_all", + title='Siguiente >>>', + url=host+next_page, + thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png' + )) + return itemlist + + +def generos(item): + logger.info() itemlist = [] + data = get_source(item.url) + if 'series' not in item.url: + clean_genre = 'PELÍCULAS DE' + else: + clean_genre = 'SERIES DE' - itemlist.append(item.clone(title="Todas", - action="lista", - url=host + "series/pag-1", - thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png', - fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', - extra='series/' - )) + patron = '