Correcciones y novedades

Correcciones:
- CineDeTodo: Correcciones en la detección de enlaces y agregada sección series
- CinemaHD: Correcciones en la detección de enlaces y agregada sección series
- PelisPlus: Corrección en la detección de enlaces
- ZonaWorld: Corrección por cambio de estructura

Novedades:
- PelisRex: Nuevo canal de películas y series
- ReyAnime: Nuevo canal de series y películas anime.
This commit is contained in:
Alfa-beto
2019-01-30 13:40:53 -03:00
committed by GitHub
parent 9a28c1a5dd
commit 29001df8a6
8 changed files with 1118 additions and 147 deletions

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# -*- Channel CinemaHD -*-
# -*- Channel CineDeTodo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
@@ -16,39 +16,61 @@ from channels import autoplay
from channels import filtertools
host = 'http://www.cinedetodo.com/'
host = 'https://www.cinedetodo.net/'
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(item.clone(title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
itemlist.append(item.clone(title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
# itemlist.append(item.clone(title="Por Calidad", action="section", section='quality',
# thumbnail=get_thumb('quality', auto=True)))
itemlist.append(item.clone(title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='MovieList'))
itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='Series'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
def sub_menu(item):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
thumbnail=get_thumb('last', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True), type=item.type ))
if item.type != 'Series':
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True), type=item.type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
@@ -57,60 +79,86 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
patron += '<td>(\d{4})</td>'
full_data = data
if item.section != '':
data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)</ul>')
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h3 class=Title>(.*?)<\/h3>.*?<span class=Year>(.*?)<\/span>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
data = scrapertools.find_single_match(data, '<!--<%s>.*?class="MovieList NoLmtxt(.*?)</ul>' % item.type)
if item.section == 'alpha':
patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?<img src="([^"]+)" alt=.*?'
patron += '<strong>([^"]+)</strong>.*?<td>(\d{4})</td>'
matches = re.compile(patron, re.DOTALL).findall(full_data)
else:
patron = '<article.*?<a href="(.*?)">.*?<img src="(.*?)" alt=.*?'
patron += '<h3 class="Title">(.*?)<\/h3>.*?date_range">(\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
if year == '':
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
cleantitle = scrapedtitle[0].strip()
else:
contentTitle = scrapedtitle
cleantitle = scrapedtitle
contentTitle = re.sub('\(.*?\)','', contentTitle)
cleantitle = re.sub('\(.*?\)', '', cleantitle)
title = '%s [%s]'%(contentTitle, year)
if not config.get_setting('unify'):
title = '%s [%s]'%(cleantitle, year)
else:
title = cleantitle
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'year':year}
))
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels = {'year': year}
)
if 'series' not in url:
new_item.contentTitle = cleantitle
new_item.action='findvideos'
else:
new_item.contentSerieName = cleantitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>')
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
type=item.type))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(host)
if item.type == 'Series':
url = host + '?tr_post_type=2'
else:
url = host + '?tr_post_type=1'
data = get_source(url)
action = 'list_all'
if item.section == 'quality':
patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
elif item.section == 'genre':
patron = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
elif item.section == 'year':
patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
if item.section == 'genre':
patron = '<a href="([^ ]+)" class="Button STPb">(.*?)</a>'
elif item.section == 'alpha':
patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
patron = '<li><a href="(.*?letter.*?)">(.*?)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
@@ -118,38 +166,104 @@ def section(item):
url = data_one
title = data_two
if title != 'Ver más':
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
if item.type == 'Series':
url =url + '?tr_post_type=2'
else:
url = url + '?tr_post_type=1'
if 'serie'in title.lower():
continue
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section,
type=item.type)
itemlist.append(new_item)
return itemlist
def seasons(item):
    """List the seasons available for a series page.

    Scrapes every 'Temporada <span>N' marker from the show page and builds
    one item per season (action 'episodesxseasons'), plus a "add to video
    library" entry when the platform supports it.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'Temporada <span>(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for season in matches:
        # The regex only captures digits, so this replace is normally a
        # no-op; kept as a safeguard against stray 'temporada' text.
        season = season.lower().replace('temporada', '')
        # NOTE(review): the same infoLabels dict is mutated and shared by
        # every season item — confirm Item copies it on construction,
        # otherwise all items end up with the last season number.
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Return the full episode list of a series by flattening every season."""
    logger.info()
    episodes = []
    for season_item in seasons(item):
        episodes.extend(episodesxseasons(season_item))
    return episodes
def episodesxseasons(item):
    """List the episodes of a single season.

    Expects item.infoLabels['season'] to be set already (done by seasons()).
    """
    logger.info()
    itemlist = []
    full_data = get_source(item.url)
    # Narrow the HTML to one season block.
    # NOTE(review): this always matches from the FIRST 'Temporada' marker,
    # regardless of which season the item requests — confirm the page layout
    # makes this correct.
    data = scrapertools.find_single_match(full_data, 'Temporada <span>\d+.*?</ul>')
    patron = '<span class="Num">(\d+)<.*?<a href="([^"]+)".*?"MvTbTtl".*?">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedepisode, scrapedurl, scrapedtitle in matches:
        # NOTE(review): the same infoLabels dict is shared by every episode
        # item — confirm Item copies it, otherwise all episodes carry the
        # last episode number.
        infoLabels['episode'] = scrapedepisode
        url = scrapedurl
        title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
        itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos', infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.decodeHtmlentities(data)
patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?</iframe>'
patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class=Video>.*?src=(.*?) frameborder')
opt_data = scrapertools.find_single_match(data,'%s><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)" frameborder')
opt_data = scrapertools.find_single_match(data,'"%s"><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
language = opt_data[0].strip()
language = language.replace('(','').replace(')','')
language = re.sub('\(|\)', '', language)
quality = opt_data[1].strip()
if url != '' and 'youtube' not in url:
itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS[language], quality=quality, action='play'))
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
action='play', infoLabels=item.infoLabels))
elif 'youtube' in url:
trailer = item.clone(title='Trailer', url=url, action='play', server='youtube')
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
tmdb.set_infoLabels_itemlist(itemlist, True)
try:
itemlist.append(trailer)
except:
@@ -175,7 +289,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.section = 'search'
if texto != '':
return list_all(item)
else:
@@ -190,11 +304,11 @@ def newest(categoria):
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host+'/animacion'
item.url = host+'animacion/?tr_post_type=1'
elif categoria == 'terror':
item.url = host+'/terror'
elif categoria == 'documentales':
item.url = host+'/documental'
item.url = host+'terror/?tr_post_type=1'
item.type = 'MovieList'
item.section = 'search'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()

View File

@@ -16,41 +16,61 @@ from channels import autoplay
from channels import filtertools
host = 'http://www.cinemahd.co/'
host = 'https://www.cinemahd.co/'
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Calidad", action="section", section='quality',
thumbnail=get_thumb('quality', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Año", action="section", section='year',
thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='MovieList'))
itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='Series'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
def sub_menu(item):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
thumbnail=get_thumb('last', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True), type=item.type ))
if item.type != 'Series':
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True), type=item.type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
@@ -60,14 +80,18 @@ def list_all(item):
data = get_source(item.url)
full_data = data
data = scrapertools.find_single_match(data, '<ul class=MovieList NoLmtxt.*?</ul>')
if item.section != '':
data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)</ul>')
else:
data = scrapertools.find_single_match(data, '<!--<%s>.*?class="MovieList NoLmtxt(.*?)</ul>' % item.type)
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
patron += '<td>(\d{4})</td>'
patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?<img src="([^"]+)" alt=.*?'
patron += '<strong>([^"]+)</strong>.*?<td>(\d{4})</td>'
matches = re.compile(patron, re.DOTALL).findall(full_data)
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h3 class=Title>(.*?)<\/h3>(?:</a>|<span class=Year>(.*?)<\/span>)'
patron = '<article.*?<a href="(.*?)">.*?<img src="(.*?)" alt=.*?'
patron += '<h3 class="Title">(.*?)<\/h3>.*?date_range">(\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
@@ -77,45 +101,64 @@ def list_all(item):
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
cleantitle = scrapedtitle[0].strip()
else:
contentTitle = scrapedtitle
cleantitle = scrapedtitle
contentTitle = re.sub('\(.*?\)','', contentTitle)
cleantitle = re.sub('\(.*?\)', '', cleantitle)
title = '%s [%s]'%(contentTitle, year)
if not config.get_setting('unify'):
title = '%s [%s]'%(cleantitle, year)
else:
title = cleantitle
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(Item(channel=item.channel, action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'year':year}
))
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels = {'year': year}
)
if 'series' not in url:
new_item.contentTitle = cleantitle
new_item.action='findvideos'
else:
new_item.contentSerieName = cleantitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
# Paginación
url_next_page = scrapertools.find_single_match(full_data,'<a class=next.*?href=(.*?)>')
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
type=item.type))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(host)
if item.type == 'Series':
url = host + '?tr_post_type=2'
else:
url = host + '?tr_post_type=1'
data = get_source(url)
action = 'list_all'
if item.section == 'quality':
patron = 'menu-item-object-category.*?menu-item-\d+ menu-category-list><a href=(.*?)>(.*?)<\/a>'
elif item.section == 'genre':
patron = '<a href=([^ ]+) class=Button STPb>(.*?)</a>'
elif item.section == 'year':
patron = '<li><a href=([^>]+)>(\d{4})<\/a><\/li>'
if item.section == 'genre':
patron = '<a href="([^ ]+)" class="Button STPb">(.*?)</a>'
elif item.section == 'alpha':
patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
patron = '<li><a href="(.*?letter.*?)">(.*?)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
@@ -123,32 +166,99 @@ def section(item):
url = data_one
title = data_two
if title != 'Ver más':
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
if item.type == 'Series':
url =url + '?tr_post_type=2'
else:
url = url + '?tr_post_type=1'
if 'serie'in title.lower():
continue
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section,
type=item.type)
itemlist.append(new_item)
return itemlist
def seasons(item):
    """List the seasons available for a series page.

    Scrapes every 'Temporada <span>N' marker and builds one item per season
    (action 'episodesxseasons'), plus a video-library entry when supported.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'Temporada <span>(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for season in matches:
        # Regex captures digits only, so this replace is normally a no-op.
        season = season.lower().replace('temporada', '')
        # NOTE(review): the same infoLabels dict is mutated and shared by
        # every season item — confirm Item copies it on construction.
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Return the full episode list of a series by flattening every season."""
    logger.info()
    itemlist = []
    # Reuse seasons() so both entry points share the same scraping logic.
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseasons(tempitem)
    return itemlist
def episodesxseasons(item):
    """List the episodes of a single season.

    Expects item.infoLabels['season'] to be set already (done by seasons()).
    """
    logger.info()
    itemlist = []
    full_data = get_source(item.url)
    # NOTE(review): matches from the FIRST 'Temporada' marker regardless of
    # the requested season — confirm the page layout makes this correct.
    data = scrapertools.find_single_match(full_data, 'Temporada <span>\d+.*?</ul>')
    patron = '<span class="Num">(\d+)<.*?<a href="([^"]+)".*?"MvTbTtl".*?">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedepisode, scrapedurl, scrapedtitle in matches:
        # NOTE(review): shared mutated infoLabels dict across all episode
        # items — confirm Item copies it.
        infoLabels['episode'] = scrapedepisode
        url = scrapedurl
        title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
        itemlist.append(Item(channel=item.channel, title=title, url=url, action='findvideos', infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.decodeHtmlentities(data)
patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?</iframe>'
patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class=Video>.*?src=(.*?) frameborder')
opt_data = scrapertools.find_single_match(data,'%s><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)" frameborder')
opt_data = scrapertools.find_single_match(data,'"%s"><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
language = opt_data[0].strip()
language = re.sub('\(|\)', '', language)
quality = opt_data[1].strip()
if url != '' and 'youtube' not in url:
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
action='play'))
action='play', infoLabels=item.infoLabels))
elif 'youtube' in url:
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
@@ -179,7 +289,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.section = 'search'
if texto != '':
return list_all(item)
else:
@@ -194,9 +304,11 @@ def newest(categoria):
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host+'/animacion'
item.url = host+'animacion/?tr_post_type=1'
elif categoria == 'terror':
item.url = host+'/terror'
item.url = host+'terror/?tr_post_type=1'
item.type = 'MovieList'
item.section = 'search'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()

View File

@@ -241,7 +241,7 @@ def findvideos(item):
patron = '"file":"([^"]+)","label":"([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(url_data)
for url, quality in matches:
url = 'https://www.pelisplus.net' + url.replace('\/', '/')
url = url.replace('\/', '/')
itemlist.append(
Item(channel=item.channel, title='%s' + title, url=url, action='play', language=IDIOMAS[language],
quality=quality, infoLabels=item.infoLabels))

View File

@@ -0,0 +1,67 @@
{
"id": "pelisrex",
"name": "PelisRex",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://www.pelisrex.com/wp-content/uploads/2018/11/hjko.png",
"banner": "",
"version": 1,
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,321 @@
# -*- coding: utf-8 -*-
# -*- Channel PelisRex -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://www.pelisrex.com/'
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
def mainlist(item):
    """Channel entry point: build the top-level menu (Películas / Series / Buscar)."""
    logger.info()
    # Register the servers/qualities this channel supports for autoplay.
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()
    itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
                         thumbnail=get_thumb('last', auto=True), type='MovieList'))
    itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
                         thumbnail=get_thumb('last', auto=True), type='Series'))
    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
                         thumbnail=get_thumb('search', auto=True)))
    # Append the "configure autoplay" entry if the user enabled it.
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def sub_menu(item):
    """Build the submenu for one content type (movies or series)."""
    logger.info()
    menu = [
        Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
             thumbnail=get_thumb('last', auto=True), type=item.type),
        Item(channel=item.channel, title="Generos", action="section", section='genre',
             thumbnail=get_thumb('genres', auto=True), type=item.type),
    ]
    # The alphabetical index exists for movies only.
    if item.type != 'Series':
        menu.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
                         thumbnail=get_thumb('alphabet', auto=True), type=item.type))
    return menu
def get_source(url, referer=None):
    """Download *url* and strip whitespace/HTML noise from the response body.

    When *referer* is given it is sent as the Referer header.
    """
    logger.info()
    if referer is None:
        response = httptools.downloadpage(url)
    else:
        response = httptools.downloadpage(url, headers={'Referer': referer})
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", response.data)
def list_all(item):
    """List movies/series from a listing page (latest, genre, alpha or search).

    Uses item.section to pick the scraping pattern and item.type to tell the
    movie listing from the series listing. Appends a "Siguiente >>" item when
    the page has a next-page link.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    full_data = data
    # Narrow the HTML to the listing block; the unfiltered front page wraps
    # each type's list in an HTML comment marker ("<!--<MovieList>" etc.).
    if item.section != '':
        data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)</ul>')
    else:
        data = scrapertools.find_single_match(data, '<!--<%s>.*?class="MovieList NoLmtxt(.*?)</ul>' % item.type)
    if item.section == 'alpha':
        # Alphabetical index uses a table layout, scraped from the full page.
        patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?<img src="([^"]+)" alt=.*?'
        patron += '<strong>([^"]+)</strong>.*?<td>(\d{4})</td>'
        matches = re.compile(patron, re.DOTALL).findall(full_data)
    else:
        patron = '<article.*?<a href="(.*?)">.*?<img src="(.*?)" alt=.*?'
        patron += '<h3 class="Title">(.*?)<\/h3>.*?date_range">(\d+)<'
        matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
        url = scrapedurl
        if year == '':
            year = '-'
        # Titles may come as "Name | AltName"; keep the first part only.
        if "|" in scrapedtitle:
            scrapedtitle = scrapedtitle.split("|")
            cleantitle = scrapedtitle[0].strip()
        else:
            cleantitle = scrapedtitle
        cleantitle = re.sub('\(.*?\)', '', cleantitle)
        if not config.get_setting('unify'):
            title = '%s [%s]' % (cleantitle, year)
        else:
            title = cleantitle
        # Fix: only prepend the scheme to protocol-relative srcs ("//cdn...");
        # the original always prefixed 'http:', producing broken URLs such as
        # 'http:https://...' for absolute thumbnails.
        if scrapedthumbnail.startswith('http'):
            thumbnail = scrapedthumbnail
        else:
            thumbnail = 'http:' + scrapedthumbnail
        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        infoLabels={'year': year}
                        )
        # Series pages live under a 'series' path; route them to seasons().
        if 'series' not in url:
            new_item.contentTitle = cleantitle
            new_item.action = 'findvideos'
        else:
            new_item.contentSerieName = cleantitle
            new_item.action = 'seasons'
        itemlist.append(new_item)
    tmdb.set_infoLabels_itemlist(itemlist, True)
    # Paginación
    url_next_page = scrapertools.find_single_match(full_data, '<a class="next.*?href="([^"]+)">')
    if url_next_page:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
                             type=item.type))
    return itemlist
def section(item):
    """List the entries of a site section (genres or alphabetical index).

    item.section selects which links to scrape; item.type propagates the
    content type and appends the matching tr_post_type query parameter.
    """
    logger.info()
    itemlist = []
    # tr_post_type=2 filters series, =1 filters movies.
    if item.type == 'Series':
        url = host + '?tr_post_type=2'
    else:
        url = host + '?tr_post_type=1'
    data = get_source(url)
    action = 'list_all'
    if item.section == 'genre':
        patron = '<a href="([^ ]+)" class="Button STPb">(.*?)</a>'
    elif item.section == 'alpha':
        patron = '<li><a href="(.*?letter.*?)">(.*?)</a>'
    else:
        # Fix: an unknown section previously left `patron` undefined and
        # raised NameError at re.compile below; return an empty list instead.
        return itemlist
    matches = re.compile(patron, re.DOTALL).findall(data)
    for data_one, data_two in matches:
        url = data_one
        title = data_two
        if title != 'Ver más':
            if item.type == 'Series':
                url = url + '?tr_post_type=2'
            else:
                url = url + '?tr_post_type=1'
            # Genre entries that are themselves "serie" links are redundant.
            if 'serie' in title.lower():
                continue
            new_item = Item(channel=item.channel, title=title, url=url, action=action, section=item.section,
                            type=item.type)
            itemlist.append(new_item)
    return itemlist
def seasons(item):
    """List the seasons available for a series page.

    Scrapes every 'Temporada <span>N' marker and builds one item per season
    (action 'episodesxseasons'), plus a video-library entry when supported.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'Temporada <span>(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for season in matches:
        # Regex captures digits only, so this replace is normally a no-op.
        season = season.lower().replace('temporada', '')
        # NOTE(review): the same infoLabels dict is mutated and shared by
        # every season item — confirm Item copies it on construction.
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Return the full episode list of a series by flattening every season."""
    logger.info()
    episodes = []
    for season_item in seasons(item):
        episodes.extend(episodesxseasons(season_item))
    return episodes
def episodesxseasons(item):
    """List the episodes of a single season.

    Expects item.infoLabels['season'] to be set already (done by seasons()).
    """
    logger.info()
    itemlist = []
    full_data = get_source(item.url)
    # Narrow the HTML to the season's episode table.
    # NOTE(review): matches from the FIRST 'Temporada' marker regardless of
    # the requested season — confirm the page layout makes this correct.
    data = scrapertools.find_single_match(full_data, 'Temporada <span>\d+.*?</ul>')
    patron = '<span class="Num">(\d+)<.*?<a href="([^"]+)".*?"MvTbTtl".*?">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedepisode, scrapedurl, scrapedtitle in matches:
        # Fix: give each episode its own infoLabels copy. The original mutated
        # a single shared dict, so every item could end up carrying the last
        # episode number.
        infoLabels = dict(item.infoLabels)
        infoLabels['episode'] = scrapedepisode
        title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
        itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action='findvideos',
                             infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    """Resolve the playable video links of a movie/episode page.

    Each player option ("OptN") embeds an intermediate page whose iframe
    holds the real video URL; language and quality come from the option tab
    text, formatted as "<language> - <quality>".
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    data = scrapertools.decodeHtmlentities(data)
    patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for option, scrapedurl in matches:
        scrapedurl = scrapedurl.replace('"', '').replace('&#038;', '&')
        # Follow the intermediate page to reach the real player iframe.
        data_video = get_source(scrapedurl)
        url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)" frameborder')
        # NOTE(review): split('-') raises IndexError below if the tab text
        # has no separator, and IDIOMAS[language] raises KeyError for an
        # unlisted language — confirm against the live site.
        opt_data = scrapertools.find_single_match(data, '"%s"><span>.*?</span>.*?<span>(.*?)</span>' % option).split('-')
        language = opt_data[0].strip()
        language = re.sub('\(|\)', '', language)
        quality = opt_data[1].strip()
        if url != '' and 'youtube' not in url:
            itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
                                 action='play', infoLabels=item.infoLabels))
        elif 'youtube' in url:
            trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
    # Fill in server names and expand the '%s' placeholder in the titles.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]' % (i.server.capitalize(),
                                                                                                i.language, i.quality))
    # `trailer` only exists when a youtube link was found; the bare except
    # deliberately absorbs the NameError otherwise.
    try:
        itemlist.append(trailer)
    except:
        pass
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                 action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Run a site search; an empty query yields an empty list."""
    logger.info()
    # The search URL expects '+' between words.
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.section = 'search'
    if texto == '':
        return []
    return list_all(item)
def newest(categoria):
    """Feed the global "Novedades" menu with this channel's latest entries.

    Maps the requested category to a listing URL, runs list_all() and drops
    the trailing pagination item. Any scraping error is logged and an empty
    list returned so one broken channel doesn't break the whole menu.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host
        elif categoria == 'infantiles':
            item.url = host + 'animacion/?tr_post_type=1'
        elif categoria == 'terror':
            item.url = host + 'terror/?tr_post_type=1'
        item.type = 'MovieList'
        item.section = 'search'
        itemlist = list_all(item)
        # Fix: guard against an empty result before peeking at the last
        # element; the original raised IndexError on empty listings, which
        # the except swallowed but polluted the log.
        if itemlist and itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -0,0 +1,53 @@
{
"id": "reyanime",
"name": "Rey Anime",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://reyanimeonline.com/assets/img/logo.png",
"banner": "",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"VOSE"
]
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,303 @@
# -*- coding: utf-8 -*-
# -*- Channel ReyAnime -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core import servertools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
host = "https://reyanimeonline.com/"
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'reyanime')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'reyanime')
IDIOMAS = {'latino':'LAT', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango', 'mp4upload', 'gvideo']
def mainlist(item):
    """Build the channel's root menu (anime sub-menu, movies, genres,
    alphabetical index and search), and register the AutoPlay option."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    menu = [
        Item(channel=item.channel, title="Anime", action="sub_menu", thumbnail=''),
        Item(channel=item.channel, title="Películas", action="list_all",
             thumbnail=get_thumb('movies', auto=True),
             url=host + 'inc/a.pelicula.php', page='1'),
        Item(channel=item.channel, title="Géneros", action="section",
             thumbnail=get_thumb('genres', auto=True),
             url=host + 'estrenos/'),
        Item(channel=item.channel, title="Alfabetico", action="section",
             thumbnail=get_thumb('alphabet', auto=True),
             url=host + 'estrenos/'),
        Item(channel=item.channel, title="Buscar", action="search",
             url=host + 'resultado/?buscar=',
             thumbnail=get_thumb('search', auto=True),
             fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'),
    ]
    autoplay.show_option(item.channel, menu)
    return menu
def sub_menu(item):
    """Build the "Anime" sub-menu: new episodes plus the paginated and
    non-paginated AJAX listings."""
    menu = []
    menu.append(Item(channel=item.channel, title="Nuevos Capitulos", action="new_episodes",
                     thumbnail=get_thumb('new episodes', auto=True), url=host))
    # These two endpoints are paginated, so they carry an initial page number
    menu.append(Item(channel=item.channel, title="Recientes", action="list_all",
                     thumbnail=get_thumb('recents', auto=True),
                     url=host + 'inc/a.emision.php', page='1'))
    menu.append(Item(channel=item.channel, title="Todas", action="list_all",
                     thumbnail=get_thumb('all', auto=True),
                     url=host + 'inc/a.animes.php', page='1'))
    menu.append(Item(channel=item.channel, title="Mas Vistos", action="list_all",
                     thumbnail=get_thumb('more watched', auto=True),
                     url=host + 'mas-vistos/'))
    menu.append(Item(channel=item.channel, title="Recomendados", action="list_all",
                     thumbnail=get_thumb('recomended', auto=True),
                     url=host + 'recomendado/'))
    return menu
def get_source(url, referer=None):
    """Download a page (optionally with a Referer header) and strip
    newlines, tabs, &nbsp;, <br> tags and repeated whitespace."""
    logger.info()
    if referer is None:
        response = httptools.downloadpage(url)
    else:
        response = httptools.downloadpage(url, headers={'Referer': referer})
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", response.data)
def section(item):
    """Scrape the genre or alphabetical index and return one Item per
    entry, deduplicated and sorted by title.

    Each resulting Item points at the matching AJAX listing endpoint and
    carries the raw value in ``param`` for list_all's POST request.
    """
    logger.info()
    itemlist = []
    seen = []
    full_data = get_source(item.url)
    if item.title == 'Géneros':
        data = scrapertools.find_single_match(full_data, 'id="generos"(.*?)"letras')
        endpoint = 'inc/a.genero.php'
    elif item.title == 'Alfabetico':
        data = scrapertools.find_single_match(full_data, 'class="letras(.*?)<h2')
        endpoint = 'inc/a.letra.php'
    patron = 'href="([^"]+)">(?:<i class="fa fa-circle-o"></i>|)([^<]+)<'
    for scrapedurl, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        if scrapedurl in seen:
            continue
        param = scrapedtitle.strip()
        itemlist.append(Item(channel=item.channel, title=param.capitalize(),
                             url=host + endpoint, action='list_all',
                             param=param, page='1'))
        seen.append(scrapedurl)
    return sorted(itemlist, key=lambda i: i.title)
def list_all(item):
    """List titles from the site's paginated AJAX endpoints.

    Plain listings are requested with only a ``page`` POST field; genre
    and letter listings additionally send ``genero``/``letra`` taken
    from ``item.param`` (single characters are letters, longer strings
    are genre names). Appends a "next page" item and resolves metadata
    through TMDB before returning.
    """
    logger.info()
    itemlist = []
    if item.param == '':
        data = httptools.downloadpage(item.url, post='page=%s' % item.page).data
    else:
        # Single-character params are letter filters; longer ones are genres
        if len(item.param) > 1:
            var = 'genero'
        else:
            var = 'letra'
        data = httptools.downloadpage(item.url, post='%s=%s&page=%s' % (var, item.param, item.page)).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = '<div class="col-md-4"><a href="([^"]+)" class="front"><img src="([^"]+)" alt="([^"]+)".*?'
    patron += '<div class="categoria">([^"]+).*?"sinopsis">.*?<p>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, type, scrapedplot in matches:
        url = scrapedurl
        thumbnail = scrapedthumbnail
        new_item = Item(channel=item.channel,
                        action='episodios',
                        title=scrapedtitle,
                        url=url,
                        thumbnail=thumbnail,
                        plot=scrapedplot,
                        )
        if 'pelicula' in url:
            new_item.contentTitle = scrapedtitle
            # Fix: original read ``infoLabels['year':'-']`` — a slice lookup
            # that raises TypeError on a dict. The intent was to assign a
            # placeholder year so TMDB lookups don't filter by year.
            new_item.infoLabels['year'] = '-'
        else:
            new_item.contentSerieName = scrapedtitle
        if 'reyanime' not in scrapedtitle:
            itemlist.append(new_item)
    # Paginacion: pages are plain integers carried as strings in item.page
    if len(itemlist) > 0 and item.page != '':
        next_page_value = int(item.page) + 1
        next_page = str(next_page_value)
        itemlist.append(item.clone(title=">> Página siguiente",
                                   page=next_page,
                                   thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
                                   ))
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist
def search(item, texto):
    """Run a site search by appending the query to item.url and
    delegating to list_all; errors are logged and yield an empty list."""
    logger.info()
    item.url += texto.replace(" ", "+")
    try:
        if not texto:
            return []
        return list_all(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def new_episodes(item):
    """Scrape the home page's latest-episodes strip into playable items."""
    logger.info()
    patron = 'class="overarchingdiv"> <a href="([^"]+)".*?src="([^"]+)".*?"overtitle">([^<]+)<.*?"overepisode">([^<]+)<'
    data = get_source(item.url)
    results = []
    for ep_url, ep_thumb, ep_name, ep_number in re.compile(patron, re.DOTALL).findall(data):
        results.append(Item(channel=item.channel,
                            title='%s - Episodio %s' % (ep_name, ep_number),
                            url=ep_url,
                            thumbnail=ep_thumb,
                            action='findvideos'))
    return results
def episodios(item):
    # Scrape the episode list of a series/movie page. Episodes are listed
    # newest-first on the site; the list is reversed before returning.
    # For movie pages ('pelicula' in the URL) the single entry is resolved
    # straight to its video links via findvideos.
    logger.info()
    itemlist = []
    full_data = get_source(item.url)
    # Restrict matching to the episode <ul> block
    data = scrapertools.find_single_match(full_data, '>Lista de episodios:(.*?)</ul>')
    patron = '<li class="item_cap"> <a href="([^"]+)" class=".*?">.*?</i>.*?Episodio (\d+)([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # NOTE(review): the same infoLabels dict is mutated each iteration and
    # passed to every Item — this is only correct if Item copies infoLabels
    # on assignment (presumably Alfa's InfoLabels does); TODO confirm.
    infoLabels = item.infoLabels
    for scrapedurl, episode, lang in matches:
        # Language: URL marker wins; otherwise "sub"/empty suffix means VOSE
        if 'latino' in scrapedurl:
            lang = 'LAT'
        elif 'sub' in lang.lower() or lang.strip() == '':
            lang='VOSE'
        # All episodes are exposed as season 1
        title = "1x" + episode + " - Episodio %s" % episode
        url = scrapedurl
        infoLabels['season'] = '1'
        infoLabels['episode'] = episode
        itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
                             action='findvideos', language=lang, infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Site lists newest first; present oldest first
    itemlist = itemlist[::-1]
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
                 action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
                 extra1='library'))
    if 'pelicula' in item.url and len(itemlist) > 0:
        return findvideos(itemlist[0])
    else:
        return itemlist
def findvideos(item):
    """Extract the embedded player URLs for an episode/movie page.

    Self-hosted embeds (reyanimeonline) are resolved one level deeper to
    their real file URL. Results are labelled by server, optionally
    link-checked, filtered by language and handed to AutoPlay.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = "tabsArray\['\d+'\] =.*?src='([^']+)'"
    for video_url in re.compile(patron, re.DOTALL).findall(data):
        if 'reyanimeonline' in video_url:
            # Own-domain embed: fetch it and pull out the real file URL
            embed_data = get_source(video_url)
            video_url = scrapertools.find_single_match(embed_data, '"file":"([^"]+)",')
        if not video_url:
            continue
        itemlist.append(Item(channel=item.channel, title='%s', url=video_url, action='play',
                             language=item.language, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
def newest(categoria):
    """Entry point for Alfa's global "novedades": only the 'anime'
    category is supported and maps to the home-page episode strip."""
    if categoria != 'anime':
        return []
    item = Item()
    item.url = host
    return new_episodes(item)

View File

@@ -33,8 +33,8 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host + 'pelicula/',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
# thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
@@ -48,7 +48,6 @@ def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
return data
@@ -56,43 +55,45 @@ def list_all(item):
logger.info()
itemlist = []
try:
patron = '<article id="post-.*?<a href="([^"]+)">.*?"Langu">([^<]+)<.*?src="([^"]+)".*?'
patron += '<h3 class="Title">([^<]+)<\/h3>.*?date_range">(\d{4})<'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
#try:
data = get_source(item.url)
for scrapedurl, language, scrapedthumbnail, scrapedtitle, year in matches:
patron = '<article id="post-.*?<a href="([^"]+)">.*?"Langu">([^ ]+) .*?<.*?src="([^"]+)".*?'
patron += '<h3 class="Title">([^<]+)<\/h3>.*?date_range">(\d{4})<'
url = scrapedurl
if "|" in scrapedtitle:
scrapedtitle = scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
else:
contentTitle = scrapedtitle
matches = re.compile(patron, re.DOTALL).findall(data)
contentTitle = re.sub('\(.*?\)', '', contentTitle)
for scrapedurl, language, scrapedthumbnail, scrapedtitle, year in matches:
title = '%s [%s]' % (contentTitle, year)
thumbnail = 'http:' + scrapedthumbnail
itemlist.append(Item(channel=item.channel, action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=IDIOMAS[language],
infoLabels={'year': year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
url = scrapedurl
if "|" in scrapedtitle:
scrapedtitle = scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
else:
contentTitle = scrapedtitle
# Paginación
contentTitle = re.sub('\(.*?\)', '', contentTitle)
url_next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
section=item.section))
except:
pass
title = '%s [%s]' % (contentTitle, year)
thumbnail = 'http:' + scrapedthumbnail
itemlist.append(Item(channel=item.channel, action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
language=IDIOMAS[language],
infoLabels={'year': year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
# Paginación
url_next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
section=item.section))
#except:
# pass
return itemlist
def search_results(item):
@@ -100,7 +101,7 @@ def search_results(item):
itemlist = []
try:
patron = '<article id="post-.*?<a href="([^"]+)">.*?src="([^"]+)".*?"Langu">([^<]+)<.*?'
patron = '<article id="post-.*?<a href="([^"]+)">.*?src="([^"]+)".*?"Langu">([^ ]+) .*?<.*?'
patron += '<h3 class="Title">([^<]+)<\/h3>.*?date_range">(\d{4})<'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -169,7 +170,7 @@ def findvideos(item):
data_video = get_source(scrapedurl)
opt_data = scrapertools.find_single_match(data, '"%s"><span>.*?</span>.*?<span>([^<]+)</span>' %
option).split('-')
language = opt_data[0].strip()
language = scrapertools.find_single_match(opt_data[0].strip(), '([^ ]+) ')
quality = opt_data[1].strip()
quality = re.sub('Full|HD', '', quality).strip()
if 'rip' in quality.lower():