Merge pull request #564 from Alfa-beto/fixes

Correcciones y Novedades
This commit is contained in:
Alfa
2019-02-27 15:31:56 -05:00
committed by GitHub
7 changed files with 529 additions and 38 deletions

View File

@@ -130,7 +130,7 @@ def anyos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="([^"]+)">([^<]+)</a><br'
patron = '<a href=([^>]+)>([^<]+)</a><br'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -171,8 +171,8 @@ def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li id="menu-item-.*?" class="menu-item menu-item-type-taxonomy menu-item-object-category ' \
'menu-item-.*?"><a href="([^"]+)">([^<]+)<\/a></li>'
patron = '<li id=menu-item-.*? class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-.*?'
patron +='"><a href=([^>]+)>([^<]+)<\/a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
@@ -206,8 +206,8 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="home_post_cont.*? post_box">.*?<a href="(.*?)".*?'
patron += 'src="(.*?)".*?title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
patron = '<div class="home_post_cont.*? post_box">.*?<a href=([^>]+)>.*?src=([^ ]+).*?'
patron += 'title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
@@ -232,7 +232,7 @@ def peliculas(item):
))
try:
patron = "<link rel='next' href='([^']+)' />"
patron = "<link rel=next href=([^>]+)>"
next_page = re.compile(patron, re.DOTALL).findall(data)
itemlist.append(Item(channel=item.channel,
action="peliculas",
@@ -298,7 +298,7 @@ def findvideos(item):
lang = 'latino'
data = httptools.downloadpage(item.url).data
patron = 'target="_blank".*? service=".*?" data="(.*?)"><li>(.*?)<\/li>'
patron = 'target=_blank.*? service=.*? data="(.*?)"><li>(.*?)<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
server_url = {'YourUpload': 'https://www.yourupload.com/embed/',
@@ -315,7 +315,6 @@ def findvideos(item):
if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
video_id = dec(video_cod, dec_value)
logger.debug('server_id %s' % server_id)
if server_id in server_url:
server = server_id.lower()
thumbnail = item.thumbnail

View File

@@ -15,12 +15,10 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'latino': 'Latino'}
IDIOMAS = {'Latino': 'Latino'}
list_language = IDIOMAS.values()
CALIDADES = {'1080p': '1080p', '720p': '720p', '480p': '480p', '360p': '360p'}
list_quality = CALIDADES.values()
list_servers = ['directo', 'openload']
list_quality = []
list_servers = ['dostream', 'openload']
host = 'http://doomtv.net/'
@@ -28,6 +26,8 @@ host = 'http://doomtv.net/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
@@ -65,6 +65,8 @@ def mainlist(item):
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -75,7 +77,6 @@ def get_source(url, referer=None):
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
logger.debug(data)
return data
def lista(item):
@@ -98,9 +99,9 @@ def lista(item):
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]:
url = 'http:'+scrapedurl
thumbnail = scrapedthumbnail
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
url = host+scrapedurl
thumbnail = 'https:'+scrapedthumbnail.strip()
filtro_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb.strip()}
filtro_list = filtro_list.items()
title = scrapedtitle
@@ -144,7 +145,7 @@ def seccion(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = 'http:'+ scrapedurl
url = host+scrapedurl
title = scrapedtitle
thumbnail = ''
if url not in duplicado:
@@ -196,22 +197,36 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
patron = 'id="(tab\d+)"><div class="movieplay">.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
language = 'Latino'
if 'http' not in urls:
urls = 'https:'+urls
if not config.get_setting('unify'):
title = ' [%s]' % language
else:
title = '%s'
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
title= '%s'+ title,
contentTitle=item.title,
action='play',
language = IDIOMAS[language],
infoLabels = item.infoLabels
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
@@ -223,4 +238,5 @@ def findvideos(item):
contentTitle=item.contentTitle,
))
return itemlist

View File

@@ -4,7 +4,7 @@
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://www.dospelis.com/wp-content/uploads/2018/07/dospelislogo.png",
"thumbnail": "https://www.dospelis.net/wp-content/uploads/2019/02/logodospelisamor.png",
"banner": "",
"categories": [
"movie",

View File

@@ -90,11 +90,11 @@ def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host+'/'+item.type)
data = get_source(host+item.type)
if 'Genero' in item.title:
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >(.*?)/i>'
patron = '<liclass="cat-item cat-item-\d+"><ahref=([^ ]+) .*?>(.*?)/i>'
elif 'Año' in item.title:
patron = '<li><a href="(.*?release.*?)">([^<]+)</a>'
patron = '<li><ahref=(.*?release.*?)>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -102,7 +102,7 @@ def section(item):
title = scrapedtitle
plot=''
if 'Genero' in item.title:
quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>(.*?)<')
quantity = scrapertools.find_single_match(scrapedtitle,'<i>(.*?)<')
title = scrapertools.find_single_match(scrapedtitle,'(.*?)</')
title = title
plot = '%s elementos' % quantity.replace('.','')
@@ -123,9 +123,8 @@ def list_all(item):
data = get_source(item.url)
if item.type == 'movies':
patron = '<article id="post-\d+" class="item movies"><div class="poster">.?<img src="([^"]+)" alt="([^"]+)">.*?'
patron +='"quality">([^<]+)</span><\/div>.?<a href="([^"]+)">.*?'
patron +='<\/h3>.?<span>([^"]+)<\/span><\/div>.*?"flags"(.*?)metadata'
patron = '<articleid=post-\d+ class="item movies"><divclass=poster>.?<imgsrc=([^ ]+) alt="([^"]+)">.*?'
patron += 'quality>([^<]+)<.*?<ahref=([^>]+)>.*?<\/h3><span>([^<]+)<.*?flags(.*?)metadata'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -148,8 +147,8 @@ def list_all(item):
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<article id="post-\d+" class="item tvshows">.?<div class="poster">.?<img src="([^"]+)"'
patron += ' alt="([^"]+)">.*?<a href="([^"]+)">.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
patron = '<articleid=post-\d+ class="item tvshows">.?<divclass=poster>.?<imgsrc=([^ ]+)'
patron += ' alt="([^"]+)">.*?<ahref=([^>]+)>.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
@@ -168,7 +167,7 @@ def list_all(item):
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
url_next_page = scrapertools.find_single_match(data,'<linkrel=next href=([^>]+)>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
@@ -180,7 +179,7 @@ def seasons(item):
itemlist=[]
data=get_source(item.url)
patron='Temporada.?\d+'
patron='title>Temporada.?(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -214,7 +213,7 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron='class="numerando">%s - (\d+)</div>.?<div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
patron='class=numerando>%s - (\d+)</div>.?<divclass=episodiotitle>.?<ahref=([^>]+)>([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -236,12 +235,15 @@ def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id="option-(\d+)".*?rptss" src="([^"]+)" frameborder'
patron = 'id=option-(\d+).*?src=([^ ]+) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
lang=''
for option, scrapedurl in matches:
lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option)
quality = ''
if 'goo.gl' in scrapedurl:
new_data = httptools.downloadpage(scrapedurl, follow_redirects=False).headers
scrapedurl = new_data['location']
if lang not in IDIOMAS:
lang = 'en'
title = '%s %s'
@@ -291,8 +293,7 @@ def search_results(item):
itemlist=[]
data=get_source(item.url)
patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" \/>.*?meta.*?'
patron += '"year">([^<]+)<(.*?)<p>([^<]+)<\/p>'
patron = '<article>.*?<ahref=([^>]+)><imgsrc=([^ ]+) alt="([^"]+)">.*?year>([^<]+)<(.*?)<p>([^<]+)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

View File

@@ -0,0 +1,78 @@
{
"id": "pelix",
"name": "Pelix",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://pelix.tv/build/images/logo.png",
"banner": "",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Castellano",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,352 @@
# -*- coding: utf-8 -*-
# -*- Channel Pelix -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import base64
from channelselector import get_thumb
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from lib import jsunpack
from core.item import Item
from channels import filtertools
from channels import autoplay
from platformcode import config, logger
# Site numeric language ids -> display names used across the channel.
IDIOMAS = {'6': 'Latino', '7': 'Castellano'}
list_language = IDIOMAS.values()
# Site numeric quality ids -> display labels ('3' and '4' both map to 720p).
CALIDADES = {'1': '1080p', '3': '720p', '4':'720p'}
list_quality = CALIDADES.values()
# Servers this channel can resolve (consumed by autoplay/servertools).
list_servers = [
    'openload',
    'streamango',
    'fastplay',
    'rapidvideo',
    'netutv'
]
# User settings declared in the channel's JSON: whether to verify links exist
# and how many links to check.
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'pelix')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'pelix')
# Base URL of the site this channel scrapes.
host = 'https://pelix.tv/'
def mainlist(item):
    """Build the channel's root menu: movies submenu, series listing and search."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = [
        Item(channel=item.channel, title='Peliculas', action='menu_movies',
             thumbnail=get_thumb('movies', auto=True), page=0),
        Item(channel=item.channel, title='Series', url=host + 'home/genero/5',
             action='list_all', type='tvshows',
             thumbnail=get_thumb('tvshows', auto=True), page=0),
        item.clone(title="Buscar", action="search", url=host + 'movies/headserach',
                   thumbnail=get_thumb("search", auto=True), extra='movie'),
    ]
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def menu_movies(item):
    """Build the movies submenu (latest, genre filter, year filter)."""
    logger.info()
    entries = [
        Item(channel=item.channel, title='Ultimas', url=host, path='home/newest?show=',
             action='list_all', thumbnail=get_thumb('last', auto=True),
             type='movies', page=0),
        # 'Mas Vistas' entry kept disabled, as in the original:
        # Item(channel=item.channel, title='Mas Vistas', url=host, path='home/views?show=',
        #      action='list_all', thumbnail=get_thumb('all', auto=True), type='movies', page=0),
        Item(channel=item.channel, title='Genero', action='section',
             thumbnail=get_thumb('genres', auto=True), type='movies'),
        Item(channel=item.channel, title='Por Año', action='section',
             thumbnail=get_thumb('year', auto=True), type='movies'),
    ]
    return entries
def get_source(url):
    """Download *url* and return its HTML with layout whitespace collapsed."""
    logger.info()
    raw = httptools.downloadpage(url).data
    return re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", raw)
def get_language(lang_data):
    """Return the distinct language codes found in a block of flag-image markup.

    English flags ('en') are reported as 'vose'; order of first appearance
    is preserved.
    """
    logger.info()
    codes = scrapertools.find_multiple_matches(lang_data, '/flags/(.*?).png\)')
    language = []
    for code in codes:
        code = 'vose' if code == 'en' else code
        if code not in language:
            language.append(code)
    return language
def section(item):
    """List genre or year filters scraped from the site's home-page menus."""
    logger.info()
    data = get_source(host)
    # Narrow the page down to the relevant dropdown before matching links.
    if 'Genero' in item.title:
        data = scrapertools.find_single_match(data, '<a href="#">Género</a>(.*?)</ul>')
    elif 'Año' in item.title:
        data = scrapertools.find_single_match(data, '<a href="#">Año</a>(.*?)</ul>')
    link_pattern = '<a href="([^"]+)">([^<]+)</a>'
    entries = re.compile(link_pattern, re.DOTALL).findall(data)
    return [Item(channel=item.channel, url=link, title=label, action='list_all',
                 type=item.type, page=0)
            for link, label in entries]
def list_all(item):
    """List the movies/series of a section, following the site's Ajax pagination.

    Page 0 is scraped from the plain HTML page (item.url + item.path); later
    pages are fetched from the site's Ajax endpoint with a POST body carrying
    the page number. Returns movie items (action 'findvideos') or series items
    (action 'seasons') depending on item.type, plus a 'Siguiente >>' item.
    """
    logger.info()
    # Removed a redundant function-level `import urllib`: the module is
    # already imported at the top of the file.
    itemlist = []
    if item.page == 0:
        data = get_source(item.url + item.path)
    else:
        post = urllib.urlencode({'page': str(item.page)})
        # NOTE(review): the Ajax endpoint is hard-coded to 'newest', so genre
        # and year sections paginate through the same endpoint — confirm this
        # is the intended site behaviour.
        data = httptools.downloadpage(host + 'home/%sAjax/%s' % ('newest', str(item.page)),
                                      post=post).data
        data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = '<div class="base-used">.*?<a href="([^"]+)">.*?<img class="img-thumbnail" src="([^"]+)".*?'
    patron += '<h2>([^<]+)</h2><p class="year">(\d{4})</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
        new_item = Item(channel=item.channel,
                        title='%s [%s]' % (scrapedtitle, year),
                        url=scrapedurl,
                        thumbnail=scrapedthumbnail,
                        infoLabels={'year': year})
        if item.type == 'movies':
            new_item.action = 'findvideos'
            new_item.contentTitle = scrapedtitle
        else:
            new_item.action = 'seasons'
            new_item.contentSerieName = scrapedtitle
        itemlist.append(new_item)
    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    # Paginación — item.page appears to be an offset advanced in steps of 30
    # (TODO confirm against the Ajax endpoint).
    itemlist.append(item.clone(title="Siguiente >>", url=item.url, action='list_all',
                               page=item.page + 30, path=item.path))
    return itemlist
def seasons(item):
    """List a show's seasons; fall back to findvideos() when none are found.

    Bug fix: re.findall() returns a list — never None — so the original
    `if matches is None` check was dead code and the findvideos fallback
    could never trigger. It now fires on an empty match list.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = 'data-type="host">(Temporada \d+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if not matches:  # fixed: findall yields [] (not None) when nothing matches
        return findvideos(item)
    infoLabels = item.infoLabels
    for season in matches:
        season = season.lower().replace('temporada', '')
        # NOTE(review): infoLabels is a shared dict mutated per iteration —
        # verify Item copies it, otherwise every item sees the last season.
        infoLabels['season'] = season
        title = 'Temporada %s' % season
        itemlist.append(Item(channel=item.channel, title=title, url=item.url,
                             action='episodesxseasons', infoLabels=infoLabels))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                 url=item.url, action="add_serie_to_library", extra="episodios",
                 contentSerieName=item.contentSerieName))
    return itemlist
def episodios(item):
    """Flatten every season's episode list into a single itemlist."""
    logger.info()
    episodes = []
    for season_item in seasons(item):
        episodes.extend(episodesxseasons(season_item))
    return episodes
def episodesxseasons(item):
    """List the episodes of the season stored in item.infoLabels['season']."""
    logger.info()
    itemlist = []
    seen = []
    data = get_source(item.url)
    patron = 'data-id="(\d+)" season="%s" id_lang="(\d+)" id_movies_types="\d".*?' \
             'block;">([^<]+)</a>' % item.infoLabels['season']
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for episode_num, lang, episode_title in matches:
        infoLabels['episode'] = episode_num
        label = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], episode_title)
        # The page repeats an episode once per language — keep the first only.
        if episode_num not in seen:
            itemlist.append(Item(channel=item.channel, title=label, url=item.url,
                                 action='findvideos', infoLabels=infoLabels))
            seen.append(episode_num)
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
def findvideos(item):
    # Resolve the playable links for a movie or a single episode.
    logger.info()
    itemlist = []
    data = get_source(item.url)
    # Pages without the episode="0" season="0" marker are series landing
    # pages: reset the item to series mode and delegate to seasons().
    if 'episode="0" season="0"' not in data and item.contentType != 'episode':
        item.contentSerieName = item.contentTitle
        item.contentTitle = None
        item.contentType = None
        item.infoLabels = None
        tmdb.set_infoLabels_item(item, seekTmdb=True)
        return seasons(item)
    if 'episode="0" season="0"' not in data:
        season = item.infoLabels['season']
        episode = item.infoLabels['episode']
    else:
        # Marker present: presumably movies are published as episode 0 of
        # season 0 — TODO confirm against the site markup.
        season = '0'
        episode = '0'
    patron = '<span class="movie-online-list" id_movies_types="(\d)".*?'
    patron += 'episode="%s" season="%s" id_lang="([^"]+)".*?online-link="([^"]+)" link-id="\d+">' % (episode, season)
    matches = re.compile(patron, re.DOTALL).findall(data)
    for quality_value, lang_value, scrapedurl in matches:
        # Unknown ids fall back to '6' (Latino) / '3' (720p) per the maps above.
        if lang_value not in IDIOMAS:
            lang_value = '6'
        if quality_value not in CALIDADES:
            quality_value = '3'
        language = IDIOMAS[lang_value]
        quality = CALIDADES[quality_value]
        if not config.get_setting("unify"):
            title = ' [%s] [%s]' % (quality, language)
        else:
            title = ''
        # The leading '%s' placeholder is filled with the server name by the
        # get_servers_itemlist lambda below.
        itemlist.append(Item(channel=item.channel, url=scrapedurl, title='%s'+title, action='play',
                             language=language, quality=quality, infoLabels=item.infoLabels))
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Requerido para Filtrar enlaces
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    # Requerido para FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language)
    # Requerido para AutoPlay
    autoplay.start(itemlist, item)
    itemlist = sorted(itemlist, key=lambda it: it.language)
    if item.contentType != 'episode':
        if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
                     action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def search(item, texto):
    """Prepare the POST body for a site search and run it.

    Spaces in the query are '+'-encoded; an empty query returns [] without
    hitting the site. (Removed a redundant `item.url = item.url`
    self-assignment from the original.)
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.post = 'search=%s' % texto
    if texto != '':
        return search_results(item)
    return []
def search_results(item):
    """Parse the Ajax search response into movie items ready for findvideos."""
    logger.info()
    results = []
    headers = {'Referer': host, 'X-Requested-With': 'XMLHttpRequest'}
    data = httptools.downloadpage(item.url, headers=headers, post=item.post).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = 'class="results\d+".*?<a href="([^"]+)"><img src="([^"]+)".*?#\w+">([^<]+)<'
    for scrapedurl, scrapedthumb, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        # Titles may carry the year in parentheses: "Name (2019)".
        if '(' in scrapedtitle:
            title = scrapertools.find_single_match(scrapedtitle, '(.*?)\(').strip()
            year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
        else:
            title = scrapedtitle
            year = '-'
        results.append(Item(channel=item.channel, title=title, url=scrapedurl,
                            thumbnail=scrapedthumb, action='findvideos',
                            infoLabels={'year': year}))
    return results
def newest(categoria):
    """Entry point for Alfa's global 'Novedades' sections.

    Returns the newest items for *categoria* ('peliculas', 'infantiles',
    'terror'); on any scraping error logs the exception info and returns []
    so the aggregated listing keeps working.
    """
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.type = 'movies'
        item.page = 0
        if categoria in ['peliculas']:
            item.url = host + 'home/newest?show='
        elif categoria == 'infantiles':
            item.url = host + 'home/genero/54'
        elif categoria == 'terror':
            item.url = host + 'home/genero/49'
        itemlist = list_all(item)
        # Drop the pagination pseudo-item. Guard against an empty result set:
        # the original indexed itemlist[-1] unconditionally and relied on the
        # except clause to swallow the IndexError.
        if itemlist and itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist

View File

@@ -114,6 +114,49 @@ def list_all(item):
))
return itemlist
def list_from_genre(item):
    """List the shows of a genre page and append its 'next page' link.

    The show title is derived from the /capitulos/ URL slug (dashes become
    spaces) since the scraped markup carries no usable title text.
    Removed an unused `contentSerieName = ''` local from the original.
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)
    patron = '<div style="float.*?<a href="([^"]+)">.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail in matches:
        title = scrapertools.find_single_match(scrapedurl,
                                               'https://seriesblanco.org/capitulos/([^/]+)/')
        title = title.replace('-', ' ').capitalize()
        itemlist.append(Item(channel=item.channel,
                             action='seasons',
                             title=title,
                             url=scrapedurl,
                             thumbnail=scrapedthumbnail,
                             contentSerieName=title,
                             context=filtertools.context(item, list_language, list_quality),
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Paginacion
    if itemlist != []:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" ><i class="Next')
        if next_page != '':
            itemlist.append(Item(channel=item.channel,
                                 action="list_from_genre",
                                 title='Siguiente >>>',
                                 url=next_page,
                                 thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
                                 ))
    return itemlist
def section(item):
logger.info()
@@ -121,8 +164,10 @@ def section(item):
data = get_source(item.url)
if item.title == 'Generos':
patron = '<li><a href="([^ ]+)"><i class="fa fa-bookmark-o"></i> ([^<]+)</a></li>'
action = 'list_from_genre'
elif item.title == 'A - Z':
patron = '<a dir="ltr" href="([^"]+)" class="label label-primary">([^<]+)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -130,7 +175,7 @@ def section(item):
url = scrapedurl
title = scrapedtitle
itemlist.append(Item(channel=item.channel,
action='list_all',
action=action,
title=title,
url=url
))