From 4d13c23a7e597c291ef5904124e70a9df7702328 Mon Sep 17 00:00:00 2001
From: pipcat
Date: Sat, 13 Oct 2018 10:37:19 +0200
Subject: [PATCH 01/19] Avoid calling tmdb with the whole itemlist in
 findvideos
---
plugin.video.alfa/channels/peliculasdk.py | 4 ++--
plugin.video.alfa/channels/pepecine.py | 3 ++-
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/plugin.video.alfa/channels/peliculasdk.py b/plugin.video.alfa/channels/peliculasdk.py
index 689123b5..59be39bd 100644
--- a/plugin.video.alfa/channels/peliculasdk.py
+++ b/plugin.video.alfa/channels/peliculasdk.py
@@ -194,6 +194,8 @@ def peliculas(item):
def findvideos(item):
logger.info()
itemlist = []
+
+ tmdb.set_infoLabels(item, True) # refresh infolabels and fetch more data in a "second pass"
data = httptools.downloadpage(item.url).data
@@ -251,8 +253,6 @@ def findvideos(item):
language=idioma, quality=calidad))
break
- tmdb.set_infoLabels(itemlist)
-
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if __comprueba_enlaces__:
diff --git a/plugin.video.alfa/channels/pepecine.py b/plugin.video.alfa/channels/pepecine.py
index 8e23b3e4..90f0f8bb 100644
--- a/plugin.video.alfa/channels/pepecine.py
+++ b/plugin.video.alfa/channels/pepecine.py
@@ -331,6 +331,8 @@ def seasons_episodes(item):
def findvideos(item):
logger.info()
itemlist=[]
+
+ tmdb.set_infoLabels(item, True) # refresh infolabels and fetch more data in a "second pass"
if item.extra != "links_encoded":
data = httptools.downloadpage(item.url).data
@@ -366,7 +368,6 @@ def findvideos(item):
for videoitem in itemlist:
videoitem.title = '%s [%s] [%s]' % (videoitem.server.capitalize(), videoitem.language, videoitem.quality)
- tmdb.set_infoLabels(itemlist)
if itemlist and not item.show:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
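
The point of this patch: the removed tmdb.set_infoLabels(itemlist) ran one TMDB lookup per scraped server link, although every link in findvideos() describes the same title; the added call resolves the single incoming item once, up front. A minimal self-contained sketch of the difference, with FakeTmdb as a hypothetical stand-in for core.tmdb (which really queries themoviedb.org):

    class FakeTmdb(object):
        """Counts lookups instead of hitting the TMDB API."""
        def __init__(self):
            self.calls = 0

        def set_infoLabels(self, source, seekTmdb=False):
            # Like core.tmdb, accept either a single item or a list of items;
            # each item costs at least one remote query.
            items = source if isinstance(source, list) else [source]
            self.calls += len(items)

    tmdb = FakeTmdb()
    links = [object() for _ in range(8)]  # stand-ins for scraped server links

    tmdb.set_infoLabels(links)            # old placement: one query per link
    print(tmdb.calls)                     # -> 8

    tmdb.calls = 0
    tmdb.set_infoLabels(object(), True)   # new placement: one query in total
    print(tmdb.calls)                     # -> 1
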
From 9aca1489a8f720aeb43a4e753f41ae55c3c5af28 Mon Sep 17 00:00:00 2001
From: William Lopez
Date: Sat, 13 Oct 2018 15:08:19 -0400
Subject: [PATCH 02/19] added genre and quality sections
---
plugin.video.alfa/channels/yts.py | 92 +++++++++++++++++++++----------
1 file changed, 62 insertions(+), 30 deletions(-)
diff --git a/plugin.video.alfa/channels/yts.py b/plugin.video.alfa/channels/yts.py
index 183629ff..e3b5c1b5 100644
--- a/plugin.video.alfa/channels/yts.py
+++ b/plugin.video.alfa/channels/yts.py
@@ -8,52 +8,91 @@ from core.item import Item
from lib import generictools
from platformcode import logger
+URL_BROWSE = "https://yts.am/browse-movies"
+URL = "https://yts.am"
+
def mainlist(item):
logger.info()
itemlist = []
+
itemlist.append(Item(channel = item.channel,
- title = "Browse",
+ title = "Explorar por generos",
+ action = "categories",
+ opt = 'genre',
+ url = URL_BROWSE
+ ))
+
+ itemlist.append(Item(channel = item.channel,
+ title = "Explorar por calidad",
+ action = "categories",
+ opt = 'quality',
+ url = URL_BROWSE
+ ))
+
+ itemlist.append(Item(channel = item.channel,
+ title = "Explorar películas",
action = "movies",
opt = 0,
- url = "https://yts.am/browse-movies"
+ url = URL_BROWSE
))
itemlist.append(Item(channel = item.channel,
- title = "Popular",
+ title = "Más populares",
action = "movies",
opt = 1,
- url = "https://yts.am" ))
+ url = URL ))
itemlist.append(Item(channel = item.channel,
- title = "Search",
+ title = "Buscar",
action = "search",
opt = 0,
- url = "https://yts.am/browse-movies"
+ url = URL_BROWSE
))
return itemlist
+
+def categories(item):
+ logger.info()
+ itemList = []
+ data = httptools.downloadpage(item.url).data
+
+ block = scrapertools.find_single_match( data, '(?s)<select.*?="' + item.opt + '">(.*?)</select>')
+ pattern = '<option value="([^"]+)"'
+ categories = scrapertools.find_multiple_matches( block, pattern )
+
+ for category in categories:
+ url = URL_BROWSE + '/0/all/' + category + '/0/latest' if item.opt == "genre" else URL_BROWSE + '/0/' + category + '/all/0/latest'
+
+ itemList.append( Item( action = "movies",
+ channel = item.channel,
+ title = category,
+ url = url ))
+
+ return itemList
+
def movies(item):
logger.info()
itemlist = []
infoLabels = {}
data = httptools.downloadpage(item.url).data
- patron = '(?s)class="browse-movie-wrap.*?a href="([^"]+).*?' #Movie link
- patron += 'img class.*?src="([^"]+).*?' #Image
- patron += 'movie-title">.*?([^<]+)' #Movie title
- patron += '.*?year">(.*?)<' #Year
+ pattern = '(?s)class="browse-movie-wrap.*?a href="([^"]+).*?' #Movie link
+ pattern += 'img class.*?src="([^"]+).*?' #Image
+ pattern += 'movie-title">.*?([^<]+)' #Movie title
+ pattern += '.*?year">(.*?)<' #Year
- matches = scrapertools.find_multiple_matches(data, patron)
+ matches = scrapertools.find_multiple_matches(data, pattern)
idx = 0
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
if item.opt == 1:
- scrapedthumbnail = 'https://yts.am' + scrapedthumbnail
- infoLabels['plot'] = findplot(scrapedurl)
+ scrapedthumbnail = URL + scrapedthumbnail
+ infoLabels['year'] = year
itemlist.append(Item(action = "findvideo",
channel = item.channel,
+ contentTitle = scrapedtitle,
infoLabels = infoLabels,
title = scrapedtitle + ' (' + year + ')',
thumbnail = scrapedthumbnail,
@@ -64,35 +103,28 @@ def movies(item):
break
if itemlist != []:
actual_page = item.url
- pattern = '(?s)href="([^"]+)">Next.*?'
- next_page = scrapertools.find_single_match(data, pattern)
+ nextPattern = '(?s)href="([^"]+)">Next.*?'
+ next_page = scrapertools.find_single_match(data, nextPattern)
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="movies",
title='Next >>>',
- url='https://yts.am' + next_page))
+ url=URL + next_page))
+
+ tmdb.set_infoLabels_itemlist( itemlist, seekTmdb=True)
return itemlist
-def findplot(url):
- data = httptools.downloadpage(url).data
-
- pattern = '(?s)<div id="synopsis">(.*?)</div>' #Synopsis
-
- plot = scrapertools.find_single_match(data, pattern)
-
- return plot
-
def findvideo(item):
itemlist = []
data = httptools.downloadpage(item.url).data
- patron = '(?s)modal-quality.*?<span>(.*?)</span>' #Quality
- patron += '.*?size">(.*?)</p>' #Type
- patron += '.*?href="([^"]+)" rel' #Torrent link
+ pattern = '(?s)modal-quality.*?<span>(.*?)</span>' #Quality
+ pattern += '.*?size">(.*?)</p>' #Type
+ pattern += '.*?href="([^"]+)" rel' #Torrent link
- matches = scrapertools.find_multiple_matches(data, patron)
+ matches = scrapertools.find_multiple_matches(data, pattern)
for quality, videoType, link in matches:
@@ -111,7 +143,7 @@ def search(item, text):
logger.info('search: ' + text)
try:
- item.url = 'https://yts.am/browse-movies/' + text + '/all/all/0/latest'
+ item.url = URL_BROWSE + '/' + text + '/all/all/0/latest'
itemlist = movies(item)
return itemlist
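
Both the new categories() entries and the search URL rely on the same YTS browse path layout; as far as this diff shows, the segments after /browse-movies are query/quality/genre/rating/order (inferred from the strings in the patch, not from YTS documentation). A small sketch of a helper mirroring that scheme:

    URL_BROWSE = "https://yts.am/browse-movies"

    def browse_url(query="0", quality="all", genre="all", rating="0", order="latest"):
        # "0" stands in for an empty query/rating, as in the patch.
        return "/".join([URL_BROWSE, query, quality, genre, rating, order])

    print(browse_url(genre="action"))   # .../browse-movies/0/all/action/0/latest
    print(browse_url(quality="1080p"))  # .../browse-movies/0/1080p/all/0/latest
    print(browse_url(query="matrix"))   # .../browse-movies/matrix/all/all/0/latest

The last case is also why search() needs an explicit '/' between URL_BROWSE and the search text: unlike the old hard-coded URL, URL_BROWSE has no trailing slash.
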
From 1d1992bed920c1d0c85269b124f144faac667bbc Mon Sep 17 00:00:00 2001
From: pipcat
Date: Wed, 17 Oct 2018 10:34:29 +0200
Subject: [PATCH 03/19] Fixed translation errors
---
plugin.video.alfa/channels/tvmoviedb.py | 2 +-
.../resources/language/Spanish/strings.po | 12 ++++++++++--
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/plugin.video.alfa/channels/tvmoviedb.py b/plugin.video.alfa/channels/tvmoviedb.py
index 2eabf442..8271e21e 100644
--- a/plugin.video.alfa/channels/tvmoviedb.py
+++ b/plugin.video.alfa/channels/tvmoviedb.py
@@ -1496,7 +1496,7 @@ def detalles_fa(item):
if item.contentType == "tvshow" and ob_tmdb.result:
itemlist.append(item.clone(action="info_seasons", text_color=color4,
- title=config.get_localized_string(7007) % item.infoLabels["number_of_seasons"]))
+ title=config.get_localized_string(70067) % item.infoLabels["number_of_seasons"]))
if ob_tmdb.result:
itemlist.append(item.clone(action="reparto", title=config.get_localized_string(70071), text_color=color4,
infoLabels={'tmdb_id': item.infoLabels['tmdb_id'],
diff --git a/plugin.video.alfa/resources/language/Spanish/strings.po b/plugin.video.alfa/resources/language/Spanish/strings.po
index 2fd0cc89..a2f26113 100644
--- a/plugin.video.alfa/resources/language/Spanish/strings.po
+++ b/plugin.video.alfa/resources/language/Spanish/strings.po
@@ -3358,8 +3358,16 @@ msgid "Press to 'Clear cache' saved"
msgstr "Pulse para 'Borrar caché' guardada"
msgctxt "#70164"
-msgid "Free First|Premium First|Debriders First"
-msgstr "Free primero|Premium primero|Debriders primero"
+msgid "Free First"
+msgstr "Free primero"
+
+msgctxt "#70165"
+msgid "Premium First"
+msgstr "Premium primero"
+
+msgctxt "#70166"
+msgid "Debriders First"
+msgstr "Debriders primero"
msgctxt "#70167"
msgid "Titles Options"
From 19e9107efedffdf3b721c264b9cfc2d5705bbfef Mon Sep 17 00:00:00 2001
From: pipcat
Date: Wed, 17 Oct 2018 12:53:11 +0200
Subject: [PATCH 04/19] Other translation errors
---
plugin.video.alfa/channels/tvmoviedb.py | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/plugin.video.alfa/channels/tvmoviedb.py b/plugin.video.alfa/channels/tvmoviedb.py
index 8271e21e..c7244a80 100644
--- a/plugin.video.alfa/channels/tvmoviedb.py
+++ b/plugin.video.alfa/channels/tvmoviedb.py
@@ -2457,7 +2457,7 @@ def detalles_mal(item):
# Option to show the info of characters and voice actors/film crew
if not "No characters or voice actors" in data and not "No staff for this anime" in data:
- itemlist.append(item.clone(action="staff_mal", title=onfig.get_localized_string(70354), text_color=color2,
+ itemlist.append(item.clone(action="staff_mal", title=config.get_localized_string(70354), text_color=color2,
url=item.url + "/characters"))
if config.is_xbmc():
item.contextual = True
@@ -2514,7 +2514,7 @@ def detalles_mal(item):
for url, title in matches:
new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan,
thumbnail="")
- new_item.title = onfig.get_localized_string(70355) % title
+ new_item.title = config.get_localized_string(70355) % title
new_item.contentTitle = title
new_item.url = "https://myanimelist.net%s" % url
itemlist.append(new_item)
@@ -2525,7 +2525,7 @@ def detalles_mal(item):
for url, title in matches:
new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan,
thumbnail="")
- new_item.title = onfig.get_localized_string(70356) % title
+ new_item.title = config.get_localized_string(70356) % title
new_item.contentTitle = title
new_item.url = "https://myanimelist.net%s" % url
itemlist.append(new_item)
@@ -2536,7 +2536,7 @@ def detalles_mal(item):
for url, title in matches:
new_item = item.clone(infoLabels={'mediatype': item.contentType}, extra="", fanart=default_fan,
thumbnail="")
- new_item.title = onfig.get_localized_string(70357) % title
+ new_item.title = config.get_localized_string(70357) % title
new_item.contentTitle = title
new_item.url = "https://myanimelist.net%s" % url
itemlist.append(new_item)
@@ -2556,12 +2556,12 @@ def detalles_mal(item):
itemlist.append(new_item)
itemlist.append(
- item.clone(title=onfig.get_localized_string(70358), action="listado_tmdb", infoLabels={'mediatype': item.contentType},
+ item.clone(title=config.get_localized_string(70358), action="listado_tmdb", infoLabels={'mediatype': item.contentType},
search={'url': '%s/%s/recommendations' % (item.extra, item.infoLabels['tmdb_id']),
'language': langt, 'page': 1}, text_color=color2))
# MyAnimeList recommendations and info lookup on anidb (Spanish fansubs)
- itemlist.append(item.clone(title=onfig.get_localized_string(70359), action="reco_mal"))
+ itemlist.append(item.clone(title=config.get_localized_string(70359), action="reco_mal"))
anidb_link = scrapertools.find_single_match(data, '<a href="([^"]+)">anidb</a>')
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">More</a>')
if next_page:
- itemlist.append(item.clone(title=onfig.get_localized_string(70361), url=next_page, text_color=""))
+ itemlist.append(item.clone(title=config.get_localized_string(70361), url=next_page, text_color=""))
if itemlist:
itemlist.insert(0, item.clone(title=config.get_localized_string(70362), action="", text_color=color3))
@@ -3142,7 +3142,7 @@ def login_mal(from_list=False):
if not re.search(r'(?i)' + user, response.data):
logger.error("Error en el login")
- return False, onfig.get_localized_string(70330), user
+ return False, config.get_localized_string(70330), user
else:
if generic:
return False, config.get_localized_string(70381), user
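
Every corrected line in this patch fixes the same one-character typo: "onfig" is an undefined name, so any code path that reached one of these lines raised NameError before the menu entry was built. A two-line illustration:

    try:
        title = onfig.get_localized_string(70354)  # the pre-patch line
    except NameError as e:
        print(e)  # -> name 'onfig' is not defined
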
From 57e99b1db64c57b6db1fbacbeef95bb4055040cc Mon Sep 17 00:00:00 2001
From: Kingbox <37674310+lopezvg@users.noreply.github.com>
Date: Wed, 17 Oct 2018 19:04:58 +0200
Subject: [PATCH 05/19] DocumaniaTV: channel removed due to incompatibility
 with the website
---
plugin.video.alfa/channels/documaniatv.json | 79 --
plugin.video.alfa/channels/documaniatv.py | 792 --------------------
2 files changed, 871 deletions(-)
delete mode 100644 plugin.video.alfa/channels/documaniatv.json
delete mode 100644 plugin.video.alfa/channels/documaniatv.py
diff --git a/plugin.video.alfa/channels/documaniatv.json b/plugin.video.alfa/channels/documaniatv.json
deleted file mode 100644
index 3cdda230..00000000
--- a/plugin.video.alfa/channels/documaniatv.json
+++ /dev/null
@@ -1,79 +0,0 @@
-{
- "id": "documaniatv",
- "name": "DocumaniaTV",
- "active": true,
- "adult": false,
- "language": ["cast"],
- "banner": "",
- "thumbnail": "https://www.documaniatv.com/uploads/xcustom-logo.png.pagespeed.ic.lxJKR_lQE9.webp",
- "version": 1,
- "categories": [
- "documentary",
- "vos",
- "direct",
- "torrent"
- ],
- "settings": [
- {
- "id": "include_in_global_search",
- "type": "bool",
- "label": "Incluir en busqueda global",
- "default": true,
- "enabled": true,
- "visible": true
- },
- {
- "id": "modo_grafico",
- "type": "bool",
- "label": "Buscar información extra",
- "default": true,
- "enabled": true,
- "visible": true
- },
- {
- "id": "filter_languages",
- "type": "list",
- "label": "Mostrar enlaces en idioma...",
- "default": 0,
- "enabled": true,
- "visible": true,
- "lvalues": [
- "No filtrar",
- "CAST",
- "LAT",
- "VO",
- "VOS",
- "VOSE"
- ]
- },
- {
- "id": "timeout_downloadpage",
- "type": "list",
- "label": "Timeout (segs.) en descarga de páginas o verificación de servidores",
- "default": 5,
- "enabled": true,
- "visible": true,
- "lvalues": [
- "None",
- "1",
- "2",
- "3",
- "4",
- "5",
- "6",
- "7",
- "8",
- "9",
- "10"
- ]
- },
- {
- "id": "include_in_newest_documentales",
- "type": "bool",
- "label": "Incluir en Novedades - Documentales",
- "default": true,
- "enabled": true,
- "visible": true
- }
- ]
-}
diff --git a/plugin.video.alfa/channels/documaniatv.py b/plugin.video.alfa/channels/documaniatv.py
deleted file mode 100644
index 05603135..00000000
--- a/plugin.video.alfa/channels/documaniatv.py
+++ /dev/null
@@ -1,792 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import sys
-import urllib
-import urlparse
-import time
-
-from channelselector import get_thumb
-from core import httptools
-from core import scrapertools
-from core import servertools
-from core.item import Item
-from platformcode import config, logger
-from core import tmdb
-from lib import generictools
-from channels import filtertools
-from channels import autoplay
-
-
-#IDIOMAS = {'CAST': 'Castellano', 'LAT': 'Latino', 'VO': 'Version Original'}
-IDIOMAS = {'Castellano': 'CAST', 'Latino': 'LAT', 'Version Original': 'VO'}
-list_language = IDIOMAS.values()
-list_quality = []
-list_servers = ['directo']
-
-
-host = 'https://www.documaniatv.com/'
-channel = "documaniatv"
-
-categoria = channel.capitalize()
-__modo_grafico__ = config.get_setting('modo_grafico', channel)
-timeout = config.get_setting('timeout_downloadpage', channel)
-
-
-def mainlist(item):
- logger.info()
- itemlist = []
-
- thumb_docus = get_thumb("channels_documentary.png")
- thumb_series = get_thumb("channels_tvshow.png")
- thumb_buscar = get_thumb("search.png")
- thumb_separador = get_thumb("next.png")
- thumb_settings = get_thumb("setting_0.png")
- thumb_cartelera = get_thumb("now_playing.png")
- thumb_pelis_vos = get_thumb("channels_vos.png")
- thumb_popular = get_thumb("popular.png")
- thumb_generos = get_thumb("genres.png")
-
- autoplay.init(item.channel, list_servers, list_quality)
-
- itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=host + "newvideos.html", thumbnail=thumb_docus, extra="novedades"))
- itemlist.append(Item(channel=item.channel, title="Los Más Vistos", action="listado", url=host + "topvideos.html", thumbnail=thumb_popular, extra="populares"))
- itemlist.append(Item(channel=item.channel, title="Por Géneros", action="categorias", url=host + "categorias-y-canales.html", thumbnail=thumb_generos, extra="categorias"))
- itemlist.append(Item(channel=item.channel, title="Series", action="listado", url=host + "top-series-documentales.html", thumbnail=thumb_series, extra="series"))
-
- itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host + "search.php?keywords=", thumbnail=thumb_buscar, extra="search"))
-
- itemlist.append(Item(channel=item.channel, url=host, title="[COLOR yellow]Configuración:[/COLOR]", folder=False, thumbnail=thumb_separador))
-
- itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal", thumbnail=thumb_settings))
-
- autoplay.show_option(item.channel, itemlist) #Enable Autoplay
-
- return itemlist
-
-def configuracion(item):
- from platformcode import platformtools
- ret = platformtools.show_channel_settings()
- platformtools.itemlist_refresh()
- return
-
-
-def categorias(item):
- logger.info()
-
- itemlist = []
-
- data = ''
- try:
- data = re.sub(r"\n|\r|\t|\s{2}|(<br>)", "", httptools.downloadpage(item.url, timeout=timeout).data)
- data = unicode(data, "utf-8", errors="replace").encode("utf-8")
- except:
- pass
-
- patron = '<a href="([^"]+)"[^>]*>([^<]+)</a>'
- #Check whether a page was loaded, and whether it has the expected structure
- if not data or not scrapertools.find_single_match(data, patron):
- item = generictools.web_intervenida(item, data) #Check that it has not been shut down
- if item.intervencion: #It HAS been shut down by court order
- for clone_inter, autoridad in item.intervencion:
- thumb_intervenido = get_thumb(autoridad)
- itemlist.append(item.clone(action='', title="[COLOR yellow]" + clone_inter.capitalize() + ': [/COLOR]' + intervenido_judicial + '. Reportar el problema en el foro', thumbnail=thumb_intervenido))
- return itemlist #Exit
-
- logger.error("ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL: " + item.url + data)
- if not data: #If nothing could be found, exit
- itemlist.append(item.clone(action='', title=item.category + ': ERROR 01: SUBMENU: La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
- return itemlist #if there is no more data, something is broken; paint what we have
-
- matches = re.compile(patron, re.DOTALL).findall(data)
-
- if not matches:
- logger.error("ERROR 02: SUBMENU: Ha cambiado la estructura de la Web " + " / PATRON: " + patron + " / DATA: " + data)
- itemlist.append(item.clone(action='', title=item.category + ': ERROR 02: SUBMENU: Ha cambiado la estructura de la Web. Reportar el error con el log'))
- return itemlist #if there is no more data, something is broken; paint what we have
-
- #logger.debug(matches)
-
- for scrapedurl, scrapedtitle in matches:
- if 'series documentales' in scrapedtitle.lower():
- continue
- itemlist.append(item.clone(action="listado", title=scrapedtitle.capitalize().strip(), url=scrapedurl))
-
- return itemlist
-
-
-def listado(item):
- logger.info()
- itemlist = []
- item.category = categoria
-
- #logger.debug(item)
-
- curr_page = 1 # Initial page on the Web
- curr_page_foot = 1 # Initial page in Alfa
- last_page = 99999 # Initial last page
- last_page_foot = 1 # Initial last page
- cnt_tot = 40 # Set the max number of items per page
- cnt_title = 0 # Counter of lines inserted into Itemlist
- cnt_title_tot = 0 # Counter of lines inserted into Itemlist, total
- if item.curr_page:
- curr_page = int(item.curr_page) # If it comes from a previous pass, use it
- del item.curr_page # ... and delete it
- if item.curr_page_foot:
- curr_page_foot = int(item.curr_page_foot) # If it comes from a previous pass, use it
- del item.curr_page_foot # ... and delete it
- if item.last_page:
- last_page = int(item.last_page) # If it comes from a previous pass, use it
- del item.last_page # ... and delete it
- if item.last_page_foot:
- last_page_foot = int(item.last_page_foot) # If it comes from a previous pass, use it
- del item.last_page_foot # ... and delete it
- if item.cnt_tot:
- cnt_tot = int(item.cnt_tot) # If it comes from a previous pass, use it
- del item.cnt_tot # ... and delete it
- if item.cnt_title_tot:
- cnt_title_tot = int(item.cnt_title_tot) # If it comes from a previous pass, use it
- del item.cnt_title_tot # ... and delete it
-
- inicio = time.time() # Make sure the process does not take an unreasonable time
- fin = inicio + 10 # After this time, paint what we have (seconds)
- timeout_search = timeout # Timeout for downloads
- if item.extra == 'search':
- timeout_search = timeout * 2 # Slightly longer timeout for searches
- if timeout_search < 5:
- timeout_search = 5 # Slightly longer timeout for searches
-
- if not item.extra2: # If it comes from Catalogue or Alphabet
- item.extra2 = ''
-
- next_page_url = item.url
- #Max number of lines allowed by TMDB. Max 10 seconds per Itemlist so as not to degrade performance
- while cnt_title < cnt_tot and curr_page <= last_page and fin > time.time():
-
- # Download the page
- data = ''
- try:
- data = re.sub(r"\n|\r|\t|\s{2}|(<br>)|&nbsp;", "", httptools.downloadpage(next_page_url, timeout=timeout_search).data)
- data = unicode(data, "utf-8", errors="replace").encode("utf-8")
- except:
- pass
-
- if not data: #If the website is down, exit without raising an error
- logger.error("ERROR 01: LISTADO: La Web no responde o ha cambiado de URL: " + item.url + " / DATA: " + data)
- itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 01: LISTADO:. La Web no responde o ha cambiado de URL. Si la Web está activa, reportar el error con el log'))
- break #if there is no more data, something is broken; paint what we have
-
- #Pattern for everything, except for Series
- patron = '(.*?)<\/span>.*?.*?data-echo="([^"]+)"'
-
- #If it comes from Series, use a specialised pattern
- if item.extra == 'series':
- patron = '(?:(.*?)<\/span>.*?)?.*?data-echo="([^"]+)"'
-
- matches = re.compile(patron, re.DOTALL).findall(data)
- if len(matches) > cnt_title_tot and cnt_title_tot > 0:
- matches = matches[cnt_title_tot:]
-
- #logger.debug("PATRON: " + patron)
- #logger.debug(matches)
- #logger.debug(data)
-
- #Look for the pagination url and the last page
- data_page = scrapertools.find_single_match(data, '
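
The deleted listado() is representative of how alfa channels paginate: paging state travels on the Item between calls, and each pass stops at an item quota or a time budget, whichever comes first, so the listing stays responsive. A minimal self-contained sketch of that pattern with illustrative names (not the real channel API):

    import time

    def listado(state, fetch_page, cnt_tot=40, budget_secs=10):
        # 'state' persists between calls, like the attributes hung off Item.
        curr_page = state.get("curr_page", 1)
        last_page = state.get("last_page", 99999)
        deadline = time.time() + budget_secs

        itemlist = []
        while len(itemlist) < cnt_tot and curr_page <= last_page and time.time() < deadline:
            titles, last_page = fetch_page(curr_page)
            itemlist.extend(titles)
            curr_page += 1

        # Hand the cursor to the next pass instead of scraping everything now.
        state["curr_page"], state["last_page"] = curr_page, last_page
        return itemlist[:cnt_tot]

    def fake_fetch(page):
        # Stand-in for the httptools download + regex scrape: 3 pages of 25.
        return (["title %d-%d" % (page, n) for n in range(25)], 3)

    state = {}
    print(len(listado(state, fake_fetch)))  # -> 40
    print(state["curr_page"])               # -> 3; the next call resumes here
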