From 9e1c190c0b130ad12e60b1ba6fe4b3ebdba373dc Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Tue, 2 Jan 2018 11:58:40 -0500
Subject: [PATCH 01/10] ciberpeliculashd: added series section
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../channels/ciberpeliculashd.py | 150 ++++++++++++++++--
1 file changed, 135 insertions(+), 15 deletions(-)
diff --git a/plugin.video.alfa/channels/ciberpeliculashd.py b/plugin.video.alfa/channels/ciberpeliculashd.py
index 37c9439c..3562697f 100644
--- a/plugin.video.alfa/channels/ciberpeliculashd.py
+++ b/plugin.video.alfa/channels/ciberpeliculashd.py
@@ -20,14 +20,125 @@ except:
def mainlist(item):
logger.info()
itemlist = []
- itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host + "/?peli=1"))
- itemlist.append(Item(channel = item.channel, title = "Por género", action = "filtro", url = host, extra = "categories" ))
- itemlist.append(Item(channel = item.channel, title = "Por calidad", action = "filtro", url = host, extra = "qualitys"))
- itemlist.append(Item(channel = item.channel, title = "Por idioma", action = "filtro", url = host, extra = "languages"))
+ itemlist.append(Item(channel = item.channel, title = "Películas", text_bold = True, folder = False))
+ itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host + "/?peli=1"))
+ itemlist.append(Item(channel = item.channel, title = " Por género", action = "filtro", url = host, extra = "categories" ))
+ itemlist.append(Item(channel = item.channel, title = " Por calidad", action = "filtro", url = host, extra = "qualitys"))
+ itemlist.append(Item(channel = item.channel, title = " Por idioma", action = "filtro", url = host, extra = "languages"))
+ itemlist.append(Item(channel = item.channel, title = ""))
+ itemlist.append(Item(channel = item.channel, title = "Series", text_bold = True, folder = False))
+ itemlist.append(Item(channel = item.channel, title = " Novedades", action = "series", url = host + "/series/?peli=1"))
+ itemlist.append(Item(channel = item.channel, title = " Nuevos Capítulos", action = "nuevos_capitulos", url = host + "/series/?peli=1"))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s="))
return itemlist
+def nuevos_capitulos(item):
+ logger.info()
+ itemlist = []
+ data = httptools.downloadpage(item.url).data
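+    # Each new-episode card yields: link, thumbnail, show title, episode label and age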
+ patron = 'class="episode" href="([^"]+).*?'
+ patron += 'src="([^"]+).*?'
+ patron += 'title="([^"]+).*?'
+ patron += '-->([^<]+).*?'
+ patron += 'created_at">([^<]+)'
+ matches = scrapertools.find_multiple_matches(data, patron)
+ for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepisode, scrapeddays in matches:
+ scrapedtitle = scrapedtitle + " %s (%s)" %(scrapedepisode.strip(), scrapeddays.strip())
+ itemlist.append(Item(action = "findvideos",
+ channel = item.channel,
+ title = scrapedtitle,
+ thumbnail = scrapedthumbnail,
+ url = scrapedurl
+ ))
+ return itemlist
+
+def series(item):
+ logger.info()
+ itemlist = []
+ data = httptools.downloadpage(item.url).data
+ bloque = scrapertools.find_single_match(data, 'loop-posts series.*?panel-pagination pagination-bottom')
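+    # Series grid: link, TMDb poster and title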
+ patron = 'a href="([^"]+).*?'
+ patron += '((?:http|https)://image.tmdb.org[^"]+).*?'
+ patron += 'title="([^"]+)'
+ matches = scrapertools.find_multiple_matches(bloque, patron)
+ for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+ itemlist.append(Item(action = "temporadas",
+ channel = item.channel,
+ thumbnail = scrapedthumbnail,
+ title = scrapedtitle,
+ contentSerieName = scrapedtitle,
+ url = scrapedurl
+ ))
+ if itemlist:
+ tmdb.set_infoLabels(itemlist)
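+        # Next page: bump the "peli" counter in the current URL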
+ page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
+ next_page = scrapertools.find_single_match(item.url,".*?peli=")
+ next_page += "%s" %page
+ itemlist.append(Item(action = "series",
+ channel = item.channel,
+ title = "Página siguiente",
+ url = next_page
+ ))
+ return itemlist
+
+
+def temporadas(item):
+ logger.info()
+ itemlist = []
+ data = httptools.downloadpage(item.url).data
+    bloque = scrapertools.find_single_match(data, 'Lista de Temporadas(.*?)</ul>')
+    matches = scrapertools.find_multiple_matches(bloque, '<a[^>]*>(.*?[0-9]+)</a>')
+ for scrapedtitle in matches:
+ season = scrapertools.find_single_match(scrapedtitle, '[0-9]+')
+ item.infoLabels["season"] = season
+ url = item.url + "?temporada=%s" %season
+ itemlist.append(item.clone(action = "capitulos",
+ title = scrapedtitle,
+ url = url
+ ))
+ tmdb.set_infoLabels(itemlist)
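+    # Optionally offer to add the whole series to the video library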
+ if config.get_videolibrary_support():
+ itemlist.append(Item(channel=item.channel, title =""))
+ itemlist.append(item.clone(action = "add_serie_to_library",
+ channel = item.channel,
+ extra = "episodios",
+ title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
+ url = item.url
+ ))
+ return itemlist
+
+
+def episodios(item):
+ logger.info()
+ itemlist = []
+ templist = temporadas(item)
+ for tempitem in templist:
+ itemlist += capitulos(tempitem)
+ return itemlist
+
+
+def capitulos(item):
+ logger.info()
+ itemlist = []
+ data = httptools.downloadpage(item.url).data
+    patron = '<a class="episode" href="([^"]+)"[^>]*>(.*?)</a>'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle in matches:
+        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
+ episode = scrapertools.find_single_match(scrapedtitle, "Capitulo ([0-9]+)")
+ scrapedtitle = scrapedtitle.split(":")[1]
+ scrapedtitle = "%sx%s %s" %(item.infoLabels["season"], episode, scrapedtitle)
+ item.infoLabels["episode"] = episode
+ itemlist.append(item.clone(action = "findvideos",
+ title = scrapedtitle,
+ url = scrapedurl
+ ))
+ tmdb.set_infoLabels(itemlist)
+ return itemlist
+
+
def newest(categoria):
logger.info()
itemlist = []
@@ -83,6 +194,7 @@ def filtro(item):
def peliculas(item):
logger.info()
itemlist = []
+ infoLabels = dict()
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom')
patron = 'a href="([^"]+)".*?'
@@ -98,23 +210,31 @@ def peliculas(item):
else:
year = 0
fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
- itemlist.append(Item(action = "findvideos",
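+        # The listing mixes films and series: series entries go to temporadas, films to findvideos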
+ if "serie" in scrapedurl:
+ action = "temporadas"
+ infoLabels ['tvshowtitle'] = scrapedtitle
+ else:
+ action = "findvideos"
+ infoLabels ['tvshowtitle'] = ""
+ infoLabels ['year'] = year
+ itemlist.append(Item(action = action,
channel = item.channel,
fulltitle = fulltitle,
thumbnail = scrapedthumbnail,
- infoLabels = {'year': year},
+ infoLabels = infoLabels,
title = scrapedtitle,
url = scrapedurl
))
- tmdb.set_infoLabels(itemlist)
- page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
- next_page = scrapertools.find_single_match(item.url,".*?peli=")
- next_page += "%s" %page
- itemlist.append(Item(action = "peliculas",
- channel = item.channel,
- title = "Página siguiente",
- url = next_page
- ))
+ if itemlist:
+ tmdb.set_infoLabels(itemlist)
+ page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
+ next_page = scrapertools.find_single_match(item.url,".*?peli=")
+ next_page += "%s" %page
+ itemlist.append(Item(action = "peliculas",
+ channel = item.channel,
+ title = "Página siguiente",
+ url = next_page
+ ))
return itemlist
From 738fb50ce9371c02e2345e312c6632b46003d0f3 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Fri, 5 Jan 2018 16:39:46 -0500
Subject: [PATCH 02/10] userscloud: updated test_video_exists
---
plugin.video.alfa/servers/userscloud.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugin.video.alfa/servers/userscloud.py b/plugin.video.alfa/servers/userscloud.py
index a2dfc668..58a572b6 100755
--- a/plugin.video.alfa/servers/userscloud.py
+++ b/plugin.video.alfa/servers/userscloud.py
@@ -11,7 +11,7 @@ def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
- if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data:
+ if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
return False, "[Userscloud] El fichero no existe o ha sido borrado"
return True, ""
From 4a0f1b5c413e603ba2b1ee1afb3ea9c82d6b753d Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 8 Jan 2018 08:30:12 -0500
Subject: [PATCH 03/10] doomtv: fix findvideos
---
plugin.video.alfa/channels/doomtv.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/plugin.video.alfa/channels/doomtv.py b/plugin.video.alfa/channels/doomtv.py
index f906175d..a5f1bd9c 100644
--- a/plugin.video.alfa/channels/doomtv.py
+++ b/plugin.video.alfa/channels/doomtv.py
@@ -222,11 +222,14 @@ def newest(categoria):
def findvideos(item):
logger.info()
itemlist = []
- #itemlist = get_url(item)
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
- url_m3u8 = scrapertools.find_single_match(data, '')
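+    # The page only embeds a jwplatform player script; fetch it (sending the page
+    # as Referer) and read the m3u8 URL from its sources block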
+ player_vip = scrapertools.find_single_match(data, 'src=(https:\/\/content.jwplatform.com\/players.*?js)')
+ data_m3u8 = httptools.downloadpage(player_vip, headers= {'referer':item.url}).data
+    data_m3u8 = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data_m3u8)
+ url_m3u8 = scrapertools.find_single_match(data_m3u8,',sources:.*?file: (.*?),')
itemlist.append(item.clone(url=url_m3u8, action='play'))
+
patron = 'id=(tab\d+)><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
matches = re.compile(patron, re.DOTALL).findall(data)
From 02abbfcc646c2fba7bf24c54c173dd34bed563db Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 15 Jan 2018 16:02:31 -0500
Subject: [PATCH 04/10] httptools: updated
---
plugin.video.alfa/core/httptools.py | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/plugin.video.alfa/core/httptools.py b/plugin.video.alfa/core/httptools.py
index dfb404b0..82345bfb 100755
--- a/plugin.video.alfa/core/httptools.py
+++ b/plugin.video.alfa/core/httptools.py
@@ -3,6 +3,7 @@
# httptools
# --------------------------------------------------------------------------------
+import inspect
import cookielib
import gzip
import os
@@ -15,6 +16,7 @@ from threading import Lock
from core.cloudflare import Cloudflare
from platformcode import config, logger
+from platformcode.logger import WebErrorException
cookies_lock = Lock()
@@ -23,7 +25,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")
# Default headers, used if none are specified
default_headers = dict()
-default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3163.100 Safari/537.36"
+default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"
@@ -205,8 +207,18 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
logger.info("Response error: %s" % (response["error"]))
logger.info("Response data length: %s" % (len(response["data"])))
logger.info("Response headers:")
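+    # Flag responses served through Cloudflare; its 503 challenge page is handled below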
+ server_cloudflare = ""
for header in response["headers"]:
logger.info("- %s: %s" % (header, response["headers"][header]))
+ if "cloudflare" in response["headers"][header]:
+ server_cloudflare = "cloudflare"
+
+    is_channel = inspect.getmodule(inspect.currentframe().f_back)
+    # Raise on 4xx/5xx responses, but only for channels (server modules handle
+    # their own errors) and never for Cloudflare's 503 challenge page
+    # response["code"] = 400
+    if type(response["code"]) == int and "\\servers\\" not in str(is_channel):
+        if response["code"] > 399 and not (server_cloudflare == "cloudflare" and response["code"] == 503):
+            raise WebErrorException(urlparse.urlparse(url)[1])
if cookies:
save_cookies()
From ba2a6c682ea869bf5de1c6687cdc9d8386af324d Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 15 Jan 2018 16:27:50 -0500
Subject: [PATCH 05/10] logger: updated
---
plugin.video.alfa/platformcode/logger.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/plugin.video.alfa/platformcode/logger.py b/plugin.video.alfa/platformcode/logger.py
index 7008407a..10741580 100644
--- a/plugin.video.alfa/platformcode/logger.py
+++ b/plugin.video.alfa/platformcode/logger.py
@@ -76,3 +76,8 @@ def error(texto=""):
xbmc.log("######## ERROR #########", xbmc.LOGERROR)
xbmc.log(texto, xbmc.LOGERROR)
+
+
+class WebErrorException(Exception):
+ def __init__(self, *args, **kwargs):
+ Exception.__init__(self, *args, **kwargs)
From f4238302a5d2d07db7d440a4a9fade91cf833e58 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Mon, 15 Jan 2018 16:29:35 -0500
Subject: [PATCH 06/10] launcher: updated
---
plugin.video.alfa/platformcode/launcher.py | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/plugin.video.alfa/platformcode/launcher.py b/plugin.video.alfa/platformcode/launcher.py
index 513576c8..d9eabd37 100644
--- a/plugin.video.alfa/platformcode/launcher.py
+++ b/plugin.video.alfa/platformcode/launcher.py
@@ -14,8 +14,7 @@ from core import videolibrarytools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
-from channelselector import get_thumb
-
+from platformcode.logger import WebErrorException
def start():
@@ -298,7 +297,19 @@ def run(item=None):
logger.error("Codigo de error HTTP : %d" % e.code)
# "El sitio web no funciona correctamente (error http %d)"
platformtools.dialog_ok("alfa", config.get_localized_string(30051) % e.code)
+ except WebErrorException, e:
+ import traceback
+ logger.error(traceback.format_exc())
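+        # Work out which channel failed from the file path in the traceback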
+ patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
+ "\\\\") + '([^.]+)\.py"'
+ canal = scrapertools.find_single_match(traceback.format_exc(), patron)
+
+ platformtools.dialog_ok(
+ "Error en el canal " + canal,
+ "La web de la que depende parece no estar disponible, puede volver a intentarlo, "
+ "si el problema persiste verifique mediante un navegador la web: %s. "
+ "Si la web funciona correctamente informe el error en: www.alfa-addon.com" %(e))
except:
import traceback
logger.error(traceback.format_exc())
From b8f66623da988eed3b6dc7e1ca1461cd8c363518 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Tue, 16 Jan 2018 08:12:02 -0500
Subject: [PATCH 07/10] Update platformtools.py
---
plugin.video.alfa/platformcode/platformtools.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugin.video.alfa/platformcode/platformtools.py b/plugin.video.alfa/platformcode/platformtools.py
index a5b88b8a..611338ac 100644
--- a/plugin.video.alfa/platformcode/platformtools.py
+++ b/plugin.video.alfa/platformcode/platformtools.py
@@ -142,7 +142,7 @@ def render_items(itemlist, parent_item):
if item.fanart:
fanart = item.fanart
else:
- fanart = os.path.join(config.get_runtime_path(), "fanart-xmas.jpg")
+ fanart = os.path.join(config.get_runtime_path(), "fanart.jpg")
# Create the listitem
listitem = xbmcgui.ListItem(item.title)
From d76430c6d710b880dc96aab0639ca76d64b3e1e7 Mon Sep 17 00:00:00 2001
From: alfa-addon
Date: Tue, 16 Jan 2018 10:34:07 -0500
Subject: [PATCH 08/10] pordede: added channel
---
plugin.video.alfa/channels/pordede.json | 85 +++
plugin.video.alfa/channels/pordede.py | 665 ++++++++++++++++++++++++
2 files changed, 750 insertions(+)
create mode 100644 plugin.video.alfa/channels/pordede.json
create mode 100644 plugin.video.alfa/channels/pordede.py
diff --git a/plugin.video.alfa/channels/pordede.json b/plugin.video.alfa/channels/pordede.json
new file mode 100644
index 00000000..65c0cde2
--- /dev/null
+++ b/plugin.video.alfa/channels/pordede.json
@@ -0,0 +1,85 @@
+{
+ "id": "pordede",
+ "name": "Pordede",
+ "active": true,
+ "adult": false,
+ "language": ["cast"],
+ "thumbnail": "pordede.png",
+ "banner": "pordede.png",
+ "categories": [
+ "movie",
+ "tvshow"
+ ],
+ "settings": [
+ {
+ "id": "pordedeuser",
+ "type": "text",
+ "label": "@30014",
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "pordedepassword",
+ "type": "text",
+ "hidden": true,
+ "label": "@30015",
+ "enabled": "!eq(-1,'')",
+ "visible": true
+ },
+ {
+ "id": "include_in_global_search",
+ "type": "bool",
+    "label": "Incluir en búsqueda global",
+ "default": false,
+ "enabled": "!eq(-1,'') + !eq(-2,'')",
+ "visible": true
+ },
+ {
+ "id": "pordedesortlinks",
+ "type": "list",
+ "label": "Ordenar enlaces",
+ "default": 0,
+ "enabled": true,
+ "visible": "!eq(-2,'') + !eq(-3,'')",
+ "lvalues": [
+ "No",
+ "Por no Reportes",
+ "Por Idioma",
+ "Por Calidad",
+ "Por Idioma y Calidad",
+ "Por Idioma y no Reportes",
+ "Por Idioma, Calidad y no Reportes"
+ ]
+ },
+ {
+ "id": "pordedeshowlinks",
+ "type": "list",
+ "label": "Mostrar enlaces",
+ "default": 0,
+ "enabled": true,
+ "visible": "!eq(-3,'') + !eq(-4,'')",
+ "lvalues": [
+ "Todos",
+ "Ver online",
+ "Descargar"
+ ]
+ },
+ {
+ "id": "pordedenumberlinks",
+ "type": "list",
+ "label": "Limitar número de enlaces",
+ "default": 0,
+ "enabled": true,
+ "visible": "!eq(-4,'') + !eq(-5,'')",
+ "lvalues": [
+ "No",
+ "5",
+ "10",
+ "15",
+ "20",
+ "25",
+ "30"
+ ]
+ }
+ ]
+}
diff --git a/plugin.video.alfa/channels/pordede.py b/plugin.video.alfa/channels/pordede.py
new file mode 100644
index 00000000..29347d39
--- /dev/null
+++ b/plugin.video.alfa/channels/pordede.py
@@ -0,0 +1,665 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+import sys
+import urlparse
+
+from core import channeltools
+from core import httptools
+from core import jsontools
+from core import scrapertools
+from core import servertools
+from core.item import Item
+from platformcode import config, logger
+from platformcode import platformtools
+
+def login():
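+    # Reuse the current session if the configured user already appears on the
+    # home page; otherwise post the credentials to the auth endpoint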
+ url_origen = "http://www.pordede.com"
+ data = httptools.downloadpage(url_origen).data
+ if config.get_setting("pordedeuser", "pordede") in data:
+ return True
+
+ url = "http://www.pordede.com/api/login/auth?response_type=code&client_id=appclient&redirect_uri=http%3A%2F%2Fwww.pordede.com%2Fapi%2Flogin%2Freturn&state=none"
+ post = "username=%s&password=%s&authorized=autorizar" % (config.get_setting("pordedeuser", "pordede"), config.get_setting("pordedepassword", "pordede"))
+ data = httptools.downloadpage(url, post).data
+ if '"ok":true' in data:
+ return True
+ else:
+ return False
+
+def mainlist(item):
+ logger.info()
+
+ itemlist = []
+
+ if not config.get_setting("pordedeuser", "pordede"):
+ itemlist.append( Item( channel=item.channel , title="Habilita tu cuenta en la configuración..." , action="settingCanal" , url="") )
+ else:
+ result = login()
+ if not result:
+ itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar..."))
+ return itemlist
+ itemlist.append( Item(channel=item.channel, action="menuseries" , title="Series" , url="" ))
+ itemlist.append( Item(channel=item.channel, action="menupeliculas" , title="Películas y documentales" , url="" ))
+ itemlist.append( Item(channel=item.channel, action="listas_sigues" , title="Listas que sigues" , url="http://www.pordede.com/lists/following" ))
+ itemlist.append( Item(channel=item.channel, action="tus_listas" , title="Tus listas" , url="http://www.pordede.com/lists/yours" ))
+ itemlist.append( Item(channel=item.channel, action="listas_sigues" , title="Top listas" , url="http://www.pordede.com/lists" ))
+ itemlist.append( Item(channel=item.channel, action="settingCanal" , title="Configuración..." , url="" ))
+
+ return itemlist
+
+def settingCanal(item):
+ return platformtools.show_channel_settings()
+
+def menuseries(item):
+ logger.info()
+
+ itemlist = []
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Novedades" , url="http://www.pordede.com/series/loadmedia/offset/0/showlist/hot" ))
+ itemlist.append( Item(channel=item.channel, action="generos" , title="Por géneros" , url="http://www.pordede.com/series" ))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Siguiendo" , url="http://www.pordede.com/series/following" ))
+ itemlist.append( Item(channel=item.channel, action="siguientes" , title="Siguientes Capítulos" , url="http://www.pordede.com/main/index" , viewmode="movie"))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Favoritas" , url="http://www.pordede.com/series/favorite" ))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Pendientes" , url="http://www.pordede.com/series/pending" ))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Terminadas" , url="http://www.pordede.com/series/seen" ))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Recomendadas" , url="http://www.pordede.com/series/recommended" ))
+ itemlist.append( Item(channel=item.channel, action="search" , title="Buscar..." , url="http://www.pordede.com/series" ))
+
+ return itemlist
+
+def menupeliculas(item):
+ logger.info()
+
+ itemlist = []
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Novedades" , url="http://www.pordede.com/pelis/loadmedia/offset/0/showlist/hot" ))
+ itemlist.append( Item(channel=item.channel, action="generos" , title="Por géneros" , url="http://www.pordede.com/pelis" ))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Favoritas" , url="http://www.pordede.com/pelis/favorite" ))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Pendientes" , url="http://www.pordede.com/pelis/pending" ))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Vistas" , url="http://www.pordede.com/pelis/seen" ))
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title="Recomendadas" , url="http://www.pordede.com/pelis/recommended" ))
+ itemlist.append( Item(channel=item.channel, action="search" , title="Buscar..." , url="http://www.pordede.com/pelis" ))
+
+ return itemlist
+
+def generos(item):
+ logger.info()
+
+    # Download the page
+ data = httptools.downloadpage(item.url).data
+
+    # Extract the genre entries (folders)
+    data = scrapertools.find_single_match(data, '<ul class="genres">(.*?)</ul>')
+    patron = '<a data-genre="([^"]+)" href="([^"]+)"[^>]*>([^<]+)\((\d+)\)'
+ matches = re.compile(patron,re.DOTALL).findall(data)
+ itemlist = []
+
+ for textid,scrapedurl,scrapedtitle,cuantos in matches:
+ title = scrapedtitle.strip()+" ("+cuantos+")"
+ thumbnail = ""
+ plot = ""
+
+ if "/pelis" in item.url:
+ url = "http://www.pordede.com/pelis/loadmedia/offset/0/genre/"+textid.replace(" ","%20")+"/showlist/all"
+ else:
+ url = "http://www.pordede.com/series/loadmedia/offset/0/genre/"+textid.replace(" ","%20")+"/showlist/all"
+
+ itemlist.append( Item(channel=item.channel, action="peliculas" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title))
+
+ return itemlist
+
+def search(item,texto):
+ logger.info()
+
+ if item.url=="":
+ item.url="http://www.pordede.com/pelis"
+
+ texto = texto.replace(" ","-")
+
+ item.extra = item.url
+ item.url = item.url+"/loadmedia/offset/0/query/"+texto+"/years/1950/on/undefined/showlist/all"
+
+ try:
+ return buscar(item)
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("%s" % line)
+ return []
+
+def buscar(item):
+ logger.info()
+
+    # Download the page
+ headers = {"X-Requested-With": "XMLHttpRequest"}
+ data = httptools.downloadpage(item.url, headers=headers).data
+
+    # Extract the entries (folders)
+ json_object = jsontools.load(data)
+ data = json_object["html"]
+
+ return parse_mixed_results(item,data)
+
+def parse_mixed_results(item,data):
+    patron = '<a class="defaultLink extended" href="([^"]+)"[^>]*>.*?'
+    patron += 'title="([^<]+)"[^>]*>.*?<img[^>]+src="([^"]+)".*?'
+    patron += '<span class="year">([^<]*)</span>.*?<span class="value">([^<]*)</span>'
+ matches = re.compile(patron,re.DOTALL).findall(data)
+ itemlist = []
+
+ for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedyear,scrapedvalue in matches:
+ title = scrapertools.htmlclean(scrapedtitle)
+ if scrapedyear != '':
+ title += " ("+scrapedyear+")"
+ fulltitle = title
+ if scrapedvalue != '':
+ title += " ("+scrapedvalue+")"
+ thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
+ fanart = thumbnail.replace("mediathumb","mediabigcover")
+ plot = ""
+
+ if "/peli/" in scrapedurl or "/docu/" in scrapedurl:
+
+ if "/peli/" in scrapedurl:
+ sectionStr = "peli"
+ else:
+ sectionStr = "docu"
+
+ referer = urlparse.urljoin(item.url,scrapedurl)
+ url = referer.replace("/{0}/".format(sectionStr),"/links/view/slug/")+"/what/{0}".format(sectionStr)
+
+ itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , extra=referer, url=url, thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
+ contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"]))
+ else:
+ referer = item.url
+ url = urlparse.urljoin(item.url,scrapedurl)
+ itemlist.append( Item(channel=item.channel, action="episodios" , title=title , extra=referer, url=url, thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
+ contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"]))
+
+    next_page = scrapertools.find_single_match(data, '<div class="loadingBar" data-url="([^"]+)"')
+    if next_page != "":
+        itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=urlparse.urljoin("http://www.pordede.com", next_page)))
+    return itemlist
+
+
+def siguientes(item):
+    logger.info()
+    # Download the page and keep only the "Siguiendo" block
+    data = httptools.downloadpage(item.url).data
+    data = scrapertools.find_single_match(data, 'Siguiendo(.*?)</ul>')
+    patron = 'title="([^"]+)"[^<]*<img src="([^"]+)"[^<]*<a href="([^"]+)"[^>]*>.*?'
+    patron += '(\d+)x(\d+)'
+ matches = re.compile(patron,re.DOTALL).findall(data)
+ itemlist = []
+
+ for scrapedtitle,scrapedthumbnail,scrapedurl,scrapedsession,scrapedepisode in matches:
+ title = scrapertools.htmlclean(scrapedtitle)
+ session = scrapertools.htmlclean(scrapedsession)
+ episode = scrapertools.htmlclean(scrapedepisode)
+ thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
+ fanart = thumbnail.replace("mediathumb","mediabigcover")
+ plot = ""
+ title = session + "x" + episode + " - " + title
+
+ referer = urlparse.urljoin(item.url,scrapedurl)
+ url = referer
+
+ itemlist.append( Item(channel=item.channel, action="episodio" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title, fanart=fanart, extra=session+"|"+episode))
+
+ return itemlist
+
+def episodio(item):
+ logger.info()
+ itemlist = []
+
+    # Download the page
+ data = httptools.downloadpage(item.url).data
+
+ session = str(int(item.extra.split("|")[0]))
+ episode = str(int(item.extra.split("|")[1]))
+    patrontemporada = '<div class="checkSeason"[^>]+>Temporada '+session+'(.*?)<div class="clear">'
+    bloque_episodios = scrapertools.find_single_match(data, patrontemporada)
+    patron = '<a href="([^"]+)"[^>]*>\s*'+episode+'\s*<span class="title[^"]*">([^<]+)</span>(.*?)class="([^"]*)"'
+    matches = re.compile(patron,re.DOTALL).findall(bloque_episodios)
+
+ for scrapedurl,scrapedtitle,info,visto in matches:
+ if visto.strip()=="active":
+ visto_string = "[visto] "
+ else:
+ visto_string = ""
+ numero=episode
+ title = visto_string+session+"x"+numero+" "+scrapertools.htmlclean(scrapedtitle)
+ thumbnail = ""
+ plot = ""
+
+ epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)")
+ url = "http://www.pordede.com/links/viewepisode/id/"+epid
+ itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=item.fanart, show=item.show))
+
+ itemlist2 = []
+ for capitulo in itemlist:
+ itemlist2 = findvideos(capitulo)
+
+ return itemlist2
+
+def peliculas(item):
+ logger.info()
+
+    # Download the page
+ headers = {"X-Requested-With": "XMLHttpRequest"}
+ data = httptools.downloadpage(item.url, headers=headers).data
+
+    # Extract the entries (folders)
+ json_object = jsontools.load(data)
+ data = json_object["html"]
+
+ return parse_mixed_results(item,data)
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+
+    # Download the page
+    idserie = ''
+    data = httptools.downloadpage(item.url).data
+    # One block per season, then each episode row inside it
+    patrontemporada = '<div class="checkSeason"[^>]+>Temporada (\d+)(.*?)<div class="clear">'
+    for session, bloque_episodios in re.compile(patrontemporada, re.DOTALL).findall(data):
+        patron = '<a href="([^"]+)"[^>]*>\s*(\d+)\s*<span class="title[^"]*">([^<]+)</span>'
+        for scrapedurl, episode, scrapedtitle in re.compile(patron, re.DOTALL).findall(bloque_episodios):
+            title = session + "x" + episode + " " + scrapertools.htmlclean(scrapedtitle)
+            epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
+            url = "http://www.pordede.com/links/viewepisode/id/" + epid
+            itemlist.append(item.clone(action="findvideos", title=title, url=url, fulltitle=title, show=item.show))
+    return itemlist
+
+
+def listas_sigues(item):
+    logger.info()
+
+    patron = '<a href="([^"]+)" class="lista[^"]*">[^<]*<div[^>]+>([^<]+)'
+    patron += '[^<]+<span[^>]+>([^<]+)\s+</span>\s+</div>\s+</a>\s+([0-9]+)'
+
+    return parse_listas(item, patron)
+
+def tus_listas(item):
+ logger.info()
+
+    patron = '<a href="([^"]+)" class="lista[^"]*">[^<]*<div[^>]+>([^<]+)'
+    patron += '[^<]+<span[^>]+>([^<]+)\s+</span>\s+</div>\s+</a>\s+([0-9]+)'
+
+ return parse_listas(item, patron)
+
+def lista(item):
+ logger.info()
+
+    # Download the page
+ headers = {"X-Requested-With": "XMLHttpRequest"}
+ data = httptools.downloadpage(item.url, headers=headers).data
+
+    # Extract the entries (folders)
+ json_object = jsontools.load(data)
+ data = json_object["html"]
+
+ return parse_mixed_results(item,data)
+
+def findvideos(item, verTodos=False):
+ logger.info()
+
+    # Download the page
+ data = httptools.downloadpage(item.url).data
+ logger.info(data)
+
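+    # Session token (SESS) embedded in the page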
+ sesion = scrapertools.find_single_match(data,'SESS = "([^"]+)";')
+
+ patron = ' |