Compare commits

269 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 78b4a8d768 | |
| | 537c640771 | |
| | d76430c6d7 | |
| | b97f01d4d4 | |
| | 9059a6d14f | |
| | b8f66623da | |
| | 1a1c8f2547 | |
| | 0fc75a5305 | |
| | f4238302a5 | |
| | ba2a6c682e | |
| | 02abbfcc64 | |
| | 4a0f1b5c41 | |
| | 738fb50ce9 | |
| | 64396b3a4f | |
| | 9e1c190c0b | |
| | 7b8c1c6eb7 | |
| | 9a5b8cb4e7 | |
| | 2793ea9952 | |
| | 3067d137cb | |
| | 825d9f2198 | |
| | b41c2c08fc | |
| | 1f4825dd05 | |
| | 4f760040bc | |
| | 5391152408 | |
| | 815857404d | |
| | 239a73219e | |
| | b7787e0ee5 | |
| | 14fce6ebac | |
| | 16a530a9d4 | |
| | f4bc398f9e | |
| | 841f5e5f3d | |
| | ee80f9c4d2 | |
| | 50c42fbc4b | |
| | 76170820bb | |
| | b5083d16b5 | |
| | 6927f1f955 | |
| | a3b70aba22 | |
| | 1fb8db8496 | |
| | 201e7f1e2e | |
| | 245190ca70 | |
| | 23ebf3d19b | |
| | 5c699ed892 | |
| | 2da412890e | |
| | b47db3ae04 | |
| | 660a05f39d | |
| | 1f1b860715 | |
| | bc318f7a18 | |
| | 87e8ee4d46 | |
| | de5eda5477 | |
| | 0738a82372 | |
| | c3ca5b472f | |
| | 4008c63e12 | |
| | f5a5328620 | |
| | 48d7f754be | |
| | f1ffdf425e | |
| | fac578f631 | |
| | f7df5e9494 | |
| | aeea88395a | |
| | 8cc0ac4083 | |
| | ba2e824ec6 | |
| | 3aabe7eb75 | |
| | b3c345fd11 | |
| | 9ddc0c7eec | |
| | 6a25a4add4 | |
| | b339965287 | |
| | 800d6ae02e | |
| | f441d34222 | |
| | 57001ef13e | |
| | 19df05e956 | |
| | 9f20d50a79 | |
| | d8052c4bab | |
| | 4aca9a2306 | |
| | 5729e04cb5 | |
| | eb27cd53bc | |
| | afaf2fd4b4 | |
| | e264614a2e | |
| | de439ff8ea | |
| | 37df471d60 | |
| | 98b61f1b50 | |
| | 65648bca9b | |
| | e7fd77bcee | |
| | 27f93b9d98 | |
| | a77a009c3a | |
| | 94d4244cd1 | |
| | c2b9f1f009 | |
| | 7c887bf546 | |
| | 05535344c2 | |
| | 0caea46619 | |
| | c49439bdc7 | |
| | b5f1e7180c | |
| | 29324c4302 | |
| | b88ef13772 | |
| | b7b0c02589 | |
| | f0e07b7b28 | |
| | 735b4a6584 | |
| | 120e77b44b | |
| | ac27cd2f00 | |
| | 002e62aa19 | |
| | 4b9bbd0540 | |
| | b474db07e3 | |
| | 9a6e070799 | |
| | e0997a387b | |
| | 02797b5571 | |
| | de8f6af086 | |
| | 98c06a1140 | |
| | 2e9573b6e9 | |
| | fd5b972281 | |
| | 30e32ac133 | |
| | 1a7d23d7dc | |
| | 34f4e474fd | |
| | c546976329 | |
| | fe4c2685d7 | |
| | 1b7e71e3bf | |
| | 0ab5deba05 | |
| | dd4bcb8ef4 | |
| | 796bf25e6f | |
| | f136e6e2aa | |
| | bad4e91aee | |
| | 48d76ad6d4 | |
| | 4d248cab54 | |
| | df3022353c | |
| | 48a8f2fa1a | |
| | 5304271782 | |
| | 3701aba7fa | |
| | ef962d7ed2 | |
| | 9d03b4de54 | |
| | 6bbf26a9a9 | |
| | 392435e6fb | |
| | 8b445165df | |
| | ba28a426e6 | |
| | aa5fa6d275 | |
| | 6d8de2efec | |
| | 0c65e2d20c | |
| | 2a5c27355b | |
| | cad93526ce | |
| | 4cfe60c2a8 | |
| | 10a7535242 | |
| | 489cd02192 | |
| | 089098c58f | |
| | 57e5d32567 | |
| | 9adcafc343 | |
| | cf068bc13f | |
| | 7579428087 | |
| | 5ca2ed6212 | |
| | 11494549b9 | |
| | 77423ec5a8 | |
| | be2c691909 | |
| | 8fb445edaf | |
| | fada17bb78 | |
| | 5c90256a3d | |
| | 611a0e28a3 | |
| | 8ea2efb632 | |
| | f71de37f0f | |
| | d4b2a61318 | |
| | fd1f5c28df | |
| | 1e08ee9bd6 | |
| | 08ac52b279 | |
| | 7b52463ce6 | |
| | e79364ef93 | |
| | de4b08606a | |
| | 21b655b074 | |
| | 48120ac6ab | |
| | 5c360bdc68 | |
| | de267299e7 | |
| | d0139dfde3 | |
| | 7115c2f832 | |
| | 85135711de | |
| | 8c5c495633 | |
| | fdcf27a5fa | |
| | 7523b02e62 | |
| | 3ca234f8ae | |
| | 2848692d79 | |
| | d6f73e1f06 | |
| | 0dbf9c544a | |
| | 4fdf382ca3 | |
| | ca943ab6ef | |
| | 41a66823e5 | |
| | a6206420b5 | |
| | 1ebe99ede1 | |
| | aaa0149bc8 | |
| | 4cb704a6c3 | |
| | 411b3ce23d | |
| | 1b0f91d4f2 | |
| | 3f1baae10c | |
| | a91643694b | |
| | 3bd8507889 | |
| | f97a283175 | |
| | 3b02b62a29 | |
| | 25f8a9dc4b | |
| | 860bd0f834 | |
| | 6bede726f8 | |
| | f045d2ee7c | |
| | 51c4d7d746 | |
| | f340cbce3a | |
| | 16bcfdcb15 | |
| | 11ef80c3e0 | |
| | 4d2562eaac | |
| | 5a1a1a97f1 | |
| | 035f27f887 | |
| | f1db1236f1 | |
| | 790420df0d | |
| | 6999f615c8 | |
| | 78a6eecf2e | |
| | c9a96831d8 | |
| | e6cfcd3151 | |
| | 4fda116759 | |
| | 3c2902d5ea | |
| | b9bd644e0a | |
| | 93ab41c2da | |
| | 87541a3291 | |
| | 46646e09d1 | |
| | a206b9ddaf | |
| | 76f52ce404 | |
| | afc13bd1d3 | |
| | 4469fc75cd | |
| | 8428e8571b | |
| | b640303143 | |
| | 7456c32fbb | |
| | a8ddc664d4 | |
| | be3345d34f | |
| | d9677a13ea | |
| | d3d44463b7 | |
| | 6f63ea5128 | |
| | 48740f4a1d | |
| | 5224547446 | |
| | feb4f239e3 | |
| | 164804d484 | |
| | 162928f4d6 | |
| | 4c24fe48ed | |
| | 5bf145114d | |
| | 39669395ae | |
| | f5a5979f74 | |
| | 476f7f985d | |
| | 42d20e9434 | |
| | 9adb713d07 | |
| | 513d66dfb4 | |
| | 537c2cb0e4 | |
| | ddda31b2af | |
| | c5d1bc1988 | |
| | 44a89836d5 | |
| | 878dbc8393 | |
| | d625419219 | |
| | 3032770580 | |
| | 742ff3feff | |
| | 3965fdd1c6 | |
| | 689e2cc534 | |
| | d37f911d3f | |
| | c7850cef56 | |
| | 783b8a11c1 | |
| | 3746d3bfb0 | |
| | 71bf6ce57b | |
| | f3b4ddee25 | |
| | 2f4fb66ff0 | |
| | 5a7905d5e0 | |
| | 672d1ce0c0 | |
| | 17002ddf94 | |
| | 2618168737 | |
| | e6e572922f | |
| | 6a7e883299 | |
| | d1a264f7c7 | |
| | 295f4eab68 | |
| | e72320f12f | |
| | dc77e9733b | |
| | 7864fe3740 | |
| | ba03b01cc0 | |
| | 97d299b863 | |
| | e85f31dadf | |
| | c54ed630f9 | |
| | 5122d2f7fa | |
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.3.7" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.4.9" provider-name="Alfa Addon">
     <requires>
         <import addon="xbmc.python" version="2.1.0"/>
         <import addon="script.module.libtorrent" optional="true"/>
@@ -19,14 +19,11 @@
     </assets>
     <news>[B]Estos son los cambios para esta versión:[/B]
 [COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» anitoonstv » asialiveaction
-» cinehindi » danimados
-» mundiseries » pelisculashndu
-» seodiv » serieslan
-» crunchyroll » pelisfox
-» stormo ¤ arreglos internos
-[COLOR green]Gracias a [COLOR yellow]Danielr460, numa00009 y numa00009[/COLOR]
-por su colaboración en esta versión[/COLOR]
+» pelisfox » pelisgratis
+» gamovideo » doomtv
+» usercloud » ciberpeliculashd
+» pordede ¤ arreglos internos
+[COLOR green]Gracias a [B][COLOR yellow]f_y_m[/COLOR][/B] por su colaboración en esta versión[/COLOR]
     </news>
     <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
     <summary lang="en">Browse web pages using Kodi</summary>
@@ -19,6 +19,14 @@
         "enabled": true,
         "visible": true
     },
+    {
+        "id": "include_in_newest_latino",
+        "type": "bool",
+        "label": "Incluir en Novedades - Latino",
+        "default": true,
+        "enabled": true,
+        "visible": true
+    },
     {
         "id": "include_in_global_search",
         "type": "bool",
@@ -33,7 +33,7 @@ def newest(categoria):
     itemlist = []
     item = Item()
     try:
-        if categoria == 'peliculas':
+        if categoria in ['peliculas','latino']:
             item.url = host
         elif categoria == 'infantiles':
             item.url = host + 'category/animacion/'
@@ -20,6 +20,14 @@
         "enabled": true,
         "visible": true
     },
+    {
+        "id": "include_in_newest_latino",
+        "type": "bool",
+        "label": "Incluir en Novedades - Latino",
+        "default": true,
+        "enabled": true,
+        "visible": true
+    },
     {
         "id": "include_in_newest_peliculas",
         "type": "bool",
@@ -50,4 +58,4 @@
         ]
     }
 ]
-}
+}
@@ -217,7 +217,7 @@ def newest(categoria):
     itemlist = []
     item = Item()
     try:
-        if categoria == "peliculas":
+        if categoria in ['peliculas','latino']:
             item.url = host + "movies/newmovies?page=1"
             item.action = "lista"
             itemlist = lista(item)
@@ -25,6 +25,14 @@
         "default": true,
         "enabled": true,
         "visible": true
+    },
+    {
+        "id": "include_in_newest_torrent",
+        "type": "bool",
+        "label": "Incluir en Novedades - Torrent",
+        "default": true,
+        "enabled": true,
+        "visible": true
     }
     ]
 }
@@ -54,6 +54,7 @@ def browser(url):
 api_key = "2e2160006592024ba87ccdf78c28f49f"
 api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
 
+host = 'http://alltorrent.net/'
 
 def mainlist(item):
     logger.info()
@@ -392,3 +393,26 @@ def get_art(item):
             item.extra = item.extra + "|" + item.thumbnail
         else:
             item.extra = item.extra + "|" + item.thumbnail
+
+
+def newest(categoria):
+    logger.info()
+    itemlist = []
+    item = Item()
+    try:
+        if categoria == 'torrent':
+            item.url = host
+
+            itemlist = scraper(item)
+
+            if itemlist[-1].action == "[COLOR olivedrab][B]Siguiente >>[/B][/COLOR]":
+                itemlist.pop()
+
+    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+
+    return itemlist
@@ -11,6 +11,12 @@ from core import scrapertools
 from core import servertools
 from core.item import Item
 from platformcode import config, logger
+from channels import autoplay
 
+list_servers = ['openload',
+                'directo'
+                ]
+list_quality = ['default']
+
 CHANNEL_HOST = "http://animeflv.co"
 CHANNEL_DEFAULT_HEADERS = [
@@ -117,7 +123,8 @@ def __find_series(html):
 
 def mainlist(item):
     logger.info()
 
+    autoplay.init(item.channel, list_servers, list_quality)
     itemlist = list()
 
     itemlist.append(Item(channel=item.channel, action="letras",
@@ -134,6 +141,7 @@ def mainlist(item):
                          url=CHANNEL_HOST + "/Buscar?s="))
 
     itemlist = renumbertools.show_option(item.channel, itemlist)
+    autoplay.show_option(item.channel, itemlist)
 
     return itemlist
 
@@ -179,10 +187,13 @@ def search(item, texto):
         show_list = __find_series(html)
 
         items = []
+        context = renumbertools.context(item)
+        context2 = autoplay.context
+        context.extend(context2)
         for show in show_list:
             title, url, thumbnail, plot = show
             items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
+                              plot=plot, show=title, viewmode="movies_with_plot", context=context))
     except:
         import sys
         for line in sys.exc_info():
@@ -197,10 +208,13 @@ def series(item):
     page_html = get_url_contents(item.url)
     show_list = __find_series(page_html)
     items = []
+    context = renumbertools.context(item)
+    context2 = autoplay.context
+    context.extend(context2)
     for show in show_list:
         title, url, thumbnail, plot = show
         items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
-                          plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
+                          plot=plot, show=title, viewmode="movies_with_plot", context=context))
 
     url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
     if url_next_page:
@@ -292,4 +306,5 @@ def findvideos(item):
         itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show),
                              title="Ver en calidad [%s]" % (qualities[quality_id]), plot=item.plot,
                              fulltitle=item.title))
+    autoplay.start(__sort_by_quality(itemlist), item)
     return __sort_by_quality(itemlist)
@@ -134,8 +134,7 @@ def novedades_episodios(item):
         contentTitle = scrapedtitle.replace('#' + episodio, '')
 
         itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, plot=scrapedplot,
-                             hasContentDetails=True, contentSeason=1, contentTitle=contentTitle))
+                             thumbnail=scrapedthumbnail, plot=scrapedplot, contentSeason=1, contentTitle=contentTitle))
 
     return itemlist
 
@@ -96,7 +96,6 @@ def recientes(item):
         action = "peliculas"
         if not thumb.startswith("http"):
             thumb = "http:%s" % thumb
             action ="findvideos"
-        infoLabels = {'filtro': {"original_language": "ja"}.items()}
         itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
                                    contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
@@ -89,8 +89,7 @@ def start(itemlist, item):
             videoitem.contentTitle=item.contentTitle
             videoitem.contentType=item.contentType
             videoitem.episode_id=item.episode_id
-            videoitem.hasContentDetails=item.hasContentDetails
-            videoitem.infoLabels=item.infoLabels
+            #videoitem.infoLabels=item.infoLabels
             videoitem.thumbnail=item.thumbnail
             #videoitem.title=item.title
         if not config.is_xbmc():
@@ -1,8 +1,9 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
 
 import re
 import urlparse
 
+from core import httptools
 from core import scrapertools
 from core import servertools
 from core.item import Item
@@ -33,8 +34,7 @@ def menupeliculas(item):
         Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre",
              fanart=item.fanart, viewmode="movie_with_plot"))
 
-    # <ul class="submenu2 subcategorias"><li ><a href="/descargas/subcategoria/4/br-scr-dvdscr">BR-Scr / DVDScr</a></li><li ><a href="/descargas/subcategoria/6/dvdr-full">DVDR - Full</a></li><li ><a href="/descargas/subcategoria/1/dvdrip-vhsrip">DVDRip / VHSRip</a></li><li ><a href="/descargas/subcategoria/3/hd">HD</a></li><li ><a href="/descargas/subcategoria/2/hdrip-bdrip">HDRip / BDRip</a></li><li ><a href="/descargas/subcategoria/35/latino">Latino</a></li><li ><a href="/descargas/subcategoria/5/ts-scr-cam">TS-Scr / CAM</a></li><li ><a href="/descargas/subcategoria/7/vos">VOS</a></li></ul>
-    data = scrapertools.cache_page(item.url)
+    data = httptools.downloadpage(item.url).data
     data = scrapertools.get_match(data, '<ul class="submenu2 subcategorias">(.*?)</ul>')
     patron = '<a href="([^"]+)">([^<]+)</a>'
     matches = re.compile(patron, re.DOTALL).findall(data)
@@ -78,7 +78,6 @@ def menudocumentales(item):
     return itemlist
 
 
-# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro
 def search(item, texto, categoria=""):
     logger.info(item.url + " search " + texto)
     itemlist = []
@@ -101,9 +100,7 @@ def search(item, texto, categoria=""):
|
||||
def peliculas(item, paginacion=True):
|
||||
logger.info()
|
||||
url = item.url
|
||||
|
||||
# Descarga la página
|
||||
data = scrapertools.cache_page(url)
|
||||
data = httptools.downloadpage(url).data
|
||||
patron = '<li id="ficha-\d+" class="ficha2[^<]+'
|
||||
patron += '<div class="detalles-ficha"[^<]+'
|
||||
patron += '<span class="nombre-det">Ficha\: ([^<]+)</span>[^<]+'
|
||||
@@ -118,16 +115,11 @@ def peliculas(item, paginacion=True):
         scrapedtitle = title
         scrapedplot = clean_plot(plot)
         scrapedurl = urlparse.urljoin(item.url, url)
-        scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-
-        # Añade al listado de XBMC
+        scrapedthumbnail = urlparse.urljoin("http://bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
         itemlist.append(
             Item(channel=item.channel, action="enlaces", title=scrapedtitle, fulltitle=title, url=scrapedurl,
                  thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedtitle, context="4|5",
                  fanart=item.fanart, viewmode="movie_with_plot"))
 
     # Extrae el paginador
     patron = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \&raquo\;</a>'
     matches = re.compile(patron, re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
@@ -187,7 +179,7 @@ def enlaces(item):
     logger.info()
     itemlist = []
 
-    data = scrapertools.cache_page(item.url)
+    data = httptools.downloadpage(item.url).data
 
     try:
         item.plot = scrapertools.get_match(data, '<span class="ficha-descrip">(.*?)</span>')
@@ -201,18 +193,6 @@ def enlaces(item):
     except:
         pass
 
-    '''
-    <div id="enlaces-34769"><img id="enlaces-cargando-34769" src="/images/cargando.gif" style="display:none;"/></div>
-    </li><li id="box-enlace-330690" class="box-enlace">
-    <div class="box-enlace-cabecera">
-    <div class="datos-usuario"><img class="avatar" src="images/avatars/116305_p.jpg" />Enlaces de:
-    <a class="nombre-usuario" href="/usuario/jerobien">jerobien</a> </div>
-    <div class="datos-act">Actualizado: Hace 8 minutos</div>
-    <div class="datos-boton-mostrar"><a id="boton-mostrar-330690" class="boton" href="javascript:mostrar_enlaces(330690,'b01de63028139fdd348d');">Mostrar enlaces</a></div>
-    <div class="datos-servidores"><div class="datos-servidores-cell"><img src="/images/servidores/ul.to.png" title="uploaded.com" border="0" alt="uploaded.com" /><img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/cloudzer.png" title="clz.to" border="0" alt="clz.to" /></div></div>
-    </div>
-    '''
-
     patron = '<div class="box-enlace-cabecera"[^<]+'
     patron += '<div class="datos-usuario"><img class="avatar" src="([^"]+)" />Enlaces[^<]+'
     patron += '<a class="nombre-usuario" href="[^"]+">([^<]+)</a[^<]+</div>[^<]+'
@@ -222,19 +202,15 @@ def enlaces(item):
 
     matches = re.compile(patron, re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
-    logger.debug("matches=" + repr(matches))
 
     for thumbnail, usuario, fecha, id, id2, servidores in matches:
-        # <img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/muchshare.png" title="muchshare.net" border="0" alt="muchshare.net" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/shareflare.png" title="shareflare.net" border="0" alt="shareflare.net" /><img src="/images/servidores/otros.gif" title="Otros servidores" border="0" alt="Otros" />
         patronservidores = '<img src="[^"]+" title="([^"]+)"'
         matches2 = re.compile(patronservidores, re.DOTALL).findall(servidores)
         lista_servidores = ""
         for servidor in matches2:
             lista_servidores = lista_servidores + servidor + ", "
         lista_servidores = lista_servidores[:-2]
 
         scrapedthumbnail = item.thumbnail
-        # http://www.bajui.org/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
         scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
         scrapedplot = item.plot
         scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"
@@ -250,7 +226,7 @@ def enlaces(item):
 def findvideos(item):
     logger.info()
 
-    data = scrapertools.cache_page(item.url)
+    data = httptools.downloadpage(item.url).data
     itemlist = servertools.find_video_items(data=data)
     for videoitem in itemlist:
         videoitem.channel = item.channel
@@ -1,24 +0,0 @@
-{
-    "id": "bityouth",
-    "name": "Bityouth",
-    "active": true,
-    "adult": false,
-    "language": ["cast"],
-    "thumbnail": "http://s6.postimg.org/6ash180up/bityoulogo.png",
-    "banner": "bityouth.png",
-    "categories": [
-        "torrent",
-        "movie",
-        "tvshow"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": false,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
File diff suppressed because it is too large
@@ -1,31 +0,0 @@
-{
-    "id": "borrachodetorrent",
-    "name": "BorrachodeTorrent",
-    "active": true,
-    "adult": false,
-    "language": ["cast"],
-    "thumbnail": "http://imgur.com/BePrYmy.png",
-    "categories": [
-        "torrent",
-        "movie",
-        "tvshow"
-    ],
-    "settings": [
-        {
-            "id": "modo_grafico",
-            "type": "bool",
-            "label": "Buscar información extra",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        },
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
File diff suppressed because it is too large
@@ -1,24 +0,0 @@
-{
-    "id": "bricocine",
-    "name": "Bricocine",
-    "active": true,
-    "adult": false,
-    "language": ["cast"],
-    "thumbnail": "http://s6.postimg.org/9u8m1ep8x/bricocine.jpg",
-    "banner": "bricocine.png",
-    "categories": [
-        "torrent",
-        "movie",
-        "tvshow"
-    ],
-    "settings": [
-        {
-            "id": "include_in_global_search",
-            "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": true,
-            "enabled": true,
-            "visible": true
-        }
-    ]
-}
File diff suppressed because it is too large
@@ -143,14 +143,10 @@ def peliculas(item):
         contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
         title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
             scrapedtitle, year, quality)
-        thumb_id = scrapertools.find_single_match(scrapedthumbnail, '.*?\/uploads\/(.*?)-')
-        thumbnail = "/%s.jpg" % thumb_id
-        filtro_list = {"poster_path": thumbnail}
-        filtro_list = filtro_list.items()
-
         itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
-                                   url=scrapedurl, infoLabels={'filtro':filtro_list},
-                                   contentTitle=contentTitle, thumbnail=thumbnail,
+                                   url=scrapedurl, infoLabels={'year': year},
+                                   contentTitle=contentTitle, thumbnail=scrapedthumbnail,
                                    title=title, context="buscar_trailer", quality = quality))
 
     tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -168,17 +164,17 @@ def peliculas(item):
 
     for item in itemlist:
         if item.infoLabels['plot'] == '':
-            data = httptools.downloadpage(item.url).data
-            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+            datas = httptools.downloadpage(item.url).data
+            datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
             item.fanart = scrapertools.find_single_match(
-                data, "<meta property='og:image' content='([^']+)' />")
+                datas, "<meta property='og:image' content='([^']+)' />")
             item.fanart = item.fanart.replace('w780', 'original')
-            item.plot = scrapertools.find_single_match(data, '</span></h4><p>([^*]+)</p><h4')
+            item.plot = scrapertools.find_single_match(datas, '</h4><p>(.*?)</p>')
             item.plot = scrapertools.htmlclean(item.plot)
             item.infoLabels['director'] = scrapertools.find_single_match(
-                data, '<div class="name"><a href="[^"]+">([^<]+)</a>')
+                datas, '<div class="name"><a href="[^"]+">([^<]+)</a>')
             item.infoLabels['genre'] = scrapertools.find_single_match(
-                data, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')
+                datas, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')
 
     return itemlist
 
@@ -189,8 +185,7 @@ def generos(item):
 
     data = scrapertools.cache_page(item.url)
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    # logger.info(data)
     # url, title, cantidad
 
     patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
@@ -216,34 +211,34 @@ def year_release(item):
     for scrapedurl, scrapedtitle in matches:
 
         itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
-                        url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
+                                   url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
 
     return itemlist
 
 
 def series(item):
     logger.info()
 
     itemlist = []
 
     data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    # logger.info(datas)
+    data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
 
-    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">'
+    patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">.*?'
+    patron += '<div class="texto">([^<]+)</div>'
 
     matches = scrapertools.find_multiple_matches(data, patron)
 
-    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
+    for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
+        if plot == '':
+            plot = scrapertools.find_single_match(data, '<div class="texto">([^<]+)</div>')
         scrapedtitle = scrapedtitle.replace('Ver ', '').replace(
-            ' Online HD', '').replace('ver ', '').replace(' Online', '')
+            ' Online HD', '').replace('ver ', '').replace(' Online', '').replace(' (Serie TV)', '').strip()
         itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
-                                   contentSerieName=scrapedtitle, show=scrapedtitle,
+                                   contentSerieName=scrapedtitle, show=scrapedtitle, plot=plot,
                                    thumbnail=scrapedthumbnail, contentType='tvshow'))
 
     url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
 
-    tmdb.set_infoLabels(itemlist, __modo_grafico__)
+    tmdb.set_infoLabels(itemlist, __modo_grafico__)
 
     if url_next_page:
@@ -259,7 +254,6 @@ def temporadas(item):
 
     data = httptools.downloadpage(item.url).data
     datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    # logger.info(datas)
     patron = '<span class="title">([^<]+)<i>.*?' # numeros de temporadas
     patron += '<img src="([^"]+)"></a></div>' # capitulos
 
@@ -268,13 +262,13 @@ def temporadas(item):
     for scrapedseason, scrapedthumbnail in matches:
         scrapedseason = " ".join(scrapedseason.split())
         temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
-        new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail)
+        new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='temporadas')
         new_item.infoLabels['season'] = temporada
         new_item.extra = ""
         itemlist.append(new_item)
 
-    tmdb.set_infoLabels(itemlist, __modo_grafico__)
+    tmdb.set_infoLabels(itemlist, __modo_grafico__)
 
     for i in itemlist:
         i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
         if i.infoLabels['title']:
@@ -286,6 +280,11 @@ def temporadas(item):
 
         itemlist.sort(key=lambda it: it.title)
 
+        if config.get_videolibrary_support() and len(itemlist) > 0:
+            itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
+                                 action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
+                                 text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
+
         return itemlist
     else:
         return episodios(item)
@@ -328,7 +327,6 @@ def episodios(item):
     if not item.extra:
         # Obtenemos los datos de todos los capitulos de la temporada mediante multihilos
-        tmdb.set_infoLabels(itemlist, __modo_grafico__)
         tmdb.set_infoLabels(itemlist, __modo_grafico__)
     for i in itemlist:
         if i.infoLabels['title']:
             # Si el capitulo tiene nombre propio añadirselo al titulo del item
@@ -355,13 +353,17 @@ def findvideos(item):
     itemlist = []
 
     data = httptools.downloadpage(item.url).data
 
     data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
-    patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'
 
+    patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for option, url in matches:
+        datas = httptools.downloadpage(urlparse.urljoin(host, url),
+                                       headers={'Referer': item.url}).data
+
+        patron = '<iframe[^>]+src="([^"]+)"'
+        url = scrapertools.find_single_match(datas, patron)
         lang = scrapertools.find_single_match(
             data, '<li><a class="options" href="#option-%s"><b class="icon-play_arrow"><\/b> (.*?)<span class="dt_flag">' % option)
         lang = lang.replace('Español ', '').replace('B.S.O. ', '')
@@ -371,10 +373,9 @@ def findvideos(item):
         itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
                                    server=server, language = lang, text_color=color3))
 
-    itemlist.append(Item(channel=item.channel,
-                         title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
-                         url=item.url, action="add_pelicula_to_library",
-                         thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
-                         extra="findvideos", contentTitle=item.contentTitle))
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
+        itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
+                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+                             thumbnail=thumbnail_host, contentTitle=item.contentTitle))
 
     return itemlist
 
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
 
 import re
 
@@ -25,29 +25,15 @@ list_quality = ['default']
 
 def mainlist(item):
     logger.info()
 
     thumb_series = get_thumb("channels_tvshow.png")
     autoplay.init(item.channel, list_servers, list_quality)
 
     itemlist = list()
 
     itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
                          thumbnail=thumb_series))
     autoplay.show_option(item.channel, itemlist)
 
     return itemlist
 
 
-"""
-def search(item, texto):
-    logger.info()
-    texto = texto.replace(" ","+")
-    item.url = item.url+texto
-    if texto!='':
-        return lista(item)
-"""
-
-
 def lista_gen(item):
     logger.info()
 
@@ -179,11 +165,10 @@ def findvideos(item):
     for link in itemla:
         if server in link:
             url = link.replace('" + ID' + server + ' + "', str(id))
-            if "drive" in server:
-                server1 = 'Gvideo'
-            else:
-                server1 = server
-            itemlist.append(item.clone(url=url, action="play", server=server1,
-                                       title="Enlace encontrado en %s " % (server1.capitalize())))
+            itemlist.append(item.clone(url=url, action="play",
+                                       title="Enlace encontrado en %s "
+                                       ))
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
     scrapertools.printMatches(itemlist)
+    autoplay.start(itemlist, item)
     return itemlist
 
plugin.video.alfa/channels/ciberpeliculashd.json (new file, 62 lines)
@@ -0,0 +1,62 @@
+{
+    "id": "ciberpeliculashd",
+    "name": "Ciberpeliculashd",
+    "active": true,
+    "adult": false,
+    "language": ["lat"],
+    "thumbnail": "https://s17.postimg.org/78tekxeov/ciberpeliculashd1.png",
+    "banner": "",
+    "categories": [
+        "movie"
+    ],
+    "settings": [
+        {
+            "id": "modo_grafico",
+            "type": "bool",
+            "label": "Buscar información extra",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_newest_latino",
+            "type": "bool",
+            "label": "Incluir en Novedades - Latino",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_newest_peliculas",
+            "type": "bool",
+            "label": "Incluir en Novedades - Peliculas",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_newest_infantiles",
+            "type": "bool",
+            "label": "Incluir en Novedades - Infantiles",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
+        {
+            "id": "include_in_newest_terror",
+            "type": "bool",
+            "label": "Incluir en Novedades - terror",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        }
+    ]
+}
plugin.video.alfa/channels/ciberpeliculashd.py (new file, 271 lines)
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+
+__channel__='ciberpeliculashd'
+
+host = "http://ciberpeliculashd.net"
+
+try:
+    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
+except:
+    __modo_grafico__ = True
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+    itemlist.append(Item(channel = item.channel, title = "Películas", text_bold = True, folder = False))
+    itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host + "/?peli=1"))
+    itemlist.append(Item(channel = item.channel, title = " Por género", action = "filtro", url = host, extra = "categories" ))
+    itemlist.append(Item(channel = item.channel, title = " Por calidad", action = "filtro", url = host, extra = "qualitys"))
+    itemlist.append(Item(channel = item.channel, title = " Por idioma", action = "filtro", url = host, extra = "languages"))
+    itemlist.append(Item(channel = item.channel, title = ""))
+    itemlist.append(Item(channel = item.channel, title = "Series", text_bold = True, folder = False))
+    itemlist.append(Item(channel = item.channel, title = " Novedades", action = "series", url = host + "/series/?peli=1"))
+    itemlist.append(Item(channel = item.channel, title = " Nuevos Capitulos", action = "nuevos_capitulos", url = host + "/series/?peli=1"))
+    itemlist.append(Item(channel = item.channel, title = ""))
+    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s="))
+    return itemlist
+
+
+def nuevos_capitulos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'class="episode" href="([^"]+).*?'
+    patron += 'src="([^"]+).*?'
+    patron += 'title="([^"]+).*?'
+    patron += '-->([^<]+).*?'
+    patron += 'created_at">([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepisode, scrapeddays in matches:
+        scrapedtitle = scrapedtitle + " %s (%s)" %(scrapedepisode.strip(), scrapeddays.strip())
+        itemlist.append(Item(action = "findvideos",
+                             channel = item.channel,
+                             title = scrapedtitle,
+                             thumbnail = scrapedthumbnail,
+                             url = scrapedurl
+                             ))
+    return itemlist
+
+
+def series(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    bloque = scrapertools.find_single_match(data, 'loop-posts series.*?panel-pagination pagination-bottom')
+    patron = 'a href="([^"]+).*?'
+    patron += '((?:http|https)://image.tmdb.org[^"]+).*?'
+    patron += 'title="([^"]+)'
+    matches = scrapertools.find_multiple_matches(bloque, patron)
+    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
+        itemlist.append(Item(action = "temporadas",
+                             channel = item.channel,
+                             thumbnail = scrapedthumbnail,
+                             title = scrapedtitle,
+                             contentSerieName = scrapedtitle,
+                             url = scrapedurl
+                             ))
+    if itemlist:
+        tmdb.set_infoLabels(itemlist)
+        page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
+        next_page = scrapertools.find_single_match(item.url,".*?peli=")
+        next_page += "%s" %page
+        itemlist.append(Item(action = "series",
+                             channel = item.channel,
+                             title = "Página siguiente",
+                             url = next_page
+                             ))
+    return itemlist
+
+
+def temporadas(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?</ul>')
+    matches = scrapertools.find_multiple_matches(bloque, '</i> (.*?[0-9]+)')
+    for scrapedtitle in matches:
+        season = scrapertools.find_single_match(scrapedtitle, '[0-9]+')
+        item.infoLabels["season"] = season
+        url = item.url + "?temporada=%s" %season
+        itemlist.append(item.clone(action = "capitulos",
+                                   title = scrapedtitle,
+                                   url = url
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    if config.get_videolibrary_support():
+        itemlist.append(Item(channel=item.channel, title =""))
+        itemlist.append(item.clone(action = "add_serie_to_library",
+                                   channel = item.channel,
+                                   extra = "episodios",
+                                   title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
+                                   url = item.url
+                                   ))
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+    templist = temporadas(item)
+    for tempitem in templist:
+        itemlist += capitulos(tempitem)
+    return itemlist
+
+
+def capitulos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = '<td><a href="([^"]+).*?'
+    patron += '<b>(.*?)</a>'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle in matches:
+        scrapedtitle = scrapedtitle.replace("</b>", "")
+        episode = scrapertools.find_single_match(scrapedtitle, "Capitulo ([0-9]+)")
+        scrapedtitle = scrapedtitle.split(":")[1]
+        scrapedtitle = "%sx%s %s" %(item.infoLabels["season"], episode, scrapedtitle)
+        item.infoLabels["episode"] = episode
+        itemlist.append(item.clone(action = "findvideos",
+                                   title = scrapedtitle,
+                                   url = scrapedurl
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
+
+def newest(categoria):
+    logger.info()
+    itemlist = []
+    item = Item()
+    try:
+        if categoria in ['peliculas','latino']:
+            item.url = host + "/?peli=1"
+        elif categoria == 'infantiles':
+            item.url = host + '/categories/animacion/?peli=1'
+        elif categoria == 'terror':
+            item.url = host + '/categories/terror/?peli=1'
+        itemlist = peliculas(item)
+        if "Pagina" in itemlist[-1].title:
+            itemlist.pop()
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto + "&peli=1"
+    item.extra = "busca"
+    if texto != '':
+        return peliculas(item)
+    else:
+        return []
+
+
+def filtro(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'uk-navbar-nav-subtitle taxonomy-menu-title">%s.*?</ul>' %item.extra
+    bloque = scrapertools.find_single_match(data, patron)
+    patron = "href='([^']+)"
+    patron += "'>([^<]+)"
+    matches = scrapertools.find_multiple_matches(bloque, patron)
+    for url, titulo in matches:
+        itemlist.append(Item(channel = item.channel,
+                             action = "peliculas",
+                             title = titulo,
+                             url = url + "/?peli=1"
+                             ))
+    return itemlist
+
+
+def peliculas(item):
+    logger.info()
+    itemlist = []
+    infoLabels = dict()
+    data = httptools.downloadpage(item.url).data
+    bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom')
+    patron = 'a href="([^"]+)".*?'
+    patron += 'img alt="([^"]+)".*?'
+    patron += '((?:http|https)://image.tmdb.org[^"]+)".*?'
+    patron += 'a href="([^"]+)".*?'
+    matches = scrapertools.find_multiple_matches(bloque, patron)
+    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedurl1 in matches:
+        scrapedtitle = scrapedtitle.replace(" Online imagen","").replace("Pelicula ","")
+        year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]+)\)")
+        if year:
+            year = int(year)
+        else:
+            year = 0
+        fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
+        if "serie" in scrapedurl:
+            action = "temporadas"
+            infoLabels ['tvshowtitle'] = scrapedtitle
+        else:
+            action = "findvideos"
+            infoLabels ['tvshowtitle'] = ""
+        infoLabels ['year'] = year
+        itemlist.append(Item(action = action,
+                             channel = item.channel,
+                             fulltitle = fulltitle,
+                             thumbnail = scrapedthumbnail,
+                             infoLabels = infoLabels,
+                             title = scrapedtitle,
+                             url = scrapedurl
+                             ))
+    if itemlist:
+        tmdb.set_infoLabels(itemlist)
+        page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
+        next_page = scrapertools.find_single_match(item.url,".*?peli=")
+        next_page += "%s" %page
+        itemlist.append(Item(action = "peliculas",
+                             channel = item.channel,
+                             title = "Página siguiente",
+                             url = next_page
+                             ))
+    return itemlist
+
+
+def findvideos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'src="([^&]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl in matches:
+        title = "Ver en: %s"
+        itemlist.append(item.clone(action = "play",
+                                   title = title,
+                                   url = scrapedurl
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+    if itemlist:
+        itemlist.append(Item(channel = item.channel))
+        itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
+                                   text_color="magenta"))
+        # Opción "Añadir esta película a la biblioteca de KODI"
+        if item.extra != "library":
+            if config.get_videolibrary_support():
+                itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
+                                     action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
+                                     fulltitle = item.fulltitle
+                                     ))
+    return itemlist
+
+
+def play(item):
+    item.thumbnail = item.contentThumbnail
+    return [item]
@@ -55,6 +55,22 @@
         "default": true,
         "enabled": true,
         "visible": true
+    },
+    {
+        "id": "include_in_newest_castellano",
+        "type": "bool",
+        "label": "Incluir en Novedades - Castellano",
+        "default": true,
+        "enabled": true,
+        "visible": true
+    },
+    {
+        "id": "include_in_newest_latino",
+        "type": "bool",
+        "label": "Incluir en Novedades - Latino",
+        "default": true,
+        "enabled": true,
+        "visible": true
     }
     ]
 }
@@ -378,12 +378,14 @@ def newest(categoria):
     itemlist = []
     item = Item()
     try:
-        if categoria == 'peliculas':
-            item.url = 'http://www.cinecalidad.to'
+        if categoria in ['peliculas','latino']:
+            item.url = 'http://www.cinecalidad.com'
         elif categoria == 'infantiles':
-            item.url = 'http://www.cinecalidad.to/genero-peliculas/infantil/'
+            item.url = 'http://www.cinecalidad.com/genero-peliculas/infantil/'
         elif categoria == 'terror':
-            item.url = 'http://www.cinecalidad.to/genero-peliculas/terror/'
+            item.url = 'http://www.cinecalidad.com/genero-peliculas/terror/'
+        elif categoria == 'castellano':
+            item.url = 'http://www.cinecalidad.com/espana/'
         itemlist = peliculas(item)
         if itemlist[-1].title == 'Página siguiente >>':
             itemlist.pop()
@@ -512,7 +512,7 @@ def episodios(item):
     else:
         action = "menu_info_episode"
 
-    seasons = scrapertools.find_multiple_matches(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
+    seasons = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
     for i, url in enumerate(seasons):
         if i != 0:
             data_season = httptools.downloadpage(url, add_referer=True).data
@@ -18,6 +18,14 @@
         "enabled": false,
         "visible": false
     },
+    {
+        "id": "include_in_newest_latino",
+        "type": "bool",
+        "label": "Incluir en Novedades - Latino",
+        "default": true,
+        "enabled": true,
+        "visible": true
+    },
     {
         "id": "include_in_newest_peliculas",
         "type": "bool",
@@ -35,4 +43,4 @@
         "visible": true
     }
 ]
-}
+}
@@ -10,7 +10,7 @@ from core import tmdb
 from core.item import Item
 from platformcode import config, logger
 
-host = 'http://cinefoxtv.net/'
+host = 'http://verhdpelis.com/'
 headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
            ['Referer', host]]
 
@@ -193,7 +193,7 @@ def newest(categoria):
     item = Item()
     # categoria='peliculas'
     try:
-        if categoria == 'peliculas':
+        if categoria in ['peliculas','latino']:
             item.url = host + 'page/1.html'
         elif categoria == 'infantiles':
             item.url = host + 'peliculas-de-genero/infantil/1.html'
@@ -44,6 +44,30 @@
         "enabled": true,
         "visible": true
     },
+    {
+        "id": "include_in_newest_terror",
+        "type": "bool",
+        "label": "Incluir en Novedades - terror",
+        "default": true,
+        "enabled": true,
+        "visible": true
+    },
+    {
+        "id": "include_in_newest_castellano",
+        "type": "bool",
+        "label": "Incluir en Novedades - Castellano",
+        "default": true,
+        "enabled": true,
+        "visible": true
+    },
+    {
+        "id": "include_in_newest_latino",
+        "type": "bool",
+        "label": "Incluir en Novedades - Latino",
+        "default": true,
+        "enabled": true,
+        "visible": true
+    },
     {
         "id": "modo_grafico",
         "type": "bool",
@@ -90,26 +90,30 @@ def newest(categoria):
         if categoria == 'peliculas':
             item.url = CHANNEL_HOST
             item.action = "peliculas"
-            itemlist = peliculas(item)
-
-            if itemlist[-1].action == "peliculas":
-                itemlist.pop()
-
         elif categoria == 'documentales':
             item.url = CHANNEL_HOST + "genero/documental/"
             item.action = "peliculas"
-            itemlist = peliculas(item)
-
-            if itemlist[-1].action == "peliculas":
-                itemlist.pop()
-
         elif categoria == 'infantiles':
-            item.url = CHANNEL_HOST + "genero/infantil/"
+            item.url = CHANNEL_HOST + "genero/animacion/"
            item.action = "peliculas"
-            itemlist = peliculas(item)
-
-            if itemlist[-1].action == "peliculas":
-                itemlist.pop()
        elif categoria == 'terror':
             item.url = CHANNEL_HOST + "genero/terror/"
             item.action = "peliculas"
 
+        elif categoria == 'castellano':
+            item.url = CHANNEL_HOST + "idioma/espanol/"
+            item.action = "peliculas"
+
+        elif categoria == 'latino':
+            item.url = CHANNEL_HOST + "idioma/latino/"
+            item.action = "peliculas"
+
+        itemlist = peliculas(item)
+        if itemlist[-1].action == "peliculas":
+            itemlist.pop()
 
     # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
     except:
@@ -178,18 +182,15 @@ def destacadas(item):
     matches = scrapertools.find_multiple_matches(bloque, patron)
     for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
         scrapedurl = CHANNEL_HOST + scrapedurl
         scrapedtitle = scrapedtitle.replace("Ver ", "")
-        new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
+        itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
                               url=scrapedurl, thumbnail=scrapedthumbnail,
-                              contentType="movie")
-        itemlist.append(new_item)
-
+                              contentType="movie"
+                              ))
     # Extrae el paginador
     next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s+><span [^>]+>&raquo;</span>')
     if next_page_link:
         itemlist.append(
             item.clone(action="destacadas", title=">> Página siguiente", url=next_page_link, text_color=color3))
 
     return itemlist
 
@@ -239,13 +240,9 @@ def findvideos(item):
 
     # Busca el argumento
     data = httptools.downloadpage(item.url).data
-    year = scrapertools.find_single_match(item.title, "\(([0-9]+)")
-
-    tmdb.set_infoLabels(item, __modo_grafico__)
-
-    if not item.infoLabels.get('plot'):
-        plot = scrapertools.find_single_match(data, '<div class="sinopsis"><p>(.*?)</p>')
-        item.infoLabels['plot'] = plot
+    if item.infoLabels["year"]:
+        tmdb.set_infoLabels(item, __modo_grafico__)
 
     if filtro_enlaces != 0:
         list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
@@ -263,15 +260,11 @@ def findvideos(item):
     if itemlist:
         itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                                    text_color="magenta"))
-        # Opción "Añadir esta película a la videoteca"
-        if item.extra != "library":
-            if config.get_videolibrary_support():
+        if config.get_videolibrary_support():
             itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
                                  action="add_pelicula_to_library", url=item.url, fulltitle = item.fulltitle
                                  ))
-
     else:
         itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3))
     return itemlist
@@ -296,6 +289,8 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
         url = scrapertools.find_single_match(bloque1, patron)
         if "goo.gl" in url:
             url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
+        if "drive.php" in url:
+            scrapedserver = "gvideo"
         if "player" in url:
             scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
             if "ok" in scrapedserver: scrapedserver = "okru"
@@ -348,12 +343,14 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
 def play(item):
     logger.info()
     itemlist = []
-    if "api.cinetux" in item.url or item.server == "okru":
+    if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
         data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
         id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
-        item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
+        item.url = "http://docs.google.com/get_video_info?docid=" + id
         if item.server == "okru":
             item.url = "https://ok.ru/videoembed/" + id
+        if item.server == "youtube":
+            item.url = "https://www.youtube.com/embed/" + id
     elif "links" in item.url or "www.cinetux.me" in item.url:
         data = httptools.downloadpage(item.url).data
         scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')
@@ -365,6 +362,9 @@ def play(item):
         scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
             "location", "")
         item.url = scrapedurl
-    item.thumbnail = item.contentThumbnail
-    item.server = servertools.get_server_from_url(item.url)
-    return [item]
+    item.server = ""
+    itemlist.append(item.clone())
+    itemlist = servertools.get_servers_itemlist(itemlist)
+    for i in itemlist:
+        i.thumbnail = i.contentThumbnail
+    return itemlist
@@ -2,11 +2,15 @@
 
 import re
 
+from core import filetools
+from core import jsontools
 from core import httptools
 from core import scrapertools
+from core import servertools
 from core import tmdb
+from core import videolibrarytools
 from core.item import Item
-from platformcode import config, logger
+from platformcode import config, platformtools, logger
 
 host = "http://www.clasicofilm.com/"
 # Configuracion del canal
@@ -47,7 +51,6 @@ def mainlist(item):
 
 
 def configuracion(item):
-    from platformcode import platformtools
     ret = platformtools.show_channel_settings()
     platformtools.itemlist_refresh()
     return ret
@@ -55,13 +58,9 @@ def configuracion(item):
 
 def search(item, texto):
     logger.info()
-
-    data = httptools.downloadpage(host).data
-    cx = scrapertools.find_single_match(data, "var cx = '([^']+)'")
     texto = texto.replace(" ", "%20")
-    item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www.google.com&start=0" % (
-        cx, texto)
-
+    item.url = host + "search?q=%s" % texto
     try:
         return busqueda(item)
     # Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -104,7 +103,6 @@ def peliculas(item):
     data = httptools.downloadpage(item.url).data
 
     data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
-    from core import jsontools
     data = jsontools.load(data)["feed"]
 
     for entry in data["entry"]:
@@ -133,7 +131,6 @@ def peliculas(item):
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
except:
|
||||
pass
|
||||
|
||||
actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
|
||||
totalresults = int(data["openSearch$totalResults"]["$t"])
|
||||
if actualpage + 20 < totalresults:
|
||||
@@ -146,48 +143,22 @@ def peliculas(item):
|
||||
def busqueda(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
item.text_color = color2
|
||||
|
||||
# Descarga la página
|
||||
data = httptools.downloadpage(item.url).data
|
||||
|
||||
from core import jsontools
|
||||
data = jsontools.load(data)
|
||||
|
||||
for entry in data["results"]:
|
||||
try:
|
||||
title = entry["richSnippet"]["metatags"]["ogTitle"]
|
||||
url = entry["richSnippet"]["metatags"]["ogUrl"]
|
||||
thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
|
||||
except:
|
||||
continue
|
||||
|
||||
try:
|
||||
title_split = re.split(r"\s*\((\d)", title, 1)
|
||||
year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')
|
||||
fulltitle = title_split[0]
|
||||
except:
|
||||
fulltitle = title
|
||||
year = ""
|
||||
if not "DVD" in title and not "HDTV" in title and not "HD-" in title:
|
||||
continue
|
||||
infolabels = {'year': year}
|
||||
new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle,
|
||||
url=url, thumbnail=thumbnail, infoLabels=infolabels,
|
||||
contentTitle=fulltitle, contentType="movie")
|
||||
itemlist.append(new_item)
|
||||
|
||||
try:
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
except:
|
||||
pass
|
||||
|
||||
actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
|
||||
totalresults = int(data["cursor"]["resultCount"])
|
||||
if actualpage + 20 <= totalresults:
|
||||
url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
|
||||
itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente", url=url_next))
|
||||
|
||||
patron = """post-title entry-titl.*?href='([^']+)'"""
|
||||
patron += """>([^<]+).*?"""
|
||||
patron += """src="([^"]+)"""
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
|
||||
year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]{4})\)")
|
||||
ctitle = scrapedtitle.split("(")[0].strip()
|
||||
itemlist.append(item.clone(action = "findvideos",
|
||||
contentTitle = ctitle,
|
||||
infoLabels = {"year" : year},
|
||||
thumbnail = scrapedthumbnail,
|
||||
title = scrapedtitle,
|
||||
url = scrapedurl
|
||||
))
|
||||
tmdb.set_infoLabels(itemlist, __modo_grafico__)
|
||||
return itemlist
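# ------------------------------------------------------------------
# Boceto del nuevo scraping de busqueda() en clasicofilm: ya no se usa
# la API de Google CSE, sino directamente el HTML del blog. La regex es
# la del diff; el HTML de ejemplo es inventado.
import re

html = ("<h3 class='post-title entry-title'><a href='http://x/casablanca'>"
        "Casablanca (1942)</a></h3><img src=\"http://x/thumb.jpg\"")
patron = r"post-title entry-titl.*?href='([^']+)'>([^<]+).*?src=\"([^\"]+)\""
for url, titulo, thumb in re.findall(patron, html, re.DOTALL):
    anio = re.search(r"\((\d{4})\)", titulo)
    print(url, titulo.split("(")[0].strip(), anio.group(1) if anio else "")
# -> http://x/casablanca Casablanca 1942
# ------------------------------------------------------------------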

@@ -197,9 +168,10 @@ def generos(item):

    # Descarga la página
    data = httptools.downloadpage(item.url).data
    patron = '<b>([^<]+)</b><br/>\s*<script src="([^"]+)"'
    patron = '<b>([^<]+)</b><br\s*/>\s*<script src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedtitle, scrapedurl in matches:
        scrapedurl = scrapedurl.replace("&amp;", "&")
        scrapedurl = scrapedurl.replace("max-results=500", "start-index=1&max-results=20") \
            .replace("recentpostslist", "finddatepost")
        itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
@@ -210,13 +182,13 @@ def generos(item):


def findvideos(item):
    from core import servertools

    if item.infoLabels["tmdb_id"]:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    data = httptools.downloadpage(item.url).data
    iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
    data = data.replace("googleusercontent", "malo")  # para que no busque enlaces erroneos de gvideo
    if "goo.gl/" in iframe:
        data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
    itemlist = servertools.find_video_items(item, data)
@@ -226,13 +198,11 @@ def findvideos(item):
    title = "Añadir película a la videoteca"
    if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
        try:
            from core import filetools
            movie_path = filetools.join(config.get_videolibrary_path(), 'CINE')
            files = filetools.walk(movie_path)
            for dirpath, dirname, filename in files:
                for f in filename:
                    if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"):
                        from core import videolibrarytools
                        head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f))
                        canales = it.library_urls.keys()
                        canales.sort()

@@ -21,6 +21,14 @@
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
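# ------------------------------------------------------------------
# Nota: este bloque de ajustes declara el nuevo interruptor
# "include_in_newest_torrent". El canal lo leería con config.get_setting
# (firma de dos argumentos confirmada en estos mismos diffs); el nombre
# de canal "cuelgame" se deduce del hunk siguiente y es una suposición:
#
#     from platformcode import config
#     incluir = config.get_setting("include_in_newest_torrent", "cuelgame")
# ------------------------------------------------------------------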
@@ -1223,3 +1223,25 @@ def browser(url):
    response = r.read()

    return response

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = 'http://cuelgame.net/?category=4'

            itemlist = scraper(item)

            if itemlist[-1].action == "Página siguiente >>":
                itemlist.pop()

    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
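# ------------------------------------------------------------------
# Boceto del contrato que comparten todas las funciones newest(categoria)
# añadidas en estos commits: devolver los items de la categoría pedida,
# descartar el item de paginación si quedó al final y devolver [] ante
# cualquier fallo para no romper el canal de Novedades. Nombres y datos
# de ejemplo inventados.
class FakeItem(object):
    def __init__(self, title):
        self.title = title

def scraper_de_ejemplo():
    return [FakeItem("Peli 1"), FakeItem(">> Página Siguiente")]

def newest_sketch(categoria):
    itemlist = []
    try:
        if categoria == 'torrent':
            itemlist = scraper_de_ejemplo()
        if itemlist and "iguiente" in itemlist[-1].title:
            itemlist.pop()  # fuera el enlace de paginación
    except Exception:
        return []
    return itemlist

print([i.title for i in newest_sketch('torrent')])  # -> ['Peli 1']
# ------------------------------------------------------------------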

@@ -43,6 +43,14 @@
      "default": false,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -42,12 +42,12 @@ def mainlist(item):
                               fanart="http://i.imgur.com/ggFFR8o.png"))
    itemlist.append(item.clone(title="", action=""))
    itemlist.append(item.clone(title="Buscar...", action="search"))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
    itemlist.append(item.clone(action="setting_channel", title="Configurar canal...", text_color="gold", folder=False))

    return itemlist


def configuracion(item):
def setting_channel(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
@@ -108,13 +108,15 @@ def busqueda(item):

def lista(item):
    logger.info()
    itemlist = []
    itemlist = list()

    itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/peliculas" % host))
    itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host))
    itemlist.append(item.clone(title="Dvdrip", action="entradas", url="%s/peliculas/dvdrip" % host))
    itemlist.append(item.clone(title="HD (720p/1080p)", action="entradas", url="%s/peliculas/hd" % host))
    itemlist.append(item.clone(title="4K", action="entradas", url="%s/peliculas/4k" % host))
    itemlist.append(item.clone(title="HDRIP", action="entradas", url="%s/peliculas/hdrip" % host))

    itemlist.append(item.clone(title="Latino", action="entradas",
                               url="%s/peliculas/latino-peliculas" % host))
    itemlist.append(item.clone(title="VOSE", action="entradas", url="%s/peliculas/subtituladas" % host))
@@ -125,7 +127,7 @@ def lista(item):

def lista_series(item):
    logger.info()
    itemlist = []
    itemlist = list()

    itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/series/" % host))
    itemlist.append(item.clone(title="Miniseries", action="entradas", url="%s/series/miniseries" % host))
@@ -254,7 +256,7 @@ def episodios(item):
    return itemlist


def epienlaces(item):
def episode_links(item):
    logger.info()
    itemlist = []
    item.text_color = color3
@@ -286,7 +288,7 @@ def epienlaces(item):
            else:
                if servertools.is_server_enabled(scrapedserver):
                    try:
                        servers_module = __import__("servers." + scrapedserver)
                        # servers_module = __import__("servers." + scrapedserver)
                        lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
                                                        extra=item.url))
                    except:
@@ -302,13 +304,14 @@ def epienlaces(item):

def findvideos(item):
    logger.info()
    if item.contentSeason!='':
        return epienlaces(item)
    if item.contentSeason != '':
        return episode_links(item)

    itemlist = []
    item.text_color = color3

    data = get_data(item.url)

    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year:
@@ -346,9 +349,9 @@ def findvideos(item):
    patron = 'make_links.*?,[\'"]([^"\']+)["\']'
    matches = scrapertools.find_multiple_matches(data_online, patron)
    for i, code in enumerate(matches):
        enlace = mostrar_enlaces(code)
        enlaces = servertools.findvideos(data=enlace[0])
        if enlaces and "peliculas.nu" not in enlaces:
        enlace = show_links(code)
        links = servertools.findvideos(data=enlace[0])
        if links and "peliculas.nu" not in links:
            if i == 0:
                extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
                size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()
@@ -362,8 +365,8 @@ def findvideos(item):
                new_item.title += " +INFO"
                itemlist.append(new_item)

            title = " Ver vídeo en " + enlaces[0][2]
            itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))
            title = " Ver vídeo en " + links[0][2]
            itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
    scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
@@ -419,9 +422,9 @@ def findvideos(item):
                continue
            if servertools.is_server_enabled(scrapedserver):
                try:
                    servers_module = __import__("servers." + scrapedserver)
                    # servers_module = __import__("servers." + scrapedserver)
                    # Saca numero de enlaces
                    urls = mostrar_enlaces(scrapedurl)
                    urls = show_links(scrapedurl)
                    numero = str(len(urls))
                    titulo = " %s - Nº enlaces: %s" % (titulo, numero)
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
@@ -449,12 +452,13 @@ def play(item):
                                 headers=headers, follow_redirects=False).data

    url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace("\\", "")
    if "enlacesmix" in url:

    if "enlacesmix" in url or "enlacesws.com" in url:
        data = httptools.downloadpage(url, headers={'Referer': item.extra}, follow_redirects=False).data
        url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
    enlaces = servertools.findvideosbyserver(url, item.server)
    if enlaces:
        itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    links = servertools.findvideosbyserver(url, item.server)
    if links:
        itemlist.append(item.clone(action="play", server=links[0][2], url=links[0][1]))
    else:
        itemlist.append(item.clone())

@@ -465,13 +469,13 @@ def enlaces(item):
    logger.info()
    itemlist = []

    urls = mostrar_enlaces(item.extra)
    urls = show_links(item.extra)
    numero = len(urls)
    for enlace in urls:
        enlaces = servertools.findvideos(data=enlace)
        if enlaces:
            for link in enlaces:
                if "/folder/" in enlace:
    for url in urls:
        links = servertools.findvideos(data=url)
        if links:
            for link in links:
                if "/folder/" in url:
                    titulo = link[0]
                else:
                    titulo = "%s - Enlace %s" % (item.title.split("-")[0], str(numero))
@@ -482,7 +486,7 @@ def enlaces(item):
    return itemlist


def mostrar_enlaces(data):
def show_links(data):
    import base64
    data = data.split(",")
    len_data = len(data)
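# ------------------------------------------------------------------
# Boceto ilustrativo de lo que hace show_links (antes mostrar_enlaces):
# los enlaces llegan como trozos en base64 separados por comas. Ejemplo
# autocontenido con datos inventados:
import base64

def show_links_sketch(data):
    # cada trozo separado por comas es un enlace en base64
    return [base64.b64decode(parte) for parte in data.split(",")]

print(show_links_sketch("aHR0cDovL2EvMQ==,aHR0cDovL2IvMg=="))
# -> ['http://a/1', 'http://b/2']  (en Python 2; en Python 3 serían bytes)
# ------------------------------------------------------------------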
@@ -535,3 +539,38 @@ def get_data(url_orig, get_host=False):
            break

    return response.data


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = host+'/peliculas'

            itemlist = entradas(item)
            if itemlist[-1].title == ">> Siguiente":
                itemlist.pop()

            item.url = host + '/series'

            itemlist.extend(entradas(item))
            if itemlist[-1].title == ">> Siguiente":
                itemlist.pop()

            item.url = host + '/anime'

            itemlist.extend(entradas(item))

            if itemlist[-1].title == ">> Siguiente":
                itemlist.pop()

    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    return itemlist

@@ -35,6 +35,14 @@
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -94,7 +94,7 @@ def newest(categoria):
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
        if categoria in ['peliculas', 'torrent']:
            item.url = "http://www.divxatope1.com/peliculas"

        elif categoria == 'series':
@@ -260,14 +260,16 @@ def findvideos(item):
    item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot

    link = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
    if link != "":
        link = "http://www.divxatope1.com/" + link
        logger.info("torrent=" + link)
    al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
    if al_url_fa == "":
        al_url_fa = scrapertools.find_single_match(data,
                                                   'location\.href.*?=.*?"http:\/\/divxatope1.com/(.*?)"')
    if al_url_fa != "":
        al_url_fa = "http://www.divxatope1.com/" + al_url_fa
        logger.info("torrent=" + al_url_fa)
        itemlist.append(
            Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
                 url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
                 url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
                 parentContent=item))

    patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
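# ------------------------------------------------------------------
# Boceto del nuevo doble intento de divxatope1 para localizar el .torrent:
# primero el enlace vía tumejorserie/tumejorjuego y, si no existe, el enlace
# directo del dominio. Regex tomadas del diff; el HTML de ejemplo es inventado.
import re

def find_torrent_link(data):
    link = re.search(r'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"', data)
    if not link:
        link = re.search(r'location\.href.*?=.*?"http:\/\/divxatope1.com/(.*?)"', data)
    return "http://www.divxatope1.com/" + link.group(1) if link else ""

html = 'location.href = "http://divxatope1.com/download/peli.torrent"'
print(find_torrent_link(html))
# -> http://www.divxatope1.com/download/peli.torrent
# ------------------------------------------------------------------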

@@ -26,6 +26,14 @@
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -1020,3 +1020,26 @@ def ext_size(url):
        ext_v = ext_v + " -- No reproducible"
        size = ""
    return ext_v, size

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = 'http://www.divxtotal.com/peliculas/'
            item.contentType="movie"

            itemlist = scraper(item)

            if itemlist[-1].title == "[COLOR springgreen][B]Siguiente >>[/B][/COLOR]":
                itemlist.pop()

    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -55,6 +55,14 @@
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_latino",
      "type": "bool",
      "label": "Incluir en Novedades - Latino",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -202,7 +202,7 @@ def newest(categoria):
    item = Item()
    # categoria='peliculas'
    try:
        if categoria == 'peliculas':
        if categoria in ['peliculas', 'latino']:
            item.url = host +'peliculas/page/1'
        elif categoria == 'infantiles':
            item.url = host + 'categoria/animacion/'
@@ -222,9 +222,14 @@ def newest(categoria):
def findvideos(item):
    logger.info()
    itemlist = []
    #itemlist = get_url(item)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    player_vip = scrapertools.find_single_match(data, 'src=(https:\/\/content.jwplatform.com\/players.*?js)')
    data_m3u8 = httptools.downloadpage(player_vip, headers= {'referer':item.url}).data
    data_m3u8 = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data_m3u8)
    url_m3u8 = scrapertools.find_single_match(data_m3u8,',sources:.*?file: (.*?),')
    itemlist.append(item.clone(url=url_m3u8, action='play'))

    patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
    matches = re.compile(patron, re.DOTALL).findall(data)
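# ------------------------------------------------------------------
# Boceto del nuevo flujo de findvideos(): localizar el player de JWPlatform
# en la página, descargarlo con el Referer correcto y extraer la URL del
# .m3u8. Regex del diff; las cadenas de ejemplo son inventadas.
import re

pagina = 'foo src=https://content.jwplatform.com/players/abc-123.js bar'
player = re.search(r'src=(https:\/\/content.jwplatform.com\/players.*?js)', pagina).group(1)

js = ",sources: [{file: https://cdn.example/video.m3u8,label:hd}]"
url_m3u8 = re.search(r',sources:.*?file: (.*?),', js).group(1)
print(player, url_m3u8)
# -> https://content.jwplatform.com/players/abc-123.js https://cdn.example/video.m3u8
# ------------------------------------------------------------------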

@@ -12,5 +12,15 @@
    "tvshow",
    "documentary",
    "vos"
  ],
  "settings":[
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}

@@ -105,3 +105,24 @@ def play(item):
                         thumbnail=item.thumbnail, plot=item.plot, folder=False))

    return itemlist

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = 'http://www.elitetorrent.wesconference.net/categoria/2/peliculas/modo:mini'

            itemlist = peliculas(item)
            if itemlist[-1].title == "Página siguiente >>":
                itemlist.pop()

    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -31,6 +31,14 @@
      "VOS"
    ]
  },
    {
      "id": "include_in_newest_latino",
      "type": "bool",
      "label": "Incluir en Novedades - Latino",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_peliculas",
      "type": "bool",
@@ -56,4 +64,4 @@
      "visible": true
    }
  ]
}
}

@@ -457,7 +457,7 @@ def newest(categoria):
    item = Item()
    # categoria='peliculas'
    try:
        if categoria == 'peliculas':
        if categoria in ['peliculas','latino']:
            item.url = host
        elif categoria == 'infantiles':
            item.url = host + 'search?q=animación'

@@ -18,6 +18,22 @@
      "default": false,
      "enabled": false,
      "visible": false
    },
    {
      "id": "include_in_newest_castellano",
      "type": "bool",
      "label": "Incluir en Novedades - Castellano",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_latino",
      "type": "bool",
      "label": "Incluir en Novedades - Latino",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}

@@ -12,10 +12,11 @@ from core import tmdb
from core.item import Item
from platformcode import logger

host = 'http://gnula.mobi/'
def mainlist(item):
    logger.info()
    itemlist = list()
    itemlist.append(item.clone(title="Novedades", action="peliculas", url="http://gnula.mobi/"))
    itemlist.append(item.clone(title="Novedades", action="peliculas", url=host))
    itemlist.append(item.clone(title="Castellano", action="peliculas",
                               url="http://www.gnula.mobi/tag/espanol/"))
    itemlist.append(item.clone(title="Latino", action="peliculas", url="http://gnula.mobi/tag/latino/"))
@@ -113,3 +114,25 @@ def findvideos(item):
def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host
        elif categoria == 'castellano':
            item.url = host +'tag/espanol/'
        elif categoria == 'latino':
            item.url = host +'tag/latino/'
        itemlist = peliculas(item)
        if "Pagina" in itemlist[-1].title:
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -51,8 +51,6 @@ def generos(item):

def peliculas(item):
    logger.info()

    # Descarga la página
    data = httptools.downloadpage(item.url).data
    patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
    patron += '<img src="([^"]+)"></span></a>(.*?)<br'
@@ -61,25 +59,21 @@ def peliculas(item):
    for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
        language = []
        plot = scrapertools.htmlclean(resto).strip()
        logger.debug('plot: %s' % plot)
        languages = scrapertools.find_multiple_matches(plot, r'\((V.)\)')
        quality = scrapertools.find_single_match(plot, r'(?:\[.*?\].*?)\[(.*?)\]')
        for lang in languages:
            language.append(lang)
        logger.debug('languages: %s' % languages)
        title = scrapedtitle + " " + plot
        contentTitle = scrapedtitle
        url = item.url + scrapedurl
        if not scrapedurl.startswith("http"):
            scrapedurl = item.url + scrapedurl
        itemlist.append(Item(channel = item.channel,
                             action = 'findvideos',
                             title = title,
                             url = url,
                             url = scrapedurl,
                             thumbnail = scrapedthumbnail,
                             plot = plot,
                             hasContentDetails = True,
                             contentTitle = contentTitle,
                             contentTitle = scrapedtitle,
                             contentType = "movie",
                             context = ["buscar_trailer"],
                             language=language,
                             quality=quality
                             ))
@@ -89,13 +83,11 @@ def peliculas(item):
def findvideos(item):
    logger.info("item=" + item.tostring())
    itemlist = []

    # Descarga la página para obtener el argumento
    data = httptools.downloadpage(item.url).data
    item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
    item.plot = scrapertools.htmlclean(item.plot).strip()
    item.contentPlot = item.plot
    patron = 'Ver película online.*?>.*?>([^<]+)'
    patron = '<strong>Ver película online.*?>.*?>([^<]+)'
    scrapedopcion = scrapertools.find_single_match(data, patron)
    titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
    bloque = scrapertools.find_multiple_matches(data, 'contenedor_tab.*?/table')
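# ------------------------------------------------------------------
# Boceto de cómo el nuevo peliculas() deduce idioma y calidad del texto
# que acompaña a cada título (regex del diff; la cadena de ejemplo es
# inventada):
import re

plot = "Drama (VO) (VL) [720p] [Castellano]"
languages = re.findall(r'\((V.)\)', plot)
quality = re.search(r'(?:\[.*?\].*?)\[(.*?)\]', plot)
print(languages, quality.group(1))  # -> ['VO', 'VL'] Castellano
# ------------------------------------------------------------------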

66  plugin.video.alfa/channels/pasateatorrent.json → plugin.video.alfa/channels/grantorrent.json  (Executable file → Normal file)
@@ -1,33 +1,33 @@
{
  "id": "pasateatorrent",
  "name": "PasateaTorrent",
  "active": true,
  "adult": false,
  "language": ["cast"],
  "thumbnail": "http://imgur.com/iLeISt0.png",
  "banner": "pasateatorrent.png",
  "fanart": "http://imgur.com/uexmGEg.png",
  "categories": [
    "torrent",
    "movie",
    "tvshow"
  ],
  "settings": [
    {
      "id": "include_in_global_search",
      "type": "bool",
      "label": "Incluir en busqueda global",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "modo_grafico",
      "type": "bool",
      "label": "Buscar información extra (TMDB)",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
{
  "id": "grantorrent",
  "name": "GranTorrent",
  "active": true,
  "adult": false,
  "language": ["cast"],
  "thumbnail": "grantorrent.jpg",
  "banner": "grantorrent.png",
  "fanart": "grantorrent.png",
  "categories": [
    "torrent",
    "movie",
    "tvshow"
  ],
  "settings": [
    {
      "id": "include_in_global_search",
      "type": "bool",
      "label": "Incluir en busqueda global",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "modo_grafico",
      "type": "bool",
      "label": "Buscar información extra (TMDB)",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}

546  plugin.video.alfa/channels/pasateatorrent.py → plugin.video.alfa/channels/grantorrent.py  (Executable file → Normal file)
@@ -1,273 +1,273 @@
# -*- coding: utf-8 -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger

host = "https://pasateatorrent.com/"

dict_url_seasons = dict()
__modo_grafico__ = config.get_setting('modo_grafico', 'pasateatorrent')


def mainlist(item):
    logger.info()

    thumb_movie = get_thumb("channels_movie.png")
    thumb_tvshow = get_thumb("channels_tvshow.png")

    itemlist = list()
    itemlist.append(
        Item(channel=item.channel, title="Peliculas", action="peliculas", thumbnail=thumb_movie))
    itemlist.append(
        Item(channel=item.channel, title="Series", action="series", thumbnail=thumb_tvshow))

    return itemlist


def peliculas(item):
    logger.info()

    thumb_search = get_thumb("search.png")

    itemlist = list()
    itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host))
    # itemlist.append(item.clone(channel=item.channel, title="Filtrar películas", action="listado", url=host))
    itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host, media="película",
                               thumbnail=thumb_search))

    return itemlist


def series(item):
    logger.info()

    thumb_search = get_thumb("search.png")

    itemlist = list()
    itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host + "series/"))
    # itemlist.append(item.clone(channel=item.channel, title="Filtrar series", action="listado", url=host))
    itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host + "series/",
                               media="serie", thumbnail=thumb_search))

    return itemlist


def search(item, texto):
    logger.info("texto:" + texto)
    texto = texto.replace(" ", "+")
    itemlist = []

    try:
        url = "%s?s=%s&post_type=Buscar+%s" % (item.url, texto, item.media)
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url).data)
        # logger.debug("data %s \n\n" % data)

        video_section = scrapertools.find_single_match(data, '<div class="contenedor_imagenes">(.*?)</div><center>')

        pattern = '<a href="(?P<url>[^"]+)">.*?<img.*?src="(?P<thumb>[^"]+)".*?class="bloque_inferior">' \
                  '(?P<title>.*?)</div>'

        matches = re.compile(pattern, re.DOTALL).findall(video_section)

        for url, thumb, title in matches:
            if item.media == "serie":
                action = "episodios"
            else:
                action = "findvideos"
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumb,
                                 contentTitle=title, contentType="movie"))

        return itemlist

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def listado(item):
    logger.info()

    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # logger.debug("data %s \n\n" % data)

    video_section = scrapertools.find_single_match(data, '<div class="contenedor_imagenes">(.*?)</div><center>')
    # logger.debug("data %s \n\n" % video_section)

    pattern = '<a href="(?P<url>[^"]+)">.*?<img.*?src="(?P<thumb>[^"]+)".*?class="bloque_superior">\s*' \
              '(?P<quality>.*?)\s*</div>.*?src="(?P<lang>[^"]+)".*?class="bloque_inferior">\s*(?P<title>.*?)\s*' \
              '</div>.*?class="div_inferior_date">\s*(?P<date>.*?)\s*</div>'

    matches = re.compile(pattern, re.DOTALL).findall(video_section)

    for url, thumb, quality, lang, title, date in matches:
        title = scrapertools.htmlclean(title)
        title = re.sub(r"\s{2}", " ", title)

        if "/series" in item.url:
            if quality:
                title2 = "%s [%s]" % (title, quality)

            itemlist.append(Item(channel=item.channel, action="episodios", title=title2, url=url, thumbnail=thumb,
                                 quality=quality, contentTitle=title, contentType="tvshow"))
        else:

            if quality:
                title2 = "%s [%s]" % (title, quality)

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title2, url=url, thumbnail=thumb,
                                 quality=quality, contentTitle=title, contentType="movie"))

    pagination = scrapertools.find_single_match(data, '<div class="navigation">(.*?)</ul>')
    if pagination:
        next_page = scrapertools.find_single_match(pagination, 'class="active"><a.*?<a.*?href="([^"]+)')
        # logger.debug("next %s" % next_page)
        if next_page:
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=next_page,
                                 thumbnail=get_thumb("next.png")))

    return itemlist


def episodios(item):
    logger.info()

    itemlist = []
    dict_data = dict()
    dict_data, item = get_episodes(item, dict_data)

    for key in dict_data.keys():
        d = dict_data[key]
        quality = "[%s]" % "][".join(d["quality"])

        d["s_e"] = re.sub(r"\(Contrase.*?\)\s*", "NO REPRODUCIBLE-RAR", d["s_e"])
        title = "%s [%s] %s" % (d["s_e"], d["lang"], quality)
        logger.debug("bla %s" % d["s_e"])

        if "temporada" in d["s_e"].lower():
            regex = re.compile('temporada\s*', re.I)
            d["s_e"] = regex.sub("", d["s_e"])
            season = scrapertools.find_single_match(d["s_e"], "(\d+)")
            episode = 1
        else:
            season, episode = scrapertools.find_single_match(d["s_e"], "(\d+)×(\d+)")

        itemlist.append(item.clone(action="findvideos", title=title, thumbnail=item.thumbnail, url=d["url"],
                                   server="torrent", contentSeason=season, contentEpisodeNumber=episode,
                                   contentType="tvshow", fulltitle=item.title, quality=d["quality"], lang=d["lang"]))

    # order list
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist


def get_episodes(item, dict_data):
    global dict_url_seasons

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # logger.debug("data %s \n\n" % data)
    if item.contentTitle != "":
        title = scrapertools.find_single_match(data, '<div class="titulo_page_exit">(.*?)[.]</div>')
        year = scrapertools.find_single_match(data, '<div class="ano_page_exit">(\d+)</div>')
        # logger.debug("title es %s" % title)
        if title:
            item.contentTitle = title
            item.show = title
        if year:
            item.infoLabels['year'] = year

    links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
    # logger.debug("data %s \n\n" % data)

    pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<s_e>.*?)</td><td>(?P<quality>.*?)</td><td>' \
              '<a href="(?P<url>[^"]+)"'
    matches = re.compile(pattern, re.DOTALL).findall(links_section)
    for lang, s_e, quality, url in matches:
        if s_e + lang not in dict_data:
            dict_data[s_e + lang] = {"url": [url], "lang": lang, "s_e": s_e,
                                     "quality": [quality]}
        else:
            if quality not in dict_data[s_e+lang]["quality"]:
                dict_data[s_e + lang]["quality"].append(quality)
            dict_data[s_e + lang]["url"].append(url)

    url_to_check = scrapertools.find_single_match(links_section, '</table><p><a .*?href="([^"]+)">Temporada.*?</a>')
    # logger.debug("url es %s " % url_to_check)

    # if url doesn't exist we add it into the dict
    if url_to_check not in dict_url_seasons:
        dict_url_seasons[url_to_check] = False

    for key, value in dict_url_seasons.items():
        if not value:
            item.url = key
            dict_url_seasons[key] = True
            dict_data, item = get_episodes(item, dict_data)

    # logger.debug("URL_LIST es %s " % dict_url_seasons)

    return dict_data, item


def findvideos(item):
    logger.info()
    itemlist = []

    if item.contentType == "movie":

        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
        # logger.debug("data %s \n\n" % data)

        if item.contentTitle != "":
            title = scrapertools.find_single_match(data, '<div class="titulo_page_exit">(.*?)[.]</div>')
            year = scrapertools.find_single_match(data, '<div class="ano_page_exit">(\d+)</div>')
            logger.debug("title es %s" % title)
            if title:
                item.contentTitle = title
                item.show = title
            if year:
                item.infoLabels['year'] = year

        links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
        # logger.debug("data %s \n\n" % data)

        pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<quality>.*?)</td><td>(?P<size>.*?)</td><td>' \
                  '<a href="(?P<url>[^"]+)"'
        matches = re.compile(pattern, re.DOTALL).findall(links_section)

        for lang, quality, size, url in matches:
            title = "[%s] [%s] (%s)" % (lang, quality, size)

            itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
                                       fulltitle=item.title))

        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    else:
        for index, url in enumerate(item.url):
            title = "%sx%s [%s] [%s]" % (item.contentSeason, item.contentEpisodeNumber, item.lang, item.quality[index])
            itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
                                       quality=item.quality[index]))

    return itemlist

# -*- coding: utf-8 -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger

host = "https://grantorrent.com/"

dict_url_seasons = dict()
__modo_grafico__ = config.get_setting('modo_grafico', 'grantorrent')


def mainlist(item):
    logger.info()

    thumb_movie = get_thumb("channels_movie.png")
    thumb_tvshow = get_thumb("channels_tvshow.png")

    itemlist = list()
    itemlist.append(
        Item(channel=item.channel, title="Peliculas", action="peliculas", thumbnail=thumb_movie))
    itemlist.append(
        Item(channel=item.channel, title="Series", action="series", thumbnail=thumb_tvshow))

    return itemlist


def peliculas(item):
    logger.info()

    thumb_search = get_thumb("search.png")

    itemlist = list()
    itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host))
    # itemlist.append(item.clone(channel=item.channel, title="Filtrar películas", action="listado", url=host))
    itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host, media="película",
                               thumbnail=thumb_search))

    return itemlist


def series(item):
    logger.info()

    thumb_search = get_thumb("search.png")

    itemlist = list()
    itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host + "series/"))
    # itemlist.append(item.clone(channel=item.channel, title="Filtrar series", action="listado", url=host))
    itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host + "series/",
                               media="serie", thumbnail=thumb_search))

    return itemlist


def search(item, texto):
    logger.info("texto:" + texto)
    texto = texto.replace(" ", "+")
    itemlist = []

    try:
        url = "%s?s=%s" % (item.url, texto)
        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url).data)
        # logger.debug("data %s \n\n" % data)

        video_section = scrapertools.find_single_match(data, '<div class="contenedor-imagen">(.*?</div>)</div></div>')

        pattern = '<a href="(?P<url>[^"]+)"><img.*?src="(?P<thumb>[^"]+)".*?class="bloque-inferior">' \
                  '\s*(?P<title>.*?)\s*</div>'

        matches = re.compile(pattern, re.DOTALL).findall(video_section)

        for url, thumb, title in matches:
            if item.media == "serie":
                action = "episodios"
            else:
                action = "findvideos"
            itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumb,
                                 contentTitle=title, contentType="movie"))

        return itemlist

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def listado(item):
    logger.info()

    itemlist = []

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # logger.debug("data %s \n\n" % data)

    video_section = scrapertools.find_single_match(data, '<br><div class="contenedor-home">(.*?</div>)</div></div>')
    # logger.debug("data %s \n\n" % video_section)

    pattern = '<a href="(?P<url>[^"]+)"><img.*?src="(?P<thumb>[^"]+)".*?.*?class="bloque-superior">\s*' \
              '(?P<quality>.*?)\s*<div class="imagen-idioma">\s*<img src=".*?icono_(?P<lang>[^\.]+).*?<div class=' \
              '"bloque-inferior">\s*(?P<title>.*?)\s*</div><div class="bloque-date">\s*(?P<date>.*?)\s*</div>'

    matches = re.compile(pattern, re.DOTALL).findall(video_section)

    for url, thumb, quality, lang, title, date in matches:
        title = scrapertools.htmlclean(title)
        title = re.sub(r"\s{2}", " ", title)

        if "/series" in item.url:
            if quality:
                title2 = "%s [%s]" % (title, quality)

            itemlist.append(Item(channel=item.channel, action="episodios", title=title2, url=url, thumbnail=thumb,
                                 quality=quality, contentTitle=title, contentType="tvshow"))
        else:

            if quality:
                title2 = "%s [%s]" % (title, quality)

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title2, url=url, thumbnail=thumb,
                                 quality=quality, contentTitle=title, contentType="movie"))

    pagination = scrapertools.find_single_match(data, '<div class="nav-links">(.*?)</ul>')
    if pagination:
        next_page = scrapertools.find_single_match(pagination, "class='page-numbers current'.*?<a.*?href='([^']+)'")
        # logger.debug("next %s" % next_page)
        if next_page:
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=next_page,
                                 thumbnail=get_thumb("next.png")))

    return itemlist


def episodios(item):
    logger.info()

    itemlist = []
    dict_data = dict()
    dict_data, item = get_episodes(item, dict_data)

    for key in dict_data.keys():
        d = dict_data[key]
        quality = "[%s]" % "][".join(d["quality"])

        d["s_e"] = re.sub(r"\(Contrase.*?\)\s*", "NO REPRODUCIBLE-RAR", d["s_e"])
        title = "%s [%s] %s" % (d["s_e"], d["lang"], quality)
        # logger.debug("%s" % d["s_e"])

        if "temporada" in d["s_e"].lower():
            regex = re.compile('temporada\s*', re.I)
            d["s_e"] = regex.sub("", d["s_e"])
            season = scrapertools.find_single_match(d["s_e"], "(\d+)")
            episode = 1
        else:
            season, episode = scrapertools.find_single_match(d["s_e"], "(\d+)×(\d+)")

        itemlist.append(item.clone(action="findvideos", title=title, thumbnail=item.thumbnail, url=d["url"],
                                   server="torrent", contentSeason=season, contentEpisodeNumber=episode,
                                   contentType="tvshow", fulltitle=item.title, quality=d["quality"], lang=d["lang"]))

    # order list
    if len(itemlist) > 1:
        itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist


def get_episodes(item, dict_data):
    global dict_url_seasons

    data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
    # logger.debug("data %s \n\n" % data)
    if item.contentTitle != "":
        title = scrapertools.find_single_match(data, '<h3 class="bold">.*?original:\s*(.*?)[.]</h3>')
        year = scrapertools.find_single_match(data, '<h3 class="bold">\s*Estreno:\s*(\d+)[.]</h')
        # logger.debug("title es %s" % title)
        if title:
            item.contentTitle = title
            item.show = title
        if year:
            item.infoLabels['year'] = year

    links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
    # logger.debug("data %s \n\n" % links_section)

    pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<s_e>.*?)</td><td>(?P<quality>.*?)</td><td>' \
              '<a class="link" href="(?P<url>[^"]+)"'
    matches = re.compile(pattern, re.DOTALL).findall(links_section)
    for lang, s_e, quality, url in matches:
        if s_e + lang not in dict_data:
            dict_data[s_e + lang] = {"url": [url], "lang": lang, "s_e": s_e,
                                     "quality": [quality]}
        else:
            if quality not in dict_data[s_e+lang]["quality"]:
                dict_data[s_e + lang]["quality"].append(quality)
            dict_data[s_e + lang]["url"].append(url)

    url_to_check = scrapertools.find_single_match(links_section, '</table><p><a.*?href="([^"]+)".*?>\s*Temporada.*?</a>')
    # logger.debug("url es %s " % url_to_check)

    # if url doesn't exist we add it into the dict
    if url_to_check and url_to_check not in dict_url_seasons:
        dict_url_seasons[url_to_check] = False

    for key, value in dict_url_seasons.items():
        if not value:
            item.url = key
            dict_url_seasons[key] = True
            dict_data, item = get_episodes(item, dict_data)

    # logger.debug("URL_LIST es %s " % dict_url_seasons)

    return dict_data, item
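# ------------------------------------------------------------------
# Boceto del recorrido recursivo de temporadas en get_episodes: cada URL
# de temporada se anota en dict_url_seasons y se visita una sola vez.
# Versión mínima e ilustrativa con un grafo de páginas inventado:
paginas = {"/serie": ["/serie/t2"], "/serie/t2": []}

def recorrer(url, visitadas):
    visitadas[url] = True
    for temporada in paginas[url]:
        if temporada not in visitadas:
            recorrer(temporada, visitadas)
    return visitadas

print(sorted(recorrer("/serie", {})))  # -> ['/serie', '/serie/t2']
# ------------------------------------------------------------------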


def findvideos(item):
    logger.info()
    itemlist = []

    if item.contentType == "movie":

        data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
        # logger.debug("data %s \n\n" % data)

        if item.contentTitle != "":
            title = scrapertools.find_single_match(data, '<div class="titulo_page_exit">(.*?)[.]</div>')
            year = scrapertools.find_single_match(data, '<div class="ano_page_exit">(\d+)</div>')
            logger.debug("title es %s" % title)
            if title:
                item.contentTitle = title
                item.show = title
            if year:
                item.infoLabels['year'] = year

        links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
        # logger.debug("data %s \n\n" % data)

        pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<quality>.*?)</td><td>(?P<size>.*?)</td><td>' \
                  '<a class="link" href="(?P<url>[^"]+)"'
        matches = re.compile(pattern, re.DOTALL).findall(links_section)

        for lang, quality, size, url in matches:
            title = "[%s] [%s] (%s)" % (lang, quality, size)

            itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
                                       fulltitle=item.title))

        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    else:
        for index, url in enumerate(item.url):
            title = "%sx%s [%s] [%s]" % (item.contentSeason, item.contentEpisodeNumber, item.lang, item.quality[index])
            itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
                                       quality=item.quality[index]))

    return itemlist
@@ -310,7 +310,8 @@ def fichas(item):

    for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:

        thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
        #thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
        thumbnail = scrapedthumbnail
        language = ''
        title = scrapedtitle.strip()
        show = title
@@ -692,12 +693,10 @@ def findvideos(item):
    fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
    if account:
        url += "###" + id + ";" + type

    it2.append(
        item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
                   plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
                   contentTitle=item.title, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))

                   contentTitle=item.show, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
    it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
    it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
    for item in it2:

@@ -177,7 +177,8 @@ class main(xbmcgui.WindowDialog):
            self.infoLabels["originaltitle"] = otmdb.result.get("original_title",
                                                                otmdb.result.get("original_name", ""))
            self.trailers = otmdb.get_videos()
            self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0))
            if otmdb.result.get("runtime", 0):
                self.infoLabels["duration"] = int(otmdb.result.get("runtime", 0))
        else:
            self.trailers = []

@@ -6,21 +6,18 @@ from core import httptools
from core import scrapertools
from platformcode import config, logger

host = "http://www.javtasty.com"
host = "https://www.javwhores.com"


def mainlist(item):
    logger.info()
    itemlist = []

    itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/videos"))
    itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/videos?o=tr"))
    itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/videos?o=mv"))
    itemlist.append(item.clone(action="lista", title="Ordenados por duración", url=host + "/videos?o=lg"))
    itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories"))
    itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
    itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
    itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
    itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
    itemlist.append(item.clone(title="Buscar...", action="search"))
    itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))

    return itemlist


@@ -33,7 +30,7 @@ def configuracion(item):

def search(item, texto):
    logger.info()
    item.url = "%s/search?search_query=%s&search_type=videos" % (host, texto)
    item.url = "%s/search/%s/" % (host, texto)
    item.extra = texto
    try:
        return lista(item)
@@ -48,83 +45,66 @@ def search(item, texto):
def lista(item):
    logger.info()
    itemlist = []

    # Descarga la pagina
    data = httptools.downloadpage(item.url).data

    action = "play"
    if config.get_setting("menu_info", "javtasty"):
        action = "menu_info"

    # Extrae las entradas
    patron = '<div class="well wellov well-sm".*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"(.*?)<div class="duration">(?:.*?</i>|)\s*([^<]+)<'
    patron = 'div class="video-item.*?href="([^"]+)".*?'
    patron += 'data-original="([^"]+)" '
    patron += 'alt="([^"]+)"(.*?)fa fa-clock-o"></i>([^<]+)<'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle, quality, duration in matches:
        scrapedurl = urlparse.urljoin(host, scrapedurl)
        scrapedtitle = scrapedtitle.strip()
        if duration:
            scrapedtitle = "%s - %s" % (duration.strip(), scrapedtitle)

        if '>HD<' in quality:
            scrapedtitle += " [COLOR red][HD][/COLOR]"

        itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   fanart=scrapedthumbnail))

    # Extrae la marca de siguiente página
    next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="prevnext">')
    next_page = scrapertools.find_single_match(data, 'next"><a href="([^"]+)')
    if next_page:
        next_page = next_page.replace("&amp;", "&")
        itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))

        itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=host + next_page))
    return itemlist


def categorias(item):
    logger.info()
    itemlist = []

    # Descarga la pagina
    data = httptools.downloadpage(item.url).data

    # Extrae las entradas
    patron = '<div class="col-sm-4.*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"'
    patron = '(?s)<a class="item" href="([^"]+)".*?'
    patron += 'src="([^"]+)" '
    patron += 'alt="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedurl = urlparse.urljoin(host, scrapedurl)
        scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
        itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   fanart=scrapedthumbnail))

    return itemlist


def play(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    videourl = scrapertools.find_single_match(data, "var video_sd\s*=\s*'([^']+)'")
    videourl = scrapertools.find_single_match(data, "video_url:\s*'([^']+)'")
    if videourl:
        itemlist.append(['.mp4 [directo]', videourl])
    videourl = scrapertools.find_single_match(data, "var video_hd\s*=\s*'([^']+)'")
    videourl = scrapertools.find_single_match(data, "video_alt_url:\s*'([^']+)'")
    if videourl:
        itemlist.append(['.mp4 HD [directo]', videourl])

    if item.extra == "play_menu":
        return itemlist, data

    return itemlist
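# ------------------------------------------------------------------
# Boceto de la nueva extracción en play(): el reproductor del nuevo host
# expone las URLs en las variables JS video_url (SD) y video_alt_url (HD).
# Regex del diff; la cadena de ejemplo es inventada.
import re

js = "video_url: 'http://cdn.x/sd.mp4', video_alt_url: 'http://cdn.x/hd.mp4',"
sd = re.search(r"video_url:\s*'([^']+)'", js).group(1)
hd = re.search(r"video_alt_url:\s*'([^']+)'", js).group(1)
print(sd, hd)  # -> http://cdn.x/sd.mp4 http://cdn.x/hd.mp4
# ------------------------------------------------------------------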
|
||||
|
||||
|
||||
def menu_info(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
video_urls, data = play(item.clone(extra="play_menu"))
|
||||
itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls))
|
||||
|
||||
bloque = scrapertools.find_single_match(data, '<div class="carousel-inner"(.*?)<div class="container">')
|
||||
matches = scrapertools.find_multiple_matches(bloque, 'src="([^"]+)"')
|
||||
for i, img in enumerate(matches):
|
||||
@@ -132,5 +112,4 @@ def menu_info(item):
|
||||
continue
|
||||
title = "Imagen %s" % (str(i))
|
||||
itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))
|
||||
|
||||
return itemlist
|
||||
|
||||
@@ -48,6 +48,22 @@
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_castellano",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Castellano",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
},
|
||||
{
|
||||
"id": "include_in_newest_latino",
|
||||
"type": "bool",
|
||||
"label": "Incluir en Novedades - Latino",
|
||||
"default": true,
|
||||
"enabled": true,
|
||||
"visible": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -265,14 +265,25 @@ def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    item.page = 0
    try:
        if categoria == "terror":
            item.url = host + "/listado/terror/"
            item.action = "updated"
            item.page = 0
            itemlist = updated(item)
            itemlist = updated(item)
        elif categoria == 'castellano':
            item.url = host + "/estrenos/es/"
            item.action = "entradas"

        if itemlist[-1].action == "updated":
        elif categoria == 'latino':
            item.url = host + "/estrenos/la/"
            item.action = "entradas"

        if categoria != 'terror':
            itemlist = entradas(item)

        if itemlist[-1].action == item.action:
            itemlist.pop()

    # Catch the exception so the "news" channel is not interrupted when one channel fails
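The `itemlist[-1].action == item.action` check above identifies the trailing "next page" entry, which the aggregated Novedades view cannot use, and drops it. A minimal illustration of that check with hypothetical items:

# Minimal illustration of the trailing-pager check above; items are hypothetical.
# The channel's "next page" entry is a clone of the listing action itself,
# so comparing the last item's action against the requested action finds it.
class FakeItem(object):
    def __init__(self, action, title):
        self.action = action
        self.title = title

itemlist = [FakeItem("findvideos", "Movie A"),
            FakeItem("entradas", "Pagina siguiente >>")]  # pager clone

requested_action = "entradas"
if itemlist and itemlist[-1].action == requested_action:
    itemlist.pop()  # drop the pager so Novedades only aggregates real entries

print([i.title for i in itemlist])  # ['Movie A']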
@@ -20,6 +20,14 @@
      "default": false,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -12,7 +12,7 @@ from core.item import Item
from core.tmdb import Tmdb
from platformcode import logger

host = "http://www.mejortorrent.com"
host = "https://mejortorrent.website"


def mainlist(item):
@@ -29,19 +29,19 @@ def mainlist(item):
    thumb_buscar = get_thumb("search.png")

    itemlist.append(Item(channel=item.channel, title="Peliculas", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-peliculas.html", thumbnail=thumb_pelis))
                         url=host + "/torrents-de-peliculas.html", thumbnail=thumb_pelis))
    itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-peliculas-hd-alta-definicion.html",
                         url=host + "/torrents-de-peliculas-hd-alta-definicion.html",
                         thumbnail=thumb_pelis_hd))
    itemlist.append(Item(channel=item.channel, title="Series", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series))
                         url=host + "/torrents-de-series.html", thumbnail=thumb_series))
    itemlist.append(Item(channel=item.channel, title="Series HD", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-series-hd-alta-definicion.html",
                         url=host + "/torrents-de-series-hd-alta-definicion.html",
                         thumbnail=thumb_series_hd))
    itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico",
                         url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series_az))
                         url=host + "/torrents-de-series.html", thumbnail=thumb_series_az))
    itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
                         url="http://www.mejortorrent.com/torrents-de-documentales.html", thumbnail=thumb_docus))
                         url=host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar))

    return itemlist
@@ -55,10 +55,10 @@ def listalfabetico(item):
    for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
                  'U', 'V', 'W', 'X', 'Y', 'Z']:
        itemlist.append(Item(channel=item.channel, action="getlist", title=letra,
                             url="http://www.mejortorrent.com/series-letra-" + letra.lower() + ".html"))
                             url=host + "/series-letra-" + letra.lower() + ".html"))

    itemlist.append(Item(channel=item.channel, action="getlist", title="Todas",
                         url="http://www.mejortorrent.com/series-letra..html"))
                         url=host + "/series-letra..html"))

    return itemlist

@@ -67,7 +67,7 @@ def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")

    item.url = "http://www.mejortorrent.com/secciones.php?sec=buscador&valor=%s" % (texto)
    item.url = host + "/secciones.php?sec=buscador&valor=%s" % (texto)
    try:
        return buscador(item)
@@ -81,30 +81,12 @@ def search(item, texto):
def buscador(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # movies
    # <a href="/peli-descargar-torrent-9578-Presentimientos.html">
    # <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
    #
    # series
    #
    # <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
    # <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
    #
    # docs
    #
    # <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
    # <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>

    # search series
    patron = "<a href='(/serie-descargar-torrent[^']+)'[^>]+>(.*?)</a>"
    patron += ".*?<span style='color:gray;'>([^']+)</span>"
    patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"

    matches = scrapertools.find_multiple_matches(data, patron)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedinfo in matches:
        title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode(
@@ -119,10 +101,7 @@ def buscador(item):
    # search movies
    patron = "<a href='(/peli-descargar-torrent-[^']+)'[^>]+>(.*?)</a>"
    patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8')
        url = urlparse.urljoin(item.url, scrapedurl)
@@ -135,10 +114,7 @@ def buscador(item):
    patron += "<font Color='darkblue'>(.*?)</font>.*?"
    patron += "<td align='right' width='20%'>(.*?)</td>"
    patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedinfo in matches:
        title = scrapedtitle.decode('iso-8859-1').encode('utf8') + " " + scrapedinfo.decode('iso-8859-1').encode('utf8')
        url = urlparse.urljoin(item.url, scrapedurl)
@@ -154,23 +130,7 @@ def buscador(item):
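For reference, here is how the series pattern above behaves against one row. The sample anchor below is hypothetical (single-quoted, as the pattern expects; the commented examples above show the same link shapes with double quotes):

# Hypothetical sample row exercised against the series patterns above.
import re

sample = ("<a href='/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html' "
          "class='serie'>Ahora o nunca - 4 Temporada</a> "
          "<span style='color:gray;'>4x01 al 4x08</span>")

patron = "<a href='(/serie-descargar-torrent[^']+)'[^>]+>(.*?)</a>"
patron += ".*?<span style='color:gray;'>([^']+)</span>"
patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"

for url, title, info in re.findall(patron, sample, re.DOTALL):
    # the slug between the numeric ids and ".html" doubles as a fallback title
    slug = re.search(patron_enlace, url).group(1).replace("-", " ")
    print("%s | %s | %s" % (title, info, slug))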
def getlist(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    # movies
    # <a href="/peli-descargar-torrent-9578-Presentimientos.html">
    # <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
    #
    # series
    #
    # <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
    # <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
    #
    # docs
    #
    # <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
    # <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>

    if item.url.find("peliculas") > -1:
        patron = '<a href="(/peli-descargar-torrent[^"]+)">[^<]+'
        patron += '<img src="([^"]+)"[^<]+</a>'
@@ -202,27 +162,18 @@ def getlist(item):
        action = "episodios"
        folder = True
        extra = "docus"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail in matches:
        title = scrapertools.get_match(scrapedurl, patron_enlace)
        title = title.replace("-", " ")
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, urllib.quote(scrapedthumbnail))
        thumbnail = host + urllib.quote(scrapedthumbnail)
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
                             folder=folder, extra=extra))

    matches = re.compile(patron_title, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    # Replace the title taken from the URL with a more informative one.
    # This implementation assumes it will find the same matches as the
    # previous loop, which is technically wrong, but works as long as the
    # page format does not change (see the sketch after this hunk).
    cnt = 0
    for scrapedtitle, notused, scrapedinfo in matches:
        title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
@@ -244,7 +195,6 @@ def getlist(item):
    # Extract the pager
    patronvideos = "<a href='([^']+)' class='paginar'> Siguiente >>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
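The translated comment above concedes that the two findall passes are only aligned by position. A hedged sketch of a safer pairing, assuming each row yields exactly one match for both patterns (the tuples below are hypothetical stand-ins for the real findall results):

# Sketch: pair the two findall passes explicitly instead of a shared counter.
from itertools import izip  # Python 2, as used by the addon

link_matches = [("/peli-descargar-torrent-9578-Presentimientos.html", "/uploads/a.jpg"),
                ("/peli-descargar-torrent-9600-Otra.html", "/uploads/b.jpg")]
title_matches = [("Presentimientos", None, "DVDRip"),
                 ("Otra", None, "HDRip")]

# izip truncates at the shorter sequence, so a pattern that misses one row
# drops that row instead of shifting every later title by one position.
for (url, thumb), (title, _, info) in izip(link_matches, title_matches):
    print("%s -> %s (%s)" % (url, title, info))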
@@ -267,18 +217,11 @@ def episodios(item):

    item.thumbnail = scrapertools.find_single_match(data,
                                                    "src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
    item.thumbnail = 'http://www.mejortorrent.com' + urllib.quote(item.thumbnail)
    item.thumbnail = host + urllib.quote(item.thumbnail)

    # <form name='episodios' action='secciones.php?sec=descargas&ap=contar_varios' method='post'>
    data = scrapertools.get_match(data,
                                  "<form name='episodios' action='secciones.php\?sec=descargas\&ap=contar_varios' method='post'>(.*?)</form>")
    '''
    <td bgcolor='#C8DAC8' style='border-bottom:1px solid black;'><a href='/serie-episodio-descargar-torrent-18741-Juego-de-tronos-4x01.html'>4x01 - Episodio en V.O. Sub Esp.</a></td>
    <td width='120' bgcolor='#C8DAC8' align='right' style='border-right:1px solid black; border-bottom:1px solid black;'><div style='color:#666666; font-size:9px; margin-right:5px;'>Fecha: 2014-04-07</div></td>
    <td width='60' bgcolor='#F1F1F1' align='center' style='border-bottom:1px solid black;'>
    <input type='checkbox' name='episodios[1]' value='18741'>
    '''

    if item.extra == "series":
        patron = "<td bgcolor[^>]+><a[^>]+>([^>]+)</a></td>[^<]+"
    else:
@@ -289,7 +232,6 @@ def episodios(item):
    patron += "<input type='checkbox' name='([^']+)' value='([^']+)'"

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    tmdb_title = re.sub(r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]', '', item.title).strip()
    logger.debug('tmdb_title=' + tmdb_title)
@@ -306,7 +248,7 @@ def episodios(item):

    title = scrapedtitle + " (" + fecha + ")"

    url = "http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar_varios"
    url = host + "/secciones.php?sec=descargas&ap=contar_varios"
    # "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
    post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
    logger.debug("post=" + post)
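The commented string in the hunk above shows the wire format of the POST body that `urllib.urlencode` produces. A standalone check with hypothetical values:

# Standalone sketch of the POST body built above, with hypothetical values.
# urlencode percent-escapes the bracketed field name, matching the sample
# "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=..." comment.
import urllib

post = urllib.urlencode({"episodios[1]": "11744",
                         "total_capis": "5",
                         "tabla": "series",
                         "titulo": "Sea Patrol - 2 Temporada"})
# Dict order is arbitrary in Python 2, so the field order may vary, e.g.:
# episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2+Temporada
print(post)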
@@ -370,20 +312,15 @@ def show_movie_info(item):

    patron = "<a href='(secciones.php\?sec\=descargas[^']+)'"
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")

        torrent_data = httptools.downloadpage(url).data
        logger.debug("torrent_data=" + torrent_data)
        # <a href='/uploads/torrents/peliculas/los-juegos-del-hambre-brrip.torrent'>
        link = scrapertools.get_match(torrent_data, "<a href='(/uploads/torrents/peliculas/.*?\.torrent)'>")
        link = urlparse.urljoin(url, link)

        logger.debug("link=" + link)

        itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                             thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
@@ -402,24 +339,35 @@ def play(item):
    data = httptools.downloadpage(item.url, post=item.extra).data
    logger.debug("data=" + data)

    # series
    #
    # <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-01_02.torrent"
    # <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-03.torrent"
    #
    # docus
    #
    # <a href="http://www.mejortorrent.com/uploads/torrents/documentales/En_Suenyos_De_Todos_DVDrip.torrent">El sueño de todos. </a>

    params = dict(urlparse.parse_qsl(item.extra))

    patron = '<a href="(http://www.mejortorrent.com/uploads/torrents/' + params["tabla"] + '/.*?\.torrent)"'

    link = scrapertools.get_match(data, patron)

    logger.info("link=" + link)

    itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
                         thumbnail=item.thumbnail, plot=item.plot, folder=False))
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = host + "/torrents-de-peliculas.html"

            itemlist = getlist(item)
            if itemlist[-1].title == "Pagina siguiente >>":
                itemlist.pop()
            item.url = host + "/torrents-de-series.html"
            itemlist.extend(getlist(item))
            if itemlist[-1].title == "Pagina siguiente >>":
                itemlist.pop()

    # Catch the exception so the "news" channel is not interrupted when one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -19,6 +19,14 @@
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -1555,3 +1555,28 @@ def busqueda(item):

    from channels import search
    return search.do_search(new_item, cat)

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = 'http://www.miltorrents.com'

            itemlist = peliculas(item)
            if itemlist[-1].title == "[COLOR khaki]siguiente[/COLOR]":
                itemlist.pop()
            item.url = 'http://www.miltorrents.com/series'
            itemlist.extend(peliculas(item))
            if itemlist[-1].title == "[COLOR khaki]siguiente[/COLOR]":
                itemlist.pop()

    # Catch the exception so the "news" channel is not interrupted when one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -18,6 +18,14 @@
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_latino",
      "type": "bool",
      "label": "Incluir en Novedades - Latino",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_peliculas",
      "type": "bool",
@@ -35,4 +43,4 @@
      "visible": true
    }
  ]
}
}
@@ -382,7 +382,7 @@ def newest(categoria):
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
        if categoria in ['peliculas', 'latino']:
            item.url = host + 'page/1/?s'

        elif categoria == 'infantiles':
@@ -20,6 +20,14 @@
      "default": false,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_torrent",
      "type": "bool",
      "label": "Incluir en Novedades - Torrent",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -442,3 +442,29 @@ def search(item, texto):
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        item.extra = 'pelilist'
        if categoria == 'torrent':
            item.url = host + 'peliculas/'

            itemlist = listado(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = host + 'series/'
            itemlist.extend(listado(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # Catch the exception so the "news" channel is not interrupted when one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

@@ -15,6 +15,8 @@ from core import scrapertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from core import jsontools
from channels import side_menu

THUMBNAILS = {'0': 'posters', '1': 'banners', '2': 'squares'}

@@ -27,11 +29,16 @@ perfil = [['0xFF0B7B92', '0xFF89FDFB', '0xFFACD5D4'],
          ['0xFFA5DEE5', '0xFFE0F9B5', '0xFFFEFDCA'],
          ['0xFFF23557', '0xFF22B2DA', '0xFFF0D43A']]

#color1, color2, color3 = ["white", "white", "white"]
color1, color2, color3 = perfil[__perfil__]

list_newest = []
list_newest_tourl = []
channels_id_name = {}

menu_cache_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_cache_data.json')
menu_settings_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_settings_data.json')

def mainlist(item):
    logger.info()
@@ -74,6 +81,26 @@ def mainlist(item):
        set_category_context(new_item)
        itemlist.append(new_item)

    # if list_canales['Castellano']:
    thumbnail = get_thumb("channels_spanish.png")
    new_item = Item(channel=item.channel, action="novedades", extra="castellano", title="Castellano",
                    thumbnail=thumbnail)
    set_category_context(new_item)
    itemlist.append(new_item)

    # if list_canales['Latino']:
    thumbnail = get_thumb("channels_latino.png")
    new_item = Item(channel=item.channel, action="novedades", extra="latino", title="Latino",
                    thumbnail=thumbnail)
    set_category_context(new_item)
    itemlist.append(new_item)

    # if list_canales['Torrent']:
    thumbnail = get_thumb("channels_torrent.png")
    new_item = Item(channel=item.channel, action="novedades", extra="torrent", title="Torrent", thumbnail=thumbnail)
    set_category_context(new_item)
    itemlist.append(new_item)

    # if list_canales['documentales']:
    thumbnail = get_thumb("channels_documentary.png")
    new_item = Item(channel=item.channel, action="novedades", extra="documentales", title="Documentales",
@@ -95,7 +122,8 @@ def set_category_context(item):
def get_channels_list():
    logger.info()

    list_canales = {'peliculas': [], 'terror': [], 'infantiles': [], 'series': [], 'anime': [], 'documentales': []}
    list_canales = {'peliculas': [], 'terror': [], 'infantiles': [], 'series': [], 'anime': [],
                    'castellano': [], 'latino': [], 'torrent': [], 'documentales': []}
    any_active = False
    # Fill in the lists of available channels
    channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json')
@@ -129,6 +157,54 @@ def get_channels_list():

    return list_canales, any_active

def set_cache(item):
    logger.info()
    item.mode = 'set_cache'
    t = Thread(target=novedades, args=[item])
    t.start()
    #t.join()

def get_from_cache(item):
    logger.info()
    itemlist = []
    cache_node = jsontools.get_node_from_file('menu_cache_data.json', 'cached')
    first = item.last
    last = first + 40
    #if last >= len(cache_node[item.extra]):
    #    last = len(cache_node[item.extra])

    for cached_item in cache_node[item.extra][first:last]:
        new_item = Item()
        new_item = new_item.fromurl(cached_item)
        itemlist.append(new_item)
    if item.mode == 'silent':
        set_cache(item)
    if last >= len(cache_node[item.extra]):
        item.mode = 'finish'
        itemlist = add_menu_items(item, itemlist)
    else:
        item.mode = 'get_cached'
        item.last = last
        itemlist = add_menu_items(item, itemlist)

    return itemlist

def add_menu_items(item, itemlist):
    logger.info()

    menu_icon = get_thumb('menu.png')
    menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon, title='Menu')
    itemlist.insert(0, menu)
    if item.mode != 'finish':
        if item.mode == 'get_cached':
            last = item.last
        else:
            last = len(itemlist)
        refresh_icon = get_thumb('more.png')
        refresh = item.clone(thumbnail=refresh_icon, mode='get_cached', title='Mas', last=last)
        itemlist.insert(len(itemlist), refresh)

    return itemlist

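`get_from_cache` pages the serialized items 40 at a time, keeping a cursor in `item.last` and appending a "Mas" entry until the window reaches the end of the cached list. A minimal sketch of the same windowing over a plain list (names hypothetical):

# Minimal sketch of the 40-item paging window used by get_from_cache above.
# cached stands in for cache_node[item.extra]; cursor for item.last.
PAGE_SIZE = 40
cached = ["serialized_item_%d" % i for i in range(95)]  # hypothetical cache content

cursor = 0
while True:
    window = cached[cursor:cursor + PAGE_SIZE]
    print("page of %d items, cursor=%d" % (len(window), cursor))
    cursor += PAGE_SIZE
    if cursor >= len(cached):
        break  # equivalent to item.mode = 'finish': no "Mas" entry is appended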
def novedades(item):
    logger.info()
@@ -138,6 +214,14 @@ def novedades(item):
    list_newest = []
    start_time = time.time()

    mode = item.mode
    if mode == '':
        mode = 'normal'

    if mode == 'get_cached':
        if os.path.exists(menu_cache_path):
            return get_from_cache(item)

    multithread = config.get_setting("multithread", "news")
    logger.info("multithread= " + str(multithread))

@@ -149,8 +233,22 @@ def novedades(item):
        if config.set_setting("multithread", True, "news"):
            multithread = True

    progreso = platformtools.dialog_progress(item.category, "Buscando canales...")
    if mode == 'normal':
        progreso = platformtools.dialog_progress(item.category, "Buscando canales...")

    list_canales, any_active = get_channels_list()

    if mode == 'silent' and any_active and len(list_canales[item.extra]) > 0:
        side_menu.set_menu_settings(item)
        aux_list = []
        for canal in list_canales[item.extra]:
            if len(aux_list) < 2:
                aux_list.append(canal)
        list_canales[item.extra] = aux_list

    if mode == 'set_cache':
        list_canales[item.extra] = list_canales[item.extra][2:]

    if any_active and len(list_canales[item.extra]) > 0:
        import math
        # force float, since integer division truncates in Python 2.x (see the sketch after this hunk)
@@ -170,12 +268,14 @@ def novedades(item):
                t = Thread(target=get_newest, args=[channel_id, item.extra], name=channel_title)
                t.start()
                threads.append(t)
                progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
                if mode == 'normal':
                    progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)

            # Single-thread mode
            else:
                logger.info("Obteniendo novedades de channel_id=" + channel_id)
                progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
                if mode == 'normal':
                    logger.info("Obteniendo novedades de channel_id=" + channel_id)
                    progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
                get_newest(channel_id, item.extra)

        # Multi-thread mode: wait for all threads to finish
@@ -187,25 +287,29 @@ def novedades(item):
            percentage = int(math.ceil(index * t))

            list_pendent_names = [a.getName() for a in pendent]
            mensaje = "Buscando en %s" % (", ".join(list_pendent_names))
            progreso.update(percentage, "Finalizado en %d/%d canales..." % (len(threads) - len(pendent), len(threads)),
            if mode == 'normal':
                mensaje = "Buscando en %s" % (", ".join(list_pendent_names))
                progreso.update(percentage, "Finalizado en %d/%d canales..." % (len(threads) - len(pendent), len(threads)),
                                mensaje)
                logger.debug(mensaje)
            logger.debug(mensaje)

            if progreso.iscanceled():
                logger.info("Busqueda de novedades cancelada")
                break
                if progreso.iscanceled():
                    logger.info("Busqueda de novedades cancelada")
                    break

            time.sleep(0.5)
            pendent = [a for a in threads if a.isAlive()]

        mensaje = "Resultados obtenidos: %s | Tiempo: %2.f segundos" % (len(list_newest), time.time() - start_time)
        progreso.update(100, mensaje, " ", " ")
        logger.info(mensaje)
        start_time = time.time()
        # logger.debug(start_time)
        if mode == 'normal':
            mensaje = "Resultados obtenidos: %s | Tiempo: %2.f segundos" % (len(list_newest), time.time() - start_time)
            progreso.update(100, mensaje, " ", " ")
            logger.info(mensaje)
            start_time = time.time()
            # logger.debug(start_time)

        result_mode = config.get_setting("result_mode", "news")
        if mode != 'normal':
            result_mode = 0

        if result_mode == 0:  # Grouped by content
            ret = group_by_content(list_newest)
        elif result_mode == 1:  # Grouped by channel
@@ -216,13 +320,19 @@ def novedades(item):
        while time.time() - start_time < 2:
            # show the progress dialog with the elapsed time for at least 2 seconds
            time.sleep(0.5)

        progreso.close()
        return ret
        if mode == 'normal':
            progreso.close()
        if mode == 'silent':
            set_cache(item)
            item.mode = 'set_cache'
            ret = add_menu_items(item, ret)
        if mode != 'set_cache':
            return ret
    else:
        no_channels = platformtools.dialog_ok('Novedades - %s' % item.extra, 'No se ha definido ningun canal para la '
                                              'busqueda.', 'Utilice el menu contextual '
                                              'para agregar al menos uno')
        if mode != 'set_cache':
            no_channels = platformtools.dialog_ok('Novedades - %s' % item.extra, 'No se ha definido ningun canal para la '
                                                  'busqueda.', 'Utilice el menu contextual '
                                                  'para agregar al menos uno')
        return

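The "force float" comment in the hunk above refers to Python 2's truncating `/` on two integers, which would wreck the progress-percentage math. A two-line illustration:

# Why the hunk above forces a float: in Python 2, "/" on two ints truncates.
print(100 / 3)         # 33  (integer division in Python 2)
print(100 / float(3))  # 33.333... as needed for the percentage math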
@@ -230,6 +340,7 @@ def get_newest(channel_id, categoria):
    logger.info("channel_id=" + channel_id + ", categoria=" + categoria)

    global list_newest
    global list_newest_tourl

    # Request the newest entries for the searched category (item.extra) from the channel.
    # If the channel has no newest entries for that category, it returns an empty list.
@@ -250,11 +361,22 @@ def get_newest(channel_id, categoria):
        logger.info("running channel " + modulo.__name__ + " " + modulo.__file__)
        list_result = modulo.newest(categoria)
        logger.info("canal= %s %d resultados" % (channel_id, len(list_result)))

        exist = False
        if os.path.exists(menu_cache_path):
            cache_node = jsontools.get_node_from_file('menu_cache_data.json', 'cached')
            exist = True
        else:
            cache_node = {}
        #logger.debug('cache node: %s' % cache_node)
        for item in list_result:
            # logger.info("item="+item.tostring())
            item.channel = channel_id
            list_newest.append(item)
            list_newest_tourl.append(item.tourl())

        cache_node[categoria] = list_newest_tourl

        jsontools.update_node(cache_node, 'menu_cache_data.json', "cached")

    except:
        logger.error("No se pueden recuperar novedades de: " + channel_id)
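`get_newest` persists each result by serializing the Item with `tourl()` and writing the list under the category key of the 'cached' node; `get_from_cache` reverses that with `fromurl()`. A condensed sketch of the round trip using the same helpers as the hunk (it only runs inside the addon environment; the items are hypothetical):

# Condensed sketch of the cache round trip in get_newest / get_from_cache above.
from core import jsontools
from core.item import Item

items = [Item(title="Movie A"), Item(title="Movie B")]  # hypothetical results

cache_node = {}
cache_node["peliculas"] = [it.tourl() for it in items]               # serialize
jsontools.update_node(cache_node, 'menu_cache_data.json', "cached")  # persist

node = jsontools.get_node_from_file('menu_cache_data.json', 'cached')
restored = [Item().fromurl(s) for s in node["peliculas"]]            # deserialize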
@@ -419,6 +541,16 @@ def menu_opciones(item):
                         title=" - Episodios de anime",
                         thumbnail=get_thumb("channels_anime.png"),
                         folder=False))
    itemlist.append(
        Item(channel=item.channel, action="setting_channel", extra="castellano", title=" - Castellano",
             thumbnail=get_thumb("channels_documentary.png"), folder=False))

    itemlist.append(Item(channel=item.channel, action="setting_channel", extra="latino", title=" - Latino",
                         thumbnail=get_thumb("channels_documentary.png"), folder=False))

    itemlist.append(Item(channel=item.channel, action="setting_channel", extra="Torrent", title=" - Torrent",
                         thumbnail=get_thumb("channels_documentary.png"), folder=False))

    itemlist.append(Item(channel=item.channel, action="setting_channel", extra="documentales",
                         title=" - Documentales",
                         thumbnail=get_thumb("channels_documentary.png"),

@@ -1,38 +0,0 @@
{
  "id": "ohlatino",
  "name": "OH!Latino",
  "active": true,
  "adult": false,
  "language": ["lat"],
  "thumbnail": "http://cinemiltonero.com/wp-content/uploads/2017/08/logo-Latino0.png",
  "banner": "https://s27.postimg.org/bz0fh8jpf/oh-pelis-banner.png",
  "categories": [
    "movie"
  ],
  "settings": [
    {
      "id": "include_in_global_search",
      "type": "bool",
      "label": "Incluir en busqueda global",
      "default": false,
      "enabled": false,
      "visible": false
    },
    {
      "id": "include_in_newest_peliculas",
      "type": "bool",
      "label": "Incluir en Novedades - Peliculas",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_infantiles",
      "type": "bool",
      "label": "Incluir en Novedades - Infantiles",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -1,206 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel OH!Latino -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

host = 'http://www.ohpeliculas.com'

def mainlist(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(host).data
    patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)<\/a> <i>(\d+)<\/i>'
    matches = scrapertools.find_multiple_matches(data, patron)
    mcantidad = 0
    for scrapedurl, scrapedtitle, cantidad in matches:
        mcantidad += int(cantidad)

    itemlist.append(
        item.clone(title="Peliculas",
                   action='movies_menu'
                   ))

    itemlist.append(
        item.clone(title="Buscar",
                   action="search",
                   url=host + '?s=',
                   ))

    return itemlist


def movies_menu(item):
    logger.info()

    itemlist = []

    itemlist.append(
        item.clone(title="Todas",
                   action="list_all",
                   url=host
                   ))

    itemlist.append(
        item.clone(title="Generos",
                   action="section",
                   url=host, extra='genres'))

    itemlist.append(
        item.clone(title="Por año",
                   action="section",
                   url=host, extra='byyear'
                   ))

    return itemlist


def get_source(url):
    logger.info()
    data = httptools.downloadpage(url).data
    data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data)
    return data

def list_all(item):
    logger.info()

    itemlist = []
    data = get_source(item.url)
    patron = '<div id=mt-.*? class=item>.*?<a href=(.*?)><div class=image>.*?'
    patron += '<img src=(.*?) alt=.*?span class=tt>(.*?)<.*?ttx>(.*?)'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        url = scrapedurl
        action = 'findvideos'
        thumbnail = scrapedthumbnail
        contentTitle = scrapedtitle
        plot = scrapedplot
        title = contentTitle

        filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
        filtro_list = {"poster_path": filtro_thumb}
        filtro_list = filtro_list.items()

        itemlist.append(Item(channel=item.channel,
                             action=action,
                             title=title,
                             url=url,
                             plot=plot,
                             thumbnail=thumbnail,
                             contentTitle=contentTitle,
                             infoLabels={'filtro': filtro_list}
                             ))
    #tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    # Pagination

    if itemlist != []:
        actual_page_url = item.url
        next_page = scrapertools.find_single_match(data,
                                                   'alignleft><a href=(.*?) ><\/a><\/div><div class=nav-next alignright>')
        if next_page != '':
            itemlist.append(Item(channel=item.channel,
                                 action="list_all",
                                 title='Siguiente >>>',
                                 url=next_page,
                                 thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
                                 ))
    return itemlist


def section(item):
    logger.info()

    itemlist = []
    duplicated = []
    data = httptools.downloadpage(item.url).data
    if item.extra == 'genres':
        patron = '<li class="cat-item cat-item-.*?><a href="(.*?)" >(.*?)<\/a>'
    elif item.extra == 'byyear':
        patron = '<a href="([^"]+)">(\d{4})<\/a><\/li>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle
        url = scrapedurl
        if url not in duplicated:
            itemlist.append(Item(channel=item.channel,
                                 action='list_all',
                                 title=title,
                                 url=url
                                 ))
            duplicated.append(url)
    return itemlist


def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        return list_all(item)


def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    itemlist.extend(servertools.find_video_items(data=data))
    for videoitem in itemlist:
        videoitem.channel = item.channel
        videoitem.contentTitle = item.fulltitle
        videoitem.infoLabels = item.infoLabels
        if videoitem.server != 'youtube':
            videoitem.title = item.title + ' (%s)' % videoitem.server
        else:
            videoitem.title = 'Trailer en %s' % videoitem.server
        videoitem.action = 'play'
        videoitem.server = ""

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 ))
    tmdb.set_infoLabels(itemlist, True)
    itemlist = servertools.get_servers_itemlist(itemlist)
    return itemlist


def newest(categoria):
    logger.info()
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host + '/release/2017/'

        elif categoria == 'infantiles':
            item.url = host + '/genero/infantil/'

        itemlist = list_all(item)
        if itemlist[-1].title == '>> Página siguiente':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    return itemlist

def play(item):
    logger.info()
    item.thumbnail = item.contentThumbnail
    return [item]
@@ -10,13 +10,45 @@
    "movie"
  ],
  "settings": [
    {
      "id": "include_in_newest_latino",
      "type": "bool",
      "label": "Incluir en Novedades - Latino",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_global_search",
      "type": "bool",
      "label": "Incluir en busqueda global",
      "default": false,
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_peliculas",
      "type": "bool",
      "label": "Incluir en Novedades - Peliculas",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_infantiles",
      "type": "bool",
      "label": "Incluir en Novedades - Infantiles",
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_terror",
      "type": "bool",
      "label": "Incluir en Novedades - terror",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
}

@@ -31,6 +31,29 @@ def mainlist(item):
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = HOST
        elif categoria == 'infantiles':
            item.url = HOST + '/genero/animacion.html'
        elif categoria == 'terror':
            item.url = HOST + '/genero/terror.html'
        itemlist = peliculas(item)
        if ">> Página siguiente" in itemlist[-1].title:
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def peliculas(item):
    logger.info()

@@ -18,6 +18,14 @@
      "default": true,
      "enabled": true,
      "visible": true
    },
    {
      "id": "include_in_newest_castellano",
      "type": "bool",
      "label": "Incluir en Novedades - Castellano",
      "default": true,
      "enabled": true,
      "visible": true
    }
  ]
}
@@ -2,6 +2,7 @@

import re

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,74 +11,29 @@ from platformcode import logger
from platformcode import config
from core import tmdb

try:
    import xbmc
    import xbmcgui
except:
    pass
import unicodedata

ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5

host = "http://www.peliculasdk.com/"


def bbcode_kodi2html(text):
    if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
        import re
        text = re.sub(r'\[COLOR\s([^\]]+)\]',
                      r'<span style="color: \1">',
                      text)
        text = text.replace('[/COLOR]', '</span>')
        text = text.replace('[CR]', '<br>')
        text = text.replace('[B]', '<b>')
        text = text.replace('[/B]', '</b>')
        text = text.replace('"color: yellow"', '"color: gold"')
        text = text.replace('"color: white"', '"color: auto"')

    return text

host = "http://www.peliculasdk.com"

def mainlist(item):
    logger.info()
    itemlist = []
    title = "Estrenos"
    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
    itemlist.append(
        Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/ver/estrenos",
        Item(channel=item.channel, title="[COLOR orange]Estrenos[/COLOR]", action="peliculas", url=host + "/ver/estrenos",
             fanart="http://s24.postimg.org/z6ulldcph/pdkesfan.jpg",
             thumbnail="http://s16.postimg.org/st4x601d1/pdkesth.jpg"))
    title = "PelisHd"
    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
    itemlist.append(
        Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-720/",
        Item(channel=item.channel, title="[COLOR orange]PelisHd[/COLOR]", action="peliculas", url=host + "/calidad/HD-720/",
             fanart="http://s18.postimg.org/wzqonq3w9/pdkhdfan.jpg",
             thumbnail="http://s8.postimg.org/nn5669ln9/pdkhdthu.jpg"))
    title = "Pelis HD-Rip"
    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
    itemlist.append(
        Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-320",
        Item(channel=item.channel, title="[COLOR orange]Pelis HD-Rip[/COLOR]", action="peliculas", url=host + "/calidad/HD-320",
             fanart="http://s7.postimg.org/3pmnrnu7f/pdkripfan.jpg",
             thumbnail="http://s12.postimg.org/r7re8fie5/pdkhdripthub.jpg"))
    title = "Pelis Audio español"
    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
    itemlist.append(
        Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/idioma/Espanol/",
        Item(channel=item.channel, title="[COLOR orange]Pelis Audio español[/COLOR]", action="peliculas", url=host + "/idioma/Espanol/",
             fanart="http://s11.postimg.org/65t7bxlzn/pdkespfan.jpg",
             thumbnail="http://s13.postimg.org/sh1034ign/pdkhsphtub.jpg"))
    title = "Buscar..."
    title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
    itemlist.append(
        Item(channel=item.channel, title=title, action="search", url="http://www.peliculasdk.com/calidad/HD-720/",
        Item(channel=item.channel, title="[COLOR orange]Buscar...[/COLOR]", action="search", url=host + "/calidad/HD-720/",
             fanart="http://s14.postimg.org/ceqajaw2p/pdkbusfan.jpg",
             thumbnail="http://s13.postimg.org/o85gsftyv/pdkbusthub.jpg"))

@@ -88,7 +44,7 @@ def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")

    item.url = "http://www.peliculasdk.com/index.php?s=%s&x=0&y=0" % (texto)
    item.url = host + "/index.php?s=%s&x=0&y=0" % (texto)

    try:
        return buscador(item)
@@ -103,11 +59,8 @@ def search(item, texto):
def buscador(item):
    logger.info()
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

    patron = '<div class="karatula".*?'
    patron += 'src="([^"]+)".*?'
    patron += '<div class="tisearch"><a href="([^"]+)">'
@@ -115,57 +68,38 @@ def buscador(item):
    patron += 'Audio:(.*?)</a>.*?'
    patron += 'Género:(.*?)</a>.*?'
    patron += 'Calidad:(.*?),'

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedlenguaje, scrapedgenero, scrapedcalidad in matches:
        try:
            year = scrapertools.get_match(scrapedtitle, '\((\d+)\)')
        except:
            year = ""
        title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "",
                           scrapedtitle).strip()
        year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
        scrapedcalidad = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedcalidad).strip()
        scrapedlenguaje = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedlenguaje).strip()

        if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad:
            scrapedcalidad = scrapedcalidad.replace(scrapedcalidad,
                                                    bbcode_kodi2html("[COLOR orange]" + scrapedcalidad + "[/COLOR]"))
            scrapedlenguaje = scrapedlenguaje.replace(scrapedlenguaje,
                                                      bbcode_kodi2html("[COLOR orange]" + scrapedlenguaje + "[/COLOR]"))

            scrapedtitle = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")"
            scrapedtitle = scrapedtitle.replace(scrapedtitle,
                                                bbcode_kodi2html("[COLOR white]" + scrapedtitle + "[/COLOR]"))
            extra = year + "|" + title_fan
            itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart",
                                 thumbnail=scrapedthumbnail, extra=extra,
            scrapedcalidad = "[COLOR orange]" + scrapedcalidad + "[/COLOR]"
            scrapedlenguaje = "[COLOR orange]" + scrapedlenguaje + "[/COLOR]"
            title = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")"
            title = "[COLOR white]" + title + "[/COLOR]"
            scrapedtitle = scrapedtitle.split("(")[0].strip()
            itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action="findvideos",
                                 thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, infoLabels={'year': year},
                                 fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True))

    tmdb.set_infoLabels(itemlist, True)
    try:
        next_page = scrapertools.get_match(data,
                                           '<span class="current">.*?<a href="(.*?)".*?>Siguiente »</a></div>')

        title = "siguiente>>"
        title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]"))
        itemlist.append(Item(channel=item.channel, action="buscador", title=title, url=next_page,
        itemlist.append(Item(channel=item.channel, action="buscador", title="[COLOR red]siguiente>>[/COLOR]", url=next_page,
                             thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png",
                             fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True))
    except:
        pass

    return itemlist

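The hunk above swaps `scrapertools.get_match` inside a try/except for `find_single_match`, which simply returns an empty string when nothing matches. A sketch of the difference using plain `re` (the helper below is an illustrative equivalent, not the addon's own implementation):

# Sketch of why the hunk drops the try/except around the year lookup:
# get_match raises when the pattern fails, find_single_match returns "".
import re

def find_single_match(data, patron):
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

print(find_single_match("Movie Title (2017)", r'\((\d+)\)'))  # "2017"
print(find_single_match("Movie Title", r'\((\d+)\)'))         # "" instead of an exception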
def peliculas(item):
    logger.info()
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}| |&#.*?;", "", data)

    patron = 'style="position:relative;"> '
    patron += '<a href="([^"]+)" '
    patron += 'title="([^<]+)">'
@@ -173,363 +107,64 @@ def peliculas(item):
    patron += 'Audio:(.*?)</br>.*?'
    patron += 'Calidad:(.*?)</br>.*?'
    patron += 'Género:.*?tag">(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlenguaje, scrapedcalidad, scrapedgenero in matches:

        try:
            year = scrapertools.get_match(scrapedtitle, '\((\d+)\)')
        except:
            year = ""
        title_fan = re.sub(r"\[.*?\]|\(.*?\)|\d+x\d+.*?Final|-\d+|-|\d+x\d+|Temporada.*?Completa| ;", "", scrapedtitle)
        year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
        scrapedtitle = re.sub(r"\(\d+\)", "", scrapedtitle).strip()
        scrapedcalidad = re.sub(r"<a href.*?>|</a>", "", scrapedcalidad).strip()
        scrapedlenguaje = re.sub(r"<a href.*?>|</a>", "", scrapedlenguaje).strip()
        scrapedlenguaje = scrapedlenguaje.split(',')
        if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad:

            scrapedtitle = scrapedtitle

            extra = year + "|" + title_fan
            new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart",
                            thumbnail=scrapedthumbnail, extra=extra,
                            fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True,
                            language=scrapedlenguaje, quality=scrapedcalidad, contentTitle=scrapedtitle, infoLabels={
                            'year': year})
            # TODO: split the results beforehand (see the sketch after this hunk)
            #if year:
            #    tmdb.set_infoLabels_item(new_item)
            itemlist.append(new_item)

            itemlist.append(Item(channel=item.channel,
                                 title=scrapedtitle,
                                 url=scrapedurl,
                                 action="findvideos",
                                 thumbnail=scrapedthumbnail,
                                 fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True,
                                 language=scrapedlenguaje,
                                 quality=scrapedcalidad,
                                 contentTitle=scrapedtitle,
                                 infoLabels={'year': year}
                                 ))
    tmdb.set_infoLabels(itemlist)
    # Pagination

    next_page = scrapertools.get_match(data, '<span class="current">.*?<a href="(.*?)".*?>Siguiente »</a></div>')

    title = "siguiente>>"
    title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=next_page,
    itemlist.append(Item(channel=item.channel, action="peliculas", title="[COLOR red]siguiente>>[/COLOR]", url=next_page,
                         thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png",
                         fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True))

    return itemlist

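The TODO in the hunk above suggests splitting the results before the TMDB lookup. The channel already has both a batch call (`tmdb.set_infoLabels(itemlist)`) and a commented per-item variant (`tmdb.set_infoLabels_item`); a hedged sketch of the intended split, which only runs inside the addon environment and assumes `infoLabels` behaves as a dict here:

# Sketch of the TODO above: enrich only items that carry a year, in one batch,
# instead of one TMDB call per item. The split itself is illustrative.
from core import tmdb

with_year = [it for it in itemlist if it.infoLabels.get('year')]  # assumes dict-like infoLabels
tmdb.set_infoLabels(with_year)  # single batch lookup for the datable items
# items without a year keep their scraped title/thumbnail untouched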
def fanart(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
url = item.url
|
||||
data = scrapertools.cachePage(url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
title_fan = item.extra.split("|")[1]
|
||||
title = re.sub(r'Serie Completa|Temporada.*?Completa', '', title_fan)
|
||||
fulltitle = title
|
||||
title = title.replace(' ', '%20')
|
||||
title = ''.join(
|
||||
(c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn'))
|
||||
try:
|
||||
sinopsis = scrapertools.find_single_match(data, '<span class="clms">Sinopsis: <\/span>(.*?)<\/div>')
|
||||
except:
|
||||
sinopsis = ""
|
||||
year = item.extra.split("|")[0]
|
||||
|
||||
if not "series" in item.url:
|
||||
|
||||
# filmafinity
|
||||
url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(
|
||||
title, year)
|
||||
data = scrapertools.downloadpage(url)
|
||||
|
||||
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
|
||||
if url_filmaf:
|
||||
url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
|
||||
data = scrapertools.downloadpage(url_filmaf)
|
||||
else:
|
||||
|
||||
try:
|
||||
url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year)
|
||||
data = browser(url_bing)
|
||||
data = re.sub(r'\n|\r|\t|\s{2}| ', '', data)
|
||||
|
||||
if "myaddrproxy.php" in data:
|
||||
subdata_bing = scrapertools.get_match(data,
|
||||
'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
|
||||
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
|
||||
else:
|
||||
subdata_bing = scrapertools.get_match(data,
|
||||
'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"')
|
||||
|
||||
url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
|
||||
|
||||
if not "http" in url_filma:
|
||||
data = scrapertools.cachePage("http://" + url_filma)
|
||||
else:
|
||||
data = scrapertools.cachePage(url_filma)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
|
||||
except:
|
||||
pass
|
||||
|
||||
if sinopsis == " ":
|
||||
try:
|
||||
sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>')
|
||||
sinopsis = sinopsis.replace("<br><br />", "\n")
|
||||
sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis)
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">')
|
||||
except:
|
||||
rating_filma = "Sin puntuacion"
|
||||
|
||||
critica = ""
|
||||
patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"'
|
||||
matches_reviews = scrapertools.find_multiple_matches(data, patron)
|
||||
|
||||
if matches_reviews:
|
||||
for review, autor, valoracion in matches_reviews:
|
||||
review = dhe(scrapertools.htmlclean(review))
|
||||
review += "\n" + autor + "[CR]"
|
||||
review = re.sub(r'Puntuac.*?\)', '', review)
|
||||
if "positiva" in valoracion:
|
||||
critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review
|
||||
elif "neutral" in valoracion:
|
||||
critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review
|
||||
else:
|
||||
critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review
|
||||
else:
|
||||
critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]"
|
||||
print "ozuu"
|
||||
print critica
|
||||
|
||||
url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&year=" + year + "&language=es&include_adult=false"
|
||||
data = scrapertools.cachePage(url)
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
|
||||
if len(matches) == 0:
|
||||
|
||||
title = re.sub(r":.*|\(.*?\)", "", title)
|
||||
url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&language=es&include_adult=false"
|
||||
|
||||
data = scrapertools.cachePage(url)
|
||||
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) == 0:
        extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica
        show = item.fanart + "|" + "" + "|" + sinopsis
        posterdb = item.thumbnail
        fanart_info = item.fanart
        fanart_3 = ""
        fanart_2 = item.fanart
        category = item.thumbnail
        id_scraper = ""

        itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos",
                             thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show,
                             category=category, library=item.library, fulltitle=fulltitle, folder=True))

    for id, fan in matches:

        fan = re.sub(r'\\|"', '', fan)

        try:
            rating = scrapertools.find_single_match(data, '"vote_average":(.*?),')
        except:
            rating = "Sin puntuación"

        id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica
        try:
            posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"')
            posterdb = "https://image.tmdb.org/t/p/original" + posterdb
        except:
            posterdb = item.thumbnail

        if "null" in fan:
            fanart = item.fanart
        else:
            fanart = "https://image.tmdb.org/t/p/original" + fan
        item.extra = fanart

        url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=2e2160006592024ba87ccdf78c28f49f"
        data = scrapertools.cachePage(url)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

        patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(data)

        if len(matches) == 0:
            patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
            matches = re.compile(patron, re.DOTALL).findall(data)
            if len(matches) == 0:
                fanart_info = item.extra
                fanart_3 = ""
                fanart_2 = item.extra
        for fanart_info, fanart_3, fanart_2 in matches:
            fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info
            fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3
            fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2
            if fanart == item.fanart:
                fanart = fanart_info
        # clearart, fanart_2 and logo
        url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=dffe90fba4d02c199ae7a9e71330c987"
        data = scrapertools.cachePage(url)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
        patron = '"hdmovielogo":.*?"url": "([^"]+)"'
        matches = re.compile(patron, re.DOTALL).findall(data)

        if '"moviedisc"' in data:
            disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"')
        if '"movieposter"' in data:
            poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"')
        if '"moviethumb"' in data:
            thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"')
        if '"moviebanner"' in data:
            banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"')

        if len(matches) == 0:
            extra = posterdb
            # "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png"
            show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
            category = posterdb

            itemlist.append(
                Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent",
                     thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category,
                     library=item.library, fulltitle=fulltitle, folder=True))
        for logo in matches:
            if '"hdmovieclearart"' in data:
                clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"')
                if '"moviebackground"' in data:

                    extra = clear
                    show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
                    if '"moviedisc"' in data:
                        category = disc
                    else:
                        category = clear
                    itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
                                         server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
                                         show=show, category=category, library=item.library, fulltitle=fulltitle,
                                         folder=True))
                else:
                    extra = clear
                    show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
                    if '"moviedisc"' in data:
                        category = disc
                    else:
                        category = clear
                    itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
                                         server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
                                         show=show, category=category, library=item.library, fulltitle=fulltitle,
                                         folder=True))

            if '"moviebackground"' in data:

                if '"hdmovieclearart"' in data:
                    clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"')
                    extra = clear
                    show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
                    if '"moviedisc"' in data:
                        category = disc
                    else:
                        category = clear
                else:
                    extra = logo
                    show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
                    if '"moviedisc"' in data:
                        category = disc
                    else:
                        category = logo

                itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
                                     server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
                                     show=show, category=category, library=item.library, fulltitle=fulltitle,
                                     folder=True))

            if not '"hdmovieclearart"' in data and not '"moviebackground"' in data:
                extra = logo
                show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
                if '"moviedisc"' in data:
                    category = disc
                else:
                    category = item.extra
                itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
                                     thumbnail=logo, fanart=item.extra, extra=extra, show=show,
                                     category=category, library=item.library, fulltitle=fulltitle, folder=True))

        title_info = "Info"

        if posterdb == item.thumbnail:
            if '"movieposter"' in data:
                thumbnail = poster
            else:
                thumbnail = item.thumbnail
        else:
            thumbnail = posterdb

        id = id_scraper

        extra = extra + "|" + id + "|" + title.encode('utf8')

        title_info = title_info.replace(title_info, bbcode_kodi2html("[COLOR skyblue]" + title_info + "[/COLOR]"))
        itemlist.append(Item(channel=item.channel, action="info", title=title_info, url=item.url, thumbnail=thumbnail,
                             fanart=fanart_info, extra=extra, category=category, show=show, folder=False))

    return itemlist
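The block above pulls backdrops out of the TMDb /images response with chained greedy regexes over the flattened JSON, which breaks whenever TMDb reorders fields. A minimal sketch of the same lookup through the json module instead; get_backdrops is a hypothetical helper, not part of this channel, but the endpoint and api_key are the ones already used above:

import json
import urllib2

def get_backdrops(movie_id, api_key, limit=3):
    # Query the same TMDb /images endpoint used above, parsed as JSON
    url = "http://api.themoviedb.org/3/movie/%s/images?api_key=%s" % (movie_id, api_key)
    images = json.loads(urllib2.urlopen(url).read())
    base = "https://image.tmdb.org/t/p/original"
    # Build full URLs for up to `limit` backdrops, skipping null paths
    return [base + b["file_path"] for b in images.get("backdrops", [])[:limit] if b.get("file_path")]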
def findvideos(item):
    logger.info()

    itemlist = []
    data = scrapertools.cache_page(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"<!--.*?-->", "", data)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    bloque_tab = scrapertools.find_single_match(data, '<div id="verpelicula">(.*?)<div class="tab_container">')
    patron = '<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
    check = re.compile(patron, re.DOTALL).findall(bloque_tab)

    servers_data_list = []

    patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    if len(matches) == 0:
        patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
        matches = re.compile(patron, re.DOTALL).findall(data)

    for check_tab, server, id in matches:
        scrapedplot = scrapertools.get_match(data, '<span class="clms">(.*?)</div></div>')
        plotformat = re.compile('(.*?:) </span>', re.DOTALL).findall(scrapedplot)
        scrapedplot = scrapedplot.replace(scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]"))

        for plot in plotformat:
            scrapedplot = scrapedplot.replace(plot, bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]"))
        scrapedplot = scrapedplot.replace("</span>", "[CR]")
        scrapedplot = scrapedplot.replace(":", "")
        if check_tab in str(check):
            idioma, calidad = scrapertools.find_single_match(str(check), "" + check_tab + "', '(.*?)', '(.*?)'")

            servers_data_list.append([server, id, idioma, calidad])

    url = "http://www.peliculasdk.com/Js/videod.js"
    data = scrapertools.cachePage(url)
    url = host + "/Js/videod.js"
    data = httptools.downloadpage(url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data = data.replace('<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')

    patron = 'function (\w+)\(id\).*?'
    patron += 'data-src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    matches = scrapertools.find_multiple_matches(data, patron)
    for server, url in matches:

        for enlace, id, idioma, calidad in servers_data_list:

            if server == enlace:

                video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "", str(url))
                video_url = re.sub(r"'\+codigo\+'", "", video_url)
                video_url = video_url.replace('embed//', 'embed/')
@@ -541,21 +176,13 @@ def findvideos(item):
                    video_url = scrapertools.get_match(str(url), "u'([^']+)'")
                except:
                    continue

                servertitle = scrapertools.get_match(video_url, 'http.*?://(.*?)/')
                servertitle = servertitle.replace("embed.", "")
                servertitle = servertitle.replace("player.", "")
                servertitle = servertitle.replace("api.video.", "")
                servertitle = re.sub(r"hqq.tv|hqq.watch", "netutv", servertitle)
                servertitle = servertitle.replace("anonymouse.org", "netu")
                title = servertitle
                logger.debug('servertitle: %s' % servertitle)
                server = servertools.get_server_name(servertitle)
                logger.debug('server: %s' % server)
                title = "Ver en: %s [" + idioma + "][" + calidad + "]"
                itemlist.append(
                    Item(channel=item.channel, title=title, url=video_url, action="play",
                    item.clone(title=title, url=video_url, action="play",
                               thumbnail=item.category,
                               plot=scrapedplot, fanart=item.show, server=server, language=idioma, quality=calidad))
                               language=idioma, quality=calidad))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
                      'title': item.fulltitle}
@@ -563,218 +190,29 @@ def findvideos(item):
                             action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
                             text_color="0xFFff6666",
                             thumbnail='http://imgur.com/0gyYvuC.png'))

    return itemlist
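Note how findvideos leaves a literal %s placeholder in each title ("Ver en: %s [...]") and only resolves the server slot later, when get_servers_itemlist applies the lambda. Reduced to a toy sketch (FakeItem stands in for core.item.Item):

class FakeItem(object):
    def __init__(self, title, server):
        self.title = title
        self.server = server

items = [FakeItem("Ver en: %s [Español][HD]", "streamcloud")]
for i in items:
    # What the lambda passed to get_servers_itemlist does per item
    i.title = i.title % i.server.capitalize()
print items[0].title  # -> Ver en: Streamcloud [Español][HD]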
def play(item):
    logger.info()

    itemlist = servertools.find_video_items(data=item.url)
    data = scrapertools.cache_page(item.url)

    listavideos = servertools.findvideos(data)

    for video in listavideos:
        videotitle = scrapertools.unescape(video[0])
        url = item.url
        server = video[2]

        # xbmctools.addnewvideo( item.channel , "play" , category , server , , url , thumbnail , plot )
        itemlist.append(
            Item(channel=item.channel, action="play", server=server, title="Trailer - " + videotitle, url=url,
                 thumbnail=item.thumbnail, plot=item.plot, fulltitle=item.title,
                 fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False))

    return itemlist
    item.thumbnail = item.contentThumbnail
    return [item]
def info(item):
def newest(categoria):
    logger.info()
    itemlist = []
    url = item.url
    id = item.extra
    if "serie" in item.url:
        try:
            rating_tmdba_tvdb = item.extra.split("|")[6]
            if item.extra.split("|")[6] == "":
                rating_tmdba_tvdb = "Sin puntuación"
        except:
            rating_tmdba_tvdb = "Sin puntuación"
    else:
        rating_tmdba_tvdb = item.extra.split("|")[3]
    rating_filma = item.extra.split("|")[4]
    print "eztoquee"
    print rating_filma
    print rating_tmdba_tvdb

    filma = "http://s6.postimg.org/6yhe5fgy9/filma.png"

    item = Item()
    try:
        if "serie" in item.url:
            title = item.extra.split("|")[8]

        else:
            title = item.extra.split("|")[6]
        title = title.replace("%20", " ")
        title = "[COLOR yellow][B]" + title + "[/B][/COLOR]"
        if categoria == 'castellano':
            item.url = host + "idioma/Espanol/"
        item.action = "peliculas"
        itemlist = peliculas(item)
        if itemlist[-1].action == "peliculas":
            itemlist.pop()
    # Catch the exception so the Novedades channel is not interrupted when one channel fails
    except:
        title = item.title

    try:
        if "." in rating_tmdba_tvdb:
            check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).')
        else:
            check_rat_tmdba = rating_tmdba_tvdb
        if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8:
            rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
        elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10:
            rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
        else:
            rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
        print "lolaymaue"
    except:
        rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
    if "10." in rating:
        rating = re.sub(r'10\.\d+', '10', rating)
    try:
        check_rat_filma = scrapertools.get_match(rating_filma, '(\d)')
        print "paco"
        print check_rat_filma
        if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8:
            print "dios"
            print check_rat_filma
            rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]"
        elif int(check_rat_filma) >= 8:

            print check_rat_filma
            rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]"
        else:
            rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
            print "rojo??"
            print check_rat_filma
    except:
        rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
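The two try blocks above implement the same three-band color coding (crimson below 5, springgreen from 5 to 7, yellow from 8 up) with repeated int() casts and bare excepts. The banding logic, factored into a small hypothetical helper for clarity (not part of the channel):

def rating_color(value):
    # Map a "0"-"10" rating string to the BBCode color used above;
    # anything unparseable falls back to crimson, like the except branches
    try:
        v = int(float(value))
    except ValueError:
        return "crimson"
    if v >= 8:
        return "yellow"
    if v >= 5:
        return "springgreen"
    return "crimson"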
    if not "serie" in item.url:
        url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
            1] + "?api_key=2e2160006592024ba87ccdf78c28f49f&append_to_response=credits&language=es"
        data_plot = scrapertools.cache_page(url_plot)
        plot = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",')
        tagline = scrapertools.find_single_match(data_plot, '"tagline":(".*?")')
        if plot == "":
            plot = item.show.split("|")[2]

        plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
        plot = re.sub(r"\\", "", plot)

    else:
        plot = item.show.split("|")[2]
        plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
        plot = re.sub(r"\\", "", plot)

        if item.extra.split("|")[7] != "":
            tagline = item.extra.split("|")[7]
            # tagline= re.sub(r',','.',tagline)
        else:
            tagline = ""

    if "serie" in item.url:
        check2 = "serie"
        icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
        foto = item.show.split("|")[1]
        if item.extra.split("|")[5] != "":
            critica = item.extra.split("|")[5]
        else:
            critica = "Esta serie no tiene críticas..."
        if not ".png" in item.extra.split("|")[0]:
            photo = "http://imgur.com/6uXGkrz.png"
        else:
            photo = item.extra.split("|")[0].replace(" ", "%20")
        try:
            tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
        except:
            tagline = ""

    else:
        critica = item.extra.split("|")[5]
        if "%20" in critica:
            critica = "No hay críticas"
        icon = "http://imgur.com/SenkyxF.png"
        photo = item.extra.split("|")[0].replace(" ", "%20")
        foto = item.show.split("|")[1]
        try:
            if tagline == "\"\"":
                tagline = " "
        except:
            tagline = " "
        tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
        check2 = "pelicula"

    # "You may also like" recommendations
    peliculas = []
    if "serie" in item.url:

        url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[
            5] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es"
        data_tpi = scrapertools.cachePage(url_tpi)
        tpi = scrapertools.find_multiple_matches(data_tpi,
                                                 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),')

    else:
        url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
            1] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es"
        data_tpi = scrapertools.cachePage(url_tpi)
        tpi = scrapertools.find_multiple_matches(data_tpi,
                                                 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),')

    for idp, peli, thumb in tpi:

        thumb = re.sub(r'"|}', '', thumb)
        if "null" in thumb:
            thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png"
        else:
            thumb = "https://image.tmdb.org/t/p/original" + thumb
        peliculas.append([idp, peli, thumb])

    check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow")
    infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline,
                  'rating': rating}
    item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma,
                           critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/kdfWEJ6.png")
    from channels import infoplus
    infoplus.start(item_info, peliculas)
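The recommendations above are likewise pulled out of TMDb JSON with one regex spanning id, title and poster, which drops entries whenever the fields come back in a different order. A sketch of the same query through the json module; get_recommendations is a hypothetical helper, but the endpoint, api_key and noposter fallback are the ones used above:

import json
import urllib2

def get_recommendations(media_type, tmdb_id, api_key):
    # media_type is "movie" or "tv", mirroring the two branches above
    url = ("http://api.themoviedb.org/3/%s/%s/recommendations?api_key=%s&language=es"
           % (media_type, tmdb_id, api_key))
    results = json.loads(urllib2.urlopen(url).read()).get("results", [])
    base = "https://image.tmdb.org/t/p/original"
    noposter = "http://s6.postimg.org/tw1vhymj5/noposter.png"
    out = []
    for r in results:
        title = r.get("title") or r.get("name", "")  # movies use "title", shows use "name"
        poster = base + r["poster_path"] if r.get("poster_path") else noposter
        out.append([str(r["id"]), title, poster])
    return out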
def browser(url):
    import mechanize

    # Use a mechanize Browser to work around problems with the Bing search
    br = mechanize.Browser()
    # Browser options
    br.set_handle_equiv(False)
    br.set_handle_gzip(True)
    br.set_handle_redirect(True)
    br.set_handle_referer(False)
    br.set_handle_robots(False)
    # Follows refresh 0 but not hangs on refresh > 0
    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    # Want debugging messages?
    # br.set_debug_http(True)
    # br.set_debug_redirects(True)
    # br.set_debug_responses(True)

    # User-Agent (this is cheating, ok?)
    br.addheaders = [('User-agent',
                      'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
    # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
    # Open some site, let's pick a random one, the first that pops in mind
    r = br.open(url)
    response = r.read()
    print response
    if "img,divreturn" in response:
        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
        print "prooooxy"
        response = r.read()

    return response
    import sys
    for line in sys.exc_info():
        logger.error("{0}".format(line))
    return []
    return itemlist
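browser() is only a helper to route searches through Bing, falling back to an SSL proxy when Bing serves its "img,divreturn" interstitial. A typical call, assuming mechanize ships with the addon as this code expects (the query string here is purely illustrative):

html = browser("http://www.bing.com/search?q=site%3Apeliculasdk.com+matrix")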
@@ -10,13 +10,45 @@
        "movie"
    ],
    "settings": [
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
}
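Each entry in a settings list like the one above becomes a per-channel toggle that code reads back with config.get_setting(id, channel), the same call the playmax channel uses further down. A minimal sketch (the channel name is illustrative):

from platformcode import config

# True when the user left "Incluir en Novedades - Latino" enabled,
# i.e. this channel should contribute to the aggregated Latino listing
if config.get_setting("include_in_newest_latino", "somechannel"):
    pass  # call this channel's newest('latino') when building the section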
@@ -10,28 +10,47 @@ from core.item import Item

from platformcode import logger


host = "http://www.peliculasmx.net"

def mainlist(item):
    logger.info()

    itemlist = []
    itemlist.append(
        Item(channel=item.channel, title="Últimas añadidas", action="peliculas", url="http://www.peliculasmx.net/"))
        Item(channel=item.channel, title="Últimas añadidas", action="peliculas", url=host))
    itemlist.append(
        Item(channel=item.channel, title="Últimas por género", action="generos", url="http://www.peliculasmx.net/"))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url="http://www.peliculasmx.net/"))
        Item(channel=item.channel, title="Últimas por género", action="generos", url=host))
    itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host))
    return itemlist


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host
        elif categoria == 'infantiles':
            item.url = host + '/category/animacion/'
        elif categoria == 'terror':
            item.url = host + '/category/terror/'
        itemlist = peliculas(item)
        if "Pagina" in itemlist[-1].title:
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist


def generos(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    logger.debug(data)
    # <li class="cat-item cat-item-3"><a href="http://peliculasmx.net/category/accion/" >Accion</a> <span>246</span>
    patron = '<li class="cat-item cat-item-.*?'
    patron += '<a href="([^"]+)".*?'
    patron += '>([^<]+).*?'
@@ -92,7 +111,7 @@ def search(item, texto):
    texto = texto.replace(" ", "+")
    try:
        # Series
        item.url = "http://www.peliculasmx.net/?s=%s" % texto
        item.url = host + "/?s=%s" % texto
        itemlist.extend(peliculas(item))
        itemlist = sorted(itemlist, key=lambda Item: Item.title)
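newest(categoria) is the hook the aggregated Novedades channel calls with a bare category string; by convention it returns a plain list and swallows its own exceptions so one broken site cannot take down the whole listing. The caller side, simplified:

from channels import peliculasmx

items = peliculasmx.newest("terror")   # returns [] on failure instead of raising
for it in items:
    print it.title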
@@ -35,6 +35,22 @@
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_castellano",
            "type": "bool",
            "label": "Incluir en Novedades - Castellano",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "modo_grafico",
            "type": "bool",

@@ -80,6 +80,12 @@ def newest(categoria):
        item.url = host
    elif categoria == "terror":
        item.url = host + "terror/"
    elif categoria == 'castellano':
        item.url = host + "?s=Español"
    elif categoria == 'latino':
        item.url = host + "?s=Latino"


    item.from_newest = True
    item.action = "entradas"
    itemlist = entradas(item)
@@ -9,5 +9,63 @@
    "categories": [
        "direct",
        "movie"
    ]
    ],
    "settings":[
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Películas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_documentales",
            "type": "bool",
            "label": "Incluir en Novedades - Documentales",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_castellano",
            "type": "bool",
            "label": "Incluir en Novedades - Castellano",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -148,3 +148,41 @@ def findvideos(item):
def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host

        elif categoria == 'documentales':
            item.url = host + "genero/documental/"

        elif categoria == 'infantiles':
            item.url = host + "genero/animacion-e-infantil/"

        elif categoria == 'terror':
            item.url = host + "genero/terror/"

        elif categoria == 'castellano':
            item.url = host + "idioma/castellano/"

        elif categoria == 'latino':
            item.url = host + "idioma/latino/"

        itemlist = peliculas(item)

        if itemlist[-1].action == "peliculas":
            itemlist.pop()

    # Catch the exception so the Novedades channel is not interrupted when one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
@@ -10,5 +10,63 @@
        "movie",
        "direct",
        "VOS"
    ]
    ],
    "settings":[
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Películas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_documentales",
            "type": "bool",
            "label": "Incluir en Novedades - Documentales",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_castellano",
            "type": "bool",
            "label": "Incluir en Novedades - Castellano",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -164,3 +164,41 @@ def play(item):
    logger.info()
    item.thumbnail = item.extra
    return [item]


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = host

        elif categoria == 'documentales':
            item.url = host + "/genero/documental/"

        elif categoria == 'infantiles':
            item.url = host + "/genero/animacion/"

        elif categoria == 'terror':
            item.url = host + "/genero/terror/"

        elif categoria == 'castellano':
            item.url = host + "/idioma/espanol-castellano/"

        elif categoria == 'latino':
            item.url = host + "/idioma/espanol-latino/"

        itemlist = agregadas(item)

        if itemlist[-1].action == "agregadas":
            itemlist.pop()

    # Catch the exception so the Novedades channel is not interrupted when one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
@@ -27,6 +27,14 @@
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
@@ -74,16 +74,15 @@ def lista(item):

    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    if item.seccion != 'actor':
        patron = '<li class=item-serie.*?><a href=(.*?) title=(.*?)><img src=(.*?) alt=><span '
        patron += 'class=s-title><strong>.*?<\/strong><p>(.*?)<\/p><\/span><\/a><\/li>'
        patron = '(?s)<li class="item-serie.*?href="([^"]+).*?title="([^"]+).*?data-src="([^"]+).*?<span '
        patron += 'class="s-title">.*?<p>([^<]+)'
    else:
        patron = '<li><a href=(\/pelicula\/.*?)><figure><img src=(.*?) alt=><\/figure><p class=title>(.*?)<\/p><p '
        patron += 'class=year>(.*?)<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

        patron = '(?s)<li>.*?<a href="(/pelicula/[^"]+)".*?<figure>.*?data-src="([^"]+)".*?p class="title">([^<]+).*?'
        patron += 'year">([^<]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear in matches:
        url = host + scrapedurl
        if item.seccion != 'actor':
@@ -109,11 +108,11 @@ def lista(item):
    # Pagination

    if itemlist != []:
        actual_page = scrapertools.find_single_match(data, '<a class=active item href=.*?>(.*?)<\/a>')
        actual_page = scrapertools.find_single_match(data, '<a class="active item" href=".*?">(.*?)<\/a>')
        if actual_page:
            next_page_num = int(actual_page) + 1
            next_page = scrapertools.find_single_match(data,
                                                       '<li><a class= item href=(.*?)\?page=.*?&limit=.*?>Siguiente')
                                                       '<li><a class=" item" href="(.*?)\?page=.*?&limit=.*?">Siguiente')
            next_page_url = host + next_page + '?page=%s' % next_page_num
            if next_page != '':
                itemlist.append(Item(channel=item.channel,
@@ -129,15 +128,14 @@ def seccion(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    if item.seccion == 'generos':
        patron = '<a href=(\/peliculas\/[\D].*?\/) title=Películas de .*?>(.*?)<\/a>'
        patron = '<a href="(\/peliculas\/[\D].*?\/)" title="Películas de .*?>(.*?)<\/a>'
    elif item.seccion == 'anios':
        patron = '<li class=.*?><a href=(.*?)>(\d{4})<\/a> <\/li>'
        patron = '<li class=.*?><a href="(.*?)">(\d{4})<\/a> <\/li>'
    elif item.seccion == 'actor':
        patron = '<li><a href=(.*?)><div.*?<div class=photopurple title=(.*?)><\/div><img src=(.*?)><\/figure>'
    matches = re.compile(patron, re.DOTALL).findall(data)

        patron = '<li><a href="(.*?)".*?div.*?<div class="photopurple" title="(.*?)">.*?data-src="([^"]+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    if item.seccion != 'actor':
        for scrapedurl, scrapedtitle in matches:
            title = scrapedtitle.decode('utf-8')
@@ -158,7 +156,6 @@ def seccion(item):
                                 ))
    else:
        for scrapedurl, scrapedname, scrapedthumbnail in matches:
            thumbnail = scrapedthumbnail
            fanart = ''
            title = scrapedname
            url = host + scrapedurl
@@ -168,14 +165,14 @@ def seccion(item):
                                 title=title,
                                 fulltitle=item.title,
                                 url=url,
                                 thumbnail=thumbnail,
                                 thumbnail=scrapedthumbnail,
                                 fanart=fanart,
                                 seccion=item.seccion
                                 ))
    # Pagination

    if itemlist != []:
        next_page = scrapertools.find_single_match(data, '<li><a class= item href=(.*?)&limit=.*?>Siguiente <')
        next_page = scrapertools.find_single_match(data, '<li><a class=" item" href="(.*?)&limit=.*?>Siguiente <')
        next_page_url = host + next_page
        if next_page != '':
            itemlist.append(item.clone(action="seccion",
@@ -229,6 +226,7 @@ def findvideos(item):
    video_list = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    patron = '<li data-quality=(.*?) data-lang=(.*?)><a href=(.*?) title=.*?'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for quality, lang, scrapedurl in matches:
@@ -240,14 +238,20 @@ def findvideos(item):
                                 ))
    for videoitem in templist:
        data = httptools.downloadpage(videoitem.url).data

        urls_list = scrapertools.find_multiple_matches(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
        urls_list = scrapertools.find_single_match(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
        urls_list = urls_list.split("},")
        for element in urls_list:
            json_data = jsontools.load(element)
            id = json_data['id']
            sub = json_data['srt']
            url = json_data['source']
            if not element.endswith('}'):
                element = element + '}'
            json_data = jsontools.load(element)
            if 'id' in json_data:
                id = json_data['id']
            sub = ''
            if 'srt' in json_data:
                sub = json_data['srt']

            url = json_data['source'].replace('\\', '')
            server = json_data['server']
            quality = json_data['quality']
            if 'http' not in url:
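The split("},") above drops the closing brace from every element except the last, which is why each piece has to be patched back up before jsontools.load. A sketch that sidesteps the re-bracing by capturing the whole _SOURCE array and parsing it in one go (this assumes the block is valid JSON once the escaped slashes are removed):

import json

raw = scrapertools.find_single_match(data, 'var.*?_SOURCE\s+=\s+(\[.*?\])')
try:
    sources = json.loads(raw.replace('\\', ''))
except ValueError:
    sources = []
for src in sources:
    url = src.get('source', '')
    sub = src.get('srt', '')          # not every entry carries subtitles
    server = src.get('server', '')
    quality = src.get('quality', '')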
@@ -260,19 +264,19 @@ def findvideos(item):
            for urls in video_list:
                if urls.language == '':
                    urls.language = videoitem.language
                urls.title = item.title + '(%s) (%s)' % (urls.language, urls.server)

                urls.title = item.title + urls.language + '(%s)'

            for video_url in video_list:
                video_url.channel = item.channel
                video_url.action = 'play'
                video_url.quality = quality
                video_url.server = ""
                video_url.infoLabels = item.infoLabels
        else:
            server = servertools.get_server_from_url(url)
            video_list.append(item.clone(title=item.title, url=url, action='play', quality = quality,
                                         server=server))


            title = '%s [%s]' % (server, quality)
            video_list.append(item.clone(title=title, url=url, action='play', quality = quality,
                                         server=server, subtitle=sub))
    tmdb.set_infoLabels(video_list)
    if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
        video_list.append(
            Item(channel=item.channel,
@@ -291,7 +295,7 @@ def newest(categoria):
    item = Item()
    # categoria='peliculas'
    try:
        if categoria == 'peliculas':
        if categoria in ['peliculas','latino']:
            item.url = host + '/estrenos/'
        elif categoria == 'infantiles':
            item.url = host + '/peliculas/animacion/'
@@ -308,3 +312,8 @@ def newest(categoria):
        return []

    return itemlist


def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]
@@ -136,18 +136,17 @@ def lista(item):
    itemlist = []
    data = get_source(item.url)
    patron = 'class=(?:MvTbImg|TPostMv).*?href=(.*?)\/(?:>| class).*?src=(.*?) class=attachment.*?'
    patron += '(?:strong|class=Title)>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?class=Qlty>(.*?)<.*?'
    patron += '(?:strong|class=Title)>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?'
    patron += '(?:<td|class=Description)>(.*?)<(?:\/td|\/p)>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality, scrapedplot in matches:
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
        url = scrapedurl
        thumbnail = scrapedthumbnail
        plot = scrapedplot
        quality = scrapedquality
        quality = ''
        contentTitle = scrapedtitle
        title = contentTitle + ' (%s)' % quality
        title = contentTitle
        year = scrapedyear

        itemlist.append(item.clone(action='findvideos',
@@ -28,6 +28,14 @@
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
            "type": "bool",
            "label": "Incluir en Novedades - Torrent",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -334,3 +334,29 @@ def findvideos(item):
    servertools.get_servers_itemlist(itemlist)

    return itemlist

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = api + "?sort_by=''&page=0"

            itemlist = pelis(item)
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()
            item.url = api_serie + "?sort_by=''&page=0"
            itemlist.extend(series(item))
            if itemlist[-1].title == ">> Página siguiente":
                itemlist.pop()

    # Catch the exception so the Novedades channel is not interrupted when one channel fails
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist
@@ -49,6 +49,22 @@
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_castellano",
            "type": "bool",
            "label": "Incluir en Novedades - Castellano",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -34,9 +34,14 @@ def newest(categoria):
        if categoria == 'peliculas':
            item.url = host
        elif categoria == 'infantiles':
            item.url = host + 'genero/infantil/'
            item.url = host + '/genero/infantil/'
        elif categoria == 'terror':
            item.url = host + 'genero/terror/'
            item.url = host + '/genero/terror/'
        elif categoria == 'castellano':
            item.url = host + '/lenguaje/castellano/'
        elif categoria == 'latino':
            item.url = host + '/lenguaje/latino/'
        itemlist = peliculas(item)
        itemlist = peliculas(item)
        if "Pagina" in itemlist[-1].title:
            itemlist.pop()
@@ -126,6 +131,7 @@ def filtro(item):
    for url, title in matches:
        if "eroticas" in title and config.get_setting("adult_mode") == 0:
            continue
        logger.debug('la url: %s' % url)
        itemlist.append(item.clone(action = "peliculas",
                                   title = title.title(),
                                   url = url
@@ -16,19 +16,6 @@ def mainlist(item):
    item.url = "http://www.pelispekes.com/"

    data = scrapertools.cachePage(item.url)
    '''
    <div class="poster-media-card">
    <a href="http://www.pelispekes.com/un-gallo-con-muchos-huevos/" title="Un gallo con muchos Huevos">
    <div class="poster">
    <div class="title">
    <span class="under-title">Animacion</span>
    </div>
    <span class="rating">
    <i class="glyphicon glyphicon-star"></i><span class="rating-number">6.2</span>
    </span>
    <div class="poster-image-container">
    <img width="300" height="428" src="http://image.tmdb.org/t/p/w185/cz3Kb6Xa1q0uCrsTIRDS7fYOZyw.jpg" title="Un gallo con muchos Huevos" alt="Un gallo con muchos Huevos"/>
    '''
    patron = '<div class="poster-media-card"[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
    patron += '<div class="poster"[^<]+'
@@ -51,7 +38,7 @@ def mainlist(item):
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
                 plot=plot, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail))
                 plot=plot, contentTitle=title, contentThumbnail=thumbnail))

    # Extract the next page
    next_page_url = scrapertools.find_single_match(data,
@@ -65,14 +52,6 @@ def mainlist(item):

def findvideos(item):
    logger.info("item=" + item.tostring())

    '''
    <h2>Sinopsis</h2>
    <p>Para que todo salga bien en la prestigiosa Academia Werth, la pequeña y su madre se mudan a una casa nueva. La pequeña es muy seria y madura para su edad y planea estudiar durante las vacaciones siguiendo un estricto programa organizado por su madre; pero sus planes son perturbados por un vecino excéntrico y generoso. Él le enseña un mundo extraordinario en donde todo es posible. Un mundo en el que el Aviador se topó alguna vez con el misterioso Principito. Entonces comienza la aventura de la pequeña en el universo del Principito. Y así descubre nuevamente su infancia y comprenderá que sólo se ve bien con el corazón. Lo esencial es invisible a los ojos. Adaptación de la novela homónima de Antoine de Saint-Exupery.</p>
    <div
    '''

    # Download the page to get the plot
    data = scrapertools.cachePage(item.url)
    data = data.replace("www.pelispekes.com/player/tune.php?nt=", "netu.tv/watch_video.php?v=")
@@ -61,6 +61,22 @@
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_castellano",
            "type": "bool",
            "label": "Incluir en Novedades - Castellano",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -143,6 +143,11 @@ def newest(categoria):
            item.url = host + "genero/animacion-e-infantil/"
        elif categoria == 'terror':
            item.url = host + "genero/terror/"
        elif categoria == 'castellano':
            item.url = host + "idioma/castellano/"

        elif categoria == 'latino':
            item.url = host + "idioma/latino/"
        else:
            return []
@@ -11,28 +11,47 @@
        "tvshow",
        "documentary",
        "direct"

    ],
    "settings": [
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": true,
            "enabled": false,
            "visible": false
            "enabled": true,
            "visible": true
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "Latino"
            ]
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
}
@@ -145,7 +145,8 @@ def menuseries(item):
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.url = host + 'busqueda/?s=' + texto

    try:
        if texto != '':
            return lista(item)
@@ -172,7 +173,7 @@ def lista(item):

    data = httptools.downloadpage(item.url).data

    if item.title != 'Buscar':
    if item.action != 'search':
        patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \
                 'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
        actual = scrapertools.find_single_match(data,
@@ -195,8 +196,8 @@ def lista(item):
    # from tmdb
    filtro_list = filtro_list.items()

    if item.action != 'search':

    if item.title != 'Buscar':
        new_item=(
            Item(channel=item.channel,
                 contentType=tipo,
@@ -215,9 +216,17 @@ def lista(item):
            new_item.contentTitle = scrapedtitle
            itemlist.append(new_item)
    else:
        if item.extra=='':
            item.extra = scrapertools.find_single_match(url, 'serie|pelicula')+'s/'
        if 'series/' in item.extra:
            accion = 'temporadas'
            tipo = 'tvshow'
        else:
            accion = 'findvideos'
            tipo = 'movie'
        item.extra = item.extra.rstrip('s/')
        if item.extra in url:
            itemlist.append(
            new_item=(
                Item(channel=item.channel,
                     contentType=tipo,
                     action=accion,
@@ -236,21 +245,12 @@ def lista(item):
            itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    # Find the items that have no plot and load the corresponding pages to get it
    for item in itemlist:
        if item.infoLabels['plot'] == '':
            data = httptools.downloadpage(item.url).data
            item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
            item.plot = scrapertools.find_single_match(data,
                                                       '<span>Sinopsis:<\/span>.([^<]+)<span '
                                                       'class="text-detail-hide"><\/span>.<\/p>')

    # Pagination
    if item.title != 'Buscar' and actual != '':
    if item.action != 'search' and actual != '':
        if itemlist != []:
            next_page = str(int(actual) + 1)
            next_page_url = host + item.extra + 'pag-' + next_page
            next_page_url = item.extra + 'pag-' + next_page
            if not next_page_url.startswith("http"):
                next_page_url = host + next_page_url
            itemlist.append(
                Item(channel=item.channel,
                     action="lista",
@@ -437,9 +437,8 @@ def get_vip(url):
    else:
        id = scrapertools.find_single_match(item,'episodes\/(\d+)')
        new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
    data = httptools.downloadpage(new_url, follow_redirects=False).headers
    itemlist.extend(servertools.find_video_items(data=str(data)))

    data = httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
    itemlist.append(Item(url=data))
    return itemlist
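The get_vip change above stops scraping the stringified headers: with follow_redirects=False, httptools hands back the raw 3xx response, so the final stream URL can be read straight from the Location header. The core of it:

resp = httptools.downloadpage(new_url, follow_redirects=False)
final_url = resp.headers.get("location", "")   # empty string when there was no redirect
if final_url:
    itemlist.append(Item(url=final_url))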
@@ -459,22 +458,17 @@ def findvideos(item):
    itemlist.extend(servertools.find_video_items(data=data))

    for videoitem in itemlist:
        # videoitem.infoLabels = item.infoLabels
        videoitem.channel = item.channel
        videoitem.infoLabels = item.infoLabels
        if videoitem.quality == '' or videoitem.language == '':
            videoitem.quality = 'default'
            videoitem.language = 'Latino'
        if videoitem.server != '':
            videoitem.thumbnail = item.thumbnail
        else:
            videoitem.thumbnail = item.thumbnail
            videoitem.server = 'directo'
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
            videoitem.title = item.contentTitle + ' (%s)'

    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    n = 0
    for videoitem in itemlist:
        if 'youtube' in videoitem.url:
@@ -486,7 +480,7 @@ def findvideos(item):
            itemlist.pop(1)

    # Required by FilterTools

    tmdb.set_infoLabels_itemlist(itemlist, True)
    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Required by AutoPlay
@@ -507,18 +501,26 @@ def findvideos(item):
    return itemlist


def play(item):
    item.thumbnail = item.contentThumbnail
    return [item]


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    item.extra = 'estrenos/'
    try:
        if categoria == 'peliculas':
        if categoria in ['peliculas','latino']:
            item.url = host + 'estrenos/pag-1'

        elif categoria == 'infantiles':
            item.url = host + 'peliculas/animacion/pag-1'

        elif categoria == 'terror':
            item.url = host + 'peliculas/terror/pag-1'

        elif categoria == 'documentales':
            item.url = host + 'documentales/pag-1'
            item.extra = 'documentales/'
@@ -532,6 +534,5 @@ def newest(categoria):
        logger.error("{0}".format(line))
        return []

    itemlist = filtertools.get_links(itemlist, item, list_language)


    #itemlist = filtertools.get_links(itemlist, item, list_language)
    return itemlist
@@ -97,12 +97,12 @@ def list_all (item):
        contentType = 'pelicula'
        action = 'findvideos'

    patron = 'item-%s><a href=(.*?)><figure><img src=https:(.*?)' % contentType
    patron += ' alt=><\/figure><p>(.*?)<\/p><span>(.*?)<\/span>'
    patron = 'item-%s><a href=(.*?)><figure><img.*?data-src=(.*?) alt=.*?<p>(.*?)<\/p><span>(\d{4})<\/span>' % contentType

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
        url = host + scrapedurl
        url = host + scrapedurl + 'p001/'
        thumbnail = scrapedthumbnail
        plot = ''
        contentTitle = scrapedtitle
@@ -263,7 +263,9 @@ def findvideos(item):
    video_list = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = 'data-source=(.*?) data.*?-srt=(.*?) data-iframe=0><a>(.*?) - (.*?)<\/a>'

    patron = 'data-source=(.*?) .*?tab.*?data.*?srt=(.*?) data-iframe=><a>(.*?)\s?-\s?(.*?)<\/a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for url, sub, language, quality in matches:
@@ -1,114 +0,0 @@
{
    "id": "playmax",
    "name": "PlayMax",
    "language": ["cast", "lat"],
    "active": false,
    "adult": false,
    "thumbnail": "playmax.png",
    "banner": "playmax.png",
    "categories": [
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "playmaxuser",
            "type": "text",
            "color": "0xFF25AA48",
            "label": "@30014",
            "enabled": true,
            "visible": true
        },
        {
            "id": "playmaxpassword",
            "type": "text",
            "color": "0xFF25AA48",
            "hidden": true,
            "label": "@30015",
            "enabled": "!eq(-1,'')",
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en búsqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Películas",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_series",
            "type": "bool",
            "label": "Incluir en Novedades - Series",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "modo_grafico",
            "type": "bool",
            "color": "0xFFd50b0b",
            "label": "Buscar información extra",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "menu_info",
            "type": "bool",
            "color": "0xFFd50b0b",
            "label": "Mostrar menú intermedio película/episodio",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "perfil",
            "type": "list",
            "label": "Perfil de color",
            "default": 3,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "Sin color",
                "Perfil 3",
                "Perfil 2",
                "Perfil 1"
            ]
        },
        {
            "id": "last_page",
            "type": "bool",
            "color": "0xFFd50b0b",
            "label": "Ocultar opción elegir página en películas (Kodi)",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "order_web",
            "type": "bool",
            "color": "0xFFd50b0b",
            "label": "Usar el mismo orden de los enlaces que la web",
            "default": false,
            "enabled": true,
            "visible": true
        }
    ]
}
@@ -1,979 +0,0 @@
# -*- coding: utf-8 -*-

import re

from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

sid = config.get_setting("sid_playmax", "playmax")
apikey = "0ea143087685e9e0a23f98ae"
__modo_grafico__ = config.get_setting('modo_grafico', 'playmax')
__perfil__ = config.get_setting('perfil', "playmax")
__menu_info__ = config.get_setting('menu_info', 'playmax')

# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
          ['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
          ['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]

if __perfil__ - 1 >= 0:
    color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
    color1 = color2 = color3 = color4 = color5 = ""

host = "https://playmax.mx"


def login():
    logger.info()

    try:
        user = config.get_setting("playmaxuser", "playmax")
        password = config.get_setting("playmaxpassword", "playmax")
        if user == "" and password == "":
            return False, "Para ver los enlaces de este canal es necesario registrarse en playmax.mx"
        elif user == "" or password == "":
            return False, "Usuario o contraseña en blanco. Revisa tus credenciales"

        data = httptools.downloadpage("https://playmax.mx/ucp.php?mode=login").data
        if re.search(r'(?i)class="hb_user_data" title="%s"' % user, data):
            if not config.get_setting("sid_playmax", "playmax"):
                sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
                if not sid_:
                    sid_ = scrapertools.find_single_match(config.get_cookie_data(), 'playmax.*?_sid\s*([A-z0-9]+)')
                config.set_setting("sid_playmax", sid_, "playmax")
            return True, ""

        confirm_id = scrapertools.find_single_match(data, 'name="confirm_id" value="([^"]+)"')
        sid_log = scrapertools.find_single_match(data, 'name="sid" value="([^"]+)"')
        post = "username=%s&password=%s&autologin=on&agreed=true&change_lang=0&confirm_id=%s&login=&sid=%s" \
               "&redirect=index.php&login=Entrar" % (user, password, confirm_id, sid_log)
        data = httptools.downloadpage("https://playmax.mx/ucp.php?mode=login", post=post).data
        if "contraseña incorrecta" in data:
            logger.error("Error en el login")
            return False, "Contraseña errónea. Comprueba tus credenciales"
        elif "nombre de usuario incorrecto" in data:
            logger.error("Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"
        else:
            logger.info("Login correcto")
            sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
            if not sid_:
                sid_ = scrapertools.find_single_match(config.get_cookie_data(), 'playmax.*?_sid\s*([A-z0-9]+)')
            config.set_setting("sid_playmax", sid_, "playmax")
            # On the first login, enable global search and the Novedades section
            if not config.get_setting("primer_log", "playmax"):
                config.set_setting("include_in_global_search", True, "playmax")
                config.set_setting("include_in_newest_peliculas", True, "playmax")
                config.set_setting("include_in_newest_series", True, "playmax")
                config.set_setting("include_in_newest_infantiles", True, "playmax")
                config.set_setting("primer_log", False, "playmax")
            return True, ""
    except:
        import traceback
        logger.error(traceback.format_exc())
        return False, "Error en el login. Comprueba tus credenciales o si la web está operativa"
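login() tries two sources for the session id: a sid= fragment in the page markup, then the _sid cookie via config.get_cookie_data(). Collapsed into one hypothetical helper for clarity:

def extract_sid(data):
    # Prefer the sid embedded in the page, fall back to the cookie jar
    sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
    if not sid_:
        sid_ = scrapertools.find_single_match(config.get_cookie_data(), 'playmax.*?_sid\s*([A-z0-9]+)')
    return sid_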
def mainlist(item):
    logger.info()
    itemlist = []
    item.text_color = color1

    logueado, error_message = login()

    if not logueado:
        config.set_setting("include_in_global_search", False, "playmax")
        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
        return itemlist

    itemlist.append(item.clone(title="Películas", action="", text_color=color2))
    item.contentType = "movie"
    itemlist.append(
        item.clone(title="    Novedades", action="fichas", url=host + "/catalogo.php?tipo[]=2&ad=2&ordenar="
                                                                      "novedades&con_dis=on"))
    itemlist.append(
        item.clone(title="    Populares", action="fichas", url=host + "/catalogo.php?tipo[]=2&ad=2&ordenar="
                                                                      "pop&con_dis=on"))
    itemlist.append(item.clone(title="    Índices", action="indices"))

    itemlist.append(item.clone(title="Series", action="", text_color=color2))
    item.contentType = "tvshow"
    itemlist.append(item.clone(title="    Nuevos capítulos", action="fichas", url=host + "/catalogo.php?tipo[]=1&ad=2&"
                                                                                         "ordenar=novedades&con_dis=on"))
    itemlist.append(item.clone(title="    Nuevas series", action="fichas", url=host + "/catalogo.php?tipo[]=1&ad=2&"
                                                                                      "ordenar=año&con_dis=on"))
    itemlist.append(item.clone(title="    Índices", action="indices"))

    item.contentType = "movie"
    itemlist.append(item.clone(title="Documentales", action="fichas", text_color=color2,
                               url=host + "/catalogo.php?&tipo[]=3&ad=2&ordenar=novedades&con_dis=on"))
    itemlist.append(item.clone(title="Listas", action="listas", text_color=color2,
                               url=host + "/listas.php?apikey=%s&sid=%s&start=0" % (apikey, sid), extra="listas"))
    itemlist.append(item.clone(action="search", title="Buscar...", text_color=color2))
    itemlist.append(item.clone(action="acciones_cuenta", title="Tus fichas", text_color=color4))
    itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))

    return itemlist

def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "%20")
    item.url = "%s/buscar.php?apikey=%s&sid=%s&buscar=%s&modo=[fichas]&start=0" % (host, apikey, sid, texto)
    try:
        return busqueda(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

def busqueda(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = xml2dict(data)
    if type(data["Data"]["Fichas"]["Ficha"]) == dict:
        searched_data = [data["Data"]["Fichas"]["Ficha"]]
    else:
        searched_data = data["Data"]["Fichas"]["Ficha"]

    for f in searched_data:
        f["Title"] = f["Title"].replace("<![CDATA[", "").replace("]]>", "")
        title = "%s (%s)" % (f["Title"], f["Year"])
        infolab = {'year': f["Year"]}
        thumbnail = f["Poster"]
        url = "%s/ficha.php?f=%s" % (host, f["Id"])
        action = "findvideos"
        if __menu_info__:
            action = "menu_info"
        if f["IsSerie"] == "1":
            tipo = "tvshow"
            show = f["Title"]
            if not __menu_info__:
                action = "episodios"
        else:
            tipo = "movie"
            show = ""
        itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, text_color=color2,
                             contentTitle=f["Title"], show=show, contentType=tipo, infoLabels=infolab,
                             thumbnail=thumbnail))

    if __modo_grafico__:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    total = int(data["Data"]["totalResultsFichas"])
    actualpage = int(scrapertools.find_single_match(item.url, "start=(\d+)"))
    if actualpage + 20 < total:
        next_page = item.url.replace("start=%s" % actualpage, "start=%s" % (actualpage + 20))
        itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente",
                             url=next_page, thumbnail=item.thumbnail))

    return itemlist

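busqueda() above, like findvideos() and listas() further down, has to normalize xml2dict() output: a node with one child comes back as a dict, several children come back as a list. A hypothetical helper (ensure_list is not part of the channel, just a sketch of the repeated idiom):

# Hypothetical helper for the "one child = dict, many = list" normalization
# that busqueda(), findvideos() and listas() each repeat inline.
def ensure_list(node):
    if isinstance(node, list):
        return node
    return [node]

# Usage sketch:
#     for f in ensure_list(data["Data"]["Fichas"]["Ficha"]):
#         ...
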
def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return ret

def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'series':
            item.channel = "playmax"
            item.extra = "newest"
            item.url = host + "/catalogo.php?tipo[]=1&ad=2&ordenar=novedades&con_dis=on"
            item.contentType = "tvshow"
            itemlist = fichas(item)

            if itemlist[-1].action == "fichas":
                itemlist.pop()
        elif categoria == 'peliculas':
            item.channel = "playmax"
            item.extra = "newest"
            item.url = host + "/catalogo.php?tipo[]=2&ad=2&ordenar=novedades&con_dis=on"
            item.contentType = "movie"
            itemlist = fichas(item)

            if itemlist[-1].action == "fichas":
                itemlist.pop()
        elif categoria == 'infantiles':
            item.channel = "playmax"
            item.extra = "newest"
            item.url = host + "/catalogo.php?tipo[]=2&generos[]=60&ad=2&ordenar=novedades&con_dis=on"
            item.contentType = "movie"
            itemlist = fichas(item)

            if itemlist[-1].action == "fichas":
                itemlist.pop()

    # The exception is caught so that one failing channel does not break the global "newest" section
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

    return itemlist

def indices(item):
    logger.info()
    itemlist = []

    tipo = "2"
    if item.contentType == "tvshow":
        tipo = "1"
    if "Índices" in item.title:
        if item.contentType == "tvshow":
            itemlist.append(item.clone(title="Populares", action="fichas", url=host + "/catalogo.php?tipo[]=1&ad=2&"
                                                                                      "ordenar=pop&con_dis=on"))
        itemlist.append(item.clone(title="Más vistas", action="fichas", url=host + "/catalogo.php?tipo[]=%s&ad=2&"
                                                                                   "ordenar=siempre&con_dis=on" % tipo))
        itemlist.append(item.clone(title="Mejor valoradas", action="fichas", url=host + "/catalogo.php?tipo[]=%s&ad=2&"
                                                                                        "ordenar=valoracion&con_dis=on" % tipo))
        itemlist.append(item.clone(title="Géneros", url=host + "/catalogo.php"))
        itemlist.append(item.clone(title="Idiomas", url=host + "/catalogo.php"))
        if item.contentType == "movie":
            itemlist.append(item.clone(title="Por calidad", url=host + "/catalogo.php"))
        itemlist.append(item.clone(title="Por año"))
        itemlist.append(item.clone(title="Por país", url=host + "/catalogo.php"))

        return itemlist

    if "Géneros" in item.title:
        data = httptools.downloadpage(item.url).data
        patron = '<div class="sel gen" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(data, patron)
        for value, genero in matches:
            url = item.url + "?tipo[]=%s&generos[]=%s&ad=2&ordenar=novedades&con_dis=on" % (tipo, value)
            itemlist.append(item.clone(action="fichas", title=genero, url=url))
    elif "Idiomas" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(data, 'oname="Idioma">Cualquier(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, idioma in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_idioma=%s" % (tipo, value)
            itemlist.append(item.clone(action="fichas", title=idioma, url=url))
    elif "calidad" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(data, 'oname="Calidad">Cualquier(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, calidad in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_calidad=%s" % (tipo, value)
            itemlist.append(item.clone(action="fichas", title=calidad, url=url))
    elif "país" in item.title:
        data = httptools.downloadpage(item.url).data
        bloque = scrapertools.find_single_match(data, 'oname="País">Todos(.*?)<input')
        patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for value, pais in matches:
            url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&pais=%s" % (tipo, value)
            itemlist.append(item.clone(action="fichas", title=pais, url=url))
    else:
        from datetime import datetime
        year = datetime.now().year
        for i in range(year, 1899, -1):
            url = "%s/catalogo.php?tipo[]=%s&del=%s&al=%s&año=personal&ad=2&ordenar=novedades&con_dis=on" \
                  % (host, tipo, i, i)
            itemlist.append(item.clone(action="fichas", title=str(i), url=url))

    return itemlist

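The catalogo.php URLs assembled in mainlist(), newest() and indices() always combine tipo[] (1 = series, 2 = movies, 3 = documentaries, inferred from the calls), an ordenar criterion (novedades, pop, siempre, valoracion, año) and con_dis=on. A hypothetical helper sketching that scheme (build_catalog_url does not exist in the channel):

# Hypothetical sketch of the catalogo.php URL scheme used by this channel.
def build_catalog_url(host, tipo, ordenar="novedades", extra=""):
    url = "%s/catalogo.php?tipo[]=%s&ad=2&ordenar=%s&con_dis=on" % (host, tipo, ordenar)
    if extra:
        url += "&" + extra
    return url

# build_catalog_url("https://playmax.mx", 2, "pop")
# -> "https://playmax.mx/catalogo.php?tipo[]=2&ad=2&ordenar=pop&con_dis=on"
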
def fichas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)

    fichas_marca = {'1': 'Siguiendo', '2': 'Pendiente', '3': 'Favorita', '4': 'Vista', '5': 'Abandonada'}
    patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src-data="([^"]+)".*?' \
             '<div class="c_fichas_data".*?marked="([^"]*)".*?serie="([^"]*)".*?' \
             '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, marca, serie, episodio, scrapedtitle in matches:
        tipo = "movie"
        scrapedurl = host + scrapedurl.rsplit("-dc=")[0]
        if "-dc=" not in scrapedurl:
            scrapedurl += "-dc="
        action = "findvideos"
        if __menu_info__:
            action = "menu_info"
        if serie:
            tipo = "tvshow"
        if episodio:
            title = "%s - %s" % (episodio.replace("X", "x"), scrapedtitle)
        else:
            title = scrapedtitle

        if marca:
            title += " [COLOR %s][%s][/COLOR]" % (color4, fichas_marca[marca])

        new_item = Item(channel=item.channel, action=action, title=title, url=scrapedurl,
                        thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, contentType=tipo,
                        text_color=color2)
        if new_item.contentType == "tvshow":
            new_item.show = scrapedtitle
            if not __menu_info__:
                new_item.action = "episodios"

        itemlist.append(new_item)

    if itemlist and (item.extra == "listas_plus" or item.extra == "sigo"):
        follow = scrapertools.find_single_match(data, '<div onclick="seguir_lista.*?>(.*?)<')
        title = "Seguir Lista"
        if follow == "Siguiendo":
            title = "Dejar de seguir lista"
        item.extra = ""
        url = host + "/data.php?mode=seguir_lista&apikey=%s&sid=%s&lista=%s" % (
            apikey, sid, item.url.rsplit("/l", 1)[1])
        itemlist.insert(0, item.clone(action="acciones_cuenta", title=title, url=url, text_color=color4,
                                      lista=item.title, folder=False))

    next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="next"')
    if next_page:
        next_page = host + next_page.replace("&amp;", "&")
        itemlist.append(Item(channel=item.channel, action="fichas", title=">> Página Siguiente", url=next_page))

    try:
        total = int(scrapertools.find_single_match(data, '<span class="page-dots">.*href.*?>(\d+)'))
    except:
        total = 0
    if not config.get_setting("last_page", item.channel) and config.is_xbmc() and total > 2 \
            and item.extra != "newest":
        itemlist.append(item.clone(action="select_page", title="Ir a página... (Total:%s)" % total, url=next_page,
                                   text_color=color5))

    return itemlist

def episodios(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    if not item.infoLabels["tmdb_id"]:
        item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data,
                                                                    '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
        item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
    if not item.infoLabels["genre"]:
        item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data,
                                                                                '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')

    dc = scrapertools.find_single_match(data, "var dc_ic = '\?dc=([^']+)'")
    patron = '<div class="f_cl_l_c f_cl_l_c_id[^"]+" c_id="([^"]+)" .*?c_num="([^"]+)" c_name="([^"]+)"' \
             '.*?load_f_links\(\d+\s*,\s*(\d+).*?<div class="([^"]+)" onclick="marcar_capitulo'
    matches = scrapertools.find_multiple_matches(data, patron)
    lista_epis = []
    for c_id, episodio, title, ficha, status in matches:
        episodio = episodio.replace("X", "x")
        if episodio in lista_epis:
            continue
        lista_epis.append(episodio)
        url = "https://playmax.mx/c_enlaces_n.php?ficha=%s&c_id=%s&dc=%s" % (ficha, c_id, dc)
        title = "%s - %s" % (episodio, title)
        if "_mc a" in status:
            title = "[COLOR %s]%s[/COLOR] %s" % (color5, u"\u0474".encode('utf-8'), title)

        new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
                        fanart=item.fanart, show=item.show, infoLabels=item.infoLabels, text_color=color2,
                        referer=item.url, contentType="episode")
        try:
            new_item.infoLabels["season"], new_item.infoLabels["episode"] = episodio.split('x', 1)
        except:
            pass
        itemlist.append(new_item)

    itemlist.sort(key=lambda it: (it.infoLabels["season"], it.infoLabels["episode"]), reverse=True)
    if __modo_grafico__:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support() and not item.extra:
        title = "Añadir serie a la videoteca"
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, "SERIES")
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        for f in filename:
                            if f != "tvshow.nfo":
                                continue
                            from core import videolibrarytools
                            head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "Serie ya en tu videoteca. [%s] ¿Añadir?" % ",".join(canales)
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(item.clone(action="add_serie_to_library", title=title, text_color=color5,
                                   extra="episodios###library"))
    if itemlist and not __menu_info__:
        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        itemlist.extend(acciones_fichas(item, sid, ficha))

    return itemlist

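One caveat on the episode sort above: episodio.split('x', 1) yields strings, and whether "10" orders after "9" depends on how infoLabels stores season and episode. If explicitly numeric ordering were wanted, a hedged sketch of an integer key (episode_key is not in the channel):

# Sketch of an integer-safe sort key for the "2x05"-style values built above.
def episode_key(it):
    try:
        return int(it.infoLabels["season"]), int(it.infoLabels["episode"])
    except (KeyError, ValueError):
        return 0, 0

# itemlist.sort(key=episode_key, reverse=True)
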
def findvideos(item):
    logger.info()
    itemlist = []

    if item.contentType == "movie":
        # Download the page
        data = httptools.downloadpage(item.url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        if not item.infoLabels["tmdb_id"]:
            item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data, '<a href="https://www.themoviedb.org/'
                                                                              '[^/]+/(\d+)')
            item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')

        if __modo_grafico__:
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        if not item.infoLabels["plot"]:
            item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')
        if not item.infoLabels["genre"]:
            item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data, '<a itemprop="genre"[^>]+>'
                                                                                          '([^<]+)</a>'))

        ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
        if not ficha:
            ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')
        cid = "0"
    else:
        ficha, cid = scrapertools.find_single_match(item.url, 'ficha=(\d+)&c_id=(\d+)')

    url = "https://playmax.mx/c_enlaces_n.php?apikey=%s&sid=%s&ficha=%s&cid=%s" % (apikey, sid, ficha, cid)
    data = httptools.downloadpage(url).data
    data = xml2dict(data)

    for k, v in data["Data"].items():
        try:
            if type(v) is dict:
                if k == "Online":
                    order = 1
                elif k == "Download":
                    order = 0
                else:
                    order = 2

                itemlist.append(item.clone(action="", title=k, text_color=color3, order=order))
                if type(v["Item"]) is str:
                    continue
                elif type(v["Item"]) is dict:
                    v["Item"] = [v["Item"]]
                for it in v["Item"]:
                    try:
                        thumbnail = "%s/styles/prosilver/imageset/%s.png" % (host, it['Host'])
                        title = "    %s - %s/%s" % (it['Host'].capitalize(), it['Quality'], it['Lang'])
                        calidad = int(scrapertools.find_single_match(it['Quality'], '(\d+)p'))
                        calidadaudio = it['QualityA'].replace("...", "")
                        subtitulos = it['Subtitles'].replace("Sin subtítulos", "")
                        if subtitulos:
                            title += " (%s)" % subtitulos
                        if calidadaudio:
                            title += " [Audio:%s]" % calidadaudio

                        likes = 0
                        if it["Likes"] != "0" or it["Dislikes"] != "0":
                            likes = int(it["Likes"]) - int(it["Dislikes"])
                            title += " (%s ok, %s ko)" % (it["Likes"], it["Dislikes"])
                        if type(it["Url"]) is dict:
                            for i, enlace in enumerate(it["Url"]["Item"]):
                                titulo = title + " (Parte %s)" % (i + 1)
                                itemlist.append(item.clone(title=titulo, url=enlace, action="play", calidad=calidad,
                                                           thumbnail=thumbnail, order=order, like=likes, ficha=ficha,
                                                           cid=cid, folder=False))
                        else:
                            url = it["Url"]
                            itemlist.append(item.clone(title=title, url=url, action="play", calidad=calidad,
                                                       thumbnail=thumbnail, order=order, like=likes, ficha=ficha,
                                                       cid=cid, folder=False))
                    except:
                        pass
        except:
            pass

    if not config.get_setting("order_web", "playmax"):
        itemlist.sort(key=lambda it: (it.order, it.calidad, it.like), reverse=True)
    else:
        itemlist.sort(key=lambda it: it.order, reverse=True)
    if itemlist:
        itemlist.extend(acciones_fichas(item, sid, ficha))

    if not itemlist and item.contentType != "movie":
        url = url.replace("apikey=%s&" % apikey, "")
        data = httptools.downloadpage(url).data
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

        patron = '<div id="f_fde_c"[^>]+>(.*?update_fecha\(\d+\)">)</div>'
        estrenos = scrapertools.find_multiple_matches(data, patron)
        for info in estrenos:
            info = "Estreno en " + scrapertools.htmlclean(info)
            itemlist.append(item.clone(action="", title=info))

    if not itemlist:
        itemlist.append(item.clone(action="", title="No hay enlaces disponibles"))

    return itemlist

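The default link ordering above is a reverse sort on the tuple (order, calidad, like): section order first, then the numeric resolution parsed from Quality, then net likes. A tiny self-contained illustration of how the tuple breaks ties (the Link type is invented for the demo):

# Demo of the tuple sort used by findvideos(); Link is invented for illustration.
from collections import namedtuple

Link = namedtuple("Link", "order calidad like")
links = [Link(1, 720, 2), Link(1, 1080, -1), Link(0, 1080, 5)]
links.sort(key=lambda it: (it.order, it.calidad, it.like), reverse=True)
# -> [Link(order=1, calidad=1080, like=-1), Link(order=1, calidad=720, like=2),
#     Link(order=0, calidad=1080, like=5)]
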
def menu_info(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
    item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
    item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')
    item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data,
                                                                            '<a itemprop="genre"[^>]+>([^<]+)</a>'))
    if __modo_grafico__:
        tmdb.set_infoLabels_item(item, __modo_grafico__)

    action = "findvideos"
    title = "Ver enlaces"
    if item.contentType == "tvshow":
        action = "episodios"
        title = "Ver capítulos"
    itemlist.append(item.clone(action=action, title=title))

    carpeta = "CINE"
    tipo = "película"
    action = "add_pelicula_to_library"
    extra = ""
    if item.contentType == "tvshow":
        carpeta = "SERIES"
        tipo = "serie"
        action = "add_serie_to_library"
        extra = "episodios###library"

    library_path = config.get_videolibrary_path()
    if config.get_videolibrary_support():
        title = "Añadir %s a la videoteca" % tipo
        if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
            try:
                from core import filetools
                path = filetools.join(library_path, carpeta)
                files = filetools.walk(path)
                for dirpath, dirname, filename in files:
                    if item.infoLabels["imdb_id"] in dirpath:
                        namedir = dirpath.replace(path, '')[1:]
                        for f in filename:
                            if f != namedir + ".nfo" and f != "tvshow.nfo":
                                continue
                            from core import videolibrarytools
                            head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, f))
                            canales = it.library_urls.keys()
                            canales.sort()
                            if "playmax" in canales:
                                canales.pop(canales.index("playmax"))
                                canales.insert(0, "[COLOR red]playmax[/COLOR]")
                            title = "%s ya en tu videoteca. [%s] ¿Añadir?" % (tipo.capitalize(), ",".join(canales))
                            break
            except:
                import traceback
                logger.error(traceback.format_exc())
                pass

        itemlist.append(item.clone(action=action, title=title, text_color=color5, extra=extra))

    token_auth = config.get_setting("token_trakt", "tvmoviedb")
    if token_auth and item.infoLabels["tmdb_id"]:
        extra = "movie"
        if item.contentType != "movie":
            extra = "tv"
        itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta", action="menu_trakt",
                                   extra=extra))
    itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                               text_color="magenta", context=""))

    itemlist.append(item.clone(action="", title=""))
    ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
    if not ficha:
        ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')

    itemlist.extend(acciones_fichas(item, sid, ficha, season=True))
    itemlist.append(item.clone(action="acciones_cuenta", title="Añadir a una lista", text_color=color3, ficha=ficha))

    return itemlist

def acciones_fichas(item, sid, ficha, season=False):
    marcarlist = []
    new_item = item.clone()
    new_item.infoLabels.pop("duration", None)
    estados = [{'following': 'seguir'}, {'favorite': 'favorita'}, {'view': 'vista'}, {'slope': 'pendiente'}]
    url = "https://playmax.mx/ficha.php?apikey=%s&sid=%s&f=%s" % (apikey, sid, ficha)
    data = httptools.downloadpage(url).data
    data = xml2dict(data)

    try:
        marked = data["Data"]["User"]["Marked"]
        if new_item.contentType == "episode":
            for epi in data["Data"]["Episodes"]["Season_%s" % new_item.infoLabels["season"]]["Item"]:
                if int(epi["Episode"]) == new_item.infoLabels["episode"]:
                    epi_marked = epi["EpisodeViewed"].replace("yes", "ya")
                    epi_id = epi["Id"]
                    marcarlist.append(new_item.clone(action="marcar", title="Capítulo %s visto. ¿Cambiar?" % epi_marked,
                                                     text_color=color3, epi_id=epi_id))
                    break
    except:
        pass

    try:
        tipo = new_item.contentType.replace("movie", "Película").replace("episode", "Serie").replace("tvshow", "Serie")
        for status in estados:
            for k, v in status.items():
                if k != marked:
                    title = "Marcar %s como %s" % (tipo.lower(), v)
                    action = "marcar"
                else:
                    title = "%s marcada como %s" % (tipo, v)
                    action = ""
                if k == "following" and tipo == "Película":
                    continue
                elif k == "following" and tipo == "Serie":
                    title = title.replace("seguir", "seguida")
                    if k != marked:
                        title = "Seguir serie"
                        action = "marcar"
                    marcarlist.insert(1, new_item.clone(action=action, title=title, text_color=color4, ficha=ficha,
                                                        folder=False))
                    continue

                marcarlist.append(new_item.clone(action="marcar", title=title, text_color=color3, ficha=ficha,
                                                 folder=False))
    except:
        pass

    try:
        if season and item.contentType == "tvshow":
            seasonlist = []
            for k, v in data["Data"]["Episodes"].items():
                vistos = False
                season = k.rsplit("_", 1)[1]
                if type(v) is str:
                    continue
                elif type(v["Item"]) is not list:
                    v["Item"] = [v["Item"]]

                for epi in v["Item"]:
                    if epi["EpisodeViewed"] == "no":
                        vistos = True
                        seasonlist.append(
                            new_item.clone(action="marcar", title="Marcar temporada %s como vista" % season,
                                           text_color=color1, season=int(season), ficha=ficha, folder=False))
                        break

                if not vistos:
                    seasonlist.append(
                        new_item.clone(action="marcar", title="Temporada %s ya vista. ¿Revertir?" % season,
                                       text_color=color1, season=int(season), ficha=ficha, folder=False))

            seasonlist.sort(key=lambda it: it.season, reverse=True)
            marcarlist.extend(seasonlist)
    except:
        pass
    return marcarlist

def acciones_cuenta(item):
    logger.info()
    itemlist = []

    if "Tus fichas" in item.title:
        itemlist.append(item.clone(title="Capítulos", url="tf_block_c a", contentType="tvshow"))
        itemlist.append(item.clone(title="Series", url="tf_block_s", contentType="tvshow"))
        itemlist.append(item.clone(title="Películas", url="tf_block_p"))
        itemlist.append(item.clone(title="Documentales", url="tf_block_d"))
        return itemlist
    elif "Añadir a una lista" in item.title:
        data = httptools.downloadpage(host + "/c_listas.php?apikey=%s&sid=%s" % (apikey, sid)).data
        data = xml2dict(data)
        itemlist.append(item.clone(title="Crear nueva lista", folder=False))
        if data["Data"]["TusListas"] != "\t":
            import random
            data = data["Data"]["TusListas"]["Item"]
            if type(data) is not list:
                data = [data]
            for child in data:
                image = ""
                title = "%s (%s fichas)" % (child["Title"], child["FichasInList"])
                images = []
                for i in range(1, 5):
                    if "sinimagen.png" not in child["Poster%s" % i]:
                        images.append(child["Poster%s" % i].replace("/100/", "/400/"))
                if images:
                    image = images[random.randint(0, len(images) - 1)]
                url = host + "/data.php?mode=add_listas&apikey=%s&sid=%s&ficha_id=%s" % (apikey, sid, item.ficha)
                post = "lista_id[]=%s" % child["Id"]
                itemlist.append(item.clone(title=title, url=url, post=post, thumbnail=image, folder=False))

        return itemlist
    elif "Crear nueva lista" in item.title:
        from platformcode import platformtools
        nombre = platformtools.dialog_input("", "Introduce un nombre para la lista")
        if nombre:
            dict_priv = {0: 'Pública', 1: 'Privada'}
            priv = platformtools.dialog_select("Privacidad de la lista", ['Pública', 'Privada'])
            if priv != -1:
                url = host + "/data.php?mode=create_list&apikey=%s&sid=%s" % (apikey, sid)
                post = "name=%s&private=%s" % (nombre, priv)
                data = httptools.downloadpage(url, post)
                platformtools.dialog_notification("Lista creada correctamente",
                                                  "Nombre: %s - %s" % (nombre, dict_priv[priv]))
                platformtools.itemlist_refresh()
        return
    elif re.search(r"(?i)Seguir Lista", item.title):
        from platformcode import platformtools
        data = httptools.downloadpage(item.url)
        platformtools.dialog_notification("Operación realizada con éxito", "Lista: %s" % item.lista)
        return
    elif item.post:
        from platformcode import platformtools
        data = httptools.downloadpage(item.url, item.post).data
        platformtools.dialog_notification("Ficha añadida a la lista", "Lista: %s" % item.title)
        platformtools.itemlist_refresh()
        return

    data = httptools.downloadpage("https://playmax.mx/tusfichas.php").data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)

    bloque = scrapertools.find_single_match(data, item.url + '">(.*?)(?:<div class="tf_blocks|<div class="tf_o_move">)')
    matches = scrapertools.find_multiple_matches(bloque, '<div class="tf_menu_mini">([^<]+)<(.*?)<cb></cb></div>')
    for category, contenido in matches:
        itemlist.append(item.clone(action="", title=category, text_color=color3))

        patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="([^"]+)".*?serie="([^"]*)".*?' \
                 '<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
        entradas = scrapertools.find_multiple_matches(contenido, patron)
        for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas:
            tipo = "movie"
            scrapedurl = host + scrapedurl
            scrapedthumbnail = host + scrapedthumbnail
            action = "findvideos"
            if __menu_info__:
                action = "menu_info"
            if serie:
                tipo = "tvshow"
            if episodio:
                title = "  %s - %s" % (episodio.replace("X", "x"), scrapedtitle)
            else:
                title = "  " + scrapedtitle

            new_item = Item(channel=item.channel, action=action, title=title, url=scrapedurl,
                            thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, contentType=tipo,
                            text_color=color2)
            if new_item.contentType == "tvshow":
                new_item.show = scrapedtitle
                if not __menu_info__:
                    new_item.action = "episodios"

            itemlist.append(new_item)

    return itemlist

def marcar(item):
    logger.info()

    if "Capítulo" in item.title:
        url = "%s/data.php?mode=capitulo_visto&apikey=%s&sid=%s&c_id=%s" % (host, apikey, sid, item.epi_id)
        message = item.title.replace("no", "marcado como").replace("ya", "cambiado a no").replace(" ¿Cambiar?", "")
    elif "temporada" in item.title.lower():
        type_marcado = "1"
        if "como vista" in item.title:
            message = "Temporada %s marcada como vista" % item.season
        else:
            type_marcado = "2"
            message = "Temporada %s marcada como no vista" % item.season
        url = "%s/data.php?mode=temporada_vista&apikey=%s&sid=%s&ficha=%s&t_id=%s&type=%s" \
              % (host, apikey, sid, item.ficha, item.season, type_marcado)
    else:
        message = item.title.replace("Marcar ", "Marcada ").replace("Seguir serie", "Serie en seguimiento")
        if "favorita" in item.title:
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "3")
        elif "pendiente" in item.title:
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "2")
        elif "vista" in item.title:
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "4")
        elif "Seguir" in item.title:
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "2")
            data = httptools.downloadpage(url)
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "1")

    data = httptools.downloadpage(url)
    # "sucess" is the attribute name exposed by httptools' response object
    if data.sucess and config.get_platform() != "plex" and item.action != "play":
        from platformcode import platformtools
        platformtools.dialog_notification("Acción correcta", message)

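marcar() always drives data.php?mode=marcar_ficha with a numeric tipo parameter. Matching the URLs above against the fichas_marca labels in fichas() gives the following summary (inferred from this file, not a documented API):

# Inferred from marcar() and the fichas_marca dict in fichas();
# not an officially documented playmax API.
MARCAR_TIPO = {
    "1": "Siguiendo",
    "2": "Pendiente",
    "3": "Favorita",
    "4": "Vista",
    "5": "Abandonada",  # only seen as a label in fichas_marca
}
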
def listas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = xml2dict(data)
    if item.extra == "listas":
        itemlist.append(Item(channel=item.channel, title="Listas más seguidas", action="listas", text_color=color1,
                             url=item.url + "&orden=1", extra="listas_plus"))
        itemlist.append(Item(channel=item.channel, title="Listas con más fichas", action="listas", text_color=color1,
                             url=item.url + "&orden=2", extra="listas_plus"))
        itemlist.append(Item(channel=item.channel, title="Listas aleatorias", action="listas", text_color=color1,
                             url=item.url + "&orden=3", extra="listas_plus"))
        if data["Data"]["ListasSiguiendo"] != "\t":
            itemlist.append(Item(channel=item.channel, title="Listas que sigo", action="listas", text_color=color1,
                                 url=item.url, extra="sigo"))
        if data["Data"]["TusListas"] != "\t":
            itemlist.append(Item(channel=item.channel, title="Mis listas", action="listas", text_color=color1,
                                 url=item.url, extra="mislistas"))

        return itemlist

    elif item.extra == "sigo":
        data = data["Data"]["ListasSiguiendo"]["Item"]
    elif item.extra == "mislistas":
        data = data["Data"]["TusListas"]["Item"]
    else:
        data = data["Data"]["Listas"]["Item"]

    if type(data) is not list:
        data = [data]
    import random
    for child in data:
        image = ""
        title = "%s (%s fichas)" % (child["Title"], child["FichasInList"])
        images = []
        for i in range(1, 5):
            if "sinimagen.png" not in child["Poster%s" % i]:
                images.append(child["Poster%s" % i].replace("/100/", "/400/"))
        if images:
            image = images[random.randint(0, len(images) - 1)]
        url = host + "/l%s" % child["Id"]
        itemlist.append(Item(channel=item.channel, action="fichas", url=url, text_color=color3,
                             thumbnail=image, title=title, extra=item.extra))

    if len(itemlist) == 20:
        start = scrapertools.find_single_match(item.url, 'start=(\d+)')
        end = int(start) + 20
        url = re.sub(r'start=%s' % start, 'start=%s' % end, item.url)
        itemlist.append(item.clone(title=">> Página Siguiente", url=url))

    return itemlist

def play(item):
    logger.info()
    from core import servertools

    devuelve = servertools.findvideos(item.url, True)
    if devuelve:
        item.url = devuelve[0][1]
        item.server = devuelve[0][2]

    if config.get_setting("mark_play", "playmax"):
        if item.contentType == "movie":
            marcar(item.clone(title="marcar como vista"))
        else:
            marcar(item.clone(title="Capítulo", epi_id=item.cid))

    return [item]

def select_page(item):
    import xbmcgui
    dialog = xbmcgui.Dialog()
    number = dialog.numeric(0, "Introduce el número de página")
    if number != "":
        number = int(number) * 60
        item.url = re.sub(r'start=(\d+)', "start=%s" % number, item.url)

    return fichas(item)

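select_page() maps the typed page number to a start offset of 60 fichas per catalogue page and rewrites the start= parameter in place. Spelled out with illustrative values:

# Illustrative only: page 3 of the catalogue starts at ficha 180 (60 per page).
import re

url = "https://playmax.mx/catalogo.php?tipo[]=2&start=0"
number = 3 * 60
print(re.sub(r'start=(\d+)', "start=%s" % number, url))
# -> https://playmax.mx/catalogo.php?tipo[]=2&start=180
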
def xml2dict(xmldata):
    """
    Reads an XML string and returns it as a dictionary.

    Parameters:
        xmldata (str) -- XML text to convert.

    Returns:
        A dictionary built from the XML fields.
    """
    import sys
    # Self-reference for recursion, resolved from the current function name
    parse = globals().get(sys._getframe().f_code.co_name)

    matches = re.compile("<(?P<tag>[^>]+)>[\n]*[\s]*[\t]*(?P<value>.*?)[\n]*[\s]*[\t]*<\/(?P=tag)\s*>",
                         re.DOTALL).findall(xmldata)

    return_dict = {}
    for tag, value in matches:
        # If the value contains child elements, recurse
        if "<" in value and "</" in value:
            if tag in return_dict:
                if type(return_dict[tag]) == list:
                    return_dict[tag].append(parse(value))
                else:
                    return_dict[tag] = [return_dict[tag]]
                    return_dict[tag].append(parse(value))
            else:
                return_dict[tag] = parse(value)

        else:
            if tag in return_dict:
                if type(return_dict[tag]) == list:
                    return_dict[tag].append(value)
                else:
                    return_dict[tag] = [return_dict[tag]]
                    return_dict[tag].append(value)
            else:
                if value in ["true", "false"]:
                    value = True if value == "true" else False

                return_dict[tag] = value

    return return_dict

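A usage sketch of xml2dict() on a minimal response (the XML snippet is invented; note that CDATA wrappers survive as plain text, which is why busqueda() strips them from Title by hand):

# Invented minimal input; shows why callers strip CDATA themselves.
xml = "<Data><Fichas><Ficha><Id>1</Id><Title><![CDATA[Foo]]></Title></Ficha></Fichas></Data>"
print(xml2dict(xml))
# -> {'Data': {'Fichas': {'Ficha': {'Id': '1', 'Title': '<![CDATA[Foo]]>'}}}}
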
@@ -7,7 +7,7 @@ from core import scrapertools
 from core.item import Item
 from platformcode import logger
 
-host = "http://www.playpornx.net/"
+host = "https://watchfreexxx.net/"
 
 
 def mainlist(item):
@@ -17,7 +17,7 @@ def mainlist(item):
                          fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
                          url=host))
 
-    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url='http://www.playpornx.net/?s=',
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
                          thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
                          fanart='https://s30.postimg.org/pei7txpa9/buscar.png'))
 
@@ -31,13 +31,21 @@ def lista(item):
     if item.url == '': item.url = host
     data = httptools.downloadpage(item.url).data
     data = re.sub(r'&quot;|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt.*?<h2>(.*?)<\/h2>'
+    if item.extra != 'Buscar':
+        patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt=(.*?) width'
+    else:
+        patron = '<div class=movie>.*?<img src=(.*?) alt=(.*?) \/>.*?href=(.*?)\/>'
     matches = re.compile(patron, re.DOTALL).findall(data)
 
-    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
-        url = scrapedurl
-        thumbnail = scrapedthumbnail
-        title = scrapedtitle
+    for data_1, data_2, data_3 in matches:
+        if item.extra != 'Buscar':
+            url = data_1
+            thumbnail = data_2
+            title = data_3
+        else:
+            url = data_3
+            thumbnail = data_1
+            title = data_2
 
         itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail))
 
@@ -59,6 +67,7 @@ def search(item, texto):
 
     try:
         if texto != '':
+            item.extra = 'Buscar'
             return lista(item)
         else:
             return []
 
@@ -34,7 +34,8 @@ def login():
         config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
         config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
     url = "https://www.plusdede.com/"
-    headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
+    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
+                             "Chrome/61.0.3163.100 Safari/537.36", "Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
     data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
                                   replace_headers=False).data
     if "redirect" in data:
@@ -771,14 +772,17 @@ def checkseen(item):
     if item.tipo == "8":
         url_temp = "https://www.plusdede.com/set/episode/" + item.data_id + "/seen"
         tipo_str = "series"
-        headers = {"Referer": "https://www.plusdede.com/serie/", "X-Requested-With": "XMLHttpRequest",
-                   "X-CSRF-TOKEN": item.token}
+        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
+                                 "Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
+                   "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
     else:
         url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen"
         tipo_str = "pelis"
-        headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
-                   "X-CSRF-TOKEN": item.token}
+        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
+                                 "Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
+                   "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
     data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
     #logger.debug(data)
     return True
 
@@ -927,7 +931,8 @@ def plusdede_check(item):
         tipo_str = "listas"
     else:
         tipo_str = "pelis"
-    headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
-               "X-CSRF-TOKEN": item.token}
+    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
+               "Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
+               "X-CSRF-TOKEN": item.token}
     data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
                                   replace_headers=True).data.strip()

plugin.video.alfa/channels/pordede.json (new file, 85 lines)
@@ -0,0 +1,85 @@
{
    "id": "pordede",
    "name": "Pordede",
    "active": true,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "pordede.png",
    "banner": "pordede.png",
    "categories": [
        "movie",
        "tvshow"
    ],
    "settings": [
        {
            "id": "pordedeuser",
            "type": "text",
            "label": "@30014",
            "enabled": true,
            "visible": true
        },
        {
            "id": "pordedepassword",
            "type": "text",
            "hidden": true,
            "label": "@30015",
            "enabled": "!eq(-1,'')",
            "visible": true
        },
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": "!eq(-1,'') + !eq(-2,'')",
            "visible": true
        },
        {
            "id": "pordedesortlinks",
            "type": "list",
            "label": "Ordenar enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-2,'') + !eq(-3,'')",
            "lvalues": [
                "No",
                "Por no Reportes",
                "Por Idioma",
                "Por Calidad",
                "Por Idioma y Calidad",
                "Por Idioma y no Reportes",
                "Por Idioma, Calidad y no Reportes"
            ]
        },
        {
            "id": "pordedeshowlinks",
            "type": "list",
            "label": "Mostrar enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-3,'') + !eq(-4,'')",
            "lvalues": [
                "Todos",
                "Ver online",
                "Descargar"
            ]
        },
        {
            "id": "pordedenumberlinks",
            "type": "list",
            "label": "Limitar número de enlaces",
            "default": 0,
            "enabled": true,
            "visible": "!eq(-4,'') + !eq(-5,'')",
            "lvalues": [
                "No",
                "5",
                "10",
                "15",
                "20",
                "25",
                "30"
            ]
        }
    ]
}

plugin.video.alfa/channels/pordede.py (new file, 665 lines)
@@ -0,0 +1,665 @@
# -*- coding: utf-8 -*-

import os
import re
import sys
import urlparse

from core import channeltools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools


def login():
    url_origen = "http://www.pordede.com"
    data = httptools.downloadpage(url_origen).data
    if config.get_setting("pordedeuser", "pordede") in data:
        return True

    url = "http://www.pordede.com/api/login/auth?response_type=code&client_id=appclient&redirect_uri=http%3A%2F%2Fwww.pordede.com%2Fapi%2Flogin%2Freturn&state=none"
    post = "username=%s&password=%s&authorized=autorizar" % (config.get_setting("pordedeuser", "pordede"), config.get_setting("pordedepassword", "pordede"))
    data = httptools.downloadpage(url, post).data
    if '"ok":true' in data:
        return True
    else:
        return False

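For reference, the percent-encoded redirect_uri in the auth URL above can be reproduced with urllib (Python 2, matching the channel's imports):

# Reproduces the encoded redirect_uri seen in the auth URL above.
import urllib

print(urllib.quote("http://www.pordede.com/api/login/return", safe=""))
# -> http%3A%2F%2Fwww.pordede.com%2Fapi%2Flogin%2Freturn
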
def mainlist(item):
    logger.info()

    itemlist = []

    if not config.get_setting("pordedeuser", "pordede"):
        itemlist.append(Item(channel=item.channel, title="Habilita tu cuenta en la configuración...", action="settingCanal", url=""))
    else:
        result = login()
        if not result:
            itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar..."))
            return itemlist
        itemlist.append(Item(channel=item.channel, action="menuseries", title="Series", url=""))
        itemlist.append(Item(channel=item.channel, action="menupeliculas", title="Películas y documentales", url=""))
        itemlist.append(Item(channel=item.channel, action="listas_sigues", title="Listas que sigues", url="http://www.pordede.com/lists/following"))
        itemlist.append(Item(channel=item.channel, action="tus_listas", title="Tus listas", url="http://www.pordede.com/lists/yours"))
        itemlist.append(Item(channel=item.channel, action="listas_sigues", title="Top listas", url="http://www.pordede.com/lists"))
        itemlist.append(Item(channel=item.channel, action="settingCanal", title="Configuración...", url=""))

    return itemlist


def settingCanal(item):
    return platformtools.show_channel_settings()


def menuseries(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", url="http://www.pordede.com/series/loadmedia/offset/0/showlist/hot"))
    itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", url="http://www.pordede.com/series"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Siguiendo", url="http://www.pordede.com/series/following"))
    itemlist.append(Item(channel=item.channel, action="siguientes", title="Siguientes Capítulos", url="http://www.pordede.com/main/index", viewmode="movie"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Favoritas", url="http://www.pordede.com/series/favorite"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Pendientes", url="http://www.pordede.com/series/pending"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Terminadas", url="http://www.pordede.com/series/seen"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Recomendadas", url="http://www.pordede.com/series/recommended"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://www.pordede.com/series"))

    return itemlist


def menupeliculas(item):
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Novedades", url="http://www.pordede.com/pelis/loadmedia/offset/0/showlist/hot"))
    itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros", url="http://www.pordede.com/pelis"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Favoritas", url="http://www.pordede.com/pelis/favorite"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Pendientes", url="http://www.pordede.com/pelis/pending"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Vistas", url="http://www.pordede.com/pelis/seen"))
    itemlist.append(Item(channel=item.channel, action="peliculas", title="Recomendadas", url="http://www.pordede.com/pelis/recommended"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar...", url="http://www.pordede.com/pelis"))

    return itemlist

def generos(item):
    logger.info()

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries (folders)
    data = scrapertools.find_single_match(data, '<div class="section genre">(.*?)</div>')
    patron = '<a class="mediaFilterLink" data-value="([^"]+)" href="([^"]+)">([^<]+)<span class="num">\((\d+)\)</span></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []

    for textid, scrapedurl, scrapedtitle, cuantos in matches:
        title = scrapedtitle.strip() + " (" + cuantos + ")"
        thumbnail = ""
        plot = ""

        if "/pelis" in item.url:
            url = "http://www.pordede.com/pelis/loadmedia/offset/0/genre/" + textid.replace(" ", "%20") + "/showlist/all"
        else:
            url = "http://www.pordede.com/series/loadmedia/offset/0/genre/" + textid.replace(" ", "%20") + "/showlist/all"

        itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title))

    return itemlist


def search(item, texto):
    logger.info()

    if item.url == "":
        item.url = "http://www.pordede.com/pelis"

    texto = texto.replace(" ", "-")

    item.extra = item.url
    item.url = item.url + "/loadmedia/offset/0/query/" + texto + "/years/1950/on/undefined/showlist/all"

    try:
        return buscar(item)
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

def buscar(item):
    logger.info()

    # Download the page
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    json_object = jsontools.load(data)
    data = json_object["html"]

    return parse_mixed_results(item, data)


def parse_mixed_results(item, data):
    patron = '<a class="defaultLink extended" href="([^"]+)"[^<]+'
    patron += '<div class="coverMini shadow tiptip" title="([^"]+)"[^<]+'
    patron += '<img class="centeredPic.*?src="([^"]+)"'
    patron += '[^<]+<img[^<]+<div class="extra-info">'
    patron += '<span class="year">([^<]+)</span>'
    patron += '<span class="value"><i class="icon-star"></i>([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []

    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear, scrapedvalue in matches:
        title = scrapertools.htmlclean(scrapedtitle)
        if scrapedyear != '':
            title += " (" + scrapedyear + ")"
        fulltitle = title
        if scrapedvalue != '':
            title += " (" + scrapedvalue + ")"
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        fanart = thumbnail.replace("mediathumb", "mediabigcover")
        plot = ""

        if "/peli/" in scrapedurl or "/docu/" in scrapedurl:

            if "/peli/" in scrapedurl:
                sectionStr = "peli"
            else:
                sectionStr = "docu"

            referer = urlparse.urljoin(item.url, scrapedurl)
            url = referer.replace("/{0}/".format(sectionStr), "/links/view/slug/") + "/what/{0}".format(sectionStr)

            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url, thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
                                 contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"]))
        else:
            referer = item.url
            url = urlparse.urljoin(item.url, scrapedurl)
            itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url, thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
                                 contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"]))

    next_page = scrapertools.find_single_match(data, '<div class="loadingBar" data-url="([^"]+)"')
    if next_page != "":
        url = urlparse.urljoin("http://www.pordede.com", next_page)
        itemlist.append(
            Item(channel=item.channel, action="lista", title=">> Página siguiente", extra=item.extra, url=url))

    try:
        import xbmcplugin
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    except:
        pass

    return itemlist

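parse_mixed_results() turns a media page URL into its links page by swapping the section segment, as this sketch shows (the slug is invented):

# Invented slug; mirrors the URL rewrite in parse_mixed_results().
sectionStr = "peli"
referer = "http://www.pordede.com/peli/some-movie"
url = referer.replace("/{0}/".format(sectionStr), "/links/view/slug/") + "/what/{0}".format(sectionStr)
print(url)
# -> http://www.pordede.com/links/view/slug/some-movie/what/peli
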
def siguientes(item):
    logger.info()

    # Download the page
    data = httptools.downloadpage(item.url).data

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
    patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+'
    patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+'
    patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+'
    patron += '<div class="extra-info"><span class="year">[^<]+'
    patron += '</span><span class="value"><i class="icon-star"></i>[^<]+'
    patron += '</span></div>[^<]+'
    patron += '</div>[^<]+'
    patron += '</a>[^<]+'
    patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []

    for scrapedtitle, scrapedthumbnail, scrapedurl, scrapedsession, scrapedepisode in matches:
        title = scrapertools.htmlclean(scrapedtitle)
        session = scrapertools.htmlclean(scrapedsession)
        episode = scrapertools.htmlclean(scrapedepisode)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        fanart = thumbnail.replace("mediathumb", "mediabigcover")
        plot = ""
        title = session + "x" + episode + " - " + title

        referer = urlparse.urljoin(item.url, scrapedurl)
        url = referer

        itemlist.append(Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))

    return itemlist


def episodio(item):
    logger.info()
    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    session = str(int(item.extra.split("|")[0]))
    episode = str(int(item.extra.split("|")[1]))
    patrontemporada = '<div class="checkSeason"[^>]+>Temporada ' + session + '<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
    matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)

    for bloque_episodios in matchestemporadas:
        # Extract the episodes
        patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
        matches = re.compile(patron, re.DOTALL).findall(bloque_episodios)

        for scrapedurl, scrapedtitle, info, visto in matches:
            if visto.strip() == "active":
                visto_string = "[visto] "
            else:
                visto_string = ""
            numero = episode
            title = visto_string + session + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle)
            thumbnail = ""
            plot = ""

            epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)")
            url = "http://www.pordede.com/links/viewepisode/id/" + epid
            itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=item.fanart, show=item.show))

    # Accumulate the links of every matched chapter (the original overwrote itemlist2 on each pass)
    itemlist2 = []
    for capitulo in itemlist:
        itemlist2.extend(findvideos(capitulo))

    return itemlist2

def peliculas(item):
    logger.info()

    # Download the page
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    json_object = jsontools.load(data)
    data = json_object["html"]

    return parse_mixed_results(item,data)

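# Note: pordede answers AJAX-style requests (X-Requested-With: XMLHttpRequest)
# with a JSON envelope whose "html" field holds the actual listing markup;
# peliculas() above, and lista() and parse_listas() below, all unwrap it the
# same way before scraping.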
def episodios(item):
    logger.info()

    itemlist = []

    # Download the page
    data = httptools.downloadpage(item.url).data

    patrontemporada = '<div class="checkSeason"[^>]+>([^<]+)<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
    matchestemporadas = re.compile(patrontemporada,re.DOTALL).findall(data)

    idserie = scrapertools.find_single_match(data,'<div id="layout4" class="itemProfile modelContainer" data-model="serie" data-id="(\d+)"')

    for nombre_temporada,bloque_episodios in matchestemporadas:
        # Extract the episodes
        patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">([^<]+)</span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
        matches = re.compile(patron,re.DOTALL).findall(bloque_episodios)

        for scrapedurl,numero,scrapedtitle,info,visto in matches:
            if visto.strip()=="active":
                visto_string = "[visto] "
            else:
                visto_string = ""

            title = visto_string+nombre_temporada.replace("Temporada ", "").replace("Extras", "Extras 0")+"x"+numero+" "+scrapertools.htmlclean(scrapedtitle)
            thumbnail = item.thumbnail
            fanart = item.fanart
            plot = ""

            epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)")
            url = "http://www.pordede.com/links/viewepisode/id/"+epid
            itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))
    if config.get_videolibrary_support():
        show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)

        itemlist.append( Item(channel='pordede', title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios###", show=show) )
        itemlist.append( Item(channel='pordede', title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=show))
        itemlist.append( Item(channel='pordede', title="Marcar como Pendiente", tipo="serie", idtemp=idserie, valor="1", action="pordede_check", show=show))
        itemlist.append( Item(channel='pordede', title="Marcar como Siguiendo", tipo="serie", idtemp=idserie, valor="2", action="pordede_check", show=show))
        itemlist.append( Item(channel='pordede', title="Marcar como Finalizada", tipo="serie", idtemp=idserie, valor="3", action="pordede_check", show=show))
        itemlist.append( Item(channel='pordede', title="Marcar como Favorita", tipo="serie", idtemp=idserie, valor="4", action="pordede_check", show=show))
        itemlist.append( Item(channel='pordede', title="Quitar marca", tipo="serie", idtemp=idserie, valor="0", action="pordede_check", show=show))

    return itemlist

def parse_listas(item, patron):
    logger.info()

    # Download the page
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    json_object = jsontools.load(data)
    data = json_object["html"]

    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []

    for scrapedurl,scrapedtitle,scrapeduser,scrapedfichas in matches:
        title = scrapertools.htmlclean(scrapedtitle + ' (' + scrapedfichas + ' fichas, por ' + scrapeduser + ')')
        url = urlparse.urljoin(item.url,scrapedurl) + "/offset/0/loadmedia"
        thumbnail = ""
        itemlist.append( Item(channel=item.channel, action="lista" , title=title , url=url))

    nextpage = scrapertools.find_single_match(data,'data-url="(/lists/loadlists/offset/[^"]+)"')
    if nextpage != '':
        url = urlparse.urljoin(item.url,nextpage)
        itemlist.append( Item(channel=item.channel, action="listas_sigues" , title=">> Página siguiente" , extra=item.extra, url=url))

    try:
        import xbmcplugin
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
    except:
        pass

    return itemlist

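# The two list views below differ only in their regular expression; each
# pattern must capture, in order, the four groups parse_listas() unpacks:
# url, title, user and number of entries ("fichas").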
def listas_sigues(item):
    logger.info()

    patron = '<div class="clearfix modelContainer" data-model="lista"[^<]+'
    patron += '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>'
    patron += '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)'

    return parse_listas(item, patron)

def tus_listas(item):
    logger.info()

    patron = '<div class="clearfix modelContainer" data-model="lista"[^<]+'
    patron += '<div class="right"[^<]+'
    patron += '<button[^<]+</button[^<]+'
    patron += '<button[^<]+</button[^<]+'
    patron += '</div[^<]+'
    patron += '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>'
    patron += '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)'

    return parse_listas(item, patron)

def lista(item):
    logger.info()

    # Download the page
    headers = {"X-Requested-With": "XMLHttpRequest"}
    data = httptools.downloadpage(item.url, headers=headers).data

    # Extract the entries (folders)
    json_object = jsontools.load(data)
    data = json_object["html"]

    return parse_mixed_results(item,data)

def findvideos(item, verTodos=False):
    logger.info()

    # Download the page
    data = httptools.downloadpage(item.url).data
    logger.info(data)

    sesion = scrapertools.find_single_match(data,'SESS = "([^"]+)";')

    patron = '<a target="_blank" class="a aporteLink(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []

    idpeli = scrapertools.find_single_match(data,'<div class="buttons"><button class="defaultPopup onlyLogin" href="/links/create/ref_id/(\d+)/ref_model/4">Añadir enlace')

    if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")) and "/what/peli" in item.url:
        itemlist.append( Item(channel=item.channel, action="infosinopsis" , title="INFO / SINOPSIS" , url=item.url, thumbnail=item.thumbnail, fanart=item.fanart, folder=False ))

    itemsort = []
    sortlinks = config.get_setting("pordedesortlinks",item.channel)
    showlinks = config.get_setting("pordedeshowlinks",item.channel)

    if sortlinks != '' and sortlinks != "No":
        sortlinks = int(sortlinks)
    else:
        sortlinks = 0

    if showlinks != '' and showlinks != "No":
        showlinks = int(showlinks)
    else:
        showlinks = 0
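    # showlinks setting: 1 keeps only streamable links (entries with a
    # jdownloader block are skipped), 2 keeps only downloadable ones, anything
    # else shows both; the filter is applied at the top of the loop below.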
    for match in matches:
        jdown = scrapertools.find_single_match(match,'<div class="jdownloader">[^<]+</div>')
        if (showlinks == 1 and jdown != '') or (showlinks == 2 and jdown == ''):
            continue

        idiomas = re.compile('<div class="flag([^"]+)">([^<]+)</div>',re.DOTALL).findall(match)
        idioma_0 = (idiomas[0][0].replace(" ","").strip() + " " + idiomas[0][1].replace(" ","").strip()).strip()
        if len(idiomas) > 1:
            idioma_1 = (idiomas[1][0].replace(" ","").strip() + " " + idiomas[1][1].replace(" ","").strip()).strip()
            idioma = idioma_0 + ", " + idioma_1
        else:
            idioma_1 = ''
            idioma = idioma_0

        calidad_video = scrapertools.find_single_match(match,'<div class="linkInfo quality"><i class="icon-facetime-video"></i>([^<]+)</div>')
        calidad_audio = scrapertools.find_single_match(match,'<div class="linkInfo qualityaudio"><i class="icon-headphones"></i>([^<]+)</div>')

        thumb_servidor = scrapertools.find_single_match(match,'<div class="hostimage"[^<]+<img\s*src="([^"]+)">')
        nombre_servidor = scrapertools.find_single_match(thumb_servidor,"popup_([^\.]+)\.png")

        if jdown != '':
            title = "Download "+nombre_servidor+" ("+idioma+") (Calidad "+calidad_video.strip()+", audio "+calidad_audio.strip()+")"
        else:
            title = "Ver en "+nombre_servidor+" ("+idioma+") (Calidad "+calidad_video.strip()+", audio "+calidad_audio.strip()+")"
        cuenta = []
        valoracion = 0
        for idx, val in enumerate(['1', '2', 'report']):
            nn = scrapertools.find_single_match(match,'<span\s+data-num="([^"]+)"\s+class="defaultPopup"\s+href="/likes/popup/value/'+val+'/')
            if nn != '0' and nn != '':
                cuenta.append(nn + ' ' + ['ok', 'ko', 'rep'][idx])

                if val == '1':
                    valoracion += int(nn)
                else:
                    valoracion += -int(nn)

        if len(cuenta) > 0:
            title += ' (' + ', '.join(cuenta) + ')'
        url = urlparse.urljoin( item.url , scrapertools.find_single_match(match,'href="([^"]+)"') )
        thumbnail = thumb_servidor
        plot = ""

        if sortlinks > 0:
            if sortlinks == 1:
                orden = valoracion
            elif sortlinks == 2:
                orden = valora_idioma(idioma_0, idioma_1)
            elif sortlinks == 3:
                orden = valora_calidad(calidad_video, calidad_audio)
            elif sortlinks == 4:
                orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio)
            elif sortlinks == 5:
                orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion
            elif sortlinks == 6:
                orden = (valora_idioma(idioma_0, idioma_1) * 100000) + (valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion

            itemsort.append({'action': "play", 'title': title, 'url':url, 'thumbnail':thumbnail, 'fanart':item.fanart, 'plot':plot, 'extra':sesion+"|"+item.url, 'fulltitle':item.fulltitle, 'orden1': (jdown == ''), 'orden2':orden})
        else:
            itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, fanart=item.fanart, plot=plot, extra=sesion+"|"+item.url, fulltitle=item.fulltitle))
    if sortlinks > 0:
        numberlinks = config.get_setting("pordedenumberlinks",item.channel)

        if numberlinks != '' and numberlinks != "No":
            numberlinks = int(numberlinks)
        else:
            numberlinks = 0

        if numberlinks == 0:
            verTodos = True

        itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
        for i, subitem in enumerate(itemsort):
            if not verTodos and i >= numberlinks:
                itemlist.append(Item(channel=item.channel, action='findallvideos' , title='Ver todos los enlaces', url=item.url, extra=item.extra ))
                break

            itemlist.append( Item(channel=item.channel, action=subitem['action'] , title=subitem['title'] , url=subitem['url'] , thumbnail=subitem['thumbnail'] , fanart=subitem['fanart'], plot=subitem['plot'] , extra=subitem['extra'] , fulltitle=subitem['fulltitle'] ))
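    # Sketch of the ordering above (illustrative values): with reverse=True the
    # tuple key places streamable links (orden1 True) ahead of downloads and
    # sorts each group by descending score, e.g.
    #   sorted([(True, 85), (False, 99), (True, 90)], reverse=True)
    #   -> [(True, 90), (True, 85), (False, 99)]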
if "/what/peli" in item.url or "/what/docu" in item.url:
|
||||
itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Pendiente" , valor="1", idtemp=idpeli))
|
||||
itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Vista" , valor="3", idtemp=idpeli))
|
||||
itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Favorita" , valor="4", idtemp=idpeli))
|
||||
itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Quitar Marca" , valor="0", idtemp=idpeli))
|
||||
|
||||
return itemlist
|
||||
|
||||
def findallvideos(item):
    return findvideos(item, True)

def play(item):
    # Mark as watched
    checkseen(item.extra.split("|")[1])

    headers = {'Referer': item.extra.split("|")[1]}

    data = httptools.downloadpage(item.url, post="_s="+item.extra.split("|")[0], headers=headers).data
    url = scrapertools.find_single_match(data,'<p class="nicetry links">\s+<a href="([^"]+)" target="_blank"')
    url = urlparse.urljoin(item.url,url)

    headers = {'Referer': item.url}
    media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
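    # With follow_redirects=False the hoster URL is read from the Location
    # header of the redirect response instead of downloading its target page;
    # servertools then identifies the server directly from that URL.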
    itemlist = servertools.find_video_items(data=media_url)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = item.channel

    return itemlist

def checkseen(item):
    logger.info(item)

    if "/viewepisode/" in item:
        episode = item.split("/")[-1]
        httptools.downloadpage("http://www.pordede.com/ajax/action", post="model=episode&id="+episode+"&action=seen&value=1")

    if "/what/peli" in item:
        data = httptools.downloadpage(item).data

        movieid = scrapertools.find_single_match(data,'href="/links/create/ref_id/([0-9]+)/ref_model/')
        httptools.downloadpage("http://www.pordede.com/ajax/mediaaction", post="model=peli&id="+movieid+"&action=status&value=3")

    return True

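# checkseen() drives two different endpoints: /ajax/action for episodes
# (model=episode, action=seen) and /ajax/mediaaction for movies (model=peli,
# action=status, value=3, the same "Vista" marker offered in findvideos()).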
def infosinopsis(item):
    logger.info()

    url_aux = item.url.replace("/links/view/slug/", "/peli/").replace("/what/peli", "")

    # Download the page
    data = httptools.downloadpage(url_aux).data

    scrapedtitle = scrapertools.find_single_match(data,'<h1>([^<]+)</h1>')
    scrapedvalue = scrapertools.find_single_match(data,'<span class="puntuationValue" data-value="([^"]+)"')
    scrapedyear = scrapertools.find_single_match(data,'<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>')
    scrapedduration = scrapertools.find_single_match(data,'<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>', 1)
    scrapedplot = scrapertools.find_single_match(data,'<div class="info text"[^>]+>([^<]+)</div>')
    scrapedgenres = re.compile('href="/pelis/index/genre/[^"]+">([^<]+)</a>',re.DOTALL).findall(data)
    scrapedcasting = re.compile('href="/star/[^"]+">([^<]+)</a><br/><span>([^<]+)</span>',re.DOTALL).findall(data)
    title = scrapertools.htmlclean(scrapedtitle)
    plot = "Año: [B]"+scrapedyear+"[/B]"
    plot += " , Duración: [B]"+scrapedduration+"[/B]"
    plot += " , Puntuación usuarios: [B]"+scrapedvalue+"[/B]"
    plot += "\nGéneros: "+", ".join(scrapedgenres)
    plot += "\n\nSinopsis:\n"+scrapertools.htmlclean(scrapedplot)
    plot += "\n\nCasting:\n"
    for actor,papel in scrapedcasting:
        plot += actor+" ("+papel+"). "

    tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
    tbd.ask(title, plot)

    del tbd
    return

try:
    import xbmcgui

    class TextBox( xbmcgui.WindowXML ):
        """ Create a skinned textbox window """
        def __init__( self, *args, **kwargs):
            pass

        def onInit( self ):
            try:
                self.getControl( 5 ).setText( self.text )
                self.getControl( 1 ).setLabel( self.title )
            except: pass

        def onClick( self, controlId ):
            pass

        def onFocus( self, controlId ):
            pass

        def onAction( self, action ):
            if action == 7:
                self.close()

        def ask(self, title, text ):
            self.title = title
            self.text = text
            self.doModal()
except:
    pass

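# The surrounding try/except keeps this module importable where the xbmcgui
# binding does not exist; that matches findvideos(), which only offers the
# INFO / SINOPSIS entry on xbmc/kodi platforms.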
def valora_calidad(video, audio):
    prefs_video = [ 'hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener' ]
    prefs_audio = [ 'dts', '5.1', 'rip', 'line', 'screener' ]

    video = ''.join(video.split()).lower()
    if video in prefs_video:
        pts = (9 - prefs_video.index(video)) * 10
    else:
        pts = (9 - 1) * 10

    # Audio refines the video score. The original code re-assigned pts here,
    # silently discarding the video rating; add to it instead, mirroring the
    # primary/secondary weighting used by valora_idioma() below.
    audio = ''.join(audio.split()).lower()
    if audio in prefs_audio:
        pts += 9 - prefs_audio.index(audio)
    else:
        pts += 9 - 1

    return pts

def valora_idioma(idioma_0, idioma_1):
    prefs = [ 'spanish', 'spanish LAT', 'catalan', 'english', 'french' ]

    if idioma_0 in prefs:
        pts = (9 - prefs.index(idioma_0)) * 10
    else:
        pts = (9 - 1) * 10

    if idioma_1 != '':
        idioma_1 = idioma_1.replace(' SUB', '')

        if idioma_1 in prefs:
            pts += 8 - prefs.index(idioma_1)
        else:
            pts += 8 - 1
    else:
        pts += 9

    return pts

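# Worked example (values follow from the tables above): a Spanish-only link
# scores (9-0)*10 + 9 = 99, while an unlisted language with 'english SUB' as
# second audio scores (9-1)*10 + (8-3) = 85. Under sortlinks == 6 the language
# score is weighted x100000, so it dominates quality (x1000) and net votes.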
def pordede_check(item):
    httptools.downloadpage("http://www.pordede.com/ajax/mediaaction", post="model="+item.tipo+"&id="+item.idtemp+"&action=status&value="+item.valor)

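# pordede_check() posts the status markers built by episodios() and
# findvideos(): 1=Pendiente, 2=Siguiendo, 3=Finalizada/Vista, 4=Favorita,
# and 0 removes the mark.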
@@ -1,13 +0,0 @@
{
    "id": "quierodibujosanimados",
    "name": "Quiero Dibujos Animados",
    "active": true,
    "adult": false,
    "language": ["cast"],
    "thumbnail": "quierodibujosanimados.png",
    "banner": "quierodibujosanimados.png",
    "fanart": "quierodibujosanimados.jpg",
    "categories": [
        "tvshow"
    ]
}
@@ -1,116 +0,0 @@
# -*- coding: utf-8 -*-

import re
import urlparse

from core import scrapertools
from core.item import Item
from platformcode import logger


def mainlist(item):
    logger.info()

    # itemlist.append( Item(channel=item.channel , action="novedades" , title="Novedades" , url="http://www.quierodibujosanimados.com/"))
    return series(
        Item(channel=item.channel, action="series", title="Series", url="http://www.quierodibujosanimados.com/",
             fanart=item.fanart))

def series(item):
    logger.info()
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = scrapertools.get_match(data, '<ul class="categorias">(.*?)</ul')

    # <a href="http://www.quierodibujosanimados.com/cat/popeye-el-marino/38" title="Popeye el marino">Popeye el marino</a>
    patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        title = scrapedtitle.strip()
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 fanart=item.fanart))

    next_page_url = scrapertools.find_single_match(data, '</span[^<]+<a href="([^"]+)">')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page_url), folder=True,
                             fanart=item.fanart))

    return itemlist

def episodios(item):
    logger.info()

    '''
    <li>
        <div class="info">
            <h2><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h2>
            <p>Caillou volvía con su hermanita Rosi y su mamá de la biblioteca y traían un montón de libros que Caillou quería leer, especialmente uno de piratas. Capítulo titulado "Caillou ratón de biblioteca".</p>
            <div class="pie">
                <div class="categoria">
                    <span>Categoría:</span>
                    <a href="http://www.quierodibujosanimados.com/cat/caillou/14" title="Caillou" class="categoria">Caillou</a>
                </div>
                <div class="puntuacion">
                    <div class="rating_16 punt_0" data-noticia="954">
                        <span>0.5</span>
                        <span>1</span>
                        <span>1.5</span>
                        <span>2</span>
                        <span>2.5</span>
                        <span>3</span>
                        <span>3.5</span>
                        <span>4</span>
                        <span>4.5</span>
                        <span>5</span>
                    </div>
                </div>
            </div>
            <span class="pico"></span>
        </div>
        <div class="dibujo">
            <a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca" class="thumb">
                <img src="http://www.quierodibujosanimados.com/i/thm-Caillou-raton-de-biblioteca.jpg" alt="Caillou ratón de biblioteca" width="137" height="174" />
            </a>
            <h4><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h4>
        </div>
    </li>
    '''

    # Download the page
    data = scrapertools.cache_page(item.url)
    patron = '<div class="dibujo"[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
    patron += '<img src="([^"]+)"'

    matches = re.compile(patron, re.DOTALL).findall(data)
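    # Standalone check (placeholder markup): the pattern above captures
    # (url, title, thumbnail) from blocks like the docstring sample:
    #   re.findall(patron, '<div class="dibujo">\n<a href="u" title="t" class="thumb">\n<img src="s"', re.DOTALL)
    #   -> [('u', 't', 's')]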
    itemlist = []

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        title = scrapedtitle.strip()
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = ""
        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
                 fanart=item.fanart))

    next_page_url = scrapertools.find_single_match(data, '</span[^<]+<a href="([^"]+)">')
    if next_page_url != "":
        itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page_url), folder=True,
                             fanart=item.fanart))

    return itemlist

@@ -24,6 +24,13 @@ def mainlist(item):
     itemlist.append(Item(channel=item.channel, action="search",
                          title="Buscar por titulo", context=context,
                          thumbnail=get_thumb("search.png")))
+
+    thumbnail = get_thumb("search_star.png")
+
+    itemlist.append(Item(channel='tvmoviedb', title="Buscar actor/actriz", action="search_",
+                         search={'url': 'search/person', 'language': 'es', 'page': 1}, star=True,
+                         thumbnail=thumbnail))
+
     itemlist.append(Item(channel=item.channel, action="search",
                          title="Buscar por categorias (búsqueda avanzada)", extra="categorias",
                          context=context,
@@ -260,10 +267,9 @@ def channel_search(search_results, channel_parameters, tecleado):
     if result is None:
         result = []
     if len(result):
-        if not channel_parameters["title"] in search_results:
-            search_results[channel_parameters["title"]] = []
-
-        search_results[channel_parameters["title"]].append({"item": item,
+        if not channel_parameters["title"].capitalize() in search_results:
+            search_results[channel_parameters["title"].capitalize()] = []
+        search_results[channel_parameters["title"].capitalize()].append({"item": item,
                                                             "itemlist": result,
                                                             "adult": channel_parameters["adult"]})