143 Commits

Author SHA1 Message Date
alfa-addon
78b4a8d768 v2.4.9 2018-01-16 10:34:50 -05:00
alfa-addon
537c640771 fixed 2018-01-16 10:34:20 -05:00
alfa-addon
d76430c6d7 added 2018-01-16 10:34:07 -05:00
Alfa
b97f01d4d4 Merge pull request #192 from Intel11/actualizados
Updated
2018-01-16 08:18:02 -05:00
Alfa
9059a6d14f Merge pull request #193 from Alfa-beto/Fixes
pelisgratis + post-holiday changes
2018-01-16 08:17:05 -05:00
Intel1
b8f66623da Update platformtools.py 2018-01-16 08:12:02 -05:00
Unknown
1a1c8f2547 + post-holiday changes 2018-01-16 09:31:02 -03:00
Unknown
0fc75a5305 pelisgratis + post-holiday changes 2018-01-16 09:19:58 -03:00
Intel1
f4238302a5 launcher: updated 2018-01-15 16:29:35 -05:00
Intel1
ba2a6c682e logger: updated 2018-01-15 16:27:50 -05:00
Intel1
02abbfcc64 httptools: updated 2018-01-15 16:02:31 -05:00
Intel1
4a0f1b5c41 doomtv: fix 2018-01-08 08:30:12 -05:00
Intel1
738fb50ce9 userscloud: updated test_video_exists 2018-01-05 16:39:46 -05:00
Unknown
64396b3a4f Merge remote-tracking branch 'alfa-addon/master' into Fixes 2018-01-03 09:46:51 -03:00
Intel1
9e1c190c0b ciberpeliculashd: added series section 2018-01-02 11:58:40 -05:00
Alfa
7b8c1c6eb7 v2.4.8 2017-12-30 12:08:03 -05:00
Alfa
9a5b8cb4e7 Merge pull request #191 from Intel11/actualizados
Updated
2017-12-30 10:18:00 -05:00
Intel1
2793ea9952 Update gvideo.py 2017-12-30 10:09:47 -05:00
Intel1
3067d137cb Add files via upload 2017-12-30 10:08:26 -05:00
Intel1
825d9f2198 Delete cineasiaenlinea.pyo 2017-12-30 10:07:56 -05:00
Intel1
b41c2c08fc ciberpeliculashd: new channel 2017-12-30 09:39:38 -05:00
Intel1
1f4825dd05 cinetux: fix YouTube links 2017-12-30 08:47:04 -05:00
Intel1
4f760040bc httptools: updated header 2017-12-30 08:33:12 -05:00
Intel1
5391152408 Delete teledocumentales.py 2017-12-28 08:25:54 -05:00
Intel1
815857404d Delete teledocumentales.json 2017-12-28 08:25:46 -05:00
Intel1
239a73219e vk: updated test_video_exists 2017-12-27 15:08:38 -05:00
Intel1
b7787e0ee5 bitp: fix pattern 2017-12-27 14:56:09 -05:00
Intel1
14fce6ebac Delete turbovideos.py 2017-12-27 14:20:39 -05:00
Intel1
16a530a9d4 Delete turbovideos.json 2017-12-27 14:20:25 -05:00
Intel1
f4bc398f9e Delete stormo.py 2017-12-27 14:17:35 -05:00
Intel1
841f5e5f3d Delete stormo.json 2017-12-27 14:17:20 -05:00
Intel1
ee80f9c4d2 Delete stagevu.py 2017-12-27 14:16:59 -05:00
Intel1
50c42fbc4b Delete stagevu.json 2017-12-27 14:16:39 -05:00
Intel1
76170820bb Delete pcloud.py 2017-12-27 14:14:14 -05:00
Intel1
b5083d16b5 Delete pcloud.json 2017-12-27 14:14:05 -05:00
Intel1
6927f1f955 Delete nowdownload.py 2017-12-27 14:11:38 -05:00
Intel1
a3b70aba22 Delete nowdownload.json 2017-12-27 14:11:26 -05:00
Intel1
1fb8db8496 Delete nosvideo.py 2017-12-27 14:10:17 -05:00
Intel1
201e7f1e2e Delete nosvideo.json 2017-12-27 14:09:58 -05:00
Intel1
245190ca70 Delete idowatch.py 2017-12-27 12:56:42 -05:00
Intel1
23ebf3d19b Delete idowatch.json 2017-12-27 12:56:24 -05:00
Intel1
5c699ed892 Delete divxstage.py 2017-12-27 12:50:37 -05:00
Intel1
2da412890e Delete divxstage.json 2017-12-27 12:49:47 -05:00
Intel1
b47db3ae04 gvideo: updated test_video_exists 2017-12-27 12:36:38 -05:00
Intel1
660a05f39d videoteca: fix by seasons 2017-12-26 17:50:19 -05:00
Intel1
1f1b860715 serviporno: fix pattern 2017-12-26 17:44:32 -05:00
Intel1
bc318f7a18 bitp: fix pattern 2017-12-26 17:18:19 -05:00
Intel1
87e8ee4d46 javtasty: fix pattern and host 2017-12-26 17:11:15 -05:00
Intel1
de5eda5477 Update yaske.py 2017-12-23 10:42:57 -05:00
Intel1
0738a82372 divxatope: fix 2017-12-23 09:29:39 -05:00
Intel1
c3ca5b472f yaske: fix videoteca 2017-12-23 09:27:50 -05:00
alfa-addon
4008c63e12 v2.4.7 2017-12-21 06:33:41 -05:00
alfa-addon
f5a5328620 minor fixes 2017-12-21 06:32:52 -05:00
Alfa
48d7f754be Merge pull request #189 from Intel11/actualizados
Updated
2017-12-21 06:08:42 -05:00
Intel1
f1ffdf425e yaske: added series section 2017-12-20 15:16:51 -05:00
Intel1
fac578f631 Update animeid.py 2017-12-18 09:16:19 -05:00
Intel1
f7df5e9494 Update autoplay.py 2017-12-18 09:15:07 -05:00
Intel1
aeea88395a Update gnula.py 2017-12-18 09:14:31 -05:00
Intel1
8cc0ac4083 Update pelispekes.py 2017-12-18 09:13:52 -05:00
Intel1
ba2e824ec6 Update item.py 2017-12-18 09:11:53 -05:00
Intel1
3aabe7eb75 yaske: fix pagination 2017-12-16 08:22:05 -05:00
Intel1
b3c345fd11 Update doomtv.py 2017-12-15 16:03:28 -05:00
Intel1
9ddc0c7eec ohlatino: page no longer exists 2017-12-15 14:50:33 -05:00
Intel1
6a25a4add4 ohlatino: page no longer exists 2017-12-15 14:50:22 -05:00
Intel1
b339965287 descargasmix: fix 2017-12-15 14:48:50 -05:00
Intel1
800d6ae02e cinefoxtv: fix 2017-12-15 14:47:35 -05:00
Intel1
f441d34222 bricocine: site not working 2017-12-15 14:43:32 -05:00
Intel1
57001ef13e bricocine: site not working 2017-12-15 14:43:18 -05:00
Intel1
19df05e956 borrachodetorrent: site not working 2017-12-15 14:41:14 -05:00
Intel1
9f20d50a79 borrachodetorrent: site not working 2017-12-15 14:41:02 -05:00
Intel1
d8052c4bab bityouth: channel has no content 2017-12-15 14:39:06 -05:00
Intel1
4aca9a2306 bityouth: channel has no content 2017-12-15 14:38:44 -05:00
Intel1
5729e04cb5 doomtv: fix 2017-12-15 14:35:14 -05:00
Unknown
eb27cd53bc Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-12-15 08:53:43 -03:00
alfa-addon
afaf2fd4b4 v2.4.6 2017-12-14 15:37:29 -05:00
Alfa
e264614a2e Merge pull request #186 from Intel11/master
Updated
2017-12-14 15:51:14 -05:00
Intel1
de439ff8ea plusdede: fix watched items 2017-12-14 08:32:39 -05:00
Intel1
37df471d60 zpeliculas: page not working 2017-12-13 08:41:45 -05:00
Intel1
98b61f1b50 zpeliculas: page not working 2017-12-13 08:41:33 -05:00
Intel1
65648bca9b bitertv: new server 2017-12-13 08:38:39 -05:00
Intel1
e7fd77bcee gnula: updated 2017-12-12 15:32:11 -05:00
Intel1
27f93b9d98 Update playpornx.py 2017-12-12 14:25:33 -05:00
Intel1
a77a009c3a bdupload: new server 2017-12-12 12:46:14 -05:00
Intel1
94d4244cd1 Update playpornx.py 2017-12-12 10:17:13 -05:00
Intel1
c2b9f1f009 Update powvideo.py 2017-12-12 10:04:56 -05:00
Intel1
7c887bf546 Update streamplay.py 2017-12-12 10:04:16 -05:00
Intel1
05535344c2 Update canalpelis.py 2017-12-09 15:22:40 -05:00
Intel1
0caea46619 Update yaske.py 2017-12-07 14:58:39 -05:00
unknown
c49439bdc7 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-12-07 12:25:13 -03:00
Intel1
b5f1e7180c userscloud: fix 2017-12-06 17:40:04 -05:00
Intel1
29324c4302 yaske: fix search and get more links 2017-12-06 15:57:28 -05:00
Intel1
b88ef13772 search: fix channel ordering 2017-12-06 15:55:20 -05:00
alfa-addon
b7b0c02589 v2.4.5 2017-12-05 00:09:18 -05:00
alfa-addon
f0e07b7b28 fixed 2017-12-05 00:09:01 -05:00
Alfa
735b4a6584 Merge pull request #184 from Intel11/patch-1
Updated
2017-12-04 16:17:31 -05:00
Alfa
120e77b44b Merge pull request #185 from Alfa-beto/custom_menu_quick_menu
menu + home
2017-12-04 16:17:01 -05:00
Unknown
ac27cd2f00 Fixed canalpelis 2017-12-04 08:48:30 -03:00
Unknown
002e62aa19 improvement for Confluence 2017-12-02 15:15:59 -03:00
Intel1
4b9bbd0540 Delete quierodibujosanimados.py 2017-12-02 12:04:57 -05:00
Intel1
b474db07e3 Delete quierodibujosanimados.json 2017-12-02 12:04:42 -05:00
Intel1
9a6e070799 Update powvideo.py 2017-12-02 11:35:06 -05:00
Unknown
e0997a387b final tweaks, dynamic options 2017-12-02 13:27:26 -03:00
Intel1
02797b5571 peliculasgratis: updated 2017-12-02 10:57:59 -05:00
Unknown
de8f6af086 Merge remote-tracking branch 'alfa-addon/master' into unify+autoplay 2017-12-01 21:40:17 -03:00
unknown
98c06a1140 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-12-01 21:37:37 -03:00
Intel1
2e9573b6e9 streamixcloud: fix test_video 2017-12-01 12:56:44 -05:00
Intel1
fd5b972281 Update cinetux.py 2017-12-01 11:58:39 -05:00
Intel1
30e32ac133 gamovideo: test_video 2017-12-01 11:37:21 -05:00
Intel1
1a7d23d7dc cinetux: fix destacadas 2017-12-01 11:31:10 -05:00
Intel1
34f4e474fd Delete unsoloclic.py 2017-12-01 10:46:50 -05:00
Intel1
c546976329 Delete unsoloclic.json 2017-12-01 10:46:38 -05:00
Intel1
fe4c2685d7 Update uptobox.py 2017-12-01 10:41:50 -05:00
Intel1
1b7e71e3bf uptobox: fix pattern 2017-12-01 10:21:59 -05:00
alfa-addon
0ab5deba05 icon1 added 2017-11-30 18:37:04 -05:00
alfa-addon
dd4bcb8ef4 icon deleted 2017-11-30 18:36:40 -05:00
alfa-addon
796bf25e6f xmas icon 2017-11-30 18:20:17 -05:00
alfa-addon
f136e6e2aa deleted 2017-11-30 18:19:59 -05:00
alfa-addon
bad4e91aee v2.4.4 2017-11-30 18:10:28 -05:00
alfa-addon
48d76ad6d4 xmas fanart 2017-11-30 18:10:10 -05:00
Alfa
4d248cab54 Merge pull request #182 from Intel11/ultimo
Updated
2017-11-30 17:57:17 -05:00
Intel1
df3022353c Update pelisplusco.py 2017-11-30 17:06:03 -05:00
Intel1
48a8f2fa1a pelisplus: fix global series search 2017-11-30 17:02:54 -05:00
Unknown
5304271782 Human-friendly tweaks 2017-11-30 14:08:44 -03:00
Intel1
3701aba7fa Update powvideo.py 2017-11-30 08:45:22 -05:00
Unknown
ef962d7ed2 Added option to set home from any channel 2017-11-29 13:08:51 -03:00
Intel1
9d03b4de54 Update xbmc_videolibrary.py 2017-11-28 16:05:12 -05:00
Intel1
6bbf26a9a9 tvmoviedb: enabled image list 2017-11-28 14:20:13 -05:00
Intel1
392435e6fb gvideo: actualizado test_video_exists 2017-11-28 09:18:12 -05:00
Intel1
8b445165df servertools: Removed server_stats 2017-11-28 08:52:06 -05:00
Intel1
ba28a426e6 settings: Removed server_stats 2017-11-28 08:49:34 -05:00
unknown
aa5fa6d275 Merge remote-tracking branch 'alfa-addon/master' into custom_menu_quick_menu 2017-11-28 09:04:15 -03:00
unknown
6d8de2efec Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-28 09:01:57 -03:00
unknown
8fb445edaf Merge remote-tracking branch 'alfa-addon/master' into custom_menu_quick_menu 2017-11-24 16:59:35 -03:00
unknown
fada17bb78 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-24 16:57:15 -03:00
Unknown
5c90256a3d Custom menu + Quick menu 2017-11-24 16:54:33 -03:00
unknown
3f1baae10c Merge remote-tracking branch 'origin/master' 2017-11-21 08:58:50 -03:00
unknown
a91643694b Merge remote-tracking branch 'alfa-addon/master' 2017-11-21 08:55:38 -03:00
unknown
3bd8507889 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-21 08:55:31 -03:00
Alfa-beto
b9bd644e0a added icon for the Castilian Spanish language 2017-11-17 18:23:59 -03:00
unknown
3032770580 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-15 08:35:09 -03:00
Unknown
3965fdd1c6 fixed pelisfox 2017-11-13 08:09:52 -03:00
Unknown
689e2cc534 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-12 20:02:33 -03:00
unknown
783b8a11c1 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-10 23:06:48 -03:00
93 changed files with 2381 additions and 8168 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.4.3" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.4.9" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,11 +19,11 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» cinetux » descargasmix
» hdfull » peliculasdk
» pelisfox » yaske
» gvideo » powvideo
» yourupload ¤ arreglos internos
» pelisfox » pelisgratis
» gamovideo » doomtv
» usercloud » ciberpeliculashd
» pordede ¤ arreglos internos
[COLOR green]Gracias a [B][COLOR yellow]f_y_m[/COLOR][/B] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

View File

@@ -134,8 +134,7 @@ def novedades_episodios(item):
contentTitle = scrapedtitle.replace('#' + episodio, '')
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot,
hasContentDetails=True, contentSeason=1, contentTitle=contentTitle))
thumbnail=scrapedthumbnail, plot=scrapedplot, contentSeason=1, contentTitle=contentTitle))
return itemlist

View File

@@ -89,7 +89,6 @@ def start(itemlist, item):
videoitem.contentTitle=item.contentTitle
videoitem.contentType=item.contentType
videoitem.episode_id=item.episode_id
videoitem.hasContentDetails=item.hasContentDetails
#videoitem.infoLabels=item.infoLabels
videoitem.thumbnail=item.thumbnail
#videoitem.title=item.title

View File

@@ -1,24 +0,0 @@
{
"id": "bityouth",
"name": "Bityouth",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "http://s6.postimg.org/6ash180up/bityoulogo.png",
"banner": "bityouth.png",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

File diff suppressed because it is too large

View File

@@ -1,31 +0,0 @@
{
"id": "borrachodetorrent",
"name": "BorrachodeTorrent",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/BePrYmy.png",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

File diff suppressed because it is too large

View File

@@ -1,24 +0,0 @@
{
"id": "bricocine",
"name": "Bricocine",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "http://s6.postimg.org/9u8m1ep8x/bricocine.jpg",
"banner": "bricocine.png",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
}
]
}

File diff suppressed because it is too large

View File

@@ -143,14 +143,10 @@ def peliculas(item):
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
scrapedtitle, year, quality)
thumb_id = scrapertools.find_single_match(scrapedthumbnail, '.*?\/uploads\/(.*?)-')
thumbnail = "/%s.jpg" % thumb_id
filtro_list = {"poster_path": thumbnail}
filtro_list = filtro_list.items()
itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3,
url=scrapedurl, infoLabels={'filtro':filtro_list},
contentTitle=contentTitle, thumbnail=thumbnail,
url=scrapedurl, infoLabels={'year': year},
contentTitle=contentTitle, thumbnail=scrapedthumbnail,
title=title, context="buscar_trailer", quality = quality))
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
@@ -168,17 +164,17 @@ def peliculas(item):
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
datas = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", datas)
item.fanart = scrapertools.find_single_match(
data, "<meta property='og:image' content='([^']+)' />")
datas, "<meta property='og:image' content='([^']+)' />")
item.fanart = item.fanart.replace('w780', 'original')
item.plot = scrapertools.find_single_match(data, '</span></h4><p>([^*]+)</p><h4')
item.plot = scrapertools.find_single_match(datas, '</h4><p>(.*?)</p>')
item.plot = scrapertools.htmlclean(item.plot)
item.infoLabels['director'] = scrapertools.find_single_match(
data, '<div class="name"><a href="[^"]+">([^<]+)</a>')
datas, '<div class="name"><a href="[^"]+">([^<]+)</a>')
item.infoLabels['genre'] = scrapertools.find_single_match(
data, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')
datas, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>')
return itemlist
@@ -189,8 +185,7 @@ def generos(item):
data = scrapertools.cache_page(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
# url, title, cantidad
patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -216,34 +211,34 @@ def year_release(item):
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(datas)
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">'
patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">.*?'
patron += '<div class="texto">([^<]+)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches:
if plot == '':
plot = scrapertools.find_single_match(data, '<div class="texto">([^<]+)</div>')
scrapedtitle = scrapedtitle.replace('Ver ', '').replace(
' Online HD', '').replace('ver ', '').replace(' Online', '')
' Online HD', '').replace('ver ', '').replace(' Online', '').replace(' (Serie TV)', '').strip()
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
contentSerieName=scrapedtitle, show=scrapedtitle, plot=plot,
thumbnail=scrapedthumbnail, contentType='tvshow'))
url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
if url_next_page:
@@ -259,7 +254,6 @@ def temporadas(item):
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(datas)
patron = '<span class="title">([^<]+)<i>.*?' # numeros de temporadas
patron += '<img src="([^"]+)"></a></div>' # capitulos
@@ -268,13 +262,13 @@ def temporadas(item):
for scrapedseason, scrapedthumbnail in matches:
scrapedseason = " ".join(scrapedseason.split())
temporada = scrapertools.find_single_match(scrapedseason, '(\d+)')
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail)
new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='temporadas')
new_item.infoLabels['season'] = temporada
new_item.extra = ""
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
i.title = "%s. %s" % (i.infoLabels['season'], i.infoLabels['tvshowtitle'])
if i.infoLabels['title']:
@@ -286,6 +280,11 @@ def temporadas(item):
itemlist.sort(key=lambda it: it.title)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show, category="Series",
text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host))
return itemlist
else:
return episodios(item)
@@ -328,7 +327,6 @@ def episodios(item):
if not item.extra:
# Obtenemos los datos de todos los capitulos de la temporada mediante multihilos
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
for i in itemlist:
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadirselo al titulo del item
@@ -355,13 +353,17 @@ def findvideos(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'
patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, url in matches:
datas = httptools.downloadpage(urlparse.urljoin(host, url),
headers={'Referer': item.url}).data
patron = '<iframe[^>]+src="([^"]+)"'
url = scrapertools.find_single_match(datas, patron)
lang = scrapertools.find_single_match(
data, '<li><a class="options" href="#option-%s"><b class="icon-play_arrow"><\/b> (.*?)<span class="dt_flag">' % option)
lang = lang.replace('Español ', '').replace('B.S.O. ', '')
@@ -371,10 +373,9 @@ def findvideos(item):
itemlist.append(item.clone(action='play', url=url, title=title, extra1=title,
server=server, language = lang, text_color=color3))
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library",
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png',
extra="findvideos", contentTitle=item.contentTitle))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
thumbnail=thumbnail_host, contentTitle=item.contentTitle))
return itemlist

View File

@@ -1,22 +1,22 @@
{
"id": "ohlatino",
"name": "OH!Latino",
"id": "ciberpeliculashd",
"name": "Ciberpeliculashd",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "http://cinemiltonero.com/wp-content/uploads/2017/08/logo-Latino0.png",
"banner": "https://s27.postimg.org/bz0fh8jpf/oh-pelis-banner.png",
"thumbnail": "https://s17.postimg.org/78tekxeov/ciberpeliculashd1.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"id": "modo_grafico",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
@@ -26,6 +26,14 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
@@ -41,6 +49,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -0,0 +1,271 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
__channel__='ciberpeliculashd'
host = "http://ciberpeliculashd.net"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel = item.channel, title = "Películas", text_bold = True, folder = False))
itemlist.append(Item(channel = item.channel, title = " Novedades", action = "peliculas", url = host + "/?peli=1"))
itemlist.append(Item(channel = item.channel, title = " Por género", action = "filtro", url = host, extra = "categories" ))
itemlist.append(Item(channel = item.channel, title = " Por calidad", action = "filtro", url = host, extra = "qualitys"))
itemlist.append(Item(channel = item.channel, title = " Por idioma", action = "filtro", url = host, extra = "languages"))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Series", text_bold = True, folder = False))
itemlist.append(Item(channel = item.channel, title = " Novedades", action = "series", url = host + "/series/?peli=1"))
itemlist.append(Item(channel = item.channel, title = " Nuevos Capitulos", action = "nuevos_capitulos", url = host + "/series/?peli=1"))
itemlist.append(Item(channel = item.channel, title = ""))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s="))
return itemlist
def nuevos_capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'class="episode" href="([^"]+).*?'
patron += 'src="([^"]+).*?'
patron += 'title="([^"]+).*?'
patron += '-->([^<]+).*?'
patron += 'created_at">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedepisode, scrapeddays in matches:
scrapedtitle = scrapedtitle + " %s (%s)" %(scrapedepisode.strip(), scrapeddays.strip())
itemlist.append(Item(action = "findvideos",
channel = item.channel,
title = scrapedtitle,
thumbnail = scrapedthumbnail,
url = scrapedurl
))
return itemlist
def series(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'loop-posts series.*?panel-pagination pagination-bottom')
patron = 'a href="([^"]+).*?'
patron += '((?:http|https)://image.tmdb.org[^"]+).*?'
patron += 'title="([^"]+)'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
itemlist.append(Item(action = "temporadas",
channel = item.channel,
thumbnail = scrapedthumbnail,
title = scrapedtitle,
contentSerieName = scrapedtitle,
url = scrapedurl
))
if itemlist:
tmdb.set_infoLabels(itemlist)
page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
next_page = scrapertools.find_single_match(item.url,".*?peli=")
next_page += "%s" %page
itemlist.append(Item(action = "series",
channel = item.channel,
title = "Página siguiente",
url = next_page
))
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'Lista de Temporadas.*?</ul>')
matches = scrapertools.find_multiple_matches(bloque, '</i> (.*?[0-9]+)')
for scrapedtitle in matches:
season = scrapertools.find_single_match(scrapedtitle, '[0-9]+')
item.infoLabels["season"] = season
url = item.url + "?temporada=%s" %season
itemlist.append(item.clone(action = "capitulos",
title = scrapedtitle,
url = url
))
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title =""))
itemlist.append(item.clone(action = "add_serie_to_library",
channel = item.channel,
extra = "episodios",
title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url = item.url
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += capitulos(tempitem)
return itemlist
def capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td><a href="([^"]+).*?'
patron += '<b>(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapedtitle.replace("</b>", "")
episode = scrapertools.find_single_match(scrapedtitle, "Capitulo ([0-9]+)")
scrapedtitle = scrapedtitle.split(":")[1]
scrapedtitle = "%sx%s %s" %(item.infoLabels["season"], episode, scrapedtitle)
item.infoLabels["episode"] = episode
itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host + "/?peli=1"
elif categoria == 'infantiles':
item.url = host + '/categories/animacion/?peli=1'
elif categoria == 'terror':
item.url = host + '/categories/terror/?peli=1'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto + "&peli=1"
item.extra = "busca"
if texto != '':
return peliculas(item)
else:
return []
def filtro(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'uk-navbar-nav-subtitle taxonomy-menu-title">%s.*?</ul>' %item.extra
bloque = scrapertools.find_single_match(data, patron)
patron = "href='([^']+)"
patron += "'>([^<]+)"
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, titulo in matches:
itemlist.append(Item(channel = item.channel,
action = "peliculas",
title = titulo,
url = url + "/?peli=1"
))
return itemlist
def peliculas(item):
logger.info()
itemlist = []
infoLabels = dict()
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom')
patron = 'a href="([^"]+)".*?'
patron += 'img alt="([^"]+)".*?'
patron += '((?:http|https)://image.tmdb.org[^"]+)".*?'
patron += 'a href="([^"]+)".*?'
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedurl1 in matches:
scrapedtitle = scrapedtitle.replace(" Online imagen","").replace("Pelicula ","")
year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]+)\)")
if year:
year = int(year)
else:
year = 0
fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(")
if "serie" in scrapedurl:
action = "temporadas"
infoLabels ['tvshowtitle'] = scrapedtitle
else:
action = "findvideos"
infoLabels ['tvshowtitle'] = ""
infoLabels ['year'] = year
itemlist.append(Item(action = action,
channel = item.channel,
fulltitle = fulltitle,
thumbnail = scrapedthumbnail,
infoLabels = infoLabels,
title = scrapedtitle,
url = scrapedurl
))
if itemlist:
tmdb.set_infoLabels(itemlist)
page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1
next_page = scrapertools.find_single_match(item.url,".*?peli=")
next_page += "%s" %page
itemlist.append(Item(action = "peliculas",
channel = item.channel,
title = "Página siguiente",
url = next_page
))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'src=&quot;([^&]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
title = "Ver en: %s"
itemlist.append(item.clone(action = "play",
title = title,
url = scrapedurl
))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
fulltitle = item.fulltitle
))
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -10,7 +10,7 @@ from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://cinefoxtv.net/'
host = 'http://verhdpelis.com/'
headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]

View File

@@ -182,18 +182,15 @@ def destacadas(item):
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedtitle, scrapedurl, scrapedthumbnail in matches:
scrapedurl = CHANNEL_HOST + scrapedurl
scrapedtitle = scrapedtitle.replace("Ver ", "")
new_item = item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
itemlist.append(item.clone(action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle,
url=scrapedurl, thumbnail=scrapedthumbnail,
contentType="movie")
itemlist.append(new_item)
contentType="movie"
))
# Extrae el paginador
next_page_link = scrapertools.find_single_match(data, '<a href="([^"]+)"\s+><span [^>]+>&raquo;</span>')
if next_page_link:
itemlist.append(
item.clone(action="destacadas", title=">> Página siguiente", url=next_page_link, text_color=color3))
return itemlist
@@ -243,13 +240,9 @@ def findvideos(item):
# Busca el argumento
data = httptools.downloadpage(item.url).data
year = scrapertools.find_single_match(item.title, "\(([0-9]+)")
tmdb.set_infoLabels(item, __modo_grafico__)
if not item.infoLabels.get('plot'):
plot = scrapertools.find_single_match(data, '<div class="sinopsis"><p>(.*?)</p>')
item.infoLabels['plot'] = plot
if item.infoLabels["year"]:
tmdb.set_infoLabels(item, __modo_grafico__)
if filtro_enlaces != 0:
list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas, "online", item)
@@ -350,12 +343,14 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
def play(item):
logger.info()
itemlist = []
if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url:
if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url:
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
item.url = "http://docs.google.com/get_video_info?docid=" + id
if item.server == "okru":
item.url = "https://ok.ru/videoembed/" + id
if item.server == "youtube":
item.url = "https://www.youtube.com/embed/" + id
elif "links" in item.url or "www.cinetux.me" in item.url:
data = httptools.downloadpage(item.url).data
scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)')

View File

@@ -114,7 +114,9 @@ def lista(item):
itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host))
itemlist.append(item.clone(title="Dvdrip", action="entradas", url="%s/peliculas/dvdrip" % host))
itemlist.append(item.clone(title="HD (720p/1080p)", action="entradas", url="%s/peliculas/hd" % host))
itemlist.append(item.clone(title="4K", action="entradas", url="%s/peliculas/4k" % host))
itemlist.append(item.clone(title="HDRIP", action="entradas", url="%s/peliculas/hdrip" % host))
itemlist.append(item.clone(title="Latino", action="entradas",
url="%s/peliculas/latino-peliculas" % host))
itemlist.append(item.clone(title="VOSE", action="entradas", url="%s/peliculas/subtituladas" % host))

View File

@@ -260,14 +260,16 @@ def findvideos(item):
item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
link = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
if link != "":
link = "http://www.divxatope1.com/" + link
logger.info("torrent=" + link)
al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
if al_url_fa == "":
al_url_fa = scrapertools.find_single_match(data,
'location\.href.*?=.*?"http:\/\/divxatope1.com/(.*?)"')
if al_url_fa != "":
al_url_fa = "http://www.divxatope1.com/" + al_url_fa
logger.info("torrent=" + al_url_fa)
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
parentContent=item))
patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'

View File

@@ -222,9 +222,14 @@ def newest(categoria):
def findvideos(item):
logger.info()
itemlist = []
#itemlist = get_url(item)
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
player_vip = scrapertools.find_single_match(data, 'src=(https:\/\/content.jwplatform.com\/players.*?js)')
data_m3u8 = httptools.downloadpage(player_vip, headers= {'referer':item.url}).data
data_m3u8 = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data_m3u8)
url_m3u8 = scrapertools.find_single_match(data_m3u8,',sources:.*?file: (.*?),')
itemlist.append(item.clone(url=url_m3u8, action='play'))
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)'
matches = re.compile(patron, re.DOTALL).findall(data)

View File

@@ -51,8 +51,6 @@ def generos(item):
def peliculas(item):
logger.info()
# Descarga la página
data = httptools.downloadpage(item.url).data
patron = '<a class="Ntooltip" href="([^"]+)">([^<]+)<span><br[^<]+'
patron += '<img src="([^"]+)"></span></a>(.*?)<br'
@@ -61,25 +59,21 @@ def peliculas(item):
for scrapedurl, scrapedtitle, scrapedthumbnail, resto in matches:
language = []
plot = scrapertools.htmlclean(resto).strip()
logger.debug('plot: %s' % plot)
languages = scrapertools.find_multiple_matches(plot, r'\((V.)\)')
quality = scrapertools.find_single_match(plot, r'(?:\[.*?\].*?)\[(.*?)\]')
for lang in languages:
language.append(lang)
logger.debug('languages: %s' % languages)
title = scrapedtitle + " " + plot
contentTitle = scrapedtitle
url = item.url + scrapedurl
if not scrapedurl.startswith("http"):
scrapedurl = item.url + scrapedurl
itemlist.append(Item(channel = item.channel,
action = 'findvideos',
title = title,
url = url,
url = scrapedurl,
thumbnail = scrapedthumbnail,
plot = plot,
hasContentDetails = True,
contentTitle = contentTitle,
contentTitle = scrapedtitle,
contentType = "movie",
context = ["buscar_trailer"],
language=language,
quality=quality
))
@@ -89,13 +83,11 @@ def peliculas(item):
def findvideos(item):
logger.info("item=" + item.tostring())
itemlist = []
# Descarga la página para obtener el argumento
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="entry">(.*?)<div class="iframes">')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
patron = 'Ver película online.*?>.*?>([^<]+)'
patron = '<strong>Ver película online.*?>.*?>([^<]+)'
scrapedopcion = scrapertools.find_single_match(data, patron)
titulo_opcional = scrapertools.find_single_match(scrapedopcion, ".*?, (.*)").upper()
bloque = scrapertools.find_multiple_matches(data, 'contenedor_tab.*?/table')

View File

@@ -6,21 +6,18 @@ from core import httptools
from core import scrapertools
from platformcode import config, logger
host = "http://www.javtasty.com"
host = "https://www.javwhores.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/videos"))
itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/videos?o=tr"))
itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/videos?o=mv"))
itemlist.append(item.clone(action="lista", title="Ordenados por duración", url=host + "/videos?o=lg"))
itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories"))
itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
@@ -33,7 +30,7 @@ def configuracion(item):
def search(item, texto):
logger.info()
item.url = "%s/search?search_query=%s&search_type=videos" % (host, texto)
item.url = "%s/search/%s/" % (host, texto)
item.extra = texto
try:
return lista(item)
@@ -48,83 +45,66 @@ def search(item, texto):
def lista(item):
logger.info()
itemlist = []
# Descarga la pagina
data = httptools.downloadpage(item.url).data
action = "play"
if config.get_setting("menu_info", "javtasty"):
action = "menu_info"
# Extrae las entradas
patron = '<div class="well wellov well-sm".*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"(.*?)<div class="duration">(?:.*?</i>|)\s*([^<]+)<'
patron = 'div class="video-item.*?href="([^"]+)".*?'
patron += 'data-original="([^"]+)" '
patron += 'alt="([^"]+)"(.*?)fa fa-clock-o"></i>([^<]+)<'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, quality, duration in matches:
scrapedurl = urlparse.urljoin(host, scrapedurl)
scrapedtitle = scrapedtitle.strip()
if duration:
scrapedtitle = "%s - %s" % (duration.strip(), scrapedtitle)
if '>HD<' in quality:
scrapedtitle += " [COLOR red][HD][/COLOR]"
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
# Extrae la marca de siguiente página
next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="prevnext">')
next_page = scrapertools.find_single_match(data, 'next"><a href="([^"]+)')
if next_page:
next_page = next_page.replace("&amp;", "&")
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=host + next_page))
return itemlist
def categorias(item):
logger.info()
itemlist = []
# Descarga la pagina
data = httptools.downloadpage(item.url).data
# Extrae las entradas
patron = '<div class="col-sm-4.*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"'
patron = '(?s)<a class="item" href="([^"]+)".*?'
patron += 'src="([^"]+)" '
patron += 'alt="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedurl = urlparse.urljoin(host, scrapedurl)
scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
fanart=scrapedthumbnail))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
videourl = scrapertools.find_single_match(data, "var video_sd\s*=\s*'([^']+)'")
videourl = scrapertools.find_single_match(data, "video_url:\s*'([^']+)'")
if videourl:
itemlist.append(['.mp4 [directo]', videourl])
videourl = scrapertools.find_single_match(data, "var video_hd\s*=\s*'([^']+)'")
videourl = scrapertools.find_single_match(data, "video_alt_url:\s*'([^']+)'")
if videourl:
itemlist.append(['.mp4 HD [directo]', videourl])
if item.extra == "play_menu":
return itemlist, data
return itemlist
def menu_info(item):
logger.info()
itemlist = []
video_urls, data = play(item.clone(extra="play_menu"))
itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls))
bloque = scrapertools.find_single_match(data, '<div class="carousel-inner"(.*?)<div class="container">')
matches = scrapertools.find_multiple_matches(bloque, 'src="([^"]+)"')
for i, img in enumerate(matches):
@@ -132,5 +112,4 @@ def menu_info(item):
continue
title = "Imagen %s" % (str(i))
itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))
return itemlist

View File

@@ -15,6 +15,8 @@ from core import scrapertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from core import jsontools
from channels import side_menu
THUMBNAILS = {'0': 'posters', '1': 'banners', '2': 'squares'}
@@ -27,11 +29,16 @@ perfil = [['0xFF0B7B92', '0xFF89FDFB', '0xFFACD5D4'],
['0xFFA5DEE5', '0xFFE0F9B5', '0xFFFEFDCA'],
['0xFFF23557', '0xFF22B2DA', '0xFFF0D43A']]
#color1, color2, color3 = ["white", "white", "white"]
color1, color2, color3 = perfil[__perfil__]
list_newest = []
list_newest_tourl = []
channels_id_name = {}
menu_cache_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_cache_data.json')
menu_settings_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_settings_data.json')
def mainlist(item):
logger.info()
@@ -150,6 +157,54 @@ def get_channels_list():
return list_canales, any_active
def set_cache(item):
logger.info()
item.mode = 'set_cache'
t = Thread(target=novedades, args=[item])
t.start()
#t.join()
def get_from_cache(item):
logger.info()
itemlist=[]
cache_node = jsontools.get_node_from_file('menu_cache_data.json', 'cached')
first=item.last
last = first+40
#if last >=len(cache_node[item.extra]):
# last = len(cache_node[item.extra])
for cached_item in cache_node[item.extra][first:last]:
new_item= Item()
new_item = new_item.fromurl(cached_item)
itemlist.append(new_item)
if item.mode == 'silent':
set_cache(item)
if last >= len(cache_node[item.extra]):
item.mode='finish'
itemlist = add_menu_items(item, itemlist)
else:
item.mode='get_cached'
item.last =last
itemlist = add_menu_items(item, itemlist)
return itemlist
def add_menu_items(item, itemlist):
logger.info()
menu_icon = get_thumb('menu.png')
menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon, title='Menu')
itemlist.insert(0, menu)
if item.mode != 'finish':
if item.mode == 'get_cached':
last=item.last
else:
last = len(itemlist)
refresh_icon = get_thumb('more.png')
refresh = item.clone(thumbnail=refresh_icon, mode='get_cached',title='Mas', last=last)
itemlist.insert(len(itemlist), refresh)
return itemlist
def novedades(item):
logger.info()
@@ -159,6 +214,14 @@ def novedades(item):
list_newest = []
start_time = time.time()
mode = item.mode
if mode == '':
mode = 'normal'
if mode=='get_cached':
if os.path.exists(menu_cache_path):
return get_from_cache(item)
multithread = config.get_setting("multithread", "news")
logger.info("multithread= " + str(multithread))
@@ -170,8 +233,22 @@ def novedades(item):
if config.set_setting("multithread", True, "news"):
multithread = True
progreso = platformtools.dialog_progress(item.category, "Buscando canales...")
if mode == 'normal':
progreso = platformtools.dialog_progress(item.category, "Buscando canales...")
list_canales, any_active = get_channels_list()
if mode=='silent' and any_active and len(list_canales[item.extra]) > 0:
side_menu.set_menu_settings(item)
aux_list=[]
for canal in list_canales[item.extra]:
if len(aux_list)<2:
aux_list.append(canal)
list_canales[item.extra]=aux_list
if mode == 'set_cache':
list_canales[item.extra] = list_canales[item.extra][2:]
if any_active and len(list_canales[item.extra])>0:
import math
# fix float porque la division se hace mal en python 2.x
@@ -191,12 +268,14 @@ def novedades(item):
t = Thread(target=get_newest, args=[channel_id, item.extra], name=channel_title)
t.start()
threads.append(t)
progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
if mode == 'normal':
progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
# Modo single Thread
else:
logger.info("Obteniendo novedades de channel_id=" + channel_id)
progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
if mode == 'normal':
logger.info("Obteniendo novedades de channel_id=" + channel_id)
progreso.update(percentage, "", "Buscando en '%s'..." % channel_title)
get_newest(channel_id, item.extra)
# Modo Multi Thread: esperar q todos los hilos terminen
@@ -208,25 +287,29 @@ def novedades(item):
percentage = int(math.ceil(index * t))
list_pendent_names = [a.getName() for a in pendent]
mensaje = "Buscando en %s" % (", ".join(list_pendent_names))
progreso.update(percentage, "Finalizado en %d/%d canales..." % (len(threads) - len(pendent), len(threads)),
if mode == 'normal':
mensaje = "Buscando en %s" % (", ".join(list_pendent_names))
progreso.update(percentage, "Finalizado en %d/%d canales..." % (len(threads) - len(pendent), len(threads)),
mensaje)
logger.debug(mensaje)
logger.debug(mensaje)
if progreso.iscanceled():
logger.info("Busqueda de novedades cancelada")
break
if progreso.iscanceled():
logger.info("Busqueda de novedades cancelada")
break
time.sleep(0.5)
pendent = [a for a in threads if a.isAlive()]
mensaje = "Resultados obtenidos: %s | Tiempo: %2.f segundos" % (len(list_newest), time.time() - start_time)
progreso.update(100, mensaje, " ", " ")
logger.info(mensaje)
start_time = time.time()
# logger.debug(start_time)
if mode == 'normal':
mensaje = "Resultados obtenidos: %s | Tiempo: %2.f segundos" % (len(list_newest), time.time() - start_time)
progreso.update(100, mensaje, " ", " ")
logger.info(mensaje)
start_time = time.time()
# logger.debug(start_time)
result_mode = config.get_setting("result_mode", "news")
if mode != 'normal':
result_mode=0
if result_mode == 0: # Agrupados por contenido
ret = group_by_content(list_newest)
elif result_mode == 1: # Agrupados por canales
@@ -237,13 +320,19 @@ def novedades(item):
while time.time() - start_time < 2:
# mostrar cuadro de progreso con el tiempo empleado durante almenos 2 segundos
time.sleep(0.5)
progreso.close()
return ret
if mode == 'normal':
progreso.close()
if mode == 'silent':
set_cache(item)
item.mode = 'set_cache'
ret = add_menu_items(item, ret)
if mode != 'set_cache':
return ret
else:
no_channels = platformtools.dialog_ok('Novedades - %s'%item.extra, 'No se ha definido ningun canal para la '
'busqueda.','Utilice el menu contextual '
'para agregar al menos uno')
if mode != 'set_cache':
no_channels = platformtools.dialog_ok('Novedades - %s'%item.extra, 'No se ha definido ningun canal para la '
'busqueda.','Utilice el menu contextual '
'para agregar al menos uno')
return
@@ -251,6 +340,7 @@ def get_newest(channel_id, categoria):
logger.info("channel_id=" + channel_id + ", categoria=" + categoria)
global list_newest
global list_newest_tourl
# Solicitamos las novedades de la categoria (item.extra) buscada en el canal channel
# Si no existen novedades para esa categoria en el canal devuelve una lista vacia
@@ -271,11 +361,22 @@ def get_newest(channel_id, categoria):
logger.info("running channel " + modulo.__name__ + " " + modulo.__file__)
list_result = modulo.newest(categoria)
logger.info("canal= %s %d resultados" % (channel_id, len(list_result)))
exist=False
if os.path.exists(menu_cache_path):
cache_node = jsontools.get_node_from_file('menu_cache_data.json', 'cached')
exist=True
else:
cache_node = {}
#logger.debug('cache node: %s' % cache_node)
for item in list_result:
# logger.info("item="+item.tostring())
item.channel = channel_id
list_newest.append(item)
list_newest_tourl.append(item.tourl())
cache_node[categoria] = list_newest_tourl
jsontools.update_node(cache_node, 'menu_cache_data.json', "cached")
except:
logger.error("No se pueden recuperar novedades de: " + channel_id)

View File

@@ -1,206 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel OH!Latino -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.ohpeliculas.com'
def mainlist(item):
logger.info()
itemlist = []
data = httptools.downloadpage(host).data
patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)<\/a> <i>(\d+)<\/i>'
matches = scrapertools.find_multiple_matches(data, patron)
mcantidad = 0
for scrapedurl, scrapedtitle, cantidad in matches:
mcantidad += int(cantidad)
itemlist.append(
item.clone(title="Peliculas",
action='movies_menu'
))
itemlist.append(
item.clone(title="Buscar",
action="search",
url=host+'?s=',
))
return itemlist
def movies_menu(item):
logger.info()
itemlist = []
itemlist.append(
item.clone(title="Todas",
action="list_all",
url=host
))
itemlist.append(
item.clone(title="Generos",
action="section",
url=host, extra='genres'))
itemlist.append(
item.clone(title="Por año",
action="section",
url=host, extra='byyear'
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div id=mt-.*? class=item>.*?<a href=(.*?)><div class=image>.*?'
patron +='<img src=(.*?) alt=.*?span class=tt>(.*?)<.*?ttx>(.*?)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
url = scrapedurl
action = 'findvideos'
thumbnail = scrapedthumbnail
contentTitle = scrapedtitle
plot = scrapedplot
title = contentTitle
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
itemlist.append(Item(channel=item.channel,
action=action,
title=title,
url=url,
plot=plot,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'filtro': filtro_list}
))
#tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data,
'alignleft><a href=(.*?) ><\/a><\/div><div class=nav-next alignright>')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="list_all",
title='Siguiente >>>',
url=next_page,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'
))
return itemlist
def section(item):
logger.info()
itemlist = []
duplicated =[]
data = httptools.downloadpage(item.url).data
if item.extra == 'genres':
patron = '<li class="cat-item cat-item-.*?><a href="(.*?)" >(.*?)<\/a>'
elif item.extra == 'byyear':
patron = '<a href="([^"]+)">(\d{4})<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle
url = scrapedurl
if url not in duplicated:
itemlist.append(Item(channel=item.channel,
action='list_all',
title=title,
url=url
))
duplicated.append(url)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.contentTitle = item.fulltitle
videoitem.infoLabels = item.infoLabels
if videoitem.server != 'youtube':
videoitem.title = item.title + ' (%s)' % videoitem.server
else:
videoitem.title = 'Trailer en %s' % videoitem.server
videoitem.action = 'play'
videoitem.server = ""
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
))
tmdb.set_infoLabels(itemlist, True)
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
def newest(categoria):
logger.info()
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host + '/release/2017/'
elif categoria == 'infantiles':
item.url = host + '/genero/infantil/'
itemlist = list_all(item)
if itemlist[-1].title == '>> Página siguiente':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -43,7 +43,7 @@ def newest(categoria):
elif categoria == 'terror':
item.url = HOST + '/genero/terror.html'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
if ">> Página siguiente" in itemlist[-1].title:
itemlist.pop()
except:
import sys

File diff suppressed because it is too large

View File

@@ -226,6 +226,7 @@ def findvideos(item):
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<li data-quality=(.*?) data-lang=(.*?)><a href=(.*?) title=.*?'
matches = matches = re.compile(patron, re.DOTALL).findall(data)
for quality, lang, scrapedurl in matches:
@@ -237,13 +238,20 @@ def findvideos(item):
))
for videoitem in templist:
data = httptools.downloadpage(videoitem.url).data
urls_list = scrapertools.find_multiple_matches(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
urls_list = scrapertools.find_single_match(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
urls_list = urls_list.split("},")
for element in urls_list:
json_data=jsontools.load(element)
id = json_data['id']
sub = json_data['srt']
url = json_data['source']
if not element.endswith('}'):
element=element+'}'
json_data = jsontools.load(element)
if 'id' in json_data:
id = json_data['id']
sub=''
if 'srt' in json_data:
sub = json_data['srt']
url = json_data['source'].replace('\\','')
server = json_data['server']
quality = json_data['quality']
if 'http' not in url :
@@ -265,9 +273,9 @@ def findvideos(item):
video_url.server = ""
video_url.infoLabels = item.infoLabels
else:
video_list.append(item.clone(title=item.title, url=url, action='play', quality = quality
))
video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % i.server.capitalize())
title = '%s [%s]'% (server, quality)
video_list.append(item.clone(title=title, url=url, action='play', quality = quality,
server=server, subtitle=sub))
tmdb.set_infoLabels(video_list)
if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
video_list.append(
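The hunk above splits a serialized JS array on '},' and then restores the brace that split() consumed before parsing each element. A self-contained sketch of that parsing step, using the standard json module in place of the addon's jsontools (an assumption):

    import json

    def parse_sources(raw):
        # raw is the text between the brackets of "var ..._SOURCE = [ ... ]"
        sources = []
        for element in raw.split("},"):
            if not element.endswith("}"):
                element += "}"  # split("},") removed the closing brace
            data = json.loads(element)
            if "source" in data:
                # drop backslashes left over from the escaped JS literal
                sources.append(data["source"].replace("\\", ""))
        return sources

    # parse_sources('{"id":1,"source":"http://a"},{"id":2,"source":"http://b"}')
    # returns ['http://a', 'http://b']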

View File

@@ -136,18 +136,17 @@ def lista(item):
itemlist = []
data = get_source(item.url)
patron = 'class=(?:MvTbImg|TPostMv).*?href=(.*?)\/(?:>| class).*?src=(.*?) class=attachment.*?'
patron += '(?:strong|class=Title)>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?class=Qlty>(.*?)<.*?'
patron += '(?:strong|class=Title)>(.*?)<.*?(?:<td|class=Year)>(.*?)<.*?'
patron += '(?:<td|class=Description)>(.*?)<(?:\/td|\/p)>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality, scrapedplot in matches:
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
plot = scrapedplot
quality = scrapedquality
quality = ''
contentTitle = scrapedtitle
title = contentTitle + ' (%s)' % quality
title = contentTitle
year = scrapedyear
itemlist.append(item.clone(action='findvideos',

View File

@@ -16,19 +16,6 @@ def mainlist(item):
item.url = "http://www.pelispekes.com/"
data = scrapertools.cachePage(item.url)
'''
<div class="poster-media-card">
<a href="http://www.pelispekes.com/un-gallo-con-muchos-huevos/" title="Un gallo con muchos Huevos">
<div class="poster">
<div class="title">
<span class="under-title">Animacion</span>
</div>
<span class="rating">
<i class="glyphicon glyphicon-star"></i><span class="rating-number">6.2</span>
</span>
<div class="poster-image-container">
<img width="300" height="428" src="http://image.tmdb.org/t/p/w185/cz3Kb6Xa1q0uCrsTIRDS7fYOZyw.jpg" title="Un gallo con muchos Huevos" alt="Un gallo con muchos Huevos"/>
'''
patron = '<div class="poster-media-card"[^<]+'
patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
patron += '<div class="poster"[^<]+'
@@ -51,7 +38,7 @@ def mainlist(item):
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
plot=plot, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail))
plot=plot, contentTitle=title, contentThumbnail=thumbnail))
# Extract the next page
next_page_url = scrapertools.find_single_match(data,
@@ -65,14 +52,6 @@ def mainlist(item):
def findvideos(item):
logger.info("item=" + item.tostring())
'''
<h2>Sinopsis</h2>
<p>Para que todo salga bien en la prestigiosa Academia Werth, la pequeña y su madre se mudan a una casa nueva. La pequeña es muy seria y madura para su edad y planea estudiar durante las vacaciones siguiendo un estricto programa organizado por su madre; pero sus planes son perturbados por un vecino excéntrico y generoso. Él le enseña un mundo extraordinario en donde todo es posible. Un mundo en el que el Aviador se topó alguna vez con el misterioso Principito. Entonces comienza la aventura de la pequeña en el universo del Principito. Y así descubre nuevamente su infancia y comprenderá que sólo se ve bien con el corazón. Lo esencial es invisible a los ojos. Adaptación de la novela homónima de Antoine de Saint-Exupery.</p>
<div
'''
# Download the page to get the plot
data = scrapertools.cachePage(item.url)
data = data.replace("www.pelispekes.com/player/tune.php?nt=", "netu.tv/watch_video.php?v=")
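The replace() call above is the key step of this findvideos(): the site hides hosters behind its own player proxy, so rewriting the proxy URLs back to the hoster's canonical pattern lets servertools.find_video_items() recognise them. The same idea in isolation (the function name is ours):

    def normalize_player_urls(data):
        # Map the site's player proxy back to the hoster URL pattern
        # that the server detectors know how to match.
        return data.replace("www.pelispekes.com/player/tune.php?nt=",
                            "netu.tv/watch_video.php?v=")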

View File

@@ -146,8 +146,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + 'busqueda/?s=' + texto
if not item.extra:
item.extra = 'peliculas/'
try:
if texto != '':
return lista(item)
@@ -174,7 +173,7 @@ def lista(item):
data = httptools.downloadpage(item.url).data
if item.title != 'Buscar':
if item.action != 'search':
patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \
'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
actual = scrapertools.find_single_match(data,
@@ -197,8 +196,8 @@ def lista(item):
# from tmdb
filtro_list = filtro_list.items()
if item.action != 'search':
if item.title != 'Buscar':
new_item=(
Item(channel=item.channel,
contentType=tipo,
@@ -217,6 +216,14 @@ def lista(item):
new_item.contentTitle = scrapedtitle
itemlist.append(new_item)
else:
if item.extra=='':
item.extra = scrapertools.find_single_match(url, 'serie|pelicula')+'s/'
if 'series/' in item.extra:
accion = 'temporadas'
tipo = 'tvshow'
else:
accion = 'findvideos'
tipo = 'movie'
item.extra = item.extra.rstrip('s/')
if item.extra in url:
new_item=(
@@ -238,7 +245,7 @@ def lista(item):
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if item.title != 'Buscar' and actual != '':
if item.action != 'search' and actual != '':
if itemlist != []:
next_page = str(int(actual) + 1)
next_page_url = item.extra + 'pag-' + next_page

View File

@@ -97,12 +97,12 @@ def list_all (item):
contentType = 'pelicula'
action = 'findvideos'
patron = 'item-%s><a href=(.*?)><figure><img src=https:(.*?)'%contentType
patron += ' alt=><\/figure><p>(.*?)<\/p><span>(.*?)<\/span>'
patron = 'item-%s><a href=(.*?)><figure><img.*?data-src=(.*?) alt=.*?<p>(.*?)<\/p><span>(\d{4})<\/span>'%contentType
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
url = host+scrapedurl
url = host+scrapedurl+'p001/'
thumbnail = scrapedthumbnail
plot= ''
contentTitle=scrapedtitle
@@ -263,7 +263,9 @@ def findvideos(item):
video_list = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'data-source=(.*?) data.*?-srt=(.*?) data-iframe=0><a>(.*?) - (.*?)<\/a>'
patron = 'data-source=(.*?) .*?tab.*?data.*?srt=(.*?) data-iframe=><a>(.*?)\s?-\s?(.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, sub, language, quality in matches:

View File

@@ -7,7 +7,7 @@ from core import scrapertools
from core.item import Item
from platformcode import logger
host = "http://www.playpornx.net/"
host = "https://watchfreexxx.net/"
def mainlist(item):
@@ -17,7 +17,7 @@ def mainlist(item):
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png',
url =host))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url='http://www.playpornx.net/?s=',
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png'))
@@ -31,13 +31,21 @@ def lista(item):
if item.url == '': item.url = host
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt.*?<h2>(.*?)<\/h2>'
if item.extra != 'Buscar':
patron = '<div class=item>.*?href=(.*?)><div.*?<img src=(.*?) alt=(.*?) width'
else:
patron = '<div class=movie>.*?<img src=(.*?) alt=(.*?) \/>.*?href=(.*?)\/>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
title = scrapedtitle
for data_1, data_2, data_3 in matches:
if item.extra != 'Buscar':
url = data_1
thumbnail = data_2
title = data_3
else:
url = data_3
thumbnail = data_1
title = data_2
itemlist.append(Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail))
@@ -59,6 +67,7 @@ def search(item, texto):
try:
if texto != '':
item.extra = 'Buscar'
return lista(item)
else:
return []

View File

@@ -34,7 +34,8 @@ def login():
config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
url = "https://www.plusdede.com/"
headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36","Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
replace_headers=False).data
if "redirect" in data:
@@ -771,14 +772,17 @@ def checkseen(item):
if item.tipo == "8":
url_temp = "https://www.plusdede.com/set/episode/" + item.data_id + "/seen"
tipo_str = "series"
headers = {"Referer": "https://www.plusdede.com/serie/", "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
else:
url_temp = "https://www.plusdede.com/set/usermedia/" + item.tipo + "/" + item.data_id + "/seen"
tipo_str = "pelis"
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36", "Referer": "https://www.plusdede.com/serie/",
"X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
#logger.debug(data)
return True
@@ -927,7 +931,8 @@ def plusdede_check(item):
tipo_str = "listas"
else:
tipo_str = "pelis"
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/61.0.3163.100 Safari/537.36","Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
replace_headers=True).data.strip()
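The three hunks above make the same change: every AJAX call now sends an explicit User-Agent next to the Referer and CSRF token. A small helper could avoid repeating the header dict (a sketch, not the channel's actual code):

    USER_AGENT = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")

    def csrf_headers(referer, token):
        return {
            "User-Agent": USER_AGENT,
            "Referer": referer,
            "X-Requested-With": "XMLHttpRequest",  # the site expects AJAX requests
            "X-CSRF-TOKEN": token,
        }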

View File

@@ -0,0 +1,85 @@
{
"id": "pordede",
"name": "Pordede",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "pordede.png",
"banner": "pordede.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "pordedeuser",
"type": "text",
"label": "@30014",
"enabled": true,
"visible": true
},
{
"id": "pordedepassword",
"type": "text",
"hidden": true,
"label": "@30015",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": "!eq(-1,'') + !eq(-2,'')",
"visible": true
},
{
"id": "pordedesortlinks",
"type": "list",
"label": "Ordenar enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-2,'') + !eq(-3,'')",
"lvalues": [
"No",
"Por no Reportes",
"Por Idioma",
"Por Calidad",
"Por Idioma y Calidad",
"Por Idioma y no Reportes",
"Por Idioma, Calidad y no Reportes"
]
},
{
"id": "pordedeshowlinks",
"type": "list",
"label": "Mostrar enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-3,'') + !eq(-4,'')",
"lvalues": [
"Todos",
"Ver online",
"Descargar"
]
},
{
"id": "pordedenumberlinks",
"type": "list",
"label": "Limitar número de enlaces",
"default": 0,
"enabled": true,
"visible": "!eq(-4,'') + !eq(-5,'')",
"lvalues": [
"No",
"5",
"10",
"15",
"20",
"25",
"30"
]
}
]
}

View File

@@ -0,0 +1,665 @@
# -*- coding: utf-8 -*-
import os
import re
import sys
import urlparse
from core import channeltools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
def login():
url_origen = "http://www.pordede.com"
data = httptools.downloadpage(url_origen).data
if config.get_setting("pordedeuser", "pordede") in data:
return True
url = "http://www.pordede.com/api/login/auth?response_type=code&client_id=appclient&redirect_uri=http%3A%2F%2Fwww.pordede.com%2Fapi%2Flogin%2Freturn&state=none"
post = "username=%s&password=%s&authorized=autorizar" % (config.get_setting("pordedeuser", "pordede"), config.get_setting("pordedepassword", "pordede"))
data = httptools.downloadpage(url, post).data
if '"ok":true' in data:
return True
else:
return False
def mainlist(item):
logger.info()
itemlist = []
if not config.get_setting("pordedeuser", "pordede"):
itemlist.append( Item( channel=item.channel , title="Habilita tu cuenta en la configuración..." , action="settingCanal" , url="") )
else:
result = login()
if not result:
itemlist.append(Item(channel=item.channel, action="mainlist", title="Login fallido. Volver a intentar..."))
return itemlist
itemlist.append( Item(channel=item.channel, action="menuseries" , title="Series" , url="" ))
itemlist.append( Item(channel=item.channel, action="menupeliculas" , title="Películas y documentales" , url="" ))
itemlist.append( Item(channel=item.channel, action="listas_sigues" , title="Listas que sigues" , url="http://www.pordede.com/lists/following" ))
itemlist.append( Item(channel=item.channel, action="tus_listas" , title="Tus listas" , url="http://www.pordede.com/lists/yours" ))
itemlist.append( Item(channel=item.channel, action="listas_sigues" , title="Top listas" , url="http://www.pordede.com/lists" ))
itemlist.append( Item(channel=item.channel, action="settingCanal" , title="Configuración..." , url="" ))
return itemlist
def settingCanal(item):
return platformtools.show_channel_settings()
def menuseries(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Novedades" , url="http://www.pordede.com/series/loadmedia/offset/0/showlist/hot" ))
itemlist.append( Item(channel=item.channel, action="generos" , title="Por géneros" , url="http://www.pordede.com/series" ))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Siguiendo" , url="http://www.pordede.com/series/following" ))
itemlist.append( Item(channel=item.channel, action="siguientes" , title="Siguientes Capítulos" , url="http://www.pordede.com/main/index" , viewmode="movie"))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Favoritas" , url="http://www.pordede.com/series/favorite" ))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Pendientes" , url="http://www.pordede.com/series/pending" ))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Terminadas" , url="http://www.pordede.com/series/seen" ))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Recomendadas" , url="http://www.pordede.com/series/recommended" ))
itemlist.append( Item(channel=item.channel, action="search" , title="Buscar..." , url="http://www.pordede.com/series" ))
return itemlist
def menupeliculas(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Novedades" , url="http://www.pordede.com/pelis/loadmedia/offset/0/showlist/hot" ))
itemlist.append( Item(channel=item.channel, action="generos" , title="Por géneros" , url="http://www.pordede.com/pelis" ))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Favoritas" , url="http://www.pordede.com/pelis/favorite" ))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Pendientes" , url="http://www.pordede.com/pelis/pending" ))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Vistas" , url="http://www.pordede.com/pelis/seen" ))
itemlist.append( Item(channel=item.channel, action="peliculas" , title="Recomendadas" , url="http://www.pordede.com/pelis/recommended" ))
itemlist.append( Item(channel=item.channel, action="search" , title="Buscar..." , url="http://www.pordede.com/pelis" ))
return itemlist
def generos(item):
logger.info()
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries (folders)
data = scrapertools.find_single_match(data,'<div class="section genre">(.*?)</div>')
patron = '<a class="mediaFilterLink" data-value="([^"]+)" href="([^"]+)">([^<]+)<span class="num">\((\d+)\)</span></a>'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for textid,scrapedurl,scrapedtitle,cuantos in matches:
title = scrapedtitle.strip()+" ("+cuantos+")"
thumbnail = ""
plot = ""
if "/pelis" in item.url:
url = "http://www.pordede.com/pelis/loadmedia/offset/0/genre/"+textid.replace(" ","%20")+"/showlist/all"
else:
url = "http://www.pordede.com/series/loadmedia/offset/0/genre/"+textid.replace(" ","%20")+"/showlist/all"
itemlist.append( Item(channel=item.channel, action="peliculas" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title))
return itemlist
def search(item,texto):
logger.info()
if item.url=="":
item.url="http://www.pordede.com/pelis"
texto = texto.replace(" ","-")
item.extra = item.url
item.url = item.url+"/loadmedia/offset/0/query/"+texto+"/years/1950/on/undefined/showlist/all"
try:
return buscar(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def buscar(item):
logger.info()
# Download the page
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the entries (folders)
json_object = jsontools.load(data)
data = json_object["html"]
return parse_mixed_results(item,data)
def parse_mixed_results(item,data):
patron = '<a class="defaultLink extended" href="([^"]+)"[^<]+'
patron += '<div class="coverMini shadow tiptip" title="([^"]+)"[^<]+'
patron += '<img class="centeredPic.*?src="([^"]+)"'
patron += '[^<]+<img[^<]+<div class="extra-info">'
patron += '<span class="year">([^<]+)</span>'
patron += '<span class="value"><i class="icon-star"></i>([^<]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedyear,scrapedvalue in matches:
title = scrapertools.htmlclean(scrapedtitle)
if scrapedyear != '':
title += " ("+scrapedyear+")"
fulltitle = title
if scrapedvalue != '':
title += " ("+scrapedvalue+")"
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
fanart = thumbnail.replace("mediathumb","mediabigcover")
plot = ""
if "/peli/" in scrapedurl or "/docu/" in scrapedurl:
if "/peli/" in scrapedurl:
sectionStr = "peli"
else:
sectionStr = "docu"
referer = urlparse.urljoin(item.url,scrapedurl)
url = referer.replace("/{0}/".format(sectionStr),"/links/view/slug/")+"/what/{0}".format(sectionStr)
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , extra=referer, url=url, thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
contentTitle=scrapedtitle, contentType="movie", context=["buscar_trailer"]))
else:
referer = item.url
url = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="episodios" , title=title , extra=referer, url=url, thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
contentTitle=scrapedtitle, contentType="tvshow", context=["buscar_trailer"]))
next_page = scrapertools.find_single_match(data, '<div class="loadingBar" data-url="([^"]+)"')
if next_page != "":
url = urlparse.urljoin("http://www.pordede.com", next_page)
itemlist.append(
Item(channel=item.channel, action="lista", title=">> Página siguiente", extra=item.extra, url=url))
try:
import xbmcplugin
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
except:
pass
return itemlist
def siguientes(item):
logger.info()
# Download the page
data = httptools.downloadpage(item.url).data
# Extract the entries (folders)
bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
patron = '<div class="coverMini shadow tiptip" title="([^"]+)">[^<]+'
patron += '<img class="centeredPic centeredPicFalse" onerror="[^"]+" src="([^"]+)"[^<]+'
patron += '<img src="/images/loading-mini.gif" class="loader"/>[^<]+'
patron += '<div class="extra-info"><span class="year">[^<]+'
patron += '</span><span class="value"><i class="icon-star"></i>[^<]+'
patron += '</span></div>[^<]+'
patron += '</div>[^<]+'
patron += '</a>[^<]+'
patron += '<a class="userepiinfo defaultLink" href="([^"]+)">(\d+)x(\d+)'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for scrapedtitle,scrapedthumbnail,scrapedurl,scrapedsession,scrapedepisode in matches:
title = scrapertools.htmlclean(scrapedtitle)
session = scrapertools.htmlclean(scrapedsession)
episode = scrapertools.htmlclean(scrapedepisode)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
fanart = thumbnail.replace("mediathumb","mediabigcover")
plot = ""
title = session + "x" + episode + " - " + title
referer = urlparse.urljoin(item.url,scrapedurl)
url = referer
itemlist.append( Item(channel=item.channel, action="episodio" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=title, fanart=fanart, extra=session+"|"+episode))
return itemlist
def episodio(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
session = str(int(item.extra.split("|")[0]))
episode = str(int(item.extra.split("|")[1]))
patrontemporada = '<div class="checkSeason"[^>]+>Temporada '+session+'<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
matchestemporadas = re.compile(patrontemporada,re.DOTALL).findall(data)
for bloque_episodios in matchestemporadas:
# Extract the episodes
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">'+episode+' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
matches = re.compile(patron,re.DOTALL).findall(bloque_episodios)
for scrapedurl,scrapedtitle,info,visto in matches:
if visto.strip()=="active":
visto_string = "[visto] "
else:
visto_string = ""
numero=episode
title = visto_string+session+"x"+numero+" "+scrapertools.htmlclean(scrapedtitle)
thumbnail = ""
plot = ""
epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)")
url = "http://www.pordede.com/links/viewepisode/id/"+epid
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=item.fanart, show=item.show))
itemlist2 = []
for capitulo in itemlist:
itemlist2 = findvideos(capitulo)
return itemlist2
def peliculas(item):
logger.info()
# Download the page
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the entries (folders)
json_object = jsontools.load(data)
data = json_object["html"]
return parse_mixed_results(item,data)
def episodios(item):
logger.info()
itemlist = []
# Download the page
idserie = ''
data = httptools.downloadpage(item.url).data
patrontemporada = '<div class="checkSeason"[^>]+>([^<]+)<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>'
matchestemporadas = re.compile(patrontemporada,re.DOTALL).findall(data)
idserie = scrapertools.find_single_match(data,'<div id="layout4" class="itemProfile modelContainer" data-model="serie" data-id="(\d+)"')
for nombre_temporada,bloque_episodios in matchestemporadas:
# Extract the episodes
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">([^<]+)</span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
matches = re.compile(patron,re.DOTALL).findall(bloque_episodios)
for scrapedurl,numero,scrapedtitle,info,visto in matches:
if visto.strip()=="active":
visto_string = "[visto] "
else:
visto_string = ""
title = visto_string+nombre_temporada.replace("Temporada ", "").replace("Extras", "Extras 0")+"x"+numero+" "+scrapertools.htmlclean(scrapedtitle)
thumbnail = item.thumbnail
fanart= item.fanart
plot = ""
epid = scrapertools.find_single_match(scrapedurl,"id/(\d+)")
url = "http://www.pordede.com/links/viewepisode/id/"+epid
itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, fanart= fanart, show=item.show))
if config.get_videolibrary_support():
show = re.sub(r"\s\(\d+\)\s\(\d+\.\d+\)", "", item.show)
itemlist.append( Item(channel='pordede', title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios###", show=show) )
itemlist.append( Item(channel='pordede', title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=show))
itemlist.append( Item(channel='pordede', title="Marcar como Pendiente", tipo="serie", idtemp=idserie, valor="1", action="pordede_check", show=show))
itemlist.append( Item(channel='pordede', title="Marcar como Siguiendo", tipo="serie", idtemp=idserie, valor="2", action="pordede_check", show=show))
itemlist.append( Item(channel='pordede', title="Marcar como Finalizada", tipo="serie", idtemp=idserie, valor="3", action="pordede_check", show=show))
itemlist.append( Item(channel='pordede', title="Marcar como Favorita", tipo="serie", idtemp=idserie, valor="4", action="pordede_check", show=show))
itemlist.append( Item(channel='pordede', title="Quitar marca", tipo="serie", idtemp=idserie, valor="0", action="pordede_check", show=show))
return itemlist
def parse_listas(item, patron):
logger.info()
# Download the page
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the entries (folders)
json_object = jsontools.load(data)
data = json_object["html"]
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
for scrapedurl,scrapedtitle,scrapeduser,scrapedfichas in matches:
title = scrapertools.htmlclean(scrapedtitle + ' (' + scrapedfichas + ' fichas, por ' + scrapeduser + ')')
url = urlparse.urljoin(item.url,scrapedurl) + "/offset/0/loadmedia"
thumbnail = ""
itemlist.append( Item(channel=item.channel, action="lista" , title=title , url=url))
nextpage = scrapertools.find_single_match(data,'data-url="(/lists/loadlists/offset/[^"]+)"')
if nextpage != '':
url = urlparse.urljoin(item.url,nextpage)
itemlist.append( Item(channel=item.channel, action="listas_sigues" , title=">> Página siguiente" , extra=item.extra, url=url))
try:
import xbmcplugin
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_TITLE)
except:
pass
return itemlist
def listas_sigues(item):
logger.info()
patron = '<div class="clearfix modelContainer" data-model="lista"[^<]+'
patron += '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>'
patron += '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)'
return parse_listas(item, patron)
def tus_listas(item):
logger.info()
patron = '<div class="clearfix modelContainer" data-model="lista"[^<]+'
patron += '<div class="right"[^<]+'
patron += '<button[^<]+</button[^<]+'
patron += '<button[^<]+</button[^<]+'
patron += '</div[^<]+'
patron += '<span class="title"><span class="name"><a class="defaultLink" href="([^"]+)">([^<]+)</a>'
patron += '</span>[^<]+<a[^>]+>([^<]+)</a></span>\s+<div[^<]+<div[^<]+</div>\s+<div class="info">\s+<p>([0-9]+)'
return parse_listas(item, patron)
def lista(item):
logger.info()
# Download the page
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
# Extract the entries (folders)
json_object = jsontools.load(data)
data = json_object["html"]
return parse_mixed_results(item,data)
def findvideos(item, verTodos=False):
logger.info()
# Download the page
data = httptools.downloadpage(item.url).data
logger.info(data)
sesion = scrapertools.find_single_match(data,'SESS = "([^"]+)";')
patron = '<a target="_blank" class="a aporteLink(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
itemlist = []
idpeli = scrapertools.find_single_match(data,'<div class="buttons"><button class="defaultPopup onlyLogin" href="/links/create/ref_id/(\d+)/ref_model/4">Añadir enlace')
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")) and "/what/peli" in item.url:
itemlist.append( Item(channel=item.channel, action="infosinopsis" , title="INFO / SINOPSIS" , url=item.url, thumbnail=item.thumbnail, fanart=item.fanart, folder=False ))
itemsort = []
sortlinks = config.get_setting("pordedesortlinks",item.channel)
showlinks = config.get_setting("pordedeshowlinks",item.channel)
if sortlinks != '' and sortlinks !="No":
sortlinks = int(sortlinks)
else:
sortlinks = 0
if showlinks != '' and showlinks !="No":
showlinks = int(showlinks)
else:
showlinks = 0
for match in matches:
jdown = scrapertools.find_single_match(match,'<div class="jdownloader">[^<]+</div>')
if (showlinks == 1 and jdown != '') or (showlinks == 2 and jdown == ''):
continue
idiomas = re.compile('<div class="flag([^"]+)">([^<]+)</div>',re.DOTALL).findall(match)
idioma_0 = (idiomas[0][0].replace("&nbsp;","").strip() + " " + idiomas[0][1].replace("&nbsp;","").strip()).strip()
if len(idiomas) > 1:
idioma_1 = (idiomas[1][0].replace("&nbsp;","").strip() + " " + idiomas[1][1].replace("&nbsp;","").strip()).strip()
idioma = idioma_0 + ", " + idioma_1
else:
idioma_1 = ''
idioma = idioma_0
calidad_video = scrapertools.find_single_match(match,'<div class="linkInfo quality"><i class="icon-facetime-video"></i>([^<]+)</div>')
calidad_audio = scrapertools.find_single_match(match,'<div class="linkInfo qualityaudio"><i class="icon-headphones"></i>([^<]+)</div>')
thumb_servidor = scrapertools.find_single_match(match,'<div class="hostimage"[^<]+<img\s*src="([^"]+)">')
nombre_servidor = scrapertools.find_single_match(thumb_servidor,"popup_([^\.]+)\.png")
if jdown != '':
title = "Download "+nombre_servidor+" ("+idioma+") (Calidad "+calidad_video.strip()+", audio "+calidad_audio.strip()+")"
else:
title = "Ver en "+nombre_servidor+" ("+idioma+") (Calidad "+calidad_video.strip()+", audio "+calidad_audio.strip()+")"
cuenta = []
valoracion = 0
for idx, val in enumerate(['1', '2', 'report']):
nn = scrapertools.find_single_match(match,'<span\s+data-num="([^"]+)"\s+class="defaultPopup"\s+href="/likes/popup/value/'+val+'/')
if nn != '0' and nn != '':
cuenta.append(nn + ' ' + ['ok', 'ko', 'rep'][idx])
if val == '1':
valoracion += int(nn)
else:
valoracion += -int(nn)
if len(cuenta) > 0:
title += ' (' + ', '.join(cuenta) + ')'
url = urlparse.urljoin( item.url , scrapertools.find_single_match(match,'href="([^"]+)"') )
thumbnail = thumb_servidor
plot = ""
if sortlinks > 0:
if sortlinks == 1:
orden = valoracion
elif sortlinks == 2:
orden = valora_idioma(idioma_0, idioma_1)
elif sortlinks == 3:
orden = valora_calidad(calidad_video, calidad_audio)
elif sortlinks == 4:
orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio)
elif sortlinks == 5:
orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion
elif sortlinks == 6:
orden = (valora_idioma(idioma_0, idioma_1) * 100000) + (valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion
itemsort.append({'action': "play", 'title': title, 'url':url, 'thumbnail':thumbnail, 'fanart':item.fanart, 'plot':plot, 'extra':sesion+"|"+item.url, 'fulltitle':item.fulltitle, 'orden1': (jdown == ''), 'orden2':orden})
else:
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, fanart= item.fanart, plot=plot, extra=sesion+"|"+item.url, fulltitle=item.fulltitle))
if sortlinks > 0:
numberlinks = config.get_setting("pordedenumberlinks",item.channel)
if numberlinks != '' and numberlinks !="No":
numberlinks = int(numberlinks)
else:
numberlinks = 0
if numberlinks == 0:
verTodos = True
itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True)
for i, subitem in enumerate(itemsort):
if verTodos == False and i >= numberlinks:
itemlist.append(Item(channel=item.channel, action='findallvideos' , title='Ver todos los enlaces', url=item.url, extra=item.extra ))
break
itemlist.append( Item(channel=item.channel, action=subitem['action'] , title=subitem['title'] , url=subitem['url'] , thumbnail=subitem['thumbnail'] , fanart= subitem['fanart'], plot=subitem['plot'] , extra=subitem['extra'] , fulltitle=subitem['fulltitle'] ))
if "/what/peli" in item.url or "/what/docu" in item.url:
itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Pendiente" , valor="1", idtemp=idpeli))
itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Vista" , valor="3", idtemp=idpeli))
itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Marcar como Favorita" , valor="4", idtemp=idpeli))
itemlist.append( Item(channel=item.channel, action="pordede_check" , tipo="peli", title="Quitar Marca" , valor="0", idtemp=idpeli))
return itemlist
def findallvideos(item):
return findvideos(item, True)
def play(item):
# Mark as seen
checkseen(item.extra.split("|")[1])
headers = {'Referer': item.extra.split("|")[1]}
data = httptools.downloadpage(item.url, post="_s="+item.extra.split("|")[0], headers=headers).data
url = scrapertools.find_single_match(data,'<p class="nicetry links">\s+<a href="([^"]+)" target="_blank"')
url = urlparse.urljoin(item.url,url)
headers = {'Referer': item.url}
media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
itemlist = servertools.find_video_items(data=media_url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
def checkseen(item):
logger.info(item)
if "/viewepisode/" in item:
episode = item.split("/")[-1]
httptools.downloadpage("http://www.pordede.com/ajax/action", post="model=episode&id="+episode+"&action=seen&value=1")
if "/what/peli" in item:
data = httptools.downloadpage(item).data
movieid = scrapertools.find_single_match(data,'href="/links/create/ref_id/([0-9]+)/ref_model/')
httptools.downloadpage("http://www.pordede.com/ajax/mediaaction", post="model=peli&id="+movieid+"&action=status&value=3")
return True
def infosinopsis(item):
logger.info()
url_aux = item.url.replace("/links/view/slug/", "/peli/").replace("/what/peli", "")
# Download the page
data = httptools.downloadpage(url_aux).data
scrapedtitle = scrapertools.find_single_match(data,'<h1>([^<]+)</h1>')
scrapedvalue = scrapertools.find_single_match(data,'<span class="puntuationValue" data-value="([^"]+)"')
scrapedyear = scrapertools.find_single_match(data,'<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>')
scrapedduration = scrapertools.find_single_match(data,'<h2 class="info">[^<]+</h2>\s*<p class="info">([^<]+)</p>', 1)
scrapedplot = scrapertools.find_single_match(data,'<div class="info text"[^>]+>([^<]+)</div>')
scrapedgenres = re.compile('href="/pelis/index/genre/[^"]+">([^<]+)</a>',re.DOTALL).findall(data)
scrapedcasting = re.compile('href="/star/[^"]+">([^<]+)</a><br/><span>([^<]+)</span>',re.DOTALL).findall(data)
title = scrapertools.htmlclean(scrapedtitle)
plot = "Año: [B]"+scrapedyear+"[/B]"
plot += " , Duración: [B]"+scrapedduration+"[/B]"
plot += " , Puntuación usuarios: [B]"+scrapedvalue+"[/B]"
plot += "\nGéneros: "+", ".join(scrapedgenres)
plot += "\n\nSinopsis:\n"+scrapertools.htmlclean(scrapedplot)
plot += "\n\nCasting:\n"
for actor,papel in scrapedcasting:
plot += actor+" ("+papel+"). "
tbd = TextBox("DialogTextViewer.xml", os.getcwd(), "Default")
tbd.ask(title, plot)
del tbd
return
try:
import xbmcgui
class TextBox( xbmcgui.WindowXML ):
""" Create a skinned textbox window """
def __init__( self, *args, **kwargs):
pass
def onInit( self ):
try:
self.getControl( 5 ).setText( self.text )
self.getControl( 1 ).setLabel( self.title )
except: pass
def onClick( self, controlId ):
pass
def onFocus( self, controlId ):
pass
def onAction( self, action ):
if action == 7:
self.close()
def ask(self, title, text ):
self.title = title
self.text = text
self.doModal()
except:
pass
def valora_calidad(video, audio):
prefs_video = [ 'hdmicro', 'hd1080', 'hd720', 'hdrip', 'dvdrip', 'rip', 'tc-screener', 'ts-screener' ]
prefs_audio = [ 'dts', '5.1', 'rip', 'line', 'screener' ]
video = ''.join(video.split()).lower()
if video in prefs_video:
pts = (9 - prefs_video.index(video)) * 10
else:
pts = (9 - 1) * 10
audio = ''.join(audio.split()).lower()
# add the audio score to the video score instead of overwriting it
if audio in prefs_audio:
pts += 9 - prefs_audio.index(audio)
else:
pts += 9 - 1
return pts
def valora_idioma(idioma_0, idioma_1):
prefs = [ 'spanish', 'spanish LAT', 'catalan', 'english', 'french' ]
if idioma_0 in prefs:
pts = (9 - prefs.index(idioma_0)) * 10
else:
pts = (9 - 1) * 10
if idioma_1 != '':
idioma_1 = idioma_1.replace(' SUB', '')
if idioma_1 in prefs:
pts += 8 - prefs.index(idioma_1)
else:
pts += 8 - 1
else:
pts += 9
return pts
def pordede_check(item):
httptools.downloadpage("http://www.pordede.com/ajax/mediaaction", post="model="+item.tipo+"&id="+item.idtemp+"&action=status&value="+item.valor)

View File

@@ -1,13 +0,0 @@
{
"id": "quierodibujosanimados",
"name": "Quiero Dibujos Animados",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "quierodibujosanimados.png",
"banner": "quierodibujosanimados.png",
"fanart": "quierodibujosanimados.jpg",
"categories": [
"tvshow"
]
}

View File

@@ -1,116 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
# itemlist.append( Item(channel=item.channel , action="novedades" , title="Novedades" , url="http://www.quierodibujosanimados.com/"))
return series(
Item(channel=item.channel, action="series", title="Series", url="http://www.quierodibujosanimados.com/",
fanart=item.fanart))
def series(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = scrapertools.get_match(data, '<ul class="categorias">(.*?)</ul')
# <a href="http://www.quierodibujosanimados.com/cat/popeye-el-marino/38" title="Popeye el marino">Popeye el marino</a>
patron = '<a href="([^"]+)"[^>]+>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = ""
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=item.fanart))
next_page_url = scrapertools.find_single_match(data, '</span[^<]+<a href="([^"]+)">')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente",
url=urlparse.urljoin(item.url, next_page_url), folder=True,
fanart=item.fanart))
return itemlist
def episodios(item):
logger.info()
'''
<li>
<div class="info">
<h2><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h2>
<p>Caillou volvía con su hermanita Rosi y su mamá de la biblioteca y traían un montón de libros que Caillou quería leer, especialmente uno de piratas. Capítulo titulado "Caillou ratón de biblioteca".</p>
<div class="pie">
<div class="categoria">
<span>Categor&iacute;a:</span>
<a href="http://www.quierodibujosanimados.com/cat/caillou/14" title="Caillou" class="categoria">Caillou</a>
</div>
<div class="puntuacion">
<div class="rating_16 punt_0" data-noticia="954">
<span>0.5</span>
<span>1</span>
<span>1.5</span>
<span>2</span>
<span>2.5</span>
<span>3</span>
<span>3.5</span>
<span>4</span>
<span>4.5</span>
<span>5</span>
</div>
</div>
</div>
<span class="pico"></span>
</div>
<div class="dibujo">
<a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca" class="thumb">
<img src="http://www.quierodibujosanimados.com/i/thm-Caillou-raton-de-biblioteca.jpg" alt="Caillou ratón de biblioteca" width="137" height="174" />
</a>
<h4><a href="http://www.quierodibujosanimados.com/Caillou-raton-de-biblioteca/954" title="Caillou ratón de biblioteca">Caillou ratón de biblioteca</a></h4>
</div>
</li>
'''
# Download the page
data = scrapertools.cache_page(item.url)
patron = '<div class="dibujo"[^<]+'
patron += '<a href="([^"]+)" title="([^"]+)"[^<]+'
patron += '<img src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapedtitle.strip()
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=item.fanart))
next_page_url = scrapertools.find_single_match(data, '</span[^<]+<a href="([^"]+)">')
if next_page_url != "":
itemlist.append(Item(channel=item.channel, action="episodios", title=">> Página siguiente",
url=urlparse.urljoin(item.url, next_page_url), folder=True,
fanart=item.fanart))
return itemlist

View File

@@ -267,10 +267,9 @@ def channel_search(search_results, channel_parameters, tecleado):
if result is None:
result = []
if len(result):
if not channel_parameters["title"] in search_results:
search_results[channel_parameters["title"]] = []
search_results[channel_parameters["title"]].append({"item": item,
if not channel_parameters["title"].capitalize() in search_results:
search_results[channel_parameters["title"].capitalize()] = []
search_results[channel_parameters["title"].capitalize()].append({"item": item,
"itemlist": result,
"adult": channel_parameters["adult"]})

View File

@@ -3,16 +3,18 @@
import re
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
host = "https://www.serviporno.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, action="videos", title="Útimos videos", url="http://www.serviporno.com/"))
Item(channel=item.channel, action="videos", title="Útimos videos", url= host))
itemlist.append(
Item(channel=item.channel, action="videos", title="Más vistos", url="http://www.serviporno.com/mas-vistos/"))
itemlist.append(
@@ -43,15 +45,14 @@ def search(item, texto):
def videos(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
data = httptools.downloadpage(item.url).data
patron = '<div class="wrap-box-escena">.*?'
patron = '(?s)<div class="wrap-box-escena">.*?'
patron += '<div class="box-escena">.*?'
patron += '<a href="([^"]+)" data-stats-video-id="[^"]+" data-stats-video-name="([^"]+)" data-stats-video-category="[^"]*" data-stats-list-name="[^"]*" data-stats-list-pos="[^"]*">.*?'
patron += '<img src="([^"]+)" data-src="[^"]+" alt="[^"]+" id=\'[^\']+\' class="thumbs-changer" data-thumbs-prefix="[^"]+" height="150px" width="175px" border=0 />'
matches = re.compile(patron, re.DOTALL).findall(data)
logger.info(str(matches))
patron += '<a\s*href="([^"]+)".*?'
patron += 'data-stats-video-name="([^"]+)".*?'
patron += '<img\s*src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title, thumbnail in matches:
url = urlparse.urljoin(item.url, url)
itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail))
@@ -106,10 +107,9 @@ def categorias(item):
def play(item):
logger.info()
itemlist = []
data = scrapertools.downloadpage(item.url)
url = scrapertools.get_match(data, "url: '([^']+)',\s*framesURL:")
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, "sendCdnInfo.'([^']+)")
itemlist.append(
Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail,
plot=item.plot, folder=False))
return itemlist
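The new videos() pattern above relies on the (?s) inline flag, which is equivalent to compiling with re.DOTALL: the dot also matches newlines, so one pattern can span the whole HTML block without passing a flag argument. A quick self-contained check:

    import re

    html = '<div class="wrap-box-escena">\n  <a href="/v/1" data-stats-video-name="t">'
    match = re.search(r'(?s)<div class="wrap-box-escena">.*?<a\s*href="([^"]+)"', html)
    assert match.group(1) == "/v/1"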

View File

@@ -0,0 +1,341 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
import os
from core.item import Item
from core import jsontools
from platformcode import config, logger
from platformcode import launcher
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
media_path = os.path.join(config.get_runtime_path(), "resources/skins/Default/media/side_menu/")
menu_settings_path = os.path.join(config.get_data_path(), "settings_channels", 'menu_settings_data.json')
if os.path.exists(menu_settings_path):
menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
else:
menu_node = {'categoria actual':config.get_setting('category')}
jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
def set_menu_settings(item):
if os.path.exists(menu_settings_path):
menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
else:
menu_node = {}
menu_node['categoria actual'] = item.extra
jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")
def check_user_home(item):
logger.info()
if os.path.exists(menu_settings_path):
menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
if 'user_home' in menu_node:
item = Item().fromurl(menu_node['user_home'])
else:
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
from platformcode import platformtools
undefined_start = platformtools.dialog_ok('Inicio Personalizado', 'No has definido ninguna seccion para mostrar '
'en tu inicio', 'Utiliza el menu contextual para definir una')
return item
def set_custom_start(item):
logger.info()
if os.path.exists(menu_settings_path):
menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
else:
menu_node={}
parent_item= Item().fromurl(item.parent)
parent_item.start=True
config.set_setting("custom_start",True)
if config.get_setting("news_start"):
config.set_setting("news_start", False)
menu_node['user_home']=parent_item.tourl()
jsontools.update_node(menu_node, 'menu_settings_data.json', "menu")
def get_start_page():
logger.info()
category = config.get_setting('category').lower()
custom_start= config.get_setting("custom_start")
#if category != 'definido':
if custom_start == False:
item = Item(channel="news", action="novedades", extra=category, mode='silent')
else:
from channels import side_menu
item = Item()
item = side_menu.check_user_home(item)
return item
def open_menu(item):
main = Main('side_menu.xml', config.get_runtime_path())
main.doModal()
del main
class Main(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.items = []
def onInit(self):
self.setCoordinateResolution(2)
self.focus = -1
self.buttons = []
posx= 0
posy= 145
space = 30
selected = 'selected0.png'
width = 260
height = 30
textcolor = "0xffffd700"
conditional_textcolor = "0xffff3030"
shadow = "0xFF000000"
offsetx = 30
offsety = 0
font = 'font25_title'
if config.get_setting('start_page'):
label = 'Inicio'
self.button_start = xbmcgui.ControlButton(posx, posy, width, height, label, font=font, alignment=0x00000000,
noFocusTexture='', focusTexture=media_path + selected,
textColor=textcolor, shadowColor=shadow, textOffsetX=offsetx,
textOffsetY=offsety)
self.addControl(self.button_start)
self.buttons.append(self.button_start)
posy += space * 2
label = 'Menú Clasico'
self.button_alfa = xbmcgui.ControlButton(posx, posy, width, height, label, font=font, alignment=0x00000000,
noFocusTexture='', focusTexture=media_path+selected,
textColor=textcolor, shadowColor=shadow, textOffsetX=offsetx,
textOffsetY=offsety)
self.addControl(self.button_alfa)
self.buttons.append(self.button_alfa)
posy += space
label = 'Configuracion'
self.button_config = xbmcgui.ControlButton(posx, posy, width, height, label, font=font, alignment=0x00000000,
noFocusTexture='', focusTexture=media_path + selected,
textColor=textcolor, shadowColor=shadow, textOffsetX=offsetx,
textOffsetY=offsety)
self.addControl(self.button_config)
self.buttons.append(self.button_config)
posy += space*2
label = 'Peliculas'
self.button_peliculas = xbmcgui.ControlButton(posx, posy, width, height, label, font=font,
alignment=0x00000000, noFocusTexture='',
focusTexture=media_path+selected, textColor=textcolor,
shadowColor=shadow, textOffsetX=offsetx, textOffsetY=offsety)
self.addControl(self.button_peliculas)
self.buttons.append(self.button_peliculas)
posy += space
label = 'Series'
self.button_series = xbmcgui.ControlButton(posx, posy, width, height, label, font=font,
alignment=0x00000000, noFocusTexture='',
focusTexture=media_path+selected, textColor=textcolor,
shadowColor=shadow, textOffsetX=offsetx, textOffsetY=offsety)
self.addControl(self.button_series)
self.buttons.append(self.button_series)
posy += space
label = 'Anime'
self.button_anime = xbmcgui.ControlButton(posx, posy, width, height, label, font=font, alignment=0x00000000,
noFocusTexture='', focusTexture=media_path+selected,
textColor=textcolor, shadowColor=shadow, textOffsetX=offsetx,
textOffsetY=offsety)
self.addControl(self.button_anime)
self.buttons.append(self.button_anime)
posy += space
label = 'Infantiles'
self.button_infantil = xbmcgui.ControlButton(posx, posy, width, height, label, font=font,
alignment=0x00000000, noFocusTexture='',
focusTexture=media_path+selected, textColor=textcolor,
shadowColor=shadow, textOffsetX=offsetx, textOffsetY=offsety)
self.addControl(self.button_infantil)
self.buttons.append(self.button_infantil)
posy += space
label = 'Documentales'
self.button_docu = xbmcgui.ControlButton(posx, posy, width, height, label, font=font,
alignment=0x00000000, noFocusTexture='',
focusTexture=media_path + selected, textColor=textcolor,
shadowColor=shadow, textOffsetX=offsetx, textOffsetY=offsety)
self.addControl(self.button_docu)
self.buttons.append(self.button_docu)
posy += space
label = 'Terror'
self.button_terror = xbmcgui.ControlButton(posx, posy, width, height, label, font=font,
alignment=0x00000000, noFocusTexture='',
focusTexture=media_path+selected, textColor=textcolor,
shadowColor=shadow, textOffsetX=offsetx, textOffsetY=offsety)
self.addControl(self.button_terror)
self.buttons.append(self.button_terror)
posy += space
label = 'Latino'
self.button_lat = xbmcgui.ControlButton(posx, posy, width, height, label, font=font, alignment=0x00000000,
noFocusTexture='', focusTexture=media_path+selected,
textColor=textcolor, shadowColor=shadow, textOffsetX=offsetx,
textOffsetY=offsety)
self.addControl(self.button_lat)
self.buttons.append(self.button_lat)
posy += space
label = 'Castellano'
self.button_cast = xbmcgui.ControlButton(posx, posy, width, height, label, font=font, alignment=0x00000000,
noFocusTexture='', focusTexture=media_path + selected,
textColor=textcolor, shadowColor=shadow, textOffsetX=offsetx,
textOffsetY=offsety)
self.addControl(self.button_cast)
self.buttons.append(self.button_cast)
posy += space
label = 'Torrents'
self.button_torrent = xbmcgui.ControlButton(posx, posy, width, height, label, font=font,
alignment=0x00000000, noFocusTexture='',
focusTexture=media_path+selected, textColor=textcolor,
shadowColor=shadow, textOffsetX=offsetx, textOffsetY=offsety)
self.addControl(self.button_torrent)
self.buttons.append(self.button_torrent)
start_page_item = get_start_page()
if config.get_setting('start_page') and start_page_item.channel =='news':
posy += space
label = 'Canales Activos'
self.button_config = xbmcgui.ControlButton(posx, posy, width, height, label, font=font,
alignment=0x00000000, noFocusTexture='',
focusTexture=media_path+selected, textColor=conditional_textcolor,
shadowColor=shadow, textOffsetX=offsetx, textOffsetY=offsety)
self.addControl(self.button_config)
self.buttons.append(self.button_config)
posy += space*2
label = 'Buscar'
self.button_buscar = xbmcgui.ControlButton(posx, posy, width, height, label, font=font, alignment=0x00000000,
noFocusTexture='', focusTexture=media_path + selected,
textColor=textcolor, shadowColor=shadow, textOffsetX=offsetx,
textOffsetY=offsety)
self.addControl(self.button_buscar)
self.buttons.append(self.button_buscar)
posy += space
label = 'Buscar Actor'
self.button_actor = xbmcgui.ControlButton(posx, posy, width, height, label, font=font, alignment=0x00000000,
noFocusTexture='', focusTexture=media_path + selected,
textColor=textcolor, shadowColor=shadow, textOffsetX=offsetx,
textOffsetY=offsety)
self.addControl(self.button_actor)
self.buttons.append(self.button_actor)
posy += space
label = 'Donde Buscar'
self.button_config_search = xbmcgui.ControlButton(posx, posy, width, height, label, font=font,
alignment=0x00000000,
noFocusTexture='', focusTexture=media_path + selected,
textColor=conditional_textcolor, shadowColor=shadow,
textOffsetX=offsetx, textOffsetY=offsety)
self.addControl(self.button_config_search)
self.buttons.append(self.button_config_search)
label=''
self.button_close = xbmcgui.ControlButton(260, 0, 1020, 725, label, noFocusTexture='', focusTexture='')
self.addControl(self.button_close)
def onClick(self, control):
new_item=''
control = self.getControl(control).getLabel()
if control == 'Inicio':
new_item = get_start_page()
elif control == u'Menú Clasico':
new_item = Item(channel='', action='getmainlist', title='Menú Alfa')
elif control == 'Configuracion':
new_item = Item(channel='setting', action="settings")
elif control == 'Peliculas':
new_item = Item(channel='news', action="novedades", extra="peliculas", mode='silent')
elif control == 'Series':
new_item = Item(channel='news', action="novedades", extra="series", mode='silent')
elif control == 'Anime':
new_item = Item(channel='news', action="novedades", extra="anime", mode='silent')
elif control == 'Infantiles':
new_item = Item(channel='news', action="novedades", extra="infantiles", mode='silent')
elif control == 'Documentales':
new_item = Item(channel='news', action="novedades", extra="documentales", mode='silent')
elif control == 'Terror':
new_item = Item(channel='news', action="novedades", extra="terror", mode='silent')
elif control == 'Castellano':
new_item = Item(channel='news', action="novedades", extra="castellano", mode='silent')
elif control == 'Latino':
new_item = Item(channel='news', action="novedades", extra="latino", mode='silent')
elif control == 'Torrents':
new_item = Item(channel='news', action="novedades", extra="torrent", mode='silent')
elif control == 'Canales Activos':
menu_node = jsontools.get_node_from_file('menu_settings_data.json', 'menu')
if 'categoria actual' in menu_node:
category = menu_node['categoria actual']
new_item = Item(channel='news', action="setting_channel", extra=category, menu=True)
elif control == 'Buscar':
new_item = Item(channel='search', action="search")
elif control == 'Buscar Actor':
new_item = Item(channel='tvmoviedb', title="Buscar actor/actriz", action="search_",
search={'url': 'search/person', 'language': 'es', 'page': 1}, star=True)
elif control == 'Donde Buscar':
new_item = Item(channel='search', action="setting_channel")
elif control == '':
self.close()
if new_item !='':
self.run_action(new_item)
def onAction(self, action):
if action == ACTION_PREVIOUS_MENU or action == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
self.close()
if action == ACTION_MOVE_RIGHT or action == ACTION_MOVE_DOWN:
self.focus += 1
if self.focus > len(self.buttons)-1:
self.focus = 0
while True:
id_focus = str(self.buttons[self.focus].getId())
if xbmc.getCondVisibility('[Control.IsVisible(' + id_focus + ')]'):
self.setFocus(self.buttons[self.focus])
break
self.focus += 1
if action == ACTION_MOVE_LEFT or action == ACTION_MOVE_UP:
self.focus -= 1
if self.focus < 0:
self.focus = len(self.buttons) - 1
while True:
id_focus = str(self.buttons[self.focus].getId())
if xbmc.getCondVisibility('[Control.IsVisible(' + id_focus + ')]'):
self.setFocus(self.buttons[self.focus])
break
self.focus -= 1
def run_action(self, item):
logger.info()
if item.menu != True:
self.close()
xbmc.executebuiltin("Container.update(%s)"%launcher.run(item))
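The long run of near-identical ControlButton blocks in onInit() above could be table-driven. A hedged sketch reusing the module's xbmcgui import and media_path, with the same layout constants (the table and function are ours, not the addon's code):

    MENU_LABELS = ['Peliculas', 'Series', 'Anime', 'Infantiles', 'Documentales',
                   'Terror', 'Latino', 'Castellano', 'Torrents']

    def build_menu_buttons(window, posx=0, posy=145, width=260, height=30, space=30):
        buttons = []
        for label in MENU_LABELS:
            button = xbmcgui.ControlButton(
                posx, posy, width, height, label, font='font25_title',
                alignment=0x00000000, noFocusTexture='',
                focusTexture=media_path + 'selected0.png',
                textColor='0xffffd700', shadowColor='0xFF000000',
                textOffsetX=30, textOffsetY=0)
            window.addControl(button)
            buttons.append(button)
            posy += space  # stack entries vertically, as onInit() does
        return buttons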

View File

@@ -1,12 +0,0 @@
{
"id": "teledocumentales",
"name": "Teledocumentales",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "teledocumentales.png",
"thumbnail": "teledocumentales.png",
"categories": [
"documentary"
]
}

View File

@@ -1,109 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="ultimo", title="Últimos Documentales",
url="http://www.teledocumentales.com/", viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, action="ListaCat", title="Listado por Genero",
url="http://www.teledocumentales.com/"))
return itemlist
def ultimo(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
# Extract the entries
patron = '<div class="imagen"(.*?)<div style="clear.both">'
matches = re.compile(patron, re.DOTALL).findall(data)
print "manolo"
print matches
for match in matches:
scrapedtitle = scrapertools.get_match(match, '<img src="[^"]+" alt="([^"]+)"')
scrapedtitle = scrapertools.htmlclean(scrapedtitle)
scrapedurl = scrapertools.get_match(match, '<a href="([^"]+)"')
scrapedthumbnail = scrapertools.get_match(match, '<img src="([^"]+)" alt="[^"]+"')
scrapedplot = scrapertools.get_match(match, '<div class="excerpt">([^<]+)</div>')
itemlist.append(
Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedplot, fanart=scrapedthumbnail))
# Extract the next-page marker
try:
next_page = scrapertools.get_match(data, '<a class="next" href="([^"]+)">')
itemlist.append(Item(channel=item.channel, action="ultimo", title=">> Página siguiente",
url=urlparse.urljoin(item.url, next_page, viewmode="movie_with_plot")))
except:
pass
return itemlist
def ListaCat(item):
logger.info()
url = item.url
data = scrapertools.cachePage(url)
# Extract the entries (folders)
# <div class="slidethumb">
# <a href="http://www.cine-adicto.com/transformers-dark-of-the-moon.html"><img src="http://www.cine-adicto.com/wp-content/uploads/2011/09/Transformers-Dark-of-the-moon-wallpaper.jpg" width="638" alt="Transformers: Dark of the Moon 2011" /></a>
# </div>
patron = '<div id="menu_horizontal">(.*?)<div class="cuerpo">'
matches = re.compile(patron, re.DOTALL).findall(data)
logger.info("hay %d matches" % len(matches))
itemlist = []
for match in matches:
data2 = match
patron = '<li class="cat-item cat-item-.*?<a href="(.*?)".*?>(.*?)</a>.*?</li>'
matches2 = re.compile(patron, re.DOTALL).findall(data2)
logger.info("hay %d matches2" % len(matches2))
for match2 in matches2:
scrapedtitle = match2[1].replace("&#8211;", "-").replace("&amp;", "&").strip()
scrapedurl = match2[0]
scrapedthumbnail = match2[0].replace(" ", "%20")
scrapedplot = ""
itemlist.append(Item(channel=item.channel, action="ultimo", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, fanart=scrapedthumbnail,
viewmode="movie_with_plot"))
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
urlvideo = scrapertools.get_match(data, '<!-- end navigation -->.*?<iframe src="([^"]+)"')
data = scrapertools.cachePage(urlvideo)
url = scrapertools.get_match(data, 'iframe src="([^"]+)"')
itemlist = servertools.find_video_items(data=url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.channel = item.channel
return itemlist
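play() above peels two nested iframes before handing the innermost URL to servertools. A generic sketch of that hop pattern, with fetch_page standing in for scrapertools.cachePage (a hypothetical parameter name, not the channel's API):

import re

def resolve_nested_iframe(html, fetch_page, hops=2):
    # Follow up to `hops` levels of <iframe src="..."> nesting and return
    # the innermost URL; fetch_page(url) must return the page body.
    url = None
    for i in range(hops):
        match = re.search(r'<iframe src="([^"]+)"', html)
        if not match:
            break
        url = match.group(1)
        if i < hops - 1:
            html = fetch_page(url)
    return url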

View File

@@ -557,12 +557,12 @@ def detalles(item):
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color=color5))
# try:
# images['tmdb'] = ob_tmdb.result["images"]
# itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images,
# extra="menu"))
# except:
# pass
try:
images['tmdb'] = ob_tmdb.result["images"]
itemlist.append(item.clone(action="imagenes", title="Lista de Imágenes", text_color=color5, images=images,
extra="menu"))
except:
pass
try:
if item.contentType == "movie" and item.infoLabels["year"] < 2014:

View File

@@ -1,13 +0,0 @@
{
"id": "unsoloclic",
"name": "Unsoloclic",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "unsoloclic.png",
"thumbnail": "unsoloclic.png",
"categories": [
"movie",
"tvshow"
]
}

View File

@@ -1,144 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import scrapertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
item.url = "http://unsoloclic.info"
return novedades(item)
def novedades(item):
logger.info()
itemlist = []
# Download the page
data = scrapertools.cachePage(item.url)
'''
<div class="post-45732 post type-post status-publish format-standard hentry category-2012 category-blu-ray category-mkv-hd720p" id="post-45732">
<h2 class="title"><a href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="bookmark" title="Permanent Link to Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD">Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD</a></h2>
<div class="postdate"><img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/date.png" /> noviembre 5th, 2012
<!--
<img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/user.png" /> unsoloclic
-->
</div>
<div class="entry">
<p><a href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="attachment wp-att-45737"><img src="http://unsoloclic.info/wp-content/uploads/2012/11/Ek-Tha-Tiger-2012.jpg" alt="" title="Ek Tha Tiger (2012)" width="500" height="629" class="aligncenter size-full wp-image-45737" /></a></p>
<h2 style="text-align: center;"></h2>
<div class="readmorecontent">
<a class="readmore" href="http://unsoloclic.info/2012/11/ek-tha-tiger-2012-blu-ray-720p-hd/" rel="bookmark" title="Permanent Link to Pelicula Ek Tha Tiger (2012) BLU-RAY 720p HD">Seguir Leyendo</a>
</div>
</div>
</div><!--/post-45732-->
'''
'''
<div class="post-45923 post type-post status-publish format-standard hentry category-2012 category-blu-ray category-comedia category-drama category-mkv category-mkv-hd720p category-romance tag-chris-messina tag-jenna-fischer tag-lee-kirk tag-the-giant-mechanical-man-pelicula tag-topher-grace" id="post-45923">
<h2 class="title"><a href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/" rel="bookmark" title="Permanent Link to The Giant Mechanical Man (2012) BluRay 720p HD">The Giant Mechanical Man (2012) BluRay 720p HD</a></h2>
<div class="postdate"><img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/date.png" /> diciembre 24th, 2012
<!--
<img src="http://unsoloclic.info/wp-content/themes/TinyWeb/images/user.png" /> deportv
-->
</div>
<div class="entry">
<p style="text-align: center;"><a href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/"><img class="aligncenter size-full wp-image-45924" title="Giant Michanical Man Pelicula Descargar" src="http://unsoloclic.info/wp-content/uploads/2012/12/Giant-Michanical-Man-Pelicula-Descargar.jpg" alt="" width="380" height="500" /></a></p>
<p style="text-align: center;">
<div class="readmorecontent">
<a class="readmore" href="http://unsoloclic.info/2012/12/the-giant-mechanical-man-2012-bluray-720p-hd/" rel="bookmark" title="Permanent Link to The Giant Mechanical Man (2012) BluRay 720p HD">Seguir Leyendo</a>
</div>
</div>
</div><!--/post-45923-->
'''
patron = '<div class="post[^"]+" id="post-\d+">[^<]+'
patron += '<h2 class="title"><a href="([^"]+)" rel="bookmark" title="[^"]+">([^<]+)</a></h2>[^<]+'
patron += '<div class="postdate">.*?</div>[^<]+'
patron += '<div class="entry">[^<]+'
patron += '<p[^<]+<a[^<]+<img.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
'''
<a href="http://unsoloclic.info/page/2/" >&laquo; Peliculas anteriores</a>
'''
patron = '<a href="([^"]+)" >\&laquo\; Peliculas anteriores</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for match in matches:
scrapedtitle = ">> Página siguiente"
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url, match)
scrapedthumbnail = ""
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(Item(channel=item.channel, action="novedades", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
return itemlist
def findvideos(item):
logger.info()
data = scrapertools.cache_page(item.url)
itemlist = []
# <a href="http://67cfb0db.linkbucks.com"><img title="billionuploads" src="http://unsoloclic.info/wp-content/uploads/2012/11/billonuploads2.png" alt="" width="380" height="50" /></a></p>
# <a href="http://1bd02d49.linkbucks.com"><img class="colorbox-57103" title="Freakeshare" alt="" src="http://unsoloclic.info/wp-content/uploads/2013/01/freakshare.png" width="390" height="55" /></a></p>
patron = '<a href="(http.//[a-z0-9]+.linkbucks.c[^"]+)[^>]+><img.*?title="([^"]+)".*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for url, servertag, serverthumb in matches:
itemlist.append(
Item(channel=item.channel, action="play", server="linkbucks", title=servertag + " [linkbucks]", url=url,
thumbnail=serverthumb, plot=item.plot, folder=False))
from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
if videoitem.server != "linkbucks":
videoitem.channel = item.channel
videoitem.action = "play"
videoitem.folder = False
videoitem.title = "[" + videoitem.server + "]"
return itemlist
def play(item):
logger.info()
itemlist = []
if item.server == "linkbucks":
logger.info("Es linkbucks")
# Work out the real link
from servers.decrypters import linkbucks
location = linkbucks.get_long_url(item.url)
logger.info("location=" + location)
# Extract the skip-ad URL from adf.ly
if location.startswith("http://adf"):
# Work out the real link
from servers.decrypters import adfly
location = adfly.get_long_url(location)
logger.info("location=" + location)
from core import servertools
itemlist = servertools.find_video_items(data=location)
for videoitem in itemlist:
videoitem.channel = item.channel
videoitem.folder = False
else:
itemlist.append(item)
return itemlist
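The linkbucks/adf.ly handling above unwraps one shortener that may hide another. Sketched generically, under the assumption that each decrypter exposes a get_long_url-style callable as shown in the code:

def unwrap_shorteners(url, resolvers, max_hops=5):
    # resolvers: list of (url_prefix, resolve_fn) pairs, e.g.
    # [("http://linkbucks", linkbucks.get_long_url), ("http://adf", adfly.get_long_url)]
    for _ in range(max_hops):
        for prefix, resolve in resolvers:
            if url.startswith(prefix):
                url = resolve(url)
                break
        else:
            return url  # no resolver matched, the URL is final
    return url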

View File

@@ -217,7 +217,7 @@ def newest(categoria):
return []
itemlist = scraper(item)
if itemlist[-1].title == "» Siguiente »":
if itemlist[-1].title == "[COLOR crimson]Siguiente >>[/COLOR]":
itemlist.pop()
# Catch the exception so one failing channel does not break the news channel

View File

@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
import re
import urllib
import unicodedata
from core import channeltools
from core import httptools
@@ -11,7 +13,11 @@ from core.item import Item
from platformcode import config, logger
idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"}
HOST = 'http://www.yaske.ro'
domain = "yaske.ro"
HOST = "http://www." + domain
HOST_MOVIES = "http://peliculas." + domain + "/now_playing/"
HOST_TVSHOWS = "http://series." + domain + "/popular/"
HOST_TVSHOWS_TPL = "http://series." + domain + "/tpl"
parameters = channeltools.get_channel_parameters('yaske')
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -26,38 +32,156 @@ def mainlist(item):
item.fanart = fanart_host
thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
url=HOST,
itemlist.append(item.clone(title="Peliculas", text_bold=True, viewcontent='movies',
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
itemlist.append(item.clone(title=" Novedades", action="peliculas", viewcontent='movies',
url=HOST_MOVIES,
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Estrenos", action="peliculas",
url=HOST + "/premiere", thumbnail=thumbnail % 'estrenos'))
itemlist.append(item.clone(title="Género", action="menu_buscar_contenido", text_bold=True,thumbnail=thumbnail % 'generos', viewmode="thumbnails",
itemlist.append(item.clone(title=" Género", action="menu_buscar_contenido", thumbnail=thumbnail % 'generos', viewmode="thumbnails",
url=HOST
))
itemlist.append(item.clone(title=" Buscar película", action="search", thumbnail=thumbnail % 'buscar',
type = "movie" ))
itemlist.append(item.clone(title="", folder=False))
itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
itemlist.append(item.clone(title="Series", text_bold=True, viewcontent='movies',
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Novedades", action="series", viewcontent='movies',
url=HOST_TVSHOWS,
thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
itemlist.append(item.clone(title=" Buscar serie", action="search", thumbnail=thumbnail % 'buscar',
type = "tvshow" ))
return itemlist
def series(item):
logger.info()
itemlist = []
url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
if not page:
page = 1
url_p = item.url
else:
page = int(page) + 1
if "search" in item.url:
url_p += "&page=%s" %page
else:
url_p += "?page=%s" %page
data = httptools.downloadpage(url_p).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '(?s)class="post-item-image btn-play-item".*?'
patron += 'href="(http://series[^"]+)">.*?'
patron += '<img data-original="([^"]+)".*?'
patron += 'glyphicon-play-circle"></i>([^<]+).*?'
patron += 'glyphicon-calendar"></i>([^<]+).*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedepisodes, year, scrapedtitle in matches:
scrapedepisodes = scrapedepisodes.strip()
year = year.strip()
contentSerieName = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s (%s)" %(contentSerieName, scrapedepisodes)
if "series" in scrapedurl:
itemlist.append(Item(channel=item.channel, action="temporadas", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentSerieName=contentSerieName,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic data for every entry using multiple threads
tmdb.set_infoLabels(itemlist, True)
# Add pagination if needed
patron_next_page = 'href="([^"]+)">\s*&raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if matches_next_page and len(itemlist)>0:
itemlist.append(
Item(channel=item.channel, action="series", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_p, folder=True, text_color=color3, text_bold=True))
return itemlist
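Both series() here and peliculas() further down rebuild the request URL from the current page number instead of trusting a scraped next link: strip any existing page parameter, then append page+1, using '&' on search URLs (which already carry a query string) and '?' otherwise. A rough sketch of that rule in isolation:

import re

def next_page_url(url):
    match = re.search(r'page=([0-9]+)', url)
    page = int(match.group(1)) + 1 if match else 1
    base = re.sub(r'[?&]page=[0-9]+', '', url)
    separator = '&' if 'search' in base else '?'
    return base + separator + 'page=%s' % page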
def temporadas(item):
logger.info()
itemlist = []
post = []
data = httptools.downloadpage(item.url).data
patron = 'media-object" src="([^"]+).*?'
patron += 'media-heading">([^<]+).*?'
patron += '<code>(.*?)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedthumbnail, scrapedtitle, scrapedcapitulos in matches:
id = scrapertools.find_single_match(item.url, "yaske.ro/([0-9]+)")
season = scrapertools.find_single_match(scrapedtitle, "[0-9]+")
title = scrapedtitle + " (%s)" %scrapedcapitulos.replace("</code>","").replace("\n","")
post = {"data[season]" : season, "data[id]" : id, "name" : "list_episodes" , "both" : "0", "type" : "template"}
post = urllib.urlencode(post)
item.infoLabels["season"] = season
itemlist.append(item.clone(action = "capitulos",
post = post,
title = title,
url = HOST_TVSHOWS_TPL
))
tmdb.set_infoLabels(itemlist)
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title =""))
itemlist.append(item.clone(action = "add_serie_to_library",
channel = item.channel,
extra = "episodios",
title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url = item.url
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += capitulos(tempitem)
return itemlist
def capitulos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url, post=item.post).data
data = data.replace("<wbr>","")
patron = 'href=."([^"]+).*?'
patron += 'media-heading.">([^<]+).*?'
patron += 'fecha de emisi.*?: ([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapeddate in matches:
scrapedtitle = scrapedtitle + " (%s)" %scrapeddate
episode = scrapertools.find_single_match(scrapedurl, "capitulo-([0-9]+)")
query = item.contentSerieName + " " + scrapertools.find_single_match(scrapedtitle, "\w+")
item.infoLabels["episode"] = episode
itemlist.append(item.clone(action = "findvideos",
title = scrapedtitle.decode("unicode-escape"),
query = query.replace(" ","+"),
url = scrapedurl.replace("\\","")
))
tmdb.set_infoLabels(itemlist)
return itemlist
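temporadas() and capitulos() above drive a server-side template endpoint with form-encoded POSTs rather than scraping extra pages. A minimal sketch of the request body they build (field names taken directly from the code above):

import urllib

def episodes_template_post(show_id, season):
    # POST body for HOST_TVSHOWS_TPL that renders the episode list
    # of one season of the show identified by `show_id`.
    return urllib.urlencode({
        "data[season]": season,
        "data[id]": show_id,
        "name": "list_episodes",
        "both": "0",
        "type": "template",
    })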
def search(item, texto):
logger.info()
itemlist = []
try:
item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
item.extra = ""
itemlist.extend(peliculas(item))
if item.type == "movie":
itemlist.extend(peliculas(item))
else:
itemlist.extend(series(item))
if itemlist[-1].title == ">> Página siguiente":
item_pag = itemlist[-1]
itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
itemlist.append(item_pag)
else:
itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
return itemlist
except:
import sys
for line in sys.exc_info():
@@ -77,7 +201,6 @@ def newest(categoria):
item.url = HOST + "/genre/27/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
@@ -95,8 +218,18 @@ def newest(categoria):
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
if not page:
page = 1
url_p = item.url
else:
page = int(page) + 1
if "search" in item.url:
url_p += "&page=%s" %page
else:
url_p += "?page=%s" %page
data = httptools.downloadpage(url_p).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '(?s)class="post-item-image btn-play-item".*?'
patron += 'href="([^"]+)">.*?'
@@ -105,13 +238,8 @@ def peliculas(item):
patron += 'post(.*?)</div.*?'
patron += 'text-muted f-14">(.*?)</h3'
matches = scrapertools.find_multiple_matches(data, patron)
patron_next_page = 'href="([^"]+)"> &raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if len(matches_next_page) > 0:
url_next_page = item.url + matches_next_page
for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in matches:
query = scrapertools.find_single_match(scrapedurl, 'yaske.ro/[0-9]+/(.*?)/').replace("-","+")
year = year.strip()
patronidiomas = '<img src="([^"]+)"'
matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
@@ -125,28 +253,27 @@ def peliculas(item):
contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
title = "%s %s" % (contentTitle, idiomas_disponibles)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=contentTitle,
thumbnail=scrapedthumbnail, contentTitle=contentTitle, query = query,
infoLabels={"year": year}, text_color=color1))
# Fetch the basic data for every movie using multiple threads
tmdb.set_infoLabels(itemlist)
# Add pagination if needed
if url_next_page:
patron_next_page = 'href="([^"]+)">\s*&raquo;'
matches_next_page = scrapertools.find_single_match(data, patron_next_page)
if matches_next_page and len(itemlist)>0:
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
url=url_next_page, folder=True, text_color=color3, text_bold=True))
url=url_p, folder=True, text_color=color3, text_bold=True))
return itemlist
def menu_buscar_contenido(item):
logger.info(item)
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'Generos.*?</ul>'
data = scrapertools.find_single_match(data, patron)
# Extract the entries
patron = 'href="([^"]+)">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
@@ -160,11 +287,7 @@ def menu_buscar_contenido(item):
folder = True,
viewmode = "movie_with_plot"
))
if item.extra in ['genre', 'audio', 'year']:
return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
else:
return itemlist
return itemlist
def findvideos(item):
@@ -172,21 +295,29 @@ def findvideos(item):
itemlist = []
sublist = []
data = httptools.downloadpage(item.url).data
mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
patron = '(?s)id="online".*?server="([^"]+)"'
mserver = scrapertools.find_single_match(data, patron)
url = "http://olimpo.link/?tmdb=%s&server=%s" %(mtmdb, mserver)
data = httptools.downloadpage(url).data
patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
if not item.query:
item.query = scrapertools.find_single_match(item.url, "peliculas.*?/[0-9]+/([^/]+)").replace("-","+")
url_m = "http://olimpo.link/?q=%s&server=%s" %(item.query, mserver)
patron = 'class="favicon.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
data = httptools.downloadpage(url_m).data
matches = scrapertools.find_multiple_matches(data, patron)
for server, url, idioma, calidad in matches:
if "drive" in server:
server = "gvideo"
sublist.append(item.clone(channel=item.channel, action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip(),
title="Ver en %s %s" %(server, calidad)
))
page = 2
while len(matches)>0:
for server, url, idioma, calidad in matches:
if "drive" in server:
server = "gvideo"
sublist.append(item.clone(action="play", url=url, folder=False, text_color=color1, quality=calidad.strip(),
language=idioma.strip(),
server = server,
title="Ver en %s %s" %(server, calidad)
))
data = httptools.downloadpage(url_m + "&page=%s" %page).data
matches = scrapertools.find_multiple_matches(data, patron)
page +=1
sublist = sorted(sublist, key=lambda Item: Item.quality + Item.server)
for k in ["Español", "Latino", "Ingles - Sub Español", "Ingles"]:
lista_idioma = filter(lambda i: i.language == k, sublist)
if lista_idioma:
@@ -213,6 +344,6 @@ def play(item):
ddd = httptools.downloadpage(item.url).data
url = "http://olimpo.link" + scrapertools.find_single_match(ddd, '<iframe src="([^"]+)')
item.url = httptools.downloadpage(url + "&ge=1", follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist.append(item.clone())
itemlist.append(item.clone(server = ""))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
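The reworked play() resolves the olimpo.link hop without downloading the target page: it requests headers only, disables redirects, and reads the Location header. Isolated as a sketch over the same httptools helper used above:

def resolve_redirect(url):
    # Ask only for headers and do not follow the redirect; the final
    # video URL is whatever the server puts in the Location header.
    response = httptools.downloadpage(url + "&ge=1",
                                      follow_redirects=False,
                                      only_headers=True)
    return response.headers.get("location", "")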

View File

@@ -1,38 +0,0 @@
{
"id": "zpeliculas",
"name": "Zpeliculas",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"banner": "zpeliculas.png",
"thumbnail": "zpeliculas.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,370 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urllib
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
# itemlist.append( Item(channel=item.channel, action="destacadas" , title="Destacadas", url="http://www.zpeliculas.com", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png"))
itemlist.append(
Item(channel=item.channel, action="peliculas", title="Últimas peliculas", url="http://www.zpeliculas.com/",
fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie"))
# itemlist.append( Item(channel=item.channel, action="sugeridas" , title="Películas sugeridas", url="http://www.zpeliculas.com", fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="generos", title="Por género", url="http://www.zpeliculas.com",
fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png"))
itemlist.append(Item(channel=item.channel, action="alfabetico", title="Listado alfabético",
fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscador", url="http://www.zpeliculas.com",
fanart="http://www.zpeliculas.com/templates/mytopV2/images/background.png", viewmode="movie"))
return itemlist
def alfabetico(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="peliculas", title="A", url="http://www.zpeliculas.com/cat/a",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="B", url="http://www.zpeliculas.com/cat/b",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="C", url="http://www.zpeliculas.com/cat/c",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="D", url="http://www.zpeliculas.com/cat/d",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="E", url="http://www.zpeliculas.com/cat/e",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="F", url="http://www.zpeliculas.com/cat/f",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="G", url="http://www.zpeliculas.com/cat/g",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="H", url="http://www.zpeliculas.com/cat/h",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="I", url="http://www.zpeliculas.com/cat/i",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="J", url="http://www.zpeliculas.com/cat/j",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="K", url="http://www.zpeliculas.com/cat/k",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="L", url="http://www.zpeliculas.com/cat/l",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="M", url="http://www.zpeliculas.com/cat/m",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="N", url="http://www.zpeliculas.com/cat/n",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="O", url="http://www.zpeliculas.com/cat/o",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="P", url="http://www.zpeliculas.com/cat/p",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Q", url="http://www.zpeliculas.com/cat/q",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="R", url="http://www.zpeliculas.com/cat/r",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="S", url="http://www.zpeliculas.com/cat/s",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="T", url="http://www.zpeliculas.com/cat/t",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="U", url="http://www.zpeliculas.com/cat/u",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="V", url="http://www.zpeliculas.com/cat/v",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="W", url="http://www.zpeliculas.com/cat/w",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="X", url="http://www.zpeliculas.com/cat/x",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Y", url="http://www.zpeliculas.com/cat/y",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Z", url="http://www.zpeliculas.com/cat/z",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="0", url="http://www.zpeliculas.com/cat/0",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="1", url="http://www.zpeliculas.com/cat/1",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="2", url="http://www.zpeliculas.com/cat/2",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="3", url="http://www.zpeliculas.com/cat/3",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="4", url="http://www.zpeliculas.com/cat/4",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="5", url="http://www.zpeliculas.com/cat/5",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="6", url="http://www.zpeliculas.com/cat/6",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="7", url="http://www.zpeliculas.com/cat/7",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="8", url="http://www.zpeliculas.com/cat/8",
viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="9", url="http://www.zpeliculas.com/cat/9",
viewmode="movie"))
return itemlist
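The 36 near-identical appends in alfabetico() only differ in the title and URL suffix, so an equivalent loop-based sketch (same behaviour, assuming the same Item semantics) would be:

import string

def alfabetico(item):
    itemlist = []
    # One entry per letter A-Z plus one per digit 0-9
    for letter in list(string.ascii_uppercase) + [str(n) for n in range(10)]:
        itemlist.append(Item(channel=item.channel, action="peliculas", title=letter,
                             url="http://www.zpeliculas.com/cat/" + letter.lower(),
                             viewmode="movie"))
    return itemlist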
def generos(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="peliculas", title="Acción",
url="http://www.zpeliculas.com/peliculas/p-accion/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Animación",
url="http://www.zpeliculas.com/peliculas/p-animacion/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Aventura",
url="http://www.zpeliculas.com/peliculas/p-aventura/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Biografía",
url="http://www.zpeliculas.com/peliculas/p-biografia/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Bélico",
url="http://www.zpeliculas.com/peliculas/p-belico/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Ciencia Ficción",
url="http://www.zpeliculas.com/peliculas/p-cienciaficcion/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Comedia",
url="http://www.zpeliculas.com/peliculas/p-comedia/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Crimen",
url="http://www.zpeliculas.com/peliculas/p-crimen/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Drama",
url="http://www.zpeliculas.com/peliculas/p-drama/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Fantasía",
url="http://www.zpeliculas.com/peliculas/p-fantasia/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Histórico",
url="http://www.zpeliculas.com/peliculas/p-historico/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Intriga",
url="http://www.zpeliculas.com/peliculas/p-intriga/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Musical",
url="http://www.zpeliculas.com/peliculas/p-musical/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Romántica",
url="http://www.zpeliculas.com/peliculas/p-romantica/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Terror",
url="http://www.zpeliculas.com/peliculas/p-terror/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Thriller",
url="http://www.zpeliculas.com/peliculas/p-thriller/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Western",
url="http://www.zpeliculas.com/peliculas/p-western/", viewmode="movie"))
itemlist.append(Item(channel=item.channel, action="peliculas", title="Otros",
url="http://www.zpeliculas.com/peliculas/p-otros/", viewmode="movie"))
return itemlist
def search(item, texto):
try:
post = urllib.urlencode({"story": texto, "do": "search", "subaction": "search", "x": "0", "y": "0"})
data = scrapertools.cache_page("http://www.zpeliculas.com", post=post)
patron = '<div class="leftpane">(.*?)<div class="clear"'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for match in matches:
scrapedtitle = scrapertools.find_single_match(match, '<div class="shortname">([^<]+)</div>')
scrapedurl = scrapertools.find_single_match(match, '<a href="([^"]+)"')
scrapedthumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
scrapedyear = scrapertools.find_single_match(match, '<div class="year"[^>]+>([^<]+)</div>')
scrapedidioma = scrapertools.find_single_match(match, 'title="Idioma">([^<]+)</div>')
scrapedcalidad = scrapertools.find_single_match(match,
'<div class="shortname"[^<]+</div[^<]+<div[^>]+>([^<]+)</div>')
title = scrapedtitle + ' (' + scrapedyear + ') [' + scrapedidioma + '] [' + scrapedcalidad + ']'
url = scrapedurl
thumbnail = scrapedthumbnail
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title,
contentThumbnail=thumbnail,
contentType="movie", context=["buscar_trailer"]))
return itemlist
# Catch the exception so one failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = "http://www.zpeliculas.com"
elif categoria == 'infantiles':
item.url = "http://www.zpeliculas.com/peliculas/p-animacion/"
else:
return []
itemlist = peliculas(item)
if itemlist[-1].extra == "next_page":
itemlist.pop()
# Catch the exception so one failing channel does not break the news channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()
# Download the page
body = scrapertools.cachePage(item.url)
data = scrapertools.get_match(body,
'<div class="shortmovies">(.*?)<div class="navigation ignore-select" align="center">')
'''
<div class="leftpane">
<div class="movieposter" title="Descargar Sólo los amantes sobreviven">
<a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html"><img src="http://i.imgur.com/NBPgXrp.jpg" width="110" height="150" alt="Sólo los amantes sobreviven" title="Descargar Sólo los amantes sobreviven" /></a>
<div class="shortname">Sólo los amantes sobreviven</div>
<div class="BDRip">BDRip</div>
</div>
</div>
<div class="rightpane">
<div style="display:block;overflow:hidden;">
<h2 class="title" title="Sólo los amantes sobreviven"><a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html">Sólo los amantes sobreviven</a></h2>
<div style="height:105px; overflow:hidden;">
<div class="small">
<div class="cats" title="Genero"><a href="http://www.zpeliculas.com/peliculas/p-drama/">Drama</a>, <a href="http://www.zpeliculas.com/peliculas/p-fantasia/">Fantasia</a>, <a href="http://www.zpeliculas.com/peliculas/p-romantica/">Romantica</a></div>
<div class="year" title="A&ntilde;o">2013</div>
<div class="ESP" title="Idioma">ESP</div>
<div class="FA" title="Sólo los amantes sobreviven FA Official Website"><a href="http://www.filmaffinity.com/es/film851633.html" target="_blank" title="Sólo los amantes sobreviven en filmaffinity">Sólo los amantes sobreviven en FA</a></div>
</div>
</div>
<div class="clear" style="height:2px;"></div>
<div style="float:right">
'''
patron = '<div class="leftpane">(.*?)<div style="float\:right">'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for match in matches:
scrapedurl = scrapertools.find_single_match(match, '<a href="([^"]+)"')
scrapedthumbnail = scrapertools.find_single_match(match, '<img src="([^"]+)"')
scrapedtitle = scrapertools.find_single_match(match, '<div class="shortname">([^<]+)')
scrapedcalidad = scrapertools.find_single_match(match,
'<div class="shortname">[^<]+</div[^<]+<div class="[^"]+">([^<]+)')
scrapedyear = scrapertools.find_single_match(match, '<div class="year[^>]+>([^<]+)')
scrapedidioma = scrapertools.find_single_match(match,
'<div class="year[^>]+>[^<]+</div[^<]+<div class[^>]+>([^<]+)')
contentTitle = scrapertools.htmlclean(scrapedtitle)
# logger.info("title="+scrapedtitle)
title = contentTitle + ' (' + scrapedyear + ') [' + scrapedidioma + '] [' + scrapedcalidad + ']'
# title = scrapertools.htmlclean(title)
url = scrapedurl
thumbnail = scrapedthumbnail
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
hasContentDetails=True, contentTitle=contentTitle, contentThumbnail=thumbnail, fanart=thumbnail,
contentType="movie", context=["buscar_trailer"]))
next_page = scrapertools.find_single_match(body, '<a href="([^"]+)">Siguiente')
if next_page != "":
itemlist.append(
Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=next_page, thumbnail="",
plot="", show="", viewmode="movie", fanart=thumbnail, extra="next_page"))
return itemlist
def destacadas(item):
logger.info()
# Download the page
data = scrapertools.cachePage(item.url)
data = scrapertools.get_match(data, '<div id="sliderwrapper">(.*?)<div class="genreblock">')
'''
<div class="imageview view-first">
<a href="/templates/mytopV2/blockpro/noimage-full.png" onclick="return hs.expand(this)"><img src="http://i.imgur.com/H4d96Wn.jpg" alt="Ocho apellidos vascos"></a>
<div class="mask">
<h2><a href="/peliculas/p-comedia/1403-ocho-apellidos-vascos.html" title="Ocho apellidos vascos">Ocho apellidos vascos</a></h2>
</div>
'''
patron = '<div class="imageview view-first">.*?<a href=.*?>.*?src="(.*?)" alt="(.*?)"></a>.*?<h2><a href="(.*?)".*?</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
itemlist = []
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
logger.info("title=" + scrapedtitle)
title = scrapedtitle
title = scrapertools.htmlclean(title)
url = "http://www.zpeliculas.com" + scrapedurl
thumbnail = scrapedthumbnail
plot = ""
plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail,
contentType="movie", context=["buscar_trailer"]))
return itemlist
def sugeridas(item):
logger.info()
# Download the page
data = scrapertools.cachePage(item.url)
data = scrapertools.get_match(data, '<ul class="links">(.*?)</ul>')
'''
<li><a href="/peliculas/p-accion/425-instinto-asesino.html" title="Descargar Instinto asesino (The Crew)"><span class="movie-name">Instinto asesino (The Crew)</span><img src="http://i.imgur.com/1xXLz.jpg" width="102" height="138" alt="Instinto asesino (The Crew)" title="Descargar Instinto asesino (The Crew)" /></a></li>
'''
patron = '<li>.*?<a href="(.*?)".*?"movie-name">(.*?)</span><img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
itemlist = []
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
logger.info("title=" + scrapedtitle)
title = scrapedtitle
title = scrapertools.htmlclean(title)
url = "http://www.zpeliculas.com" + scrapedurl
thumbnail = scrapedthumbnail
plot = ""
plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8")
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
show=title, fanart=thumbnail, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail,
contentType="movie", context=["buscar_trailer"]))
return itemlist
def findvideos(item):
logger.info("item=" + item.tostring())
# Download the page to get the plot
data = scrapertools.cachePage(item.url)
item.plot = scrapertools.find_single_match(data, '<div class="contenttext">([^<]+)<').strip()
item.contentPlot = item.plot
logger.info("plot=" + item.plot)
return servertools.find_video_items(item=item, data=data)

View File

@@ -3,6 +3,7 @@
# httptools
# --------------------------------------------------------------------------------
import inspect
import cookielib
import gzip
import os
@@ -15,6 +16,7 @@ from threading import Lock
from core.cloudflare import Cloudflare
from platformcode import config, logger
from platformcode.logger import WebErrorException
cookies_lock = Lock()
@@ -23,7 +25,7 @@ ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")
# Default headers, used when none are specified
default_headers = dict()
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
default_headers["User-Agent"] = "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
default_headers["Accept-Charset"] = "UTF-8"
@@ -205,8 +207,18 @@ def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=Tr
logger.info("Response error: %s" % (response["error"]))
logger.info("Response data length: %s" % (len(response["data"])))
logger.info("Response headers:")
server_cloudflare = ""
for header in response["headers"]:
logger.info("- %s: %s" % (header, response["headers"][header]))
if "cloudflare" in response["headers"][header]:
server_cloudflare = "cloudflare"
is_channel = inspect.getmodule(inspect.currentframe().f_back)
# A 4xx or 5xx error raises an exception
# response["code"] = 400
if type(response["code"]) == int and "\\servers\\" not in str(is_channel):
if response["code"] > 399 and (server_cloudflare == "cloudflare" and response["code"] != 503):
raise WebErrorException(urlparse.urlparse(url)[1])
if cookies:
save_cookies()
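The new gate raises WebErrorException for channel requests that come back as HTTP errors, while letting Cloudflare's 503 challenge page through for the Cloudflare handler, and exempting calls made from server modules. A sketch of that decision, assuming the intent stated in the inline comment rather than the exact committed expression:

def should_raise(code, server_cloudflare, called_from_server_module):
    # Server modules handle their own errors, and Cloudflare's 503
    # challenge page is resolved elsewhere, so neither should raise.
    if called_from_server_module or not isinstance(code, int):
        return False
    if server_cloudflare == "cloudflare" and code == 503:
        return False
    return code > 399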

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Item is the object we use for representing data
# --------------------------------------------------------------------------------
@@ -170,8 +170,6 @@ class Item(object):
# When modifying any of these content... attributes
if name in ["contentTitle", "contentPlot", "plot", "contentSerieName", "contentType", "contentEpisodeTitle",
"contentSeason", "contentEpisodeNumber", "contentThumbnail", "show", "contentQuality", "quality"]:
# ... flag hasContentDetails as "true"...
self.__dict__["hasContentDetails"] = True
# ...and update infoLabels
if name == "contentTitle":
self.__dict__["infoLabels"]["title"] = value
@@ -236,10 +234,6 @@ class Item(object):
self.__dict__["viewcontent"] = viewcontent
return viewcontent
# Default value for hasContentDetails
elif name == "hasContentDetails":
return False
# values stored in infoLabels
elif name in ["contentTitle", "contentPlot", "contentSerieName", "show", "contentType", "contentEpisodeTitle",
"contentSeason", "contentEpisodeNumber", "contentThumbnail", "plot", "duration",

View File

@@ -104,7 +104,7 @@ def get_node_from_file(name_file, node, path=None):
if node in dict_data:
dict_node = dict_data[node]
logger.debug("dict_node: %s" % dict_node)
#logger.debug("dict_node: %s" % dict_node)
return dict_node

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Server management
# --------------------------------------------------------------------------------
@@ -75,7 +75,6 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
@param sort: indicates whether the resulting list should be sorted according to the favourite servers list
@type sort: bool
"""
server_stats = {}
# Iterate over the servers
for serverid in get_servers_list().keys():
server_parameters = get_server_parameters(serverid)
@@ -90,7 +89,6 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
for x in range(len(match.groups())):
url = url.replace("\\%s" % (x + 1), match.groups()[x])
server_stats[serverid] = "found"
for item in itemlist:
if match.group() in item.url:
if not item.contentThumbnail:
@@ -102,8 +100,6 @@ def get_servers_itemlist(itemlist, fnc=None, sort=False):
else:
item.url = url
save_server_stats(server_stats, "find_videos")
# Remove the disabled servers
itemlist = filter(lambda i: not i.server or is_server_enabled(i.server), itemlist)
@@ -188,10 +184,6 @@ def findvideosbyserver(data, serverid):
devuelve.append(value)
logger.info(msg)
# Save statistics
if devuelve:
save_server_stats({serverid: "found"}, "find_videos")
return devuelve
@@ -324,11 +316,8 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
try:
logger.info("Invocando a %s.get_video_url" % server)
response = serverid.get_video_url(page_url=url, video_password=video_password)
if response:
save_server_stats({server: "sucess"}, "resolve")
video_urls.extend(response)
except:
save_server_stats({server: "error"}, "resolve")
logger.error("Error al obtener la url en modo free")
error_messages.append("Se ha producido un error en %s" % server_name)
import traceback
@@ -343,16 +332,12 @@ def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialo
password=config.get_setting("password", server=opcion),
video_password=video_password)
if response and response[0][1]:
if opcion == server:
save_server_stats({server: "sucess"}, "resolve")
video_urls.extend(response)
elif response and response[0][0]:
error_messages.append(response[0][0])
else:
error_messages.append("Se ha producido un error en %s" % server_name)
except:
if opcion == server:
save_server_stats({server: "error"}, "resolve")
logger.error("Error en el servidor: %s" % opcion)
error_messages.append("Se ha producido un error en %s" % server_name)
import traceback
@@ -720,41 +705,3 @@ def filter_servers(servers_list):
servers_list = servers_list_filter
return servers_list
def save_server_stats(stats, type="find_videos"):
if not config.get_setting("server_stats"):
return
stats_file = os.path.join(config.get_data_path(), "server_stats.json")
today = datetime.datetime.now().strftime("%Y%m%d")
# Read the file
try:
server_stats = jsontools.load(open(stats_file, "rb").read())
except:
server_stats = {"created": time.time(), "data": {}}
# Update the data
for server in stats:
if not server in server_stats["data"]:
server_stats["data"][server] = {}
if not today in server_stats["data"][server]:
server_stats["data"][server][today] = {"find_videos": {"found": 0}, "resolve": {"sucess": 0, "error": 0}}
server_stats["data"][server][today][type][stats[server]] += 1
# Write the file back
open(stats_file, "wb").write(jsontools.dump(server_stats))
# Send to the server (upload deliberately disabled: the early return skips the block below)
return
if time.time() - server_stats["created"] > 86400:  # 86400 seconds = 1 day
from core import httptools
if httptools.downloadpage("url servidor", headers={'Content-Type': 'application/json'},
post=jsontools.dump(server_stats)).sucess:
os.remove(stats_file)
logger.info("Datos enviados correctamente")
else:
logger.info("No se han podido enviar los datos")

View File

@@ -268,8 +268,9 @@ def save_tvshow(item, episodelist):
# Create tvshow.nfo, if it does not exist, with the head_nfo, the show info and watched-episode marks
logger.info("Creando tvshow.nfo: " + tvshow_path)
head_nfo = scraper.get_nfo(item)
item_tvshow = Item(title=item.contentTitle, channel="videolibrary", action="get_seasons",
item.infoLabels['mediatype'] = "tvshow"
item.infoLabels['title'] = item.contentSerieName
item_tvshow = Item(title=item.contentSerieName, channel="videolibrary", action="get_seasons",
fanart=item.infoLabels['fanart'], thumbnail=item.infoLabels['thumbnail'],
infoLabels=item.infoLabels, path=path.replace(TVSHOWS_PATH, ""))
item_tvshow.library_playcounts = {}
@@ -294,7 +295,6 @@ def save_tvshow(item, episodelist):
if item.channel != "downloads":
item_tvshow.active = 1  # so it refreshes daily when videolibrary_service runs
filetools.write(tvshow_path, head_nfo + item_tvshow.tojson())
if not episodelist:
@@ -439,7 +439,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
news_in_playcounts["season %s" % e.contentSeason] = 0
# Mark the show as not watched
# logger.debug("serie " + serie.tostring('\n'))
news_in_playcounts[serie.contentTitle] = 0
news_in_playcounts[serie.contentSerieName] = 0
else:
logger.info("Sobreescrito: %s" % json_path)

plugin.video.alfa/icon.png Executable file → Normal file
View File

plugin.video.alfa/platformcode/launcher.py Executable file → Normal file
View File

@@ -14,6 +14,7 @@ from core import videolibrarytools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode.logger import WebErrorException
def start():
@@ -37,8 +38,18 @@ def run(item=None):
# If no item, this is mainlist
else:
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
if config.get_setting("start_page"):
if not config.get_setting("custom_start"):
category = config.get_setting("category").lower()
item = Item(channel="news", action="novedades", extra=category, mode = 'silent')
else:
from channels import side_menu
item = Item()
item = side_menu.check_user_home(item)
item.start = True
else:
item = Item(channel="channelselector", action="getmainlist", viewmode="movie")
logger.info(item.tostring())
try:
@@ -264,6 +275,11 @@ def run(item=None):
else:
logger.info("Executing channel '%s' method" % item.action)
itemlist = getattr(channel, item.action)(item)
# if item.start:
# menu_icon = get_thumb('menu.png')
# menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon,
# title='Menu')
# itemlist.insert(0, menu)
platformtools.render_items(itemlist, item)
except urllib2.URLError, e:
@@ -281,7 +297,19 @@ def run(item=None):
logger.error("Codigo de error HTTP : %d" % e.code)
# "El sitio web no funciona correctamente (error http %d)"
platformtools.dialog_ok("alfa", config.get_localized_string(30051) % e.code)
except WebErrorException, e:
import traceback
logger.error(traceback.format_exc())
patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\",
"\\\\") + '([^.]+)\.py"'
canal = scrapertools.find_single_match(traceback.format_exc(), patron)
platformtools.dialog_ok(
"Error en el canal " + canal,
"La web de la que depende parece no estar disponible, puede volver a intentarlo, "
"si el problema persiste verifique mediante un navegador la web: %s. "
"Si la web funciona correctamente informe el error en: www.alfa-addon.com" %(e))
except:
import traceback
logger.error(traceback.format_exc())
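The WebErrorException handler above recovers the failing channel's name by matching the channels/<name>.py path inside the formatted traceback. A self-contained sketch of that extraction (the sample path is illustrative only, not taken from a real log):

import re

sample = 'File "/home/user/.kodi/addons/plugin.video.alfa/channels/yaske.py", line 120'
canal = re.search(r'channels[/\\]([^.]+)\.py"', sample)
print(canal.group(1) if canal else "")  # -> yaske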

View File

@@ -76,3 +76,8 @@ def error(texto=""):
xbmc.log("######## ERROR #########", xbmc.LOGERROR)
xbmc.log(texto, xbmc.LOGERROR)
class WebErrorException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)

View File

@@ -18,6 +18,7 @@ import xbmcgui
import xbmcplugin
from core.item import Item
from platformcode import logger
from channelselector import get_thumb
def dialog_ok(heading, line1, line2="", line3=""):
@@ -95,15 +96,21 @@ def render_items(itemlist, parent_item):
"""
# If itemlist is not a list, bail out
if not type(itemlist) == list:
return
if parent_item.start:
menu_icon = get_thumb('menu.png')
menu = Item(channel="channelselector", action="getmainlist", viewmode="movie", thumbnail=menu_icon,
title='Menu')
itemlist.insert(0, menu)
# If there are no items, show a notice
if not len(itemlist):
itemlist.append(Item(title="No hay elementos que mostrar"))
# Iterate over the itemlist
for item in itemlist:
# logger.debug(item)
# If the item has no category, inherit the parent item's
if item.category == "":
item.category = parent_item.category
@@ -181,7 +188,6 @@ def render_items(itemlist, parent_item):
if config.get_setting("forceview"):
# ...force it according to viewcontent
xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent)
# logger.debug(parent_item)
elif parent_item.channel not in ["channelselector", ""]:
# ...or according to the channel
xbmcplugin.setContent(int(sys.argv[1]), "movies")
@@ -199,9 +205,12 @@ def render_items(itemlist, parent_item):
if config.get_setting("forceview"):
viewmode_id = get_viewmode_id(parent_item)
xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id)
if parent_item.mode in ['silent', 'get_cached', 'set_cache','finish']:
xbmc.executebuiltin("Container.SetViewMode(500)")
def get_viewmode_id(parent_item):
# viewmode_json should be stored in a file, with a method letting the user set preferences for:
# user_files, user_movies, user_tvshows, user_season and user_episodes.
viewmode_json = {'skin.confluence': {'default_files': 50,
@@ -393,7 +402,6 @@ def set_context_commands(item, parent_item):
(sys.argv[0], item.clone(channel="favorites", action="addFavourite",
from_channel=item.channel,
from_action=item.action).tourl())))
# Search in other channels
if item.contentType in ['movie', 'tvshow'] and item.channel != 'search':
# Search in other channels
@@ -406,8 +414,16 @@ def set_context_commands(item, parent_item):
item.clone(channel='search',
action="do_search",
from_channel=item.channel,
contextual=True).tourl())))
contextual=True).tourl())))
# Set as start page
if config.get_setting('start_page'):
if item.action not in ['findvideos', 'play']:
context_commands.insert(0, ("[COLOR 0xffccff00]Definir como pagina de inicio[/COLOR]",
"XBMC.RunPlugin(%s?%s)" % (
sys.argv[0], Item(channel='side_menu',
action="set_custom_start",
parent=item.tourl()).tourl())))
if item.channel != "videolibrary":
# Añadir Serie a la videoteca
@@ -468,7 +484,19 @@ def set_context_commands(item, parent_item):
context_commands.append(("Super Favourites Menu",
"XBMC.RunScript(special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py)"))
return sorted(context_commands, key=lambda comand: comand[0])
context_commands = sorted(context_commands, key=lambda comand: comand[0])
# Quick menu
context_commands.insert(0,("[COLOR 0xffccff00]<Menú Rápido>[/COLOR]",
"XBMC.Container.Update (%s?%s)" % (sys.argv[0], Item(channel='side_menu',
action="open_menu",
parent=parent_item.tourl()).tourl(
))))
return context_commands
def is_playing():

View File

@@ -805,7 +805,7 @@ def ask_set_content(flag, silent=False):
if not silent:
heading = "Alfa Auto-configuración"
linea1 = "¿Desea que Alfa auto-configure la videteoca de Kodi?"
linea1 = "¿Desea que Alfa auto-configure la videoteca de Kodi?"
linea2 = "Si pulsa 'No' podra hacerlo desde 'Configuración > Preferencia > Rutas'."
if platformtools.dialog_yesno(heading, linea1, linea2):
do_config()

plugin.video.alfa/resources/settings.xml Executable file → Normal file
View File

@@ -11,7 +11,6 @@
<setting id="resolve_priority" type="enum" label="Método prioritario" values="Free primero|Premium primero|Debriders primero" default="0"/>
<setting id="resolve_stop" type="bool" label="Dejar de buscar cuando encuentre una opción" default="true"/>
<setting id="hidepremium" type="bool" label="Ocultar servidores de pago sin cuenta" default="false"/>
<setting id="server_stats" type="bool" label="Enviar estadisticas sobre el uso de servidores" default="true"/>
<setting type="sep"/>
<setting label="Canales para adultos" type="lsep"/>
<setting id="adult_aux_intro_password" type="text" label="Contraseña (por defecto 0000):" option="hidden" default=""/>
@@ -38,6 +37,16 @@
<setting id="videolibrary_kodi_flag" type="number" label="" default="0" visible="false"/>
<setting id="videolibrary_kodi" type="bool" label="Autoconfigurar videoteca de XBMC/Kodi para contenido de Alfa" enable="lt(-1,2)+eq(0,false)" default="false"/>
</category>
<category label="Pagina de inicio">
<setting id="start_page" type="bool" label="Activar pagina de inicio" default="false"/>
<setting id="custom_start" type="bool" label="Personalizado (seleccionar desde un canal)" default="false"
visible="eq(-1,True)"/>
<setting id="news_start" type="bool" label="Mostrar Novedades" default="false" visible="eq(-2,True)"
enable="eq(-1,False)+eq(-2,True"/>
<setting id="category" type="labelenum" label="Categoria"
values="Peliculas|Series|Anime|Infantiles|Documentales|Terror|Castellano|Latino|Torrent"
default="Peliculas" visible="eq(-3,True)+eq(-1,True)+eq(-2,False)" enable="eq(-3,True)+eq(-1,True)+(-2,false)"/>
</category>
<category label="Opciones Visuales">
<setting id="icon_set" type="labelenum" label="Set de iconos" values="default|dark" default="default"/>
<setting id="infoplus_set" type="labelenum" label="Opción visual Infoplus" values="Sin animación|Con animación" default="Sin animación"/>

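Kodi evaluates the visible/enable conditions in the new category with position-relative references (eq(-1,...) points one setting up), which is why the attribute fixes above are offset-sensitive. On the Python side the same ids are read through config.get_setting; a hedged sketch of how a start-page dispatcher might branch on them (the helper and its policy are illustrative, only the ids come from the XML):

from platformcode import config  # repo-style import (assumed path)

def get_start_page_mode():
    # ids match the settings.xml block above; the branching order is
    # an assumption, not code from this diff.
    if not config.get_setting('start_page'):
        return None
    if config.get_setting('custom_start'):
        return 'custom'                      # item chosen from a channel
    if config.get_setting('news_start'):
        return 'news'                        # open "Novedades"
    return config.get_setting('category')    # labelenum, e.g. "Peliculas"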

@@ -0,0 +1,27 @@
<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<window>
<coordinates>
<left>0</left>
<top>0</top>
</coordinates>
<zorder>1</zorder>
<controls>
<control type="image">
<animation type="WindowOpen" reversible="false">
<effect type="slide" start="-200" end="0" time="80"/>
</animation>
<left>0</left>
<top>0</top>
<width>260</width>
<height>725</height>
<texture border="2">side_menu/bg0.png</texture>
</control>
<control type="image">
<left>25</left>
<top>30</top>
<width>200</width>
<height>70</height>
<texture border="2">side_menu/logo.png</texture>
</control>
</controls>
</window>

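This new skin file draws the side-menu backdrop and logo. A window XML like it is normally loaded through xbmcgui.WindowXML; the class and file names below are placeholders, since the loading code is not part of this diff:

import xbmcgui

class SideMenuWindow(xbmcgui.WindowXML):
    # The controls are declared entirely in the XML above, so an empty
    # subclass is enough to display it.
    pass

# window = SideMenuWindow('DialogSideMenu.xml', addon_path)  # names assumed
# window.doModal()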
Binary image added (2.4 KiB)
Binary image added (24 KiB)
Binary image added (375 B)


@@ -1,42 +1,43 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "stormo.tv/(?:videos/|embed/)([0-9]+)",
"url": "http://stormo.tv/embed/\\1"
}
]
},
"free": true,
"id": "stormo",
"name": "stormo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/mTYCw5E.png"
}
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(https://bdupload.info/[A-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "bdupload",
"name": "bdupload",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s18.postimg.org/68colqvyx/logo-bdupload.png",
"version": 1
}


@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
import time
from core import httptools
from core import scrapertools
from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Archive no Encontrado" in data:
return False, "[bdupload] El fichero ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
post = ""
patron = '(?s)type="hidden" name="([^"]+)".*?value="([^"]*)"'
match = scrapertools.find_multiple_matches(data, patron)
for nombre, valor in match:
post += nombre + "=" + valor + "&"
time.sleep(1)
data1 = httptools.downloadpage(page_url, post = post, headers = headers).data
patron = "window.open\('([^']+)"
file = scrapertools.find_single_match(data1, patron)
file += "|User-Agent=" + headers['User-Agent']
video_urls = []
videourl = file
video_urls.append([".MP4 [bdupload]", videourl])
return video_urls

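The new bdupload connector builds its POST body by concatenating every hidden form field with "=" and "&". An equivalent, slightly more defensive variant URL-encodes the pairs (Python 2 urllib, matching the codebase); this is a sketch, not code from the commit:

import urllib
from core import scrapertools

def hidden_fields_to_post(data):
    # Same scrape as get_video_url above, but values are encoded so a
    # literal '&' or '=' inside a value cannot corrupt the body.
    patron = '(?s)type="hidden" name="([^"]+)".*?value="([^"]*)"'
    pairs = scrapertools.find_multiple_matches(data, patron)
    return urllib.urlencode(pairs)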

@@ -1,42 +1,43 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(?:divxstage|cloudtime).[^/]+/video/([^\"' ]+)",
"url": "http://www.cloudtime.to/embed/?v=\\1"
}
]
},
"free": true,
"id": "divxstage",
"name": "divxstage",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_divxstage.png"
}
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://biter.tv/v/[A-z0-9]+)",
"url": "\\1"
}
]
},
"free": true,
"id": "bitertv",
"name": "Bitertv",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s18.postimg.org/f56rayqq1/logo-bitertv.png",
"version": 1
}


@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Archive no Encontrado" in data:
return False, "[bitertv] El fichero ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
patron = "(?s)file: '([^']+)"
file = scrapertools.find_single_match(data, patron)
video_urls.append([".MP4 [bitertv]", file])
return video_urls

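bitertv and bdupload both implement the two-function contract every server connector in this repo exposes: test_video_exists returns a (bool, message) pair and get_video_url returns a list of [label, url] entries. A caller-side sketch (illustrative):

exists, message = test_video_exists(page_url)
if exists:
    for label, url in get_video_url(page_url):
        logger.info("%s -> %s" % (label, url))
else:
    logger.info(message)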

@@ -8,7 +8,7 @@
"url": "https://www.bitporno.com/e/\\1"
},
{
"pattern": "raptu.com/(?:\\?v=|embed/|e/)([A-z0-9]+)",
"pattern": "raptu.com/(?:\\?v=|embed/|e/|v/)([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
}
]


@@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
videourl = scrapertools.find_multiple_matches(data, 'file":"([^"]+).*?label":"([^"]+)')
videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?data-res="([^"]+)')
scrapertools.printMatches(videourl)
for scrapedurl, scrapedquality in videourl:
if "loadthumb" in scrapedurl:


@@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
host = "http://www.cloudtime.to"
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url.replace('/embed/?v=', '/video/')).data
if "This file no longer exists" in data:
return False, "El archivo no existe<br/>en divxstage o ha sido borrado."
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
if "divxstage.net" in page_url:
page_url = page_url.replace("divxstage.net", "cloudtime.to")
data = httptools.downloadpage(page_url).data
video_urls = []
videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]')
if not videourls:
videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]')
for videourl in videourls:
if videourl.endswith(".mpd"):
id = scrapertools.find_single_match(videourl, '/dash/(.*?)/')
videourl = "http://www.cloudtime.to/download.php%3Ffile=mm" + "%s.mp4" % id
videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl)
ext = scrapertools.get_filename_from_url(videourl)[-4:]
videourl = videourl.replace("%3F", "?") + \
"|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0"
video_urls.append([ext + " [cloudtime]", videourl])
return video_urls


@@ -31,13 +31,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
'Cookie': ''}
data = httptools.downloadpage(page_url, headers=headers, replace_headers=True).data
data = httptools.downloadpage(page_url, cookies=False).data
data = data.replace("\n","")
cgi_counter = scrapertools.find_single_match(data, """(?is)src=.(https://www.flashx.tv/counter.cgi.*?[^(?:'|")]+)""")
cgi_counter = cgi_counter.replace("%0A","").replace("%22","")
playnow = scrapertools.find_single_match(data, 'https://www.flashx.tv/dl[^"]+')
# Para obtener el f y el fxfx
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/jss/coder.js.*?[^(?:'|")]+)""")
js_fxfx = "https://www." + scrapertools.find_single_match(data.replace("//","/"), """(?is)(flashx.tv/js\w+/c\w+.*?[^(?:'|")]+)""")
data_fxfx = httptools.downloadpage(js_fxfx).data
mfxfx = scrapertools.find_single_match(data_fxfx, 'get.*?({.*?})').replace("'","").replace(" ","")
matches = scrapertools.find_multiple_matches(mfxfx, '(\w+):(\w+)')
@@ -63,8 +63,8 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers['X-Requested-With'] = 'XMLHttpRequest'
# Obligatorio descargar estos 2 archivos, porque si no, muestra error
httptools.downloadpage(coding_url, headers=headers, replace_headers=True)
httptools.downloadpage(cgi_counter, headers=headers, replace_headers=True)
httptools.downloadpage(coding_url, cookies=False)
httptools.downloadpage(cgi_counter, cookies=False)
try:
time.sleep(int(wait_time) + 1)
@@ -73,7 +73,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
headers.pop('X-Requested-With')
headers['Content-Type'] = 'application/x-www-form-urlencoded'
data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
data = httptools.downloadpage(playnow, post).data
# Si salta aviso, se carga la pagina de comprobacion y luego la inicial
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
@@ -81,7 +81,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
try:
data = httptools.downloadpage(url_reload, cookies=False).data
data = httptools.downloadpage(playnow, post, headers, replace_headers=True).data
data = httptools.downloadpage(playnow, post, cookies=False).data
# LICENSE GPL3, de alfa-addon: https://github.com/alfa-addon/ ES OBLIGATORIO AÑADIR ESTAS LÍNEAS
except:
pass
@@ -92,15 +92,13 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
try:
match = jsunpack.unpack(match)
match = match.replace("\\'", "'")
# {src:\'https://bigcdn.flashx1.tv/cdn25/5k7xmlcjfuvvjuw5lx6jnu2vt7gw4ab43yvy7gmkvhnocksv44krbtawabta/normal.mp4\',type:\'video/mp4\',label:\'SD\',res:360},
media_urls = scrapertools.find_multiple_matches(match, "{src:'([^']+)'.*?,label:'([^']+)'")
subtitle = ""
for media_url, label in media_urls:
if media_url.endswith(".srt") and label == "Spanish":
try:
from core import filetools
data = scrapertools.downloadpage(media_url)
data = httptools.downloadpage(media_url)
subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
filetools.write(subtitle, data)
except:

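One caveat in the subtitle fix above: httptools.downloadpage returns a response object, while the old scrapertools.downloadpage returned the body, so filetools.write most likely needs the .data attribute. That is an assumption based on every other downloadpage call in this diff; the corrected shape would be:

data = httptools.downloadpage(media_url).data  # .data added, see note above
subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
filetools.write(subtitle, data)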

@@ -18,7 +18,8 @@ def test_video_exists(page_url):
return False, "[Gamovideo] El archivo no existe o ha sido borrado"
if "Video is processing now" in data:
return False, "[Gamovideo] El video está procesándose en estos momentos. Inténtelo mas tarde."
if "File is awaiting for moderation" in data:
return False, "[Gamovideo] El video está esperando por moderación."
return True, ""


@@ -9,8 +9,6 @@ from platformcode import logger
def test_video_exists(page_url):
if 'googleusercontent' in page_url:
return True, ""
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
if "no+existe" in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
@@ -20,7 +18,10 @@ def test_video_exists(page_url):
return False, "[gvideo] No tienes permiso para acceder a este video"
if "Se ha producido un error" in response.data:
return False, "[gvideo] Se ha producido un error en el reproductor de google"
if "No+se+puede+procesar+este" in response.data:
return False, "[gvideo] No se puede procesar este video"
if response.code == 429:
return False, "[gvideo] Demasiadas conexiones al servidor, inténtelo después"
return True, ""


@@ -1,42 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "idowatch.net/(?:embed-)?([a-z0-9]+)",
"url": "http://idowatch.net/\\1.html"
}
]
},
"free": true,
"id": "idowatch",
"name": "idowatch",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_idowatch.png"
}


@@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
if "File Not Found" in data:
return False, "[Idowatch] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
mediaurl = scrapertools.find_single_match(data, ',{file:(?:\s+|)"([^"]+)"')
if not mediaurl:
matches = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
matchjs = jsunpack.unpack(matches).replace("\\", "")
mediaurl = scrapertools.find_single_match(matchjs, ',{file:(?:\s+|)"([^"]+)"')
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(mediaurl)[-4:] + " [idowatch]", mediaurl])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls


@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "nosvideo.com/(?:\\?v=|vj/video.php\\?u=|)([a-z0-9]+)",
"url": "http://nosvideo.com/vj/videomain.php?u=\\1==530"
},
{
"pattern": "nosupload.com(/\\?v\\=[a-z0-9]+)",
"url": "http://nosvideo.com\\1"
}
]
},
"free": true,
"id": "nosvideo",
"name": "nosvideo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}


@@ -1,41 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
if "404 Page no found" in data:
return False, "[nosvideo] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
# Lee la URL
data = scrapertools.cache_page(page_url)
urls = scrapertools.find_multiple_matches(data, ":'(http:\/\/.+?(?:v.mp4|.smil))")
urls = set(urls)
for media_url in urls:
if ".smil" in media_url:
data = scrapertools.downloadpage(media_url)
rtmp = scrapertools.find_single_match(data, '<meta base="([^"]+)"')
playpath = scrapertools.find_single_match(data, '<video src="([^"]+)"')
media_url = rtmp + " playpath=" + playpath
filename = "rtmp"
else:
filename = scrapertools.get_filename_from_url(media_url)[-4:]
video_urls.append([filename + " [nosvideo]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls


@@ -1,45 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(nowdownload.\\w{2}]/dl/[a-z0-9]+)",
"url": "http://www.\\1"
}
]
},
"free": false,
"id": "nowdownload",
"name": "nowdownload",
"premium": [
"realdebrid"
],
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "server_nowdownload.png"
}


@@ -1,36 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
'''
<a href="http://f02.nowdownload.co/dl/91efaa9ec507ef4de023cd62bb9a0fe2/50ab76ac/6711c9c90ebf3_family.guy.s11e02.italian.subbed.hdtv.xvid_gannico.avi" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>
'''
data = scrapertools.cache_page(page_url)
logger.debug("data:" + data)
try:
url = scrapertools.get_match(data,
'<a href="([^"]*)" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>')
except:
# $.get("/api/token.php?token=7e1ab09df2775dbea02506e1a2651883");
token = scrapertools.get_match(data, '(/api/token.php\?token=[^"]*)')
logger.debug("token:" + token)
d = scrapertools.cache_page("http://www.nowdownload.co" + token)
url = scrapertools.get_match(data, 'expiryText: \'<a class="btn btn-danger" href="([^"]*)')
logger.debug("url_1:" + url)
data = scrapertools.cache_page("http://www.nowdownload.co" + url)
logger.debug("data:" + data)
# <a href="http://f03.nowdownload.co/dl/8ec5470153bb7a2177847ca7e1638389/50ab71b3/f92882f4d33a5_squadra.antimafia_palermo.oggi.4x01.episodio.01.ita.satrip.xvid_upz.avi" class="btn btn-success">Click here to download !</a>
url = scrapertools.get_match(data, '<a href="([^"]*)" class="btn btn-success">Click here to download !</a>')
logger.debug("url_final:" + url)
video_urls = [url]
return video_urls


@@ -1,32 +0,0 @@
{
"active": true,
"free": true,
"id": "pcloud",
"name": "pcloud",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}


@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
if "Invalid link" in data: return False, "[pCloud] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
data = scrapertools.cache_page(page_url)
media_url = scrapertools.find_single_match(data, '"downloadlink":.*?"([^"]+)"')
media_url = media_url.replace("\\", "")
video_urls = []
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [pCloud]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls


@@ -1,6 +1,8 @@
# -*- coding: utf-8 -*-
import re
import base64
import urllib
from core import httptools
from core import scrapertools
@@ -12,17 +14,19 @@ host = "http://powvideo.net/"
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
referer = page_url.replace('iframe', 'preview')
data = httptools.downloadpage(page_url, headers={'referer': referer}).data
if data == "File was deleted":
return False, "[powvideo] El video ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
#logger.info("(page_url='%s')" % page_url)
referer = page_url.replace('iframe', 'preview')
data = httptools.downloadpage(page_url, headers={'referer': referer}).data
_0xa3e8 = scrapertools.find_single_match(data, 'var _0xa3e8=(\[[^;]+\]);')
_0xa3e8 = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
@@ -32,14 +36,11 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls.append([".mp4" + " [powvideo]", S(_0xa3e8).decode(url)])
video_urls.sort(key=lambda x: x[0], reverse=True)
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
class S:
def __init__(self, _0xa3e8):
def __init__(self, var):
self.r = None
self.s = None
self.k = None
@@ -48,8 +49,48 @@ class S:
self.b = None
self.d = None
_0xa3e8 = eval(_0xa3e8)
self.t(_0xa3e8[14] + _0xa3e8[15] + _0xa3e8[14] + _0xa3e8[15], _0xa3e8[16])
var = eval(var)
for x in range(0x1f0, 0, -1):
var.append(var.pop(0))
self.var = var
self.t(
self.decode_index('0x22', '!UJH') +
self.decode_index('0x23', 'NpE)') +
self.decode_index('0x24', '4uT2') +
self.decode_index('0x23', 'NpE)'),
self.decode_index('0x25', '@ZC2')
)
def decode_index(self, index, key):
b64_data = self.var[int(index, 16)];
result = ''
_0xb99338 = 0x0
_0x25e3f4 = 0x0
data = base64.b64decode(b64_data)
data = urllib.unquote(data).decode('utf8')
_0x5da081 = [x for x in range(0x100)]
for x in range(0x100):
_0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
_0x139847 = _0x5da081[x]
_0x5da081[x] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
_0xb99338 = 0x0
for _0x11ebc5 in range(len(data)):
_0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
_0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
_0x139847 = _0x5da081[_0x25e3f4]
_0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])
return result
def decode(self, url):
_hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]

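The decode_index helper added here is plain RC4: the first loop is the key schedule, the second generates the keystream and XORs it over the base64/URL-decoded table entry. A standalone equivalent without the obfuscated names (Python 2, matching the base64/urllib imports added above):

import base64
import urllib

def rc4(key, data):
    # Key-scheduling algorithm (KSA).
    S = range(256)
    j = 0
    for i in range(256):
        j = (j + S[i] + ord(key[i % len(key)])) % 256
        S[i], S[j] = S[j], S[i]
    # Keystream generation and XOR (PRGA).
    out = []
    i = j = 0
    for ch in data:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(chr(ord(ch) ^ S[(S[i] + S[j]) % 256]))
    return ''.join(out)

# decode_index('0x22', '!UJH') is then roughly:
# rc4('!UJH', urllib.unquote(base64.b64decode(var[0x22])).decode('utf8'))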

@@ -1,49 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "(http://stagevu.com/video/[A-Z0-9a-z]+)",
"url": "\\1"
},
{
"pattern": "http://stagevu.com.*?uid\\=([A-Z0-9a-z]+)",
"url": "http://stagevu.com/video/\\1"
},
{
"pattern": "http://[^\\.]+\\.stagevu.com/v/[^/]+/(.*?).avi",
"url": "http://stagevu.com/video/\\1"
}
]
},
"free": true,
"id": "stagevu",
"name": "stagevu",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}


@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import scrapertools
from platformcode import logger
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
# Descarga la página del vídeo
data = scrapertools.cache_page(page_url)
# Busca el vídeo de dos formas distintas
patronvideos = '<param name="src" value="([^"]+)"'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
video_urls = [["[stagevu]", matches[0]]]
else:
patronvideos = 'src="([^"]+stagevu.com/[^i][^"]+)"' # Forma src="XXXstagevu.com/ y algo distinto de i para evitar images e includes
matches = re.findall(patronvideos, data)
if len(matches) > 0:
video_urls = [["[stagevu]", matches[0]]]
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls


@@ -1,33 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
response = httptools.downloadpage(page_url)
if "video_error.mp4" in response.data:
return False, "[Stormo] El archivo no existe o ha sido borrado"
if response.code == 451:
return False, "[Stormo] El archivo ha sido borrado por problemas legales."
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info(" url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
media_url = scrapertools.find_single_match(data, "file\s*:\s*['\"]([^'\"]+)['\"]")
if media_url.endswith("/"):
media_url = media_url[:-1]
video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [stormo]", media_url])
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls


@@ -9,7 +9,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Not Found" in data:
if "Not Found" in data or "File was deleted" in data:
return False, "[streamixcloud] El archivo no existe o ha sido borrado"
if "Video is processing" in data:
return False, "[streamixcloud] El video se está procesando, inténtelo mas tarde"


@@ -1,6 +1,9 @@
# -*- coding: utf-8 -*-
import re
import base64
import urllib
from core import httptools
from core import scrapertools
@@ -25,12 +28,11 @@ def test_video_exists(page_url):
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
referer = re.sub(r"embed-|player-", "", page_url)[:-5]
referer = page_url.replace('iframe', 'preview')
data = httptools.downloadpage(page_url, headers={'Referer': referer}).data
packed = scrapertools.find_single_match(data, "<script type=[\"']text/javascript[\"']>(eval.*?)</script>")
unpacked = jsunpack.unpack(packed)
_0xd003 = scrapertools.find_single_match(data, 'var _0xd003=(\[[^;]+\]);')
_0xd003 = scrapertools.find_single_match(data, 'var _0x[0-f]+=(\[[^;]+\]);')
video_urls = []
url = scrapertools.find_single_match(unpacked, '(http[^,]+\.mp4)')
@@ -45,7 +47,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
class S:
def __init__(self, _0xd003):
def __init__(self, var):
self.r = None
self.s = None
self.k = None
@@ -54,8 +56,48 @@ class S:
self.b = None
self.d = None
_0xd003 = eval(_0xd003)
self.t(_0xd003[13] + _0xd003[14] + _0xd003[13] + _0xd003[14], _0xd003[15])
var = eval(var)
for x in range(0x1f0, 0, -1):
var.append(var.pop(0))
self.var = var
self.t(
self.decode_index('0x22', '!UJH') +
self.decode_index('0x23', 'NpE)') +
self.decode_index('0x24', '4uT2') +
self.decode_index('0x23', 'NpE)'),
self.decode_index('0x25', '@ZC2')
)
def decode_index(self, index, key):
b64_data = self.var[int(index, 16)];
result = ''
_0xb99338 = 0x0
_0x25e3f4 = 0x0
data = base64.b64decode(b64_data)
data = urllib.unquote(data).decode('utf8')
_0x5da081 = [x for x in range(0x100)]
for x in range(0x100):
_0xb99338 = (_0xb99338 + _0x5da081[x] + ord(key[x % len(key)])) % 0x100
_0x139847 = _0x5da081[x]
_0x5da081[x] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
_0xb99338 = 0x0
for _0x11ebc5 in range(len(data)):
_0x25e3f4 = (_0x25e3f4 + 0x1) % 0x100
_0xb99338 = (_0xb99338 + _0x5da081[_0x25e3f4]) % 0x100
_0x139847 = _0x5da081[_0x25e3f4]
_0x5da081[_0x25e3f4] = _0x5da081[_0xb99338]
_0x5da081[_0xb99338] = _0x139847
result += chr(ord(data[_0x11ebc5]) ^ _0x5da081[(_0x5da081[_0x25e3f4] + _0x5da081[_0xb99338]) % 0x100])
return result
def decode(self, url):
_hash = re.compile('[A-z0-9_-]{40,}', re.DOTALL).findall(url)[0]

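This streamplay hunk duplicates the powvideo helper verbatim, down to the 0x1f0 rotation and the same index/key pairs, so the standalone rc4 sketch given for powvideo above applies here unchanged.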

@@ -1,41 +0,0 @@
{
"active": true,
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "turbovideos.net/embed-([a-z0-9A-Z]+)",
"url": "http://turbovideos.net/embed-\\1.html"
}
]
},
"free": true,
"id": "turbovideos",
"name": "turbovideos",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
]
}


@@ -1,38 +0,0 @@
# -*- coding: utf-8 -*-
from core import scrapertools
from lib import jsunpack
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
if "embed" not in page_url:
page_url = page_url.replace("http://turbovideos.net/", "http://turbovideos.net/embed-") + ".html"
data = scrapertools.cache_page(page_url)
logger.info("data=" + data)
data = scrapertools.find_single_match(data,
"<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
logger.info("data=" + data)
data = jsunpack.unpack(data)
logger.info("data=" + data)
video_urls = []
# {file:"http://ultra.turbovideos.net/73ciplxta26xsbj2bqtkqcd4rtyxhgx5s6fvyzed7ocf4go2lxjnd6e5kjza/v.mp4",label:"360"
media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)",label:"([^"]+)"')
for media_url, label in media_urls:
if not media_url.endswith("srt"):
video_urls.append(
[scrapertools.get_filename_from_url(media_url)[-4:] + " " + label + " [turbovideos]", media_url])
return video_urls


@@ -14,7 +14,7 @@ def test_video_exists(page_url):
if "Streaming link:" in data:
return True, ""
elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data:
elif "Unfortunately, the file you want is not available." in data or "Unfortunately, the video you want to see is not available" in data or "This stream doesn" in data:
return False, "[Uptobox] El archivo no existe o ha sido borrado"
wait = scrapertools.find_single_match(data, "You have to wait ([0-9]+) (minute|second)")
if len(wait) > 0:
@@ -40,7 +40,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
# Si el archivo tiene enlace de streaming se redirige a upstream
if "Streaming link:" in data:
page_url = "http://uptostream.com/iframe/" + scrapertools.find_single_match(page_url,
'uptobox.com/([a-z0-9]+)')
'uptobox.com/([a-z0-9]+)')
data = httptools.downloadpage(page_url).data
video_urls = uptostream(data)
else:
@@ -56,17 +56,27 @@ def uptostream(data):
subtitle = scrapertools.find_single_match(data, "kind='subtitles' src='//([^']+)'")
if subtitle:
subtitle = "http://" + subtitle
video_urls = []
patron = "<source src='//([^']+)' type='video/([^']+)' data-res='([^']+)' (?:data-default=\"true\" |)(?:lang='([^']+)'|)"
videos1 = []
data = data.replace("\\","")
patron = 'src":"([^"]+).*?'
patron += 'type":"([^"]+).*?'
patron += 'res":"([^"]+).*?'
patron += 'lang":"([^"]+)'
media = scrapertools.find_multiple_matches(data, patron)
for url, tipo, res, lang in media:
media_url = "http://" + url
for media_url, tipo, res, lang in media:
videos1.append([media_url, tipo, res, lang])
videos1.sort(key=lambda videos1: int(videos1[2]))
for x in videos1:
media_url = x[0]
tipo = x[1]
res = x[2]
lang = x[3]
tipo = tipo.replace("video/","")
extension = ".%s (%s)" % (tipo, res)
if lang:
extension = extension.replace(")", "/%s)" % lang[:3])
video_urls.append([extension + " [uptostream]", media_url, 0, subtitle])
return video_urls

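The rewritten uptostream parser collects (url, type, res, lang) tuples and sorts them numerically by resolution before building labels. In miniature, with illustrative values:

videos1 = [('http://cdn/v720.mp4', 'video/mp4', '720', 'en'),
           ('http://cdn/v360.mp4', 'video/mp4', '360', 'es')]
videos1.sort(key=lambda v: int(v[2]))   # '360' sorts before '720'
# Labels then come out lowest-to-highest quality, e.g. ".mp4 (360/es)".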

@@ -2,6 +2,7 @@
from core import httptools
from core import scrapertools
from lib import jsunpack
from platformcode import logger
@@ -10,7 +11,7 @@ def test_video_exists(page_url):
response = httptools.downloadpage(page_url)
if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data:
if not response.sucess or "Not Found" in response.data or "File was deleted" in response.data or "is no longer available" in response.data:
return False, "[Userscloud] El fichero no existe o ha sido borrado"
return True, ""
@@ -21,7 +22,9 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
video_urls = []
data = httptools.downloadpage(page_url).data
media_url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
packed = scrapertools.find_single_match(data, "function\(p,a,c,k.*?</script>")
unpacked = jsunpack.unpack(packed)
media_url = scrapertools.find_single_match(unpacked, 'src"value="([^"]+)')
if not media_url:
id_ = page_url.rsplit("/", 1)[1]
rand = scrapertools.find_single_match(data, 'name="rand" value="([^"]+)"')


@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
@@ -7,35 +8,28 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = scrapertools.cache_page(page_url)
data = httptools.downloadpage(page_url).data
if "This video has been removed from public access" in data:
if "This video has been removed from public access" in data or "Video not found." in data:
return False, "El archivo ya no esta disponible<br/>en VK (ha sido borrado)"
else:
return True, ""
return True, ""
# Returns an array of possible video url's from the page_url
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
try:
oid, id = scrapertools.find_single_match(page_url, 'oid=([^&]+)&id=(\d+)')
except:
oid, id = scrapertools.find_single_match(page_url, 'video(\d+)_(\d+)')
from core import httptools
headers = {'User-Agent': 'Mozilla/5.0'}
url = "http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s" % (oid, id)
data = httptools.downloadpage(url, headers=headers).data
matches = scrapertools.find_multiple_matches(data, '<source src="([^"]+)" type="video/(\w+)')
for media_url, ext in matches:
calidad = scrapertools.find_single_match(media_url, '(\d+)\.%s' % ext)
video_urls.append(["." + ext + " [vk:" + calidad + "]", media_url])
for video_url in video_urls:
logger.info("%s - %s" % (video_url[0], video_url[1]))
return video_urls
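
A closing note on the vk rewrite: the two extraction patterns accept both URL shapes the channels produce, roughly:

#   'http://vk.com/video_ext.php?oid=123&id=456'  -> oid='123', id='456'
#   'http://vk.com/video123_456'                  -> oid='123', id='456'
# The inline player is then fetched from
#   http://vk.com/al_video.php?act=show_inline&al=1&video=123_456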