249 Commits

Author SHA1 Message Date
alfa-addon
0c65e2d20c v2.4.3 2017-11-27 18:13:01 -05:00
Alfa
2a5c27355b Merge pull request #181 from Intel11/patch-1
Updated
2017-11-27 23:54:54 +01:00
Intel1
cad93526ce hdfull: fix thumbnail 2017-11-27 17:40:26 -05:00
Intel1
4cfe60c2a8 Add files via upload 2017-11-27 16:24:11 -05:00
Intel1
10a7535242 Update tvmoviedb.py 2017-11-27 15:38:46 -05:00
Intel1
489cd02192 Update search.py 2017-11-27 15:34:32 -05:00
Intel1
089098c58f yourupload: fix "file does not exist" 2017-11-26 09:05:00 -05:00
Intel1
57e5d32567 zstream: domain no longer exists 2017-11-25 10:08:59 -05:00
Intel1
9adcafc343 zstream: domain no longer exists 2017-11-25 10:08:49 -05:00
Intel1
cf068bc13f letwatch: domain no longer exists 2017-11-25 09:50:57 -05:00
Intel1
7579428087 letwatch: domain no longer exists 2017-11-25 09:50:48 -05:00
Intel1
5ca2ed6212 pelisfox: fix pattern 2017-11-25 09:26:01 -05:00
Intel1
11494549b9 gvideo: pattern updated 2017-11-25 09:19:39 -05:00
Intel1
77423ec5a8 cinetux: fix play 2017-11-24 15:33:20 -05:00
Intel1
be2c691909 Update powvideo.py 2017-11-24 15:23:53 -05:00
Intel1
611a0e28a3 Update descargasmix.py 2017-11-24 14:33:19 -05:00
Intel1
8ea2efb632 peliculasdk: updated 2017-11-24 11:13:06 -05:00
Intel1
f71de37f0f yaske: updated for gvideo 2017-11-24 08:40:33 -05:00
alfa-addon
d4b2a61318 v2.4.2 2017-11-23 23:16:11 -05:00
Alfa
fd1f5c28df Merge pull request #180 from Intel11/ultimo
Updated
2017-11-24 05:04:37 +01:00
Intel1
1e08ee9bd6 Update hdfull.py 2017-11-23 17:08:29 -05:00
Intel1
08ac52b279 Update clasicofilm.py 2017-11-23 16:54:18 -05:00
Intel1
7b52463ce6 Update clasicofilm.py 2017-11-23 16:04:54 -05:00
Intel1
e79364ef93 clasicofilm: fix 2017-11-23 15:50:10 -05:00
Intel1
de4b08606a thevideome: pattern updated 2017-11-23 15:08:22 -05:00
Intel1
21b655b074 ver-peliculas: fix 2017-11-23 14:58:06 -05:00
Intel1
48120ac6ab powvideo: fix 2017-11-23 11:01:13 -05:00
Intel1
5c360bdc68 cartoonlatino: updated for gvideo 2017-11-23 10:52:20 -05:00
Intel1
de267299e7 yaske: optimized movie list 2017-11-23 09:44:51 -05:00
alfa-addon
d0139dfde3 v2.4.1 2017-11-22 16:42:45 -05:00
alfa-addon
7115c2f832 fix 2017-11-22 16:42:26 -05:00
Alfa
85135711de Merge pull request #166 from danielr460/patch-2
Additions
2017-11-22 21:45:04 +01:00
Alfa
8c5c495633 Merge pull request #178 from Intel11/patch-2
Updated
2017-11-22 21:44:49 +01:00
Alfa
fdcf27a5fa Merge pull request #179 from danielr460/master
Minor fixes
2017-11-22 21:44:32 +01:00
Intel1
7523b02e62 Update yaske.py 2017-11-22 15:39:24 -05:00
Intel1
3ca234f8ae yaske: optimized findvideos 2017-11-22 15:28:21 -05:00
Intel1
2848692d79 cinefox: fix 2017-11-22 14:25:14 -05:00
Intel1
d6f73e1f06 Update rapidvideo.py 2017-11-22 10:14:29 -05:00
Intel1
0dbf9c544a Update infoplus.py 2017-11-22 09:42:26 -05:00
Intel1
4fdf382ca3 animemovil: fix search 2017-11-22 09:18:05 -05:00
Intel1
ca943ab6ef bajui: fix thumbnail 2017-11-22 08:39:55 -05:00
danielr460
41a66823e5 Fixed link quality when searching from the video library. 2017-11-22 07:42:04 -05:00
Intel1
a6206420b5 Update powvideo.py 2017-11-21 17:54:53 -05:00
Intel1
1ebe99ede1 mejortorrent: domain changed 2017-11-21 17:06:00 -05:00
Intel1
aaa0149bc8 Delete kingvid.py 2017-11-21 16:03:09 -05:00
Intel1
4cb704a6c3 Delete kingvid.json 2017-11-21 16:02:58 -05:00
Intel1
411b3ce23d pelisplus: updated 2017-11-21 16:00:41 -05:00
Intel1
1b0f91d4f2 powvideo: fix 2017-11-21 14:59:49 -05:00
alfa-addon
f97a283175 fix 2017-11-20 14:49:03 -05:00
alfa-addon
3b02b62a29 v2.4.0 2017-11-20 13:59:03 -05:00
alfa-addon
25f8a9dc4b fixed 2017-11-20 13:58:41 -05:00
Alfa
860bd0f834 Merge pull request #177 from Intel11/ultimo
Updated
2017-11-20 19:38:58 +01:00
Intel1
6bede726f8 gvideo: fix 2017-11-20 10:25:31 -05:00
Intel1
f045d2ee7c pelisplus: fix 2017-11-20 10:23:41 -05:00
Intel1
51c4d7d746 cinetux: fix gvideo 2017-11-20 10:13:28 -05:00
alfa-addon
f340cbce3a v2.3.9 2017-11-19 09:28:40 -05:00
Alfa
16bcfdcb15 Merge pull request #174 from danielr460/master
Fix
2017-11-19 17:09:34 +01:00
Alfa
11ef80c3e0 Merge pull request #176 from alfa-jor/master
Fixes
2017-11-19 17:09:17 +01:00
alfa_addon_10
4d2562eaac logs 2017-11-19 16:49:33 +01:00
alfa_addon_10
5a1a1a97f1 fix 2017-11-19 16:21:16 +01:00
alfa_addon_10
035f27f887 rollback halloween 2017-11-19 16:06:24 +01:00
alfa_addon_10
f1db1236f1 strip in titles 2017-11-19 15:53:04 +01:00
alfa_addon_10
790420df0d pasateatorrent is now grantorrent; playmax deleted 2017-11-19 15:10:12 +01:00
danielr460
6999f615c8 Seodiv: definitive fix for wrong language on some links 2017-11-18 12:14:13 -05:00
alfa-addon
78a6eecf2e v2.3.8 2017-11-17 17:13:56 -05:00
Alfa
c9a96831d8 Merge pull request #171 from Intel11/patch-1
Updated
2017-11-17 18:52:07 -03:00
Alfa
e6cfcd3151 Merge pull request #172 from Intel11/news
Channels for News
2017-11-17 18:51:53 -03:00
Alfa
4fda116759 Merge pull request #173 from Alfa-beto/project
new Novedades features
2017-11-17 18:51:35 -03:00
Alfa-beto
3c2902d5ea added icon for the Castilian language 2017-11-17 18:30:38 -03:00
Alfa-beto
93ab41c2da assigned icons in news 2017-11-17 18:22:32 -03:00
Intel1
87541a3291 Update news.py 2017-11-17 16:21:17 -05:00
Intel1
46646e09d1 datoporn connector: fix 2017-11-17 15:42:38 -05:00
Intel1
a206b9ddaf trailertools: fix 2017-11-17 15:03:21 -05:00
Unknown
76f52ce404 changes for news 2017-11-17 17:01:06 -03:00
Intel1
afc13bd1d3 vernovelasonline: site no longer exists 2017-11-17 11:55:19 -05:00
Intel1
4469fc75cd vernovelasonline: site no longer exists 2017-11-17 11:55:07 -05:00
Intel1
8428e8571b Update ultrapeliculashd.py 2017-11-17 11:16:04 -05:00
Intel1
b640303143 Update ultrapeliculashd.json 2017-11-17 11:15:40 -05:00
Intel1
7456c32fbb Update sipeliculas.py 2017-11-17 11:08:51 -05:00
Intel1
a8ddc664d4 Update sipeliculas.json 2017-11-17 11:08:39 -05:00
Intel1
be3345d34f Update pelisplus.py 2017-11-17 11:02:54 -05:00
Intel1
d9677a13ea Update pelisplus.json 2017-11-17 11:02:28 -05:00
Intel1
d3d44463b7 Update pelisfox.py 2017-11-17 10:32:43 -05:00
Intel1
6f63ea5128 Update pelisfox.json 2017-11-17 10:31:59 -05:00
Intel1
48740f4a1d Update peliculasmx.py 2017-11-17 10:25:14 -05:00
Intel1
5224547446 Update peliculasmx.json 2017-11-17 10:24:36 -05:00
Intel1
feb4f239e3 Update peliculasaudiolatino.py 2017-11-17 10:09:36 -05:00
Intel1
164804d484 Update peliculasaudiolatino.json 2017-11-17 10:08:38 -05:00
Intel1
162928f4d6 Update ohlatino.py 2017-11-17 09:56:54 -05:00
Intel1
4c24fe48ed Update ohlatino.json 2017-11-17 09:56:12 -05:00
Intel1
5bf145114d Update miradetodo.py 2017-11-17 09:54:07 -05:00
Intel1
39669395ae Update miradetodo.json 2017-11-17 09:53:50 -05:00
Intel1
f5a5979f74 Update estadepelis.py 2017-11-17 09:48:02 -05:00
Intel1
476f7f985d Update estadepelis.json 2017-11-17 09:47:07 -05:00
Intel1
42d20e9434 Update cinefoxtv.py 2017-11-17 09:42:30 -05:00
Intel1
9adb713d07 Update cinefoxtv.json 2017-11-17 09:42:14 -05:00
Intel1
513d66dfb4 Update allpeliculas.py 2017-11-17 09:31:36 -05:00
Intel1
537c2cb0e4 Update allpeliculas.json 2017-11-17 09:30:58 -05:00
Intel1
ddda31b2af Update news.py 2017-11-17 09:23:46 -05:00
Intel1
c5d1bc1988 Update allcalidad.py 2017-11-17 09:22:36 -05:00
Intel1
44a89836d5 Update allcalidad.json 2017-11-17 09:21:19 -05:00
Intel1
878dbc8393 seodiv: updated 2017-11-15 17:39:41 -05:00
Intel1
d625419219 Update xbmc_config_menu.py 2017-11-15 17:38:52 -05:00
unknown
742ff3feff Merge remote-tracking branch 'alfa-addon/master' 2017-11-15 08:35:02 -03:00
alfa-addon
df0607ec90 v2.3.7 2017-11-14 18:45:27 -05:00
alfa-addon
d83a49743c fixed 2017-11-14 18:45:09 -05:00
Alfa
66762b2c46 Merge pull request #169 from danielr460/master
Channel fixes
2017-11-14 19:41:50 -03:00
Alfa
79c761206d Merge pull request #170 from Intel11/patch-3
Updated
2017-11-14 19:40:58 -03:00
Intel1
f04647f348 Update settings.xml 2017-11-14 12:43:31 -05:00
Intel1
0f81113225 Update infoplus.py 2017-11-14 12:42:02 -05:00
Intel1
169c09db16 Update pelisfox.py 2017-11-14 08:49:58 -05:00
Intel1
306bb6533d stormo: updated 2017-11-14 08:45:54 -05:00
Intel1
210e90cb96 Update help.py 2017-11-14 08:28:40 -05:00
Intel1
74e53f362b help: updated 2017-11-13 14:51:24 -05:00
Intel1
947cb7f51f crunchyroll: fix 2017-11-13 14:47:18 -05:00
Daniel Rincón Rodríguez
f88ca81ff5 Cosmetic fix 2017-11-13 14:07:12 -05:00
danielr460
42cd9ac14b Danimados: added movies section 2017-11-13 10:07:01 -05:00
danielr460
b7520145bb Minor fixes 2017-11-13 09:31:15 -05:00
danielr460
209af696b2 Serieslan: minor fixes to data presentation 2017-11-13 09:24:14 -05:00
Daniel Rincón Rodríguez
03589b9c39 Seodiv: added tvdb and fixed the language 2017-11-13 00:35:48 -05:00
danielr460
a3337df4da Peliculashindu: channel fixed 2017-11-12 19:21:03 -05:00
danielr460
acf7f9a27a Mundiseries: cosmetic details 2017-11-12 19:19:53 -05:00
danielr460
8082e1b244 CartoonLatino: cosmetic details 2017-11-12 18:08:11 -05:00
unknown
d37f911d3f Merge remote-tracking branch 'alfa-addon/master' 2017-11-12 20:01:35 -03:00
danielr460
9345115869 Asialiveaction: removed the "add to video library" link once it has already been added 2017-11-12 17:58:43 -05:00
danielr460
7ae8b203b6 AnitoonsTV: cosmetic details 2017-11-12 17:52:34 -05:00
danielr460
56c16f2922 Removed nonexistent youtube link 2017-11-12 17:44:40 -05:00
Alfa
7e47e3ae59 Merge pull request #162 from numa00009/patch-1
Update httptools.py
2017-11-12 13:20:01 -03:00
Alfa
9eef89d1b0 Merge pull request #164 from q1316480/sb-streamixcloud
Seriesblanco -> StreamixCloud
2017-11-12 13:19:35 -03:00
Alfa
2b3d81c9a0 Merge pull request #165 from danielr460/master
Minor fixes
2017-11-12 13:19:07 -03:00
Alfa
876b02b81f Merge pull request #168 from Intel11/patch-1
Updated
2017-11-12 13:18:26 -03:00
Intel1
8028290051 Update xbmc_config_menu.py 2017-11-12 10:22:16 -05:00
Intel1
78252d3452 gamovideo: fix 2017-11-12 09:59:33 -05:00
Intel1
9aa77400d5 hdfull: updated 2017-11-12 09:57:26 -05:00
unknown
c7850cef56 Merge remote-tracking branch 'alfa-addon/master' 2017-11-10 23:06:56 -03:00
Daniel Rincón Rodríguez
3746d3bfb0 Added autoplay 2017-11-10 14:17:16 -05:00
danielr460
5d592f724d Serieslan: updated 2017-11-10 14:04:07 -05:00
danielr460
d288031a83 Removed unnecessary code 2017-11-10 13:45:26 -05:00
Daniel Rincón Rodríguez
41a39ff02b Update anitoonstv.py 2017-11-10 11:16:35 -05:00
Daniel Rincón Rodríguez
0bad69a7cb removed unnecessary lines 2017-11-10 11:15:54 -05:00
danielr460
74e6145d2f Fix netutv 2017-11-10 11:12:07 -05:00
q1316480
c344832c8c Fix: remove "Ver en" and "Descargar en" 2017-11-10 01:21:05 +01:00
q1316480
a9caf59ce1 Seriesblanco -> StreamixCloud
https://github.com/alfa-addon/addon/issues/163
2017-11-10 00:57:26 +01:00
numa00009
770a2e215a Update httptools.py
Change Firefox headers to Chrome ones.
default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
2017-11-09 10:17:45 +01:00
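The commit above swaps the default Firefox headers for Chrome ones. A minimal sketch of how such module-level defaults can combine with a per-request override (the headers= keyword appears later in this changeset, in the cinetux play() hunk); the merge helper is illustrative, not httptools' actual internals:

# Illustrative only: module-level defaults as set in the commit above, merged
# with per-request headers; build_headers() is hypothetical, not httptools' API.
default_headers = dict()
default_headers["User-Agent"] = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                                 "AppleWebKit/537.36 (KHTML, like Gecko) "
                                 "Chrome/61.0.3163.100 Safari/537.36")
default_headers["Accept"] = ("text/html,application/xhtml+xml,application/xml;"
                             "q=0.9,image/webp,image/apng,*/*;q=0.8")

def build_headers(headers=None):
    # Per-request headers win over the defaults, e.g. headers={'Referer': url}.
    merged = dict(default_headers)
    merged.update(headers or {})
    return merged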
alfa-addon
28d99deb48 v2.3.6 2017-11-09 03:57:01 -05:00
alfa-addon
23ac80fbd6 v2.3.5 2017-11-08 21:38:26 -05:00
alfa-addon
9a5ddfbccb fixed 2017-11-08 21:38:12 -05:00
alfa-addon
50bbf7d9aa goodbye playmax 2017-11-08 21:37:57 -05:00
Alfa
2aab5ae0ff Merge pull request #161 from Intel11/patch-1
Updated
2017-11-09 02:36:18 +01:00
Intel1
1bbc51a885 gamovideo: fix 2017-11-08 09:02:49 -05:00
Intel1
f95c3621d4 Help: updated 2017-11-08 08:30:30 -05:00
Intel1
f05cbba109 Update channelselector.py 2017-11-08 08:29:26 -05:00
Intel1
16968f9204 seriesblanco: updated 2017-11-08 08:22:17 -05:00
alfa-addon
8985f3ebdd v2.3.4 2017-11-06 19:04:42 -05:00
Alfa
d60c246bbb Merge pull request #155 from Intel11/patch-3
Updated
2017-11-07 00:09:00 +01:00
Alfa
3b29fe47bb Merge pull request #156 from danielr460/master
Minor fixes
2017-11-07 00:08:47 +01:00
Alfa
3093f72ce5 Merge pull request #159 from Alfa-beto/Fixes
Fixed error with extras
2017-11-07 00:08:33 +01:00
Unknown
55dcf3f091 Fixed error with extras 2017-11-05 18:21:26 -03:00
Intel1
2924b6958d Update allpeliculas.py 2017-11-04 15:01:27 -05:00
Intel1
927310c7c6 flashx: updated 2017-11-04 14:58:29 -05:00
danielr460
0c25891790 fix servers 2017-11-04 00:06:45 -05:00
danielr460
212c06057f Minor fixes 2017-11-03 22:04:28 -05:00
Intel1
9c3b3e9256 allpeliculas: pager for collections 2017-11-03 17:54:51 -05:00
Intel1
6dc853b41e repelis: fix category 2017-11-03 15:49:52 -05:00
Intel1
7afd09dfa9 streamixcloud: fix 2017-11-03 11:08:16 -05:00
Intel1
6855508eaa Update ultrapeliculashd.py 2017-11-03 10:21:18 -05:00
Intel1
2925c29671 Update ultrapeliculashd.json 2017-11-03 10:20:47 -05:00
Intel1
506e68e8a3 vshare: changed the resolution order 2017-11-03 10:17:12 -05:00
Intel1
9cc30152f8 vshare: pattern updated 2017-11-03 10:15:27 -05:00
Intel1
267c9d8031 gvideo: fix 2017-11-03 10:07:46 -05:00
Intel1
bd68b83b6c flashx: fix 2017-11-01 06:47:51 -05:00
unknown
71bf6ce57b Merge remote-tracking branch 'alfa-addon/master' 2017-11-01 08:37:40 -03:00
Unknown
c1f8039672 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-01 08:37:33 -03:00
alfa-addon
99dfa2be58 v2.3.3 2017-10-31 21:09:17 -04:00
Alfa
39e711b3cb Merge pull request #150 from danielr460/master
Channel fixes
2017-11-01 01:40:53 +01:00
Alfa
2d8d2b3baf Merge pull request #151 from Intel11/patch-2
Updated
2017-11-01 01:40:36 +01:00
Alfa
82d126c3e1 Merge pull request #152 from Alfa-beto/Fixes
Fixes
2017-11-01 01:40:20 +01:00
Alfa
8d41fd1c64 Merge pull request #153 from danielr460/patch-1
Other fixes
2017-11-01 01:40:05 +01:00
Unknown
a8c2f409eb Channel fixes 2017-10-31 14:57:55 -03:00
Daniel Rincón Rodríguez
7b2a3c2181 Update mundiseries.json 2017-10-31 07:19:55 -05:00
Daniel Rincón Rodríguez
9e6729f0be Update danimados.json 2017-10-31 07:17:04 -05:00
Unknown
241e644dcf Fixes 2017-10-30 15:02:57 -03:00
Intel1
ae318721ab Add files via upload 2017-10-30 10:34:08 -05:00
Intel1
8328610ffa Delete bajui2.json 2017-10-30 10:32:41 -05:00
Intel1
19101b5310 Delete bajui2.py 2017-10-30 10:32:27 -05:00
Intel1
22827e0f7e Update animemovil.json 2017-10-30 10:28:29 -05:00
unknown
f3b4ddee25 Merge remote-tracking branch 'alfa-addon/master' 2017-10-30 10:14:14 -03:00
Unknown
1747c9795d Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-30 10:14:07 -03:00
Unknown
f3effe9a7f Fixed series in pelisplus 2017-10-30 10:13:40 -03:00
Intel1
0621b1fa91 gamovideo: fix 2017-10-30 04:16:22 -05:00
Intel1
16473764c9 flashx: fix 2017-10-30 04:15:00 -05:00
danielr460
6b1727a0b8 Fixed serieslan 2017-10-29 19:50:42 -05:00
Intel1
11fceffd14 bajui2: fix 2017-10-29 10:00:39 -05:00
danielr460
3a49b8a442 The Play function was deleting series info. Fixed 2017-10-29 08:54:27 -05:00
alfa-addon
162772e9dc v2.3.2 2017-10-28 22:47:51 -04:00
alfa-addon
60d61f861b fixed 2017-10-28 22:47:33 -04:00
Alfa
cd1c7b692a Merge pull request #142 from danielr460/master
New channels
2017-10-29 06:06:34 +01:00
danielr460
10abe4a6d4 Change in the structure of the danimados website 2017-10-28 23:40:49 -05:00
danielr460
b0fa5e8a75 Removed the Novedades section because the website removed it 2017-10-28 23:23:42 -05:00
danielr460
54d6a943f5 Fixed Mundiseries 2017-10-28 23:19:43 -05:00
Daniel Rincón Rodríguez
44df5b6036 Fixed a line that was actually needed 2017-10-28 23:00:36 -05:00
Alfa
ae67d9b5ee Merge pull request #148 from Intel11/patch-1
Updated
2017-10-29 02:57:15 +01:00
Alfa
895d14760d Merge pull request #149 from Alfa-beto/Fixes
Fixed next page in pelisplus
2017-10-29 02:56:42 +01:00
Intel1
b0b4b218f0 animemovil: fast fix 2017-10-28 20:55:12 -05:00
unknown
2f4fb66ff0 Merge remote-tracking branch 'alfa-addon/master' 2017-10-28 21:56:35 -03:00
Unknown
348787ae97 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-28 21:56:26 -03:00
Unknown
0f7c11efad Fixed next page 2017-10-28 21:37:23 -03:00
Intel1
ae7a4a8d83 rapidvideo: updated test_video_exists 2017-10-28 11:26:08 -05:00
Intel1
fc58c717eb plusdede: updated findvideos 2017-10-28 11:17:48 -05:00
Daniel Rincón Rodríguez
b3a19f3d20 Fixes suggested by Intel1 2017-10-28 11:11:51 -05:00
Daniel Rincón Rodríguez
0cac09eef5 Removed "crude" workaround 2017-10-28 11:00:35 -05:00
Intel1
9a1effbe25 cinetux: show movie count 2017-10-28 10:58:15 -05:00
Daniel Rincón Rodríguez
44145660d0 Removed unnecessary code 2017-10-28 10:55:55 -05:00
Daniel Rincón Rodríguez
aec2674316 Removed generic function 2017-10-28 10:55:01 -05:00
Daniel Rincón Rodríguez
09de611aae Update cartoonlatino.py 2017-10-28 10:54:00 -05:00
Daniel Rincón Rodríguez
74598154c2 Removed unnecessary code 2017-10-28 10:53:19 -05:00
Daniel Rincón Rodríguez
7ab9c8bb29 Cartoonlatino back to its original version 2017-10-28 10:52:30 -05:00
Daniel Rincón Rodríguez
14178974a0 Improved Autoplay 2017-10-28 10:51:45 -05:00
Intel1
c43162cbc2 flashx: like I said!!! 2017-10-28 08:40:11 -05:00
Daniel Rincón Rodríguez
aa76986a51 Left the AnitoonsTV channel as it was 2017-10-28 08:30:25 -05:00
Daniel Rincón Rodríguez
9aae0e7a1b Fixes from Intel1's comments 2017-10-26 18:28:19 -05:00
danielr460
e1fe886602 Autoplay added 2017-10-26 15:38:36 -05:00
danielr460
19812c83a9 Added channel info from the video library 2017-10-26 15:05:12 -05:00
danielr460
cabc2458e3 Added series info so it is not wiped while autoplay is active 2017-10-26 14:36:09 -05:00
danielr460
336376ecef Added a videolibrary option to indicate the call does not come from the addon 2017-10-26 14:34:45 -05:00
danielr460
af06269e39 Added a mark-as-watched option to autoplay 2017-10-26 13:56:37 -05:00
danielr460
f37d18ee0a Added contentChannel so findvideos knows whether the call comes from the addon or the videolibrary 2017-10-26 13:54:14 -05:00
danielr460
6fefc3b048 Added Autoplay 2017-10-26 13:44:09 -05:00
danielr460
ab5fe41403 Remove unnecessary code 2017-10-26 07:49:46 -05:00
danielr460
15463ea0f8 Bug fix 2017-10-26 07:48:01 -05:00
unknown
5a7905d5e0 Merge remote-tracking branch 'alfa-addon/master' 2017-10-26 08:02:28 -03:00
danielr460
badf40573c New channel: mundiseries 2017-10-25 21:41:55 -05:00
danielr460
c80793e3e0 Media for danimados 2017-10-25 21:41:38 -05:00
danielr460
cbc0ff0bd0 New channel: danimados 2017-10-25 21:25:31 -05:00
unknown
672d1ce0c0 Merge remote-tracking branch 'alfa-addon/master' 2017-10-24 08:25:18 -03:00
unknown
17002ddf94 Merge remote-tracking branch 'alfa-addon/master' 2017-10-17 08:04:20 -03:00
unknown
2618168737 Merge remote-tracking branch 'alfa-addon/master' 2017-10-15 14:19:50 -03:00
unknown
e6e572922f Merge remote-tracking branch 'alfa-addon/master' 2017-10-10 15:22:53 -03:00
Unknown
6a7e883299 Merge remote-tracking branch 'alfa-addon/master' 2017-10-04 21:27:32 -03:00
unknown
d1a264f7c7 Merge remote-tracking branch 'alfa-addon/master' 2017-10-04 08:20:54 -03:00
unknown
295f4eab68 Merge remote-tracking branch 'alfa-addon/master' 2017-10-02 09:41:14 -03:00
unknown
e72320f12f Merge remote-tracking branch 'alfa-addon/master' 2017-09-30 12:19:43 -03:00
unknown
dc77e9733b Merge remote-tracking branch 'alfa-addon/master' 2017-09-27 08:21:20 -03:00
unknown
7864fe3740 Merge remote-tracking branch 'alfa-addon/master' 2017-09-25 08:05:30 -03:00
unknown
ba03b01cc0 Merge remote-tracking branch 'alfa-addon/master' 2017-09-20 09:59:44 -03:00
unknown
97d299b863 Merge remote-tracking branch 'alfa-addon/master' 2017-09-16 22:44:21 -03:00
unknown
e85f31dadf Merge remote-tracking branch 'alfa-addon/master' 2017-09-13 17:38:14 -03:00
unknown
c54ed630f9 Merge remote-tracking branch 'alfa-addon/master' 2017-09-12 14:26:58 -03:00
unknown
5122d2f7fa Merge remote-tracking branch 'alfa-addon/master' 2017-09-08 17:37:09 -03:00
166 changed files with 3583 additions and 3975 deletions

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<addon id="plugin.video.alfa" name="Alfa" version="2.3.1" provider-name="Alfa Addon">
+<addon id="plugin.video.alfa" name="Alfa" version="2.4.3" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,10 +19,12 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
-» ohlatino » animemovil
-» pelisplus » flashx
-¤ arreglos internos
-</news>
+» cinetux » descargasmix
+» hdfull » peliculasdk
+» pelisfox » yaske
+» gvideo » powvideo
+» yourupload ¤ arreglos internos
+</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>
<description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>

View File

@@ -19,6 +19,14 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",

View File

@@ -33,7 +33,7 @@ def newest(categoria):
itemlist = []
item = Item()
try:
-if categoria == 'peliculas':
+if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host + 'category/animacion/'
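The hunk above follows a pattern repeated across this changeset: a channel's JSON gains an include_in_newest_latino flag and its newest(categoria) learns to answer the 'latino' category. A minimal channel-side sketch of the full contract, assuming the channel's own peliculas() parser and a host constant:

# Minimal sketch of the newest() contract these hunks implement; host is an
# assumed placeholder and peliculas() is the channel's own listing parser.
from core.item import Item
from platformcode import logger

host = "http://example-channel.tld/"

def newest(categoria):
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas', 'latino']:
            item.url = host
        elif categoria == 'infantiles':
            item.url = host + 'category/animacion/'
        itemlist = peliculas(item)
        # Drop the pager entry so it does not show up in the Novedades listing.
        if itemlist and itemlist[-1].title == 'Página siguiente >>':
            itemlist.pop()
    except:
        # Swallow the error so one broken channel cannot break Novedades.
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist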

View File

@@ -20,6 +20,14 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
@@ -50,4 +58,4 @@
]
}
]
}
}

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
@@ -59,6 +57,7 @@ def colecciones(item):
title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
page = 1,
thumbnail = host + scrapedthumbnail,
title = title,
url = host + scrapedurl
@@ -71,7 +70,7 @@ def listado_colecciones(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
post = "page=1"
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
patron = 'a href="(/peli[^"]+).*?'
patron += 'src="([^"]+).*?'
@@ -88,6 +87,16 @@ def listado_colecciones(item):
url = host + scrapedurl
))
tmdb.set_infoLabels(itemlist)
item.page += 1
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
if len(data) > 50:
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
title = "Pagina siguiente>>",
page = item.page,
url = item.url
))
return itemlist
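The two hunks above turn listado_colecciones into a stateful pager: colecciones seeds each Item with page = 1, the POST body carries the current page, and one look-ahead request decides whether a "Pagina siguiente>>" entry is offered. A condensed sketch; has_more() is hypothetical, and the 50-byte cutoff is the hunk's own heuristic for an exhausted collection:

# Hypothetical helper condensing the look-ahead pager from this hunk.
def has_more(data_url, page):
    post = "page=%s" % page
    data = httptools.downloadpage(host + data_url, post=post).data
    return len(data) > 50   # an empty page comes back as a near-empty body

# After building the current page's itemlist:
if has_more(data_url, item.page + 1):
    itemlist.append(Item(channel=item.channel, action="listado_colecciones",
                         title="Pagina siguiente>>", page=item.page + 1,
                         url=item.url))

The cost of this design is one extra HTTP request per listing, traded for never showing a dead "next page" entry.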
@@ -159,6 +168,7 @@ def lista(item):
params = jsontools.dump(dict_param)
data = httptools.downloadpage(item.url, post=params).data
data = data.replace("<mark>","").replace("<\/mark>","")
dict_data = jsontools.load(data)
for it in dict_data["items"]:
@@ -167,7 +177,7 @@ def lista(item):
rating = it["imdb"]
year = it["year"]
url = host + "pelicula/" + it["slug"]
-thumb = urlparse.urljoin(host, it["image"])
+thumb = host + it["image"]
item.infoLabels['year'] = year
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
@@ -207,7 +217,7 @@ def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == "peliculas":
if categoria in ['peliculas','latino']:
item.url = host + "movies/newmovies?page=1"
item.action = "lista"
itemlist = lista(item)

View File

@@ -25,6 +25,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -54,6 +54,7 @@ def browser(url):
api_key = "2e2160006592024ba87ccdf78c28f49f"
api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
host = 'http://alltorrent.net/'
def mainlist(item):
logger.info()
@@ -392,3 +393,26 @@ def get_art(item):
item.extra = item.extra + "|" + item.thumbnail
else:
item.extra = item.extra + "|" + item.thumbnail
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = host
itemlist = scraper(item)
if itemlist[-1].action == "[COLOR olivedrab][B]Siguiente >>[/B][/COLOR]":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -11,6 +11,12 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
list_servers = ['openload',
'directo'
]
list_quality = ['default']
CHANNEL_HOST = "http://animeflv.co"
CHANNEL_DEFAULT_HEADERS = [
@@ -117,7 +123,8 @@ def __find_series(html):
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="letras",
@@ -134,6 +141,7 @@ def mainlist(item):
url=CHANNEL_HOST + "/Buscar?s="))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -179,10 +187,13 @@ def search(item, texto):
show_list = __find_series(html)
items = []
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
-plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
+plot=plot, show=title, viewmode="movies_with_plot", context=context))
except:
import sys
for line in sys.exc_info():
@@ -197,10 +208,13 @@ def series(item):
page_html = get_url_contents(item.url)
show_list = __find_series(page_html)
items = []
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for show in show_list:
title, url, thumbnail, plot = show
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
-plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
+plot=plot, show=title, viewmode="movies_with_plot", context=context))
url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
if url_next_page:
@@ -292,4 +306,5 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, action="play", url=video_url, show=re.escape(item.show),
title="Ver en calidad [%s]" % (qualities[quality_id]), plot=item.plot,
fulltitle=item.title))
autoplay.start(__sort_by_quality(itemlist), item)
return __sort_by_quality(itemlist)
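Autoplay is wired into this channel (and several others in this changeset) with the same three calls: init() registers the channel's server/quality lists, show_option() adds the configuration entry to the menu, and start() may play the best link directly from findvideos. A minimal skeleton of that wiring; build_menu() and build_links() are placeholders for channel-specific code:

# Minimal autoplay wiring as used across these channels; build_menu() and
# build_links() stand in for the channel's own logic.
from channels import autoplay

list_servers = ['openload', 'directo']
list_quality = ['default']

def mainlist(item):
    autoplay.init(item.channel, list_servers, list_quality)  # register settings
    itemlist = build_menu(item)
    autoplay.show_option(item.channel, itemlist)             # add config entry
    return itemlist

def findvideos(item):
    itemlist = build_links(item)
    autoplay.start(itemlist, item)   # may autoplay instead of listing
    return itemlist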

View File

@@ -3,7 +3,7 @@
"name": "Animemovil",
"active": true,
"adult": false,
"language": ["cat", "lat"],
"language": ["cast", "lat"],
"thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
"banner": "",
"categories": [

View File

@@ -86,7 +86,7 @@ def recientes(item):
tipo = "tvshow"
show = contentTitle
action = "episodios"
-context = renumbertools.context
+context = renumbertools.context(item)
if item.extra == "recientes":
action = "findvideos"
context = ""
@@ -96,7 +96,6 @@ def recientes(item):
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -127,11 +127,21 @@ def episodios(item):
plot=scrapedplot, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def googl(url):
logger.info()
a=url.split("/")
link=a[3]
link="http://www.trueurl.net/?q=http%3A%2F%2Fgoo.gl%2F"+link+"&lucky=on&Uncloak=Find+True+URL"
data_other = httptools.downloadpage(link).data
data_other = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_other)
patron='<td class="withbg">Destination URL<\/td><td><A title="(.+?)"'
trueurl = scrapertools.find_single_match(data_other, patron)
return trueurl
def findvideos(item):
logger.info()
@@ -147,36 +157,23 @@ def findvideos(item):
scrapedthumbnail = scrapertools.find_single_match(data, '<div class="caracteristicas"><img src="([^<]+)">')
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla:
if "HQ" in quality:
quality = "HD"
if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ")
server = server.lower().strip()
if "ok" == server:
server = 'okru'
if "netu" == server:
continue
quality = "HQ"
if " Calidad media - Carga mas rapido" in quality:
quality = "360p"
server = server.lower().strip()
if "ok" in server:
server = 'okru'
if "rapid" in server:
server = 'rapidvideo'
if "netu" in server:
server = 'netutv'
url = googl(url)
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item)
return itemlist
-def play(item):
-logger.info()
-itemlist = []
-# Buscamos video por servidor ...
-devuelve = servertools.findvideosbyserver(item.url, item.server)
-if not devuelve:
-# ...sino lo encontramos buscamos en todos los servidores disponibles
-devuelve = servertools.findvideos(item.url, skip=True)
-if devuelve:
-# logger.debug(devuelve)
-itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
-url=devuelve[0][1], thumbnail=item.thumbnail))
-return itemlist
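The new googl() helper resolves goo.gl short links by scraping the third-party trueurl.net service. Elsewhere in this same changeset (the cinetux hunk) the addon resolves goo.gl links without any third party, by reading the redirect's Location header; a sketch of that alternative:

# Alternative goo.gl resolution used elsewhere in this changeset: follow the
# short URL only far enough to read its Location header.
from core import httptools

def resolve_short_url(url):
    return httptools.downloadpage(url, follow_redirects=False,
                                  only_headers=True).headers.get("location", "")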

View File

@@ -180,7 +180,7 @@ def findvideos(item):
show = item.show
for videoitem in itemlist:
videoitem.channel = item.channel
-if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie":
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -7,6 +7,7 @@ from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode import launcher
__channel__ = "autoplay"
@@ -78,7 +79,20 @@ def start(itemlist, item):
:return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio
'''
logger.info()
for videoitem in itemlist:
#Nos dice de donde viene si del addon o videolibrary
if item.contentChannel=='videolibrary':
videoitem.contentEpisodeNumber=item.contentEpisodeNumber
videoitem.contentPlot=item.contentPlot
videoitem.contentSeason=item.contentSeason
videoitem.contentSerieName=item.contentSerieName
videoitem.contentTitle=item.contentTitle
videoitem.contentType=item.contentType
videoitem.episode_id=item.episode_id
videoitem.hasContentDetails=item.hasContentDetails
#videoitem.infoLabels=item.infoLabels
videoitem.thumbnail=item.thumbnail
#videoitem.title=item.title
if not config.is_xbmc():
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
return itemlist
@@ -261,8 +275,12 @@ def start(itemlist, item):
else:
videoitem = resolved_item[0]
-# si no directamente reproduce
-platformtools.play_video(videoitem)
+# si no directamente reproduce y marca como visto
+from platformcode import xbmc_videolibrary
+xbmc_videolibrary.mark_auto_as_watched(item)
+#platformtools.play_video(videoitem)
+videoitem.contentChannel='videolibrary'
+launcher.run(videoitem)
try:
if platformtools.is_playing():

View File

@@ -1,6 +1,6 @@
{
"id": "bajui2",
"name": "Bajui2",
"id": "bajui",
"name": "Bajui",
"active": true,
"adult": false,
"language": ["cast"],

View File

@@ -1,8 +1,9 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -13,7 +14,7 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
url="http://www.bajui2.com/descargas/categoria/2/peliculas",
url="http://www.bajui.org/descargas/categoria/2/peliculas",
fanart=item.fanart))
itemlist.append(Item(channel=item.channel, title="Series", action="menuseries",
fanart=item.fanart))
@@ -33,8 +34,7 @@ def menupeliculas(item):
Item(channel=item.channel, title="Películas - A-Z", action="peliculas", url=item.url + "/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
# <ul class="submenu2 subcategorias"><li ><a href="/descargas/subcategoria/4/br-scr-dvdscr">BR-Scr / DVDScr</a></li><li ><a href="/descargas/subcategoria/6/dvdr-full">DVDR - Full</a></li><li ><a href="/descargas/subcategoria/1/dvdrip-vhsrip">DVDRip / VHSRip</a></li><li ><a href="/descargas/subcategoria/3/hd">HD</a></li><li ><a href="/descargas/subcategoria/2/hdrip-bdrip">HDRip / BDRip</a></li><li ><a href="/descargas/subcategoria/35/latino">Latino</a></li><li ><a href="/descargas/subcategoria/5/ts-scr-cam">TS-Scr / CAM</a></li><li ><a href="/descargas/subcategoria/7/vos">VOS</a></li></ul>
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, '<ul class="submenu2 subcategorias">(.*?)</ul>')
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -51,13 +51,13 @@ def menuseries(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/3/series",
url="http://www.bajui.org/descargas/categoria/3/series",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/3/series/orden:nombre",
url="http://www.bajui.org/descargas/categoria/3/series/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
url="http://www.bajui2.com/descargas/subcategoria/11/hd/orden:nombre",
url="http://www.bajui.org/descargas/subcategoria/11/hd/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart))
@@ -68,17 +68,16 @@ def menudocumentales(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv",
url="http://www.bajui.org/descargas/categoria/7/docus-y-tv",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv/orden:nombre",
url="http://www.bajui.org/descargas/categoria/7/docus-y-tv/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart))
return itemlist
# Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro
def search(item, texto, categoria=""):
logger.info(item.url + " search " + texto)
itemlist = []
@@ -86,7 +85,7 @@ def search(item, texto, categoria=""):
texto = texto.replace(" ", "+")
logger.info("categoria: " + categoria + " url: " + url)
try:
item.url = "http://www.bajui2.com/descargas/busqueda/%s"
item.url = "http://www.bajui.org/descargas/busqueda/%s"
item.url = item.url % texto
itemlist.extend(peliculas(item))
return itemlist
@@ -101,9 +100,7 @@ def search(item, texto, categoria=""):
def peliculas(item, paginacion=True):
logger.info()
url = item.url
-# Descarga la página
-data = scrapertools.cache_page(url)
+data = httptools.downloadpage(url).data
patron = '<li id="ficha-\d+" class="ficha2[^<]+'
patron += '<div class="detalles-ficha"[^<]+'
patron += '<span class="nombre-det">Ficha\: ([^<]+)</span>[^<]+'
@@ -118,22 +115,17 @@ def peliculas(item, paginacion=True):
scrapedtitle = title
scrapedplot = clean_plot(plot)
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = urlparse.urljoin("http://www.bajui2.com/", thumbnail.replace("_m.jpg", "_g.jpg"))
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
# Añade al listado de XBMC
scrapedthumbnail = urlparse.urljoin("http://bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
itemlist.append(
Item(channel=item.channel, action="enlaces", title=scrapedtitle, fulltitle=title, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedtitle, context="4|5",
fanart=item.fanart, viewmode="movie_with_plot"))
# Extrae el paginador
patron = '<a href="([^"]+)" class="pagina pag_sig">Siguiente \&raquo\;</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedurl = urlparse.urljoin("http://www.bajui2.com/", matches[0])
scrapedurl = urlparse.urljoin("http://www.bajui.org/", matches[0])
pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl,
fanart=item.fanart, viewmode="movie_with_plot")
if not paginacion:
@@ -187,7 +179,7 @@ def enlaces(item):
logger.info()
itemlist = []
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
try:
item.plot = scrapertools.get_match(data, '<span class="ficha-descrip">(.*?)</span>')
@@ -197,22 +189,10 @@ def enlaces(item):
try:
item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"')
item.thumbnail = urlparse.urljoin("http://www.bajui2.com/", item.thumbnail)
item.thumbnail = urlparse.urljoin("http://www.bajui.org/", item.thumbnail)
except:
pass
-'''
-<div id="enlaces-34769"><img id="enlaces-cargando-34769" src="/images/cargando.gif" style="display:none;"/></div>
-</li><li id="box-enlace-330690" class="box-enlace">
-<div class="box-enlace-cabecera">
-<div class="datos-usuario"><img class="avatar" src="images/avatars/116305_p.jpg" />Enlaces de:
-<a class="nombre-usuario" href="/usuario/jerobien">jerobien</a> </div>
-<div class="datos-act">Actualizado: Hace 8 minutos</div>
-<div class="datos-boton-mostrar"><a id="boton-mostrar-330690" class="boton" href="javascript:mostrar_enlaces(330690,'b01de63028139fdd348d');">Mostrar enlaces</a></div>
-<div class="datos-servidores"><div class="datos-servidores-cell"><img src="/images/servidores/ul.to.png" title="uploaded.com" border="0" alt="uploaded.com" /><img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/cloudzer.png" title="clz.to" border="0" alt="clz.to" /></div></div>
-</div>
-'''
patron = '<div class="box-enlace-cabecera"[^<]+'
patron += '<div class="datos-usuario"><img class="avatar" src="([^"]+)" />Enlaces[^<]+'
patron += '<a class="nombre-usuario" href="[^"]+">([^<]+)</a[^<]+</div>[^<]+'
@@ -222,20 +202,16 @@ def enlaces(item):
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
logger.debug("matches=" + repr(matches))
for thumbnail, usuario, fecha, id, id2, servidores in matches:
# <img src="/images/servidores/bitshare.png" title="bitshare.com" border="0" alt="bitshare.com" /><img src="/images/servidores/freakshare.net.jpg" title="freakshare.com" border="0" alt="freakshare.com" /><img src="/images/servidores/rapidgator.png" title="rapidgator.net" border="0" alt="rapidgator.net" /><img src="/images/servidores/turbobit.png" title="turbobit.net" border="0" alt="turbobit.net" /><img src="/images/servidores/muchshare.png" title="muchshare.net" border="0" alt="muchshare.net" /><img src="/images/servidores/letitbit.png" title="letitbit.net" border="0" alt="letitbit.net" /><img src="/images/servidores/shareflare.png" title="shareflare.net" border="0" alt="shareflare.net" /><img src="/images/servidores/otros.gif" title="Otros servidores" border="0" alt="Otros" />
patronservidores = '<img src="[^"]+" title="([^"]+)"'
matches2 = re.compile(patronservidores, re.DOTALL).findall(servidores)
lista_servidores = ""
for servidor in matches2:
lista_servidores = lista_servidores + servidor + ", "
lista_servidores = lista_servidores[:-2]
scrapedthumbnail = item.thumbnail
-# http://www.bajui2.com/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
-scrapedurl = "http://www.bajui2.com/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
+scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
scrapedplot = item.plot
scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"
@@ -250,7 +226,7 @@ def enlaces(item):
def findvideos(item):
logger.info()
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.channel = item.channel

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -9,6 +9,7 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
host = "http://www.cartoon-latino.com/"
from channels import autoplay
@@ -24,29 +25,15 @@ list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return lista(item)
"""
def lista_gen(item):
logger.info()
@@ -149,8 +136,7 @@ def episodios(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
@@ -179,35 +165,10 @@ def findvideos(item):
for link in itemla:
if server in link:
url = link.replace('" + ID' + server + ' + "', str(id))
if "drive" in server:
server1 = 'Gvideo'
else:
server1 = server
itemlist.append(item.clone(url=url, action="play", server=server1,
title="Enlace encontrado en %s " % (server1.capitalize())))
itemlist.append(item.clone(url=url, action="play",
title="Enlace encontrado en %s "
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
scrapertools.printMatches(itemlist)
autoplay.start(itemlist, item)
return itemlist
-def play(item):
-logger.info()
-itemlist = []
-# Buscamos video por servidor ...
-devuelve = servertools.findvideosbyserver(item.url, item.server)
-if not devuelve:
-# ...sino lo encontramos buscamos en todos los servidores disponibles
-devuelve = servertools.findvideos(item.url, skip=True)
-if devuelve:
-# logger.debug(devuelve)
-itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
-url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
-return itemlist
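Like anitoonstv above, this channel drops its bespoke play() and leans on servertools.get_servers_itemlist(), which detects each item's server from its URL and then applies the callback to fill in the title. A sketch of how the formatter resolves; found_urls is assumed:

# Sketch of the title-formatter pattern replacing the deleted play() helpers;
# found_urls is a placeholder for the scraped links.
itemlist = [item.clone(url=url, action="play",
                       title="Enlace encontrado en %s ") for url in found_urls]
# get_servers_itemlist() fills item.server from each URL, then the callback
# renders the final label, e.g. "Enlace encontrado en Okru ".
itemlist = servertools.get_servers_itemlist(
    itemlist, lambda i: i.title % i.server.capitalize())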

View File

@@ -55,6 +55,22 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -378,12 +378,14 @@ def newest(categoria):
itemlist = []
item = Item()
try:
-if categoria == 'peliculas':
-item.url = 'http://www.cinecalidad.to'
+if categoria in ['peliculas','latino']:
+item.url = 'http://www.cinecalidad.com'
elif categoria == 'infantiles':
-item.url = 'http://www.cinecalidad.to/genero-peliculas/infantil/'
+item.url = 'http://www.cinecalidad.com/genero-peliculas/infantil/'
elif categoria == 'terror':
-item.url = 'http://www.cinecalidad.to/genero-peliculas/terror/'
+item.url = 'http://www.cinecalidad.com/genero-peliculas/terror/'
+elif categoria == 'castellano':
+item.url = 'http://www.cinecalidad.com/espana/'
itemlist = peliculas(item)
if itemlist[-1].title == 'Página siguiente >>':
itemlist.pop()

View File

@@ -512,7 +512,7 @@ def episodios(item):
else:
action = "menu_info_episode"
-seasons = scrapertools.find_multiple_matches(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
+seasons = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
for i, url in enumerate(seasons):
if i != 0:
data_season = httptools.downloadpage(url, add_referer=True).data

View File

@@ -18,6 +18,14 @@
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
@@ -35,4 +43,4 @@
"visible": true
}
]
}
}

View File

@@ -193,7 +193,7 @@ def newest(categoria):
item = Item()
# categoria='peliculas'
try:
-if categoria == 'peliculas':
+if categoria in ['peliculas','latino']:
item.url = host + 'page/1.html'
elif categoria == 'infantiles':
item.url = host + 'peliculas-de-genero/infantil/1.html'

View File

@@ -123,7 +123,7 @@ def lista(item):
if next_page_url != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
@@ -132,14 +132,18 @@ def findvideos(item):
logger.info()
itemlist = []
+itemlist1 = []
data = httptools.downloadpage(item.url).data
-itemlist.extend(servertools.find_video_items(data=data))
+itemlist1.extend(servertools.find_video_items(data=data))
patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
show = scrapertools.find_single_match(data, patron_show)
-for videoitem in itemlist:
+for videoitem in itemlist1:
videoitem.channel = item.channel
-if config.get_videolibrary_support() and len(itemlist) > 0:
+for i in range(len(itemlist1)):
+if not 'youtube' in itemlist1[i].title:
+itemlist.append(itemlist1[i])
+if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -44,6 +44,30 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",

View File

@@ -28,9 +28,9 @@ def mainlist(item):
itemlist = []
item.viewmode = viewmode
-data = httptools.downloadpage(CHANNEL_HOST).data
-total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
-titulo = "Peliculas"
+data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
+total = scrapertools.find_single_match(data, "Películas</h1><span>(.*?)</span>")
+titulo = "Peliculas (%s)" %total
itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
@@ -90,26 +90,30 @@ def newest(categoria):
if categoria == 'peliculas':
item.url = CHANNEL_HOST
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
elif categoria == 'documentales':
item.url = CHANNEL_HOST + "genero/documental/"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
elif categoria == 'infantiles':
item.url = CHANNEL_HOST + "genero/infantil/"
item.url = CHANNEL_HOST + "genero/animacion/"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
elif categoria == 'terror':
item.url = CHANNEL_HOST + "genero/terror/"
item.action = "peliculas"
elif categoria == 'castellano':
item.url = CHANNEL_HOST + "idioma/espanol/"
item.action = "peliculas"
+elif categoria == 'latino':
+item.url = CHANNEL_HOST + "idioma/latino/"
+item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
@@ -263,15 +267,11 @@ def findvideos(item):
if itemlist:
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# Opción "Añadir esta película a la videoteca"
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, fulltitle = item.fulltitle
))
else:
itemlist.append(item.clone(title="No hay enlaces disponibles", action="", text_color=color3))
return itemlist
@@ -296,6 +296,8 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
url = scrapertools.find_single_match(bloque1, patron)
if "goo.gl" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")
if "drive.php" in url:
scrapedserver = "gvideo"
if "player" in url:
scrapedserver = scrapertools.find_single_match(url, 'player/(\w+)')
if "ok" in scrapedserver: scrapedserver = "okru"
@@ -348,10 +350,10 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
def play(item):
logger.info()
itemlist = []
if "api.cinetux" in item.url or item.server == "okru":
if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url:
data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "")
id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"')
item.url = "https://youtube.googleapis.com/embed/?status=ok&hl=es&allow_embed=1&ps=docs&partnerid=30&hd=1&autoplay=0&cc_load_policy=1&showinfo=0&docid=" + id
item.url = "http://docs.google.com/get_video_info?docid=" + id
if item.server == "okru":
item.url = "https://ok.ru/videoembed/" + id
elif "links" in item.url or "www.cinetux.me" in item.url:
@@ -365,6 +367,9 @@ def play(item):
scrapedurl = httptools.downloadpage(scrapedurl, follow_redirects=False, only_headers=True).headers.get(
"location", "")
item.url = scrapedurl
+item.thumbnail = item.contentThumbnail
+item.server = servertools.get_server_from_url(item.url)
+return [item]
item.server = ""
itemlist.append(item.clone())
itemlist = servertools.get_servers_itemlist(itemlist)
for i in itemlist:
i.thumbnail = i.contentThumbnail
return itemlist

View File

@@ -2,11 +2,15 @@
import re
+from core import filetools
+from core import jsontools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
+from core import videolibrarytools
from core.item import Item
-from platformcode import config, logger
+from platformcode import config, platformtools, logger
host = "http://www.clasicofilm.com/"
# Configuracion del canal
@@ -47,7 +51,6 @@ def mainlist(item):
def configuracion(item):
-from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
@@ -55,13 +58,9 @@ def configuracion(item):
def search(item, texto):
logger.info()
-data = httptools.downloadpage(host).data
-cx = scrapertools.find_single_match(data, "var cx = '([^']+)'")
texto = texto.replace(" ", "%20")
-item.url = "https://www.googleapis.com/customsearch/v1element?key=AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY&rsz=filtered_cse&num=20&hl=es&sig=0c3990ce7a056ed50667fe0c3873c9b6&cx=%s&q=%s&sort=&googlehost=www.google.com&start=0" % (
-cx, texto)
+item.url = host + "search?q=%s" % texto
try:
return busqueda(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -104,7 +103,6 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
-from core import jsontools
data = jsontools.load(data)["feed"]
for entry in data["entry"]:
@@ -133,7 +131,6 @@ def peliculas(item):
tmdb.set_infoLabels(itemlist, __modo_grafico__)
except:
pass
actualpage = int(scrapertools.find_single_match(item.url, 'start-index=(\d+)'))
totalresults = int(data["openSearch$totalResults"]["$t"])
if actualpage + 20 < totalresults:
@@ -146,48 +143,22 @@ def peliculas(item):
def busqueda(item):
logger.info()
itemlist = []
item.text_color = color2
# Descarga la página
data = httptools.downloadpage(item.url).data
-from core import jsontools
-data = jsontools.load(data)
-for entry in data["results"]:
-try:
-title = entry["richSnippet"]["metatags"]["ogTitle"]
-url = entry["richSnippet"]["metatags"]["ogUrl"]
-thumbnail = entry["richSnippet"]["metatags"]["ogImage"]
-except:
-continue
-try:
-title_split = re.split(r"\s*\((\d)", title, 1)
-year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')
-fulltitle = title_split[0]
-except:
-fulltitle = title
-year = ""
-if not "DVD" in title and not "HDTV" in title and not "HD-" in title:
-continue
-infolabels = {'year': year}
-new_item = item.clone(action="findvideos", title=title, fulltitle=fulltitle,
-url=url, thumbnail=thumbnail, infoLabels=infolabels,
-contentTitle=fulltitle, contentType="movie")
-itemlist.append(new_item)
-try:
-tmdb.set_infoLabels(itemlist, __modo_grafico__)
-except:
-pass
-actualpage = int(scrapertools.find_single_match(item.url, 'start=(\d+)'))
-totalresults = int(data["cursor"]["resultCount"])
-if actualpage + 20 <= totalresults:
-url_next = item.url.replace("start=" + str(actualpage), "start=" + str(actualpage + 20))
-itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente", url=url_next))
+patron = """post-title entry-titl.*?href='([^']+)'"""
+patron += """>([^<]+).*?"""
+patron += """src="([^"]+)"""
+matches = scrapertools.find_multiple_matches(data, patron)
+for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
+year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]{4})\)")
+ctitle = scrapedtitle.split("(")[0].strip()
+itemlist.append(item.clone(action = "findvideos",
+contentTitle = ctitle,
+infoLabels = {"year" : year},
+thumbnail = scrapedthumbnail,
+title = scrapedtitle,
+url = scrapedurl
+))
+tmdb.set_infoLabels(itemlist, __modo_grafico__)
return itemlist
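The rewritten busqueda() drops the Google Custom Search API in favor of scraping Blogger's post-title markup directly with the three-group patron. A small self-contained demonstration of that regex (find_multiple_matches is essentially re.findall over the page; the HTML snippet below is made up for the example):

# Demonstration of the new busqueda patron on an illustrative Blogger-style
# snippet; the HTML is fabricated for the example, not taken from the site.
import re

patron = """post-title entry-titl.*?href='([^']+)'"""
patron += """>([^<]+).*?"""
patron += """src="([^"]+)"""

html = ("<h3 class='post-title entry-title'><a href='http://example.com/film'>"
        "Metropolis (1927) HD</a></h3><img src=\"http://example.com/thumb.jpg\">")

for url, title, thumb in re.findall(patron, html, re.DOTALL):
    year = re.search(r"\(([0-9]{4})\)", title).group(1)   # "1927"
    ctitle = title.split("(")[0].strip()                  # "Metropolis"
    print("%s | %s | %s | %s" % (ctitle, year, url, thumb))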
@@ -197,9 +168,10 @@ def generos(item):
# Descarga la página
data = httptools.downloadpage(item.url).data
-patron = '<b>([^<]+)</b><br/>\s*<script src="([^"]+)"'
+patron = '<b>([^<]+)</b><br\s*/>\s*<script src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedtitle, scrapedurl in matches:
+scrapedurl = scrapedurl.replace("&amp;","&")
scrapedurl = scrapedurl.replace("max-results=500", "start-index=1&max-results=20") \
.replace("recentpostslist", "finddatepost")
itemlist.append(Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
@@ -210,13 +182,13 @@ def generos(item):
def findvideos(item):
from core import servertools
if item.infoLabels["tmdb_id"]:
tmdb.set_infoLabels_item(item, __modo_grafico__)
data = httptools.downloadpage(item.url).data
iframe = scrapertools.find_single_match(data, '<iframe src="([^"]+)"')
data = data.replace("googleusercontent","malo") # para que no busque enlaces erroneos de gvideo
if "goo.gl/" in iframe:
data += httptools.downloadpage(iframe, follow_redirects=False, only_headers=True).headers.get("location", "")
itemlist = servertools.find_video_items(item, data)
@@ -226,13 +198,11 @@ def findvideos(item):
title = "Añadir película a la videoteca"
if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
try:
from core import filetools
movie_path = filetools.join(config.get_videolibrary_path(), 'CINE')
files = filetools.walk(movie_path)
for dirpath, dirname, filename in files:
for f in filename:
if item.infoLabels["imdb_id"] in f and f.endswith(".nfo"):
from core import videolibrarytools
head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, f))  # dirname de walk() es una lista, no forma parte de la ruta
canales = it.library_urls.keys()
canales.sort()

View File

@@ -168,11 +168,11 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t|\s{2,}', '', data)
patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)" ' \
'style="width: (.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)"' \
'style="width:(.*?)%.*?<span class="series-title.*?>\s*(.*?)</span>.*?<p class="short-desc".*?>' \
'\s*(.*?)</p>.*?description":"([^"]+)"'
if data.count('class="season-dropdown') > 1:
bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+" title="([^"]+)"(.*?)</ul>')
bloques = scrapertools.find_multiple_matches(data, 'class="season-dropdown[^"]+".*?title="([^"]+)"(.*?)</ul>')
for season, b in bloques:
matches = scrapertools.find_multiple_matches(b, patron)
if matches:
@@ -209,7 +209,6 @@ def episodios(item):
Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumb, media_id=media_id,
server="crunchyroll", text_color=item.text_color, contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, contentType="tvshow"))
return itemlist

View File

@@ -21,6 +21,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}
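These include_in_newest_* switches are read back with config.get_setting (the same accessor this listing uses elsewhere, e.g. for modo_grafico). A hedged sketch of the lookup the Novedades section presumably performs; the helper name is illustrative:

from platformcode import config

def channel_in_newest(channel_id, categoria):
    # el id del ajuste sigue el patrón del JSON de arriba:
    # "include_in_newest_" + categoria ("torrent", "latino", "castellano", ...)
    return config.get_setting("include_in_newest_" + categoria, channel_id)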

View File

@@ -1223,3 +1223,25 @@ def browser(url):
response = r.read()
return response
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = 'http://cuelgame.net/?category=4'
itemlist = scraper(item)
if itemlist[-1].title == "Página siguiente >>":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
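For context, a hedged sketch of how an aggregator would drive these per-channel newest() hooks (dynamic loading with __import__ follows the codebase's own style; the loop itself is an assumption, not code from this repo):

def get_newest(channel_ids, categoria):
    todos = []
    for channel_id in channel_ids:
        # carga dinámica del módulo del canal
        modulo = __import__('channels.%s' % channel_id, fromlist=['channels.%s' % channel_id])
        todos.extend(modulo.newest(categoria))
    return todos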

View File

@@ -0,0 +1,12 @@
{
"id": "danimados",
"name": "Danimados",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://imgur.com/kU5Lx1S.png",
"banner": "https://imgur.com/xG5xqBq.png",
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,203 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
host = "http://www.danimados.com/"
list_servers = ['openload',
'okru',
'rapidvideo'
]
list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="mainpage", title="Categorías", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="mainpage", title="Más Populares", url=host,
thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas Animadas", url=host+"peliculas/",
thumbnail=thumb_series))
autoplay.show_option(item.channel, itemlist)
return itemlist
"""
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return lista(item)
"""
def mainpage(item):
logger.info()
itemlist = []
data1 = httptools.downloadpage(item.url).data
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
if item.title=="Más Populares":
patron_sec='<a class="lglossary" data-type.+?>(.+?)<\/ul>'
patron='<img .+? src="([^"]+)".+?<a href="([^"]+)".+?>([^"]+)<\/a>' #scrapedthumbnail, #scrapedurl, #scrapedtitle
if item.title=="Categorías":
patron_sec='<ul id="main_header".+?>(.+?)<\/ul><\/div>'
patron='<a href="([^"]+)">([^"]+)<\/a>'#scrapedurl, #scrapedtitle
data = scrapertools.find_single_match(data1, patron_sec)
matches = scrapertools.find_multiple_matches(data, patron)
if item.title=="Géneros" or item.title=="Categorías":
for scrapedurl, scrapedtitle in matches:
if "Películas Animadas"!=scrapedtitle:
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="lista"))
return itemlist
else:
for scraped1, scraped2, scrapedtitle in matches:
scrapedthumbnail=scraped1
scrapedurl=scraped2
itemlist.append(
Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
return itemlist
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.title=="Peliculas Animadas":
data_lista = scrapertools.find_single_match(data,
'<div id="archive-content" class="animation-2 items">(.*)<a href=\'')
else:
data_lista = scrapertools.find_single_match(data,
'<div class="items">(.+?)<\/div><\/div><div class=.+?>')
patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
#scrapedthumbnail,#scrapedtitle, #scrapedurl, #scrapedplot
matches = scrapertools.find_multiple_matches(data_lista, patron)
for scrapedthumbnail,scrapedtitle, scrapedurl, scrapedplot in matches:
if item.title=="Peliculas Animadas":
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentType="movie",
plot=scrapedplot, action="findvideos", show=scrapedtitle))
else:
itemlist.append(
item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
context=autoplay.context,plot=scrapedplot, action="episodios", show=scrapedtitle))
if item.title!="Peliculas Animadas":
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data_lista = scrapertools.find_single_match(data,
'<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
show = item.title
patron_caps = '<img src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
#scrapedthumbnail,#scrapedtempepi, #scrapedurl, #scrapedtitle
matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
tempepi=scrapedtempepi.split(" - ")
if tempepi[0]=='Pel':
tempepi[0]=0
title="{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
itemlist.append(Item(channel=item.channel, thumbnail=scrapedthumbnail,
action="findvideos", title=title, url=scrapedurl, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
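A quick trace of the title formatting above with synthetic values (the 'Pel' branch maps movie entries to season 0):

for raw, name in [("1 - 5", "El bosque"), ("Pel - 1", "La pelicula")]:
    tempepi = raw.split(" - ")
    if tempepi[0] == 'Pel':
        tempepi[0] = 0
    print("{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), name))
# 1x05 - (El bosque)
# 0x01 - (La pelicula)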
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = scrapertools.find_single_match(data,
'<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
patron = 'src="(.+?)"'
itemla = scrapertools.find_multiple_matches(data, patron)
for url in itemla:
    # verificar existencia del video antes de ofrecer el enlace
    if verificar_video(url) != 200:
        continue
    server = ''
    if "ok.ru" in url:
        server = 'okru'
    elif "youtube" in url:
        server = 'youtube'
    elif "openload" in url:
        server = 'openload'
    elif "google" in url:
        server = 'gvideo'
    elif "rapidvideo" in url:
        server = 'rapidvideo'
    elif "streamango" in url:
        server = 'streamango'
    if server:
        itemlist.append(item.clone(title="Enlace encontrado en %s" % server.capitalize(),
                                   url=url, action="play", server=server))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentType=="movie" and item.contentChannel!='videolibrary':
itemlist.append(
item.clone(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", contentTitle=item.show))
autoplay.start(itemlist, item)
return itemlist
def verificar_video(url):
    # Comprueba si el enlace sigue disponible usando una sola descarga
    response = httptools.downloadpage(url)
    if response.code == 200:
        # La página puede responder 200 pero avisar de que el vídeo fue eliminado
        removed = scrapertools.find_single_match(response.data, 'removed(.+)')
        if removed:
            return 404
    return 200
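Usage sketch for the checker above (URLs are placeholders): filter dead links before building the item list.

urls = ["https://openload.co/embed/abc123", "https://ok.ru/videoembed/456"]
urls_vivas = [u for u in urls if verificar_video(u) == 200]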

View File

@@ -43,6 +43,14 @@
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -42,12 +42,12 @@ def mainlist(item):
fanart="http://i.imgur.com/ggFFR8o.png"))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search"))
itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
itemlist.append(item.clone(action="setting_channel", title="Configurar canal...", text_color="gold", folder=False))
return itemlist
def configuracion(item):
def setting_channel(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
@@ -108,7 +108,7 @@ def busqueda(item):
def lista(item):
logger.info()
itemlist = []
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/peliculas" % host))
itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host))
@@ -125,7 +125,7 @@ def lista(item):
def lista_series(item):
logger.info()
itemlist = []
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="entradas", url="%s/series/" % host))
itemlist.append(item.clone(title="Miniseries", action="entradas", url="%s/series/miniseries" % host))
@@ -254,7 +254,7 @@ def episodios(item):
return itemlist
def epienlaces(item):
def episode_links(item):
logger.info()
itemlist = []
item.text_color = color3
@@ -286,7 +286,7 @@ def epienlaces(item):
else:
if servertools.is_server_enabled(scrapedserver):
try:
servers_module = __import__("servers." + scrapedserver)
# servers_module = __import__("servers." + scrapedserver)
lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
extra=item.url))
except:
@@ -302,13 +302,14 @@ def epienlaces(item):
def findvideos(item):
logger.info()
if (item.extra and item.extra != "findvideos") or item.path:
return epienlaces(item)
if item.contentSeason != '':
return episode_links(item)
itemlist = []
item.text_color = color3
data = get_data(item.url)
item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
if year:
@@ -346,9 +347,9 @@ def findvideos(item):
patron = 'make_links.*?,[\'"]([^"\']+)["\']'
matches = scrapertools.find_multiple_matches(data_online, patron)
for i, code in enumerate(matches):
enlace = mostrar_enlaces(code)
enlaces = servertools.findvideos(data=enlace[0])
if enlaces and "peliculas.nu" not in enlaces:
enlace = show_links(code)
links = servertools.findvideos(data=enlace[0])
if links and "peliculas.nu" not in links:
if i == 0:
extra_info = scrapertools.find_single_match(data_online, '<span class="tooltiptext">(.*?)</span>')
size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()
@@ -362,8 +363,8 @@ def findvideos(item):
new_item.title += " +INFO"
itemlist.append(new_item)
title = " Ver vídeo en " + enlaces[0][2]
itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))
title = " Ver vídeo en " + links[0][2]
itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
if scriptg:
gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
@@ -419,9 +420,9 @@ def findvideos(item):
continue
if servertools.is_server_enabled(scrapedserver):
try:
servers_module = __import__("servers." + scrapedserver)
# servers_module = __import__("servers." + scrapedserver)
# Saca numero de enlaces
urls = mostrar_enlaces(scrapedurl)
urls = show_links(scrapedurl)
numero = str(len(urls))
titulo = " %s - Nº enlaces: %s" % (titulo, numero)
itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl, server=scrapedserver))
@@ -449,12 +450,13 @@ def play(item):
headers=headers, follow_redirects=False).data
url = scrapertools.find_single_match(data, 'url":"([^"]+)"').replace("\\", "")
if "enlacesmix" in url:
if "enlacesmix" in url or "enlacesws.com" in url:
data = httptools.downloadpage(url, headers={'Referer': item.extra}, follow_redirects=False).data
url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
enlaces = servertools.findvideosbyserver(url, item.server)
if enlaces:
itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
links = servertools.findvideosbyserver(url, item.server)
if links:
itemlist.append(item.clone(action="play", server=links[0][2], url=links[0][1]))
else:
itemlist.append(item.clone())
@@ -465,13 +467,13 @@ def enlaces(item):
logger.info()
itemlist = []
urls = mostrar_enlaces(item.extra)
urls = show_links(item.extra)
numero = len(urls)
for enlace in urls:
enlaces = servertools.findvideos(data=enlace)
if enlaces:
for link in enlaces:
if "/folder/" in enlace:
for url in urls:
links = servertools.findvideos(data=url)
if links:
for link in links:
if "/folder/" in url:
titulo = link[0]
else:
titulo = "%s - Enlace %s" % (item.title.split("-")[0], str(numero))
@@ -482,7 +484,7 @@ def enlaces(item):
return itemlist
def mostrar_enlaces(data):
def show_links(data):
import base64
data = data.split(",")
len_data = len(data)
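The hunk cuts show_links off right after the split. Purely as a hedged guess at the idea — the real body is not shown here — each comma-separated chunk would be base64-decoded into a usable link:

import base64

def show_links_sketch(data):
    # suposición: cada trozo separado por comas es un enlace codificado en base64
    return [base64.b64decode(chunk) for chunk in data.split(",") if chunk]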
@@ -535,3 +537,38 @@ def get_data(url_orig, get_host=False):
break
return response.data
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = host+'/peliculas'
itemlist = entradas(item)
if itemlist[-1].title == ">> Siguiente":
itemlist.pop()
item.url = host + '/series'
itemlist.extend(entradas(item))
if itemlist[-1].title == ">> Siguiente":
itemlist.pop()
item.url = host + '/anime'
itemlist.extend(entradas(item))
if itemlist[-1].title == ">> Siguiente":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -35,6 +35,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -94,7 +94,7 @@ def newest(categoria):
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
if categoria in ['peliculas', 'torrent']:
item.url = "http://www.divxatope1.com/peliculas"
elif categoria == 'series':

View File

@@ -26,6 +26,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1020,3 +1020,26 @@ def ext_size(url):
ext_v = ext_v + " -- No reproducible"
size = ""
return ext_v, size
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = 'http://www.divxtotal.com/peliculas/'
item.contentType="movie"
itemlist = scraper(item)
if itemlist[-1].title == "[COLOR springgreen][B]Siguiente >>[/B][/COLOR]":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -55,6 +55,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -202,7 +202,7 @@ def newest(categoria):
item = Item()
# categoria='peliculas'
try:
if categoria == 'peliculas':
if categoria in ['peliculas', 'latino']:
item.url = host +'peliculas/page/1'
elif categoria == 'infantiles':
item.url = host + 'categoria/animacion/'

View File

@@ -12,5 +12,15 @@
"tvshow",
"documentary",
"vos"
],
"settings":[
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -105,3 +105,24 @@ def play(item):
thumbnail=item.thumbnail, plot=item.plot, folder=False))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = 'http://www.elitetorrent.wesconference.net/categoria/2/peliculas/modo:mini'
itemlist = peliculas(item)
if itemlist[-1].title == "Página siguiente >>":
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -31,6 +31,14 @@
"VOS"
]
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
@@ -56,4 +64,4 @@
"visible": true
}
]
}
}

View File

@@ -457,7 +457,7 @@ def newest(categoria):
item = Item()
# categoria='peliculas'
try:
if categoria == 'peliculas':
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host + 'search?q=animación'

View File

@@ -18,6 +18,22 @@
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -12,10 +12,11 @@ from core import tmdb
from core.item import Item
from platformcode import logger
host = 'http://gnula.mobi/'
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="peliculas", url="http://gnula.mobi/"))
itemlist.append(item.clone(title="Novedades", action="peliculas", url=host))
itemlist.append(item.clone(title="Castellano", action="peliculas",
url="http://www.gnula.mobi/tag/espanol/"))
itemlist.append(item.clone(title="Latino", action="peliculas", url="http://gnula.mobi/tag/latino/"))
@@ -113,3 +114,25 @@ def findvideos(item):
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'castellano':
item.url = host +'tag/espanol/'
elif categoria == 'latino':
item.url = host +'tag/latino/'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,33 +1,33 @@
{
"id": "pasateatorrent",
"name": "PasateaTorrent",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/iLeISt0.png",
"banner": "pasateatorrent.png",
"fanart": "http://imgur.com/uexmGEg.png",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra (TMDB)",
"default": true,
"enabled": true,
"visible": true
}
]
}
{
"id": "grantorrent",
"name": "GranTorrent",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "grantorrent.jpg",
"banner": "grantorrent.png",
"fanart": "grantorrent.png",
"categories": [
"torrent",
"movie",
"tvshow"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra (TMDB)",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,273 +1,273 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "https://pasateatorrent.com/"
dict_url_seasons = dict()
__modo_grafico__ = config.get_setting('modo_grafico', 'pasateatorrent')
def mainlist(item):
logger.info()
thumb_movie = get_thumb("channels_movie.png")
thumb_tvshow = get_thumb("channels_tvshow.png")
itemlist = list()
itemlist.append(
Item(channel=item.channel, title="Peliculas", action="peliculas", thumbnail=thumb_movie))
itemlist.append(
Item(channel=item.channel, title="Series", action="series", thumbnail=thumb_tvshow))
return itemlist
def peliculas(item):
logger.info()
thumb_search = get_thumb("search.png")
itemlist = list()
itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host))
# itemlist.append(item.clone(channel=item.channel, title="Filtrar películas", action="listado", url=host))
itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host, media="película",
thumbnail=thumb_search))
return itemlist
def series(item):
logger.info()
thumb_search = get_thumb("search.png")
itemlist = list()
itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host + "series/"))
# itemlist.append(item.clone(channel=item.channel, title="Filtrar series", action="listado", url=host))
itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host + "series/",
media="serie", thumbnail=thumb_search))
return itemlist
def search(item, texto):
logger.info("texto:" + texto)
texto = texto.replace(" ", "+")
itemlist = []
try:
url = "%s?s=%s&post_type=Buscar+%s" % (item.url, texto, item.media)
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url).data)
# logger.debug("data %s \n\n" % data)
video_section = scrapertools.find_single_match(data, '<div class="contenedor_imagenes">(.*?)</div><center>')
pattern = '<a href="(?P<url>[^"]+)">.*?<img.*?src="(?P<thumb>[^"]+)".*?class="bloque_inferior">' \
'(?P<title>.*?)</div>'
matches = re.compile(pattern, re.DOTALL).findall(video_section)
for url, thumb, title in matches:
if item.media == "serie":
action = "episodios"
else:
action = "findvideos"
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumb,
contentTitle=title, contentType="movie"))
return itemlist
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def listado(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
# logger.debug("data %s \n\n" % data)
video_section = scrapertools.find_single_match(data, '<div class="contenedor_imagenes">(.*?)</div><center>')
# logger.debug("data %s \n\n" % video_section)
pattern = '<a href="(?P<url>[^"]+)">.*?<img.*?src="(?P<thumb>[^"]+)".*?class="bloque_superior">\s*' \
'(?P<quality>.*?)\s*</div>.*?src="(?P<lang>[^"]+)".*?class="bloque_inferior">\s*(?P<title>.*?)\s*' \
'</div>.*?class="div_inferior_date">\s*(?P<date>.*?)\s*</div>'
matches = re.compile(pattern, re.DOTALL).findall(video_section)
for url, thumb, quality, lang, title, date in matches:
title = scrapertools.htmlclean(title)
title = re.sub(r"\s{2}", " ", title)
if "/series" in item.url:
if quality:
title2 = "%s [%s]" % (title, quality)
itemlist.append(Item(channel=item.channel, action="episodios", title=title2, url=url, thumbnail=thumb,
quality=quality, contentTitle=title, contentType="tvshow"))
else:
if quality:
title2 = "%s [%s]" % (title, quality)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title2, url=url, thumbnail=thumb,
quality=quality, contentTitle=title, contentType="movie"))
pagination = scrapertools.find_single_match(data, '<div class="navigation">(.*?)</ul>')
if pagination:
next_page = scrapertools.find_single_match(pagination, 'class="active"><a.*?<a.*?href="([^"]+)')
# logger.debug("next %s" % next_page)
if next_page:
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=next_page,
thumbnail=get_thumb("next.png")))
return itemlist
def episodios(item):
logger.info()
itemlist = []
dict_data = dict()
dict_data, item = get_episodes(item, dict_data)
for key in dict_data.keys():
d = dict_data[key]
quality = "[%s]" % "][".join(d["quality"])
d["s_e"] = re.sub(r"\(Contrase.*?\)\s*", "NO REPRODUCIBLE-RAR", d["s_e"])
title = "%s [%s] %s" % (d["s_e"], d["lang"], quality)
logger.debug("bla %s" % d["s_e"])
if "temporada" in d["s_e"].lower():
regex = re.compile('temporada\s*', re.I)
d["s_e"] = regex.sub("", d["s_e"])
season = scrapertools.find_single_match(d["s_e"], "(\d+)")
episode = 1
else:
season, episode = scrapertools.find_single_match(d["s_e"], "(\d+)&#215;(\d+)")
itemlist.append(item.clone(action="findvideos", title=title, thumbnail=item.thumbnail, url=d["url"],
server="torrent", contentSeason=season, contentEpisodeNumber=episode,
contentType="tvshow", fulltitle=item.title, quality=d["quality"], lang=d["lang"]))
# order list
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
return itemlist
def get_episodes(item, dict_data):
global dict_url_seasons
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
# logger.debug("data %s \n\n" % data)
if item.contentTitle != "":
title = scrapertools.find_single_match(data, '<div class="titulo_page_exit">(.*?)[.]</div>')
year = scrapertools.find_single_match(data, '<div class="ano_page_exit">(\d+)</div>')
# logger.debug("title es %s" % title)
if title:
item.contentTitle = title
item.show = title
if year:
item.infoLabels['year'] = year
links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
# logger.debug("data %s \n\n" % data)
pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<s_e>.*?)</td><td>(?P<quality>.*?)</td><td>' \
'<a href="(?P<url>[^"]+)"'
matches = re.compile(pattern, re.DOTALL).findall(links_section)
for lang, s_e, quality, url in matches:
if s_e + lang not in dict_data:
dict_data[s_e + lang] = {"url": [url], "lang": lang, "s_e": s_e,
"quality": [quality]}
else:
if quality not in dict_data[s_e+lang]["quality"]:
dict_data[s_e + lang]["quality"].append(quality)
dict_data[s_e + lang]["url"].append(url)
url_to_check = scrapertools.find_single_match(links_section, '</table><p><a .*?href="([^"]+)">Temporada.*?</a>')
# logger.debug("url es %s " % url_to_check)
# if url doesn't exist we add it into the dict
if url_to_check not in dict_url_seasons:
dict_url_seasons[url_to_check] = False
for key, value in dict_url_seasons.items():
if not value:
item.url = key
dict_url_seasons[key] = True
dict_data, item = get_episodes(item, dict_data)
# logger.debug("URL_LIST es %s " % dict_url_seasons)
return dict_data, item
def findvideos(item):
logger.info()
itemlist = []
if item.contentType == "movie":
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
# logger.debug("data %s \n\n" % data)
if item.contentTitle != "":
title = scrapertools.find_single_match(data, '<div class="titulo_page_exit">(.*?)[.]</div>')
year = scrapertools.find_single_match(data, '<div class="ano_page_exit">(\d+)</div>')
logger.debug("title es %s" % title)
if title:
item.contentTitle = title
item.show = title
if year:
item.infoLabels['year'] = year
links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
# logger.debug("data %s \n\n" % data)
pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<quality>.*?)</td><td>(?P<size>.*?)</td><td>' \
'<a href="(?P<url>[^"]+)"'
matches = re.compile(pattern, re.DOTALL).findall(links_section)
for lang, quality, size, url in matches:
title = "[%s] [%s] (%s)" % (lang, quality, size)
itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
fulltitle=item.title))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
else:
for index, url in enumerate(item.url):
title = "%sx%s [%s] [%s]" % (item.contentSeason, item.contentEpisodeNumber, item.lang, item.quality[index])
itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
quality=item.quality[index]))
return itemlist
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
host = "https://grantorrent.com/"
dict_url_seasons = dict()
__modo_grafico__ = config.get_setting('modo_grafico', 'grantorrent')
def mainlist(item):
logger.info()
thumb_movie = get_thumb("channels_movie.png")
thumb_tvshow = get_thumb("channels_tvshow.png")
itemlist = list()
itemlist.append(
Item(channel=item.channel, title="Peliculas", action="peliculas", thumbnail=thumb_movie))
itemlist.append(
Item(channel=item.channel, title="Series", action="series", thumbnail=thumb_tvshow))
return itemlist
def peliculas(item):
logger.info()
thumb_search = get_thumb("search.png")
itemlist = list()
itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host))
# itemlist.append(item.clone(channel=item.channel, title="Filtrar películas", action="listado", url=host))
itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host, media="película",
thumbnail=thumb_search))
return itemlist
def series(item):
logger.info()
thumb_search = get_thumb("search.png")
itemlist = list()
itemlist.append(item.clone(channel=item.channel, title="Novedades", action="listado", url=host + "series/"))
# itemlist.append(item.clone(channel=item.channel, title="Filtrar series", action="listado", url=host))
itemlist.append(item.clone(channel=item.channel, title="Buscar", action="search", url=host + "series/",
media="serie", thumbnail=thumb_search))
return itemlist
def search(item, texto):
logger.info("texto:" + texto)
texto = texto.replace(" ", "+")
itemlist = []
try:
url = "%s?s=%s" % (item.url, texto)
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(url).data)
# logger.debug("data %s \n\n" % data)
video_section = scrapertools.find_single_match(data, '<div class="contenedor-imagen">(.*?</div>)</div></div>')
pattern = '<a href="(?P<url>[^"]+)"><img.*?src="(?P<thumb>[^"]+)".*?class="bloque-inferior">' \
'\s*(?P<title>.*?)\s*</div>'
matches = re.compile(pattern, re.DOTALL).findall(video_section)
for url, thumb, title in matches:
if item.media == "serie":
action = "episodios"
else:
action = "findvideos"
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumb,
contentTitle=title, contentType="movie"))
return itemlist
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def listado(item):
logger.info()
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
# logger.debug("data %s \n\n" % data)
video_section = scrapertools.find_single_match(data, '<br><div class="contenedor-home">(.*?</div>)</div></div>')
# logger.debug("data %s \n\n" % video_section)
pattern = '<a href="(?P<url>[^"]+)"><img.*?src="(?P<thumb>[^"]+)".*?.*?class="bloque-superior">\s*' \
'(?P<quality>.*?)\s*<div class="imagen-idioma">\s*<img src=".*?icono_(?P<lang>[^\.]+).*?<div class=' \
'"bloque-inferior">\s*(?P<title>.*?)\s*</div><div class="bloque-date">\s*(?P<date>.*?)\s*</div>'
matches = re.compile(pattern, re.DOTALL).findall(video_section)
for url, thumb, quality, lang, title, date in matches:
title = scrapertools.htmlclean(title)
title = re.sub(r"\s{2}", " ", title)
if "/series" in item.url:
if quality:
title2 = "%s [%s]" % (title, quality)
itemlist.append(Item(channel=item.channel, action="episodios", title=title2, url=url, thumbnail=thumb,
quality=quality, contentTitle=title, contentType="tvshow"))
else:
if quality:
title2 = "%s [%s]" % (title, quality)
itemlist.append(Item(channel=item.channel, action="findvideos", title=title2, url=url, thumbnail=thumb,
quality=quality, contentTitle=title, contentType="movie"))
pagination = scrapertools.find_single_match(data, '<div class="nav-links">(.*?)</ul>')
if pagination:
next_page = scrapertools.find_single_match(pagination, "class='page-numbers current'.*?<a.*?href='([^']+)'")
# logger.debug("next %s" % next_page)
if next_page:
itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente", url=next_page,
thumbnail=get_thumb("next.png")))
return itemlist
def episodios(item):
logger.info()
itemlist = []
dict_data = dict()
dict_data, item = get_episodes(item, dict_data)
for key in dict_data.keys():
d = dict_data[key]
quality = "[%s]" % "][".join(d["quality"])
d["s_e"] = re.sub(r"\(Contrase.*?\)\s*", "NO REPRODUCIBLE-RAR", d["s_e"])
title = "%s [%s] %s" % (d["s_e"], d["lang"], quality)
# logger.debug("%s" % d["s_e"])
if "temporada" in d["s_e"].lower():
regex = re.compile('temporada\s*', re.I)
d["s_e"] = regex.sub("", d["s_e"])
season = scrapertools.find_single_match(d["s_e"], "(\d+)")
episode = 1
else:
season, episode = scrapertools.find_single_match(d["s_e"], "(\d+)&#215;(\d+)")
itemlist.append(item.clone(action="findvideos", title=title, thumbnail=item.thumbnail, url=d["url"],
server="torrent", contentSeason=season, contentEpisodeNumber=episode,
contentType="tvshow", fulltitle=item.title, quality=d["quality"], lang=d["lang"]))
# order list
if len(itemlist) > 1:
itemlist = sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
return itemlist
def get_episodes(item, dict_data):
global dict_url_seasons
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
# logger.debug("data %s \n\n" % data)
if item.contentTitle != "":
title = scrapertools.find_single_match(data, '<h3 class="bold">.*?original:\s*(.*?)[.]</h3>')
year = scrapertools.find_single_match(data, '<h3 class="bold">\s*Estreno:\s*(\d+)[.]</h')
# logger.debug("title es %s" % title)
if title:
item.contentTitle = title
item.show = title
if year:
item.infoLabels['year'] = year
links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
# logger.debug("data %s \n\n" % links_section)
pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<s_e>.*?)</td><td>(?P<quality>.*?)</td><td>' \
'<a class="link" href="(?P<url>[^"]+)"'
matches = re.compile(pattern, re.DOTALL).findall(links_section)
for lang, s_e, quality, url in matches:
if s_e + lang not in dict_data:
dict_data[s_e + lang] = {"url": [url], "lang": lang, "s_e": s_e,
"quality": [quality]}
else:
if quality not in dict_data[s_e+lang]["quality"]:
dict_data[s_e + lang]["quality"].append(quality)
dict_data[s_e + lang]["url"].append(url)
url_to_check = scrapertools.find_single_match(links_section, '</table><p><a.*?href="([^"]+)".*?>\s*Temporada.*?</a>')
# logger.debug("url es %s " % url_to_check)
# if url doesn't exist we add it into the dict
if url_to_check and url_to_check not in dict_url_seasons:
dict_url_seasons[url_to_check] = False
for key, value in dict_url_seasons.items():
if not value:
item.url = key
dict_url_seasons[key] = True
dict_data, item = get_episodes(item, dict_data)
# logger.debug("URL_LIST es %s " % dict_url_seasons)
return dict_data, item
def findvideos(item):
logger.info()
itemlist = []
if item.contentType == "movie":
data = re.sub(r"\n|\r|\t|\s{2,}", "", httptools.downloadpage(item.url).data)
# logger.debug("data %s \n\n" % data)
if item.contentTitle != "":
title = scrapertools.find_single_match(data, '<div class="titulo_page_exit">(.*?)[.]</div>')
year = scrapertools.find_single_match(data, '<div class="ano_page_exit">(\d+)</div>')
logger.debug("title es %s" % title)
if title:
item.contentTitle = title
item.show = title
if year:
item.infoLabels['year'] = year
links_section = scrapertools.find_single_match(data, 'div id="Tokyo" [^>]+>(.*?)</div>')
# logger.debug("data %s \n\n" % data)
pattern = 'icono_.*?png" title="(?P<lang>.*?)" [^>]+></td><td>(?P<quality>.*?)</td><td>(?P<size>.*?)</td><td>' \
'<a class="link" href="(?P<url>[^"]+)"'
matches = re.compile(pattern, re.DOTALL).findall(links_section)
for lang, quality, size, url in matches:
title = "[%s] [%s] (%s)" % (lang, quality, size)
itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
fulltitle=item.title))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
else:
for index, url in enumerate(item.url):
title = "%sx%s [%s] [%s]" % (item.contentSeason, item.contentEpisodeNumber, item.lang, item.quality[index])
itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
quality=item.quality[index]))
return itemlist
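The get_episodes recursion above visits every season page exactly once by tracking URLs in dict_url_seasons. A self-contained toy of that traversal (site graph and names invented):

SITE = {"s1": ["s2"], "s2": ["s1", "s3"], "s3": []}  # página -> enlaces a otras temporadas
pending = {"s1": True}  # url -> ya visitada

def crawl(url, visited_order):
    visited_order.append(url)
    for link in SITE[url]:
        pending.setdefault(link, False)
    for u, seen in list(pending.items()):
        if not seen:
            pending[u] = True
            crawl(u, visited_order)

order = []
crawl("s1", order)
print(order)  # ['s1', 's2', 's3'] -- cada temporada una sola vez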

View File

@@ -310,7 +310,8 @@ def fichas(item):
for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:
thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
#thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")
thumbnail = scrapedthumbnail
language = ''
title = scrapedtitle.strip()
show = title
@@ -692,12 +693,10 @@ def findvideos(item):
fanart = scrapertools.find_single_match(data, '<div style="background-image.url. ([^\s]+)')
if account:
url += "###" + id + ";" + type
it2.append(
item.clone(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=fanart, show=item.show, folder=True, infoLabels=infolabels,
contentTitle=item.title, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
contentTitle=item.show, contentType=item.contentType, tipo=option, tipo1=option1, idioma=idioma))
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for item in it2:

View File

@@ -0,0 +1,7 @@
{
"id": "help",
"name": "Ayuda",
"active": false,
"adult": false,
"language": ["*"]
}

View File

@@ -0,0 +1,224 @@
# -*- coding: utf-8 -*-
import os
import xbmc
from core.item import Item
from platformcode import config, logger, platformtools
from channelselector import get_thumb
if config.is_xbmc():
import xbmcgui
class TextBox(xbmcgui.WindowXMLDialog):
""" Create a skinned textbox window """
def __init__(self, *args, **kwargs):
self.title = kwargs.get('title')
self.text = kwargs.get('text')
self.doModal()
def onInit(self):
try:
self.getControl(5).setText(self.text)
self.getControl(1).setLabel(self.title)
except:
pass
def onClick(self, control_id):
pass
def onFocus(self, control_id):
pass
def onAction(self, action):
# self.close()
if action in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]:
self.close()
def mainlist(item):
logger.info()
itemlist = []
if config.is_xbmc():
itemlist.append(Item(channel=item.channel, action="", title="FAQ:",
thumbnail=get_thumb("help.png"),
folder=False))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Cómo reportar un error?",
thumbnail=get_thumb("help.png"),
folder=False, extra="report_error"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Se pueden activar/desactivar los canales?",
thumbnail=get_thumb("help.png"),
folder=False, extra="onoff_canales"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible la sincronización automática con Trakt?",
thumbnail=get_thumb("help.png"),
folder=False, extra="trakt_sync"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - ¿Es posible mostrar todos los resultados juntos en el buscador global?",
thumbnail=get_thumb("help.png"),
folder=False, extra="buscador_juntos"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces tardan en aparecer.",
thumbnail=get_thumb("help.png"),
folder=False, extra="tiempo_enlaces"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - La búsqueda de contenido no se hace correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_busquedacont"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Algún canal no funciona correctamente.",
thumbnail=get_thumb("help.png"),
folder=False, extra="canal_fallo"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Los enlaces Torrent no funcionan.",
thumbnail=get_thumb("help.png"),
folder=False, extra="prob_torrent"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - No se actualiza correctamente la videoteca.",
thumbnail=get_thumb("help.png"),
folder=True, extra="prob_bib"))
itemlist.append(Item(channel=item.channel, action="faq",
title=" - Enlaces de interés",
thumbnail=get_thumb("help.png"),
folder=False, extra=""))
return itemlist
def faq(item):
if item.extra == "onoff_canales":
respuesta = platformtools.dialog_yesno("Alfa",
"Esto se puede hacer en 'Configuración'>'Activar/Desactivar canales'. "
"Puedes activar/desactivar los canales uno por uno o todos a la vez. ",
"¿Deseas gestionar ahora los canales?")
if respuesta == 1:
from channels import setting
setting.conf_tools(Item(extra='channels_onoff'))
elif item.extra == "trakt_sync":
respuesta = platformtools.dialog_yesno("Alfa",
"Actualmente se puede activar la sincronización (silenciosa) "
"tras marcar como visto un episodio (esto se hace automáticamente). "
"Esta opción se puede activar en 'Configuración'>'Ajustes "
"de la videoteca'.",
"¿Deseas acceder a dichos ajustes?")
if respuesta == 1:
from channels import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))
elif item.extra == "tiempo_enlaces":
respuesta = platformtools.dialog_yesno("Alfa",
"Esto puede mejorarse limitando el número máximo de "
"enlaces o mostrandolos en una ventana emergente. "
"Estas opciones se encuentran en 'Configuración'>'Ajustes "
"de la videoteca'.",
"¿Deseas acceder a dichos ajustes?")
if respuesta == 1:
from channels import videolibrary
videolibrary.channel_config(Item(channel='videolibrary'))
elif item.extra == "prob_busquedacont":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puede que no hayas escrito la ruta de la librería correctamente en "
"'Configuración'>'Preferencias'.\n"
"La ruta específicada debe ser exactamente la misma de la 'fuente' "
"introducida en 'Archivos' de la videoteca de Kodi.\n"
"AVANZADO: Esta ruta también se encuentra en 'sources.xml'.\n"
"También puedes estar experimentando problemas por estar "
"usando algun fork de Kodi y rutas con 'special://'. "
"SPMC, por ejemplo, tiene problemas con esto, y no parece tener solución, "
"ya que es un problema ajeno a Alfa que existe desde hace mucho.\n"
"Puedes intentar subsanar estos problemas en 'Configuración'>'Ajustes de "
"la videoteca', cambiando el ajuste 'Realizar búsqueda de contenido en' "
"de 'La carpeta de cada serie' a 'Toda la videoteca'."
"También puedes acudir a 'http://alfa-addon.com' en busca de ayuda.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "canal_fallo":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puede ser que la página web del canal no funcione. "
"En caso de que funcione la página web puede que no seas el primero"
" en haberlo visto y que el canal este arreglado. "
"Puedes mirar en 'alfa-addon.com' o en el "
"repositorio de GitHub (github.com/alfa-addon/addon). "
"Si no encuentras el canal arreglado puedes reportar un "
"problema en el foro.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "prob_bib":
platformtools.dialog_ok("Alfa",
"Puede ser que hayas actualizado el plugin recientemente "
"y que las actualizaciones no se hayan aplicado del todo "
"bien. Puedes probar en 'Configuración'>'Otras herramientas', "
"comprobando los archivos *_data.json o "
"volviendo a añadir toda la videoteca.")
respuesta = platformtools.dialog_yesno("Alfa",
"¿Deseas acceder ahora a esa seccion?")
if respuesta == 1:
itemlist = []
from channels import setting
new_item = Item(channel="setting", action="submenu_tools", folder=True)
itemlist.extend(setting.submenu_tools(new_item))
return itemlist
elif item.extra == "prob_torrent":
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Puedes probar descargando el modulo 'libtorrent' de Kodi o "
"instalando algun addon como 'Quasar' o 'Torrenter', "
"los cuales apareceran entre las opciones de la ventana emergente "
"que aparece al pulsar sobre un enlace torrent. "
"'Torrenter' es más complejo pero también más completo "
"y siempre funciona.")
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
elif item.extra == "buscador_juntos":
respuesta = platformtools.dialog_yesno("Alfa",
"Si. La opcion de mostrar los resultados juntos "
"o divididos por canales se encuentra en "
"'setting'>'Ajustes del buscador global'>"
"'Otros ajustes'.",
"¿Deseas acceder a ahora dichos ajustes?")
if respuesta == 1:
from channels import search
search.settings("")
elif item.extra == "report_error":
if config.get_platform(True)['num_version'] < 14:
log_name = "xbmc.log"
else:
log_name = "kodi.log"
ruta = xbmc.translatePath("special://logpath") + log_name
title = "Alfa - FAQ - %s" % item.title[6:]
text = ("Para reportar un problema en 'http://alfa-addon.com' es necesario:\n"
" - Versión que usas de Alfa.\n"
" - Versión que usas de kodi, mediaserver, etc.\n"
" - Versión y nombre del sistema operativo que usas.\n"
" - Nombre del skin (en el caso que uses Kodi) y si se "
"te ha resuelto el problema al usar el skin por defecto.\n"
" - Descripción del problema y algún caso de prueba.\n"
" - Agregar el log en modo detallado, una vez hecho esto, "
"zipea el log y lo puedes adjuntar en un post.\n\n"
"Para activar el log en modo detallado, ingresar a:\n"
" - Configuración.\n"
" - Preferencias.\n"
" - En la pestaña General - Marcar la opción: Generar log detallado.\n\n"
"El archivo de log detallado se encuentra en la siguiente ruta: \n\n"
"%s" % ruta)
return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
else:
platformtools.dialog_ok("Alfa",
"Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\n"
"Si tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.com")

File diff suppressed because it is too large

View File

@@ -48,6 +48,22 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -265,14 +265,25 @@ def newest(categoria):
logger.info()
itemlist = []
item = Item()
item.page = 0
try:
if categoria == "terror":
item.url = host +"/listado/terror/"
item.action = "updated"
item.page = 0
itemlist = updated(item)
itemlist = updated(item)
elif categoria == 'castellano':
item.url = host + "/estrenos/es/"
item.action = "entradas"
if itemlist[-1].action == "updated":
elif categoria == 'latino':
item.url = host + "/estrenos/la/"
item.action = "entradas"
if categoria != 'terror':
itemlist = entradas(item)
if itemlist[-1].action == item.action:
itemlist.pop()
# Se captura la excepción, para no interrumpir al canal novedades si un canal falla

View File

@@ -20,6 +20,14 @@
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -12,7 +12,7 @@ from core.item import Item
from core.tmdb import Tmdb
from platformcode import logger
host = "http://www.mejortorrent.com"
host = "https://mejortorrent.website"
def mainlist(item):
@@ -29,19 +29,19 @@ def mainlist(item):
thumb_buscar = get_thumb("search.png")
itemlist.append(Item(channel=item.channel, title="Peliculas", action="getlist",
url="http://www.mejortorrent.com/torrents-de-peliculas.html", thumbnail=thumb_pelis))
url= host + "/torrents-de-peliculas.html", thumbnail=thumb_pelis))
itemlist.append(Item(channel=item.channel, title="Peliculas HD", action="getlist",
url="http://www.mejortorrent.com/torrents-de-peliculas-hd-alta-definicion.html",
url= host + "/torrents-de-peliculas-hd-alta-definicion.html",
thumbnail=thumb_pelis_hd))
itemlist.append(Item(channel=item.channel, title="Series", action="getlist",
url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series))
url= host + "/torrents-de-series.html", thumbnail=thumb_series))
itemlist.append(Item(channel=item.channel, title="Series HD", action="getlist",
url="http://www.mejortorrent.com/torrents-de-series-hd-alta-definicion.html",
url= host + "/torrents-de-series-hd-alta-definicion.html",
thumbnail=thumb_series_hd))
itemlist.append(Item(channel=item.channel, title="Series Listado Alfabetico", action="listalfabetico",
url="http://www.mejortorrent.com/torrents-de-series.html", thumbnail=thumb_series_az))
url= host + "/torrents-de-series.html", thumbnail=thumb_series_az))
itemlist.append(Item(channel=item.channel, title="Documentales", action="getlist",
url="http://www.mejortorrent.com/torrents-de-documentales.html", thumbnail=thumb_docus))
url= host + "/torrents-de-documentales.html", thumbnail=thumb_docus))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", thumbnail=thumb_buscar))
return itemlist
@@ -55,10 +55,10 @@ def listalfabetico(item):
for letra in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z']:
itemlist.append(Item(channel=item.channel, action="getlist", title=letra,
url="http://www.mejortorrent.com/series-letra-" + letra.lower() + ".html"))
url= host + "/series-letra-" + letra.lower() + ".html"))
itemlist.append(Item(channel=item.channel, action="getlist", title="Todas",
url="http://www.mejortorrent.com/series-letra..html"))
url= host + "/series-letra..html"))
return itemlist
@@ -67,7 +67,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.mejortorrent.com/secciones.php?sec=buscador&valor=%s" % (texto)
item.url = host + "/secciones.php?sec=buscador&valor=%s" % (texto)
try:
return buscador(item)
@@ -81,30 +81,12 @@ def search(item, texto):
def buscador(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# pelis
# <a href="/peli-descargar-torrent-9578-Presentimientos.html">
# <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
#
# series
#
# <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
# <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
#
# docs
#
# <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
# <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>
# busca series
patron = "<a href='(/serie-descargar-torrent[^']+)'[^>]+>(.*?)</a>"
patron += ".*?<span style='color:gray;'>([^']+)</span>"
patron_enlace = "/serie-descargar-torrents-\d+-\d+-(.*?)\.html"
matches = scrapertools.find_multiple_matches(data, patron)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode(
@@ -119,10 +101,7 @@ def buscador(item):
# busca pelis
patron = "<a href='(/peli-descargar-torrent-[^']+)'[^>]+>(.*?)</a>"
patron_enlace = "/peli-descargar-torrent-\d+(.*?)\.html"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle in matches:
title = scrapertools.remove_htmltags(scrapedtitle).decode('iso-8859-1').encode('utf-8')
url = urlparse.urljoin(item.url, scrapedurl)
@@ -135,10 +114,7 @@ def buscador(item):
patron += "<font Color='darkblue'>(.*?)</font>.*?"
patron += "<td align='right' width='20%'>(.*?)</td>"
patron_enlace = "/doc-descargar-torrent-\d+-\d+-(.*?)\.html"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedtitle, scrapedinfo in matches:
title = scrapedtitle.decode('iso-8859-1').encode('utf8') + " " + scrapedinfo.decode('iso-8859-1').encode('utf8')
url = urlparse.urljoin(item.url, scrapedurl)
@@ -154,23 +130,7 @@ def buscador(item):
def getlist(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# pelis
# <a href="/peli-descargar-torrent-9578-Presentimientos.html">
# <img src="/uploads/imagenes/peliculas/Presentimientos.jpg" border="1"></a
#
# series
#
# <a href="/serie-descargar-torrents-11589-11590-Ahora-o-nunca-4-Temporada.html">
# <img src="/uploads/imagenes/series/Ahora o nunca4.jpg" border="1"></a>
#
# docs
#
# <a href="/doc-descargar-torrent-1406-1407-El-sueno-de-todos.html">
# <img border="1" src="/uploads/imagenes/documentales/El sueno de todos.jpg"></a>
if item.url.find("peliculas") > -1:
patron = '<a href="(/peli-descargar-torrent[^"]+)">[^<]+'
patron += '<img src="([^"]+)"[^<]+</a>'
@@ -202,27 +162,18 @@ def getlist(item):
action = "episodios"
folder = True
extra = "docus"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl, scrapedthumbnail in matches:
title = scrapertools.get_match(scrapedurl, patron_enlace)
title = title.replace("-", " ")
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = urlparse.urljoin(item.url, urllib.quote(scrapedthumbnail))
thumbnail = host + urllib.quote(scrapedthumbnail)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, thumbnail=thumbnail, plot=plot,
folder=folder, extra=extra))
matches = re.compile(patron_title, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
# Cambia el título sacado de la URL por un título con más información.
# esta implementación asume que va a encontrar las mismas coincidencias
# que en el bucle anterior, lo cual técnicamente es erróneo, pero que
# funciona mientras no cambien el formato de la página
cnt = 0
for scrapedtitle, notused, scrapedinfo in matches:
title = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
@@ -244,7 +195,6 @@ def getlist(item):
# Extrae el paginador
patronvideos = "<a href='([^']+)' class='paginar'> Siguiente >>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
@@ -267,18 +217,11 @@ def episodios(item):
item.thumbnail = scrapertools.find_single_match(data,
"src='http://www\.mejortorrent\.com(/uploads/imagenes/" + tabla + "/[a-zA-Z0-9_ ]+.jpg)'")
item.thumbnail = host + urllib.quote(item.thumbnail)
# <form name='episodios' action='secciones.php?sec=descargas&ap=contar_varios' method='post'>
data = scrapertools.get_match(data,
"<form name='episodios' action='secciones.php\?sec=descargas\&ap=contar_varios' method='post'>(.*?)</form>")
'''
<td bgcolor='#C8DAC8' style='border-bottom:1px solid black;'><a href='/serie-episodio-descargar-torrent-18741-Juego-de-tronos-4x01.html'>4x01 - Episodio en V.O. Sub Esp.</a></td>
<td width='120' bgcolor='#C8DAC8' align='right' style='border-right:1px solid black; border-bottom:1px solid black;'><div style='color:#666666; font-size:9px; margin-right:5px;'>Fecha: 2014-04-07</div></td>
<td width='60' bgcolor='#F1F1F1' align='center' style='border-bottom:1px solid black;'>
<input type='checkbox' name='episodios[1]' value='18741'>
'''
if item.extra == "series":
patron = "<td bgcolor[^>]+><a[^>]+>([^>]+)</a></td>[^<]+"
else:
@@ -289,7 +232,6 @@ def episodios(item):
patron += "<input type='checkbox' name='([^']+)' value='([^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
tmdb_title = re.sub(r'(\s*-\s*)?\d+.*?\s*Temporada|(\s*-\s*)?\s*Miniserie\.?|\(.*\)|\[.*\]', '', item.title).strip()
logger.debug('tmdb_title=' + tmdb_title)
@@ -306,7 +248,7 @@ def episodios(item):
title = scrapedtitle + " (" + fecha + ")"
url = "http://www.mejortorrent.com/secciones.php?sec=descargas&ap=contar_varios"
url = host + "/secciones.php?sec=descargas&ap=contar_varios"
# "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol+-+2%AA+Temporada"
post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo})
logger.debug("post=" + post)
@@ -370,20 +312,15 @@ def show_movie_info(item):
patron = "<a href='(secciones.php\?sec\=descargas[^']+)'"
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl in matches:
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("title=[" + item.title + "], url=[" + url + "], thumbnail=[" + item.thumbnail + "]")
torrent_data = httptools.downloadpage(url).data
logger.debug("torrent_data=" + torrent_data)
# <a href='/uploads/torrents/peliculas/los-juegos-del-hambre-brrip.torrent'>
link = scrapertools.get_match(torrent_data, "<a href='(/uploads/torrents/peliculas/.*?\.torrent)'>")
link = urlparse.urljoin(url, link)
logger.debug("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, fanart=item.fanart, folder=False))
@@ -402,24 +339,35 @@ def play(item):
data = httptools.downloadpage(item.url, post=item.extra).data
logger.debug("data=" + data)
# series
#
# <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-01_02.torrent"
# <a href="http://www.mejortorrent.com/uploads/torrents/series/falling-skies-2-03.torrent"
#
# docs
#
# <a href="http://www.mejortorrent.com/uploads/torrents/documentales/En_Suenyos_De_Todos_DVDrip.torrent">El sue–o de todos. </a>
params = dict(urlparse.parse_qsl(item.extra))
patron = '<a href="(http://www.mejortorrent.com/uploads/torrents/' + params["tabla"] + '/.*?\.torrent)"'
link = scrapertools.get_match(data, patron)
logger.info("link=" + link)
itemlist.append(Item(channel=item.channel, action="play", server="torrent", title=item.title, url=link,
thumbnail=item.thumbnail, plot=item.plot, folder=False))
return itemlist
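A note on the play() flow above: item.extra is the url-encoded POST string assembled in episodios(), so urlparse.parse_qsl recovers the tabla field that selects the /uploads/torrents/<tabla>/ path matched by the patron. A quick standalone check (Python 2, matching the codebase; sample string shortened from the comment in episodios()):

# Round trip of the POST string built in episodios().
import urlparse

extra = "episodios%5B1%5D=11744&total_capis=5&tabla=series&titulo=Sea+Patrol"
params = dict(urlparse.parse_qsl(extra))
print params["tabla"]   # -> series  (picks /uploads/torrents/series/...)
print params["titulo"]  # -> Sea Patrol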
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = host + "/torrents-de-peliculas.html"
itemlist = getlist(item)
if itemlist[-1].title == "Pagina siguiente >>":
itemlist.pop()
item.url = host + "/torrents-de-series.html"
itemlist.extend(getlist(item))
if itemlist[-1].title == "Pagina siguiente >>":
itemlist.pop()
# Catch the exception so one failing channel does not break the 'novedades' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
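This newest(categoria) hook is the same shape every channel touched in this changeset adds: map the requested category to a listing URL, reuse the channel's own listing function, drop the trailing pager item, and swallow any exception so one broken scraper cannot take down the aggregated 'novedades' view. A stripped-down sketch of the contract (host, listing() and the pager label below are placeholders, not names from this changeset):

# Sketch of the shared newest() contract; placeholders, not real channel code.
from core.item import Item
from platformcode import logger

host = "http://example-channel.com"      # placeholder channel global

def listing(item):                       # placeholder for the channel's lister
    return []

def newest(categoria):
    itemlist = []
    item = Item()
    try:
        if categoria == 'torrent':
            item.url = host + "/torrents-de-peliculas.html"
            itemlist = listing(item)
            if itemlist and itemlist[-1].title == "Pagina siguiente >>":
                itemlist.pop()           # the news view does its own paging
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []                        # never propagate: one bad channel must not break the rest
    return itemlist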

View File

@@ -19,6 +19,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1555,3 +1555,28 @@ def busqueda(item):
from channels import search
return search.do_search(new_item, cat)
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = 'http://www.miltorrents.com'
itemlist = peliculas(item)
if itemlist[-1].title == "[COLOR khaki]siguiente[/COLOR]":
itemlist.pop()
item.url = 'http://www.miltorrents.com/series'
itemlist.extend(peliculas(item))
if itemlist[-1].title == "[COLOR khaki]siguiente[/COLOR]":
itemlist.pop()
# Catch the exception so one failing channel does not break the 'novedades' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -18,6 +18,14 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
@@ -35,4 +43,4 @@
"visible": true
}
]
}

View File

@@ -319,61 +319,34 @@ def findvideos(item):
duplicados = []
data = get_source(item.url)
src = data
patron = 'id=(?:div|player)(\d+)>.*?data-lazy-src=(.*?) scrolling'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, videoitem in matches:
lang = scrapertools.find_single_match(src,
'<a href=#(?:div|player)%s.*?>.*?(.*?)<\/a>' % option)
if 'audio ' in lang.lower():
lang=lang.lower().replace('audio ','')
lang=lang.capitalize()
data = get_source(videoitem)
if 'play' in videoitem:
url = scrapertools.find_single_match(data, '<span>Ver Online<.*?<li><a href=(.*?)><span class=icon>')
else:
url = scrapertools.find_single_match(data, '<iframe src=(.*?) scrolling=')
video_urls = scrapertools.find_multiple_matches(data, '<li><a href=(.*?)><span')
for video in video_urls:
video_data = get_source(video)
if not 'fastplay' in video:
new_url= scrapertools.find_single_match(video_data,'<li><a href=(.*?srt)><span')
data_final = get_source(new_url)
else:
data_final=video_data
url = scrapertools.find_single_match(data_final,'iframe src=(.*?) scrolling')
quality = item.quality
server = servertools.get_server_from_url(url)
title = item.contentTitle + ' [%s] [%s]' % (server, lang)
if item.quality != '':
title = item.contentTitle + ' [%s] [%s] [%s]' % (server, quality, lang)
if url!='':
itemlist.append(item.clone(title=title, url=url, action='play', server=server, language=lang))
if item.infoLabels['mediatype'] == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
@@ -409,7 +382,7 @@ def newest(categoria):
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host + 'page/1/?s'
elif categoria == 'infantiles':

View File

@@ -0,0 +1,12 @@
{
"id": "mundiseries",
"name": "Mundiseries",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://imgur.com/GdGMFi1.png",
"banner": "https://imgur.com/1bDbYY1.png",
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import filtertools
from platformcode import config, logger
from platformcode import platformtools
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import autoplay
host = "http://mundiseries.com"
list_servers = ['okru']
list_quality = ['default']
def mainlist(item):
logger.info()
itemlist = list()
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=urlparse.urljoin(host, "/lista-de-series")))
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="ver ([^"]+) online'
matches = scrapertools.find_multiple_matches(data, patron)
for link, thumbnail, name in matches:
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail, action="temporada"))
return itemlist
def temporada(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
logger.info("preon,:"+data)
patron = '<a href="([^"]+)"><div class="item-temporada"><img alt=".+?" src="([^"]+)"><div .+?>Ver ([^"]+)<\/div><\/a>'
matches = scrapertools.find_multiple_matches(data, patron)
for link,thumbnail,name in matches:
itemlist.append(item.clone(title=name, url=host+link, thumbnail=host+thumbnail,action="episodios",context=autoplay.context))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron_caps = 'href="http:.+?\/mundiseries.+?com([^"]+)" alt="([^"]+) Capitulo ([^"]+) Temporada ([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron_caps)
patron_show='<h1 class="h-responsive center">.+?'
patron_show+='<font color=".+?>([^"]+)<\/a><\/font>'
show = scrapertools.find_single_match(data,patron_show)
for link, name, cap, temp in matches:
cap = cap.replace('|', '')
temp = temp.replace('|', '')
name = name.replace('|', '')
title = "%sx%s %s" % (temp, str(cap).zfill(2), name)
url=host+link
itemlist.append(Item(channel=item.channel, action="findvideos",
title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir Temporada/Serie a la biblioteca de Kodi[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel = item.channel
autoplay.start(itemlist, item)
return itemlist
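For context on findvideos() above: servertools.find_video_items scans raw HTML for URLs of known video hosts and returns playable Items with server already set, so the channel only needs to stamp its own name on them before handing the list to autoplay. A minimal sketch of that flow (resolve() and the arguments are placeholders; assumes the alfa framework modules are importable):

# Sketch: the hoster-resolution flow used by findvideos() above.
from core import httptools, servertools

def resolve(page_url, channel_name):
    html = httptools.downloadpage(page_url).data
    items = servertools.find_video_items(data=html)  # detect known video hosts
    for it in items:
        it.channel = channel_name                    # route play() back to the channel
    return items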

View File

@@ -20,6 +20,14 @@
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -442,3 +442,29 @@ def search(item, texto):
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
item.extra = 'pelilist'
if categoria == 'torrent':
item.url = host+'peliculas/'
itemlist = listado(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
item.url = host+'series/'
itemlist.extend(listado(item))
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
# Catch the exception so one failing channel does not break the 'novedades' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -74,6 +74,26 @@ def mainlist(item):
set_category_context(new_item)
itemlist.append(new_item)
# if list_canales['Castellano']:
thumbnail = get_thumb("channels_spanish.png")
new_item = Item(channel=item.channel, action="novedades", extra="castellano", title="Castellano",
thumbnail=thumbnail)
set_category_context(new_item)
itemlist.append(new_item)
# if list_canales['Latino']:
thumbnail = get_thumb("channels_latino.png")
new_item = Item(channel=item.channel, action="novedades", extra="latino", title="Latino",
thumbnail=thumbnail)
set_category_context(new_item)
itemlist.append(new_item)
# if list_canales['Torrent']:
thumbnail = get_thumb("channels_torrent.png")
new_item = Item(channel=item.channel, action="novedades", extra="torrent", title="Torrent", thumbnail=thumbnail)
set_category_context(new_item)
itemlist.append(new_item)
#if list_canales['documentales']:
thumbnail = get_thumb("channels_documentary.png")
new_item = Item(channel=item.channel, action="novedades", extra="documentales", title="Documentales",
@@ -95,7 +115,8 @@ def set_category_context(item):
def get_channels_list():
logger.info()
list_canales = {'peliculas': [], 'terror': [], 'infantiles': [], 'series': [], 'anime': [],
'castellano': [], 'latino': [], 'torrent': [], 'documentales': []}
any_active = False
# Rellenar listas de canales disponibles
channels_path = os.path.join(config.get_runtime_path(), "channels", '*.json')
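The three new keys line up with the include_in_newest_castellano / _latino / _torrent booleans added to channel JSON manifests throughout this changeset; presumably a channel lands in a category only when its matching setting is enabled. A hedged sketch of that lookup (the setting-id convention is inferred from the JSON files, and include_in_category is a hypothetical helper, not code from this changeset):

# Sketch: per-channel news-category filter. The id convention
# (include_in_newest_<categoria>) is inferred from the JSON manifests;
# config.get_setting(name, channel) is assumed to be the existing API.
from platformcode import config

def include_in_category(channel_id, categoria):
    return bool(config.get_setting("include_in_newest_" + categoria, channel_id))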
@@ -419,6 +440,16 @@ def menu_opciones(item):
title=" - Episodios de anime",
thumbnail=get_thumb("channels_anime.png"),
folder=False))
itemlist.append(
Item(channel=item.channel, action="setting_channel", extra="castellano", title=" - Castellano",
thumbnail=get_thumb("channels_documentary.png"), folder=False))
itemlist.append(Item(channel=item.channel, action="setting_channel", extra="latino", title=" - Latino",
thumbnail=get_thumb("channels_documentary.png"), folder=False))
itemlist.append(Item(channel=item.channel, action="setting_channel", extra="Torrent", title=" - Torrent",
thumbnail=get_thumb("channels_documentary.png"), folder=False))
itemlist.append(Item(channel=item.channel, action="setting_channel", extra="documentales",
title=" - Documentales",
thumbnail=get_thumb("channels_documentary.png"),

View File

@@ -18,6 +18,14 @@
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
@@ -35,4 +43,4 @@
"visible": true
}
]
}

View File

@@ -183,7 +183,7 @@ def newest(categoria):
logger.info()
item = Item()
try:
if categoria == 'peliculas':
if categoria in ['peliculas','latino']:
item.url = host + '/release/2017/'
elif categoria == 'infantiles':

View File

@@ -10,13 +10,45 @@
"movie"
],
"settings": [
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -31,6 +31,29 @@ def mainlist(item):
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = HOST
elif categoria == 'infantiles':
item.url = HOST + '/genero/animacion.html'
elif categoria == 'terror':
item.url = HOST + '/genero/terror.html'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def peliculas(item):
logger.info()

View File

@@ -18,6 +18,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -2,6 +2,7 @@
import re
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,74 +11,29 @@ from platformcode import logger
from platformcode import config
from core import tmdb
try:
import xbmc
import xbmcgui
except:
pass
import unicodedata
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5
host = "http://www.peliculasdk.com/"
def bbcode_kodi2html(text):
if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"):
import re
text = re.sub(r'\[COLOR\s([^\]]+)\]',
r'<span style="color: \1">',
text)
text = text.replace('[/COLOR]', '</span>')
text = text.replace('[CR]', '<br>')
text = text.replace('[B]', '<b>')
text = text.replace('[/B]', '</b>')
text = text.replace('"color: yellow"', '"color: gold"')
text = text.replace('"color: white"', '"color: auto"')
return text
host = "http://www.peliculasdk.com"
def mainlist(item):
logger.info()
itemlist = []
title = "Estrenos"
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/ver/estrenos",
Item(channel=item.channel, title="[COLOR orange]Estrenos[/COLOR]", action="peliculas", url= host + "/ver/estrenos",
fanart="http://s24.postimg.org/z6ulldcph/pdkesfan.jpg",
thumbnail="http://s16.postimg.org/st4x601d1/pdkesth.jpg"))
title = "PelisHd"
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-720/",
Item(channel=item.channel, title="[COLOR orange]PelisHd[/COLOR]", action="peliculas", url= host + "/calidad/HD-720/",
fanart="http://s18.postimg.org/wzqonq3w9/pdkhdfan.jpg",
thumbnail="http://s8.postimg.org/nn5669ln9/pdkhdthu.jpg"))
title = "Pelis HD-Rip"
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/calidad/HD-320",
Item(channel=item.channel, title="[COLOR orange]Pelis HD-Rip[/COLOR]", action="peliculas", url= host + "/calidad/HD-320",
fanart="http://s7.postimg.org/3pmnrnu7f/pdkripfan.jpg",
thumbnail="http://s12.postimg.org/r7re8fie5/pdkhdripthub.jpg"))
title = "Pelis Audio español"
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="peliculas", url="http://www.peliculasdk.com/idioma/Espanol/",
Item(channel=item.channel, title="[COLOR orange]Pelis Audio español[/COLOR]", action="peliculas", url= host + "/idioma/Espanol/",
fanart="http://s11.postimg.org/65t7bxlzn/pdkespfan.jpg",
thumbnail="http://s13.postimg.org/sh1034ign/pdkhsphtub.jpg"))
title = "Buscar..."
title = title.replace(title, bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]"))
itemlist.append(
Item(channel=item.channel, title=title, action="search", url="http://www.peliculasdk.com/calidad/HD-720/",
Item(channel=item.channel, title="[COLOR orange]Buscar...[/COLOR]", action="search", url= host + "/calidad/HD-720/",
fanart="http://s14.postimg.org/ceqajaw2p/pdkbusfan.jpg",
thumbnail="http://s13.postimg.org/o85gsftyv/pdkbusthub.jpg"))
@@ -88,7 +44,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://www.peliculasdk.com/index.php?s=%s&x=0&y=0" % (texto)
item.url = host + "/index.php?s=%s&x=0&y=0" % (texto)
try:
return buscador(item)
@@ -103,11 +59,8 @@ def search(item, texto):
def buscador(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div class="karatula".*?'
patron += 'src="([^"]+)".*?'
patron += '<div class="tisearch"><a href="([^"]+)">'
@@ -115,57 +68,38 @@ def buscador(item):
patron += 'Audio:(.*?)</a>.*?'
patron += 'Género:(.*?)</a>.*?'
patron += 'Calidad:(.*?),'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedlenguaje, scrapedgenero, scrapedcalidad in matches:
year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
scrapedcalidad = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedcalidad).strip()
scrapedlenguaje = re.sub(r"<a href.*?>|</a>|</span>", "", scrapedlenguaje).strip()
if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad:
scrapedcalidad = "[COLOR orange]" + scrapedcalidad + "[/COLOR]"
scrapedlenguaje = "[COLOR orange]" + scrapedlenguaje + "[/COLOR]"
title = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")"
title = "[COLOR white]" + title + "[/COLOR]"
scrapedtitle = scrapedtitle.split("(")[0].strip()
itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl, action="findvideos",
thumbnail=scrapedthumbnail, contentTitle = scrapedtitle, infoLabels={'year':year},
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True))
tmdb.set_infoLabels(itemlist, True)
try:
next_page = scrapertools.get_match(data,
'<span class="current">.*?<a href="(.*?)".*?>Siguiente &raquo;</a></div>')
title = "siguiente>>"
title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]"))
itemlist.append(Item(channel=item.channel, action="buscador", title=title, url=next_page,
itemlist.append(Item(channel=item.channel, action="buscador", title="[COLOR red]siguiente>>[/COLOR]", url=next_page,
thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png",
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True))
except:
pass
return itemlist
def peliculas(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|&#.*?;", "", data)
patron = 'style="position:relative;"> '
patron += '<a href="([^"]+)" '
patron += 'title="([^<]+)">'
@@ -173,363 +107,64 @@ def peliculas(item):
patron += 'Audio:(.*?)</br>.*?'
patron += 'Calidad:(.*?)</br>.*?'
patron += 'Género:.*?tag">(.*?)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedlenguaje, scrapedcalidad, scrapedgenero in matches:
year = scrapertools.find_single_match(scrapedtitle, '\((\d+)\)')
scrapedtitle = re.sub(r"\(\d+\)", "", scrapedtitle).strip()
scrapedcalidad = re.sub(r"<a href.*?>|</a>", "", scrapedcalidad).strip()
scrapedlenguaje = re.sub(r"<a href.*?>|</a>", "", scrapedlenguaje).strip()
scrapedlenguaje = scrapedlenguaje.split(',')
if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad:
itemlist.append(Item(channel=item.channel,
title=scrapedtitle,
url=scrapedurl,
action="findvideos",
thumbnail=scrapedthumbnail,
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True,
language=scrapedlenguaje,
quality=scrapedcalidad,
contentTitle = scrapedtitle,
infoLabels={'year':year}
))
tmdb.set_infoLabels(itemlist)
## Pagination
next_page = scrapertools.get_match(data, '<span class="current">.*?<a href="(.*?)".*?>Siguiente &raquo;</a></div>')
itemlist.append(Item(channel=item.channel, action="peliculas", title="[COLOR red]siguiente>>[/COLOR]", url=next_page,
thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png",
fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True))
return itemlist
def fanart(item):
logger.info()
itemlist = []
url = item.url
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
title_fan = item.extra.split("|")[1]
title = re.sub(r'Serie Completa|Temporada.*?Completa', '', title_fan)
fulltitle = title
title = title.replace(' ', '%20')
title = ''.join(
(c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn'))
try:
sinopsis = scrapertools.find_single_match(data, '<span class="clms">Sinopsis: <\/span>(.*?)<\/div>')
except:
sinopsis = ""
year = item.extra.split("|")[0]
if not "series" in item.url:
# filmaffinity
url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(
title, year)
data = scrapertools.downloadpage(url)
url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
if url_filmaf:
url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
data = scrapertools.downloadpage(url_filmaf)
else:
try:
url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year)
data = browser(url_bing)
data = re.sub(r'\n|\r|\t|\s{2}|&nbsp;', '', data)
if "myaddrproxy.php" in data:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
else:
subdata_bing = scrapertools.get_match(data,
'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"')
url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
if not "http" in url_filma:
data = scrapertools.cachePage("http://" + url_filma)
else:
data = scrapertools.cachePage(url_filma)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
except:
pass
if sinopsis == " ":
try:
sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>')
sinopsis = sinopsis.replace("<br><br />", "\n")
sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis)
except:
pass
try:
rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">')
except:
rating_filma = "Sin puntuacion"
critica = ""
patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"'
matches_reviews = scrapertools.find_multiple_matches(data, patron)
if matches_reviews:
for review, autor, valoracion in matches_reviews:
review = dhe(scrapertools.htmlclean(review))
review += "\n" + autor + "[CR]"
review = re.sub(r'Puntuac.*?\)', '', review)
if "positiva" in valoracion:
critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review
elif "neutral" in valoracion:
critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review
else:
critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review
else:
critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]"
print "ozuu"
print critica
url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&year=" + year + "&language=es&include_adult=false"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
title = re.sub(r":.*|\(.*?\)", "", title)
url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&language=es&include_adult=false"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica
show = item.fanart + "|" + "" + "|" + sinopsis
posterdb = item.thumbnail
fanart_info = item.fanart
fanart_3 = ""
fanart_2 = item.fanart
category = item.thumbnail
id_scraper = ""
itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos",
thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show,
category=category, library=item.library, fulltitle=fulltitle, folder=True))
for id, fan in matches:
fan = re.sub(r'\\|"', '', fan)
try:
rating = scrapertools.find_single_match(data, '"vote_average":(.*?),')
except:
rating = "Sin puntuación"
id_scraper = id + "|" + "peli" + "|" + rating + "|" + rating_filma + "|" + critica
try:
posterdb = scrapertools.get_match(data, '"page":1,.*?"poster_path":"\\\(.*?)"')
posterdb = "https://image.tmdb.org/t/p/original" + posterdb
except:
posterdb = item.thumbnail
if "null" in fan:
fanart = item.fanart
else:
fanart = "https://image.tmdb.org/t/p/original" + fan
item.extra = fanart
url = "http://api.themoviedb.org/3/movie/" + id + "/images?api_key=2e2160006592024ba87ccdf78c28f49f"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
fanart_info = item.extra
fanart_3 = ""
fanart_2 = item.extra
for fanart_info, fanart_3, fanart_2 in matches:
fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info
fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3
fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2
if fanart == item.fanart:
fanart = fanart_info
# clearart, fanart_2 and logo
url = "http://webservice.fanart.tv/v3/movies/" + id + "?api_key=dffe90fba4d02c199ae7a9e71330c987"
data = scrapertools.cachePage(url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '"hdmovielogo":.*?"url": "([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if '"moviedisc"' in data:
disc = scrapertools.get_match(data, '"moviedisc":.*?"url": "([^"]+)"')
if '"movieposter"' in data:
poster = scrapertools.get_match(data, '"movieposter":.*?"url": "([^"]+)"')
if '"moviethumb"' in data:
thumb = scrapertools.get_match(data, '"moviethumb":.*?"url": "([^"]+)"')
if '"moviebanner"' in data:
banner = scrapertools.get_match(data, '"moviebanner":.*?"url": "([^"]+)"')
if len(matches) == 0:
extra = posterdb
# "http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png"
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
category = posterdb
itemlist.append(
Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent",
thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category,
library=item.library, fulltitle=fulltitle, folder=True))
for logo in matches:
if '"hdmovieclearart"' in data:
clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"')
if '"moviebackground"' in data:
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
else:
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
if '"moviebackground"' in data:
if '"hdmovieclearart"' in data:
clear = scrapertools.get_match(data, '"hdmovieclearart":.*?"url": "([^"]+)"')
extra = clear
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = clear
else:
extra = logo
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = logo
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,
show=show, category=category, library=item.library, fulltitle=fulltitle,
folder=True))
if not '"hdmovieclearart"' in data and not '"moviebackground"' in data:
extra = logo
show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
if '"moviedisc"' in data:
category = disc
else:
category = item.extra
itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url,
thumbnail=logo, fanart=item.extra, extra=extra, show=show,
category=category, library=item.library, fulltitle=fulltitle, folder=True))
title_info = "Info"
if posterdb == item.thumbnail:
if '"movieposter"' in data:
thumbnail = poster
else:
thumbnail = item.thumbnail
else:
thumbnail = posterdb
id = id_scraper
extra = extra + "|" + id + "|" + title.encode('utf8')
title_info = title_info.replace(title_info, bbcode_kodi2html("[COLOR skyblue]" + title_info + "[/COLOR]"))
itemlist.append(Item(channel=item.channel, action="info", title=title_info, url=item.url, thumbnail=thumbnail,
fanart=fanart_info, extra=extra, category=category, show=show, folder=False))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"<!--.*?-->", "", data)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
bloque_tab = scrapertools.find_single_match(data, '<div id="verpelicula">(.*?)<div class="tab_container">')
patron = '<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
check = re.compile(patron, re.DOTALL).findall(bloque_tab)
servers_data_list = []
patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
if len(matches) == 0:
patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for check_tab, server, id in matches:
scrapedplot = scrapertools.get_match(data, '<span class="clms">(.*?)</div></div>')
plotformat = re.compile('(.*?:) </span>', re.DOTALL).findall(scrapedplot)
scrapedplot = scrapedplot.replace(scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]"))
for plot in plotformat:
scrapedplot = scrapedplot.replace(plot, bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]"))
scrapedplot = scrapedplot.replace("</span>", "[CR]")
scrapedplot = scrapedplot.replace(":", "")
if check_tab in str(check):
idioma, calidad = scrapertools.find_single_match(str(check), "" + check_tab + "', '(.*?)', '(.*?)'")
servers_data_list.append([server, id, idioma, calidad])
url = "http://www.peliculasdk.com/Js/videod.js"
data = scrapertools.cachePage(url)
url = host + "/Js/videod.js"
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
data = data.replace('<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')
patron = 'function (\w+)\(id\).*?'
patron += 'data-src="([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for server, url in matches:
for enlace, id, idioma, calidad in servers_data_list:
if server == enlace:
video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "", str(url))
video_url = re.sub(r"'\+codigo\+'", "", video_url)
video_url = video_url.replace('embed//', 'embed/')
@@ -541,21 +176,13 @@ def findvideos(item):
video_url = scrapertools.get_match(str(url), "u'([^']+)'")
except:
continue
servertitle = scrapertools.get_match(video_url, 'http.*?://(.*?)/')
servertitle = servertitle.replace("embed.", "")
servertitle = servertitle.replace("player.", "")
servertitle = servertitle.replace("api.video.", "")
servertitle = re.sub(r"hqq.tv|hqq.watch", "netutv", servertitle)
servertitle = servertitle.replace("anonymouse.org", "netu")
logger.debug('servertitle: %s' % servertitle)
server = servertools.get_server_name(servertitle)
logger.debug('server: %s'%server)
title = "Ver en: %s [" + idioma + "][" + calidad + "]"
itemlist.append(
Item(channel=item.channel, title=title, url=video_url, action="play",
item.clone(title=title, url=video_url, action="play",
thumbnail=item.category,
plot=scrapedplot, fanart=item.show, server=server, language=idioma, quality=calidad))
language=idioma, quality=calidad))
tmdb.set_infoLabels(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'],
'title': item.fulltitle}
@@ -563,218 +190,29 @@ def findvideos(item):
action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
text_color="0xFFff6666",
thumbnail='http://imgur.com/0gyYvuC.png'))
return itemlist
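Note the deferred-title trick in findvideos(): titles are built with a literal %s ('Ver en: %s ...') and servertools.get_servers_itemlist fills it in later with the detected server name via the lambda. A tiny standalone illustration of the same pattern (FakeItem is hypothetical):

# Titles keep a literal %s until the server is known, then one pass fills it.
class FakeItem(object):
    def __init__(self, title, server):
        self.title = title
        self.server = server

items = [FakeItem("Ver en: %s [Espanol][HD-720]", "powvideo")]
for i in items:
    i.title = i.title % i.server.capitalize()
print(items[0].title)  # Ver en: Powvideo [Espanol][HD-720]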
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]
def info(item):
logger.info()
itemlist = []
url = item.url
id = item.extra
if "serie" in item.url:
try:
rating_tmdba_tvdb = item.extra.split("|")[6]
if item.extra.split("|")[6] == "":
rating_tmdba_tvdb = "Sin puntuación"
except:
rating_tmdba_tvdb = "Sin puntuación"
else:
rating_tmdba_tvdb = item.extra.split("|")[3]
rating_filma = item.extra.split("|")[4]
print "eztoquee"
print rating_filma
print rating_tmdba_tvdb
filma = "http://s6.postimg.org/6yhe5fgy9/filma.png"
try:
if "serie" in item.url:
title = item.extra.split("|")[8]
else:
title = item.extra.split("|")[6]
title = title.replace("%20", " ")
title = "[COLOR yellow][B]" + title + "[/B][/COLOR]"
except:
title = item.title
try:
if "." in rating_tmdba_tvdb:
check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).')
else:
check_rat_tmdba = rating_tmdba_tvdb
if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8:
rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10:
rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
else:
rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
print "lolaymaue"
except:
rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
if "10." in rating:
rating = re.sub(r'10\.\d+', '10', rating)
try:
check_rat_filma = scrapertools.get_match(rating_filma, '(\d)')
print "paco"
print check_rat_filma
if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8:
print "dios"
print check_rat_filma
rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]"
elif int(check_rat_filma) >= 8:
print check_rat_filma
rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]"
else:
rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
print "rojo??"
print check_rat_filma
except:
rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
if not "serie" in item.url:
url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
1] + "?api_key=2e2160006592024ba87ccdf78c28f49f&append_to_response=credits&language=es"
data_plot = scrapertools.cache_page(url_plot)
plot = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",')
tagline = scrapertools.find_single_match(data_plot, '"tagline":(".*?")')
if plot == "":
plot = item.show.split("|")[2]
plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
plot = re.sub(r"\\", "", plot)
else:
plot = item.show.split("|")[2]
plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
plot = re.sub(r"\\", "", plot)
if item.extra.split("|")[7] != "":
tagline = item.extra.split("|")[7]
# tagline= re.sub(r',','.',tagline)
else:
tagline = ""
if "serie" in item.url:
check2 = "serie"
icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
foto = item.show.split("|")[1]
if item.extra.split("|")[5] != "":
critica = item.extra.split("|")[5]
else:
critica = "Esta serie no tiene críticas..."
if not ".png" in item.extra.split("|")[0]:
photo = "http://imgur.com/6uXGkrz.png"
else:
photo = item.extra.split("|")[0].replace(" ", "%20")
try:
tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
except:
tagline = ""
else:
critica = item.extra.split("|")[5]
if "%20" in critica:
critica = "No hay críticas"
icon = "http://imgur.com/SenkyxF.png"
photo = item.extra.split("|")[0].replace(" ", "%20")
foto = item.show.split("|")[1]
try:
if tagline == "\"\"":
tagline = " "
except:
tagline = " "
tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
check2 = "pelicula"
# "You might also be interested in" recommendations
peliculas = []
if "serie" in item.url:
url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[
5] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es"
data_tpi = scrapertools.cachePage(url_tpi)
tpi = scrapertools.find_multiple_matches(data_tpi,
'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),')
else:
url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
1] + "/recommendations?api_key=2e2160006592024ba87ccdf78c28f49f&language=es"
data_tpi = scrapertools.cachePage(url_tpi)
tpi = scrapertools.find_multiple_matches(data_tpi,
'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),')
for idp, peli, thumb in tpi:
thumb = re.sub(r'"|}', '', thumb)
if "null" in thumb:
thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png"
else:
thumb = "https://image.tmdb.org/t/p/original" + thumb
peliculas.append([idp, peli, thumb])
check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow")
infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline,
'rating': rating}
item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma,
critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/kdfWEJ6.png")
from channels import infoplus
infoplus.start(item_info, peliculas)
def browser(url):
import mechanize
# Use a mechanize Browser to work around problems with Bing search
br = mechanize.Browser()
# Browser options
br.set_handle_equiv(False)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(False)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
# br.set_debug_http(True)
# br.set_debug_redirects(True)
# br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
br.addheaders = [('User-agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
# br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
# Open some site, let's pick a random one, the first that pops in mind
r = br.open(url)
response = r.read()
print response
if "img,divreturn" in response:
r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
print "prooooxy"
response = r.read()
return response
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'castellano':
item.url = host + "/idioma/Espanol/"
item.action = "peliculas"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Catch the exception so one failing channel does not break the 'novedades' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -33,15 +33,14 @@ def mainlist(item):
def explorar(item):
logger.info()
itemlist = list()
url1 = item.title
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info("loca :"+url1+" aaa"+data)
if 'Género' in url1:
patron = '<div class="d">.+?<h3>Pel.+?neros<\/h3>(.+?)<\/h3>'
if 'Listado Alfabético' in url1:
patron = '<\/li><\/ul>.+?<h3>Pel.+?tico<\/h3>(.+?)<\/h3>'
if 'Año' in url1:
patron = '<ul class="anio"><li>(.+?)<\/ul>'
data_explorar = scrapertools.find_single_match(data, patron)
patron_explorar = '<a href="([^"]+)">([^"]+)<\/a>'
@@ -79,26 +78,22 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
url1 = str(item.url)
# data= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">.+>Otras')
data_mov= scrapertools.find_single_match(data,'<div id="cuerpo"><div class="iz">(.+)<ul class="pag">')
patron = '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"' # scrapedurl, scrapedthumbnail, scrapedtitle
matches = scrapertools.find_multiple_matches(data_mov, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches: # scrapedthumbnail, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, action="findvideos",
show=scrapedtitle))
# Pagination
patron_pag = '<a href="([^"]+)" title="Siguiente .+?">'
paginasig = scrapertools.find_single_match(data, patron_pag)
logger.info("algoooosadf "+paginasig)
next_page_url = item.url + paginasig
next_page_url = host + paginasig
if paginasig != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title=">> Página siguiente", url=next_page_url,
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.org/4zppxf5j9/siguiente.png'))
return itemlist
@@ -114,10 +109,9 @@ def findvideos(item):
logger.info("holaa" + data)
patron_show = '<strong>Ver Pel.+?a([^<]+) online<\/strong>'
show = scrapertools.find_single_match(data, patron_show)
logger.info("holaa" + show)
for videoitem in itemlist:
videoitem.channel = item.channel
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel != 'videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))

View File

@@ -10,13 +10,45 @@
"movie"
],
"settings": [
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -10,28 +10,47 @@ from core.item import Item
from platformcode import logger
host = "http://www.peliculasmx.net"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(
Item(channel=item.channel, title="Últimas añadidas", action="peliculas", url=host))
itemlist.append(
Item(channel=item.channel, title="Últimas por género", action="generos", url=host))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host))
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host + '/category/animacion/'
elif categoria == 'terror':
item.url = host + '/category/terror/'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def generos(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
logger.debug(data)
# <li class="cat-item cat-item-3"><a href="http://peliculasmx.net/category/accion/" >Accion</a> <span>246</span>
patron = '<li class="cat-item cat-item-.*?'
patron += '<a href="([^"]+)".*?'
patron += '>([^<]+).*?'
@@ -92,7 +111,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
try:
# Series
item.url = "http://www.peliculasmx.net/?s=%s" % texto
item.url = host + "/?s=%s" % texto
itemlist.extend(peliculas(item))
itemlist = sorted(itemlist, key=lambda Item: Item.title)

View File

@@ -35,6 +35,22 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",

View File

@@ -80,6 +80,12 @@ def newest(categoria):
item.url = host
elif categoria == "terror":
item.url = host+"terror/"
elif categoria == 'castellano':
item.url = host + "?s=Español"
elif categoria == 'latino':
item.url = host + "?s=Latino"
item.from_newest = True
item.action = "entradas"
itemlist = entradas(item)

View File

@@ -9,5 +9,63 @@
"categories": [
"direct",
"movie"
]
],
"settings":[
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -148,3 +148,41 @@ def findvideos(item):
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'documentales':
item.url = host + "genero/documental/"
elif categoria == 'infantiles':
item.url = host + "genero/animacion-e-infantil/"
elif categoria == 'terror':
item.url = host + "genero/terror/"
elif categoria == 'castellano':
item.url = host + "idioma/castellano/"
elif categoria == 'latino':
item.url = host + "idioma/latino/"
itemlist = peliculas(item)
if itemlist[-1].action == "peliculas":
itemlist.pop()
# Catch the exception so one failing channel does not break the 'novedades' channel
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -10,5 +10,63 @@
"movie",
"direct",
"VOS"
]
],
"settings":[
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -164,3 +164,41 @@ def play(item):
logger.info()
item.thumbnail = item.extra
return [item]
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'documentales':
item.url = host + "/genero/documental/"
elif categoria == 'infantiles':
item.url = host + "/genero/animacion/"
elif categoria == 'terror':
item.url = host + "/genero/terror/"
elif categoria == 'castellano':
item.url = host + "/idioma/espanol-castellano/"
elif categoria == 'latino':
item.url = host + "/idioma/espanol-latino/"
itemlist = agregadas(item)
if itemlist[-1].action == "agregadas":
itemlist.pop()
# Catch the exception so the Novedades channel is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,30 +0,0 @@
{
"id": "pelisencasa",
"name": "PelisEnCasa",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s14.postimg.org/iqiq0bxn5/pelisencasa.png",
"banner": "https://s18.postimg.org/j775ehbg9/pelisencasa_banner.png",
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,217 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from lib import jsunpack
from platformcode import config, logger
host = 'http://pelisencasa.net'
tgenero = {"Comedia": "https://s7.postimg.org/ne9g9zgwb/comedia.png",
"Suspense": "https://s13.postimg.org/wmw6vl1cn/suspenso.png",
"Drama": "https://s16.postimg.org/94sia332d/drama.png",
"Acción": "https://s3.postimg.org/y6o9puflv/accion.png",
"Aventura": "https://s10.postimg.org/6su40czih/aventura.png",
"Romance": "https://s15.postimg.org/fb5j8cl63/romance.png",
"Animación": "https://s13.postimg.org/5on877l87/animacion.png",
"Ciencia ficción": "https://s9.postimg.org/diu70s7j3/cienciaficcion.png",
"Terror": "https://s7.postimg.org/yi0gij3gb/terror.png",
"Documental": "https://s16.postimg.org/7xjj4bmol/documental.png",
"Música": "https://s29.postimg.org/bbxmdh9c7/musical.png",
"Western": "https://s23.postimg.org/lzyfbjzhn/western.png",
"Fantasía": "https://s13.postimg.org/65ylohgvb/fantasia.png",
"Guerra": "https://s23.postimg.org/71itp9hcr/belica.png",
"Misterio": "https://s1.postimg.org/w7fdgf2vj/misterio.png",
"Crimen": "https://s4.postimg.org/6z27zhirx/crimen.png",
"Historia": "https://s15.postimg.org/fmc050h1n/historia.png",
"Familia": "https://s7.postimg.org/6s7vdhqrf/familiar.png"}
tletras = {'#': 'https://s32.postimg.org/drojt686d/image.png',
'a': 'https://s32.postimg.org/llp5ekfz9/image.png',
'b': 'https://s32.postimg.org/y1qgm1yp1/image.png',
'c': 'https://s32.postimg.org/vlon87gmd/image.png',
'd': 'https://s32.postimg.org/3zlvnix9h/image.png',
'e': 'https://s32.postimg.org/bgv32qmsl/image.png',
'f': 'https://s32.postimg.org/y6u7vq605/image.png',
'g': 'https://s32.postimg.org/9237ib6jp/image.png',
'h': 'https://s32.postimg.org/812yt6pk5/image.png',
'i': 'https://s32.postimg.org/6nbbxvqat/image.png',
'j': 'https://s32.postimg.org/axpztgvdx/image.png',
'k': 'https://s32.postimg.org/976yrzdut/image.png',
'l': 'https://s32.postimg.org/fmal2e9yd/image.png',
'm': 'https://s32.postimg.org/m19lz2go5/image.png',
'n': 'https://s32.postimg.org/b2ycgvs2t/image.png',
'o': 'https://s32.postimg.org/c6igsucpx/image.png',
'p': 'https://s32.postimg.org/jnro82291/image.png',
'q': 'https://s32.postimg.org/ve5lpfv1h/image.png',
'r': 'https://s32.postimg.org/nmovqvqw5/image.png',
's': 'https://s32.postimg.org/zd2t89jol/image.png',
't': 'https://s32.postimg.org/wk9lo8jc5/image.png',
'u': 'https://s32.postimg.org/w8s5bh2w5/image.png',
'v': 'https://s32.postimg.org/e7dlrey91/image.png',
'w': 'https://s32.postimg.org/fnp49k15x/image.png',
'x': 'https://s32.postimg.org/dkep1w1d1/image.png',
'y': 'https://s32.postimg.org/um7j3zg85/image.png',
'z': 'https://s32.postimg.org/jb4vfm9d1/image.png'}
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas", action="lista", thumbnail='https://s18.postimg.org/fwvaeo6qh/todas.png',
fanart='https://s18.postimg.org/fwvaeo6qh/todas.png', url=host))
itemlist.append(
item.clone(title="Generos", action="seccion", thumbnail='https://s3.postimg.org/5s9jg2wtf/generos.png',
fanart='https://s3.postimg.org/5s9jg2wtf/generos.png', url=host, extra='generos'))
itemlist.append(
item.clone(title="Alfabetico", action="seccion", thumbnail='https://s17.postimg.org/fwi1y99en/a-z.png',
fanart='https://s17.postimg.org/fwi1y99en/a-z.png', url=host, extra='letras'))
itemlist.append(item.clone(title="Buscar", action="search", url=host + '/?s=',
thumbnail='https://s30.postimg.org/pei7txpa9/buscar.png',
fanart='https://s30.postimg.org/pei7txpa9/buscar.png'))
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.extra != 'letras':
patron = '<li class="TPostMv">.*?<a href="(.*?)"><div class="Image">.*?src="(.*?)\?resize=.*?".*?class="Title">(.*?)<\/h2>.*?'
patron += '<span class="Year">(.*?)<\/span>.*?<span class="Qlty">(.*?)<\/span><\/p><div class="Description"><p>(.*?)<\/p>'
else:
patron = '<td class="MvTbImg"> <a href="(.*?)".*?src="(.*?)\?resize=.*?".*?<strong>(.*?)<\/strong> <\/a><\/td><td>(.*?)<\/td><td>.*?'
patron += 'class="Qlty">(.*?)<\/span><\/p><\/td><td>(.*?)<\/td><td>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, calidad, scrapedplot in matches:
url = scrapedurl
thumbnail = scrapedthumbnail
plot = scrapedplot
contentTitle = scrapedtitle
title = contentTitle + ' (' + calidad + ')'
year = scrapedyear
fanart = ''
itemlist.append(
Item(channel=item.channel, action='findvideos', title=title, url=url, thumbnail=thumbnail, plot=plot,
fanart=fanart, contentTitle=contentTitle, infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="(.*?)">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="lista", title='Siguiente >>>', url=next_page,
thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png'))
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if item.extra == 'generos':
patron = 'menu-item-object-category menu-item-.*?"><a href="(.*?)">(.*?)<\/a><\/li>'
else:
patron = '<li><a href="(.*?\/letter\/.*?)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
thumbnail = ''
if item.extra == 'generos' and scrapedtitle in tgenero:
thumbnail = tgenero[scrapedtitle]
elif scrapedtitle.lower() in tletras:
thumbnail = tletras[scrapedtitle.lower()]
fanart = ''
title = scrapedtitle
url = scrapedurl
itemlist.append(
Item(channel=item.channel, action="lista", title=title, fulltitle=item.title, url=url, thumbnail=thumbnail,
fanart=fanart, extra=item.extra))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'class="Num">.*?<\/span>.*?href="(.*?)" class="Button STPb">.*?<\/a>.*?<span>(.*?)<\/span><\/td><td><span>(.*?)<\/span><\/td><td><span>.*?<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, servidor, idioma in matches:
new_item = (item.clone(url=scrapedurl, servidor=servidor, idioma=idioma, infoLabels=infoLabels))
itemlist += get_video_urls(new_item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
itemlist.insert(len(itemlist) - 1, item.clone(channel='trailertools', action='buscartrailer',
title='[COLOR orange]Trailer en Youtube[/COLOR]'))
return itemlist
def get_video_urls(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<script type="text\/javascript">(.*?)<\/script>')
data = jsunpack.unpack(data)
patron = '"file":"(.*?)","label":"(.*?)","type":"video.*?"}'
subtitle = scrapertools.find_single_match(data, 'tracks:\[{"file":"(.*?)","label":".*?","kind":"captions"}')
matches = re.compile(patron, re.DOTALL).findall(data)
for url, calidad in matches:
if item.servidor == 'PELISENCASA':
item.servidor = 'Directo'
title = item.contentTitle + ' (' + item.idioma + ')' + ' (' + calidad + ')' + ' (' + item.servidor + ')'
itemlist.append(item.clone(title=title, url=url, calidad=calidad, action='play', subtitle=subtitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'infantiles':
item.url = host + '/category/animacion/'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
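The removed channel above is also a compact example of the packed-JS workflow these scrapers share: pull the <script> body, run it through jsunpack, then regex the player sources out of the unpacked text. A self-contained sketch of that flow (extract_sources is illustrative; the regexes mirror the ones in get_video_urls above):

import re
from lib import jsunpack

def extract_sources(html):
    # Grab the packed script body, as get_video_urls() did.
    packed = re.search(r'<script type="text/javascript">(.*?)</script>',
                       html, re.DOTALL)
    if not packed:
        return []
    # jsunpack.unpack() reverses the p.a.c.k.e.r obfuscation, yielding
    # the readable player setup.
    unpacked = jsunpack.unpack(packed.group(1))
    # Each source appears as a "file"/"label" pair in the setup.
    return re.findall(r'"file":"(.*?)","label":"(.*?)"', unpacked)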

View File

@@ -27,6 +27,14 @@
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",

View File

@@ -74,16 +74,15 @@ def lista(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if item.seccion != 'actor':
patron = '<li class=item-serie.*?><a href=(.*?) title=(.*?)><img src=(.*?) alt=><span '
patron += 'class=s-title><strong>.*?<\/strong><p>(.*?)<\/p><\/span><\/a><\/li>'
patron = '(?s)<li class="item-serie.*?href="([^"]+).*?title="([^"]+).*?data-src="([^"]+).*?<span '
patron += 'class="s-title">.*?<p>([^<]+)'
else:
patron = '<li><a href=(\/pelicula\/.*?)><figure><img src=(.*?) alt=><\/figure><p class=title>(.*?)<\/p><p '
patron += 'class=year>(.*?)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '(?s)<li>.*?<a href="(/pelicula/[^"]+)".*?<figure>.*?data-src="([^"]+)".*?p class="title">([^<]+).*?'
patron += 'year">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedyear in matches:
url = host + scrapedurl
if item.seccion != 'actor':
@@ -109,11 +108,11 @@ def lista(item):
# Pagination
if itemlist != []:
actual_page = scrapertools.find_single_match(data, '<a class=active item href=.*?>(.*?)<\/a>')
actual_page = scrapertools.find_single_match(data, '<a class="active item" href=".*?">(.*?)<\/a>')
if actual_page:
next_page_num = int(actual_page) + 1
next_page = scrapertools.find_single_match(data,
'<li><a class= item href=(.*?)\?page=.*?&limit=.*?>Siguiente')
'<li><a class=" item" href="(.*?)\?page=.*?&limit=.*?">Siguiente')
next_page_url = host + next_page + '?page=%s' % next_page_num
if next_page != '':
itemlist.append(Item(channel=item.channel,
@@ -129,15 +128,14 @@ def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if item.seccion == 'generos':
patron = '<a href=(\/peliculas\/[\D].*?\/) title=Películas de .*?>(.*?)<\/a>'
patron = '<a href="(\/peliculas\/[\D].*?\/)" title="Películas de .*?>(.*?)<\/a>'
elif item.seccion == 'anios':
patron = '<li class=.*?><a href=(.*?)>(\d{4})<\/a> <\/li>'
patron = '<li class=.*?><a href="(.*?)">(\d{4})<\/a> <\/li>'
elif item.seccion == 'actor':
patron = '<li><a href=(.*?)><div.*?<div class=photopurple title=(.*?)><\/div><img src=(.*?)><\/figure>'
matches = re.compile(patron, re.DOTALL).findall(data)
patron = '<li><a href="(.*?)".*?div.*?<div class="photopurple" title="(.*?)">.*?data-src="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
if item.seccion != 'actor':
for scrapedurl, scrapedtitle in matches:
title = scrapedtitle.decode('utf-8')
@@ -158,7 +156,6 @@ def seccion(item):
))
else:
for scrapedurl, scrapedname, scrapedthumbnail in matches:
thumbnail = scrapedthumbnail
fanart = ''
title = scrapedname
url = host + scrapedurl
@@ -168,14 +165,14 @@ def seccion(item):
title=title,
fulltitle=item.title,
url=url,
thumbnail=thumbnail,
thumbnail=scrapedthumbnail,
fanart=fanart,
seccion=item.seccion
))
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(data, '<li><a class= item href=(.*?)&limit=.*?>Siguiente <')
next_page = scrapertools.find_single_match(data, '<li><a class=" item" href="(.*?)&limit=.*?>Siguiente <')
next_page_url = host + next_page
if next_page != '':
itemlist.append(item.clone(action="seccion",
@@ -240,10 +237,9 @@ def findvideos(item):
))
for videoitem in templist:
data = httptools.downloadpage(videoitem.url).data
urls_list = scrapertools.find_multiple_matches(data, '{"reorder":1,"type":.*?}')
urls_list = scrapertools.find_multiple_matches(data, 'var.*?_SOURCE\s+=\s+\[(.*?)\]')
for element in urls_list:
json_data=jsontools.load(element)
id = json_data['id']
sub = json_data['srt']
url = json_data['source']
@@ -253,7 +249,6 @@ def findvideos(item):
new_url = 'https://onevideo.tv/api/player?key=90503e3de26d45e455b55e9dc54f015b3d1d4150&link' \
'=%s&srt=%s' % (url, sub)
logger.debug('new_url: %s' % new_url)
data = httptools.downloadpage(new_url).data
data = re.sub(r'\\', "", data)
@@ -261,19 +256,19 @@ def findvideos(item):
for urls in video_list:
if urls.language == '':
urls.language = videoitem.language
urls.title = item.title + '(%s) (%s)' % (urls.language, urls.server)
urls.title = item.title + urls.language + '(%s)'
for video_url in video_list:
video_url.channel = item.channel
video_url.action = 'play'
video_url.quality = quality
video_url.server = ""
video_url.infoLabels = item.infoLabels
else:
server = servertools.get_server_from_url(url)
video_list.append(item.clone(title=item.title, url=url, action='play', quality = quality,
server=server))
video_list.append(item.clone(title=item.title, url=url, action='play', quality = quality
))
video_list = servertools.get_servers_itemlist(video_list, lambda i: i.title % i.server.capitalize())
tmdb.set_infoLabels(video_list)
if config.get_videolibrary_support() and len(video_list) > 0 and item.extra != 'findvideos':
video_list.append(
Item(channel=item.channel,
@@ -292,7 +287,7 @@ def newest(categoria):
item = Item()
# categoria='peliculas'
try:
if categoria == 'peliculas':
if categoria in ['peliculas','latino']:
item.url = host + '/estrenos/'
elif categoria == 'infantiles':
item.url = host + '/peliculas/animacion/'
@@ -309,3 +304,8 @@ def newest(categoria):
return []
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
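The pattern rewrite above tracks two site changes: attribute quotes are no longer stripped from the page (the old re.sub deleted every double quote), and thumbnails moved to a lazy-loaded data-src attribute. A standalone check of one rewritten pattern against a trimmed sample row (the sample HTML is invented for illustration):

import re

sample = ('<li class="item-serie x"><a href="/pelicula/demo" title="Demo">'
          '<img data-src="/thumb.jpg"><span class="s-title"><p>2017</p>')
patron = ('(?s)<li class="item-serie.*?href="([^"]+).*?title="([^"]+)'
          '.*?data-src="([^"]+).*?<span class="s-title">.*?<p>([^<]+)')
print(re.findall(patron, sample))
# -> [('/pelicula/demo', 'Demo', '/thumb.jpg', '2017')]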

View File

@@ -215,18 +215,20 @@ def search(item, texto):
def findvideos(item):
logger.info()
itemlist = []
duplicados = []
data = get_source(item.url)
patron = '<div class=TPlayerTbCurrent id=(.*?)><iframe.*?src=(.*?) frameborder'
patron = '<div class=TPlayer.*?\s+id=(.*?)><iframe width=560 height=315 src=(.*?) frameborder=0'
matches = re.compile(patron, re.DOTALL).findall(data)
for opt, urls_page in matches:
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.*?'
logger.debug ('option: %s' % opt)
language = scrapertools.find_single_match (data,'data-TPlayerNv=%s><span>Opción <strong>.'
'<\/strong><\/span>.*?<span>(.*?)<\/span'%opt)
data = httptools.downloadpage(urls_page).data
servers = scrapertools.find_multiple_matches(data,'<button id="(.*?)"')
video_data = httptools.downloadpage(urls_page).data
servers = scrapertools.find_multiple_matches(video_data,'<button id="(.*?)"')
for server in servers:
quality = item.quality
info_urls = urls_page.replace('embed','get')
video_info=httptools.downloadpage(info_urls+'/'+server).data
video_info = jsontools.load(video_info)
@@ -238,8 +240,13 @@ def findvideos(item):
url = 'https://'+video_server+'/embed/'+video_id
else:
url = 'https://'+video_server+'/e/'+video_id
title = item.title
itemlist.append(item.clone(title=title, url=url, action='play', language=language))
title = item.contentTitle + ' [%s] [%s]'%(quality, language)
itemlist.append(item.clone(title=title,
url=url,
action='play',
language=language,
quality=quality
))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
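servertools.get_servers_itemlist() does the heavy lifting at the end of this findvideos(): it detects the hosting server behind each url and fills the item's server attribute. Other channels in this changeset pass it a callback so a '%s' placeholder in the title is completed with the detected server name:

from core import servertools

# Variant used elsewhere in this changeset: each title carries a '%s'
# that is replaced by the detected server name once it is known.
itemlist = servertools.get_servers_itemlist(
    itemlist, lambda i: i.title % i.server.capitalize())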

View File

@@ -28,6 +28,14 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -334,3 +334,29 @@ def findvideos(item):
servertools.get_servers_itemlist(itemlist)
return itemlist
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'torrent':
item.url = api + "?sort_by=''&page=0"
itemlist = pelis(item)
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
item.url = api_serie + "?sort_by=''&page=0"
itemlist.extend(series(item))
if itemlist[-1].title == ">> Página siguiente":
itemlist.pop()
# Catch the exception so the Novedades channel is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -49,6 +49,22 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -34,9 +34,14 @@ def newest(categoria):
if categoria == 'peliculas':
item.url = host
elif categoria == 'infantiles':
item.url = host + 'genero/infantil/'
item.url = host + '/genero/infantil/'
elif categoria == 'terror':
item.url = host + 'genero/terror/'
item.url = host + '/genero/terror/'
elif categoria == 'castellano':
item.url = host +'/lenguaje/castellano/'
elif categoria == 'latino':
item.url = host +'/lenguaje/latino/'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
@@ -126,6 +131,7 @@ def filtro(item):
for url, title in matches:
if "eroticas" in title and config.get_setting("adult_mode") == 0:
continue
logger.debug('la url: %s' %url)
itemlist.append(item.clone(action = "peliculas",
title = title.title(),
url = url

View File

@@ -61,6 +61,22 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -143,6 +143,11 @@ def newest(categoria):
item.url = host + "genero/animacion-e-infantil/"
elif categoria == 'terror':
item.url = host + "genero/terror/"
elif categoria == 'castellano':
item.url = host + "idioma/castellano/"
elif categoria == 'latino':
item.url = host + "idioma/latino/"
else:
return []

View File

@@ -11,28 +11,47 @@
"tvshow",
"documentary",
"direct"
],
"settings": [
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": false,
"visible": false
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino"
]
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - terror",
"default": true,
"enabled": true,
"visible": true
}
]
}
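The dropped filter_languages entry was a "list" setting; for those, the stored value is the integer index into lvalues rather than the label itself (the playmax channel below reads its perfil setting the same way). A sketch of what the removed filter looked like on the code side ("somechannel" is a placeholder id):

from platformcode import config

# "type": "list" settings store an index into "lvalues".
idx = config.get_setting("filter_languages", "somechannel")
chosen = ["No filtrar", "Latino"][idx]  # 0 = no filtering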

View File

@@ -145,7 +145,9 @@ def menuseries(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.url = host + 'busqueda/?s=' + texto
if not item.extra:
item.extra = 'peliculas/'
try:
if texto != '':
return lista(item)
@@ -176,7 +178,7 @@ def lista(item):
patron = '<img.*?width="147" heigh="197".*?src="([^"]+)".*?>.*?.<i class="icon online-play"><\/i>.*?.<h2 ' \
'class="title title-.*?">.*?.<a href="([^"]+)" title="([^"]+)">.*?>'
actual = scrapertools.find_single_match(data,
'<a href="http:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
'<a href="https:\/\/www.pelisplus.tv\/.*?\/pag-([^p]+)pag-2" '
'class="page bicon last"><<\/a>')
else:
patron = '<img data-original="([^"]+)".*?width="147" heigh="197".*?src=.*?>.*?\n<i class="icon ' \
@@ -217,7 +219,7 @@ def lista(item):
else:
item.extra = item.extra.rstrip('s/')
if item.extra in url:
itemlist.append(
new_item=(
Item(channel=item.channel,
contentType=tipo,
action=accion,
@@ -236,21 +238,12 @@ def lista(item):
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Find the items that have no plot and load the corresponding pages to fetch it
for item in itemlist:
if item.infoLabels['plot'] == '':
data = httptools.downloadpage(item.url).data
item.fanart = scrapertools.find_single_match(data, 'meta property="og:image" content="([^"]+)" \/>')
item.plot = scrapertools.find_single_match(data,
'<span>Sinopsis:<\/span>.([^<]+)<span '
'class="text-detail-hide"><\/span>.<\/p>')
# Pagination
if item.title != 'Buscar' and actual != '':
if itemlist != []:
next_page = str(int(actual) + 1)
next_page_url = host + item.extra + 'pag-' + next_page
next_page_url = item.extra + 'pag-' + next_page
if not next_page_url.startswith("http"):
next_page_url = host + next_page_url
itemlist.append(
Item(channel=item.channel,
action="lista",
@@ -428,14 +421,17 @@ def get_vip(url):
itemlist =[]
url= url.replace('reproductor','vip')
data = httptools.downloadpage(url).data
patron = '<a href="(.*?)"> '
video_urls = scrapertools.find_multiple_matches(data,'<a href="(.*?)">')
video_urls = scrapertools.find_multiple_matches(data,'<a href="(.*?)".*?>')
for item in video_urls:
id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang)
data=httptools.downloadpage(new_url, follow_redirects=False).headers
itemlist.extend(servertools.find_video_items(data=str(data)))
if 'elreyxhd' in item:
if 'plus'in item:
id, tipo, lang= scrapertools.find_single_match(item,'plus\/(\d+)\/.*?=(\d+).*?=(.*)')
new_url = 'https://www.elreyxhd.com/pelisplus.php?id=%s&tipo=%s&idioma=%s' % (id, tipo, lang)
else:
id = scrapertools.find_single_match(item,'episodes\/(\d+)')
new_url = 'https://www.elreyxhd.com/samir.php?id=%s&tipo=capitulo&idioma=latino&x=&sv=' % id
data=httptools.downloadpage(new_url, follow_redirects=False).headers.get("location", "")
itemlist.append(Item(url=data))
return itemlist
@@ -455,22 +451,17 @@ def findvideos(item):
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
# videoitem.infoLabels = item.infoLabels
videoitem.channel = item.channel
videoitem.infoLabels = item.infoLabels
if videoitem.quality == '' or videoitem.language == '':
videoitem.quality = 'default'
videoitem.language = 'Latino'
if videoitem.server != '':
videoitem.thumbnail = item.thumbnail
else:
videoitem.thumbnail = item.thumbnail
videoitem.server = 'directo'
videoitem.action = 'play'
videoitem.fulltitle = item.title
if videoitem.extra != 'directo' and 'youtube' not in videoitem.url:
videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'
videoitem.title = item.contentTitle + ' (%s)'
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
n = 0
for videoitem in itemlist:
if 'youtube' in videoitem.url:
@@ -482,7 +473,7 @@ def findvideos(item):
itemlist.pop(1)
# Required for FilterTools
tmdb.set_infoLabels_itemlist(itemlist, True)
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
@@ -503,18 +494,26 @@ def findvideos(item):
return itemlist
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(categoria):
logger.info()
itemlist = []
item = Item()
item.extra = 'estrenos/'
try:
if categoria == 'peliculas':
if categoria in ['peliculas','latino']:
item.url = host + 'estrenos/pag-1'
elif categoria == 'infantiles':
item.url = host + 'peliculas/animacion/pag-1'
elif categoria == 'terror':
item.url = host + 'peliculas/terror/pag-1'
elif categoria == 'documentales':
item.url = host + 'documentales/pag-1'
item.extra = 'documentales/'
@@ -528,6 +527,5 @@ def newest(categoria):
logger.error("{0}".format(line))
return []
itemlist = filtertools.get_links(itemlist, item, list_language)
#itemlist = filtertools.get_links(itemlist, item, list_language)
return itemlist
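get_vip() above relies on a small httptools trick: request with follow_redirects=False and read the Location header, which carries the final media url. Isolated as a helper (resolve_redirect is a sketch, not part of the codebase):

from core import httptools

def resolve_redirect(url):
    # Ask httptools not to follow the redirect; the target is then
    # available in the Location response header.
    response = httptools.downloadpage(url, follow_redirects=False)
    return response.headers.get("location", "")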

View File

@@ -1,114 +0,0 @@
{
"id": "playmax",
"name": "PlayMax",
"language": ["cast", "lat"],
"active": true,
"adult": false,
"thumbnail": "playmax.png",
"banner": "playmax.png",
"categories": [
"movie",
"tvshow"
],
"settings": [
{
"id": "playmaxuser",
"type": "text",
"color": "0xFF25AA48",
"label": "@30014",
"enabled": true,
"visible": true
},
{
"id": "playmaxpassword",
"type": "text",
"color": "0xFF25AA48",
"hidden": true,
"label": "@30015",
"enabled": "!eq(-1,'')",
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en búsqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Películas",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_series",
"type": "bool",
"label": "Incluir en Novedades - Series",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "menu_info",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Mostrar menú intermedio película/episodio",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"Sin color",
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
},
{
"id": "last_page",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Ocultar opción elegir página en películas (Kodi)",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "order_web",
"type": "bool",
"color": "0xFFd50b0b",
"label": "Usar el mismo orden de los enlaces que la web",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,979 +0,0 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
sid = config.get_setting("sid_playmax", "playmax")
apikey = "0ea143087685e9e0a23f98ae"
__modo_grafico__ = config.get_setting('modo_grafico', 'playmax')
__perfil__ = config.get_setting('perfil', "playmax")
__menu_info__ = config.get_setting('menu_info', 'playmax')
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFF088A08'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFF088A08'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFF088A08']]
if __perfil__ - 1 >= 0:
color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "https://playmax.mx"
def login():
logger.info()
try:
user = config.get_setting("playmaxuser", "playmax")
password = config.get_setting("playmaxpassword", "playmax")
if user == "" and password == "":
return False, "Para ver los enlaces de este canal es necesario registrarse en playmax.mx"
elif user == "" or password == "":
return False, "Usuario o contraseña en blanco. Revisa tus credenciales"
data = httptools.downloadpage("https://playmax.mx/ucp.php?mode=login").data
if re.search(r'(?i)class="hb_user_data" title="%s"' % user, data):
if not config.get_setting("sid_playmax", "playmax"):
sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
if not sid_:
sid_ = scrapertools.find_single_match(config.get_cookie_data(), 'playmax.*?_sid\s*([A-z0-9]+)')
config.set_setting("sid_playmax", sid_, "playmax")
return True, ""
confirm_id = scrapertools.find_single_match(data, 'name="confirm_id" value="([^"]+)"')
sid_log = scrapertools.find_single_match(data, 'name="sid" value="([^"]+)"')
post = "username=%s&password=%s&autologin=on&agreed=true&change_lang=0&confirm_id=%s&login=&sid=%s" \
"&redirect=index.php&login=Entrar" % (user, password, confirm_id, sid_log)
data = httptools.downloadpage("https://playmax.mx/ucp.php?mode=login", post=post).data
if "contraseña incorrecta" in data:
logger.error("Error en el login")
return False, "Contraseña errónea. Comprueba tus credenciales"
elif "nombre de usuario incorrecto" in data:
logger.error("Error en el login")
return False, "Nombre de usuario no válido. Comprueba tus credenciales"
else:
logger.info("Login correcto")
sid_ = scrapertools.find_single_match(data, 'sid=([^"]+)"')
if not sid_:
sid_ = scrapertools.find_single_match(config.get_cookie_data(), 'playmax.*?_sid\s*([A-z0-9]+)')
config.set_setting("sid_playmax", sid_, "playmax")
# On the first login, enable global search and the Novedades section
if not config.get_setting("primer_log", "playmax"):
config.set_setting("include_in_global_search", True, "playmax")
config.set_setting("include_in_newest_peliculas", True, "playmax")
config.set_setting("include_in_newest_series", True, "playmax")
config.set_setting("include_in_newest_infantiles", True, "playmax")
config.set_setting("primer_log", False, "playmax")
return True, ""
except:
import traceback
logger.error(traceback.format_exc())
return False, "Error en el login. Comprueba tus credenciales o si la web está operativa"
def mainlist(item):
logger.info()
itemlist = []
item.text_color = color1
logueado, error_message = login()
if not logueado:
config.set_setting("include_in_global_search", False, "playmax")
itemlist.append(item.clone(title=error_message, action="configuracion", folder=False))
return itemlist
itemlist.append(item.clone(title="Películas", action="", text_color=color2))
item.contentType = "movie"
itemlist.append(
item.clone(title=" Novedades", action="fichas", url=host + "/catalogo.php?tipo[]=2&ad=2&ordenar="
"novedades&con_dis=on"))
itemlist.append(
item.clone(title=" Populares", action="fichas", url=host + "/catalogo.php?tipo[]=2&ad=2&ordenar="
"pop&con_dis=on"))
itemlist.append(item.clone(title=" Índices", action="indices"))
itemlist.append(item.clone(title="Series", action="", text_color=color2))
item.contentType = "tvshow"
itemlist.append(item.clone(title=" Nuevos capítulos", action="fichas", url=host + "/catalogo.php?tipo[]=1&ad=2&"
"ordenar=novedades&con_dis=on"))
itemlist.append(item.clone(title=" Nuevas series", action="fichas", url=host + "/catalogo.php?tipo[]=1&ad=2&"
"ordenar=año&con_dis=on"))
itemlist.append(item.clone(title=" Índices", action="indices"))
item.contentType = "movie"
itemlist.append(item.clone(title="Documentales", action="fichas", text_color=color2,
url=host + "/catalogo.php?&tipo[]=3&ad=2&ordenar=novedades&con_dis=on"))
itemlist.append(item.clone(title="Listas", action="listas", text_color=color2,
url=host + "/listas.php?apikey=%s&sid=%s&start=0" % (apikey, sid), extra="listas"))
itemlist.append(item.clone(action="search", title="Buscar...", text_color=color2))
itemlist.append(item.clone(action="acciones_cuenta", title="Tus fichas", text_color=color4))
itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
item.url = "%s/buscar.php?apikey=%s&sid=%s&buscar=%s&modo=[fichas]&start=0" % (host, apikey, sid, texto)
try:
return busqueda(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def busqueda(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = xml2dict(data)
if type(data["Data"]["Fichas"]["Ficha"]) == dict:
searched_data = [data["Data"]["Fichas"]["Ficha"]]
else:
searched_data = data["Data"]["Fichas"]["Ficha"]
for f in searched_data:
f["Title"] = f["Title"].replace("<![CDATA[", "").replace("]]>", "")
title = "%s (%s)" % (f["Title"], f["Year"])
infolab = {'year': f["Year"]}
thumbnail = f["Poster"]
url = "%s/ficha.php?f=%s" % (host, f["Id"])
action = "findvideos"
if __menu_info__:
action = "menu_info"
if f["IsSerie"] == "1":
tipo = "tvshow"
show = f["Title"]
if not __menu_info__:
action = "episodios"
else:
tipo = "movie"
show = ""
itemlist.append(Item(channel=item.channel, action=action, title=title, url=url, text_color=color2,
contentTitle=f["Title"], show=show, contentType=tipo, infoLabels=infolab,
thumbnail=thumbnail))
if __modo_grafico__:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
total = int(data["Data"]["totalResultsFichas"])
actualpage = int(scrapertools.find_single_match(item.url, "start=(\d+)"))
if actualpage + 20 < total:
next_page = item.url.replace("start=%s" % actualpage, "start=%s" % (actualpage + 20))
itemlist.append(Item(channel=item.channel, action="busqueda", title=">> Página Siguiente",
url=next_page, thumbnail=item.thumbnail))
return itemlist
def configuracion(item):
from platformcode import platformtools
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'series':
item.channel = "playmax"
item.extra = "newest"
item.url = host + "/catalogo.php?tipo[]=1&ad=2&ordenar=novedades&con_dis=on"
item.contentType = "tvshow"
itemlist = fichas(item)
if itemlist[-1].action == "fichas":
itemlist.pop()
elif categoria == 'peliculas':
item.channel = "playmax"
item.extra = "newest"
item.url = host + "/catalogo.php?tipo[]=2&ad=2&ordenar=novedades&con_dis=on"
item.contentType = "movie"
itemlist = fichas(item)
if itemlist[-1].action == "fichas":
itemlist.pop()
elif categoria == 'infantiles':
item.channel = "playmax"
item.extra = "newest"
item.url = host + "/catalogo.php?tipo[]=2&generos[]=60&ad=2&ordenar=novedades&con_dis=on"
item.contentType = "movie"
itemlist = fichas(item)
if itemlist[-1].action == "fichas":
itemlist.pop()
# Catch the exception so the Novedades channel is not interrupted if one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
def indices(item):
logger.info()
itemlist = []
tipo = "2"
if item.contentType == "tvshow":
tipo = "1"
if "Índices" in item.title:
if item.contentType == "tvshow":
itemlist.append(item.clone(title="Populares", action="fichas", url=host + "/catalogo.php?tipo[]=1&ad=2&"
"ordenar=pop&con_dis=on"))
itemlist.append(item.clone(title="Más vistas", action="fichas", url=host + "/catalogo.php?tipo[]=%s&ad=2&"
"ordenar=siempre&con_dis=on" % tipo))
itemlist.append(item.clone(title="Mejor valoradas", action="fichas", url=host + "/catalogo.php?tipo[]=%s&ad=2&"
"ordenar=valoracion&con_dis=on" % tipo))
itemlist.append(item.clone(title="Géneros", url=host + "/catalogo.php"))
itemlist.append(item.clone(title="Idiomas", url=host + "/catalogo.php"))
if item.contentType == "movie":
itemlist.append(item.clone(title="Por calidad", url=host + "/catalogo.php"))
itemlist.append(item.clone(title="Por año"))
itemlist.append(item.clone(title="Por país", url=host + "/catalogo.php"))
return itemlist
if "Géneros" in item.title:
data = httptools.downloadpage(item.url).data
patron = '<div class="sel gen" value="([^"]+)">([^<]+)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for value, genero in matches:
url = item.url + "?tipo[]=%s&generos[]=%s&ad=2&ordenar=novedades&con_dis=on" % (tipo, value)
itemlist.append(item.clone(action="fichas", title=genero, url=url))
elif "Idiomas" in item.title:
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'oname="Idioma">Cualquier(.*?)<input')
patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for value, idioma in matches:
url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_idioma=%s" % (tipo, value)
itemlist.append(item.clone(action="fichas", title=idioma, url=url))
elif "calidad" in item.title:
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'oname="Calidad">Cualquier(.*?)<input')
patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for value, calidad in matches:
url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&e_calidad=%s" % (tipo, value)
itemlist.append(item.clone(action="fichas", title=calidad, url=url))
elif "país" in item.title:
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'oname="País">Todos(.*?)<input')
patron = '<div class="sel" value="([^"]+)">([^<]+)</div>'
matches = scrapertools.find_multiple_matches(bloque, patron)
for value, pais in matches:
url = item.url + "?tipo[]=%s&ad=2&ordenar=novedades&con_dis=on&pais=%s" % (tipo, value)
itemlist.append(item.clone(action="fichas", title=pais, url=url))
else:
from datetime import datetime
year = datetime.now().year
for i in range(year, 1899, -1):
url = "%s/catalogo.php?tipo[]=%s&del=%s&al=%s&año=personal&ad=2&ordenar=novedades&con_dis=on" \
% (host, tipo, i, i)
itemlist.append(item.clone(action="fichas", title=str(i), url=url))
return itemlist
def fichas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
fichas_marca = {'1': 'Siguiendo', '2': 'Pendiente', '3': 'Favorita', '4': 'Vista', '5': 'Abandonada'}
patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src-data="([^"]+)".*?' \
'<div class="c_fichas_data".*?marked="([^"]*)".*?serie="([^"]*)".*?' \
'<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, marca, serie, episodio, scrapedtitle in matches:
tipo = "movie"
scrapedurl = host + scrapedurl.rsplit("-dc=")[0]
if not "-dc=" in scrapedurl:
scrapedurl += "-dc="
action = "findvideos"
if __menu_info__:
action = "menu_info"
if serie:
tipo = "tvshow"
if episodio:
title = "%s - %s" % (episodio.replace("X", "x"), scrapedtitle)
else:
title = scrapedtitle
if marca:
title += " [COLOR %s][%s][/COLOR]" % (color4, fichas_marca[marca])
new_item = Item(channel=item.channel, action=action, title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, contentType=tipo,
text_color=color2)
if new_item.contentType == "tvshow":
new_item.show = scrapedtitle
if not __menu_info__:
new_item.action = "episodios"
itemlist.append(new_item)
if itemlist and (item.extra == "listas_plus" or item.extra == "sigo"):
follow = scrapertools.find_single_match(data, '<div onclick="seguir_lista.*?>(.*?)<')
title = "Seguir Lista"
if follow == "Siguiendo":
title = "Dejar de seguir lista"
item.extra = ""
url = host + "/data.php?mode=seguir_lista&apikey=%s&sid=%s&lista=%s" % (
apikey, sid, item.url.rsplit("/l", 1)[1])
itemlist.insert(0, item.clone(action="acciones_cuenta", title=title, url=url, text_color=color4,
lista=item.title, folder=False))
next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="next"')
if next_page:
next_page = host + next_page.replace("&amp;", "&")
itemlist.append(Item(channel=item.channel, action="fichas", title=">> Página Siguiente", url=next_page))
try:
total = int(scrapertools.find_single_match(data, '<span class="page-dots">.*href.*?>(\d+)'))
except:
total = 0
if not config.get_setting("last_page", item.channel) and config.is_xbmc() and total > 2 \
and item.extra != "newest":
itemlist.append(item.clone(action="select_page", title="Ir a página... (Total:%s)" % total, url=next_page,
text_color=color5))
return itemlist
def episodios(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
if not item.infoLabels["tmdb_id"]:
item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data,
'<a href="https://www.themoviedb.org/[^/]+/(\d+)')
item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
if not item.infoLabels["genre"]:
item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data,
'<a itemprop="genre"[^>]+>([^<]+)</a>'))
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')
dc = scrapertools.find_single_match(data, "var dc_ic = '\?dc=([^']+)'")
patron = '<div class="f_cl_l_c f_cl_l_c_id[^"]+" c_id="([^"]+)" .*?c_num="([^"]+)" c_name="([^"]+)"' \
'.*?load_f_links\(\d+\s*,\s*(\d+).*?<div class="([^"]+)" onclick="marcar_capitulo'
matches = scrapertools.find_multiple_matches(data, patron)
lista_epis = []
for c_id, episodio, title, ficha, status in matches:
episodio = episodio.replace("X", "x")
if episodio in lista_epis:
continue
lista_epis.append(episodio)
url = "https://playmax.mx/c_enlaces_n.php?ficha=%s&c_id=%s&dc=%s" % (ficha, c_id, dc)
title = "%s - %s" % (episodio, title)
if "_mc a" in status:
title = "[COLOR %s]%s[/COLOR] %s" % (color5, u"\u0474".encode('utf-8'), title)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
fanart=item.fanart, show=item.show, infoLabels=item.infoLabels, text_color=color2,
referer=item.url, contentType="episode")
try:
new_item.infoLabels["season"], new_item.infoLabels["episode"] = episodio.split('x', 1)
except:
pass
itemlist.append(new_item)
itemlist.sort(key=lambda it: (it.infoLabels["season"], it.infoLabels["episode"]), reverse=True)
if __modo_grafico__:
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
library_path = config.get_videolibrary_path()
if config.get_videolibrary_support() and not item.extra:
title = "Añadir serie a la videoteca"
if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
try:
from core import filetools
path = filetools.join(library_path, "SERIES")
files = filetools.walk(path)
for dirpath, dirname, filename in files:
if item.infoLabels["imdb_id"] in dirpath:
for f in filename:
if f != "tvshow.nfo":
continue
from core import videolibrarytools
head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, dirname, f))
canales = it.library_urls.keys()
canales.sort()
if "playmax" in canales:
canales.pop(canales.index("playmax"))
canales.insert(0, "[COLOR red]playmax[/COLOR]")
title = "Serie ya en tu videoteca. [%s] ¿Añadir?" % ",".join(canales)
break
except:
import traceback
logger.error(traceback.format_exc())
pass
itemlist.append(item.clone(action="add_serie_to_library", title=title, text_color=color5,
extra="episodios###library"))
if itemlist and not __menu_info__:
ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
itemlist.extend(acciones_fichas(item, sid, ficha))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
if item.contentType == "movie":
# Download the page
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
if not item.infoLabels["tmdb_id"]:
item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data, '<a href="https://www.themoviedb.org/'
'[^/]+/(\d+)')
item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
if __modo_grafico__:
tmdb.set_infoLabels_item(item, __modo_grafico__)
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')
if not item.infoLabels["genre"]:
item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data, '<a itemprop="genre"[^>]+>'
'([^<]+)</a>'))
ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
if not ficha:
ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')
cid = "0"
else:
ficha, cid = scrapertools.find_single_match(item.url, 'ficha=(\d+)&c_id=(\d+)')
url = "https://playmax.mx/c_enlaces_n.php?apikey=%s&sid=%s&ficha=%s&cid=%s" % (apikey, sid, ficha, cid)
data = httptools.downloadpage(url).data
data = xml2dict(data)
for k, v in data["Data"].items():
try:
if type(v) is dict:
if k == "Online":
order = 1
elif k == "Download":
order = 0
else:
order = 2
itemlist.append(item.clone(action="", title=k, text_color=color3, order=order))
if type(v["Item"]) is str:
continue
elif type(v["Item"]) is dict:
v["Item"] = [v["Item"]]
for it in v["Item"]:
try:
thumbnail = "%s/styles/prosilver/imageset/%s.png" % (host, it['Host'])
title = " %s - %s/%s" % (it['Host'].capitalize(), it['Quality'], it['Lang'])
calidad = int(scrapertools.find_single_match(it['Quality'], '(\d+)p'))
calidadaudio = it['QualityA'].replace("...", "")
subtitulos = it['Subtitles'].replace("Sin subtítulos", "")
if subtitulos:
title += " (%s)" % subtitulos
if calidadaudio:
title += " [Audio:%s]" % calidadaudio
likes = 0
if it["Likes"] != "0" or it["Dislikes"] != "0":
likes = int(it["Likes"]) - int(it["Dislikes"])
title += " (%s ok, %s ko)" % (it["Likes"], it["Dislikes"])
if type(it["Url"]) is dict:
for i, enlace in enumerate(it["Url"]["Item"]):
titulo = title + " (Parte %s)" % (i + 1)
itemlist.append(item.clone(title=titulo, url=enlace, action="play", calidad=calidad,
thumbnail=thumbnail, order=order, like=likes, ficha=ficha,
cid=cid, folder=False))
else:
url = it["Url"]
itemlist.append(item.clone(title=title, url=url, action="play", calidad=calidad,
thumbnail=thumbnail, order=order, like=likes, ficha=ficha,
cid=cid, folder=False))
except:
pass
except:
pass
if not config.get_setting("order_web", "playmax"):
itemlist.sort(key=lambda it: (it.order, it.calidad, it.like), reverse=True)
else:
itemlist.sort(key=lambda it: it.order, reverse=True)
if itemlist:
itemlist.extend(acciones_fichas(item, sid, ficha))
if not itemlist and item.contentType != "movie":
url = url.replace("apikey=%s&" % apikey, "")
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
patron = '<div id="f_fde_c"[^>]+>(.*?update_fecha\(\d+\)">)</div>'
estrenos = scrapertools.find_multiple_matches(data, patron)
for info in estrenos:
info = "Estreno en " + scrapertools.htmlclean(info)
itemlist.append(item.clone(action="", title=info))
if not itemlist:
itemlist.append(item.clone(action="", title="No hay enlaces disponibles"))
return itemlist
def menu_info(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
item.infoLabels["tmdb_id"] = scrapertools.find_single_match(data, '<a href="https://www.themoviedb.org/[^/]+/(\d+)')
item.infoLabels["year"] = scrapertools.find_single_match(data, 'class="e_new">(\d{4})')
item.infoLabels["plot"] = scrapertools.find_single_match(data, 'itemprop="description">([^<]+)</div>')
item.infoLabels["genre"] = ", ".join(scrapertools.find_multiple_matches(data,
'<a itemprop="genre"[^>]+>([^<]+)</a>'))
if __modo_grafico__:
tmdb.set_infoLabels_item(item, __modo_grafico__)
action = "findvideos"
title = "Ver enlaces"
if item.contentType == "tvshow":
action = "episodios"
title = "Ver capítulos"
itemlist.append(item.clone(action=action, title=title))
carpeta = "CINE"
tipo = "película"
action = "add_pelicula_to_library"
extra = ""
if item.contentType == "tvshow":
carpeta = "SERIES"
tipo = "serie"
action = "add_serie_to_library"
extra = "episodios###library"
library_path = config.get_videolibrary_path()
if config.get_videolibrary_support():
title = "Añadir %s a la videoteca" % tipo
if item.infoLabels["imdb_id"] and not library_path.lower().startswith("smb://"):
try:
from core import filetools
path = filetools.join(library_path, carpeta)
files = filetools.walk(path)
for dirpath, dirname, filename in files:
if item.infoLabels["imdb_id"] in dirpath:
namedir = dirpath.replace(path, '')[1:]
for f in filename:
if f != namedir + ".nfo" and f != "tvshow.nfo":
continue
from core import videolibrarytools
head_nfo, it = videolibrarytools.read_nfo(filetools.join(dirpath, f))
canales = it.library_urls.keys()
canales.sort()
if "playmax" in canales:
canales.pop(canales.index("playmax"))
canales.insert(0, "[COLOR red]playmax[/COLOR]")
title = "%s ya en tu videoteca. [%s] ¿Añadir?" % (tipo.capitalize(), ",".join(canales))
break
except:
import traceback
logger.error(traceback.format_exc())
pass
itemlist.append(item.clone(action=action, title=title, text_color=color5, extra=extra))
token_auth = config.get_setting("token_trakt", "tvmoviedb")
if token_auth and item.infoLabels["tmdb_id"]:
extra = "movie"
if item.contentType != "movie":
extra = "tv"
itemlist.append(item.clone(channel="tvmoviedb", title="[Trakt] Gestionar con tu cuenta", action="menu_trakt",
extra=extra))
itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
text_color="magenta", context=""))
itemlist.append(item.clone(action="", title=""))
ficha = scrapertools.find_single_match(item.url, '-f(\d+)-')
if not ficha:
ficha = scrapertools.find_single_match(item.url, 'f=(\d+)')
itemlist.extend(acciones_fichas(item, sid, ficha, season=True))
itemlist.append(item.clone(action="acciones_cuenta", title="Añadir a una lista", text_color=color3, ficha=ficha))
return itemlist
def acciones_fichas(item, sid, ficha, season=False):
marcarlist = []
new_item = item.clone()
new_item.infoLabels.pop("duration", None)
estados = [{'following': 'seguir'}, {'favorite': 'favorita'}, {'view': 'vista'}, {'slope': 'pendiente'}]
url = "https://playmax.mx/ficha.php?apikey=%s&sid=%s&f=%s" % (apikey, sid, ficha)
data = httptools.downloadpage(url).data
data = xml2dict(data)
try:
marked = data["Data"]["User"]["Marked"]
if new_item.contentType == "episode":
for epi in data["Data"]["Episodes"]["Season_%s" % new_item.infoLabels["season"]]["Item"]:
if int(epi["Episode"]) == new_item.infoLabels["episode"]:
epi_marked = epi["EpisodeViewed"].replace("yes", "ya")
epi_id = epi["Id"]
marcarlist.append(new_item.clone(action="marcar", title="Capítulo %s visto. ¿Cambiar?" % epi_marked,
text_color=color3, epi_id=epi_id))
break
except:
pass
try:
tipo = new_item.contentType.replace("movie", "Película").replace("episode", "Serie").replace("tvshow", "Serie")
for status in estados:
for k, v in status.items():
if k != marked:
title = "Marcar %s como %s" % (tipo.lower(), v)
action = "marcar"
else:
title = "%s marcada como %s" % (tipo, v)
action = ""
if k == "following" and tipo == "Película":
continue
elif k == "following" and tipo == "Serie":
title = title.replace("seguir", "seguida")
if k != marked:
title = "Seguir serie"
action = "marcar"
marcarlist.insert(1, new_item.clone(action=action, title=title, text_color=color4, ficha=ficha,
folder=False))
continue
marcarlist.append(new_item.clone(action="marcar", title=title, text_color=color3, ficha=ficha,
folder=False))
except:
pass
try:
if season and item.contentType == "tvshow":
seasonlist = []
for k, v in data["Data"]["Episodes"].items():
vistos = False
season = k.rsplit("_", 1)[1]
if type(v) is str:
continue
elif type(v["Item"]) is not list:
v["Item"] = [v["Item"]]
for epi in v["Item"]:
if epi["EpisodeViewed"] == "no":
vistos = True
seasonlist.append(
new_item.clone(action="marcar", title="Marcar temporada %s como vista" % season,
text_color=color1, season=int(season), ficha=ficha, folder=False))
break
if not vistos:
seasonlist.append(
new_item.clone(action="marcar", title="Temporada %s ya vista. ¿Revertir?" % season,
text_color=color1, season=int(season), ficha=ficha, folder=False))
seasonlist.sort(key=lambda it: it.season, reverse=True)
marcarlist.extend(seasonlist)
except:
pass
return marcarlist
def acciones_cuenta(item):
logger.info()
itemlist = []
if "Tus fichas" in item.title:
itemlist.append(item.clone(title="Capítulos", url="tf_block_c a", contentType="tvshow"))
itemlist.append(item.clone(title="Series", url="tf_block_s", contentType="tvshow"))
itemlist.append(item.clone(title="Películas", url="tf_block_p"))
itemlist.append(item.clone(title="Documentales", url="tf_block_d"))
return itemlist
elif "Añadir a una lista" in item.title:
data = httptools.downloadpage(host + "/c_listas.php?apikey=%s&sid=%s" % (apikey, sid)).data
data = xml2dict(data)
itemlist.append(item.clone(title="Crear nueva lista", folder=False))
if data["Data"]["TusListas"] != "\t":
import random
data = data["Data"]["TusListas"]["Item"]
if type(data) is not list:
data = [data]
for child in data:
image = ""
title = "%s (%s fichas)" % (child["Title"], child["FichasInList"])
images = []
for i in range(1, 5):
if "sinimagen.png" not in child["Poster%s" % i]:
images.append(child["Poster%s" % i].replace("/100/", "/400/"))
if images:
image = images[random.randint(0, len(images) - 1)]
url = host + "/data.php?mode=add_listas&apikey=%s&sid=%s&ficha_id=%s" % (apikey, sid, item.ficha)
post = "lista_id[]=%s" % child["Id"]
itemlist.append(item.clone(title=title, url=url, post=post, thumbnail=image, folder=False))
return itemlist
elif "Crear nueva lista" in item.title:
from platformcode import platformtools
nombre = platformtools.dialog_input("", "Introduce un nombre para la lista")
if nombre:
dict_priv = {0: 'Pública', 1: 'Privada'}
priv = platformtools.dialog_select("Privacidad de la lista", ['Pública', 'Privada'])
if priv != -1:
url = host + "/data.php?mode=create_list&apikey=%s&sid=%s" % (apikey, sid)
post = "name=%s&private=%s" % (nombre, priv)
data = httptools.downloadpage(url, post)
platformtools.dialog_notification("Lista creada correctamente",
"Nombre: %s - %s" % (nombre, dict_priv[priv]))
platformtools.itemlist_refresh()
return
elif re.search(r"(?i)Seguir Lista", item.title):
from platformcode import platformtools
data = httptools.downloadpage(item.url)
platformtools.dialog_notification("Operación realizada con éxito", "Lista: %s" % item.lista)
return
elif item.post:
from platformcode import platformtools
data = httptools.downloadpage(item.url, item.post).data
platformtools.dialog_notification("Ficha añadida a la lista", "Lista: %s" % item.title)
platformtools.itemlist_refresh()
return
data = httptools.downloadpage("https://playmax.mx/tusfichas.php").data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", data)
bloque = scrapertools.find_single_match(data, item.url + '">(.*?)(?:<div class="tf_blocks|<div class="tf_o_move">)')
matches = scrapertools.find_multiple_matches(bloque, '<div class="tf_menu_mini">([^<]+)<(.*?)<cb></cb></div>')
for category, contenido in matches:
itemlist.append(item.clone(action="", title=category, text_color=color3))
patron = '<div class="c_fichas_image"[^>]*>[^<]*<[^>]+href="\.([^"]+)".*?src="([^"]+)".*?serie="([^"]*)".*?' \
'<div class="c_fichas_title">(?:<div class="c_fichas_episode">([^<]+)</div>|)([^<]+)</div>'
entradas = scrapertools.find_multiple_matches(contenido, patron)
for scrapedurl, scrapedthumbnail, serie, episodio, scrapedtitle in entradas:
tipo = "movie"
scrapedurl = host + scrapedurl
scrapedthumbnail = host + scrapedthumbnail
action = "findvideos"
if __menu_info__:
action = "menu_info"
if serie:
tipo = "tvshow"
if episodio:
title = " %s - %s" % (episodio.replace("X", "x"), scrapedtitle)
else:
title = " " + scrapedtitle
new_item = Item(channel=item.channel, action=action, title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, contentType=tipo,
text_color=color2)
if new_item.contentType == "tvshow":
new_item.show = scrapedtitle
if not __menu_info__:
new_item.action = "episodios"
itemlist.append(new_item)
return itemlist

def marcar(item):
    logger.info()
    if "Capítulo" in item.title:
        url = "%s/data.php?mode=capitulo_visto&apikey=%s&sid=%s&c_id=%s" % (host, apikey, sid, item.epi_id)
        message = item.title.replace("no", "marcado como").replace("ya", "cambiado a no").replace(" ¿Cambiar?", "")
    elif "temporada" in item.title.lower():
        type_marcado = "1"
        if "como vista" in item.title:
            message = "Temporada %s marcada como vista" % item.season
        else:
            type_marcado = "2"
            message = "Temporada %s marcada como no vista" % item.season
        url = "%s/data.php?mode=temporada_vista&apikey=%s&sid=%s&ficha=%s&t_id=%s&type=%s" \
              % (host, apikey, sid, item.ficha, item.season, type_marcado)
    else:
        message = item.title.replace("Marcar ", "Marcada ").replace("Seguir serie", "Serie en seguimiento")
        # tipo values sent to data.php, as used below: 3=favorita, 2=pendiente, 4=vista;
        # "Seguir" first marks the ficha as pendiente (2) and then as followed (1)
        if "favorita" in item.title:
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "3")
        elif "pendiente" in item.title:
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "2")
        elif "vista" in item.title:
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "4")
        elif "Seguir" in item.title:
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "2")
            data = httptools.downloadpage(url)
            url = "%s/data.php?mode=marcar_ficha&apikey=%s&sid=%s&ficha=%s&tipo=%s" \
                  % (host, apikey, sid, item.ficha, "1")
    data = httptools.downloadpage(url)
    # Note: 'sucess' (sic) is the attribute name this codebase's httptools response exposes
    if data.sucess and config.get_platform() != "plex" and item.action != "play":
        from platformcode import platformtools
        platformtools.dialog_notification("Acción correcta", message)

def listas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = xml2dict(data)
    if item.extra == "listas":
        itemlist.append(Item(channel=item.channel, title="Listas más seguidas", action="listas", text_color=color1,
                             url=item.url + "&orden=1", extra="listas_plus"))
        itemlist.append(Item(channel=item.channel, title="Listas con más fichas", action="listas", text_color=color1,
                             url=item.url + "&orden=2", extra="listas_plus"))
        itemlist.append(Item(channel=item.channel, title="Listas aleatorias", action="listas", text_color=color1,
                             url=item.url + "&orden=3", extra="listas_plus"))
        if data["Data"]["ListasSiguiendo"] != "\t":
            itemlist.append(Item(channel=item.channel, title="Listas que sigo", action="listas", text_color=color1,
                                 url=item.url, extra="sigo"))
        if data["Data"]["TusListas"] != "\t":
            itemlist.append(Item(channel=item.channel, title="Mis listas", action="listas", text_color=color1,
                                 url=item.url, extra="mislistas"))
        return itemlist
    elif item.extra == "sigo":
        data = data["Data"]["ListasSiguiendo"]["Item"]
    elif item.extra == "mislistas":
        data = data["Data"]["TusListas"]["Item"]
    else:
        data = data["Data"]["Listas"]["Item"]
    if type(data) is not list:
        data = [data]
    import random
    for child in data:
        image = ""
        title = "%s (%s fichas)" % (child["Title"], child["FichasInList"])
        images = []
        for i in range(1, 5):
            if "sinimagen.png" not in child["Poster%s" % i]:
                images.append(child["Poster%s" % i].replace("/100/", "/400/"))
        if images:
            image = random.choice(images)
        url = host + "/l%s" % child["Id"]
        itemlist.append(Item(channel=item.channel, action="fichas", url=url, text_color=color3,
                             thumbnail=image, title=title, extra=item.extra))
    if len(itemlist) == 20:
        start = scrapertools.find_single_match(item.url, 'start=(\d+)')
        end = int(start) + 20
        url = re.sub(r'start=%s' % start, 'start=%s' % end, item.url)
        itemlist.append(item.clone(title=">> Página Siguiente", url=url))
    return itemlist

def play(item):
    logger.info()
    from core import servertools
    # servertools.findvideos returns a list of matches; index 1 holds the url
    # and index 2 the server name
    devuelve = servertools.findvideos(item.url, True)
    if devuelve:
        item.url = devuelve[0][1]
        item.server = devuelve[0][2]
    if config.get_setting("mark_play", "playmax"):
        if item.contentType == "movie":
            marcar(item.clone(title="marcar como vista"))
        else:
            marcar(item.clone(title="Capítulo", epi_id=item.cid))
    return [item]

def select_page(item):
    import xbmcgui
    dialog = xbmcgui.Dialog()
    number = dialog.numeric(0, "Introduce el número de página")
    if number != "":
        # Each fichas page holds 60 entries, so page N starts at offset N*60
        number = int(number) * 60
        item.url = re.sub(r'start=(\d+)', "start=%s" % number, item.url)
    return fichas(item)
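
# A quick sanity check of the page-to-offset mapping (hypothetical URL shape,
# assuming fichas listings carry a start=N query parameter as above): with
#   item.url = host + "/fichas.php?start=0"
# entering "3" in the dialog rewrites it to
#   re.sub(r'start=(\d+)', "start=180", item.url)  ->  ".../fichas.php?start=180"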

def xml2dict(xmldata):
    """
    Parses an XML string and returns it as a dictionary.
    Parameters:
        xmldata (str) -- XML text to convert into a dictionary.
    Returns:
        A dictionary built from the XML fields.
    """
    import sys
    # Look the function up by its own name so the recursive calls below keep
    # working however the function is referenced
    parse = globals().get(sys._getframe().f_code.co_name)
    matches = re.compile("<(?P<tag>[^>]+)>[\n]*[\s]*[\t]*(?P<value>.*?)[\n]*[\s]*[\t]*<\/(?P=tag)\s*>",
                         re.DOTALL).findall(xmldata)
    return_dict = {}
    for tag, value in matches:
        # If the value contains nested elements, parse them recursively
        # (the original test `if "<" and "</" in value` only checked "</")
        if "<" in value and "</" in value:
            if tag in return_dict:
                if type(return_dict[tag]) == list:
                    return_dict[tag].append(parse(value))
                else:
                    # Repeated tag: promote the existing entry to a list
                    return_dict[tag] = [return_dict[tag]]
                    return_dict[tag].append(parse(value))
            else:
                return_dict[tag] = parse(value)
        else:
            if tag in return_dict:
                if type(return_dict[tag]) == list:
                    return_dict[tag].append(value)
                else:
                    return_dict[tag] = [return_dict[tag]]
                    return_dict[tag].append(value)
            else:
                # Leaf values; "true"/"false" become booleans
                if value in ["true", "false"]:
                    value = (value == "true")
                return_dict[tag] = value
    return return_dict
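
# Minimal usage sketch (hypothetical XML; real playmax responses follow the
# Data/TusListas layout consumed above). Repeated sibling tags collapse into a
# list, and "true"/"false" leaves become booleans:
#   xml2dict("<Data><Item><Id>1</Id></Item><Item><Id>2</Id></Item><Ok>true</Ok></Data>")
#   -> {'Data': {'Item': [{'Id': '1'}, {'Id': '2'}], 'Ok': True}}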

View File

@@ -25,7 +25,6 @@ color1, color2, color3 = ['0xFFB10021', '0xFFB10021', '0xFFB10004']
def login():
url_origen = "https://www.plusdede.com/login?popup=1"
data = httptools.downloadpage(url_origen, follow_redirects=True).data
logger.debug("dataPLUSDEDE=" + data)
if re.search(r'(?i)%s' % config.get_setting("plusdedeuser", "plusdede"), data):
return True
@@ -34,12 +33,10 @@ def login():
post = "_token=" + str(token) + "&email=" + str(
config.get_setting("plusdedeuser", "plusdede")) + "&password=" + str(
config.get_setting("plusdedepassword", "plusdede")) + "&app=2131296469"
# logger.debug("dataPLUSDEDE_POST="+post)
url = "https://www.plusdede.com/"
headers = {"Referer": url, "X-Requested-With": "XMLHttpRequest", "X-CSRF-TOKEN": token}
data = httptools.downloadpage("https://www.plusdede.com/login", post=post, headers=headers,
replace_headers=False).data
logger.debug("PLUSDEDE_DATA=" + data)
if "redirect" in data:
return True
else:
@@ -183,7 +180,6 @@ def generos(item):
tipo = item.url.replace("https://www.plusdede.com/", "")
# Descarga la pagina
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
# Extrae las entradas (carpetas)
data = scrapertools.find_single_match(data,
@@ -198,7 +194,6 @@ def generos(item):
plot = ""
# https://www.plusdede.com/pelis?genre_id=1
url = "https://www.plusdede.com/" + tipo + "?genre_id=" + id_genere
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist.append(
Item(channel=item.channel, action="peliculas", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title))
@@ -229,11 +224,9 @@ def buscar(item):
# Descarga la pagina
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)
# Extrae las entradas (carpetas)
json_object = jsontools.load(data)
logger.debug("content=" + json_object["content"])
data = json_object["content"]
return parse_mixed_results(item, data)
@@ -248,7 +241,6 @@ def parse_mixed_results(item, data):
patron += '.*?<div class="year">([^<]+)</div>+'
patron += '.*?<div class="value"><i class="fa fa-star"></i> ([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
logger.debug("PARSE_DATA:" + data)
if item.tipo == "lista":
following = scrapertools.find_single_match(data, '<div class="follow-lista-buttons ([^"]+)">')
data_id = scrapertools.find_single_match(data, 'data-model="10" data-id="([^"]+)">')
@@ -286,7 +278,6 @@ def parse_mixed_results(item, data):
sectionStr = "docu"
referer = urlparse.urljoin(item.url, scrapedurl)
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("PELII_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.tipo != "series":
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, fanart=fanart,
@@ -294,7 +285,6 @@ def parse_mixed_results(item, data):
else:
referer = item.url
url = urlparse.urljoin(item.url, scrapedurl)
logger.debug("SERIE_title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if item.tipo != "pelis":
itemlist.append(Item(channel=item.channel, action="episodios", title=title, extra=referer, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=fulltitle, show=title, fanart=fanart,
@@ -304,7 +294,6 @@ def parse_mixed_results(item, data):
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)">')
if next_page != "":
url = urlparse.urljoin("https://www.plusdede.com", next_page).replace("amp;", "")
logger.debug("URL_SIGUIENTE:" + url)
itemlist.append(
Item(channel=item.channel, action="pag_sig", token=item.token, title=">> Página siguiente",
extra=item.extra, url=url))
@@ -323,7 +312,6 @@ def siguientes(item): # No utilizada
# Descarga la pagina
data = httptools.downloadpage(item.url).data
logger.debug("data=" + data)
# Extrae las entradas (carpetas)
bloque = scrapertools.find_single_match(data, '<h2>Siguiendo</h2>(.*?)<div class="box">')
@@ -358,7 +346,6 @@ def siguientes(item): # No utilizada
Item(channel=item.channel, action="episodio", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, show=title, fanart=fanart, extra=session + "|" + episode))
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
return itemlist
@@ -369,7 +356,6 @@ def episodio(item):
# Descarga la pagina
data = httptools.downloadpage(item.url).data
# logger.debug("data="+data)
session = str(int(item.extra.split("|")[0]))
episode = str(int(item.extra.split("|")[1]))
@@ -377,7 +363,6 @@ def episodio(item):
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
for bloque_episodios in matchestemporadas:
logger.debug("bloque_episodios=" + bloque_episodios)
# Extrae los episodios
patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">' + episode + ' </span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?'
@@ -401,7 +386,6 @@ def episodio(item):
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot,
fulltitle=title, fanart=item.fanart, show=item.show))
logger.debug("Abrimos title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
itemlist2 = []
for capitulo in itemlist:
@@ -415,11 +399,9 @@ def peliculas(item):
# Descarga la pagina
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data_DEF_PELICULAS=" + data)
# Extrae las entradas (carpetas)
json_object = jsontools.load(data)
logger.debug("html=" + json_object["content"])
data = json_object["content"]
return parse_mixed_results(item, data)
@@ -432,24 +414,18 @@ def episodios(item):
# Descarga la pagina
idserie = ''
data = httptools.downloadpage(item.url).data
# logger.debug("dataEPISODIOS="+data)
patrontemporada = '<ul.*?<li class="season-header" >([^<]+)<(.*?)\s+</ul>'
matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data)
logger.debug(matchestemporadas)
idserie = scrapertools.find_single_match(data, 'data-model="5" data-id="(\d+)"')
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("kodi")):
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
for nombre_temporada, bloque_episodios in matchestemporadas:
logger.debug("nombre_temporada=" + nombre_temporada)
logger.debug("bloque_episodios=" + bloque_episodios)
logger.debug("id_serie=" + idserie)
# Extrae los episodios
patron_episodio = '<li><a href="#"(.*?)</a></li>'
# patron = '<li><a href="#" data-id="([^"]*)".*?data-href="([^"]+)">\s*<div class="name">\s*<span class="num">([^<]+)</span>\s*([^<]+)\s*</div>.*?"show-close-footer episode model([^"]+)"'
matches = re.compile(patron_episodio, re.DOTALL).findall(bloque_episodios)
# logger.debug(matches)
for data_episodio in matches:
scrapeid = scrapertools.find_single_match(data_episodio, '<li><a href="#" data-id="([^"]*)"')
@@ -462,7 +438,6 @@ def episodios(item):
title = nombre_temporada.replace("Temporada ", "").replace("Extras de la serie", "Extras 0").replace(" ",
"") + "x" + numero + " " + scrapertools.htmlclean(
scrapedtitle)
logger.debug("CAP_VISTO:" + visto)
if visto.strip() == "seen":
title = "[visto] " + title
@@ -478,7 +453,6 @@ def episodios(item):
Item(channel=item.channel, action="findvideos", nom_serie=item.title, tipo="5", title=title, url=url,
thumbnail=thumbnail, plot=plot, fulltitle=title, fanart=fanart, show=item.show))
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if config.get_videolibrary_support():
# con año y valoracion la serie no se puede actualizar correctamente, si ademas cambia la valoracion, creara otra carpeta
@@ -540,7 +514,6 @@ def parse_listas(item, bloque_lista):
thumbnail = ""
itemlist.append(
Item(channel=item.channel, action="peliculas", token=item.token, tipo="lista", title=title, url=url))
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "], tipo =[lista]")
nextpage = scrapertools.find_single_match(bloque_lista,
'<div class="onclick load-more-icon no-json" data-action="replace" data-url="([^"]+)"')
@@ -569,13 +542,10 @@ def listas(item):
patron = '<div class="content">\s*<h2>Listas populares(.*?)</div>\s*</div>\s*</div>\s*</div>\s*</div>'
data = httptools.downloadpage(item.url).data
logger.debug("dataSINHEADERS=" + data)
item.token = scrapertools.find_single_match(data, '_token" content="([^"]+)"').strip()
logger.debug("token_LISTA_" + item.token)
bloque_lista = scrapertools.find_single_match(data, patron)
logger.debug("bloque_LISTA" + bloque_lista)
return parse_listas(item, bloque_lista)
@@ -585,7 +555,6 @@ def lista_sig(item):
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)
return parse_listas(item, data)
@@ -595,7 +564,6 @@ def pag_sig(item):
headers = {"X-Requested-With": "XMLHttpRequest"}
data = httptools.downloadpage(item.url, headers=headers).data
logger.debug("data=" + data)
return parse_mixed_results(item, data)
@@ -605,8 +573,6 @@ def findvideos(item, verTodos=False):
# Descarga la pagina
data = httptools.downloadpage(item.url).data
logger.info("URL:" + item.url + " DATA=" + data)
# logger.debug("data="+data)
data_model = scrapertools.find_single_match(data, 'data-model="([^"]+)"')
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)"')
@@ -616,7 +582,6 @@ def findvideos(item, verTodos=False):
url = "https://www.plusdede.com/aportes/" + data_model + "/" + data_id + "?popup=1"
data = httptools.downloadpage(url).data
logger.debug("URL:" + url + " dataLINKS=" + data)
token = scrapertools.find_single_match(data, '_token" content="([^"]+)"')
patron = 'target="_blank" (.*?)</a>'
@@ -628,7 +593,6 @@ def findvideos(item, verTodos=False):
itemlist.append(Item(channel=item.channel, action="infosinopsis", title="INFO / SINOPSIS", url=item.url,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
logger.debug("TRAILER_YOUTUBE:" + trailer)
itemlist.append(Item(channel=item.channel, action="play", title="TRAILER", url=item.url, trailer=trailer,
thumbnail=item.thumbnail, fanart=item.fanart, folder=False))
@@ -637,9 +601,6 @@ def findvideos(item, verTodos=False):
item.channel) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion
showlinks = config.get_setting("plusdedeshowlinks", item.channel) # 0:todos, 1:ver online, 2:descargar
# sortlinks = int(sortlinks) if sortlinks != '' and sortlinks !="No" else 0
# showlinks = int(showlinks) if showlinks != '' and showlinks !="No" else 0
if sortlinks != '' and sortlinks != "No":
sortlinks = int(sortlinks)
else:
@@ -651,14 +612,13 @@ def findvideos(item, verTodos=False):
showlinks = 0
for match in matches:
# logger.debug("match="+match)
jdown = scrapertools.find_single_match(match, '<span class="fa fa-download"></span>([^<]+)')
if (showlinks == 1 and jdown != '') or (
showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar
continue
idioma_1 = ""
idiomas = re.compile('<img src="https://cdn.plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
idiomas = re.compile('<img src="https://cd.*?plusdede.com/images/flags/([^"]+).png', re.DOTALL).findall(match)
idioma_0 = idiomas[0]
if len(idiomas) > 1:
idioma_1 = idiomas[1]
@@ -670,16 +630,12 @@ def findvideos(item, verTodos=False):
calidad_video = scrapertools.find_single_match(match,
'<span class="fa fa-video-camera"></span>(.*?)</div>').replace(
" ", "").replace("\n", "")
logger.debug("calidad_video=" + calidad_video)
calidad_audio = scrapertools.find_single_match(match,
'<span class="fa fa-headphones"></span>(.*?)</div>').replace(
" ", "").replace("\n", "")
logger.debug("calidad_audio=" + calidad_audio)
thumb_servidor = scrapertools.find_single_match(match, '<img src="([^"]+)">')
logger.debug("thumb_servidor=" + thumb_servidor)
nombre_servidor = scrapertools.find_single_match(thumb_servidor, "hosts/([^\.]+).png")
logger.debug("nombre_servidor=" + nombre_servidor)
if jdown != '':
title = "Download " + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip() + ", audio " + calidad_audio.strip() + ")"
@@ -696,7 +652,6 @@ def findvideos(item, verTodos=False):
url = urlparse.urljoin(item.url, scrapertools.find_single_match(match, 'href="([^"]+)"'))
thumbnail = thumb_servidor
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
if sortlinks > 0:
# orden1 para dejar los "downloads" detras de los "ver" al ordenar
# orden2 segun configuración
@@ -788,13 +743,10 @@ def play(item):
headers = {'Referer': item.extra}
data = httptools.downloadpage(item.url, headers=headers).data
# logger.debug("dataLINK="+data)
url = scrapertools.find_single_match(data,
'<a href="([^"]+)" target="_blank"><button class="btn btn-primary">visitar enlace</button>')
url = urlparse.urljoin("https://www.plusdede.com", url)
# logger.debug("DATA_LINK_FINAL:"+url)
logger.debug("URL_PLAY:" + url)
headers = {'Referer': item.url}
media_url = httptools.downloadpage(url, headers=headers, follow_redirects=False).headers.get("location")
# logger.info("media_url="+media_url)
@@ -808,7 +760,6 @@ def play(item):
videoitem.channel = item.channel
# Marcar como visto
logger.debug(item)
checkseen(item)
return itemlist
@@ -827,7 +778,6 @@ def checkseen(item):
tipo_str = "pelis"
headers = {"Referer": "https://www.plusdede.com/" + tipo_str, "X-Requested-With": "XMLHttpRequest",
"X-CSRF-TOKEN": item.token}
logger.debug("Entrando a checkseen " + url_temp + item.token)
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers, replace_headers=True).data
return True
@@ -836,7 +786,6 @@ def infosinopsis(item):
logger.info()
data = httptools.downloadpage(item.url).data
logger.debug("SINOPSISdata=" + data)
scrapedtitle = scrapertools.find_single_match(data, '<div class="media-title">([^<]+)</div>')
scrapedvalue = scrapertools.find_single_match(data, '<span class="value">([^<]+)</span>')
@@ -845,11 +794,8 @@ def infosinopsis(item):
scrapedduration = scrapertools.htmlclean(scrapertools.find_single_match(data,
'<strong>Duración</strong>\s*<div class="mini-content">([^<]+)</div>').strip().replace(
" ", "").replace("\n", ""))
logger.debug(scrapedduration)
scrapedplot = scrapertools.find_single_match(data, '<div class="plot expandable">([^<]+)<div').strip()
logger.debug("SINOPSISdataplot=" + scrapedplot)
generos = scrapertools.find_single_match(data, '<strong>Género</strong>\s*<ul>(.*?)</ul>')
logger.debug("generos=" + generos)
scrapedgenres = re.compile('<li>([^<]+)</li>', re.DOTALL).findall(generos)
scrapedcasting = re.compile(
'<a href="https://www.plusdede.com/star/[^"]+"><div class="text-main">([^<]+)</div></a>\s*<div class="text-sub">\s*([^<]+)</div>',
@@ -954,7 +900,6 @@ def plusdede_check(item):
if item.tipo_esp == "lista":
url_temp = "https://www.plusdede.com/listas/addmediapopup/" + item.tipo + "/" + item.idtemp + "?popup=1"
data = httptools.downloadpage(url_temp).data
logger.debug("DATA_CHECK_LISTA:" + data)
patron = '<div class="lista model" data-model="10" data-id="([^"]+)">+'
patron += '.*?<a href="/lista/[^"]+">([^<]+)</a>+'
@@ -986,8 +931,6 @@ def plusdede_check(item):
"X-CSRF-TOKEN": item.token}
data = httptools.downloadpage(url_temp, post="id=" + item.idtemp, headers=headers,
replace_headers=True).data.strip()
logger.debug("URL_PLUSDEDECHECK_DATA=" + url_temp + " ITEM:TIPO=" + item.tipo)
logger.debug("PLUSDEDECHECK_DATA=" + data)
dialog = platformtools
dialog.ok = platformtools.dialog_ok
if data == "1":
@@ -1002,4 +945,4 @@ def plusdede_check(item):
elif item.tipo_esp == "add_list":
dialog.ok('SUCCESS', 'Añadido a la lista!')
else:
dialog.ok('ERROR', 'No se pudo realizar la acción!')
dialog.ok('ERROR', 'No se pudo realizar la acción!')

View File

@@ -30,11 +30,6 @@ def mainlist(item):
itemlist.append(
Item(channel=item.channel, action="menudesta", title="Destacadas", url= host + "/pag/1",
thumbnail="http://img.irtve.es/v/1074982/", fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Proximos estrenos",
url= host + "/archivos/proximos-estrenos/pag/1",
thumbnail="https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcTpsRC"
"-GTYzCqhor2gIDfAB61XeymwgXWSVBHoRAKs2c5HAn29f&reload=on",
fanart=mifan))
itemlist.append(Item(channel=item.channel, action="menupelis", title="Todas las Peliculas",
url= host + "/pag/1",
thumbnail="https://freaksociety.files.wordpress.com/2012/02/logos-cine.jpg", fanart=mifan))
@@ -70,7 +65,8 @@ def menupelis(item):
logger.info(item.url)
itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
if item.genre:
item.extra = item.genre
if item.extra == '':
section = 'Recién Agregadas'
elif item.extra == 'year':
@@ -79,17 +75,13 @@ def menupelis(item):
section = 'de Eróticas \+18'
else:
section = 'de %s'%item.extra
patronenlaces = '<h.>Películas %s<\/h.>.*?>(.*?)<\/section>'%section
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
patronenlaces = '<h.>Películas %s</h.>.*?>(.*?)</section>'%section
matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
for bloque_enlaces in matchesenlaces:
patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "");
@@ -144,21 +136,14 @@ def menudesta(item):
# Peliculas de Estreno
def menuestre(item):
logger.info(item.url)
itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patronenlaces = '<h1>Estrenos</h1>(.*?)</section>'
matchesenlaces = re.compile(patronenlaces, re.DOTALL).findall(data)
matchesenlaces = scrapertools.find_multiple_matches(data, patronenlaces)
for bloque_enlaces in matchesenlaces:
# patron = '<a href="([^"]+)" title="([^"]+)"> <div class="poster".*?<img src="([^"]+)"'
patron = '<div class="poster-media-card">.*?'
patron += '<a href="(.*?)".*?title="(.*?)"(.*?)'
patron += '<img src="(.*?)"'
matches = re.compile(patron, re.DOTALL).findall(bloque_enlaces)
for scrapedurl, scrapedtitle, extra_info, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
@@ -255,32 +240,22 @@ def search(item, texto):
patron += '<div class="row">.*?'
patron += '<a href="(.*?)" title="(.*?)">.*?'
patron += '<img src="(.*?)"'
logger.info(patron)
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = item.url + scrapedurl
thumbnail = item.url + scrapedthumbnail
logger.info(url)
url = scrapedurl
thumbnail = scrapedthumbnail
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, fanart=thumbnail))
return itemlist
def poranyo(item):
logger.info(item.url)
itemlist = []
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<option value="([^"]+)">(.*?)</option>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -289,7 +264,6 @@ def poranyo(item):
url = item.url + scrapedurl
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra='year'))
return itemlist
@@ -300,24 +274,25 @@ def porcateg(item):
data = httptools.downloadpage(item.url).data.decode('iso-8859-1').encode('utf-8')
patron = '<li class="cat-item cat-item-3">.*?<a href="([^"]+)" title="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
adult_mode = config.get_setting("adult_mode")
for scrapedurl, scrapedtitle in matches:
if "18" in scrapedtitle and adult_mode == 0:
continue
title = scrapertools.remove_show_from_title(scrapedtitle, "Ver Película")
title = title.replace("Online", "")
url = scrapedurl
logger.info(url)
# si no esta permitidas categoria adultos, la filtramos
extra = title
adult_mode = config.get_setting("adult_mode")
extra1 = title
if adult_mode != 0:
if 'erotic' in scrapedurl:
extra = 'adult'
extra1 = 'adult'
else:
extra=title
extra1=title
if (extra=='adult' and adult_mode != 0) or extra != 'adult':
if (extra1=='adult' and adult_mode != 0) or extra1 != 'adult':
itemlist.append(Item(channel=item.channel, action="menupelis", title=title, fulltitle=title, url=url,
fanart=item.fanart, extra = extra))
fanart=item.fanart, genre = extra1))
return itemlist
@@ -338,7 +313,6 @@ def decode(string):
i += 1
enc4 = keyStr.index(input[i])
i += 1
chr1 = (enc1 << 2) | (enc2 >> 4)
chr2 = ((enc2 & 15) << 4) | (enc3 >> 2)
chr3 = ((enc3 & 3) << 6) | enc4
@@ -352,4 +326,4 @@ def decode(string):
output = output.decode('utf8')
return output
return output

View File

@@ -24,6 +24,13 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="search",
title="Buscar por titulo", context=context,
thumbnail=get_thumb("search.png")))
thumbnail = get_thumb("search_star.png")
itemlist.append(Item(channel='tvmoviedb', title="Buscar actor/actriz", action="search_",
search={'url': 'search/person', 'language': 'es', 'page': 1}, star=True,
thumbnail=thumbnail))
itemlist.append(Item(channel=item.channel, action="search",
title="Buscar por categorias (búsqueda avanzada)", extra="categorias",
context=context,
@@ -290,7 +297,10 @@ def do_search(item, categories=None):
multithread = config.get_setting("multithread", "search")
result_mode = config.get_setting("result_mode", "search")
tecleado = item.extra
if item.wanted!='':
tecleado=item.wanted
else:
tecleado = item.extra
itemlist = []

View File

@@ -8,6 +8,7 @@ from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
from platformcode import config, logger
IDIOMAS = {'latino': 'Latino'}
@@ -35,6 +36,7 @@ def mainlist(item):
url=host,
thumbnail='https://s27.postimg.org/iahczwgrn/series.png',
fanart='https://s27.postimg.org/iahczwgrn/series.png',
page=0
))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -49,15 +51,21 @@ def todas(item):
'Serie><span>(.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches:
# Paginacion
num_items_x_pagina = 30
min = item.page * num_items_x_pagina
min=int(min)-int(item.page)
max = min + num_items_x_pagina - 1
for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches[min:max]:
url = host + scrapedurl
calidad = scrapedcalidad
title = scrapedtitle.decode('utf-8')
thumbnail = scrapedthumbnail
fanart = 'https://s32.postimg.org/gh8lhbkb9/seodiv.png'
itemlist.append(
Item(channel=item.channel,
if not 'xxxxxx' in scrapedtitle:
itemlist.append(
Item(channel=item.channel,
action="temporadas",
title=title, url=url,
thumbnail=thumbnail,
@@ -67,7 +75,13 @@ def todas(item):
language=language,
context=autoplay.context
))
tmdb.set_infoLabels(itemlist)
if len(itemlist)>28:
itemlist.append(
Item(channel=item.channel,
title="[COLOR cyan]Página Siguiente >>[/COLOR]",
url=item.url, action="todas",
page=item.page + 1))
return itemlist
@@ -222,16 +236,31 @@ def episodiosxtemp(item):
def findvideos(item):
logger.info()
itemlist = []
lang=[]
data = httptools.downloadpage(item.url).data
video_items = servertools.find_video_items(item)
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
language_items=scrapertools.find_single_match(data,
'<ul class=tabs-sidebar-ul>(.+?)<\/ul>')
matches=scrapertools.find_multiple_matches(language_items,
'<li><a href=#ts(.+?)><span>(.+?)<\/span><\/a><\/li>')
for idl,scrapedlang in matches:
if int(idl)<5 and int(idl)!=1:
lang.append(scrapedlang)
i=0
if len(lang)!=0:
lang.reverse()
for videoitem in video_items:
videoitem.thumbnail = servertools.guess_server_thumbnail(videoitem.server)
videoitem.language = scrapertools.find_single_match(data, '<span class="f-info-title">Idioma:<\/span>\s*<span '
'class="f-info-text">(.*?)<\/span>')
if i<len(lang) and len(lang)!=0:
videoitem.language=lang[i]
else:
videoitem.language = scrapertools.find_single_match(data, '<span class=f-info-title>Idioma:<\/span>\s*<span '
'class=f-info-text>(.*?)<\/span>')
videoitem.title = item.contentSerieName + ' (' + videoitem.server + ') (' + videoitem.language + ')'
videoitem.quality = 'default'
videoitem.context = item.context
i=i+1
itemlist.append(videoitem)
# Requerido para FilterTools

View File

@@ -13,7 +13,6 @@ from platformcode import config, logger
from channels import autoplay
HOST = "https://seriesblanco.com/"
IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'vo': 'VO', 'vos': 'VOS', 'vosi': 'VOSI', 'otro': 'OVOS'}
list_idiomas = IDIOMAS.values()
@@ -21,8 +20,7 @@ list_language = ['default']
CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
list_quality = CALIDADES
list_servers = ['streamix',
'powvideo',
list_servers = ['powvideo',
'streamcloud',
'openload',
'flashx',
@@ -30,7 +28,8 @@ list_servers = ['streamix',
'nowvideo',
'gamovideo',
'kingvid',
'vidabc'
'vidabc',
'streamixcloud'
]
@@ -97,17 +96,21 @@ def extract_series_from_data(item, data):
except UnicodeError:
name = unicode(name, "iso-8859-1", errors="replace").encode("utf-8")
name = name.strip()
# logger.debug("Show found: %s -> %s (%s)" % (name, url, img))
if not episode_pattern.search(url):
action = "episodios"
else:
action = "findvideos"
context1=[filtertools.context(item, list_idiomas, CALIDADES), autoplay.context]
context = filtertools.context(item, list_idiomas, CALIDADES)
context2 = autoplay.context
context.extend(context2)
itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url),
action=action, show=name,
thumbnail=img,
context=context1))
context=context))
more_pages = re.search('pagina=([0-9]+)">>>', data)
if more_pages:
@@ -184,6 +187,7 @@ def search(item, texto):
"id=['\"]q2[1\"] name=['\"]q2['\"] value=['\"](?P<title>.*?)['\"]", data)
for url, img, title in shows:
title = title.strip()
itemlist.append(item.clone(title=title, url=urlparse.urljoin(HOST, url), action="episodios", show=title,
thumbnail=img, context=filtertools.context(item, list_idiomas, CALIDADES)))
@@ -306,12 +310,13 @@ def findvideos(item):
for i in range(len(list_links)):
a=list_links[i].title
b=a.lstrip('Ver en')
b=a[a.find("en") + 2:]
c=b.split('[')
d=c[0].rstrip( )
d=d.lstrip( )
list_links[i].server=d
list_links[i].server=d.replace("streamix", "streamixcloud")
list_links = servertools.get_servers_itemlist(list_links)
autoplay.start(list_links, item)
return list_links
@@ -319,7 +324,6 @@ def findvideos(item):
def play(item):
logger.info("%s - %s = %s" % (item.show, item.title, item.url))
if item.url.startswith(HOST):
data = httptools.downloadpage(item.url).data
@@ -340,7 +344,7 @@ def play(item):
else:
url = item.url
itemlist = servertools.find_video_items(data=url)
itemlist = servertools.find_video_items(item=item,data=url)
titulo = scrapertoolsV2.find_single_match(item.fulltitle, "^(.*?)\s\[.+?$")
if titulo:

View File

@@ -14,10 +14,7 @@ from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'okru',
'netutv',
'rapidvideo'
list_servers = ['openload'
]
list_quality = ['default']
@@ -49,7 +46,11 @@ def lista(item):
patron = '<a href="([^"]+)" '
patron += 'class="link">.+?<img src="([^"]+)".*?'
patron += 'title="([^"]+)">'
if item.url==host:
a=1
else:
num=(item.url).split('-')
a=int(num[1])
matches = scrapertools.find_multiple_matches(data, patron)
# Paginacion
@@ -57,17 +58,30 @@ def lista(item):
min = item.page * num_items_x_pagina
min=min-item.page
max = min + num_items_x_pagina - 1
b=0
for link, img, name in matches[min:max]:
title = name
b=b+1
if " y " in name:
title=name.replace(" y "," & ")
else:
title = name
url = host + link
scrapedthumbnail = host + img
context1=[renumbertools.context(item), autoplay.context]
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
context=context1))
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
context=context))
if b<29:
a=a+1
url="https://serieslan.com/pag-"+str(a)
if b>10:
itemlist.append(
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
else:
itemlist.append(
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
tmdb.set_infoLabels(itemlist)
return itemlist
@@ -84,7 +98,7 @@ def episodios(item):
patron_caps = '<li><span>Capitulo ([^"]+)\:<\/span><[^"]+"(.+?)">([^"]+)<[^"]+<\/li>'
matches = scrapertools.find_multiple_matches(data, patron_caps)
# data_info = scrapertools.find_single_match(data, '<div class="info">.+?<\/div><\/div>')
patron_info = '<img src="([^"]+)">.+?<\/span>([^"]+)<\/p><p><span>I.+?Reseña: <\/span>(.+?)<\/p><\/div>'
patron_info = '<img src="([^"]+)">.+?</span>(.*?)</p>.*?<h2>Reseña:</h2><p>(.*?)</p>'
scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
scrapedthumbnail = host + scrapedthumbnail
@@ -92,6 +106,10 @@ def episodios(item):
title = ""
pat = "/"
if "Mike, Lu & Og"==item.title:
pat="&/"
if "KND" in item.title:
pat="-"
# varios episodios en un enlace
if len(name.split(pat)) > 1:
i = 0
@@ -119,7 +137,7 @@ def episodios(item):
thumbnail=scrapedthumbnail))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
@@ -133,7 +151,7 @@ def findvideos(item):
itemlist = []
url_server = "https://openload.co/embed/%s/"
url_api_get_key = "https://serieslan.com/ide.php?i=%s&k=%s"
url_api_get_key = "https://serieslan.com/idx.php?i=%s&k=%s"
def txc(key, _str):
s = range(256)
@@ -156,7 +174,7 @@ def findvideos(item):
return res
data = httptools.downloadpage(item.url).data
pattern = '<div id="video" idv="([^"]*)" ide="([^"]*)" ids="[^"]*" class="video">'
pattern = "<script type=.+?>.+?\['(.+?)','(.+?)','.+?'\]"
idv, ide = scrapertools.find_single_match(data, pattern)
thumbnail = scrapertools.find_single_match(data,
'<div id="tab-1" class="tab-content current">.+?<img src="([^"]*)">')
@@ -166,7 +184,6 @@ def findvideos(item):
data = eval(data)
if type(data) == list:
logger.debug("inside")
video_url = url_server % (txc(ide, base64.decodestring(data[2])))
server = "openload"
if " SUB" in item.title:
@@ -176,7 +193,11 @@ def findvideos(item):
else:
lang = "Latino"
title = "Enlace encontrado en " + server + " [" + lang + "]"
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
if item.contentChannel=='videolibrary':
itemlist.append(item.clone(channel=item.channel, action="play", url=video_url,
thumbnail=thumbnail, server=server, folder=False))
else:
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
thumbnail=thumbnail, server=server, folder=False))
autoplay.start(itemlist, item)
@@ -184,17 +205,3 @@ def findvideos(item):
else:
return []
def play(item):
logger.info()
itemlist = []
# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
return itemlist

View File

@@ -18,6 +18,14 @@
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
@@ -43,4 +51,4 @@
"visible": true
}
]
}
}

Some files were not shown because too many files have changed in this diff.