151 Commits

Author SHA1 Message Date
alfa-addon
23ac80fbd6 v2.3.5 2017-11-08 21:38:26 -05:00
alfa-addon
9a5ddfbccb fixed 2017-11-08 21:38:12 -05:00
alfa-addon
50bbf7d9aa good bye playmax 2017-11-08 21:37:57 -05:00
Alfa
2aab5ae0ff Merge pull request #161 from Intel11/patch-1
Actualizado
2017-11-09 02:36:18 +01:00
Intel1
1bbc51a885 gamovideo: fix 2017-11-08 09:02:49 -05:00
Intel1
f95c3621d4 Ayuda: actualizado 2017-11-08 08:30:30 -05:00
Intel1
f05cbba109 Update channelselector.py 2017-11-08 08:29:26 -05:00
Intel1
16968f9204 seriesblanco: actualizado 2017-11-08 08:22:17 -05:00
alfa-addon
8985f3ebdd v2.3.4 2017-11-06 19:04:42 -05:00
Alfa
d60c246bbb Merge pull request #155 from Intel11/patch-3
Actualizados
2017-11-07 00:09:00 +01:00
Alfa
3b29fe47bb Merge pull request #156 from danielr460/master
Arreglos menores
2017-11-07 00:08:47 +01:00
Alfa
3093f72ce5 Merge pull request #159 from Alfa-beto/Fixes
Corregido error con extras
2017-11-07 00:08:33 +01:00
Unknown
55dcf3f091 Corregido error con extras 2017-11-05 18:21:26 -03:00
Intel1
2924b6958d Update allpeliculas.py 2017-11-04 15:01:27 -05:00
Intel1
927310c7c6 flashx: actualizado 2017-11-04 14:58:29 -05:00
danielr460
0c25891790 fix servers 2017-11-04 00:06:45 -05:00
danielr460
212c06057f Arreglos menores 2017-11-03 22:04:28 -05:00
Intel1
9c3b3e9256 allpeliculas: paginador para colecciones 2017-11-03 17:54:51 -05:00
Intel1
6dc853b41e repelis: fix categoria 2017-11-03 15:49:52 -05:00
Intel1
7afd09dfa9 streamixcloud: fix 2017-11-03 11:08:16 -05:00
Intel1
6855508eaa Update ultrapeliculashd.py 2017-11-03 10:21:18 -05:00
Intel1
2925c29671 Update ultrapeliculashd.json 2017-11-03 10:20:47 -05:00
Intel1
506e68e8a3 vshare: cambiado el orden de resoluciones 2017-11-03 10:17:12 -05:00
Intel1
9cc30152f8 vshare: actualizado patron 2017-11-03 10:15:27 -05:00
Intel1
267c9d8031 gvideo: fix 2017-11-03 10:07:46 -05:00
Intel1
bd68b83b6c flashx: fix 2017-11-01 06:47:51 -05:00
Unknown
c1f8039672 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-11-01 08:37:33 -03:00
alfa-addon
99dfa2be58 v2.3.3 2017-10-31 21:09:17 -04:00
Alfa
39e711b3cb Merge pull request #150 from danielr460/master
Arreglos en canales
2017-11-01 01:40:53 +01:00
Alfa
2d8d2b3baf Merge pull request #151 from Intel11/patch-2
Actualizados
2017-11-01 01:40:36 +01:00
Alfa
82d126c3e1 Merge pull request #152 from Alfa-beto/Fixes
Correcciones
2017-11-01 01:40:20 +01:00
Alfa
8d41fd1c64 Merge pull request #153 from danielr460/patch-1
Otros arreglos
2017-11-01 01:40:05 +01:00
Unknown
a8c2f409eb Correcciones a canales 2017-10-31 14:57:55 -03:00
Daniel Rincón Rodríguez
7b2a3c2181 Update mundiseries.json 2017-10-31 07:19:55 -05:00
Daniel Rincón Rodríguez
9e6729f0be Update danimados.json 2017-10-31 07:17:04 -05:00
Unknown
241e644dcf Correcciones 2017-10-30 15:02:57 -03:00
Intel1
ae318721ab Add files via upload 2017-10-30 10:34:08 -05:00
Intel1
8328610ffa Delete bajui2.json 2017-10-30 10:32:41 -05:00
Intel1
19101b5310 Delete bajui2.py 2017-10-30 10:32:27 -05:00
Intel1
22827e0f7e Update animemovil.json 2017-10-30 10:28:29 -05:00
Unknown
1747c9795d Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-30 10:14:07 -03:00
Unknown
f3effe9a7f Corregidas series en pelisplus 2017-10-30 10:13:40 -03:00
Intel1
0621b1fa91 gamovideo: fix 2017-10-30 04:16:22 -05:00
Intel1
16473764c9 flashx: fix 2017-10-30 04:15:00 -05:00
danielr460
6b1727a0b8 Arreglado serieslan 2017-10-29 19:50:42 -05:00
Intel1
11fceffd14 bajui2: fix 2017-10-29 10:00:39 -05:00
danielr460
3a49b8a442 Función Play eliminaba info de la serie. Corregido 2017-10-29 08:54:27 -05:00
alfa-addon
162772e9dc v2.3.2 2017-10-28 22:47:51 -04:00
alfa-addon
60d61f861b fixed 2017-10-28 22:47:33 -04:00
Alfa
cd1c7b692a Merge pull request #142 from danielr460/master
Nuevos canales
2017-10-29 06:06:34 +01:00
danielr460
10abe4a6d4 Cambio en la estructura de la página web danimados 2017-10-28 23:40:49 -05:00
danielr460
b0fa5e8a75 Eliminada sección Novedades porque la página web la elimino 2017-10-28 23:23:42 -05:00
danielr460
54d6a943f5 Arreglado Mundiseries 2017-10-28 23:19:43 -05:00
Daniel Rincón Rodríguez
44df5b6036 Corregida linea que si hacia 2017-10-28 23:00:36 -05:00
Alfa
ae67d9b5ee Merge pull request #148 from Intel11/patch-1
Actualizados
2017-10-29 02:57:15 +01:00
Alfa
895d14760d Merge pull request #149 from Alfa-beto/Fixes
Corregida pagina siguiente en pelisplus
2017-10-29 02:56:42 +01:00
Intel1
b0b4b218f0 animemovil: fast fix 2017-10-28 20:55:12 -05:00
Unknown
348787ae97 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-28 21:56:26 -03:00
Unknown
0f7c11efad Corregido pagina siguiente 2017-10-28 21:37:23 -03:00
Intel1
ae7a4a8d83 rapidvideo: actualizado test_video_exists 2017-10-28 11:26:08 -05:00
Intel1
fc58c717eb plusdede: actualizado findvideos 2017-10-28 11:17:48 -05:00
Daniel Rincón Rodríguez
b3a19f3d20 Arreglos sugeridos por Intel1 2017-10-28 11:11:51 -05:00
Daniel Rincón Rodríguez
0cac09eef5 Eliminada solución "tosca" 2017-10-28 11:00:35 -05:00
Intel1
9a1effbe25 cinetux: mostrar cantidad de películas 2017-10-28 10:58:15 -05:00
Daniel Rincón Rodríguez
44145660d0 Eliminado código innecesario 2017-10-28 10:55:55 -05:00
Daniel Rincón Rodríguez
aec2674316 Eliminada función generica 2017-10-28 10:55:01 -05:00
Daniel Rincón Rodríguez
09de611aae Update cartoonlatino.py 2017-10-28 10:54:00 -05:00
Daniel Rincón Rodríguez
74598154c2 Eliminado código innecesario 2017-10-28 10:53:19 -05:00
Daniel Rincón Rodríguez
7ab9c8bb29 Cartoonlatino en su version original 2017-10-28 10:52:30 -05:00
Daniel Rincón Rodríguez
14178974a0 Mejorado Autoplay 2017-10-28 10:51:45 -05:00
Intel1
c43162cbc2 flashx: lo dicho!!! 2017-10-28 08:40:11 -05:00
Daniel Rincón Rodríguez
aa76986a51 Dejado el canal AnitoonsTV como estaba 2017-10-28 08:30:25 -05:00
alfa-addon
357be3f648 v2.3.1 2017-10-27 21:56:26 -04:00
alfa-addon
609f3f6ebc Fixed 2017-10-27 21:55:59 -04:00
Alfa
e0ce83f2f1 Merge pull request #144 from Alfa-beto/Fixes
Fixes - again
2017-10-28 03:21:09 +02:00
Unknown
a7d3294ba3 eliminado tag version 2017-10-27 21:39:57 -03:00
Unknown
338409ca3b Agregada la opcion buscar en otros canales 2017-10-27 21:37:34 -03:00
unknown
dfea08ffee Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-27 21:33:20 -03:00
Alfa
c32dfcc476 Merge pull request #145 from alfa-jor/feateure/mediaserver_remove_updaters
removed files unused, fix tmdb for mediaserver
2017-10-28 01:47:52 +02:00
alfa_addon_10
f3e818dd29 TODO create a method to call a function from settings window 2017-10-28 01:30:02 +02:00
alfa_addon_10
4dfcdb11d3 comments 2017-10-27 20:23:01 +02:00
alfa_addon_10
863b9a5c3e removed servers unused, remove tags unused 2017-10-27 20:13:14 +02:00
alfa_addon_10
26c2b26034 removed unused tags from channels 2017-10-27 19:43:01 +02:00
alfa_addon_10
a56513b5df removed unused modules and methods 2017-10-27 19:10:12 +02:00
alfa_addon_10
f5a7f6383c fix funcionality from kodi 2017-10-27 18:58:40 +02:00
alfa_addon_10
df62398c64 remove get version from old module, updated funcionality from kodi addon 2017-10-27 18:58:01 +02:00
Daniel Rincón Rodríguez
9aae0e7a1b Arreglos comentarios de Intel1 2017-10-26 18:28:19 -05:00
danielr460
e1fe886602 Autoplay añadido 2017-10-26 15:38:36 -05:00
danielr460
19812c83a9 Añadida info de los canales desde la videoteca 2017-10-26 15:05:12 -05:00
danielr460
cabc2458e3 Añadida info de la serie para que no se borre cuando esta activo autoplay 2017-10-26 14:36:09 -05:00
danielr460
336376ecef Añadida opción de videolibrary para saber que no vengo del addon 2017-10-26 14:34:45 -05:00
Unknown
195e393e31 Reemplazado Oh-Pelis x Oh!Latino 2017-10-26 16:18:07 -03:00
danielr460
af06269e39 Añadida opción marcar como visto en autoplay 2017-10-26 13:56:37 -05:00
danielr460
f37d18ee0a Añadido contentChannel para saber en findvideos si vengo del addon o de la videolibrary 2017-10-26 13:54:14 -05:00
danielr460
6fefc3b048 Agregado Autoplay 2017-10-26 13:44:09 -05:00
Unknown
d1630a3c3d agregado Gvideo a pelisplus again 2017-10-26 13:40:58 -03:00
danielr460
ab5fe41403 Eliminar código innecesario 2017-10-26 07:49:46 -05:00
danielr460
15463ea0f8 Arreglo bug 2017-10-26 07:48:01 -05:00
unknown
e348c26d9e Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-26 08:02:22 -03:00
danielr460
badf40573c Nuevo canal mundiseries 2017-10-25 21:41:55 -05:00
danielr460
c80793e3e0 Media for danimados 2017-10-25 21:41:38 -05:00
danielr460
cbc0ff0bd0 Nuevo canal danimados 2017-10-25 21:25:31 -05:00
alfa-addon
d8889b1592 v2.3.0 2017-10-25 18:54:03 -04:00
alfa-addon
410d947e4b fixed 2017-10-25 18:53:49 -04:00
Alfa
a1339a5545 Merge pull request #137 from Intel11/patch-1
Actualizado
2017-10-26 00:56:56 +02:00
Intel1
a7e18ef813 allpeliculas: Agregado sección - Colecciones 2017-10-25 17:34:11 -05:00
Alfa
15e06d4386 Merge pull request #138 from Alfa-beto/Fixes
Correcciones varias
2017-10-26 00:24:59 +02:00
Alfa
574279c2da Merge pull request #140 from danielr460/master
Arreglos Menores
2017-10-26 00:24:44 +02:00
Alfa
2a1c1fb081 Merge pull request #141 from alfa-jor/master
cache tmdb
2017-10-26 00:24:29 +02:00
alfa_addon_10
df1fbe3b47 fix 2017-10-25 19:48:10 +02:00
Intel1
52344e42cc pelismundo: fix filtrado de genero adulto 2017-10-25 10:37:17 -05:00
Intel1
d725443479 Update animemovil.json 2017-10-25 08:19:37 -05:00
Intel1
c70f107ff1 animemovil actualizado para Alfa 2017-10-24 13:28:05 -05:00
alfa_addon_10
f29911cd52 human being text 2017-10-24 20:00:11 +02:00
alfa_addon_10
90c335df63 splited options, human readibility 2017-10-24 19:29:10 +02:00
alfa_addon_10
cfc8b41a5a Merge branch 'master' of https://github.com/alfa-addon/addon 2017-10-24 18:48:12 +02:00
alfa_addon_10
5a332243e0 tmdb cache and configuration 2017-10-24 18:47:02 +02:00
Intel1
9fc9bc1fd5 estrenosgo: actualizado url de videos 2017-10-24 10:06:27 -05:00
unknown
c91ae53fba Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-24 08:25:11 -03:00
danielr460
5f5888a539 Las primeras series de cada página se eliminaban 2017-10-23 14:32:07 -05:00
danielr460
597fa9a7e0 Eliminado Renumber tools porque era innecesario, y se agrego la renumeración a la única serie que no lo tenia (Ranma 1/2) 2017-10-23 12:22:51 -05:00
Intel1
6f0680219f streamixclud: fix test_video_exists 2017-10-23 12:22:33 -05:00
Intel1
b863f0ea20 animeflv.ru: actualizado findvideos 2017-10-23 12:11:16 -05:00
danielr460
4dcc6395be Arreglos Menores 2017-10-23 11:37:53 -05:00
Intel1
107262cef3 cinetux: patron actualizado 2017-10-23 10:38:23 -05:00
Unknown
b9b1cc6945 Mejora en el codigo de Pelisplus 2017-10-21 14:23:43 -03:00
Intel1
5fa341950c flashx: fix again 2017-10-21 12:12:34 -05:00
alfa-addon
54c818984a v2.2.4 2017-10-20 22:00:22 -04:00
Alfa
98b9c73046 Merge pull request #135 from Intel11/patch-1
Actualizado
2017-10-21 03:34:28 +02:00
Alfa
295af0e9e8 Merge pull request #136 from Alfa-beto/Fixes
Correcciones
2017-10-21 03:34:00 +02:00
Unknown
d07ae8e62b Correcciones 2017-10-20 16:47:39 -03:00
Unknown
1f2cadb689 limpieza de codigo 2017-10-20 15:36:10 -03:00
Intel1
5ed94e84fc animeflv_me: raparado paginador 2017-10-20 11:20:56 -05:00
Unknown
5afb770271 Correccion a verpeliculasnuevas 2017-10-20 13:15:49 -03:00
Unknown
4aa7ff5bc7 Correccion a Playmax 2017-10-20 11:31:16 -03:00
Intel1
6bf0100f41 downace: actualizado test_video_exists 2017-10-20 09:26:47 -05:00
Intel1
ea8acc1ea1 downace, mensaje error de servidor 2017-10-20 09:13:35 -05:00
Intel1
d70b8d95f9 Update flashx.py 2017-10-19 12:53:20 -05:00
Intel1
915952c85d Delete crimenes.py 2017-10-19 08:25:54 -05:00
Intel1
4c7a349db2 Delete crimenes.json 2017-10-19 08:25:39 -05:00
Intel1
88d26523cd Delete vixto.py 2017-10-18 17:30:00 -05:00
Intel1
cc4fc8cbde Delete vixto.json 2017-10-18 17:29:30 -05:00
Intel1
5cb64e4b41 hdfull fix marcar como visto 2017-10-18 15:31:52 -05:00
Intel1
efa960bcb7 flashx fix 2017-10-18 12:57:34 -05:00
Unknown
f01da0ddcb Corregido Gamovideo 2017-10-18 13:03:56 -03:00
Unknown
cad7e96441 Correccion divxatope 2017-10-18 11:38:38 -03:00
Intel1
341953539e Update pelismundo.py 2017-10-18 09:29:02 -05:00
Intel1
0e7c8d22ef Update pelismundo.py 2017-10-18 09:19:57 -05:00
Intel1
e20b32b7e9 pelismundo: codigo mejorado
pelismundo: código mejorado
2017-10-17 16:58:57 -05:00
Unknown
1cbca62d82 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-17 08:04:15 -03:00
unknown
5002bf0ca0 Merge remote-tracking branch 'alfa-addon/master' into Fixes 2017-10-15 14:20:01 -03:00
341 changed files with 2222 additions and 7499 deletions

View File

@@ -1,2 +1,2 @@
Debe ejecutar primero el archivo "script.py".
Debe ejecutar primero el archivo "script.py", si no lo hizo antes.
Una vez realizado el proceso podrá ejecutar como siempre "alfa.py" para iniciar el addon.

View File

@@ -12,11 +12,17 @@ import channelselector
from controller import Controller
from controller import Platformtools
from platformcode import config
from core import versiontools
from core.item import Item
from core.tmdb import Tmdb
from platformcode import launcher, logger
from core import filetools
# <addon id="plugin.video.alfa" name="Alfa" version="2.3.0" provider-name="Alfa Addon">
data = filetools.read(filetools.join(config.get_runtime_path(), "addon.xml"))
aux = re.findall('<addon id="plugin.video.alfa" name="Alfa" version="([^"]+)"', data, re.MULTILINE | re.DOTALL)
version = "???"
if len(aux) > 0:
version = aux[0]
class html(Controller):
pattern = re.compile("##")
@@ -29,8 +35,8 @@ class html(Controller):
if self.handler:
self.client_ip = handler.client.getpeername()[0]
self.send_message({"action": "connect",
"data": {"version": "Alfa %s" % versiontools.get_current_plugin_version_tag(),
"date": versiontools.get_current_plugin_date()}})
"data": {"version": "Alfa %s" % version,
"date": "--/--/----"}})
t = threading.Thread(target=launcher.start, name=ID)
t.setDaemon(True)
t.start()

View File

@@ -87,33 +87,16 @@ def run(item):
if item.action == "mainlist":
itemlist = channelselector.getmainlist("banner_")
# if config.get_setting("check_for_plugin_updates"):
# logger.info("channelselector.mainlist Verificar actualizaciones activado")
#
# from core import updater
# try:
# version = updater.checkforupdates()
#
# if version:
# platformtools.dialog_ok("Versión " + version + " disponible",
# "Ya puedes descargar la nueva versión del plugin\ndesde el listado principal")
# itemlist.insert(0, Item(title="Actualizar Alfa a la versión " + version, version=version,
# channel="updater", action="update",
# thumbnail=os.path.join(config.get_runtime_path(), "resources", "images",
# "banner", "thumb_update.png")))
# except:
# platformtools.dialog_ok("No se puede conectar", "No ha sido posible comprobar",
# "si hay actualizaciones")
# logger.info("Fallo al verificar la actualización")
#
# else:
# logger.info("Verificar actualizaciones desactivado")
if item.action == "getchanneltypes":
itemlist = channelselector.getchanneltypes("banner_")
if item.action == "filterchannels":
itemlist = channelselector.filterchannels(item.channel_type, "banner_")
elif item.action == "script":
from core import tmdb
if tmdb.drop_bd():
platformtools.dialog_notification("Alfa", "caché eliminada", time=2000, sound=False)
# Todas las demas las intenta ejecturaren el siguiente orden:
# 1. En el canal
# 2. En el launcher

View File

@@ -22,14 +22,9 @@
<setting id="adult_aux_new_password1" type="text" label="Nueva contraseña:" option="hidden" enable="!eq(-3,)" default=""/>
<setting id="adult_aux_new_password2" type="text" label="Confirmar nueva contraseña:" option="hidden" enable="!eq(-1,)" default=""/>
<!--<setting type="sep"/>-->
<!--<setting label="Actualizaciones" type="lsep"/>-->
<!--<setting id="plugin_updates_available" type="number" label="Number of updates available" default="0" visible="false"/>-->
<!--<setting id="check_for_plugin_updates" type="bool" label="30001" default="true"/>-->
<!--<setting id="check_for_channel_updates" type="bool" label="30004" default="true"/>-->
</category>
<!-- Path downloads and subtitles -->
<!-- Path downloads -->
<category label="30501">
<setting type="sep"/>
<setting id="downloadpath" type="text" label="30017" default=""/>
@@ -41,4 +36,19 @@
<setting id="folder_tvshows" type="text" label="Nombre de carpeta para 'Series'" default="SERIES"/>
<setting id="folder_movies" type="text" label="Nombre de carpeta para 'Peliculas'" default="CINE"/>
</category>
<category label="Otros">
<setting label="Info de películas/series en menú contextual" type="lsep"/>
<setting id="infoplus" type="bool" label="Mostrar opción Infoplus:" default="true"/>
<setting type="sep"/>
<setting label="TheMovieDB (obtiene datos de las películas o series)" type="lsep"/>
<setting id="tmdb_threads" type="labelenum" values="5|10|15|20|25|30" label="Búsquedas simultáneas (puede causar inestabilidad)" default="20"/>
<setting id="tmdb_plus_info" type="bool" label="Buscar información extendida (datos de actores) Aumenta el tiempo de búsqueda" default="false"/>
<setting id="tmdb_cache" type="bool" label="Usar caché (mejora las búsquedas recurrentes)" default="true"/>
<setting id="tmdb_cache_expire" type="enum" lvalues="cada 1 día|cada 7 días|cada 15 días|cada 30 días|No" label="¿Renovar caché?" enable="eq(-1,true)" default="4"/>
<!--<setting id="tmdb_clean_db_cache" type="action" label="Pulse para 'Borrar caché' guardada" action="RunPlugin(plugin://plugin.video.alfa/?ew0KICAgICJhY3Rpb24iOiAic2NyaXB0Ig0KfQ==)" />-->
</category>
</settings>

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.2.3" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.3.5" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,13 +19,9 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
» animeyt » pelismundo
» asialiveaction » animeflv_me
» newpct1 » wopelis
» gvideo » powvideo
¤ arreglos internos
[COLOR green]Gracias a [COLOR yellow]Danielr460[/COLOR] y [COLOR yellow]robalo[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
» seriesblanco » hdfull
» gamovideo ¤ arreglos internos
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>
<description lang="en">Browse web pages using Kodi, you can easily watch their video content.</description>

View File

@@ -6,13 +6,6 @@
"language": ["lat"],
"thumbnail": "https://s22.postimg.org/irnlwuizh/allcalidad1.png",
"banner": "https://s22.postimg.org/9y1athlep/allcalidad2.png",
"version": 1,
"changes": [
{
"date": "14/07/2017",
"description": "Primera version"
}
],
"categories": [
"movie",
"direct"

View File

@@ -4,29 +4,6 @@
"language": ["lat"],
"active": true,
"adult": false,
"version": 1,
"changes": [
{
"date": "24/06/2017",
"description": "Url mal escritas"
},
{
"date": "10/06/2017",
"description": "Reparado búsqueda de videos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "16/02/2017",
"description": "Añadidas nuevas opciones y servidores"
},
{
"date": "19/03/2016",
"description": "Añadido soporte para la videoteca y reparada busqueda global."
}
],
"thumbnail": "http://i.imgur.com/aWCDWtn.png",
"banner": "allpeliculas.png",
"categories": [

View File

@@ -1,7 +1,5 @@
# -*- coding: utf-8 -*-
import urlparse
from core import httptools
from core import jsontools
from core import scrapertools
@@ -35,12 +33,73 @@ def mainlist(item):
url= host + "movies/newmovies?page=1", extra1 = 0))
itemlist.append(item.clone(title="Por genero", action="generos", fanart="http://i.imgur.com/c3HS8kj.png",
url= host + "movies/getGanres"))
itemlist.append(item.clone(title="Colecciones", action="colecciones", fanart="http://i.imgur.com/c3HS8kj.png",
url= host))
itemlist.append(item.clone(title="", action=""))
itemlist.append(item.clone(title="Buscar...", action="search"))
return itemlist
def colecciones(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'href="(/peliculas[^"]+).*?'
patron += 'title_geo"><span>([^<]+).*?'
patron += 'title_eng"><span>([^<]+).*?'
patron += 'src="([^"]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedcantidad, scrapedthumbnail in matches:
if scrapedtitle == "LGTB" and config.get_setting("adult_mode") == 0:
continue
title = scrapedtitle.capitalize() + " (" + scrapedcantidad + ")"
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
page = 1,
thumbnail = host + scrapedthumbnail,
title = title,
url = host + scrapedurl
))
return itemlist
def listado_colecciones(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data_url = scrapertools.find_single_match(data, "data_url: '([^']+)")
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
patron = 'a href="(/peli[^"]+).*?'
patron += 'src="([^"]+).*?'
patron += 'class="c_fichas_title">([^<]+).*?'
patron += 'Año:.*?href="">([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
item.infoLabels['year'] = scrapedyear
itemlist.append(item.clone(channel = item.channel,
action = "findvideos",
contentTitle = scrapedtitle,
thumbnail = scrapedthumbnail,
title = scrapedtitle,
url = host + scrapedurl
))
tmdb.set_infoLabels(itemlist)
item.page += 1
post = "page=%s" %item.page
data = httptools.downloadpage(host + data_url, post=post).data
if len(data) > 50:
itemlist.append(Item(channel = item.channel,
action = "listado_colecciones",
title = "Pagina siguiente>>",
page = item.page,
url = item.url
))
return itemlist
def generos(item):
logger.info()
itemlist = []
@@ -61,6 +120,9 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if "Próximamente" in data:
itemlist.append(Item(channel = item.channel, title = "Próximamente"))
return itemlist
patron = 'data-link="([^"]+).*?'
patron += '>([^<]+)'
matches = scrapertools.find_multiple_matches(data, patron)
@@ -106,6 +168,7 @@ def lista(item):
params = jsontools.dump(dict_param)
data = httptools.downloadpage(item.url, post=params).data
data = data.replace("<mark>","").replace("<\/mark>","")
dict_data = jsontools.load(data)
for it in dict_data["items"]:
@@ -114,7 +177,7 @@ def lista(item):
rating = it["imdb"]
year = it["year"]
url = host + "pelicula/" + it["slug"]
thumb = urlparse.urljoin(host, it["image"])
thumb = host + it["image"]
item.infoLabels['year'] = year
itemlist.append(item.clone(action="findvideos", title=title, fulltitle=title, url=url, thumbnail=thumb,
plot=plot, context=["buscar_trailer"], contentTitle=title, contentType="movie"))
@@ -137,7 +200,7 @@ def lista(item):
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
texto = texto.replace(" ", "%20")
item.url = host + "/movies/search/" + texto
item.extra = "busqueda"
try:

View File

@@ -5,13 +5,6 @@
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/sLaXHvp.png",
"version": 1,
"changes": [
{
"date": "26/04/2017",
"description": "Release"
}
],
"categories": [
"torrent",
"movie"

View File

@@ -6,25 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "animeflv.png",
"banner": "animeflv.png",
"version": 1,
"changes": [
{
"date": "18/05/2017",
"description": "fix ultimos animes, episodios"
},
{
"date": "06/04/2017",
"description": "fix ultimos episodios"
},
{
"date": "01/03/2017",
"description": "fix nueva web"
},
{
"date": "09/07/2016",
"description": "Arreglo viewmode"
}
],
"categories": [
"anime"
],

View File

@@ -6,21 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/x9AdvBx.png",
"banner": "http://i.imgur.com/dTZwCPq.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "05/01/2017",
"description": "Actualizada url de la opción Novedades. Arreglado un error que impedia que se mostrara un solo resultado al realizar busquedas. Limpieza de código"
},
{
"date": "01/07/2016",
"description": "nuevo canal"
}
],
"categories": [
"anime"
],

View File

@@ -12,14 +12,14 @@ from core import servertools
from core.item import Item
from platformcode import config, logger
CHANNEL_HOST = "http://animeflv.me/"
CHANNEL_HOST = "http://animeflv.co"
CHANNEL_DEFAULT_HEADERS = [
["User-Agent", "Mozilla/5.0"],
["Accept-Encoding", "gzip, deflate"],
["Referer", CHANNEL_HOST]
]
REGEX_NEXT_PAGE = r"class='current'>\d+?</li><li><a href=\"([^']+?)\""
REGEX_NEXT_PAGE = "class='current'>\d+?</li><li><a href='([^']+?)'"
REGEX_TITLE = r'(?:bigChar_a" href=.+?>)(.+?)(?:</a>)'
REGEX_THUMB = r'src="(http://media.animeflv\.co/uploads/thumbs/[^"]+?)"'
REGEX_PLOT = r'<span class="info">Línea de historia:</span><p><span>(.*?)</span>'
@@ -61,14 +61,6 @@ def get_cookie_value():
header_string = "|User-Agent=Mozilla/5.0&Referer=http://animeflv.co&Cookie=" + \
get_cookie_value()
def __find_next_page(html):
"""
Busca el enlace a la pagina siguiente
"""
return scrapertools.find_single_match(html, REGEX_NEXT_PAGE)
def __extract_info_from_serie(html):
title = scrapertools.find_single_match(html, REGEX_TITLE)
title = clean_title(title)
@@ -131,15 +123,15 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="letras",
title="Por orden alfabético"))
itemlist.append(Item(channel=item.channel, action="generos", title="Por géneros",
url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime")))
url= CHANNEL_HOST + "/ListadeAnime"))
itemlist.append(Item(channel=item.channel, action="series", title="Por popularidad",
url=urlparse.urljoin(CHANNEL_HOST, "/ListadeAnime/MasVisto")))
url=CHANNEL_HOST + "/ListadeAnime/MasVisto"))
itemlist.append(Item(channel=item.channel, action="series", title="Novedades",
url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/Nuevo")))
url=CHANNEL_HOST + "/ListadeAnime/Nuevo"))
itemlist.append(Item(channel=item.channel, action="series", title="Últimos",
url=urlparse.urljoin(CHANNEL_HOST, "ListadeAnime/LatestUpdate")))
url=CHANNEL_HOST + "/ListadeAnime/LatestUpdate"))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
url=urlparse.urljoin(CHANNEL_HOST, "Buscar?s=")))
url=CHANNEL_HOST + "/Buscar?s="))
itemlist = renumbertools.show_option(item.channel, itemlist)
@@ -148,15 +140,11 @@ def mainlist(item):
def letras(item):
logger.info()
base_url = 'http://animeflv.co/ListadeAnime?c='
itemlist = list()
itemlist.append(Item(channel=item.channel, action="series", title="#", url=base_url + "#"))
for letter in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
logger.debug("title=[%s], url=[%s], thumbnail=[]" % (letter, base_url + letter))
itemlist.append(Item(channel=item.channel, action="series", title=letter, url=base_url + letter))
return itemlist
@@ -172,8 +160,6 @@ def generos(item):
list_genre = re.findall(REGEX_GENERO, html)
for url, genero in list_genre:
logger.debug("title=[%s], url=[%s], thumbnail=[]" % (genero, url))
itemlist.append(Item(channel=item.channel, action="series", title=genero, url=url))
return itemlist
@@ -181,12 +167,9 @@ def generos(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "%20")
item.url = "%s%s" % (item.url, texto)
html = get_url_contents(item.url)
try:
# Se encontro un solo resultado y se redicciono a la página de la serie
if html.find('<title>Ver') >= 0:
@@ -198,9 +181,6 @@ def search(item, texto):
items = []
for show in show_list:
title, url, thumbnail, plot = show
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail))
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
except:
@@ -214,35 +194,25 @@ def search(item, texto):
def series(item):
logger.info()
page_html = get_url_contents(item.url)
show_list = __find_series(page_html)
items = []
for show in show_list:
title, url, thumbnail, plot = show
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, thumbnail))
items.append(Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
plot=plot, show=title, viewmode="movies_with_plot", context=renumbertools.context(item)))
url_next_page = __find_next_page(page_html)
url_next_page = scrapertools.find_single_match(page_html, REGEX_NEXT_PAGE)
if url_next_page:
items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url=url_next_page))
items.append(Item(channel=item.channel, action="series", title=">> Página Siguiente", url= CHANNEL_HOST + url_next_page))
return items
def episodios(item):
logger.info()
itemlist = []
html_serie = get_url_contents(item.url)
info_serie = __extract_info_from_serie(html_serie)
if info_serie[3]:
plot = info_serie[3]
@@ -250,11 +220,9 @@ def episodios(item):
plot = ''
episodes = re.findall(REGEX_EPISODE, html_serie, re.DOTALL)
es_pelicula = False
for url, title, date in episodes:
episode = scrapertools.find_single_match(title, r'Episodio (\d+)')
# El enlace pertenece a un episodio
if episode:
season = 1
@@ -268,9 +236,6 @@ def episodios(item):
title = "%s (%s)" % (title, date)
item.url = url
es_pelicula = True
logger.debug("title=[%s], url=[%s], thumbnail=[%s]" % (title, url, item.thumbnail))
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=item.thumbnail,
plot=plot, show=item.show, fulltitle="%s %s" % (item.show, title)))
@@ -318,7 +283,6 @@ def findvideos(item):
videoitem.thumbnail = item.thumbnail
regex_video_list = r'var part = \[([^\]]+)'
videos_html = scrapertools.find_single_match(iframe_html, regex_video_list)
videos = re.findall('"([^"]+)"', videos_html, re.DOTALL)
for quality_id, video_url in enumerate(videos):

View File

@@ -6,22 +6,9 @@
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/5nRR9qq.png",
"banner": "animeflv_ru.png",
"version": 1,
"compatible": {
"python": "2.7.9"
},
"changes": {
"change": [
{
"date": "06/04/2017",
"description": "fix"
},
{
"date": "01/03/2017",
"description": "fix nueva web"
}
]
},
"categories": [
"anime"
],

View File

@@ -162,27 +162,20 @@ def novedades_anime(item):
def listado(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
# logger.debug("datito %s" % data)
url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')
matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
'<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
re.DOTALL).findall(data)
itemlist = []
for thumbnail, url, title, genres, plot in matches:
title = clean_title(title)
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
@@ -192,28 +185,22 @@ def listado(item):
else:
new_item.show = title
new_item.context = renumbertools.context(item)
itemlist.append(new_item)
if url_pagination:
url = urlparse.urljoin(HOST, url_pagination)
title = ">> Pagina Siguiente"
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.plot == "":
item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
@@ -221,7 +208,6 @@ def episodios(item):
title = title.strip()
url = urlparse.urljoin(item.url, url)
thumbnail = item.thumbnail
try:
episode = int(scrapertools.find_single_match(title, "Episodio (\d+)"))
except ValueError:
@@ -229,42 +215,36 @@ def episodios(item):
episode = 1
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%s: %sx%s" % (item.title, season, str(episode).zfill(2))
itemlist.append(item.clone(action="findvideos", title=title, url=url, thumbnail=thumbnail, fulltitle=title,
fanart=thumbnail, contentType="episode"))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
_id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
post = "embed_id=%s" % _id
data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
dict_data = jsontools.load(data)
headers = dict()
headers["Referer"] = item.url
data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
dict_data = jsontools.load(data)
list_videos = dict_data["playlist"][0]["sources"]
if not dict_data:
return itemlist
list_videos = dict_data["playlist"][0]
if isinstance(list_videos, list):
for video in list_videos:
itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
itemlist.append(Item(channel=item.channel, action="play", url=video["file"],
show=re.escape(item.show),
title=item.title, plot=item.plot, fulltitle=item.title,
thumbnail=item.thumbnail))
else:
for video in list_videos.values():
itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
video += "|User-Agent=Mozilla/5.0"
itemlist.append(Item(channel=item.channel, action="play", url=video, show=re.escape(item.show),
title=item.title, plot=item.plot, fulltitle=item.title,
thumbnail=item.thumbnail))
return itemlist

View File

@@ -6,21 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "animeid.png",
"banner": "animeid.png",
"version": 1,
"changes": [
{
"date": "17/05/2017",
"description": "Fix novedades y replace en findvideos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "04/01/16",
"description": "Arreglado problema en findvideos"
}
],
"categories": [
"anime"
],

View File

@@ -0,0 +1,43 @@
{
"id": "animemovil",
"name": "Animemovil",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s1.postimg.org/92ji7stii7/animemovil1.png",
"banner": "",
"categories": [
"anime"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}

View File

@@ -0,0 +1,406 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from core import httptools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import platformtools, config, logger
# Channel-level user settings (defined in animemovil.json).
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = int(config.get_setting('perfil', "animemovil"))
# Pick the colour profile: each row is [color1..color5] used for menu entries.
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
# Out-of-range profile: fall back to the skin's default colours.
color1 = color2 = color3 = color4 = color5 = ""
host = "http://animemovil.com"
def mainlist(item):
    """Build the channel root menu: recent episodes, full listing, airing shows,
    indexes, search and the channel-settings entry."""
    logger.info()
    entries = [
        Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
             url=host, text_color=color1, contentType="tvshow", extra="recientes"),
        Item(channel=item.channel, action="listado", title="Animes", thumbnail=item.thumbnail,
             url="%s/_API/?src=animesRecientes&offset=0" % host, text_color=color1),
        Item(channel=item.channel, action="emision", title="En emisión", thumbnail=item.thumbnail,
             url="%s/anime/emision" % host, text_color=color2, contentType="tvshow"),
        Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
             text_color=color2),
        Item(channel=item.channel, action="search", title="Buscar...",
             thumbnail=item.thumbnail, text_color=color3),
        item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False),
    ]
    itemlist = list(entries)
    if renumbertools.context:
        # Offer the episode-renumbering option when the helper exposes a context.
        itemlist = renumbertools.show_option(item.channel, itemlist)
    return itemlist
def openconfig(item):
    """Open the channel settings dialog, then refresh the current listing."""
    result = platformtools.show_channel_settings()
    platformtools.itemlist_refresh()
    return result
def search(item, texto):
    """Search the site for *texto*; results share the recientes() page layout."""
    query = texto.replace(" ", "+")
    item.url = "%s/?s=%s" % (host, query)
    try:
        results = recientes(item)
    except:
        # Never let a scraping failure break the global search aggregator:
        # log the exception details and return an empty list instead.
        import sys
        for detail in sys.exc_info():
            logger.error("%s" % detail)
        results = []
    return results
def recientes(item):
# List recently-aired episodes (home page) or search results, depending on item.url / item.extra.
logger.info()
item.contentType = "tvshow"
itemlist = []
data = httptools.downloadpage(item.url).data
# Entries live inside the <ul class="emision"> block of the page.
bloque = scrapertools.find_single_match(data, '<ul class="emision"(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
try:
# Strip the trailing " NN Sub Español/Audio Español/Español Latino" episode suffix.
contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
except:
contentTitle = ""
# Remove format/language words so TMDB lookups get a clean title.
contentTitle = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", contentTitle)
tipo = "tvshow"
show = contentTitle
action = "episodios"
context = renumbertools.context(item)
# Home-page entries are individual episodes: jump straight to the video links.
if item.extra == "recientes":
action = "findvideos"
context = ""
# URLs ending in "-pelicula" are movies, not series.
if not item.extra and (url.endswith("-pelicula/") or url.endswith("-pelicula")):
tipo = "movie"
show = ""
action = "peliculas"
# Protocol-relative thumbnail URLs need an explicit scheme.
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
# NOTE(review): this unconditional override makes the earlier action
# assignments dead — confirm whether it belongs inside one of the branches above.
action ="findvideos"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
# thumb_ keeps the site's thumbnail so it can be restored after TMDB overwrites it.
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
thumb_=thumb, contentType=tipo, context=context))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
# For episode listings, prefer the site's own episode thumbnails over TMDB art.
if item.extra and itemlist:
for it in itemlist:
it.thumbnail = it.thumb_
except:
pass
return itemlist
def listado(item):
    """Parse one page of the site's JSON API listing and emit show/movie items plus a pager."""
    logger.info()
    itemlist = []
    data = jsontools.load(httptools.downloadpage(item.url).data)
    for entry in data.get("items", []):
        scrapedtitle = entry["title"]
        url = "%s/%s" % (host, entry["url"])
        thumb = "http://img.animemovil.com/w440-h250-c/%s" % entry["img"]
        # Drop format/language suffixes so TMDB lookups use a clean title.
        title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
        if url.endswith("-pelicula/") or url.endswith("-pelicula"):
            tipo, show, action = "movie", "", "peliculas"
        else:
            tipo, show, action = "tvshow", title, "episodios"
        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
                                   contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
                                   context=renumbertools.context(item), contentType=tipo))
    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass
    # "buttom" is the API's own (misspelled) next-page flag — do not rename.
    if data["buttom"] and itemlist:
        next_offset = int(scrapertools.find_single_match(item.url, 'offset=(\d+)')) + 1
        next_url = re.sub(r'offset=\d+', 'offset=%s' % next_offset, item.url)
        itemlist.append(Item(channel=item.channel, action="listado", url=next_url, title=">> Página Siguiente",
                             thumbnail=item.thumbnail, text_color=color2))
    return itemlist
def indices(item):
    """Show the index submenu (genre / letter / full list) or expand one chosen index."""
    logger.info()
    itemlist = []
    if "Índices" in item.title:
        # Top level: offer the three index types.
        itemlist.append(item.clone(title="Por Género", url="%s/anime/generos/" % host))
        itemlist.append(item.clone(title="Por Letra", url="%s/anime/" % host))
        itemlist.append(item.clone(action="completo", title="Lista completa de Animes",
                                   url="%s/anime/lista/" % host))
        return itemlist
    # Second level: scrape the letter/genre anchors and map each to an API listing URL.
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '<div class="letras">(.*?)</div>')
    for label in scrapertools.find_multiple_matches(bloque, '<a title="([^"]+)"'):
        if "Letra" in item.title:
            api_url = "%s/_API/?src=animesLetra&offset=0&letra=%s" % (host, label)
        else:
            api_url = "%s/_API/?src=animesGenero&offset=0&genero=%s" % (host, label)
        itemlist.append(item.clone(action="listado", url=api_url, title=label))
    return itemlist
def completo(item):
    """Parse the full A-Z anime list page into show/movie items."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.find_single_match(data, '<ul class="listadoAnime">(.*?)</ul>')
    patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    for url, scrapedtitle, thumb in scrapertools.find_multiple_matches(bloque, patron):
        url = host + url
        # Request the larger thumbnail variant instead of the 90px one.
        thumb = thumb.replace("s90-c", "w440-h250-c")
        title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", scrapedtitle)
        if url.endswith("-pelicula/") or url.endswith("-pelicula"):
            tipo, show, action = "movie", "", "peliculas"
        else:
            tipo, show, action = "tvshow", title, "episodios"
        infoLabels = {'filtro': {"original_language": "ja"}.items()}
        itemlist.append(Item(channel=item.channel, action=action, title=scrapedtitle, url=url, thumbnail=thumb,
                             text_color=color3, contentTitle=title, contentSerieName=show, extra="completo",
                             context=renumbertools.context(item), contentType=tipo, infoLabels=infoLabels))
    return itemlist
def episodios(item):
    """List every episode of a show, renumbering seasons via renumbertools when the
    episode number can be parsed from the title."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # The page <title> carries the show name plus format/language suffixes; strip them.
    show = scrapertools.find_single_match(data, '<title>\s*([^<]+)\s*</title>')
    show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
    bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
    for url, title in scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"'):
        url = host + url
        epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
        new_item = item.clone(action="findvideos", url=url, title=title, extra="")
        if epi:
            # Map the flat episode number onto a season/episode pair.
            season, episode = renumbertools.numbered_for_tratk(
                item.channel, show, 1, int(epi))
            new_item.infoLabels["episode"] = episode
            new_item.infoLabels["season"] = season
            new_item.title = "%sx%s %s" % (season, episode, title)
        itemlist.append(new_item)
    if item.infoLabels.get("tmdb_id") or item.extra in ("recientes", "completo"):
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass
    if config.get_videolibrary_support() and itemlist:
        itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", url=item.url,
                             action="add_serie_to_library", extra="episodios", contentTitle=item.contentTitle,
                             contentSerieName=item.contentSerieName, text_color=color4, fanart=item.fanart,
                             thumbnail=item.thumbnail))
    return itemlist
def peliculas(item):
    """Resolve a movie page: jump straight to findvideos when there is a single
    entry, otherwise list each part for the user to choose."""
    logger.info()
    itemlist = []
    if item.extra == "completo":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass
    data = httptools.downloadpage(item.url).data
    if not item.infoLabels["plot"]:
        item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="InfoSipnosis">.*?<p>(.*?)</p>')
    bloque = scrapertools.find_single_match(data, 'ul class="lista"(.*?)</ul>')
    matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
    if len(matches) == 1:
        # Single entry: skip the intermediate listing.
        item.url = host + matches[0][0]
        return findvideos(item)
    for url, title in matches:
        itemlist.append(item.clone(action="findvideos", title=title, url=url, extra=""))
    return itemlist
def emision(item):
    """List currently-airing shows, grouped under one non-clickable header per weekday."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    day_blocks = scrapertools.find_multiple_matches(data, '<div class="horario">.*?</i>\s*(.*?)</span>(.*?)</ul>')
    patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
    for dia, bloque in day_blocks:
        entries = scrapertools.find_multiple_matches(bloque, patron)
        if not entries:
            continue
        # Weekday header (separator row, no action).
        itemlist.append(item.clone(action="", title=dia, text_color=color1))
        for url, title, thumb in entries:
            url = host + url
            # Indent show titles one space under their weekday header.
            scrapedtitle = " %s" % title
            title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "", title)
            if not thumb.startswith("http"):
                thumb = "http:%s" % thumb
            infoLabels = {'filtro': {"original_language": "ja"}.items()}
            itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
                                       contentTitle=title, contentSerieName=title, extra="recientes",
                                       context=renumbertools.context(item), infoLabels=infoLabels))
    return itemlist
def findvideos(item):
# Collect the streaming mirrors for one episode/movie page, plus an optional direct download.
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# NOTE(review): "id" shadows the builtin; site-internal episode id used for the POST below.
id = scrapertools.find_single_match(data, '"id":"([^"]+)"')
bloque = scrapertools.find_single_match(data, 'ul class="controles">(.*?)</ul>')
patron = '<li title="([^"]+)" id="[^"]*" host="([^"]+)">'
# NOTE(review): "bloque" is extracted but never used — the pattern scans the whole
# page instead. Confirm whether this should be find_multiple_matches(bloque, patron).
matches = scrapertools.find_multiple_matches(data, patron)
for title, server in matches:
# The "Vizard" mirror is skipped (presumably unsupported).
if title == "Vizard":
continue
title = "%s - %s" % (title, item.title)
post = "host=%s&id=%s" % (server, id)
# Every mirror is resolved through the site's own streaming endpoint in play().
itemlist.append(item.clone(action="play", url="http://server-2-stream.animemovil.com/V2/", title=title,
post=post))
downl = scrapertools.find_single_match(data, '<div class="descargarCap">.*?<a href="([^"]+)"')
if downl:
# Unescape HTML entity so the URL is usable.
downl = downl.replace("&amp;", "&")
itemlist.append(item.clone(action="play", title="Descarga - %s" % item.title, url=downl, server="directo"))
if not itemlist:
itemlist.append(Item(channel=item.channel, title="No hay vídeos disponibles", action=""))
if item.extra == "recientes":
# Coming from the home page: offer a shortcut back to the full episode list.
url = scrapertools.find_single_match(data, '<a class="CapList".*?href="([^"]+)"')
if url:
url = host + url
itemlist.append(item.clone(action="episodios", title="Ir a lista de capítulos", url=url, text_color=color1))
# NOTE(review): other functions in this file use config.get_videolibrary_support();
# confirm get_library_support() still exists in this addon version.
elif item.contentType == "movie" and config.get_library_support():
if "No hay vídeos disponibles" not in itemlist[0].title:
itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
action="add_pelicula_to_library", contentTitle=item.contentTitle, text_color=color4,
thumbnail=item.thumbnail, fanart=item.fanart))
return itemlist
def play(item):
# Resolve a playable URL list from the site's streaming endpoint (or pass through
# items that already carry a server, e.g. the direct-download entry).
logger.info()
if item.server:
return [item]
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url, item.post).data)
# Two response shapes: "jwplayer": false carries raw HTML/JS in data["eval"],
# otherwise data["jwplayer"] is a player config dict.
if data["jwplayer"] == False:
content = data["eval"]["contenido"]
urls = scrapertools.find_multiple_matches(content, 'file\s*:\s*"([^"]+)"')
if not urls:
# Fallback: some responses embed the URL in an XHR call instead.
urls = scrapertools.find_multiple_matches(content, '"GET","([^"]+)"')
for url in urls:
if "mediafire" in url:
# Mediafire links need one extra fetch to extract the real file URL.
data_mf = httptools.downloadpage(url).data
url = scrapertools.find_single_match(data_mf, 'kNO\s*=\s*"([^"]+)"')
# Assumes a 4-char extension suffix like ".mp4" — TODO confirm for all mirrors.
ext = url[-4:]
itemlist.insert(0, ["%s [directo]" % ext, url])
else:
if data["jwplayer"].get("sources"):
# Multiple qualities: one entry per source, newest first.
for source in data["jwplayer"]["sources"]:
label = source.get("label", "")
ext = source.get("type", "")
if ext and "/" in ext:
# MIME type like "video/mp4" -> ".mp4 " prefix for the label.
ext = ".%s " % ext.rsplit("/", 1)[1]
url = source.get("file")
if "server-3-stream" in url:
# This host answers with a redirect; resolve it to the final location.
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, ["%s%s [directo]" % (ext, label), url])
elif data["jwplayer"].get("file"):
# Single-file player config.
label = data["jwplayer"].get("label", "")
url = data["jwplayer"]["file"]
ext = data["jwplayer"].get("type", "")
if ext and "/" in ext:
ext = "%s " % ext.rsplit("/", 1)[1]
if "server-3-stream" in url:
url = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location")
itemlist.insert(0, [".%s%s [directo]" % (ext, label), url])
return itemlist
def newest(categoria):
    """Entry point for the global "novedades" feed: return this channel's latest episodes.

    ``categoria`` is accepted for API compatibility with the novedades loader; the
    channel serves the same recent-episodes list for every category.
    """
    logger.info()
    item = Item()
    try:
        # Fix: point at this channel's own host. The previous code still carried
        # "http://skanime.net/", a leftover from the channel this file was adapted from.
        item.url = host
        item.extra = "novedades"
        itemlist = recientes(item)
    # Se captura la excepción, para no interrumpir al canal novedades si un canal falla
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
    return itemlist

View File

@@ -6,21 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "https://s21.postimg.org/b43i3ljav/animeshd.png",
"banner": "https://s4.postimg.org/lulxulmql/animeshd-banner.png",
"version": 1,
"changes": [
{
"date": "03/06/2017",
"description": "limpieza de codigo"
},
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "19/05/2017",
"description": "First release"
}
],
"categories": [
"anime"
]

View File

@@ -5,13 +5,6 @@
"adult": false,
"language": "es",
"thumbnail": "http://i.imgur.com/dHpupFk.png",
"version": 1,
"changes": [
{
"date": "17/05/2017",
"description": "Fix novedades y replace en findvideos"
}
],
"categories": [
"anime"
],

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/9Zu5NBc.png",
"banner": "http://i.imgur.com/JQSXCaB.png",
"version": 1,
"changes": [
{
"date": "13/06/2017",
"description": "Arreglado problema en nombre de servidores"
},
{
"date": "02/06/2017",
"description": "Primera Versión"
}
],
"categories": [
"tvshow",
"anime"

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -148,35 +148,21 @@ def findvideos(item):
itemla = scrapertools.find_multiple_matches(data_vid, '<div class="serv">.+?-(.+?)-(.+?)<\/div><.+? src="(.+?)"')
for server, quality, url in itemla:
if "Calidad Alta" in quality:
quality = quality.replace("Calidad Alta", "HQ")
server = server.lower().strip()
if "ok" == server:
server = 'okru'
if "netu" == server:
continue
quality = "HQ"
if "HQ" in quality:
quality = "HD"
if " Calidad media - Carga mas rapido" in quality:
quality = "360p"
server = server.lower().strip()
if "ok" in server:
server = 'okru'
if "rapid" in server:
server = 'rapidvideo'
if "netu" in server:
server = 'netutv'
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
title="Enlace encontrado en: %s [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item)
return itemlist
def play(item):
logger.info()
itemlist = []
# Buscamos video por servidor ...
devuelve = servertools.findvideosbyserver(item.url, item.server)
if not devuelve:
# ...sino lo encontramos buscamos en todos los servidores disponibles
devuelve = servertools.findvideos(item.url, skip=True)
if devuelve:
# logger.debug(devuelve)
itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
url=devuelve[0][1], thumbnail=item.thumbnail))
return itemlist

View File

@@ -4,21 +4,6 @@
"language": ["cast", "lat"],
"adult": false,
"active": true,
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "16/02/2017",
"description": "Canal reparado ya que no funcionaban los enlaces"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"banner": "areadocumental.png",
"thumbnail": "areadocumental.png",
"categories": [

View File

@@ -1,20 +1,13 @@
{
"id": "asialiveaction",
"name": "Asialiveaction.",
"name": "Asialiveaction",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "asialiveaction.png",
"banner": "https://imgur.com/B1IOAu4.png",
"version": 1,
"changes": [
{
"date": "08/10/2017",
"description": "Primera versión del canal"
}
],
"categories": [
"movie",
"serie"
"tvshow"
]
}

View File

@@ -7,6 +7,7 @@ from core import jsontools
from core.item import Item
from platformcode import config, logger
from platformcode import platformtools
from platformcode import launcher
__channel__ = "autoplay"
@@ -78,7 +79,20 @@ def start(itemlist, item):
:return: intenta autoreproducir, en caso de fallar devuelve el itemlist que recibio en un principio
'''
logger.info()
for videoitem in itemlist:
#Nos dice de donde viene si del addon o videolibrary
if item.contentChannel=='videolibrary':
videoitem.contentEpisodeNumber=item.contentEpisodeNumber
videoitem.contentPlot=item.contentPlot
videoitem.contentSeason=item.contentSeason
videoitem.contentSerieName=item.contentSerieName
videoitem.contentTitle=item.contentTitle
videoitem.contentType=item.contentType
videoitem.episode_id=item.episode_id
videoitem.hasContentDetails=item.hasContentDetails
videoitem.infoLabels=item.infoLabels
videoitem.thumbnail=item.thumbnail
#videoitem.title=item.title
if not config.is_xbmc():
#platformtools.dialog_notification('AutoPlay ERROR', 'Sólo disponible para XBMC/Kodi')
return itemlist
@@ -261,8 +275,12 @@ def start(itemlist, item):
else:
videoitem = resolved_item[0]
# si no directamente reproduce
platformtools.play_video(videoitem)
# si no directamente reproduce y marca como visto
from platformcode import xbmc_videolibrary
xbmc_videolibrary.mark_auto_as_watched(item)
#platformtools.play_video(videoitem)
videoitem.contentChannel='videolibrary'
launcher.run(videoitem)
try:
if platformtools.is_playing():

View File

@@ -1,27 +1,12 @@
{
"id": "bajui2",
"name": "Bajui2",
"id": "bajui",
"name": "Bajui",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "bajui.png",
"banner": "bajui.png",
"fanart": "bajui.png",
"version": 2,
"changes": [
{
"date": "07/08/2017",
"description": "Fix URL HOST changed to Bajui2"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"movie",
"tvshow",

View File

@@ -13,7 +13,7 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Películas", action="menupeliculas",
url="http://www.bajui2.com/descargas/categoria/2/peliculas",
url="http://www.bajui.org/descargas/categoria/2/peliculas",
fanart=item.fanart))
itemlist.append(Item(channel=item.channel, title="Series", action="menuseries",
fanart=item.fanart))
@@ -51,13 +51,13 @@ def menuseries(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Series - Novedades", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/3/series",
url="http://www.bajui.org/descargas/categoria/3/series",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - A-Z", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/3/series/orden:nombre",
url="http://www.bajui.org/descargas/categoria/3/series/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Series - HD", action="peliculas",
url="http://www.bajui2.com/descargas/subcategoria/11/hd/orden:nombre",
url="http://www.bajui.org/descargas/subcategoria/11/hd/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart))
@@ -68,10 +68,10 @@ def menudocumentales(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Documentales - Novedades", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv",
url="http://www.bajui.org/descargas/categoria/7/docus-y-tv",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Documentales - A-Z", action="peliculas",
url="http://www.bajui2.com/descargas/categoria/7/docus-y-tv/orden:nombre",
url="http://www.bajui.org/descargas/categoria/7/docus-y-tv/orden:nombre",
fanart=item.fanart, viewmode="movie_with_plot"))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url="",
fanart=item.fanart))
@@ -86,7 +86,7 @@ def search(item, texto, categoria=""):
texto = texto.replace(" ", "+")
logger.info("categoria: " + categoria + " url: " + url)
try:
item.url = "http://www.bajui2.com/descargas/busqueda/%s"
item.url = "http://www.bajui.org/descargas/busqueda/%s"
item.url = item.url % texto
itemlist.extend(peliculas(item))
return itemlist
@@ -118,7 +118,7 @@ def peliculas(item, paginacion=True):
scrapedtitle = title
scrapedplot = clean_plot(plot)
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = urlparse.urljoin("http://www.bajui2.com/", thumbnail.replace("_m.jpg", "_g.jpg"))
scrapedthumbnail = urlparse.urljoin("http://www.bajui.org/", thumbnail.replace("_m.jpg", "_g.jpg"))
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
# Añade al listado de XBMC
@@ -133,7 +133,7 @@ def peliculas(item, paginacion=True):
scrapertools.printMatches(matches)
if len(matches) > 0:
scrapedurl = urlparse.urljoin("http://www.bajui2.com/", matches[0])
scrapedurl = urlparse.urljoin("http://www.bajui.org/", matches[0])
pagitem = Item(channel=item.channel, action="peliculas", title=">> Página siguiente", url=scrapedurl,
fanart=item.fanart, viewmode="movie_with_plot")
if not paginacion:
@@ -197,7 +197,7 @@ def enlaces(item):
try:
item.thumbnail = scrapertools.get_match(data, '<div class="ficha-imagen"[^<]+<img src="([^"]+)"')
item.thumbnail = urlparse.urljoin("http://www.bajui2.com/", item.thumbnail)
item.thumbnail = urlparse.urljoin("http://www.bajui.org/", item.thumbnail)
except:
pass
@@ -234,8 +234,8 @@ def enlaces(item):
lista_servidores = lista_servidores[:-2]
scrapedthumbnail = item.thumbnail
# http://www.bajui2.com/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
scrapedurl = "http://www.bajui2.com/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
# http://www.bajui.org/ajax/mostrar-enlaces.php?id=330582&code=124767d31bfbf14c3861
scrapedurl = "http://www.bajui.org/ajax/mostrar-enlaces.php?id=" + id + "&code=" + id2
scrapedplot = item.plot
scrapedtitle = "Enlaces de " + usuario + " (" + fecha + ") (" + lista_servidores + ")"

View File

@@ -6,21 +6,6 @@
"language": ["*"],
"thumbnail": "beeg.png",
"banner": "beeg.png",
"version": 1,
"changes": [
{
"date": "03/06/2017",
"description": "reliminado encoding y soporte multiples calidades"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"adult"
],

View File

@@ -6,17 +6,6 @@
"language": ["cast"],
"thumbnail": "http://s6.postimg.org/6ash180up/bityoulogo.png",
"banner": "bityouth.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"torrent",
"movie",

View File

@@ -5,17 +5,6 @@
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/BePrYmy.png",
"version": 1,
"changes": [
{
"date": "26/04/2017",
"description": "Release"
},
{
"date": "28/06/2017",
"description": "Correciones código y mejoras"
}
],
"categories": [
"torrent",
"movie",

View File

@@ -6,17 +6,6 @@
"language": ["cast"],
"thumbnail": "http://s6.postimg.org/9u8m1ep8x/bricocine.jpg",
"banner": "bricocine.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"torrent",
"movie",

View File

@@ -7,13 +7,6 @@
"fanart": "https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/canalpelisbg.jpg",
"thumbnail": "http://www.canalpelis.com/wp-content/uploads/2016/11/logo_web.gif",
"banner": "",
"version": 1,
"changes": [
{
"date": "15/08/17",
"description": "Nuevo Canal"
}
],
"categories": [
"movie",
"tvshow",

View File

@@ -6,17 +6,6 @@
"language": ["*"],
"thumbnail": "http://i.imgur.com/gAbPcvT.png?1",
"banner": "canalporno.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "09/01/2017",
"description": "Primera version."
}
],
"categories": [
"adult"
]

View File

@@ -43,7 +43,7 @@ def findvideos(item):
for thumbnail, title, url, time in matches:
scrapedtitle = time + " - " + title
scrapedurl = host + url
scrapedthumbnail = "http:" + thumbnail
scrapedthumbnail = thumbnail
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail))
@@ -80,7 +80,7 @@ def play(item):
itemlist = []
data = httptools.downloadpage(item.url).data
url = "http:" + scrapertools.find_single_match(data, '<source src="([^"]+)"')
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
itemlist.append(item.clone(url=url, server="directo"))
return itemlist

View File

@@ -6,13 +6,6 @@
"language": ["lat"],
"thumbnail": "http://i.imgur.com/wk6fRDZ.png",
"banner": "http://i.imgur.com/115c59F.png",
"version": 1,
"changes": [
{
"date": "07/06/2017",
"description": "Primera version del canal"
}
],
"categories": [
"tvshow"
]

View File

@@ -1,8 +1,7 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
@@ -10,6 +9,7 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
host = "http://www.cartoon-latino.com/"
from channels import autoplay
@@ -33,7 +33,6 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -71,7 +70,7 @@ def lista_gen(item):
title = scrapedtitle + " [ " + scrapedlang + "]"
itemlist.append(
Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle, context=renumbertools.context(item)))
show=scrapedtitle))
tmdb.set_infoLabels(itemlist)
# Paginacion
patron_pag = '<a class="nextpostslink" rel="next" href="([^"]+)">'
@@ -98,7 +97,7 @@ def lista(item):
for link, name in matches:
title = name + " [Latino]"
url = link
context1=[renumbertools.context(item), autoplay.context]
context1=[autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=title, action="episodios", show=title,
context=context1))
@@ -129,42 +128,38 @@ def episodios(item):
number = 0
ncap = 0
A = 1
tempo=1
for temp, link, name in matches:
if A != temp:
if A != temp and "Ranma" not in show:
number = 0
number = number + 1
if "Ranma" in show:
number = int(temp)
temp = str(1)
else:
number = number + 1
if number < 10:
capi = "0" + str(number)
else:
capi = str(number)
number,tempo=renumerar_ranma(number,tempo,18+1,1)
number,tempo=renumerar_ranma(number,tempo,22+1,2)
number,tempo=renumerar_ranma(number,tempo,24+1,3)
number,tempo=renumerar_ranma(number,tempo,24+1,4)
number,tempo=renumerar_ranma(number,tempo,24+1,5)
number,tempo=renumerar_ranma(number,tempo,24+1,6)
capi=str(number).zfill(2)
if "Ranma" in show:
season = 1
episode = number
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.show, season, episode)
date = name
if episode < 10:
capi = "0" + str(episode)
else:
capi = episode
title = str(season) + "x" + str(capi) + " - " + name # "{0}x{1} - ({2})".format(season, episode, date)
title = "{0}x{1} - ({2})".format(str(tempo), capi, name)
else:
title = str(temp) + "x" + capi + " - " + name
title = "{0}x{1} - ({2})".format(str(temp), capi, name)
url = link
A = temp
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, show=show))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir " + show + " a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def renumerar_ranma(number,tempo,final,actual):
if number==final and tempo==actual:
tempo=tempo+1
number=1
return number, tempo
def findvideos(item):
logger.info()
@@ -190,29 +185,5 @@ def findvideos(item):
server1 = server
itemlist.append(item.clone(url=url, action="play", server=server1,
title="Enlace encontrado en %s " % (server1.capitalize())))
autoplay.start(itemlist, item)
return itemlist
def play(item):
    """Resolve the final playable link for *item*.

    First asks the server the item already points at; if that yields
    nothing, scans every available server for the url.
    """
    logger.info()
    itemlist = []
    # Try the server recorded on the item first ...
    found = servertools.findvideosbyserver(item.url, item.server)
    if not found:
        # ... fall back to probing all known servers.
        found = servertools.findvideos(item.url, skip=True)
    if found:
        # logger.debug(found)
        video_url = found[0][1]
        server_id = found[0][2]
        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play",
                             server=server_id, url=video_url, thumbnail=item.thumbnail,
                             folder=False))
    return itemlist

View File

@@ -6,13 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "https://s9.postimg.org/secdb5s8v/ciberdocumentales.png",
"banner": "https://s1.postimg.org/sa486z0of/ciberdocumentales_banner.png",
"version": 1,
"changes": [
{
"date": "18/06/2016",
"descripcion": "First release"
}
],
"categories": [
"documentary"
],

View File

@@ -6,21 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/5KOU8uy.png?3",
"banner": "cineasiaenlinea.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "07/02/17",
"description": "Fix bug in newest"
},
{
"date": "09/01/2017",
"description": "Primera version"
}
],
"categories": [
"movie",
"vos"

View File

@@ -6,29 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "https://s31.postimg.org/puxmvsi7v/cinecalidad.png",
"banner": "https://s32.postimg.org/kihkdpx1x/banner_cinecalidad.png",
"version": 1,
"changes": [
{
"date": "01/08/2017",
"description": "Cambio de estructura"
},
{
"date": "24/06/2017",
"description": "Cambios para autoplay"
},
{
"date": "06/06/2017",
"description": "Compatibilidad con AutoPlay"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "18/06/2016",
"description": "First release."
}
],
"categories": [
"movie"
],

View File

@@ -4,27 +4,8 @@
"active": true,
"adult": false,
"language": ["cast", "lat"],
"version": 1,
"thumbnail": "cinefox.png",
"banner": "cinefox.png",
"changes": [
{
"date": "01/08/2017",
"description": "Agregado servidor gvideo"
},
{
"date": "05/04/2017",
"description": "Cambio en los servidores"
},
{
"date": "21/03/2017",
"description": "Adaptado a httptools y corregido episodios para videoteca"
},
{
"date": "18/07/2016",
"description": "Primera version"
}
],
"categories": [
"movie",
"tvshow",

View File

@@ -6,21 +6,6 @@
"language": ["lat"],
"thumbnail": "https://s28.postimg.org/lytn2q1tp/cinefoxtv.png",
"banner": "cinefoxtv.png",
"version": 1,
"changes": [
{
"date": "22/05/2017",
"description": "fix por cambio en la estructura"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "09/02/2017",
"description": "First release."
}
],
"categories": [
"movie"
],

View File

@@ -6,13 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "cinehindi.png",
"banner": "http://i.imgur.com/cau9TVe.png",
"version": 1,
"changes": [
{
"date": "25/05/2017",
"description": "Primera versión completa del canal"
}
],
"categories": [
"movie"
]

View File

@@ -6,17 +6,6 @@
"language": ["*"],
"thumbnail": "cinetemagay.png",
"banner": "cinetemagay.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "05/08/2016",
"description": "Eliminado de sección películas"
}
],
"categories": [
"adult"
]

View File

@@ -7,25 +7,6 @@
"thumbnail": "cinetux.png",
"banner": "cinetux.png",
"fanart": "cinetux.jpg",
"version": 1,
"changes": [
{
"date": "31/07/2017",
"description": "Actualizado por cambio de estructura de la página"
},
{
"date": "12/05/2017",
"description": "Arreglada paginación y enlaces directos"
},
{
"date": "16/02/2017",
"description": "Adaptado a httptools y añadidos enlaces directos"
},
{
"date": "08/07/2016",
"description": "Correciones y adaptaciones a la nueva version"
}
],
"categories": [
"direct",
"movie"

View File

@@ -28,9 +28,9 @@ def mainlist(item):
itemlist = []
item.viewmode = viewmode
data = httptools.downloadpage(CHANNEL_HOST).data
total = scrapertools.find_single_match(data, "TENEMOS\s<b>(.*?)</b>")
titulo = "Peliculas (%s)" % total
data = httptools.downloadpage(CHANNEL_HOST + "pelicula").data
total = scrapertools.find_single_match(data, "Películas</h1><span>(.*?)</span>")
titulo = "Peliculas (%s)" %total
itemlist.append(item.clone(title=titulo, text_color=color2, action="", text_bold=True))
itemlist.append(item.clone(action="peliculas", title=" Novedades", url=CHANNEL_HOST + "pelicula",
thumbnail="https://raw.githubusercontent.com/master-1970/resources/master/images/genres"
@@ -283,7 +283,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
if type == "descarga": t_tipo = "Descargar"
data = data.replace("\n", "")
if type == "online":
patron = '(?is)class="playex.*?visualizaciones'
patron = '(?is)class="playex.*?sheader'
bloque1 = scrapertools.find_single_match(data, patron)
patron = '(?is)#(option-[^"]+).*?png">([^<]+)'
match = scrapertools.find_multiple_matches(data, patron)
@@ -303,7 +303,7 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item):
bloque2 = scrapertools.find_single_match(data, '(?s)box_links.*?dt_social_single')
bloque2 = bloque2.replace("\t", "").replace("\r", "")
patron = '(?s)optn" href="([^"]+)'
patron += '.*?title="([^\.]+)'
patron += '.*?alt="([^\.]+)'
patron += '.*?src.*?src="[^>]+"?/>([^<]+)'
patron += '.*?src="[^>]+"?/>([^<]+)'
patron += '.*?/span>([^<]+)'

View File

@@ -6,25 +6,6 @@
"language": ["cast"],
"thumbnail": "http://i.imgur.com/F7sevVu.jpg?1",
"banner": "clasicofilm.png",
"version": 1,
"changes": [
{
"date": "28/05/2017",
"description": "Corregido findvideos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "07/02/17",
"description": "Fix bug in newest"
},
{
"date": "09/01/2017",
"description": "Primera version"
}
],
"categories": [
"movie"
],

View File

@@ -1,37 +0,0 @@
{
"id": "crimenes",
"name": "Crimenes Imperfectos",
"active": true,
"adult": false,
"language": ["cast"],
"banner": "crimenes.png",
"thumbnail": "crimenes.png",
"version": 1,
"changes": [
{
"date": "19/06/2017",
"description": "correcion xml"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
}
]
}

View File

@@ -1,167 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
import xbmc
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
# Main list manual
def listav(item):
    """Scrape a YouTube results page into playable items plus pagination."""
    itemlist = []
    data = scrapertools.cache_page(item.url)
    # One tuple per video entry: thumbnail, url, title, duration.
    patronbloque = ('<li><div class="yt-lockup.*?<img.*?src="([^"]+)".*?'
                    '<h3 class="yt-lockup-title "><a href="([^"]+)".*?title="([^"]+)".*?'
                    '</a><span class=.*?">(.*?)</span></h3>')
    matchesbloque = re.compile(patronbloque, re.DOTALL).findall(data)
    scrapertools.printMatches(matchesbloque)
    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matchesbloque:
        colored_title = '[COLOR white]' + scrapedtitle + '[/COLOR] [COLOR red]' + scrapedduration + '[/COLOR]'
        video_url = urlparse.urljoin(item.url, scrapedurl)
        thumb = urlparse.urljoin(item.thumbnail, scrapedthumbnail)
        xbmc.log("$ " + scrapedurl + " " + colored_title + " " + scrapedthumbnail)
        itemlist.append(Item(channel=item.channel, action="play", title=colored_title,
                             fulltitle=colored_title, url=video_url, thumbnail=thumb,
                             fanart=thumb))
    # Pagination: every anchor inside the branded-page box becomes a
    # "next page" entry (in practice only the last one matters).
    patronbloque = '<div class="branded-page-box .*? spf-link ">(.*?)</div>'
    for bloque in re.compile(patronbloque, re.DOTALL).findall(data):
        patronvideo = '<a href="([^"]+)"'
        for scrapedurl in re.compile(patronvideo, re.DOTALL).findall(bloque):
            next_url = urlparse.urljoin(item.url, 'https://www.youtube.com' + scrapedurl)
            itemlist.append(
                Item(channel=item.channel, action="listav", title="Siguiente pag >>",
                     fulltitle="Siguiente Pag >>", url=next_url, thumbnail=item.thumbnail,
                     fanart=item.fanart))
    return itemlist
def busqueda(item):
    """Prompt for a search term and list matching YouTube videos.

    Opens an on-screen keyboard; when confirmed, scrapes the YouTube
    results page for the typed query and returns a list of playable
    items plus "next page" entries. When the dialog is cancelled, no
    list is returned (implicitly None) and an "enter" action is sent
    back to the UI instead.
    """
    itemlist = []
    keyboard = xbmc.Keyboard("", "Busqueda")
    keyboard.doModal()  # blocks until the user confirms or cancels
    if (keyboard.isConfirmed()):
        # Build the query string: spaces become '+' for the url.
        myurl = keyboard.getText().replace(" ", "+")
        data = scrapertools.cache_page('https://www.youtube.com/results?q=' + myurl)
        data = data.replace("\n", "").replace("\t", "")
        data = scrapertools.decodeHtmlentities(data)
        # One tuple per video entry: thumbnail, url, title, duration.
        patronbloque = '<li><div class="yt-lockup.*?<img.*?src="([^"]+)".*?'
        patronbloque += '<h3 class="yt-lockup-title "><a href="([^"]+)".*?title="([^"]+)".*?'
        patronbloque += '</a><span class=.*?">(.*?)</span></h3>'
        matchesbloque = re.compile(patronbloque, re.DOTALL).findall(data)
        scrapertools.printMatches(matchesbloque)
        for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduracion in matchesbloque:
            scrapedtitle = scrapedtitle + ' ' + scrapedduracion
            url = scrapedurl
            thumbnail = scrapedthumbnail
            xbmc.log("$ " + scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
            itemlist.append(
                Item(channel=item.channel, action="play", title=scrapedtitle, fulltitle=scrapedtitle, url=url,
                     thumbnail=thumbnail, fanart=thumbnail))
        # Pagination
        patronbloque = '<div class="branded-page-box .*? spf-link ">(.*?)</div>'
        matches = re.compile(patronbloque, re.DOTALL).findall(data)
        for bloque in matches:
            patronvideo = '<a href="([^"]+)"'
            matchesx = re.compile(patronvideo, re.DOTALL).findall(bloque)
            for scrapedurl in matchesx:
                url = 'https://www.youtube.com' + scrapedurl
                # only the last link is the one that matters
                itemlist.append(
                    Item(channel=item.channel, action="listav", title="Siguiente pag >>", fulltitle="Siguiente Pag >>",
                         url=url))
        return itemlist
    else:
        # Dialog cancelled: nothing to search for.
        # xbmcgui.Dialog().ok(item.channel, "nada que buscar")
        # xbmc.executebuiltin("Action(up)")
        xbmc.executebuiltin("Action(enter)")
        # itemlist.append( Item(channel=item.channel, action="listav", title="<< Volver", fulltitle="Volver" , url="history.back()") )
def mainlist(item):
    """Build the channel's fixed menu of YouTube searches plus a free-search entry."""
    logger.info()
    itemlist = []
    # (title, search url, thumbnail, fanart) for each preset entry.
    presets = [
        ("[COLOR white]Crimenes [COLOR red]Imperfectos[/COLOR]",
         'https://www.youtube.com/results?q=crimenes+imperfectos&sp=CAI%253D',
         "https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQ2PcyvcYIg6acvdUZrHGFFk_E3mXK9QSh-5TypP8Rk6zQ6S1yb2g",
         "https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcQ2PcyvcYIg6acvdUZrHGFFk_E3mXK9QSh-5TypP8Rk6zQ6S1yb2g"),
        ("[COLOR blue]Russian[/COLOR] [COLOR White]Dash[/COLOR] [COLOR red]Cam[/COLOR]",
         'https://www.youtube.com/results?search_query=russian+dash+cam&sp=CAI%253D',
         "https://i.ytimg.com/vi/-C6Ftromtig/maxresdefault.jpg",
         "https://encrypted-tbn2.gstatic.com/images?q=tbn:ANd9GcRQLO-n-kO1ByY8lLhKxz0-cejJD1J7rLge_j0E0Gh9LJ2WtTbSnA"),
        ("[COLOR green]Cuarto[/COLOR] [COLOR White]Milenio[/COLOR]",
         'https://www.youtube.com/results?search_query=cuarto+milenio+programa+completo&sp=CAI%253D',
         "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg",
         "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg"),
        ("[COLOR green]Milenio[/COLOR] [COLOR White]3- Podcasts[/COLOR]",
         'https://www.youtube.com/results?q=milenio+3&sp=CAI%253D',
         "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg",
         "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg"),
    ]
    for title, search_url, thumb, fanart in presets:
        item.url = search_url
        item.thumbnail = urlparse.urljoin(item.thumbnail, thumb)
        item.fanart = urlparse.urljoin(item.fanart, fanart)
        itemlist.append(
            Item(channel=item.channel, action="listav", title=title, fulltitle=title, url=item.url,
                 thumbnail=item.thumbnail, fanart=item.fanart))
    # Free-text search entry (no url: busqueda prompts the user).
    search_title = "[COLOR red]buscar ...[/COLOR]"
    item.thumbnail = urlparse.urljoin(item.thumbnail,
                                      "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/Cuarto-Milenio-analiza-fantasma-Granada_MDSVID20100924_0063_3.jpg")
    item.fanart = urlparse.urljoin(item.fanart,
                                   "http://cuatrostatic-a.akamaihd.net/cuarto-milenio/programas/temporada-07/t07xp32/fantasma-universidad_MDSVID20120420_0001_3.jpg")
    itemlist.append(Item(channel=item.channel, action="busqueda", title=search_title, fulltitle=search_title,
                         thumbnail=item.thumbnail, fanart=item.fanart))
    return itemlist
def play(item):
    """Turn the stored url directly into playable video items."""
    logger.info("url=" + item.url)
    return servertools.find_video_items(data=item.url)

View File

@@ -4,13 +4,6 @@
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,
"changes": [
{
"date": "16/05/2017",
"description": "Primera versión"
}
],
"thumbnail": "http://i.imgur.com/O49fDS1.png",
"categories": [
"anime",

View File

@@ -4,21 +4,6 @@
"active": true,
"adult": false,
"language": ["cast"],
"version": 1,
"changes": [
{
"date": "10/12/2016",
"description": "Reparado fanart y thumbs y correción código.Adaptado a Infoplus"
},
{
"date": "04/04/2017",
"description": "Migración a Httptools"
},
{
"date": "28/06/2017",
"description": "Correciones código.Algunas mejoras"
}
],
"thumbnail": "cuelgame.png",
"banner": "cuelgame.png",
"categories": [

View File

@@ -6,17 +6,6 @@
"language": ["*"],
"thumbnail": "cumlouder.png",
"banner": "cumlouder.png",
"version": 1,
"changes": [
{
"date": "04/05/17",
"description": "Corregido, usa proxy en caso de error con https"
},
{
"date": "13/01/17",
"description": "First version"
}
],
"categories": [
"adult"
]

View File

@@ -0,0 +1,12 @@
{
"id": "danimados",
"name": "Danimados",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://imgur.com/kU5Lx1S.png",
"banner": "https://imgur.com/xG5xqBq.png",
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,186 @@
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
# Base url of the site this channel scrapes.
host = "http://www.danimados.com/"
# Hosting servers autoplay may pick from for this channel.
list_servers = ['openload',
                'okru',
                'rapidvideo'
                ]
# The site exposes a single, unlabelled quality.
list_quality = ['default']
def mainlist(item):
    """Top-level menu for the channel: categories and most-popular listings."""
    logger.info()
    thumb_series = get_thumb("channels_tvshow.png")
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = list()
    for section_title in ("Categorías", "Más Populares"):
        itemlist.append(Item(channel=item.channel, action="mainpage", title=section_title,
                             url=host, thumbnail=thumb_series))
    # Pending section, kept for reference:
    #itemlist.append(Item(channel=item.channel, action="movies", title="Peliculas Animadas", url=host,
    #                     thumbnail=thumb_series))
    autoplay.show_option(item.channel, itemlist)
    return itemlist
# NOTE(review): disabled search entry point kept for future use. The
# triple-quoted string below is a deliberate no-op statement, not a docstring.
"""
def search(item, texto):
    logger.info()
    texto = texto.replace(" ","+")
    item.url = item.url+texto
    if texto!='':
        return lista(item)
"""
def mainpage(item):
    """Build a section listing for the danimados home page.

    "Categorías" yields category links (action "lista"); "Más Populares"
    yields show entries (action "episodios"). Fixes in this revision:
    an unexpected ``item.title`` no longer raises NameError (the scrape
    patterns used to be undefined in that case), and the unreachable
    trailing ``return`` after the if/else was removed.
    """
    logger.info()
    itemlist = []
    data1 = httptools.downloadpage(item.url).data
    data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
    patron_sec = ""
    patron = ""
    if item.title == "Más Populares":
        patron_sec = '<a class="lglossary" data-type.+?>(.+?)<\/ul>'
        # scrapedthumbnail, scrapedurl, scrapedtitle
        patron = '<img .+? src="([^"]+)".+?<a href="([^"]+)".+?>([^"]+)<\/a>'
    if item.title == "Categorías":
        patron_sec = '<ul id="main_header".+?>(.+?)<\/ul><\/div>'
        # scrapedurl, scrapedtitle
        patron = '<a href="([^"]+)">([^"]+)<\/a>'
    if not patron_sec:
        # Unknown menu entry: nothing we know how to scrape.
        return itemlist
    data = scrapertools.find_single_match(data1, patron_sec)
    matches = scrapertools.find_multiple_matches(data, patron)
    if item.title == "Géneros" or item.title == "Categorías":
        for scrapedurl, scrapedtitle in matches:
            # Animated movies have their own (currently disabled) section.
            if "Películas Animadas" != scrapedtitle:
                itemlist.append(
                    Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="lista"))
        return itemlist
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
                 thumbnail=scrapedthumbnail, action="episodios", show=scrapedtitle))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def lista(item):
    """List every show found on a category page."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data_lista = scrapertools.find_single_match(data, '<div class="items">(.+?)<\/div><\/div><div class=.+?>')
    # thumbnail, title, url, plot for each entry in the grid.
    patron = '<img src="([^"]+)" alt="([^"]+)">.+?<a href="([^"]+)">.+?<div class="texto">(.+?)<\/div>'
    for thumb, name, link, sinopsis in scrapertools.find_multiple_matches(data_lista, patron):
        itemlist.append(
            item.clone(title=name, url=link, thumbnail=thumb,
                       context=autoplay.context, plot=sinopsis, action="episodios", show=name))
    tmdb.set_infoLabels(itemlist)
    return itemlist
def episodios(item):
    """List the episodes of a show.

    The site labels each entry as "<season> - <episode>"; movie entries
    are labelled "Pel..." and get season 0. Fix in this revision: a label
    without the " - " separator no longer raises IndexError (it used to
    index ``tempepi[1]`` on a one-element split) — such entries fall back
    to season 0, episode 0.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data_lista = scrapertools.find_single_match(data,
                                     '<ul class="episodios">(.+?)<\/ul><\/div><\/div><\/div>')
    show = item.title
    # scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle
    patron_caps = '<img src="([^"]+)"><\/a><\/div><div class=".+?">([^"]+)<\/div>.+?<a .+? href="([^"]+)">([^"]+)<\/a>'
    matches = scrapertools.find_multiple_matches(data_lista, patron_caps)
    for scrapedthumbnail, scrapedtempepi, scrapedurl, scrapedtitle in matches:
        tempepi = scrapedtempepi.split(" - ")
        if len(tempepi) < 2:
            # Label without "season - episode" structure: don't crash,
            # file it under season 0, episode 0.
            tempepi = [0, "0"]
        elif tempepi[0] == 'Pel':
            # "Película" entries carry no real season number.
            tempepi[0] = 0
        title = "{0}x{1} - ({2})".format(tempepi[0], tempepi[1].zfill(2), scrapedtitle)
        itemlist.append(Item(channel=item.channel, thumbnail=scrapedthumbnail,
                             action="findvideos", title=title, url=scrapedurl, show=show))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="[COLOR blue]Añadir " + show + " a la videoteca[/COLOR]", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=show))
    return itemlist
def findvideos(item):
    """Extract hosted-video links from an episode page.

    Improvements in this revision: iterates the matches directly instead
    of ``for i in range(len(...))``; the repetitive if-chain mapping url
    substrings to server ids is now a lookup table (same precedence: a
    later match overrides an earlier one); the dead "NO DISPONIBLE"
    title branch — which never produced an item — was removed.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    data = scrapertools.find_single_match(data,
                                          '<div id="playex" .+?>(.+?)<\/nav><\/div><\/div>')
    # url substring -> server id understood by the player, in the same
    # order the original if-chain tested them.
    known_servers = (("ok.ru", 'okru'),
                     ("openload", 'openload'),
                     ("google", 'gvideo'),
                     ("rapidvideo", 'rapidvideo'),
                     ("streamango", 'streamango'))
    for url in scrapertools.find_multiple_matches(data, 'src="(.+?)"'):
        # Skip links whose page reports the video as removed.
        if verificar_video(url) != 200:
            continue
        server = ''
        for needle, server_id in known_servers:
            if needle in url:
                server = server_id
        if server != '':
            itemlist.append(item.clone(title="Enlace encontrado en %s " % (server.capitalize()),
                                       url=url, action="play", server=server))
    autoplay.start(itemlist, item)
    return itemlist
def verificar_video(url):
    """Best-effort availability probe for a hosted-video page.

    Returns 404 when the page body says the video was removed, 200
    otherwise. Fix in this revision: the page used to be downloaded
    TWICE (once for ``.code``, once for ``.data``); it is now fetched
    once and the response reused.
    """
    response = httptools.downloadpage(url)
    if response.code == 200:
        # The page loaded; check whether it announces a removed video.
        removed = scrapertools.find_single_match(response.data, 'removed(.+)')
        if len(removed) != 0:
            return 404
    # NOTE(review): non-200 responses are also reported as available
    # (behaviour kept from the original) — confirm whether they should
    # instead be treated as unavailable.
    return 200

View File

@@ -4,17 +4,6 @@
"language": ["*"],
"active": true,
"adult": true,
"changes": [
{
"date": "28/05/2017",
"description": "Reparado por cambios en la página"
},
{
"date": "21/02/2017",
"description": "Primera versión"
}
],
"version": 1,
"thumbnail": "http://i.imgur.com/tBSWudd.png?1",
"banner": "datoporn.png",
"categories": [

View File

@@ -4,17 +4,6 @@
"language": ["cast"],
"active": true,
"adult": false,
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"banner": "descargacineclasico2.png",
"thumbnail": "descargacineclasico2.png",
"categories": [

View File

@@ -3,30 +3,7 @@
"name": "DescargasMIX",
"language": ["cast", "lat"],
"active": true,
"version": 1,
"adult": false,
"changes": [
{
"date": "06/05/17",
"description": "Cambio de dominio"
},
{
"date": "17/04/17",
"description": "Mejorado en la deteccion del dominio para futuros cambios"
},
{
"date": "09/04/17",
"description": "Arreglado por cambios en la página"
},
{
"date": "27/01/17",
"description": "Sección online en películas modificada"
},
{
"date": "08/07/16",
"description": "Adaptado el canal a las nuevas funciones"
}
],
"thumbnail": "descargasmix.png",
"banner": "descargasmix.png",
"categories": [

View File

@@ -302,7 +302,7 @@ def epienlaces(item):
def findvideos(item):
logger.info()
if (item.extra and item.extra != "findvideos") or item.path:
if item.contentSeason!='':
return epienlaces(item)
itemlist = []

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "discoverymx.png",
"banner": "discoverymx.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"documentary"
]

View File

@@ -6,29 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "divxatope.png",
"banner": "divxatope.png",
"version": 1,
"changes": [
{
"date": "17/04/17",
"description": "Reparados torrents, añadidas nuevas secciones"
},
{
"date": "13/01/17",
"description": "Reparados torrents y paginacion. Añadida seccion Peliculas 4K ultraHD"
},
{
"date": "31/12/16",
"description": "Adaptado, por cambios en la web"
},
{
"date": "01/07/16",
"description": "Eliminado código innecesario."
},
{
"date": "29/04/16",
"description": "Adaptar a Buscador global y Novedades Peliculas y Series"
}
],
"categories": [
"torrent",
"movie",

View File

@@ -157,11 +157,10 @@ def lista(item):
# logger.info("data="+data)
bloque = scrapertools.find_single_match(data, '(?:<ul class="pelilist">|<ul class="buscar-list">)(.*?)</ul>')
patron = '<li[^<]+'
patron += '<a href="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += '<h2[^>]*>(.*?)</h2.*?'
patron += '(?:<strong[^>]*>|<span[^>]*>)(.*?)(?:</strong>|</span>)'
patron = '<a href="([^"]+).*?' # la url
patron += '<img src="([^"]+)"[^>]+>.*?' # el thumbnail
patron += '<h2[^>]*>(.*?)</h2.*?' # el titulo
patron += '<span>([^<].*?)<' # la calidad
matches = re.compile(patron, re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)
@@ -175,7 +174,7 @@ def lista(item):
thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
year = scrapertools.find_single_match(scrapedthumbnail, r'-(\d{4})')
contentTitle = scrapertools.htmlclean(scrapedtitle).strip()
patron = '([^<]+)<br>'
matches = re.compile(patron, re.DOTALL).findall(calidad + '<br>')
@@ -196,7 +195,7 @@ def lista(item):
itemlist.append(Item(channel=item.channel, action="episodios", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
language=idioma, contentSeason=int(temporada),
contentEpisodeNumber=int(episodio), contentQuality=calidad))
contentEpisodeNumber=int(episodio), quality=calidad))
else:
if len(matches) == 2:
@@ -205,7 +204,7 @@ def lista(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, folder=True, contentTitle=contentTitle,
language=idioma, contentThumbnail=thumbnail, contentQuality=calidad))
language=idioma, contentThumbnail=thumbnail, quality=calidad))
next_page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">Next</a></li>')
if next_page_url != "":
@@ -262,7 +261,7 @@ def findvideos(item):
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
link = scrapertools.find_single_match(data, 'href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
link = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"')
if link != "":
link = "http://www.divxatope1.com/" + link
logger.info("torrent=" + link)
@@ -275,14 +274,7 @@ def findvideos(item):
patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
patron += '<\/div[^<]+<div class="box6">([^<]+)<'
#patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
#patron += '<div class="box2">([^<]+)</div[^<]+'
#patron += '<div class="box3">([^<]+)</div[^<]+'
#patron += '<div class="box4">([^<]+)</div[^<]+'
#patron += '<div class="box5">(.*?)</div[^<]+'
#patron += '<div class="box6">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
itemlist_ver = []
itemlist_descargar = []
@@ -308,11 +300,8 @@ def findvideos(item):
else:
itemlist_descargar.append(new_item)
for new_item in itemlist_ver:
itemlist.append(new_item)
for new_item in itemlist_descargar:
itemlist.append(new_item)
itemlist.extend(itemlist_ver)
itemlist.extend(itemlist_descargar)
return itemlist

View File

@@ -5,17 +5,6 @@
"adult": false,
"language": ["cast"],
"thumbnail": "http://imgur.com/Madj03A.jpg",
"version": 1,
"changes": [
{
"date": "26/04/2017",
"description": "Release"
},
{
"date": "28/06/2017",
"description": "Correciones código por cambios web"
}
],
"categories": [
"torrent",
"movie",

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/qMR9sg9.png",
"banner": "documaniatv.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "11/07/2016",
"description": "Reparadas cuentas de usuario."
}
],
"categories": [
"documentary"
],

View File

@@ -5,17 +5,6 @@
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/fsrnC4m.jpg",
"version": 1,
"changes": [
{
"date": "15/04/2017",
"description": "fix novedades"
},
{
"date": "09/03/2017",
"description": "nueva web"
}
],
"categories": [
"documentary"
],

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "doramastv.png",
"banner": "doramastv.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"tvshow"
],

View File

@@ -4,21 +4,6 @@
"active": false,
"adult": false,
"language": ["*"],
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "12/03/17",
"description": "Añadidas mas opciones de configuracion y corregidos fallos"
},
{
"date": "12/01/17",
"description": "release"
}
],
"categories": [
"movie"
],

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "ecarteleratrailers.png",
"banner": "ecarteleratrailers.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"movie"
]

View File

@@ -6,21 +6,6 @@
"language": ["cast"],
"thumbnail": "elitetorrent.png",
"banner": "elitetorrent.png",
"version": 2,
"changes": [
{
"date": "2/08/2017",
"description": "arreglada url canal"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"torrent",
"movie",

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "elsenordelanillo.png",
"banner": "elsenordelanillo.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"movie"
]

View File

@@ -6,21 +6,6 @@
"language": ["*"],
"thumbnail": "eporner.png",
"banner": "eporner.png",
"version": 1,
"changes": [
{
"date": "03/06/2017",
"description": "reparada seccion categorias"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "28/12/16",
"description": "First version"
}
],
"categories": [
"adult"
]

View File

@@ -6,17 +6,6 @@
"language": ["*"],
"thumbnail": "http://www.youfreeporntube.com/uploads/custom-logo.png",
"banner": "http://www.youfreeporntube.com/uploads/custom-logo.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "26/12/2016",
"description": "Release."
}
],
"categories": [
"adult"
],

View File

@@ -6,29 +6,6 @@
"language": ["lat"],
"thumbnail": "https://s24.postimg.org/nsgit7fhh/estadepelis.png",
"banner": "https://s28.postimg.org/ud0l032ul/estadepelis_banner.png",
"version": 1,
"changes": [
{
"date": "24/06/2017",
"description": "Cambios para autoplay"
},
{
"date": "22/06/2017",
"description": "ajustes para AutoPlay"
},
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "07/02/2017",
"description": "Release"
}
],
"categories": [
"movie"
],

View File

@@ -7,21 +7,6 @@
"fanart": "https://github.com/master-1970/resources/raw/master/images/fanart/estrenosgo.png",
"thumbnail": "https://github.com/master-1970/resources/raw/master/images/squares/estrenosgo.png",
"banner": "estrenosgo.png",
"version": 1,
"changes": [
{
"date": "15/05/16",
"description": "Compatibilidad con python anteriores a la 2.7"
},
{
"date": "03/05/16",
"description": "Modificado por cambios en la web"
},
{
"date": "29/04/16",
"description": "Version inicial"
}
],
"categories": [
"movie",
"tvshow",

View File

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
from channelselector import get_thumb
@@ -53,8 +53,7 @@ def listado(item):
patron += '<b>Categoria:\s*</b>([^&]+)&raquo;\s*([^<]+).*?'
patron += '<div class="OpcionesDescargasMini">(.*?)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
matches = scrapertools.find_multiple_matches(data, patron)
for thumbnail, title, cat_padres, cat_hijos, opciones in matches:
# logger.debug(thumbnail + "\n" + title + "\n" + cat_padres + "\n" + cat_hijos + "\n" + opciones)
# Obtenemos el año del titulo y eliminamos lo q sobre
@@ -70,7 +69,7 @@ def listado(item):
thumbnail = HOST + thumbnail[:-5] + 'b' + thumbnail[-4:]
# Buscamos opcion de ver online
patron = '<a href="http://estrenosly.org/ver-online-([^"]+)'
patron = '<a href="http://estrenos.*?/ver-online-([^"]+)'
url_ver = scrapertools.find_single_match(opciones, patron)
if url_ver:
new_item = Item(channel=item.channel, action="findvideos", title=title,

View File

@@ -6,17 +6,6 @@
"language": ["*"],
"thumbnail": "filesmonster_catalogue.png",
"banner": "filesmonster_catalogue.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "05/08/2016",
"description": "Eliminado de sección películas"
}
],
"categories": [
"adult"
],

View File

@@ -4,13 +4,6 @@
"language": ["*"],
"active": true,
"adult": true,
"version": 1,
"changes": [
{
"date": "29/04/2017",
"description": "Primera versión"
}
],
"thumbnail": "http://i.imgur.com/wuzhOCt.png?1",
"categories": [
"adult"

View File

@@ -4,15 +4,8 @@
"active": true,
"adult": false,
"language": ["cast", "lat"],
"version": 1,
"thumbnail": "http://gnula.mobi/wp-content/uploads/2016/08/Untitled-6.png",
"banner": "",
"changes": [
{
"date": "25/08/2017",
"description": "Nuevo canal"
}
],
"categories": [
"movie",
"adult"

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "gnula.png",
"banner": "gnula.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"movie"
]

View File

@@ -6,17 +6,6 @@
"language": ["cast"],
"thumbnail": "guaridavalencianista.png",
"banner": "guaridavalencianista.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"documentary"
]

View File

@@ -6,29 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "hdfull.png",
"banner": "hdfull.png",
"version": 1,
"changes": [
{
"date": "30/05/2017",
"description": "Arreglada la extracción de enlaces por cambios en la web"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "21/03/2017",
"description": "Pequeño fix para corregir algunas urls de los vídeos que se extraían mal"
},
{
"date": "02/02/2017",
"description": "Arreglada la extracción de enlaces por cambios en la web"
},
{
"date": "05/01/2017",
"description": "Corregido debido a cloudflare"
}
],
"categories": [
"movie",
"tvshow"

View File

@@ -15,6 +15,7 @@ from platformcode import platformtools
host = "http://hdfull.tv"
A_A = {'User-Agent':'Mozilla/5.0 AppLeWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 SaFAri/537.36'}
if config.get_setting('hdfulluser', 'hdfull'):
account = True
else:
@@ -28,7 +29,7 @@ def settingCanal(item):
def login():
logger.info()
data = agrupa_datos(httptools.downloadpage(host).data)
data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
patron = "<input type='hidden' name='__csrf_magic' value=\"([^\"]+)\" />"
sid = scrapertools.find_single_match(data, patron)
@@ -37,7 +38,7 @@ def login():
'hdfull') + "&password=" + config.get_setting(
'hdfullpassword', 'hdfull') + "&action=login"
httptools.downloadpage(host, post=post)
httptools.downloadpage(host, post=post, headers=A_A)
def mainlist(item):
@@ -137,7 +138,7 @@ def menuseries(item):
def search(item, texto):
logger.info()
data = agrupa_datos(httptools.downloadpage(host).data)
data = agrupa_datos(httptools.downloadpage(host, headers=A_A).data)
sid = scrapertools.get_match(data, '.__csrf_magic. value="(sid:[^"]+)"')
item.extra = urllib.urlencode({'__csrf_magic': sid}) + '&menu=search&query=' + texto
@@ -173,7 +174,7 @@ def items_usuario(item):
itemlist = []
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
## Fichas usuario
url = item.url.split("?")[0]
@@ -187,7 +188,7 @@ def items_usuario(item):
next_page = url + "?" + post
## Carga las fichas de usuario
data = httptools.downloadpage(url, post=post).data
data = httptools.downloadpage(url, post=post, headers=A_A).data
fichas_usuario = jsontools.load(data)
for ficha in fichas_usuario:
@@ -255,7 +256,7 @@ def listado_series(item):
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
patron = '<div class="list-item"><a href="([^"]+)"[^>]+>([^<]+)</a></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -275,10 +276,10 @@ def fichas(item):
textoidiomas=''
infoLabels=dict()
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
if item.title == "Buscar...":
data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra).data)
data = agrupa_datos(httptools.downloadpage(item.url, post=item.extra, headers=A_A).data)
s_p = scrapertools.get_match(data, '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
'<h3 class="section-title">')
@@ -290,7 +291,7 @@ def fichas(item):
else:
data = s_p[0] + s_p[1]
else:
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
data = re.sub(
r'<div class="span-6[^<]+<div class="item"[^<]+' + \
@@ -362,11 +363,12 @@ def fichas(item):
def episodios(item):
logger.info()
A_F = L_A
id = "0"
itemlist = []
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
url_targets = item.url
@@ -376,7 +378,7 @@ def episodios(item):
item.url = item.url.split("###")[0]
## Temporadas
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
if id == "0":
## Se saca el id de la serie de la página cuando viene de listado_series
@@ -410,7 +412,7 @@ def episodios(item):
for scrapedurl in matches:
## Episodios
data = agrupa_datos(httptools.downloadpage(scrapedurl).data)
data = agrupa_datos(httptools.downloadpage(scrapedurl, headers=A_A).data)
sid = scrapertools.get_match(data, "<script>var sid = '(\d+)'")
ssid = scrapertools.get_match(scrapedurl, "temporada-(\d+)")
@@ -418,7 +420,7 @@ def episodios(item):
url = host + "/a/episodes"
data = httptools.downloadpage(url, post=post).data
data = httptools.downloadpage(url, post=post, headers=A_A).data
episodes = jsontools.load(data)
@@ -480,10 +482,9 @@ def episodios(item):
def novedades_episodios(item):
logger.info()
itemlist = []
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
## Episodios
url = item.url.split("?")[0]
@@ -495,7 +496,7 @@ def novedades_episodios(item):
post = post.replace("start=" + old_start, "start=" + start)
next_page = url + "?" + post
data = httptools.downloadpage(url, post=post).data
data = httptools.downloadpage(url, post=post, headers=A_A).data
episodes = jsontools.load(data)
@@ -567,7 +568,7 @@ def generos(item):
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/peliculas"(.*?)</ul>')
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
@@ -586,10 +587,10 @@ def generos(item):
def generos_series(item):
logger.info()
A_F= L_A
itemlist = []
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
data = scrapertools.find_single_match(data, '<li class="dropdown"><a href="http://hdfull.tv/series"(.*?)</ul>')
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
@@ -612,10 +613,12 @@ def findvideos(item):
it1 = []
it2 = []
## Carga estados
status = jsontools.load(httptools.downloadpage(host + '/a/status/all').data)
status = jsontools.load(httptools.downloadpage(host + '/a/status/all', headers=A_A).data)
url_targets = item.url
## Vídeos
id = ""
type = ""
if "###" in item.url:
id = item.url.split("###")[1].split(";")[0]
type = item.url.split("###")[1].split(";")[1]
@@ -639,10 +642,10 @@ def findvideos(item):
it1.append(Item(channel=item.channel, action="set_status", title=title, fulltitle=title, url=url_targets,
thumbnail=item.thumbnail, show=item.show, folder=True))
data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js").data
data_js = httptools.downloadpage("http://hdfull.tv/templates/hdfull/js/jquery.hdfull.view.min.js", headers=A_A).data
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js").data
data_js = httptools.downloadpage("http://hdfull.tv/js/providers.js", headers=A_A).data
try:
data_js = jhexdecode(data_js)
except:
@@ -655,7 +658,7 @@ def findvideos(item):
data_js = re.sub(r':(function.*?\})', r':"\g<1>"', decode_aa)
data_js = re.sub(r':(var[^,]+),', r':"\g<1>",', data_js)
data = agrupa_datos(httptools.downloadpage(item.url).data)
data = agrupa_datos(httptools.downloadpage(item.url, headers=A_A).data)
data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
@@ -698,6 +701,9 @@ def findvideos(item):
it2 = servertools.get_servers_itemlist(it2, lambda i: i.title % i.server.capitalize())
it2.sort(key=lambda it: (it.tipo1, it.idioma, it.server))
for item in it2:
if "###" not in item.url:
item.url += "###" + id + ";" + type
itemlist.extend(it1)
itemlist.extend(it2)
## 2 = película
@@ -707,7 +713,6 @@ def findvideos(item):
action="add_pelicula_to_library", url=url_targets, thumbnail = item.thumbnail,
fulltitle = item.contentTitle
))
return itemlist
@@ -718,7 +723,7 @@ def play(item):
type = item.url.split("###")[1].split(";")[1]
item.url = item.url.split("###")[0]
post = "target_id=%s&target_type=%s&target_status=1" % (id, type)
data = httptools.downloadpage(host + "/a/status", post=post).data
data = httptools.downloadpage(host + "/a/status", post=post, headers=A_A).data
devuelve = servertools.findvideosbyserver(item.url, item.server)
if devuelve:
@@ -781,7 +786,7 @@ def set_status(item):
path = "/a/favorite"
post = "like_id=" + id + "&like_type=" + type + "&like_comment=&vote=-1"
data = httptools.downloadpage(host + path, post=post).data
data = httptools.downloadpage(host + path, post=post, headers=A_A).data
title = "[COLOR green][B]OK[/B][/COLOR]"

View File

@@ -0,0 +1,7 @@
{
"id": "help",
"name": "Ayuda",
"active": false,
"adult": false,
"language": ["*"]
}

View File

@@ -0,0 +1,219 @@
# -*- coding: utf-8 -*-
import os
import xbmc
from core.item import Item
from platformcode import config, logger, platformtools
from channelselector import get_thumb
if config.is_xbmc():
import xbmcgui
class TextBox(xbmcgui.WindowXMLDialog):
    """Skinned, modal text-viewer window.

    NOTE: instantiating this class shows the dialog immediately —
    ``doModal()`` is called from ``__init__`` — so ``TextBox(...)`` blocks
    until the user closes the window.
    """

    def __init__(self, *args, **kwargs):
        # 'title' and 'text' arrive as keyword arguments from faq().
        self.title = kwargs.get('title')
        self.text = kwargs.get('text')
        # Show the dialog right away; execution resumes once it is closed.
        self.doModal()

    def onInit(self):
        try:
            # Controls 5 (text area) and 1 (header label) are the ones
            # defined by Kodi's stock DialogTextViewer.xml skin file —
            # assumes the active skin keeps those ids; TODO confirm.
            self.getControl(5).setText(self.text)
            self.getControl(1).setLabel(self.title)
        except:
            # Best effort: a skin without these controls just shows nothing.
            pass

    def onClick(self, control_id):
        # No clickable controls to handle.
        pass

    def onFocus(self, control_id):
        # Focus changes are irrelevant for a read-only viewer.
        pass

    def onAction(self, action):
        # Close only on "back" / "previous menu"; ignore everything else.
        if action in [xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK]:
            self.close()
def mainlist(item):
    """Build the root menu of the help channel.

    Only populated when running under Kodi (the FAQ answers rely on Kodi
    dialogs); on any other platform an empty list is returned.
    """
    logger.info()
    itemlist = []
    if config.is_xbmc():
        thumb = get_thumb("help.png")
        # Non-selectable header entry.
        itemlist.append(Item(channel=item.channel, action="", title="FAQ:",
                             thumbnail=thumb, folder=False))
        # (title, extra id dispatched by faq(), is-folder) per FAQ entry.
        entries = [
            (" - ¿Cómo reportar un error?", "report_error", False),
            (" - ¿Se pueden activar/desactivar los canales?", "onoff_canales", False),
            (" - ¿Es posible la sincronización automática con Trakt?", "trakt_sync", False),
            (" - ¿Es posible mostrar todos los resultados juntos en el buscador global?", "buscador_juntos", False),
            (" - Los enlaces tardan en aparecer.", "tiempo_enlaces", False),
            (" - La búsqueda de contenido no se hace correctamente.", "prob_busquedacont", False),
            (" - Algún canal no funciona correctamente.", "canal_fallo", False),
            (" - Los enlaces Torrent no funcionan.", "prob_torrent", False),
            (" - No se actualiza correctamente la videoteca.", "prob_bib", True),
            (" - Enlaces de interés", "", False),
        ]
        for title, extra, folder in entries:
            itemlist.append(Item(channel=item.channel, action="faq", title=title,
                                 thumbnail=thumb, folder=folder, extra=extra))
    return itemlist
def faq(item):
    """Answer the FAQ entry the user picked in mainlist().

    ``item.extra`` selects the topic.  Depending on the branch this either
    pops a yes/no dialog (optionally jumping straight to the relevant
    settings screen), shows a plain OK dialog, or opens a skinned TextBox
    with a longer explanation.  Only the 'prob_bib' branch returns a list
    of menu items (the tools submenu); TextBox branches return the dialog
    instance and the remaining branches return None.
    """
    if item.extra == "onoff_canales":
        respuesta = platformtools.dialog_yesno("Alfa",
                                               "Esto se puede hacer en 'Configuración'>'Activar/Desactivar canales'. "
                                               "Puedes activar/desactivar los canales uno por uno o todos a la vez. ",
                                               "¿Deseas gestionar ahora los canales?")
        if respuesta == 1:
            # Deferred import: channels.setting is only needed on demand.
            from channels import setting
            setting.conf_tools(Item(extra='channels_onoff'))
    elif item.extra == "trakt_sync":
        respuesta = platformtools.dialog_yesno("Alfa",
                                               "Actualmente se puede activar la sincronización (silenciosa) "
                                               "tras marcar como visto un episodio (esto se hace automáticamente). "
                                               "Esta opción se puede activar en 'Configuración'>'Ajustes "
                                               "de la videoteca'.",
                                               "¿Deseas acceder a dichos ajustes?")
        if respuesta == 1:
            from channels import videolibrary
            videolibrary.channel_config(Item(channel='videolibrary'))
    elif item.extra == "tiempo_enlaces":
        respuesta = platformtools.dialog_yesno("Alfa",
                                               "Esto puede mejorarse limitando el número máximo de "
                                               "enlaces o mostrandolos en una ventana emergente. "
                                               "Estas opciones se encuentran en 'Configuración'>'Ajustes "
                                               "de la videoteca'.",
                                               "¿Deseas acceder a dichos ajustes?")
        if respuesta == 1:
            from channels import videolibrary
            videolibrary.channel_config(Item(channel='videolibrary'))
    elif item.extra == "prob_busquedacont":
        # item.title[6:] strips the menu prefix added in mainlist() —
        # assumes a fixed 6-character prefix; TODO confirm against mainlist.
        title = "Alfa - FAQ - %s" % item.title[6:]
        text = ("Puede que no hayas escrito la ruta de la librería correctamente en "
                "'Configuración'>'Preferencias'.\n"
                "La ruta específicada debe ser exactamente la misma de la 'fuente' "
                "introducida en 'Archivos' de la videoteca de Kodi.\n"
                "AVANZADO: Esta ruta también se encuentra en 'sources.xml'.\n"
                "También puedes estar experimentando problemas por estar "
                "usando algun fork de Kodi y rutas con 'special://'. "
                "SPMC, por ejemplo, tiene problemas con esto, y no parece tener solución, "
                "ya que es un problema ajeno a Alfa que existe desde hace mucho.\n"
                "Puedes intentar subsanar estos problemas en 'Configuración'>'Ajustes de "
                "la videoteca', cambiando el ajuste 'Realizar búsqueda de contenido en' "
                "de 'La carpeta de cada serie' a 'Toda la videoteca'."
                "También puedes acudir a 'http://alfa-addon.ga' en busca de ayuda.")
        # Modal: blocks until closed, then returns the dialog instance.
        return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
    elif item.extra == "canal_fallo":
        title = "Alfa - FAQ - %s" % item.title[6:]
        text = ("Puede ser que la página web del canal no funcione. "
                "En caso de que funcione la página web puede que no seas el primero"
                " en haberlo visto y que el canal este arreglado. "
                "Puedes mirar en 'alfa-addon.ga' o en el "
                "repositorio de GitHub (github.com/alfa-addon/addon). "
                "Si no encuentras el canal arreglado puedes reportar un "
                "problema en el foro.")
        return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
    elif item.extra == "prob_bib":
        platformtools.dialog_ok("Alfa",
                                "Puede ser que hayas actualizado el plugin recientemente "
                                "y que las actualizaciones no se hayan aplicado del todo "
                                "bien. Puedes probar en 'Configuración'>'Otras herramientas', "
                                "comprobando los archivos *_data.json o "
                                "volviendo a añadir toda la videoteca.")
        respuesta = platformtools.dialog_yesno("Alfa",
                                               "¿Deseas acceder ahora a esa seccion?")
        if respuesta == 1:
            # Return the tools submenu so Kodi renders it as a directory.
            itemlist = []
            from channels import setting
            new_item = Item(channel="setting", action="submenu_tools", folder=True)
            itemlist.extend(setting.submenu_tools(new_item))
            return itemlist
    elif item.extra == "prob_torrent":
        title = "Alfa - FAQ - %s" % item.title[6:]
        text = ("Puedes probar descargando el modulo 'libtorrent' de Kodi o "
                "instalando algun addon como 'Quasar' o 'Torrenter', "
                "los cuales apareceran entre las opciones de la ventana emergente "
                "que aparece al pulsar sobre un enlace torrent. "
                "'Torrenter' es más complejo pero también más completo "
                "y siempre funciona.")
        return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
    elif item.extra == "buscador_juntos":
        respuesta = platformtools.dialog_yesno("Alfa",
                                               "Si. La opcion de mostrar los resultados juntos "
                                               "o divididos por canales se encuentra en "
                                               "'setting'>'Ajustes del buscador global'>"
                                               "'Otros ajustes'.",
                                               "¿Deseas acceder a ahora dichos ajustes?")
        if respuesta == 1:
            from channels import search
            search.settings("")
    elif item.extra == "report_error":
        # Kodi renamed its log file from xbmc.log to kodi.log in v14 (Helix).
        if config.get_platform(True)['num_version'] < 14:
            log_name = "xbmc.log"
        else:
            log_name = "kodi.log"
        ruta = xbmc.translatePath("special://logpath") + log_name
        title = "Alfa - FAQ - %s" % item.title[6:]
        text = ("Para reportar un problema en 'http://alfa-addon.ga' es necesario:\n"
                " - Versión que usas de Alfa.\n"
                " - Versión que usas de kodi, mediaserver, etc.\n"
                " - Nombre del skin (en el caso que uses Kodi) y si se "
                "te ha resuelto el problema al usar el skin por defecto.\n"
                " - Descripción del problema y algún caso de prueba.\n"
                " - Agregar el log en modo detallado, una vez hecho esto, "
                "zipea el log y lo puedes adjuntar en un post.\n\n"
                "El log se encuentra en: \n\n"
                "%s" % ruta)
        return TextBox("DialogTextViewer.xml", os.getcwd(), "Default", title=title, text=text)
    else:
        # Fallback entry ("Enlaces de interés" and the header row).
        platformtools.dialog_ok("Alfa",
                                "Entérate de novedades, consejos u opciones que desconoces en Telegram: @alfa_addon.\n"
                                "Si tienes problemas o dudas, puedes acudir al Foro: http://alfa-addon.ga")

View File

@@ -6,17 +6,6 @@
"language": ["*"],
"thumbnail": "https://s11.postimg.org/cmuwcvvpf/hentaienespanol.png",
"banner": "https://s3.postimg.org/j3qkfut8z/hentaienespanol_banner.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/12/2016",
"description": "Release."
}
],
"categories": [
"adult"
],

View File

@@ -6,9 +6,6 @@
"language": ["*"],
"thumbnail": "https://dl.dropboxusercontent.com/u/30248079/hentai_id.png",
"banner": "https://dl.dropboxusercontent.com/u/30248079/hentai_id2.png",
"version": 1,
"date": "09/03/2017",
"changes": "Fix web",
"categories": [
"adult"
]

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "https://s27.postimg.org/pjq3y552b/idocumentales.png",
"banner": "https://s16.postimg.org/6d8bh1z1x/idocumentales_banner.png",
"version": 1,
"changes": [
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "18/06/2016",
"description": "First release"
}
],
"categories": [
"documentary"
],

View File

@@ -4,21 +4,6 @@
"active": true,
"adult": false,
"language": ["cast", "lat"],
"changes": [
{
"date": "12/03/2017",
"description": "Reparados enlaces directos"
},
{
"date": "27/02/2017",
"description": "Añadidos enlaces directos y adaptado al uso de httptools"
},
{
"date": "16/01/2016",
"description": "Corregido por cambios en el enmascaramiento de enlaces"
}
],
"version": 1,
"thumbnail": "http://i.imgur.com/I7MxHZI.png",
"banner": "inkapelis.png",
"categories": [

View File

@@ -4,13 +4,6 @@
"language": ["*"],
"active": true,
"adult": true,
"version": 1,
"changes": [
{
"date": "29/04/2017",
"description": "Primera versión"
}
],
"thumbnail": "http://i.imgur.com/OTYwbAa.png?1",
"categories": [
"adult"

View File

@@ -6,17 +6,6 @@
"language": ["*"],
"thumbnail": "https://s15.postimg.org/pzd3h4vy3/javus.png",
"banner": "https://s21.postimg.org/5pqzedp2f/javus_banner.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/12/2016",
"description": "Release."
}
],
"categories": [
"adult"
],

View File

@@ -6,17 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "jkanime.png",
"banner": "jkanime.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"anime"
],

View File

@@ -4,13 +4,6 @@
"language": ["cast", "lat"],
"active": true,
"adult": false,
"version": 1,
"changes": [
{
"date": "16/05/2017",
"description": "Primera version"
}
],
"thumbnail": "http://i.imgur.com/LVdupxc.png",
"categories": [
"movie",

View File

@@ -6,29 +6,6 @@
"language": ["cast", "lat"],
"thumbnail": "https://s31.postimg.org/5worjw2nv/locopelis.png",
"banner": "https://s31.postimg.org/ng87bb9jv/locopelis_banner.png",
"version": 1,
"changes": [
{
"date": "24/06/2017",
"description": "Cambios para autoplay"
},
{
"date": "06/06/2017",
"description": "Compatibilidad con autoplay"
},
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "06/12/2016",
"description": "Release."
}
],
"categories": [
"movie"
],

View File

@@ -4,15 +4,8 @@
"active": true,
"adult": false,
"language": ["cast", "lat"],
"version": 1,
"thumbnail": "http://www.maxipelis.net/wp-content/uploads/2016/12/applogo.png",
"banner": "",
"changes": [
{
"date": "25/08/2017",
"description": "Nuevo canal"
}
],
"categories": [
"movie"
],

View File

@@ -6,17 +6,6 @@
"language": ["cast"],
"thumbnail": "mejortorrent.png",
"banner": "mejortorrent.png",
"version": 1,
"changes": [
{
"date": "17/04/2017",
"description": "Arreglado error que impedía el uso del canal"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"torrent",
"movie",

View File

@@ -6,29 +6,6 @@
"language": ["lat"],
"thumbnail": "https://s32.postimg.org/7g50yo39h/metaserie.png",
"banner": "https://s31.postimg.org/u6yddil8r/metaserie_banner.png",
"version": 1,
"changes": [
{
"date": "24/06/2017",
"description": "Cambios para autoplay"
},
{
"date": "06/06/2017",
"description": "Compatibilidad con AutoPlay"
},
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "04/01/2017",
"description": "Release."
}
],
"categories": [
"tvshow"
],

View File

@@ -4,21 +4,6 @@
"active": true,
"adult": false,
"language": ["cast"],
"version": 1,
"changes": [
{
"date": "06/12/2016",
"description": "Release"
},
{
"date": "04/04/2017",
"description": "Migración a Httptools y algunos arreglos"
},
{
"date": "28/06/2017",
"description": "Correciones código y algunas mejoras"
}
],
"thumbnail": "http://imgur.com/KZoska0.png",
"banner": "miltorrents.png",
"categories": [

View File

@@ -6,17 +6,6 @@
"language": ["lat"],
"thumbnail": "https://s17.postimg.org/e8kp12mcv/miradetodo.png",
"banner": "https://s7.postimg.org/it21t0dej/miradetodo-banner.png",
"version": 1,
"changes": [
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "05/05/2017",
"description": "First release"
}
],
"categories": [
"movie"
],

View File

@@ -319,61 +319,34 @@ def findvideos(item):
duplicados = []
data = get_source(item.url)
src = data
patron = 'id=(?:div|player)(\d+)>.*?<iframe src=.*? data-lazy-src=(.*?) marginheight'
patron = 'id=(?:div|player)(\d+)>.*?data-lazy-src=(.*?) scrolling'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, videoitem in matches:
lang = scrapertools.find_single_match(src,
'<a href=#(?:div|player)%s.*?>.*?(Doblado|Subtitulado)<\/a>' % option)
'<a href=#(?:div|player)%s.*?>.*?(.*?)<\/a>' % option)
if 'audio ' in lang.lower():
lang=lang.lower().replace('audio ','')
lang=lang.capitalize()
data = get_source(videoitem)
if 'play' in videoitem:
url = scrapertools.find_single_match(data, '<span>Ver Online<.*?<li><a href=(.*?)><span class=icon>')
else:
url = scrapertools.find_single_match(data, '<iframe src=(.*?) scrolling=')
video_urls = scrapertools.find_multiple_matches(data, '<li><a href=(.*?)><span')
for video in video_urls:
video_data = get_source(video)
if not 'fastplay' in video:
new_url= scrapertools.find_single_match(video_data,'<li><a href=(.*?srt)><span')
data_final = get_source(new_url)
else:
data_final=video_data
url = scrapertools.find_single_match(data_final,'iframe src=(.*?) scrolling')
quality = item.quality
server = servertools.get_server_from_url(url)
title = item.contentTitle + ' [%s] [%s]' % (server, lang)
if item.quality != '':
title = item.contentTitle + ' [%s] [%s] [%s]' % (server, quality, lang)
url_list.append([url, lang])
for video_url in url_list:
language = video_url[1]
if 'jw.miradetodo' in video_url[0]:
data = get_source('http:' + video_url[0])
patron = 'label:.*?(.*?),.*?file:.*?(.*?)&app.*?\}'
matches = re.compile(patron, re.DOTALL).findall(data)
for quality, scrapedurl in matches:
quality = quality
title = item.contentTitle + ' (%s) %s' % (quality, language)
server = 'directo'
url = scrapedurl
url = url.replace('\/', '/')
subtitle = scrapertools.find_single_match(data, "tracks: \[\{file: '.*?linksub=(.*?)',label")
if url not in duplicados:
itemlist.append(item.clone(title=title,
action='play',
url=url,
quality=quality,
server=server,
subtitle=subtitle,
language=language
))
duplicados.append(url)
elif video_url != '':
itemlist.extend(servertools.find_video_items(data=video_url[0]))
import os
for videoitem in itemlist:
if videoitem.server != 'directo':
quality = item.quality
title = item.contentTitle + ' (%s) %s' % (videoitem.server, language)
if item.quality != '':
title = item.contentTitle + ' (%s) %s' % (quality, language)
videoitem.title = title
videoitem.channel = item.channel
videoitem.thumbnail = os.path.join(config.get_runtime_path(), "resources", "media", "servers",
"server_%s.png" % videoitem.server)
videoitem.quality = item.quality
if url!='':
itemlist.append(item.clone(title=title, url=url, action='play', server=server, language=lang))
if item.infoLabels['mediatype'] == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':

View File

@@ -0,0 +1,12 @@
{
"id": "mundiseries",
"name": "Mundiseries",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://imgur.com/GdGMFi1.png",
"banner": "https://imgur.com/1bDbYY1.png",
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import filtertools
from platformcode import config, logger
from platformcode import platformtools
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import autoplay
host = "http://mundiseries.com"
list_servers = ['okru']
list_quality = ['default']
def mainlist(item):
    """Root menu of the channel: one "Series" entry plus the autoplay option."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    series_url = urlparse.urljoin(host, "/lista-de-series")
    itemlist = [Item(channel=item.channel, action="lista", title="Series", url=series_url)]
    # Appends the "configure autoplay" entry to the menu in place.
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def lista(item):
    """List every series found on the site's series index page."""
    logger.info()
    raw = httptools.downloadpage(item.url).data
    # Collapse whitespace/entities so the single-line patterns match.
    raw = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", raw)
    # Captures: relative link, poster path, series name.
    patron = '<a href="([^"]+)"><img src="([^"]+)" alt="ver ([^"]+) online'
    return [item.clone(title=name, url=host + link, thumbnail=host + thumb, action="temporada")
            for link, thumb, name in scrapertools.find_multiple_matches(raw, patron)]
def temporada(item):
    """List the seasons available for the selected series.

    Scrapes every "item-temporada" card from the series page and emits one
    "episodios" item per season, carrying the autoplay context.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Collapse whitespace/entities so the single-line pattern matches.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # Fix: removed leftover debug call (logger.info("preon,:" + data)) that
    # dumped the entire page HTML into the Kodi log on every invocation.
    patron = '<a href="([^"]+)"><div class="item-temporada"><img alt=".+?" src="([^"]+)"><div .+?>Ver ([^"]+)<\/div><\/a>'
    matches = scrapertools.find_multiple_matches(data, patron)
    for link, thumbnail, name in matches:
        itemlist.append(item.clone(title=name, url=host + link, thumbnail=host + thumbnail,
                                   action="episodios", context=autoplay.context))
    return itemlist
def episodios(item):
    """List every episode of the selected season, plus a videolibrary entry."""
    logger.info()
    itemlist = []
    page = httptools.downloadpage(item.url).data
    # Collapse whitespace/entities so the single-line patterns match.
    page = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page)
    # Captures per episode: relative link, series name, chapter, season.
    patron_caps = 'href="http:.+?\/mundiseries.+?com([^"]+)" alt="([^"]+) Capitulo ([^"]+) Temporada ([^"]+)"'
    patron_show = '<h1 class="h-responsive center">.+?<font color=".+?>([^"]+)<\/a><\/font>'
    show = scrapertools.find_single_match(page, patron_show)
    for link, name, cap, temp in scrapertools.find_multiple_matches(page, patron_caps):
        # Strip stray '|' separators that sometimes leak into the alt text.
        cap, temp, name = (s.replace('|', '') for s in (cap, temp, name))
        title = "%sx%s %s" % (temp, str(cap).zfill(2), name)
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=title, url=host + link, show=show))
    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel, title="Añadir Temporada/Serie a la biblioteca de Kodi", url=item.url,
                             action="add_serie_to_library", extra="episodios", show=show))
    return itemlist
def findvideos(item):
    """Collect the playable video links embedded in an episode page.

    Downloads the page, lets servertools auto-detect every known video host
    in the raw HTML, tags each resulting item with this channel, and hands
    the list to autoplay before returning it.

    Fix: the original body also ran ``servertools.get_servers_itemlist``
    over a still-empty list and patched "###id;type" markers into it — dead
    copy/paste from the hdfull channel that could never execute (and it
    shadowed the builtins ``id``/``type`` and the ``item`` parameter), so
    that code has been removed.
    """
    logger.info()
    data = httptools.downloadpage(item.url).data
    # Auto-detect every embedded/linked video server in the page HTML.
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.channel = item.channel
    autoplay.start(itemlist, item)
    return itemlist

View File

@@ -6,33 +6,6 @@
"language": ["lat"],
"thumbnail": "https://s32.postimg.org/h1ewz9hhx/mundoflv.png",
"banner": "mundoflv.png",
"version": 1,
"changes": [
{
"date": "24/06/2017",
"description": "Cambios para autoplay"
},
{
"date": "06/06/2017",
"description": "Compatibilidad con AutoPlay"
},
{
"date": "03/06/2017",
"description": "Reparado por mala subida"
},
{
"date": "25/05/2017",
"description": "cambios esteticos"
},
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "28/01/2017",
"description": "Release."
}
],
"categories": [
"tvshow"
],

View File

@@ -6,17 +6,6 @@
"language": ["cast"],
"thumbnail": "newpct.png",
"banner": "newpct.png",
"version": 1,
"changes": [
{
"date": "15/03/2017",
"description": "limpieza código"
},
{
"date": "01/07/2016",
"description": "Eliminado código innecesario."
}
],
"categories": [
"movie",
"tvshow",

Some files were not shown because too many files have changed in this diff Show More