Merge remote-tracking branch 'upstream/master'

Committed by thepasto on 2019-04-29 14:51:27 +02:00
1389 changed files with 102916 additions and 129895 deletions

.directory Normal file (6 lines changed)

@@ -0,0 +1,6 @@
[Dolphin]
Timestamp=2019,4,23,18,58,8
Version=4
[Settings]
HiddenFilesShown=true

.gitignore vendored (2 lines changed)

@@ -1,5 +1,3 @@
*
!plugin.video.alfa
*.pyo
*.pyc
.DS_Store

LICENSE Executable file → Normal file (0 lines changed, mode change only)

README.md Executable file → Normal file (19 lines changed)

@@ -1,13 +1,8 @@
# Alfa addon
### A non-profit project with educational aims, based on the open-source code of pelisalacarta-ce, which lets you use Kodi or another compatible platform as a "browser" to watch videos hosted on web pages.
# Kodi On Demand
### An Italian fork of [Alfa](https://github.com/alfa-addon)
Everyone is free (indeed, invited!) to contribute; to do so you can open pull requests.
### This addon is completely free, so selling it on its own or as part of software bundled into any device is prohibited; it is strictly for educational and personal use.
## Contributing
If you wish to contribute to the project you are welcome to do so, but please keep these guidelines in mind
- To contribute with your pull requests you are asked to follow style rules based on Python's PEP8. This can be done from IDEs that integrate it, such as PyDev, PyCharm, or Ninja-IDE, or via extensions that can be added to, for example, Sublime or Atom (a short example follows below).
- Use class, method, and variable names in English to aid comprehension by non-Spanish speakers.
KOD, like Alfa, is under the GPL v3 license; you are therefore free to use parts of the code, provided you respect the terms of that license, which can be summarized as:
- Your addon must be released under the same license, i.e. be open source (whether the zip is visible to anyone is irrelevant; you need a git repository like this one)
- Add credits for everything you copy/modify, for example by adding a comment in the file in question or, better, by cherry-picking (so as to preserve the history)
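A minimal, hypothetical illustration of the style guideline above (PEP8 layout, English identifiers); all names here are invented for the example, not taken from the codebase:

```python
# Hypothetical example of the requested contribution style:
# PEP8 spacing and English class/method/variable names.
def build_episode_title(season, episode, show_name):
    """Return an 'SxEE - Show' label, zero-padding the episode number."""
    return '%sx%s - %s' % (season, str(episode).zfill(2), show_name)

print(build_episode_title(1, 3, 'Example Show'))  # 1x03 - Example Show
```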

addon.xml Normal file (32 lines changed)

@@ -0,0 +1,32 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.kod" name="Kodi on Demand" version="0.1.1" provider-name="KOD Team">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
</requires>
<extension point="xbmc.python.pluginsource" library="default.py">
<provides>video</provides>
</extension>
<extension point="xbmc.addon.metadata">
<summary lang="en">Kodi on Demand is a Kodi add-on to search and watch contents on the web.</summary>
<summary lang="it">Kodi on Demand è un addon di Kodi per cercare e guardare contenuti sul web.</summary>
<assets>
<icon>logo.png</icon>
<fanart>fanart.jpg</fanart>
<screenshot>resources/media/themes/ss/1.png</screenshot>
<screenshot>resources/media/themes/ss/2.png</screenshot>
<screenshot>resources/media/themes/ss/3.png</screenshot>
</assets>
<news>Benvenuto su KOD!</news>
<description lang="it">Naviga velocemente sul web e guarda i contenuti presenti</description>
<disclaimer>[COLOR red]The owners and submitters to this addon do not host or distribute any of the content displayed by these addons nor do they have any affiliation with the content providers.[/COLOR]
[COLOR yellow]Kodi © is a registered trademark of the XBMC Foundation. We are not connected to or in any other way affiliated with Kodi, Team Kodi, or the XBMC Foundation. Furthermore, any software, addons, or products offered by us will receive no support in official Kodi channels, including the Kodi forums and various social networks.[/COLOR]</disclaimer>
<platform>all</platform>
<license>GNU GPL v3</license>
<website>https://kodiondemand.github.io/</website>
<forum>https://t.me/kodiondemand</forum>
<source>https://github.com/kodiondemand/addon</source>
</extension>
<extension point="xbmc.service" library="videolibrary_service.py" start="login|startup">
</extension>
</addon>
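The `xbmc.python.pluginsource` extension above makes `default.py` the addon's entry point. As a hedged sketch (not the repo's actual `default.py`), this is how a Kodi Python plugin entry point typically reads its invocation arguments before dispatching to channel code:

```python
# Hedged sketch only: minimal Kodi plugin entry-point argument parsing.
# Python 2, matching the Kodi 18-era runtime this addon targets.
import sys
import urlparse

def run():
    # Kodi invokes the script as: [plugin url, handle, '?query=string']
    query = sys.argv[2].lstrip('?') if len(sys.argv) > 2 else ''
    params = dict(urlparse.parse_qsl(query))
    action = params.get('action', 'mainlist')
    print('dispatching action: %s' % action)

if __name__ == '__main__':
    run()
```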


@@ -114,7 +114,7 @@ def peliculas(item):
fulltitle = scrapedtitle
if subDiv:
fulltitle += support.typo(subText + ' _ () color limegreen')
fulltitle += support.typo(scrapedquality.strip()+ ' _ [] color blue')
fulltitle += support.typo(scrapedquality.strip()+ ' _ [] color kod')
itemlist.append(
Item(channel=item.channel,
@@ -158,6 +158,6 @@ def findvideos(item):
# Requerido para AutoPlay
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item, 'color blue')
support.videolibrary(itemlist, item, 'color kod')
return itemlist
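These hunks replace hard-coded color names ('blue') with the symbolic 'kod' theme color. The mapping below is an assumption, not the repo's actual implementation: one plausible way `support.typo` could resolve such a keyword into a Kodi color tag:

```python
# Assumed sketch only; the real support.typo may resolve 'kod' differently
# (for instance from the active theme). The ARGB value is hypothetical.
THEME_COLORS = {'kod': 'FF1E90FF'}

def apply_color(text, color_name):
    color = THEME_COLORS.get(color_name, color_name)
    return '[COLOR %s]%s[/COLOR]' % (color, text)

print(apply_color('Add to video library', 'kod'))
```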


@@ -104,6 +104,6 @@ def findvideos(item):
itemlist = filtertools.get_links(itemlist, item, list_language)
autoplay.start(itemlist, item)
support.videolibrary(itemlist, item ,'color blue bold')
support.videolibrary(itemlist, item ,'color kod bold')
return itemlist


@@ -1,7 +1,7 @@
{
"id": "altadefinizionehd",
"name": "AltadefinizioneHD",
"active": true,
"active": false,
"adult": false,
"language": ["ita"],
"thumbnail": "https://altadefinizione.doctor/wp-content/uploads/2019/02/logo.png",


@@ -1,52 +1,52 @@
{
"id": "animespace",
"name": "AnimeSpace",
"active": true,
"adult": false,
"language": [],
"thumbnail": "",
"banner": "",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE"
]
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
}
]
}
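In the settings block above, `"visible": "eq(-1,true)"` uses Kodi's settings condition syntax: the entry is shown only while the setting one position before it evaluates to true. At runtime the channel reads these values through the addon's config helper, exactly as the channel module in this commit does:

```python
# Pattern taken from the channel code below; it only runs inside the
# addon's Kodi runtime, where platformcode is importable.
from platformcode import config

check_links = config.get_setting('comprueba_enlaces', 'animespace')
num_to_check = config.get_setting('comprueba_enlaces_num', 'animespace')
```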


@@ -1,264 +1,264 @@
# -*- coding: utf-8 -*-
# -*- Channel AnimeSpace -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from core import httptools
from core import scrapertools
from core import servertools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
from channels import renumbertools
host = "https://animespace.tv/"
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animespace')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animespace')
IDIOMAS = {'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['directo', 'openload', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Nuevos Episodios",
action="new_episodes",
thumbnail=get_thumb('new_episodes', auto=True),
url=host))
itemlist.append(Item(channel=item.channel, title="Ultimas",
action="list_all",
thumbnail=get_thumb('last', auto=True),
url=host + 'emision'))
itemlist.append(Item(channel=item.channel, title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'animes'))
itemlist.append(Item(channel=item.channel, title="Anime",
action="list_all",
thumbnail=get_thumb('anime', auto=True),
url=host + 'categoria/anime'))
itemlist.append(Item(channel=item.channel, title="Películas",
action="list_all",
thumbnail=get_thumb('movies', auto=True),
url=host + 'categoria/pelicula'))
itemlist.append(Item(channel=item.channel, title="OVAs",
action="list_all",
thumbnail='',
url=host + 'categoria/ova'))
itemlist.append(Item(channel=item.channel, title="ONAs",
action="list_all",
thumbnail='',
url=host + 'categoria/ona'))
itemlist.append(Item(channel=item.channel, title="Especiales",
action="list_all",
thumbnail='',
url=host + 'categoria/especial'))
itemlist.append(Item(channel=item.channel, title="Buscar",
action="search",
url=host + 'search?q=',
thumbnail=get_thumb('search', auto=True),
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article.*?href="([^"]+)">.*?src="([^"]+)".*?'
patron += '<h3 class="Title">([^<]+)</h3>.*?"fecha">([^<]+)<.*?</i>([^<]+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
type = type.strip().lower()
url = scrapedurl
thumbnail = scrapedthumbnail
lang = 'VOSE'
title = scrapedtitle
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
new_item= Item(channel=item.channel,
action='episodios',
title=title,
url=url,
thumbnail=thumbnail,
language = lang,
infoLabels={'year':year}
)
if type != 'anime':
new_item.contentTitle=title
else:
new_item.plot=type
new_item.contentSerieName=title
new_item.context = context
itemlist.append(new_item)
# Paginacion
next_page = scrapertools.find_single_match(data,
'"page-item active">.*?</a>.*?<a class="page-link" href="([^"]+)">')
if next_page != "":
actual_page = scrapertools.find_single_match(item.url, '([^\?]+)?')
itemlist.append(Item(channel=item.channel,
action="list_all",
title=">> Página siguiente",
url=actual_page + next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'
))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
if texto != '':
return list_all(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def new_episodes(item):
logger.info()
itemlist = []
full_data = get_source(item.url)
data = scrapertools.find_single_match(full_data, '<section class="caps">.*?</section>')
patron = '<article.*?<a href="([^"]+)">.*?src="([^"]+)".*?'
patron += '<span class="episode">.*?</i>([^<]+)</span>.*?<h2 class="Title">([^<]+)</h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, epi, scrapedtitle in matches:
url = scrapedurl
lang = 'VOSE'
title = '%s - %s' % (scrapedtitle, epi)
itemlist.append(Item(channel=item.channel, title=title, url=url, thumbnail=scrapedthumbnail,
action='findvideos', language=lang))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a class="item" href="([^"]+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl in matches:
episode = scrapertools.find_single_match(scrapedurl, '.*?capitulo-(\d+)')
lang = 'VOSE'
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName)
url = scrapedurl
infoLabels['season'] = season
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
action='findvideos', language=lang, infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if item.contentSerieName != '' and config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName,
extra1='library'))
return itemlist
def findvideos(item):
import urllib
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id="Opt\d+">.*?src=(.*?) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl in matches:
server = ''
scrapedurl = scrapedurl.replace('&quot;', '')
new_data = get_source(scrapedurl)
if "/stream/" in scrapedurl:
scrapedurl = scrapertools.find_single_match(new_data, '<source src="([^"]+)"')
server = "directo"
else:
scrapedurl = scrapertools.find_single_match(scrapedurl, '.*?url=([^&]+)?')
scrapedurl = urllib.unquote(scrapedurl)
if scrapedurl != '':
itemlist.append(Item(channel=item.channel, title='%s', url=scrapedurl, action='play',
language = item.language, infoLabels=item.infoLabels, server=server))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
def newest(categoria):
itemlist = []
item = Item()
if categoria == 'anime':
item.url=host
itemlist = new_episodes(item)
return itemlist
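A standalone illustration of the URL-unwrapping step in `findvideos()` above: the channel pulls the `url=` query parameter out of an embed link and percent-decodes it. The sample URL is hypothetical; Python 2, matching the addon's runtime:

```python
import re
import urllib

embed = 'https://animespace.tv/player?url=https%3A%2F%2Fexample.com%2Fv%2Fabc&id=1'
inner = re.search(r'url=([^&]+)', embed).group(1)
print(urllib.unquote(inner))  # https://example.com/v/abc
```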


@@ -30,12 +30,12 @@ def mainlist(item):
itemlist =[]
support.menu(itemlist, '[B] > Anime ITA[/B]', 'build_menu', host+'/filter?language[]=1')
support.menu(itemlist, '[B] > Anime SUB[/B]', 'build_menu', host+'/filter?language[]=0')
support.menu(itemlist, ' > Anime A-Z', 'alfabetico', host+'/az-list')
support.menu(itemlist, 'Anime ITA submenu bold', 'build_menu', host+'/filter?language[]=1')
support.menu(itemlist, 'Anime SUB submenu bold', 'build_menu', host+'/filter?language[]=0')
support.menu(itemlist, 'Anime A-Z sub', 'alfabetico', host+'/az-list')
support.menu(itemlist, 'Anime - Ultimi Aggiunti', 'alfabetico', host+'/newest')
support.menu(itemlist, 'Anime - Ultimi Episodi', 'alfabetico', host+'/newest')
support.menu(itemlist, '[COLOR blue]Cerca...[/COLOR]', 'search')
support.menu(itemlist, 'Cerca...', 'search')
autoplay.init(item.channel, list_servers, list_quality)
@@ -52,9 +52,7 @@ def build_menu(item):
channel=item.channel,
action="video",
title="[B]Tutti[/B]",
url=item.url,
thumbnail=CategoriaThumbnail,
fanart=CategoriaFanart))
url=item.url))
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\t','',data)
@@ -73,7 +71,9 @@ def build_menu(item):
fulltitle=title,
show=title,
url=item.url,
html=html))
html=html,
thumbnail=item.thumbnail,
fanart=item.fanart))
# Elimina FLingua dal Menu
itemlist.pop(6)
@@ -413,13 +413,3 @@ def findvideos(item):
return itemlist
# riferimenti di servizio ====================================================
AnimeThumbnail = "http://img15.deviantart.net/f81c/i/2011/173/7/6/cursed_candies_anime_poster_by_careko-d3jnzg9.jpg"
AnimeFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CategoriaThumbnail = "http://static.europosters.cz/image/750/poster/street-fighter-anime-i4817.jpg"
CategoriaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
CercaThumbnail = "http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"
CercaFanart = "https://i.ytimg.com/vi/IAlbvyBdYdY/maxresdefault.jpg"
AvantiTxt = config.get_localized_string(30992)
AvantiImg = "http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"

File diff suppressed because it is too large.


@@ -59,7 +59,11 @@ def add_season(data=None):
if season != "":
heading = config.get_localized_string(70687)
episode = platformtools.dialog_numeric(0, heading)
if episode != "":
if episode == "0":
heading = config.get_localized_string(70688)
special = platformtools.dialog_numeric(0, heading)
return [int(season), int(episode), int(special)]
elif episode != '':
return [int(season), int(episode)]
@@ -90,14 +94,19 @@ def write_data(channel, show, data):
def renumber(itemlist, item='', typography=''):
log()
# log(itemlist)
# key_list= item.title
# sorted_list = sorted(itemlist, key=key_list)
# log(sorted_list)
if item:
try:
dict_series = jsontools.get_node_from_file(item.channel, TAG_TVSHOW_RENUMERATE)
SERIES = dict_series[item.show]['season_episode']
SERIES = dict_series[item.show.rstrip()]['season_episode']
S = SERIES[0]
E = SERIES[1]
ID = SERIES[2]
SP = SERIES[2]
ID = SERIES[3]
page = 1
epList = []
@@ -110,16 +119,24 @@ def renumber(itemlist, item='', typography=''):
if data:
for episodes in data['data']:
if episodes['airedSeason'] >= S:
if E == 0:
epList.append([0, SP])
E = 1
if episodes['airedEpisodeNumber'] >= E:
epList.append(str(episodes['airedSeason']) + 'x' + str(episodes['airedEpisodeNumber']))
epList.append([episodes['airedSeason'], episodes['airedEpisodeNumber']])
page = page + 1
else:
exist = False
epList.sort()
ep = 0
for item in itemlist:
item.title = typo(epList[ep] + ' - ', typography) + item.title
s = str(epList[ep][0])
e = str(epList[ep][1])
item.title = typo(s + 'x'+ e + ' - ', typography) + item.title
ep = ep + 1
except:
return itemlist
else:
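A hedged reading of the bookkeeping this hunk introduces: the per-show entry stored by renumbertools now appears to be `[season, episode, special, id]` (`SERIES[0..3]` above), and episode labels are rendered as "SxE - title". The values below are hypothetical:

```python
def format_episode(season, episode, title):
    return '%sx%s - %s' % (season, episode, title)

series = [2, 5, 0, 12345]  # assumed layout: season, episode, special, show id
print(format_episode(series[0], series[1], 'Some episode'))  # 2x5 - Some episode
```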


@@ -1,12 +1,12 @@
{
"id": "canalporno",
"name": "Canalporno",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "http://i.imgur.com/gAbPcvT.png?1",
"banner": "canalporno.png",
"categories": [
"adult"
]
}


@@ -1,87 +1,87 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
host = "http://www.canalporno.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="findvideos", title="Útimos videos", url=host))
itemlist.append(item.clone(action="categorias", title="Listado Categorias",
url=host + "/categorias"))
itemlist.append(item.clone(action="search", title="Buscar", url=host + "/search/?q=%s"))
return itemlist
def search(item, texto):
logger.info()
try:
item.url = item.url % texto
itemlist = findvideos(item)
return sorted(itemlist, key=lambda it: it.title)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<img src="([^"]+)".*?alt="([^"]+)".*?<h2><a href="([^"]+)">.*?' \
'<div class="duracion"><span class="ico-duracion sprite"></span> ([^"]+) min</div>'
matches = scrapertools.find_multiple_matches(data, patron)
for thumbnail, title, url, time in matches:
scrapedtitle = time + " - " + title
scrapedurl = host + url
scrapedthumbnail = thumbnail
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail))
patron = '<div class="paginacion">.*?<span class="selected">.*?<a href="([^"]+)">([^"]+)</a>'
matches = scrapertools.find_multiple_matches(data, patron)
for url, title in matches:
url = host + url
title = "Página %s" % title
itemlist.append(item.clone(action="findvideos", title=title, url=url))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, '<ul class="ordenar-por ordenar-por-categoria">'
'(.*?)<\/ul>')
#patron = '<div class="muestra-categorias">.*?<a class="thumb" href="([^"]+)".*?<img class="categorias" src="([^"]+)".*?<div class="nombre">([^"]+)</div>'
patron = "<li><a href='([^']+)'\s?title='([^']+)'>.*?<\/a><\/li>"
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title in matches:
url = host + url
#thumbnail = "http:" + thumbnail
itemlist.append(item.clone(action="findvideos", title=title, url=url))
return itemlist
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
url = scrapertools.find_single_match(data, '<source src="([^"]+)"')
itemlist.append(item.clone(url=url, server="directo"))
return itemlist
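A standalone demo of the pagination scrape in `findvideos()` above, with `re.findall` standing in for `scrapertools.find_multiple_matches` and a hypothetical HTML snippet:

```python
import re

html = '''<div class="paginacion"><span class="selected">1</span>
<a href="/videos?page=2">2</a></div>'''
patron = r'<div class="paginacion">.*?<span class="selected">.*?<a href="([^"]+)">([^"]+)</a>'
for url, page in re.findall(patron, html, re.DOTALL):
    print('Página %s -> %s' % (page, url))  # Página 2 -> /videos?page=2
```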


@@ -1,14 +1,14 @@
{
"id": "cat3plus",
"name": "Cat3plus",
"active": true,
"adult": true,
"language": [],
"thumbnail": "https://i.imgur.com/SJxXKa2.png",
"fanart": "https://i.imgur.com/ejCwTxT.jpg",
"banner": "https://i.imgur.com/bXUyk6m.png",
"categories": [
"movie",
"vo"
]
}


@@ -1,130 +1,130 @@
# -*- coding: utf-8 -*-
# -*- Channel SleazeMovies -*-
# -*- Created for Alfa-addon -*-
# -*- By Sculkurt -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
host = 'http://www.cat3plus.com/'
headers = [
['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
['Accept-Encoding', 'gzip, deflate'],
['Referer', host]
]
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)))
itemlist.append(item.clone(title="Años", action="years", url=host, thumbnail=get_thumb('year', auto=True)))
itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)))
return itemlist
def years(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url, cookies=False).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = "<a dir='ltr' href='([^']+)'>([^<]+)</a>"
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = "<h2 class='post-title entry-title'><a href='([^']+)'>([^(]+).*?\(([^)]+).*?"
patron += 'src="([^"]+).*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, year, img in matches:
itemlist.append(Item(channel = item.channel,
title = scrapedtitle,
url = scrapedurl,
action = "findvideos",
thumbnail = img,
contentTitle = scrapedtitle,
contentType = "movie",
infoLabels = {'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
# Extraer la marca de siguiente página
next_page = scrapertools.find_single_match(data, "<a class='blog-pager-older-link' href='([^']+)'")
if next_page != "":
itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True))
return itemlist
def search(item, texto):
logger.info()
if texto != "":
texto = texto.replace(" ", "+")
item.url = host + "search?q=" + texto
item.extra = "busqueda"
try:
return list_all(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<h2>\s*<a href="([^"]+)" target="_blank">.*?</a></h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
data = httptools.downloadpage(url, headers={'Referer': item.url}).data
itemlist.extend(servertools.find_video_items(data=data))
for video in itemlist:
video.channel = item.channel
video.contentTitle = item.contentTitle
video.title = video.server.capitalize()
# Opción "Añadir esta pelicula a la videoteca"
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel = item.channel,
title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url = item.url,
action = "add_pelicula_to_library",
extra = "findvideos",
contentTitle = item.contentTitle,
thumbnail = item.thumbnail
))
return itemlist
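A standalone demo of the title/year split performed by the `list_all()` pattern above (the post markup is a hypothetical sample):

```python
import re

line = ("<h2 class='post-title entry-title'>"
        "<a href='http://example.com/p'>Some Movie (1995) DVDRip</a></h2>")
m = re.search(r"<a href='([^']+)'>([^(]+).*?\(([^)]+)", line)
url, title, year = m.group(1), m.group(2).strip(), m.group(3)
print('%s (%s) -> %s' % (title, year, url))  # Some Movie (1995) -> http://example.com/p
```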


@@ -45,13 +45,13 @@ def mainlist(item):
support.menu(itemlist, 'HD submenu', 'menu', host, args="Film HD Streaming")
support.menu(itemlist, 'Per genere submenu', 'menu', host, args="Film per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host, args="Film per Anno")
support.menu(itemlist, 'Cerca... submenu color blue', 'search', host, args='film')
support.menu(itemlist, 'Cerca film... submenu', 'search', host, args='film')
support.menu(itemlist, 'Serie TV bold', 'peliculas', host + '/serietv/', contentType='episode')
support.menu(itemlist, 'Per Lettera submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Lettera")
support.menu(itemlist, 'Per Genere submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Genere")
support.menu(itemlist, 'Per anno submenu', 'menu', host + '/serietv/', contentType='episode', args="Serie-Tv per Anno")
support.menu(itemlist, 'Cerca... submenu color blue', 'search', host + '/serietv/', contentType='episode', args='serie')
support.menu(itemlist, 'Cerca serie... submenu', 'search', host + '/serietv/', contentType='episode', args='serie')
autoplay.show_option(item.channel, itemlist)
@@ -139,7 +139,7 @@ def findvideos(item):
def load_links(itemlist, re_txt, color, desc_txt, quality=""):
streaming = scrapertoolsV2.find_single_match(data, re_txt).replace('"', '')
support.log('STREAMING=',streaming)
patron = '<td><a.*?href=(.*?) target[^>]+>([^<]+)<'
patron = '<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(streaming)
for scrapedurl, scrapedtitle in matches:
logger.debug("##### findvideos %s ## %s ## %s ##" % (desc_txt, scrapedurl, scrapedtitle))
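Why the pattern change in this hunk matters: link rows whose anchor carries `rel=` instead of `target=` are now matched too. A quick check with hypothetical rows:

```python
import re

rows = ('<td><a class="x" href=http://host/a target="_blank">Mirror A</td>'
        '<td><a class="x" href=http://host/b rel="nofollow">Mirror B</td>')
old = r'<td><a.*?href=(.*?) target[^>]+>([^<]+)<'
new = r'<td><a.*?href=(.*?) (?:target|rel)[^>]+>([^<]+)<'
print(re.findall(old, rows))  # misses Mirror B
print(re.findall(new, rows))  # finds both mirrors
```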


@@ -1,13 +1,13 @@
{
"id": "cinehindi",
"name": "CineHindi",
"active": true,
"adult": false,
"language": ["vos"],
"thumbnail": "cinehindi.png",
"banner": "http://i.imgur.com/cau9TVe.png",
"categories": [
"movie",
"vos"
]
}


@@ -1,163 +1,163 @@
# -*- coding: UTF-8 -*-
import re
import urlparse
from channelselector import get_thumb
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
IDIOMAS = {'Hindi': 'Hindi'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'netutv']
host = "http://www.cinehindi.com/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host, thumbnail = get_thumb("genres", auto = True)))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host, thumbnail = get_thumb("newest", auto = True)))
#itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
# url=urlparse.urljoin(host, "proximamente")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s="), thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def genero(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(host).data
patron = '<option class=.*? value=([^<]+)>'
patron += '([^<]+)<\/option>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
if 'Próximas Películas' in scrapedtitle:
continue
itemlist.append(item.clone(action='lista', title=scrapedtitle, cat=scrapedurl))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
def proximas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data) # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
patron = 'class="item">.*?' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href="([^"]+).*?' # scrapedurl
patron += '<img src="([^"]+).*?' # scrapedthumbnail
patron += 'alt="([^"]+).*?' # scrapedtitle
patron += '<span class="player">.+?<span class="year">([^"]+)<\/span>' # scrapedyear
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
if "ver" in scrapedurl:
scrapedtitle = scrapedtitle + " [" + scrapedyear + "]"
else:
scrapedtitle = scrapedtitle + " [" + scrapedyear + "]" + '(Proximamente)'
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="findvideos", extra=scrapedtitle,
show=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie",
context=["buscar_trailer"]))
# Paginacion
patron_pag = '<a rel=.+?nofollow.+? class=.+?page larger.+? href=.+?(.+?)proximamente.+?>([^"]+)<\/a>'
pagina = scrapertools.find_multiple_matches(data, patron_pag)
for next_page_url, i in pagina:
if int(i) == 2:
item.url = next_page_url + 'proximamente/page/' + str(i) + '/'
itemlist.append(Item(channel=item.channel, action="proximas", title=">> Página siguiente", url=item.url,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
return itemlist
def lista(item):
logger.info()
itemlist = []
if not item.cat:
data = httptools.downloadpage(item.url).data
else:
url = httptools.downloadpage("%s?cat=%s" %(host, item.cat), follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage(url).data
bloque = data#scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
patron = '<div id=mt.+?>' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href=([^"]+)\/><div class=image>' # scrapedurl
patron += '<img src=([^"]+) alt=.*?' # scrapedthumbnail
patron += '<span class=tt>([^"]+)<\/span>' # scrapedtitle
patron += '<span class=ttx>([^"]+)<div class=degradado>.*?' # scrapedplot
patron += '<span class=year>([^"]+)<\/span><\/div><\/div>' # scrapedfixyear
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedyear in matches:
#patron = '<span class="year">([^<]+)' # scrapedyear
#scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
scrapedtitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle,'\(\d{4}\)'),'').strip()
title = scrapedtitle
if scrapedyear:
title += ' (%s)' % (scrapedyear)
item.infoLabels['year'] = int(scrapedyear)
patron = '<span class="calidad2">([^<]+).*?' # scrapedquality
#scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
#if scrapedquality:
# title += ' [%s]' % (scrapedquality)
itemlist.append(
item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, plot=scrapedplot, contentType="movie", context=["buscar_trailer"]))
tmdb.set_infoLabels(itemlist)
# Paginacion
patron = 'rel="next" href="([^"]+)'
next_page_url = scrapertools.find_single_match(data, patron)
if next_page_url != "":
item.url = next_page_url
itemlist.append(Item(channel=item.channel, action="lista", title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=next_page_url,
thumbnail='https://s32.postimg.cc/4zppxf5j9/siguiente.png'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
itemlist1 = []
data = httptools.downloadpage(item.url).data
itemlist1.extend(servertools.find_video_items(data=data))
patron_show = '<div class="data"><h1 itemprop="name">([^<]+)<\/h1>'
show = scrapertools.find_single_match(data, patron_show)
for videoitem in itemlist1:
videoitem.channel = item.channel
videoitem.infoLabels = item.infoLabels
for i in range(len(itemlist1)):
if not 'youtube' in itemlist1[i].title:
itemlist.append(itemlist1[i])
tmdb.set_infoLabels(itemlist, True)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.contentChannel!='videolibrary':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=show))
return itemlist
def play(item):
logger.info()
item.thumbnail = item.contentThumbnail
return [item]
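An assumed standalone equivalent (plain `httplib`) of the `httptools.downloadpage(..., follow_redirects=False, only_headers=True)` call in `lista()` above: request only the headers, do not follow the redirect, and read the `Location` header that points at the real category URL:

```python
import httplib
import urlparse

def resolve_redirect(url):
    parts = urlparse.urlsplit(url)
    path = parts.path or '/'
    if parts.query:
        path += '?' + parts.query
    conn = httplib.HTTPConnection(parts.netloc)
    conn.request('HEAD', path)
    return conn.getresponse().getheader('location', '')

print(resolve_redirect('http://www.cinehindi.com/?cat=1'))  # cat id hypothetical
```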


@@ -6,7 +6,7 @@ import base64
import re
import urlparse
from channels import autoplay
from channels import autoplay, support
from channels import filtertools
from core import scrapertools, servertools, httptools
from platformcode import logger, config
@@ -18,14 +18,14 @@ from core import tmdb
# Necessario per Autoplay
IDIOMAS = {'Italiano': 'IT'}
list_language = IDIOMAS.values()
list_servers = ['wstream', 'openload', 'streamango', 'akstream', 'clipwatching', 'cloudvideo', 'youtube']
list_servers = ['akstream', 'wstream', 'openload', 'streamango']
list_quality = ['default']
# Necessario per Verifica Link
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'cinemalibero')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'cinemalibero')
host = 'https://www.cinemalibero.center'
host = 'https://www.cinemalibero.icu'
headers = [['Referer', host]]
@@ -37,47 +37,20 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality) # Necessario per Autoplay
# Menu Principale
itemlist = [Item(channel=item.channel,
action='video',
title='Film',
url=host+'/category/film/',
contentType='movie',
thumbnail=''),
Item(channel=item.channel,
action='sottomenu_film',
title='Generi Film',
url=host,
contentType='movie',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Serie TV',
url=host+'/category/serie-tv/',
contentType='episode',
extra='tv',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Anime',
url=host+'/category/anime-giapponesi/',
contentType='episode',
thumbnail=''),
Item(channel=item.channel,
action='video',
title='Sport',
url=host+'/category/sport/',
contentType='movie',
thumbnail=''),
Item(channel=item.channel,
action='search',
title='[B]Cerca...[/B]',
thumbnail=''),
]
itemlist = []
support.menu(itemlist, 'Film bold', 'video', host+'/category/film/')
support.menu(itemlist, 'Generi submenu', 'genres', host)
support.menu(itemlist, 'Cerca film submenu', 'search', host)
support.menu(itemlist, 'Serie TV bold', 'video', host+'/category/serie-tv/', contentType='episode')
support.menu(itemlist, 'Anime submenu', 'video', host+'/category/anime-giapponesi/', contentType='episode')
support.menu(itemlist, 'Cerca serie submenu', 'search', host, contentType='episode')
support.menu(itemlist, 'Sport bold', 'video', host+'/category/sport/')
autoplay.show_option(item.channel, itemlist) # Necessario per Autoplay (Menu Configurazione)
return itemlist
def search(item, texto):
logger.info("[cinemalibero.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
@@ -91,13 +64,20 @@ def search(item, texto):
return []
def genres(item):
return support.scrape(item, patron_block=r'<div id="bordobar" class="dropdown-menu(.*?)</li>', patron=r'<a class="dropdown-item" href="([^"]+)" title="([A-z]+)"', listGroups=['url', 'title'], action='video')
def video(item):
logger.info('[cinemalibero.py] video')
itemlist = []
if host not in item.url:
item.url = host + item.url
# Carica la pagina
data = httptools.downloadpage(item.url).data.replace('\n','').replace('\t','')
block = scrapertools.find_single_match(data, '<div class="container">.*?class="col-md-12">(.*?)<div class=(?:"container"|"bg-dark ")>')
block = scrapertools.find_single_match(data, '<div class="container">.*?class="col-md-12[^"]*?">(.*?)<div class=(?:"container"|"bg-dark ")>')
# Estrae i contenuti
matches = re.compile(r'<div class="col-lg-3">(.*?)<\/a><\/div>', re.DOTALL).findall(block)
@@ -133,7 +113,7 @@ def video(item):
else:
tipologia = 'movie'
action = 'select'
itemlist.append(
Item(channel=item.channel,
action=action,
@@ -143,7 +123,7 @@ def video(item):
quality=quality,
url=url,
thumbnail=thumb,
infoLabels=year,
infoLabels={'year': year},
show=title))
# Next page
@@ -202,6 +182,9 @@ def findvideos(item): # Questa def. deve sempre essere nominata findvideos
logger.info('[cinemalibero.py] findvideos')
itemlist = []
if item.args == 'direct':
return servertools.find_video_items(item)
if item.contentType == 'episode':
data = item.url.lower()
block = scrapertools.find_single_match(data,r'>streaming.*?<\/strong>*?<\/h2>(.*?)<\/div>')
@@ -252,15 +235,14 @@ def findvideos(item): # Questa def. deve sempre essere nominata findvideos
return itemlist
def episodios(item): # Questa def. deve sempre essere nominata episodios
logger.info('[cinemalibero.py] episodios')
itemlist = []
extra =''
extra = ''
# Carica la pagina
data = httptools.downloadpage(item.url, headers=headers).data
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)<\/div>')
block = scrapertools.find_single_match(data, r'<div class="col-md-8 bg-white rounded-left p-5"><div>(.*?)at-below-post')
if re.findall('rel="category tag">serie', data, re.IGNORECASE):
# logger.info('select = ### è una serie ###')
extra='serie'
@@ -268,9 +250,6 @@ def episodios(item): # Questa def. deve sempre essere nominata episodios
if re.findall('episodi', block, re.IGNORECASE):
# logger.info('select = ### è un anime ###')
extra='anime'
block = re.sub(r'<h2>.*?<\/h2>','',block)
block = block.replace('<p>','').replace('<p style="text-align: left;">','').replace('&lt;','<').replace('-<','<').replace('&#8211;<','<').replace('&#8211; <','<').replace('<strong>','<stop><start><strong>')+'<stop>'
@@ -280,21 +259,35 @@ def episodios(item): # Questa def. deve sempre essere nominata episodios
if extra == 'serie':
block = block.replace('<br /> <a','<a')
matches = re.compile(r'<start>.*?(?:stagione|Stagione)(.*?)<\/(?:strong|span)><\/p>(.*?)<stop>', re.DOTALL).findall(block)
for lang, html in matches:
lang = re.sub('<.*?>','',lang)
html = html.replace('<br />','\n').replace('</p>','\n')
matches = re.compile(r'([^<]+)([^\n]+)\n', re.DOTALL).findall(html)
for scrapedtitle, html in matches:
if not matches:
matches = scrapertools.find_multiple_matches(block, r'<a href="([^"]+)"[^>]+>(Episodio [0-9]+)</a>')
for scrapedurl, scrapedtitle in matches:
scrapedtitle = re.sub(r'Episodio ([0-9]+)', r'Episodio 1x\1', scrapedtitle)
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle + ' - (' + lang + ')',
fulltitle=scrapedtitle,
show=scrapedtitle,
url=html))
action="findvideos",
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
show=item.fulltitle,
url=scrapedurl,
args='direct'))
else:
for lang, html in matches:
lang = re.sub('<.*?>','',lang)
html = html.replace('<br />','\n').replace('</p>', '\n')
matches = re.compile(r'([^<]+)([^\n]+)\n', re.DOTALL).findall(html)
for scrapedtitle, html in matches:
itemlist.append(
Item(channel=item.channel,
action="findvideos",
contentType='episode',
title=scrapedtitle + ' - (' + lang + ')',
fulltitle=scrapedtitle,
show=item.fulltitle,
url=html))
elif extra == 'anime':
block = re.sub(r'<start.*?(?:download:|Download:).*?<stop>','',block)
@@ -310,7 +303,7 @@ def episodios(item): # Questa def. deve sempre essere nominata episodios
contentType='episode',
title=scrapedtitle,
fulltitle=scrapedtitle,
show=scrapedtitle,
show=item.fulltitle,
url=html))
else:
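The series branch above relies on sentinel tokens: every <strong> is rewritten to <stop><start><strong> and a trailing <stop> is appended, so each season block can then be captured between <start> and <stop>. A toy illustration of the trick (the sample HTML is made up):

# Toy illustration of the <start>/<stop> sentinel trick used above.
import re

html = '<strong>Stagione 1 ITA</strong><br />1x01 ... <br />1x02 ...'
block = html.replace('<strong>', '<stop><start><strong>') + '<stop>'
matches = re.findall(r'<start>.*?[Ss]tagione(.*?)</strong>(.*?)<stop>', block, re.DOTALL)
for lang, body in matches:
    print(re.sub('<.*?>', '', lang).strip())  # -> '1 ITA'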


@@ -1,12 +1,12 @@
{
"id": "cinetemagay",
"name": "Cinetemagay",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "cinetemagay.png",
"banner": "cinetemagay.png",
"categories": [
"adult"
]
}


@@ -1,128 +1,128 @@
# -*- coding: utf-8 -*-
import os
import re
from core import scrapertools
from core import servertools
from core import httptools
from core.item import Item
from platformcode import config, logger
IMAGES_PATH = os.path.join(config.get_runtime_path(), 'resources', 'images', 'cinetemagay')
def strip_tags(value):
return re.sub(r'<[^>]*?>', '', value)
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay latinoamericano",
url="http://cinegaylatinoamericano.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
thumbnail="http://www.americaeconomia.com/sites/default/files/imagecache/foto_nota/homosexual1.jpg"))
itemlist.append(Item(channel=item.channel, action="lista", title="Cine y cortos gay",
url="http://cineycortosgay.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
thumbnail="http://www.elmolar.org/wp-content/uploads/2015/05/cortometraje.jpg"))
itemlist.append(Item(channel=item.channel, action="lista", title="Cine gay online (México)",
url="http://cinegayonlinemexico.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
thumbnail="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTmmqL6tS2Ced1VoxlGQT0q-ibPEz1DCV3E1waHFDI5KT0pg1lJ"))
itemlist.append(Item(channel=item.channel, action="lista", title="Sentido gay",
url="http://www.sentidogay.blogspot.com.es//feeds/posts/default/?max-results=100&start-index=1",
thumbnail="http://1.bp.blogspot.com/-epOPgDD_MQw/VPGZGQOou1I/AAAAAAAAAkI/lC25GrukDuo/s1048/SentidoGay.jpg"))
itemlist.append(Item(channel=item.channel, action="lista", title="PGPA",
url="http://pgpa.blogspot.com.es/feeds/posts/default/?max-results=100&start-index=1",
thumbnail="http://themes.googleusercontent.com/image?id=0BwVBOzw_-hbMNTRlZjk2YWMtYTVlMC00ZjZjLWI3OWEtMWEzZDEzYWVjZmQ4"))
return itemlist
def lista(item):
logger.info()
itemlist = []
# Descarga la pagina
data = httptools.downloadpage(item.url).data
# Extrae las entradas (carpetas)
patronvideos = '&lt;img .*?src=&quot;(.*?)&quot;'
patronvideos += "(.*?)<link rel='alternate' type='text/html' href='([^']+)' title='([^']+)'.*?>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
for match in matches:
scrapedtitle = match[3]
scrapedtitle = scrapedtitle.replace("&apos;", "'")
scrapedtitle = scrapedtitle.replace("&quot;", "'")
scrapedtitle = scrapedtitle.replace("&amp;amp;", "'")
scrapedtitle = scrapedtitle.replace("&amp;#39;", "'")
scrapedurl = match[2]
scrapedthumbnail = match[0]
imagen = ""
scrapedplot = match[1]
tipo = match[1]
logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
scrapedplot = "<" + scrapedplot
scrapedplot = scrapedplot.replace("&gt;", ">")
scrapedplot = scrapedplot.replace("&lt;", "<")
scrapedplot = scrapedplot.replace("</div>", "\n")
scrapedplot = scrapedplot.replace("<br />", "\n")
scrapedplot = scrapedplot.replace("&amp;", "")
scrapedplot = scrapedplot.replace("nbsp;", "")
scrapedplot = strip_tags(scrapedplot)
itemlist.append(
Item(channel=item.channel, action="detail", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
plot=scrapedurl + scrapedplot, folder=True))
variable = item.url.split("index=")[1]
variable = int(variable)
variable += 100
variable = str(variable)
variable_url = item.url.split("index=")[0]
url_nueva = variable_url + "index=" + variable
itemlist.append(
Item(channel=item.channel, action="lista", title="Ir a la página siguiente (desde " + variable + ")",
url=url_nueva, thumbnail="", plot="Pasar a la página siguiente (en grupos de 100)\n\n" + url_nueva))
return itemlist
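Pagination here leans on the Blogger feed API: max-results caps each page at 100 posts and start-index is bumped by the same amount to request the next slice. The URL arithmetic, condensed (the feed URL is illustrative):

# Condensed version of the start-index arithmetic above.
url = "http://example.blogspot.com/feeds/posts/default/?max-results=100&start-index=1"
base, index = url.split("index=")
next_url = base + "index=" + str(int(index) + 100)
# next_url ends in ...&start-index=101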
def detail(item):
logger.info()
itemlist = []
# Descarga la pagina
data = httptools.downloadpage(item.url).data
data = data.replace("%3A", ":")
data = data.replace("%2F", "/")
data = data.replace("%3D", "=")
data = data.replace("%3", "?")
data = data.replace("%26", "&")
descripcion = ""
plot = ""
patrondescrip = 'SINOPSIS:(.*?)'
matches = re.compile(patrondescrip, re.DOTALL).findall(data)
if len(matches) > 0:
descripcion = matches[0]
descripcion = descripcion.replace("&nbsp;", "")
descripcion = descripcion.replace("<br/>", "")
descripcion = descripcion.replace("\r", "")
descripcion = descripcion.replace("\n", " ")
descripcion = descripcion.replace("\t", " ")
descripcion = re.sub("<[^>]+>", " ", descripcion)
descripcion = descripcion
try:
plot = unicode(descripcion, "utf-8").encode("iso-8859-1")
except:
plot = descripcion
# Busca los enlaces a los videos de servidores
video_itemlist = servertools.find_video_items(data=data)
for video_item in video_itemlist:
itemlist.append(Item(channel=item.channel, action="play", server=video_item.server,
title=item.title + " " + video_item.title, url=video_item.url, thumbnail=item.thumbnail,
plot=video_item.url, folder=False))
return itemlist
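The try/except around the plot is a Python 2 idiom: decode the scraped bytes as UTF-8 and re-encode them as ISO-8859-1 for display, keeping the raw string whenever conversion fails. Isolated, with the bare except narrowed (a sketch, not a helper from this repo):

# Python 2 sketch of the plot re-encoding fallback used in detail().
def to_latin1(text):
    try:
        return unicode(text, "utf-8").encode("iso-8859-1")
    except (UnicodeDecodeError, UnicodeEncodeError):
        return text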


@@ -1,33 +1,33 @@
{
"id": "community",
"name": "Community",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "",
"banner": "",
"fanart": "",
"categories": [
"direct",
"movie",
"tvshow",
"vo"
],
"settings": [
{
"id": "filterlanguages",
"type": "list",
"label": "Mostrar enlaces del canal en idioma...",
"default": 3,
"enabled": true,
"visible": true,
"lvalues": [
"No Filtrar",
"LAT",
"CAST",
"VO",
"VOSE"
]
}
]
}


@@ -1,299 +1,299 @@
# -*- coding: utf-8 -*-
# -*- Channel Community -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
import os
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config, platformtools
from channels import autoplay
from channels import filtertools
list_data = {}
list_language = ['LAT', 'CAST', 'VO', 'VOSE']
list_servers = ['directo']
list_quality = ['SD', '720', '1080', '4k']
def mainlist(item):
logger.info()
path = os.path.join(config.get_data_path(), 'community_channels.json')
if not os.path.exists(path):
with open(path, "w") as file:
file.write('{"channels":{}}')
file.close()
autoplay.init(item.channel, list_servers, list_quality)
return show_channels(item)
def show_channels(item):
logger.info()
itemlist = []
context = [{"title": "Eliminar este canal",
"action": "remove_channel",
"channel": "community"}]
path = os.path.join(config.get_data_path(), 'community_channels.json')
file = open(path, "r")
json = jsontools.load(file.read())
itemlist.append(Item(channel=item.channel, title=config.get_localized_string(70676), action='add_channel', thumbnail=get_thumb('add.png')))
for key, channel in json['channels'].items():
if 'poster' in channel:
poster = channel['poster']
else:
poster = ''
itemlist.append(Item(channel=item.channel, title=channel['channel_name'], url=channel['path'],
thumbnail=poster, action='show_menu', channel_id = key, context=context))
return itemlist
def load_json(item):
logger.info()
if item.url.startswith('http'):
json_file = httptools.downloadpage(item.url).data
else:
json_file = open(item.url, "r").read()
json_data = jsontools.load(json_file)
return json_data
def show_menu(item):
global list_data
logger.info()
itemlist = []
json_data = load_json(item)
if "menu" in json_data:
for option in json_data['menu']:
itemlist.append(Item(channel=item.channel, title=option['title'], action='show_menu', url=option['link']))
autoplay.show_option(item.channel, itemlist)
return itemlist
if "movies_list" in json_data:
item.media_type='movies_list'
elif "tvshows_list" in json_data:
item.media_type = 'tvshows_list'
elif "episodes_list" in json_data:
item.media_type = 'episodes_list'
return list_all(item)
return itemlist
def list_all(item):
logger.info()
itemlist = []
media_type = item.media_type
json_data = load_json(item)
for media in json_data[media_type]:
quality, language, plot, poster = set_extra_values(media)
title = media['title']
title = set_title(title, language, quality)
new_item = Item(channel=item.channel, title=title, quality=quality,
language=language, plot=plot, thumbnail=poster)
if 'movies_list' in json_data:
new_item.url = media
new_item.contentTitle = media['title']
new_item.action = 'findvideos'
if 'year' in media:
new_item.infoLabels['year'] = media['year']
else:
new_item.url = media['seasons_list']
new_item.contentSerieName = media['title']
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def seasons(item):
logger.info()
itemlist = []
infoLabels = item.infoLabels
list_seasons = item.url
for season in list_seasons:
infoLabels['season'] = season['season']
title = config.get_localized_string(60027) % season['season']
itemlist.append(Item(channel=item.channel, title=title, url=season['link'], action='episodesxseason',
contentSeasonNumber=season['season'], infoLabels=infoLabels))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist = sorted(itemlist, key=lambda i: i.title)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
json_data = load_json(item)
infoLabels = item.infoLabels
season_number = infoLabels['season']
for episode in json_data['episodes_list']:
episode_number = episode['number']
infoLabels['season'] = season_number
infoLabels['episode'] = episode_number
title = config.get_localized_string(70677) % (season_number, episode_number, episode_number)
itemlist.append(Item(channel=item.channel, title=title, url=episode, action='findvideos',
contentEpisodeNumber=episode_number, infoLabels=infoLabels))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
for url in item.url['links']:
quality, language, plot, poster = set_extra_values(url)
title = ''
title = set_title(title, language, quality)
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url['url'], action='play', quality=quality,
language=language, infoLabels = item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
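Taken together, load_json, show_menu, list_all, seasons, episodesxseason and findvideos imply a channel file shaped roughly like the following. The field names come from the code; every value is illustrative, and a real file would carry either a menu or one of the *_list keys, not both:

{
    "channel_name": "Example community channel",
    "menu": [
        {"title": "Movies", "link": "https://example.com/movies.json"}
    ],
    "movies_list": [
        {"title": "Some movie", "year": "2019", "quality": "1080",
         "language": "VOSE", "plot": "...", "poster": "",
         "links": [{"url": "https://example.com/video1", "language": "VOSE"}]}
    ],
    "tvshows_list": [
        {"title": "Some show",
         "seasons_list": [{"season": 1, "link": "https://example.com/s01.json"}]}
    ]
}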
def add_channel(item):
logger.info()
import xbmc
import xbmcgui
channel_to_add = {}
json_file = ''
result = platformtools.dialog_select(config.get_localized_string(70676), [config.get_localized_string(70678), config.get_localized_string(70679)])
if result == -1:
return
if result==0:
file_path = xbmcgui.Dialog().browseSingle(1, config.get_localized_string(70680), 'files')
try:
channel_to_add['path'] = file_path
json_file = jsontools.load(open(file_path, "r").read())
channel_to_add['channel_name'] = json_file['channel_name']
except:
pass
elif result==1:
url = platformtools.dialog_input("", config.get_localized_string(70681), False)
try:
channel_to_add['path'] = url
json_file = jsontools.load(httptools.downloadpage(url).data)
except:
pass
if len(json_file) == 0:
return
if "episodes_list" in json_file:
platformtools.dialog_ok(config.get_localized_string(20000), config.get_localized_string(70682))
return
channel_to_add['channel_name'] = json_file['channel_name']
path = os.path.join(config.get_data_path(), 'community_channels.json')
community_json = open(path, "r")
community_json = jsontools.load(community_json.read())
id = len(community_json['channels']) + 1
community_json['channels'][id]=(channel_to_add)
with open(path, "w") as file:
file.write(jsontools.dump(community_json))
file.close()
platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(70683) % json_file['channel_name'])
return
def remove_channel(item):
logger.info()
import xbmc
import xbmcgui
path = os.path.join(config.get_data_path(), 'community_channels.json')
community_json = open(path, "r")
community_json = jsontools.load(community_json.read())
id = item.channel_id
to_delete = community_json['channels'][id]['channel_name']
del community_json['channels'][id]
with open(path, "w") as file:
file.write(jsontools.dump(community_json))
file.close()
platformtools.dialog_notification(config.get_localized_string(20000), config.get_localized_string(70684) % to_delete)
platformtools.itemlist_refresh()
return
def set_extra_values(dict):
logger.info()
quality = ''
language = ''
plot = ''
poster = ''
if 'quality' in dict and dict['quality'] != '':
quality = dict['quality'].upper()
if 'language' in dict and dict['language'] != '':
language = dict['language'].upper()
if 'plot' in dict and dict['plot'] != '':
plot = dict['plot']
if 'poster' in dict and dict['poster'] != '':
poster = dict['poster']
return quality, language, plot, poster
def set_title(title, language, quality):
logger.info()
if not config.get_setting('unify'):
if quality != '':
title += ' [%s]' % quality
if language != '':
if not isinstance(language, list):
title += ' [%s]' % language.upper()
else:
title += ' '
for lang in language:
title += '[%s]' % lang.upper()
return title.capitalize()
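A usage note on set_title: quality comes first, list languages are concatenated without separators, and the final capitalize() lowercases everything after the first character, tags included. For example, with unify disabled:

# set_title('infinity war', ['LAT', 'VOSE'], '1080')
# -> 'Infinity war [1080] [lat][vose]'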


@@ -1,12 +1,12 @@
{
"id": "cumlouder",
"name": "Cumlouder",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "cumlouder.png",
"banner": "cumlouder.png",
"categories": [
"adult"
]
}


@@ -1,210 +1,210 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
def mainlist(item):
logger.info()
itemlist = []
config.set_setting("url_error", False, "cumlouder")
itemlist.append(item.clone(title="Últimos videos", action="videos", url="https://www.cumlouder.com/"))
itemlist.append(item.clone(title="Categorias", action="categorias", url="https://www.cumlouder.com/categories/"))
itemlist.append(item.clone(title="Pornstars", action="pornstars_list", url="https://www.cumlouder.com/girls/"))
itemlist.append(item.clone(title="Listas", action="series", url="https://www.cumlouder.com/series/"))
itemlist.append(item.clone(title="Buscar", action="search", url="https://www.cumlouder.com/search?q=%s"))
return itemlist
def search(item, texto):
logger.info()
item.url = item.url % texto
item.action = "videos"
try:
return videos(item)
except:
import traceback
logger.error(traceback.format_exc())
return []
def pornstars_list(item):
logger.info()
itemlist = []
for letra in "abcdefghijklmnopqrstuvwxyz":
itemlist.append(item.clone(title=letra.upper(), url=urlparse.urljoin(item.url, letra), action="pornstars"))
return itemlist
def pornstars(item):
logger.info()
itemlist = []
data = get_data(item.url)
patron = '<a girl-url="[^"]+" class="[^"]+" href="([^"]+)" title="([^"]+)">[^<]+'
patron += '<img class="thumb" src="([^"]+)" [^<]+<h2[^<]+<span[^<]+</span[^<]+</h2[^<]+'
patron += '<span[^<]+<span[^<]+<span[^<]+</span>([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, count in matches:
if "go.php?" in url:
url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, url)
if not thumbnail.startswith("https"):
thumbnail = "https:%s" % thumbnail
itemlist.append(item.clone(title="%s (%s)" % (title, count), url=url, action="videos", thumbnail=thumbnail))
# Paginador
matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data)
if matches:
if "go.php?" in matches[0]:
url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(item.clone(title="Pagina Siguiente", url=url))
return itemlist
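pornstars, categorias, series and videos all repeat the same dance for hideproxy go.php redirect links: pull the u= parameter back out and unquote it, otherwise join against the page URL. A hypothetical helper that would consolidate it (not present in the repo):

# Hypothetical consolidation of the repeated go.php unwrapping above.
import urllib
import urlparse

def unwrap(url, base):
    if "go.php?" in url:
        return urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
    return urlparse.urljoin(base, url)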
def categorias(item):
logger.info()
itemlist = []
data = get_data(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a tag-url=.*?href="([^"]+)" title="([^"]+)".*?<img class="thumb" src="([^"]+)".*?<span class="cantidad">([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, count in matches:
if "go.php?" in url:
url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, url)
if not thumbnail.startswith("https"):
thumbnail = "https:%s" % thumbnail
itemlist.append(
item.clone(title="%s (%s videos)" % (title, count), url=url, action="videos", thumbnail=thumbnail))
# Paginador
matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data)
if matches:
if "go.php?" in matches[0]:
url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(item.clone(title="Pagina Siguiente", url=url))
return itemlist
def series(item):
logger.info()
itemlist = []
data = get_data(item.url)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a onclick=.*?href="([^"]+)".*?\<img src="([^"]+)".*?h2 itemprop="name">([^<]+).*?p>([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumbnail, title, count in matches:
itemlist.append(
item.clone(title="%s (%s) " % (title, count), url=urlparse.urljoin(item.url, url), action="videos", thumbnail=thumbnail))
# Paginador
matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data)
if matches:
if "go.php?" in matches[0]:
url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(item.clone(title="Pagina Siguiente", url=url))
return itemlist
def videos(item):
logger.info()
itemlist = []
data = get_data(item.url)
patron = '<a class="muestra-escena" href="([^"]+)" title="([^"]+)"[^<]+<img class="thumb" src="([^"]+)".*?<span class="minutos"> <span class="ico-minutos sprite"></span> ([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, duration in matches:
if "go.php?" in url:
url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
thumbnail = urllib.unquote(thumbnail.split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin("https://www.cumlouder.com", url)
if not thumbnail.startswith("https"):
thumbnail = "https:%s" % thumbnail
itemlist.append(item.clone(title="%s (%s)" % (title, duration), url=urlparse.urljoin(item.url, url),
action="play", thumbnail=thumbnail, contentThumbnail=thumbnail,
contentType="movie", contentTitle=title))
# Paginador
nextpage = scrapertools.find_single_match(data, '<ul class="paginador"(.*?)</ul>')
matches = re.compile('<a href="([^"]+)" rel="nofollow">Next »</a>', re.DOTALL).findall(nextpage)
if not matches:
matches = re.compile('<li[^<]+<a href="([^"]+)">Next »</a[^<]+</li>', re.DOTALL).findall(nextpage)
if matches:
if "go.php?" in matches[0]:
url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
else:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(item.clone(title="Pagina Siguiente", url=url))
return itemlist
def play(item):
logger.info()
itemlist = []
data = get_data(item.url)
patron = '<source src="([^"]+)" type=\'video/([^\']+)\' label=\'[^\']+\' res=\'([^\']+)\' />'
url, type, res = re.compile(patron, re.DOTALL).findall(data)[0]
if "go.php?" in url:
url = urllib.unquote(url.split("/go.php?u=")[1].split("&")[0])
elif not url.startswith("http"):
url = "http:" + url.replace("&amp;", "&")
itemlist.append(
Item(channel='cumlouder', action="play", title='Video' + res, fulltitle=type.upper() + ' ' + res, url=url,
server="directo", folder=False))
return itemlist
def get_data(url_orig):
try:
if config.get_setting("url_error", "cumlouder"):
raise Exception
response = httptools.downloadpage(url_orig)
if not response.data or "urlopen error [Errno 1]" in str(response.code):
raise Exception
except:
config.set_setting("url_error", True, "cumlouder")
import random
server_random = ['nl', 'de', 'us']
server = server_random[random.randint(0, 2)]
url = "https://%s.hideproxy.me/includes/process.php?action=update" % server
post = "u=%s&proxy_formdata_server=%s&allowCookies=1&encodeURL=0&encodePage=0&stripObjects=0&stripJS=0&go=" \
% (urllib.quote(url_orig), server)
while True:
response = httptools.downloadpage(url, post, follow_redirects=False)
if response.headers.get("location"):
url = response.headers["location"]
post = ""
else:
break
return response.data
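get_data retries through a random hideproxy.me mirror once a direct download has failed, and because follow_redirects is off it has to walk the Location headers itself. That loop, isolated as a sketch:

# The manual redirect walk from get_data, isolated.
def follow_location(url, post):
    while True:
        response = httptools.downloadpage(url, post, follow_redirects=False)
        location = response.headers.get("location")
        if not location:
            return response.data
        url, post = location, ""  # subsequent hops are plain GETs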


@@ -1,12 +1,12 @@
{
"id": "datoporn",
"name": "DatoPorn",
"language": ["*"],
"active": true,
"adult": true,
"thumbnail": "http://i.imgur.com/tBSWudd.png?1",
"banner": "datoporn.png",
"categories": [
"adult"
]
}


@@ -1,73 +1,73 @@
# -*- coding: utf-8 -*-
import re
from core import httptools
from core import scrapertools
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(action="categorias", title="Categorías", url="http://dato.porn/categories_all", contentType="movie", viewmode="movie"))
itemlist.append(item.clone(title="Buscar...", action="search", contentType="movie", viewmode="movie"))
return itemlist
def search(item, texto):
logger.info()
item.url = "http://dato.porn/?k=%s&op=search" % texto.replace(" ", "+")
return lista(item)
def lista(item):
logger.info()
itemlist = []
# Descarga la pagina
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url).data)
# Extrae las entradas
patron = '<div class="videobox">\s*<a href="([^"]+)".*?url\(\'([^\']+)\'.*?<span>(.*?)<\/span><\/div><\/a>.*?class="title">(.*?)<\/a><span class="views">.*?<\/a><\/span><\/div> '
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, duration, scrapedtitle in matches:
if "/embed-" not in scrapedurl:
#scrapedurl = scrapedurl.replace("dato.porn/", "dato.porn/embed-") + ".html"
scrapedurl = scrapedurl.replace("datoporn.co/", "datoporn.co/embed-") + ".html"
if duration:
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
scrapedtitle += ' gb'
scrapedtitle = scrapedtitle.replace(":", "'")
#logger.debug(scrapedurl + ' / ' + scrapedthumbnail + ' / ' + duration + ' / ' + scrapedtitle)
itemlist.append(item.clone(action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
server="datoporn", fanart=scrapedthumbnail.replace("_t.jpg", ".jpg")))
# Extrae la marca de siguiente página
#next_page = scrapertools.find_single_match(data, '<a href=["|\']([^["|\']+)["|\']>Next')
next_page = scrapertools.find_single_match(data, '<a class=["|\']page-link["|\'] href=["|\']([^["|\']+)["|\']>Next')
if next_page and itemlist:
itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
return itemlist
def categorias(item):
logger.info()
itemlist = []
# Descarga la pagina
data = httptools.downloadpage(item.url).data
# Extrae las entradas (carpetas)
patron = '<div class="vid_block">\s*<a href="([^"]+)".*?url\((.*?)\).*?<span>(.*?)</span>.*?<b>(.*?)</b>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, numero, scrapedtitle in matches:
if numero:
scrapedtitle = "%s (%s)" % (scrapedtitle, numero)
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail))
return itemlist


@@ -1,36 +1,36 @@
{
"id": "doramasmp4",
"name": "DoramasMP4",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://s14.postimg.cc/ibh4znkox/doramasmp4.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE",
"VO"
]
}
]
}


@@ -1,235 +1,235 @@
# -*- coding: utf-8 -*-
# -*- Channel DoramasMP4 -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www4.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE', 'vo': 'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'netutv', 'okru', 'directo', 'mp4upload']
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel= item.channel, title="Doramas", action="doramas_menu",
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Variedades", action="list_all",
url=host + 'catalogue?format%5B%5D=varieties&sort=latest',
thumbnail='', type='dorama'))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
url=host + 'catalogue?format%5B%5D=movie&sort=latest',
thumbnail=get_thumb('movies', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def doramas_menu(item):
logger.info()
itemlist =[]
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all",
url=host + 'catalogue?format%5B%5D=drama&sort=latest', thumbnail=get_thumb('all', auto=True),
type='dorama'))
itemlist.append(Item(channel=item.channel, title="Nuevos capitulos", action="latest_episodes",
url=host + 'latest-episodes', thumbnail=get_thumb('new episodes', auto=True), type='dorama'))
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<div class="col-lg-2 col-md-3 col-6 mb-3"><a href="([^"]+)".*?<img src="([^"]+)".*?'
patron += 'txt-size-12">(\d{4})<.*?text-truncate">([^<]+)<.*?description">([^<]+)<.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
media_type = item.type
for scrapedurl, scrapedthumbnail, year, scrapedtitle, scrapedplot in matches:
url = scrapedurl
scrapedtitle = scrapedtitle
thumbnail = scrapedthumbnail
new_item = Item(channel=item.channel, title=scrapedtitle, url=url,
thumbnail=thumbnail, type=media_type, infoLabels={'year':year})
if media_type != 'dorama':
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
new_item.type = item.type
else:
new_item.contentSerieName=scrapedtitle
new_item.action = 'episodios'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if itemlist != []:
next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" aria-label="Netx">')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=host+'catalogue'+next_page, thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png',
type=item.type))
return itemlist
def latest_episodes(item):
logger.info()
itemlist = []
infoLabels = dict()
data = get_source(item.url)
patron = 'shadow-lg rounded" href="([^"]+)".*?src="([^"]+)".*?style="">([^<]+)<.*?>Capítulo (\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedep in matches:
title = '%s %s' % (scrapedtitle, scrapedep)
contentSerieName = scrapedtitle
itemlist.append(Item(channel=item.channel, action='findvideos', url=scrapedurl, thumbnail=scrapedthumbnail,
title=title, contentSerieName=contentSerieName, type='episode'))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<a itemprop="url".*?href="([^"]+)".*?title="(.*?) Cap.*?".*?>Capítulo (\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedtitle, scrapedep in matches:
url = scrapedurl
contentEpisodeNumber = scrapedep
infoLabels['season'] = 1
infoLabels['episode'] = contentEpisodeNumber
if scrapedtitle != '':
title = '%sx%s - %s' % ('1',scrapedep, scrapedtitle)
else:
title = 'episodio %s' % scrapedep
infoLabels = item.infoLabels
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url,
contentEpisodeNumber=contentEpisodeNumber, type='episode', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", text_color='yellow'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
new_dom=scrapertools.find_single_match(data,"var web = { domain: '(.*?)'")
patron = 'link="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
if '</strong> ¡Este capítulo no tiene subtítulos, solo audio original! </div>' in data:
language = IDIOMAS['vo']
else:
language = IDIOMAS['sub']
#if item.type !='episode' and '<meta property=article:section content=Pelicula>' not in data:
# if item.type !='episode' and item.type != 'movie':
# item.type = 'dorama'
# item.contentSerieName = item.contentTitle
# item.contentTitle = ''
# return episodios(item)
# else:
for video_url in matches:
headers = {'referer': video_url}
token = scrapertools.find_single_match(video_url, 'token=(.*)')
if 'fast.php' in video_url:
video_url = 'https://player.rldev.in/fast.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers).data
url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
else:
video_url = new_dom+'api/redirect.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')
        new_item = Item(channel=item.channel, title='[%s] [%s]', url=url, action='play', language=language)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
if len(itemlist) == 0 and item.type == 'search':
item.contentSerieName = item.contentTitle
item.contentTitle = ''
return episodios(item)
    # Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
autoplay.start(itemlist, item)
return itemlist
def search(item, texto):
    logger.info()
    itemlist = []
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.type = 'search'
    if texto != '':
        try:
            return list_all(item)
        except:
            itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
    return itemlist

View File

@@ -1,174 +1,174 @@
{
"id": "downloads",
"name": "Descargas",
"active": false,
"adult": false,
"language": ["*"],
"categories": [
"movie"
],
"settings": [
{
"type": "label",
"label": "@70229",
"enabled": true,
"visible": true
},
{
"id": "library_add",
"type": "bool",
"label": "@70230",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "library_move",
"type": "bool",
"label": "@70231",
"default": false,
"enabled": "eq(-1,true)",
"visible": true
},
{
"id": "browser",
"type": "bool",
"label": "@70232",
"default": true,
"enabled": true,
"visible": true
},
{
"type": "label",
"label": "@70243",
"enabled": true,
"visible": true
},
{
"id": "block_size",
"type": "list",
"label": "@70233",
"lvalues": [
"128 KB",
"256 KB",
"512 KB",
"1 MB",
"2 MB"
],
"default": 1,
"enabled": true,
"visible": true
},
{
"id": "part_size",
"type": "list",
"label": "@70234",
"lvalues": [
"1 MB",
"2 MB",
"4 MB",
"8 MB",
"16 MB",
"32 MB"
],
"default": 1,
"enabled": true,
"visible": true
},
{
"id": "max_connections",
"type": "list",
"label": "@70235",
"lvalues": [
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10"
],
"default": 4,
"enabled": true,
"visible": true
},
{
"id": "max_buffer",
"type": "list",
"label": "@70236",
"lvalues": [
"0",
"2",
"4",
"6",
"8",
"10",
"12",
"14",
"16",
"18",
"20"
],
"default": 5,
"enabled": true,
"visible": true
},
{
"type": "label",
"label": "@70237",
"enabled": true,
"visible": true
},
{
"id": "server_reorder",
"type": "list",
"label": "@70238",
"lvalues": [
"@70244",
"Reordenar"
],
"default": 1,
"enabled": true,
"visible": true
},
{
"id": "language",
"type": "list",
"label": "@70246",
"lvalues": [
"Esp, Lat, Sub, Eng, Vose",
"Esp, Sub, Lat, Eng, Vose",
"Eng, Sub, Vose, Esp, Lat",
"Vose, Eng, Sub, Esp, Lat"
],
"default": 0,
"enabled": "eq(-1,'Reordenar')",
"visible": true
},
{
"id": "quality",
"type": "list",
"label": "@70240",
"lvalues": [
"@70241",
"HD 1080",
"HD 720",
"SD"
],
"default": 0,
"enabled": "eq(-2,'Reordenar')",
"visible": true
},
{
"id": "server_speed",
"type": "bool",
"label": "@70242",
"default": true,
"enabled": "eq(-3,'Reordenar')",
"visible": true
}
]
}

File diff suppressed because it is too large

View File

@@ -1,46 +1,46 @@
{
"id": "dramasjc",
"name": "DramasJC",
"active": true,
"adult": false,
"language": [],
"thumbnail": "https://www.dramasjc.com/wp-content/uploads/2018/03/logo.png",
"banner": "",
"version": 1,
"categories": [
"tvshow",
"movie",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE",
"VO"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,282 +1,282 @@
# -*- coding: utf-8 -*-
# -*- Channel DramasJC -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'https://www.dramasjc.com/'
IDIOMAS = {'VOSE': 'VOSE', 'VO':'VO'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['okru', 'mailru', 'openload']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Doramas", action="menu_doramas",
thumbnail=get_thumb('doramas', auto=True)))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all", url=host+'peliculas/',
type='movie', thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_doramas(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, title="Todos", action="list_all", url=host + 'series',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section",
thumbnail=get_thumb('genres', auto=True)))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
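    # Flatten the HTML (strip newlines and repeated whitespace) so the
    # re.DOTALL patterns below stay simple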
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
full_data = data
data = scrapertools.find_single_match(data, '<ul class="MovieList NoLmtxt.*?>(.*?)</ul>')
patron = '<article id="post-.*?<a href="([^"]+)">.*?(?:<img |-)src="([^"]+)".*?alt=".*?'
patron += '<h3 class="Title">([^<]+)<\/h3>.?(?:</a>|<span class="Year">(\d{4})<\/span>).*?'
patron += '(movie|TV)'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, type in matches:
url = scrapedurl
if year == '':
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentname = scrapedtitle[0].strip()
else:
contentname = scrapedtitle
contentname = re.sub('\(.*?\)','', contentname)
title = '%s [%s]'%(contentname, year)
        # Defensive: only prepend the scheme when the thumbnail URL is protocol-relative
        thumbnail = scrapedthumbnail if scrapedthumbnail.startswith('http') else 'http:' + scrapedthumbnail
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels={'year':year}
)
if type == 'movie':
new_item.contentTitle = contentname
new_item.action = 'findvideos'
else:
new_item.contentSerieName = contentname
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
    # Pagination
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def section(item):
logger.info()
itemlist = []
full_data = get_source(host)
data = scrapertools.find_single_match(full_data, '<a href="#">Dramas por Genero</a>(.*?)</ul>')
patron = '<a href="([^ ]+)">([^<]+)<'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
url = data_one
title = data_two
new_item = Item(channel=item.channel, title= title, url=url, action=action)
itemlist.append(new_item)
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'class="Title AA-Season On" data-tab="1">Temporada <span>([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for temporada in matches:
title = 'Temporada %s' % temporada
contentSeasonNumber = temporada
item.infoLabels['season'] = contentSeasonNumber
itemlist.append(item.clone(action='episodesxseason',
title=title,
contentSeasonNumber=contentSeasonNumber
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
season = item.contentSeasonNumber
data = get_source(item.url)
data = scrapertools.find_single_match(data, '>Temporada <span>%s</span>(.*?)</ul>' % season)
patron = '<a href="([^"]+)">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
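    # The pattern only captures url and title; episode numbers are assigned by
    # position in the list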
ep = 1
for scrapedurl, scrapedtitle in matches:
epi = str(ep)
title = season + 'x%s - Episodio %s' % (epi, epi)
url = scrapedurl
contentEpisodeNumber = epi
item.infoLabels['episode'] = contentEpisodeNumber
if 'próximamente' not in scrapedtitle.lower():
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
ep += 1
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.unescape(data)
data = scrapertools.decodeHtmlentities(data)
# patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
patron = 'id="(Opt\d+)">.*?src="(?!about:blank)([^"]+)" frameborder.*?</iframe>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    trailer = None
    for option, scrapedurl in matches:
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)"')
opt_data = scrapertools.find_single_match(data,'"%s"><span>.*?</span>.*?<span>([^<]+)</span>'%option).split('-')
        language = opt_data[0].strip()
        quality = opt_data[1].strip() if len(opt_data) > 1 else ''
if 'sub' in language.lower():
language='VOSE'
else:
language = 'VO'
if url != '' and 'youtube' not in url:
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
action='play'))
elif 'youtube' in url:
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
    if trailer:
        itemlist.append(trailer)
    # Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
    # Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return list_all(item)
else:
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas']:
item.url = host+'peliculas/'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -1,12 +1,12 @@
{
"id": "eporner",
"name": "Eporner",
"active": true,
"adult": true,
"language": ["*"],
"thumbnail": "eporner.png",
"banner": "eporner.png",
"categories": [
"adult"
]
}

View File

@@ -1,145 +1,145 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from core import httptools
from core import jsontools
from platformcode import logger
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Últimos videos", action="videos", url="http://www.eporner.com/0/"))
itemlist.append(item.clone(title="Categorias", action="categorias", url="http://www.eporner.com/categories/"))
itemlist.append(item.clone(title="Pornstars", action="pornstars_list", url="http://www.eporner.com/pornstars/"))
itemlist.append(item.clone(title="Buscar", action="search", url="http://www.eporner.com/search/%s/"))
return itemlist
def search(item, texto):
logger.info()
item.url = item.url % texto
item.action = "videos"
try:
return videos(item)
except:
import traceback
logger.error(traceback.format_exc())
return []
def pornstars_list(item):
logger.info()
itemlist = []
for letra in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
itemlist.append(item.clone(title=letra, url=urlparse.urljoin(item.url, letra), action="pornstars"))
return itemlist
def pornstars(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="mbtit" itemprop="name"><a href="([^"]+)" title="([^"]+)">[^<]+</a></div> '
patron += '<a href="[^"]+" title="[^"]+"> <img src="([^"]+)" alt="[^"]+" style="width:190px;height:152px;" /> </a> '
patron += '<div class="mbtim"><span>Videos: </span>([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, count in matches:
itemlist.append(
item.clone(title="%s (%s videos)" % (title, count), url=urlparse.urljoin(item.url, url), action="videos",
thumbnail=thumbnail))
    # Pagination
patron = "<span style='color:#FFCC00;'>[^<]+</span></a> <a href='([^']+)' title='[^']+'><span>[^<]+</span></a>"
matches = re.compile(patron, re.DOTALL).findall(data)
if matches:
itemlist.append(item.clone(title="Pagina siguiente", url=urlparse.urljoin(item.url, matches[0])))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="categoriesbox" id="[^"]+"> <div class="ctbinner"> <a href="([^"]+)" title="[^"]+"> <img src="([^"]+)" alt="[^"]+"> <h2>([^"]+)</h2> </a> </div> </div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumbnail, title in matches:
itemlist.append(
item.clone(title=title, url=urlparse.urljoin(item.url, url), action="videos", thumbnail=thumbnail))
return sorted(itemlist, key=lambda i: i.title)
def videos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="([^"]+)" title="([^"]+)" id="[^"]+">.*?<img id="[^"]+" src="([^"]+)"[^>]+>.*?<div class="mbtim">([^<]+)</div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title, thumbnail, duration in matches:
itemlist.append(item.clone(title="%s (%s)" % (title, duration), url=urlparse.urljoin(item.url, url),
action="play", thumbnail=thumbnail, contentThumbnail=thumbnail,
contentType="movie", contentTitle=title))
    # Pagination
patron = "<span style='color:#FFCC00;'>[^<]+</span></a> <a href='([^']+)' title='[^']+'><span>[^<]+</span></a>"
matches = re.compile(patron, re.DOTALL).findall(data)
if matches:
itemlist.append(item.clone(title="Página siguiente", url=urlparse.urljoin(item.url, matches[0])))
return itemlist
def int_to_base36(num):
"""Converts a positive integer into a base36 string."""
assert num >= 0
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'.lower()
res = ''
while not res or num > 0:
num, i = divmod(num, 36)
res = digits[i] + res
return res
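# Illustrative sketch of int_to_base36 behaviour (not used by the channel itself;
# values checked by hand):
#   int_to_base36(35)  -> 'z'
#   int_to_base36(36)  -> '10'
#   int_to_base36(255) -> '73'   # 7*36 + 3 = 255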
def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = "EP: { vid: '([^']+)', hash: '([^']+)'"
vid, hash = re.compile(patron, re.DOTALL).findall(data)[0]
hash = int_to_base36(int(hash[0:8], 16)) + int_to_base36(int(hash[8:16], 16)) + int_to_base36(
int(hash[16:24], 16)) + int_to_base36(int(hash[24:32], 16))
url = "https://www.eporner.com/xhr/video/%s?hash=%s" % (vid, hash)
data = httptools.downloadpage(url).data
jsondata = jsontools.load(data)
for source in jsondata["sources"]["mp4"]:
url = jsondata["sources"]["mp4"][source]["src"]
title = source.split(" ")[0]
itemlist.append(["%s %s [directo]" % (title, url[-4:]), url])
return sorted(itemlist, key=lambda i: int(i[0].split("p")[0]))

Some files were not shown because too many files have changed in this diff