Merge pull request #7 from alfa-addon/master

fetch
This commit is contained in:
Alfa-beto
2019-03-22 15:42:25 -03:00
committed by GitHub
277 changed files with 10100 additions and 7510 deletions

cuevana3.json Normal file
View File

@@ -0,0 +1,69 @@
{
"id": "cuevana3",
"name": "Cuevana 3",
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://www.cuevana3.co/wp-content/themes/cuevana3/public/img/cnt/cuevana3.png",
"banner": "",
"version": 1,
"categories": [
"movie"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"CAST",
"VOSE"
]
},
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_infantiles",
"type": "bool",
"label": "Incluir en Novedades - Infantiles",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_terror",
"type": "bool",
"label": "Incluir en Novedades - Terror",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_documentales",
"type": "bool",
"label": "Incluir en Novedades - Documentales",
"default": true,
"enabled": true,
"visible": true
}
]
}
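For reference, these settings surface in the channel's configuration dialog and are read back at runtime. A minimal sketch of how a channel might consume them, assuming the config.get_setting(name, channel) helper that this repo's channels use elsewhere:

from platformcode import config

# "filter_languages" is a list setting: the stored value is an index into
# "lvalues" (0 = "No filtrar", 1 = "LAT", 2 = "CAST", 3 = "VOSE")
language_filter = config.get_setting("filter_languages", "cuevana3")

# boolean settings gate the channel's participation in the "Novedades" feeds
if config.get_setting("include_in_newest_peliculas", "cuevana3"):
    pass  # include this channel when building "Novedades - Peliculas"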

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.7.21" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.7.31" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -18,13 +18,15 @@
<screenshot>resources/media/themes/ss/4.jpg</screenshot>
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Arreglos[/B][/COLOR]
¤ Grantorrent ¤ Zonatorrent ¤ pack +18
¤ erotik ¤ pelis24 ¤ cineasiaenlinea
¤ pelisgratis ¤ repelis ¤ descargacineclasico
¤ Goovie ¤ PelisFox ¤ PelisPlus
¤ TvSeriesDk ¤ UqLoad ¤ Vi2
¤ gnula.biz
[COLOR green][B]Arreglos[/B][/COLOR]
¤ maxipelis24 ¤ cuevana3 ¤ pelisplusco
¤ mejortorrent ¤ newpct1
[COLOR green][B]Novedades[/B][/COLOR]
¤ Mundopelis ¤ thevideobee ¤ tusfiles
¤ vup
¤ Agradecimientos a @mac12m99 y @chivmalev por colaborar con ésta versión
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>

View File

@@ -7,8 +7,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.likuoo.video'
@@ -16,7 +14,8 @@ host = 'http://www.likuoo.video'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Pornstar" , action="categorias", url=host + "/pornstars/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/all-channels/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -27,7 +26,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -46,16 +45,24 @@ def categorias(item):
scrapedplot = ""
scrapedthumbnail = "https:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">&#187;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="item">.*?<a href="([^"]+)" title="(.*?)">.*?src="(.*?)".*?<div class="runtime">(.*?)</div>'
patron = '<div class="item">.*?'
patron += '<a href="([^"]+)" title="(.*?)">.*?'
patron += 'src="(.*?)".*?'
patron += '<div class="runtime">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
url = urlparse.urljoin(item.url,scrapedurl)
@@ -64,12 +71,12 @@ def peliculas(item):
contentTitle = title
thumbnail = "https:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">&#187;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'...<a href="([^"]+)" class="next">&#187;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
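The pagination entry now uses item.clone, which copies every attribute of the current item and overrides only the ones passed in, so channel, thumbnail and category context survive onto the "Página Siguiente" item. A minimal sketch of the idea, assuming core.item.Item:

from core.item import Item

# hypothetical URLs, for illustration only
page1 = Item(channel="likuoo", action="lista", url="http://www.likuoo.video/page/1")
page2 = page1.clone(title="Página Siguiente >>", url="http://www.likuoo.video/page/2")
print(page2.channel, page2.action)  # inherited -> "likuoo lista"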

View File

@@ -2,14 +2,11 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.txxx.com'
@@ -17,10 +14,10 @@ host = 'http://www.txxx.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimas" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas popular" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-list/"))
itemlist.append( Item(channel=item.channel, title="Ultimas" , action="lista", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas popular" , action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-list/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -31,7 +28,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -44,19 +41,22 @@ def catalogo(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="channel-thumb">.*?<a href="([^"]+)" title="([^"]+)".*?<img src="([^"]+)".*?<span>(.*?)</span>'
patron = '<div class="channel-thumb">.*?'
patron += '<a href="([^"]+)" title="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
patron += '<span>(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,num in matches:
scrapedplot = ""
scrapedurl = host + scrapedurl
title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel, action="lista", title=title , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" ,
text_color="blue", url=next_page_url , folder=True) )
text_color="blue", url=next_page) )
return itemlist
@@ -73,33 +73,36 @@ def categorias(item):
scrapedthumbnail = ""
scrapedplot = ""
title = scrapedtitle + "[COLOR yellow] " + num + "[/COLOR]"
itemlist.append( Item(channel=item.channel, action="peliculas", title=title , url=url ,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=title , url=url ,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'data-video-id="\d+">.*?<a href="([^"]+)".*?'
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="thumb__duration">(.*?)</span>'
patron += '</div>(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
for scrapedurl,scrapedthumbnail,scrapedtitle,scrapedtime in matches:
contentTitle = scrapedtitle
title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
scrapedhd = scrapertools.find_single_match(scrapedtime, '<span class="thumb__hd">(.*?)</span>')
duration = scrapertools.find_single_match(scrapedtime, '<span class="thumb__duration">(.*?)</span>')
if scrapedhd != '':
title = "[COLOR yellow]" +duration+ "[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle
else:
title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next" href="([^"]+)" title="Next Page"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" ,
text_color="blue", url=next_page_url , folder=True) )
plot=plot, contentTitle=title) )
next_page = scrapertools.find_single_match(data,'<a class=" btn btn--size--l btn--next.*?" href="([^"]+)" title="Next Page"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -7,9 +7,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.absoluporn.es'
@@ -18,10 +15,10 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/wall-date-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas valorados" , action="peliculas", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/wall-main-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas largos" , action="peliculas", url=host + "/wall-time-1.html"))
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/wall-date-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas valorados" , action="lista", url=host + "/wall-note-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/wall-main-1.html"))
itemlist.append( Item(channel=item.channel, title="Mas largos" , action="lista", url=host + "/wall-time-1.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -33,7 +30,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search-%s-1.html" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -52,12 +49,12 @@ def categorias(item):
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace(".html", "_date.html")
scrapedurl = host +"/" + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -72,14 +69,13 @@ def peliculas(item):
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
contentTitle = scrapedtitle))
next_page = scrapertools.find_single_match(data, '<span class="text16">\d+</span> <a href="..([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ) )
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist

View File

@@ -0,0 +1,12 @@
{
"id": "abtoon",
"name": "abtoon",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "http://i.imgur.com/EpNUqsD.png",
"banner": "http://i.imgur.com/c1YTgNT.png",
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,225 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import filtertools
from channels import autoplay
from lib import gktools
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload']
list_quality = ['default']
host = "https://abtoon.net"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="lista", title="Series", contentSerieName="Series", url=host, thumbnail=thumb_series, page=0))
#itemlist.append(
# Item(channel=item.channel, action="lista", title="Live Action", contentSerieName="Live Action", url=host+"/liveaction", thumbnail=thumb_series, page=0))
#itemlist.append(
# Item(channel=item.channel, action="peliculas", title="Películas", contentSerieName="Películas", url=host+"/peliculas", thumbnail=thumb_series, page=0))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a href="([^"]+)" '
if item.contentSerieName == "Series":
patron += 'class="link">.+?<img src="([^"]+)".*?'
else:
patron += 'class="link-la">.+?<img src="([^"]+)".*?'
patron += 'title="([^"]+)">'
if item.url==host or item.url==host+"/liveaction":
a=1
else:
num=(item.url).split('-')
a=int(num[1])
matches = scrapertools.find_multiple_matches(data, patron)
# Pagination
num_items_x_pagina = 30
min = item.page * num_items_x_pagina
min=min-item.page
max = min + num_items_x_pagina - 1
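# with num_items_x_pagina = 30, the "min - item.page" correction makes each
# window start where the previous one ended: page 0 -> matches[0:29],
# page 1 -> matches[29:58] (slice ends are exclusive, so 29 items per page)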
b=0
for link, img, name in matches[min:max]:
b=b+1
if " y " in name:
title=name.replace(" y "," & ")
else:
title = name
url = host + link
scrapedthumbnail = host + img
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
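# merge the renumbering context-menu entries with the autoplay ones so both
# appear on the series item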
itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,contentSerieName=title,
context=context))
if b<29:
a=a+1
url=host+"/p/pag-"+str(a)
if b>10:
itemlist.append(
Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
else:
itemlist.append(
Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
tmdb.set_infoLabels(itemlist)
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div class="pel play" dt="(.+?)" .+?><img src="(.+?)" .+? title="(.*?)"><span class=".+?">(.+?)<\/span><a href="(.+?)" class.+?>'
matches = scrapertools.find_multiple_matches(data, patron)
# Pagination
num_items_x_pagina = 30
min = item.page * num_items_x_pagina
min=min-item.page
max = min + num_items_x_pagina - 1
b=0
for scrapedplot,scrapedthumbnail, scrapedtitle, scrapedyear, scrapedurl in matches[min:max]:
b=b+1
url = host + scrapedurl
thumbnail = host +scrapedthumbnail
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(item.clone(title=scrapedtitle+"-"+scrapedyear, url=url, action="findvideos", thumbnail=thumbnail, plot=scrapedplot,
show=scrapedtitle,contentSerieName=scrapedtitle,context=context))
if b<29:
pass
else:
itemlist.append(
Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="peliculas", page=item.page + 1))
tmdb.set_infoLabels(itemlist)
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# get the total number of episodes
total_episode = 0
patron_caps = '<li><a href="(.*?)">(.*?) - (.*?)<\/a><\/li>'
matches = scrapertools.find_multiple_matches(data, patron_caps)
patron_info = '<img src="([^"]+)" .+?>.+?<h1>([^"]+)<\/h1><p .+?>(.+?)<\/p>'
scrapedthumbnail, show, scrapedplot = scrapertools.find_single_match(data, patron_info)
scrapedthumbnail = host + scrapedthumbnail
for link, cap, name in matches:
title = ""
pat = "$%&"
# several episodes under one link
if len(name.split(pat)) > 1:
i = 0
for pos in name.split(pat):
i = i + 1
total_episode += 1
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, total_episode)
if len(name.split(pat)) == i:
title += "%sx%s " % (season, str(episode).zfill(2))
else:
title += "%sx%s_" % (season, str(episode).zfill(2))
else:
total_episode += 1
season, episode = renumbertools.numbered_for_tratk(item.channel,item.contentSerieName, 1, total_episode)
title += "%sx%s " % (season, str(episode).zfill(2))
url = host + "/" + link
if "DISPONIBLE" in name:
title += "No Disponible aún"
else:
title += name
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, plot=scrapedplot,
thumbnail=scrapedthumbnail))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
import base64
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
_sl = scrapertools.find_single_match(data, 'var abi = ([^;]+);')
sl = eval(_sl)
buttons = scrapertools.find_multiple_matches(data,'class="bsel" sl="(.+?)"')#[0,1,2,3,4]
for ids in buttons:
id = int(ids)
url_end = golink(id,sl)
new_url = "https://abtoon.net/" + "embed/" + sl[0] + "/" + sl[1] + "/" + str(id) + "/" + sl[2] + url_end
data_new = httptools.downloadpage(new_url).data
data_new = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_new)
logger.info("asdasdasdcc"+data_new)
valor1, valor2 = scrapertools.find_single_match(data_new, 'var x0x = \["[^"]*", "([^"]+)", "[^"]*", "[^"]*", "([^"]+)"\];')
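# valor1 is a base64-encoded key and valor2 the obfuscated stream URL;
# gktools.transforma_gsv (lib/gktools) undoes the page's obfuscation before
# the final base64 decode below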
try:
url = base64.b64decode(gktools.transforma_gsv(valor2, base64.b64decode(valor1)))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
infoLabels=item.infoLabels))
except Exception as e:
logger.info(e)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
def golink(ida,sl):
a=ida
b=[3,10,5,22,31]
c=1
d=""
e=sl[2]
for i in range(len(b)):
d=d+substr(e,b[i]+a,c)
return d
def substr(st,a,b):
return st[a:a+b]
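For context, golink rebuilds the tail of the embed URL by picking five characters out of the token sl[2], at fixed offsets [3, 10, 5, 22, 31], each shifted by the selected button id. A quick illustration with a made-up token (real values come from the page's var abi = ...;):

# hypothetical token, for illustration only
sl = ["x", "y", "0123456789abcdefghijklmnopqrstuvwxyz"]
print(golink(0, sl))  # picks indices 3, 10, 5, 22, 31 -> "3a5mv"
print(golink(1, sl))  # button id shifts every index by 1 -> "4b6nw"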

View File

@@ -19,7 +19,7 @@ list_servers = ['rapidvideo', 'streamango', 'fastplay', 'flashx', 'openload', 'v
__channel__='allcalidad'
host = "http://allcalidad.net/"
host = "https://allcalidad.net/"
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -198,17 +198,16 @@ def findvideos(item):
autoplay.start(itemlist, item)
if itemlist:
if itemlist and item.contentChannel != "videolibrary":
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# "Add this movie to the KODI library" option
if item.extra != "library":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist

View File

@@ -7,15 +7,14 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.alsoporn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Top" , action="peliculas", url=host + "/g/All/top/1"))
itemlist.append( Item(channel=item.channel, title="Top" , action="lista", url=host + "/g/All/top/1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -26,7 +25,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/=%s/" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -46,8 +45,8 @@ def catalogo(item):
for scrapedurl,cantidad,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -62,12 +61,12 @@ def categorias(item):
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
@@ -82,14 +81,12 @@ def peliculas(item):
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url , folder=True) )
plot=plot, contentTitle = scrapedtitle))
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)" target="_self"><span class="alsoporn_page">NEXT</span></a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -7,17 +7,17 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.analdin.com/es'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/más-reciente/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/más-visto/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/mejor-valorado/"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/más-reciente/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/más-visto/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/mejor-valorado/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorías/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -29,7 +29,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -48,13 +48,12 @@ def catalogo(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
@@ -71,12 +70,12 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -91,14 +90,13 @@ def peliculas(item):
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
contentTitle = title))
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist

View File

@@ -15,7 +15,7 @@ from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import filtertools
from channels import renumbertools
host = "https://animeboom.net/"
@@ -84,6 +84,7 @@ def mainlist(item):
))
autoplay.show_option(item.channel, itemlist)
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
@@ -113,12 +114,16 @@ def list_all(item):
else:
lang = 'VOSE'
title = re.sub('Audio Latino', '', scrapedtitle)
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, action='episodios',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=title,
language = lang,
context = context,
infoLabels={'year':year}
))
@@ -153,11 +158,17 @@ def search_results(item):
url = scrapedurl
title = re.sub('online|Audio|Latino', '', scrapedtitle)
title = title.lstrip()
title = title.rstrip()
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel,
action="episodios",
title=title,
contentSerieName=title,
url=url,
context = context,
thumbnail=scrapedthumbnail))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
@@ -217,9 +228,10 @@ def episodios(item):
lang='Latino'
else:
lang = 'VOSE'
title = "1x" + episode + " - Episodio %s" % episode
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
title = "%sx%s - %s" % (season, str(episode).zfill(2),item.contentSerieName)
url = scrapedurl
infoLabels['season'] = '1'
infoLabels['season'] = season
infoLabels['episode'] = episode
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,

View File

@@ -25,6 +25,19 @@
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE",
"LAT"
]
}
]
}

View File

@@ -10,12 +10,24 @@ from core import servertools
from core import scrapertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
IDIOMAS = {'LAT': 'LAT','SUB': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['directo', 'rapidvideo', 'streamango', 'yourupload', 'mailru', 'netutv', 'okru']
list_quality = ['default']
HOST = "https://animeflv.net/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
@@ -31,6 +43,9 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="search_section", title=" Estado", url=HOST + "browse",
extra="status"))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -50,6 +65,10 @@ def search(item, texto):
_id = e["id"]
url = "%sanime/%s/%s" % (HOST, _id, e["slug"])
title = e["title"]
#if "&#039;" in title:
# title = title.replace("&#039;","")
#if "&deg;" in title:
# title = title.replace("&deg;","")
thumbnail = "%suploads/animes/covers/%s.jpg" % (HOST, e["id"])
new_item = item.clone(action="episodios", title=title, url=url, thumbnail=thumbnail)
if e["type"] != "movie":
@@ -173,47 +192,57 @@ def episodios(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
info = eval(scrapertools.find_single_match(data, 'anime_info = (.*?);'))
episodes = eval(scrapertools.find_single_match(data, 'var episodes = (.*?);'))
info = scrapertools.find_single_match(data, "anime_info = \[(.*?)\];")
info = eval(info)
episodes = eval(scrapertools.find_single_match(data, "var episodes = (.*?);"))
for episode in episodes:
url = '%s/ver/%s/%s-%s' % (HOST, episode[1], info[2], episode[0])
title = '1x%s Episodio %s' % (episode[0], episode[0])
itemlist.append(item.clone(title=title, url=url, action='findvideos', show=info[1]))
season = 1
season, episodeRenumber = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, season, int(episode[0]))
#title = '1x%s Episodio %s' % (episode[0], episode[0])
title = '%sx%s Episodio %s' % (season, str(episodeRenumber).zfill(2), episodeRenumber)
itemlist.append(item.clone(title=title, url=url, action='findvideos', contentSerieName=item.contentSerieName))
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios"))
action="add_serie_to_library", extra="episodios", show=item.contentSerieName))
return itemlist
def findvideos(item):
logger.info()
from core import jsontools
itemlist = []
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", httptools.downloadpage(item.url).data)
list_videos = scrapertools.find_multiple_matches(data, 'video\[\d\]\s=\s\'<iframe.+?src="([^"]+)"')
download_list = scrapertools.find_multiple_matches(data, 'video\[\d+\] = \'<iframe .*?src="(.*?)"')
for url in download_list:
data = httptools.downloadpage(url).data
if 'izanagi' in url:
new_url = url.replace('embed', 'check')
new_data = httptools.downloadpage(new_url).data
url = scrapertools.find_single_match(new_data, '"file":"(.*?)"')
else:
url = scrapertools.find_single_match(data, 'var redir = "(.*?)"')
if url != '':
url = url.replace("\\","")
itemlist.append(item.clone(title='%s', url=url, action='play'))
videos = scrapertools.find_single_match(data, 'var videos = (.*?);')
videos_json = jsontools.load(videos)
for video_lang in videos_json.items():
language = video_lang[0]
matches = scrapertools.find_multiple_matches(str(video_lang[1]), 'src="([^"]+)"')
for source in matches:
new_data = httptools.downloadpage(source).data
if 'redirector' in source:
url = scrapertools.find_single_match(new_data, 'window.location.href = "([^"]+)"')
elif 'embed' in source:
source = source.replace('embed', 'check')
new_data = httptools.downloadpage(source).data
json_data = jsontools.load(new_data)
try:
url = json_data['file']
except:
continue
itemlist.append(Item(channel=item.channel, url=url, title='%s', action='play', language=language))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
# Required by FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist
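The rewritten findvideos parses the page's var videos object as JSON keyed by language and resolves each src through the redirector or embed-check endpoints. A hypothetical example of the shape the parsing above assumes (the real payload comes from animeflv.net):

# hypothetical payload, for illustration only
videos_json = {
    "SUB": [{"code": '<iframe src="https://example.net/embed/abc"></iframe>'}],
    "LAT": [],
}
for language, sources in videos_json.items():
    print(language)  # "SUB" / "LAT", later matched by filtertools against IDIOMAS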
def play(item):
logger.info()
itemlist = []
if item.video_urls:
for it in item.video_urls:
title = ".%s %sp [directo]" % (it[1].replace("video/", ""), it[0])
itemlist.append([title, it[2]])
return itemlist
else:
return [item]

View File

@@ -1,22 +0,0 @@
{
"id": "animeflv_me",
"name": "Animeflv.ME",
"active": false,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/x9AdvBx.png",
"banner": "http://i.imgur.com/dTZwCPq.png",
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,43 +1,45 @@
{
"id": "animemovil",
"name": "Animemovil",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://s1.postimg.cc/92ji7stii7/animemovil1.png",
"banner": "",
"categories": [
"anime"
],
"settings": [
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "perfil",
"type": "list",
"label": "Perfil de color",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"Perfil 3",
"Perfil 2",
"Perfil 1"
]
}
]
}
{
"id": "animeflv_ru",
"name": "AnimeFLV.RU",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://i.imgur.com/5nRR9qq.png",
"banner": "animeflv_ru.png",
"compatible": {
"python": "2.7.9"
},
"categories": [
"anime"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_anime",
"type": "bool",
"label": "Incluir en Novedades - Episodios de anime",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE"
]
}
]
}

View File

@@ -5,10 +5,18 @@ import urlparse
from channels import renumbertools
from core import httptools
from core import servertools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import logger
from channels import autoplay
IDIOMAS = {'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['directo']
list_quality = ['default']
HOST = "https://animeflv.ru/"
@@ -16,25 +24,25 @@ HOST = "https://animeflv.ru/"
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="novedades_episodios", title="Últimos episodios", url=HOST))
itemlist.append(Item(channel=item.channel, action="novedades_anime", title="Últimos animes", url=HOST))
itemlist.append(Item(channel=item.channel, action="listado", title="Animes", url=HOST + "animes/nombre/lista"))
itemlist.append(Item(channel=item.channel, title="Buscar por:"))
itemlist.append(Item(channel=item.channel, action="search", title=" Título"))
itemlist.append(Item(channel=item.channel, action="search_section", title=" Género", url=HOST + "animes",
extra="genre"))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def clean_title(title):
year_pattern = r'\([\d -]+?\)'
return re.sub(year_pattern, '', title).strip()
@@ -45,32 +53,26 @@ def search(item, texto):
texto = texto.replace(" ", "+")
post = "value=%s" % texto
data = httptools.downloadpage(item.url, post=post).data
try:
dict_data = jsontools.load(data)
for e in dict_data:
title = clean_title(scrapertools.htmlclean(e["name"]))
url = e["url"]
plot = e["description"]
thumbnail = HOST + e["thumb"]
thumbnail = e["thumb"]
new_item = item.clone(action="episodios", title=title, url=url, plot=plot, thumbnail=thumbnail)
if "Pelicula" in e["genre"]:
new_item.contentType = "movie"
new_item.contentTitle = title
else:
new_item.show = title
new_item.context = renumbertools.context(item)
itemlist.append(new_item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
@@ -79,41 +81,32 @@ def search_section(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
patron = 'id="%s_filter"[^>]+><div class="inner">(.*?)</div></div>' % item.extra
data = scrapertools.find_single_match(data, patron)
matches = re.compile('<a href="([^"]+)"[^>]+>(.*?)</a>', re.DOTALL).findall(data)
for url, title in matches:
url = "%s/nombre/lista" % url
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url,
context=renumbertools.context(item)))
return itemlist
def newest(categoria):
itemlist = []
if categoria == 'anime':
itemlist = novedades_episodios(Item(url=HOST))
return itemlist
def novedades_episodios(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, '<ul class="ListEpisodios[^>]+>(.*?)</ul>')
matches = re.compile('href="([^"]+)"[^>]+>.+?<img src="([^"]+)".+?"Capi">(.*?)</span>'
'<strong class="Title">(.*?)</strong>', re.DOTALL).findall(data)
itemlist = []
for url, thumbnail, str_episode, show in matches:
try:
episode = int(str_episode.replace("Ep. ", ""))
except ValueError:
@@ -121,42 +114,31 @@ def novedades_episodios(item):
episode = 1
else:
season, episode = renumbertools.numbered_for_tratk(item.channel, item.show, 1, episode)
title = "%s: %sx%s" % (show, season, str(episode).zfill(2))
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, thumbnail=thumbnail,
fulltitle=title)
itemlist.append(new_item)
return itemlist
def novedades_anime(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, '<ul class="ListAnimes[^>]+>(.*?)</ul>')
matches = re.compile('<img src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
itemlist = []
for thumbnail, url, title in matches:
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
title = clean_title(title)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title)
new_item.show = title
new_item.context = renumbertools.context(item)
itemlist.append(new_item)
return itemlist
@@ -166,19 +148,16 @@ def listado(item):
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
url_pagination = scrapertools.find_single_match(data, '<li class="current">.*?</li>[\s]<li><a href="([^"]+)">')
data = scrapertools.find_single_match(data, '</div><div class="full">(.*?)<div class="pagination')
matches = re.compile('<img.+?src="([^"]+)".+?<a href="([^"]+)">(.*?)</a>.+?'
'<div class="full item_info genres_info">(.*?)</div>.+?class="full">(.*?)</p>',
re.DOTALL).findall(data)
itemlist = []
for thumbnail, url, title, genres, plot in matches:
title = clean_title(title)
url = urlparse.urljoin(HOST, url)
thumbnail = urlparse.urljoin(HOST, thumbnail)
new_item = Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail,
fulltitle=title, plot=plot)
if "Pelicula Anime" in genres:
new_item.contentType = "movie"
new_item.contentTitle = title
@@ -189,7 +168,6 @@ def listado(item):
if url_pagination:
url = urlparse.urljoin(HOST, url_pagination)
title = ">> Pagina Siguiente"
itemlist.append(Item(channel=item.channel, action="listado", title=title, url=url))
return itemlist
@@ -203,7 +181,6 @@ def episodios(item):
item.plot = scrapertools.find_single_match(data, 'Description[^>]+><p>(.*?)</p>')
data = scrapertools.find_single_match(data, '<div class="Sect Episodes full">(.*?)</div>')
matches = re.compile('<a href="([^"]+)"[^>]+>(.+?)</a', re.DOTALL).findall(data)
for url, title in matches:
title = title.strip()
url = urlparse.urljoin(item.url, url)
@@ -224,27 +201,27 @@ def episodios(item):
def findvideos(item):
logger.info()
itemlist = []
_id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
post = "embed_id=%s" % _id
data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
dict_data = jsontools.load(data)
headers = dict()
headers["Referer"] = item.url
data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
dict_data = jsontools.load(data)
if not dict_data:
return itemlist
list_videos = dict_data["playlist"][0]
if isinstance(list_videos, list):
for video in list_videos:
itemlist.append(Item(channel=item.channel, action="play", url=video["file"],
show=re.escape(item.show),
title=item.title, plot=item.plot, fulltitle=item.title,
thumbnail=item.thumbnail))
else:
for video in list_videos.values():
video += "|User-Agent=Mozilla/5.0"
itemlist.append(Item(channel=item.channel, action="play", url=video, show=re.escape(item.show),
title=item.title, plot=item.plot, fulltitle=item.title,
thumbnail=item.thumbnail))
_id = scrapertools.find_single_match(item.url, HOST + 'ver/([^/]+)/')
data = httptools.downloadpage(item.url).data
bloque = scrapertools.find_single_match(data, 'atrl(.*?)choose_quality')
matches = scrapertools.find_multiple_matches(bloque, '<option value="([^"]+)')
headers = {"Referer" : item.url}
for url in matches:
post = "embed_id=%s" % _id
xserver = scrapertools.find_single_match(url, 's=(\w+)')
data = httptools.downloadpage(HOST + "get_video_info_v2?s=%s" %xserver, post=post).data
dict_data = jsontools.load(data)
data = httptools.downloadpage(dict_data["value"], headers=headers).data
matches = scrapertools.find_multiple_matches(data, '"file":"([^"]+)"')
for url in matches:
url = url.replace("\\","")
itemlist.append(item.clone(action="play", url=url, title='%s',
fulltitle=item.title, language='VOSE'
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist

View File

@@ -9,11 +9,19 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import renumbertools, autoplay
CHANNEL_HOST = "https://www.animeid.tv/"
IDIOMAS = {'Latino': 'LAT', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['animeid']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="novedades_series", title="Últimas series", url=CHANNEL_HOST))
@@ -25,6 +33,9 @@ def mainlist(item):
Item(channel=item.channel, action="letras", title="Listado alfabetico", url=CHANNEL_HOST))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar..."))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -59,7 +70,7 @@ def search(item, texto):
["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0"])
headers.append(["Referer", CHANNEL_HOST])
headers.append(["X-Requested-With", "XMLHttpRequest"])
data = scrapertools.cache_page(item.url, headers=headers)
data = httptools.downloadpage(item.url, headers=headers).data
data = data.replace("\\", "")
patron = '{"id":"([^"]+)","text":"([^"]+)","date":"[^"]*","image":"([^"]+)","link":"([^"]+)"}'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -69,9 +80,12 @@ def search(item, texto):
url = urlparse.urljoin(item.url, scrapedurl)
thumbnail = scrapedthumbnail
plot = ""
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot,
show=title, viewmode="movie_with_plot"))
context=context, show=title, viewmode="movie_with_plot"))
return itemlist
@@ -93,8 +107,11 @@ def novedades_series(item):
for url, tipo, title in matches:
scrapedtitle = title + " (" + tipo + ")"
scrapedurl = urlparse.urljoin(item.url, url)
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
show=title, viewmode="movie_with_plot"))
context=context, show=title, viewmode="movie_with_plot"))
return itemlist
@@ -102,7 +119,7 @@ def novedades_episodios(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="lastcap">(.*?)</section>')
patronvideos = '(?s)<a href="([^"]+)">[^<]+<header>([^<]+).*?src="([^"]+)"[\s\S]+?<p>(.+?)</p>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
for url, title, thumbnail, plot in matches:
@@ -165,13 +182,16 @@ def series(item):
scrapedurl = urlparse.urljoin(item.url, url)
scrapedthumbnail = thumbnail
scrapedplot = plot
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle,
thumbnail=scrapedthumbnail, plot=scrapedplot, show=scrapedtitle, context=context,
viewmode="movie_with_plot"))
itemlist = sorted(itemlist, key=lambda it: it.title)
try:
page_url = scrapertools.find_single_match(data, '<li><a href="([^"]+)">&gt;</a></li>')
itemlist.append(Item(channel=item.channel, action="series", title=">> Página siguiente",
itemlist.append(Item(channel=item.channel, action="series", title="[COLOR cyan]>> Página siguiente[/COLOR]",
url=urlparse.urljoin(item.url, page_url), viewmode="movie_with_plot", thumbnail="",
plot=""))
except:
@@ -185,33 +205,39 @@ def episodios(item, final=True):
data = httptools.downloadpage(item.url).data
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)')
CHANNEL_HEADERS = [
["Host", "m.animeid.tv"],
["X-Requested-With", "XMLHttpRequest"]
["Host", "m.animeid.tv"],
["X-Requested-With", "XMLHttpRequest"]
]
page = 0
while True:
page += 1
u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" %(data_id, page)
u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" % (data_id, page)
data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
# When there is no more data it returns: "list":[]
if '"list":[]' in data:
break
dict_data = jsontools.load(data)
list = dict_data['list']
list = dict_data['list'][::-1]
for dict in list:
itemlist.append(Item(action = "findvideos",
channel = item.channel,
title = "1x" + dict["numero"] + " - " + dict["date"],
url = CHANNEL_HOST + dict['href'],
thumbnail = item.thumbnail,
show = item.show,
viewmode = "movie_with_plot"
))
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1,
int(dict["numero"]))
title = "%sx%s - %s" % (season, str(episode).zfill(2), dict["date"])
itemlist.append(Item(action="findvideos",
channel=item.channel,
title=title,
url=CHANNEL_HOST + dict['href'],
thumbnail=item.thumbnail,
show=item.show,
viewmode="movie_with_plot"
))
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show))
itemlist.append(Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
action="download_all_episodes", extra="episodios", show=item.show))
itemlist.append(
Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=item.show))
itemlist.append(
Item(channel=item.channel, title="[COLOR white]Descargar todos los episodios de la serie[/COLOR]",
url=item.url,
action="download_all_episodes", extra="episodios", show=item.show))
return itemlist
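renumbertools.numbered_for_tratk maps the site's flat episode counter onto the (season, episode) pair the user configured, so the zero-padded titles line up with Trakt/TMDB numbering. A worked example of the title format above, assuming the user's renumbering maps absolute episode 14 to season 2, episode 2:

# illustrative values only; the real mapping depends on the user's renumber config
season, episode = 2, 2
title = "%sx%s - %s" % (season, str(episode).zfill(2), "2019-03-20")
print(title)  # -> "2x02 - 2019-03-20"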
@@ -249,4 +275,8 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title="Siguiente: " + title_siguiente,
url=CHANNEL_HOST + url_siguiente, thumbnail=item.thumbnail, plot=item.plot, show=item.show,
fanart=item.thumbnail, folder=True))
# Required by AutoPlay
autoplay.start(itemlist, item)
return itemlist

View File

@@ -1,338 +0,0 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from core import httptools
from core import servertools
from core import jsontools
from core import scrapertools
from core.item import Item
from platformcode import platformtools, config, logger
__modo_grafico__ = config.get_setting('modo_grafico', 'animemovil')
__perfil__ = ''
# Set the color profile
perfil = [['0xFFFFE6CC', '0xFFFFCE9C', '0xFF994D00', '0xFFFE2E2E', '0xFFFFD700'],
['0xFFA5F6AF', '0xFF5FDA6D', '0xFF11811E', '0xFFFE2E2E', '0xFFFFD700'],
['0xFF58D3F7', '0xFF2E9AFE', '0xFF2E64FE', '0xFFFE2E2E', '0xFFFFD700']]
if __perfil__ < 3:
color1, color2, color3, color4, color5 = perfil[__perfil__]
else:
color1 = color2 = color3 = color4 = color5 = ""
host = "http://animemovil.com"
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(Item(channel=item.channel, action="recientes", title="Episodios Recientes", thumbnail=item.thumbnail,
url=host, text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="listado", title="Anime", thumbnail=item.thumbnail,
url=host+'/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20', text_color=color1, contentType="tvshow", extra="recientes"))
itemlist.append(Item(channel=item.channel, action="list_by_json", title="En emisión", thumbnail=item.thumbnail,
text_color=color2, contentType="tvshow"))
itemlist.append(Item(channel=item.channel, action="indices", title="Índices", thumbnail=item.thumbnail,
text_color=color2))
itemlist.append(Item(channel=item.channel, action="search", title="Buscar...",
thumbnail=item.thumbnail, text_color=color3))
itemlist.append(item.clone(title="Configurar canal", action="openconfig", text_color=color5, folder=False))
if renumbertools.context:
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def openconfig(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
item.url = "%s/api/buscador?q=%s&letra=ALL&genero=ALL&estado=2&offset=0&limit=30" % (host, texto.replace(" ", "+"))
return list_by_json(item)
def recientes(item):
logger.info()
item.contentType = "tvshow"
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\s{2,}','', data)
bloque = scrapertools.find_single_match(data, '<ul class="hover">(.*?)</ul>')
patron = '<li><a href="([^"]+)" title="([^"]+)".*?src="([^"]+)"'
matches = scrapertools.find_multiple_matches(bloque, patron)
for url, title, thumb in matches:
url = host + url
try:
contentTitle = re.split(r"(?i) \d+ (?:Sub Español|Audio Español|Español Latino)", title)[0]
except:
contentTitle = ""
contentTitle = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", contentTitle)
tipo = "tvshow"
show = contentTitle
action = "episodios"
context = renumbertools.context(item)
if item.extra == "recientes":
action = "findvideos"
context = ""
if not item.extra and (url.endswith("-pelicula/") or url.endswith("-pelicula")):
tipo = "movie"
show = ""
action = "peliculas"
if not thumb.startswith("http"):
thumb = "http:%s" % thumb
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=contentTitle, contentSerieName=show, infoLabels=infoLabels,
thumb_=thumb, contentType=tipo, context=context))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
if item.extra and itemlist:
for it in itemlist:
it.thumbnail = it.thumb_
except:
pass
return itemlist
def listado(item):
logger.info()
itemlist = []
data = jsontools.load(httptools.downloadpage(item.url).data)
status = data.get('status')
    data = data.get('result')
for it in data.get("items", []):
scrapedtitle = it["title"]
url = "%s/%s/" % (host, it["slug"])
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % it['id']
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", scrapedtitle)
tipo = "tvshow"
show = title
action = "episodios"
if url.endswith("-pelicula/") or url.endswith("-pelicula"):
tipo = "movie"
show = ""
action = "peliculas"
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(item.clone(action=action, title=scrapedtitle, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=show, infoLabels=infoLabels,
context=renumbertools.context(item), contentType=tipo))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if status and itemlist:
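# The API pages through results via the 'offset' query parameter; bump it
# in place to build the next-page URL.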
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
if offset:
offset = int(offset) + 2
else:
offset = 0
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
return itemlist
def indices(item):
logger.info()
itemlist = []
if "Índices" in item.title:
itemlist.append(item.clone(title="Por Género", url="%s/anime" % host))
itemlist.append(item.clone(title="Por Letra", url="%s/anime" % host))
itemlist.append(item.clone(action="list_by_json", title="Lista completa de Animes",
url="%s/api/buscador?q=&letra=ALL&genero=ALL&estado=2&offset=0&limit=20" % host))
else:
data = httptools.downloadpage(item.url).data
data = re.sub('\n|\s{2,}', '', data)
if 'Letra' in item.title:
bloque = scrapertools.find_single_match(data, '<select name="letra"(.*?)</select>')
patron = '<option value="(\w)"'
elif 'Género' in item.title:
bloque = scrapertools.find_single_match(data, '<select name="genero"(.*?)</select>')
patron = '<option value="(\d+.*?)/'
matches = scrapertools.find_multiple_matches(bloque, patron)
for title in matches:
if "Letra" in item.title:
url = '%s/api/buscador?q=&letra=%s&genero=ALL&estado=2&offset=0&limit=20' % (host, title)
else:
value = scrapertools.find_single_match(title, '(\d+)"')
title = scrapertools.find_single_match(title, '\d+">(.*?)<')
url = '%s/api/buscador?q=&letra=ALL&genero=%s&estado=2&offset=0&limit=20' % (host, value)
itemlist.append(item.clone(action="list_by_json", url=url, title=title))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub('\n|\s{2,}', '', data)
show = scrapertools.find_single_match(data, '<div class="x-title">(.*?)</div>')
show = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub| Español| Peliculas| Audio| Latino", "", show)
if not item.infoLabels["plot"]:
item.infoLabels["plot"] = scrapertools.find_single_match(data, '<div class="x-sinopsis">\s*(.*?)</div>')
bloque = scrapertools.find_single_match(data, '<ul class="list"(.*?)</ul>')
matches = scrapertools.find_multiple_matches(bloque, '<li><a href="([^"]+)" title="([^"]+)"')
for url, title in matches:
url = host + url
epi = scrapertools.find_single_match(title, '.+?(\d+) (?:Sub|Audio|Español)')
#epi = scrapertools.find_single_match(title, '(?i)%s.*? (\d+) (?:Sub|Audio|Español)' % item.contentSerieName)
new_item = item.clone(action="findvideos", url=url, title=title, extra="")
if epi:
if "Especial" in title:
epi = 0
season, episode = renumbertools.numbered_for_tratk(
item.channel, item.contentSerieName, 1, int(epi))
new_item.infoLabels["episode"] = episode
new_item.infoLabels["season"] = season
new_item.title = "%sx%s %s" % (season, episode, title)
itemlist.append(new_item)
if item.infoLabels.get("tmdb_id") or item.extra == "recientes" or item.extra == "completo":
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
except:
pass
if config.get_videolibrary_support() and itemlist:
itemlist.append(Item(channel=item.channel, title="Añadir serie a la videoteca", url=item.url,
action="add_serie_to_library", extra="episodios", contentTitle=item.contentTitle,
contentSerieName=item.contentSerieName, text_color=color4, fanart=item.fanart,
thumbnail=item.thumbnail))
return itemlist
def list_by_json(item):
logger.info()
itemlist = []
repeat = 1
status = False
    if item.url == '':
        item.url = host + "/api/buscador?limit=30&estado=1&dia=%s"
        repeat = 6

    for element in range(0, repeat):
        if repeat != 1:
            data = jsontools.load(httptools.downloadpage(item.url % element).data)
        else:
            data = jsontools.load(httptools.downloadpage(item.url).data)
status = data.get('status')
json_data = data.get('result')
elem_data = json_data['items']
for item_data in elem_data:
url = '%s/%s/' % (host, item_data['slug'])
title = item_data['title']
title = re.sub(r"(?i) Ova| Especiales| \(Pelicula[s]*\)| \(Película[s]*\)| Sub Español| Peliculas", "",
title)
thumb = 'http://media.animemovil.com/animes/%s/wallpaper_small.jpg' % item_data['id']
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(
item.clone(action="episodios", title=title, url=url, thumbnail=thumb, text_color=color3,
contentTitle=title, contentSerieName=title, extra="recientes",
context=renumbertools.context(item), infoLabels=infoLabels))
if status and itemlist:
offset = scrapertools.find_single_match(item.url, 'offset=(\d+)')
if offset:
offset = int(offset) + 2
else:
offset = 0
url = re.sub(r'offset=\d+', 'offset=%s' % offset, item.url)
itemlist.append(Item(channel=item.channel, action="listado", url=url, title=">> Página Siguiente",
thumbnail=item.thumbnail, text_color=color2))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'\n|\s{2,}', '', data)
strm_id = scrapertools.find_single_match(data, '"id": (.*?),')
streams = scrapertools.find_single_match(data, '"stream": (.*?)};')
dict_strm = jsontools.load(streams)
base_url = 'http:%s%s/' % (dict_strm['accessPoint'], strm_id)
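# Mirrors are served from the access point as signed, expiring URLs:
# http://<accessPoint><id>/<server>?expire=...&signature=..., assembled from
# the JSON blob embedded in the episode page.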
for server in dict_strm['servers']:
expire = dict_strm['expire']
signature = dict_strm['signature']
last_modify = dict_strm['last_modify']
callback = 'playerWeb'
        strm_url = base_url + '%s?expire=%s&callback=%s&signature=%s&last_modify=%s' % (server, expire, callback,
                                                                                        signature, last_modify)
try:
strm_data = httptools.downloadpage(strm_url).data
strm_data = scrapertools.unescape(strm_data)
title = '%s'
language = ''
            if server not in ['fire', 'meph']:
                urls = scrapertools.find_multiple_matches(strm_data, '"(?:file|src)"*?:.*?"(.*?)"')
                for url in urls:
                    if url != '':
                        url = url.replace('\\/', '/')
                        itemlist.append(Item(channel=item.channel, title=title, url=url, action='play'))
            else:
                # 'fire' and 'meph' mirrors hand back the final URL through an AJAX GET
                url = scrapertools.find_single_match(strm_data, 'xmlhttp\.open\("GET", "([^"]*)"')
                if url != '':
                    url = url.replace('\\/', '/')
                    itemlist.append(Item(channel=item.channel, title=title, url=url, action='play'))
except:
pass
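# get_servers_itemlist identifies the hosting server for each URL and fills the
# '%s' placeholder in each item's title with the server name.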
servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server)
return itemlist
def newest(categoria):
logger.info()
item = Item()
try:
item.url = host
item.extra = "novedades"
itemlist = recientes(item)
    # Catch the exception so a failing channel does not interrupt the global "newest" section
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist
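For orientation, a minimal sketch (illustration only, not part of the channel) of how Alfa's launcher roughly drives the entry points above: it builds an Item and calls the function named by its action.

item = Item(channel="animemovil")
for entry in mainlist(item):                   # build the channel's root menu
    if entry.action == "recientes":
        episodes = recientes(entry)            # scrape the recent-episodes grid
        if episodes:
            mirrors = findvideos(episodes[0])  # resolve playable mirrors for one episode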

View File

@@ -10,7 +10,7 @@ from channelselector import get_thumb
from core import tmdb
from core.item import Item
from platformcode import logger, config
from channels import autoplay
from channels import autoplay, renumbertools
from channels import filtertools
tgenero = {"Comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
@@ -33,8 +33,8 @@ tgenero = {"Comedia": "https://s7.postimg.cc/ne9g9zgwb/comedia.png",
host = "http://www.animeshd.tv"
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'poseidonhd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'poseidonhd')
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', 'animeshd')
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', 'animeshd')
IDIOMAS = {'Castellano':'CAST','Latino': 'LAT', 'Subtitulado': 'VOSE'}
@@ -83,7 +83,7 @@ def mainlist(item):
))
itemlist = filtertools.show_option(itemlist, item.channel, list_language, list_quality)
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
@@ -113,7 +113,9 @@ def lista(item):
patron = 'class="anime"><a href="([^"]+)">'
patron +='<div class="cover" style="background-image: url\((.*?)\)">.*?<h2>([^<]+)<\/h2>'
matches = re.compile(patron, re.DOTALL).findall(data)
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
thumbnail = host + scrapedthumbnail
@@ -191,10 +193,11 @@ def episodios(item):
infoLabels = item.infoLabels
for scrapedurl, scrapedlang, scrapedtitle, episode in matches:
language = scrapedlang
title = scrapedtitle + " " + "1x" + episode
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(episode))
title = scrapedtitle + " " + str(season) +"x" + str(episode)
url = scrapedurl
infoLabels['season'] ='1'
infoLabels['episode'] = episode
infoLabels['season'] = str(season)
infoLabels['episode'] = str(episode)
itemlist.append(Item(channel=item.channel, title=title, contentSerieName=item.contentSerieName, url=url,
action='findvideos', language=IDIOMAS[language], infoLabels=infoLabels))
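For context, renumbertools.numbered_for_tratk maps the channel's flat episode numbering to the (season, episode) pair stored in the user's renumbering data. A minimal illustration, with a hypothetical show and values:

# Hypothetical: the pair returned depends entirely on the renumbering data
# the user has saved for the show.
season, episode = renumbertools.numbered_for_tratk('animeshd', 'Example Show', 1, 14)
title = 'Example Show %sx%s' % (season, str(episode).zfill(2))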
@@ -211,22 +214,32 @@ def episodios(item):
def findvideos(item):
logger.info()
from channels.pelisplus import add_vip
itemlist = []
data = get_source(item.url)
patron = "<option value=(.*?) data-content=.*?width='16'> (.*?) <span class='text-muted'>"
patron = "<option value=\"([^\"]+)\" data-content=.*?width='16'> (.*?) <span class='text-muted'>"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, language in matches:
if 'jpg' in scrapedurl:
vip_data = httptools.downloadpage(scrapedurl, follow_redirects=False)
scrapedurl = vip_data.headers['location']
title = '%s [%s]'
itemlist.append(item.clone(title=title, url=scrapedurl.strip(), action='play',
language=IDIOMAS[language]))
vip = False
if not config.get_setting('unify'):
title = ' [%s]' % IDIOMAS[language]
else:
title = ''
if 'pelisplus.net' in scrapedurl:
itemlist += add_vip(item, scrapedurl, IDIOMAS[language])
vip = True
elif 'server' in scrapedurl:
new_data = get_source(scrapedurl)
scrapedurl = scrapertools.find_single_match(new_data, '<iframe src="([^"]+)"')
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % (x.server.capitalize(), x.language))
if not vip:
itemlist.append(item.clone(title='%s'+title, url=scrapedurl.strip(), action='play',
language=IDIOMAS[language]))
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)

View File

@@ -1,30 +0,0 @@
{
"id": "animeyt",
"name": "AnimeYT",
"active": true,
"adult": false,
"language": "cast, lat",
"thumbnail": "http://i.imgur.com/dHpupFk.png",
"categories": [
"anime",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "modo_grafico",
"type": "bool",
"label": "información extra",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,510 +0,0 @@
# -*- coding: utf-8 -*-
import re
import urlparse
from channels import renumbertools
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from core import tmdb
from platformcode import config,logger
import gktools, random, time, urllib
__modo_grafico__ = config.get_setting('modo_grafico', 'animeyt')
HOST = "http://animeyt.tv/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="novedades", url=HOST))
itemlist.append(Item(channel=item.channel, title="Recientes", action="recientes", url=HOST))
itemlist.append(Item(channel=item.channel, title="Alfabético", action="alfabetico", url=HOST))
itemlist.append(Item(channel=item.channel, title="Búsqueda", action="search", url=urlparse.urljoin(HOST, "busqueda?terminos=")))
itemlist = renumbertools.show_option(item.channel, itemlist)
return itemlist
def novedades(item):
logger.info()
itemlist = list()
if not item.pagina:
item.pagina = 0
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron_novedades = '<div class="capitulos-portada">[\s\S]+?<h2>Comentarios</h2>'
data_novedades = scrapertools.find_single_match(data, patron_novedades)
patron = 'href="([^"]+)"[\s\S]+?src="([^"]+)"[^<]+alt="([^"]+) (\d+)([^"]+)'
matches = scrapertools.find_multiple_matches(data_novedades, patron)
for url, img, scrapedtitle, eps, info in matches[item.pagina:item.pagina + 20]:
title = scrapedtitle + " " + "1x" + eps + info
title = title.replace("Sub Español", "").replace("sub español", "")
infoLabels = {'filtro': {"original_language": "ja"}.items()}
itemlist.append(Item(channel=item.channel, title=title, url=url, thumb=img, action="findvideos", contentTitle=scrapedtitle, contentSerieName=scrapedtitle, infoLabels=infoLabels, contentType="tvshow"))
try:
from core import tmdb
tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
for it in itemlist:
it.thumbnail = it.thumb
except:
pass
if len(matches) > item.pagina + 20:
pagina = item.pagina + 20
itemlist.append(item.clone(channel=item.channel, action="novedades", url=item.url, title=">> Página Siguiente", pagina=pagina))
return itemlist
def alfabetico(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
for letra in '0ABCDEFGHIJKLMNOPQRSTUVWXYZ':
titulo = letra
if letra == "0":
letra = "num"
itemlist.append(Item(channel=item.channel, action="recientes", title=titulo,
url=urlparse.urljoin(HOST, "animes?tipo=0&genero=0&anio=0&letra={letra}".format(letra=letra))))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ","+")
item.url = item.url+texto
if texto!='':
return recientes(item)
def recientes(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron_recientes = '<article class="anime">[\s\S]+?</main>'
data_recientes = scrapertools.find_single_match(data, patron_recientes)
patron = '<a href="([^"]+)"[^<]+<img src="([^"]+)".+?js-synopsis-reduce">(.*?)<.*?<h3 class="anime__title">(.*?)<small>(.*?)</small>'
matches = scrapertools.find_multiple_matches(data_recientes, patron)
for url, thumbnail, plot, title, cat in matches:
itemlist.append(item.clone(title=title, url=url, action="episodios", show=title, thumbnail=thumbnail, plot=plot, cat=cat, context=renumbertools.context(item)))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
paginacion = scrapertools.find_single_match(data, '<a class="pager__link icon-derecha last" href="([^"]+)"')
paginacion = scrapertools.decodeHtmlentities(paginacion)
if paginacion:
itemlist.append(Item(channel=item.channel, action="recientes", title=">> Página Siguiente", url=paginacion))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<span class="icon-triangulo-derecha"></span>.*?<a href="([^"]+)">([^"]+) (\d+)'
matches = scrapertools.find_multiple_matches(data, patron)
for url, scrapedtitle, episode in matches:
season = 1
episode = int(episode)
season, episode = renumbertools.numbered_for_tratk(item.channel, scrapedtitle, season, episode)
title = "%sx%s %s" % (season, str(episode).zfill(2), scrapedtitle)
itemlist.append(item.clone(title=title, url=url, action='findvideos'))
if config.get_videolibrary_support:
itemlist.append(Item(channel=item.channel, title="Añadir serie a la biblioteca", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
duplicados = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
from collections import OrderedDict  # dict changed to OrderedDict to keep the same order as on the website
matches = scrapertools.find_multiple_matches(data, '<li><a id="mirror(\d*)" class="link-veranime[^"]*" href="[^"]*">([^<]*)')
d_links = OrderedDict(matches)
matches = scrapertools.find_multiple_matches(data, 'if \(mirror == (\d*)\).*?iframe src="([^"]*)"')
d_frames = OrderedDict(matches)
for k in d_links:
if k in d_frames and d_frames[k] != '':
tit = scrapertools.find_single_match(d_frames[k], '/([^\./]*)\.php\?')
if tit == '':
tit = 'mega' if 'mega.nz/' in d_frames[k] else 'dailymotion' if 'dailymotion.com/' in d_frames[k] else'noname'
if tit == 'id' and 'yourupload.com/' in d_frames[k]: tit = 'yourupload'
title = 'Opción %s (%s)' % (d_links[k], tit)
itemlist.append(item.clone(channel=item.channel, folder=False, title=title, action="play", url=d_frames[k], referer=item.url))
if item.extra != "library":
if config.get_videolibrary_support() and item.extra:
itemlist.append(item.clone(channel=item.channel, title="[COLOR yellow]Añadir pelicula a la videoteca[/COLOR]", url=item.url, action="add_pelicula_to_library", extra="library", contentTitle=item.show, contentType="movie"))
return itemlist
def play(item):
logger.info()
itemlist = []
if item.url.startswith('https://www.dailymotion.com/'):
itemlist.append(item.clone(url=item.url, server='dailymotion'))
elif item.url.startswith('https://mega.nz/'):
itemlist.append(item.clone(url=item.url.replace('embed',''), server='mega'))
elif item.url.startswith('https://s2.animeyt.tv/rakuten.php?'):
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="google-site-verification" content="([^"]*)"')
if not gsv: return itemlist
suto = gktools.md5_dominio(item.url)
sufijo = '3497510'
token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
link, subtitle = gktools.get_play_link_id(data, item.url)
url = 'https://s2.animeyt.tv/rakuten/plugins/gkpluginsphp.php'
post = "link=%s&token=%s" % (link, token)
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer, subtitle)
elif item.url.startswith('https://s3.animeyt.tv/amz.php?'):
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
v_token = scrapertools.find_single_match(data, "var v_token='([^']*)'")
if not gsv or not v_token: return itemlist
suto = gktools.md5_dominio(item.url)
sufijo = '9457610'
token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
url = 'https://s3.animeyt.tv/amz_animeyts.php'
post = "v_token=%s&token=%s&handler=%s" % (v_token, token, 'Animeyt')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s2.animeyt.tv/lola.php?'):
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_cd, s_file = scrapertools.find_single_match(data, "var cd='([^']*)';\s*var file='([^']*)'")
if not gsv or not s_cd or not s_file: return itemlist
suto = gktools.md5_dominio(item.url)
sufijo = '8134976'
token = gktools.generar_token('"'+gsv+'"', suto+'yt'+suto+sufijo)
url = 'https://s2.animeyt.tv/minha_animeyt.php'
post = "cd=%s&file=%s&token=%s&handler=%s" % (s_cd, s_file, token, 'Animeyt')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s4.animeyt.tv/chumi.php?'): #https://s4.animeyt.tv/chumi.php?cd=3481&file=4
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_cd, s_file = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&file=([^&]*)')
if not gsv or not s_cd or not s_file: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s4.animeyt.tv/minha/minha_animeyt.php'
post = "cd=%s&id=%s&archive=%s&ip=%s&Japan=%s" % (s_cd, s_file, archive, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s3.animeyt.tv/mega.php?'): #https://s3.animeyt.tv/mega.php?v=WmpHMEVLVTNZZktyaVAwai9sYzhWV1ZRTWh0WTZlNGZ3VzFVTXhMTkx2NGlOMjRYUHhZQlMvaUFsQlJFbHBVTA==
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_v = scrapertools.find_single_match(item.url, '\?v=([^&]*)')
if not gsv or not s_v: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s3.animeyt.tv/mega_animeyts.php'
post = "v=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_v, archive, item.url, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s2.animeyt.tv/naruto/naruto.php?'): #https://s2.animeyt.tv/naruto/naruto.php?id=3477&file=11.mp4
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_id, s_file = scrapertools.find_single_match(item.url, '\?id=([^&]*)&file=([^&]*)')
if not gsv or not s_id or not s_file: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s2.animeyt.tv/naruto/narutos_animeyt.php'
post = "id=%s&file=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_id, s_file, archive, urllib.quote(item.url), ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s4.animeyt.tv/facebook.php?'): #https://s4.animeyt.tv/facebook.php?cd=3481&id=4
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_cd, s_id = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&id=([^&]*)')
if not gsv or not s_cd or not s_id: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s4.animeyt.tv/facebook/facebook_animeyts.php'
post = "cd=%s&id=%s&archive=%s&referer=%s&ip=%s&Japan=%s" % (s_cd, s_id, archive, urllib.quote(item.url), ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s.animeyt.tv/v4/media.php?'): #https://s.animeyt.tv/v4/media.php?id=SmdMQ2Y0NUhFK2hOZlYzbVJCbnE3QT09
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_id = scrapertools.find_single_match(item.url, '\?id=([^&]*)')
if not gsv or not s_id: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '8049762' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s.animeyt.tv/v4/gsuite_animeyts.php'
post = "id=%s&archive=%s&ip=%s&Japan=%s" % (s_id, archive, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s10.animeyt.tv/yourupload.com/id.php?'): #https://s10.animeyt.tv/yourupload.com/id.php?id=62796D77774A4E4363326642
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_id = scrapertools.find_single_match(item.url, '\?id=([^&]*)')
if not gsv or not s_id: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '8049762' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s10.animeyt.tv/yourupload.com/chinese_streaming.php'
post = "id=%s&archive=%s&ip=%s&Japan=%s" % (s_id, archive, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
elif item.url.startswith('https://s4.animeyt.tv/onedrive.php?'): #https://s4.animeyt.tv/onedrive.php?cd=3439&id=12
# 1- Download
data, ck = gktools.get_data_and_cookie(item)
# 2- Compute the request data
gsv = scrapertools.find_single_match(data, '<meta name="Animeyt-Token" content="([^"]*)"')
s_cd, s_id = scrapertools.find_single_match(item.url, '\?cd=([^&]*)&id=([^&]*)')
if not gsv or not s_cd or not s_id: return itemlist
ip = gktools.toHex(gsv) + str(1000000 + random.randint(0,9000000)) + str(100000 + random.randint(0,900000))
gsv_bis = gktools.transforma_gsv(gsv, '159753')
p1 = '1705f5652bb6546ab3643ff698e' + gsv[-5:]
p2 = '8388ca3fd07' + gsv[-5:] + gsv_bis
texto = gktools.toHex(gktools.encode_rijndael(gsv, p1, p2))
suto = gktools.md5_dominio(item.url)
sufijo = '147268278' + gsv[-5:]
prefijo = gsv[:-5] + gsv_bis
token = gktools.generar_token('"'+texto+'"', prefijo+suto+'yt'+suto+sufijo)
archive = gktools.toHex(token)
url = 'https://s4.animeyt.tv/onedrive/onedrive_animeyts.php'
post = "cd=%s&id=%s&archive=%s&ip=%s&Japan=%s" % (s_cd, s_id, archive, ip, 'Asia')
# 3- Download the JSON
data = gktools.get_data_json(url, post, ck, item.url)
# 4- Extract the links
itemlist = gktools.extraer_enlaces_json(data, item.referer)
return itemlist
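Each branch of play() above repeats the same four-step handshake: download the mirror page and its cookie, derive a token from the embedded meta tag, POST it to a resolver script, and extract the stream links from the returned JSON. A factored sketch of that shared pattern, built only from the gktools helpers already used above; resolver_url, post_template and sufijo vary per mirror and are placeholders here:

def gk_resolve(item, resolver_url, post_template, sufijo, meta_name='Animeyt-Token'):
    # Sketch only: the per-mirror branches above inline this sequence.
    data, ck = gktools.get_data_and_cookie(item)             # 1- download
    gsv = scrapertools.find_single_match(
        data, '<meta name="%s" content="([^"]*)"' % meta_name)
    if not gsv:
        return []
    suto = gktools.md5_dominio(item.url)                     # 2- compute the token
    token = gktools.generar_token('"' + gsv + '"', suto + 'yt' + suto + sufijo)
    data = gktools.get_data_json(resolver_url, post_template % token, ck, item.url)  # 3- fetch JSON
    return gktools.extraer_enlaces_json(data, item.referer)  # 4- extract links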

View File

@@ -116,7 +116,7 @@ def lista(item):
itemlist.append(item.clone(title=scrapedtitle, contentSerieName=show,url=scrapedurl, plot=scrapedplot,
thumbnail=scrapedthumbnail, action="episodios", context=context))
tmdb.set_infoLabels(itemlist, seekTmdb=True)
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Pagina Siguente >>>', contentTitle=item.title, action='lista'))
itemlist.append(Item(channel=item.channel, url=item.url, range=next_page, title='Pagina Siguente >>>', contentTitle=item.contentTitle, action='lista'))
return itemlist

View File

@@ -25,14 +25,14 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Peliculas",
url=urlparse.urljoin(host, "p/peliculas.html"), type='pl', first=0))
itemlist.append(Item(channel=item.channel, action="lista", title="Series",
url=urlparse.urljoin(host, "p/series.html"), type='sr', first=0))
url=urlparse.urljoin(host, "/category/pelicula"), type='pl', pag=1))
#itemlist.append(Item(channel=item.channel, action="lista", title="Series",
# url=urlparse.urljoin(host, "/category/serie"), type='sr', pag=1))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros", url=host, cat='genre'))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad", url=host, cat='quality'))
itemlist.append(Item(channel=item.channel, action="category", title="Orden Alfabético", url=host, cat='abc'))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno", url=host, cat='year'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
#itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+"/search?q="))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -43,18 +43,18 @@ def category(item):
data = httptools.downloadpage(host).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.cat == 'abc':
data = scrapertools.find_single_match(data, '<span>Orden Alfabético</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<div class="Body Container">(.+?)<main>')
elif item.cat == 'genre':
data = scrapertools.find_single_match(data, '<span>Géneros</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<a>Géneros<\/a><ul class="sub.menu">(.+?)<a>Año<\/a>')
elif item.cat == 'year':
data = scrapertools.find_single_match(data, '<span>Año</span>.*?</ul>')
data = scrapertools.find_single_match(data, '<a>Año<\/a><ul class="sub.menu">(.+?)<a>Idioma<\/a>')
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<span>Calidad</span>.*?</ul>')
patron = "<li.*?>([^<]+)<a href='([^']+)'>"
data = scrapertools.find_single_match(data, '<a>Calidad<\/a><ul class="sub-menu">(.+?)<a>Géneros<\/a>')
patron = '<li.*?><a href="(.*?)">(.*?)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle, scrapedurl in matches:
for scrapedurl,scrapedtitle in matches:
if scrapedtitle != 'Próximas Películas':
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', first=0))
itemlist.append(item.clone(action='lista', title=scrapedtitle, url=host+scrapedurl, type='cat', pag=0))
return itemlist
@@ -63,6 +63,7 @@ def search_results(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
logger.info(data)
patron = '<span class=.post-labels.>([^<]+)</span>.*?class="poster-bg" src="([^"]+)"/>.*?<h4>.*?'
patron +=">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
matches = scrapertools.find_multiple_matches(data, patron)
@@ -90,28 +91,6 @@ def search(item, texto):
if texto != '':
return search_results(item)
def episodios(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(item.url).data
data = data.replace('"ep0','"epp"')
patron = '(?is)<div id="ep(\d+)".*?'
patron += 'src="([^"]+)".*?'
patron += '(href.*?)fa fa-download'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedepi, scrapedthumbnail, scrapedurls in matches:
title="1x%s - %s" % (scrapedepi, item.contentSerieName)
urls = scrapertools.find_multiple_matches(scrapedurls, 'href="([^"]+)')
itemlist.append(item.clone(action='findvideos', title=title, url=item.url, thumbnail=scrapedthumbnail, type=item.type,
urls = urls, infoLabels=item.infoLabels))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]",
url=item.url, action="add_serie_to_library", extra="episodios",
contentSerieName=item.contentSerieName))
return itemlist
def lista(item):
logger.info()
next = True
@@ -119,64 +98,37 @@ def lista(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
css_data = scrapertools.find_single_match(data, "<style id='page-skin-1' type='text/css'>(.*?)</style>")
data = scrapertools.find_single_match(data, "itemprop='headline'>.*?</h2>.*?</ul>")
patron = '<span class="([^"]+)">.*?<figure class="poster-bg">(.*?)<img src="([^"]+)" />'
patron += '(.*?)</figure><h6>([^<]+)</h6><a href="([^"]+)"></a>'
patron = '<article .*?">'
patron += '<a href="([^"]+)"><.*?><figure.*?>' #scrapedurl
patron += '<img.*?src="([^"]+)".*?>.*?' #scrapedthumbnail
patron += '<h3 class=".*?">([^"]+)<\/h3>' #scrapedtitle
patron += '<span.*?>([^"]+)<\/span>.+?' #scrapedyear
patron += '<a.+?>([^"]+)<\/a>' #scrapedtype
matches = scrapertools.find_multiple_matches(data, patron)
first = int(item.first)
last = first + 19
if last > len(matches):
last = len(matches)
next = False
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedtype in matches:
title="%s - %s" % (scrapedtitle,scrapedyear)
for scrapedtype, scrapedyear, scrapedthumbnail, scrapedquality, scrapedtitle, scrapedurl in matches[first:last]:
year = scrapertools.find_single_match(scrapedyear, '<span>(\d{4})</span>')
new_item = Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':scrapedyear})
if not year:
class_year = scrapertools.find_single_match(scrapedyear, 'class="([^\"]+)"')
year = scrapertools.find_single_match(css_data, "\." + class_year + ":after {content:'(\d{4})';}")
if not year:
year = scrapertools.find_single_match(data, "headline'>(\d{4})</h2>")
qual = ""
if scrapedquality:
patron_qualities='<i class="([^"]+)"></i>'
qualities = scrapertools.find_multiple_matches(scrapedquality, patron_qualities)
for quality in qualities:
patron_desc = "\." + quality + ":after {content:'([^\']+)';}"
quality_desc = scrapertools.find_single_match(css_data, patron_desc)
qual = qual+ "[" + quality_desc + "] "
title="%s [%s] %s" % (scrapedtitle,year,qual)
new_item = Item(channel=item.channel, title=title, url=host+scrapedurl, thumbnail=scrapedthumbnail,
type=scrapedtype, infoLabels={'year':year})
if scrapedtype.strip() == 'sr':
if scrapedtype == 'sr':
new_item.contentSerieName = scrapedtitle
new_item.action = 'episodios'
else:
new_item.contentTitle = scrapedtitle
new_item.action = 'findvideos'
if scrapedtype == item.type or item.type == 'cat':
itemlist.append(new_item)
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
#pagination
url_next_page = item.url
first = last
if next:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', first=first))
pag = item.pag + 1
url_next_page = item.url+"/page/"+str(pag)+"/"
if len(itemlist)>19:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='lista', pag=pag))
return itemlist
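A note on the rewritten lista(): the year and quality are not plain text in the listing; the site emits them as CSS ::after content keyed by class name, so the scraper reads the class from the markup and looks up its content rule in the inline stylesheet captured in css_data. The trick, condensed with hypothetical class and CSS values:

import re

css = ".y2019:after {content:'2019';} .q1080:after {content:'1080p';}"
clase = "y2019"  # class name scraped from the listing's <span>
year = re.search(r"\." + clase + r":after {content:'(\d{4})';}", css).group(1)
# year == '2019'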

View File

@@ -9,6 +9,7 @@ from platformcode import config, logger
from platformcode import platformtools
from platformcode import launcher
from time import sleep
from platformcode.config import get_setting
__channel__ = "autoplay"
@@ -117,7 +118,7 @@ def start(itemlist, item):
# Get the autoplay settings for this channel
settings_node = channel_node.get('settings', {})
if settings_node['active']:
if get_setting('autoplay') or settings_node['active']:
url_list_valid = []
autoplay_list = []
autoplay_b = []
@@ -142,7 +143,7 @@ def start(itemlist, item):
# 2: Servers only
# 3: Qualities only
# 4: Do not sort
if settings_node['custom_servers'] and settings_node['custom_quality']:
if (settings_node['custom_servers'] and settings_node['custom_quality']) or get_setting('autoplay'):
priority = settings_node['priority'] # 0: servers then qualities, or 1: qualities then servers
elif settings_node['custom_servers']:
priority = 2 # servers only
@@ -391,14 +392,15 @@ def init(channel, list_servers, list_quality, reset=False):
# Check that there are no duplicated qualities or servers
if 'default' not in list_quality:
list_quality.append('default')
list_servers = list(set(list_servers))
list_quality = list(set(list_quality))
# list_servers = list(set(list_servers))
# list_quality = list(set(list_quality))
# Create the channel node and add it
channel_node = {"servers": list_servers,
"quality": list_quality,
"settings": {
"active": False,
"plan_b": True,
"custom_servers": False,
"custom_quality": False,
"priority": 0}}
@@ -455,7 +457,7 @@ def check_value(channel, itemlist):
for item in itemlist:
if item.server.lower() not in server_list and item.server !='':
server_list.append(item.server)
server_list.append(item.server.lower())
change = True
if item.quality not in quality_list and item.quality !='':
quality_list.append(item.quality)
@@ -672,7 +674,7 @@ def is_active(channel):
# Get the autoplay settings for this channel
settings_node = channel_node.get('settings', {})
return settings_node.get('active', False)
return settings_node.get('active', False) or get_setting('autoplay')
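The net effect of this hunk is that the global 'autoplay' preference now overrides the per-channel flag. The resulting check is equivalent to this sketch (using the get_setting import added above):

def is_autoplay_active(settings_node):
    # Active if either the channel opted in or the user enabled autoplay globally.
    return settings_node.get('active', False) or get_setting('autoplay')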
def reset(item, dict):

View File

@@ -3,7 +3,7 @@
import re
import urllib
from core import jsontools as json
from core import jsontools as json, httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
@@ -12,6 +12,7 @@ url_api = ""
beeg_salt = ""
Host = "https://beeg.com"
def get_api_url():
global url_api
global beeg_salt
@@ -53,7 +54,7 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=url_api + "/index/main/0/pc",
viewmode="movie"))
#itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
# itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
# url=url_api + "/index/main/0/pc", extra="popular"))
itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias completo",
url=url_api + "/index/main/0/pc", extra="nonpopular"))
@@ -65,7 +66,7 @@ def mainlist(item):
def videos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
JSONData = json.load(data)
for Video in JSONData["videos"]:
@@ -90,14 +91,14 @@ def videos(item):
def listcategorias(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
JSONData = json.load(data)
#for Tag in JSONData["tags"][item.extra]:
# for Tag in JSONData["tags"][item.extra]:
for Tag in JSONData["tags"]:
url = url_api + "/index/tag/0/pc?tag=" + Tag["tag"]
title = '%s - %s' % (str(Tag["tag"]), str(Tag["videos"]))
#title = title[:1].upper() + title[1:]
# title = title[:1].upper() + title[1:]
itemlist.append(
Item(channel=item.channel, action="videos", title=title, url=url, folder=True, viewmode="movie"))
@@ -109,7 +110,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = item.url % (texto)
try:
return videos(item)
# Catch the exception so a failing channel does not interrupt the global search
@@ -136,7 +137,8 @@ def play(item):
viedokey = re.compile("key=(.*?)%2Cend=", re.DOTALL).findall(url)[0]
url = url.replace(viedokey, decode(viedokey))
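# The 'key' segment of the stream URL is obfuscated; decode() (apparently this
# channel's own helper, presumably keyed on beeg_salt) rewrites it in place.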
if not url.startswith("https:"): url = "https:" + url
if not url.startswith("https:"):
url = "https:" + url
title = videourl
itemlist.append(["%s %s [directo]" % (title, url[-4:]), url])

View File

@@ -8,8 +8,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.bravoporn.com'
@@ -17,8 +15,8 @@ host = 'http://www.bravoporn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host +"/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host +"/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/c/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -29,7 +27,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/s/?q=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -51,12 +49,12 @@ def categorias(item):
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedthumbnail = "http:" + scrapedthumbnail
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/latest/"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -70,14 +68,12 @@ def peliculas(item):
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
thumbnail = "https:" + scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
plot=plot, contentTitle = scrapedtitle))
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next" title="Next">Next</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -7,20 +7,16 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.camwhoresbay.com'
# IN THE CATALOGUE AND SEARCH, PAGINATION WORKS THROUGH AN AJAX CALL
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -28,10 +24,11 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/%s/" % texto
item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
item.extra = texto
try:
return peliculas(item)
return lista(item)
# Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
@@ -51,12 +48,12 @@ def categorias(item):
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -70,14 +67,31 @@ def peliculas(item):
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ) )
contentTitle = scrapedtitle, fanart=scrapedthumbnail))
if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
if next_page:
if "from_videos=" in item.url:
next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
"&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page and not next_page.startswith("#"):
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
item.url, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
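Both pagination branches hand-build the site's AJAX block endpoints. The resulting requests have roughly these shapes (sample query values, not taken from the site):

# Search results: <host>/search/<terms>/?mode=async&function=get_block
#   &block_id=list_videos_videos_list_search_result&q=<terms>&category_ids=
#   &sort_by=post_date&from_videos=<N>
# Plain listings: <host>/latest-updates/?mode=async&function=get_block
#   &block_id=list_videos_common_videos_list&sort_by=post_date&from=<N>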
@@ -94,7 +108,7 @@ def play(item):
scrapedurl = scrapertools.find_single_match(data, 'video_url: \'([^\']+)\'')
itemlist.append(Item(channel=item.channel, action="play", title=scrapedurl, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo"))
return itemlist

View File

@@ -40,7 +40,6 @@ else:
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -105,7 +104,6 @@ def sub_search(item):
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", infoLabels={"year": year},
thumbnail=scrapedthumbnail, text_color=color3, page=0))
@@ -167,7 +165,6 @@ def peliculas(item):
for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 30]:
if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
@@ -212,7 +209,7 @@ def generos(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>'
@@ -231,14 +228,13 @@ def year_release(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
# logger.info(data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
@@ -289,9 +285,9 @@ def temporadas(item):
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<span class="title">([^<]+)<i>.*?' # number of seasons
patron += '<img src="([^"]+)"></a></div>' # episodes
patron = "<span class='title'>([^<]+)<i>.*?" # number of seasons
patron += "<img src='([^']+)'>" # episodes
# logger.info(datas)
matches = scrapertools.find_multiple_matches(datas, patron)
if len(matches) > 1:
for scrapedseason, scrapedthumbnail in matches:
@@ -331,14 +327,13 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
# logger.info(datas)
patron = '<div class="imagen"><a href="([^"]+)">.*?' # episode url, img
patron += '<div class="numerando">(.*?)</div>.*?' # episode numbering
patron += '<a href="[^"]+">([^<]+)</a>' # episode titles
patron = "<div class='imagen'>.*?"
patron += "<div class='numerando'>(.*?)</div>.*?"
patron += "<a href='([^']+)'>([^<]+)</a>"
matches = scrapertools.find_multiple_matches(datas, patron)
for scrapedurl, scrapedtitle, scrapedname in matches:
for scrapedtitle, scrapedurl, scrapedname in matches:
scrapedtitle = scrapedtitle.replace('--', '0')
patron = '(\d+) - (\d+)'
match = re.compile(patron, re.DOTALL).findall(scrapedtitle)
@@ -366,7 +361,7 @@ def episodios(item):
if i.infoLabels['title']:
# If the episode has its own name, add it to the item title
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, replace the poster with it
i.thumbnail = i.infoLabels['poster_path']

View File

@@ -248,8 +248,8 @@ def findvideos(item):
itemlist.append(Item(channel = item.channel))
itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
text_color="magenta"))
# Opción "Añadir esta película a la biblioteca de KODI"
if item.extra != "library":
# Opción "Añadir esta película a la videoteca de KODI"
if item.contentChannel != "videolibrary":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,

View File

@@ -41,7 +41,6 @@ else:
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -121,10 +120,10 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '</figure>(.*?)' # tipo
patron += '<h3 class="Title">([^<]+)</h3>.*?' # title
patron += '<span class="Year">([^<]+)</span>.*?' # year
patron += '<img src="([^"]+)".*?' # img
patron += '</figure>(.*?)' # tipo
patron += '<h3 class="Title">([^<]+)</h3>.*?' # title
patron += '<span class="Year">([^<]+)</span>.*?' # year
matches = scrapertools.find_multiple_matches(data, patron)
@@ -173,7 +172,6 @@ def genresYears(item):
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="peliculas"))
return itemlist
@@ -183,13 +181,12 @@ def year_release(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>' # url, title
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))
@@ -203,13 +200,12 @@ def series(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)|&nbsp;|<br>", "", data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?' # url
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
patron += '<img src="([^"]+)".*?' # img
patron += '<h3 class="Title">([^<]+)</h3>' # title
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
@@ -274,7 +270,7 @@ def episodios(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron = '<td class="MvTbImg B"><a href="([^"]+)".*?' # url
patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>' # episode titles
matches = scrapertools.find_multiple_matches(data, patron)
@@ -307,7 +303,7 @@ def episodios(item):
if i.infoLabels['title']:
# If the episode has its own name, add it to the item title
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
'episode'], i.infoLabels['title'])
'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# If the episode has its own image, replace the poster with it
i.thumbnail = i.infoLabels['poster_path']
@@ -340,7 +336,8 @@ def findvideos(item):
lang, quality = match[0]
quality = quality.strip()
headers = {'Referer': item.url}
url_1 = scrapertools.find_single_match(data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
url_1 = scrapertools.find_single_match(data,
'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
new_data = httptools.downloadpage(url_1, headers=headers).data
new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}|&nbsp;", "", new_data)
new_data = scrapertools.decodeHtmlentities(new_data)

View File

@@ -130,7 +130,7 @@ def anyos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="([^"]+)">([^<]+)</a><br'
patron = '<a href=([^>]+)>([^<]+)</a><br'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
@@ -171,8 +171,8 @@ def generos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li id="menu-item-.*?" class="menu-item menu-item-type-taxonomy menu-item-object-category ' \
'menu-item-.*?"><a href="([^"]+)">([^<]+)<\/a></li>'
patron = '<li id=menu-item-.*? class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-.*?'
patron +='"><a href=([^>]+)>([^<]+)<\/a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = urlparse.urljoin(item.url, scrapedurl)
@@ -206,8 +206,8 @@ def peliculas(item):
data = httptools.downloadpage(item.url).data
patron = '<div class="home_post_cont.*? post_box">.*?<a href="(.*?)".*?'
patron += 'src="(.*?)".*?title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
patron = '<div class="home_post_cont.*? post_box">.*?<a href=([^>]+)>.*?src=([^ ]+).*?'
patron += 'title="(.*?) \((.*?)\).*?".*?p&gt;(.*?)&lt'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedplot in matches:
@@ -232,7 +232,7 @@ def peliculas(item):
))
try:
patron = "<link rel='next' href='([^']+)' />"
patron = "<link rel=next href=([^>]+)>"
next_page = re.compile(patron, re.DOTALL).findall(data)
itemlist.append(Item(channel=item.channel,
action="peliculas",
@@ -298,7 +298,7 @@ def findvideos(item):
lang = 'latino'
data = httptools.downloadpage(item.url).data
patron = 'target="_blank".*? service=".*?" data="(.*?)"><li>(.*?)<\/li>'
patron = 'target=_blank.*? service=.*? data="(.*?)"><li>(.*?)<\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
server_url = {'YourUpload': 'https://www.yourupload.com/embed/',
@@ -315,7 +315,6 @@ def findvideos(item):
if server_id not in ['Mega', 'MediaFire', 'Trailer', '']:
video_id = dec(video_cod, dec_value)
logger.debug('server_id %s' % server_id)
if server_id in server_url:
server = server_id.lower()
thumbnail = item.thumbnail
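For the mapping above, once dec() has recovered the video id, building the playable URL is a dictionary lookup plus concatenation. A sketch under that assumption (video_id is taken as already decoded by the channel's dec() helper, which is not shown here; names are illustrative):

    server_url = {'YourUpload': 'https://www.yourupload.com/embed/'}

    def build_embed(server_id, video_id):
        # Options the channel cannot play are skipped outright.
        if server_id in ('Mega', 'MediaFire', 'Trailer', ''):
            return None
        if server_id not in server_url:
            return None
        return server_url[server_id] + video_id

    print build_embed('YourUpload', 'abc123')  # https://www.yourupload.com/embed/abc123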

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# -*- Channel CinemaHD -*-
# -*- Channel CineDeTodo -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
@@ -16,39 +16,61 @@ from channels import autoplay
from channels import filtertools
host = 'http://www.cinedetodo.com/'
host = 'https://www.cinedetodo.net/'
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(item.clone(title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
itemlist.append(item.clone(title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
# itemlist.append(item.clone(title="Por Calidad", action="section", section='quality',
# thumbnail=get_thumb('quality', auto=True)))
itemlist.append(item.clone(title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(item.clone(title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='MovieList'))
itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='Series'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
def sub_menu(item):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
thumbnail=get_thumb('last', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True), type=item.type ))
if item.type != 'Series':
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True), type=item.type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
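Usage of the extended helper is unchanged for plain fetches; the second argument only matters for hosts that validate the Referer (video_page and item here are illustrative):

    data = get_source(host)                           # plain fetch, as before
    embed = get_source(video_page, referer=item.url)  # embed behind a Referer check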
@@ -57,60 +79,86 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
patron += '<td>(\d{4})</td>'
full_data = data
if item.section != '':
data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)</ul>')
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h3 class=Title>(.*?)<\/h3>.*?<span class=Year>(.*?)<\/span>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
data = scrapertools.find_single_match(data, '<!--<%s>.*?class="MovieList NoLmtxt(.*?)</ul>' % item.type)
if item.section == 'alpha':
patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?<img src="([^"]+)" alt=.*?'
patron += '<strong>([^"]+)</strong>.*?<td>(\d{4})</td>'
matches = re.compile(patron, re.DOTALL).findall(full_data)
else:
patron = '<article.*?<a href="(.*?)">.*?<img src="(.*?)" alt=.*?'
patron += '<h3 class="Title">(.*?)<\/h3>.*?date_range">(\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
url = scrapedurl
if year == '':
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
cleantitle = scrapedtitle[0].strip()
else:
contentTitle = scrapedtitle
cleantitle = scrapedtitle
contentTitle = re.sub('\(.*?\)','', contentTitle)
cleantitle = re.sub('\(.*?\)', '', cleantitle)
title = '%s [%s]'%(contentTitle, year)
if not config.get_setting('unify'):
title = '%s [%s]'%(cleantitle, year)
else:
title = cleantitle
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'year':year}
))
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels = {'year': year}
)
if 'series' not in url:
new_item.contentTitle = cleantitle
new_item.action = 'findvideos'
else:
new_item.contentSerieName = cleantitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'<a class=next.*?href=(.*?)>')
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
type=item.type))
return itemlist
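The closing lines show the pagination idiom repeated across these channels: scrape the next-page anchor and append a navigation entry only when it exists, carrying type forward so the next page keeps the same listing mode. A generic sketch of that idiom, assuming the repo's Item and scrapertools:

    def add_next_page(itemlist, item, data,
                      patron='<a class="next.*?href="([^"]+)">'):
        # Append "Siguiente >>" only when the page advertises a next URL.
        url_next_page = scrapertools.find_single_match(data, patron)
        if url_next_page:
            itemlist.append(Item(channel=item.channel, title="Siguiente >>",
                                 url=url_next_page, action='list_all',
                                 type=item.type))
        return itemlist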
def section(item):
logger.info()
itemlist = []
data = get_source(host)
if item.type == 'Series':
url = host + '?tr_post_type=2'
else:
url = host + '?tr_post_type=1'
data = get_source(url)
action = 'list_all'
if item.section == 'quality':
patron = 'menu-item-object-category.*?menu-item-\d+><a href=(.*?)>(.*?)<\/a>'
elif item.section == 'genre':
patron = '<a href=(http:.*?) class=Button STPb>(.*?)</a>'
elif item.section == 'year':
patron = 'custom menu-item-15\d+><a href=(.*?\?s.*?)>(\d{4})<\/a><\/li>'
if item.section == 'genre':
patron = '<a href="([^ ]+)" class="Button STPb">(.*?)</a>'
elif item.section == 'alpha':
patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
patron = '<li><a href="(.*?letter.*?)">(.*?)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
@@ -118,38 +166,104 @@ def section(item):
url = data_one
title = data_two
if title != 'Ver más':
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
if item.type == 'Series':
url = url + '?tr_post_type=2'
else:
url = url + '?tr_post_type=1'
if 'serie' in title.lower():
continue
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section,
type=item.type)
itemlist.append(new_item)
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='Temporada <span>(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
full_data=get_source(item.url)
data = scrapertools.find_single_match(full_data, 'Temporada <span>\d+.*?</ul>')
patron='<span class="Num">(\d+)<.*?<a href="([^"]+)".*?"MvTbTtl".*?">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedepisode, scrapedurl, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
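One caveat in seasons() and episodesxseasons(): both reuse item.infoLabels directly, so every generated entry can end up referencing the same dict, and the last season or episode written wins. Whether core.item copies the dict on assignment decides if this bites; a defensive per-iteration copy sidesteps the question:

    for scrapedepisode, scrapedurl, scrapedtitle in matches:
        infoLabels = dict(item.infoLabels)  # fresh dict per episode
        infoLabels['episode'] = scrapedepisode
        title = '%sx%s - %s' % (infoLabels['season'], scrapedepisode, scrapedtitle)
        itemlist.append(Item(channel=item.channel, title=title, url=scrapedurl,
                             action='findvideos', infoLabels=infoLabels))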
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.decodeHtmlentities(data)
patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?</iframe>'
patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class=Video>.*?src=(.*?) frameborder')
opt_data = scrapertools.find_single_match(data,'%s><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)" frameborder')
opt_data = scrapertools.find_single_match(data,'"%s"><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
language = opt_data[0].strip()
language = language.replace('(','').replace(')','')
language = re.sub('\(|\)', '', language)
quality = opt_data[1].strip()
if url != '' and 'youtube' not in url:
itemlist.append(item.clone(title='%s', url=url, language=IDIOMAS[language], quality=quality, action='play'))
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
action='play', infoLabels=item.infoLabels))
elif 'youtube' in url:
trailer = item.clone(title='Trailer', url=url, action='play', server='youtube')
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s [%s] [%s]'%(i.server.capitalize(),
i.language, i.quality))
tmdb.set_infoLabels_itemlist(itemlist, True)
try:
itemlist.append(trailer)
except:
@@ -175,7 +289,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.section = 'search'
if texto != '':
return list_all(item)
else:
@@ -190,11 +304,11 @@ def newest(categoria):
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host+'/animacion'
item.url = host+'animacion/?tr_post_type=1'
elif categoria == 'terror':
item.url = host+'/terror'
elif categoria == 'documentales':
item.url = host+'/documental'
item.url = host+'terror/?tr_post_type=1'
item.type = 'MovieList'
item.section = 'search'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()
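itemlist[-1] raises IndexError whenever the scrape comes back empty, so the trailing-pagination trim is safer with a guard:

    if itemlist and itemlist[-1].title == 'Siguiente >>':
        itemlist.pop()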

View File

@@ -3,7 +3,7 @@
"name": "CineHindi",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"language": ["vos"],
"thumbnail": "cinehindi.png",
"banner": "http://i.imgur.com/cau9TVe.png",
"categories": [

View File

@@ -27,8 +27,8 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, action="genero", title="Generos", url=host, thumbnail = get_thumb("genres", auto = True)))
itemlist.append(Item(channel=item.channel, action="lista", title="Novedades", url=host, thumbnail = get_thumb("newest", auto = True)))
itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
url=urlparse.urljoin(host, "proximamente")))
#itemlist.append(Item(channel=item.channel, action="proximas", title="Próximas Películas",
# url=urlparse.urljoin(host, "proximamente")))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=urlparse.urljoin(host, "?s="), thumbnail = get_thumb("search", auto = True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -38,8 +38,8 @@ def genero(item):
logger.info()
itemlist = list()
data = httptools.downloadpage(host).data
patron = 'level-0.*?value="([^"]+)"'
patron += '>([^<]+)'
patron = '<option class=.*? value=([^<]+)>'
patron += '([^<]+)<\/option>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle in matches:
if 'Próximas Películas' in scrapedtitle:
@@ -94,28 +94,29 @@ def lista(item):
else:
url = httptools.downloadpage("%s?cat=%s" %(host, item.cat), follow_redirects=False, only_headers=True).headers.get("location", "")
data = httptools.downloadpage(url).data
bloque = scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
patron = 'class="item">.*?' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href="([^"]+).*?' # scrapedurl
patron += '<img src="([^"]+).*?' # scrapedthumbnail
patron += 'alt="([^"]+).*?' # scrapedtitle
patron += '<div class="fixyear">(.*?)</span></div><' # scrapedfixyear
bloque = data  # scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
patron = '<div id=mt.+?>' # Todos los items de peliculas (en esta web) empiezan con esto
patron += '<a href=([^"]+)\/><div class=image>' # scrapedurl
patron += '<img src=([^"]+) alt=.*?' # scrapedthumbnail
patron += '<span class=tt>([^"]+)<\/span>' # scrapedtitle
patron += '<span class=ttx>([^"]+)<div class=degradado>.*?' # scrapedplot
patron += '<span class=year>([^"]+)<\/span><\/div><\/div>' # scrapedfixyear
matches = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedfixyear in matches:
patron = '<span class="year">([^<]+)' # scrapedyear
scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot, scrapedyear in matches:
#patron = '<span class="year">([^<]+)' # scrapedyear
#scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)
scrapedtitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle,'\(\d{4}\)'),'').strip()
title = scrapedtitle
if scrapedyear:
title += ' (%s)' % (scrapedyear)
item.infoLabels['year'] = int(scrapedyear)
patron = '<span class="calidad2">([^<]+).*?' # scrapedquality
scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
if scrapedquality:
title += ' [%s]' % (scrapedquality)
#scrapedquality = scrapertools.find_single_match(scrapedfixyear, patron)
#if scrapedquality:
# title += ' [%s]' % (scrapedquality)
itemlist.append(
item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, plot=scrapedplot, contentType="movie", context=["buscar_trailer"]))
tmdb.set_infoLabels(itemlist)
# Paginacion
patron = 'rel="next" href="([^"]+)'
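The patron rewrite above tracks a markup change: the site dropped the quotes around attribute values, so captures can no longer be anchored between quotes and must stop at the closing bracket instead. A self-contained illustration of the two styles:

    import re

    html_quoted = '<div id="mt-1"><a href="/movie-1/"><span class="tt">T</span>'
    html_unquoted = '<div id=mt-1><a href=/movie-1/><span class=tt>T</span>'

    # Quoted markup: anchor the capture between the quotes.
    print re.findall('<a href="([^"]+)"', html_quoted)    # ['/movie-1/']
    # Unquoted markup: end the capture at the closing '>' instead.
    print re.findall('<a href=([^>]+)>', html_unquoted)   # ['/movie-1/']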

View File

@@ -16,41 +16,61 @@ from channels import autoplay
from channels import filtertools
host = 'http://www.cinemahd.co/'
host = 'https://www.cinemahd.co/'
IDIOMAS = {'Latino': 'LAT'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['fastplay', 'rapidvideo', 'streamplay', 'flashx', 'streamito', 'streamango', 'vidoza']
list_servers = ['gounlimited', 'rapidvideo', 'vshare', 'clipwatching', 'jawclowd', 'streamango']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host, thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Calidad", action="section", section='quality',
thumbnail=get_thumb('quality', auto=True)))
itemlist.append(Item(channel=item.channel, title="Por Año", action="section", section='year',
thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
itemlist.append(Item(channel=item.channel, title="Películas", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='MovieList'))
itemlist.append(Item(channel=item.channel, title="Series", action="sub_menu", url=host,
thumbnail=get_thumb('last', auto=True), type='Series'))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host + '?s=',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url):
def sub_menu(item):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
thumbnail=get_thumb('last', auto=True), type=item.type))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True), type=item.type ))
if item.type != 'Series':
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True), type=item.type))
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
@@ -60,14 +80,18 @@ def list_all(item):
data = get_source(item.url)
full_data = data
data = scrapertools.find_single_match(data, '<ul class=MovieList NoLmtxt.*?</ul>')
if item.section != '':
data = scrapertools.find_single_match(data, 'class="MovieList NoLmtxt(.*?)</ul>')
else:
data = scrapertools.find_single_match(data, '<!--<%s>.*?class="MovieList NoLmtxt(.*?)</ul>' % item.type)
if item.section == 'alpha':
patron = '<span class=Num>\d+.*?<a href=(.*?) class.*?<img src=(.*?) alt=.*?<strong>(.*?)</strong>.*?'
patron += '<td>(\d{4})</td>'
patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?<img src="([^"]+)" alt=.*?'
patron += '<strong>([^"]+)</strong>.*?<td>(\d{4})</td>'
matches = re.compile(patron, re.DOTALL).findall(full_data)
else:
patron = '<article id=post-.*?<a href=(.*?)>.*?<img src=(.*?) alt=.*?'
patron += '<h3 class=Title>(.*?)<\/h3>(?:</a>|<span class=Year>(.*?)<\/span>)'
patron = '<article.*?<a href="(.*?)">.*?<img src="(.*?)" alt=.*?'
patron += '<h3 class="Title">(.*?)<\/h3>.*?date_range">(\d+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
@@ -77,45 +101,64 @@ def list_all(item):
year = '-'
if "|" in scrapedtitle:
scrapedtitle= scrapedtitle.split("|")
contentTitle = scrapedtitle[0].strip()
cleantitle = scrapedtitle[0].strip()
else:
contentTitle = scrapedtitle
cleantitle = scrapedtitle
contentTitle = re.sub('\(.*?\)','', contentTitle)
cleantitle = re.sub('\(.*?\)', '', cleantitle)
title = '%s [%s]'%(contentTitle, year)
if not config.get_setting('unify'):
title = '%s [%s]'%(cleantitle, year)
else:
title = cleantitle
thumbnail = 'http:'+scrapedthumbnail
itemlist.append(Item(channel=item.channel, action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'year':year}
))
new_item = Item(channel=item.channel,
title=title,
url=url,
thumbnail=thumbnail,
infoLabels = {'year': year}
)
if 'series' not in url:
new_item.contentTitle = cleantitle
new_item.action = 'findvideos'
else:
new_item.contentSerieName = cleantitle
new_item.action = 'seasons'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, True)
# Paginación
url_next_page = scrapertools.find_single_match(full_data,'<a class=next.*?href=(.*?)>')
url_next_page = scrapertools.find_single_match(full_data,'<a class="next.*?href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all'))
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
type=item.type))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(host)
if item.type == 'Series':
url = host + '?tr_post_type=2'
else:
url = host + '?tr_post_type=1'
data = get_source(url)
action = 'list_all'
if item.section == 'quality':
patron = 'menu-item-object-category.*?menu-item-\d+ menu-category-list><a href=(.*?)>(.*?)<\/a>'
elif item.section == 'genre':
patron = '<a href=([^ ]+) class=Button STPb>(.*?)</a>'
elif item.section == 'year':
patron = '<li><a href=([^>]+)>(\d{4})<\/a><\/li>'
if item.section == 'genre':
patron = '<a href="([^ ]+)" class="Button STPb">(.*?)</a>'
elif item.section == 'alpha':
patron = '<li><a href=(.*?letters.*?)>(.*?)</a>'
patron = '<li><a href="(.*?letter.*?)">(.*?)</a>'
action = 'list_all'
matches = re.compile(patron, re.DOTALL).findall(data)
for data_one, data_two in matches:
@@ -123,32 +166,99 @@ def section(item):
url = data_one
title = data_two
if title != 'Ver más':
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section)
if item.type == 'Series':
url = url + '?tr_post_type=2'
else:
url = url + '?tr_post_type=1'
if 'serie' in title.lower():
continue
new_item = Item(channel=item.channel, title= title, url=url, action=action, section=item.section,
type=item.type)
itemlist.append(new_item)
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='Temporada <span>(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for season in matches:
season = season.lower().replace('temporada','')
infoLabels['season']=season
title = 'Temporada %s' % season
itemlist.append(Item(channel=item.channel, title=title, url=item.url, action='episodesxseasons',
infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist = []
full_data=get_source(item.url)
data = scrapertools.find_single_match(full_data, 'Temporada <span>\d+.*?</ul>')
patron='<span class="Num">(\d+)<.*?<a href="([^"]+)".*?"MvTbTtl".*?">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedepisode, scrapedurl, scrapedtitle in matches:
infoLabels['episode'] = scrapedepisode
url = scrapedurl
title = '%sx%s - %s' % (infoLabels['season'], infoLabels['episode'], scrapedtitle)
itemlist.append(Item(channel=item.channel, title= title, url=url, action='findvideos', infoLabels=infoLabels))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.decodeHtmlentities(data)
patron = 'id=(Opt\d+)>.*?src=(.*?) frameborder.*?</iframe>'
patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
data_video = get_source(scrapedurl)
url = scrapertools.find_single_match(data_video, '<div class=Video>.*?src=(.*?) frameborder')
opt_data = scrapertools.find_single_match(data,'%s><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
url = scrapertools.find_single_match(data_video, '<div class="Video">.*?src="([^"]+)" frameborder')
opt_data = scrapertools.find_single_match(data,'"%s"><span>.*?</span>.*?<span>(.*?)</span>'%option).split('-')
language = opt_data[0].strip()
language = re.sub('\(|\)', '', language)
quality = opt_data[1].strip()
if url != '' and 'youtube' not in url:
itemlist.append(Item(channel=item.channel, title='%s', url=url, language=IDIOMAS[language], quality=quality,
action='play'))
action='play', infoLabels=item.infoLabels))
elif 'youtube' in url:
trailer = Item(channel=item.channel, title='Trailer', url=url, action='play', server='youtube')
@@ -179,7 +289,7 @@ def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.section = 'search'
if texto != '':
return list_all(item)
else:
@@ -194,9 +304,11 @@ def newest(categoria):
if categoria in ['peliculas','latino']:
item.url = host
elif categoria == 'infantiles':
item.url = host+'/animacion'
item.url = host+'animacion/?tr_post_type=1'
elif categoria == 'terror':
item.url = host+'/terror'
item.url = host+'terror/?tr_post_type=1'
item.type = 'MovieList'
item.section = 'search'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>':
itemlist.pop()

View File

@@ -0,0 +1,63 @@
{
"id": "cineonline",
"name": "cineonline",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "https://www.cine-online.eu/wp-content/uploads/2015/04/CINE-logo-bueno.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re, urllib, urlparse
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'https://www.cine-online.eu'
IDIOMAS = {'Español': 'ESP', 'Cast': 'ESP', 'Latino': 'LAT', 'Lat': 'LAT', 'Subtitulado': 'VOSE', 'Sub': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['Streamango', 'Vidoza', 'Openload', 'Streamcherry', 'Netutv']
list_quality = []
__channel__='cineonline'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title ="Películas", action ="mainlist_pelis"))
itemlist.append(item.clone(title="Series" , action="lista", url= host + "/serie/"))
itemlist.append(item.clone(title="Buscar", action="search"))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def mainlist_pelis(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Novedades" , action="lista", url= host))
itemlist.append(item.clone(title="Castellano" , action="lista", url= host + "/tag/castellano/"))
itemlist.append(item.clone(title="Latino" , action="lista", url= host + "/tag/latino/"))
itemlist.append(item.clone(title="Subtituladas" , action="lista", url= host + "/tag/subtitulado/"))
itemlist.append(item.clone(title="Categorias" , action="categorias", url= host))
itemlist.append(item.clone(title="Año" , action="categorias", url= host))
itemlist.append(item.clone( title = 'Buscar', action = 'search', search_type = 'movie' ))
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
if "Año" in item.title:
data = scrapertools.get_match(data,'<h3>Año de estreno(.*?)</ul>')
patron = '<li><a href="([^"]+)">(\d+)</(\w)>'
else:
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)">([^"]+)</a> <span>(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, cantidad in matches:
scrapedplot = ""
scrapedthumbnail = ""
title = scrapedtitle + " %s" % cantidad
itemlist.append(item.clone(channel=item.channel, action="lista", title=title , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
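One detail worth flagging in the "Año" branch above: the third group '(\w)' only ever matches the letter of the closing tag, so cantidad comes out as "a" for every year. If years carry no real count, a tighter pattern plus a padded unpack keeps the shared loop working; a sketch:

    patron = '<li><a href="([^"]+)">(\d+)</a>'
    matches = [(url, year, '')
               for url, year in re.compile(patron, re.DOTALL).findall(data)]
    # Titles then show just the year, with an empty count appended.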
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div id="mt-\d+".*?<a href="([^"]+)".*?'
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="year">(\d+)</span>.*?'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear in matches:
scrapedtitle = scrapedtitle.replace("Ver", "").replace("online", "")
title = '%s (%s)' % (scrapedtitle, scrapedyear)
url = scrapedurl
new_item = Item(channel=item.channel,
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
infoLabels={'year':scrapedyear})
if '/serie/' in url:
new_item.action = 'temporadas'
new_item.contentSerieName = scrapedtitle
else:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
itemlist.append(new_item)
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)">Siguiente</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append(item.clone(channel=item.channel , action="lista" , title="Next page >>" ,
text_color="blue", url=next_page_url) )
return itemlist
def temporadas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<span class="se-t">(\d+)</span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for numtempo in matches:
itemlist.append(item.clone( action='episodesxseason', title='Temporada %s' % numtempo, url = item.url,
contentType='season', contentSeason=numtempo ))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="episodios", contentSerieName=item.contentSerieName))
# return sorted(itemlist, key=lambda it: it.title)
return itemlist
def episodios(item):
logger.info()
itemlist = []
templist = temporadas(item)
for tempitem in templist:
itemlist += episodesxseason(tempitem)
return itemlist
def episodesxseason(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="numerando">%s x (\d+)</div>.*?' % item.contentSeason
patron += '<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for episode, url, title in matches:
titulo = '%sx%s %s' % (item.contentSeason, episode, title)
itemlist.append(item.clone( action='findvideos', url=url, title=titulo,
contentType='episode', contentEpisodeNumber=episode ))
tmdb.set_infoLabels(itemlist)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = 'id="plays-(\d+)">\s*([^<]+)</div'
matches = scrapertools.find_multiple_matches(data, patron)
for xnumber, xname in matches:
if "/episodios/" in item.url:
lang = scrapertools.find_single_match(data, '#player2%s">([^<]+)</a>' % xnumber)
else:
lang = scrapertools.find_single_match(data, '#div%s">([^<]+)<' % xnumber)
if "lat" in lang.lower(): lang= "Lat"
if 'cast' in lang.lower(): lang= "Cast"
if 'sub' in lang.lower(): lang= "Sub"
if lang in IDIOMAS:
lang = IDIOMAS[lang]
post= {"nombre":xname}
url= httptools.downloadpage("https://www.cine-online.eu/ecrypt", post=urllib.urlencode(post)).data
url = scrapertools.find_single_match(url,'<(?:IFRAME SRC|iframe src)="([^"]+)"')
if not config.get_setting('unify'):
title = ' (%s)' % (lang)
else:
title = ''
if url != '':
itemlist.append(item.clone(action="play", title='%s'+title, url=url, language=lang ))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if not "/episodios/" in item.url:
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos':
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist
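findvideos() above resolves hidden hosts by POSTing each option's encoded name to the site's /ecrypt endpoint and reading the iframe out of the response. A condensed sketch of that round trip, assuming the repo's httptools and scrapertools plus the host defined in this file:

    import urllib

    def resolve_ecrypt(xname):
        post = urllib.urlencode({"nombre": xname})
        data = httptools.downloadpage(host + "/ecrypt", post=post).data
        # The endpoint answers with a bare iframe, upper- or lower-case.
        return scrapertools.find_single_match(
            data, '<(?:IFRAME SRC|iframe src)="([^"]+)"')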

View File

@@ -5,6 +5,7 @@ import re
from core import scrapertools
from core import servertools
from core import httptools
from core.item import Item
from platformcode import config, logger
@@ -43,7 +44,7 @@ def lista(item):
itemlist = []
# Descarga la pagina
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
# Extrae las entradas (carpetas)
patronvideos = '&lt;img .*?src=&quot;(.*?)&quot;'
@@ -92,7 +93,7 @@ def detail(item):
itemlist = []
# Descarga la pagina
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = data.replace("%3A", ":")
data = data.replace("%2F", "/")

View File

@@ -244,8 +244,8 @@ def findvideos(item):
url = scrapertools.find_single_match(new_data, "src='([^']+)'")
url = get_url(url.replace('\\/', '/'))
if url:
itemlist.append(Item(channel=item.channel, title ='%s'+title, url=url, action='play', quality=item.quality,
language=IDIOMAS[language], infoLabels=item.infoLabels))
itemlist.append(item.clone(title ='%s'+title, url=url, action='play',
language=IDIOMAS[language], text_color = ""))
patron = "<a class='optn' href='([^']+)'.*?<img src='.*?>([^<]+)<.*?<img src='.*?>([^<]+)<"
matches = scrapertools.find_multiple_matches(data, patron)
for hidden_url, quality, language in matches:
@@ -258,10 +258,23 @@ def findvideos(item):
url = get_url(url.replace('\\/', '/'))
if url:
itemlist.append(Item(channel=item.channel, title='%s'+title, url=url, action='play', quality=quality,
language=IDIOMAS[language], infoLabels=item.infoLabels))
language=IDIOMAS[language], infoLabels=item.infoLabels, text_color = ""))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
itemlist.sort(key=lambda it: (it.language, it.server, it.quality))
tmdb.set_infoLabels(itemlist, __modo_grafico__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if itemlist:
if item.contentChannel != "videolibrary":
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist

View File

@@ -7,8 +7,7 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.cliphunter.com'
@@ -16,8 +15,8 @@ host = 'https://www.cliphunter.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/categories/All"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular/ratings/yesterday"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/categories/All"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/popular/ratings/yesterday"))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="catalogo", url=host + "/pornstars/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -29,7 +28,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -47,13 +46,13 @@ def catalogo(item):
for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
scrapedplot = ""
scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/movies"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
url=next_page) )
return itemlist
@@ -68,12 +67,12 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -88,11 +87,10 @@ def peliculas(item):
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot,
contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
next_page = scrapertools.find_single_match(data,'<li class="arrow"><a rel="next" href="([^"]+)">&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -2,7 +2,6 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
@@ -16,7 +15,7 @@ host ='http://www.coomelonitas.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -27,7 +26,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host+ "/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -43,12 +42,12 @@ def categorias(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -61,9 +60,8 @@ def peliculas(item):
thumbnail = scrapertools.find_single_match(match,'<img src="([^"]+)"')
itemlist.append( Item(channel=item.channel, action="findvideos", title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, viewmode="movie") )
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
if next_page_url!="":
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="siguiente">')
if next_page!="":
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -31,23 +31,29 @@ def mainlist(item):
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Ultimas", action="list_all", url=host,
thumbnail=get_thumb('last', auto=True)))
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host+'peliculas',
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Estrenos", action="list_all", url=host+'estrenos',
thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Mas vistas", action="list_all", url=host+'peliculas-mas-vistas',
thumbnail=get_thumb('more watched', auto=True)))
itemlist.append(Item(channel=item.channel, title="Mas votadas", action="list_all", url=host+'peliculas-mas-valoradas',
thumbnail=get_thumb('more voted', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all", url= host+'espanol',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'latino',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'subtitulado',
thumbnail=get_thumb('audio', auto=True)))
itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
thumbnail=get_thumb('alphabet', auto=True)))
# itemlist.append(Item(channel=item.channel, title="Castellano", action="list_all", url= host+'espanol',
# thumbnail=get_thumb('audio', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Latino", action="list_all", url=host + 'latino',
# thumbnail=get_thumb('audio', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="VOSE", action="list_all", url=host + 'subtitulado',
# thumbnail=get_thumb('audio', auto=True)))
#
# itemlist.append(Item(channel=item.channel, title="Alfabetico", action="section", section='alpha',
# thumbnail=get_thumb('alphabet', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
@@ -69,17 +75,16 @@ def list_all(item):
itemlist = []
try:
data = get_source(item.url)
if item.section == 'alpha':
patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?'
patron += 'src="([^"]+)" class.*?<strong>([^<]+)</strong>.*?<td>(\d{4})</td>'
else:
patron = '<article id="post-\d+".*?<a href="([^"]+)">.*?'
patron += 'src="([^"]+)".*?<h2 class="Title">([^<]+)<\/h2>.*?<span class="Year">([^<]+)<\/span>'
# if item.section == 'alpha':
# patron = '<span class="Num">\d+.*?<a href="([^"]+)" class.*?'
# patron += 'src="([^"]+)" class.*?<strong>([^<]+)</strong>.*?<td>(\d{4})</td>'
# else:
patron = '<article class="TPost C post-\d+.*?<a href="([^"]+)">.*?'
patron +='"Year">(\d{4})<.*?src="([^"]+)".*?"Title">([^"]+)</h2>'
data = get_source(item.url)
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year in matches:
for scrapedurl, year, scrapedthumbnail, scrapedtitle in matches:
url = scrapedurl
if "|" in scrapedtitle:
@@ -103,7 +108,7 @@ def list_all(item):
# Paginación
url_next_page = scrapertools.find_single_match(data,'<a class="next.*?" rel="next" href="([^"]+)"')
url_next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next page-numbers">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
section=item.section))

View File

@@ -3,21 +3,18 @@
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://czechvideo.org'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -28,7 +25,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/tags/%s/" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -40,45 +37,47 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<ul class="cat_menu" id="cat_menu_c0">(.*?)</ul>')
data = scrapertools.get_match(data,'<div class="category">(.*?)</ul>')
patron = '<li><a href="(.*?)".*?>(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
scrapedurl = host + scrapedurl
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="short-story">.*?<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?div class="short-time">(.*?)</div>'
patron = '<div class="short-story">.*?'
patron += '<a href="([^"]+)" title="([^"]+)"><img src="([^"]+)".*?'
patron += 'div class="short-time">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
scrapedthumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<del><a href="([^"]+)">Next</a></del>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<del><a href="([^"]+)">Next</a></del>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
url = scrapertools.find_single_match(data,'<iframe src=.*?<iframe src="([^"]+)"')
url = "http:" + url
itemlist = servertools.find_video_items(data=url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
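The "http:" + url fix above, like the 'https:' + scrapedthumbnail fixes elsewhere in this diff, handles protocol-relative links. A small helper covering the URL shapes these scrapers meet (host is whatever the channel defines):

    def absolutize(url, host, scheme='https'):
        # Normalise scraped URLs before handing them to a downloader.
        if url.startswith('//'):      # protocol-relative: //cdn.site/x.jpg
            return scheme + ':' + url
        if url.startswith('/'):       # site-relative: /videos/123
            return host.rstrip('/') + url
        return url                    # already absolute

    print absolutize('//cdn.site/x.jpg', 'http://czechvideo.org')  # https://cdn.site/x.jpg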

View File

@@ -167,7 +167,8 @@ def findvideos(item):
headers = {"X-Requested-With":"XMLHttpRequest"}
for scrapedserver, scrapeduser in matches:
data1 = httptools.downloadpage("https://space.danimados.space/gilberto.php?id=%s&sv=mp4" %scrapeduser).data
url = base64.b64decode(scrapertools.find_single_match(data1, 'hashUser = "([^"]+)'))
data1 = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data1)
url = base64.b64decode(scrapertools.find_single_match(data1, '<iframe data-source="([^"]+)"'))
url1 = devuelve_enlace(url)
if "drive.google" in url1:
url1 = url1.replace("view","preview")
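The replacement lines decode the player URL that danimados now base64-encodes in an iframe attribute; squashing whitespace first keeps the regex from missing a tag split across lines. A standalone sketch:

    import base64
    import re

    def decode_data_source(page):
        page = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", page)
        encoded = re.search('<iframe data-source="([^"]+)"', page)
        return base64.b64decode(encoded.group(1)) if encoded else ''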

View File

@@ -5,6 +5,7 @@ import urlparse
from core import scrapertools
from core import servertools
from core import httptools
from core.item import Item
from platformcode import logger
@@ -30,7 +31,7 @@ def DocuSeries(item):
itemlist = []
# Descarga la página
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
# Extrae las entradas (carpetas)
patronvideos = '<li><b><a href="([^"]+)" target="_blank">([^<]+)</a></b></li>'
@@ -54,7 +55,7 @@ def DocuTag(item):
itemlist = []
# Descarga la página
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span class='label-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -76,7 +77,7 @@ def DocuARCHIVO(item):
itemlist = []
# Descarga la página
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
patronvideos = "<a class='post-count-link' href='([^']+)'>([^<]+)</a>[^<]+"
patronvideos += "<span class='post-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -102,7 +103,7 @@ def listvideos(item):
scrapedplot = ""
# Descarga la página
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
patronvideos = "<h3 class='post-title entry-title'[^<]+"
patronvideos += "<a href='([^']+)'>([^<]+)</a>.*?"
patronvideos += "<div class='post-body entry-content'(.*?)<div class='post-footer'>"
@@ -156,7 +157,7 @@ def findvideos(item):
itemlist = []
# Descarga la página
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, "<div class='post-body entry-content'(.*?)<div class='post-footer'>")
# Busca los enlaces a los videos

View File

@@ -525,7 +525,7 @@ def findvideos(item):
#Bajamos los datos de la página
data = ''
patron = '<a onclick="eventDownloadTorrent\(.*?\)".?class="linktorrent" href="([^"]+)">'
patron = '<a onclick="eventDownloadTorrent\(.*?\)".?class="linktorrent" href="([^"]+)"'
if item.contentType == 'movie': #Es una peli
try:
data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(item.url, timeout=timeout).data)
@@ -588,7 +588,7 @@ def findvideos(item):
#Buscamos si ya tiene tamaño, si no, los buscamos en el archivo .torrent
size = scrapertools.find_single_match(item_local.quality, '\s\[(\d+,?\d*?\s\w\s?[b|B])\]')
if not size and not item.armagedon:
size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent
size = generictools.get_torrent_size(scrapedurl) #Buscamos el tamaño en el .torrent
if size:
item_local.title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía
item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título
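The size handling above strips any size tag a title already carries before appending the one read from the .torrent (via generictools.get_torrent_size), so titles never show two sizes. The retitle step in isolation:

    import re

    def retitle_with_size(title, size):
        title = re.sub(r'\s\[\d+,?\d*?\s\w[b|B]\]', '', title)  # drop old tag
        return '%s [%s]' % (title, size) if size else title

    print retitle_with_size('Film [2,3 GB]', '1,4 GB')  # Film [1,4 GB]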

View File

@@ -15,12 +15,10 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'latino': 'Latino'}
IDIOMAS = {'Latino': 'Latino'}
list_language = IDIOMAS.values()
CALIDADES = {'1080p': '1080p', '720p': '720p', '480p': '480p', '360p': '360p'}
list_quality = CALIDADES.values()
list_servers = ['directo', 'openload']
list_quality = []
list_servers = ['dostream', 'openload']
host = 'http://doomtv.net/'
@@ -28,6 +26,8 @@ host = 'http://doomtv.net/'
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(
@@ -65,19 +65,29 @@ def mainlist(item):
fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def lista(item):
logger.info()
itemlist = []
next = False
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'movie-id=.*?href=(.*?) data-url.*?quality>(.*?)'
patron += '<img data-original=(.*?) class.*?<h2>(.*?)<\/h2>.*?<p>(.*?)<\/p>'
data = get_source(item.url)
patron = 'movie-id=.*?href="([^"]+)" data-url.*?quality">([^<]+)<.*?img data-original="([^"]+)" class.*?'
patron += '<h2>([^<]+)<\/h2>.*?<p>([^<]+)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -89,9 +99,9 @@ def lista(item):
for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]:
url = scrapedurl
thumbnail = scrapedthumbnail
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "")
url = host+scrapedurl
thumbnail = 'https:'+scrapedthumbnail.strip()
filtro_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w185", "")
filtro_list = {"poster_path": filtro_thumb.strip()}
filtro_list = filtro_list.items()
title = scrapedtitle
@@ -114,7 +124,7 @@ def lista(item):
url_next_page = item.url
first = last
else:
url_next_page = scrapertools.find_single_match(data, "<a href=([^ ]+) class=page-link aria-label=Next>")
url_next_page = scrapertools.find_single_match(data, "<li class='active'>.*?class='page larger' href='([^']+)'")
first = 0
if url_next_page:
@@ -128,14 +138,14 @@ def seccion(item):
itemlist = []
duplicado = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'menu-item-object-category menu-item-\d+><a href=(.*?)>(.*?)<\/a><\/li>'
data = get_source(item.url)
patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<\/a><\/li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
url = scrapedurl
url = host+scrapedurl
title = scrapedtitle
thumbnail = ''
if url not in duplicado:
@@ -163,7 +173,6 @@ def newest(categoria):
logger.info()
itemlist = []
item = Item()
# categoria='peliculas'
try:
if categoria in ['peliculas', 'latino']:
item.url = host +'peliculas/page/1'
@@ -186,23 +195,38 @@ def newest(categoria):
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|frameborder|><\/script>)'
data = get_source(item.url)
patron = 'id="(tab\d+)"><div class="movieplay">.*?src="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, urls in matches:
language = 'Latino'
if 'http' not in urls:
urls = 'https:'+urls
if not config.get_setting('unify'):
title = ' [%s]' % language
else:
title = '%s'
new_item = Item(
channel=item.channel,
url=urls,
title=item.title,
title= '%s'+ title,
contentTitle=item.title,
action='play',
language = IDIOMAS[language],
infoLabels = item.infoLabels
)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
@@ -214,4 +238,5 @@ def findvideos(item):
contentTitle=item.contentTitle,
))
return itemlist
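
Note the title trick above: each link keeps a literal '%s' placeholder, and get_servers_itemlist is handed a lambda that fills it in once the server has been identified. Reduced to plain string formatting (FakeItem stands in for Alfa's Item):

class FakeItem(object):
    def __init__(self, title, server):
        self.title = title
        self.server = server

links = [FakeItem('%s [Latino]', 'openload'),
         FakeItem('%s [Latino]', 'dostream')]

# What the lambda handed to get_servers_itemlist effectively does:
for i in links:
    i.title = i.title % i.server.capitalize()
    print(i.title)   # Openload [Latino] / Dostream [Latino]
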

View File

@@ -16,7 +16,7 @@ from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://www2.doramasmp4.com/'
host = 'https://www4.doramasmp4.com/'
IDIOMAS = {'sub': 'VOSE', 'VO': 'VO'}
list_language = IDIOMAS.values()
@@ -166,6 +166,8 @@ def findvideos(item):
itemlist = []
data = get_source(item.url)
new_dom=scrapertools.find_single_match(data,"var web = { domain: '(.*?)'")
patron = 'link="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -191,7 +193,7 @@ def findvideos(item):
video_data = httptools.downloadpage(video_url, headers=headers).data
url = scrapertools.find_single_match(video_data, "'file':'([^']+)'")
else:
video_url = 'https://www2.doramasmp4.com/api/redirect.php?token=%s' % token
video_url = new_dom+'api/redirect.php?token=%s' % token
video_data = httptools.downloadpage(video_url, headers=headers, follow_redirects=False).headers
url = scrapertools.find_single_match(video_data['location'], '\d+@@@(.*?)@@@')
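
Rather than hard-coding www2, the fix reads the API host from the page's own `var web = { domain: ... }` declaration and then, with redirects disabled, pulls the real video URL out of the Location header, which arrives wrapped in @@@ markers. Both extractions on invented sample strings:

import re

page = "var web = { domain: 'https://www4.doramasmp4.com/' };"
new_dom = re.search(r"var web = \{ domain: '(.*?)'", page).group(1)
print(new_dom + 'api/redirect.php?token=%s' % 'abc123')

# Location header shaped like "<id>@@@<real url>@@@...":
location = '12345@@@https://openload.example/embed/xyz@@@extra'
url = re.search(r'\d+@@@(.*?)@@@', location).group(1)
print(url)   # https://openload.example/embed/xyz
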

View File

@@ -4,7 +4,7 @@
"active": true,
"adult": false,
"language": ["lat", "cast"],
"thumbnail": "https://www.dospelis.com/wp-content/uploads/2018/07/dospelislogo.png",
"thumbnail": "https://www.dospelis.net/wp-content/uploads/2019/02/logodospelisamor.png",
"banner": "",
"categories": [
"movie",

View File

@@ -90,11 +90,11 @@ def section(item):
logger.info()
itemlist=[]
duplicados=[]
data = get_source(host+'/'+item.type)
data = get_source(host+item.type)
if 'Genero' in item.title:
patron = '<li class="cat-item cat-item-\d+"><a href="([^"]+)" >(.*?)/i>'
patron = '<liclass="cat-item cat-item-\d+"><ahref=([^ ]+) .*?>(.*?)/i>'
elif 'Año' in item.title:
patron = '<li><a href="(.*?release.*?)">([^<]+)</a>'
patron = '<li><ahref=(.*?release.*?)>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -102,7 +102,7 @@ def section(item):
title = scrapedtitle
plot=''
if 'Genero' in item.title:
quantity = scrapertools.find_single_match(scrapedtitle,'</a> <i>(.*?)<')
quantity = scrapertools.find_single_match(scrapedtitle,'<i>(.*?)<')
title = scrapertools.find_single_match(scrapedtitle,'(.*?)</')
title = title
plot = '%s elementos' % quantity.replace('.','')
@@ -123,9 +123,8 @@ def list_all(item):
data = get_source(item.url)
if item.type == 'movies':
patron = '<article id="post-\d+" class="item movies"><div class="poster">.?<img src="([^"]+)" alt="([^"]+)">.*?'
patron +='"quality">([^<]+)</span><\/div>.?<a href="([^"]+)">.*?'
patron +='<\/h3>.?<span>([^"]+)<\/span><\/div>.*?"flags"(.*?)metadata'
patron = '<articleid=post-\d+ class="item movies"><divclass=poster>.?<imgsrc=([^ ]+) alt="([^"]+)">.*?'
patron += 'quality>([^<]+)<.*?<ahref=([^>]+)>.*?<\/h3><span>([^<]+)<.*?flags(.*?)metadata'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -148,8 +147,8 @@ def list_all(item):
infoLabels={'year':year}))
elif item.type == 'tvshows':
patron = '<article id="post-\d+" class="item tvshows">.?<div class="poster">.?<img src="([^"]+)"'
patron += ' alt="([^"]+)">.*?<a href="([^"]+)">.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
patron = '<articleid=post-\d+ class="item tvshows">.?<divclass=poster>.?<imgsrc=([^ ]+)'
patron += ' alt="([^"]+)">.*?<ahref=([^>]+)>.*?<\/h3>.?<span>(.*?)<\/span><\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedthumbnail, scrapedtitle, scrapedurl, year in matches:
@@ -168,7 +167,7 @@ def list_all(item):
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Paginación
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
url_next_page = scrapertools.find_single_match(data,'<linkrel=next href=([^>]+)>')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
@@ -180,7 +179,7 @@ def seasons(item):
itemlist=[]
data=get_source(item.url)
patron='Temporada.?\d+'
patron='title>Temporada.?(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -214,7 +213,7 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron='class="numerando">%s - (\d+)</div>.?<div class="episodiotitle">.?<a href="([^"]+)">([^<]+)<' % item.infoLabels['season']
patron='class=numerando>%s - (\d+)</div>.?<divclass=episodiotitle>.?<ahref=([^>]+)>([^<]+)<' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -236,12 +235,15 @@ def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'id="option-(\d+)".*?rptss" src="([^"]+)" frameborder'
patron = 'id=option-(\d+).*?src=([^ ]+) frameborder'
matches = re.compile(patron, re.DOTALL).findall(data)
lang=''
for option, scrapedurl in matches:
lang = scrapertools.find_single_match(data, 'href=#option-%s>.*?/flags/(.*?).png' % option)
quality = ''
if 'goo.gl' in scrapedurl:
new_data = httptools.downloadpage(scrapedurl, follow_redirects=False).headers
scrapedurl = new_data['location']
if lang not in IDIOMAS:
lang = 'en'
title = '%s %s'
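
The new branch resolves goo.gl short links by requesting them with follow_redirects=False and reading the Location header, so the final player URL is known before picking a server. A Python 2 stdlib sketch of the same mechanism (the channel itself goes through Alfa's httptools; urllib2 here only illustrates the idea, assuming the short URL answers with a 301/302):

import urllib2  # Python 2, matching the codebase

class NoRedirect(urllib2.HTTPRedirectHandler):
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        return None  # do not follow; let the 301/302 surface as HTTPError

def resolve_short_url(url):
    # Return the Location header of a redirecting URL, or the URL itself.
    opener = urllib2.build_opener(NoRedirect)
    try:
        opener.open(url)
    except urllib2.HTTPError as e:
        return e.headers.get('location', url)
    return url

# resolve_short_url('https://goo.gl/abc123')  # -> the real player URL
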
@@ -291,8 +293,7 @@ def search_results(item):
itemlist=[]
data=get_source(item.url)
patron = '<article>.*?<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)" \/>.*?meta.*?'
patron += '"year">([^<]+)<(.*?)<p>([^<]+)<\/p>'
patron = '<article>.*?<ahref=([^>]+)><imgsrc=([^ ]+) alt="([^"]+)">.*?year>([^<]+)<(.*?)<p>([^<]+)<\/p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumb, scrapedtitle, year, lang_data, scrapedplot in matches:

View File

@@ -18,7 +18,7 @@
"id": "library_add",
"type": "bool",
"label": "@70230",
"default": true,
"default": false,
"enabled": true,
"visible": true
},
@@ -26,7 +26,7 @@
"id": "library_move",
"type": "bool",
"label": "@70231",
"default": true,
"default": false,
"enabled": "eq(-1,true)",
"visible": true
},
@@ -34,7 +34,7 @@
"id": "browser",
"type": "bool",
"label": "@70232",
"default": false,
"default": true,
"enabled": true,
"visible": true
},

View File

@@ -6,8 +6,11 @@
import os
import re
import time
import unicodedata
from core import filetools
from core import jsontools
from core import scraper
from core import scrapertools
from core import servertools
@@ -53,7 +56,7 @@ def mainlist(item):
title = TITLE_TVSHOW % (
STATUS_COLORS[i.downloadStatus], i.downloadProgress, i.contentSerieName, i.contentChannel)
itemlist.append(Item(title=title, channel="descargas", action="mainlist", contentType="tvshow",
itemlist.append(Item(title=title, channel="downloads", action="mainlist", contentType="tvshow",
contentSerieName=i.contentSerieName, contentChannel=i.contentChannel,
downloadStatus=i.downloadStatus, downloadProgress=[i.downloadProgress],
fanart=i.fanart, thumbnail=i.thumbnail))
@@ -308,7 +311,6 @@ def update_json(path, params):
def save_server_statistics(server, speed, success):
from core import jsontools
if os.path.isfile(STATS_FILE):
servers = jsontools.load(open(STATS_FILE, "rb").read())
else:
@@ -330,7 +332,6 @@ def save_server_statistics(server, speed, success):
def get_server_position(server):
from core import jsontools
if os.path.isfile(STATS_FILE):
servers = jsontools.load(open(STATS_FILE, "rb").read())
else:
@@ -360,7 +361,6 @@ def get_match_list(data, match_list, order_list=None, only_ascii=False, ignoreca
coincidira con "Idioma Español" pero no con "Español" ya que la coincidencia mas larga tiene prioridad.
"""
import unicodedata
match_dict = dict()
matches = []

View File

@@ -208,7 +208,8 @@ def findvideos(item):
data = scrapertools.unescape(data)
data = scrapertools.decodeHtmlentities(data)
patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
# patron = 'id="(Opt\d+)">.*?src="([^"]+)" frameborder.*?</iframe>'
patron = 'id="(Opt\d+)">.*?src="(?!about:blank)([^"]+)" frameborder.*?</iframe>'
matches = re.compile(patron, re.DOTALL).findall(data)
for option, scrapedurl in matches:
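
The commented-out pattern kept matching the empty about:blank placeholder tab; the replacement adds a negative lookahead so that src value is rejected before the group is captured. In isolation:

import re

patron = r'src="(?!about:blank)([^"]+)"'
print(re.findall(patron, '<iframe src="about:blank" frameborder="0">'))
# [] -- the placeholder tab is skipped
print(re.findall(patron, '<iframe src="https://player.example/e/abc" frameborder="0">'))
# ['https://player.example/e/abc']
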

View File

@@ -7,8 +7,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.elreyx.com'
@@ -17,11 +15,11 @@ def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/peliculasporno.html"))
itemlist.append( Item(channel=item.channel, title="Escenas" , action="escenas", url=host + "/index.html"))
itemlist.append( Item(channel=item.channel, title="Productora" , action="productora", url=host + "/index.html"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/peliculasporno.html") )
itemlist.append( Item(channel=item.channel, title="Escenas" , action="lista", url=host + "/index.html"))
itemlist.append( Item(channel=item.channel, title="Productora" , action="categorias", url=host + "/index.html") )
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html") )
itemlist.append( Item(channel=item.channel, title="Buscar", action="search") )
return itemlist
@@ -30,7 +28,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search-%s" % texto + ".html"
try:
return escenas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -38,43 +36,33 @@ def search(item, texto):
return []
def productora(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^<]+)" title="View Category ([^<]+)">(.*?)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
thumbnail="https:" + scrapedthumbnail
url="https:" + scrapedurl
itemlist.append( Item(channel=item.channel, action="escenas", title=scrapedtitle, url=url, thumbnail=thumbnail,
plot=scrapedplot) )
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<td><a href="([^<]+)" title="Movies ([^<]+)">.*?</a>'
if item.title == "Categorias" :
patron = '<td><a href="([^<]+)" title="Movies ([^<]+)">.*?</a>'
else:
patron = '<a href="([^<]+)" title="View Category ([^<]+)">.*?</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
url="https:" + scrapedurl
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def escenas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="notice_image">.*?<a title="([^"]+)" href="([^"]+)">.*?<img src="(.*?)"'
if not "/peliculasporno" in item.url:
patron = '<div class="notice_image">.*?<a title="([^"]+)" href="([^"]+)">.*?<img src="(.*?)">'
else:
patron = '<div class="captura"><a title="([^"]+)" href="([^"]+)".*?><img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
scrapedplot = ""
@@ -87,28 +75,7 @@ def escenas(item):
next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
if next_page!= "":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="escenas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist
def peliculas(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="captura"><a title="([^"]+)" href="([^"]+)".*?><img src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
scrapedplot = ""
url="https:" + scrapedurl
thumbnail="https:" + scrapedthumbnail
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=url, thumbnail=thumbnail,
plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li><a href=\'([^\']+)\' title=\'Pagina \d+\'>&raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
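
The two near-duplicate listers (escenas and peliculas) collapse into a single lista, and the next-page entry is now built with item.clone, so it inherits channel, action and the rest of the current item instead of being retyped. A stand-in sketch of those clone semantics (FakeItem is illustrative; Alfa's Item behaves along these lines):

import copy

class FakeItem(object):
    # Illustrative stand-in: clone() copies the item and overrides fields.
    def __init__(self, **kw):
        self.__dict__.update(kw)
    def clone(self, **kw):
        new = copy.deepcopy(self)
        new.__dict__.update(kw)
        return new

page1 = FakeItem(channel='elreyx', action='lista', title='Peliculas',
                 url='https://example.com/peliculasporno.html')
nxt = page1.clone(title='Página Siguiente >>', url='https://example.com/page-2.html')
print(nxt.channel, nxt.action, nxt.url)  # elreyx lista https://example.com/page-2.html
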

View File

@@ -2,13 +2,11 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://www.eroticage.net'
@@ -16,7 +14,7 @@ host = 'http://www.eroticage.net'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -27,7 +25,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -45,12 +43,12 @@ def categorias(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -61,24 +59,21 @@ def peliculas(item):
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
plot=plot, fanart=scrapedthumbnail, contentTitle=contentTitle ))
next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page) )
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videochannel=item.channel
return itemlist

View File

@@ -7,15 +7,14 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.peliculaseroticasonline.tv'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -26,7 +25,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -43,12 +42,12 @@ def categorias(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -63,8 +62,7 @@ def peliculas(item):
next_page = scrapertools.find_single_match(data, '<div class="naviright"><a href="([^"]+)">Siguiente &raquo;</a>')
if next_page:
next_page = urlparse.urljoin(item.url, next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page ))
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page ))
return itemlist

View File

@@ -0,0 +1,38 @@
{
"id": "estrenosdoramas",
"name": "Estrenos Doramas",
"active": true,
"adult": false,
"language": ["VOSE","LAT"],
"thumbnail": "https://www.estrenosdoramas.net/wp-content/uploads/2016/08/estrenos-doramasss-net3.png",
"banner": "",
"categories": [
"tvshow",
"vos"
],
"settings": [
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"VOSE",
"VO",
"LAT"
]
}
]
}

View File

@@ -0,0 +1,296 @@
# -*- coding: utf-8 -*-
# -*- Channel Estreno Doramas -*-
# -*- Created for Alfa-addon -*-
# -*- By the BDamian (Based on channels from Alfa Develop Group) -*-
import re
from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import jsontools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
import ast
host = 'https://www.estrenosdoramas.net/'
IDIOMAS = {'Latino': 'LAT', 'Vo':'VO', 'Vose': 'VOSE'}
IDIOMA = "no filtrar"
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['openload', 'streamango', 'netutv', 'okru', 'mp4upload']
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel= item.channel, title="Doramas", action="list_all",
url=host + 'category/doramas-online',
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Películas", action="list_all",
url=host + 'category/peliculas',
thumbnail=get_thumb('movies', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title="Últimos capítulos", action="list_all",
url=host + 'category/ultimos-capitulos-online',
thumbnail=get_thumb('doramas', auto=True), type='movie'))
itemlist.append(Item(channel=item.channel, title="Por Genero", action="menu_generos",
url=host,
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title="Doblado Latino", action="list_all",
url=host + 'category/latino',
thumbnail=get_thumb('doramas', auto=True), type='dorama'))
itemlist.append(Item(channel=item.channel, title = 'Buscar', action="search", url= host+'search/',
thumbnail=get_thumb('search', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def menu_generos(item):
logger.info()
data = get_source(item.url)
data = scrapertools.find_single_match(data, '<div id="genuno">(.*?)</div>')
itemlist = []
patron = '<li><a.*?href="(.*?)">(.*?)</a>.*?</li>'
matches = re.compile(patron, re.DOTALL).findall(data)
media_type = item.type
for scrapedurl, scrapedtitle in matches:
new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
thumbnail=item.thumbnail, type=item.type, action="list_all")
itemlist.append(new_item)
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data, '<h3 class="widgetitulo">Resultados</h3>.*?<div id="sidebar-wrapper">')
patron = '<div.*?<a href="(.*?)"><img src="(.*?)" alt="(.*?)".*?</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail)
if scrapedtitle.startswith("Pelicula") or item.type == "movie":
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
new_item.contentSerieName=scrapedtitle
new_item.action = 'episodios'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
patron = '<a class="nextpostslink" rel="next" href="(.*?)">'
matches = re.compile(patron, re.DOTALL).findall(data)
if matches:
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>',
url=matches[0], type=item.type))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
plot_regex = '(<span class="clms"><b>Nombre.*?)<\/div>'
plot_match = re.compile(plot_regex, re.DOTALL).findall(data)
if plot_match:
plot = scrapertools.htmlclean(plot_match[0].replace('<br />', '\n'))
data = scrapertools.find_single_match(data, '<ul class="lcp_catlist".*?</ul>')
patron = '<li.*?<a href="(.*?)" title="(.*?)">.*?(\d*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
for scrapedurl, scrapedtitle, scrapedep in matches:
if item.url == scrapedurl:
continue
url = scrapedurl
contentEpisodeNumber = scrapedep
if contentEpisodeNumber == "":
title = '1xEE - ' + scrapedtitle
else:
title = '1x' + ("0" + contentEpisodeNumber)[-2:] + " - " + scrapedtitle
# title = ("0" + contentEpisodeNumber)[-2:]
infoLabels['season'] = 1
infoLabels['episode'] = contentEpisodeNumber
infoLabels = item.infoLabels
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, plot=plot,
contentEpisodeNumber=contentEpisodeNumber, type='episode', infoLabels=infoLabels))
itemlist.sort(key=lambda x: x.title)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library", extra="episodios", text_color='yellow'))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data, '<div id="marco-post">.*?<div id="sidebar">')
data = scrapertools.unescape(data)
data = scrapertools.decodeHtmlentities(data)
options_regex = '<a href="#tab.*?">.*?<b>(.*?)</b>'
option_matches = re.compile(options_regex, re.DOTALL).findall(data)
video_regex = '<iframe.*?src="(.*?)".*?</iframe>'
video_matches = re.compile(video_regex, re.DOTALL).findall(data)
# for option, scrapedurl in matches:
for option, scrapedurl in map(None, option_matches, video_matches):
if scrapedurl is None:
continue
scrapedurl = scrapedurl.replace('"','').replace('&#038;','&')
logger.info(scrapedurl)
try:
data_video = get_source(scrapedurl)
except Exception as e:
logger.info('Error en url: ' + scrapedurl)
continue
# logger.info(data_video)
# Este sitio pone multiples páginas intermedias, cada una con sus reglas.
source_headers = dict()
source_headers["Content-Type"] = "application/x-www-form-urlencoded; charset=UTF-8"
source_headers["X-Requested-With"] = "XMLHttpRequest"
if scrapedurl.find("https://repro") != 0:
logger.info("Caso 0: url externa")
url = scrapedurl
itemlist.append(Item(channel=item.channel, title=option, url=url, action='play', language=IDIOMA))
elif scrapedurl.find("pi76823.php") > 0:
logger.info("Caso 1")
source_data = get_source(scrapedurl)
source_regex = 'post\( "(.*?)", { acc: "(.*?)", id: \'(.*?)\', tk: \'(.*?)\' }'
source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
for source_page, source_acc, source_id, source_tk in source_matches:
source_url = scrapedurl[0:scrapedurl.find("pi76823.php")] + source_page
source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
source_id + '&tk=' + source_tk, source_headers)
if source_result.code == 200:
source_json = jsontools.load(source_result.data)
itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'], action='play', language=IDIOMA))
elif scrapedurl.find("pi7.php") > 0:
logger.info("Caso 2")
source_data = get_source(scrapedurl)
source_regex = 'post\( "(.*?)", { acc: "(.*?)", id: \'(.*?)\', tk: \'(.*?)\' }'
source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
for source_page, source_acc, source_id, source_tk in source_matches:
source_url = scrapedurl[0:scrapedurl.find("pi7.php")] + source_page
source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
source_id + '&tk=' + source_tk, source_headers)
if source_result.code == 200:
source_json = jsontools.load(source_result.data)
itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'], action='play', language=IDIOMA))
elif scrapedurl.find("reproducir120.php") > 0:
logger.info("Caso 3")
source_data = get_source(scrapedurl)
videoidn = scrapertools.find_single_match(source_data, 'var videoidn = \'(.*?)\';')
tokensn = scrapertools.find_single_match(source_data, 'var tokensn = \'(.*?)\';')
source_regex = 'post\( "(.*?)", { acc: "(.*?)"'
source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
for source_page, source_acc in source_matches:
source_url = scrapedurl[0:scrapedurl.find("reproducir120.php")] + source_page
source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
videoidn + '&tk=' + tokensn, source_headers)
if source_result.code == 200:
source_json = jsontools.load(source_result.data)
urlremoto_regex = "file:'(.*?)'"
urlremoto_matches = re.compile(urlremoto_regex, re.DOTALL).findall(source_json['urlremoto'])
if len(urlremoto_matches) == 1:
itemlist.append(Item(channel=item.channel, title=option, url=urlremoto_matches[0], action='play', language=IDIOMA))
elif scrapedurl.find("reproducir14.php") > 0:
logger.info("Caso 4")
source_data = get_source(scrapedurl)
source_regex = '<div id="player-contenido" vid="(.*?)" name="(.*?)"'
source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
videoidn = source_matches[0][0]
tokensn = source_matches[0][1]
source_regex = 'post\( "(.*?)", { acc: "(.*?)"'
source_matches = re.compile(source_regex, re.DOTALL).findall(source_data)
for source_page, source_acc in source_matches:
source_url = scrapedurl[0:scrapedurl.find("reproducir14.php")] + source_page
source_result = httptools.downloadpage(source_url, 'acc=' + source_acc + '&id=' +
videoidn + '&tk=' + tokensn, source_headers)
if source_result.code == 200:
source_json = jsontools.load(source_result.data)
itemlist.append(Item(channel=item.channel, title=option, url=source_json['urlremoto'], action='play', language=IDIOMA))
else:
logger.info("Caso nuevo")
itemlist = servertools.get_servers_itemlist(itemlist)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle))
return itemlist
def search(item, texto):
logger.info()
import urllib
itemlist = []
texto = texto.replace(" ", "+")
item.url = item.url + texto
item.type = 'search'
if texto != '':
try:
return list_all(item)
except:
itemlist.append(item.clone(url='', title='No hay elementos...', action=''))
return itemlist
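
In findvideos above, map(None, option_matches, video_matches) is the Python 2 idiom for pairing two lists of unequal length, padding the shorter one with None, which is why the loop guards against scrapedurl is None. Demonstration:

options = ['Opcion 1', 'Opcion 2', 'Opcion 3']
videos = ['https://a.example/1', 'https://b.example/2']

pairs = map(None, options, videos)  # Python 2: pads the shorter list with None
print(pairs)
# [('Opcion 1', 'https://a.example/1'),
#  ('Opcion 2', 'https://b.example/2'),
#  ('Opcion 3', None)]

# Python 3 has no map(None, ...); the equivalent is:
# from itertools import zip_longest
# pairs = list(zip_longest(options, videos))
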

View File

@@ -16,7 +16,7 @@ from core.item import Item
from platformcode import config, logger
from core import tmdb
host = "http://fanpelis.com/"
host = "https://fanpelis.com/"
def mainlist(item):
logger.info()

View File

@@ -7,8 +7,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://fapality.com'
@@ -16,9 +14,9 @@ host = 'https://fapality.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/newest/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/popular/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top/"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/newest/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/popular/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -30,7 +28,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/?q=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -51,12 +49,12 @@ def categorias(item):
scrapedplot = ""
scrapedtitle = scrapedtitle.replace("movies", "") + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -75,8 +73,7 @@ def peliculas(item):
next_page_url = scrapertools.find_single_match(data,'<li itemprop="url" class="current">.*?<a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist

View File

@@ -7,8 +7,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.fetishshrine.com'
@@ -16,9 +14,9 @@ host = 'https://www.fetishshrine.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Mejor Valorado" , action="lista", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -29,7 +27,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/?q=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -42,45 +40,50 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" title="([^"]+) porn tube" class="thumb">.*?<img src="([^"]+)".*?<span class="total">([^"]+)</span>'
patron = '<a href="([^"]+)" title="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<span class="vids">(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a itemprop="url" href="([^"]+)">.*?<img src="([^"]+)".*?alt="([^"]+)">.*?<span class="duration">(.*?)</span>'
patron = '<a href="([^"]+)" itemprop="url">.*?'
patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
patron += '<span itemprop="duration" class="length">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = scrapedurl
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
contentTitle = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a.*?href="([^"]+)" title="Next">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>", text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, fanart=scrapedthumbnail, contentTitle = contentTitle ))
next_page = scrapertools.find_single_match(data,'<li><a data=\'\d+\' href="([^"]+)" title="Next">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
patron = 'video_url: \'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
itemlist.append(Item(channel=item.channel, action="play", title=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

View File

@@ -2,14 +2,11 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
from core import jsontools
def mainlist(item):
@@ -19,27 +16,28 @@ def mainlist(item):
item.url = "http://www.filmovix.net/videoscategory/porno/"
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<h1 class="cat_head">XXX</h1>(.*?)<h3> Novo dodato </h3>')
patron = '<li class="clearfix">.*?src="([^"]+)".*?<p class="title"><a href="([^"]+)" rel="bookmark" title="([^"]+)">'
patron = '<li class="clearfix">.*?'
patron += 'src="([^"]+)".*?'
patron += '<p class="title"><a href="([^"]+)" rel="bookmark" title="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
contentTitle = scrapedtitle
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=thumbnail, fanart=thumbnail, plot=plot, contentTitle=contentTitle))
next_page_url = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="mainlist" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append(item.clone(action="mainlist", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle

View File

@@ -2,14 +2,11 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.streamxxxx.com'
@@ -17,7 +14,7 @@ host = 'http://www.streamxxxx.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url= host))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url= host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
return itemlist
@@ -32,11 +29,12 @@ def categorias(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -48,10 +46,11 @@ def peliculas(item):
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
plot = ""
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , plot=plot , viewmode="movie", folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)">Next')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") )
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)">Next')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -7,15 +7,13 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://es.foxtube.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -26,7 +24,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/buscador/%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -45,29 +43,37 @@ def categorias(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = host + scrapedurl
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<a class="thumb tco1" href="([^"]+)">.*?src="([^"]+)".*?alt="([^"]+)".*?<i class="m tc2">(.*?)</i>'
patron = '<a class="thumb tco1" href="([^"]+)">.*?'
patron += 'src="([^"]+)".*?'
patron += 'alt="([^"]+)".*?'
patron += '<span class="t">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = host + scrapedurl
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
contentTitle = title
url = urlparse.urljoin(item.url,scrapedurl)
contentTitle = scrapedtitle
time = scrapertools.find_single_match(duracion, '<i class="m tc2">([^"]+)</i>')
if 'HD' not in duracion:
title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
else:
title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail + "|Referer=%s" %host
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a class="bgco2 tco3" rel="next" href="([^"]+)">&gt</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<a class="bgco2 tco3" rel="next" href="([^"]+)">&gt</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
@@ -76,7 +82,7 @@ def play(item):
itemlist = []
url = scrapertools.find_single_match(scrapertools.cachePage(item.url),'<iframe src="([^"]+)"')
data = httptools.downloadpage(url).data
patron = 'html5player.setVideoUrlHigh\\(\'([^\']+)\''
patron = 'html5player.setVideoHLS\\(\'([^\']+)\''
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl in matches:
scrapedurl = scrapedurl.replace("\/", "/")
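
The player markup moved from a plain MP4 URL (setVideoUrlHigh) to an HLS manifest (setVideoHLS), and the extracted URL arrives with JSON-escaped slashes, hence the replace above. On an invented sample line:

import re

data = "html5player.setVideoHLS('https:\\/\\/cdn.example\\/hls\\/master.m3u8');"
url = re.findall(r"html5player.setVideoHLS\('([^']+)'", data)[0]
print(url.replace("\\/", "/"))  # https://cdn.example/hls/master.m3u8
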

View File

@@ -7,16 +7,15 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://frprn.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-raped/"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-raped/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -27,7 +26,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/%s/" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -40,22 +39,30 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li class="thumb thumb-category">.*?<a href="([^"]+)">.*?<img class="lazy" data-original="([^"]+)">.*?<div class="name">([^"]+)</div>.*?<div class="count">(\d+)</div>'
patron = '<li class="thumb thumb-category">.*?'
patron += '<a href="([^"]+)">.*?'
patron += '<img class="lazy" data-original="([^"]+)">.*?'
patron += '<div class="name">([^"]+)</div>.*?'
patron += '<div class="count">(\d+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="thumb">.*?<a href="([^"]+)".*?<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?<span class="duration">([^"]+)</span>'
patron = '<div class="thumb">.*?'
patron += '<a href="([^"]+)".*?'
patron += '<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="duration">([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
@@ -64,11 +71,12 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<li class="pagination-next"><a href="([^"]+)">')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -2,14 +2,11 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://freepornstreams.org'
@@ -17,8 +14,8 @@ host = 'http://freepornstreams.org'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/free-full-porn-movies/"))
itemlist.append( Item(channel=item.channel, title="Videos" , action="peliculas", url=host + "/free-stream-porn/"))
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/free-full-porn-movies/"))
itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/free-stream-porn/"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host))
itemlist.append( Item(channel=item.channel, title="Categoria" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
@@ -30,7 +27,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -49,7 +46,7 @@ def catalogo(item):
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -65,12 +62,12 @@ def categorias(item):
scrapedplot = ""
scrapedthumbnail = ""
scrapedurl = scrapedurl.replace ("http://freepornstreams.org/freepornst/stout.php?s=100,75,65:*&#038;u=" , "")
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -85,11 +82,10 @@ def peliculas(item):
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, fulltitle=title) )
next_page_url = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue",
url=next_page_url) )
next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -4,35 +4,59 @@
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://www.gnula.mobi/wp-content/uploads/2018/05/Captura-1.png?%3E",
"thumbnail": "https://gnula.mobi/wp-content/uploads/2018/12/gnula-logo.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": false,
"visible": false
},
{
"id": "include_in_newest_castellano",
"type": "bool",
"label": "Incluir en Novedades - Castellano",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_latino",
"type": "bool",
"label": "Incluir en Novedades - Latino",
"default": true,
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": true
}
]
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -3,36 +3,60 @@
# Alfa
# ------------------------------------------------------------
import re
import re, urllib, urlparse
import base64
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from platformcode import logger
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'http://www.gnula.mobi'
IDIOMAS = {'Latino': 'LAT', 'Castellano': 'ESP', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['rapidvideo', 'streamgo', 'openload']
list_quality = ['HD', 'BR-S', 'TS']
__channel__='gmobi'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
host = 'http://www.gnula.mobi/'
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(item.clone(title="Novedades", action="peliculas", url=host))
itemlist.append(item.clone(title="Castellano", action="peliculas",
url="http://www.gnula.mobi/tag/espanol/"))
itemlist.append(item.clone(title="Latino", action="peliculas", url="http://gnula.mobi/tag/latino/"))
itemlist.append(item.clone(title="VOSE", action="peliculas", url="http://gnula.mobi/tag/subtitulada/"))
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Novedades", action="lista", url=host + "/categorias/estrenos"))
itemlist.append(item.clone(title="Categorias" , action="categorias", url= host))
itemlist.append(item.clone(title="Buscar", action="search"))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = "http://gnula.mobi/?s=%s" % texto
item.url = host + "/?s=%s" % texto
try:
return sub_search(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -40,55 +64,49 @@ def search(item, texto):
return []
def sub_search(item):
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="row">.*?<a href="([^"]+)" title="([^"]+)">.*?<img src="(.*?)" title'
matches = scrapertools.find_multiple_matches(data, patron)
for url, name, img in matches:
itemlist.append(item.clone(title=name, url=url, action="findvideos", thumbnail=img))
paginacion = scrapertools.find_single_match(data, '<a href="([^"]+)" ><i class="glyphicon '
'glyphicon-chevron-right" aria-hidden="true"></i>')
if paginacion:
itemlist.append(Item(channel=item.channel, action="sub_search", title="Next page >>", url=paginacion))
data = scrapertools.get_match(data,'<a>CATEGORÍAS</a>(.*?)</ul>')
patron = '<a href="([^"]+)">([^"]+)</a>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="col-mt-5 postsh">.*?href="(.*?)" title="(.*?)".*?<.*?src="(.*?)"'
patron = '<article id="post-\d+".*?'
patron += '<a href="([^"]+)".*?'
patron += '<div class="Image">(.*?)</div>.*?'
patron += '"Title">([^"]+)</h2>.*?'
patron += '"Year">(\d+)</span>.*?'
patron += '<span class="Qlty">\w+ \(([^"]+)\)</span>'
matches = scrapertools.find_multiple_matches(data, patron)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
filter_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w300", "")
filter_list = {"poster_path": filter_thumb}
filter_list = filter_list.items()
itemlist.append(Item(channel=item.channel,
action="findvideos",
title=scrapedtitle,
fulltitle = scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
infoLabels={'filtro': filter_list}))
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, calidad in matches:
thumbnail = scrapertools.find_single_match(scrapedthumbnail, 'src="([^"]+)"')
scrapedtitle = scrapedtitle.replace("(%s)" % scrapedyear, "")
if not config.get_setting('unify'):
title = '%s [COLOR red] %s [/COLOR] (%s)' % (scrapedtitle, calidad, scrapedyear)
else:
title = ''
if not '>TV<' in scrapedthumbnail:
itemlist.append(item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail,
contentTitle = scrapedtitle, quality=calidad, infoLabels={'year':scrapedyear}) )
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data, '<link rel="next" href="(.*?)"')
next_page_url = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
if next_page_url != "":
itemlist.append(item.clone(action="peliculas", title="Siguiente >>", text_color="yellow",
itemlist.append(item.clone(action="lista", title="Siguiente >>", text_color="yellow",
url=next_page_url))
return itemlist
@@ -96,50 +114,46 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = 'data-src="([^"]+)".*?'
patron += 'data-toggle="tab">([^<]+)'
patron = '"server":"[^"]+",'
patron += '"lang":"([^"]+)",'
patron += '"quality":"\w+ \(([^"]+)\)",'
patron += '"link":"https:.*?=([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for url, language in matches:
url = url.replace("&amp;", "&")
response = httptools.downloadpage(url, follow_redirects=False, add_referer=True)
if response.data:
url = scrapertools.find_single_match(response.data, 'src="([^"]+)"')
for lang, quality, url in matches:
if lang in IDIOMAS:
lang = IDIOMAS[lang]
url = base64.b64decode(url + "==")
if not config.get_setting('unify'):
title = '[COLOR red] %s [/COLOR] (%s)' % (quality , lang)
else:
url = response.headers.get("location", "")
url = url.replace("&quot","")
titulo = "Ver en %s (" + language + ")"
itemlist.append(item.clone(
action = "play",
title = titulo,
url = url,
language = language))
tmdb.set_infoLabels(itemlist, True)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
title = ''
itemlist.append(item.clone(action = "play", title = '%s'+ title, url = url, language=lang, quality=quality,
fulltitle = item.title))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Required for link filtering
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Required for FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Required for AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' :
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist
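A note on the base64 step in findvideos above: appending "==" unconditionally works because Python 2's decoder tolerates surplus padding, but it is fragile. A minimal sketch of an explicit alternative (the helper name is illustrative, not part of the channel):

import base64

def b64decode_padded(encoded):
    # Pad to the next multiple of four instead of blindly appending "==";
    # -len(encoded) % 4 is exactly the number of '=' characters missing.
    return base64.b64decode(encoded + "=" * (-len(encoded) % 4))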
def play(item):
item.thumbnail = item.contentThumbnail
return [item]
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == 'peliculas':
item.url = host
elif categoria == 'castellano':
item.url = host +'tag/espanol/'
elif categoria == 'latino':
item.url = host +'tag/latino/'
itemlist = peliculas(item)
if "Pagina" in itemlist[-1].title:
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.fulltitle
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist

View File

@@ -1,354 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel GoodPelis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
host = 'https://goodpelis.net/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="menu_peliculas",
thumbnail=get_thumb('movies', auto=True),
))
# itemlist.append(item.clone(title="Series",
# action="menu_series",
# thumbnail=get_thumb('tvshows', auto=True),
# ))
itemlist.append(item.clone(title="Buscar", action="search",
thumbnail=get_thumb('search', auto=True),
url=host + '?s='
))
return itemlist
def menu_peliculas(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="list_all",
thumbnail=get_thumb('all', auto=True),
url=host + 'page/1/?s'
))
itemlist.append(item.clone(title="Generos",
action="seccion",
url=host + 'page/1/?s',
thumbnail=get_thumb('genres', auto=True),
seccion='generos-pelicula'
))
itemlist.append(item.clone(title="Por Año",
action="seccion",
url=host + 'page/1/?s',
thumbnail=get_thumb('year', auto=True),
seccion='fecha-estreno'
))
itemlist.append(item.clone(title="Calidad",
action="seccion",
url=host + 'page/1/?s',
thumbnail=get_thumb('quality', auto=True),
seccion='calidad'
))
return itemlist
def menu_series(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="list_all", thumbnail=get_thumb('all', auto=True),
url=host + 'series/page/1/',
))
itemlist.append(item.clone(title="Generos",
action="seccion",
url=host + 'series/page/1/',
thumbnail=get_thumb('genres', auto=True),
seccion='generos-serie'
))
itemlist.append(item.clone(title="Por Año",
action="seccion",
url=host + 'series/page/1/',
thumbnail=get_thumb('year', auto=True),
seccion='series-lanzamiento'
))
return itemlist
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = 'class=item.*?<a href=(.*?)><div class=image.*?<img src=(.*?) alt=(.*?) (?:\(\d{4}|width).*?'
patron += 'fixyear><h2>.*?<\/h2>.*?<span class=year>(.*?)<\/span><\/div>(.*?)<\/div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, scrapedquality in matches:
url = scrapedurl
action = 'findvideos'
thumbnail = scrapedthumbnail
plot = ''
contentSerieName = ''
contentTitle = scrapedtitle
title = contentTitle
quality = 'Full HD'
if scrapedquality != '':
quality = scrapertools.find_single_match(scrapedquality, 'calidad2>(.*?)<')
title = contentTitle + ' (%s)' % quality
year = scrapedyear
if 'series' in item.url or 'series' in url:
action = 'seasons'
contentSerieName = contentTitle
quality = ''
new_item = Item(channel=item.channel,
action=action,
title=title,
url=url,
thumbnail=thumbnail,
plot=plot,
quality=quality,
infoLabels={'year': year}
)
if 'series' not in item.url:
new_item.contentTitle = contentTitle
else:
new_item.contentSerieName = contentSerieName
if 'temporada' not in url:
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Pagination
if itemlist != []:
next_page = scrapertools.find_single_match(data,
'<div class=pag_b><a href=(.*?)>Siguiente</a>')
if next_page != '':
itemlist.append(Item(channel=item.channel,
action="list_all",
title='Siguiente >>>',
url=next_page,
))
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = get_source(item.url)
if item.seccion == 'generos-pelicula':
patron = '<li class=cat-item cat-item-.*?><a href=(.*?)>(.*?<\/a> <span>.*?)<\/span><\/li>'
elif item.seccion == 'generos-serie':
patron = '<li class=cat-item cat-item-.*?><a href=(.*?\/series-genero\/.*?)>(.*?<\/a> <span>.*?)<\/span><\/li>'
elif item.seccion in ['fecha-estreno', 'series-lanzamiento']:
patron = '<li><a href=%sfecha-estreno(.*?)>(.*?)<\/a>' % host
elif item.seccion == 'calidad':
patron = '<li><a href=%scalidad(.*?)>(.*?)<\/a>' % host
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
thumbnail = ''
if 'generos' in item.seccion:
cantidad = scrapertools.find_single_match(scrapedtitle, '<span>(\d+)')
title = scrapertools.find_single_match(scrapedtitle, '(.*?)<')
url = scrapedurl
title = scrapertools.decodeHtmlentities(title)
title = title + ' (%s)' % cantidad
elif item.seccion in ['series-lanzamiento', 'fecha-estreno', 'calidad']:
title = scrapedtitle
url = '%s%s%s' % (host, item.seccion, scrapedurl)
itemlist.append(item.clone(action='list_all',
title=title,
url=url,
thumbnail=thumbnail
))
return itemlist
def seasons(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<span class=title>.*?- Temporada (.*?)<\/span>'
matches = re.compile(patron, re.DOTALL).findall(data)
for temporada in matches:
title = 'Temporada %s' % temporada
contentSeasonNumber = temporada
item.infoLabels['season'] = contentSeasonNumber
itemlist.append(item.clone(action='episodiosxtemp',
title=title,
contentSeasonNumber=contentSeasonNumber
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
url=item.url,
action="add_serie_to_library",
extra="episodios",
contentSerieName=item.contentSerieName,
contentSeasonNumber=contentSeasonNumber
))
return itemlist
def episodios(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<li><div class=numerando>(\d+).*?x.*?(\d+)<\/div>.*?<a href=(.*?)> (.*?)<\/a>.*?<\/i>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtemp, scrapedep, scrapedurl, scrapedtitle in matches:
temporada = scrapedtemp
title = temporada + 'x%s %s' % (scrapedep, scrapedtitle)
url = scrapedurl
contentEpisodeNumber = scrapedep
item.infoLabels['episode'] = contentEpisodeNumber
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
return itemlist
def episodiosxtemp(item):
logger.info()
itemlist = []
data = get_source(item.url)
temporada = item.contentSeasonNumber
patron = '<li><div class=numerando>%s.*?x.*?(\d+)<\/div>.*?<a href=(.*?)> (.*?)<\/a>.*?<\/i>' % temporada
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedep, scrapedurl, scrapedtitle in matches:
title = temporada + 'x%s %s' % (scrapedep, scrapedtitle)
url = scrapedurl
contentEpisodeNumber = scrapedep
item.infoLabels['episode'] = contentEpisodeNumber
itemlist.append(item.clone(action='findvideos',
title=title,
url=url,
contentEpisodeNumber=contentEpisodeNumber,
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist
def findvideos(item):
logger.info()
url_list = []
itemlist = []
duplicados = []
data = get_source(item.url)
src = data
patron = '<(?:iframe|IFRAME).*?(?:src|SRC)=(.*?) (?:scrolling|frameborder|FRAMEBORDER)'
matches = re.compile(patron, re.DOTALL).findall(data)
for url in matches:
lang = 'LAT'
quality = item.quality
title = '[%s] [%s]'
if url != '':
itemlist.append(item.clone(title=title, url=url, action='play', language=lang))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % (i.server, i.language))
if item.infoLabels['mediatype'] == 'movie':
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
try:
if texto != '':
return list_all(item)
else:
return []
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria in ['peliculas', 'latino']:
item.url = host + 'page/1/?s'
elif categoria == 'infantiles':
item.url = host + 'category/animacion/'
elif categoria == 'terror':
item.url = host + 'category/terror/'
itemlist = list_all(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data

View File

@@ -44,6 +44,8 @@ def mainlist(item):
thumbnail= get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, title='Series', action='sub_menu', type='series',
thumbnail= get_thumb('tvshows', auto=True)))
itemlist.append(Item(channel=item.channel, title='Colecciones', action='list_collections',
url= host+'listas=populares', thumbnail=get_thumb('colections', auto=True)))
itemlist.append(
item.clone(title="Buscar", action="search", url=host + 'search?go=', thumbnail=get_thumb("search", auto=True),
extra='movie'))
@@ -66,10 +68,13 @@ def sub_menu(item):
return itemlist
def get_source(url):
def get_source(url, referer=None):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
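Illustrative call of the extended get_source (hypothetical URLs, not taken from the channel): some mirrors only serve the real markup when the listing page is sent as Referer.

detail = get_source('https://example.com/pelicula/123/',
                    referer='https://example.com/peliculas/')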
@@ -93,7 +98,7 @@ def section(item):
data = scrapertools.find_single_match(data, 'Generos.*?</ul>')
elif 'Año' in item.title:
data = scrapertools.find_single_match(data, 'Años.*?</ul>')
patron = "<li onclick=filter\(this, '([^']+)', \d+\);>"
patron = '<li onclick="filter\(this, \'([^\']+)\', \d+\);">'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedtitle in matches:
@@ -112,8 +117,8 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
patron = '<article class=Item><a href=([^>]+)><div class=Poster>'
patron += '<img src=(.+?)(?:>|alt).*?<h2>([^>]+)</h2>.*?</article>'
patron = '<article class="Item"><a href="([^>]+)"><div class="Poster"><img src="([^"]+)".*?'
patron += '<h2>([^>]+)</h2>.*?</article>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
@@ -128,10 +133,9 @@ def list_all(item):
title=title,
url=url,
thumbnail=thumbnail,
plot=thumbnail,
infoLabels={'filtro':filter_list})
if item.type == 'peliculas':
if item.type == 'peliculas' or 'peliculas' in url:
new_item.action = 'findvideos'
new_item.contentTitle = scrapedtitle
else:
@@ -143,19 +147,38 @@ def list_all(item):
tmdb.set_infoLabels(itemlist, seekTmdb=True)
# Pagination
url_next_page = scrapertools.find_single_match(data,"<link rel=next href=(.*?) />")
url_next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)"')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_all'))
return itemlist
def list_collections(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<li><a href="([^"]+)">.*?"first-lIMG"><img src="([^"]+)">.*?<h2>([^<]+)</h2>.*?Fichas:?\s(\d+)'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, thumb, title, cant in matches:
plot = 'Contiene %s elementos' % cant
itemlist.append(Item(channel=item.channel, action='list_all', title=title, url=url, thumbnail=thumb, plot=plot))
url_next_page = scrapertools.find_single_match(data, 'class="PageActiva">\d+</a><a href="([^"]+)"')
if url_next_page:
itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, action='list_collections'))
return itemlist
def seasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron='<div class=season temporada-(\d+)>'
patron='<div class="season temporada-(\d+)">'
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -189,7 +212,7 @@ def episodesxseasons(item):
itemlist = []
data=get_source(item.url)
patron= "<li><a href=([^>]+)><b>%s - (\d+)</b><h2 class=eTitle>([^>]+)</h2>" % item.infoLabels['season']
patron= '<li><a href="([^"]+)"><b>%s - (\d+)</b><h2 class="eTitle">([^>]+)</h2>' % item.infoLabels['season']
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels = item.infoLabels
@@ -207,31 +230,52 @@ def episodesxseasons(item):
return itemlist
def findvideos(item):
from lib.generictools import privatedecrypt
logger.info()
itemlist = []
data = get_source(item.url)
patron = "onclick=clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);>"
data = data.replace('"', "'")
patron = "onclick='clickLink\(this, '([^']+)', '([^']+)', '([^']+)'\);'>.*?<b>([a-zA-Z]+)"
matches = re.compile(patron, re.DOTALL).findall(data)
headers = {'referer': item.url}
for url, quality, language in matches:
for url, quality, language, server in matches:
url = privatedecrypt(url, headers)
if url != '':
language = IDIOMAS[language]
if quality.lower() == 'premium':
quality = '720p'
quality = CALIDADES[quality]
title = ' [%s] [%s]' % (language, quality)
if 'visor/vdz' in url:
server = 'powvideo'
itemlist.append(Item(channel=item.channel, title='%s' + title, url=url, action='play', language=language,
quality=quality, infoLabels=item.infoLabels))
quality=quality, server=server, headers=headers, infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return sorted(itemlist, key=lambda i: i.language)
def play(item):
from lib.generictools import privatedecrypt
logger.info()
itemlist = []
url = ''
item.server = ''
data = httptools.downloadpage(item.url, headers=item.headers, follow_redirects=False)
if 'visor/vdz' in item.url:
url = scrapertools.find_single_match(data.data, 'IFRAME SRC="([^"]+)"')
elif 'visor/if' in item.url:
url = data.headers['location']
itemlist.append(Item(channel=item.channel, url=url, action='play', server=item.server,
infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist)
return itemlist
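The play() above relies on two host behaviours: 'visor/vdz' pages embed the target in an IFRAME, while 'visor/if' answers with an HTTP redirect. A minimal sketch of the redirect branch (assuming, as the code above does, that httptools keeps the response headers when follow_redirects=False):

from core import httptools

def resolve_redirect(url):
    # Ask httptools not to follow the redirect, then read the final hoster
    # URL from the Location header instead of downloading the target page.
    response = httptools.downloadpage(url, follow_redirects=False)
    return response.headers.get('location', '')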
def search(item, texto):
logger.info()

View File

@@ -113,6 +113,14 @@
"14",
"15"
]
},
{
"id": "include_in_newest_4k",
"type": "bool",
"label": "Incluir en Novedades - 4K",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1086,3 +1086,29 @@ def search(item, texto):
for line in sys.exc_info():
logger.error("ERROR: %s: SEARCH" % line)
return []
def newest(categoria):
logger.info()
itemlist = []
item = Item()
try:
if categoria == '4k':
item.url = host + "categoria/4k/"
item.extra = "peliculas"
item.channel = channel
item.category_new= 'newest'
itemlist = listado(item)
if ">> Página siguiente" in itemlist[-1].title:
itemlist.pop()
# Catch the exception so the Novedades channel is not interrupted when one channel fails
except:
import sys
for line in sys.exc_info():
logger.error("{0}".format(line))
return []
return itemlist

View File

@@ -7,11 +7,10 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.hclips.com'
def mainlist(item):
logger.info()
itemlist = []
@@ -41,12 +40,16 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)" class="thumb">.*?src="([^"]+)".*?<strong class="title">([^"]+)</strong>.*?<b>(.*?)</b>'
patron = '<a href="([^"]+)" class="thumb">.*?'
patron += 'src="([^"]+)".*?'
patron += '<strong class="title">([^"]+)</strong>.*?'
patron += '<b>(.*?)</b>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches:
scrapedplot = ""
title = scrapedtitle + " (" + vidnum + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
@@ -54,19 +57,21 @@ def peliculas(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
patron = '<a href="([^"]+)" class="thumb">.*?<img src="([^"]+)" alt="([^"]+)".*?<span class="dur">(.*?)</span>'
patron = '<a href="([^"]+)" class="thumb">.*?'
patron += '<img src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="dur">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=thumbnail, plot=plot, contentTitle = contentTitle))
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" title="Next Page">Next</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist

View File

@@ -179,7 +179,7 @@ def genres(item):
logger.info()
itemlist = []
data = scrapertools.cache_page(item.url)
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'

View File

@@ -533,17 +533,17 @@ def findvideos(item):
key = scrapertools.find_single_match(data_js, 'JSON.parse\(atob.*?substrings\((.*?)\)')
data_js = httptools.downloadpage("%s/js/providers.js" % host).data
try:
from lib import alfaresolver
provs = alfaresolver.hdfull_providers(data_js)
if provs == '': return []
except:
return []
decoded = jhexdecode(data_js)
providers_pattern = 'p\[(\d+)\]= {"t":"([^"]+)","d":".*?","e":.function.*?,"l":.function.*?return "([^"]+)".*?};'
providers = scrapertools.find_multiple_matches(decoded, providers_pattern)
provs = {}
for provider, e, l in providers:
provs[provider]=[e,l]
data = agrupa_datos(httptools.downloadpage(item.url).data)
data_obf = scrapertools.find_single_match(data, "var ad\s*=\s*'([^']+)'")
data_decrypt = jsontools.load(obfs(base64.b64decode(data_obf), 126 - int(key)))
infolabels = {}
year = scrapertools.find_single_match(data, '<span>A&ntilde;o:\s*</span>.*?(\d{4})')
infolabels["year"] = year
@@ -552,7 +552,7 @@ def findvideos(item):
if match['provider'] in provs:
try:
embed = provs[match['provider']][0]
url = eval(provs[match['provider']][1].replace('_code_', "match['code']"))
url = provs[match['provider']][1]+match['code']
matches.append([match['lang'], match['quality'], url, embed])
except:
pass
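The hunk above replaces the alfaresolver lookup with a table scraped out of the deobfuscated providers.js: each p[N] entry maps a numeric provider id to an embed name and a URL prefix that the video code is appended to. A self-contained sketch of that lookup with an invented one-entry sample (the real file is obfuscated and much larger):

import re

sample_js = 'p[9]= {"t":"openload","d":"","e":function(){},"l":function(c){return "https://openload.co/embed/"+c;}};'
pattern = r'p\[(\d+)\]= {"t":"([^"]+)".*?return "([^"]+)"'
provs = dict((pid, [name, prefix])
             for pid, name, prefix in re.findall(pattern, sample_js))
# Composing the final link for a match like {'provider': '9', 'code': 'abc123'}:
url = provs['9'][1] + 'abc123'  # -> https://openload.co/embed/abc123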
@@ -691,7 +691,6 @@ def get_status(status, type, id):
## --------------------------------------------------------------------------------
## --------------------------------------------------------------------------------
def jhexdecode(t):
r = re.sub(r'_\d+x\w+x(\d+)', 'var_' + r'\1', t)
r = re.sub(r'_\d+x\w+', 'var_0', r)

View File

@@ -7,17 +7,15 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://www.hdzog.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/new/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="peliculas", url=host + "/popular/"))
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest/"))
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/new/"))
itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/popular/"))
itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/longest/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -27,7 +25,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/?q=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -41,33 +39,40 @@ def categorias(item):
data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data,'<ul class="cf">(.*?)</ul>')
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li>.*?<a href="([^"]+)".*?<img class="thumb" src="([^"]+)" alt="([^"]+)".*?<span class="videos-count">(\d+)</span>'
patron = '<li>.*?<a href="([^"]+)".*?'
patron += '<img class="thumb" src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="videos-count">(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,vidnum in matches:
scrapedplot = ""
url= scrapedurl + "?sortby=post_date"
title = scrapedtitle + " (" + vidnum + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = scrapertools.get_match(data,'<ul class="cf">(.*?)<h2>Advertisement</h2>')
patron = '<li>.*?<a href="([^"]+)".*?src="([^"]+)" alt="([^"]+)" />.*?<span class="time">(.*?)</span>'
patron = '<li>.*?<a href="([^"]+)".*?'
patron += 'src="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="time">(.*?)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,time in matches:
contentTitle = scrapedtitle
title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" title="Next Page" data-page-num="\d+">Next page &raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl,
thumbnail=thumbnail, plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" title="Next Page" data-page-num="\d+">Next page &raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -7,15 +7,13 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://hellporno.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/?page=1"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/?page=1"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -26,7 +24,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/?q=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -39,25 +37,30 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+) - Porn videos">.*?<span>(\d+) videos</span>'
patron = '<a href="([^"]+)">.*?'
patron += '<img src="([^"]+)" alt="([^"]+) - Porn videos">.*?'
patron += '<span>(\d+) videos</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page &raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page &raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias" , title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<div class="video-thumb"><a href="([^"]+)" class="title".*?>([^"]+)</a>.*?<span class="time">([^<]+)</span>.*?<video poster="([^"]+)"'
patron = '<div class="video-thumb"><a href="([^"]+)" class="title".*?>([^"]+)</a>.*?'
patron += '<span class="time">([^<]+)</span>.*?'
patron += '<video poster="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,duracion,scrapedthumbnail in matches:
url = scrapedurl
@@ -65,12 +68,12 @@ def peliculas(item):
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page &raquo;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
next_page = scrapertools.find_single_match(data,'<a href="([^"]+)" class="next">Next page &raquo;</a>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
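The categorias/lista pairs in these adult channels all repeat the same pagination idiom. A hedged sketch of a shared helper (name and signature are illustrative; the channels inline this logic instead):

import urlparse
from core import scrapertools

def add_next_page(itemlist, item, data, patron, action="lista"):
    # Scrape the "next" link, resolve it against the current URL and append
    # a navigation item that re-invokes the listing action.
    next_page = scrapertools.find_single_match(data, patron)
    if next_page:
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action=action, title="Página Siguiente >>",
                                   text_color="blue", url=next_page))
    return itemlist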
@@ -78,10 +81,6 @@ def play(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
#<source src="https://hellporno.com/get_file/6/b44b40165f7e95e4aeff4d9c33100447/202000/202198/202198_360p.mp4/?br=390" title="360p" type="video/mp4" />
#<source data-fluid-hd src="https://hellporno.com/get_file/6/4ec6bbf8288123603094c76d9cd8ede4/202000/202198/202198.mp4/?br=1440" title="720p" type="video/mp4" />
scrapedurl = scrapertools.find_single_match(data,'<source data-fluid-hd src="([^"]+)/?br=\d+"')
if scrapedurl=="":
scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)/?br=\d+"')

View File

@@ -7,17 +7,15 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://hotmovs.com'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/most-popular/?sort_by=video_viewed_week"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/top-rated/?sort_by=rating_week"))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/most-popular/?sort_by=video_viewed_week"))
itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/?sort_by=rating_week"))
itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels/?sort_by=cs_viewed"))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/models/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/?sort_by=title"))
@@ -30,7 +28,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/?q=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -43,18 +41,22 @@ def catalogo(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="thumbnail" href="([^"]+)">.*?<img src="([^"]+)".*?<span class="thumbnail__info__right">\s+([^"]+)\s+</span>.*?<h5>([^"]+)</h5>'
patron = '<a class="thumbnail" href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<span class="thumbnail__info__right">\s+([^"]+)\s+</span>.*?'
patron += '<h5>([^"]+)</h5>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedthumbnail,cantidad,scrapedtitle in matches:
scrapedplot = ""
cantidad = cantidad.replace(" ", "")
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="catalogo" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
@@ -63,22 +65,25 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="thumbnail" href="([^"]+)" title="([^"]+)">.*?<img src="([^"]+)".*?<i class="mdi mdi-video"></i>([^"]+)</div>'
patron = '<a class="thumbnail" href="([^"]+)" title="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<i class="mdi mdi-video"></i>([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedplot = ""
cantidad = cantidad.replace(" ", "")
scrapedtitle = scrapedtitle + " (" + cantidad +")"
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="categorias" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = scrapertools.cachePage(item.url)
@@ -90,12 +95,12 @@ def peliculas(item):
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist

View File

@@ -28,6 +28,8 @@ list_language = IDIOMAS.values()
list_quality = ['Cam', 'TSHQ', 'Dvdrip', 'Blurayrip', 'HD Rip 320p', 'hd rip 320p', 'HD Real 720p', 'Full HD 1080p']
list_servers = ['openload', 'gamovideo', 'streamplay', 'streamango', 'vidoza']
host = 'https://www.inkapelis.to/'
def mainlist(item):
logger.info()
@@ -35,28 +37,28 @@ def mainlist(item):
autoplay.init(item.channel, list_servers, list_quality)
itemlist = []
itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas", url="http://www.inkapelis.com/",
itemlist.append(Item(channel=item.channel, title="Novedades", action="entradas", url=host,
extra="Novedades", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
#itemlist.append(Item(channel=item.channel, title="Estrenos", action="entradas", url="http://www.inkapelis.com/genero/estrenos/",
# text_color=color1, thumbnail=get_thumb('premieres', auto=True)))
itemlist.append(Item(channel=item.channel, title="Castellano", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Castellano&s=",
url=host+"?anio=&genero=&calidad=&idioma=Castellano&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('espanolas', auto=True)))
itemlist.append(Item(channel=item.channel, title="Latino", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Latino&s=",
url=host+"?anio=&genero=&calidad=&idioma=Latino&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('latino', auto=True)))
itemlist.append(Item(channel=item.channel, title="VOSE", action="entradas",
url="https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Subtitulada&s=",
url=host+"?anio=&genero=&calidad=&idioma=Subtitulada&s=",
extra="Buscar", text_color=color1, thumbnail=get_thumb('newest', auto=True)))
itemlist.append(Item(channel=item.channel, title="Géneros", action="generos", url="http://www.inkapelis.com/", text_color=color1,
itemlist.append(Item(channel=item.channel, title="Géneros", action="generos", url=host, text_color=color1,
thumbnail=get_thumb('genres', auto=True),))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url="http://www.inkapelis.com/?s=", text_color=color1))
itemlist.append(Item(channel=item.channel, title="Buscar...", action="search", url=host+"?s=", text_color=color1))
itemlist.append(Item(channel=item.channel, action="", title=""))
itemlist.append(
Item(channel=item.channel, action="filtro", title="Filtrar películas", url="http://www.inkapelis.com/?s=", text_color=color1))
Item(channel=item.channel, action="filtro", title="Filtrar películas", url=host+"?s=", text_color=color1))
# Custom filters for movies
for i in range(1, 4):
filtros = config.get_setting("pers_peliculas" + str(i), item.channel)
@@ -65,7 +67,7 @@ def mainlist(item):
new_item = item.clone()
new_item.values = filtros
itemlist.append(
new_item.clone(action="filtro", title=title, url="http://www.inkapelis.com/?s=", text_color=color2))
new_item.clone(action="filtro", title=title, url=host+"?s=", text_color=color2))
itemlist.append(Item(channel=item.channel, action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
autoplay.show_option(item.channel, itemlist)
@@ -86,21 +88,21 @@ def newest(categoria):
item = Item()
try:
if categoria == "peliculas":
item.url = "http://www.inkapelis.com/"
item.url = host
item.action = "entradas"
item.extra = "Novedades"
if categoria == "terror":
item.url = "https://www.inkapelis.com/genero/terror/"
item.url = host+"genero/terror/"
item.action = "entradas"
if categoria == "castellano":
item.url = "https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Castellano&s="
item.url = host+"?anio=&genero=&calidad=&idioma=Castellano&s="
item.extra = "Buscar"
item.action = "entradas"
if categoria == "latino":
item.url = "https://www.inkapelis.com/?anio=&genero=&calidad=&idioma=Latino&s="
item.url = host+"?anio=&genero=&calidad=&idioma=Latino&s="
item.extra = "Buscar"
item.action = "entradas"
itemlist = entradas(item)
@@ -122,7 +124,7 @@ def search(item, texto):
logger.info()
itemlist = []
item.extra = "Buscar"
item.url = "http://www.inkapelis.com/?s=%s" % texto
item.url = host+"?s=%s" % texto
try:
return entradas(item)
@@ -254,7 +256,7 @@ def filtrado(item, values):
item.valores = "Filtro: " + ", ".join(sorted(strings))
item.strings = ""
item.url = "http://www.inkapelis.com/?anio=%s&genero=%s&calidad=%s&idioma=%s&s=%s" % \
item.url = host+"?anio=%s&genero=%s&calidad=%s&idioma=%s&s=%s" % \
(year, genero, calidad, idioma, texto)
item.extra = "Buscar"
@@ -292,7 +294,8 @@ def entradas(item):
else:
# Extract the entries
if item.extra == "Novedades":
data2 = data.split("<h3>Últimas Películas Agregadas</h3>", 1)[1]
data2 = data.split("<h2>Últimas Películas Agregadas y Actualizadas</h2>", 1)[1]
entradas = scrapertools.find_multiple_matches(data2, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')
else:
entradas = scrapertools.find_multiple_matches(data, '<div class="col-mt-5 postsh">(.*?)</div></div></div>')

View File

@@ -38,9 +38,6 @@ def mainlist(item):
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel= item.channel, title="Generos", action="genres", url=host + 'ultimas-series-agregadas/',
thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel= item.channel, title="Recomendadas", action="list_all",
url=host + 'ultimas-series-agregadas/', type='recomended',
thumbnail=get_thumb('recomended', auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
thumbnail=get_thumb('search', auto=True)))
@@ -54,18 +51,9 @@ def list_all(item):
itemlist = []
data = get_source(item.url)
if item.type != 'search':
if item.type == 'recomended':
class_type = 'list_mt'
else:
class_type = 'info'
patron = '<a class=poster href=(.*?) title=(.*?)> <img src=(.*?) alt.*?class=%s' % class_type
else:
patron = 'item> <a class=poster href=(.*?) title=(.*?)> <img src=(.*?) alt.*?class=info'
matches = re.compile(patron, re.DOTALL).findall(data)
data1 = scrapertools.find_single_match(data, '<div class=col-md-80 lado2(.*?)</div></div></div>')
patron = '<a class=poster href=(.*?) title=(.*?)> <img.*?src=(.*?) alt'
matches = re.compile(patron, re.DOTALL).findall(data1)
for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
url = scrapedurl
@@ -82,7 +70,7 @@ def list_all(item):
if itemlist != []:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data, '<li><a href=([^ ]+) ><span aria-hidden=true>&raquo;</span>')
next_page = scrapertools.find_single_match(data, '<li><a href=([^ ]+)><span aria-hidden=true>&raquo;</span>')
if next_page != '':
itemlist.append(Item(channel=item.channel, action="list_all", title='Siguiente >>>', url=next_page,
thumbnail='https://s16.postimg.cc/9okdu7hhx/siguiente.png'))
@@ -160,13 +148,13 @@ def genres(item):
itemlist = []
norep = []
data = get_source(item.url)
patron = '<a href=([^>]+)><span.*?<i>(.*?)</i>'
patron = '<a href=([^>]+)><span.*?<i>(.*?)</i>.*?>(.*?)</b>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
for scrapedurl, scrapedtitle, cantidad in matches:
url = scrapedurl
title = scrapedtitle.capitalize()
title = "%s - %s" % (scrapedtitle.capitalize(), cantidad)
itemactual = Item(channel=item.channel, action='list_all', title=title, url=url)
if title not in norep:

View File

@@ -2,14 +2,11 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
host = 'http://javl.in'
@@ -17,7 +14,7 @@ host = 'http://javl.in'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="lista" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
return itemlist
@@ -28,7 +25,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host+ "/?s=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -48,11 +45,12 @@ def categorias(item):
scrapedtitle = str(scrapedtitle) + " ("+ str(number) + ")"
scrapedurl = "http://javl.in/?cat=" + scrapedurl
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
@@ -60,23 +58,12 @@ def peliculas(item):
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<span class=\'currenttext\'>.*?href=\'([^\']+)\' class=\'inactive\'>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
return itemlist
def play(item):
logger.info()
data = scrapertools.cachePage(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot) )
next_page = scrapertools.find_single_match(data,'<span class=\'currenttext\'>.*?href=\'([^\']+)\' class=\'inactive\'')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel , action="lista", title="Página Siguiente >>", text_color="blue",
url=next_page) )
return itemlist

View File

@@ -7,8 +7,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.javwhores.com/'
@@ -17,10 +15,9 @@ host = 'https://www.javwhores.com/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="peliculas", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Nuevos" , action="lista", url=host + "/latest-updates/"))
itemlist.append( Item(channel=item.channel, title="Mejor valorados" , action="lista", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistos" , action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -31,7 +28,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/%s/" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -51,30 +48,35 @@ def categorias(item):
for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
scrapedplot = ""
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="video-item ">.*?'
patron += '<a href="([^"]+)" title="([^"]+)" class="thumb">.*?'
patron += 'data-original="([^"]+)".*?'
patron += '<i class="fa fa-clock-o"></i>(.*?)</div>'
patron += '<span class="ico-fav-1(.*?)<p class="inf">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
time = scrapertools.find_single_match(duracion, '<i class="fa fa-clock-o"></i>([^"]+)</div>')
if 'HD' not in duracion:
title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
else:
title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle = title))
next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
if next_page:
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
itemlist.append(item.clone(action="lista", title="Página Siguiente >>" , text_color="blue", url=next_page ) )
return itemlist

View File

@@ -7,8 +7,6 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://jizzbunker.com/es'
@@ -42,14 +40,17 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><figure>.*?<a href="([^"]+)".*?<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?<span class="score">(\d+)</span>'
patron = '<li><figure>.*?<a href="([^"]+)".*?'
patron += '<img class="lazy" data-original="([^"]+)" alt="([^"]+)".*?'
patron += '<span class="score">(\d+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
scrapedurl = scrapedurl.replace("channel", "channel30")
scrapedtitle = scrapedtitle + " (" + cantidad + ")"
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
@@ -58,7 +59,9 @@ def peliculas(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<li><figure>.*?<a href="([^"]+)/([^"]+).html".*?<img class="lazy" data-original="([^"]+)".*?<time datetime=".*?">([^"]+)</time>'
patron = '<li><figure>.*?<a href="([^"]+)/([^"]+).html".*?'
patron += '<img class="lazy" data-original="([^"]+)".*?'
patron += '<time datetime=".*?">([^"]+)</time>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
url = scrapedurl + "/" + scrapedtitle + ".html"
@@ -66,12 +69,12 @@ def peliculas(item):
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
plot=plot, contentTitle = contentTitle))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)" rel="next">&rarr;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist

View File

@@ -2,14 +2,11 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://xxx.justporno.tv'
@@ -17,9 +14,9 @@ host = 'http://xxx.justporno.tv'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host + "/latest-updates/1/"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="peliculas", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host + "/latest-updates/1/"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas Vistas", action="lista", url=host + "/most-popular/"))
itemlist.append( Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
return itemlist
@@ -27,10 +24,11 @@ def mainlist(item):
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/search/%s" % texto
item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
item.extra = texto
try:
return peliculas(item)
return lista(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
except:
import sys
for line in sys.exc_info():
@@ -42,33 +40,62 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?<div class="videos">(\d+) video.*?</div>'
patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
patron += '<div class="videos">(\d+) video.*?</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,numero in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapedtitle + " (" + numero + ")"
thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<a href="http://xxx.justporno.tv/videos/(\d+)/.*?" title="([^"]+)" >.*?data-original="([^"]+)".*?<div class="duration">(.*?)</div>'
patron = '<a href="http://xxx.justporno.tv/videos/(\d+)/.*?" title="([^"]+)" >.*?'
patron += 'data-original="([^"]+)".*?'
patron += '<div class="duration">(.*?)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches:
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
scrapedurl = "http://xxx.justporno.tv/embed/" + scrapedurl
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
next_page_url = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
if item.extra:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(.*?)>')
if next_page:
if "from_videos=" in item.url:
next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result"\
"&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
else:
next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
if next_page and not next_page.startswith("#"):
next_page = urlparse.urljoin(host, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
else:
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
if next_page:
if "from" in item.url:
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
else:
next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list" \
"&sort_by=post_date&from=%s" % (item.url, next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
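
The two async pagination branches above target the site's KVS-style block endpoints. A condensed, standalone sketch of how the next-page URL is assembled (the block ids mirror the ones scraped; url/token/search values are illustrative):

import re

def build_next_page(url, token, search_term=None):
    # Search results page through the search_result block with from_videos=N
    if search_term:
        if "from_videos=" in url:
            return re.sub(r'&from_videos=\d+', '&from_videos=%s' % token, url)
        return ("%s?mode=async&function=get_block"
                "&block_id=list_videos_videos_list_search_result"
                "&q=%s&category_ids=&sort_by=post_date&from_videos=%s"
                % (url, search_term, token))
    # Plain listings page through the common_videos block with from=N
    if "from=" in url:
        return re.sub(r'&from=\d+', '&from=%s' % token, url)
    return ("%s?mode=async&function=get_block&block_id=list_videos_common_videos_list"
            "&sort_by=post_date&from=%s" % (url, token))

(The middle branch, which simply follows the href scraped from <li class="next">, needs no assembly and is omitted.)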

View File

@@ -1,13 +1,15 @@
{
"id": "mundoflv",
"name": "MundoFlv",
"active": false,
"id": "legalmentegratis",
"name": "Legalmente Gratis (clasicos)",
"active": true,
"adult": false,
"language": ["lat"],
"thumbnail": "https://s32.postimg.cc/h1ewz9hhx/mundoflv.png",
"banner": "mundoflv.png",
"language": ["cast"],
"thumbnail": "https://i.postimg.cc/NFGv0pN3/legalgratis.png",
"banner": "",
"version": 1,
"categories": [
"tvshow"
"movie",
"vos"
],
"settings": [
{
@@ -27,12 +29,9 @@
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"Español",
"VOS",
"VOSE",
"VO"
"CAST",
"VOSE"
]
}
]
}

View File

@@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
# -*- Channel Legalmente Gratis -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
import urllib
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
from channels import filtertools
host = 'http://legalmentegratis.com/'
IDIOMAS = {'español':'CAST', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['youtube']
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Todas", action="list_all", url=host,
thumbnail=get_thumb('all', auto=True)))
itemlist.append(Item(channel=item.channel, title="Generos", action="section", section='genre',
thumbnail=get_thumb('genres', auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def get_source(url, referer=None):
logger.info()
if referer is None:
data = httptools.downloadpage(url).data
else:
data = httptools.downloadpage(url, headers={'Referer':referer}).data
data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def list_all(item):
logger.info()
itemlist = []
data = get_source(item.url)
patron = '<article id="post-\d+".*?href="([^"]+)".*?src="([^"]+)".*?<p>(.*?) (\(?\d{4}\)?)([^<]+)</p>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, year, scrapedplot in matches:
url = scrapedurl
contentTitle = scrapedtitle
year = re.sub(r'\(|\)','', year)
title = '%s [%s]' % (contentTitle, year)
thumbnail = 'http:' + scrapedthumbnail
itemlist.append(Item(channel=item.channel, action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentTitle=contentTitle,
infoLabels={'year': year}
))
tmdb.set_infoLabels_itemlist(itemlist, True)
itemlist = sorted(itemlist, key=lambda it: it.contentTitle)
# Paginación
url_next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)">')
if url_next_page:
itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
section=item.section))
return itemlist
def section(item):
logger.info()
itemlist = []
data = get_source(host)
action = 'list_all'
if item.section == 'genre':
data = scrapertools.find_single_match(data, '>Género(.*?)</ul>')
patron = 'href="([^"]+)".*?>([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for url, title in matches:
new_item = Item(channel=item.channel, title=title, url=url, action=action, section=item.section)
itemlist.append(new_item)
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = get_source(item.url)
lang_data = scrapertools.find_single_match(data, '<p><strong(.*?)</strong></p>')
if 'español' in lang_data:
language = 'español'
else:
language = 'VOSE'
url = scrapertools.find_single_match (data, '<iframe.*?src="([^"]+)"')
if 'gloria.tv' in url:
new_data = get_source(url)
url = 'https://gloria.tv' + scrapertools.find_single_match(new_data, '<source type=".*?" src="([^"]+)">')
itemlist.append(Item(channel=item.channel, title='%s', action='play', url=url,
language=IDIOMAS[language], infoLabels=item.infoLabels))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % '%s' % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url, action="add_pelicula_to_library", extra="findvideos",
contentTitle=item.contentTitle))
return itemlist
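
For reference, the gloria.tv branch above amounts to a tiny resolver: the embed page exposes the real file in a <source> tag whose path is joined back onto the gloria.tv origin. A sketch using this channel's own get_source helper (the embed URL is whatever the iframe pointed at):

from core import scrapertools

def resolve_gloria(embed_url):
    page = get_source(embed_url)  # helper defined earlier in this channel
    path = scrapertools.find_single_match(page, '<source type=".*?" src="([^"]+)">')
    return 'https://gloria.tv' + path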

View File

@@ -1,33 +0,0 @@
{
"id": "mastorrents",
"name": "MasTorrents",
"active": true,
"adult": false,
"language": ["cast","lat"],
"thumbnail": "https://s33.postimg.cc/3y8720l9b/mastorrents.png",
"banner": "",
"version": 1,
"categories": [
"movie",
"tvshow",
"torrent"
],
"settings": [
{
"id": "include_in_newest_peliculas",
"type": "bool",
"label": "Incluir en Novedades - Peliculas",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_newest_torrent",
"type": "bool",
"label": "Incluir en Novedades - Torrent",
"default": true,
"enabled": true,
"visible": true
}
]
}

View File

@@ -1,323 +0,0 @@
# -*- coding: utf-8 -*-
# -*- Channel MasTorrents -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-
import re
from channelselector import get_thumb
from platformcode import logger
from platformcode import config
from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
host = 'http://www.mastorrents.com/'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Peliculas",
action="movie_list",
thumbnail=get_thumb("channels_movie.png")
))
itemlist.append(item.clone(title="Series",
action="series_list",
thumbnail=get_thumb("channels_tvshow.png")
))
return itemlist
def movie_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
url=host+'peliculas',
extra='movie',
thumbnail=get_thumb('all', auto=True)
))
itemlist.append(item.clone(title="Generos",
action="genres",
url=host,
extra='movie',
thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + '?pTit=', thumbnail=get_thumb('search', auto=True),
extra='movie'
))
return itemlist
def series_list(item):
logger.info()
itemlist = []
itemlist.append(item.clone(title="Todas",
action="lista",
url=host + 'series',
extra='serie',
thumbnail=get_thumb('all', auto=True)
))
itemlist.append(item.clone(title="Generos",
action="genres",
url=host + 'series/',
extra='serie',
thumbnail=get_thumb('genres', auto=True)
))
itemlist.append(item.clone(title="Buscar",
action="search",
url=host + 'series/?pTit=',
extra='serie',
thumbnail=get_thumb('search', auto=True)
))
return itemlist
def get_source(url):
logger.info()
data = httptools.downloadpage(url).data
data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
return data
def lista (item):
logger.info ()
itemlist = []
infoLabels = dict()
data = get_source(item.url)
patron = "<div class=moviesbox>.*?</div><a href=(.*?)><div class=moviesbox_img style=background-image:url\('("
patron += ".*?)'\)>.*?tooltipbox>(.*?)(?: <i>| <br /><i>)(.*?)<"
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle, extra_data in matches:
extra_data = extra_data.replace('(','').replace(')','')
url = scrapedurl
thumbnail = scrapedthumbnail
contentTitle = scrapedtitle.decode('latin1').encode('utf8')
title = contentTitle
tvshow = False
if 'x' in extra_data:
tvshow = True
filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w200_and_h300_bestv2", "")
filtro_list = {"poster_path": filtro_thumb}
filtro_list = filtro_list.items()
infoLabels['filtro']= filtro_list
else:
infoLabels['year']=extra_data
new_item=(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
infoLabels=infoLabels,
extra=item.extra
))
if tvshow:
new_item.contentSerieName = contentTitle
new_item.action = 'seasons'
else:
new_item.contentTitle = contentTitle
new_item.action = 'findvideos'
itemlist.append(new_item)
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb =True)
#Paginacion
if itemlist !=[]:
actual_page_url = item.url
next_page = scrapertools.find_single_match(data,'<span class=pagination_next><a href=(.*?)>')
import inspect
if next_page !='':
itemlist.append(item.clone(action = "lista",
title = 'Siguiente >>>',
url = next_page
))
return itemlist
def genres(item):
logger.info()
itemlist = []
data = get_source(item.url)
data = scrapertools.find_single_match(data,'G&eacute;neros</option>(.+)</select></div>')
patron = '<option value=(.*?)>(.*?)</option>'
matches = re.compile(patron,re.DOTALL).findall(data)
for value, title in matches:
url = item.url + value
title = title.decode('latin1').encode('utf8')
itemlist.append(Item(channel=item.channel, title=title, url=url, action='lista'))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = item.url + texto
if texto != '':
return lista(item)
else:
return []
def seasons(item):
logger.info()
itemlist=[]
infoLabels = item.infoLabels
data=get_source(item.url)
patron ='href=javascript:showSeasson\(.*?\); id=.*?>Temporada (.*?)<\/a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for season in matches:
title='Temporada %s' % season
infoLabels['season'] = season
itemlist.append(Item(channel=item.channel,
title= title,
url=item.url,
action='episodesxseasons',
contentSeasonNumber=season,
contentSerieName=item.contentSerieName,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
itemlist = itemlist[::-1]
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(
Item(channel=item.channel, title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', url=item.url,
action="add_serie_to_library", extra="all_episodes", contentSerieName=item.contentSerieName))
return itemlist
def all_episodes(item):
logger.info()
itemlist = []
templist = seasons(item)
for tempitem in templist:
itemlist += episodesxseasons(tempitem)
return itemlist
def episodesxseasons(item):
logger.info()
itemlist=[]
data=get_source(item.url)
patron = "<div class=corner-episode>%sx(.\d+)<\/div><a href=(.*?)>.*?" % item.contentSeasonNumber
patron += "image:url\('(.*?)'.*?href.*?>(%s)<" % item.contentSerieName
matches = re.compile(patron, re.DOTALL).findall(data)
infoLabels=item.infoLabels
for episode, scrapedurl, scrapedthumbnail, scrapedtitle in matches:
contentEpisodeNumber=episode
season = item.contentSeasonNumber
url=scrapedurl
thumbnail=scrapedthumbnail
infoLabels['episode']=episode
title = '%sx%s - %s' % (season, episode, item.contentSerieName)
itemlist.append(Item(channel=item.channel,
action='findvideos',
title=title,
url=url,
thumbnail=thumbnail,
contentSerieName=item.contentSerieName,
contentEpisodeNumber=contentEpisodeNumber,
infoLabels=infoLabels
))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
return itemlist[::-1]
def findvideos(item):
logger.info()
itemlist=[]
data = get_source(item.url)
patron = "showDownload\(([^\)]+)\);.*?alt=.*?torrent (.*?) "
matches = re.compile(patron, re.DOTALL).findall(data)
for extra_info, quality in matches:
extra_info= extra_info.replace(",'",'|')
extra_info= extra_info.split('|')
title = '%s [%s]' % ('Torrent', quality.strip())
if item.extra == 'movie':
url = extra_info[2].strip("'")
else:
url = extra_info[3].strip("'")
server = 'torrent'
if not '.torrent' in url:
if 'tvsinpagar' in url:
url = url.replace('http://','http://www.')
try:
from_web = httptools.downloadpage(url, follow_redirects=False)
url = from_web.headers['location']
except:
pass
if '.torrent' in url:
itemlist.append(Item(channel=item.channel,
title=title,
contentTitle= item.title,
url=url,
action='play',
quality=quality,
server=server,
thumbnail = item.infoLabels['thumbnail'],
infoLabels=item.infoLabels
))
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
itemlist.append(Item(channel=item.channel,
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
url=item.url,
action="add_pelicula_to_library",
extra="findvideos",
contentTitle=item.contentTitle
))
return itemlist
def newest(category):
logger.info()
item = Item()
try:
if category in ['peliculas', 'torrent']:
item.url = host + 'estrenos-de-cine'
item.extra='movie'
itemlist = lista(item)
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
if category == 'torrent':
item.url = host+'series'
item.extra = 'serie'
itemlist.extend(lista(item))
if itemlist[-1].title == 'Siguiente >>>':
itemlist.pop()
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
return itemlist

View File

@@ -1,12 +1,27 @@
{
"id": "maxipelis24",
"name": "Maxipelis24",
"active": true,
"adult": false,
"language": ["lat"],
"language": ["cast", "lat", "vose"],
"thumbnail": "maxipelis24.png",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"Latino",
"CAST",
"VOSE"
]
}
]
}

View File

@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
import re
import urlparse
import urllib
from channels import autoplay
from channels import filtertools
from core import tmdb
from core import servertools
from core import httptools
@@ -14,19 +14,32 @@ from channelselector import get_thumb
host = "https://maxipelis24.tv"
IDIOMAS = {'Latino': 'Latino', 'Sub':'VOSE', 'Subtitulado': 'VOSE', 'Español': 'CAST', 'Castellano':'CAST'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['rapidvideo', 'vidoza', 'openload', 'streamango', 'okru']
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel = item.channel, title = "peliculas", action = "movies", url = host, thumbnail = get_thumb('movies', auto = True)))
itemlist.append(Item(channel = item.channel, action = "category", title = "Año de Estreno", url = host, cat = 'year', thumbnail = get_thumb('year', auto = True)))
itemlist.append(Item(channel = item.channel, action = "category", title = "Géneros", url = host, cat = 'genre', thumbnail = get_thumb('genres', auto = True)))
itemlist.append(Item(channel = item.channel, action = "category", title = "Calidad", url = host, cat = 'quality', thumbnail = get_thumb("quality", auto = True)))
itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "?s=", thumbnail = get_thumb("search", auto = True)))
itemlist.append(Item(channel=item.channel, title="Peliculas",
action="movies", url=host, page=0, thumbnail=get_thumb('movies', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Año de Estreno",
url=host, cat='year', thumbnail=get_thumb('year', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Géneros",
url=host, cat='genre', thumbnail=get_thumb('genres', auto=True)))
itemlist.append(Item(channel=item.channel, action="category", title="Calidad",
url=host, cat='quality', thumbnail=get_thumb("quality", auto=True)))
itemlist.append(Item(channel=item.channel, title="Buscar", action="search",
url=host + "?s=", page=0, thumbnail=get_thumb("search", auto=True)))
autoplay.show_option(item.channel, itemlist)
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
@@ -34,105 +47,132 @@ def search(item, texto):
if texto != '':
return movies(item)
def category(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
if item.cat == 'genre':
data = scrapertools.find_single_match(data, '<h3>Géneros <span class="icon-sort">.*?</ul>')
patron = '<li class="cat-item cat-item.*?<a href="([^"]+)" >([^<]+)<'
data = scrapertools.find_single_match(
data, '<h3>Géneros <span class="icon-sort">.*?</ul>')
patron = '<li class="cat-item cat-item.*?<a href="([^"]+)".*?>([^<]+)<'
elif item.cat == 'year':
data = scrapertools.find_single_match(data, '<h3>Año de estreno.*?</div>')
patron = 'li><a href="([^"]+)">([^<]+).*?<'
data = scrapertools.find_single_match(
data, '<h3>Año de estreno.*?</div>')
patron = 'li><a href="([^"]+)".*?>([^<]+).*?<'
elif item.cat == 'quality':
data = scrapertools.find_single_match(data, '<h3>Calidad.*?</div>')
patron = 'li><a href="([^"]+)">([^<]+)<'
patron = 'li><a href="([^"]+)".*?>([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(Item(channel = item.channel, action = 'movies', title =scrapedtitle, url = scrapedurl, type = 'cat', first = 0))
itemlist.append(Item(channel=item.channel, action='movies',
title=scrapedtitle, url=scrapedurl, type='cat', page=0))
return itemlist
def movies(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div id="mt.+?href="([^"]+)".+?'
patron += '<img src="([^"]+)" alt="([^"]+)".+?'
patron += '<span class="ttx">([^<]+).*?'
patron += 'class="year">([^<]+).+?class="calidad2">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, img, scrapedtitle, resto, year, quality in matches:
scrapedtitle = re.sub(r'\d{4}|[()]','', scrapedtitle)
for scrapedurl, img, scrapedtitle, resto, year, quality in matches[item.page:item.page + 20]:
scrapedtitle = re.sub(r' \((\d+)\)', '', scrapedtitle)
plot = scrapertools.htmlclean(resto).strip()
title = ' %s [COLOR red][%s][/COLOR]' % (scrapedtitle, quality)
itemlist.append(Item(channel = item.channel,
title = title,
url = scrapedurl,
action = "findvideos",
plot = plot,
thumbnail = img,
contentTitle = scrapedtitle,
contentType = "movie",
quality = quality,
infoLabels = {'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
#Paginacion
matches = re.compile('class="respo_pag"><div class="pag.*?<a href="([^"]+)" >Siguiente</a><', re.DOTALL).findall(data)
if matches:
url = urlparse.urljoin(item.url, matches[0])
itemlist.append(Item(channel = item.channel, action = "movies", title = "Página siguiente >>", url = url))
itemlist.append(Item(channel=item.channel,
title=title,
url=scrapedurl,
action="findvideos",
plot=plot,
thumbnail=img,
contentTitle=scrapedtitle,
contentType="movie",
quality=quality,
infoLabels={'year': year}))
tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
# Paginacion
if item.page + 20 < len(matches):
itemlist.append(item.clone(page=item.page + 20, title=">> Siguiente"))
else:
next_page = scrapertools.find_single_match(
data, '<link rel="next" href="([^"]+)" />')
if next_page:
itemlist.append(item.clone(url=next_page, page=0,
title=" Siguiente »"))
return itemlist
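
Pagination in movies is now client-side: each call renders a 20-item slice of the regex matches and only follows the page's rel="next" link once the slice runs off the end. The pattern, condensed (names illustrative):

PAGE_SIZE = 20

def next_step(matches, page, next_page_url):
    block = matches[page:page + PAGE_SIZE]              # items rendered this call
    if page + PAGE_SIZE < len(matches):
        return block, ('same_html', page + PAGE_SIZE)   # more slices left in this HTML
    if next_page_url:
        return block, ('next_html', next_page_url)      # fetch the next HTML page
    return block, None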
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","", data)
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&'
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<div id="div(\d+)".*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^&]+)&'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
for ot, link in matches:
data1 = scrapertools.find_single_match(data, '<ul class="idTabs">.*?</ul></div>')
patron = 'li>.*?href="#div%s.*?>.*?([^<|\s]+)' % ot
matches1 = re.compile(patron, re.DOTALL).findall(data1)
for lang in matches1:
if "VIP" in lang:
continue
idioma = lang
if 'ok.ru' in link:
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
url = link if link.startswith("https") else "https:%s" % link
title = '%s'
new_item = Item(channel=item.channel, title=title, url=url,
action='play', language=IDIOMAS[idioma], infoLabels=item.infoLabels)
itemlist.append(new_item)
if 'maxipelis24.tv/hideload/?' in link:
if 'id=' in link:
id_type = 'id'
ir_type = 'ir'
elif 'ud=' in link:
id_type = 'ud'
ir_type = 'ur'
elif 'od=' in link:
id_type = 'od'
ir_type = 'or'
elif 'ad=' in link:
id_type = 'ad'
ir_type = 'ar'
elif 'ed=' in link:
id_type = 'ed'
ir_type = 'er'
id_letter = scrapertools.find_single_match(link, '\?(\w)d=')
id_type = '%sd' % id_letter
ir_type = '%sr' % id_letter
id = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
base_link = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
base_link = scrapertools.find_single_match(
link, '(.*?)%s=' % id_type)
ir = id[::-1]
referer = base_link+'%s=%s&/' % (id_type, ir)
video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir), headers={'Referer':referer},
follow_redirects=False)
video_data = httptools.downloadpage('%s%s=%s' % (base_link, ir_type, ir), headers={'Referer': referer},
follow_redirects=False)
url = video_data.headers['location']
title = '%s'
new_item = Item(channel=item.channel, title=title, url=url, action='play', language = '', infoLabels=item.infoLabels)
itemlist.append(new_item)
else:
patron = '<div id="div.*?<div class="movieplay".*?(?:iframe.*?src|IFRAME SRC)="([^"]+)"'
matches = re.compile(patron, re.DOTALL).findall(data)
for link in matches:
url = link
title = '%s'
new_item = Item(channel=item.channel, title=title, url=url, action='play', language = '', infoLabels=item.infoLabels)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize())
new_item = Item(channel=item.channel, title=title, url=url,
action='play', language=IDIOMAS[idioma], infoLabels=item.infoLabels)
itemlist.append(new_item)
itemlist = servertools.get_servers_itemlist(
itemlist, lambda i: i.title % '%s [%s]' % (i.server.capitalize(), i.language))
#itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
if itemlist:
if config.get_videolibrary_support():
itemlist.append(Item(channel = item.channel, action = ""))
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
contentTitle = item.contentTitle
))
return itemlist
itemlist.append(Item(channel=item.channel, action=""))
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
action="add_pelicula_to_library", url=item.url, thumbnail=item.thumbnail,
contentTitle=item.contentTitle
))
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
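
The hideload branch above works by reversing the video id and replaying it against the matching "r" endpoint, then reading the real URL from the redirect. A standalone sketch using the same httptools/scrapertools calls (the sample link shape is illustrative):

from core import httptools, scrapertools

def resolve_hideload(link):
    # link looks like .../hideload/?id=XYZ (or ud=/od=/ad=/ed=)
    letter = scrapertools.find_single_match(link, '\?(\w)d=')
    id_type, ir_type = '%sd' % letter, '%sr' % letter
    vid = scrapertools.find_single_match(link, '%s=(.*)' % id_type)
    base = scrapertools.find_single_match(link, '(.*?)%s=' % id_type)
    rev = vid[::-1]                                   # the id, reversed
    referer = base + '%s=%s&/' % (id_type, rev)
    resp = httptools.downloadpage('%s%s=%s' % (base, ir_type, rev),
                                  headers={'Referer': referer},
                                  follow_redirects=False)
    return resp.headers['location']                   # real video URL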

View File

@@ -18,7 +18,7 @@
"id": "domain_name",
"type": "text",
"label": "URL actual de la Web Mejor Torrent",
"default": "http://www.mejortorrent.org/",
"default": "http://www.mejortorrent.tv/",
"enabled": true,
"visible": true
},

View File

@@ -26,7 +26,8 @@ list_servers = ['torrent']
channel = "mejortorrent"
host = 'http://www.mejortorrent.org/'
host = 'http://www.mejortorrent.tv/'
host_sufix = '.tv'
#host = config.get_setting('domain_name', channel)
categoria = channel.capitalize()
@@ -296,8 +297,8 @@ def listado(item):
item_local.title = scrapertools.get_match(scrapedurl, patron_enlace)
item_local.title = item_local.title.replace("-", " ")
item_local.url = urlparse.urljoin(item_local.url, scrapedurl)
item_local.thumbnail = host + urllib.quote(scrapedthumbnail)
item_local.url = verificar_url(urlparse.urljoin(item_local.url, scrapedurl))
item_local.thumbnail = verificar_url(host + urllib.quote(scrapedthumbnail))
item_local.contentThumbnail = item_local.thumbnail
item_local.infoLabels['year'] = '-' # Al no saber el año, le ponemos "-" y TmDB lo calcula automáticamente
@@ -660,7 +661,7 @@ def listado_busqueda(item):
item_local.quality = scrapertools.remove_htmltags(scrapedinfo).decode('iso-8859-1').encode('utf8')
item_local.quality = item_local.quality.replace("(", "").replace(")", "").replace("[", "").replace("]", "").replace("Documental", "").replace("documental", "")
item_local.url = urlparse.urljoin(item.url, scrapedurl)
item_local.url = verificar_url(urlparse.urljoin(item.url, scrapedurl))
#Preparamos la información básica para TMDB
if "/serie-" in scrapedurl or "/doc-" in scrapedurl:
@@ -829,10 +830,10 @@ def findvideos(item):
for scrapedurl in matches:
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
url = urlparse.urljoin(item.url, scrapedurl)
url = verificar_url(urlparse.urljoin(item.url, scrapedurl))
# Localiza el .torrent en el siguiente link
if not item.post and not item.armagedon: # Si no es llamada con Post, hay que bajar un nivel más
if not item.post and not item.armagedon: # Si no es llamada con Post, hay que bajar un nivel más
try:
torrent_data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", httptools.downloadpage(url).data)
except: #error
@@ -849,15 +850,15 @@ def findvideos(item):
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#logger.debug(torrent_data)
if not item.armagedon:
item_local.url = scrapertools.get_match(torrent_data, ">Pincha.*?<a href='(.*?\/uploads\/torrents\/\w+\/.*?\.torrent)'")
item_local.url = urlparse.urljoin(url, item_local.url)
item_local.url = verificar_url(urlparse.urljoin(url, item_local.url))
elif not item.armagedon:
item_local.url = url # Ya teníamos el link desde el primer nivel (documentales)
item_local.url = url # Ya teníamos el link desde el primer nivel (documentales)
item_local.url = item_local.url.replace(" ", "%20")
if item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls:
@@ -867,10 +868,10 @@ def findvideos(item):
del item.emergency_urls[0][0]
if not item.armagedon and item.emergency_urls and not item.videolibray_emergency_urls:
if len(item.emergency_urls[0]):
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la primera url del .Torrent ALTERNATIVA
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la primera url del .Torrent ALTERNATIVA
if item.videolibray_emergency_urls:
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la primera url del .Torrent ALTERNATIVA
if item.videolibray_emergency_urls:
item.emergency_urls[0].append(item_local.url) #Salvamos la url...
item.emergency_urls[0].append(verificar_url(item_local.url)) #Salvamos la url...
# Poner la calidad, si es necesario
if not item_local.quality:
@@ -1003,7 +1004,7 @@ def episodios(item):
item_local.title = ''
item_local.context = "['buscar_trailer']"
item_local.url = urlparse.urljoin(host, scrapedurl)
item_local.url = verificar_url(urlparse.urljoin(host, scrapedurl))
scrapedtitle = re.sub('\r\n', '', scrapedtitle).decode('iso-8859-1').encode('utf8').strip()
if scrapedtitle.endswith('.'):
@@ -1030,7 +1031,7 @@ def episodios(item):
else: #Se prepara el Post para documentales
item_local.contentSeason = 1
item_local.contentEpisodeNumber = 1
item_local.url = host + "/secciones.php?sec=descargas&ap=contar_varios"
item_local.url = verificar_url(host + "/secciones.php?sec=descargas&ap=contar_varios")
item_local.post = urllib.urlencode({name: value, "total_capis": total_capis, "tabla": tabla, "titulo": titulo_post})
if year:
@@ -1050,6 +1051,15 @@ def episodios(item):
item, itemlist = generictools.post_tmdb_episodios(item, itemlist)
return itemlist
def verificar_url(url):
if '.com' in url or '.net' in url or '.org' in url:
url = url.replace('.com', '.tv').replace('.net', '.tv').replace('.org', '.tv')
url = url.replace('torrents/tmp/torrent.php?table=peliculas/&name=', 'torrents/peliculas/')
url = url.replace('torrents/tmp/torrent.php?table=series/&name=', 'torrents/series/')
url = url.replace('torrents/tmp/torrent.php?table=documentales/&name=', 'torrents/documentales/')
return url
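
An illustrative before/after for verificar_url (the torrent name is made up): it migrates the old .com/.net/.org domains to .tv and unwraps the tmp torrent.php redirector into the direct path:

before = 'http://www.mejortorrent.org/torrents/tmp/torrent.php?table=peliculas/&name=ejemplo.torrent'
after = verificar_url(before)
# after == 'http://www.mejortorrent.tv/torrents/peliculas/ejemplo.torrent'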
def actualizar_titulos(item):

View File

@@ -18,7 +18,7 @@
"id": "domain_name",
"type": "text",
"label": "URL actual de la Web Mejor Torrent",
"default": "https://mejortorrent1.com/",
"default": "https://mejortorrent1.net/",
"enabled": true,
"visible": true
},

View File

@@ -809,10 +809,10 @@ def listado_busqueda(item):
def findvideos(item):
logger.info()
itemlist = []
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
itemlist_t = [] #Itemlist total de enlaces
itemlist_f = [] #Itemlist de enlaces filtrados
if not item.language:
item.language = ['CAST'] #Castellano por defecto
item.language = ['CAST'] #Castellano por defecto
matches = []
#logger.debug(item)
@@ -827,7 +827,7 @@ def findvideos(item):
#Si es un lookup para cargar las urls de emergencia en la Videoteca...
if item.videolibray_emergency_urls:
item.emergency_urls = []
item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales
item.emergency_urls.append([]) #Reservamos el espacio para los .torrents locales
#Bajamos los datos de la página de todo menos de Documentales y Varios
if not item.post:
@@ -836,7 +836,7 @@ def findvideos(item):
data = data.replace('"', "'")
patron = "<form (?:.*?)?"
patron += "name='episodios'.+action='([^']+)' method='post'>.*?"
patron += "<input.+type='[^']+'.+name='([^']+)'.+value='([^']+)'>.*<input.+type='hidden'.+value='([^']+)'.+name='([^']+)'>"
patron += "<input\s*type='[^']+'\s*name='([^']+)'\s*value='([^']+)'>\s*<input\s*type='[^']+'\s*value='([^']+)'\s*name='([^']+)'>(?:\s*<input\s*type='[^']+'\s*value='([^']+)'\s*name='([^']+)'\s*id='([^']+)'>)?"
except:
pass
if not data:
@@ -846,10 +846,10 @@ def findvideos(item):
matches = item.emergency_urls[1] #Restauramos matches
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
if not item.armagedon: #Si es un proceso normal, seguimos
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -878,55 +878,72 @@ def findvideos(item):
if item.videolibray_emergency_urls:
item.emergency_urls.append(matches) #Salvamos matches...
for scrapedurl, name1, value1, value2, name2 in matches: #Hacemos el FOR aunque solo habrá un item
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
for scrapedurl, name1, value1, value2, name2, value3, name3, id3 in matches: #Hacemos el FOR aunque solo habrá un item
url = scrapedurl
# Localiza el .torrent en el siguiente link con Post
post = '%s=%s&%s=%s' % (name1, value1, name2, value2)
#post = '%s=%s&%s=%s&%s=%s' % (name1, value1, name2, value2, name3, value3)
if not item.armagedon:
try:
torrent_data = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=False)
except: #error
pass
else:
#Viene de SERIES y DOCUMENTALES. Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
try: #Localiza el .torrent en el siguiente link con Post. Viene de Documentales y Varios
url = item.url_post
torrent_data = httptools.downloadpage(url, post=item.post, headers=headers, follow_redirects=False)
del item.url_post
post = item.post
torrent_data = httptools.downloadpage(url, post=post, headers=headers, follow_redirects=False)
except:
pass
if not torrent_data or not 'location' in torrent_data.headers or not torrent_data.headers['location']:
if not torrent_data or not ('location' in torrent_data.headers or 'zip' in torrent_data.headers.get('content-type', '')):
item = generictools.web_intervenida(item, data) #Verificamos que no haya sido clausurada
if item.intervencion: #Sí ha sido clausurada judicialmente
item, itemlist = generictools.post_tmdb_findvideos(item, itemlist) #Llamamos al método para el pintado del error
elif not item.armagedon:
logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / URL: " + url + " / DATA: " + data)
logger.error("ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web " + " / URL: " + url + " / POST: " + post + " / DATA: " + str(torrent_data.headers))
itemlist.append(item.clone(action='', title=item.channel.capitalize() + ': ERROR 02: FINDVIDEOS: El archivo Torrent no existe o ha cambiado la estructura de la Web. Verificar en la Web y reportar el error con el log'))
if item.emergency_urls and not item.videolibray_emergency_urls: #Hay urls de emergencia?
item_local.url = item.emergency_urls[0][0] #Restauramos la url del .torrent
item.url = item.emergency_urls[0][0] #Restauramos la url del .torrent
item.armagedon = True #Marcamos la situación como catastrófica
else:
if item.videolibray_emergency_urls: #Si es llamado desde creación de Videoteca...
return item #Devolvemos el Item de la llamada
else:
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
return itemlist #si no hay más datos, algo no funciona, pintamos lo que tenemos
#Si el torrent viene en un .zip en vez de desde una url, lo preparamos todo para el play
referer_zip = None
post_zip = None
if 'location' not in torrent_data.headers and 'zip' in torrent_data.headers.get('content-type', '') and not item.armagedon:
item.referer = item.url
referer_zip = item.referer
item.url = url
item.post = post
post_zip = item.post
#Generamos una copia de Item para trabajar sobre ella
item_local = item.clone()
#Capturamos la url del .torrent desde el Header
if not item.armagedon:
item_local.url = torrent_data.headers['location'] if 'location' in torrent_data.headers else item.url_post
item_local.url = torrent_data.headers['location'] if 'location' in torrent_data.headers else item.url
item_local.url = item_local.url.replace(" ", "%20") #Quitamos espacios
if item.emergency_urls:
item_local.torrent_alt = item.emergency_urls[0][0] #Guardamos la url del .Torrent ALTERNATIVA
#Si es un lookup para cargar las urls de emergencia en la Videoteca...
if item.videolibray_emergency_urls:
item.emergency_urls[0].append(item_local.url) #Salvamos la url...
if 'location' in torrent_data.headers or config.get_setting("emergency_urls_torrents", item_local.channel):
item.emergency_urls[0].append(item_local.url) #Salvamos la url...
elif not config.get_setting("emergency_urls_torrents", item_local.channel):
item.emergency_urls[0].append(item_local.referer) #Salvamos el referer...
return item #... y nos vamos
# Poner la calidad, si es necesario
@@ -947,7 +964,7 @@ def findvideos(item):
size = scrapertools.find_single_match(item_local.url, '(\d{1,3},\d{1,2}?\w+)\.torrent')
size = size.upper().replace(".", ",").replace("G", " G ").replace("M", " M ") #sustituimos . por , porque Unify lo borra
if not size and not item.armagedon:
size = generictools.get_torrent_size(item_local.url) #Buscamos el tamaño en el .torrent
size = generictools.get_torrent_size(item_local.url, referer_zip, post_zip) #Buscamos el tamaño en el .torrent
if size:
item_local.title = re.sub('\s\[\d+,?\d*?\s\w[b|B]\]', '', item_local.title) #Quitamos size de título, si lo traía
item_local.title = '%s [%s]' % (item_local.title, size) #Agregamos size al final del título
@@ -1021,7 +1038,7 @@ def episodios(item):
# Prepara el patrón de búsqueda
patron = "<form (?:style='[^']+'\s)?name='episodios' action='([^']+)'"
url = scrapertools.find_single_match(data, patron) #Salvamos la url de descarga
url = url.replace('descargar_series.php', 'descargar.php') #ESTA DESCARGARÍA EL TORRENT EN VEZ DEL ENLACE
url = url.replace('descargar_tv.php', 'descargar_post.php') #ESTA DESCARGARÍA EL TORRENT EN VEZ DEL ENLACE
patron = "<form (?:style='[^']+'\s)?name='episodios' action='[^']+'.*?<input type='hidden' value='([^']+)' name='([^']+)'>"
value2 = '' #Patrón general para Documentales (1)
name2 = ''
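
Taken together, the findvideos changes above implement this flow: POST the episode form with follow_redirects=False, take the .torrent URL from the Location header when present, and otherwise keep the url/post pair so the zipped torrent in the response body can be re-fetched. A condensed sketch under those assumptions (names illustrative):

from core import httptools

def fetch_torrent_location(url, post, headers):
    resp = httptools.downloadpage(url, post=post, headers=headers,
                                  follow_redirects=False)
    if 'location' in resp.headers:
        # normal case: the header carries the .torrent URL
        return resp.headers['location'].replace(' ', '%20'), None, None
    if 'zip' in resp.headers.get('content-type', ''):
        # torrent came zipped in the body: caller re-requests with url/post
        return url, url, post
    return None, None, None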

View File

@@ -0,0 +1,62 @@
{
"id": "mirapeliculas",
"name": "MiraPeliculas",
"active": true,
"adult": false,
"language": ["cast", "lat"],
"thumbnail": "http://mirapeliculas.net/favicon.ico",
"banner": "",
"categories": [
"movie"
],
"settings": [
{
"id": "filter_languages",
"type": "list",
"label": "Mostrar enlaces en idioma...",
"default": 0,
"enabled": true,
"visible": true,
"lvalues": [
"No filtrar",
"LAT",
"ESP",
"VOSE"
]
},
{
"id": "modo_grafico",
"type": "bool",
"label": "Buscar información extra",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "include_in_global_search",
"type": "bool",
"label": "Incluir en busqueda global",
"default": true,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces",
"type": "bool",
"label": "Verificar si los enlaces existen",
"default": false,
"enabled": true,
"visible": true
},
{
"id": "comprueba_enlaces_num",
"type": "list",
"label": "Número de enlaces a verificar",
"default": 1,
"enabled": true,
"visible": "eq(-1,true)",
"lvalues": [ "5", "10", "15", "20" ]
}
]
}

View File

@@ -0,0 +1,141 @@
# -*- coding: utf-8 -*-
#------------------------------------------------------------
import re, urllib, urlparse
from channels import autoplay
from platformcode import config, logger, platformtools
from core.item import Item
from core import httptools, scrapertools, jsontools, tmdb
from core import servertools
from channels import filtertools
host = 'http://mirapeliculas.net'
IDIOMAS = {'Latino': 'LAT', 'Español': 'ESP', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_servers = ['streamango', 'streamplay', 'openload', 'okru']
list_quality = ['BR-Rip', 'HD-Rip', 'DVD-Rip', 'TS-HQ', 'TS-Screner', 'Cam']
__channel__='mirapeliculas'
__comprueba_enlaces__ = config.get_setting('comprueba_enlaces', __channel__)
__comprueba_enlaces_num__ = config.get_setting('comprueba_enlaces_num', __channel__)
try:
__modo_grafico__ = config.get_setting('modo_grafico', __channel__)
except:
__modo_grafico__ = True
def mainlist(item):
logger.info()
itemlist = []
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(item.clone(title="Novedades" , action="lista", url= host))
itemlist.append(item.clone(title="Castellano" , action="lista", url= host + "/repelis/castellano/"))
itemlist.append(item.clone(title="Latino" , action="lista", url= host + "/repelis/latino/"))
itemlist.append(item.clone(title="Subtituladas" , action="lista", url= host + "/repelis/subtituladas/"))
itemlist.append(item.clone(title="Categorias" , action="categorias", url= host))
itemlist.append(item.clone(title="Buscar", action="search"))
itemlist.append(item.clone(title="Configurar canal...", text_color="gold", action="configuracion", folder=False))
autoplay.show_option(item.channel, itemlist)
return itemlist
def configuracion(item):
ret = platformtools.show_channel_settings()
platformtools.itemlist_refresh()
return ret
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
item.url = host + "/buscar/?q=%s" % texto
try:
return lista(item)
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<li class="cat-item cat-item-3"><a href="([^"]+)" title="([^"]+)">'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
itemlist.append(item.clone(channel=item.channel, action="lista", title=scrapedtitle , url=scrapedurl ,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<div class="col-mt-5 postsh">.*?<a href="([^"]+)".*?'
patron += '<span class="under-title-gnro">([^"]+)</span>.*?'
patron += '<p>(\d+)</p>.*?'
patron += '<img src="([^"]+)".*?'
patron += 'title="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl, calidad, scrapedyear, scrapedthumbnail, scrapedtitle in matches:
scrapedplot = ""
title = '%s [COLOR red] %s [/COLOR] (%s)' % (scrapedtitle, calidad , scrapedyear)
itemlist.append(item.clone(action="findvideos", title=title , url=scrapedurl ,
thumbnail=scrapedthumbnail , contentTitle = scrapedtitle, plot=scrapedplot ,
quality=calidad, infoLabels={'year':scrapedyear}) )
tmdb.set_infoLabels(itemlist, True)
next_page_url = scrapertools.find_single_match(data,'<span class="current">\d+</span>.*?<a href="([^"]+)"')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append(item.clone(channel=item.channel , action="lista" , title="Next page >>" ,
text_color="blue", url=next_page_url) )
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
patron = '<td><a rel="nofollow" href=.*?'
patron += '<td>([^<]+)</td>.*?'
patron += '<td>([^<]+)</td>.*?'
patron += '<img src=".*?=([^"]+)"'
matches = scrapertools.find_multiple_matches(data, patron)
for lang, calidad, url in matches:
if lang in IDIOMAS:
lang = IDIOMAS[lang]
if not config.get_setting('unify'):
title = '[COLOR red] %s [/COLOR] (%s)' % (calidad , lang)
else:
title = ''
itemlist.append(item.clone(action="play", title='%s'+title, url=url, language=lang, quality=calidad ))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para Filtrar enlaces
if __comprueba_enlaces__:
itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra !='findvideos' :
itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
extra="findvideos", contentTitle=item.contentTitle))
return itemlist
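
The title='%s'+title construction above leaves a placeholder that servertools.get_servers_itemlist later fills with each detected server's name through the lambda; conceptually (illustrative values):

title = '%s' + ' [COLOR red] HD-Rip [/COLOR] (LAT)'
server = 'openload'                  # detected later by servertools
print(title % server.capitalize())   # Openload [COLOR red] HD-Rip [/COLOR] (LAT)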

View File

@@ -0,0 +1,12 @@
{
"id": "mixtoon",
"name": "MixToon",
"active": true,
"adult": false,
"language": ["cast"],
"thumbnail": "http://i.imgur.com/iZzF8gE.png",
"banner": "http://i.imgur.com/c1YTgNT.png",
"categories": [
"tvshow"
]
}

View File

@@ -0,0 +1,162 @@
# -*- coding: utf-8 -*-
import re
from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import filtertools
from channels import autoplay
from lib import gktools
IDIOMAS = {'castellano': 'Castellano'}
list_language = IDIOMAS.values()
list_servers = ['openload']
list_quality = ['default']
host = "https://mixtoon.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
patron = '<a href="([^"]+)" '
patron += 'class="link">.+?<img src="([^"]+)".*?'
patron += 'title="([^"]+)">'
matches = scrapertools.find_multiple_matches(data, patron)
# Paginacion
num_items_x_pagina = 30
min = item.page * num_items_x_pagina
min = min - item.page
max = min + num_items_x_pagina - 1
b = 0
for link, img, name in matches[min:max]:
b = b + 1
if " y " in name:
title = name.replace(" y ", " & ")
else:
title = name
url = host + link
scrapedthumbnail = host + img
context = renumbertools.context(item)
context2 = autoplay.context
context.extend(context2)
itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,contentSerieName=title,
context=context))
if b >= 29:
itemlist.append(
Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
tmdb.set_infoLabels(itemlist)
return itemlist
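
Note that with num_items_x_pagina = 30, the arithmetic above (min = page*30 - page) reduces to contiguous blocks of 29 titles: the slice [min:max] is 29 wide and the next page starts exactly where this one ends. Equivalent direct form (illustrative):

PAGE_SIZE = 29

def page_bounds(page):
    start = page * PAGE_SIZE
    return start, start + PAGE_SIZE   # same slice as matches[min:max] above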
def episodios(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
# obtener el numero total de episodios
total_episode = 0
patron_caps = '<li><a href="(.*?)">(.*?)-(.*?)<\/a><\/li>'
matches = scrapertools.find_multiple_matches(data, patron_caps)
patron_info = '<img src="([^"]+)"><div class="ds"><p>(.*?)<\/p>'
scrapedthumbnail, scrapedplot = scrapertools.find_single_match(data, patron_info)
show = item.title
scrapedthumbnail = host + scrapedthumbnail
for link, cap, name in matches:
title = ""
pat = "/"
if "Mike, Lu & Og"==item.title:
pat="&/"
if "KND" in item.title:
pat="-"
# varios episodios en un enlace
if len(name.split(pat)) > 1:
i = 0
for pos in name.split(pat):
i = i + 1
total_episode += 1
season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, total_episode)
if len(name.split(pat)) == i:
title += "%sx%s " % (season, str(episode).zfill(2))
else:
title += "%sx%s_" % (season, str(episode).zfill(2))
else:
total_episode += 1
season, episode = renumbertools.numbered_for_tratk(item.channel,item.contentSerieName, 1, total_episode)
title += "%sx%s " % (season, str(episode).zfill(2))
url = host + "/" + link
if "disponible" in link:
title += "No Disponible aún"
else:
title += name
itemlist.append(
Item(channel=item.channel, action="findvideos", title=title, url=url, show=show, plot=scrapedplot,
thumbnail=scrapedthumbnail))
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
import base64
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
_sl = scrapertools.find_single_match(data, 'var _dt=([^;]+);')
sl = eval(_sl)
buttons = [0,1]
for id in buttons:
new_url = "https://videoeb.xyz/" + "eb/" + sl[0] + "/" + sl[1] + "/" + str(id) + "/" + sl[2]
data_new = httptools.downloadpage(new_url).data
valor1, valor2 = scrapertools.find_single_match(data_new, 'var x0x = \["[^"]*","([^"]+)","[^"]*","[^"]*","([^"]+)')
try:
url = base64.b64decode(gktools.transforma_gsv(valor2, base64.b64decode(valor1)))
if 'download' in url:
url = url.replace('download', 'preview')
title = '%s'
itemlist.append(Item(channel=item.channel, title=title, url=url, action='play', language='latino',
infoLabels=item.infoLabels))
except Exception as e:
logger.info(e)
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
# Requerido para FilterTools
itemlist = filtertools.get_links(itemlist, item, list_language)
# Requerido para AutoPlay
autoplay.start(itemlist, item)
return itemlist
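
The decode chain above reads as: valor1 is base64 and, once decoded, is the key gktools.transforma_gsv uses to unscramble valor2, whose output is base64 again. A sketch mirroring exactly the calls used in findvideos (inputs illustrative):

import base64
from lib import gktools

def decode_video_url(valor1, valor2):
    key = base64.b64decode(valor1)
    url = base64.b64decode(gktools.transforma_gsv(valor2, key))
    if 'download' in url:
        url = url.replace('download', 'preview')   # swap in the playable variant
    return url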

View File

@@ -2,24 +2,21 @@
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import jsontools as json
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools
from core import tmdb
from core import jsontools
host = 'http://mporno.tv'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/most-recent/", plot="/most-recent/"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/top-rated/", plot="/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="peliculas", url=host + "/most-viewed/", plot="/most-viewed/"))
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest/", plot="/longest/"))
itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host + "/most-recent/"))
itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/top-rated/"))
itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="peliculas", url=host + "/most-viewed/"))
itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/longest/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/channels/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -45,10 +42,11 @@ def categorias(item):
patron = '<h3><a href="([^"]+)">(.*?)</a> <small>(.*?)</small></h3>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,cantidad in matches:
scrapedplot = scrapedurl.replace("http://mporno.unblckd.org/", "").replace("page1.html", "")
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = scrapedtitle + " " + cantidad
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail , plot=scrapedplot) )
return itemlist
@@ -64,13 +62,13 @@ def peliculas(item):
title = scrapedtitle
scrapedurl = scrapedurl.replace("/thumbs/", "/videos/") + ".mp4"
thumbnail = scrapedthumbnail
plot = item.plot
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} ))
plot = ""
itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, thumbnail=thumbnail,
plot=plot, contentTitle=contentTitle))
next_page_url = scrapertools.find_single_match(data,'<a href=\'([^\']+)\' class="next">Next &gt;&gt;</a>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append(item.clone(action="peliculas", title="Página Siguiente >>", text_color="blue", url=next_page_url) )
return itemlist

View File

@@ -7,15 +7,15 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools
host = 'https://www.muchoporno.xxx'
def mainlist(item):
logger.info()
itemlist = []
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host))
itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host))
itemlist.append( Item(channel=item.channel, title="Pornstars" , action="categorias", url=host + "/pornstars/"))
#itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/sitios/"))
itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorias/"))
itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
return itemlist
@@ -26,7 +26,7 @@ def search(item, texto):
texto = texto.replace(" ", "+")
item.url = host + "/search/?q=%s" % texto
try:
return peliculas(item)
return lista(item)
except:
import sys
for line in sys.exc_info():
@@ -39,22 +39,47 @@ def categorias(item):
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="muestra-escena muestra-categoria" href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"'
if "/sitios/" in item.url:
patron = '<div class="muestra-escena muestra-canales">.*?href="(.*?)">.*?'
patron += 'src="(.*?)".*?'
patron += '<a title="(.*?)".*?'
patron += '</span> (.*?) videos</span>'
if "/pornstars/" in item.url:
patron = '<a class="muestra-escena muestra-pornostar" href="([^"]+)">.*?'
patron += 'src="([^"]+)".*?'
patron += 'alt="([^"]+)".*?'
patron += '</span> (\d+) videos</span>'
else:
patron = '<a class="muestra-escena muestra-categoria" href="([^"]+)" title="[^"]+">.*?'
patron += 'src="([^"]+)".*?'
patron += '</span> ([^"]+) </h2>(.*?)>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
scrapedplot = ""
cantidad = " (" + cantidad + ")"
if "</a" in cantidad:
cantidad = ""
scrapedtitle = scrapedtitle + cantidad
scrapedurl = urlparse.urljoin(item.url,scrapedurl)
itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot=scrapedplot))
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Siguiente</a></li>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
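Note the pattern routing above: with three independent statements (if / if / else), a /sitios/ URL would match the first test and then still fall into the final else, overwriting its pattern with the default one; chaining the second test as elif avoids that. A self-contained check of the routing (URLs are illustrative):

def pick_patron(url):
    if "/sitios/" in url:
        return "sitios"
    elif "/pornstars/" in url:
        return "pornstars"
    else:
        return "categorias"

assert pick_patron("https://www.muchoporno.xxx/sitios/") == "sitios"
assert pick_patron("https://www.muchoporno.xxx/pornstars/") == "pornstars"
assert pick_patron("https://www.muchoporno.xxx/categorias/") == "categorias"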
def peliculas(item):
def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
patron = '<a class="muestra-escena"\s*href="([^"]+)".*?data-stats-video-name="([^"]+)".*?<img src="([^"]+)".*?<span class="ico-minutos sprite" title="Length"></span>([^"]+)</span>'
patron = '<a class="muestra-escena"\s*href="([^"]+)".*?'
patron += 'data-stats-video-name="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
patron += '<span class="ico-minutos sprite" title="Length"></span>([^"]+)</span>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedurl,scrapedtitle,scrapedthumbnail,duracion in matches:
url = urlparse.urljoin(item.url,scrapedurl)
@@ -63,11 +88,12 @@ def peliculas(item):
thumbnail = scrapedthumbnail
plot = ""
year = ""
itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
next_page_url = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Siguiente</a></li>')
if next_page_url!="":
next_page_url = urlparse.urljoin(item.url,next_page_url)
itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
plot=plot, contentTitle=contentTitle))
next_page = scrapertools.find_single_match(data,'<li><a href="([^"]+)">Siguiente</a></li>')
if next_page!="":
next_page = urlparse.urljoin(item.url,next_page)
itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
return itemlist
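The Siguiente link scraped for pagination can be absolute or relative, and urlparse.urljoin handles both, which is why every pager branch routes through it. Python 2 urlparse, matching these channels' imports; the URLs are illustrative:

import urlparse

base = 'https://www.muchoporno.xxx/categorias/'
print(urlparse.urljoin(base, 'page2/'))          # https://www.muchoporno.xxx/categorias/page2/
print(urlparse.urljoin(base, '/videos/page2/'))  # https://www.muchoporno.xxx/videos/page2/
print(urlparse.urljoin(base, 'https://cdn.example.test/p3/'))  # absolute URLs pass through unchanged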

Some files were not shown because too many files have changed in this diff