@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.8" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.8.1" provider-name="Alfa Addon">
    <requires>
        <import addon="xbmc.python" version="2.1.0"/>
        <import addon="script.module.libtorrent" optional="true"/>
@@ -19,13 +19,13 @@
    </assets>
    <news>[B]Estos son los cambios para esta versión:[/B]

[COLOR green][B]Arreglos[/B][/COLOR]
¤ maxipelis24 ¤ cinecalidad ¤ inkapelis
¤ seriesmetro ¤ inkaseries

[COLOR green][B]Novedades[/B][/COLOR]
¤ Siska ¤ xxxfreeinhd
¤ allpelicuas ¤ asialiveaction ¤ danimados
¤ MixToon ¤ pack +18 ¤ AbToon
¤ hdfull

¤ Agradecimientos a @chivmalev por colaborar con esta versión.
[COLOR green][B]Novedades[/B][/COLOR]
¤ uploadmp4 ¤ HomeCine ¤ CinemaUpload

    </news>
    <description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
@@ -109,7 +109,7 @@ def lista(item):

def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
    partes = video_url.split('||')
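
Several of the play() rewrites in this set target pages that deliver the stream address as two concatenated JavaScript string fragments, with several fields packed into the result and separated by '||'. A minimal standalone sketch of that reassembly, using invented page markup (the real sites' JS differs):

    # -*- coding: utf-8 -*-
    # Hypothetical page snippet; only the regex/reassembly pattern mirrors the channel code.
    import re

    sample = 'var video_url = "https://cdn.example/get_file/ab||"; video_url += "720p||token";'

    video_url = re.search(r'var video_url = "([^"]*)"', sample).group(1)
    video_url += re.search(r'video_url \+= "([^"]*)"', sample).group(1)

    partes = video_url.split('||')
    print(partes)  # ['https://cdn.example/get_file/ab', '720p', 'token']
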
@@ -32,11 +32,23 @@ def mainlist(item):
    itemlist = list()

    itemlist.append(
        Item(channel=item.channel, action="lista", title="Series", contentSerieName="Series", url=host, thumbnail=thumb_series, page=0))
    #itemlist.append(
    #    Item(channel=item.channel, action="lista", title="Live Action", contentSerieName="Live Action", url=host+"/liveaction", thumbnail=thumb_series, page=0))
    #itemlist.append(
    #    Item(channel=item.channel, action="peliculas", title="Películas", contentSerieName="Películas", url=host+"/peliculas", thumbnail=thumb_series, page=0))
        Item(channel=item.channel, action="lista", title="Series Actuales", url=host+'/p/actuales',
             thumbnail=thumb_series))

    itemlist.append(
        Item(channel=item.channel, action="lista", title="Series Clasicas", url=host+'/p/clasicas',
             thumbnail=thumb_series))

    itemlist.append(
        Item(channel=item.channel, action="lista", title="Series Anime", url=host + '/p/anime',
             thumbnail=thumb_series))

    itemlist.append(
        Item(channel=item.channel, action="lista", title="Series Live Action", url=host + '/p/live-action',
             thumbnail=thumb_series))
    itemlist.append(
        Item(channel=item.channel, action="search", title="Buscar", thumbnail=''))

    itemlist = renumbertools.show_option(item.channel, itemlist)
    autoplay.show_option(item.channel, itemlist)
    return itemlist
@@ -47,29 +59,15 @@ def lista(item):

    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<a href="([^"]+)" '
    if item.contentSerieName == "Series":
        patron += 'class="link">.+?<img src="([^"]+)".*?'
    else:
        patron += 'class="link-la">.+?<img src="([^"]+)".*?'
    patron += 'title="([^"]+)">'
    if item.url==host or item.url==host+"/liveaction":
        a=1
    else:
        num=(item.url).split('-')
        a=int(num[1])
    full_data = httptools.downloadpage(item.url).data
    full_data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", full_data)
    data = scrapertools.find_single_match(full_data, 'class="sl">(.*?)<div class="pag">')
    patron = '<a href="([^"]+)".*?<img src="([^"]+)".*?title="([^"]+)">'

    matches = scrapertools.find_multiple_matches(data, patron)

    # Paginacion
    num_items_x_pagina = 30
    min = item.page * num_items_x_pagina
    min=min-item.page
    max = min + num_items_x_pagina - 1
    b=0
    for link, img, name in matches[min:max]:
        b=b+1

    for link, img, name in matches:
        if " y " in name:
            title=name.replace(" y "," & ")
        else:
@@ -80,17 +78,15 @@ def lista(item):
        context2 = autoplay.context
        context.extend(context2)

        itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title, contentSerieName=title,
                                   context=context))
    if b<29:
        a=a+1
        url=host+"/p/pag-"+str(a)
        if b>10:
            itemlist.append(
                Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
    else:
        itemlist.append(
            Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
        itemlist.append(Item(channel=item.channel, title=title, url=url, action="episodios", thumbnail=scrapedthumbnail,
                             contentSerieName=title, context=context))

    # Paginacion

    next_page = scrapertools.find_single_match(full_data, '<a class="sel">\d+</a><a href="([^"]+)">\d+</a>')
    if next_page != '':
        itemlist.append(Item(channel=item.channel, contentSerieName=item.contentSerieName,
                             title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=host+next_page, action="lista"))

    tmdb.set_infoLabels(itemlist)
    return itemlist
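
This rewrite drops the manual per-page counters (num_items_x_pagina, min/max slicing, the b counter) and instead reads the site's own pager: the link immediately after the currently selected page number. The pager scrape in isolation, against an invented HTML fragment (the host prefix is an assumption):

    import re

    html = '<div class="pag"><a class="sel">2</a><a href="/p/pag-3">3</a></div>'  # made-up pager

    next_page = re.search(r'<a class="sel">\d+</a><a href="([^"]+)">\d+</a>', html)
    if next_page:
        print("next page:", "https://example.host" + next_page.group(1))
    else:
        print("last page reached")
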
@@ -211,6 +207,48 @@ def findvideos(item):

    return itemlist

def search_results(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, post=item.post).data
    if len(data) > 0:
        results = eval(data)
    else:
        return itemlist

    for result in results:
        try:
            thumbnail = host + "/tb/%s.jpg" % result[0]
            title = u'%s' % result[1]
            logger.debug(title)
            url = host + "/s/%s" % result[2]
            itemlist.append(Item(channel=item.channel, thumbnail=thumbnail, title=title, url=url, contentSerieName=title,
                                 action='episodios'))
        except:
            pass

    tmdb.set_infoLabels(itemlist, seekTmdb=True)
    return itemlist

def search(item, texto):
    logger.info()
    import urllib

    if texto != "":
        texto = texto.replace(" ", "+")
        item.url = host+"/b.php"
        post = {'k':texto, "pe":"", "te":""}
        item.post = urllib.urlencode(post)

        try:
            return search_results(item)
        except:
            import sys
            for line in sys.exc_info():
                logger.error("%s" % line)
            return []


def golink(ida,sl):
    a=ida
    b=[3,10,5,22,31]
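
search_results() above evaluates the raw POST response with eval(). When the endpoint returns a plain list literal like this one, ast.literal_eval is a safer drop-in, since it refuses anything but literals. A sketch under that assumption (the sample payload is made up to match the shape the channel expects):

    import ast

    # Hypothetical b.php response: a list of [id, title, slug] rows.
    data = '[["123", "Serie de ejemplo", "serie-de-ejemplo"]]'

    if data:
        results = ast.literal_eval(data)  # safer stand-in for eval()
        for thumb_id, title, slug in results:
            print("/tb/%s.jpg" % thumb_id, title, "/s/%s" % slug)
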
@@ -75,7 +75,7 @@ def lista(item):

def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data

    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:

@@ -56,7 +56,7 @@ def categorias(item):
def peliculas(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<a href="([^"]+)" class="thumb">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)".*?'
    patron += '<span class="dur">(.*?)</span>'

@@ -78,7 +78,7 @@ def peliculas(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
    partes = video_url.split('||')

@@ -56,7 +56,7 @@ def categorias(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data,'<ul class="cf">(.*?)<h2>Advertisement</h2>')
    patron = '<li>.*?<a href="([^"]+)".*?'
    patron += 'src="([^"]+)" alt="([^"]+)".*?'

@@ -79,7 +79,7 @@ def lista(item):
def play(item):
    logger.info(item)
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url\+="([^"]*)"')
    partes = video_url.split('||')
plugin.video.alfa/channels/homecine.json (new file, 70 lines)
@@ -0,0 +1,70 @@
{
    "id": "homecine",
    "name": "HomeCine",
    "active": true,
    "adult": false,
    "language": ["lat","cast"],
    "thumbnail": "https://homecine.net/wp-content/uploads/2018/05/homedark-1-3.png",
    "banner": "",
    "version": 1,
    "categories": [
        "movie",
        "direct"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en busqueda global",
            "default": false,
            "enabled": false,
            "visible": false
        },
        {
            "id": "filter_languages",
            "type": "list",
            "label": "Mostrar enlaces en idioma...",
            "default": 0,
            "enabled": true,
            "visible": true,
            "lvalues": [
                "No filtrar",
                "LAT",
                "CAST",
                "VOSE"
            ]
        },
        {
            "id": "include_in_newest_peliculas",
            "type": "bool",
            "label": "Incluir en Novedades - Peliculas",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_infantiles",
            "type": "bool",
            "label": "Incluir en Novedades - Infantiles",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_terror",
            "type": "bool",
            "label": "Incluir en Novedades - terror",
            "default": true,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_latino",
            "type": "bool",
            "label": "Incluir en Novedades - Latino",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
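
A note on the settings block above: Alfa stores a "list"-type setting such as filter_languages as an index, which the channel resolves against lvalues when filtering links. Roughly (the get_setting stub below stands in for platformcode.config.get_setting):

    # "list" settings hold an index into lvalues; these lvalues mirror homecine.json.
    lvalues = ["No filtrar", "LAT", "CAST", "VOSE"]

    def get_setting(setting_id):
        return 2  # stub: pretend the user picked the third option in Kodi's dialog

    selected = lvalues[get_setting("filter_languages")]
    print(selected)  # CAST
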
plugin.video.alfa/channels/homecine.py (new file, 358 lines)
@@ -0,0 +1,358 @@
# -*- coding: utf-8 -*-

import re
import urllib
import urlparse

from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb

IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = ['HD 720p', 'HD 1080p', '480p', '360p']
list_servers = ['cinemaupload']

host = 'https://homecine.net'


def mainlist(item):
    logger.info()

    autoplay.init(item.channel, list_servers, list_quality)

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Ultimas",
                         action="list_all",
                         thumbnail=get_thumb('last', auto=True),
                         url='%s%s' % (host, '/release-year/2019'),
                         first=0
                         ))

    itemlist.append(Item(channel=item.channel, title="Películas",
                         action="sub_menu",
                         thumbnail=get_thumb('movies', auto=True),
                         ))

    itemlist.append(Item(channel=item.channel, title="Series",
                         action="list_all",
                         thumbnail=get_thumb('tvshows', auto=True),
                         url='%s%s' % (host, '/series/'),
                         first=0
                         ))

    itemlist.append(Item(channel=item.channel, title="Documentales",
                         action="list_all",
                         thumbnail=get_thumb('documentaries', auto=True),
                         url='%s%s' % (host, '/documentales/'),
                         first=0
                         ))

    itemlist.append(Item(channel=item.channel, title="Buscar",
                         action="search",
                         url=host+'/?s=',
                         thumbnail=get_thumb('search', auto=True),
                         ))

    autoplay.show_option(item.channel, itemlist)

    return itemlist

def sub_menu(item):
    logger.info()

    itemlist = []

    itemlist.append(Item(channel=item.channel, title="Todas",
                         action="list_all",
                         thumbnail=get_thumb('all', auto=True),
                         url='%s%s' % (host, '/peliculas/'),
                         first=0
                         ))

    itemlist.append(Item(channel=item.channel, title="Mas vistas",
                         action="list_all",
                         thumbnail=get_thumb('more watched', auto=True),
                         url='%s%s' % (host, '/most-viewed/'),
                         first=0
                         ))

    itemlist.append(Item(channel=item.channel, title="Generos",
                         action="seccion",
                         thumbnail=get_thumb('genres', auto=True),
                         fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
                         url=host,
                         ))

    return itemlist

def get_source(url, referer=None):
    logger.info()
    if referer is None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer':referer}).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data

def list_all(item):
    logger.info()

    itemlist = []
    next = False

    data = get_source(item.url)
    patron = 'movie-id="\d+".*?<a href="([^"]+)".*?<.*?original="([^"]+)".*?<h2>([^<]+)</h2>.*?jtip(.*?)clearfix'

    matches = re.compile(patron, re.DOTALL).findall(data)

    first = item.first
    last = first + 19
    if last > len(matches):
        last = len(matches)
        next = True

    for scrapedurl, scrapedthumbnail, scrapedtitle, extra_info in matches[first:last]:

        year = scrapertools.find_single_match(extra_info, '"tag">(\d{4})<')
        url = host+scrapedurl
        thumbnail = host+scrapedthumbnail.strip()
        title = scrapedtitle
        new_item = Item(channel=item.channel,
                        title=title,
                        url=url,
                        thumbnail=thumbnail,
                        infoLabels = {'year': year}
                        )
        if 'series' in scrapedurl:
            new_item.action = 'seasons'
            new_item.contentSerieName = title
        else:
            new_item.action = 'findvideos'
            new_item.contentTitle = title

        itemlist.append(new_item)

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)

    if not next:
        url_next_page = item.url
        first = last
    else:
        url_next_page = scrapertools.find_single_match(data, "<li class='active'>.*?class='page larger' href='([^']+)'")
        url_next_page = host+url_next_page
        first = 0

    if url_next_page:
        itemlist.append(Item(channel=item.channel, title="Siguiente >>", url=url_next_page, action='list_all',
                             first=first))

    return itemlist
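
list_all() paginates client-side: it scrapes every card on the page, then windows the matches with first/last in steps of 19, only following the site's own pager once the window runs past the end. The windowing logic in isolation, with made-up match data:

    # Client-side paging over one page of scraped matches, as in list_all above.
    matches = ['item%d' % i for i in range(45)]  # invented scrape results
    page_size = 19

    first = 0
    while True:
        last = min(first + page_size, len(matches))
        exhausted = (first + page_size) > len(matches)
        print(matches[first:last])
        if exhausted:
            break  # here the channel would follow the site's real pager instead
        first = last
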
def seccion(item):
    logger.info()

    itemlist = []
    duplicado = []
    data = get_source(item.url)

    patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<\/a><\/li>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        url = host+scrapedurl
        title = scrapedtitle
        thumbnail = ''
        if url not in duplicado:
            itemlist.append(Item(channel=item.channel,
                                 action='list_all',
                                 title=title,
                                 url=url,
                                 thumbnail=thumbnail,
                                 first=0
                                 ))
    return itemlist


def seasons(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)

    patron = '<strong>Season (\d+)</strong>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedseason in matches:
        contentSeasonNumber = scrapedseason
        title = 'Temporada %s' % scrapedseason
        infoLabels['season'] = contentSeasonNumber

        itemlist.append(Item(channel=item.channel,
                             action='episodesxseason',
                             url=item.url,
                             title=title,
                             contentSeasonNumber=contentSeasonNumber,
                             infoLabels=infoLabels
                             ))
    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             contentSerieName=item.contentSerieName,
                             extra1='library'
                             ))

    return itemlist

def episodios(item):
    logger.info()
    itemlist = []
    templist = seasons(item)
    for tempitem in templist:
        itemlist += episodesxseason(tempitem)
    return itemlist

def episodesxseason(item):
    logger.info()
    itemlist = []
    season = item.contentSeasonNumber
    data = get_source(item.url)
    data = scrapertools.find_single_match(data, '<strong>Season %s</strong>.*?class="les-content"(.*?)</div>' % season)
    patron = '<a href="([^"]+)">Episode (\d+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    infoLabels = item.infoLabels
    for scrapedurl, dataep in matches:
        url = host+scrapedurl
        contentEpisodeNumber = dataep
        try:
            title = '%sx%s - Episodio %s' % (season, dataep, dataep)
        except:
            title = 'episodio %s' % dataep
        infoLabels['episode'] = dataep
        infoLabels = item.infoLabels

        itemlist.append(Item(channel=item.channel,
                             action="findvideos",
                             title=title,
                             url=url,
                             contentEpisodeNumber=contentEpisodeNumber,
                             infoLabels=infoLabels
                             ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
    return itemlist
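
One thing to watch in seasons() and episodesxseason() above: infoLabels = item.infoLabels binds every generated Item to the same dictionary, so a later infoLabels['episode'] = dataep mutates the labels of items appended earlier. A copy per iteration avoids that; a sketch of the pitfall and the fix:

    # The aliasing pitfall: both entries end up with episode 2.
    shared = {'season': 1}
    items = []
    for ep in (1, 2):
        shared['episode'] = ep
        items.append({'infoLabels': shared})            # same dict object every time
    print([i['infoLabels']['episode'] for i in items])  # [2, 2]

    # Fix: give each item its own copy.
    items = []
    for ep in (1, 2):
        labels = dict(shared, episode=ep)               # fresh dict per episode
        items.append({'infoLabels': labels})
    print([i['infoLabels']['episode'] for i in items])  # [1, 2]
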
def search(item, texto):
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    item.first=0
    if texto != '':
        return list_all(item)


def newest(categoria):
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria in ['peliculas']:
            item.url = host +'/peliculas'
        elif categoria == 'infantiles':
            item.url = host + '/animacion/'
        elif categoria == 'terror':
            item.url = host + '/terror/'
        item.first=0
        itemlist = list_all(item)
        if itemlist[-1].title == 'Siguiente >>':
            itemlist.pop()
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []

    return itemlist

def findvideos(item):
    logger.info()
    itemlist = []

    data = get_source(item.url)
    patron = '<div id="tab(\d+)".*?<iframe.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for option, url in matches:
        extra_info = scrapertools.find_single_match(data, '<a href="#tab%s">(.*?)<' % option)
        if '-' in extra_info:
            quality, language = scrapertools.find_single_match(extra_info, '(.*?) - (.*)')
        else:
            language = ''
            quality = extra_info

        if 'https:' not in url:
            url = 'https:'+url
        title = ''
        if not config.get_setting('unify'):
            if language != '':
                title += ' [%s]' % IDIOMAS[language]
            if quality != '':
                title += ' [%s]' % quality

        new_item = Item(channel=item.channel,
                        url=url,
                        title= '%s'+ title,
                        contentTitle=item.title,
                        action='play',
                        infoLabels = item.infoLabels
                        )
        if language != '':
            new_item.language = IDIOMAS[language]
        if quality != '':
            new_item.quality = quality

        itemlist.append(new_item)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())

    # Requerido para FilterTools

    itemlist = filtertools.get_links(itemlist, item, list_language)

    # Requerido para AutoPlay

    autoplay.start(itemlist, item)

    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
        itemlist.append(
            Item(channel=item.channel,
                 title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
                 url=item.url,
                 action="add_pelicula_to_library",
                 extra="findvideos",
                 contentTitle=item.contentTitle,
                 ))

    return itemlist
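
The '%s' placeholder left in each link title above is filled in later by get_servers_itemlist, which runs every item through the lambda once its server is identified. A reduced sketch of that pattern (the Link class and fill_titles helper are purely illustrative, not the real servertools API):

    class Link(object):
        def __init__(self, title, server):
            self.title = title    # e.g. '%s [CAST] [HD 1080p]'
            self.server = server

    def fill_titles(links, formatter):
        # mimics what servertools.get_servers_itemlist does with its lambda argument
        for link in links:
            link.title = formatter(link)
        return links

    links = [Link('%s [CAST] [HD 1080p]', 'cinemaupload')]
    links = fill_titles(links, lambda i: i.title % i.server.capitalize())
    print(links[0].title)  # Cinemaupload [CAST] [HD 1080p]
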
@@ -86,7 +86,7 @@ def categorias(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<article class="item" data-video-id="([^"]+)">.*?src="([^"]+)" alt="([^"]+)".*?<div class="thumbnail__info__right">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)

@@ -107,7 +107,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"')
    video_url += scrapertools.find_single_match(data, 'video_url\+=\'([^\']+)\'')
    partes = video_url.split('||')

@@ -137,14 +137,16 @@ def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data

    _sl = scrapertools.find_single_match(data, 'var _dt=([^;]+);')
    sl = eval(_sl)
    buttons = [0,1]
    for id in buttons:
        new_url = "https://videoeb.xyz/" + "eb/" + sl[0] + "/" + sl[1] + "/" + str(id) + "/" + sl[2]
        data_new = httptools.downloadpage(new_url).data
        valor1, valor2 = scrapertools.find_single_match(data_new, 'var x0x = \["[^"]*","([^"]+)","[^"]*","[^"]*","([^"]+)')
        data_new = httptools.downloadpage(new_url, headers={'Referer': item.url}).data
        try:
            valor1, valor2 = scrapertools.find_single_match(data_new,
                                                            'var x0x = \["[^"]*","([^"]+)","[^"]*","[^"]*","([^"]+)')
            url = base64.b64decode(gktools.transforma_gsv(valor2, base64.b64decode(valor1)))
            if 'download' in url:
                url = url.replace('download', 'preview')
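
findvideos() above reads the page's _dt JavaScript array and probes one mirror URL per player button, splicing the button id between the array's fields. The URL construction alone, with an invented _dt value (and ast.literal_eval standing in for the eval() the channel uses):

    import ast

    _sl = '["abc", "def", "xyz"]'  # shape assumed from the channel code
    sl = ast.literal_eval(_sl)

    for boton in (0, 1):           # the two player buttons
        print("https://videoeb.xyz/eb/%s/%s/%s/%s" % (sl[0], sl[1], boton, sl[2]))
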
@@ -11,9 +11,9 @@ from platformcode import logger
host = 'http://sexkino.to'

def mainlist(item):
    logger.info("pelisalacarta.sexkino mainlist")
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="New" , action="peliculas", url= host + "/movies/"))
    itemlist.append( Item(channel=item.channel, title="New" , action="lista", url= host + "/movies/"))
    itemlist.append( Item(channel=item.channel, title="Año" , action="anual", url= host))
    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))

@@ -26,7 +26,7 @@ def search(item, texto):
    texto = texto.replace(" ", "+")
    item.url = host + "/?s=%s" % texto
    try:
        return peliculas(item)
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
@@ -35,9 +35,9 @@ def search(item, texto):


def categorias(item):
    logger.info("pelisalacarta.sexkino categorias")
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<li class="cat-item cat-item-.*?<a href="(.*?)" >(.*?)</a> <i>(.*?)</i>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
@@ -45,52 +45,77 @@ def categorias(item):
        scrapedplot = ""
        scrapedthumbnail = ""
        scrapedtitle = scrapedtitle + " ("+cantidad+")"
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist

def anual(item):
    logger.info("pelisalacarta.sexkino anual")
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    patron = '<li><a href="([^<]+)">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
    return itemlist


def peliculas(item):
    logger.info("pelisalacarta.sexkino peliculas")
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    #hay que hacer que coincida con el buscador
    patron = '<article.*?<a href="([^"]+)">.*?<img src="([^"]+)" alt="([^"]+)".*?>(\d+)</span>'
    data = httptools.downloadpage(item.url).data
    patron = '<div class="poster">.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)">.*?'
    patron += '<span class="quality">([^"]+)</span>.*?'
    patron += '<a href="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,date in matches:
    for scrapedthumbnail,scrapedtitle,calidad,scrapedurl in matches:
        scrapedplot = ""
        scrapedtitle = scrapedtitle + " (" + date + ")"
        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    next_page_url = scrapertools.find_single_match(data,'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) )
        scrapedtitle = scrapedtitle + " (" + calidad + ")"
        itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
    next_page = scrapertools.find_single_match(data,'resppages.*?<a href="([^"]+)" ><span class="icon-chevron-right">')
    if next_page != "":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) )
    return itemlist
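
The pagination rework above leans on urlparse.urljoin, so relative pager hrefs resolve against whatever page produced them, and on item.clone, so the "Next page" entry inherits every field of the current item. The urljoin half in isolation:

    try:
        from urlparse import urljoin      # Python 2, as imported by these channels
    except ImportError:
        from urllib.parse import urljoin  # Python 3

    page = 'http://sexkino.to/movies/page/2/'  # sample current page
    print(urljoin(page, '/movies/page/3/'))    # root-relative: http://sexkino.to/movies/page/3/
    print(urljoin(page, '?s=query'))           # query-only: http://sexkino.to/movies/page/2/?s=query
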
def findvideos(item):
    logger.info("pelisalacarta.a0 findvideos")
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data

    # <th>Watch online</th><th>Quality</th><th>Language</th><th>Added</th></tr></thead>
    # <tbody>
    # <tr id='link-3848'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=vidzella.me'> <a href='http://sexkino.to/links/69321-5/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # <tr id='link-3847'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=flashx.tv'> <a href='http://sexkino.to/links/69321-4/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # <tr id='link-3844'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=openload.co'> <a href='http://sexkino.to/links/69321-3/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # <tr id='link-3843'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=vidoza.net'> <a href='http://sexkino.to/links/69321-2/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # <tr id='link-3842'><td><img src='https://s2.googleusercontent.com/s2/favicons?domain=rapidvideo.ws'> <a href='http://sexkino.to/links/69321/' target='_blank'>Watch online</a></td>
    # <td><strong class='quality'>DVDRip</strong></td><td>German</td><td>2 years</td></tr>
    # </tbody></table></div></div></div></div>

    patron = '<tr id=(.*?)</tr>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for match in matches:
        url = scrapertools.find_single_match(match,'href="([^"]+)" target')
        title = scrapertools.find_single_match(match,'<td><img src=.*?> (.*?)</td>')
        itemlist.append(item.clone(action="play", title=title, url=url))

    # <a id="link" href="https://vidzella.me/play#GS7D" class="btn" style="background-color:#1e73be">Continue</a>

    patron = '<iframe class="metaframe rptss" src="([^"]+)".*?<li><a class="options" href="#option-\d+">\s+(.*?)\s+<'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
@@ -101,8 +126,8 @@ def findvideos(item):


def play(item):
    logger.info("pelisalacarta.sexkino play")
    data = scrapertools.cachePage(item.url)
    logger.info()
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title
@@ -7,18 +7,16 @@ from core import scrapertools
from core.item import Item
from core import servertools
from core import httptools
from core import tmdb
from core import jsontools

host = 'https://www.spankwire.com'

def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/recentvideos/straight"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/home1/Straight/Month/Views"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/home1/Straight/Month/Rating"))
    itemlist.append( Item(channel=item.channel, title="Longitud" , action="peliculas", url=host + "/home1/Straight/Month/Duration"))
    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/recentvideos/straight"))
    itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/home1/Straight/Month/Views"))
    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/home1/Straight/Month/Rating"))
    itemlist.append( Item(channel=item.channel, title="Longitud" , action="lista", url=host + "/home1/Straight/Month/Duration"))
    #itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/Straight"))
    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
    return itemlist
@@ -29,7 +27,7 @@ def search(item, texto):
    texto = texto.replace(" ", "+")
    item.url = host + "/search/?q=%s" % texto
    try:
        return peliculas(item)
        return lista(item)
    except:
        import sys
        for line in sys.exc_info():
@@ -42,7 +40,9 @@ def categorias(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="category-thumb"><a href="([^"]+)".*?<img src="([^"]+)" alt="([^"]+)" />.*?<span>([^"]+)</span>'
    patron = '<div class="category-thumb"><a href="([^"]+)".*?'
    patron += '<img src="([^"]+)" alt="([^"]+)" />.*?'
    patron += '<span>([^"]+)</span>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle,cantidad in matches:
@@ -50,16 +50,20 @@ def categorias(item):
        scrapedthumbnail = "http:" + scrapedthumbnail
        scrapedtitle = scrapedtitle + " (" + cantidad +")"
        scrapedurl = urlparse.urljoin(item.url,scrapedurl) + "/Submitted/59"
        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
    return itemlist


def peliculas(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="video_thumb_wrapper">.*?<a href="([^"]+)".*?data-original="([^"]+)".*?title="([^"]+)".*?<div class="video_thumb_wrapper__thumb_info video_thumb_wrapper__duration">(.*?)</div>'
    patron = '<div class="video_thumb_wrapper">.*?'
    patron += '<a href="([^"]+)".*?data-original="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '<div class="video_thumb_wrapper__thumb_info video_thumb_wrapper__duration">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
        url = urlparse.urljoin(item.url,scrapedurl)
@@ -67,23 +71,23 @@ def peliculas(item):
        contentTitle = title
        thumbnail = scrapedthumbnail
        plot = ""
        year = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
    next_page_url = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
    next_page = scrapertools.find_single_match(data,'<link rel="next" href="([^"]+)" />')
    #Para el buscador
    if next_page_url=="":
        next_page_url = scrapertools.find_single_match(data,'<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"')
        next_page_url = urlparse.urljoin(item.url,next_page_url)
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
    if next_page=="":
        next_page = scrapertools.find_single_match(data,'<div class="paginator_wrapper__buttons"><a class="" href="([^"]+)"')
        next_page = urlparse.urljoin(item.url,next_page)
    if next_page!="":
        next_page = urlparse.urljoin(item.url,next_page)
        itemlist.append( Item(channel=item.channel , action="lista" , title="Página Siguiente >>" , text_color="blue", url=next_page) )
    return itemlist


def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = scrapertools.get_match(data,'Copy Embed Code(.*?)For Desktop')
    patron = '<div class="shareDownload_container__item__dropdown">.*?<a href="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
@@ -56,7 +56,7 @@ def lista(item):

def play(item):
    logger.info()
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.title = item.title

@@ -38,7 +38,7 @@ def novedades(item):
    logger.info()

    # Descarga la página
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    # <a href="http://tubehentai.com/videos/slave_market_¨c_ep1-595.html"><img class="img" width="145" src="http://tubehentai.com/media/thumbs/5/9/5/./f/595/595.flv-3.jpg" alt="Slave_Market_¨C_Ep1" id="4f4fbf26f36
    patron = '<a href="(http://tubehentai.com/videos/[^"]+)"><img.*?src="(http://tubehentai.com/media/thumbs/[^"]+)" alt="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)

@@ -80,7 +80,7 @@ def play(item):
    # s1.addParam("flashvars","overlay=http://tubehentai.com/media/thumbs/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4
    # http://tubehentai.com/media/thumbs/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4
    # http://tubehentai.com/media/videos/5/2/3/9/c/5239cf74632cbTHLaBlueGirlep3%20%20Segment2000855.000001355.000.mp4?start=0
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    url = scrapertools.get_match(data, 's1.addParam\("flashvars","bufferlength=1&autostart=true&overlay=(.*?\.mp4)')
    url = url.replace("/thumbs", "/videos")
    # url = url+"?start=0"
@@ -9,16 +9,16 @@ from core import servertools
from core.item import Item
from platformcode import logger

host = 'http://www.vidz7.com'


def mainlist(item):
    logger.info()
    itemlist = []
    itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos", url="http://www.vidz7.com/"))
    itemlist.append(Item(channel=item.channel, action="lista", title="Útimos videos", url=host))
    itemlist.append(
        Item(channel=item.channel, action="categorias", title="Categorias", url="http://www.vidz7.com/category/"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar",
                         url="http://www.vidz7.com/?s="))
        Item(channel=item.channel, action="categorias", title="Categorias", url=host + "/category/"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar", url="http://www.vidz7.com"))
    return itemlist


@@ -26,7 +26,7 @@ def search(item, texto):
    logger.info()

    texto = texto.replace(" ", "+")
    item.url = "{0}{1}".format(item.url, texto)
    item.url = host + "/?s=%s" % texto
    try:
        return lista(item)
    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -52,38 +52,15 @@ def categorias(item):

def lista(item):
    logger.info()

    # Descarga la página
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}", "", data)
    # <article id='post-id-40630' class='video-index ceil'>
    # <div class='thumb-wrapp'>
    # <a href='http://www.vidz78.com/2019/03/22/deux-blacks-tbm-pour-julia-30ans/189/' class='thumb' style='background-image:url("https://pp.userapi.com/c855416/v855416475/ab7f/utBev5x7QuA.jpg")'>
    # <div class='overlay'></div>
    # <div class='vl'>
    # <div class="hd">HD</div> <div class="duration">36:28</div> </div>
    # </a>
    # </div>
    # <div class='info-card'>
    # <h6><a class='hp' href='http://www.vidz78.com/2019/03/22/deux-blacks-tbm-pour-julia-30ans/189/'>Jacquieetmicheltv - Deux blacks TBM pour Julia, 30ans !</a></h6>
    # <time class="video-date" datetime="2019-03-22T10:32:46+00:00">Mar 22, 2019</time>
    # <span> / 5.1k views</span>
    # </div>
    # </article>
    # Extrae las entradas de la pagina seleccionada
    patron = "<a href='.*?.' class='thumb' style='background-image:url\(\"([^\"]+)\"\).*?"
    patron += "<div class=\"hd\">(.*?)</div>.*?"
    patron += "<div class=\"duration\">(.*?)</div>.*?"
    patron += "<h6><a class='hp' href='([^']+)'>(.*?)</a></h6>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []

    for scrapedthumbnail, scrapedhd, duration, scrapedurl, scrapedtitle in matches:
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        url = urlparse.urljoin(item.url, scrapedurl)
@@ -93,13 +70,9 @@ def lista(item):
        itemlist.append(Item(channel=item.channel, action="play", title=title, thumbnail=thumbnail, fanart=thumbnail,
                             fulltitle=title, url=url,
                             viewmode="movie", folder=True))

    paginacion = scrapertools.find_single_match(data,
                                                '<a class="active".*?.>\d+</a><a class="inactive" href ="([^"]+)">')

    paginacion = scrapertools.find_single_match(data,'<a class="active".*?.>\d+</a><a class="inactive" href ="([^"]+)">')
    if paginacion:
        itemlist.append(Item(channel=item.channel, action="lista", title=">> Página Siguiente", url=paginacion))

    return itemlist


@@ -116,5 +89,5 @@ def play(item):
    videoitem.action = "play"
    videoitem.folder = False
    videoitem.title = item.title

    return itemlist
@@ -88,12 +88,14 @@ def lista(item):
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<div class="video">.*?'
    patron += '<a href="([^"]+)".*?'
    patron += '<span class="time">(.*?)</span>.*?'
    patron += '<span class="time">(.*?)</span>(.*?)</span>.*?'
    patron += '<img src="([^"]+)" alt="([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,time,scrapedthumbnail,scrapedtitle in matches:
    for scrapedurl,time,calidad,scrapedthumbnail,scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace(", ", " & ").replace("(", "(").replace(")", ")")
        title = "[COLOR yellow]" + time + " [/COLOR]" + scrapedtitle
        if "hd-marker is-hd" in calidad:
            title = "[COLOR yellow]" + time + " [/COLOR]" + "[COLOR red]" + "HD" + " [/COLOR]" + scrapedtitle
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl,

@@ -64,7 +64,7 @@ def categorias(item):
def lista(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    patron = '<li class="border-radius-5 box-shadow">.*?'
    patron += 'src="([^"]+)".*?<a href="([^"]+)" title="([^"]+)">.*?'

@@ -91,7 +91,7 @@ def lista(item):
def play(item):
    logger.info()
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = httptools.downloadpage(item.url).data
    variable = scrapertools.find_single_match(data,'<script type=\'text/javascript\'> str=\'([^\']+)\'')
    resuelta = re.sub("@[A-F0-9][A-F0-9]", lambda m: m.group()[1:].decode('hex'), variable)
    url = scrapertools.find_single_match(resuelta,'<iframe src="([^"]+)"')
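
play() here undoes a simple hex obfuscation: every @XX token in the page's str variable is replaced by the character XX encodes, and the iframe URL is read from the result. The same transform on a made-up sample (chr(int(..., 16)) is the Python 2/3-neutral spelling of the .decode('hex') call above):

    import re

    ofuscada = '@3Ciframe src=@22https://example.host/embed/1@22@3E'  # invented sample

    resuelta = re.sub("@[A-F0-9][A-F0-9]",
                      lambda m: chr(int(m.group()[1:], 16)),
                      ofuscada)
    url = re.search(r'<iframe src="([^"]+)"', resuelta).group(1)
    print(url)  # https://example.host/embed/1
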
@@ -20,7 +20,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    logger.debug(data)
    patron = '<meta property="og:video" content="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
plugin.video.alfa/servers/cinemaupload.json (new file, 42 lines)
@@ -0,0 +1,42 @@
{
    "active": true,
    "find_videos": {
        "ignore_urls": [],
        "patterns": [
            {
                "pattern": "https://cinemaupload.com/embed/([a-zA-Z0-9]+)",
                "url": "https://cinemaupload.com/embed/\\1/"
            }
        ]
    },
    "free": true,
    "id": "cinemaupload",
    "name": "cinemaupload",
    "settings": [
        {
            "default": false,
            "enabled": true,
            "id": "black_list",
            "label": "@60654",
            "type": "bool",
            "visible": true
        },
        {
            "default": 0,
            "enabled": true,
            "id": "favorites_servers_list",
            "label": "@60655",
            "lvalues": [
                "No",
                "1",
                "2",
                "3",
                "4",
                "5"
            ],
            "type": "list",
            "visible": false
        }
    ],
    "thumbnail": "https://cinemaupload.com/static/img/logo1.png"
}
plugin.video.alfa/servers/cinemaupload.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-
# --------------------------------------------------------
# Conector Cinemaupload By Alfa development Group
# --------------------------------------------------------
import re
from core import httptools
from core import scrapertools
from platformcode import logger


def test_video_exists(page_url):
    logger.info("(page_url='%s')" % page_url)
    data = httptools.downloadpage(page_url)
    if data.code == 404:
        return False, "[CinemaUpload] El archivo no existe o ha sido borrado"
    return True, ""


def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("url=" + page_url)
    video_urls = []
    data = httptools.downloadpage(page_url).data
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    patron = "source: '([^']+)',"
    matches = scrapertools.find_multiple_matches(data, patron)
    for url in matches:
        video_urls.append(['.m3u8 [CinemaUpload]', url])
    return video_urls
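
The find_videos block in cinemaupload.json is what lets Alfa recognize these embeds inside arbitrary page HTML: each pattern's capture group is substituted back into url to produce the canonical embed address. A sketch of that substitution with plain re, outside Alfa's own machinery (the sample HTML is invented):

    import re

    pattern = r'https://cinemaupload.com/embed/([a-zA-Z0-9]+)'
    template = r'https://cinemaupload.com/embed/\1/'

    html = '<iframe src="https://cinemaupload.com/embed/Ab12Cd"></iframe>'
    for m in re.finditer(pattern, html):
        print(m.expand(template))  # https://cinemaupload.com/embed/Ab12Cd/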