Merge pull request #90 from Intel11/ultimo

Updated
This commit is contained in:
Alfa
2017-09-18 23:46:44 +02:00
committed by GitHub
14 changed files with 354 additions and 171 deletions

View File

@@ -258,12 +258,11 @@ def findvideos(item):
# Download the page
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
link = scrapertools.find_single_match(data, 'href="http://(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
link = scrapertools.find_single_match(data, 'href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
if link != "":
link = "http://www.divxatope1.com/" + link
logger.info("torrent=" + link)
@@ -272,12 +271,16 @@ def findvideos(item):
url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
parentContent=item))
patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
patron += '<div class="box2">([^<]+)</div[^<]+'
patron += '<div class="box3">([^<]+)</div[^<]+'
patron += '<div class="box4">([^<]+)</div[^<]+'
patron += '<div class="box5">(.*?)</div[^<]+'
patron += '<div class="box6">([^<]+)<'
patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
patron += '<\/div[^<]+<div class="box6">([^<]+)<'
#patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
#patron += '<div class="box2">([^<]+)</div[^<]+'
#patron += '<div class="box3">([^<]+)</div[^<]+'
#patron += '<div class="box4">([^<]+)</div[^<]+'
#patron += '<div class="box5">(.*?)</div[^<]+'
#patron += '<div class="box6">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
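
Reviewer note: the reworked pattern can be checked standalone. A minimal sketch, assuming divxatope1.com's mirror table keeps the box1..box6 shape below; note the href capture keeps the surrounding quotes if the site quotes the attribute:

# -*- coding: utf-8 -*-
import re

# Sample HTML assumed for illustration only.
data = '''<div class="box1"><img src="/img/uploaded.png"></div>
<div class="box2">uploaded</div>
<div class="box3">Castellano</div>
<div class="box4">HDRip</div>
<div class="box5"><a href="/descargar/pelicula-123.html" rel="nofollow">Descargar</a></div>
<div class="box6">Descargar gratis</div>'''

# Same pattern as above, in raw strings (the \/ escapes are redundant in re).
patron = r'<div class="box1"[^<]+<img[^<]+</div[^<]+<div class="box2">([^<]+)</div[^<]+<div class="box3">([^<]+)'
patron += r'</div[^<]+<div class="box4">([^<]+)</div[^<]+<div class="box5"><a href=(.*?) rel.*?'
patron += r'</div[^<]+<div class="box6">([^<]+)<'

matches = re.compile(patron, re.DOTALL).findall(data)
print(matches)
# -> [('uploaded', 'Castellano', 'HDRip', '"/descargar/pelicula-123.html"', 'Descargar gratis')]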

View File

@@ -151,8 +151,15 @@ def peliculas(item):
itemlist = []
for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl,
thumbnail=scrapedthumbnail, plot="", fulltitle=scrapedtitle))
fulltitle = scrapedtitle.replace(scrapertools.find_single_match(scrapedtitle, '\([0-9]+\)' ), "")
itemlist.append(Item(channel = item.channel,
action = "findvideos",
title = scrapedtitle,
url = scrapedurl,
thumbnail = scrapedthumbnail,
plot = "",
fulltitle = fulltitle
))
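
Inline note: the fulltitle cleanup just strips a "(year)" tag from the title; find_single_match plus replace is roughly the following, with plain re (title hypothetical):

import re

scrapedtitle = "Blade Runner (2017)"        # hypothetical scraped title
m = re.search(r'\([0-9]+\)', scrapedtitle)  # find_single_match analogue
fulltitle = scrapedtitle.replace(m.group(0), "") if m else scrapedtitle
print(fulltitle)  # -> "Blade Runner " (the trailing space survives, as in the channel code)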
next_page = scrapertools.find_single_match(data, 'rel="next" href="([^"]+)')
if next_page != "":
@@ -170,46 +177,30 @@ def findvideos(item):
patron = 'hand" rel="([^"]+).*?title="(.*?)".*?<span>([^<]+)</span>.*?</span><span class="q">(.*?)<'
matches = re.compile(patron, re.DOTALL).findall(data)
itemlist = []
encontrados = []
itemtemp = []
for scrapedurl, server_name, language, quality in matches:
if scrapedurl in encontrados:
continue
encontrados.append(scrapedurl)
language = language.strip()
quality = quality.strip()
if "youapihd" in server_name.lower():
server_name = "gvideo"
if "pelismundo" in scrapedurl:
data = httptools.downloadpage(scrapedurl, add_referer = True).data
patron = 'sources.*?}],'
bloque = scrapertools.find_single_match(data, patron)
patron = 'file.*?"([^"]+)".*?label:"([^"]+)"'
match = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl1, scrapedlabel1 in match:
itemtemp.append([scrapedlabel1, scrapedurl1])
itemtemp.sort(key=lambda it: int(it[0].replace("p", "")))
for videoitem in itemtemp:
itemlist.append(Item(channel = item.channel,
action = "play",
extra = "hdvids",
fulltitle = item.title,
server = "directo",
thumbnail = item.thumbnail,
title = server_name + " (" + language + ") (Calidad " + videoitem[0] + ")",
url = videoitem[1],
language = language,
quality = videoitem[0]
))
else:
itemlist.append(Item(channel=item.channel,
action = "play",
extra = "",
fulltitle = item.title,
server = "",
title = server_name + " (" + language + ") (Calidad " + quality + ")",
thumbnail = item.thumbnail,
url = scrapedurl,
folder = False,
language = language,
quality = quality
))
itemlist = servertools.get_servers_itemlist(itemlist)
itemlist.append(Item(channel=item.channel,
action = "play",
extra = "",
fulltitle = item.fulltitle,
title = "%s (" + language + ") (" + quality + ")",
thumbnail = item.thumbnail,
url = scrapedurl,
folder = False,
language = language,
quality = quality
))
itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
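
Note on the "%s" left in title: the server is only identified inside get_servers_itemlist, so the lambda fills the placeholder afterwards. Minimal illustration of the deferred formatting (values hypothetical):

language, quality = "Latino", "HD"  # hypothetical loop values
title = "%s (" + language + ") (" + quality + ")"
server = "powvideo"                 # hypothetical server detected later
print(title % server.capitalize())  # -> "Powvideo (Latino) (HD)"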
def play(item):
item.thumbnail = item.contentThumbnail
return [item]

View File

@@ -1,10 +1,15 @@
# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger
@@ -362,3 +367,100 @@ def episodios(item):
return sorted(itemlist, key=lambda it: (int(it.contentSeason), int(it.contentEpisodeNumber)))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
# Download the page
data = httptools.downloadpage(item.url).data
item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>')
item.plot = scrapertools.htmlclean(item.plot).strip()
item.contentPlot = item.plot
link = scrapertools.find_single_match(data, 'href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=([^"]+)"')
if link != "":
link = host + link
logger.info("torrent=" + link)
itemlist.append(
Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title,
url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False,
parentContent=item))
patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)'
patron += '<\/div[^<]+<div class="box4">([^<]+)<\/div[^<]+<div class="box5"><a href=(.*?) rel.*?'
patron += '<\/div[^<]+<div class="box6">([^<]+)<'
#patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
#patron += '<div class="box2">([^<]+)</div[^<]+'
#patron += '<div class="box3">([^<]+)</div[^<]+'
#patron += '<div class="box4">([^<]+)</div[^<]+'
#patron += '<div class="box5">(.*?)</div[^<]+'
#patron += '<div class="box6">([^<]+)<'
matches = re.compile(patron, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
itemlist_ver = []
itemlist_descargar = []
for servername, idioma, calidad, scrapedurl, comentarios in matches:
title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
servername = servername.replace("uploaded", "uploadedto").replace("1fichier", "onefichier")
if comentarios.strip() != "":
title = title + " (" + comentarios.strip() + ")"
url = urlparse.urljoin(item.url, scrapedurl)
mostrar_server = servertools.is_server_enabled(servername)
if mostrar_server:
thumbnail = servertools.guess_server_thumbnail(title)
plot = ""
logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
action = "play"
if "partes" in title:
action = "extract_url"
new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
thumbnail=thumbnail, plot=plot, parentContent=item, server = servername)
if comentarios.startswith("Ver en"):
itemlist_ver.append(new_item)
else:
itemlist_descargar.append(new_item)
for new_item in itemlist_ver:
itemlist.append(new_item)
for new_item in itemlist_descargar:
itemlist.append(new_item)
return itemlist
def extract_url(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
videoitem.url) + ")"
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
def play(item):
logger.info()
if item.server != "torrent":
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
videoitem.url) + ")"
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
else:
itemlist = [item]
return itemlist
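
Note: the new findvideos resolves relative mirror links against the page URL and maps two site names to their alfa server ids. Python 2 sketch, URLs hypothetical:

import urlparse  # urllib.parse on Python 3

item_url = "http://www.divxatope1.com/pelicula/ejemplo.html"  # hypothetical
scrapedurl = "descargar/torrent_123.html"                     # hypothetical
print(urlparse.urljoin(item_url, scrapedurl))
# -> http://www.divxatope1.com/pelicula/descargar/torrent_123.html

for name in ("uploaded", "1fichier", "streamcloud"):
    print(name.replace("uploaded", "uploadedto").replace("1fichier", "onefichier"))
# -> uploadedto, onefichier, streamcloud (only the first two are renamed)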

View File

@@ -1,53 +1,53 @@
{
"active": true,
"changes": [
{
"date": "09/05/2017",
"description": "Agregado otro tipo de url de los videos y detección de subtítulos"
},
{
"date": "16/02/2017",
"description": "Primera versión"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "raptu.com/(?:\\?v=|embed/|e/)([A-z0-9]+)",
"url": "https://raptu.com/embed/\\1"
}
]
},
"free": true,
"id": "raptu",
"name": "raptu",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "http://i.imgur.com/quVK1j0.png?1",
"version": 1
}
{
"active": true,
"changes": [
{
"date": "18/09/2017",
"description": "Versión inicial"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "https://www.bitporno.com/e/([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
},
{
"pattern": "raptu.com/(?:\\?v=|embed/|e/)([A-z0-9]+)",
"url": "https://www.bitporno.com/e/\\1"
}
]
},
"free": true,
"id": "bitp",
"name": "bitp",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s26.postimg.org/maiur9tmx/bitp1.png",
"version": 1
}
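
For reference, each find_videos entry pairs a regex with a backreference template; core applies them roughly like this (sample snippet assumed). Side note: "[A-z0-9]" also admits a few punctuation characters between Z and a; "[A-Za-z0-9]" was probably intended.

import re

pattern = r"raptu.com/(?:\?v=|embed/|e/)([A-z0-9]+)"  # from the bitp entry above
url_tpl = r"https://www.bitporno.com/e/\1"
page = '<iframe src="https://raptu.com/embed/aB3xYz9"></iframe>'  # hypothetical embed
m = re.search(pattern, page)
if m:
    print(m.expand(url_tpl))  # -> https://www.bitporno.com/e/aB3xYz9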

View File

@@ -0,0 +1,34 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Connector for bitporno
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data:
return False, "[bitp] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
videourl = scrapertools.find_multiple_matches(data, 'file":"([^"]+).*?label":"([^"]+)')
scrapertools.printMatches(videourl)
for scrapedurl, scrapedquality in videourl:
if "loadthumb" in scrapedurl:
continue
scrapedurl = scrapedurl.replace("\\","")
video_urls.append([scrapedquality + " [bitp]", scrapedurl])
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls
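
The sort key assumes labels shaped like "720p [bitp]": split("p ", 1)[0] peels off everything from the first "p " onward, leaving the bare number (a label without that prefix would raise ValueError):

labels = ["1080p [bitp]", "360p [bitp]", "720p [bitp]"]  # hypothetical labels
labels.sort(key=lambda it: int(it.split("p ", 1)[0]))
print(labels)  # -> ['360p [bitp]', '720p [bitp]', '1080p [bitp]']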

View File

@@ -4,14 +4,19 @@ import urllib
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
if 'googleusercontent' in page_url:
return True, ""
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
if "no+existe" in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
if "Se+ha+excedido+el" in response.data:
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
if "No+tienes+permiso" in response.data:
return False, "[gvideo] No tiene permiso para acceder a este video"
return True, ""
@@ -19,22 +24,39 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
video_urls = []
urls = []
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
streams =[]
logger.debug('page_url: %s'%page_url)
if 'googleusercontent' in page_url:
data = httptools.downloadpage(page_url, follow_redirects = False, headers={"Referer": page_url})
url=data.headers['location']
logger.debug('url: %s' % url)
logger.debug("data.headers: %s" % data.headers)
quality = scrapertools.find_single_match (url, '.itag=(\d+).')
logger.debug('quality: %s' % quality)
streams.append((quality, url))
logger.debug('streams: %s' % streams)
headers_string=""
else:
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '43': '360p', '59': '480p'}
for itag, video_url in streams:
if not video_url in urls:
video_url += headers_string
video_urls.append([itags[itag], video_url])
urls.append(video_url)
video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
return video_urls
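
In the new googleusercontent branch the stream URL comes straight from the redirect's location header, and its itag indexes the quality table. Minimal illustration of the mapping and final sort (streams made up; an itag missing from the dict would raise KeyError):

itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p',
         '37': '1080p', '43': '360p', '59': '480p'}
streams = [('37', 'http://example.invalid/videoplayback?itag=37'),  # hypothetical
           ('18', 'http://example.invalid/videoplayback?itag=18')]
video_urls = [[itags[itag], url] for itag, url in streams]
video_urls.sort(key=lambda v: int(v[0].replace("p", "")))
print([v[0] for v in video_urls])  # -> ['360p', '1080p']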

View File

@@ -10,7 +10,7 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "<title>watch </title>" in data.lower():
if "<title>watch </title>" in data.lower() or "File was deleted" in data:
return False, "[kingvid] El archivo no existe o ha sido borrado"
return True, ""

View File

@@ -52,5 +52,6 @@
"visible": false
}
],
"thumbnail": "https://s26.postimg.org/6ebn509jd/mailru1.png",
"version": 1
}

View File

@@ -0,0 +1,49 @@
{
"active": true,
"changes": [
{
"date": "18/09/2017",
"description": "Versión inicial"
}
],
"find_videos": {
"ignore_urls": [],
"patterns": [
{
"pattern": "http://www.pelismundo.com/gkvip/vip/playervip3/.*?id=([A-z0-9]+)",
"url": "http://www.pelismundo.com/gkvip/vip/playervip3/player.php?id=\\1"
}
]
},
"free": true,
"id": "pelismundo",
"name": "pelismundo",
"settings": [
{
"default": false,
"enabled": true,
"id": "black_list",
"label": "Incluir en lista negra",
"type": "bool",
"visible": true
},
{
"default": 0,
"enabled": true,
"id": "favorites_servers_list",
"label": "Incluir en lista de favoritos",
"lvalues": [
"No",
"1",
"2",
"3",
"4",
"5"
],
"type": "list",
"visible": false
}
],
"thumbnail": "https://s26.postimg.org/72c9mr3ux/pelismundo1.png",
"version": 1
}

View File

@@ -0,0 +1,33 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Alfa addon - KODI Plugin
# Connector for pelismundo
# https://github.com/alfa-addon
# ------------------------------------------------------------
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data:
return False, "[pelismundo] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
video_urls = []
data = httptools.downloadpage(page_url, add_referer = True).data
patron = 'sources.*?}],'
bloque = scrapertools.find_single_match(data, patron)
patron = 'file.*?"([^"]+)".*?label:"([^"]+)"'
match = scrapertools.find_multiple_matches(bloque, patron)
for scrapedurl, scrapedquality in match:
video_urls.append([scrapedquality + " [pelismundo]", scrapedurl])
#video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls
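
The two-step extraction first isolates the jwplayer-style sources block, then pulls the file/label pairs out of it; find_single_match and find_multiple_matches behave roughly like re.search and re.findall with re.DOTALL. Sketch with an assumed payload:

import re

data = ('jwplayer("vplayer").setup({sources: ['
        '{file:"http://cdn.example/v_480.mp4",label:"480p"},'
        '{file:"http://cdn.example/v_720.mp4",label:"720p"}],});')  # hypothetical
bloque = re.search(r'sources.*?}],', data, re.DOTALL).group(0)
match = re.findall(r'file.*?"([^"]+)".*?label:"([^"]+)"', bloque, re.DOTALL)
print(match)
# -> [('http://cdn.example/v_480.mp4', '480p'), ('http://cdn.example/v_720.mp4', '720p')]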

View File

@@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
from core import httptools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
try:
response = httptools.downloadpage(page_url)
except:
pass
if not response.data or "urlopen error [Errno 1]" in str(response.code):
from platformcode import config
if config.is_xbmc():
return False, "[Raptu] Este conector solo funciona a partir de Kodi 17"
elif config.get_platform() == "plex":
return False, "[Raptu] Este conector no funciona con tu versión de Plex, intenta actualizarla"
elif config.get_platform() == "mediaserver":
return False, "[Raptu] Este conector requiere actualizar python a la versión 2.7.9 o superior"
if "Object not found" in response.data:
return False, "[Raptu] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
video_urls = []
# Subtitle detection
subtitulo = ""
videos = scrapertools.find_multiple_matches(data, '"file"\s*:\s*"([^"]+)","label"\s*:\s*"([^"]+)"')
for video_url, calidad in videos:
video_url = video_url.replace("\\", "")
extension = scrapertools.get_filename_from_url(video_url)[-4:]
if ".srt" in extension:
subtitulo = "https://www.raptu.com" + video_url
else:
video_urls.append(["%s %s [raptu]" % (extension, calidad), video_url, 0, subtitulo])
try:
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
except:
pass
for video_url in video_urls:
logger.info(" %s - %s" % (video_url[0], video_url[1]))
return video_urls

View File

@@ -35,7 +35,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if not media_url.startswith("http"):
media_url = "http:" + media_url
video_urls.append([".%s %sp [streamango]" % (ext, quality), media_url])
video_urls.append([".%s %sp [streamcherry]" % (ext, quality), media_url])
video_urls.reverse()
for video_url in video_urls:

View File

@@ -14,7 +14,7 @@
"ignore_urls": [],
"patterns": [
{
"pattern": "thevideo.me/(?:embed-|)([A-z0-9]+)",
"pattern": "(?:thevideo.me|tvad.me)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
}
]
@@ -48,5 +48,6 @@
"visible": false
}
],
"thumbnail": "https://s26.postimg.org/fzmu2c761/thevideo.me1.png",
"version": 1
}

View File

@@ -13,7 +13,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
if "borrado" in data:
if "borrado" in data or "Deleted" in data:
return False, "[vidlox] El fichero ha sido borrado"
return True, ""