([^<]+)<\/div[^<]+([^<]+)'
+ patron += '<\/div[^<]+([^<]+)<\/div[^<]+([^<]+)(.*?)([^<]+)<'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ scrapertools.printMatches(matches)
+
+ itemlist_ver = []
+ itemlist_descargar = []
+
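+ # Each match carries the mirror's server name, language, quality, link and an
+ # optional comment; streaming ("Ver en...") and download mirrors are kept in
+ # separate lists so they can be appended in that order below.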
+ for servername, idioma, calidad, scrapedurl, comentarios in matches:
+ title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
+ servername = servername.replace("uploaded", "uploadedto").replace("1fichier", "onefichier")
+ if comentarios.strip() != "":
+ title = title + " (" + comentarios.strip() + ")"
+ url = urlparse.urljoin(item.url, scrapedurl)
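+ # Skip mirrors whose server connector is disabled in the addon settings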
+ mostrar_server = servertools.is_server_enabled(servername)
+ if mostrar_server:
+ thumbnail = servertools.guess_server_thumbnail(title)
+ plot = ""
+ logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
+ action = "play"
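+ # Multi-part links ("partes") are handed to extract_url, which scans the
+ # stored fragment for individual video links instead of playing it directly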
+ if "partes" in title:
+ action = "extract_url"
+ new_item = Item(channel=item.channel, action=action, title=title, fulltitle=title, url=url,
+ thumbnail=thumbnail, plot=plot, parentContent=item, server = servername)
+ if comentarios.startswith("Ver en"):
+ itemlist_ver.append(new_item)
+ else:
+ itemlist_descargar.append(new_item)
+
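+ # List streaming ("Ver en...") mirrors first, then download mirrors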
+ for new_item in itemlist_ver:
+ itemlist.append(new_item)
+
+ for new_item in itemlist_descargar:
+ itemlist.append(new_item)
+
+ return itemlist
+
+
+def extract_url(item):
+ logger.info()
+
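+ # For multi-part mirrors, item.url is scanned as raw data so servertools can
+ # detect every recognizable video link inside it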
+ itemlist = servertools.find_video_items(data=item.url)
+
+ for videoitem in itemlist:
+ videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
+ videoitem.url) + ")"
+ videoitem.fulltitle = item.fulltitle
+ videoitem.thumbnail = item.thumbnail
+ videoitem.channel = item.channel
+
+ return itemlist
+
+
+def play(item):
+ logger.info()
+
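+ # Torrent items are played as-is; for any other server the real video link is
+ # resolved from the stored URL first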
+ if item.server != "torrent":
+ itemlist = servertools.find_video_items(data=item.url)
+
+ for videoitem in itemlist:
+ videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
+ videoitem.url) + ")"
+ videoitem.fulltitle = item.fulltitle
+ videoitem.thumbnail = item.thumbnail
+ videoitem.channel = item.channel
+ else:
+ itemlist = [item]
+
+ return itemlist
diff --git a/plugin.video.alfa/servers/raptu.json b/plugin.video.alfa/servers/bitp.json
old mode 100755
new mode 100644
similarity index 64%
rename from plugin.video.alfa/servers/raptu.json
rename to plugin.video.alfa/servers/bitp.json
index 088063bd..aeb7c8be
--- a/plugin.video.alfa/servers/raptu.json
+++ b/plugin.video.alfa/servers/bitp.json
@@ -1,53 +1,53 @@
-{
- "active": true,
- "changes": [
- {
- "date": "09/05/2017",
- "description": "Agregado otro tipo de url de los videos y detección de subtítulos"
- },
- {
- "date": "16/02/2017",
- "description": "Primera versión"
- }
- ],
- "find_videos": {
- "ignore_urls": [],
- "patterns": [
- {
- "pattern": "raptu.com/(?:\\?v=|embed/|e/)([A-z0-9]+)",
- "url": "https://raptu.com/embed/\\1"
- }
- ]
- },
- "free": true,
- "id": "raptu",
- "name": "raptu",
- "settings": [
- {
- "default": false,
- "enabled": true,
- "id": "black_list",
- "label": "Incluir en lista negra",
- "type": "bool",
- "visible": true
- },
- {
- "default": 0,
- "enabled": true,
- "id": "favorites_servers_list",
- "label": "Incluir en lista de favoritos",
- "lvalues": [
- "No",
- "1",
- "2",
- "3",
- "4",
- "5"
- ],
- "type": "list",
- "visible": false
- }
- ],
- "thumbnail": "http://i.imgur.com/quVK1j0.png?1",
- "version": 1
-}
\ No newline at end of file
+{
+ "active": true,
+ "changes": [
+ {
+ "date": "18/09/2017",
+ "description": "Versión inicial"
+ }
+ ],
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "https://www.bitporno.com/e/([A-z0-9]+)",
+ "url": "https://www.bitporno.com/e/\\1"
+ },
+ {
+ "pattern": "raptu.com/(?:\\?v=|embed/|e/)([A-z0-9]+)",
+ "url": "https://www.bitporno.com/e/\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "bitp",
+ "name": "bitp",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "Incluir en lista negra",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "Incluir en lista de favoritos",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://s26.postimg.org/maiur9tmx/bitp1.png",
+ "version": 1
+}
diff --git a/plugin.video.alfa/servers/bitp.py b/plugin.video.alfa/servers/bitp.py
new file mode 100644
index 00000000..7ca99b97
--- /dev/null
+++ b/plugin.video.alfa/servers/bitp.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------
+# Alfa addon - KODI Plugin
+# Conector para bitporno
+# https://github.com/alfa-addon
+# ------------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data:
+ return False, "[bitp] El archivo no existe o ha sido borrado"
+
+ return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+ logger.info("(page_url='%s')" % page_url)
+ video_urls = []
+ data = httptools.downloadpage(page_url).data
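+ # The embed page lists each stream as '"file":"<url>" ... "label":"<quality>"' pairs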
+ videourl = scrapertools.find_multiple_matches(data, 'file":"([^"]+).*?label":"([^"]+)')
+ scrapertools.printMatches(videourl)
+ for scrapedurl, scrapedquality in videourl:
+ if "loadthumb" in scrapedurl:
+ continue
+ scrapedurl = scrapedurl.replace("\\","")
+ video_urls.append([scrapedquality + " [bitp]", scrapedurl])
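+ # Sort by numeric quality, e.g. "480p [bitp]" -> 480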
+ video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
+ return video_urls
diff --git a/plugin.video.alfa/servers/gvideo.py b/plugin.video.alfa/servers/gvideo.py
index 575af8de..85a2be53 100644
--- a/plugin.video.alfa/servers/gvideo.py
+++ b/plugin.video.alfa/servers/gvideo.py
@@ -4,14 +4,19 @@ import urllib
from core import httptools
from core import scrapertools
+from platformcode import logger
def test_video_exists(page_url):
+ if 'googleusercontent' in page_url:
+ return True, ""
response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
if "no+existe" in response.data:
return False, "[gvideo] El video no existe o ha sido borrado"
if "Se+ha+excedido+el" in response.data:
return False, "[gvideo] Se ha excedido el número de reproducciones permitidas"
+ if "No+tienes+permiso" in response.data:
+ return False, "[gvideo] No tiene permiso para acceder a este video"
return True, ""
@@ -19,22 +24,39 @@ def test_video_exists(page_url):
def get_video_url(page_url, user="", password="", video_password=""):
video_urls = []
urls = []
- response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
- cookies = ""
- cookie = response.headers["set-cookie"].split("HttpOnly, ")
- for c in cookie:
- cookies += c.split(";", 1)[0] + "; "
- data = response.data.decode('unicode-escape')
- data = urllib.unquote_plus(urllib.unquote_plus(data))
- headers_string = "|Cookie=" + cookies
- url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
- streams = scrapertools.find_multiple_matches(url_streams,
- 'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
+ streams = []
+ logger.debug('page_url: %s' % page_url)
+ if 'googleusercontent' in page_url:
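+ # googleusercontent links redirect straight to the stream: take the final URL
+ # from the Location header and read the itag (quality code) from it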
+ data = httptools.downloadpage(page_url, follow_redirects=False, headers={"Referer": page_url})
+ url = data.headers['location']
+ logger.debug('url: %s' % url)
+ logger.debug("data.headers: %s" % data.headers)
+ quality = scrapertools.find_single_match(url, '.itag=(\d+).')
+ logger.debug('quality: %s' % quality)
+
+ streams.append((quality, url))
+ logger.debug('streams: %s' % streams)
+ headers_string = ""
+
+ else:
+ response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url})
+ cookies = ""
+ cookie = response.headers["set-cookie"].split("HttpOnly, ")
+ for c in cookie:
+ cookies += c.split(";", 1)[0] + "; "
+ data = response.data.decode('unicode-escape')
+ data = urllib.unquote_plus(urllib.unquote_plus(data))
+ headers_string = "|Cookie=" + cookies
+ url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
+ streams = scrapertools.find_multiple_matches(url_streams,
+ 'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
+
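+ # Map Google itag codes to their display resolution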
itags = {'18': '360p', '22': '720p', '34': '360p', '35': '480p', '37': '1080p', '43': '360p', '59': '480p'}
for itag, video_url in streams:
if not video_url in urls:
video_url += headers_string
video_urls.append([itags[itag], video_url])
urls.append(video_url)
- video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
+ video_urls.sort(key=lambda video_urls: int(video_urls[0].replace("p", "")))
+
return video_urls
diff --git a/plugin.video.alfa/servers/kingvid.py b/plugin.video.alfa/servers/kingvid.py
index 14489900..b669ccca 100755
--- a/plugin.video.alfa/servers/kingvid.py
+++ b/plugin.video.alfa/servers/kingvid.py
@@ -10,7 +10,7 @@ def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
- if "
watch " in data.lower():
+ if "
watch " in data.lower() or "File was deleted" in data:
return False, "[kingvid] El archivo no existe o ha sido borrado"
return True, ""
diff --git a/plugin.video.alfa/servers/mailru.json b/plugin.video.alfa/servers/mailru.json
index b7d1d70d..e29ef6d4 100755
--- a/plugin.video.alfa/servers/mailru.json
+++ b/plugin.video.alfa/servers/mailru.json
@@ -52,5 +52,6 @@
"visible": false
}
],
+ "thumbnail": "https://s26.postimg.org/6ebn509jd/mailru1.png",
"version": 1
-}
\ No newline at end of file
+}
diff --git a/plugin.video.alfa/servers/pelismundo.json b/plugin.video.alfa/servers/pelismundo.json
new file mode 100644
index 00000000..1d9e3395
--- /dev/null
+++ b/plugin.video.alfa/servers/pelismundo.json
@@ -0,0 +1,49 @@
+{
+ "active": true,
+ "changes": [
+ {
+ "date": "18/09/2017",
+ "description": "Versión inicial"
+ }
+ ],
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "http://www.pelismundo.com/gkvip/vip/playervip3/.*?id=([A-z0-9]+)",
+ "url": "http://www.pelismundo.com/gkvip/vip/playervip3/player.php?id=\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "pelismundo",
+ "name": "pelismundo",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "Incluir en lista negra",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "Incluir en lista de favoritos",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://s26.postimg.org/72c9mr3ux/pelismundo1.png",
+ "version": 1
+}
diff --git a/plugin.video.alfa/servers/pelismundo.py b/plugin.video.alfa/servers/pelismundo.py
new file mode 100644
index 00000000..c86ad7f3
--- /dev/null
+++ b/plugin.video.alfa/servers/pelismundo.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------
+# Alfa addon - KODI Plugin
+# Conector para pelismundo
+# https://github.com/alfa-addon
+# ------------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ if "Object not found" in data or "no longer exists" in data or '"sources": [false]' in data:
+ return False, "[pelismundo] El archivo no existe o ha sido borrado"
+
+ return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+ logger.info("(page_url='%s')" % page_url)
+ video_urls = []
+ data = httptools.downloadpage(page_url, add_referer=True).data
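+ # Isolate the player's "sources" block, then pull each file/label pair from it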
+ patron = 'sources.*?}],'
+ bloque = scrapertools.find_single_match(data, patron)
+ patron = 'file.*?"([^"]+)".*?label:"([^"]+)"'
+ match = scrapertools.find_multiple_matches(bloque, patron)
+ for scrapedurl, scrapedquality in match:
+ video_urls.append([scrapedquality + " [pelismundo]", scrapedurl])
+ #video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
+ return video_urls
diff --git a/plugin.video.alfa/servers/raptu.py b/plugin.video.alfa/servers/raptu.py
deleted file mode 100755
index d833e279..00000000
--- a/plugin.video.alfa/servers/raptu.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import httptools
-from core import scrapertools
-from platformcode import logger
-
-
-def test_video_exists(page_url):
- logger.info("(page_url='%s')" % page_url)
- try:
- response = httptools.downloadpage(page_url)
- except:
- pass
-
- if not response.data or "urlopen error [Errno 1]" in str(response.code):
- from platformcode import config
- if config.is_xbmc():
- return False, "[Raptu] Este conector solo funciona a partir de Kodi 17"
- elif config.get_platform() == "plex":
- return False, "[Raptu] Este conector no funciona con tu versión de Plex, intenta actualizarla"
- elif config.get_platform() == "mediaserver":
- return False, "[Raptu] Este conector requiere actualizar python a la versión 2.7.9 o superior"
-
- if "Object not found" in response.data:
- return False, "[Raptu] El archivo no existe o ha sido borrado"
-
- return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
-
- data = httptools.downloadpage(page_url).data
- video_urls = []
- # Detección de subtítulos
- subtitulo = ""
- videos = scrapertools.find_multiple_matches(data, '"file"\s*:\s*"([^"]+)","label"\s*:\s*"([^"]+)"')
- for video_url, calidad in videos:
- video_url = video_url.replace("\\", "")
- extension = scrapertools.get_filename_from_url(video_url)[-4:]
- if ".srt" in extension:
- subtitulo = "https://www.raptu.com" + video_url
- else:
- video_urls.append(["%s %s [raptu]" % (extension, calidad), video_url, 0, subtitulo])
-
- try:
- video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0].rsplit(" ")[1]))
- except:
- pass
- for video_url in video_urls:
- logger.info(" %s - %s" % (video_url[0], video_url[1]))
-
- return video_urls
diff --git a/plugin.video.alfa/servers/streamcherry.py b/plugin.video.alfa/servers/streamcherry.py
index 61b3c28b..5aa5f7e0 100644
--- a/plugin.video.alfa/servers/streamcherry.py
+++ b/plugin.video.alfa/servers/streamcherry.py
@@ -35,7 +35,7 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
if not media_url.startswith("http"):
media_url = "http:" + media_url
- video_urls.append([".%s %sp [streamango]" % (ext, quality), media_url])
+ video_urls.append([".%s %sp [streamcherry]" % (ext, quality), media_url])
video_urls.reverse()
for video_url in video_urls:
diff --git a/plugin.video.alfa/servers/thevideome.json b/plugin.video.alfa/servers/thevideome.json
index b5fac37f..b353ed66 100755
--- a/plugin.video.alfa/servers/thevideome.json
+++ b/plugin.video.alfa/servers/thevideome.json
@@ -14,7 +14,7 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "thevideo.me/(?:embed-|)([A-z0-9]+)",
+ "pattern": "(?:thevideo.me|tvad.me)/(?:embed-|)([A-z0-9]+)",
"url": "http://thevideo.me/embed-\\1.html"
}
]
@@ -48,5 +48,6 @@
"visible": false
}
],
+ "thumbnail": "https://s26.postimg.org/fzmu2c761/thevideo.me1.png",
"version": 1
-}
\ No newline at end of file
+}
diff --git a/plugin.video.alfa/servers/vidlox.py b/plugin.video.alfa/servers/vidlox.py
index 492cd93b..6dfd69f8 100644
--- a/plugin.video.alfa/servers/vidlox.py
+++ b/plugin.video.alfa/servers/vidlox.py
@@ -13,7 +13,7 @@ from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
- if "borrado" in data:
+ if "borrado" in data or "Deleted" in data:
return False, "[vidlox] El fichero ha sido borrado"
return True, ""