.*?'
patron += 'class="frame image" href="([^"]+)".*?'
patron += 'data-original="([^"]+)" />.*?'
patron += '
.*?'
patron += '>(.*?).*?'
patron += '(.*?)'
- matches = re.compile(patron,re.DOTALL).findall(data)
- for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
- url = urlparse.urljoin(item.url,scrapedurl)
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
+ url = urlparse.urljoin(item.url, scrapedurl)
title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
- quality= ""
- if '-720-' in scrapedthumbnail : quality = "720"
- if '-1080-' in scrapedthumbnail : quality = "1080"
+ quality = ""
+ if '-720-' in scrapedthumbnail:
+ quality = "720"
+ if '-1080-' in scrapedthumbnail:
+ quality = "1080"
if quality:
- title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "p[/COLOR] " + scrapedtitle
+ title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "p[/COLOR] " + scrapedtitle
contentTitle = title
thumbnail = "http:" + scrapedthumbnail
plot = ""
year = ""
- itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
- plot=plot, quality= quality, contentTitle = contentTitle))
- next_page = scrapertools.find_single_match(data,'
')
- if next_page!="":
- next_page = urlparse.urljoin(item.url,next_page)
- itemlist.append(item.clone(action="lista", title="Página Siguiente >>" , text_color="blue", url=next_page) )
+ itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
+ plot=plot, quality=quality, contentTitle=contentTitle))
+ next_page = scrapertools.find_single_match(data, '')
+ if next_page != "":
+ next_page = urlparse.urljoin(item.url, next_page)
+ itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist
def play(item):
logger.info()
itemlist = []
- data = scrapertools.cache_page(item.url)
- data = scrapertools.get_match(data,'var encodings(.*?)var')
- if '360' in data:
+ data = httptools.downloadpage(item.url).data
+ data = scrapertools.get_match(data, 'var encodings(.*?)var')
+ if '360' in data:
patron = '"360".*?"filename"\:"(.*?)"'
- if '720' in data:
+ if '720' in data:
patron = '"720".*?"filename"\:"(.*?)"'
- if '1080' in data:
+ if '1080' in data:
patron = '"1080".*?"filename"\:"(.*?)"'
media_url = scrapertools.find_single_match(data, patron)
media_url = "https:" + media_url.replace("\\", "")
itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist
-
diff --git a/plugin.video.alfa/channels/youtube_channel.py b/plugin.video.alfa/channels/youtube_channel.py
index 24dc4368..3ce7ab47 100755
--- a/plugin.video.alfa/channels/youtube_channel.py
+++ b/plugin.video.alfa/channels/youtube_channel.py
@@ -19,7 +19,7 @@ def youtube_api_call(method, parameters):
url = "https://www.googleapis.com/youtube/v3/" + method + "?" + encoded_parameters + "&key=" + YOUTUBE_V3_API_KEY;
logger.info("url=" + url)
- data = scrapertools.cache_page(url)
+ data = httptools.downloadpage(url).data
logger.info("data=" + data)
json_object = jsontools.load(data)
@@ -37,7 +37,7 @@ def youtube_get_user_playlists(user_id, pageToken=""):
{"part": "snippet,contentDetails", "channelId": channel_id, "maxResults": 50,
"pageToken": pageToken})
- return json_object;
+ return json_object
def youtube_get_playlist_items(playlist_id, pageToken=""):
diff --git a/plugin.video.alfa/servers/thevid.json b/plugin.video.alfa/servers/thevid.json
index e90af13e..574cb21a 100644
--- a/plugin.video.alfa/servers/thevid.json
+++ b/plugin.video.alfa/servers/thevid.json
@@ -38,5 +38,5 @@
"visible": false
}
],
- "thumbnail": ""
+ "thumbnail": "http://thevid.net/imgs/thevid.png"
}
diff --git a/plugin.video.alfa/servers/thevideobee.json b/plugin.video.alfa/servers/thevideobee.json
new file mode 100644
index 00000000..3e1fb059
--- /dev/null
+++ b/plugin.video.alfa/servers/thevideobee.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "(https://thevideobee.to/embed-[A-Za-z0-9]+\\.html)",
+ "url": "\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "thevideobee",
+ "name": "thevideobee",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://thevideobee.to/img/logo.png"
+}
diff --git a/plugin.video.alfa/servers/thevideobee.py b/plugin.video.alfa/servers/thevideobee.py
new file mode 100644
index 00000000..a0b51967
--- /dev/null
+++ b/plugin.video.alfa/servers/thevideobee.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector thevideobee By Alfa development Group
+# --------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ if "no longer exists" in data or "to copyright issues" in data:
+ return False, "[thevideobee] El video ha sido borrado"
+ return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ video_urls = []
+ videourl = scrapertools.find_single_match(data, 'src: "([^"]+)')
+ video_urls.append([".MP4 [thevideobee]", videourl])
+
+ return video_urls
diff --git a/plugin.video.alfa/servers/tusfiles.json b/plugin.video.alfa/servers/tusfiles.json
new file mode 100644
index 00000000..1221c6a9
--- /dev/null
+++ b/plugin.video.alfa/servers/tusfiles.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "(https://tusfiles.com/embed-[A-Za-z0-9]+\\.html)",
+ "url": "\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "tusfiles",
+ "name": "tusfiles",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://tusfiles.com/i/TFLOGO.png"
+}
diff --git a/plugin.video.alfa/servers/tusfiles.py b/plugin.video.alfa/servers/tusfiles.py
new file mode 100644
index 00000000..ef37b5e8
--- /dev/null
+++ b/plugin.video.alfa/servers/tusfiles.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector tusfiles By Alfa development Group
+# --------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ if "no longer exists" in data or "to copyright issues" in data:
+ return False, "[tusfiles] El video ha sido borrado"
+ return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ video_urls = []
+ videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')
+ video_urls.append([".MP4 [tusfiles]", videourl])
+
+ return video_urls
diff --git a/plugin.video.alfa/servers/vup.json b/plugin.video.alfa/servers/vup.json
new file mode 100644
index 00000000..1d5ecb23
--- /dev/null
+++ b/plugin.video.alfa/servers/vup.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "(https://vup.to/embed-[A-Za-z0-9]+\\.html)",
+ "url": "\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "vup",
+ "name": "vup",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://i.postimg.cc/ZKjvqXxj/vup.png"
+}
diff --git a/plugin.video.alfa/servers/vup.py b/plugin.video.alfa/servers/vup.py
new file mode 100644
index 00000000..9eba027d
--- /dev/null
+++ b/plugin.video.alfa/servers/vup.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector vup By Alfa development Group
+# --------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ if "no longer exists" in data or "to copyright issues" in data:
+ return False, "[vup] El video ha sido borrado"
+ return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ bloque = scrapertools.find_single_match(data, 'sources:.*?\]')
+ video_urls = []
+ videourl = scrapertools.find_multiple_matches(bloque, '"(http[^"]+)')
+ for video in videourl:
+ video_urls.append([".MP4 [vup]", video])
+ video_urls = video_urls[::-1]
+ return video_urls