|
", "", plot)
-
- if item.extra.split("|")[7] != "":
- tagline = item.extra.split("|")[7]
- # tagline= re.sub(r',','.',tagline)
- else:
- tagline = ""
- except:
- title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
- plot = "Esta pelicula no tiene informacion..."
- plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]")
- photo = "http://s6.postimg.cc/nm3gk1xox/noinfosup2.png"
- foto = "http://s6.postimg.cc/ub7pb76c1/noinfo.png"
- info = ""
-
- if "serie" in item.url:
- check2 = "serie"
- icon = "http://s6.postimg.cc/hzcjag975/tvdb.png"
- foto = item.show.split("|")[1]
- if item.extra.split("|")[5] != "":
- critica = item.extra.split("|")[5]
- else:
- critica = "Esta serie no tiene críticas..."
-
- photo = item.extra.split("|")[0].replace(" ", "%20")
- try:
- tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
- except:
- tagline = ""
-
- else:
- critica = item.extra.split("|")[5]
- if "%20" in critica:
- critica = "No hay críticas"
- icon = "http://imgur.com/SenkyxF.png"
-
- photo = item.extra.split("|")[0].replace(" ", "%20")
- foto = item.show.split("|")[1]
-
- try:
- if tagline == "\"\"":
- tagline = " "
- except:
- tagline = " "
- tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
- check2 = "pelicula"
- # You might also like (recommendations)
- peliculas = []
- if "serie" in item.url:
-
- url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[
- 5] + "/recommendations?api_key=" + api_key + "&language=es"
- data_tpi = httptools.downloadpage(url_tpi).data
- tpi = scrapertools.find_multiple_matches(data_tpi,
- 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),"popularity"')
-
- else:
- url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
- 1] + "/recommendations?api_key=" + api_key + "&language=es"
- data_tpi = httptools.downloadpage(url_tpi).data
- tpi = scrapertools.find_multiple_matches(data_tpi,
- 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),"popularity"')
-
- for idp, peli, thumb in tpi:
-
- thumb = re.sub(r'"|}', '', thumb)
- if "null" in thumb:
- thumb = "http://s6.postimg.cc/tw1vhymj5/noposter.png"
- else:
- thumb = "https://image.tmdb.org/t/p/original" + thumb
- peliculas.append([idp, peli, thumb])
-
- check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow")
- infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline,
- 'rating': rating}
- item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma,
- critica=critica, contentType=check2, thumb_busqueda="http://imgur.com/OZ1Vg3D.png")
- from channels import infoplus
- infoplus.start(item_info, peliculas)
-
-
-def info_capitulos(item):
- logger.info()
-
- url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" + item.extra.split("|")[
- 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es"
-
- if "/0" in url:
- url = url.replace("/0", "/")
-
- data = httptools.downloadpage(url).data
- data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-
- patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"'
- matches = re.compile(patron, re.DOTALL).findall(data)
-
- if len(matches) == 0:
-
- url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[
- 2] + "/" + item.extra.split("|")[3] + "/es.xml"
- if "/0" in url:
- url = url.replace("/0", "/")
- data = httptools.downloadpage(url).data
- data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-
- patron = '<Episode>.*?<EpisodeName>([^<]+)</EpisodeName>.*?<Overview>(.*?)</Overview>.*?<Rating>(.*?)</Rating>'
-
- matches = re.compile(patron, re.DOTALL).findall(data)
-
- if len(matches) == 0:
-
- title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
- plot = "Este capitulo no tiene informacion..."
- plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
- image = "http://s6.postimg.cc/ub7pb76c1/noinfo.png"
- foto = "http://s6.postimg.cc/nm3gk1xox/noinfosup2.png"
- rating = ""
-
-
- else:
-
- for name_epi, info, rating in matches:
- if "episodes" in data:
- foto = scrapertools.get_match(data, '.*?<filename>(.*?)</filename>')
- fanart = "http://thetvdb.com/banners/" + foto
- else:
- fanart = item.extra.split("|")[1]
- plot = info
- plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]"
- title = name_epi.upper()
- title = "[COLOR bisque][B]" + title + "[/B][/COLOR]"
- image = fanart
- foto = item.extra.split("|")[0]
- if not ".png" in foto:
- foto = "http://imgur.com/IqYaDrC.png"
- foto = re.sub(r'\(.*?\)|" "|" "', '', foto)
- foto = re.sub(r' ', '', foto)
- try:
-
- check_rating = scrapertools.get_match(rating, '(\d+).')
-
- if int(check_rating) >= 5 and int(check_rating) < 8:
- rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]"
- elif int(check_rating) >= 8 and int(check_rating) < 10:
- rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]"
- elif int(check_rating) == 10:
- rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]"
- else:
- rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
-
- except:
- rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
- if "10." in rating:
- rating = re.sub(r'10\.\d+', '10', rating)
- else:
- for name_epi, info, fanart, rating in matches:
- if info == "" or info == "\\":
- info = "Sin informacion del capítulo aún..."
- plot = info
- plot = re.sub(r'/n', '', plot)
- plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]"
- title = name_epi.upper()
- title = "[COLOR bisque][B]" + title + "[/B][/COLOR]"
- image = fanart
- image = re.sub(r'"|}', '', image)
- if "null" in image:
- image = "http://imgur.com/ZiEAVOD.png"
- else:
- image = "https://image.tmdb.org/t/p/original" + image
- foto = item.extra.split("|")[0]
- if not ".png" in foto:
- foto = "http://imgur.com/IqYaDrC.png"
- foto = re.sub(r'\(.*?\)|" "|" "', '', foto)
- foto = re.sub(r' ', '', foto)
- try:
-
- check_rating = scrapertools.get_match(rating, '(\d+).')
-
- if int(check_rating) >= 5 and int(check_rating) < 8:
- rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]"
- elif int(check_rating) >= 8 and int(check_rating) < 10:
- rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]"
- elif int(check_rating) == 10:
- rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]"
- else:
- rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
-
- except:
- rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
- if "10." in rating:
- rating = re.sub(r'10\.\d+', '10', rating)
- ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
- ventana.doModal()
-
-
-class TextBox2(xbmcgui.WindowDialog):
- """ Create a skinned textbox window """
-
- def __init__(self, *args, **kwargs):
- self.getTitle = kwargs.get('title')
- self.getPlot = kwargs.get('plot')
- self.getThumbnail = kwargs.get('thumbnail')
- self.getFanart = kwargs.get('fanart')
- self.getRating = kwargs.get('rating')
-
- self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/133aoMw.jpg')
- self.title = xbmcgui.ControlTextBox(120, 60, 430, 50)
- self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45)
- self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100)
- self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail)
- self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)
-
- self.addControl(self.background)
- self.background.setAnimations(
- [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',),
- ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)])
- self.addControl(self.thumbnail)
- self.thumbnail.setAnimations([('conditional',
- 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',),
- ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)])
- self.addControl(self.plot)
- self.plot.setAnimations(
- [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), (
- 'conditional',
- 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',),
- ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)])
- self.addControl(self.fanart)
- self.fanart.setAnimations(
- [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), (
- 'conditional',
- 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',),
- ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)])
- self.addControl(self.title)
- self.title.setText(self.getTitle)
- self.title.setAnimations(
- [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',),
- ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)])
- self.addControl(self.rating)
- self.rating.setText(self.getRating)
- self.rating.setAnimations(
- [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',),
- ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)])
- xbmc.sleep(200)
-
- try:
- self.plot.autoScroll(7000, 6000, 30000)
- except:
-
- xbmc.executebuiltin(
- 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
- self.plot.setText(self.getPlot)
-
- def get(self):
- self.show()
-
- def onAction(self, action):
- if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
- self.close()
-
-
-def test():
- return True
-
-
-def browser(url):
- import mechanize
-
- # Use mechanize's Browser to work around problems with Bing searches
- br = mechanize.Browser()
- # Browser options
- br.set_handle_equiv(False)
- br.set_handle_gzip(True)
- br.set_handle_redirect(True)
- br.set_handle_referer(False)
- br.set_handle_robots(False)
- # Follows refresh 0 but not hangs on refresh > 0
- br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
- # Want debugging messages?
- # br.set_debug_http(True)
- # br.set_debug_redirects(True)
- # br.set_debug_responses(True)
-
- # User-Agent (this is cheating, ok?)
- br.addheaders = [('User-agent',
- 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
- # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
- # Open some site, let's pick a random one, the first that pops in mind
- r = br.open(url)
- response = r.read()
- print response
- # if not ".ftrH,.ftrHd,.ftrD>" in response:
- if "img,divreturn" in response:
- r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
- response = r.read()
-
- return response
-
-
-def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match):
- i = 0
- while i < len(text):
- m = match(text, i)
- s = m.group(m.lastindex)
- i = m.end()
- if m.lastindex == 2:
- yield "s"
- yield text[i:i + int(s)]
- i = i + int(s)
- else:
- yield s
-
-
-def decode_item(next, token):
- if token == "i":
- # integer: "i" value "e"
- data = int(next())
- if next() != "e":
- raise ValueError
- elif token == "s":
- # string: "s" value (virtual tokens)
- data = next()
- elif token == "l" or token == "d":
- # container: "l" (or "d") values "e"
- data = []
- tok = next()
- while tok != "e":
- data.append(decode_item(next, tok))
- tok = next()
- if token == "d":
- data = dict(zip(data[0::2], data[1::2]))
- else:
- raise ValueError
- return data
-
-
-def decode(text):
- try:
- src = tokenize(text)
- data = decode_item(src.next, src.next())
- for token in src: # look for more tokens
- raise SyntaxError("trailing junk")
- except (AttributeError, ValueError, StopIteration):
- try:
- data = data
- except:
- data = src
-
- return data
-
-
-def convert_size(size):
- import math
- if (size == 0):
- return '0B'
- size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
- i = int(math.floor(math.log(size, 1024)))
- p = math.pow(1024, i)
- s = round(size / p, 2)
- return '%s %s' % (s, size_name[i])
diff --git a/plugin.video.alfa/servers/clipwatching.py b/plugin.video.alfa/servers/clipwatching.py
index 839c7290..2362fc4b 100644
--- a/plugin.video.alfa/servers/clipwatching.py
+++ b/plugin.video.alfa/servers/clipwatching.py
@@ -9,7 +9,7 @@ from platformcode import logger, config
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url).data
- if "File Not Found" in data:
+ if "File Not Found" in data or "File was deleted" in data:
return False, config.get_localized_string(70292) % "ClipWatching"
return True, ""
diff --git a/plugin.video.alfa/servers/thevid.json b/plugin.video.alfa/servers/thevid.json
new file mode 100644
index 00000000..e90af13e
--- /dev/null
+++ b/plugin.video.alfa/servers/thevid.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "(thevid.net/e/\\w+)",
+ "url": "https://\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "thevid",
+ "name": "thevid",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": ""
+}
diff --git a/plugin.video.alfa/servers/thevid.py b/plugin.video.alfa/servers/thevid.py
new file mode 100644
index 00000000..8d9320bc
--- /dev/null
+++ b/plugin.video.alfa/servers/thevid.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+from core import httptools
+from core import scrapertools
+from lib import jsunpack
+from platformcode import logger, config
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ if "Video not found..." in data:
+ return False, config.get_localized_string(70292) % "Thevid"
+ return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
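+ # The embed page ships its player setup as p.a.c.k.e.d JavaScript; pull each
+ # eval(...) block out of its <script> tag and unpack it to expose the file URLs.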
+ packed = scrapertools.find_multiple_matches(data, "(?s)<script>(eval.*?)</script>")
+ for pack in packed:
+ unpacked = jsunpack.unpack(pack)
+ if "file" in unpacked:
+ videos = scrapertools.find_multiple_matches(unpacked, 'file.="(//[^"]+)')
+ video_urls = []
+ for video in videos:
+ video = "https:" + video
+ video_urls.append(["mp4 [Thevid]", video])
+ logger.info("Url: %s" % videos)
+ return video_urls
diff --git a/plugin.video.alfa/servers/thevideome.json b/plugin.video.alfa/servers/thevideome.json
index 568f0c90..4fb0f381 100755
--- a/plugin.video.alfa/servers/thevideome.json
+++ b/plugin.video.alfa/servers/thevideome.json
@@ -4,7 +4,7 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "(?:thevideo.me|tvad.me|thevid.net|thevideo.ch|thevideo.us)/(?:embed-|)([A-z0-9]+)",
+ "pattern": "(?:thevideo.me|tvad.me|thevideo.ch|thevideo.us)/(?:embed-|)([A-z0-9]+)",
"url": "https://thevideo.me/embed-\\1.html"
}
]
diff --git a/plugin.video.alfa/servers/vevio.json b/plugin.video.alfa/servers/vevio.json
new file mode 100644
index 00000000..d91e95bf
--- /dev/null
+++ b/plugin.video.alfa/servers/vevio.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "(vev.io/embed/[A-z0-9]+)",
+ "url": "https://\\1"
+ }
+ ]
+ },
+ "free": true,
+ "id": "vevio",
+ "name": "vevio",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://s8.postimg.cc/opp2c3p6d/vevio1.png"
+}
diff --git a/plugin.video.alfa/servers/vevio.py b/plugin.video.alfa/servers/vevio.py
new file mode 100644
index 00000000..3f74f993
--- /dev/null
+++ b/plugin.video.alfa/servers/vevio.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+
+import urllib
+from core import httptools
+from core import scrapertools
+from platformcode import logger, config
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url).data
+ if "File was deleted" in data or "Page Cannot Be Found" in data or "Video not found" in data:
+ return False, "[vevio] El archivo ha sido eliminado o no existe"
+ return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+ logger.info("url=" + page_url)
+ video_urls = []
+ post = {}
+ post = urllib.urlencode(post)
+ url = page_url
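+ # vev.io serves stream data over a JSON API: an (empty) POST to
+ # /api/serve/video/<id> returns a "qualities" map of resolution -> direct URL.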
+ data = httptools.downloadpage("https://vev.io/api/serve/video/" + scrapertools.find_single_match(url, "embed/([A-z0-9]+)"), post=post).data
+ bloque = scrapertools.find_single_match(data, 'qualities":\{(.*?)\}')
+ matches = scrapertools.find_multiple_matches(bloque, '"([^"]+)":"([^"]+)')
+ for res, media_url in matches:
+ video_urls.append(
+ [scrapertools.get_filename_from_url(media_url)[-4:] + " (" + res + ") [vevio.me]", media_url])
+ return video_urls