')
- patron = '(.*?)'
- matches = scrapertools.find_multiple_matches(bloque, patron)
- for scrapedurl, scrapedtitle, scrapedthumbnail, info, scrapedcat in matches:
- if not [True for c in contenido if c in scrapedcat]:
- continue
- scrapedurl = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedurl))
- scrapedthumbnail = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedthumbnail))
- if not scrapedthumbnail.startswith("http"):
- scrapedthumbnail = "http:" + scrapedthumbnail
- scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
- if ("Películas" in scrapedcat or "Documentales" in scrapedcat) and "Series" not in scrapedcat:
- titulo = scrapedtitle.split("[")[0]
- if info:
- scrapedtitle += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8")
- itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl, contentTitle=titulo,
- thumbnail=scrapedthumbnail, fulltitle=titulo, contentType="movie"))
- else:
- itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl,
- thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, contentTitle=scrapedtitle,
- show=scrapedtitle, contentType="tvshow"))
-
- next_page = scrapertools.find_single_match(data, '(.*?)')
- contenido = ["series", "deportes", "anime", 'miniseries', 'programas']
- c_match = [True for match in contenido if match in item.url]
- # Pattern depends on the content type
- if True in c_match:
- patron = ''
- matches = scrapertools.find_multiple_matches(bloque, patron)
- for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedinfo in matches:
- scrapedurl = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedurl))
- if scrapedinfo != "":
- scrapedinfo = scrapedinfo.replace(" ", "").replace("-", " ")
-
- scrapedinfo = " [%s]" % unicode(scrapedinfo, "utf-8").capitalize().encode("utf-8")
- titulo = scrapedtitle + scrapedinfo
- titulo = scrapertools.decodeHtmlentities(titulo)
- scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
-
- scrapedthumbnail = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedthumbnail))
- if not scrapedthumbnail.startswith("http"):
- scrapedthumbnail = "http:" + scrapedthumbnail
- scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
- scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
- urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
- if "series" in item.url or "anime" in item.url:
- item.show = scrapedtitle
- itemlist.append(item.clone(action="episodios", title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
- fulltitle=scrapedtitle, contentTitle=scrapedtitle, contentType="tvshow"))
- else:
- patron = '(.*?)'
- matches = scrapertools.find_multiple_matches(bloque, patron)
- for scrapedurl, scrapedtitle, scrapedthumbnail, info, categoria in matches:
- scrapedurl = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedurl))
- titulo = scrapertools.decodeHtmlentities(scrapedtitle)
- scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.split("[")[0])
- action = "findvideos"
- show = ""
- if "Series" in categoria:
- action = "episodios"
- show = scrapedtitle
- elif categoria and categoria != "Películas" and categoria != "Documentales":
- try:
- titulo += " [%s]" % categoria.rsplit(", ", 1)[1]
- except:
- titulo += " [%s]" % categoria
- if 'l-espmini' in info:
- titulo += " [ESP]"
- if 'l-latmini' in info:
- titulo += " [LAT]"
- if 'l-vosemini' in info:
- titulo += " [VOSE]"
-
- if info:
- titulo += " [%s]" % unicode(info, "utf-8").capitalize().encode("utf-8")
- year = scrapertools.find_single_match(titulo, '\[(\d{4})\]')
- scrapedthumbnail = urllib.unquote(re.sub(r'&b=4|/go\.php\?u=', '', scrapedthumbnail))
- if not scrapedthumbnail.startswith("http"):
- scrapedthumbnail = "http:" + scrapedthumbnail
- scrapedthumbnail = scrapedthumbnail.replace("-129x180", "")
- scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0] + "/" + \
- urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
-
- itemlist.append(item.clone(action=action, title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
- fulltitle=scrapedtitle, contentTitle=scrapedtitle, viewmode="movie_with_plot",
- show=show, contentType="movie", infoLabels={'year':year}))
- tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
- # Pagination
- next_page = scrapertools.find_single_match(data, '(.*?)')
- if next_page:
-     itemlist.append(item.clone(title=">> Siguiente", url=next_page, text_color=color3))
-
- return itemlist
-
-
-def episodios(item):
- logger.info()
- itemlist = []
-
- data = get_data(item.url)
- patron = '(.*?)'
- bloque = scrapertools.find_single_match(data, patron)
- matches = scrapertools.find_multiple_matches(bloque, '(.*?)')
- for scrapedtitle in matches:
- scrapedtitle = scrapedtitle.strip()
- new_item = item.clone()
- new_item.infoLabels['season'] = scrapedtitle.split(" ", 1)[0].split("x")[0]
- new_item.infoLabels['episode'] = scrapedtitle.split(" ", 1)[0].split("x")[1]
- if item.fulltitle != "Añadir esta serie a la videoteca":
- title = item.fulltitle + " " + scrapedtitle.strip()
- else:
- title = scrapedtitle.strip()
- itemlist.append(new_item.clone(action="findvideos", title=title, extra=scrapedtitle, fulltitle=title,
- contentType="episode"))
-
- itemlist.sort(key=lambda it: it.title, reverse=True)
- item.plot = scrapertools.find_single_match(data, 'SINOPSIS:(.*?)')
- if item.show != "" and item.extra == "":
- itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
- text_color="magenta"))
- if config.get_videolibrary_support():
- itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", url=item.url,
- action="add_serie_to_library", extra="episodios", show=item.show,
- text_color="green"))
-
- try:
- from core import tmdb
- tmdb.set_infoLabels_itemlist(itemlist[:-2], __modo_grafico__)
- except:
- pass
-
- return itemlist
-
-
-def episode_links(item):
- logger.info()
- itemlist = []
- item.text_color = color3
-
- data = get_data(item.url)
- data = data.replace("\n", "").replace("\t", "")
-
- # Links block
- patron = '%s(.*?)(?:)' % item.extra.strip()
- bloque = scrapertools.find_single_match(data, patron)
-
- patron = '(.*?).*?data-sourcelk="([^"]+)"' \
-          '.*?data-server="([^"]+)"' \
-          '.*?'
- matches = scrapertools.find_multiple_matches(bloque, patron)
-
- itemlist.append(item.clone(action="", title="Enlaces Online/Descarga", text_color=color1))
- lista_enlaces = []
- for scrapedurl, scrapedserver, scrapedcalidad in matches:
- if scrapedserver == "ul":
- scrapedserver = "uploadedto"
- if scrapedserver == "streamin":
- scrapedserver = "streaminto"
- titulo = " %s [%s]" % (unicode(scrapedserver, "utf-8").capitalize().encode("utf-8"), scrapedcalidad)
- # Download links
- if scrapedserver == "magnet":
- itemlist.insert(0,
- item.clone(action="play", title=titulo, server="torrent", url=scrapedurl, extra=item.url))
- else:
- if servertools.is_server_enabled(scrapedserver):
- try:
- # servers_module = __import__("servers." + scrapedserver)
- lista_enlaces.append(item.clone(action="play", title=titulo, server=scrapedserver, url=scrapedurl,
- extra=item.url))
- except:
- pass
- lista_enlaces.reverse()
- itemlist.extend(lista_enlaces)
-
- if itemlist[0].server == "torrent":
- itemlist.insert(0, item.clone(action="", title="Enlaces Torrent", text_color=color1))
-
- return itemlist
-
-
-def findvideos(item):
- logger.info()
- if item.contentSeason != '':
- return episode_links(item)
-
- itemlist = []
- item.text_color = color3
-
- data = get_data(item.url)
-
- item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:|):(.*?)')
- year = scrapertools.find_single_match(data, '(?:|)AÑO(?:|):\s*(\d+)')
- if year:
- try:
- from core import tmdb
- item.infoLabels['year'] = year
- tmdb.set_infoLabels_item(item, __modo_grafico__)
- except:
- pass
-
- old_format = False
- # Old-format torrent pattern
- if "Enlaces de descarga" in data:
-     old_format = True
-     matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
-     for scrapedurl in matches:
-         scrapedurl = scrapertools.find_single_match(scrapedurl, '(magnet.*)')
-         scrapedurl = urllib.unquote(re.sub(r'&b=4', '', scrapedurl))
-         title = "[Torrent] "
-         title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
-         itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl,
-                                    text_color="green"))
-
- # Online pattern
- data_online = scrapertools.find_single_match(data, 'Ver online(.*?)')
- if data_online:
- title = "Enlaces Online"
- if '"l-latino2"' in data_online:
- title += " [LAT]"
- elif '"l-esp2"' in data_online:
- title += " [ESP]"
- elif '"l-vose2"' in data_online:
- title += " [VOSE]"
-
- patron = 'make_links.*?,[\'"]([^"\']+)["\']'
- matches = scrapertools.find_multiple_matches(data_online, patron)
- for i, code in enumerate(matches):
- enlace = show_links(code)
- links = servertools.findvideos(data=enlace[0])
- if links and "peliculas.nu" not in links:
- if i == 0:
- extra_info = scrapertools.find_single_match(data_online, '(.*?)')
- size = scrapertools.find_single_match(data_online, '(?i)TAMAÑO:\s*(.*?)<').strip()
-
- if size:
- title += " [%s]" % size
- new_item = item.clone(title=title, action="", text_color=color1)
- if extra_info:
- extra_info = scrapertools.htmlclean(extra_info)
- new_item.infoLabels["plot"] = extra_info
- new_item.title += " +INFO"
- itemlist.append(new_item)
-
- title = " Ver vídeo en " + links[0][2]
- itemlist.append(item.clone(action="play", server=links[0][2], title=title, url=links[0][1]))
- scriptg = scrapertools.find_single_match(data, "")
- js_data = unPack(scriptg)
- media_url = scrapertools.get_match(js_data, """.setup\({file:"([^"]+)",image""")
-
- if media_url.endswith("v.mp4"):
- media_url_mp42flv = re.sub(r'/v.mp4$', '/v.flv', media_url)
- video_urls.append(
- [scrapertools.get_filename_from_url(media_url_mp42flv)[-4:] + " [streaminto]", media_url_mp42flv])
- if media_url.endswith("v.flv"):
- media_url_flv2mp4 = re.sub(r'/v.flv$', '/v.mp4', media_url)
- video_urls.append(
- [scrapertools.get_filename_from_url(media_url_flv2mp4)[-4:] + " [streaminto]", media_url_flv2mp4])
- video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [streaminto]", media_url])
-
- for video_url in video_urls:
- logger.info("%s - %s" % (video_url[0], video_url[1]))
-
- return video_urls
-
-
-def unPack(packed):
- pattern = "}\('(.*)', *(\d+), *(\d+), *'(.*)'\.split\('([^']+)'\)"
- d = re.search(pattern, packed, re.DOTALL).groups()
- p = d[0]
- a = int(d[1])
- c = int(d[2])
- k = d[3].split(d[4])
-
- if a <= 62:
- toString = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
- else:
- toString = """ !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_`abcdefghijklmnopqrstuvwxyz{|}~"""
-
- def e(c):
-     # Base-a digits of token index c; recursion also covers indices >= a*a
-     return ("" if c < a else e(c // a)) + toString[c % a]
-
- while c > 0:
-     c -= 1
-     # Swap each packed token for its wordlist entry; empty entries keep the token
-     if k[c]:
-         p = re.sub(r"\b%s\b" % e(c), k[c], p)
-
- return p
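For context, unPack() mirrors the usual Dean Edwards P.A.C.K.E.R. eval-trick: the packed blob carries a template p whose base-a tokens index into the wordlist k, and unpacking just substitutes every token back into the template. A minimal self-contained check of the helper above (the packed string is a fabricated sample, not real site output):

    # Fabricated P.A.C.K.E.R.-style blob: template "0 1", base 2, wordlist of 2.
    packed = "eval(function(p,a,c,k,e,d){}('0 1', 2, 2, 'hello|world'.split('|'),0,{}))"
    print(unPack(packed))  # -> "hello world"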
diff --git a/plugin.video.alfa/servers/tunepk.json b/plugin.video.alfa/servers/tunepk.json
deleted file mode 100755
index 3d3452ce..00000000
--- a/plugin.video.alfa/servers/tunepk.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "active": true,
- "find_videos": {
- "ignore_urls": [],
- "patterns": [
- {
- "pattern": "tune.pk/player/embed_player.php\\?vid\\=(\\d+)",
- "url": "http://embed.tune.pk/play/\\1?autoplay=no"
- }
- ]
- },
- "free": true,
- "id": "tunepk",
- "name": "tunepk",
- "settings": [
- {
- "default": false,
- "enabled": true,
- "id": "black_list",
- "label": "@60654",
- "type": "bool",
- "visible": true
- },
- {
- "default": 0,
- "enabled": true,
- "id": "favorites_servers_list",
- "label": "@60655",
- "lvalues": [
- "No",
- "1",
- "2",
- "3",
- "4",
- "5"
- ],
- "type": "list",
- "visible": false
- }
- ]
-}
\ No newline at end of file
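The find_videos block above is declarative: core/servertools (not part of this diff) scans page data with each pattern and expands the url template for every hit. A rough sketch of that substitution, with re standing in for the real dispatcher and a made-up page fragment as input:

    import re

    # Pattern/template pair copied from the JSON above (JSON escaping removed).
    pattern = r"tune.pk/player/embed_player.php\?vid\=(\d+)"
    template = r"http://embed.tune.pk/play/\1?autoplay=no"

    page_data = 'src="http://tune.pk/player/embed_player.php?vid=12345"'  # sample input
    match = re.search(pattern, page_data)
    if match:
        print(match.expand(template))  # -> http://embed.tune.pk/play/12345?autoplay=no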
diff --git a/plugin.video.alfa/servers/tunepk.py b/plugin.video.alfa/servers/tunepk.py
deleted file mode 100755
index cde33744..00000000
--- a/plugin.video.alfa/servers/tunepk.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-
-from core import scrapertools
-from platformcode import logger
-
-
-# Returns an array of possible video url's from the page_url
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
-
- video_urls = []
-
- data = scrapertools.cache_page(page_url)
- logger.info(data)
- patron = 'file: "([^"]+)",\s+'
- patron += 'width: "[^"]+",\s+'
- patron += 'height: "[^"]+",\s+'
- patron += 'label : "([^"]+)",\s+'
- patron += 'type : "([^"]+)"'
- matches = re.compile(patron, re.DOTALL).findall(data)
- scrapertools.printMatches(matches)
-
- for url, calidad, formato in matches:
- video_url = ["%s %s [tune.pk]" % (calidad, formato), url]
- video_urls.append(video_url)
-
- for video_url in video_urls:
- logger.info("%s - %s" % (video_url[0], video_url[1]))
-
- return video_urls
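get_video_url() above scrapes a jwplayer-style setup block. A quick standalone check of that regex against a fabricated snippet of player markup, showing the (url, label, type) tuples it yields:

    import re

    # Fabricated jwplayer config in the shape the pattern expects.
    data = '''file: "http://cdn.tune.pk/files/video.mp4",
            width: "600",
            height: "400",
            label : "360p",
            type : "mp4"'''

    patron = 'file: "([^"]+)",\s+width: "[^"]+",\s+height: "[^"]+",\s+' \
             'label : "([^"]+)",\s+type : "([^"]+)"'
    for url, calidad, formato in re.compile(patron, re.DOTALL).findall(data):
        print("%s %s [tune.pk] -> %s" % (calidad, formato, url))
    # -> 360p mp4 [tune.pk] -> http://cdn.tune.pk/files/video.mp4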
diff --git a/plugin.video.alfa/servers/tutv.json b/plugin.video.alfa/servers/tutv.json
deleted file mode 100755
index e7c11eff..00000000
--- a/plugin.video.alfa/servers/tutv.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
- "active": true,
- "find_videos": {
- "ignore_urls": [],
- "patterns": [
- {
- "pattern": "(http://(?:www.)?tu.tv[^\"]+)",
- "url": "\\1"
- },
- {
- "pattern": "tu.tv/(iframe/\\d+)",
- "url": "http://tu.tv/\\1"
- }
- ]
- },
- "free": true,
- "id": "tutv",
- "name": "tutv",
- "settings": [
- {
- "default": false,
- "enabled": true,
- "id": "black_list",
- "label": "@60654",
- "type": "bool",
- "visible": true
- },
- {
- "default": 0,
- "enabled": true,
- "id": "favorites_servers_list",
- "label": "@60655",
- "lvalues": [
- "No",
- "1",
- "2",
- "3",
- "4",
- "5"
- ],
- "type": "list",
- "visible": false
- }
- ]
-}
\ No newline at end of file
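tutv.json declares two rewrite rules: the first passes any full tu.tv URL straight through (its url template just echoes the match), while the second canonicalizes bare iframe paths. A tiny sketch of the second rewrite on an invented fragment:

    import re

    # Second pattern/template pair from the JSON above.
    print(re.sub(r"tu.tv/(iframe/\d+)", r"http://tu.tv/\1", "embed: tu.tv/iframe/98765"))
    # -> embed: http://tu.tv/iframe/98765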
diff --git a/plugin.video.alfa/servers/tutv.py b/plugin.video.alfa/servers/tutv.py
deleted file mode 100755
index 475652ba..00000000
--- a/plugin.video.alfa/servers/tutv.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import urllib
-
-from core import scrapertools
-from platformcode import logger
-
-
-# Returns an array of possible video url's from the page_url
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
- logger.info("(page_url='%s')" % page_url)
-
- # Look for the ID in the URL
- id = extract_id(page_url)
-
- # If it is missing, extract it from the page
- if id == "":
- # Download the page
- data = scrapertools.cache_page(page_url)
- patron = ''
- matches = re.compile(patron, re.DOTALL).findall(data)
- if len(matches) > 0:
- id = extract_id(matches[0])
- else:
- id = ""
-
- if id == "":
- id = scrapertools.get_match(page_url, "tu.tv/iframe/(\d+)")
-
- # Download the video descriptor
- url = "http://tu.tv/visualizacionExterna2.php?web=undefined&codVideo=" + id
- data = scrapertools.cache_page(url)
-
- # Extract the video link
- patronvideos = 'urlVideo0=([^\&]+)\&'
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
- # scrapertools.printMatches(matches)
- url = urllib.unquote_plus(matches[0])
- video_urls = [["[tu.tv]", url]]
-
- for video_url in video_urls:
- logger.info("%s - %s" % (video_url[0], video_url[1]))
-
- return video_urls
-
-
-def extract_id(text):
- patron = "xtp\=([a-zA-Z0-9]+)"
- matches = re.compile(patron, re.DOTALL).findall(text)
- if len(matches) > 0:
- devuelve = matches[0]
- else:
- devuelve = ""
-
- return devuelve
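extract_id() is the only non-trivial piece of the tutv server: it pulls the xtp token that tu.tv embeds in its flashvars-style URLs, falling back to "" when absent. Two illustrative calls on invented inputs:

    # Invented sample inputs for extract_id() above.
    print(extract_id("http://tu.tv/tutvweb.swf?xtp=aG9sYTe9&w=640"))  # -> aG9sYTe9
    print(extract_id("http://tu.tv/iframe/98765"))  # -> "" (no xtp token; caller then tries the iframe id)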