diff --git a/plugin.video.alfa/addon.xml b/plugin.video.alfa/addon.xml index 30c0e1f9..8c77c2e7 100755 --- a/plugin.video.alfa/addon.xml +++ b/plugin.video.alfa/addon.xml @@ -1,5 +1,5 @@  - + @@ -19,11 +19,12 @@ [B]Estos son los cambios para esta versión:[/B] [COLOR green][B]Canales agregados y arreglos[/B][/COLOR] - » gnula » playpornx - » plusdede » yaske - » streamplay » bdupload - » bitertv » userscloud - » canalpelis ¤ arreglos internos + » yaske »divxatope + » javtasty »bitp + » serviporno »gvideo + » vk »cinetux + » ciberpeliculashd + ¤ arreglos internos Navega con Kodi por páginas web para ver sus videos de manera fácil. Browse web pages using Kodi diff --git a/plugin.video.alfa/channels/animeid.py b/plugin.video.alfa/channels/animeid.py index b7a52c8d..520d8c3c 100755 --- a/plugin.video.alfa/channels/animeid.py +++ b/plugin.video.alfa/channels/animeid.py @@ -134,8 +134,7 @@ def novedades_episodios(item): contentTitle = scrapedtitle.replace('#' + episodio, '') itemlist.append(Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot, - hasContentDetails=True, contentSeason=1, contentTitle=contentTitle)) + thumbnail=scrapedthumbnail, plot=scrapedplot, contentSeason=1, contentTitle=contentTitle)) return itemlist diff --git a/plugin.video.alfa/channels/autoplay.py b/plugin.video.alfa/channels/autoplay.py index 467d7621..c513be6b 100644 --- a/plugin.video.alfa/channels/autoplay.py +++ b/plugin.video.alfa/channels/autoplay.py @@ -89,7 +89,6 @@ def start(itemlist, item): videoitem.contentTitle=item.contentTitle videoitem.contentType=item.contentType videoitem.episode_id=item.episode_id - videoitem.hasContentDetails=item.hasContentDetails #videoitem.infoLabels=item.infoLabels videoitem.thumbnail=item.thumbnail #videoitem.title=item.title diff --git a/plugin.video.alfa/channels/bityouth.json b/plugin.video.alfa/channels/bityouth.json deleted file mode 100755 index 143ef328..00000000 --- a/plugin.video.alfa/channels/bityouth.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "bityouth", - "name": "Bityouth", - "active": true, - "adult": false, - "language": ["cast"], - "thumbnail": "http://s6.postimg.org/6ash180up/bityoulogo.png", - "banner": "bityouth.png", - "categories": [ - "torrent", - "movie", - "tvshow" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": false, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/bityouth.py b/plugin.video.alfa/channels/bityouth.py deleted file mode 100755 index 99a720da..00000000 --- a/plugin.video.alfa/channels/bityouth.py +++ /dev/null @@ -1,1762 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import re -import urllib -import urllib2 -import urlparse - -from core import scrapertools -from core.item import Item -from platformcode import logger - -try: - import xbmc - import xbmcgui -except: - pass - -host = "http://bityouth.com/" - - -def browser(url): - import mechanize - - # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing - br = mechanize.Browser() - # Browser options - br.set_handle_equiv(False) - br.set_handle_gzip(True) - br.set_handle_redirect(True) - br.set_handle_referer(False) - br.set_handle_robots(False) - # Follows refresh 0 but not hangs on refresh > 0 - br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) - - # Want debugging messages? 
- # br.set_debug_http(True) - # br.set_debug_redirects(True) - # br.set_debug_responses(True) - - # User-Agent (this is cheating, ok?) - br.addheaders = [('User-agent', - 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] - # br.addheaders =[('Cookie','SRCHD=D=4210979&AF=NOFORM; domain=.bing.com; expires=Wednesday, 09-Nov-06 23:12:40 GMT; MUIDB=36F71C46589F6EAD0BE714175C9F68FC; domain=www.bing.com; expires=15 de enero de 2018 08:43:26 GMT+1')] - # Open some site, let's pick a random one, the first that pops in mind - r = br.open(url) - response = r.read() - # if "z{a:1}" in response: - if not ".ftrH,.ftrHd,.ftrD>" in response: - print "proooxyy" - r = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url) - response = r.read() - return response - ###def proxy(url): - '''from lib import requests - proxies = {"http": "http://anonymouse.org/cgi-bin/anon-www.cgi/"+url} - print "zorro" - print proxies - rsp = requests.get(url, proxies=proxies,stream=True) - print rsp.raw._fp.fp._sock.getpeername() - print rsp.content - response = rsp.content - return response''' - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Generos[/B][/COLOR]", action="generos", - url="http://bityouth.com", thumbnail="http://s6.postimg.org/ybey4gxu9/bityougenerosthum3.png", - fanart="http://s18.postimg.org/l4judlx09/bityougenerosfan.jpg")) - itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Puntuacion[/B][/COLOR]", action="scraper", - url="http://bityouth.com/more_elements/0/?o=pd", - thumbnail="http://s6.postimg.org/n1qtn9i6p/bityoupuntothum4.png", - fanart="http://s6.postimg.org/qrh9oof9t/bityoupuntofan.jpg")) - itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Novedades[/B][/COLOR]", action="scraper", - url="http://bityouth.com/more_elements/0/?o=", - thumbnail="http://s6.postimg.org/bry3sbd5d/bityounovedathum2.png", - fanart="http://s6.postimg.org/ys4r4naz5/bityounovedadfan.jpg")) - import xbmc - if xbmc.Player().isPlaying(): - xbmc.executebuiltin('xbmc.PlayMedia(Stop)') - SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "Appcommand borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - xbmc.executebuiltin('Action(reloadkeymaps)') - print "No hay customs" - itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Series[/B][/COLOR]", action="scraper", - url="http://bityouth.com/more_elements/0/genero/serie_de_tv?o=", - thumbnail="http://s6.postimg.org/59j1km53l/bityouseriesthum.png", - fanart="http://s6.postimg.org/45yx8nkgh/bityouseriesfan3.jpg")) - if xbmc.Player().isPlaying(): - xbmc.executebuiltin('xbmc.PlayMedia(Stop)') - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - KEYMAPDESTFILE = 
os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "Appcommand borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - xbmc.executebuiltin('Action(reloadkeymaps)') - print "No hay customs" - try: - os.remove(SEARCHDESTFILE) - print "Custom search.txt borrado" - except: - print "No hay search.txt" - - try: - os.remove(TRAILERDESTFILE) - print "Custom Trailer.txt borrado" - except: - print "No hay Trailer.txt" - itemlist.append(Item(channel=item.channel, title="[COLOR skyblue][B]Buscar...[/B][/COLOR]", action="search", url="", - thumbnail="http://s6.postimg.org/48isvho41/bityousearchthum.png", - fanart="http://s6.postimg.org/ic5hcegk1/bityousearchfan.jpg", plot="search")) - - return itemlist - - -def search(item, texto): - logger.info() - - itemlist = [] - - if item.url == "": - item.url = "http://bityouth.com/busqueda/" - - item.url = item.url + texto - item.url = item.url.replace(" ", "%20") - - data = scrapertools.cache_page(item.url) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - - patron = '
.*?title="([^<]+)" ' - patron += 'href="([^"]+)".*?' - patron += '

([^<]+)

.*?' - patron += '" in data: - itemlist.append(Item(channel=item.channel, title="[COLOR gold][B]No hay mas paginas...[/B][/COLOR]", - thumbnail="http://s6.postimg.org/f4es4kyfl/bityou_Sorry.png", - fanart="http://s6.postimg.org/y1uehu24x/bityougeneralfan.jpg", folder=False)) - else: - - current_page_number = int(scrapertools.get_match(item.url, 'more_elements/(\d+)')) - item.url = re.sub(r"more_elements/\d+", "more_elements/{0}", item.url) - - next_page_number = current_page_number + 40 - next_page = item.url.format(next_page_number) - - title = "[COLOR skyblue]Pagina siguiente>>[/COLOR]" - - itemlist.append(Item(channel=item.channel, title=title, url=next_page, - fanart="http://s6.postimg.org/y1uehu24x/bityougeneralfan.jpg", - thumbnail="http://s6.postimg.org/kbzv91f0x/bityouflecha2.png", - action="scraper", folder=True)) - - return itemlist - - -def fanart(item): - # Vamos a sacar todos los fanarts y arts posibles - logger.info() - itemlist = [] - url = item.url - data = scrapertools.cachePage(url) - data = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| ", "", data) - year = item.show.split("|")[1] - title = item.show.split("|")[0] - trailer = item.show.split("|")[2] - print "joder" - print title - if title == "Érase una vez (Serie de TV)": - title = "Once upon in time" - - import xbmc - xbmc.executebuiltin('Action(reloadkeymaps)') - title = title.replace('á', 'a') - title = title.replace('Á', 'A') - title = title.replace('é', 'e') - title = title.replace('É', 'E') - title = title.replace('í', 'i') - title = title.replace('Í', 'i') - title = title.replace('ó', 'o') - title = title.replace('Ó', 'o') - title = title.replace('ú', 'u') - title = title.replace('Ú', 'U') - title = title.replace('ñ', 'n') - title = title.replace('Ñ', 'N') - if not "_serie_de_tv" in item.url and not item.extra == "series": - title = title.replace("(Serie de TV)", "") - title = title.replace("torrent", "") - - try: - try: - ###Busqueda en Tmdb la peli por titulo y año - title_tmdb = title.replace(" ", "%20") - url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') - plot = scrapertools.get_match(data, '"page":1.*?,"overview":"(.*?)",') - except: - if ":" in title or "(" in title: - title_tmdb = title.replace(" ", "%20") - url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') - plot = scrapertools.get_match(data, '"page":1.*?,"overview":"(.*?)",') - else: - title_tmdb = title.replace(" ", "%20") - title_tmdb = re.sub(r"(:.*)|\(.*?\)", "", title_tmdb) - url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') - plot = scrapertools.get_match(data, '"page":1.*?,"overview":"(.*?)",') - - - except: - ###Si no hay coincidencia realiza busqueda por bing del id Imdb - urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % 
(title.replace(' ', '+'), year) - data = browser(urlbing_imdb) - '''if "z{a:1}"in data: - data = proxy(urlbing_imdb)''' - try: - subdata_imdb = scrapertools.get_match(data, '
  • (.*?)h="ID') - subdata_imdb = re.sub("http://anonymouse.org/cgi-bin/anon-www.cgi/", "", subdata_imdb) - except: - pass - - try: - url_imdb = scrapertools.get_match(subdata_imdb, '(.*?)h="ID') - subdata_imdb = re.sub("http://anonymouse.org/cgi-bin/anon-www.cgi/", "", subdata_imdb) - except: - pass - try: - url_imdb = scrapertools.get_match(subdata_imdb, '.*?src="([^"]+)"') - poster_imdb = poster_imdb.replace("._.*?jpg", "._V1_SX640_SY720_.jpg") - - except: - poster_imdb = posterdb - - try: - url_photo = scrapertools.get_match(data, - '
    .*?(.*?)h="ID') - except: - pass - try: - url_tvt = scrapertools.get_match(subdata_tvt, '.*?type="hidden" value="(.*?)"') - song = song.replace(" ", "%20") - - xbmc.executebuiltin('xbmc.PlayMedia(' + song + ')') - import xbmc, time - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/test.py", - TESTPYDESTFILE) - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/customkey.xml", - KEYMAPDESTFILE) - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/remote.xml", - REMOTEDESTFILE) - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/customapp.xml", - APPCOMMANDDESTFILE) - - xbmc.executebuiltin('Action(reloadkeymaps)') - - except: - pass - try: - os.remove(TRAILERDESTFILE) - print "Trailer.txt borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except: - print "No hay Trailer.txt" - xbmc.executebuiltin('Action(reloadkeymaps)') - if os.path.exists(SEARCHDESTFILE): - - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "Appcommand borrado" - os.remove(SEARCHDESTFILE) - print "search.txt borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - print "No hay customs" - xbmc.executebuiltin('Action(reloadkeymaps)') - - # Busqueda bing de Imdb serie id - url_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (title.replace(' ', '+'), year) - print url_imdb - data = browser(url_imdb) - '''if "z{a:1}"in data: - data = proxy(url_imdb)''' - print "perro" - print data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - - try: - subdata_imdb = scrapertools.get_match(data, '
  • (.*?)h="ID') - print "ostia" - print subdata_imdb - except: - pass - print "joder" - try: - imdb_id = scrapertools.get_match(subdata_imdb, '.*?posters/(.*?)') - postertvdb = "http://thetvdb.com/banners/_cache/posters/" + postertvdb - except: - postertvdb = item.thumbnail - - if len(matches) == 0: - extra = "http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png" - show = title + "|" + year + "|" + "http://s6.postimg.org/mh3umjzkh/bityouthnofanventanuco.jpg" - fanart_info = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" - fanart_trailer = "http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg" - itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", - thumbnail=postertvdb, fanart="http://s6.postimg.org/6ucl96lsh/bityouthnofan.jpg", - category=category, extra=extra, show=show, folder=True)) - - for fan in matches: - fanart = "http://thetvdb.com/banners/" + fan - fanart_1 = fanart - patron = '.*?.*?.*?.*?(.*?).*?.*?(.*?).*?.*?(.*?)' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - fanart_info = fanart_1 - fanart_trailer = fanart_1 - fanart_2 = fanart_1 - show = title + "|" + year + "|" + fanart_1 - extra = postertvdb - itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", - thumbnail=postertvdb, fanart=fanart_1, category=category, extra=extra, - show=show, folder=True)) - for fanart_info, fanart_trailer, fanart_2 in matches: - fanart_info = "http://thetvdb.com/banners/" + fanart_info - fanart_trailer = "http://thetvdb.com/banners/" + fanart_trailer - fanart_2 = "http://thetvdb.com/banners/" + fanart_2 - # clearart, fanart_2 y logo - for id in matches: - url_fanartv = "http://webservice.fanart.tv/v3/tv/" + id_serie + "?api_key=dffe90fba4d02c199ae7a9e71330c987" - data = scrapertools.cachePage(url_fanartv) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '"clearlogo":.*?"url": "([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - if '"tvposter"' in data: - tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') - if '"tvbanner"' in data: - tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') - if '"tvthumb"' in data: - tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') - if '"hdtvlogo"' in data: - hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') - if '"hdclearart"' in data: - hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') - if len(matches) == 0: - item.thumbnail = postertvdb - if '"hdtvlogo"' in data: - if "showbackground" in data: - - if '"hdclearart"' in data: - thumbnail = hdtvlogo - extra = hdtvclear - show = title + "|" + year + "|" + fanart_2 - else: - thumbnail = hdtvlogo - extra = thumbnail - show = title + "|" + year + "|" + fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, fanart=fanart_1, category=category, - extra=extra, show=show, plot=item.plot, folder=True)) - - - else: - if '"hdclearart"' in data: - thumbnail = hdtvlogo - extra = hdtvclear - show = title + "|" + year + "|" + fanart_2 - else: - thumbnail = hdtvlogo - extra = thumbnail - show = title + "|" + year + "|" + fanart_2 - - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, fanart=fanart_1, extra=extra, - show=show, category=category, plot=item.plot, folder=True)) - else: - extra = 
"http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png" - show = title + "|" + year + "|" + fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=item.thumbnail, fanart=fanart_1, extra=extra, - show=show, category=category, plot=item.plot, folder=True)) - for logo in matches: - if '"hdtvlogo"' in data: - thumbnail = hdtvlogo - elif not '"hdtvlogo"' in data: - if '"clearlogo"' in data: - thumbnail = logo - else: - thumbnail = item.thumbnail - if '"clearart"' in data: - clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') - if "showbackground" in data: - - extra = clear - show = title + "|" + year + "|" + fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, fanart=fanart_1, extra=extra, show=show, - category=category, plot=item.plot, folder=True)) - else: - extra = clear - show = title + "|" + year + "|" + fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, fanart=fanart_1, extra=extra, show=show, - category=category, plot=item.plot, folder=True)) - - if "showbackground" in data: - - if '"clearart"' in data: - clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') - extra = clear - show = title + "|" + year + "|" + fanart_2 - else: - extra = logo - show = title + "|" + year + "|" + fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, fanart=fanart_1, extra=extra, show=show, - category=category, plot=item.plot, folder=True)) - - if not '"clearart"' in data and not '"showbackground"' in data: - if '"hdclearart"' in data: - extra = hdtvclear - show = title + "|" + year + "|" + fanart_2 - else: - extra = thumbnail - show = title + "|" + year + "|" + fanart_2 - itemlist.append( - Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", - thumbnail=thumbnail, fanart=fanart_1, extra=extra, show=show, category=category, - plot=item.plot, folder=True)) - - title = "Info" - if not "_serie_de_tv" in item.url and not item.extra == "series": - thumbnail = posterdb - if "_serie_de_tv" in item.url or item.extra == "series": - if '"tvposter"' in data: - thumbnail = tvposter - else: - thumbnail = postertvdb - - if "tvbanner" in data: - category = tvbanner - else: - category = item.show.split("|")[2] - - title = title.replace(title, "[COLOR cyan]" + title + "[/COLOR]") - itemlist.append( - Item(channel=item.channel, action="info", title=title, url=item.url, thumbnail=thumbnail, fanart=fanart_info, - extra=extra, plot=plot, category=category, show=show, folder=False)) - ###trailer - - - title = "[COLOR gold]Trailer[/COLOR]" - - if "_serie_de_tv" in item.url or item.extra == "series": - if '"tvthumb"' in data: - thumbnail = tvthumb - else: - thumbnail = postertvdb - if '"tvbanner"' in data: - extra = tvbanner - elif '"tvthumb"' in data: - extra = tvthumb - else: - extra = item.thumbnail - else: - if '"moviethumb"' in data: - thumbnail = thumb - else: - thumbnail = posterdb - - if '"moviedisc"' in data: - extra = disc - else: - if '"moviethumb"' in data: - extra = thumb - - else: - extra = posterdb - - itemlist.append( - Item(channel=item.channel, action="trailer", title=title, url=item.url, thumbnail=thumbnail, plot=item.plot, - fanart=fanart_trailer, extra=extra, 
show=trailer, folder=True)) - - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - import xbmc - SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - if xbmc.Player().isPlaying(): - if not os.path.exists(TESTPYDESTFILE): - import xbmc - urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/search.txt", - SEARCHDESTFILE) - urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/test.py", - TESTPYDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customkey.xml", - KEYMAPDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remote.xml", - REMOTEDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml", - APPCOMMANDDESTFILE) - - xbmc.executebuiltin('Action(reloadkeymaps)') - - if not xbmc.Player().isPlaying(): - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "Appcommand borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - xbmc.executebuiltin('Action(reloadkeymaps)') - print "No hay customs" - - data = scrapertools.cache_page(item.url) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - - patron = '(.*?)') - except: - plot = item.plot - - plot = plot.replace(plot, "[COLOR bisque][B]" + plot + "[/B][/COLOR]") - plot = plot.replace("", "") - plot = plot.replace("
    ", "") - plot = plot.replace("
    ", "") - plot = plot.replace("“", "") - plot = plot.replace("", "") - plot = plot.replace("", "") - plot = plot.replace(" ​​", "") - plot = scrapertools.decodeHtmlentities(plot) - plot = plot.replace(""", "") - except: - - title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" - plot = "Esta serie no tiene informacion..." - plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") - photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" - foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" - info = "" - quit = "Pulsa" + " [COLOR blue][B]INTRO [/B][/COLOR]" + "para quitar" - try: - scrapedinfo = scrapertools.get_match(data, '
    |>,', - '', info) - info = info.replace("

    ", " ") - info = info.replace("#", ",") - info = info.replace(">", "") - except: - info = "[COLOR skyblue][B]Sin informacion adicional...[/B][/COLOR]" - if "_serie_de_tv" in item.url: - foto = item.show.split("|")[2] - - else: - foto = item.category - if item.show == item.thumbnail: - foto = "http://s6.postimg.org/mh3umjzkh/bityouthnofanventanuco.jpg" - photo = item.extra - quit = "Pulsa" + " [COLOR blue][B]INTRO [/B][/COLOR]" + "para quitar" - if "_serie_de_tv" in item.url: - NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") - REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") - APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") - urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/noback.xml", - NOBACKDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/remotenoback.xml", - REMOTENOBACKDESTFILE) - urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/appnoback.xml", - APPNOBACKDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - - ventana2 = TextBox1(title=title, plot=plot, info=info, thumbnail=photo, fanart=foto, quit=quit) - ventana2.doModal() - - -ACTION_GESTURE_SWIPE_LEFT = 511 -ACTION_SELECT_ITEM = 7 - - -class TextBox1(xbmcgui.WindowDialog): - """ Create a skinned textbox window """ - - def __init__(self, *args, **kwargs): - - self.getTitle = kwargs.get('title') - self.getPlot = kwargs.get('plot') - self.getInfo = kwargs.get('info') - self.getThumbnail = kwargs.get('thumbnail') - self.getFanart = kwargs.get('fanart') - self.getQuit = kwargs.get('quit') - - self.background = xbmcgui.ControlImage(70, 20, 1150, 630, - 'http://s6.postimg.org/58jknrvtd/backgroundventana5.png') - self.title = xbmcgui.ControlTextBox(140, 60, 1130, 50) - self.quit = xbmcgui.ControlTextBox(145, 90, 1030, 45) - self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 140) - self.info = xbmcgui.ControlFadeLabel(120, 310, 1056, 100) - self.thumbnail = xbmcgui.ControlImage(813, 43, 390, 100, self.getThumbnail) - self.fanart = xbmcgui.ControlImage(120, 365, 1060, 250, self.getFanart) - - self.addControl(self.background) - self.addControl(self.title) - self.addControl(self.quit) - self.addControl(self.plot) - self.addControl(self.thumbnail) - self.addControl(self.fanart) - self.addControl(self.info) - - self.title.setText(self.getTitle) - self.quit.setText(self.getQuit) - try: - self.plot.autoScroll(7000, 6000, 30000) - except: - print "Actualice a la ultima version de kodi para mejor info" - import xbmc - xbmc.executebuiltin( - 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') - self.plot.setText(self.getPlot) - self.info.addLabel(self.getInfo) - - def get(self): - self.show() - - def onAction(self, action): - if action == ACTION_SELECT_ITEM or action == ACTION_GESTURE_SWIPE_LEFT: - import os - import xbmc - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") - REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") - APPNOBACKDESTFILE = 
os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - try: - os.remove(NOBACKDESTFILE) - os.remove(REMOTENOBACKDESTFILE) - os.remove(APPNOBACKDESTFILE) - if os.path.exists(TESTPYDESTFILE): - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/customapp.xml", - APPCOMMANDDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - except: - xbmc.executebuiltin('Action(reloadkeymaps)') - self.close() - - -def info_capitulos(item): - logger.info() - import xbmc - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - try: - os.remove(APPCOMMANDDESTFILE) - except: - pass - item.category = item.show.split("|")[0] - item.thumbnail = item.show.split("|")[1] - - url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.show.split("|")[0] + "/default/" + \ - item.extra.split("|")[0] + "/" + item.extra.split("|")[2] + "/es.xml" - data = scrapertools.cache_page(url) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '.*?([^<]+).*?' - patron += '(.*?).*?' - - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" - plot = "Este capitulo no tiene informacion..." - plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") - foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" - image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" - quit = "Pulsa" + " [COLOR greenyellow][B]INTRO [/B][/COLOR]" + "para quitar" - - else: - - for name_epi, info in matches: - if "episodes" in data: - foto = scrapertools.get_match(data, '.*?(.*?)') - fanart = "http://thetvdb.com/banners/" + foto - else: - fanart = item.show.split("|")[1] - - plot = info - plot = plot.replace(plot, "[COLOR burlywood][B]" + plot + "[/B][/COLOR]") - title = name_epi.upper() - title = title.replace(title, "[COLOR skyblue][B]" + title + "[/B][/COLOR]") - image = fanart - foto = item.show.split("|")[1] - if not ".png" in item.show.split("|")[1]: - foto = "http://s6.postimg.org/rv2mu3pap/bityouthsinopsis2.png" - quit = "Pulsa" + " [COLOR greenyellow][B]INTRO [/B][/COLOR]" + "para quitar" - NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") - REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") - APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/noback.xml", - NOBACKDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/remotenoback.xml", - REMOTENOBACKDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/appnoback.xml", - APPNOBACKDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, quit=quit) - ventana.doModal() - - -ACTION_GESTURE_SWIPE_LEFT = 511 -ACTION_SELECT_ITEM = 7 - - -class TextBox2(xbmcgui.WindowDialog): - """ Create a skinned textbox window """ - - def __init__(self, *args, **kwargs): - self.getTitle = kwargs.get('title') - self.getPlot = kwargs.get('plot') - self.getThumbnail = 
kwargs.get('thumbnail') - self.getFanart = kwargs.get('fanart') - self.getQuit = kwargs.get('quit') - - self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://s6.postimg.org/n3ph1uxn5/ventana.png') - self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) - self.quit = xbmcgui.ControlTextBox(145, 90, 1030, 45) - self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) - self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) - self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) - - self.addControl(self.background) - self.addControl(self.title) - self.addControl(self.quit) - self.addControl(self.plot) - self.addControl(self.thumbnail) - self.addControl(self.fanart) - - self.title.setText(self.getTitle) - self.quit.setText(self.getQuit) - try: - self.plot.autoScroll(7000, 6000, 30000) - except: - print "Actualice a la ultima version de kodi para mejor info" - import xbmc - xbmc.executebuiltin( - 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') - self.plot.setText(self.getPlot) - - def get(self): - self.show() - - def onAction(self, action): - if action == ACTION_SELECT_ITEM or action == ACTION_GESTURE_SWIPE_LEFT: - import os - import xbmc - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") - REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") - APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - try: - os.remove(NOBACKDESTFILE) - os.remove(REMOTENOBACKDESTFILE) - os.remove(APPNOBACKDESTFILE) - if os.path.exists(TESTPYDESTFILE): - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bityouth/customapp.xml", - APPCOMMANDDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - except: - xbmc.executebuiltin('Action(reloadkeymaps)') - self.close() - - -def translate(to_translate, to_langage="auto", langage="auto"): - ###Traducción atraves de Google - '''Return the translation using google translate - you must shortcut the langage you define (French = fr, English = en, Spanish = es, etc...) - if you don't define anything it will detect it or use english by default - Example: - print(translate("salut tu vas bien?", "en")) - hello you alright?''' - agents = { - 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} - before_trans = 'class="t0">' - link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_langage, langage, to_translate.replace(" ", "+")) - request = urllib2.Request(link, headers=agents) - page = urllib2.urlopen(request).read() - result = page[page.find(before_trans) + len(before_trans):] - result = result.split("<")[0] - return result - - -if __name__ == '__main__': - to_translate = 'Hola como estas?' - print("%s >> %s" % (to_translate, translate(to_translate))) - print("%s >> %s" % (to_translate, translate(to_translate, 'fr'))) -# should print Hola como estas >> Hello how are you -# and Hola como estas? >> Bonjour comment allez-vous? 
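For context on the largest deletion above: the removed fanart() in bityouth.py resolves each title's TMDb id and synopsis by regex-scraping http://api.themoviedb.org/3/search/movie with get_match patterns such as '"page":1.*?,"id":(.*?),' and '"page":1.*?,"overview":"(.*?)",'. Below is a minimal standalone sketch of that same lookup, parsing the JSON response instead of regex-matching the raw body; the endpoint, query parameters, and api_key are the ones hard-coded in the deleted channel, while the tmdb_lookup name and its return shape are illustrative only, not part of the plugin.

    import json
    import urllib
    import urllib2

    def tmdb_lookup(title, year):
        # Same TMDb search endpoint the deleted fanart() built by string
        # concatenation; the api_key is the one embedded in the removed code.
        params = urllib.urlencode({
            "api_key": "2e2160006592024ba87ccdf78c28f49f",
            "query": title,
            "year": year,
            "language": "es",
            "include_adult": "false",
        })
        body = urllib2.urlopen(
            "http://api.themoviedb.org/3/search/movie?" + params).read()
        results = json.loads(body).get("results") or []
        if not results:
            return None, None
        # The regex version scraped exactly these two fields out of the raw body.
        return results[0]["id"], results[0].get("overview", "")

For example, tmdb_lookup("Blade Runner", "1982") returns the id and Spanish overview that the regex version extracted, and keeps working if TMDb reorders the JSON keys — the fragility the nested try/except blocks in the deleted fanart() were working around.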
diff --git a/plugin.video.alfa/channels/borrachodetorrent.json b/plugin.video.alfa/channels/borrachodetorrent.json deleted file mode 100755 index dc55a7a5..00000000 --- a/plugin.video.alfa/channels/borrachodetorrent.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "id": "borrachodetorrent", - "name": "BorrachodeTorrent", - "active": true, - "adult": false, - "language": ["cast"], - "thumbnail": "http://imgur.com/BePrYmy.png", - "categories": [ - "torrent", - "movie", - "tvshow" - ], - "settings": [ - { - "id": "modo_grafico", - "type": "bool", - "label": "Buscar información extra", - "default": true, - "enabled": true, - "visible": true - }, - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/borrachodetorrent.py b/plugin.video.alfa/channels/borrachodetorrent.py deleted file mode 100755 index 85d1f0cf..00000000 --- a/plugin.video.alfa/channels/borrachodetorrent.py +++ /dev/null @@ -1,1047 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import re -import ssl -from threading import Thread - -import xbmc -import xbmcgui -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from core.scrapertools import decodeHtmlentities as dhe -from platformcode import config, logger - -try: - _create_unverified_https_context = ssl._create_unverified_context -except AttributeError: - # Legacy Python that doesn't verify HTTPS certificates by default - pass -else: - # Handle target environment that doesn't support HTTPS verification - ssl._create_default_https_context = _create_unverified_https_context - -ACTION_SHOW_FULLSCREEN = 36 -ACTION_GESTURE_SWIPE_LEFT = 511 -ACTION_SELECT_ITEM = 7 -ACTION_PREVIOUS_MENU = 10 -ACTION_MOVE_LEFT = 1 -ACTION_MOVE_RIGHT = 2 -ACTION_MOVE_DOWN = 4 -ACTION_MOVE_UP = 3 -OPTION_PANEL = 6 -OPTIONS_OK = 5 - -__modo_grafico__ = config.get_setting('modo_grafico', "borrachodetorrent") - - -# Para la busqueda en bing evitando baneos - -def browser(url): - import mechanize - - # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing - br = mechanize.Browser() - # Browser options - br.set_handle_equiv(False) - br.set_handle_gzip(True) - br.set_handle_redirect(True) - br.set_handle_referer(False) - br.set_handle_robots(False) - # Follows refresh 0 but not hangs on refresh > 0 - br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) - # Want debugging messages? - # br.set_debug_http(True) - # br.set_debug_redirects(True) - # br.set_debug_responses(True) - - # User-Agent (this is cheating, ok?) 
- # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] - # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] - # Open some site, let's pick a random one, the first that pops in mind - r = br.open(url) - response = r.read() - print response - if "img,divreturn" in response: - r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url) - print "prooooxy" - response = r.read() - - return response - - -api_key = "2e2160006592024ba87ccdf78c28f49f" -api_fankey = "dffe90fba4d02c199ae7a9e71330c987" - - -def mainlist(item): - logger.info() - itemlist = [] - itemlist.append(item.clone(title="[COLOR floralwhite][B]Películas[/B][/COLOR]", action="scraper", - url="https://www.borrachodetorrent.com/peliculas-torrent/", - thumbnail="http://imgur.com/tBvoGIk.png", fanart="http://imgur.com/AqUvMW3.jpg", - contentType="movie")) - itemlist.append(item.clone(title="[COLOR floralwhite][B] Estrenos[/B][/COLOR]", action="scraper", - url="https://www.borrachodetorrent.com/peliculas-estrenos-torrent/", - thumbnail="http://imgur.com/tBvoGIk.png", fanart="http://imgur.com/AqUvMW3.jpg", - contentType="movie")) - itemlist.append(item.clone(title="[COLOR floralwhite][B] Esenciales[/B][/COLOR]", action="scraper", - url="https://www.borrachodetorrent.com/peliculas-torrent-deberias-haber-visto/", - thumbnail="http://imgur.com/tBvoGIk.png", fanart="http://imgur.com/AqUvMW3.jpg", - contentType="movie")) - - itemlist.append(itemlist[-1].clone(title="[COLOR floralwhite][B]Series[/B][/COLOR]", action="scraper", - url="https://www.borrachodetorrent.com/series-torrent/", - thumbnail="http://imgur.com/lMHcNwc.png", contentType="tvshow")) - - itemlist.append(itemlist[-1].clone(title="[COLOR cadetblue][B]Buscar[/B][/COLOR]", action="search", - thumbnail="http://imgur.com/NrIVpps.png", - fanart="http://imgur.com/AqUvMW3.jpg", )) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = "https://www.borrachodetorrent.com/?s=" + texto - item.extra = "search" - try: - return buscador(item) - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def buscador(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - - patron = scrapertools.find_multiple_matches(data, - '
    .*?([^"]+)(.*?)([^"]+)') - - for url, thumb, title, check_year, calidad in patron: - - if "SERIE" in calidad or "&#" in title: - if "&#" in title: - item.extra = "" - - checkmt = "tvshow" - - else: - checkmt = "movie" - year = scrapertools.find_single_match(check_year, '([^"]+)') - if year == "": - year = " " - titulo = "[COLOR teal]" + title + "[/COLOR]" + " " + "[COLOR floralwhite]" + calidad + "[/COLOR]" - title = re.sub(r"!|¡", "", title) - title = re.sub(r"’|PRE-Estreno|\d+&#.*", "'", title) - - if checkmt == "movie": - new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title, - contentTitle=title, contentType="movie", extra=year, library=True) - else: - if item.extra == "search": - new_item = item.clone(action="findtemporadas", title=titulo, url=url, thumbnail=thumb, fulltitle=title, - contentTitle=title, show=title, contentType="tvshow", library=True) - else: - new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title, - contentTitle=title, show=title, contentType="tvshow", library=True) - new_item.infoLabels['year'] = year - itemlist.append(new_item) - - try: - from core import tmdb - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - for item in itemlist: - if not "Siguiente >>" in item.title: - if "0." in str(item.infoLabels['rating']): - item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]" - else: - item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]" - item.title = item.title + " " + str(item.infoLabels['rating']) - except: - pass - - return itemlist - - -def scraper(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - - if item.contentType == "movie": - patron = scrapertools.find_multiple_matches(data, - '.*?([^"]+)(.*?)([^"]+)') - - for url, thumb, title, check_year, calidad in patron: - - year = scrapertools.find_single_match(check_year, '([^"]+)') - if year == "": - year = " " - - titulo = "[COLOR teal]" + title + "[/COLOR]" + " " + "[COLOR floralwhite]" + calidad + "[/COLOR]" - title = re.sub(r"!|¡", "", title) - title = re.sub(r"’|PRE-Estreno", "'", title) - - new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, fulltitle=title, - contentTitle=title, contentType="movie", extra=year, library=True) - new_item.infoLabels['year'] = year - itemlist.append(new_item) - - else: - - data = re.sub(r'×', 'x', data) - patron = scrapertools.find_multiple_matches(data, - 'id="busca_a" class="busca_a" href="([^"]+)">.*?([^"]+).*?([^"]+)') - - for url, thumb, title, calidad in patron: - titulo = "[COLOR teal]" + title + "[/COLOR]" + " " + "[COLOR floralwhite]" + calidad + "[/COLOR]" - title = re.sub(r'\d+x\d+', '', title) - title = re.sub(r"’", "'", title) - filtro_thumb = thumb.replace("https://image.tmdb.org/t/p/w300", "") - filtro_list = {"poster_path": filtro_thumb} - filtro_list = filtro_list.items() - - new_item = item.clone(action="findvideos", title=titulo, url=url, thumbnail=thumb, - fulltitle=title, infoLabels={'filtro': filtro_list}, - contentTitle=title, show=title, contentType="tvshow", library=True) - itemlist.append(new_item) - - ## Paginación - next = scrapertools.find_single_match(data, "
    .*?.*?href='([^']+)'") - if len(next) > 0: - url = next - - itemlist.append(item.clone(title="[COLOR dodgerblue][B]Siguiente >>[/B][/COLOR]", url=url)) - try: - from core import tmdb - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - for item in itemlist: - if not "Siguiente >>" in item.title: - if "0." in str(item.infoLabels['rating']): - item.infoLabels['rating'] = "[COLOR indianred]Sin puntuacíon[/COLOR]" - else: - item.infoLabels['rating'] = "[COLOR springgreen]" + str(item.infoLabels['rating']) + "[/COLOR]" - item.title = item.title + " " + str(item.infoLabels['rating']) - except: - pass - - for item_tmdb in itemlist: - logger.info(str(item_tmdb.infoLabels['tmdb_id'])) - - return itemlist - - -def findtemporadas(item): - logger.info() - itemlist = [] - if item.extra == "search": - th = Thread(target=get_art(item)) - th.setDaemon(True) - th.start() - data = httptools.downloadpage(item.url).data - if len(item.extra.split("|")): - if len(item.extra.split("|")) >= 4: - fanart = item.extra.split("|")[2] - extra = item.extra.split("|")[3] - try: - fanart_extra = item.extra.split("|")[4] - except: - fanart_extra = item.extra.split("|")[3] - try: - fanart_info = item.extra.split("|")[5] - except: - fanart_extra = item.extra.split("|")[3] - elif len(item.extra.split("|")) == 3: - fanart = item.extra.split("|")[2] - extra = item.extra.split("|")[0] - fanart_extra = item.extra.split("|")[0] - fanart_info = item.extra.split("|")[1] - elif len(item.extra.split("|")) == 2: - fanart = item.extra.split("|")[1] - extra = item.extra.split("|")[0] - fanart_extra = item.extra.split("|")[0] - fanart_info = item.extra.split("|")[1] - else: - extra = item.extra - fanart_extra = item.extra - fanart_info = item.extra - try: - logger.info(fanart_extra) - logger.info(fanart_info) - except: - fanart_extra = item.fanart - fanart_info = item.fanart - bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+) -(.*?)') - for temporada, bloque_epis in bloque_episodios: - if 'no data' in bloque_epis or '' in bloque_epis: continue - item.infoLabels = item.InfoLabels - item.infoLabels['season'] = temporada - itemlist.append(item.clone(action="epis", - title="[COLOR royalblue][B]Temporada [/B][/COLOR]" + "[COLOR antiquewhite][B]" + temporada + "[/B][/COLOR]", - url=bloque_epis, fanart=fanart, contentType=item.contentType, - contentTitle=item.contentTitle, show=item.show, extra=item.extra, - fanart_extra=fanart_extra, fanart_info=fanart_info, datalibrary=data, folder=True)) - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - for item in itemlist: - item.fanart = fanart - item.extra = extra - if config.get_videolibrary_support() and itemlist: - - if len(bloque_episodios) == 1: - extra = "epis" - else: - extra = "epis###serie_add" - infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'tvdb_id': item.infoLabels['tvdb_id'], - 'imdb_id': item.infoLabels['imdb_id']} - itemlist.append(Item(channel=item.channel, title="Añadir esta serie a la videoteca", text_color="0xFF00ffff", - action="add_serie_to_library", extra=extra, url=item.url, - contentSerieName=item.fulltitle, infoLabels=infoLabels, - thumbnail='http://imgur.com/BbafXw7.png', datalibrary=data)) - - return itemlist - - -def epis(item): - logger.info() - itemlist = [] - if item.extra == "serie_add": - item.url = item.datalibrary - patron = scrapertools.find_multiple_matches(item.url, - '
    ([^"]+)
    .*?href="([^"]+)">([^"]+)
    ') - - for epi, url, title in patron: - epi = epi.replace(" ", "") - episodio = epi - episodio = scrapertools.find_single_match(episodio, '\d+x(\d+)') - item.infoLabels['episode'] = episodio - itemlist.append( - item.clone(title="[COLOR blue][B]" + epi + "[/B][/COLOR]", url=url, action="findvideos", show=item.show, - fanart=item.extra, extra=item.extra, fanart_extra=item.fanart_extra, - fanart_info=item.fanart_info, folder=True)) - if item.extra != "serie_add": - tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) - for item in itemlist: - item.fanart = item.extra - if item.infoLabels['title']: title = "[COLOR lightblue]" + item.infoLabels['title'] + "[/COLOR]" - item.title = item.title + "[CR]\"" + title + "\"" - return itemlist - - -def findvideos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - if not item.infoLabels['episode']: - th = Thread(target=get_art(item)) - th.setDaemon(True) - th.start() - url = scrapertools.find_single_match(data, '
    .*?href="([^"]+)"').strip() - if item.contentType != "movie": - check_online = '
    ' - if not item.infoLabels['episode']: - capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)') - url_serie = re.sub(r'-\d+x\d+.*', '', item.url) - url_serie = re.sub(r'\/episodio', '/series', url_serie) - if len(item.extra.split("|")) >= 2: - extra = item.extra - else: - extra = item.fanart - else: - capitulo = scrapertools.find_single_match(item.title, '\d+x\d+') - try: - fanart = item.fanart_extra - except: - fanart = item.extra.split("|")[0] - if not url and item.library: - itemlist.append(Item(channel=item.channel, title="[COLOR slateblue][B]No disponible[/B][/COLOR]", url=url, - fanart=fanart, thumbnail=item.thumbnail, extra=item.extra, folder=False)) - else: - - title = "[COLOR darkturquoise][B]Torrent [/B][/COLOR]" + "[COLOR aliceblue][B]" + capitulo + "[/B][/COLOR]" - - title = re.sub(r'\".*', '', title) - itemlist.append( - Item(channel=item.channel, title=title, url=url, action="play", server="torrent", fanart=fanart, - thumbnail=item.thumbnail, extra=item.extra, folder=False)) - - - else: - - check_online = '
    ' - item.infoLabels['year'] = None - itemlist.append( - Item(channel=item.channel, title="[COLOR deepskyblue][B]Torrent[/B][/COLOR]", url=url, action="play", - server="torrent", fanart=item.fanart, thumbnail=item.thumbnail, extra=item.extra, - InfoLabels=item.infoLabels, folder=False)) - if item.library and config.get_videolibrary_support() and len(itemlist) > 0: - infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], - 'title': item.infoLabels['title']} - itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca", - action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, - text_color="0xFF00ffff", - thumbnail='http://imgur.com/BbafXw7.png')) - - dd = scrapertools.find_multiple_matches(data, '

    (.*?)' + check_online + '') - if dd: - if item.library: - extra = dd - itemlist.append( - Item(channel=item.channel, title="[COLOR floralwhite][B] Online[/B][/COLOR]", url=item.url, - action="dd_y_o", thumbnail="http://imgur.com/hYgra9W.png", fanart=item.extra.split("|")[0], - contentType=item.contentType, extra=str(extra) + "|" + item.extra, folder=True)) - else: - - patron = scrapertools.find_multiple_matches(str(dd), - '
  • .*?href="([^"]+)".*?([^"]+).*?([^"]+)') - - for url, idioma, calidad, in patron: - idioma = re.sub(r'\\xc3\\xb1', 'ñ', idioma) - idioma = re.sub(r'\\xc3\\xa9', 'é', idioma) - videolist = servertools.find_video_items(data=url) - for video in videolist: - icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", - "server_" + video.server + ".png") - if not os.path.exists(icon_server): - icon_server = "" - itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, - title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]" + " " + "[COLOR powderblue]" + idioma + "[/COLOR]" + "[COLOR deepskyblue]--" + calidad + "[/COLOR]", - thumbnail=icon_server, fanart=fanart, action="play", folder=False)) - if not item.infoLabels['episode'] and item.contentType != "movie": - itemlist.append( - Item(channel=item.channel, title="[COLOR paleturquoise][B]Todos los episodios[/B][/COLOR]", url=url_serie, - action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1], thumbnail=item.thumbnail, - thumb_art=item.thumb_art, thumb_info=item.thumb_info, extra=item.extra + "|" + item.thumbnail, - contentType=item.contentType, contentTitle=item.contentTitle, InfoLabels=item.infoLabels, - library=item.library, fulltitle=item.fulltitle, folder=True)) - if item.infoLabels['episode'] and item.library: - - thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg') - if thumbnail == "": - thumbnail = item.thumbnail - if not "assets.fanart" in item.fanart_info: - fanart = item.fanart_info - else: - fanart = item.fanart - itemlist.append( - Item(channel=item.channel, title="[COLOR steelblue][B] info[/B][/COLOR]", url=url, action="info_capitulos", - fanart=item.extra.split("|")[0], thumbnail=item.thumb_art, thumb_info=item.thumb_info, - extra=item.extra, show=item.show, InfoLabels=item.infoLabels, folder=False)) - return itemlist - - -def dd_y_o(item): - logger.info() - itemlist = [] - if item.contentType == "movie": - enlaces = item.extra.split("|")[0] - fanart = item.extra.split("|")[2] - - else: - enlaces = item.extra.split("|")[0] - fanart = "" - patron = scrapertools.find_multiple_matches(enlaces, - '
  • .*?href="([^"]+)".*?([^"]+).*?([^"]+)') - for url, idioma, calidad, in patron: - idioma = re.sub(r'\\xc3\\xb1', 'ñ', idioma) - idioma = re.sub(r'\\xc3\\xa9', 'é', idioma) - videolist = servertools.find_video_items(data=url) - for video in videolist: - icon_server = os.path.join(config.get_runtime_path(), "resources", "images", "servers", - "server_" + video.server + ".png") - if not os.path.exists(icon_server): - icon_server = "" - itemlist.append(Item(channel=item.channel, url=video.url, server=video.server, - title="[COLOR floralwhite][B]" + video.server + "[/B][/COLOR]" + " " + "[COLOR powderblue]" + idioma + "[/COLOR]" + "[COLOR deepskyblue]--" + calidad + "[/COLOR]", - thumbnail=icon_server, fanart=fanart, action="play", folder=False)) - return itemlist - - -def info_capitulos(item, images={}): - logger.info() - try: - url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + str(item.InfoLabels['tvdb_id']) + "/default/" + str( - item.InfoLabels['season']) + "/" + str(item.InfoLabels['episode']) + "/es.xml" - if "/0" in url: - url = url.replace("/0", "/") - from core import jsontools - data = httptools.downloadpage(url).data - if "episodes" in data: - image = scrapertools.find_single_match(data, '.*?(.*?)') - image = "http://thetvdb.com/banners/" + image - else: - try: - image = item.InfoLabels['episodio_imagen'] - except: - image = "http://imgur.com/ZiEAVOD.png" - - foto = item.thumb_info - if not ".png" in foto: - foto = "http://imgur.com/AdGHzKS.png" - try: - title = item.InfoLabels['episodio_titulo'] - except: - title = "" - title = "[COLOR red][B]" + title + "[/B][/COLOR]" - - try: - plot = item.InfoLabels['episodio_sinopsis'] - except: - plot = scrapertools.find_single_match(data, '(.*?)') - if plot == "": - plot = "Sin información todavia" - try: - rating = item.InfoLabels['episodio_vote_average'] - except: - rating = 0 - try: - - if rating >= 5 and rating < 8: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR springgreen][B]" + str(rating) + "[/B][/COLOR]" - elif rating >= 8 and rating < 10: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR yellow][B]" + str(rating) + "[/B][/COLOR]" - elif rating == 10: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR orangered][B]" + str(rating) + "[/B][/COLOR]" - else: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" - except: - rating = "[COLOR yellow]Puntuación[/COLOR] " + "[COLOR crimson][B]" + str(rating) + "[/B][/COLOR]" - if "10." in rating: - rating = re.sub(r'10\.\d+', '10', rating) - - - except: - - title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" - plot = "Este capitulo no tiene informacion..." 
- plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]" - image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" - foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" - rating = "" - - ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating) - ventana.doModal() - - -class TextBox2(xbmcgui.WindowDialog): - """ Create a skinned textbox window """ - - def __init__(self, *args, **kwargs): - self.getTitle = kwargs.get('title') - self.getPlot = kwargs.get('plot') - self.getThumbnail = kwargs.get('thumbnail') - self.getFanart = kwargs.get('fanart') - self.getRating = kwargs.get('rating') - - self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/PKOYIzX.jpg') - self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) - self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45) - self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) - self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) - self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) - - self.addControl(self.background) - self.background.setAnimations( - [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',), - ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)]) - self.addControl(self.thumbnail) - self.thumbnail.setAnimations([('conditional', - 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',), - ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)]) - self.addControl(self.plot) - self.plot.setAnimations( - [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), ( - 'conditional', - 'effect=rotate delay=2000 center=auto aceleration=6000 start=0% end=360% time=800 condition=true',), - ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)]) - self.addControl(self.fanart) - self.fanart.setAnimations( - [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), ( - 'conditional', - 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',), - ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)]) - self.addControl(self.title) - self.title.setText(self.getTitle) - self.title.setAnimations( - [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',), - ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)]) - self.addControl(self.rating) - self.rating.setText(self.getRating) - self.rating.setAnimations( - [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',), - ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)]) - xbmc.sleep(200) - - try: - self.plot.autoScroll(7000, 6000, 30000) - except: - - xbmc.executebuiltin( - 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') - self.plot.setText(self.getPlot) - - def get(self): - self.show() - - def onAction(self, action): - if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92: - self.close() - - -def test(): - return True - - -def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match): - i = 0 - while i < len(text): - m = match(text, i) - s = 
m.group(m.lastindex) - i = m.end() - if m.lastindex == 2: - yield "s" - yield text[i:i + int(s)] - i = i + int(s) - else: - yield s - - -def decode_item(next, token): - if token == "i": - # integer: "i" value "e" - data = int(next()) - if next() != "e": - raise ValueError - elif token == "s": - # string: "s" value (virtual tokens) - data = next() - elif token == "l" or token == "d": - # container: "l" (or "d") values "e" - data = [] - tok = next() - while tok != "e": - data.append(decode_item(next, tok)) - tok = next() - if token == "d": - data = dict(zip(data[0::2], data[1::2])) - else: - raise ValueError - return data - - -def decode(text): - try: - src = tokenize(text) - data = decode_item(src.next, src.next()) - for token in src: # look for more tokens - raise SyntaxError("trailing junk") - except (AttributeError, ValueError, StopIteration): - try: - data = data - except: - data = src - - return data - - -def convert_size(size): - import math - if (size == 0): - return '0B' - size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") - i = int(math.floor(math.log(size, 1024))) - p = math.pow(1024, i) - s = round(size / p, 2) - return '%s %s' % (s, size_name[i]) - - -def fanartv(item, id_tvdb, id, images={}): - headers = [['Content-Type', 'application/json']] - from core import jsontools - if item.contentType == "movie": - url = "http://webservice.fanart.tv/v3/movies/%s?api_key=cab16e262d72fea6a6843d679aa10300" \ - % id - else: - url = "http://webservice.fanart.tv/v3/tv/%s?api_key=cab16e262d72fea6a6843d679aa10300" % id_tvdb - try: - data = jsontools.load(scrapertools.downloadpage(url, headers=headers)) - if data and not "error message" in data: - for key, value in data.items(): - if key not in ["name", "tmdb_id", "imdb_id", "thetvdb_id"]: - images[key] = value - else: - images = [] - - except: - images = [] - return images - - -def filmaffinity(item, infoLabels): - title = infoLabels["title"].replace(" ", "+") - try: - year = infoLabels["year"] - except: - year = "" - sinopsis = infoLabels["sinopsis"] - - if year == "": - if item.contentType != "movie": - tipo = "serie" - url_bing = "http://www.bing.com/search?q=%s+Serie+de+tv+site:filmaffinity.com" % title - else: - tipo = "película" - url_bing = "http://www.bing.com/search?q=%s+site:filmaffinity.com" % title - try: - data = browser(url_bing) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - if "myaddrproxy.php" in data: - subdata_bing = scrapertools.get_match(data, - 'li class="b_algo">

((.*?)') - sinopsis_f = sinopsis_f.replace("<br>", "\n") - sinopsis_f = re.sub(r"\(FILMAFFINITY\)", "", sinopsis_f) - try: - year_f = scrapertools.get_match(data, 'Año.*?>(\d+)') - except: - year_f = "" - try: - rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') - except: - rating_filma = "Sin puntuacion" - critica = "" - patron = '(.*?).*?itemprop="author">(.*?)\s*(.*?)h="ID.*?.*?TV Series') - else: - urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( - item.fulltitle.replace(' ', '+'), year) - data = browser(urlbing_imdb) - data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data) - subdata_imdb = scrapertools.find_single_match(data, '
  • (.*?)h="ID.*?') - try: - imdb_id = scrapertools.get_match(subdata_imdb, '(.*?)h="ID.*?.*?TV Series') - else: - urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % ( - title.replace(' ', '+'), year) - data = browser(urlbing_imdb) - data = re.sub(r"\n|\r|\t|\s{2}| |http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", - data) - subdata_imdb = scrapertools.find_single_match(data, - '
  • (.*?)h="ID.*?') - try: - imdb_id = scrapertools.get_match(subdata_imdb, - '= 4: - if imagenes[0] != check_fanart: - item.fanart = imagenes[0] - else: - item.fanart = imagenes[1] - if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: - item.extra = imagenes[1] + "|" + imagenes[2] - - else: - if imagenes[1] != check_fanart and imagenes[1] != item.fanart: - item.extra = imagenes[1] + "|" + imagenes[3] - elif imagenes[2] != check_fanart: - item.extra = imagenes[2] + "|" + imagenes[3] - else: - item.extra = imagenes[3] + "|" + imagenes[3] - elif len(imagenes) == 3: - - if imagenes[0] != check_fanart: - item.fanart = imagenes[0] - else: - item.fanart = imagenes[1] - - if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: - item.extra = imagenes[1] + "|" + imagenes[2] - - - else: - if imagenes[1] != check_fanart and imagenes[1] != item.fanart: - item.extra = imagenes[0] + "|" + imagenes[1] - elif imagenes[2] != check_fanart: - item.extra = imagenes[1] + "|" + imagenes[2] - else: - item.extra = imagenes[1] + "|" + imagenes[1] - elif len(imagenes) == 2: - if imagenes[0] != check_fanart: - item.fanart = imagenes[0] - else: - item.fanart = imagenes[1] - if imagenes[1] != check_fanart and imagenes[1] != item.fanart: - item.extra = imagenes[0] + "|" + imagenes[1] - else: - item.extra = imagenes[1] + "|" + imagenes[0] - elif len(imagenes) == 1: - item.extra = imagenes[0] + "|" + imagenes[0] - else: - item.extra = item.fanart + "|" + item.fanart - id_tvdb = "" - else: - # item.infoLabels['year']=None - # item.infoLabels['filtro']=None - - if itmdb.result.get("external_ids").get("tvdb_id"): - id_tvdb = itmdb.result.get("external_ids").get("tvdb_id") - else: - id_tvdb = "" - if len(imagenes) >= 6: - - if imagenes[0] != check_fanart: - item.fanart = imagenes[0] - else: - item.fanart = imagenes[1] - if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: - item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + \ - imagenes[5] - - else: - if imagenes[1] != check_fanart and imagenes[1] != item.fanart: - item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \ - imagenes[2] - elif imagenes[2] != check_fanart: - item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + \ - imagenes[1] - else: - item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[5] + "|" + imagenes[2] + "|" + \ - imagenes[1] - elif len(imagenes) == 5: - if imagenes[0] != check_fanart: - item.fanart = imagenes[0] - else: - item.fanart = imagenes[1] - if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: - item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] - - - else: - - if imagenes[1] != check_fanart and imagenes[1] != item.fanart: - item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] - elif imagenes[2] != check_fanart: - item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[4] + "|" + imagenes[1] - else: - item.extra = imagenes[3] + "|" + imagenes[4] + "|" + imagenes[2] + "|" + imagenes[1] - elif len(imagenes) == 4: - if imagenes[0] != check_fanart: - item.fanart = imagenes[0] - else: - item.fanart = imagenes[1] - if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: - item.extra = imagenes[1] + "|" + imagenes[2] + "|" + imagenes[3] + "|" + 
imagenes[4] - - else: - if imagenes[1] != check_fanart and imagenes[1] != item.fanart: - item.extra = imagenes[1] + "|" + imagenes[3] + "|" + imagenes[2] - elif imagenes[2] != check_fanart: - item.extra = imagenes[2] + "|" + imagenes[3] + "|" + imagenes[1] - else: - item.extra = imagenes[3] + "|" + imagenes[2] + "|" + imagenes[1] - - elif len(imagenes) == 3: - if imagenes[0] != check_fanart: - item.fanart = imagenes[0] - else: - item.fanart = imagenes[1] - - if imagenes[1] != check_fanart and imagenes[1] != item.fanart and imagenes[2] != check_fanart: - item.extra = imagenes[1] + "|" + imagenes[2] - - else: - - if imagenes[1] != check_fanart and imagenes[1] != item.fanart: - item.extra = imagenes[0] + "|" + imagenes[1] - elif imagenes[2] != check_fanart: - item.extra = imagenes[1] + "|" + imagenes[2] - else: - item.extra = imagenes[1] + "|" + imagenes[1] - elif len(imagenes) == 2: - if imagenes[0] != check_fanart: - item.fanart = imagenes[0] - else: - item.fanart = imagenes[1] - if imagenes[1] != check_fanart and imagenes[1] != item.fanart: - item.extra = imagenes[0] + "|" + imagenes[1] - else: - item.extra = imagenes[1] + "|" + imagenes[0] - elif len(imagenes) == 1: - item.extra = imagenes[0] + "|" + imagenes[0] - else: - item.extra = item.fanart + "|" + item.fanart - item.extra = item.extra - images_fanarttv = fanartv(item, id_tvdb, id) - if images_fanarttv: - if item.contentType == "movie": - if images_fanarttv.get("moviedisc"): - item.thumbnail = images_fanarttv.get("moviedisc")[0].get("url") - elif images_fanarttv.get("hdmovielogo"): - item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url") - elif images_fanarttv.get("moviethumb"): - item.thumbnail = images_fanarttv.get("moviethumb")[0].get("url") - elif images_fanarttv.get("moviebanner"): - item.thumbnail_ = images_fanarttv.get("moviebanner")[0].get("url") - else: - item.thumbnail = item.thumbnail - else: - if images_fanarttv.get("hdtvlogo"): - item.thumbnail = images_fanarttv.get("hdtvlogo")[0].get("url") - elif images_fanarttv.get("clearlogo"): - item.thumbnail = images_fanarttv.get("hdmovielogo")[0].get("url") - item.thumb_info = item.thumbnail - if images_fanarttv.get("tvbanner"): - item.thumb_art = images_fanarttv.get("tvbanner")[0].get("url") - elif images_fanarttv.get("tvthumb"): - item.thumb_art = images_fanarttv.get("tvthumb")[0].get("url") - else: - item.thumb_art = item.thumbnail - - else: - item.extra = item.extra + "|" + item.thumbnail diff --git a/plugin.video.alfa/channels/bricocine.json b/plugin.video.alfa/channels/bricocine.json deleted file mode 100755 index f12e800e..00000000 --- a/plugin.video.alfa/channels/bricocine.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "id": "bricocine", - "name": "Bricocine", - "active": true, - "adult": false, - "language": ["cast"], - "thumbnail": "http://s6.postimg.org/9u8m1ep8x/bricocine.jpg", - "banner": "bricocine.png", - "categories": [ - "torrent", - "movie", - "tvshow" - ], - "settings": [ - { - "id": "include_in_global_search", - "type": "bool", - "label": "Incluir en busqueda global", - "default": true, - "enabled": true, - "visible": true - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/bricocine.py b/plugin.video.alfa/channels/bricocine.py deleted file mode 100755 index 3da0d7bf..00000000 --- a/plugin.video.alfa/channels/bricocine.py +++ /dev/null @@ -1,2308 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import re -import urllib -import urllib2 - -import xbmcgui -from core import scrapertools -from core import servertools -from core.item 
import Item -from platformcode import logger - - -## Cargar los datos con la librería 'requests' -def get_page(url): - from lib import requests - response = requests.get(url) - return response.content - - -def browser(url): - import mechanize - - # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing - br = mechanize.Browser() - # Browser options - br.set_handle_equiv(False) - br.set_handle_gzip(True) - br.set_handle_redirect(True) - br.set_handle_referer(False) - br.set_handle_robots(False) - # Follows refresh 0 but not hangs on refresh > 0 - br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1) - - # Want debugging messages? - # br.set_debug_http(True) - # br.set_debug_redirects(True) - # br.set_debug_responses(True) - - # User-Agent (this is cheating, ok?) - br.addheaders = [('User-agent', - 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')] - # br.addheaders =[('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')] - # Open some site, let's pick a random one, the first that pops in mind - r = br.open(url) - response = r.read() - print response - if not ".ftrH,.ftrHd,.ftrD>" in response: - r = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url) - print "prooooxy" - response = r.read() - return response - - -def mainlist(item): - logger.info() - - itemlist = [] - itemlist.append( - Item(channel=item.channel, title="[COLOR sandybrown][B]Pelis MicroHD[/B][/COLOR]", action="peliculas", - url="http://www.bricocine.com/c/hd-microhd/", thumbnail="http://s6.postimg.org/5vgi38jf5/HD_brico10.jpg", - fanart="http://s16.postimg.org/6g9tc2nyt/brico_pelifan.jpg")) - itemlist.append( - Item(channel=item.channel, title="[COLOR sandybrown][B]Pelis Bluray-Rip[/B][/COLOR]", action="peliculas", - url="http://www.bricocine.com/c/bluray-rip/", thumbnail="http://s6.postimg.org/5w82dorpt/blueraybrico.jpg", - fanart="http://i59.tinypic.com/11rdnjm.jpg")) - itemlist.append( - Item(channel=item.channel, title="[COLOR sandybrown][B]Pelis DVD-Rip[/B][/COLOR]", action="peliculas", - url="http://www.bricocine.com/c/dvdrip/", thumbnail="http://s6.postimg.org/d2dlld4y9/dvd2.jpg", - fanart="http://s6.postimg.org/hcehbq5w1/brico_blue_fan.jpg")) - itemlist.append(Item(channel=item.channel, title="[COLOR sandybrown][B]Pelis 3D[/B][/COLOR]", action="peliculas", - url="http://www.bricocine.com/c/3d/", - thumbnail="http://www.eias3d.com/wp-content/uploads/2011/07/3d2_5.png", - fanart="http://s6.postimg.org/u18rvec0h/bric3dd.jpg")) - import xbmc - ###Para musica(si hay) y borra customkeys - if xbmc.Player().isPlaying(): - xbmc.executebuiltin('xbmc.PlayMedia(Stop)') - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "Appcommand borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - 
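# A condensed sketch of the Bing-scrape fallback implemented by browser() above:
# when the response lacks the ".ftrH,.ftrHd,.ftrD>" CSS marker (Bing did not serve
# the normal results page), the same URL is retried through the anonymouse.org
# pass-through proxy. fetch_with_fallback is an illustrative name, not part of the
# original channel file; br is the configured mechanize.Browser instance.
def fetch_with_fallback(br, url):
    response = br.open(url).read()
    if ".ftrH,.ftrHd,.ftrD>" not in response:
        # Retry via the anonymiser when Bing blocks or alters the page
        response = br.open("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url).read()
    return response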
xbmc.executebuiltin('Action(reloadkeymaps)') - print "No hay customs" - - itemlist.append(Item(channel=item.channel, title="[COLOR sandybrown][B]Series[/B][/COLOR]", action="peliculas", - url="http://www.bricocine.com/c/series", - thumbnail="http://img0.mxstatic.com/wallpapers/bc795faa71ba7c490fcf3961f3b803bf_large.jpeg", - fanart="http://s6.postimg.org/z1ath370x/bricoseries.jpg", extra="Series")) - import xbmc - if xbmc.Player().isPlaying(): - print "PLAYIIING" - xbmc.executebuiltin('xbmc.PlayMedia(Stop)') - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - SEARCHDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "search.txt") - TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "Appcommand borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - xbmc.executebuiltin('Action(reloadkeymaps)') - print "No hay customs" - try: - os.remove(SEARCHDESTFILE) - print "Custom search.txt borrado" - except: - print "No hay search.txt" - - try: - os.remove(TRAILERDESTFILE) - print "Custom Trailer.txt borrado" - except: - print "No hay Trailer.txt" - itemlist.append(Item(channel=item.channel, title="[COLOR sandybrown][B]Buscar[/B][/COLOR]", action="search", url="", - thumbnail="http://fc04.deviantart.net/fs70/i/2012/285/3/2/poltergeist___tv_wallpaper_by_elclon-d5hmmlp.png", - fanart="http://s6.postimg.org/f44w84o5t/bricosearch.jpg", extra="search")) - - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = "http://www.bricocine.com/index.php/?s=%s" % texto - - try: - return peliculas(item, texto.replace("+", " ")) - # Se captura la excepción, para no interrumpir al buscador global si un canal falla - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - -def peliculas(item, texto=""): - logger.info() - itemlist = [] - - # Borra customkeys - import xbmc - if xbmc.Player().isPlaying(): - xbmc.executebuiltin('xbmc.PlayMedia(Stop)') - - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") - - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "App borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - xbmc.executebuiltin('Action(reloadkeymaps)') - print "No hay customs" - - try: - os.remove(TRAILERDESTFILE) - print "Trailer.txt borrado" - except: - 
print "No hay Trailer.txt" - - # Descarga la página - data = get_page(item.url) - data = re.sub(r"amp;", "", data) - ''' -
[sample result markup for "The Leftovers" / "The Leftovers – Temporada 1"; HTML tags lost in extraction] - ''' - patron = 'format-standard hentry category(.*?)">.*?' - patron += '
    ' - patron += ' ' - patron += '1> Página siguiente" - try: - next_page = scrapertools.get_match(data, "\d+(.*?)h="ID') - except: - pass - try: - url_tvt = scrapertools.get_match(subdata_tvt, '.*?type="hidden" value="(.*?)"') - song = song.replace(" ", "%20") - print song - xbmc.executebuiltin('xbmc.PlayMedia(' + song + ')') - import xbmc, time - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/test.py", - TESTPYDESTFILE) - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customkey.xml", - KEYMAPDESTFILE) - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remote.xml", - REMOTEDESTFILE) - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml", - APPCOMMANDDESTFILE) - - xbmc.executebuiltin('Action(reloadkeymaps)') - - except: - pass - try: - os.remove(TRAILERDESTFILE) - print "Trailer.txt borrado" - except: - print "No hay Trailer.txt" - - if os.path.exists(SEARCHDESTFILE): - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "Appcommand borrado" - os.remove(SEARCHDESTFILE) - print "search.txt borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - xbmc.executebuiltin('Action(reloadkeymaps)') - print "No hay customs" - ###Busqueda en bing el id de imdb de la serie - urlbing_imdb = "http://www.bing.com/search?q=%s+tv+serie+site:imdb.com" % title.replace(' ', '+') - data = browser(urlbing_imdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - try: - subdata_imdb = scrapertools.get_match(data, '
  • (.*?)h="ID') - except: - pass - - try: - imdb_id = scrapertools.get_match(subdata_imdb, '.*?posters/(.*?)') - postertvdb = "http://thetvdb.com/banners/_cache/posters/" + postertvdb - except: - postertvdb = item.thumbnail - - if len(matches) == 0: - extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" - show = "http://s6.postimg.org/4asrg755b/bricotvshows2.png" - fanart_info = "http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg" - fanart_trailer = "http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg" - itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", - thumbnail=postertvdb, fanart="http://s6.postimg.org/77fsghaz3/bricotvshows4.jpg", - plot=plot, category=category, extra=extra, show=show, folder=True)) - - for fan in matches: - fanart = "http://thetvdb.com/banners/" + fan - fanart_1 = fanart - # Busca fanart para info, fanart para trailer y 2ºfanart - patron = '.*?.*?.*?.*?(.*?).*?.*?(.*?).*?.*?(.*?)' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - fanart_info = fanart_1 - fanart_trailer = fanart_1 - fanart_2 = fanart_1 - show = fanart_1 - extra = postertvdb - itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", - thumbnail=postertvdb, fanart=fanart_1, plot=plot, category=category, - extra=extra, show=show, folder=True)) - for fanart_info, fanart_trailer, fanart_2 in matches: - fanart_info = "http://thetvdb.com/banners/" + fanart_info - fanart_trailer = "http://thetvdb.com/banners/" + fanart_trailer - fanart_2 = "http://thetvdb.com/banners/" + fanart_2 - # Busqueda de todos loas arts posibles - for id in matches: - url_fanartv = "http://webservice.fanart.tv/v3/tv/" + id_serie + "?api_key=dffe90fba4d02c199ae7a9e71330c987" - data = scrapertools.cachePage(url_fanartv) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '"clearlogo":.*?"url": "([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - if '"tvposter"' in data: - tvposter = scrapertools.get_match(data, '"tvposter":.*?"url": "([^"]+)"') - if '"tvbanner"' in data: - tvbanner = scrapertools.get_match(data, '"tvbanner":.*?"url": "([^"]+)"') - if '"tvthumb"' in data: - tvthumb = scrapertools.get_match(data, '"tvthumb":.*?"url": "([^"]+)"') - if '"hdtvlogo"' in data: - hdtvlogo = scrapertools.get_match(data, '"hdtvlogo":.*?"url": "([^"]+)"') - if '"hdclearart"' in data: - hdtvclear = scrapertools.get_match(data, '"hdclearart":.*?"url": "([^"]+)"') - if len(matches) == 0: - item.thumbnail = postertvdb - if '"hdtvlogo"' in data: - if "showbackground" in data: - - if '"hdclearart"' in data: - thumbnail = hdtvlogo - extra = hdtvclear - show = fanart_2 - else: - thumbnail = hdtvlogo - extra = thumbnail - show = fanart_2 - itemlist.append( - Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, - category=category, extra=extra, show=show, folder=True)) - - - else: - if '"hdclearart"' in data: - thumbnail = hdtvlogo - extra = hdtvclear - show = fanart_2 - else: - thumbnail = hdtvlogo - extra = thumbnail - show = fanart_2 - - itemlist.append( - Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, extra=extra, - show=show, category=category, folder=True)) - else: - extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" - show = fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, 
action="findvideos", url=item.url, - server="torrent", thumbnail=item.thumbnail, plot=plot, fanart=fanart_1, - extra=extra, show=show, category=category, folder=True)) - - for logo in matches: - if '"hdtvlogo"' in data: - thumbnail = hdtvlogo - elif not '"hdtvlogo"' in data: - if '"clearlogo"' in data: - thumbnail = logo - else: - thumbnail = item.thumbnail - if '"clearart"' in data: - clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') - if "showbackground" in data: - - extra = clear - show = fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, - extra=extra, show=show, category=category, folder=True)) - else: - extra = clear - show = fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, - extra=extra, show=show, category=category, folder=True)) - - if "showbackground" in data: - - if '"clearart"' in data: - clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"') - extra = clear - show = fanart_2 - else: - extra = logo - show = fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, - extra=extra, show=show, category=category, folder=True)) - - if not '"clearart"' in data and not '"showbackground"' in data: - if '"hdclearart"' in data: - extra = hdtvclear - show = fanart_2 - else: - extra = thumbnail - show = fanart_2 - itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, - server="torrent", thumbnail=thumbnail, plot=plot, fanart=fanart_1, extra=extra, - show=show, category=category, folder=True)) - - else: - ###Películas - title = title.decode('utf8').encode('latin1') - title = title.replace("&", " y ") - if title == "JustiCia": - title = "Justi&cia" - if title == "El milagro": - title = "Miracle" - if "La Saga Crepusculo" in title: - title = re.sub(r"La Saga", "", title) - - year = item.show.split("|")[1] - if "Saga" in title: - title = title.replace('Saga completa', '') - title = title.replace('Saga', '') - title_collection = title.replace(" ", "+") - url_collection = "http://api.themoviedb.org/3/search/collection?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_collection + "+&language=es" - data = scrapertools.cachePage(url_collection) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - try: - id = scrapertools.get_match(data, '"page":1.*?"id":(.*?),') - except: - id = "" - urlc_images = "http://api.themoviedb.org/3/collection/" + id + "?api_key=2e2160006592024ba87ccdf78c28f49f" - data = scrapertools.cachePage(urlc_images) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '"poster_path":"(.*?)","backdrop_path":"(.*?)".*?"backdrop_path":"(.*?)".*?"backdrop_path":"(.*?)".*?"backdrop_path":"(.*?)"' - matches = re.compile(patron, re.DOTALL).findall(data) - scrapertools.printMatches(matches) - if len(matches) == 0: - posterdb = item.thumbnail - extra = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" - fanart_1 = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" - fanart = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" - fanart_info = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" - fanart_trailer = 
"http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" - fanart_2 = "http://img1.gtsstatic.com/wallpapers/55cb135265088aeee5147c2db20515d8_large.jpeg" - for posterdb, fanart_1, fanart_info, fanart_trailer, fanart_2 in matches: - posterdb = "https://image.tmdb.org/t/p/original" + posterdb - fanart_1 = "https://image.tmdb.org/t/p/original" + fanart_1 - fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info - fanart_trailer = "https://image.tmdb.org/t/p/original" + fanart_trailer - fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2 - - else: - - try: - try: - ###Busqueda en Tmdb la peli por titulo y año - title_tmdb = title.replace(" ", "%20") - url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') - except: - if ":" in title or "(" in title: - title_tmdb = title.replace(" ", "%20") - url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') - else: - title_tmdb = title.replace(" ", "%20") - title_tmdb = re.sub(r"(:.*)|\(.*?\)", "", title_tmdb) - url_tmdb = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title_tmdb + "&year=" + year + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, '"page":1.*?,"id":(.*?),') - - - except: - ###Si no hay coincidencia realiza busqueda por bing del id Imdb - urlbing_imdb = "http://www.bing.com/search?q=%s+%s+site:imdb.com" % (title.replace(' ', '+'), year) - data = browser(urlbing_imdb) - - try: - subdata_imdb = scrapertools.get_match(data, '
  • (.*?)h="ID') - subdata_imdb = re.sub("http://anonymouse.org/cgi-bin/anon-www.cgi/", "", subdata_imdb) - except: - pass - - try: - url_imdb = scrapertools.get_match(subdata_imdb, '(.*?)h="ID') - subdata_imdb = re.sub("http://anonymouse.org/cgi-bin/anon-www.cgi/", "", subdata_imdb) - except: - pass - try: - url_imdb = scrapertools.get_match(subdata_imdb, '.*?src="([^"]+)"') - poster_imdb = poster_imdb.replace("._.*?jpg", "._V1_SX640_SY720_.jpg") - - except: - poster_imdb = posterdb - - try: - url_photo = scrapertools.get_match(data, - '
    .*?(.*?)') - patron = '([^<]+)-.*?(\d)(\d+)([^<]+).*?' - patron += 'id="([^"]+)".*?href="([^"]+)".*?id="([^"]+)" href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos) - if len(matches) == 0: - patron = '(.*?)(\d)(\d+)([^<]+).*?' - patron += 'id="([^"]+)".*?href="([^"]+)".*?id="([^"]+)".*?href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos) - if len(matches) == 0: - show = item.show - extra = item.thumbnail - ###Se identifica como serie respetando en anterior item.category - category = item.category + "|" + "series" - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]Ooops!! Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]", - action="findvideos_peli", url=item.url, - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra, - show=show, category=category, plot=item.plot, folder=True)) - - import base64 - for title_links, seasson, epi, calidad, title_torrent, url_torrent, title_magnet, url_magnet in matches: - try: - season = scrapertools.get_match(data, '.*?Temporada.*?(\d+).*?Torrent') - except: - try: - ###Busqueda de season el las series que no vienen bien tipificadas como tal - season = scrapertools.get_match(data, '<span class="title">.*?-.*?(\d+)x') - except: - season = "0" - epi = re.sub(r"101|201|301|401|501|601|701|801|901", "01", epi) - epi = re.sub(r"102|202|302|402|502|602|702|802|902", "02", epi) - epi = re.sub(r"103|203|303|403|503|603|703|803|903", "03", epi) - epi = re.sub(r"104|204|304|404|504|604|704|804|904", "04", epi) - epi = re.sub(r"105|205|305|405|505|605|705|805|905", "05", epi) - epi = re.sub(r"106|206|306|406|506|606|706|806|906", "06", epi) - epi = re.sub(r"107|207|307|407|507|607|707|807|907", "07", epi) - epi = re.sub(r"108|208|308|408|508|608|708|808|908", "08", epi) - epi = re.sub(r"109|209|309|409|509|609|709|809|909", "09", epi) - epi = re.sub(r"110|210|310|410|510|610|710|810|910", "10", epi) - epi = re.sub(r"111|211|311|411|511|611|711|811|911", "11", epi) - epi = re.sub(r"112|212|312|412|512|612|712|812|912", "12", epi) - epi = re.sub(r"113|213|313|413|513|613|713|813|913", "13", epi) - epi = re.sub(r"114|214|314|414|514|614|714|814|914", "14", epi) - epi = re.sub(r"115|215|315|415|515|615|715|815|915", "15", epi) - epi = re.sub(r"116|216|316|416|516|616|716|816|916", "16", epi) - epi = re.sub(r"117|217|317|417|517|617|717|817|917", "17", epi) - epi = re.sub(r"118|218|318|418|518|618|718|818|918", "18", epi) - epi = re.sub(r"119|219|319|419|519|619|719|819|919", "19", epi) - epi = re.sub(r"120|220|320|420|520|620|720|820|920", "20", epi) - epi = re.sub(r"121|221|321|421|521|621|721|821|921", "21", epi) - epi = re.sub(r"122|222|322|422|522|622|722|822|922", "22", epi) - epi = re.sub(r"123|223|323|423|523|623|723|823|923", "23", epi) - epi = re.sub(r"124|224|324|424|524|624|724|824|924", "24", epi) - epi = re.sub(r"125|225|325|425|525|625|725|825|925", "25", epi) - epi = re.sub(r"126|226|326|426|526|626|726|826|926", "26", epi) - epi = re.sub(r"127|227|327|427|527|627|727|827|927", "27", epi) - epi = re.sub(r"128|228|328|428|528|628|728|828|928", "28", epi) - epi = re.sub(r"129|229|329|429|529|629|729|829|929", "29", epi) - epi = re.sub(r"130|230|330|430|530|630|730|830|930", "30", epi) - - seasson_epi = season + "x" + epi - seasson_epi = seasson_epi.replace(seasson_epi, "[COLOR sandybrown]" + seasson_epi + "[/COLOR]") - ###Ajuste de episodio para info_epi - if "x0" in 
seasson_epi: - epi = epi.replace("0", "") - - title_links = title_links.replace("\\'s", "'s") - title_torrent = "[" + title_torrent.replace("file", "torrent") + "]" - title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]") - title_magnet = "[" + "magnet" + "]" - title_magnet = "[COLOR red]Opción[/COLOR]" + " " + title_magnet.replace(title_magnet, - "[COLOR crimson]" + title_magnet + "[/COLOR]") - calidad = calidad.replace(calidad, "[COLOR sandybrown]" + calidad + "[/COLOR]") - title_links = title_links.replace(title_links, "[COLOR orange]" + title_links + "[/COLOR]") - title_torrent = title_links + " " + seasson_epi + calidad + "- " + title_torrent - url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1]) - url_magnet = base64.decodestring(url_magnet.split('&u=')[1][::-1]) - title_links = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| |REPARADO", "", title_links) - title_links = title_links.replace('\[.*?\]', '') - title_links = title_links.replace('á', 'a') - title_links = title_links.replace('Á', 'A') - title_links = title_links.replace('é', 'e') - title_links = title_links.replace('í', 'i') - title_links = title_links.replace('ó', 'o') - title_links = title_links.replace('ú', 'u') - title_links = title_links.replace(' ', '%20') - - extra = season + "|" + title_links + "|" + epi - if "sinopsis.png" in item.extra: - item.extra = item.thumbnail - if "bricotvshows2.png" in item.show: - item.show = item.fanart - - itemlist.append(Item(channel=item.channel, title=title_torrent, action="episodios", url=url_torrent, - thumbnail=item.extra, fanart=item.show, plot=item.plot, extra=extra, - category=item.category, folder=True)) - itemlist.append(Item(channel=item.channel, title=title_magnet, action="episodios", url=url_magnet, - thumbnail=item.extra, fanart=item.show, extra=extra, plot=item.plot, - category=item.category, folder=True)) - try: - ###Comprueba si, aparte de cápitulos torrent/magnet hay algun torrent suelto sin magnet - checktorrent = scrapertools.get_match(data, - 'id="magnet".*?Descargar .torrent<\/a><\/li><\/ul><\/td><\/tr><tr><td><span class="title">.*?rel="nofollow">(.*?)<\/a><\/li><\/ul><\/td><\/tr><tr><td>') - except: - checktorrent = "" - ###Busqueda Torrent si los encuentra sueltos - if checktorrent == "Descargar .torrent": - torrent_bloque = scrapertools.get_match(data, - 'id="file".*?id="magnet".*?<span class="title">.*?<a id="file".*?a id="file".*?class="btn btn-primary".*?d="file"(.*?class="btn btn-primary".*?)</table>') - - patron = '<span class="title">([^<]+)- (\d)(\d+)([^<]+).*?' - patron += 'id="file".*?href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(torrent_bloque) - if len(matches) == 0: - patron = '<span class="title">(.*?)(\d)(\d+)([^<]+)</span></td>.*?' - patron += 'id="([^"]+)".*?href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos) - if len(matches) == 0: - show = item.show - extra = item.thumbnail - category = item.category + "|" + "series" - - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]Ooops!! 
Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]", - action="findvideos_peli", url=item.url, - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra, - show=show, category=category, plot=item.plot, folder=True)) - - import base64 - - for title_links, seasson, epi, calidad, url_torrent in matches: - ## torrent - try: - season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent') - except: - ###Busqueda de season el las series que no vienen bien tipificadas como tal - season = scrapertools.get_match(data, '<span class="title">.*?-.*?(\d+)x') - epi = re.sub(r"101|201|301|401|501|601|701|801|901", "01", epi) - epi = re.sub(r"102|202|302|402|502|602|702|802|902", "02", epi) - epi = re.sub(r"103|203|303|403|503|603|703|803|903", "03", epi) - epi = re.sub(r"104|204|304|404|504|604|704|804|904", "04", epi) - epi = re.sub(r"105|205|305|405|505|605|705|805|905", "05", epi) - epi = re.sub(r"106|206|306|406|506|606|706|806|906", "06", epi) - epi = re.sub(r"107|207|307|407|507|607|707|807|907", "07", epi) - epi = re.sub(r"108|208|308|408|508|608|708|808|908", "08", epi) - epi = re.sub(r"109|209|309|409|509|609|709|809|909", "09", epi) - epi = re.sub(r"110|210|310|410|510|610|710|810|910", "10", epi) - epi = re.sub(r"111|211|311|411|511|611|711|811|911", "11", epi) - epi = re.sub(r"112|212|312|412|512|612|712|812|912", "12", epi) - epi = re.sub(r"113|213|313|413|513|613|713|813|913", "13", epi) - epi = re.sub(r"114|214|314|414|514|614|714|814|914", "14", epi) - epi = re.sub(r"115|215|315|415|515|615|715|815|915", "15", epi) - epi = re.sub(r"116|216|316|416|516|616|716|816|916", "16", epi) - epi = re.sub(r"117|217|317|417|517|617|717|817|917", "17", epi) - epi = re.sub(r"118|218|318|418|518|618|718|818|918", "18", epi) - epi = re.sub(r"119|219|319|419|519|619|719|819|919", "19", epi) - epi = re.sub(r"120|220|320|420|520|620|720|820|920", "20", epi) - epi = re.sub(r"121|221|321|421|521|621|721|821|921", "21", epi) - epi = re.sub(r"122|222|322|422|522|622|722|822|922", "22", epi) - epi = re.sub(r"123|223|323|423|523|623|723|823|923", "23", epi) - epi = re.sub(r"124|224|324|424|524|624|724|824|924", "24", epi) - epi = re.sub(r"125|225|325|425|525|625|725|825|925", "25", epi) - epi = re.sub(r"126|226|326|426|526|626|726|826|926", "26", epi) - epi = re.sub(r"127|227|327|427|527|627|727|827|927", "27", epi) - epi = re.sub(r"128|228|328|428|528|628|728|828|928", "28", epi) - epi = re.sub(r"129|229|329|429|529|629|729|829|929", "29", epi) - epi = re.sub(r"130|230|330|430|530|630|730|830|930", "30", epi) - seasson_epi = season + "x" + epi - seasson_epi = seasson_epi.replace(seasson_epi, "[COLOR sandybrown]" + seasson_epi + "[/COLOR]") - if "x0" in seasson_epi: - epi = epi.replace("0", "") - title_torrent = "[torrent]" - title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]") - calidad = calidad.replace(calidad, "[COLOR sandybrown]" + calidad + "[/COLOR]") - title_links = title_links.replace(title_links, "[COLOR orange]" + title_links + "[/COLOR]") - title_torrent = title_links + " " + seasson_epi + calidad + "- " + title_torrent - url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1]) - title_links = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| |REPARADO", "", title_links) - title_links = title_links.replace('\[.*?\]', '') - title_links = title_links.replace('á', 'a') - title_links = title_links.replace('Á', 'A') - title_links = 
title_links.replace('é', 'e') - title_links = title_links.replace('í', 'i') - title_links = title_links.replace('ó', 'o') - title_links = title_links.replace('ú', 'u') - title_links = title_links.replace(' ', '%20') - extra = season + "|" + title_links + "|" + epi - itemlist.append(Item(channel=item.channel, title=title_torrent, action="episodios", url=url_torrent, - thumbnail=item.extra, fanart=item.show, extra=extra, plot=item.plot, - category=item.category, folder=True)) - else: - ###Busqueda cuando hay Torrent pero no magnet en la serie - if 'id="file"' in data and not 'id="magnet"' in data: - - patron = '<span class="title">([^<]+)- (\d)(\d+)([^<]+).*?' - patron += 'id="([^"]+)".*?href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - patron = '<span class="title">(.*?)(\d)(\d+)([^<]+)</span></td>.*?' - patron += 'id="([^"]+)".*?href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos) - if len(matches) == 0: - show = item.show - extra = item.thumbnail - category = item.category + "|" + "series" - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]Ooops!! Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]", - action="findvideos_peli", url=item.url, - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra, - show=show, category=category, plot=item.plot, folder=True)) - import base64 - for title_links, seasson, epi, calidad, title_torrent, url_torrent in matches: - try: - season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent') - except: - ###Busqueda de season el las series que no vienen bien tipificadas como tal - season = scrapertools.get_match(data, '<span class="title">.*?-.*?(\d+)x') - epi = re.sub(r"101|201|301|401|501|601|701|801|901", "01", epi) - epi = re.sub(r"102|202|302|402|502|602|702|802|902", "02", epi) - epi = re.sub(r"103|203|303|403|503|603|703|803|903", "03", epi) - epi = re.sub(r"104|204|304|404|504|604|704|804|904", "04", epi) - epi = re.sub(r"105|205|305|405|505|605|705|805|905", "05", epi) - epi = re.sub(r"106|206|306|406|506|606|706|806|906", "06", epi) - epi = re.sub(r"107|207|307|407|507|607|707|807|907", "07", epi) - epi = re.sub(r"108|208|308|408|508|608|708|808|908", "08", epi) - epi = re.sub(r"109|209|309|409|509|609|709|809|909", "09", epi) - epi = re.sub(r"110|210|310|410|510|610|710|810|910", "10", epi) - epi = re.sub(r"111|211|311|411|511|611|711|811|911", "11", epi) - epi = re.sub(r"112|212|312|412|512|612|712|812|912", "12", epi) - epi = re.sub(r"113|213|313|413|513|613|713|813|913", "13", epi) - epi = re.sub(r"114|214|314|414|514|614|714|814|914", "14", epi) - epi = re.sub(r"115|215|315|415|515|615|715|815|915", "15", epi) - epi = re.sub(r"116|216|316|416|516|616|716|816|916", "16", epi) - epi = re.sub(r"117|217|317|417|517|617|717|817|917", "17", epi) - epi = re.sub(r"118|218|318|418|518|618|718|818|918", "18", epi) - epi = re.sub(r"119|219|319|419|519|619|719|819|919", "19", epi) - epi = re.sub(r"120|220|320|420|520|620|720|820|920", "20", epi) - epi = re.sub(r"121|221|321|421|521|621|721|821|921", "21", epi) - epi = re.sub(r"122|222|322|422|522|622|722|822|922", "22", epi) - epi = re.sub(r"123|223|323|423|523|623|723|823|923", "23", epi) - epi = re.sub(r"124|224|324|424|524|624|724|824|924", "24", epi) - epi = re.sub(r"125|225|325|425|525|625|725|825|925", "25", epi) - epi = re.sub(r"126|226|326|426|526|626|726|826|926", "26", epi) - epi 
= re.sub(r"127|227|327|427|527|627|727|827|927", "27", epi) - epi = re.sub(r"128|228|328|428|528|628|728|828|928", "28", epi) - epi = re.sub(r"129|229|329|429|529|629|729|829|929", "29", epi) - epi = re.sub(r"130|230|330|430|530|630|730|830|930", "30", epi) - - seasson_epi = season + "x" + epi - seasson_epi = seasson_epi.replace(seasson_epi, "[COLOR sandybrown]" + seasson_epi + "[/COLOR]") - if "x0" in seasson_epi: - epi = epi.replace("0", "") - title_torrent = "[" + title_torrent.replace("file", "torrent") + "]" - title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]") - calidad = calidad.replace(calidad, "[COLOR sandybrown]" + calidad + "[/COLOR]") - title_links = title_links.replace(title_links, "[COLOR orange]" + title_links + "[/COLOR]") - title_torrent = title_links + " " + seasson_epi + calidad + "- " + title_torrent - url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1]) - title_links = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| |REPARADO", "", title_links) - title_links = title_links.replace('\[.*?\]', '') - title_links = title_links.replace('á', 'a') - title_links = title_links.replace('Á', 'A') - title_links = title_links.replace('é', 'e') - title_links = title_links.replace('í', 'i') - title_links = title_links.replace('ó', 'o') - title_links = title_links.replace('ú', 'u') - title_links = title_links.replace(' ', '%20') - extra = season + "|" + title_links + "|" + epi - itemlist.append(Item(channel=item.channel, title=title_torrent, action="episodios", url=url_torrent, - thumbnail=item.extra, fanart=item.show, extra=extra, plot=item.plot, - category=item.category, folder=True)) - ###Busqueda cuando hay Magnet pero no Torrent - if 'id="magnet"' in data and not 'id="file"' in data: - patron = '<span class="title">([^<]+)- (\d)(\d+)([^<]+).*?' - patron += 'id="([^"]+)" href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - patron = '<span class="title">(.*?)(\d)(\d+)([^<]+)</span></td>.*?' - patron += 'id="([^"]+)".*?href="([^"]+)"' - matches = re.compile(patron, re.DOTALL).findall(bloque_capitulos) - if len(matches) == 0: - show = item.show - extra = item.extra - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]Ooops!! 
Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]", - action="findvideos_peli", url=item.url, - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra, show=show, - folder=True)) - import base64 - for title_links, seasson, epi, calidad, title_magnet, url_magnet in matches: - try: - season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent') - except: - ###Busqueda de season el las series que no vienen bien tipificadas como tal - season = scrapertools.get_match(data, '<span class="title">.*?-.*?(\d+)x') - epi = re.sub(r"101|201|301|401|501|601|701|801|901", "01", epi) - epi = re.sub(r"102|202|302|402|502|602|702|802|902", "02", epi) - epi = re.sub(r"103|203|303|403|503|603|703|803|903", "03", epi) - epi = re.sub(r"104|204|304|404|504|604|704|804|904", "04", epi) - epi = re.sub(r"105|205|305|405|505|605|705|805|905", "05", epi) - epi = re.sub(r"106|206|306|406|506|606|706|806|906", "06", epi) - epi = re.sub(r"107|207|307|407|507|607|707|807|907", "07", epi) - epi = re.sub(r"108|208|308|408|508|608|708|808|908", "08", epi) - epi = re.sub(r"109|209|309|409|509|609|709|809|909", "09", epi) - epi = re.sub(r"110|210|310|410|510|610|710|810|910", "10", epi) - epi = re.sub(r"111|211|311|411|511|611|711|811|911", "11", epi) - epi = re.sub(r"112|212|312|412|512|612|712|812|912", "12", epi) - epi = re.sub(r"113|213|313|413|513|613|713|813|913", "13", epi) - epi = re.sub(r"114|214|314|414|514|614|714|814|914", "14", epi) - epi = re.sub(r"115|215|315|415|515|615|715|815|915", "15", epi) - epi = re.sub(r"116|216|316|416|516|616|716|816|916", "16", epi) - epi = re.sub(r"117|217|317|417|517|617|717|817|917", "17", epi) - epi = re.sub(r"118|218|318|418|518|618|718|818|918", "18", epi) - epi = re.sub(r"119|219|319|419|519|619|719|819|919", "19", epi) - epi = re.sub(r"120|220|320|420|520|620|720|820|920", "20", epi) - epi = re.sub(r"121|221|321|421|521|621|721|821|921", "21", epi) - epi = re.sub(r"122|222|322|422|522|622|722|822|922", "22", epi) - epi = re.sub(r"123|223|323|423|523|623|723|823|923", "23", epi) - epi = re.sub(r"124|224|324|424|524|624|724|824|924", "24", epi) - epi = re.sub(r"125|225|325|425|525|625|725|825|925", "25", epi) - epi = re.sub(r"126|226|326|426|526|626|726|826|926", "26", epi) - epi = re.sub(r"127|227|327|427|527|627|727|827|927", "27", epi) - epi = re.sub(r"128|228|328|428|528|628|728|828|928", "28", epi) - epi = re.sub(r"129|229|329|429|529|629|729|829|929", "29", epi) - epi = re.sub(r"130|230|330|430|530|630|730|830|930", "30", epi) - - seasson_epi = season + "x" + epi - seasson_epi = seasson_epi.replace(seasson_epi, "[COLOR sandybrown]" + seasson_epi + "[/COLOR]") - if "x0" in seasson_epi: - epi = epi.replace("0", "") - title_magnet = "[" + "magnet" + "]" - title_magnet = "[COLOR red]Opción[/COLOR]" + " " + title_magnet.replace(title_magnet, - "[COLOR crimson]" + title_magnet + "[/COLOR]") - calidad = calidad.replace(calidad, "[COLOR sandybrown]" + calidad + "[/COLOR]") - title_links = title_links.replace(title_links, "[COLOR orange]" + title_links + "[/COLOR]") - title_magnet = title_links + " " + seasson_epi + calidad + "- " + title_magnet - url_magnet = base64.decodestring(url_magnet.split('&u=')[1][::-1]) - title_links = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]| |REPARADO", "", title_links) - title_links = title_links.replace('\[.*?\]', '') - title_links = title_links.replace('á', 'a') - title_links = title_links.replace('Á', 'A') - title_links = 
title_links.replace('é', 'e') - title_links = title_links.replace('í', 'i') - title_links = title_links.replace('ó', 'o') - title_links = title_links.replace('ú', 'u') - title_links = title_links.replace(' ', '%20') - extra = season + "|" + title_links + "|" + epi - itemlist.append( - Item(channel=item.channel, title=title_magnet, action="episodios", url=url_magnet, thumbnail=item.extra, - fanart=item.show, extra=extra, plot=item.plot, category=item.category, folder=True)) - ###No hay video - if not 'id="file"' in data and not 'id="magnet"' in data: - show = item.show - extra = item.extra - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]Ooops!! Algo no va bien,pulsa para ser dirigido a otra busqueda, ...[/B][/COLOR]", - action="findvideos_peli", url=item.url, - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", extra=extra, show=show, - folder=True)) - - return itemlist - - -def episodios(item): - logger.info() - itemlist = [] - ###Borra Customkey si no hay música - import xbmc - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - if not xbmc.Player().isPlaying() and os.path.exists(TESTPYDESTFILE): - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customkey.xml") - REMOTEDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remote.xml") - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - try: - os.remove(KEYMAPDESTFILE) - print "Custom Keyboard.xml borrado" - os.remove(TESTPYDESTFILE) - print "Testpy borrado" - os.remove(REMOTEDESTFILE) - print "Remote borrado" - os.remove(APPCOMMANDDESTFILE) - print "Appcommand borrado" - xbmc.executebuiltin('Action(reloadkeymaps)') - except Exception as inst: - xbmc.executebuiltin('Action(reloadkeymaps)') - print "No hay customs" - - season = item.extra.split("|")[0] - title_links = item.extra.split("|")[1] - epi = item.extra.split("|")[2] - title_tag = "[COLOR yellow]Ver --[/COLOR]" - item.title = item.title.replace("Ver --", "") - if "magnet" in item.title: - title_links = title_links.replace("%20", "") - title_links = "[COLOR orange]" + title_links + " " + season + "x" + epi + "[/COLOR]" - title = title_tag + title_links + " " + item.title - else: - item.title = re.sub(r"\[.*?\]", "", item.title) - title = title_tag + "[COLOR orange]" + item.title + "[/COLOR]" + "[COLOR green][torrent][/COLOR]" - - if item.plot == "Sensación de vivir: La nueva generación": - item.plot = "90210" - if item.plot == "La historia del universo": - item.plot = "how the universe works" - try: - # Nueva busqueda bing de Imdb serie id - url_imdb = "http://www.bing.com/search?q=%s+tv+series+site:imdb.com" % item.plot.replace(' ', '+') - data = browser(url_imdb) - - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - try: - subdata_imdb = scrapertools.get_match(data, '<li class="b_algo">(.*?)h="ID') - except: - pass - try: - imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"') - except: - imdb_id = "" - ### Busca en Tmdb quinta imagen para episodios mediate Imdb id - urltmdb_imdb = "https://api.themoviedb.org/3/find/" + imdb_id + "?api_key=2e2160006592024ba87ccdf78c28f49f&external_source=imdb_id" - data = scrapertools.cachePage(urltmdb_imdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = 
scrapertools.get_match(data, '"tv_results":.*?,"id":(.*?),"') - - except: - ###Si no hay coincidencia busca directamente en Tmdb por título - if ":" in item.plot: - try: - item.plot = item.plot.replace(" ", "%20") - url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + item.plot + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') - except: - try: - item.plot = re.sub(r"(:.*)", "", item.plot) - item.plot = item.plot.replace(" ", "%20") - url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + item.plot + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') - except: - thumbnail = item.thumbnail - fanart = item.fanart - id = "" - else: - try: - if "De la A a la Z" in item.plot: - item.plot = "A to Z" - item.plot = item.plot.replace(" ", "%20") - url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + item.plot + "&language=es&include_adult=false" - data = scrapertools.cachePage(url_tmdb) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - id = scrapertools.get_match(data, 'page":1.*?,"id":(.*?),"') - except: - thumbnail = item.thumbnail - fanart = item.fanart - id = "" - - ###Teniendo (o no) el id Tmdb busca imagen - urltmdb_images = "https://api.themoviedb.org/3/tv/" + id + "?api_key=2e2160006592024ba87ccdf78c28f49f" - data = scrapertools.cachePage(urltmdb_images) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - try: - backdrop = scrapertools.get_match(data, '"backdrop_path":"(.*?)"') - fanart_3 = "https://image.tmdb.org/t/p/original" + backdrop - fanart = fanart_3 - except: - fanart_3 = item.fanart - fanart = fanart_3 - ###Se hace también la busqueda de el thumb del episodio en Tmdb - urltmdb_epi = "https://api.themoviedb.org/3/tv/" + id + "/season/" + item.extra.split("|")[0] + "/episode/" + \ - item.extra.split("|")[2] + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" - data = scrapertools.cachePage(urltmdb_epi) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '{"id".*?"file_path":"(.*?)","height"' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - thumbnail = item.thumbnail - fanart = fanart_3 - itemlist.append( - Item(channel=item.channel, title=title, action="play", url=item.url, server="torrent", thumbnail=thumbnail, - fanart=fanart, folder=False)) - - for foto in matches: - thumbnail = "https://image.tmdb.org/t/p/original" + foto - - extra = id + "|" + season - itemlist.append( - Item(channel=item.channel, title=title, action="play", url=item.url, thumbnail=thumbnail, fanart=fanart, - category=item.category, folder=False)) - ###Busca poster de temporada Tmdb - urltmdb_temp = "http://api.themoviedb.org/3/tv/" + id + "/season/" + season + "/images?api_key=2e2160006592024ba87ccdf78c28f49f" - data = get_page(urltmdb_temp) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '{"id".*?"file_path":"(.*?)","height"' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - thumbnail = item.thumbnail - for temp in matches: - thumbnail = "https://image.tmdb.org/t/p/original" + temp - ####Busca el fanart para el item info#### - urltmdb_faninfo = "http://api.themoviedb.org/3/tv/" + id + 
"/images?api_key=2e2160006592024ba87ccdf78c28f49f" - data = get_page(urltmdb_faninfo) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '{"backdrops".*?"file_path":".*?","height".*?"file_path":"(.*?)",' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - fanart = item.fanart - for fanart_4 in matches: - fanart = "https://image.tmdb.org/t/p/original" + fanart_4 - show = item.category + "|" + item.thumbnail - ### Item info de episodios - import xbmc - xbmc.executebuiltin('Action(reloadkeymaps)') - title = "Info" - title = title.replace(title, "[COLOR skyblue]" + title + "[/COLOR]") - itemlist.append(Item(channel=item.channel, action="info_capitulos", title=title, url=item.url, thumbnail=thumbnail, - fanart=fanart, extra=item.extra, show=show, folder=False)) - - return itemlist - - -def play(item): - logger.info() - itemlist = [] - ###Opción para trailers - if "youtube" in item.url: - itemlist.append(Item(channel=item.channel, action="play", server="youtube", url=item.url, fulltitle=item.title, - fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False)) - - import xbmc - xbmc.executebuiltin('Action(reloadkeymaps)') - itemlist.append(Item(channel=item.channel, title=item.title, action="play", url=item.url, server="torrent", - thumbnail=item.thumbnail, fanart=item.fanart, category=item.category, folder=False)) - - return itemlist - - -def findvideos_peli(item): - logger.info() - - itemlist = [] - data = get_page(item.url) - data = re.sub(r"\n|\r|\t|\s{2}| | - REPARADO", "", data) - - # Busca video si hay magnet y torrent - if 'id="magnet"' in data: - if 'id="file"' in data: - patron = '<span class="title">([^"]+)</span>.*?' - patron += 'id="([^"]+)".*?href="([^"]+)".*?id="([^"]+)" href="([^"]+)"' - - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]", - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False)) - import base64 - for title_links, title_torrent, url_torrent, title_magnet, url_magnet in matches: - - title_torrent = "[" + title_torrent.replace("file", "torrent") + "]" - title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]") - title_magnet = "[" + "magnet" + "]" - title_magnet = "[COLOR red]Opción[/COLOR]" + " " + title_magnet.replace(title_magnet, - "[COLOR crimson]" + title_magnet + "[/COLOR]") - title_links = title_links.replace(title_links, "[COLOR sandybrown]" + title_links + "[/COLOR]") - title_links = re.sub(r"&#.*?;|\[HD .*?\]|\(.*?\)", "", title_links) - title_tag = "[COLOR yellow]Ver --[/COLOR]" - title_torrent = title_tag + title_links + "- " + title_torrent - url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1]) - url_magnet = base64.decodestring(url_magnet.split('&u=')[1][::-1]) - if "sinopsis.png" in item.extra and not "series" in item.category: - item.extra = "http://oi67.tinypic.com/28sxwrs.jpg" - ###Se identifica si es una serie mal tipificada - if "series" in item.category and not "Completa" in title_links: - try: - season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent') - except: - season = "1" - title_link = scrapertools.get_match(title_links, '(.*?) 
-') - epi = scrapertools.get_match(title_links, '-.*?(x\d+)') - if "x0" in epi: - epi = epi.replace("x0", "") - title_links = title_link - action = "episodios" - extra = season + "|" + title_links + "|" + epi - itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, - server="torrent", thumbnail=item.extra, fanart=item.show, extra=extra, - category=item.category, plot=item.plot, folder=True)) - itemlist.append( - Item(channel=item.channel, title=title_magnet, action=action, url=url_magnet, server="torrent", - thumbnail=item.extra, category=item.category, fanart=item.show, extra=extra, - plot=item.plot, folder=True)) - else: - action = "play" - itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, - server="torrent", thumbnail=item.extra, fanart=item.show, folder=False)) - itemlist.append( - Item(channel=item.channel, title=title_magnet, action=action, url=url_magnet, server="torrent", - thumbnail=item.extra, fanart=item.show, folder=False)) - else: - ###Busca video cuando hay torrent pero no magnet - if 'id="file"' in data and not 'id="magnet"' in data: - patron = '<span class="title">([^"]+)</span>.*?' - patron += 'id="([^"]+)".*?href="([^"]+)".*?' - - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]", - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False)) - import base64 - for title_links, title_torrent, url_torrent in matches: - ## torrent - title_torrent = "[" + title_torrent.replace("file", "torrent") + "]" - title_torrent = title_torrent.replace(title_torrent, "[COLOR green]" + title_torrent + "[/COLOR]") - title_links = title_links.replace(title_links, "[COLOR sandybrown]" + title_links + "[/COLOR]") - title_links = re.sub(r"&#.*?;", "", title_links) - title_tag = "[COLOR yellow]Ver --[/COLOR]" - title_torrent = title_tag + title_links + "- " + title_torrent - url_torrent = base64.decodestring(url_torrent.split('&u=')[1][::-1]) - if "sinopsis.png" in item.extra: - item.extra = "http://oi67.tinypic.com/28sxwrs.jpg" - ###Se identifica si es una serie mal tipificada - if "series" in item.category and not "Completa" in title_links: - try: - season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent') - except: - season = "1" - title_link = scrapertools.get_match(title_links, '(.*?) -') - epi = scrapertools.get_match(title_links, '-.*?(x\d+)') - if "x0" in epi: - epi = epi.replace("x0", "") - title_links = title_link - action = "episodios" - extra = season + "|" + title_links + "|" + epi - itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, - server="torrent", thumbnail=item.extra, fanart=item.show, extra=extra, - category=item.category, plot=item.plot, folder=True)) - - else: - action = "play" - itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, - server="torrent", thumbnail=item.extra, fanart=item.show, folder=False)) - ###Busca video cuando solo hay magnet y no torrent - if 'id="magnet"' in data and not 'id="file"' in data: - patron = '<span class="title">([^"]+)</span>.*?' 
- patron += 'id="([^"]+)" href="([^"]+)"' - - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]", - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False)) - import base64 - for title_links, title_magnet, url_magnet in matches: - title_magnet = "[" + "magnet" + "]" - title_links = title_links.replace(title_links, "[COLOR sandybrown]" + title_links + "[/COLOR]") - title_links = re.sub(r"&#.*?;", "", title_links) - title_tag = "[COLOR red]Ver --[/COLOR]" - title_magnet = title_tag + title_links + "- " + title_magnet.replace(title_magnet, - "[COLOR crimson]" + title_magnet + "[/COLOR]") - url_magnet = base64.decodestring(url_magnet.split('&u=')[1][::-1]) - if "sinopsis.png" in item.extra: - item.extra = "http://oi67.tinypic.com/28sxwrs.jpg" - ###Se identifica si es una serie mal tipificada - if "series" in item.category and not "Completa" in title_links: - try: - season = scrapertools.get_match(data, '<title>.*?Temporada.*?(\d+).*?Torrent') - except: - season = "1" - title_link = scrapertools.get_match(title_links, '(.*?) -') - epi = scrapertools.get_match(title_links, '-.*?(x\d+)') - if "x0" in epi: - epi = epi.replace("x0", "") - title_links = title_link - action = "episodios" - extra = season + "|" + title_links + "|" + epi - itemlist.append(Item(channel=item.channel, title=title_torrent, action=action, url=url_torrent, - server="torrent", thumbnail=item.extra, fanart=item.show, extra=extra, - category=item.category, plot=item.plot, folder=True)) - - else: - action = "play" - - itemlist.append( - Item(channel=item.channel, title=title_magnet, action=action, url=url_magnet, server="torrent", - thumbnail=item.extra, fanart=item.show, folder=False)) - ###No hay torrent ni magnet - if not 'id="file"' in data and not 'id="magnet"' in data: - itemlist.append(Item(channel=item.channel, - title="[COLOR gold][B]El video ya no se encuentra en la web, prueba a encontrala por busqueda...[/B][/COLOR]", - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False)) - return itemlist - - -def trailer(item): - logger.info() - ###Crea archivo control trailer.txt para evitar la recarga de la música cuando se vuelve de trailer - import xbmc - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - if os.path.exists(TESTPYDESTFILE): - TRAILERDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "trailer.txt") - - urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/trailer.txt", - TRAILERDESTFILE) - - itemlist = [] - data = get_page(item.url) - - # trailer - patron = "<iframe width='.*?' height='.*?' src='([^']+)?" 
- - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - itemlist.append( - Item(channel=item.channel, title="[COLOR gold][B]Esta pelicula no tiene trailer,lo sentimos...[/B][/COLOR]", - thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", - fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False)) - - for url in matches: - listavideos = servertools.findvideos(url) - - for video in listavideos: - videotitle = scrapertools.unescape(video[0]) - url = video[1] - server = video[2] - - title = "[COLOR crimson]Trailer - [/COLOR]" - itemlist.append(Item(channel=item.channel, action="play", server="youtube", title=title + videotitle, url=url, - thumbnail=item.extra, fulltitle=item.title, - fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False)) - return itemlist - - -def info(item): - logger.info() - url = item.url - data = get_page(url) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - if "temporada" in item.url: - ###Se prepara el Customkey para no permitir el forcerefresh y evitar conflicto con info - import xbmc - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - try: - os.remove(APPCOMMANDDESTFILE) - except: - pass - patron = '<title>([^<]+).*?Temporada.*?' - patron += '<div class="description" itemprop="text.*?">.*?([^<]+).*?</div></div></div>' - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" - plot = "Esta serie no tiene informacion..." - plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") - photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" - foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" - info = "" - quit = "Pulsa" + " [COLOR crimson][B]INTRO [/B][/COLOR]" + "para quitar" - for title, plot in matches: - plot_title = "Sinopsis" + "[CR]" - plot_title = plot_title.replace(plot_title, "[COLOR red]" + plot_title + "[/COLOR]") - plot = plot_title + plot - plot = plot.replace(plot, "[COLOR white][B]" + plot + "[/B][/COLOR]") - plot = re.sub(r'div class=".*?">', '', plot) - plot = plot.replace("div>", "") - plot = plot.replace('div class="margin_20b">', '') - plot = plot.replace('div class="post-entry">', '') - plot = plot.replace('p style="text-align: left;">', '') - title = re.sub(r"&#.*?;", "", title) - title = title.replace(title, "[COLOR sandybrown][B]" + title + "[/B][/COLOR]") - title = title.replace("-", "") - title = title.replace("Torrent", "") - title = title.replace("amp;", "") - title = title.replace("Descargar en Bricocine.com", "") - try: - scrapedinfo = scrapertools.get_match(data, 'Ficha técnica</h2><dl class="list"><dt>(.*?)hellip') - except IndexError: - scrapedinfo = scrapertools.get_match(data, - 'Ficha técnica</h2><dl class="list"><dt>(.*?)</div><div class="quad-2"') - scrapedinfo = scrapedinfo.replace("<br />", " ") - scrapedinfo = scrapedinfo.replace("</dl>", "<dt>") - scrpaedinfo = re.sub(r'<a href=".*?"|title=".*?"|item.*?=".*?"', '', scrapedinfo) - - infoformat = re.compile('(.*?</dt><dd.*?>).*?</dd><dt>', re.DOTALL).findall(scrapedinfo) - for info in infoformat: - scrapedinfo = scrapedinfo.replace(scrapedinfo, "[COLOR white][B]" + scrapedinfo + "[/COLOR]") - scrapedinfo = scrapedinfo.replace(info, "[COLOR red][B]" + info + "[/B][/COLOR]") - info = scrapedinfo - info = re.sub( - r'<a href=".*?">|title=".*?">|<span itemprop=.*?>|</span></span>|<span>|</a>|itemprop=".*?"|y otros.*?&', - '', info) - info = info.replace("</dt><dd>", 
":") - info = info.replace("</dt><dd >", ":") - info = info.replace("</dt><dd > ", ":") - info = info.replace("</dd><dt>", " ") - info = info.replace("</span>", " ") - - info = info.replace("Actores:", "[COLOR red][B]Actores:[/B][/COLOR] ") - photo = item.extra - foto = item.category - quit = "Pulsa" + " [COLOR crimson][B]INTRO [/B][/COLOR]" + "para quitar" - ###Se carga Customkey no atras - NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") - REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") - APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") - urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/noback.xml", - NOBACKDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remotenoback.xml", - REMOTENOBACKDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/appnoback.xml", - APPNOBACKDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - else: - data = get_page(item.url) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '<div class="description" itemprop="text.*?">.*?([^<]+).*?</div></div></div>.*?' - patron += '<span class="title">([^"]+)</span>' - matches = re.compile(patron, re.DOTALL).findall(data) - - if len(matches) == 0: - title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" - plot = "Esta pelicula no tiene sinopsis..." - plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") - foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png" - photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" - info = "" - quit = "Pulsa" + " [COLOR crimson][B]INTRO [/B][/COLOR]" + "para quitar" - - for plot, title in matches: - title = title.upper() - title = title.replace(title, "[COLOR sandybrown][B]" + title + "[/B][/COLOR]") - title = re.sub(r"&#.*?;|\[HD .*?\]|", "", title) - plot_title = "Sinopsis" + "[CR]" - plot_title = plot_title.replace(plot_title, "[COLOR red]" + plot_title + "[/COLOR]") - plot = plot_title + plot - plot = plot.replace(plot, "[COLOR white][B]" + plot + "[/B][/COLOR]") - plot = plot.replace('div class="margin_20b">', '') - plot = plot.replace('div class="post-entry">', '') - try: - scrapedinfo = scrapertools.get_match(data, 'Ficha técnica</h2><dl class="list"><dt>(.*?)hellip') - except IndexError: - scrapedinfo = scrapertools.get_match(data, - 'Ficha técnica</h2><dl class="list"><dt>(.*?)</div><div class="quad-2"') - scrapedinfo = scrapedinfo.replace("<br />", " ") - scrapedinfo = scrapedinfo.replace("</dl>", "<dt>") - scrpaedinfo = re.sub(r'<a href=".*?"|title=".*?"|item.*?=".*?"', '', scrapedinfo) - infoformat = re.compile('(.*?</dt><dd.*?>).*?</dd><dt>', re.DOTALL).findall(scrapedinfo) - for info in infoformat: - scrapedinfo = scrapedinfo.replace(scrapedinfo, "[COLOR white][B]" + scrapedinfo + "[/COLOR]") - scrapedinfo = scrapedinfo.replace(info, "[COLOR red][B]" + info + "[/B][/COLOR]") - info = scrapedinfo - info = re.sub( - r'<a href=".*?">|title=".*?">|<span itemprop=.*?>|</span></span>|<span>|</a>|itemprop=".*?"|y otros.*?&', - '', info) - info = info.replace("</dt><dd>", ":") - info = info.replace("</dt><dd >", ":") - info = info.replace("</dt><dd > ", ":") - info = info.replace("</dd><dt>", " ") - info = info.replace("</span>", " ") - if "hellip" in data: - info = info.replace("Actores:", "[COLOR red][B]Actores:[/B][/COLOR] ") - - foto = 
item.category - photo = item.extra - quit = "Pulsa" + " [COLOR crimson][B]INTRO [/B][/COLOR]" + "para quitar" - - ventana2 = TextBox1(title=title, plot=plot, info=info, thumbnail=photo, fanart=foto, quit=quit) - ventana2.doModal() - - -ACTION_GESTURE_SWIPE_LEFT = 511 -ACTION_SELECT_ITEM = 7 - - -class TextBox1(xbmcgui.WindowDialog): - """ Create a skinned textbox window """ - - def __init__(self, *args, **kwargs): - - self.getTitle = kwargs.get('title') - self.getPlot = kwargs.get('plot') - self.getInfo = kwargs.get('info') - self.getThumbnail = kwargs.get('thumbnail') - self.getFanart = kwargs.get('fanart') - self.getQuit = kwargs.get('quit') - - self.background = xbmcgui.ControlImage(70, 20, 1150, 630, - 'http://s6.postimg.org/58jknrvtd/backgroundventana5.png') - self.title = xbmcgui.ControlTextBox(140, 60, 1130, 50) - self.quit = xbmcgui.ControlTextBox(145, 90, 1030, 45) - self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 140) - self.info = xbmcgui.ControlFadeLabel(120, 310, 1056, 100) - self.thumbnail = xbmcgui.ControlImage(813, 43, 390, 100, self.getThumbnail) - self.fanart = xbmcgui.ControlImage(120, 365, 1060, 250, self.getFanart) - - self.addControl(self.background) - self.addControl(self.title) - self.addControl(self.quit) - self.addControl(self.plot) - self.addControl(self.thumbnail) - self.addControl(self.fanart) - self.addControl(self.info) - - self.title.setText(self.getTitle) - self.quit.setText(self.getQuit) - try: - self.plot.autoScroll(7000, 6000, 30000) - except: - ###Información de incompatibilidd autoscroll con versiones inferiores a isengrd - print "Actualice a la ultima version de kodi para mejor info" - import xbmc - xbmc.executebuiltin( - 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') - self.plot.setText(self.getPlot) - self.info.addLabel(self.getInfo) - - def get(self): - - self.show() - - def onAction(self, action): - if action == ACTION_SELECT_ITEM or action == ACTION_GESTURE_SWIPE_LEFT: - ###Se vuelven a cargar Customkey al salir de info - import os, sys - import xbmc - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") - REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") - APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - try: - os.remove(NOBACKDESTFILE) - os.remove(REMOTENOBACKDESTFILE) - os.remove(APPNOBACKDESTFILE) - if os.path.exists(TESTPYDESTFILE): - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml", - APPCOMMANDDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - except: - pass - self.close() - - -def info_capitulos(item): - logger.info() - import xbmc - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - try: - os.remove(APPCOMMANDDESTFILE) - except: - pass - url = item.url - data = scrapertools.cache_page(url) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - if "series" in item.category: - item.category = item.category.split("|")[0] - else: - item.category = item.show.split("|")[0] - item.thumbnail = 
item.show.split("|")[1] - capitulo = item.extra.split("|")[2] - capitulo = re.sub(r"(0)\d;", "", capitulo) - url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.show.split("|")[0] + "/default/" + \ - item.extra.split("|")[0] + "/" + capitulo + "/es.xml" - data = scrapertools.cache_page(url) - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?' - patron += '<Overview>(.*?)</Overview>.*?' - - matches = re.compile(patron, re.DOTALL).findall(data) - if len(matches) == 0: - title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]" - plot = "Este capitulo no tiene informacion..." - plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") - image = "http://s6.postimg.org/ub7pb76c1/noinfo.png" - foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png" - quit = "Pulsa" + " [COLOR greenyellow][B]INTRO [/B][/COLOR]" + "para quitar" - else: - - for name_epi, info in matches: - if "<filename>episodes" in data: - foto = scrapertools.get_match(data, '<Data>.*?<filename>(.*?)</filename>') - fanart = "http://thetvdb.com/banners/" + foto - else: - fanart = item.show.split("|")[1] - if item.show.split("|")[1] == item.thumbnail: - fanart = "http://s6.postimg.org/4asrg755b/bricotvshows2.png" - - plot = info - plot = (translate(plot, "es")) - plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]") - name_epi = re.sub(r"&#.*?;|&", "", name_epi) - plot = re.sub(r"&#.*?;", "", plot) - title = name_epi.upper() - title = title.replace(title, "[COLOR sandybrown][B]" + title + "[/B][/COLOR]") - image = fanart - foto = item.show.split("|")[1] - if not ".png" in item.show.split("|")[1]: - foto = "http://s6.postimg.org/6flcihb69/brico1sinopsis.png" - quit = "Pulsa" + " [COLOR greenyellow][B]INTRO [/B][/COLOR]" + "para quitar" - NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") - REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") - APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - urllib.urlretrieve("https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/noback.xml", - NOBACKDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/remotenoback.xml", - REMOTENOBACKDESTFILE) - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/appnoback.xml", - APPNOBACKDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, quit=quit) - ventana.doModal() - - -ACTION_GESTURE_SWIPE_LEFT = 511 -ACTION_SELECT_ITEM = 7 - - -class TextBox2(xbmcgui.WindowDialog): - """ Create a skinned textbox window """ - - def __init__(self, *args, **kwargs): - self.getTitle = kwargs.get('title') - self.getPlot = kwargs.get('plot') - self.getThumbnail = kwargs.get('thumbnail') - self.getFanart = kwargs.get('fanart') - self.getQuit = kwargs.get('quit') - - self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://s6.postimg.org/n3ph1uxn5/ventana.png') - self.title = xbmcgui.ControlTextBox(120, 60, 430, 50) - self.quit = xbmcgui.ControlTextBox(145, 110, 1030, 45) - self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100) - self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail) - self.fanart = 
xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart) - - self.addControl(self.background) - self.addControl(self.title) - self.addControl(self.quit) - self.addControl(self.plot) - self.addControl(self.thumbnail) - self.addControl(self.fanart) - - self.title.setText(self.getTitle) - self.quit.setText(self.getQuit) - try: - self.plot.autoScroll(7000, 6000, 30000) - except: - print "Actualice a la ultima version de kodi para mejor info" - import xbmc - xbmc.executebuiltin( - 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")') - self.plot.setText(self.getPlot) - - def get(self): - self.show() - - def onAction(self, action): - if action == ACTION_SELECT_ITEM or action == ACTION_GESTURE_SWIPE_LEFT: - import os, sys - import xbmc - APPCOMMANDDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "customapp.xml") - NOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "noback.xml") - REMOTENOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "remotenoback.xml") - APPNOBACKDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "appnoback.xml") - TESTPYDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "test.py") - try: - os.remove(NOBACKDESTFILE) - os.remove(REMOTENOBACKDESTFILE) - os.remove(APPNOBACKDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - if os.path.exists(TESTPYDESTFILE): - urllib.urlretrieve( - "https://raw.githubusercontent.com/neno1978/script.palc.forcerefresh/master/Bricocine/customapp.xml", - APPCOMMANDDESTFILE) - xbmc.executebuiltin('Action(reloadkeymaps)') - except: - xbmc.executebuiltin('Action(reloadkeymaps)') - self.close() - - -def translate(to_translate, to_langage="auto", langage="auto"): - ###Traducción atraves de Google - '''Return the translation using google translate - you must shortcut the langage you define (French = fr, English = en, Spanish = es, etc...) - if you don't define anything it will detect it or use english by default - Example: - print(translate("salut tu vas bien?", "en")) - hello you alright?''' - agents = { - 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"} - before_trans = 'class="t0">' - link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_langage, langage, to_translate.replace(" ", "+")) - request = urllib2.Request(link, headers=agents) - page = urllib2.urlopen(request).read() - result = page[page.find(before_trans) + len(before_trans):] - result = result.split("<")[0] - return result - - -if __name__ == '__main__': - to_translate = 'Hola como estas?' - print("%s >> %s" % (to_translate, translate(to_translate))) - print("%s >> %s" % (to_translate, translate(to_translate, 'fr'))) -# should print Hola como estas >> Hello how are you -# and Hola como estas? >> Bonjour comment allez-vous? 
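Note on the deletion above: bityouth.py localized TheTVDB episode overviews by scraping Google Translate's mobile page and cutting the reply out of the class="t0"> markup. Below is a minimal, self-contained sketch of that technique (Python 2, standard library only, like the rest of the add-on). The endpoint and the t0 marker are copied from the removed code and are only an assumption today, since Google can change or block them at any time; urllib.quote_plus is used here instead of the original replace(" ", "+") so that characters such as & or ? cannot break the query string.

# -*- coding: utf-8 -*-
# Sketch of the scraping-based translator that was removed with bityouth.py.
# Assumption: translate.google.com/m still wraps the result in class="t0">.
import urllib
import urllib2

def translate(text, to_lang="en", from_lang="auto"):
    link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (
        to_lang, from_lang, urllib.quote_plus(text))
    request = urllib2.Request(link, headers={'User-Agent': 'Mozilla/5.0'})
    page = urllib2.urlopen(request).read()
    marker = 'class="t0">'
    if marker not in page:
        return text  # markup changed: fall back to the untranslated text
    result = page[page.find(marker) + len(marker):]
    return result.split("<")[0]  # keep the text up to the next tag

if __name__ == '__main__':
    print translate("Hola, ¿cómo estás?")  # e.g. "Hello, how are you?"

The fallback matters because a single unannounced markup change on Google's side would otherwise silently break every plot translation in a channel built this way.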
diff --git a/plugin.video.alfa/channels/canalpelis.py b/plugin.video.alfa/channels/canalpelis.py index ff444461..add2a7a7 100644 --- a/plugin.video.alfa/channels/canalpelis.py +++ b/plugin.video.alfa/channels/canalpelis.py @@ -143,14 +143,10 @@ def peliculas(item): contentTitle = scrapedtitle.partition(':')[0].partition(',')[0] title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % ( scrapedtitle, year, quality) - thumb_id = scrapertools.find_single_match(scrapedthumbnail, '.*?\/uploads\/(.*?)-') - thumbnail = "/%s.jpg" % thumb_id - filtro_list = {"poster_path": thumbnail} - filtro_list = filtro_list.items() itemlist.append(item.clone(channel=__channel__, action="findvideos", text_color=color3, - url=scrapedurl, infoLabels={'filtro':filtro_list}, - contentTitle=contentTitle, thumbnail=thumbnail, + url=scrapedurl, infoLabels={'year': year}, + contentTitle=contentTitle, thumbnail=scrapedthumbnail, title=title, context="buscar_trailer", quality = quality)) tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__) @@ -168,17 +164,17 @@ def peliculas(item): for item in itemlist: if item.infoLabels['plot'] == '': - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) + datas = httptools.downloadpage(item.url).data + datas = re.sub(r"\n|\r|\t|\s{2}| ", "", datas) item.fanart = scrapertools.find_single_match( - data, "<meta property='og:image' content='([^']+)' />") + datas, "<meta property='og:image' content='([^']+)' />") item.fanart = item.fanart.replace('w780', 'original') - item.plot = scrapertools.find_single_match(data, '</span></h4><p>([^*]+)</p><h4') + item.plot = scrapertools.find_single_match(datas, '</h4><p>(.*?)</p>') item.plot = scrapertools.htmlclean(item.plot) item.infoLabels['director'] = scrapertools.find_single_match( - data, '<div class="name"><a href="[^"]+">([^<]+)</a>') + datas, '<div class="name"><a href="[^"]+">([^<]+)</a>') item.infoLabels['genre'] = scrapertools.find_single_match( - data, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>') + datas, 'rel="tag">[^<]+</a><a href="[^"]+" rel="tag">([^<]+)</a>') return itemlist @@ -189,8 +185,7 @@ def generos(item): data = scrapertools.cache_page(item.url) data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) - # logger.info(data) - # url, title, cantidad + patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>' matches = re.compile(patron, re.DOTALL).findall(data) @@ -216,29 +211,30 @@ def year_release(item): for scrapedurl, scrapedtitle in matches: itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0, - url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next')) + url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next')) return itemlist def series(item): logger.info() - itemlist = [] data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |<br>", "", data) - # logger.info(datas) + data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data) - patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">' + patron = '<div class="poster"><img src="([^"]+)" alt="([^"]+)">.*?<a href="([^"]+)">.*?' 
+ patron += '<div class="texto">([^<]+)</div>' matches = scrapertools.find_multiple_matches(data, patron) - for scrapedthumbnail, scrapedtitle, scrapedurl in matches: + for scrapedthumbnail, scrapedtitle, scrapedurl, plot in matches: + if plot == '': + plot = scrapertools.find_single_match(data, '<div class="texto">([^<]+)</div>') scrapedtitle = scrapedtitle.replace('Ver ', '').replace( - ' Online HD', '').replace('ver ', '').replace(' Online', '') + ' Online HD', '').replace('ver ', '').replace(' Online', '').replace(' (Serie TV)', '').strip() itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas", - contentSerieName=scrapedtitle, show=scrapedtitle, + contentSerieName=scrapedtitle, show=scrapedtitle, plot=plot, thumbnail=scrapedthumbnail, contentType='tvshow')) url_next_page = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />') @@ -258,7 +254,6 @@ def temporadas(item): data = httptools.downloadpage(item.url).data datas = re.sub(r"\n|\r|\t| |<br>", "", data) - # logger.info(datas) patron = '<span class="title">([^<]+)<i>.*?' # numeros de temporadas patron += '<img src="([^"]+)"></a></div>' # capitulos @@ -267,7 +262,7 @@ def temporadas(item): for scrapedseason, scrapedthumbnail in matches: scrapedseason = " ".join(scrapedseason.split()) temporada = scrapertools.find_single_match(scrapedseason, '(\d+)') - new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail) + new_item = item.clone(action="episodios", season=temporada, thumbnail=scrapedthumbnail, extra='temporadas') new_item.infoLabels['season'] = temporada new_item.extra = "" itemlist.append(new_item) @@ -285,6 +280,11 @@ def temporadas(item): itemlist.sort(key=lambda it: it.title) + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=__channel__, title="Añadir esta serie a la videoteca", url=item.url, + action="add_serie_to_library", extra="episodios", show=item.show, category="Series", + text_color=color1, thumbnail=thumbnail_host, fanart=fanart_host)) + return itemlist else: return episodios(item) @@ -358,8 +358,6 @@ def findvideos(item): patron = '<div id="option-(\d+)" class="play-box-iframe.*?src="([^"]+)" frameborder="0" scrolling="no" allowfullscreen></iframe>' matches = re.compile(patron, re.DOTALL).findall(data) - # matches = re.compile(patron, re.DOTALL).findall(data) - for option, url in matches: datas = httptools.downloadpage(urlparse.urljoin(host, url), headers={'Referer': item.url}).data @@ -375,10 +373,9 @@ def findvideos(item): itemlist.append(item.clone(action='play', url=url, title=title, extra1=title, server=server, language = lang, text_color=color3)) - itemlist.append(Item(channel=item.channel, - title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url=item.url, action="add_pelicula_to_library", - thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/libreria.png', - extra="findvideos", contentTitle=item.contentTitle)) + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'episodios': + itemlist.append(Item(channel=__channel__, url=item.url, action="add_pelicula_to_library", extra="findvideos", + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + thumbnail=thumbnail_host, contentTitle=item.contentTitle)) return itemlist diff --git a/plugin.video.alfa/channels/ohlatino.json b/plugin.video.alfa/channels/ciberpeliculashd.json similarity index 56% rename from plugin.video.alfa/channels/ohlatino.json rename to 
plugin.video.alfa/channels/ciberpeliculashd.json
index 1e39fd1a..e6b7bfbd 100644
--- a/plugin.video.alfa/channels/ohlatino.json
+++ b/plugin.video.alfa/channels/ciberpeliculashd.json
@@ -1,22 +1,22 @@
 {
-    "id": "ohlatino",
-    "name": "OH!Latino",
+    "id": "ciberpeliculashd",
+    "name": "Ciberpeliculashd",
     "active": true,
     "adult": false,
     "language": ["lat"],
-    "thumbnail": "http://cinemiltonero.com/wp-content/uploads/2017/08/logo-Latino0.png",
-    "banner": "https://s27.postimg.org/bz0fh8jpf/oh-pelis-banner.png",
+    "thumbnail": "https://s17.postimg.org/78tekxeov/ciberpeliculashd1.png",
+    "banner": "",
     "categories": [
         "movie"
     ],
     "settings": [
         {
-            "id": "include_in_global_search",
+            "id": "modo_grafico",
             "type": "bool",
-            "label": "Incluir en busqueda global",
-            "default": false,
-            "enabled": false,
-            "visible": false
+            "label": "Buscar información extra",
+            "default": true,
+            "enabled": true,
+            "visible": true
         },
         {
             "id": "include_in_newest_latino",
@@ -26,6 +26,14 @@
             "enabled": true,
             "visible": true
         },
+        {
+            "id": "include_in_global_search",
+            "type": "bool",
+            "label": "Incluir en busqueda global",
+            "default": true,
+            "enabled": true,
+            "visible": true
+        },
         {
             "id": "include_in_newest_peliculas",
             "type": "bool",
@@ -41,6 +49,14 @@
             "default": true,
             "enabled": true,
             "visible": true
+        },
+        {
+            "id": "include_in_newest_terror",
+            "type": "bool",
+            "label": "Incluir en Novedades - terror",
+            "default": true,
+            "enabled": true,
+            "visible": true
         }
     ]
 }
diff --git a/plugin.video.alfa/channels/ciberpeliculashd.py b/plugin.video.alfa/channels/ciberpeliculashd.py
new file mode 100644
index 00000000..37c9439c
--- /dev/null
+++ b/plugin.video.alfa/channels/ciberpeliculashd.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+
+__channel__ = 'ciberpeliculashd'
+
+host = "http://ciberpeliculashd.net"
+
+try:
+    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
+except:
+    __modo_grafico__ = True
+
+
+def mainlist(item):
+    logger.info()
+    itemlist = []
+    itemlist.append(Item(channel = item.channel, title = "Novedades", action = "peliculas", url = host + "/?peli=1"))
+    itemlist.append(Item(channel = item.channel, title = "Por género", action = "filtro", url = host, extra = "categories"))
+    itemlist.append(Item(channel = item.channel, title = "Por calidad", action = "filtro", url = host, extra = "qualitys"))
+    itemlist.append(Item(channel = item.channel, title = "Por idioma", action = "filtro", url = host, extra = "languages"))
+    itemlist.append(Item(channel = item.channel, title = ""))
+    itemlist.append(Item(channel = item.channel, title = "Buscar", action = "search", url = host + "/?s="))
+    return itemlist
+
+def newest(categoria):
+    logger.info()
+    itemlist = []
+    item = Item()
+    try:
+        if categoria in ['peliculas', 'latino']:
+            item.url = host + "/?peli=1"
+        elif categoria == 'infantiles':
+            item.url = host + '/categories/animacion/?peli=1'
+        elif categoria == 'terror':
+            item.url = host + '/categories/terror/?peli=1'
+        itemlist = peliculas(item)
+        if "Página" in itemlist[-1].title:
+            itemlist.pop()
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("{0}".format(line))
+        return []
+
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = item.url + texto + "&peli=1"
+    item.extra = "busca"
+    if texto != '':
+        return peliculas(item)
+    else:
+        return []
+
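The new channel threads one pagination scheme through newest() and search() above and peliculas() below: every listing URL carries a peli=N counter, peliculas() appends a final "Página siguiente" item with the counter bumped by one, and newest() pops that item off again. The following is a standalone sketch of that idiom; next_page_url is an illustrative name rather than a function in the channel, and the logic is a simplified version of what peliculas() computes inline.

# Sketch of the "peli=N" pagination idiom used by ciberpeliculashd.
# next_page_url is illustrative only; the channel does this inline.
import re

def next_page_url(url):
    match = re.search(r"peli=(\d+)", url)
    if not match:
        # First page: the channel itself appends the counter.
        sep = "&" if "?" in url else "?"
        return url + sep + "peli=1"
    # Later pages: bump the counter in place.
    page = int(match.group(1)) + 1
    return re.sub(r"peli=\d+", "peli=%d" % page, url)

if __name__ == '__main__':
    print next_page_url("http://ciberpeliculashd.net/?peli=1")          # ...?peli=2
    print next_page_url("http://ciberpeliculashd.net/?s=alien&peli=3")  # ...&peli=4

Keeping the counter in the URL, rather than in item state, is what lets newest() reuse peliculas() unchanged and simply drop the trailing pagination item.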
+ +def filtro(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = 'uk-navbar-nav-subtitle taxonomy-menu-title">%s.*?</ul>' %item.extra + bloque = scrapertools.find_single_match(data, patron) + patron = "href='([^']+)" + patron += "'>([^<]+)" + matches = scrapertools.find_multiple_matches(bloque, patron) + for url, titulo in matches: + itemlist.append(Item(channel = item.channel, + action = "peliculas", + title = titulo, + url = url + "/?peli=1" + )) + return itemlist + + +def peliculas(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + bloque = scrapertools.find_single_match(data, 'loop-posts".*?panel-pagination pagination-bottom') + patron = 'a href="([^"]+)".*?' + patron += 'img alt="([^"]+)".*?' + patron += '((?:http|https)://image.tmdb.org[^"]+)".*?' + patron += 'a href="([^"]+)".*?' + matches = scrapertools.find_multiple_matches(bloque, patron) + for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedurl1 in matches: + scrapedtitle = scrapedtitle.replace(" Online imagen","").replace("Pelicula ","") + year = scrapertools.find_single_match(scrapedtitle, "\(([0-9]+)\)") + if year: + year = int(year) + else: + year = 0 + fulltitle = scrapertools.find_single_match(scrapedtitle, "(.*?) \(") + itemlist.append(Item(action = "findvideos", + channel = item.channel, + fulltitle = fulltitle, + thumbnail = scrapedthumbnail, + infoLabels = {'year': year}, + title = scrapedtitle, + url = scrapedurl + )) + tmdb.set_infoLabels(itemlist) + page = int(scrapertools.find_single_match(item.url,"peli=([0-9]+)")) + 1 + next_page = scrapertools.find_single_match(item.url,".*?peli=") + next_page += "%s" %page + itemlist.append(Item(action = "peliculas", + channel = item.channel, + title = "Página siguiente", + url = next_page + )) + return itemlist + + +def findvideos(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url).data + patron = 'src="([^&]+)' + matches = scrapertools.find_multiple_matches(data, patron) + for scrapedurl in matches: + title = "Ver en: %s" + itemlist.append(item.clone(action = "play", + title = title, + url = scrapedurl + )) + tmdb.set_infoLabels(itemlist) + itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + if itemlist: + itemlist.append(Item(channel = item.channel)) + itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="", + text_color="magenta")) + # Opción "Añadir esta película a la biblioteca de KODI" + if item.extra != "library": + if config.get_videolibrary_support(): + itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green", + action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail, + fulltitle = item.fulltitle + )) + return itemlist + + +def play(item): + item.thumbnail = item.contentThumbnail + return [item] diff --git a/plugin.video.alfa/channels/cinefoxtv.py b/plugin.video.alfa/channels/cinefoxtv.py index ef97f240..07b60ee6 100644 --- a/plugin.video.alfa/channels/cinefoxtv.py +++ b/plugin.video.alfa/channels/cinefoxtv.py @@ -10,7 +10,7 @@ from core import tmdb from core.item import Item from platformcode import config, logger -host = 'http://cinefoxtv.net/' +host = 'http://verhdpelis.com/' headers = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'], ['Referer', host]] diff --git a/plugin.video.alfa/channels/cinetux.py b/plugin.video.alfa/channels/cinetux.py index 
59805451..2d68dd95 100644 --- a/plugin.video.alfa/channels/cinetux.py +++ b/plugin.video.alfa/channels/cinetux.py @@ -343,12 +343,14 @@ def bloque_enlaces(data, filtro_idioma, dict_idiomas, type, item): def play(item): logger.info() itemlist = [] - if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url: + if "api.cinetux" in item.url or item.server == "okru" or "drive.php" in item.url or "youtube" in item.url: data = httptools.downloadpage(item.url, headers={'Referer': item.extra}).data.replace("\\", "") id = scrapertools.find_single_match(data, 'img src="[^#]+#(.*?)"') item.url = "http://docs.google.com/get_video_info?docid=" + id if item.server == "okru": item.url = "https://ok.ru/videoembed/" + id + if item.server == "youtube": + item.url = "https://www.youtube.com/embed/" + id elif "links" in item.url or "www.cinetux.me" in item.url: data = httptools.downloadpage(item.url).data scrapedurl = scrapertools.find_single_match(data, '<a href="(http[^"]+)') diff --git a/plugin.video.alfa/channels/descargasmix.py b/plugin.video.alfa/channels/descargasmix.py index f3854109..41e5f030 100644 --- a/plugin.video.alfa/channels/descargasmix.py +++ b/plugin.video.alfa/channels/descargasmix.py @@ -114,7 +114,9 @@ def lista(item): itemlist.append(item.clone(title="Estrenos", action="entradas", url="%s/peliculas/estrenos" % host)) itemlist.append(item.clone(title="Dvdrip", action="entradas", url="%s/peliculas/dvdrip" % host)) itemlist.append(item.clone(title="HD (720p/1080p)", action="entradas", url="%s/peliculas/hd" % host)) + itemlist.append(item.clone(title="4K", action="entradas", url="%s/peliculas/4k" % host)) itemlist.append(item.clone(title="HDRIP", action="entradas", url="%s/peliculas/hdrip" % host)) + itemlist.append(item.clone(title="Latino", action="entradas", url="%s/peliculas/latino-peliculas" % host)) itemlist.append(item.clone(title="VOSE", action="entradas", url="%s/peliculas/subtituladas" % host)) diff --git a/plugin.video.alfa/channels/divxatope.py b/plugin.video.alfa/channels/divxatope.py index 276dd858..d1aae940 100644 --- a/plugin.video.alfa/channels/divxatope.py +++ b/plugin.video.alfa/channels/divxatope.py @@ -260,14 +260,16 @@ def findvideos(item): item.plot = scrapertools.find_single_match(data, '<div class="post-entry" style="height:300px;">(.*?)</div>') item.plot = scrapertools.htmlclean(item.plot).strip() item.contentPlot = item.plot - - link = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"') - if link != "": - link = "http://www.divxatope1.com/" + link - logger.info("torrent=" + link) + al_url_fa = scrapertools.find_single_match(data, 'location\.href.*?=.*?"http:\/\/(?:tumejorserie|tumejorjuego).*?link=(.*?)"') + if al_url_fa == "": + al_url_fa = scrapertools.find_single_match(data, + 'location\.href.*?=.*?"http:\/\/divxatope1.com/(.*?)"') + if al_url_fa != "": + al_url_fa = "http://www.divxatope1.com/" + al_url_fa + logger.info("torrent=" + al_url_fa) itemlist.append( Item(channel=item.channel, action="play", server="torrent", title="Vídeo en torrent", fulltitle=item.title, - url=link, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False, + url=al_url_fa, thumbnail=servertools.guess_server_thumbnail("torrent"), plot=item.plot, folder=False, parentContent=item)) patron = '<div class=\"box1\"[^<]+<img[^<]+<\/div[^<]+<div class="box2">([^<]+)<\/div[^<]+<div class="box3">([^<]+)' diff --git a/plugin.video.alfa/channels/doomtv.py 
b/plugin.video.alfa/channels/doomtv.py index 2bc1209f..f906175d 100644 --- a/plugin.video.alfa/channels/doomtv.py +++ b/plugin.video.alfa/channels/doomtv.py @@ -225,6 +225,8 @@ def findvideos(item): #itemlist = get_url(item) data = httptools.downloadpage(item.url).data data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) + url_m3u8 = scrapertools.find_single_match(data, '<source src=(.*?) type=application/x-mpegURL\s*/>') + itemlist.append(item.clone(url=url_m3u8, action='play')) patron = 'id=(tab\d+)><div class=movieplay><(?:iframe|script) src=(.*?)(?:scrolling|><\/script>)' matches = re.compile(patron, re.DOTALL).findall(data) diff --git a/plugin.video.alfa/channels/gnula.py b/plugin.video.alfa/channels/gnula.py index 9a3a36a4..6aa2ff16 100755 --- a/plugin.video.alfa/channels/gnula.py +++ b/plugin.video.alfa/channels/gnula.py @@ -72,7 +72,6 @@ def peliculas(item): url = scrapedurl, thumbnail = scrapedthumbnail, plot = plot, - hasContentDetails = True, contentTitle = scrapedtitle, contentType = "movie", language=language, diff --git a/plugin.video.alfa/channels/javtasty.py b/plugin.video.alfa/channels/javtasty.py index 5d56edfe..c35c1a6b 100755 --- a/plugin.video.alfa/channels/javtasty.py +++ b/plugin.video.alfa/channels/javtasty.py @@ -6,21 +6,18 @@ from core import httptools from core import scrapertools from platformcode import config, logger -host = "http://www.javtasty.com" +host = "https://www.javwhores.com" def mainlist(item): logger.info() itemlist = [] - - itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/videos")) - itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/videos?o=tr")) - itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/videos?o=mv")) - itemlist.append(item.clone(action="lista", title="Ordenados por duración", url=host + "/videos?o=lg")) - itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories")) + itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/")) + itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/")) + itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/")) + itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/")) itemlist.append(item.clone(title="Buscar...", action="search")) itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False)) - return itemlist @@ -33,7 +30,7 @@ def configuracion(item): def search(item, texto): logger.info() - item.url = "%s/search?search_query=%s&search_type=videos" % (host, texto) + item.url = "%s/search/%s/" % (host, texto) item.extra = texto try: return lista(item) @@ -48,83 +45,66 @@ def search(item, texto): def lista(item): logger.info() itemlist = [] - - # Descarga la pagina data = httptools.downloadpage(item.url).data - action = "play" if config.get_setting("menu_info", "javtasty"): action = "menu_info" - - # Extrae las entradas - patron = '<div class="well wellov well-sm".*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"(.*?)<div class="duration">(?:.*?</i>|)\s*([^<]+)<' + patron = 'div class="video-item.*?href="([^"]+)".*?' 
+ patron += 'data-original="([^"]+)" ' + patron += 'alt="([^"]+)"(.*?)fa fa-clock-o"></i>([^<]+)<' matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedthumbnail, scrapedtitle, quality, duration in matches: scrapedurl = urlparse.urljoin(host, scrapedurl) scrapedtitle = scrapedtitle.strip() if duration: scrapedtitle = "%s - %s" % (duration.strip(), scrapedtitle) - if '>HD<' in quality: scrapedtitle += " [COLOR red][HD][/COLOR]" - itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail)) - # Extrae la marca de siguiente página - next_page = scrapertools.find_single_match(data, 'href="([^"]+)" class="prevnext">') + next_page = scrapertools.find_single_match(data, 'next"><a href="([^"]+)') if next_page: - next_page = next_page.replace("&", "&") - itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page)) - + itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=host + next_page)) return itemlist def categorias(item): logger.info() itemlist = [] - - # Descarga la pagina data = httptools.downloadpage(item.url).data - - # Extrae las entradas - patron = '<div class="col-sm-4.*?href="([^"]+)".*?data-original="([^"]+)" title="([^"]+)"' + patron = '(?s)<a class="item" href="([^"]+)".*?' + patron += 'src="([^"]+)" ' + patron += 'alt="([^"]+)"' matches = scrapertools.find_multiple_matches(data, patron) for scrapedurl, scrapedthumbnail, scrapedtitle in matches: scrapedurl = urlparse.urljoin(host, scrapedurl) scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail) itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail)) - return itemlist def play(item): logger.info() itemlist = [] - data = httptools.downloadpage(item.url).data - - videourl = scrapertools.find_single_match(data, "var video_sd\s*=\s*'([^']+)'") + videourl = scrapertools.find_single_match(data, "video_url:\s*'([^']+)'") if videourl: itemlist.append(['.mp4 [directo]', videourl]) - videourl = scrapertools.find_single_match(data, "var video_hd\s*=\s*'([^']+)'") + videourl = scrapertools.find_single_match(data, "video_alt_url:\s*'([^']+)'") if videourl: itemlist.append(['.mp4 HD [directo]', videourl]) - if item.extra == "play_menu": return itemlist, data - return itemlist def menu_info(item): logger.info() itemlist = [] - video_urls, data = play(item.clone(extra="play_menu")) itemlist.append(item.clone(action="play", title="Ver -- %s" % item.title, video_urls=video_urls)) - bloque = scrapertools.find_single_match(data, '<div class="carousel-inner"(.*?)<div class="container">') matches = scrapertools.find_multiple_matches(bloque, 'src="([^"]+)"') for i, img in enumerate(matches): @@ -132,5 +112,4 @@ def menu_info(item): continue title = "Imagen %s" % (str(i)) itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img)) - return itemlist diff --git a/plugin.video.alfa/channels/ohlatino.py b/plugin.video.alfa/channels/ohlatino.py deleted file mode 100644 index 11a08812..00000000 --- a/plugin.video.alfa/channels/ohlatino.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- coding: utf-8 -*- -# -*- Channel OH!Latino -*- -# -*- Created for Alfa-addon -*- -# -*- By the Alfa Develop Group -*- - -import re - -from channelselector import get_thumb -from core import httptools -from core import scrapertools -from core import servertools -from core import tmdb -from core.item import Item -from platformcode import config, 
logger - -host = 'http://www.ohpeliculas.com' - -def mainlist(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(host).data - patron = '<li class="cat-item cat-item-\d+"><a href="(.*?)" >(.*?)<\/a> <i>(\d+)<\/i>' - matches = scrapertools.find_multiple_matches(data, patron) - mcantidad = 0 - for scrapedurl, scrapedtitle, cantidad in matches: - mcantidad += int(cantidad) - - itemlist.append( - item.clone(title="Peliculas", - action='movies_menu' - )) - - itemlist.append( - item.clone(title="Buscar", - action="search", - url=host+'?s=', - )) - - return itemlist - - -def movies_menu(item): - logger.info() - - itemlist = [] - - itemlist.append( - item.clone(title="Todas", - action="list_all", - url=host - )) - - itemlist.append( - item.clone(title="Generos", - action="section", - url=host, extra='genres')) - - itemlist.append( - item.clone(title="Por año", - action="section", - url=host, extra='byyear' - )) - - return itemlist - - -def get_source(url): - logger.info() - data = httptools.downloadpage(url).data - data = re.sub(r'"|\n|\r|\t| |<br>|\s{2,}', "", data) - return data - -def list_all(item): - logger.info() - - itemlist = [] - data = get_source(item.url) - patron = '<div id=mt-.*? class=item>.*?<a href=(.*?)><div class=image>.*?' - patron +='<img src=(.*?) alt=.*?span class=tt>(.*?)<.*?ttx>(.*?)' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches: - url = scrapedurl - action = 'findvideos' - thumbnail = scrapedthumbnail - contentTitle = scrapedtitle - plot = scrapedplot - title = contentTitle - - filtro_thumb = scrapedthumbnail.replace("https://image.tmdb.org/t/p/w185", "") - filtro_list = {"poster_path": filtro_thumb} - filtro_list = filtro_list.items() - - itemlist.append(Item(channel=item.channel, - action=action, - title=title, - url=url, - plot=plot, - thumbnail=thumbnail, - contentTitle=contentTitle, - infoLabels={'filtro': filtro_list} - )) - #tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) - # Paginacion - - if itemlist != []: - actual_page_url = item.url - next_page = scrapertools.find_single_match(data, - 'alignleft><a href=(.*?) 
><\/a><\/div><div class=nav-next alignright>') - if next_page != '': - itemlist.append(Item(channel=item.channel, - action="list_all", - title='Siguiente >>>', - url=next_page, - thumbnail='https://s16.postimg.org/9okdu7hhx/siguiente.png' - )) - return itemlist - - -def section(item): - logger.info() - - itemlist = [] - duplicated =[] - data = httptools.downloadpage(item.url).data - if item.extra == 'genres': - patron = '<li class="cat-item cat-item-.*?><a href="(.*?)" >(.*?)<\/a>' - elif item.extra == 'byyear': - patron = '<a href="([^"]+)">(\d{4})<\/a><\/li>' - matches = re.compile(patron, re.DOTALL).findall(data) - - for scrapedurl, scrapedtitle in matches: - title = scrapedtitle - url = scrapedurl - if url not in duplicated: - itemlist.append(Item(channel=item.channel, - action='list_all', - title=title, - url=url - )) - duplicated.append(url) - return itemlist - - -def search(item, texto): - logger.info() - texto = texto.replace(" ", "+") - item.url = item.url + texto - if texto != '': - return list_all(item) - - -def findvideos(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - itemlist.extend(servertools.find_video_items(data=data)) - for videoitem in itemlist: - videoitem.channel = item.channel - videoitem.contentTitle = item.fulltitle - videoitem.infoLabels = item.infoLabels - if videoitem.server != 'youtube': - videoitem.title = item.title + ' (%s)' % videoitem.server - else: - videoitem.title = 'Trailer en %s' % videoitem.server - videoitem.action = 'play' - videoitem.server = "" - - if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': - itemlist.append( - Item(channel=item.channel, - title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', - url=item.url, - action="add_pelicula_to_library", - extra="findvideos", - )) - tmdb.set_infoLabels(itemlist, True) - itemlist = servertools.get_servers_itemlist(itemlist) - return itemlist - - -def newest(categoria): - logger.info() - item = Item() - try: - if categoria in ['peliculas','latino']: - item.url = host + '/release/2017/' - - elif categoria == 'infantiles': - item.url = host + '/genero/infantil/' - - itemlist = list_all(item) - if itemlist[-1].title == '>> Página siguiente': - itemlist.pop() - except: - import sys - for line in sys.exc_info(): - logger.error("%s" % line) - return [] - - return itemlist - -def play(item): - logger.info() - item.thumbnail = item.contentThumbnail - return [item] diff --git a/plugin.video.alfa/channels/pelispekes.py b/plugin.video.alfa/channels/pelispekes.py index 4c9c87b3..21dd9d22 100755 --- a/plugin.video.alfa/channels/pelispekes.py +++ b/plugin.video.alfa/channels/pelispekes.py @@ -16,19 +16,6 @@ def mainlist(item): item.url = "http://www.pelispekes.com/" data = scrapertools.cachePage(item.url) - ''' - <div class="poster-media-card"> - <a href="http://www.pelispekes.com/un-gallo-con-muchos-huevos/" title="Un gallo con muchos Huevos"> - <div class="poster"> - <div class="title"> - <span class="under-title">Animacion</span> - </div> - <span class="rating"> - <i class="glyphicon glyphicon-star"></i><span class="rating-number">6.2</span> - </span> - <div class="poster-image-container"> - <img width="300" height="428" src="http://image.tmdb.org/t/p/w185/cz3Kb6Xa1q0uCrsTIRDS7fYOZyw.jpg" title="Un gallo con muchos Huevos" alt="Un gallo con muchos Huevos"/> - ''' patron = '<div class="poster-media-card"[^<]+' patron += '<a href="([^"]+)" title="([^"]+)"[^<]+' patron += '<div class="poster"[^<]+' @@ -51,7 +38,7 @@ def 
mainlist(item):
         logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]")
         itemlist.append(
             Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart=thumbnail,
-                 plot=plot, hasContentDetails=True, contentTitle=title, contentThumbnail=thumbnail))
+                 plot=plot, contentTitle=title, contentThumbnail=thumbnail))
 
     # Extrae la pagina siguiente
     next_page_url = scrapertools.find_single_match(data,
@@ -65,14 +52,6 @@ def mainlist(item):
 
 def findvideos(item):
     logger.info("item=" + item.tostring())
-
-    '''
-    <h2>Sinopsis</h2>
-    <p>Para que todo salga bien en la prestigiosa Academia Werth, la pequeña y su madre se mudan a una casa nueva. La pequeña es muy seria y madura para su edad y planea estudiar durante las vacaciones siguiendo un estricto programa organizado por su madre; pero sus planes son perturbados por un vecino excéntrico y generoso. Él le enseña un mundo extraordinario en donde todo es posible. Un mundo en el que el Aviador se topó alguna vez con el misterioso Principito. Entonces comienza la aventura de la pequeña en el universo del Principito. Y así descubre nuevamente su infancia y comprenderá que sólo se ve bien con el corazón. Lo esencial es invisible a los ojos. Adaptación de la novela homónima de Antoine de Saint-Exupery.</p>
-    <div
-    '''
-
-    # Descarga la página para obtener el argumento
     data = scrapertools.cachePage(item.url)
     data = data.replace("www.pelispekes.com/player/tune.php?nt=", "netu.tv/watch_video.php?v=")
diff --git a/plugin.video.alfa/channels/serviporno.py b/plugin.video.alfa/channels/serviporno.py
index bd9f0189..46523670 100755
--- a/plugin.video.alfa/channels/serviporno.py
+++ b/plugin.video.alfa/channels/serviporno.py
@@ -3,16 +3,18 @@
 import re
 import urlparse
 
+from core import httptools
 from core import scrapertools
 from core.item import Item
 from platformcode import logger
 
+host = "https://www.serviporno.com"
 
 def mainlist(item):
     logger.info()
     itemlist = []
     itemlist.append(
-        Item(channel=item.channel, action="videos", title="Útimos videos", url="http://www.serviporno.com/"))
+        Item(channel=item.channel, action="videos", title="Últimos videos", url=host))
     itemlist.append(
         Item(channel=item.channel, action="videos", title="Más vistos", url="http://www.serviporno.com/mas-vistos/"))
     itemlist.append(
@@ -43,15 +45,14 @@ def search(item, texto):
 
 def videos(item):
     logger.info()
     itemlist = []
-    data = scrapertools.downloadpage(item.url)
+    data = httptools.downloadpage(item.url).data
 
-    patron = '<div class="wrap-box-escena">.*?'
+    patron = '(?s)<div class="wrap-box-escena">.*?'
     patron += '<div class="box-escena">.*?'
-    patron += '<a href="([^"]+)" data-stats-video-id="[^"]+" data-stats-video-name="([^"]+)" data-stats-video-category="[^"]*" data-stats-list-name="[^"]*" data-stats-list-pos="[^"]*">.*?'
-    patron += '<img src="([^"]+)" data-src="[^"]+" alt="[^"]+" id=\'[^\']+\' class="thumbs-changer" data-thumbs-prefix="[^"]+" height="150px" width="175px" border=0 />'
-
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    logger.info(str(matches))
+    patron += '<a\s*href="([^"]+)".*?'
+    patron += 'data-stats-video-name="([^"]+)".*?'
+ patron += '<img\s*src="([^"]+)"' + matches = scrapertools.find_multiple_matches(data, patron) for url, title, thumbnail in matches: url = urlparse.urljoin(item.url, url) itemlist.append(Item(channel=item.channel, action='play', title=title, url=url, thumbnail=thumbnail)) @@ -106,10 +107,9 @@ def categorias(item): def play(item): logger.info() itemlist = [] - data = scrapertools.downloadpage(item.url) - url = scrapertools.get_match(data, "url: '([^']+)',\s*framesURL:") + data = httptools.downloadpage(item.url).data + url = scrapertools.find_single_match(data, "sendCdnInfo.'([^']+)") itemlist.append( Item(channel=item.channel, action="play", server="directo", title=item.title, url=url, thumbnail=item.thumbnail, plot=item.plot, folder=False)) - return itemlist diff --git a/plugin.video.alfa/channels/teledocumentales.json b/plugin.video.alfa/channels/teledocumentales.json deleted file mode 100755 index 0e3c12ea..00000000 --- a/plugin.video.alfa/channels/teledocumentales.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "id": "teledocumentales", - "name": "Teledocumentales", - "active": true, - "adult": false, - "language": ["cast", "lat"], - "banner": "teledocumentales.png", - "thumbnail": "teledocumentales.png", - "categories": [ - "documentary" - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/channels/teledocumentales.py b/plugin.video.alfa/channels/teledocumentales.py deleted file mode 100755 index 4611a07b..00000000 --- a/plugin.video.alfa/channels/teledocumentales.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- coding: utf-8 -*- - -import re -import urlparse - -from core import scrapertools -from core import servertools -from core.item import Item -from platformcode import logger - - -def mainlist(item): - logger.info() - - itemlist = [] - itemlist.append(Item(channel=item.channel, action="ultimo", title="Últimos Documentales", - url="http://www.teledocumentales.com/", viewmode="movie_with_plot")) - itemlist.append(Item(channel=item.channel, action="ListaCat", title="Listado por Genero", - url="http://www.teledocumentales.com/")) - - return itemlist - - -def ultimo(item): - logger.info() - itemlist = [] - - data = scrapertools.cachePage(item.url) - - # Extrae las entradas - patron = '<div class="imagen"(.*?)<div style="clear.both">' - matches = re.compile(patron, re.DOTALL).findall(data) - print "manolo" - print matches - - for match in matches: - scrapedtitle = scrapertools.get_match(match, '<img src="[^"]+" alt="([^"]+)"') - scrapedtitle = scrapertools.htmlclean(scrapedtitle) - scrapedurl = scrapertools.get_match(match, '<a href="([^"]+)"') - scrapedthumbnail = scrapertools.get_match(match, '<img src="([^"]+)" alt="[^"]+"') - scrapedplot = scrapertools.get_match(match, '<div class="excerpt">([^<]+)</div>') - itemlist.append( - Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, - plot=scrapedplot, fanart=scrapedthumbnail)) - - # Extrae la marca de siguiente pagina - try: - next_page = scrapertools.get_match(data, '<a class="next" href="([^"]+)">') - itemlist.append(Item(channel=item.channel, action="ultimo", title=">> Página siguiente", - url=urlparse.urljoin(item.url, next_page, viewmode="movie_with_plot"))) - except: - pass - - return itemlist - - -def ListaCat(item): - logger.info() - - url = item.url - - data = scrapertools.cachePage(url) - - # Extrae las entradas (carpetas) - - # <div class="slidethumb"> - # <a href="http://www.cine-adicto.com/transformers-dark-of-the-moon.html"><img 
src="http://www.cine-adicto.com/wp-content/uploads/2011/09/Transformers-Dark-of-the-moon-wallpaper.jpg" width="638" alt="Transformers: Dark of the Moon 2011" /></a> - # </div> - - patron = '<div id="menu_horizontal">(.*?)<div class="cuerpo">' - matches = re.compile(patron, re.DOTALL).findall(data) - logger.info("hay %d matches" % len(matches)) - - itemlist = [] - for match in matches: - data2 = match - patron = '<li class="cat-item cat-item-.*?<a href="(.*?)".*?>(.*?)</a>.*?</li>' - matches2 = re.compile(patron, re.DOTALL).findall(data2) - logger.info("hay %d matches2" % len(matches2)) - - for match2 in matches2: - scrapedtitle = match2[1].replace("–", "-").replace("&", "&").strip() - scrapedurl = match2[0] - scrapedthumbnail = match2[0].replace(" ", "%20") - scrapedplot = "" - - itemlist.append(Item(channel=item.channel, action="ultimo", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail, plot=scrapedplot, fanart=scrapedthumbnail, - viewmode="movie_with_plot")) - - return itemlist - - -def play(item): - logger.info() - - data = scrapertools.cachePage(item.url) - - urlvideo = scrapertools.get_match(data, '<!-- end navigation -->.*?<iframe src="([^"]+)"') - data = scrapertools.cachePage(urlvideo) - url = scrapertools.get_match(data, 'iframe src="([^"]+)"') - - itemlist = servertools.find_video_items(data=url) - - for videoitem in itemlist: - videoitem.title = item.title - videoitem.channel = item.channel - - return itemlist diff --git a/plugin.video.alfa/channels/yaske.py b/plugin.video.alfa/channels/yaske.py index e4f320ee..b81aa810 100644 --- a/plugin.video.alfa/channels/yaske.py +++ b/plugin.video.alfa/channels/yaske.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- import re +import urllib +import unicodedata from core import channeltools from core import httptools @@ -11,7 +13,11 @@ from core.item import Item from platformcode import config, logger idiomas1 = {"/es.png":"CAST","/en_es.png":"VOSE","/la.png":"LAT","/en.png":"ENG"} -HOST = 'http://www.yaske.ro' +domain = "yaske.ro" +HOST = "http://www." + domain +HOST_MOVIES = "http://peliculas." + domain + "/now_playing/" +HOST_TVSHOWS = "http://series." + domain + "/popular/" +HOST_TVSHOWS_TPL = "http://series." 
 parameters = channeltools.get_channel_parameters('yaske')
 fanart_host = parameters['fanart']
 thumbnail_host = parameters['thumbnail']
@@ -26,38 +32,156 @@ def mainlist(item):
     item.fanart = fanart_host
     thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/verdes/%s.png"
-    itemlist.append(item.clone(title="Novedades", action="peliculas", text_bold=True, viewcontent='movies',
-                               url=HOST,
+    itemlist.append(item.clone(title="Películas", text_bold=True, viewcontent='movies',
                                thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
-    itemlist.append(item.clone(title="Estrenos", action="peliculas", text_bold=True,
+    itemlist.append(item.clone(title="  Novedades", action="peliculas", viewcontent='movies',
+                               url=HOST_MOVIES,
+                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
+    itemlist.append(item.clone(title="  Estrenos", action="peliculas",
                                url=HOST + "/premiere", thumbnail=thumbnail % 'estrenos'))
-    itemlist.append(item.clone(title="Género", action="menu_buscar_contenido", text_bold=True,thumbnail=thumbnail % 'generos', viewmode="thumbnails",
+    itemlist.append(item.clone(title="  Género", action="menu_buscar_contenido", thumbnail=thumbnail % 'generos', viewmode="thumbnails",
                                url=HOST ))
+    itemlist.append(item.clone(title="  Buscar película", action="search", thumbnail=thumbnail % 'buscar',
+                               type = "movie" ))
-    itemlist.append(item.clone(title="", folder=False))
-    itemlist.append(item.clone(title="Buscar por título", action="search", thumbnail=thumbnail % 'buscar'))
+    itemlist.append(item.clone(title="Series", text_bold=True, viewcontent='movies',
+                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
+    itemlist.append(item.clone(title="  Novedades", action="series", viewcontent='movies',
+                               url=HOST_TVSHOWS,
+                               thumbnail=thumbnail % 'novedades', viewmode="movie_with_plot"))
+    itemlist.append(item.clone(title="  Buscar serie", action="search", thumbnail=thumbnail % 'buscar',
+                               type = "tvshow" ))
 
     return itemlist
 
 
+def series(item):
+    logger.info()
+    itemlist = []
+    url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
+    page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
+    if not page:
+        page = 1
+        url_p = item.url
+    else:
+        page = int(page) + 1
+    if "search" in item.url:
+        url_p += "&page=%s" %page
+    else:
+        url_p += "?page=%s" %page
+    data = httptools.downloadpage(url_p).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '(?s)class="post-item-image btn-play-item".*?'
+    patron += 'href="(http://series[^"]+)">.*?'
+    patron += '<img data-original="([^"]+)".*?'
+    patron += 'glyphicon-play-circle"></i>([^<]+).*?'
+    patron += 'glyphicon-calendar"></i>([^<]+).*?'
+    patron += 'text-muted f-14">(.*?)</h3'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedthumbnail, scrapedepisodes, year, scrapedtitle in matches:
+        scrapedepisodes = scrapedepisodes.strip()
+        year = year.strip()
+        contentSerieName = scrapertools.htmlclean(scrapedtitle.strip())
+        title = "%s (%s)" %(contentSerieName, scrapedepisodes)
+        if "series" in scrapedurl:
+            itemlist.append(Item(channel=item.channel, action="temporadas", title=title, url=scrapedurl,
+                                 thumbnail=scrapedthumbnail, contentSerieName=contentSerieName,
+                                 infoLabels={"year": year}, text_color=color1))
+    # Obtenemos los datos basicos de todas las peliculas mediante multihilos
+    tmdb.set_infoLabels(itemlist, True)
+
+    # Si es necesario añadir paginacion
+    patron_next_page = 'href="([^"]+)">\s*»'
+    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
+    if matches_next_page and len(itemlist)>0:
+        itemlist.append(
+            Item(channel=item.channel, action="series", title=">> Página siguiente", thumbnail=thumbnail_host,
+                 url=url_p, folder=True, text_color=color3, text_bold=True))
+    return itemlist
+
+
+def temporadas(item):
+    logger.info()
+    itemlist = []
+    post = []
+    data = httptools.downloadpage(item.url).data
+    patron = 'media-object" src="([^"]+).*?'
+    patron += 'media-heading">([^<]+).*?'
+    patron += '<code>(.*?)</div>'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedthumbnail, scrapedtitle, scrapedcapitulos in matches:
+        id = scrapertools.find_single_match(item.url, "yaske.ro/([0-9]+)")
+        season = scrapertools.find_single_match(scrapedtitle, "[0-9]+")
+        title = scrapedtitle + " (%s)" %scrapedcapitulos.replace("</code>","").replace("\n","")
+        post = {"data[season]" : season, "data[id]" : id, "name" : "list_episodes" , "both" : "0", "type" : "template"}
+        post = urllib.urlencode(post)
+        item.infoLabels["season"] = season
+        itemlist.append(item.clone(action = "capitulos",
+                                   post = post,
+                                   title = title,
+                                   url = HOST_TVSHOWS_TPL
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    if config.get_videolibrary_support():
+        itemlist.append(Item(channel=item.channel, title =""))
+        itemlist.append(item.clone(action = "add_serie_to_library",
+                                   channel = item.channel,
+                                   extra = "episodios",
+                                   title = '[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
+                                   url = item.url
+                                   ))
+    return itemlist
+
+
+def episodios(item):
+    logger.info()
+    itemlist = []
+    templist = temporadas(item)
+    for tempitem in templist:
+        itemlist += capitulos(tempitem)
+    return itemlist
+
+
+def capitulos(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url, post=item.post).data
+    data = data.replace("<wbr>","")
+    patron = 'href=."([^"]+).*?'
+    patron += 'media-heading.">([^<]+).*?'
+    patron += 'fecha de emisi.*?: ([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle, scrapeddate in matches:
+        scrapedtitle = scrapedtitle + " (%s)" %scrapeddate
+        episode = scrapertools.find_single_match(scrapedurl, "capitulo-([0-9]+)")
+        query = item.contentSerieName + " " + scrapertools.find_single_match(scrapedtitle, "\w+")
+        item.infoLabels["episode"] = episode
+        itemlist.append(item.clone(action = "findvideos",
+                                   title = scrapedtitle.decode("unicode-escape"),
+                                   query = query.replace(" ","+"),
+                                   url = scrapedurl.replace("\\","")
+                                   ))
+    tmdb.set_infoLabels(itemlist)
+    return itemlist
+
+
 def search(item, texto):
     logger.info()
     itemlist = []
-
     try:
         item.url = HOST + "/search/?query=" + texto.replace(' ', '+')
         item.extra = ""
-        itemlist.extend(peliculas(item))
+        if item.type == "movie":
+            itemlist.extend(peliculas(item))
+        else:
+            itemlist.extend(series(item))
         if itemlist[-1].title == ">> Página siguiente":
             item_pag = itemlist[-1]
             itemlist = sorted(itemlist[:-1], key=lambda Item: Item.contentTitle)
             itemlist.append(item_pag)
         else:
             itemlist = sorted(itemlist, key=lambda Item: Item.contentTitle)
-
         return itemlist
-
     except:
         import sys
         for line in sys.exc_info():
@@ -77,7 +201,6 @@ def newest(categoria):
         item.url = HOST + "/genre/27/"
     else:
         return []
-
     itemlist = peliculas(item)
     if itemlist[-1].title == ">> Página siguiente":
         itemlist.pop()
@@ -95,8 +218,18 @@ def peliculas(item):
     logger.info()
     itemlist = []
-
-    data = httptools.downloadpage(item.url).data
+    url_p = scrapertools.find_single_match(item.url, '(.*?).page=')
+    page = scrapertools.find_single_match(item.url, 'page=([0-9]+)')
+    if not page:
+        page = 1
+        url_p = item.url
+    else:
+        page = int(page) + 1
+    if "search" in item.url:
+        url_p += "&page=%s" %page
+    else:
+        url_p += "?page=%s" %page
+    data = httptools.downloadpage(url_p).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     patron = '(?s)class="post-item-image btn-play-item".*?'
     patron += 'href="([^"]+)">.*?'
@@ -105,12 +238,8 @@
     patron += 'post(.*?)</div.*?'
     patron += 'text-muted f-14">(.*?)</h3'
     matches = scrapertools.find_multiple_matches(data, patron)
-    patron_next_page = 'href="([^"]+)"> »'
-    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
-    if len(matches_next_page) > 0:
-        url_next_page = item.url + matches_next_page
-
     for scrapedurl, scrapedthumbnail, year, idiomas, scrapedtitle in matches:
+        query = scrapertools.find_single_match(scrapedurl, 'yaske.ro/[0-9]+/(.*?)/').replace("-","+")
         year = year.strip()
         patronidiomas = '<img src="([^"]+)"'
         matchesidiomas = scrapertools.find_multiple_matches(idiomas, patronidiomas)
@@ -124,28 +253,27 @@
         contentTitle = scrapertools.htmlclean(scrapedtitle.strip())
         title = "%s %s" % (contentTitle, idiomas_disponibles)
         itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=scrapedurl,
-                             thumbnail=scrapedthumbnail, contentTitle=contentTitle,
+                             thumbnail=scrapedthumbnail, contentTitle=contentTitle, query = query,
                              infoLabels={"year": year}, text_color=color1))
 
     # Obtenemos los datos basicos de todas las peliculas mediante multihilos
     tmdb.set_infoLabels(itemlist)
 
     # Si es necesario añadir paginacion
-    if matches_next_page:
+    patron_next_page = 'href="([^"]+)">\s*»'
+    matches_next_page = scrapertools.find_single_match(data, patron_next_page)
+    if matches_next_page and len(itemlist)>0:
         itemlist.append(
             Item(channel=item.channel, action="peliculas", title=">> Página siguiente", thumbnail=thumbnail_host,
-                 url=url_next_page, folder=True, text_color=color3, text_bold=True))
-
+                 url=url_p, folder=True, text_color=color3, text_bold=True))
     return itemlist
 
 
 def menu_buscar_contenido(item):
     logger.info(item)
     itemlist = []
-
     data = httptools.downloadpage(item.url).data
     patron = 'Generos.*?</ul>'
     data = scrapertools.find_single_match(data, patron)
-
     # Extrae las entradas
     patron = 'href="([^"]+)">([^<]+)'
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedtitle in matches:
@@ -159,11 +287,7 @@ def menu_buscar_contenido(item):
                              folder = True,
                              viewmode = "movie_with_plot"
                              ))
-
-    if item.extra in ['genre', 'audio', 'year']:
-        return sorted(itemlist, key=lambda i: i.title.lower(), reverse=item.extra == 'year')
-    else:
-        return itemlist
+    return itemlist
 
 
 def findvideos(item):
@@ -171,11 +295,12 @@
     itemlist = []
     sublist = []
     data = httptools.downloadpage(item.url).data
-    mtmdb = scrapertools.find_single_match(item.url, 'yaske.ro/([0-9]+)')
     patron = '(?s)id="online".*?server="([^"]+)"'
     mserver = scrapertools.find_single_match(data, patron)
-    url_m = "http://olimpo.link/?tmdb=%s&server=%s" %(mtmdb, mserver)
-    patron = '/\?tmdb=[^"]+.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
+    if not item.query:
+        item.query = scrapertools.find_single_match(item.url, "peliculas.*?/[0-9]+/([^/]+)").replace("-","+")
+    url_m = "http://olimpo.link/?q=%s&server=%s" %(item.query, mserver)
+    patron = 'class="favicon.*?domain=(?:www\.|)([^\.]+).*?text-overflow.*?href="([^"]+).*?'
     patron += '\[([^\]]+)\].*?\[([^\]]+)\]'
     data = httptools.downloadpage(url_m).data
     matches = scrapertools.find_multiple_matches(data, patron)
diff --git a/plugin.video.alfa/core/httptools.py b/plugin.video.alfa/core/httptools.py
index a071f602..dfb404b0 100755
--- a/plugin.video.alfa/core/httptools.py
+++ b/plugin.video.alfa/core/httptools.py
@@ -23,7 +23,7 @@
 ficherocookies = os.path.join(config.get_data_path(), "cookies.dat")
 
 # Headers por defecto, si no se especifica nada
 default_headers = dict()
-default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"
+default_headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36"
 default_headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
 default_headers["Accept-Language"] = "es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"
 default_headers["Accept-Charset"] = "UTF-8"
diff --git a/plugin.video.alfa/core/item.py b/plugin.video.alfa/core/item.py
index f3e89aeb..a0301c7f 100755
--- a/plugin.video.alfa/core/item.py
+++ b/plugin.video.alfa/core/item.py
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
 # --------------------------------------------------------------------------------
 # Item is the object we use for representing data
 # --------------------------------------------------------------------------------
@@ -170,8 +170,6 @@ class Item(object):
         # Al modificar cualquiera de estos atributos content...
         if name in ["contentTitle", "contentPlot", "plot", "contentSerieName", "contentType", "contentEpisodeTitle",
                     "contentSeason", "contentEpisodeNumber", "contentThumbnail", "show", "contentQuality", "quality"]:
-            # ... marcamos hasContentDetails como "true"...
-            self.__dict__["hasContentDetails"] = True
             # ...y actualizamos infoLables
             if name == "contentTitle":
                 self.__dict__["infoLabels"]["title"] = value
@@ -236,10 +234,6 @@ class Item(object):
             self.__dict__["viewcontent"] = viewcontent
             return viewcontent
 
-        # Valor por defecto para hasContentDetails
-        elif name == "hasContentDetails":
-            return False
-
         # valores guardados en infoLabels
         elif name in ["contentTitle", "contentPlot", "contentSerieName", "show", "contentType", "contentEpisodeTitle",
                       "contentSeason", "contentEpisodeNumber", "contentThumbnail", "plot", "duration",
diff --git a/plugin.video.alfa/core/videolibrarytools.py b/plugin.video.alfa/core/videolibrarytools.py
index 964f00bf..2fcae901 100644
--- a/plugin.video.alfa/core/videolibrarytools.py
+++ b/plugin.video.alfa/core/videolibrarytools.py
@@ -268,8 +268,9 @@ def save_tvshow(item, episodelist):
     # Creamos tvshow.nfo, si no existe, con la head_nfo, info de la serie y marcas de episodios vistos
     logger.info("Creando tvshow.nfo: " + tvshow_path)
     head_nfo = scraper.get_nfo(item)
-
-    item_tvshow = Item(title=item.contentTitle, channel="videolibrary", action="get_seasons",
+    item.infoLabels['mediatype'] = "tvshow"
+    item.infoLabels['title'] = item.contentSerieName
+    item_tvshow = Item(title=item.contentSerieName, channel="videolibrary", action="get_seasons",
                        fanart=item.infoLabels['fanart'], thumbnail=item.infoLabels['thumbnail'],
                        infoLabels=item.infoLabels, path=path.replace(TVSHOWS_PATH, ""))
     item_tvshow.library_playcounts = {}
@@ -294,7 +295,6 @@ def save_tvshow(item, episodelist):
 
     if item.channel != "downloads":
         item_tvshow.active = 1  # para que se actualice a diario cuando se llame a videolibrary_service
-
     filetools.write(tvshow_path, head_nfo + item_tvshow.tojson())
 
     if not episodelist:
@@ -439,7 +439,7 @@ def save_episodes(path, episodelist, serie, silent=False, overwrite=True):
                 news_in_playcounts["season %s" % e.contentSeason] = 0
             # Marcamos la serie como no vista
             # logger.debug("serie " + serie.tostring('\n'))
-            news_in_playcounts[serie.contentTitle] = 0
+            news_in_playcounts[serie.contentSerieName] = 0
         else:
             logger.info("Sobreescrito: %s" % json_path)
diff --git a/plugin.video.alfa/servers/bitp.json b/plugin.video.alfa/servers/bitp.json
index c0ef601f..8068e544 100644
--- a/plugin.video.alfa/servers/bitp.json
+++ b/plugin.video.alfa/servers/bitp.json
@@ -8,7 +8,7 @@
             "url": "https://www.bitporno.com/e/\\1"
         },
         {
-            "pattern": "raptu.com/(?:\\?v=|embed/|e/)([A-z0-9]+)",
+            "pattern": "raptu.com/(?:\\?v=|embed/|e/|v/)([A-z0-9]+)",
             "url": "https://www.bitporno.com/e/\\1"
         }
     ]
diff --git a/plugin.video.alfa/servers/bitp.py b/plugin.video.alfa/servers/bitp.py
index 7ca99b97..2070f04c 100644
--- a/plugin.video.alfa/servers/bitp.py
+++ b/plugin.video.alfa/servers/bitp.py
@@ -23,7 +23,7 @@ def get_video_url(page_url, user="", password="", video_password=""):
     logger.info("(page_url='%s')" % page_url)
     video_urls = []
     data = httptools.downloadpage(page_url).data
-    videourl = scrapertools.find_multiple_matches(data, 'file":"([^"]+).*?label":"([^"]+)')
+    videourl = scrapertools.find_multiple_matches(data, '<source src="(http[^"]+).*?data-res="([^"]+)')
     scrapertools.printMatches(videourl)
     for scrapedurl, scrapedquality in videourl:
         if "loadthumb" in scrapedurl:
diff --git a/plugin.video.alfa/servers/divxstage.json b/plugin.video.alfa/servers/divxstage.json
deleted file mode 100755
index 0b417fb4..00000000
--- a/plugin.video.alfa/servers/divxstage.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-    "active": true,
-    "find_videos": {
-        "ignore_urls": [],
"patterns": [ - { - "pattern": "(?:divxstage|cloudtime).[^/]+/video/([^\"' ]+)", - "url": "http://www.cloudtime.to/embed/?v=\\1" - } - ] - }, - "free": true, - "id": "divxstage", - "name": "divxstage", - "settings": [ - { - "default": false, - "enabled": true, - "id": "black_list", - "label": "Incluir en lista negra", - "type": "bool", - "visible": true - }, - { - "default": 0, - "enabled": true, - "id": "favorites_servers_list", - "label": "Incluir en lista de favoritos", - "lvalues": [ - "No", - "1", - "2", - "3", - "4", - "5" - ], - "type": "list", - "visible": false - } - ], - "thumbnail": "server_divxstage.png" -} \ No newline at end of file diff --git a/plugin.video.alfa/servers/divxstage.py b/plugin.video.alfa/servers/divxstage.py deleted file mode 100755 index f201e419..00000000 --- a/plugin.video.alfa/servers/divxstage.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from core import httptools -from core import scrapertools -from platformcode import logger - -host = "http://www.cloudtime.to" - - -def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) - - data = httptools.downloadpage(page_url.replace('/embed/?v=', '/video/')).data - - if "This file no longer exists" in data: - return False, "El archivo no existe<br/>en divxstage o ha sido borrado." - - return True, "" - - -def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) - - if "divxstage.net" in page_url: - page_url = page_url.replace("divxstage.net", "cloudtime.to") - - data = httptools.downloadpage(page_url).data - - video_urls = [] - videourls = scrapertools.find_multiple_matches(data, 'src\s*:\s*[\'"]([^\'"]+)[\'"]') - if not videourls: - videourls = scrapertools.find_multiple_matches(data, '<source src=[\'"]([^\'"]+)[\'"]') - for videourl in videourls: - if videourl.endswith(".mpd"): - id = scrapertools.find_single_match(videourl, '/dash/(.*?)/') - videourl = "http://www.cloudtime.to/download.php%3Ffile=mm" + "%s.mp4" % id - - videourl = re.sub(r'/dl(\d)*/', '/dl/', videourl) - ext = scrapertools.get_filename_from_url(videourl)[-4:] - videourl = videourl.replace("%3F", "?") + \ - "|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0" - video_urls.append([ext + " [cloudtime]", videourl]) - - return video_urls diff --git a/plugin.video.alfa/servers/gvideo.py b/plugin.video.alfa/servers/gvideo.py index 0020c425..a987c6c4 100644 --- a/plugin.video.alfa/servers/gvideo.py +++ b/plugin.video.alfa/servers/gvideo.py @@ -9,8 +9,6 @@ from platformcode import logger def test_video_exists(page_url): - if 'googleusercontent' in page_url: - return True, "" response = httptools.downloadpage(page_url, cookies=False, headers={"Referer": page_url}) if "no+existe" in response.data: return False, "[gvideo] El video no existe o ha sido borrado" @@ -22,6 +20,8 @@ def test_video_exists(page_url): return False, "[gvideo] Se ha producido un error en el reproductor de google" if "No+se+puede+procesar+este" in response.data: return False, "[gvideo] No se puede procesar este video" + if response.code == 429: + return False, "[gvideo] Demasiadas conexiones al servidor, inténtelo después" return True, "" diff --git a/plugin.video.alfa/servers/idowatch.json b/plugin.video.alfa/servers/idowatch.json deleted file mode 100755 index b64ca14d..00000000 --- a/plugin.video.alfa/servers/idowatch.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "active": true, - "find_videos": { - "ignore_urls": [], - "patterns": [ 
-            {
-                "pattern": "idowatch.net/(?:embed-)?([a-z0-9]+)",
-                "url": "http://idowatch.net/\\1.html"
-            }
-        ]
-    },
-    "free": true,
-    "id": "idowatch",
-    "name": "idowatch",
-    "settings": [
-        {
-            "default": false,
-            "enabled": true,
-            "id": "black_list",
-            "label": "Incluir en lista negra",
-            "type": "bool",
-            "visible": true
-        },
-        {
-            "default": 0,
-            "enabled": true,
-            "id": "favorites_servers_list",
-            "label": "Incluir en lista de favoritos",
-            "lvalues": [
-                "No",
-                "1",
-                "2",
-                "3",
-                "4",
-                "5"
-            ],
-            "type": "list",
-            "visible": false
-        }
-    ],
-    "thumbnail": "server_idowatch.png"
-}
\ No newline at end of file
diff --git a/plugin.video.alfa/servers/idowatch.py b/plugin.video.alfa/servers/idowatch.py
deleted file mode 100755
index 235ce63f..00000000
--- a/plugin.video.alfa/servers/idowatch.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import scrapertools
-from lib import jsunpack
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-    data = scrapertools.cache_page(page_url)
-    if "File Not Found" in data:
-        return False, "[Idowatch] El archivo no existe o ha sido borrado"
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("(page_url='%s')" % page_url)
-
-    data = scrapertools.cache_page(page_url)
-
-    mediaurl = scrapertools.find_single_match(data, ',{file:(?:\s+|)"([^"]+)"')
-    if not mediaurl:
-        matches = scrapertools.find_single_match(data,
-                                                 "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
-        matchjs = jsunpack.unpack(matches).replace("\\", "")
-        mediaurl = scrapertools.find_single_match(matchjs, ',{file:(?:\s+|)"([^"]+)"')
-
-    video_urls = []
-    video_urls.append([scrapertools.get_filename_from_url(mediaurl)[-4:] + " [idowatch]", mediaurl])
-
-    for video_url in video_urls:
-        logger.info("%s - %s" % (video_url[0], video_url[1]))
-
-    return video_urls
diff --git a/plugin.video.alfa/servers/nosvideo.json b/plugin.video.alfa/servers/nosvideo.json
deleted file mode 100755
index 605f0027..00000000
--- a/plugin.video.alfa/servers/nosvideo.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
-    "active": true,
-    "find_videos": {
-        "ignore_urls": [],
-        "patterns": [
-            {
-                "pattern": "nosvideo.com/(?:\\?v=|vj/video.php\\?u=|)([a-z0-9]+)",
-                "url": "http://nosvideo.com/vj/videomain.php?u=\\1==530"
-            },
-            {
-                "pattern": "nosupload.com(/\\?v\\=[a-z0-9]+)",
-                "url": "http://nosvideo.com\\1"
-            }
-        ]
-    },
-    "free": true,
-    "id": "nosvideo",
-    "name": "nosvideo",
-    "settings": [
-        {
-            "default": false,
-            "enabled": true,
-            "id": "black_list",
-            "label": "Incluir en lista negra",
-            "type": "bool",
-            "visible": true
-        },
-        {
-            "default": 0,
-            "enabled": true,
-            "id": "favorites_servers_list",
-            "label": "Incluir en lista de favoritos",
-            "lvalues": [
-                "No",
-                "1",
-                "2",
-                "3",
-                "4",
-                "5"
-            ],
-            "type": "list",
-            "visible": false
-        }
-    ]
-}
\ No newline at end of file
diff --git a/plugin.video.alfa/servers/nosvideo.py b/plugin.video.alfa/servers/nosvideo.py
deleted file mode 100755
index 32b67350..00000000
--- a/plugin.video.alfa/servers/nosvideo.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import scrapertools
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-
-    data = scrapertools.cache_page(page_url)
-
-    if "404 Page no found" in data:
-        return False, "[nosvideo] El archivo no existe o ha sido borrado"
-
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("(page_url='%s')" % page_url)
-    video_urls = []
-
-    # Lee la URL
-    data = scrapertools.cache_page(page_url)
-    urls = scrapertools.find_multiple_matches(data, ":'(http:\/\/.+?(?:v.mp4|.smil))")
-    urls = set(urls)
-
-    for media_url in urls:
-        if ".smil" in media_url:
-            data = scrapertools.downloadpage(media_url)
-            rtmp = scrapertools.find_single_match(data, '<meta base="([^"]+)"')
-            playpath = scrapertools.find_single_match(data, '<video src="([^"]+)"')
-            media_url = rtmp + " playpath=" + playpath
-            filename = "rtmp"
-        else:
-            filename = scrapertools.get_filename_from_url(media_url)[-4:]
-        video_urls.append([filename + " [nosvideo]", media_url])
-
-    for video_url in video_urls:
-        logger.info("%s - %s" % (video_url[0], video_url[1]))
-
-    return video_urls
diff --git a/plugin.video.alfa/servers/nowdownload.json b/plugin.video.alfa/servers/nowdownload.json
deleted file mode 100755
index 8b7f0d89..00000000
--- a/plugin.video.alfa/servers/nowdownload.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
-    "active": true,
-    "find_videos": {
-        "ignore_urls": [],
-        "patterns": [
-            {
-                "pattern": "(nowdownload.\\w{2}]/dl/[a-z0-9]+)",
-                "url": "http://www.\\1"
-            }
-        ]
-    },
-    "free": false,
-    "id": "nowdownload",
-    "name": "nowdownload",
-    "premium": [
-        "realdebrid"
-    ],
-    "settings": [
-        {
-            "default": false,
-            "enabled": true,
-            "id": "black_list",
-            "label": "Incluir en lista negra",
-            "type": "bool",
-            "visible": true
-        },
-        {
-            "default": 0,
-            "enabled": true,
-            "id": "favorites_servers_list",
-            "label": "Incluir en lista de favoritos",
-            "lvalues": [
-                "No",
-                "1",
-                "2",
-                "3",
-                "4",
-                "5"
-            ],
-            "type": "list",
-            "visible": false
-        }
-    ],
-    "thumbnail": "server_nowdownload.png"
-}
\ No newline at end of file
diff --git a/plugin.video.alfa/servers/nowdownload.py b/plugin.video.alfa/servers/nowdownload.py
deleted file mode 100755
index 8e5563b3..00000000
--- a/plugin.video.alfa/servers/nowdownload.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import scrapertools
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("(page_url='%s')" % page_url)
-
-    '''
-    <a href="http://f02.nowdownload.co/dl/91efaa9ec507ef4de023cd62bb9a0fe2/50ab76ac/6711c9c90ebf3_family.guy.s11e02.italian.subbed.hdtv.xvid_gannico.avi" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>
-    '''
-    data = scrapertools.cache_page(page_url)
-    logger.debug("data:" + data)
-    try:
-        url = scrapertools.get_match(data,
-                                     '<a href="([^"]*)" class="btn btn-danger"><i class="icon-white icon-download"></i> Download Now</a>')
-    except:
-        # $.get("/api/token.php?token=7e1ab09df2775dbea02506e1a2651883");
-        token = scrapertools.get_match(data, '(/api/token.php\?token=[^"]*)')
-        logger.debug("token:" + token)
-        d = scrapertools.cache_page("http://www.nowdownload.co" + token)
-        url = scrapertools.get_match(data, 'expiryText: \'<a class="btn btn-danger" href="([^"]*)')
-        logger.debug("url_1:" + url)
-        data = scrapertools.cache_page("http://www.nowdownload.co" + url)
-        logger.debug("data:" + data)
-        # <a href="http://f03.nowdownload.co/dl/8ec5470153bb7a2177847ca7e1638389/50ab71b3/f92882f4d33a5_squadra.antimafia_palermo.oggi.4x01.episodio.01.ita.satrip.xvid_upz.avi" class="btn btn-success">Click here to download !</a>
btn-success">Click here to download !</a>') - logger.debug("url_final:" + url) - - video_urls = [url] - return video_urls diff --git a/plugin.video.alfa/servers/pcloud.json b/plugin.video.alfa/servers/pcloud.json deleted file mode 100755 index 88fd42e4..00000000 --- a/plugin.video.alfa/servers/pcloud.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "active": true, - "free": true, - "id": "pcloud", - "name": "pcloud", - "settings": [ - { - "default": false, - "enabled": true, - "id": "black_list", - "label": "Incluir en lista negra", - "type": "bool", - "visible": true - }, - { - "default": 0, - "enabled": true, - "id": "favorites_servers_list", - "label": "Incluir en lista de favoritos", - "lvalues": [ - "No", - "1", - "2", - "3", - "4", - "5" - ], - "type": "list", - "visible": false - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/servers/pcloud.py b/plugin.video.alfa/servers/pcloud.py deleted file mode 100755 index cb97aaff..00000000 --- a/plugin.video.alfa/servers/pcloud.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- - -from core import scrapertools -from platformcode import logger - - -def test_video_exists(page_url): - logger.info("(page_url='%s')" % page_url) - - data = scrapertools.cache_page(page_url) - if "Invalid link" in data: return False, "[pCloud] El archivo no existe o ha sido borrado" - - return True, "" - - -def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("url=" + page_url) - - data = scrapertools.cache_page(page_url) - media_url = scrapertools.find_single_match(data, '"downloadlink":.*?"([^"]+)"') - media_url = media_url.replace("\\", "") - - video_urls = [] - video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [pCloud]", media_url]) - - for video_url in video_urls: - logger.info("%s - %s" % (video_url[0], video_url[1])) - - return video_urls diff --git a/plugin.video.alfa/servers/stagevu.json b/plugin.video.alfa/servers/stagevu.json deleted file mode 100755 index 8040f2c2..00000000 --- a/plugin.video.alfa/servers/stagevu.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "active": true, - "find_videos": { - "ignore_urls": [], - "patterns": [ - { - "pattern": "(http://stagevu.com/video/[A-Z0-9a-z]+)", - "url": "\\1" - }, - { - "pattern": "http://stagevu.com.*?uid\\=([A-Z0-9a-z]+)", - "url": "http://stagevu.com/video/\\1" - }, - { - "pattern": "http://[^\\.]+\\.stagevu.com/v/[^/]+/(.*?).avi", - "url": "http://stagevu.com/video/\\1" - } - ] - }, - "free": true, - "id": "stagevu", - "name": "stagevu", - "settings": [ - { - "default": false, - "enabled": true, - "id": "black_list", - "label": "Incluir en lista negra", - "type": "bool", - "visible": true - }, - { - "default": 0, - "enabled": true, - "id": "favorites_servers_list", - "label": "Incluir en lista de favoritos", - "lvalues": [ - "No", - "1", - "2", - "3", - "4", - "5" - ], - "type": "list", - "visible": false - } - ] -} \ No newline at end of file diff --git a/plugin.video.alfa/servers/stagevu.py b/plugin.video.alfa/servers/stagevu.py deleted file mode 100755 index 5b44e05d..00000000 --- a/plugin.video.alfa/servers/stagevu.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from core import scrapertools -from platformcode import logger - - -# Returns an array of possible video url's from the page_url -def get_video_url(page_url, premium=False, user="", password="", video_password=""): - logger.info("(page_url='%s')" % page_url) - - video_urls = [] - - # Descarga la página del vídeo - data = 
-    data = scrapertools.cache_page(page_url)
-
-    # Busca el vídeo de dos formas distintas
-    patronvideos = '<param name="src" value="([^"]+)"'
-    matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
-    if len(matches) > 0:
-        video_urls = [["[stagevu]", matches[0]]]
-    else:
-        patronvideos = 'src="([^"]+stagevu.com/[^i][^"]+)"'  # Forma src="XXXstagevu.com/ y algo distinto de i para evitar images e includes
-        matches = re.findall(patronvideos, data)
-        if len(matches) > 0:
-            video_urls = [["[stagevu]", matches[0]]]
-
-    for video_url in video_urls:
-        logger.info("%s - %s" % (video_url[0], video_url[1]))
-
-    return video_urls
diff --git a/plugin.video.alfa/servers/stormo.json b/plugin.video.alfa/servers/stormo.json
deleted file mode 100755
index e139a61c..00000000
--- a/plugin.video.alfa/servers/stormo.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-    "active": true,
-    "find_videos": {
-        "ignore_urls": [],
-        "patterns": [
-            {
-                "pattern": "stormo.tv/(?:videos/|embed/)([0-9]+)",
-                "url": "http://stormo.tv/embed/\\1"
-            }
-        ]
-    },
-    "free": true,
-    "id": "stormo",
-    "name": "stormo",
-    "settings": [
-        {
-            "default": false,
-            "enabled": true,
-            "id": "black_list",
-            "label": "Incluir en lista negra",
-            "type": "bool",
-            "visible": true
-        },
-        {
-            "default": 0,
-            "enabled": true,
-            "id": "favorites_servers_list",
-            "label": "Incluir en lista de favoritos",
-            "lvalues": [
-                "No",
-                "1",
-                "2",
-                "3",
-                "4",
-                "5"
-            ],
-            "type": "list",
-            "visible": false
-        }
-    ],
-    "thumbnail": "http://i.imgur.com/mTYCw5E.png"
-}
\ No newline at end of file
diff --git a/plugin.video.alfa/servers/stormo.py b/plugin.video.alfa/servers/stormo.py
deleted file mode 100755
index e2ad5511..00000000
--- a/plugin.video.alfa/servers/stormo.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import httptools
-from core import scrapertools
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-
-    response = httptools.downloadpage(page_url)
-    if "video_error.mp4" in response.data:
-        return False, "[Stormo] El archivo no existe o ha sido borrado"
-    if response.code == 451:
-        return False, "[Stormo] El archivo ha sido borrado por problemas legales."
-
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info(" url=" + page_url)
-
-    video_urls = []
-    data = httptools.downloadpage(page_url).data
-    media_url = scrapertools.find_single_match(data, "file\s*:\s*['\"]([^'\"]+)['\"]")
-    if media_url.endswith("/"):
-        media_url = media_url[:-1]
-
-    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [stormo]", media_url])
-    for video_url in video_urls:
-        logger.info(" %s - %s" % (video_url[0], video_url[1]))
-
-    return video_urls
diff --git a/plugin.video.alfa/servers/turbovideos.json b/plugin.video.alfa/servers/turbovideos.json
deleted file mode 100755
index ec36ef6d..00000000
--- a/plugin.video.alfa/servers/turbovideos.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
-    "active": true,
-    "find_videos": {
-        "ignore_urls": [],
-        "patterns": [
-            {
-                "pattern": "turbovideos.net/embed-([a-z0-9A-Z]+)",
-                "url": "http://turbovideos.net/embed-\\1.html"
-            }
-        ]
-    },
-    "free": true,
-    "id": "turbovideos",
-    "name": "turbovideos",
-    "settings": [
-        {
-            "default": false,
-            "enabled": true,
-            "id": "black_list",
-            "label": "Incluir en lista negra",
-            "type": "bool",
-            "visible": true
-        },
-        {
-            "default": 0,
-            "enabled": true,
-            "id": "favorites_servers_list",
-            "label": "Incluir en lista de favoritos",
-            "lvalues": [
-                "No",
-                "1",
-                "2",
-                "3",
-                "4",
-                "5"
-            ],
-            "type": "list",
-            "visible": false
-        }
-    ]
-}
\ No newline at end of file
diff --git a/plugin.video.alfa/servers/turbovideos.py b/plugin.video.alfa/servers/turbovideos.py
deleted file mode 100755
index 8a5b6ca6..00000000
--- a/plugin.video.alfa/servers/turbovideos.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from core import scrapertools
-from lib import jsunpack
-from platformcode import logger
-
-
-def test_video_exists(page_url):
-    logger.info("(page_url='%s')" % page_url)
-    return True, ""
-
-
-def get_video_url(page_url, premium=False, user="", password="", video_password=""):
-    logger.info("url=" + page_url)
-
-    if "embed" not in page_url:
-        page_url = page_url.replace("http://turbovideos.net/", "http://turbovideos.net/embed-") + ".html"
-
-    data = scrapertools.cache_page(page_url)
-    logger.info("data=" + data)
-
-    data = scrapertools.find_single_match(data,
-                                          "<script type='text/javascript'>(eval\(function\(p,a,c,k,e,d.*?)</script>")
-    logger.info("data=" + data)
-
-    data = jsunpack.unpack(data)
-    logger.info("data=" + data)
-
-    video_urls = []
-    # {file:"http://ultra.turbovideos.net/73ciplxta26xsbj2bqtkqcd4rtyxhgx5s6fvyzed7ocf4go2lxjnd6e5kjza/v.mp4",label:"360"
-    media_urls = scrapertools.find_multiple_matches(data, 'file:"([^"]+)",label:"([^"]+)"')
-    for media_url, label in media_urls:
-
-        if not media_url.endswith("srt"):
-            video_urls.append(
-                [scrapertools.get_filename_from_url(media_url)[-4:] + " " + label + " [turbovideos]", media_url])
-
-    return video_urls
diff --git a/plugin.video.alfa/servers/vk.py b/plugin.video.alfa/servers/vk.py
index d3e8cb8e..b5062745 100755
--- a/plugin.video.alfa/servers/vk.py
+++ b/plugin.video.alfa/servers/vk.py
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 
+from core import httptools
 from core import scrapertools
 from platformcode import logger
 
@@ -7,35 +8,28 @@ from platformcode import logger
 def test_video_exists(page_url):
     logger.info("(page_url='%s')" % page_url)
 
-    data = scrapertools.cache_page(page_url)
+    data = httptools.downloadpage(page_url).data
 
-    if "This video has been removed from public access" in data:
access" in data or "Video not found." in data: return False, "El archivo ya no esta disponible<br/>en VK (ha sido borrado)" - else: - return True, "" + return True, "" # Returns an array of possible video url's from the page_url def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("(page_url='%s')" % page_url) - video_urls = [] try: oid, id = scrapertools.find_single_match(page_url, 'oid=([^&]+)&id=(\d+)') except: oid, id = scrapertools.find_single_match(page_url, 'video(\d+)_(\d+)') - - from core import httptools headers = {'User-Agent': 'Mozilla/5.0'} url = "http://vk.com/al_video.php?act=show_inline&al=1&video=%s_%s" % (oid, id) data = httptools.downloadpage(url, headers=headers).data - matches = scrapertools.find_multiple_matches(data, '<source src="([^"]+)" type="video/(\w+)') for media_url, ext in matches: calidad = scrapertools.find_single_match(media_url, '(\d+)\.%s' % ext) video_urls.append(["." + ext + " [vk:" + calidad + "]", media_url]) - for video_url in video_urls: logger.info("%s - %s" % (video_url[0], video_url[1])) - return video_urls