# -*- coding: utf-8 -*-
# Channel scraper for peliculasdk.com (pelisalacarta / Kodi add-on channel).
#
# NOTE(review): this file reached review with all newlines stripped and every
# HTML fragment removed from its string literals.  The definitions below were
# re-formatted from the recoverable text; the HTML replacement strings inside
# bbcode_kodi2html() were restored from the stock pelisalacarta helper of the
# same name -- TODO confirm against upstream before shipping.

import re
import unicodedata

from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from core.scrapertools import decodeHtmlentities as dhe
from platformcode import config
from platformcode import logger

# xbmc/xbmcgui only exist when running inside Kodi; on other platforms
# (plex/mediaserver) the channel must still import cleanly without them.
try:
    import xbmc
    import xbmcgui
except:
    pass

# Kodi GUI action/control ids used by this channel's custom dialogs.
ACTION_SHOW_FULLSCREEN = 36
ACTION_GESTURE_SWIPE_LEFT = 511
ACTION_SELECT_ITEM = 7
ACTION_PREVIOUS_MENU = 10
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_DOWN = 4
ACTION_MOVE_UP = 3
OPTION_PANEL = 6
OPTIONS_OK = 5

host = "http://www.peliculasdk.com/"


def bbcode_kodi2html(text):
    """Translate Kodi BBCode label markup to HTML on non-Kodi platforms.

    plex/mediaserver front-ends do not understand Kodi's [COLOR]/[B]/[CR]
    tags, so they are converted to HTML; under Kodi the text is returned
    unchanged.
    """
    if config.get_platform().startswith("plex") or \
            config.get_platform().startswith("mediaserver"):
        import re
        # HTML replacement strings restored from the stock pelisalacarta
        # bbcode_kodi2html() -- the originals were eaten by the corruption.
        text = re.sub(r'\[COLOR\s([^\]]+)\]', r'<span style="color: \1">', text)
        text = text.replace('[/COLOR]', '</span>')
        text = text.replace('[CR]', '<br>')
        text = text.replace('[B]', '<strong>')
        text = text.replace('[/B]', '</strong>')
        # Colour tweaks for readability on a light (web) background.
        text = text.replace('"color: yellow"', '"color: gold"')
        text = text.replace('"color: white"', '"color: auto"')
    return text


def mainlist(item):
    """Build the channel's root menu."""
    logger.info()
    itemlist = []

    # (title, action, url, fanart, thumbnail) for every root menu entry.
    entries = [
        ("Estrenos", "peliculas",
         "http://www.peliculasdk.com/ver/estrenos",
         "http://s24.postimg.org/z6ulldcph/pdkesfan.jpg",
         "http://s16.postimg.org/st4x601d1/pdkesth.jpg"),
        ("PelisHd", "peliculas",
         "http://www.peliculasdk.com/calidad/HD-720/",
         "http://s18.postimg.org/wzqonq3w9/pdkhdfan.jpg",
         "http://s8.postimg.org/nn5669ln9/pdkhdthu.jpg"),
        ("Pelis HD-Rip", "peliculas",
         "http://www.peliculasdk.com/calidad/HD-320",
         "http://s7.postimg.org/3pmnrnu7f/pdkripfan.jpg",
         "http://s12.postimg.org/r7re8fie5/pdkhdripthub.jpg"),
        ("Pelis Audio español", "peliculas",
         "http://www.peliculasdk.com/idioma/Espanol/",
         "http://s11.postimg.org/65t7bxlzn/pdkespfan.jpg",
         "http://s13.postimg.org/sh1034ign/pdkhsphtub.jpg"),
        ("Buscar...", "search",
         "http://www.peliculasdk.com/calidad/HD-720/",
         "http://s14.postimg.org/ceqajaw2p/pdkbusfan.jpg",
         "http://s13.postimg.org/o85gsftyv/pdkbusthub.jpg"),
    ]
    for title, action, url, fanart_url, thumb in entries:
        # The original `title.replace(title, X)` is equivalent to plain
        # re-assignment; the intent is just to colour the label orange.
        title = bbcode_kodi2html("[COLOR orange]" + title + "[/COLOR]")
        itemlist.append(Item(channel=item.channel, title=title, action=action,
                             url=url, fanart=fanart_url, thumbnail=thumb))
    return itemlist


def search(item, texto):
    """Entry point for the add-on's global search: query the site for *texto*."""
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = "http://www.peliculasdk.com/index.php?s=%s&x=0&y=0" % (texto)
    try:
        return buscador(item)
    except:
        # Deliberate best-effort: the exception is logged and swallowed so
        # that one failing channel does not abort the global search.
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []

# NOTE(review): the remainder of this file (buscador, peliculas, fanart,
# findvideos, ...) was irrecoverably garbled by the same HTML-stripping
# corruption (its scraping regex literals are gone) and is left untouched
# below; it must be restored from upstream.
||", "", scrapedcalidad).strip() scrapedlenguaje = re.sub(r"||", "", scrapedlenguaje).strip() if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad: scrapedcalidad = scrapedcalidad.replace(scrapedcalidad, bbcode_kodi2html("[COLOR orange]" + scrapedcalidad + "[/COLOR]")) scrapedlenguaje = scrapedlenguaje.replace(scrapedlenguaje, bbcode_kodi2html("[COLOR orange]" + scrapedlenguaje + "[/COLOR]")) scrapedtitle = scrapedtitle + "-(Idioma: " + scrapedlenguaje + ")" + "-(Calidad: " + scrapedcalidad + ")" scrapedtitle = scrapedtitle.replace(scrapedtitle, bbcode_kodi2html("[COLOR white]" + scrapedtitle + "[/COLOR]")) extra = year + "|" + title_fan itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart", thumbnail=scrapedthumbnail, extra=extra, fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True)) try: next_page = scrapertools.get_match(data, '.*?Siguiente »
') title = "siguiente>>" title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]")) itemlist.append(Item(channel=item.channel, action="buscador", title=title, url=next_page, thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png", fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True)) except: pass return itemlist def peliculas(item): logger.info() itemlist = [] # Descarga la página data = scrapertools.cache_page(item.url) data = re.sub(r"\n|\r|\t|\s{2}| |&#.*?;", "", data) patron = 'style="position:relative;"> ' patron += '|", "", scrapedcalidad).strip() scrapedlenguaje = re.sub(r"|", "", scrapedlenguaje).strip() scrapedlenguaje = scrapedlenguaje.split(',') if not "Adultos" in scrapedgenero and not "Adultos" in scrapedlenguaje and not "Adultos" in scrapedcalidad: scrapedtitle = scrapedtitle extra = year + "|" + title_fan new_item = Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="fanart", thumbnail=scrapedthumbnail, extra=extra, fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", library=True, folder=True, language=scrapedlenguaje, quality=scrapedcalidad, contentTitle= scrapedtitle, infoLabels={ 'year':year}) #TODO Dividir los resultados antes #if year: # tmdb.set_infoLabels_item(new_item) itemlist.append(new_item) ## Paginación next_page = scrapertools.get_match(data, '.*?Siguiente »') title = "siguiente>>" title = title.replace(title, bbcode_kodi2html("[COLOR red]" + title + "[/COLOR]")) itemlist.append(Item(channel=item.channel, action="peliculas", title=title, url=next_page, thumbnail="http://s6.postimg.org/uej03x4r5/bricoflecha.png", fanart="http://s18.postimg.org/h9kb22mnt/pdkfanart.jpg", folder=True)) return itemlist def fanart(item): logger.info() itemlist = [] url = item.url data = scrapertools.cachePage(url) data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) title_fan = item.extra.split("|")[1] title = re.sub(r'Serie Completa|Temporada.*?Completa', '', title_fan) fulltitle = title title = 
title.replace(' ', '%20') title = ''.join( (c for c in unicodedata.normalize('NFD', unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn')) try: sinopsis = scrapertools.find_single_match(data, 'Sinopsis: <\/span>(.*?)<\/div>') except: sinopsis = "" year = item.extra.split("|")[0] if not "series" in item.url: # filmafinity url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format( title, year) data = scrapertools.downloadpage(url) url_filmaf = scrapertools.find_single_match(data, '
\s*

(

((.*?)') sinopsis = sinopsis.replace("

", "\n") sinopsis = re.sub(r"\(FILMAFFINITY\)
", "", sinopsis) except: pass try: rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">') except: rating_filma = "Sin puntuacion" critica = "" patron = '
(.*?)
.*?itemprop="author">(.*?)\s*", "", data) data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) bloque_tab = scrapertools.find_single_match(data, '
') plotformat = re.compile('(.*?:) ', re.DOTALL).findall(scrapedplot) scrapedplot = scrapedplot.replace(scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]")) for plot in plotformat: scrapedplot = scrapedplot.replace(plot, bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]")) scrapedplot = scrapedplot.replace("", "[CR]") scrapedplot = scrapedplot.replace(":", "") if check_tab in str(check): idioma, calidad = scrapertools.find_single_match(str(check), "" + check_tab + "', '(.*?)', '(.*?)'") servers_data_list.append([server, id, idioma, calidad]) url = "http://www.peliculasdk.com/Js/videod.js" data = scrapertools.cachePage(url) data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) data = data.replace('