From 1e2d7fbca764469d38e5a230224a6b640f0d2d94 Mon Sep 17 00:00:00 2001
From: Unknown
", "", data)
+
+ folder = filetools.join(config.get_data_path(), 'thumbs_disko')
+ patron = 'data-file-id(.*?)
([^<]+)<')
+ except:
+ pass
+ else:
+ new_item.folderurl = item.url.rsplit("/", 1)[0]
+ new_item.foldername = item.foldername
+ new_item.fanart = item.thumbnail
+ itemlist.append(new_item)
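+ # Pagination: the "pageSplitter" element carries the next page number when more results exist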
+ next_page = scrapertools.find_single_match(data, 'class="pageSplitter.*?" data-nextpage-number="([^"]+)"')
+ if next_page:
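+ # POST-driven listings bump pageNumber in the post data and keep the URL; GET listings rewrite the ",N?ref=pager" suffix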
+ if item.post:
+ post = re.sub(r'pageNumber=(\d+)', "pageNumber=" + next_page, item.post)
+ url = item.url
+ else:
+ url = re.sub(r',\d+\?ref=pager', ",%s?ref=pager" % next_page, item.url)
+ post = ""
+ itemlist.append(Item(channel=item.channel, action="listado", title=">> Página Siguiente (%s)" % next_page,
+ url=url, post=post, extra=item.extra))
+ return itemlist
+
+
+def findvideos(item):
+ logger.info()
+ itemlist = []
+ itemlist.append(item.clone(action="play", title="Reproducir/Descargar", server="diskokosmiko"))
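+ # item.extra holds the site base URL; the user name is the first path segment of the item URL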
+ usuario = scrapertools.find_single_match(item.url, '%s/([^/]+)/' % item.extra)
+ url_usuario = item.extra + "/" + usuario
+ if item.folderurl and not item.folderurl.startswith(item.extra):
+ item.folderurl = item.extra + item.folderurl
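+ # Items that arrived with POST data get a shortcut to browse their parent collection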
+ if item.post:
+ itemlist.append(item.clone(action="listado", title="Ver colección: %s" % item.foldername,
+ url=item.folderurl + "/gallery,1,1?ref=pager", post=""))
+ data = httptools.downloadpage(item.folderurl).data
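+ # Following a collection needs the ASP.NET anti-forgery token embedded in the collection page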
+ token = scrapertools.find_single_match(data,
+ 'data-action="followChanged.*?name="__RequestVerificationToken".*?value="([^"]+)"')
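+ # The collection id is the trailing segment of the folder URL, after the last "-"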
+ collection_id = item.folderurl.rsplit("-", 1)[1]
+ post = "__RequestVerificationToken=%s&collectionId=%s" % (token, collection_id)
+ url = "%s/action/Follow/Follow" % item.extra
+ title = "Seguir Colección: %s" % item.foldername
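+ # If the page already shows "dejar de seguir", the collection is followed: switch to the UnFollow endpoint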
+ if "dejar de seguir" in data:
+ title = "Dejar de seguir la colección: %s" % item.foldername
+ url = "%s/action/Follow/UnFollow" % item.extra
+ itemlist.append(item.clone(action="seguir", title=title, url=url, post=post, text_color=color5, folder=False))
+ itemlist.append(
+ item.clone(action="colecciones", title="Ver colecciones del usuario: %s" % usuario, url=url_usuario))
+ return itemlist
+
+
+def colecciones(item):
+ logger.info()
+ itemlist = []
+ usuario = False
+ data = httptools.downloadpage(item.url).data
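+ # Collection listings arrive as JSON with the HTML fragment under Data/Content; user profile pages are plain HTML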
+ if "Ver colecciones del usuario" not in item.title and not item.index:
+ data = jsontools.load(data)["Data"]
+ content = data["Content"]
+ content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", content)
+ else:
+ usuario = True
+ if item.follow:
+ content = scrapertools.find_single_match(data, 'id="followed_collections"(.*?)')
+ return itemlist
+
+
+def get_source(url):
+ logger.info()
+ data = httptools.downloadpage(url).data
+ data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+ return data
+
+
+def categories(item):
+ logger.info()
+ itemlist = []
+
+ data = get_source(item.url)
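+ # The site menu mixes genre (category) and release-year entries; pick the pattern by submenu title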
+ if item.title == 'Generos':
+ # the loop below unpacks (url, title), so the pattern presumably also captured the link href
+ patron = r'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<'
+ else:
+ patron = r'menu-item-object-release-year menu-item-\d+"><a href="([^"]+)">([^<]+)<'
+
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for url, title in matches:
+ itemlist.append(Item(channel=item.channel,
+ action="list_all",
+ title=title,
+ url=url
+ ))
+
+ return itemlist
+
+
+def search(item, texto):
+ logger.info()
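+ # The site expects spaces in search terms encoded as "+"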
+ texto = texto.replace(" ", "+")
+ try:
+ if texto != '':
+ item.texto = texto
+ return list_all(item)
+ else:
+ return []
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("%s" % line)
+ return []
+
+
+def list_all(item):
+ logger.info()
+ itemlist = []
+
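+ # When called from search(), item.texto carries the query; otherwise list the section URL as-is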
+ if item.texto != '':
+ url = item.url + "?s=%s" % item.texto
+ else:
+ url = item.url
+
+ try:
+ data = get_source(url)
+ except:
+ return itemlist
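+ # Normalize quoting so the pattern below only has to deal with double quotes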
+ data = data.replace("'", '"')
+
+ # assumed markup: capture groups ordered (url, title, thumb, info) to match the unpack below
+ pattern = 'class="ml-item.*?href="([^"]+)".*?title="([^"]+)".*?src="([^"]+)"(.*?)</div>'
+ matches = scrapertools.find_multiple_matches(data, pattern)
+
+ for url, title, thumb, info in matches:
+ year = scrapertools.find_single_match(info, r'rel="tag">(\d{4})<')
+ new_item = Item(channel=item.channel,
+ title=title,
+ url=url,
+ thumbnail=thumb,
+ infoLabels={'year': year}
+ )
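+ # URLs containing 'series' are shows (season listing); anything else is treated as a movie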
+ if 'series' in url:
+ new_item.action = 'seasons'
+ new_item.contentSerieName = title
+ else:
+ new_item.action = 'findvideos'
+ new_item.contentTitle = title
+
+ itemlist.append(new_item)
+
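+ # Fill in the remaining metadata (plot, fanart, ratings) for the whole list from TMDB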
+ tmdb.set_infoLabels(itemlist, seekTmdb=True)
+
+ active_page = scrapertools.find_single_match(data, '