# -*- coding: utf-8 -*-
# -*- Channel TVSeriesdk -*-
# -*- Created for Alfa-addon -*-
# -*- By the Alfa Develop Group -*-

import re

from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger

host = 'http://www.tvseriesdk.com/'


def mainlist(item):
    """Build the channel root menu: latest episodes, full series list, search."""
    logger.info()
    itemlist = list()

    itemlist.append(item.clone(title="Ultimos", action="last_episodes", url=host))
    itemlist.append(item.clone(title="Todas", action="list_all", url=host))
    itemlist.append(item.clone(title="Buscar", action="search",
                               url='http://www.tvseriesdk.com/index.php?s='))
    return itemlist


def get_source(url):
    """Download *url* and return its HTML with quotes, control chars and
    whitespace noise stripped, so the scraping regexes stay simple."""
    logger.info()
    data = httptools.downloadpage(url).data
    # NOTE(review): the original pattern was garbled by markup stripping when
    # this file was copied; it removed quotes, newlines/CR/tabs, &nbsp; and
    # <br> entities and runs of 2+ spaces — reconstructed below, TODO confirm.
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def list_all(item):
    """List every series, 10 per page, delegating each entry to `episodios`.

    Pagination state travels on the item itself (`item.next_page`, `item.i`).
    """
    logger.info()
    templist = []
    data = get_source(item.url)
    # TODO(review): the HTML inside this pattern was lost to markup stripping.
    # It captured (url, plot, title) per series entry; reconstructed guess:
    patron = r'<li>.*?<a href=(.*?) title=(.*?)>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Defaults so pagination is simply skipped when there is one page only
    # (the original left url_next_page/next_page unbound in that case).
    url_next_page = ''
    next_page = 0

    if len(matches) > 10:
        if item.next_page != 10:
            # First page: show the first 10 entries.
            url_next_page = item.url
            matches = matches[:10]
            next_page = 10
            item.i = 0
        else:
            # Subsequent page: advance the window by item.i.
            # (Original buggily assigned this slice to `patron`.)
            matches = matches[item.i:][:10]
            next_page = 10
            url_next_page = item.url

    for scrapedurl, scrapedplot, scrapedtitle in matches:
        # Typo fix: original passed `contentErieName` instead of
        # `contentSerieName`, losing the metadata for the info scrapers.
        templist.append(item.clone(action='episodios', title=scrapedtitle,
                                   url=scrapedurl, thumbnail='',
                                   plot=scrapedplot,
                                   contentSerieName=scrapedtitle))

    # serie_thumb is defined elsewhere in this channel — TODO confirm it is
    # still present past the visible portion of the file.
    itemlist = serie_thumb(templist)

    # Pagination
    if url_next_page:
        itemlist.append(item.clone(title="Siguiente >>", url=url_next_page,
                                   next_page=next_page, i=item.i))
    return itemlist


def last_episodes(item):
    """List the most recently published episodes from the home page."""
    logger.info()
    itemlist = []

    data = get_source(item.url)
    # TODO(review): original pattern destroyed by markup stripping; it
    # captured (url, title, thumbnail) per episode card — reconstructed guess:
    patron = r'<a href=(.*?) title=(.*?)>.*?<img src=(.*?) '
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        # The original call was truncated at `item.clone(action=)`;
        # episodes resolve straight to their video sources.
        itemlist.append(item.clone(action='findvideos', title=scrapedtitle,
                                   url=scrapedurl,
                                   thumbnail=scrapedthumbnail))
    return itemlist


def search_list(item):
    """List the results of a site search, paginating via the site's next link."""
    logger.info()
    itemlist = []

    data = get_source(item.url)
    # TODO(review): the leading HTML of this pattern was lost to markup
    # stripping; it captured (thumbnail, url, title) per result — guess:
    patron = r'<img src=(.*?) .*?<a href=(.*?)>(.*?)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedthumb, scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl,
                                   thumbnail=scrapedthumb,
                                   action='findvideos'))

    # Pagination: TODO(review) the "next page" pattern was emptied by the
    # markup stripping; with '' this silently disables pagination.
    next_page = scrapertools.find_single_match(data, '')
    if next_page:
        itemlist.append(Item(channel=item.channel, action="search_list",
                             title='>> Pagina Siguiente', url=next_page,
                             thumbnail=get_thumb('thumb_next.png')))
    return itemlist


def search(item, texto):
    """Entry point for the global search: build the query URL and delegate.

    Returns an empty list when *texto* is empty so the GUI shows no results.
    """
    logger.info()
    texto = texto.replace(" ", "+")
    item.url = item.url + texto
    if texto != '':
        return search_list(item)
    else:
        return []


def findvideos(item):
    """Resolve the playable video sources of one episode page.

    NOTE(review): the original body was truncated in the corrupted source
    right after the tab-content pattern; everything past that point is a
    best-effort reconstruction and must be verified against the live site.
    """
    logger.info()
    itemlist = []

    # Map of short server ids to their embed-URL prefixes, kept verbatim from
    # the original; the truncated parsing code presumably used it to build
    # full embed links from (server_id, video_id) pairs — TODO confirm.
    servers = {'netu': 'http://hqq.tv/player/embed_player.php?vid=',
               'open': 'https://openload.co/embed/',
               'netv': 'http://goo.gl/',
               'gamo': 'http://gamovideo.com/embed-',
               'powvideo': 'http://powvideo.net/embed-',
               'play': 'http://streamplay.to/embed-',
               'vido': 'http://vidoza.net/embed-',
               'net': 'http://hqq.tv/player/embed_player.php?vid='
               }

    data = get_source(item.url)

    # "No emitido" (not yet aired) notice; TODO(review) the pattern was
    # emptied by markup stripping, so this check is currently inert.
    noemitido = scrapertools.find_single_match(data, '')
    if noemitido:
        return itemlist

    # TODO(review): pattern truncated in the corrupted source; it iterated
    # the per-server tabs (id=tabN ... class=tab_content) for embed URLs.
    patron = r'id=tab\d+.*?class=tab_content>.*?src=(.*?) '
    matches = re.compile(patron, re.DOTALL).findall(data)

    for video_url in matches:
        itemlist.append(item.clone(title='%s', url=video_url, action='play'))

    # Let servertools identify each server and fill in the titles.
    itemlist = servertools.get_servers_itemlist(
        itemlist, lambda x: x.title % x.server.capitalize())
    return itemlist