# -*- coding: utf-8 -*-
# Kodi (alfa) channel scraper for seodiv.com — Latino series.
#
# NOTE(review): this file reached review with its HTML-literal regex fragments
# stripped out (every '<a href=...>' / '<img src=...>' / '&nbsp;' literal is
# gone from the `patron` strings). Each pattern marked FIXME below is
# incomplete and MUST be restored from the live site's markup (or the
# original repository copy) before the channel can scrape again.

import re

from channels import autoplay
from channels import filtertools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload', 'okru', 'myvideo', 'sendvid']
list_quality = ['default']

host = 'http://www.seodiv.com'
language = 'latino'


def mainlist(item):
    """Build the channel root menu: a single 'Todos' entry plus the
    autoplay configuration option."""
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []
    itemlist.append(Item(channel=item.channel,
                         title="Todos",
                         action="todas",
                         url=host,
                         thumbnail='https://s27.postimg.cc/iahczwgrn/series.png',
                         fanart='https://s27.postimg.cc/iahczwgrn/series.png',
                         page=0))
    autoplay.show_option(item.channel, itemlist)
    return itemlist


def todas(item):
    """List every series on the site, paginated 30 entries per page.

    item.page is 0-based; the lower slice bound subtracts the page number,
    so consecutive pages overlap by one entry per page already shown
    (behavior kept from the original).
    """
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    # Collapse quoting/whitespace noise before matching.
    # NOTE(review): the stripped literals were presumably '&nbsp;|<br>' as in
    # the channel's other scrapers — confirm against the original file.
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)

    # FIXME(review): corrupted pattern — the loop below unpacks FOUR groups
    # (url, thumbnail, quality, title) but the surviving text shows fewer.
    # Restore the tag literals from the site's markup.
    patron = '.*?quality(.*?)<.*?Ver ' \
             'Serie>(.*?)<\/span>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Pagination window.
    per_page = 30
    first = item.page * per_page - item.page
    last = first + per_page - 1

    for scrapedurl, scrapedthumbnail, scrapedcalidad, scrapedtitle in matches[first:last]:
        url = host + scrapedurl
        title = scrapedtitle.decode('utf-8')  # py2: bytes -> unicode for display
        if 'xxxxxx' not in scrapedtitle:
            itemlist.append(Item(channel=item.channel,
                                 action="temporadas",
                                 title=title,
                                 url=url,
                                 thumbnail=scrapedthumbnail,
                                 fanart='https://s32.postimg.cc/gh8lhbkb9/seodiv.png',
                                 contentSerieName=title,
                                 extra='',
                                 language=language,
                                 context=autoplay.context))

    tmdb.set_infoLabels(itemlist)

    # A full page (29-item slice) means there may be more results.
    if len(itemlist) > 28:
        itemlist.append(Item(channel=item.channel,
                             title="[COLOR cyan]Página Siguiente >>[/COLOR]",
                             url=item.url,
                             action="todas",
                             page=item.page + 1))
    return itemlist


def temporadas(item):
    """List the seasons of a series.

    Falls back to a flat episode list (episodiosxtemp) when the page has no
    season headers. In both cases, appends the 'add to videolibrary' entry
    when library support is enabled (deduplicated from the original's two
    identical branches).
    """
    logger.info()
    itemlist = []
    data = get_source(item.url)

    # FIXME(review): corrupted pattern — should capture each season's title;
    # the surrounding tag literals were stripped.
    patron = '  • (.*?) <\/a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    temp = 1
    if matches:
        for scrapedtitle in matches:
            # FIXME(review): corrupted pattern — originally extracted the
            # series fanart URL from the page head.
            fanart = scrapertools.find_single_match(data, '.*?')
            itemlist.append(Item(channel=item.channel,
                                 action="episodiosxtemp",
                                 title=scrapedtitle,
                                 fulltitle=item.title,
                                 url=item.url,
                                 thumbnail=item.thumbnail,
                                 plot=item.plot,
                                 fanart=fanart,
                                 temp=str(temp),
                                 contentSerieName=item.contentSerieName,
                                 context=item.context))
            temp = temp + 1
    else:
        # No season markers: the page is a flat episode listing.
        itemlist = episodiosxtemp(item)

    if config.get_videolibrary_support() and len(itemlist) > 0:
        itemlist.append(Item(channel=item.channel,
                             title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
                             url=item.url,
                             action="add_serie_to_library",
                             extra="episodios",
                             contentSerieName=item.contentSerieName,
                             extra1=item.extra1,
                             temp=str(temp)))
    return itemlist


def episodios(item):
    """Flatten every season's episodes into one list (videolibrary hook)."""
    logger.info()
    itemlist = []
    for tempitem in temporadas(item):
        itemlist += episodiosxtemp(tempitem)
    return itemlist


def get_source(url):
    """Download *url* (sending a Referer header) and strip quote/whitespace
    noise so the scraping patterns can match on a single line."""
    logger.info()
    data = httptools.downloadpage(url, add_referer=True).data
    # NOTE(review): stripped literals presumably '&nbsp;|<br>' — confirm.
    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data


def episodiosxtemp(item):
    """List the episodes of one season (item.temp selects the season).

    NOTE(review): the original body is TRUNCATED in this chunk immediately
    after the ``patron_temp`` assignment; everything below that line is a
    flagged stub, not a reconstruction. Restore the remainder (season-block
    extraction + per-episode loop) from the full original file.
    """
    logger.info()
    itemlist = []
    item.title = 'Temporada %s' % item.temp.zfill(2)
    # FIXME(review): corrupted pattern — tag literals stripped, same damage
    # as the patterns above.
    patron_temp = '  • %s <\/a>' % item.title
    # TODO(review): truncated here in the source chunk — the episode
    # extraction loop is missing. Returning the (empty) list keeps the file
    # importable; callers (temporadas/episodios) tolerate an empty result.
    return itemlist