# -*- coding: utf-8 -*-
import re
import urllib
import urlparse
from channels import autoplay
from channels import filtertools
from core import httptools
from core import jsontools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channelselector import get_thumb
IDIOMAS = {'Latino': 'Latino'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['dostream', 'openload']
host = 'http://doomtv.net/'
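

# Main menu entries: each one clones the incoming item with the action, URL and
# artwork used to open that section of the channel.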
def mainlist(item):
    logger.info()
    autoplay.init(item.channel, list_servers, list_quality)
    itemlist = []

    itemlist.append(
        item.clone(title="Todas",
                   action="lista",
                   thumbnail=get_thumb('all', auto=True),
                   fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
                   url='%s%s' % (host, 'peliculas/'),
                   first=0
                   ))

    itemlist.append(
        item.clone(title="Generos",
                   action="seccion",
                   thumbnail=get_thumb('genres', auto=True),
                   fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
                   url='%s%s' % (host, 'peliculas/'),
                   ))

    itemlist.append(
        item.clone(title="Mas Vistas",
                   action="lista",
                   thumbnail=get_thumb('more watched', auto=True),
                   fanart='https://s9.postimg.cc/wmhzu9d7z/vistas.png',
                   url='%s%s' % (host, 'top-imdb/'),
                   first=0
                   ))

    itemlist.append(
        item.clone(title="Buscar",
                   action="search",
                   url='%s%s' % (host, '?s='),
                   thumbnail=get_thumb('search', auto=True),
                   fanart='https://s30.postimg.cc/pei7txpa9/buscar.png'
                   ))

    autoplay.show_option(item.channel, itemlist)

    return itemlist
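

# Fetch a page (optionally sending a Referer header) and flatten whitespace so the
# regex patterns below can match across the whole document in one pass.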
def get_source(url, referer=None):
    logger.info()
    if referer is None:
        data = httptools.downloadpage(url).data
    else:
        data = httptools.downloadpage(url, headers={'Referer': referer}).data
    # Strip line breaks, tabs and HTML filler from the downloaded page
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    return data
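

# Paginated movie listing: scrape up to 19 results per call and hand the rest to
# the next call through "first" / url_next_page.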
def lista(item):
    logger.info()

    itemlist = []
    next = False
    data = get_source(item.url)

    patron = 'movie-id=.*?href="([^"]+)" data-url.*?quality">([^<]+)<.*?img data-original="([^"]+)" class.*?'
    # Title and plot; the tail of this pattern assumes <h2>/<p> markup on the listing page
    patron += '<h2>([^<]+)<\/h2>.*?<p>([^<]+)<\/p>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    # Local pagination: 19 results per page, starting at item.first
    first = item.first
    last = first + 19
    if last > len(matches):
        last = len(matches)
        next = True

    for scrapedurl, quality, scrapedthumbnail, scrapedtitle, plot in matches[first:last]:
        url = host + scrapedurl
        thumbnail = 'https:' + scrapedthumbnail.strip()
        # Keep only the poster path so TMDB can be queried with it as a filter
        filtro_thumb = thumbnail.replace("https://image.tmdb.org/t/p/w185", "")
        filtro_list = {"poster_path": filtro_thumb.strip()}
        filtro_list = filtro_list.items()
        title = scrapedtitle
        fanart = ''
        itemlist.append(
            Item(channel=item.channel,
                 action='findvideos',
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 infoLabels={'filtro': filtro_list},
                 fanart=fanart,
                 contentTitle=title
                 ))

    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)

    if not next:
        url_next_page = item.url
        first = last
    else:
        url_next_page = scrapertools.find_single_match(data, "