diff --git a/plugin.video.alfa/channels/divxtotal.py b/plugin.video.alfa/channels/divxtotal.py
index 0163a61b..0525ac09 100644
--- a/plugin.video.alfa/channels/divxtotal.py
+++ b/plugin.video.alfa/channels/divxtotal.py
@@ -3,83 +3,30 @@
 import os
 import re
 import urllib
-from threading import Thread
-import xbmc
-import xbmcgui
 from core import httptools
 from core import scrapertools
 from core import tmdb
 from core.item import Item
-from core.scrapertools import decodeHtmlentities as dhe
 from platformcode import config, logger
 
 header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'}
-
-ACTION_SHOW_FULLSCREEN = 36
-ACTION_GESTURE_SWIPE_LEFT = 511
-ACTION_SELECT_ITEM = 7
-ACTION_PREVIOUS_MENU = 10
-ACTION_MOVE_LEFT = 1
-ACTION_MOVE_RIGHT = 2
-ACTION_MOVE_DOWN = 4
-ACTION_MOVE_UP = 3
-OPTION_PANEL = 6
-OPTIONS_OK = 5
+host = "http://www.divxtotal.co"
 
 __modo_grafico__ = config.get_setting('modo_grafico', "divxtotal")
 
-# Para la busqueda en bing evitando baneos
-
-def browser(url):
-    import mechanize
-
-    # Utilizamos Browser mechanize para saltar problemas con la busqueda en bing
-    br = mechanize.Browser()
-    # Browser options
-    br.set_handle_equiv(False)
-    br.set_handle_gzip(True)
-    br.set_handle_redirect(True)
-    br.set_handle_referer(False)
-    br.set_handle_robots(False)
-    # Follows refresh 0 but not hangs on refresh > 0
-    br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
-    # Want debugging messages?
-    # br.set_debug_http(True)
-    # br.set_debug_redirects(True)
-    # br.set_debug_responses(True)
-
-    # User-Agent (this is cheating, ok?)
-    # br.addheaders = [('User-agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/7.1.7 Safari/537.85.16')]
-    # br.addheaders = [('Cookie','SRCHD=AF=QBRE; domain=.bing.com; expires=25 de febrero de 2018 13:00:28 GMT+1; MUIDB=3B942052D204686335322894D3086911; domain=www.bing.com;expires=24 de febrero de 2018 13:00:28 GMT+1')]
-    # Open some site, let's pick a random one, the first that pops in mind
-    r = br.open(url)
-    response = r.read()
-    print response
-    if "img,divreturn" in response:
-        r = br.open("http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)
-        print "prooooxy"
-        response = r.read()
-
-    return response
-
-
-api_key = "2e2160006592024ba87ccdf78c28f49f"
-api_fankey = "dffe90fba4d02c199ae7a9e71330c987"
-
-
 def mainlist(item):
     logger.info()
     itemlist = []
     itemlist.append(item.clone(title="[COLOR orange][B]Películas[/B][/COLOR]", action="scraper",
-                               url="http://www.divxtotal.com/peliculas/", thumbnail="http://imgur.com/A4zN3OP.png",
+                               url=host + "/peliculas/", thumbnail="http://imgur.com/A4zN3OP.png",
                                fanart="http://imgur.com/fdntKsy.jpg", contentType="movie"))
     itemlist.append(item.clone(title="[COLOR orange][B] Películas HD[/B][/COLOR]", action="scraper",
-                               url="http://www.divxtotal.com/peliculas-hd/", thumbnail="http://imgur.com/A4zN3OP.png",
+                               url=host + "/peliculas-hd/", thumbnail="http://imgur.com/A4zN3OP.png",
                                fanart="http://imgur.com/fdntKsy.jpg", contentType="movie"))
     itemlist.append(itemlist[-1].clone(title="[COLOR orange][B]Series[/B][/COLOR]", action="scraper",
-                                       url="http://www.divxtotal.com/series/", thumbnail="http://imgur.com/GPX2wLt.png",
+                                       url=host + "/series/", thumbnail="http://imgur.com/GPX2wLt.png",
                                        contentType="tvshow"))
     itemlist.append(itemlist[-1].clone(title="[COLOR orangered][B]Buscar[/B][/COLOR]", action="search",
@@ -90,7 +37,7 @@ def mainlist(item):
 def search(item, texto):
     logger.info()
     texto = texto.replace(" ", "+")
-    item.url = "http://www.divxtotal.com/?s=" + texto
+    item.url = host + "/?s=" + texto
     item.extra = "search"
     try:
         return buscador(item)
@@ -106,22 +53,16 @@ def buscador(item):
     itemlist = []
     data = httptools.downloadpage(item.url, headers=header, cookies=False).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-
-    patron = scrapertools.find_multiple_matches(data, '…([^"]+)')
-
-    for url, thumb, title in patron:
+    patron = '(?s)…([^<]+)<'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for url, thumb, title in matches:
         titulo = title.strip()
         title = re.sub(r"\d+x.*|\(.*?\)", "", title)
         new_item = item.clone(action="findvideos", title="[COLOR orange]" + titulo + "[/COLOR]", url=url,
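Note on the `buscador()` rewrite: assigning the regex to a named `patron` and then feeding it to `scrapertools.find_multiple_matches` is the shape this patch applies to every scrape site. A standalone sketch of that flow under assumptions: the result markup below is invented, the pattern middle is invented to match it (the real ones were mangled in this extract), and `find_multiple_matches` is a stand-in for the alfa helper, which to my knowledge is a thin `re.findall` wrapper.

```python
import re

def find_multiple_matches(data, patron):
    # Stand-in for scrapertools.find_multiple_matches (re.findall-based in alfa).
    return re.findall(patron, data, re.DOTALL)

# Hypothetical search-result markup, shaped to exercise the three groups
# (url, thumb, title) that buscador() unpacks.
data = '<a href="/peli-1/"><img src="/t1.jpg"><h2>Peli Uno 1x01</h2></a>'
data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)  # same cleanup the channel applies

patron = '(?s)<a href="([^"]+)".*?src="([^"]+)".*?>([^<]+)<'
for url, thumb, title in find_multiple_matches(data, patron):
    titulo = title.strip()
    title = re.sub(r"\d+x.*|\(.*?\)", "", title)  # drop "1x01"-style suffixes
    print(url + " | " + thumb + " | " + title)
```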
@@ -193,7 +125,6 @@ def scraper(item):
                               fulltitle=title, contentTitle=title, show=title, contentType="tvshow", library=True)
         new_item.infoLabels['year'] = get_year(url)
         itemlist.append(new_item)
-
     ## Paginación
     next = scrapertools.find_single_match(data, "….*?\(current\).*?href='([^']+)'")
     if len(next) > 0:
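The pagination step works because `scrapertools.find_single_match`, as I understand the alfa helper, returns an empty string when the pattern misses, which makes the bare `if len(next) > 0` guard sufficient. Roughly, with invented pager markup (the head of the real pattern was lost in this extract):

```python
import re

def find_single_match(data, patron):
    # Stand-in for scrapertools.find_single_match: first group of the
    # first match, or "" when nothing matches.
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

# Invented pager markup in the shape the (current)/href pattern expects.
data = "<li class='active'><a href='#'>1 (current)</a></li><li><a href='/peliculas/page/2/'>2</a></li>"
next = find_single_match(data, r"\(current\).*?href='([^']+)'")
if len(next) > 0:
    print("next page: " + next)  # -> /peliculas/page/2/
```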
@@ -215,21 +146,14 @@ def scraper(item):
         except:
             pass
-
     for item_tmdb in itemlist:
         logger.info(str(item_tmdb.infoLabels['tmdb_id']))
-
     return itemlist
 
 
 def findtemporadas(item):
     logger.info()
     itemlist = []
-
-    if item.extra == "search":
-        th = Thread(target=get_art(item))
-        th.setDaemon(True)
-        th.start()
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
     if len(item.extra.split("|")):
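Beyond dead code, the removed `Thread(target=get_art(item))` blocks never actually ran anything in the background: `get_art(item)` is evaluated on the spot, blocking the caller, and its return value (presumably `None`) becomes the thread target. Had the art lookup been worth keeping, the fix would be the callable-plus-args form. A minimal illustration, with a stand-in `get_art` and item:

```python
from threading import Thread

def get_art(item):
    pass  # stand-in for the removed artwork lookup

item = {"title": "example"}  # stand-in for the channel Item

# Removed (broken) form: get_art(item) executes immediately and Thread
# receives its return value, None, as the target, so no background work:
#     th = Thread(target=get_art(item))

# Form that actually defers the call to the worker thread:
th = Thread(target=get_art, args=(item,))
th.setDaemon(True)
th.start()
th.join()
```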
@@ -264,8 +188,7 @@ def findtemporadas(item):
         except:
             fanart_extra = item.fanart
             fanart_info = item.fanart
-
-    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada.*?(\d+).*?<\/a>(.*?)<\/table>')
+    bloque_episodios = scrapertools.find_multiple_matches(data, 'Temporada (\d+).*?<\/a>(.*?)<\/table>')
     for temporada, bloque_epis in bloque_episodios:
         item.infoLabels = item.InfoLabels
         item.infoLabels['season'] = temporada
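The season pattern change is a real fix, not a cleanup: with `Temporada.*?(\d+)` the lazy `.*?` lets the group latch onto the first digit run anywhere after the word, for instance out of a `1x01` episode label, whereas `Temporada (\d+)` only accepts a number directly after the heading. A quick demonstration:

```python
import re

data = "Temporada Uno</a> ... 1x01 ..."
print(re.findall(r"Temporada.*?(\d+)", data))  # ['1']  (picked out of "1x01")
print(re.findall(r"Temporada (\d+)", data))    # []     (no number right after the word)

data = "Temporada 2</a> ..."
print(re.findall(r"Temporada (\d+)", data))    # ['2']
```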
@@ -298,9 +221,9 @@ def epis(item):
     itemlist = []
     if item.extra == "serie_add":
         item.url = item.datalibrary
-    patron = scrapertools.find_multiple_matches(item.url,
-                                                '….*?(\d+x\d+).*?td>')
-    for idioma, url, epi in patron:
+    patron = '….*?(\d+x\d+).*?td>'
+    matches = scrapertools.find_multiple_matches(item.url, patron)
+    for idioma, url, epi in matches:
         episodio = scrapertools.find_single_match(epi, '\d+x(\d+)')
         item.infoLabels['episode'] = episodio
         itemlist.append(
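`epis()` then pulls the episode number out of the `NxM` label with a second, smaller match. The same two-step extraction in isolation:

```python
import re

for epi in ["3x07", "1x10 Final de temporada"]:
    # \d+x(\d+) captures only the episode part of the "season x episode" label.
    episodio = re.search(r"\d+x(\d+)", epi).group(1)
    print(episodio)  # "07", then "10"
```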
@@ -320,19 +243,11 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-
-    if not item.infoLabels['episode']:
-        th = Thread(target=get_art(item))
-        th.setDaemon(True)
-        th.start()
-
     if item.contentType != "movie":
-
         if not item.infoLabels['episode']:
             capitulo = scrapertools.find_single_match(item.title, '(\d+x\d+)')
-            url_capitulo = scrapertools.find_single_match(data,
-                                                          '…')
+            url_capitulo = scrapertools.find_single_match(data, '…')
             if len(item.extra.split("|")) >= 2:
                 extra = item.extra
             else:
@@ -350,7 +265,6 @@ def findvideos(item):
                      title="[COLOR chocolate][B]Ver capítulo " + capitulo + "[/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki] " + size + " )" + "[/COLOR]",
                      url=url_capitulo, action="play", server="torrent", fanart=fanart, thumbnail=item.thumbnail,
                      extra=item.extra, fulltitle=item.fulltitle, folder=False))
-
         if item.infoLabels['episode'] and item.library:
             thumbnail = scrapertools.find_single_match(item.extra, 'http://assets.fanart.tv/.*jpg')
             if thumbnail == "":
@@ -363,15 +277,13 @@ def findvideos(item):
                          action="info_capitulos", fanart=fanart, thumbnail=item.thumb_art,
                          thumb_info=item.thumb_info, extra=item.extra, show=item.show,
                          InfoLabels=item.infoLabels, folder=False))
-
         if not item.infoLabels['episode']:
             itemlist.append(
                 Item(channel=item.channel, title="[COLOR moccasin][B]Todos los episodios[/B][/COLOR]", url=item.url,
-                     action="findtemporadas", server="torrent", fanart=item.extra.split("|")[1],
+                     action="findtemporadas", server="torrent",
                      thumbnail=item.thumbnail, extra=item.extra + "|" + item.thumbnail, contentType=item.contentType,
                      contentTitle=item.contentTitle, InfoLabels=item.infoLabels, thumb_art=item.thumb_art,
                      thumb_info=item.thumbnail, fulltitle=item.fulltitle, library=item.library, folder=True))
-
     else:
         url = scrapertools.find_single_match(data, '….*?href="([^"]+)"')
         item.infoLabels['year'] = None
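Dropping `fanart=item.extra.split("|")[1]` removes a crash path rather than a feature: when `item.extra` arrives without the `url|fanart` packing (for example straight from `search()`, where it is just `"search"`), `split("|")` yields a one-element list and `[1]` raises `IndexError`. If the fanart had been worth keeping, a guarded lookup would be the safer shape:

```python
extra = "search"  # item.extra as set by search(), with no "|" packing

partes = extra.split("|")
# partes == ["search"], so partes[1] would raise IndexError; check first:
fanart = partes[1] if len(partes) >= 2 else ""
print(repr(fanart))  # ''
```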
@@ -388,7 +300,6 @@ def findvideos(item):
                              action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
                              text_color="0xFFe5ffcc",
                              thumbnail='http://imgur.com/xQNTqqy.png'))
-
     return itemlist
@@ -401,7 +312,6 @@ def info_capitulos(item, images={}):
     url = url.replace("/0", "/")
     from core import jsontools
     data = httptools.downloadpage(url).data
-
     if "…
-    sinopsis_f = sinopsis_f.replace("…", "\n")
-    sinopsis_f = re.sub(r"\(FILMAFFINITY\)…", "", sinopsis_f)
-    try:
-        year_f = scrapertools.get_match(data, '…
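With the FilmAffinity scraping stripped out of `info_capitulos()`, metadata is left to the `core.tmdb` helper this file already imports; the `item_tmdb.infoLabels['tmdb_id']` logging in `scraper()` only makes sense after it has run. In alfa channels that call typically looks like the sketch below, hedged because the surviving hunks do not show this channel's exact call site:

```python
from core import tmdb

# Fill plot/year/fanart/tmdb_id on every Item in the list by querying TMDb;
# seekTmdb=True asks the helper to search TMDb by title (and year when present).
tmdb.set_infoLabels(itemlist, seekTmdb=True)

for item_tmdb in itemlist:
    logger.info(str(item_tmdb.infoLabels['tmdb_id']))
```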