From 0e20aaf456e571b025ee2328c8c6793affbeaca6 Mon Sep 17 00:00:00 2001
From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com>
Date: Wed, 27 Mar 2019 15:28:28 -0300
Subject: [PATCH 1/4] Correcciones y novedades

---
 plugin.video.alfa/channels/abtoon.py     | 112 ++++---
 plugin.video.alfa/channels/homecine.json |  70 +++++
 plugin.video.alfa/channels/homecine.py   | 358 +++++++++++++++++++++++
 plugin.video.alfa/channels/mixtoon.py    |   6 +-
 4 files changed, 507 insertions(+), 39 deletions(-)
 create mode 100644 plugin.video.alfa/channels/homecine.json
 create mode 100644 plugin.video.alfa/channels/homecine.py

diff --git a/plugin.video.alfa/channels/abtoon.py b/plugin.video.alfa/channels/abtoon.py
index 5d7653e5..ef006d94 100644
--- a/plugin.video.alfa/channels/abtoon.py
+++ b/plugin.video.alfa/channels/abtoon.py
@@ -32,11 +32,23 @@ def mainlist(item):
     itemlist = list()

     itemlist.append(
-        Item(channel=item.channel, action="lista", title="Series", contentSerieName="Series", url=host, thumbnail=thumb_series, page=0))
-    #itemlist.append(
-    #    Item(channel=item.channel, action="lista", title="Live Action", contentSerieName="Live Action", url=host+"/liveaction", thumbnail=thumb_series, page=0))
-    #itemlist.append(
-    #    Item(channel=item.channel, action="peliculas", title="Películas", contentSerieName="Películas", url=host+"/peliculas", thumbnail=thumb_series, page=0))
+        Item(channel=item.channel, action="lista", title="Series Actuales", url=host+'/p/actuales',
+             thumbnail=thumb_series))
+
+    itemlist.append(
+        Item(channel=item.channel, action="lista", title="Series Clasicas", url=host+'/p/clasicas',
+             thumbnail=thumb_series))
+
+    itemlist.append(
+        Item(channel=item.channel, action="lista", title="Series Anime", url=host + '/p/anime',
+             thumbnail=thumb_series))
+
+    itemlist.append(
+        Item(channel=item.channel, action="lista", title="Series Live Action", url=host + '/p/live-action',
+             thumbnail=thumb_series))
+    itemlist.append(
+        Item(channel=item.channel, action="search", title="Buscar", thumbnail=''))
+
     itemlist = renumbertools.show_option(item.channel, itemlist)
     autoplay.show_option(item.channel, itemlist)
     return itemlist
@@ -47,29 +59,15 @@ def lista(item):

     itemlist = []

-    data = httptools.downloadpage(item.url).data
-    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
-    patron = '(.*?)
') + patron = '' + matches = scrapertools.find_multiple_matches(data, patron) - # Paginacion - num_items_x_pagina = 30 - min = item.page * num_items_x_pagina - min=min-item.page - max = min + num_items_x_pagina - 1 - b=0 - for link, img, name in matches[min:max]: - b=b+1 + + for link, img, name in matches: if " y " in name: title=name.replace(" y "," & ") else: @@ -80,17 +78,15 @@ def lista(item): context2 = autoplay.context context.extend(context2) - itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,contentSerieName=title, - context=context)) - if b<29: - a=a+1 - url=host+"/p/pag-"+str(a) - if b>10: - itemlist.append( - Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0)) - else: - itemlist.append( - Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1)) + itemlist.append(Item(channel=item.channel, title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, + contentSerieName=title, context=context)) + + # Paginacion + + next_page = scrapertools.find_single_match(full_data, '\d+\d+') + if next_page != '': + itemlist.append(Item(channel=item.channel, contentSerieName=item.contentSerieName, + title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=host+next_page, action="lista")) tmdb.set_infoLabels(itemlist) return itemlist @@ -211,6 +207,48 @@ def findvideos(item): return itemlist +def search_results(item): + logger.info() + itemlist = [] + data = httptools.downloadpage(item.url, post=item.post).data + if len(data) > 0: + results = eval(data) + else: + return itemlist + + for result in results: + try: + thumbnail = host + "/tb/%s.jpg" % result[0] + title = u'%s' % result[1] + logger.debug(title) + url = host + "/s/%s" % result[2] + itemlist.append(Item(channel=item.channel, thumbnail=thumbnail, title=title, url=url, contentSerieName=title, + action='episodios')) + except: + pass + + tmdb.set_infoLabels(itemlist, seekTmdb=True) + return itemlist + +def search(item, texto): + logger.info() + import urllib + + if texto != "": + texto = texto.replace(" ", "+") + item.url = host+"/b.php" + post = {'k':texto, "pe":"", "te":""} + item.post = urllib.urlencode(post) + + try: + return search_results(item) + except: + import sys + for line in sys.exc_info(): + logger.error("%s" % line) + return [] + + def golink(ida,sl): a=ida b=[3,10,5,22,31] diff --git a/plugin.video.alfa/channels/homecine.json b/plugin.video.alfa/channels/homecine.json new file mode 100644 index 00000000..81bef526 --- /dev/null +++ b/plugin.video.alfa/channels/homecine.json @@ -0,0 +1,70 @@ +{ + "id": "homecine", + "name": "HomeCine", + "active": true, + "adult": false, + "language": ["lat","cast"], + "thumbnail": "https://homecine.net/wp-content/uploads/2018/05/homedark-1-3.png", + "banner": "", + "version": 1, + "categories": [ + "movie", + "direct" + ], + "settings": [ + { + "id": "include_in_global_search", + "type": "bool", + "label": "Incluir en busqueda global", + "default": false, + "enabled": false, + "visible": false + }, + { + "id": "filter_languages", + "type": "list", + "label": "Mostrar enlaces en idioma...", + "default": 0, + "enabled": true, + "visible": true, + "lvalues": [ + "No filtrar", + "LAT", + "CAST", + "VOSE" + ] + }, + { + "id": "include_in_newest_peliculas", + "type": "bool", + "label": "Incluir en Novedades - Peliculas", + "default": 
true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_infantiles", + "type": "bool", + "label": "Incluir en Novedades - Infantiles", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_terror", + "type": "bool", + "label": "Incluir en Novedades - terror", + "default": true, + "enabled": true, + "visible": true + }, + { + "id": "include_in_newest_latino", + "type": "bool", + "label": "Incluir en Novedades - Latino", + "default": true, + "enabled": true, + "visible": true + } + ] +} \ No newline at end of file diff --git a/plugin.video.alfa/channels/homecine.py b/plugin.video.alfa/channels/homecine.py new file mode 100644 index 00000000..b6fd9274 --- /dev/null +++ b/plugin.video.alfa/channels/homecine.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- + +import re +import urllib +import urlparse + +from channels import autoplay +from channels import filtertools +from core import httptools +from core import jsontools +from core import scrapertools +from core import servertools +from core import tmdb +from core.item import Item +from platformcode import config, logger +from channelselector import get_thumb + +IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOSE'} +list_language = IDIOMAS.values() +list_quality = ['HD 720p', 'HD 1080p', '480p', '360p'] +list_servers = ['cinemaupload'] + +host = 'https://homecine.net' + + +def mainlist(item): + logger.info() + + autoplay.init(item.channel, list_servers, list_quality) + + itemlist = [] + + itemlist.append(Item(channel=item.channel, title="Ultimas", + action="list_all", + thumbnail=get_thumb('last', auto=True), + url='%s%s' % (host, '/release-year/2019'), + first=0 + )) + + itemlist.append(Item(channel=item.channel,title="Películas", + action="sub_menu", + thumbnail=get_thumb('movies', auto=True), + )) + + itemlist.append(Item(channel=item.channel,title="Series", + action="list_all", + thumbnail=get_thumb('tvshows', auto=True), + url='%s%s'%(host,'/series/'), + first=0 + )) + + itemlist.append(Item(channel=item.channel, title="Documentales", + action="list_all", + thumbnail=get_thumb('documentaries', auto=True), + url='%s%s' % (host, '/documentales/'), + first=0 + )) + + itemlist.append(Item(channel=item.channel,title="Buscar", + action="search", + url=host+'/?s=', + thumbnail=get_thumb('search', auto=True), + )) + + autoplay.show_option(item.channel, itemlist) + + return itemlist + +def sub_menu(item): + logger.info() + + itemlist = [] + + + + itemlist.append(Item(channel=item.channel,title="Todas", + action="list_all", + thumbnail=get_thumb('all', auto=True), + url='%s%s' % (host, '/peliculas/'), + first=0 + )) + + itemlist.append(Item(channel=item.channel, title="Mas vistas", + action="list_all", + thumbnail=get_thumb('more watched', auto=True), + url='%s%s' % (host, '/most-viewed/'), + first=0 + )) + + itemlist.append(Item(channel=item.channel,title="Generos", + action="seccion", + thumbnail=get_thumb('genres', auto=True), + fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png', + url=host, + )) + + return itemlist + +def get_source(url, referer=None): + logger.info() + if referer is None: + data = httptools.downloadpage(url).data + else: + data = httptools.downloadpage(url, headers={'Referer':referer}).data + data = re.sub(r'\n|\r|\t| |
|\s{2,}', "", data) + return data + +def list_all(item): + logger.info() + + itemlist = [] + next = False + + data = get_source(item.url) + patron = 'movie-id="\d+".*?([^<]+).*?jtip(.*?)clearfix' + + matches = re.compile(patron, re.DOTALL).findall(data) + + first = item.first + last = first + 19 + if last > len(matches): + last = len(matches) + next = True + + for scrapedurl, scrapedthumbnail, scrapedtitle, extra_info in matches[first:last]: + + year = scrapertools.find_single_match(extra_info, '"tag">(\d{4})<') + url = host+scrapedurl + thumbnail = host+scrapedthumbnail.strip() + title = scrapedtitle + new_item = Item(channel=item.channel, + title=title, + url=url, + thumbnail=thumbnail, + infoLabels = {'year': year} + ) + if 'series' in scrapedurl: + new_item.action = 'seasons' + new_item.contentSerieName = title + else: + new_item.action = 'findvideos' + new_item.contentTitle = title + + + + itemlist.append(new_item) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True) + + if not next: + url_next_page = item.url + first = last + else: + url_next_page = scrapertools.find_single_match(data, "
  • .*?class='page larger' href='([^']+)'") + url_next_page = host+url_next_page + first = 0 + + if url_next_page: + itemlist.append(Item(channel=item.channel,title="Siguiente >>", url=url_next_page, action='list_all', + first=first)) + + return itemlist + + +def seccion(item): + logger.info() + + itemlist = [] + duplicado = [] + data = get_source(item.url) + + patron = 'menu-item-object-category menu-item-\d+">([^<]+)<\/a><\/li>' + + matches = re.compile(patron, re.DOTALL).findall(data) + + for scrapedurl, scrapedtitle in matches: + url = host+scrapedurl + title = scrapedtitle + thumbnail = '' + if url not in duplicado: + itemlist.append(Item(channel=item.channel, + action='list_all', + title=title, + url=url, + thumbnail=thumbnail, + first=0 + )) + return itemlist + + +def seasons(item): + logger.info() + itemlist = [] + + data = get_source(item.url) + + patron = 'Season (\d+)' + + matches = re.compile(patron, re.DOTALL).findall(data) + infoLabels = item.infoLabels + for scrapedseason in matches: + contentSeasonNumber = scrapedseason + title = 'Temporada %s' % scrapedseason + infoLabels['season'] = contentSeasonNumber + + itemlist.append(Item(channel=item.channel, + action='episodesxseason', + url=item.url, + title=title, + contentSeasonNumber=contentSeasonNumber, + infoLabels=infoLabels + )) + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + + if config.get_videolibrary_support() and len(itemlist) > 0: + itemlist.append(Item(channel=item.channel, + title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]', + url=item.url, + action="add_serie_to_library", + extra="episodios", + contentSerieName=item.contentSerieName, + extra1='library' + )) + + return itemlist + +def episodios(item): + logger.info() + itemlist = [] + templist = seasons(item) + for tempitem in templist: + itemlist += episodesxseason(tempitem) + return itemlist + +def episodesxseason(item): + logger.info() + itemlist = [] + season = item.contentSeasonNumber + data = get_source(item.url) + data = scrapertools.find_single_match(data, 'Season %s.*?class="les-content"(.*?)
  • ' % season) + patron = 'Episode (\d+)' + matches = re.compile(patron, re.DOTALL).findall(data) + infoLabels = item.infoLabels + for scrapedurl, dataep in matches: + url = host+scrapedurl + contentEpisodeNumber = dataep + try: + title = '%sx%s - Episodio %s' % (season, dataep, dataep) + except: + title = 'episodio %s' % dataep + infoLabels['episode'] = dataep + infoLabels = item.infoLabels + + itemlist.append(Item(channel=item.channel, + action="findvideos", + title=title, + url=url, + contentEpisodeNumber=contentEpisodeNumber, + infoLabels=infoLabels + )) + + tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True) + return itemlist + + + +def search(item, texto): + logger.info() + texto = texto.replace(" ", "+") + item.url = item.url + texto + item.first=0 + if texto != '': + return list_all(item) + + +def newest(categoria): + logger.info() + itemlist = [] + item = Item() + try: + if categoria in ['peliculas']: + item.url = host +'/peliculas' + elif categoria == 'infantiles': + item.url = host + '/animacion/' + elif categoria == 'terror': + item.url = host + '/terror/' + item.first=0 + itemlist = list_all(item) + if itemlist[-1].title == 'Siguiente >>>': + itemlist.pop() + except: + import sys + for line in sys.exc_info(): + logger.error("{0}".format(line)) + return [] + + return itemlist + +def findvideos(item): + logger.info() + itemlist = [] + + data = get_source(item.url) + patron = '
    (.*?)<' % option) + if '-' in extra_info: + quality, language = scrapertools.find_single_match(extra_info, '(.*?) - (.*)') + else: + language = '' + quality = extra_info + + if 'https:' not in url: + url = 'https:'+url + title = '' + if not config.get_setting('unify'): + if language != '': + title += ' [%s]' % IDIOMAS[language] + if quality != '': + title += ' [%s]' % quality + + new_item = Item(channel=item.channel, + url=url, + title= '%s'+ title, + contentTitle=item.title, + action='play', + infoLabels = item.infoLabels + ) + if language != '': + new_item.language = IDIOMAS[language] + if quality != '': + new_item.quality = quality + + itemlist.append(new_item) + itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize()) + + # Requerido para FilterTools + + itemlist = filtertools.get_links(itemlist, item, list_language) + + # Requerido para AutoPlay + + autoplay.start(itemlist, item) + + if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': + itemlist.append( + Item(channel=item.channel, + title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', + url=item.url, + action="add_pelicula_to_library", + extra="findvideos", + contentTitle=item.contentTitle, + )) + + + return itemlist diff --git a/plugin.video.alfa/channels/mixtoon.py b/plugin.video.alfa/channels/mixtoon.py index 215b144c..52520ddb 100644 --- a/plugin.video.alfa/channels/mixtoon.py +++ b/plugin.video.alfa/channels/mixtoon.py @@ -137,14 +137,16 @@ def findvideos(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data + _sl = scrapertools.find_single_match(data, 'var _dt=([^;]+);') sl = eval(_sl) buttons = [0,1] for id in buttons: new_url = "https://videoeb.xyz/" + "eb/" + sl[0] + "/" + sl[1] + "/" + str(id) + "/" + sl[2] - data_new = httptools.downloadpage(new_url).data - valor1, valor2 = scrapertools.find_single_match(data_new, 'var x0x = \["[^"]*","([^"]+)","[^"]*","[^"]*","([^"]+)') + data_new = httptools.downloadpage(new_url, headers={'Referer': item.url}).data try: + valor1, valor2 = scrapertools.find_single_match(data_new, + 'var x0x = \["[^"]*","([^"]+)","[^"]*","[^"]*","([^"]+)') url = base64.b64decode(gktools.transforma_gsv(valor2, base64.b64decode(valor1))) if 'download' in url: url = url.replace('download', 'preview') From 3fad1e3566eb92450e56e1527d12becd160bc56e Mon Sep 17 00:00:00 2001 From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com> Date: Wed, 27 Mar 2019 15:29:23 -0300 Subject: [PATCH 2/4] Correcciones y novedades --- plugin.video.alfa/servers/archiveorg.py | 1 - plugin.video.alfa/servers/cinemaupload.json | 42 +++++++++++++++++++++ plugin.video.alfa/servers/cinemaupload.py | 28 ++++++++++++++ 3 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 plugin.video.alfa/servers/cinemaupload.json create mode 100644 plugin.video.alfa/servers/cinemaupload.py diff --git a/plugin.video.alfa/servers/archiveorg.py b/plugin.video.alfa/servers/archiveorg.py index a954cc35..2d93aa79 100644 --- a/plugin.video.alfa/servers/archiveorg.py +++ b/plugin.video.alfa/servers/archiveorg.py @@ -20,7 +20,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password= logger.info("url=" + page_url) video_urls = [] data = httptools.downloadpage(page_url).data - logger.debug(data) patron = '' matches = scrapertools.find_multiple_matches(data, patron) for url in matches: diff --git a/plugin.video.alfa/servers/cinemaupload.json b/plugin.video.alfa/servers/cinemaupload.json new file mode 
100644
index 00000000..99bc9e2b
--- /dev/null
+++ b/plugin.video.alfa/servers/cinemaupload.json
@@ -0,0 +1,42 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "https://cinemaupload.com/embed/([a-zA-Z0-9]+)",
+                "url": "https://cinemaupload.com/embed/\\1/"
+            }
+        ]
+    },
+    "free": true,
+    "id": "cinemaupload",
+    "name": "cinemaupload",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://cinemaupload.com/static/img/logo1.png"
+}
diff --git a/plugin.video.alfa/servers/cinemaupload.py b/plugin.video.alfa/servers/cinemaupload.py
new file mode 100644
index 00000000..69191999
--- /dev/null
+++ b/plugin.video.alfa/servers/cinemaupload.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector Cinemaupload By Alfa development Group
+# --------------------------------------------------------
+import re
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url)
+    if data.code == 404:
+        return False, "[CinemaUpload] El archivo no existe o ha sido borrado"
+    return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+    logger.info("url=" + page_url)
+    video_urls = []
+    data = httptools.downloadpage(page_url).data
+    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+    patron = "source: '([^']+)',"
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for url in matches:
+        video_urls.append(['.m3u8 [CinemaUpload]', url])
+    return video_urls

From 6c50e5c2b6aee2309e031e285bf5d8417df9822b Mon Sep 17 00:00:00 2001
From: "I7PAEZ\\paez"
Date: Wed, 27 Mar 2019 19:47:40 +0100
Subject: [PATCH 3/4] =?UTF-8?q?correcci=C3=B3n=20de=20cachepage?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 plugin.video.alfa/channels/TXXX.py       |  2 +-
 plugin.video.alfa/channels/czechvideo.py |  2 +-
 plugin.video.alfa/channels/hclips.py     |  4 +-
 plugin.video.alfa/channels/hdzog.py      |  4 +-
 plugin.video.alfa/channels/hotmovs.py    |  4 +-
 plugin.video.alfa/channels/sexkino.py    | 75 ++++++++++++++++--------
 plugin.video.alfa/channels/spankwire.py  | 48 ++++++++-------
 plugin.video.alfa/channels/tabooshare.py |  2 +-
 plugin.video.alfa/channels/tubehentai.py |  4 +-
 plugin.video.alfa/channels/vidz7.py      | 43 +++-----------
 plugin.video.alfa/channels/vporn.py      |  6 +-
 plugin.video.alfa/channels/xtapes.py     |  4 +-
 12 files changed, 101 insertions(+), 97 deletions(-)

diff --git a/plugin.video.alfa/channels/TXXX.py b/plugin.video.alfa/channels/TXXX.py
index 5755703b..32a360b2 100644
--- a/plugin.video.alfa/channels/TXXX.py
+++ b/plugin.video.alfa/channels/TXXX.py
@@ -109,7 +109,7 @@ def lista(item):
 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     video_url = scrapertools.find_single_match(data, 'var video_url = "([^"]*)"')
     video_url += scrapertools.find_single_match(data, 'video_url \+= "([^"]*)"')
     partes = video_url.split('||')
diff --git a/plugin.video.alfa/channels/czechvideo.py b/plugin.video.alfa/channels/czechvideo.py
index d7c5c030..a3852b14 100644
--- a/plugin.video.alfa/channels/czechvideo.py
+++ b/plugin.video.alfa/channels/czechvideo.py
@@ -75,7 +75,7 @@ def lista(item):
 def play(item):
     logger.info()

-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     itemlist = servertools.find_video_items(data=data)

     for videoitem in itemlist:
diff --git a/plugin.video.alfa/channels/hclips.py b/plugin.video.alfa/channels/hclips.py
index 94ce8649..9ae7ce89 100644
--- a/plugin.video.alfa/channels/hclips.py
+++ b/plugin.video.alfa/channels/hclips.py
@@ -56,7 +56,7 @@ def categorias(item):
 def peliculas(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     patron = '
    .*?' patron += '([^(.*?)

    Advertisement

    ') patron = '
  • .*?", "", data) patron = '
    .*?src="([^"]+)" alt="([^"]+)".*?
    (.*?)
    ' matches = re.compile(patron,re.DOTALL).findall(data) @@ -107,7 +107,7 @@ def lista(item): def play(item): logger.info() itemlist = [] - data = scrapertools.cachePage(item.url) + data = httptools.downloadpage(item.url).data video_url = scrapertools.find_single_match(data, 'var video_url="([^"]*)"') video_url += scrapertools.find_single_match(data, 'video_url\+=\'([^\']+)\'') partes = video_url.split('||') diff --git a/plugin.video.alfa/channels/sexkino.py b/plugin.video.alfa/channels/sexkino.py index 147201d1..efb4d5c3 100644 --- a/plugin.video.alfa/channels/sexkino.py +++ b/plugin.video.alfa/channels/sexkino.py @@ -11,9 +11,9 @@ from platformcode import logger host = 'http://sexkino.to' def mainlist(item): - logger.info("pelisalacarta.sexkino mainlist") + logger.info() itemlist = [] - itemlist.append( Item(channel=item.channel, title="New" , action="peliculas", url= host + "/movies/")) + itemlist.append( Item(channel=item.channel, title="New" , action="lista", url= host + "/movies/")) itemlist.append( Item(channel=item.channel, title="Año" , action="anual", url= host)) itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host)) @@ -26,7 +26,7 @@ def search(item, texto): texto = texto.replace(" ", "+") item.url = host + "/?s=%s" % texto try: - return peliculas(item) + return lista(item) except: import sys for line in sys.exc_info(): @@ -35,9 +35,9 @@ def search(item, texto): def categorias(item): - logger.info("pelisalacarta.sexkino categorias") + logger.info() itemlist = [] - data = scrapertools.cachePage(item.url) + data = httptools.downloadpage(item.url).data patron = '
  • (.*?) (.*?)' matches = re.compile(patron,re.DOTALL).findall(data) scrapertools.printMatches(matches) @@ -45,52 +45,77 @@ def categorias(item): scrapedplot = "" scrapedthumbnail = "" scrapedtitle = scrapedtitle + " ("+cantidad+")" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist def anual(item): - logger.info("pelisalacarta.sexkino anual") + logger.info() itemlist = [] - data = scrapertools.cachePage(item.url) + data = httptools.downloadpage(item.url).data patron = '
  • ([^<]+)' matches = re.compile(patron,re.DOTALL).findall(data) scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist -def peliculas(item): - logger.info("pelisalacarta.sexkino peliculas") +def lista(item): + logger.info() itemlist = [] - data = scrapertools.cachePage(item.url) - #hay que hacer que coincida con el buscador - patron = '.*?([^(\d+)' + data = httptools.downloadpage(item.url).data + patron = '
    .*?' + patron += '([^.*?' + patron += '([^"]+).*?' + patron += '' matches = re.compile(patron,re.DOTALL).findall(data) scrapertools.printMatches(matches) - for scrapedurl,scrapedthumbnail,scrapedtitle,date in matches: + for scrapedthumbnail,scrapedtitle,calidad,scrapedurl in matches: scrapedplot = "" - scrapedtitle = scrapedtitle + " (" + date + ")" - itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) - next_page_url = scrapertools.find_single_match(data,'resppages.*?') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( Item(channel=item.channel , action="peliculas" , title="Next page >>" , text_color="blue", url=next_page_url , folder=True) ) + scrapedtitle = scrapedtitle + " (" + calidad + ")" + itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) ) + next_page = scrapertools.find_single_match(data,'resppages.*?') + if next_page != "": + next_page = urlparse.urljoin(item.url,next_page) + itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) ) return itemlist def findvideos(item): - logger.info("pelisalacarta.a0 findvideos") + logger.info() itemlist = [] - data = scrapertools.cachePage(item.url) + data = httptools.downloadpage(item.url).data + + # Watch onlineQualityLanguageAdded + # + # Watch online + # DVDRipGerman2 years + # Watch online + # DVDRipGerman2 years + # Watch online + # DVDRipGerman2 years + # Watch online + # DVDRipGerman2 years + # Watch online + # DVDRipGerman2 years + #
  • + + + patron = '' matches = re.compile(patron,re.DOTALL).findall(data) for match in matches: url = scrapertools.find_single_match(match,'href="([^"]+)" target') title = scrapertools.find_single_match(match,' (.*?)') itemlist.append(item.clone(action="play", title=title, url=url)) + + # Continue + patron = '
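Taken together, these patches keep applying one and the same listing idiom: fetch the page with httptools.downloadpage(url).data, flatten it with re.sub, pull tuples out with scrapertools.find_multiple_matches, and wrap every match in an Item. The lines below are only a minimal sketch of that idiom; HOST, the regex and the field layout are invented placeholders for illustration, not code taken from any of the channels touched above.

# -*- coding: utf-8 -*-
# Minimal sketch of the listing idiom shared by these channels.
# HOST and `patron` are illustrative assumptions, not real site markup.
import re

from core import httptools
from core import scrapertools
from core.item import Item

HOST = 'https://example-channel.tld'


def lista(item):
    itemlist = []
    data = httptools.downloadpage(item.url).data
    # Same cleanup the patched channels run before matching.
    data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
    # Hypothetical card markup: one (url, thumbnail, title) tuple per entry.
    patron = '<a href="([^"]+)"[^>]*><img src="([^"]+)"[^>]*alt="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        itemlist.append(Item(channel=item.channel, action="findvideos",
                             title=scrapedtitle, url=HOST + scrapedurl,
                             thumbnail=scrapedthumbnail, contentTitle=scrapedtitle))
    return itemlist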