From d038dacdd5bdaa8cf886813faba2307ca2d5c792 Mon Sep 17 00:00:00 2001
From: sculkurt <44222714+sculkurt@users.noreply.github.com>
Date: Mon, 22 Oct 2018 18:49:29 +0200
Subject: [PATCH 01/10] New channels
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Channels for grindhouse, trash, cult and sexploitation movies.
---
 plugin.video.alfa/channels/cat3plus.json     |  14 ++
 plugin.video.alfa/channels/cat3plus.py       | 130 +++++++++++++++++++
 plugin.video.alfa/channels/sleazemovies.json |  14 ++
 plugin.video.alfa/channels/sleazemovies.py   | 109 ++++++++++++++++
 4 files changed, 267 insertions(+)
 create mode 100644 plugin.video.alfa/channels/cat3plus.json
 create mode 100644 plugin.video.alfa/channels/cat3plus.py
 create mode 100644 plugin.video.alfa/channels/sleazemovies.json
 create mode 100644 plugin.video.alfa/channels/sleazemovies.py

diff --git a/plugin.video.alfa/channels/cat3plus.json b/plugin.video.alfa/channels/cat3plus.json
new file mode 100644
index 00000000..dd11f74e
--- /dev/null
+++ b/plugin.video.alfa/channels/cat3plus.json
@@ -0,0 +1,14 @@
+{
+    "id": "cat3plus",
+    "name": "Cat3plus",
+    "active": true,
+    "adult": true,
+    "language": [],
+    "thumbnail": "https://i.imgur.com/SJxXKa2.png",
+    "fanart": "https://i.imgur.com/ejCwTxT.jpg",
+    "banner": "https://i.imgur.com/bXUyk6m.png",
+    "categories": [
+        "movie",
+        "vo"
+    ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/cat3plus.py b/plugin.video.alfa/channels/cat3plus.py
new file mode 100644
index 00000000..466fb82b
--- /dev/null
+++ b/plugin.video.alfa/channels/cat3plus.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# -*- Channel Cat3plus -*-
+# -*- Created for Alfa-addon -*-
+# -*- By Sculkurt -*-
+
+
+import re
+import urllib
+import urlparse
+from channelselector import get_thumb
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+
+host = 'http://www.cat3plus.com/'
+
+headers = [
+    ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
+    ['Accept-Encoding', 'gzip, deflate'],
+    ['Referer', host]
+]
+
+def mainlist(item):
+    logger.info()
+
+    itemlist = list()
+    itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)))
+    itemlist.append(item.clone(title="Años", action="years", url=host, thumbnail=get_thumb('year', auto=True)))
+    itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)))
+
+    return itemlist
+
+def years(item):
+    logger.info()
+    itemlist = list()
+    data = httptools.downloadpage(item.url, cookies=False).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = "([^<]+)"
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle in matches:
+        itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl))
+    return itemlist
+
+def get_source(url):
+    logger.info()
+    data = httptools.downloadpage(url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    return data
+
+
+def list_all(item):
+    logger.info()
+    itemlist = []
+    data = get_source(item.url)
+
+    patron = "([^(]+).*?\(([^)]+).*?"
+    patron += 'src="([^"]+).*?'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+
+    for scrapedurl, scrapedtitle, year, img in matches:
+        itemlist.append(Item(channel = item.channel,
+                             title = scrapedtitle,
+                             url = scrapedurl,
+                             action = "findvideos",
+                             thumbnail = img,
+                             contentTitle = scrapedtitle,
+                             contentType = "movie",
+                             infoLabels = {'year': year}))
+    tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
+
+    # Extraer la marca de siguiente página
+    next_page = scrapertools.find_single_match(data, "
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(Item(channel = item.channel,
+                             title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+                             url = item.url,
+                             action = "add_pelicula_to_library",
+                             extra = "findvideos",
+                             contentTitle = item.contentTitle,
+                             thumbnail = item.thumbnail
+                             ))
+
+    return itemlist
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/sleazemovies.json b/plugin.video.alfa/channels/sleazemovies.json
new file mode 100644
index 00000000..712e6ae8
--- /dev/null
+++ b/plugin.video.alfa/channels/sleazemovies.json
@@ -0,0 +1,14 @@
+{
+    "id": "sleazemovies",
+    "name": "SleazeMovies",
+    "active": true,
+    "adult": true,
+    "language": [],
+    "thumbnail": "https://i.imgur.com/x0tzGxQ.jpg",
+    "banner": "https://i.imgur.com/d8LsUNf.png",
+    "fanart": "https://i.imgur.com/NRdQvFW.jpg",
+    "categories": [
+        "movie",
+        "vo"
+    ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/sleazemovies.py b/plugin.video.alfa/channels/sleazemovies.py
new file mode 100644
index 00000000..904c6bb0
--- /dev/null
+++ b/plugin.video.alfa/channels/sleazemovies.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# -*- Channel SleazeMovies -*-
+# -*- Created for Alfa-addon -*-
+# -*- By Sculkurt -*-
+
+import re
+from channelselector import get_thumb
+from core import httptools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+
+host = 'http://www.eroti.ga/'
+
+
+def mainlist(item):
+    logger.info()
+
+    itemlist = list()
+    itemlist.append(item.clone(title="Todas", action="list_all", url=host, thumbnail=get_thumb('all', auto=True)))
+    itemlist.append(item.clone(title="Generos", action="genero", url=host, thumbnail=get_thumb('genres', auto=True)))
+    itemlist.append(item.clone(title="Buscar", action="search", thumbnail=get_thumb('search', auto=True)))
+
+    return itemlist
+
+def genero(item):
+    logger.info()
+    itemlist = list()
+    data = httptools.downloadpage(host).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
+    patron = '  • ([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+    for scrapedurl, scrapedtitle in matches:
+
+        itemlist.append(item.clone(action='list_all', title=scrapedtitle, url=scrapedurl))
+    return itemlist
+
+
+def list_all(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)  # Eliminamos tabuladores, dobles espacios saltos de linea, etc...
+
+    patron = '
+    ')
+    if next_page != "":
+        itemlist.append(Item(channel=item.channel, action="list_all", title=">> Página siguiente", url=next_page, folder=True))
+    return itemlist
+
+
+def search(item, texto):
+    logger.info()
+    if texto != "":
+        texto = texto.replace(" ", "+")
+        item.url = host + "?s=" + texto
+        item.extra = "busqueda"
+    try:
+        return list_all(item)
+    except:
+        import sys
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
+def findvideos(item):
+    logger.info()
+
+    itemlist = []
+
+    data = httptools.downloadpage(item.url).data
+    logger.debug('codigo = ' + data)
+
+    itemlist.extend(servertools.find_video_items(data=data))
+
+    for video in itemlist:
+
+        video.channel = item.channel
+        video.contentTitle = item.contentTitle
+
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(Item(channel = item.channel,
+                             title = '[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+                             url = item.url,
+                             action = "add_pelicula_to_library",
+                             extra = "findvideos",
+                             contentTitle = item.contentTitle,
+                             thumbnail = item.thumbnail
+                             ))
+
+    return itemlist
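Both channels added above follow the same scraping flow: download the page, collapse whitespace and HTML entities, apply a "patron" regular expression whose capture groups feed one Item per result, then look for a next-page link. Below is a minimal, self-contained sketch of that flow using only the standard re module; the sample markup, the pattern and the example.com URLs are hypothetical stand-ins, not the real site markup these channels parse.

    # Minimal sketch of the list_all() flow shared by cat3plus and sleazemovies.
    # The markup, pattern and URLs here are hypothetical stand-ins.
    import re

    html = """
    <div class="item"><a href="http://example.com/film-1/" title="Film One (1975)">
    <img src="http://example.com/1.jpg"></a></div>
    <div class="item"><a href="http://example.com/film-2/" title="Film Two (1981)">
    <img src="http://example.com/2.jpg"></a></div>
    <a class="next" href="http://example.com/page/2/">&raquo;</a>
    """

    # Same clean-up the channels apply before matching.
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", html)

    # One capture group per field the channel needs (url, title, year, thumbnail).
    patron = r'href="([^"]+)" title="([^(]+)\((\d{4})\)".*?src="([^"]+)"'
    for url, title, year, thumb in re.findall(patron, data):
        print("%s (%s) -> %s [%s]" % (title.strip(), year, url, thumb))

    # Pagination marker, mirroring the next_page lookup in both channels.
    next_page = re.findall(r'class="next" href="([^"]+)"', data)
    if next_page:
        print("next page: %s" % next_page[0])

In the channels themselves the same steps run against httptools.downloadpage(...).data, each match is wrapped in a core.item.Item, and tmdb.set_infoLabels_itemlist() then fills in the metadata.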
From 3c335f7cea7503336dba1723d2e3a6094efb7eaf Mon Sep 17 00:00:00 2001
From: DiegoT
Date: Tue, 23 Oct 2018 21:21:16 +0000
Subject: [PATCH 02/10] Fix xhamster and playpornx: listing, search, etc.

---
 plugin.video.alfa/channels/playpornx.py |  38 +++----
 plugin.video.alfa/channels/xhamster.py  | 139 ++++++++++--------------
 2 files changed, 74 insertions(+), 103 deletions(-)

diff --git a/plugin.video.alfa/channels/playpornx.py b/plugin.video.alfa/channels/playpornx.py
index 91d726f3..0eb75040 100644
--- a/plugin.video.alfa/channels/playpornx.py
+++ b/plugin.video.alfa/channels/playpornx.py
@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 
 import re
+import urlparse
 
 from core import httptools
 from core import scrapertools
@@ -12,10 +13,12 @@ host = "https://watchfreexxx.net/"
 
 def mainlist(item):
     itemlist = []
-    itemlist.append(Item(channel=item.channel, title="Todas", action="lista",
-                         thumbnail='https://s18.postimg.cc/fwvaeo6qh/todas.png',
-                         fanart='https://s18.postimg.cc/fwvaeo6qh/todas.png',
-                         url =host))
+
+    itemlist.append(Item(channel=item.channel, title="Movies", action="lista",
+                         url = urlparse.urljoin(host, "category/porn-movies/")))
+
+    itemlist.append(Item(channel=item.channel, title="Scenes", action="lista",
+                         url = urlparse.urljoin(host, "category/xxx-scenes/")))
     itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=host+'?s=',
                          thumbnail='https://s30.postimg.cc/pei7txpa9/buscar.png',
@@ -29,34 +32,27 @@ def lista(item):
     itemlist = []
     if item.url == '':
         item.url = host
+    data = httptools.downloadpage(item.url).data
-    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
-    if item.extra != 'Buscar':
-        patron = '.*?href=(.*?)>'
+    data = re.sub(r'"|\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+
+    patron = '')
+
+    #Patron
+    patron = '(?s).*?'
+    patron += '(.+?)'
+    matches = re.compile(patron,re.DOTALL).findall(data)
-    # Patron #1
-    patron = '' + "([^'
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
-        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
-        itemlist.append(
-            Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
-                 folder=True))
-
-    # Paginador
-    patron = "Próximo"
-    matches = re.compile(patron, re.DOTALL).findall(data)
-    if len(matches) > 0:
-        itemlist.append(
-            Item(channel=item.channel, action="videos", title="Página Siguiente", url=matches[0], thumbnail="",
-                 folder=True, viewmode="movie"))
+    #Paginador
+    patron = '(?s).*?href="([^"]+)"'
+    matches = re.compile(patron,re.DOTALL).findall(data)
+    if len(matches) >0:
+        itemlist.append( Item(channel=item.channel, action="videos", title="Página Siguiente" , url=matches[0] , thumbnail="" , folder=True, viewmode="movie") )
 
     return itemlist

diff --git a/plugin.video.alfa/channels/xhamster.py b/plugin.video.alfa/channels/xhamster.py
-    # SECCION ENCARGADA DE VOLCAR EL LISTADO DE CATEGORIAS CON EL LINK CORRESPONDIENTE A CADA PAGINA
-
+
 def categorias(item):
     logger.info()
     itemlist = []
-    itemlist.append(
-        Item(channel=item.channel, action="lista", title="Heterosexual", url="http://es.xhamster.com/channels.php"))
-    itemlist.append(
-        Item(channel=item.channel, action="lista", title="Transexuales", url="http://es.xhamster.com/channels.php"))
-    itemlist.append(Item(channel=item.channel, action="lista", title="Gays", url="http://es.xhamster.com/channels.php"))
-    return itemlist
+    data = scrapertools.cache_page(item.url)
+
+    data = scrapertools.get_match(data,'(?s)(.*?)')
+
+    patron = '(?s)  • .*?([^<]+).*?  • '
+    matches = re.compile(patron,re.DOTALL).findall(data)
+    for scrapedurl,scrapedtitle in matches:
+        fullTitle = scrapedtitle.strip()
+        itemlist.append( Item(channel=item.channel, action="videos" , title=fullTitle , url=scrapedurl))
+
+
+    return itemlist
 
 def votados(item):
     logger.info()
     itemlist = []
-    itemlist.append(Item(channel=item.channel, action="videos", title="Día",
-                         url="http://es.xhamster.com/rankings/daily-top-videos.html", viewmode="movie"))
-    itemlist.append(Item(channel=item.channel, action="videos", title="Semana",
-                         url="http://es.xhamster.com/rankings/weekly-top-videos.html", viewmode="movie"))
-    itemlist.append(Item(channel=item.channel, action="videos", title="Mes",
-                         url="http://es.xhamster.com/rankings/monthly-top-videos.html", viewmode="movie"))
-    itemlist.append(Item(channel=item.channel, action="videos", title="De siempre",
-                         url="http://es.xhamster.com/rankings/alltime-top-videos.html", viewmode="movie"))
+    itemlist.append( Item(channel=item.channel, action="videos" , title="Día", url=urlparse.urljoin(HOST,"/best/daily"), viewmode="movie"))
+    itemlist.append( Item(channel=item.channel, action="videos" , title="Semana" , url=urlparse.urljoin(HOST,"/best/weekly"), viewmode="movie"))
+    itemlist.append( Item(channel=item.channel, action="videos" , title="Mes" , url=urlparse.urljoin(HOST,"/best/monthly"), viewmode="movie"))
+    itemlist.append( Item(channel=item.channel, action="videos" , title="De siempre" , url=urlparse.urljoin(HOST,"/best/"), viewmode="movie"))
     return itemlist
-
-def lista(item):
+def vistos(item):
     logger.info()
     itemlist = []
-    data = scrapertools.downloadpageGzip(item.url)
-    # data = data.replace("\n","")
-    # data = data.replace("\t","")
-    if item.title == "Gays":
-        data = scrapertools.get_match(data,
-                                      '' + item.title + '.*?(.*?)
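The reworked votados() above builds its ranking URLs with urlparse.urljoin(HOST, "/best/daily") and friends instead of the old hard-coded es.xhamster.com/rankings/... links. A small sketch of how those URLs compose, assuming a hypothetical HOST value (the real constant is defined elsewhere in xhamster.py); the leading slash makes each path absolute with respect to the site root:

    # Sketch only: HOST's real value lives in xhamster.py and may differ.
    import urlparse  # urllib.parse in Python 3

    HOST = "https://xhamster.com/"  # hypothetical stand-in
    for path in ("/best/daily", "/best/weekly", "/best/monthly", "/best/"):
        print(urlparse.urljoin(HOST, path))
    # -> https://xhamster.com/best/daily ... https://xhamster.com/best/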