From c7a3d08e0d7321724359d07af370ead5a049a2eb Mon Sep 17 00:00:00 2001
From: paezner
Date: Wed, 16 Jan 2019 16:39:27 +0100
Subject: [PATCH] Estos creo que ya no hay que limpiarlos mas

---
 plugin.video.alfa/channels/LIKUOO.py | 33 +++++----
 plugin.video.alfa/channels/TXXX.py | 58 ++++++++--------
 plugin.video.alfa/channels/absoluporn.py | 24 +++----
 plugin.video.alfa/channels/alsoporn.py | 29 ++++----
 plugin.video.alfa/channels/analdin.py | 42 ++++++------
 plugin.video.alfa/channels/bravoporn.py | 24 +++----
 plugin.video.alfa/channels/camwhoresbay.py | 54 +++++++++------
 plugin.video.alfa/channels/cliphunter.py | 34 +++++-----
 plugin.video.alfa/channels/coomelonitas.py | 16 ++---
 plugin.video.alfa/channels/elreyx.py | 67 +++++--------------
 plugin.video.alfa/channels/eroticage.py | 13 ++--
 .../channels/eroticasonlinetv.py | 14 ++--
 plugin.video.alfa/channels/fapality.py | 17 ++---
 plugin.video.alfa/channels/fetishshrine.py | 34 +++++-----
 plugin.video.alfa/channels/filmoviXXX.py | 14 ++--
 plugin.video.alfa/channels/filmpornoita.py | 21 +++---
 plugin.video.alfa/channels/foxtube.py | 42 +++++++-----
 plugin.video.alfa/channels/freeporn.py | 36 ++++++----
 plugin.video.alfa/channels/freepornstreams.py | 24 +++----
 plugin.video.alfa/channels/hclips.py | 21 +++---
 plugin.video.alfa/channels/hdzog.py | 37 +++++-----
 plugin.video.alfa/channels/hellporno.py | 43 ++++++------
 plugin.video.alfa/channels/hotmovs.py | 55 ++++++-------
 plugin.video.alfa/channels/javlin.py | 37 ++++------
 plugin.video.alfa/channels/javwhores.py | 32 ++++-----
 plugin.video.alfa/channels/jizzbunker.py | 19 +++---
 plugin.video.alfa/channels/justporn.py | 63 ++++++++++++-----
 plugin.video.alfa/channels/mporno.py | 24 +++----
 plugin.video.alfa/channels/muchoporno.py | 56 +++++++++++-----
 plugin.video.alfa/channels/pandamovie.py | 27 ++++----
 plugin.video.alfa/channels/perfectgirls.py | 26 +++----
 plugin.video.alfa/channels/porntrex.py | 34 +++++-----
 32 files changed, 548 insertions(+), 522 deletions(-)

diff --git a/plugin.video.alfa/channels/LIKUOO.py b/plugin.video.alfa/channels/LIKUOO.py
index 20925e3c..2ca05d84 100644
--- a/plugin.video.alfa/channels/LIKUOO.py
+++ b/plugin.video.alfa/channels/LIKUOO.py
@@ -7,8 +7,6 @@ from core import scrapertools
 from core.item import Item
 from core import servertools
 from core import httptools
-from core import tmdb
-from core import jsontools
 host = 'http://www.likuoo.video'
@@ -16,7 +14,8 @@ host = 'http://www.likuoo.video'
 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="Ultimos" , action="peliculas", url=host))
+    itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host))
+    itemlist.append( Item(channel=item.channel, title="Pornstar" , action="categorias", url=host + "/pornstars/"))
     itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/all-channels/"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist
@@ -27,7 +26,7 @@ def search(item, texto):
     texto = texto.replace(" ", "+")
     item.url = host + "/search/?s=%s" % texto
     try:
-        return peliculas(item)
+        return lista(item)
     except:
         import sys
         for line in sys.exc_info():
@@ -46,16 +45,24 @@ def categorias(item):
         scrapedplot = ""
         scrapedthumbnail = "https:" + scrapedthumbnail
         scrapedurl = urlparse.urljoin(item.url,scrapedurl)
-        itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot ,
folder=True) )
+        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+                              thumbnail=scrapedthumbnail, plot=scrapedplot) )
+    next_page = scrapertools.find_single_match(data,'...')
+    if next_page!="":
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
     return itemlist
-def peliculas(item):
+def lista(item):
     logger.info()
     itemlist = []
     data = scrapertools.cachePage(item.url)
     data = re.sub(r"\n|\r|\t| |
", "", data) - patron = '
.*?.*?src="(.*?)".*?
(.*?)
' + patron = '
.*?' + patron += '.*?' + patron += 'src="(.*?)".*?' + patron += '
(.*?)
' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle,scrapedthumbnail,scrapedtime in matches: url = urlparse.urljoin(item.url,scrapedurl) @@ -64,12 +71,12 @@ def peliculas(item): contentTitle = title thumbnail = "https:" + scrapedthumbnail plot = "" - year = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} )) - next_page_url = scrapertools.find_single_match(data,'...
') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, + plot=plot, contentTitle = contentTitle)) + next_page = scrapertools.find_single_match(data,'...') + if next_page!="": + next_page = urlparse.urljoin(item.url,next_page) + itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) return itemlist diff --git a/plugin.video.alfa/channels/TXXX.py b/plugin.video.alfa/channels/TXXX.py index 3ade3fb3..e68eac93 100644 --- a/plugin.video.alfa/channels/TXXX.py +++ b/plugin.video.alfa/channels/TXXX.py @@ -3,13 +3,11 @@ import urlparse,urllib2,urllib,re import os, sys -from core import jsontools as json from core import scrapertools from core import servertools from core.item import Item from platformcode import config, logger from core import httptools -from core import tmdb host = 'http://www.txxx.com' @@ -17,10 +15,10 @@ host = 'http://www.txxx.com' def mainlist(item): logger.info() itemlist = [] - itemlist.append( Item(channel=item.channel, title="Ultimas" , action="peliculas", url=host + "/latest-updates/")) - itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="peliculas", url=host + "/top-rated/")) - itemlist.append( Item(channel=item.channel, title="Mas popular" , action="peliculas", url=host + "/most-popular/")) - itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-list/")) + itemlist.append( Item(channel=item.channel, title="Ultimas" , action="lista", url=host + "/latest-updates/")) + itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url=host + "/top-rated/")) + itemlist.append( Item(channel=item.channel, title="Mas popular" , action="lista", url=host + "/most-popular/")) + itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host + "/channels-list/most-popular/")) itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/")) itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) return itemlist @@ -31,7 +29,7 @@ def search(item, texto): texto = texto.replace(" ", "+") item.url = host + "/search/s=%s" % texto try: - return peliculas(item) + return lista(item) except: import sys for line in sys.exc_info(): @@ -44,19 +42,22 @@ def catalogo(item): itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |
", "", data) - patron = '
.*?(.*?)' + patron = '
.*?' + patron += '", "", data) patron = 'data-video-id="\d+">.*?(.*?)') + duration = scrapertools.find_single_match(scrapedtime, '(.*?)') + if scrapedhd != '': + title = "[COLOR yellow]" +duration+ "[/COLOR] " + "[COLOR red]" +scrapedhd+ "[/COLOR] "+scrapedtitle + else: + title = "[COLOR yellow]" + duration + "[/COLOR] " + scrapedtitle thumbnail = scrapedthumbnail plot = "" - year = "" itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, - plot=plot, contentTitle=contentTitle, infoLabels={'year':year} )) - next_page_url = scrapertools.find_single_match(data,'\d+ NEXT') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue", - url=next_page_url , folder=True) ) + plot=plot, contentTitle = scrapedtitle)) + next_page = scrapertools.find_single_match(data,'
  • NEXT') + if next_page!="": + next_page = urlparse.urljoin(item.url,next_page) + itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) return itemlist diff --git a/plugin.video.alfa/channels/analdin.py b/plugin.video.alfa/channels/analdin.py index 29bf3c73..051cc8f3 100644 --- a/plugin.video.alfa/channels/analdin.py +++ b/plugin.video.alfa/channels/analdin.py @@ -7,17 +7,17 @@ from core import scrapertools from core.item import Item from core import servertools from core import httptools -from core import tmdb -from core import jsontools + host = 'https://www.analdin.com/es' + def mainlist(item): logger.info() itemlist = [] - itemlist.append( Item(channel=item.channel, title="Nuevas" , action="peliculas", url=host + "/más-reciente/")) - itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="peliculas", url=host + "/más-visto/")) - itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="peliculas", url=host + "/mejor-valorado/")) + itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/más-reciente/")) + itemlist.append( Item(channel=item.channel, title="Mas Vistas" , action="lista", url=host + "/más-visto/")) + itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/mejor-valorado/")) itemlist.append( Item(channel=item.channel, title="Canal" , action="catalogo", url=host)) itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categorías/")) itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) @@ -29,7 +29,7 @@ def search(item, texto): texto = texto.replace(" ", "+") item.url = host + "/?s=%s" % texto try: - return peliculas(item) + return lista(item) except: import sys for line in sys.exc_info(): @@ -48,13 +48,12 @@ def catalogo(item): for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, - thumbnail=scrapedthumbnail , plot=scrapedplot) ) - next_page_url = scrapertools.find_single_match(data,'
  • ') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue", - url=next_page_url) ) + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot) ) + next_page = scrapertools.find_single_match(data,'
  • ') + if next_page!="": + next_page = urlparse.urljoin(item.url,next_page) + itemlist.append(item.clone(action="catalogo", title="Página Siguiente >>", text_color="blue", url=next_page) ) return itemlist @@ -71,12 +70,12 @@ def categorias(item): scrapedplot = "" scrapedtitle = scrapedtitle + " (" + cantidad + ")" scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist -def peliculas(item): +def lista(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data @@ -91,14 +90,13 @@ def peliculas(item): title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle thumbnail = scrapedthumbnail plot = "" - year = "" itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, - contentTitle = title, infoLabels={'year':year} )) - next_page_url = scrapertools.find_single_match(data,'
  • ') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail , plot=scrapedplot) ) + next_page = scrapertools.find_single_match(data,'
  • ') + if next_page!="": + next_page = urlparse.urljoin(item.url,next_page) itemlist.append( Item(channel=item.channel, action="catalogo", title="Página Siguiente >>", text_color="blue", - url=next_page_url) ) + url=next_page) ) return itemlist @@ -68,12 +67,12 @@ def categorias(item): scrapedplot = "" scrapedtitle = scrapedtitle scrapedurl = urlparse.urljoin(item.url,scrapedurl) - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist -def peliculas(item): +def lista(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data @@ -88,11 +87,10 @@ def peliculas(item): year = "" itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, contentTitle = title, infoLabels={'year':year} )) - next_page_url = scrapertools.find_single_match(data,'
  • ') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue", - url=next_page_url) ) + next_page = scrapertools.find_single_match(data,'
  • ') + if next_page!="": + next_page = urlparse.urljoin(item.url,next_page) + itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) return itemlist diff --git a/plugin.video.alfa/channels/coomelonitas.py b/plugin.video.alfa/channels/coomelonitas.py index 92d3220e..b205e452 100644 --- a/plugin.video.alfa/channels/coomelonitas.py +++ b/plugin.video.alfa/channels/coomelonitas.py @@ -2,7 +2,6 @@ #------------------------------------------------------------ import urlparse,urllib2,urllib,re import os, sys -from core import jsontools as json from core import scrapertools from core import servertools from core.item import Item @@ -16,7 +15,7 @@ host ='http://www.coomelonitas.com' def mainlist(item): logger.info() itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host)) + itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host)) itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) return itemlist @@ -27,7 +26,7 @@ def search(item, texto): texto = texto.replace(" ", "+") item.url = host+ "/?s=%s" % texto try: - return peliculas(item) + return lista(item) except: import sys for line in sys.exc_info(): @@ -43,12 +42,12 @@ def categorias(item): for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist -def peliculas(item): +def lista(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data @@ -61,9 +60,8 @@ def peliculas(item): thumbnail = scrapertools.find_single_match(match,'') - if next_page_url!="": - itemlist.append( Item(channel=item.channel, action="peliculas", title="Página Siguiente >>", text_color="blue", - url=next_page_url) ) + next_page = scrapertools.find_single_match(data,'') + if next_page!="": + itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) return itemlist diff --git a/plugin.video.alfa/channels/elreyx.py b/plugin.video.alfa/channels/elreyx.py index 9a759807..3f5f7d78 100644 --- a/plugin.video.alfa/channels/elreyx.py +++ b/plugin.video.alfa/channels/elreyx.py @@ -7,8 +7,6 @@ from core import scrapertools from core.item import Item from core import servertools from core import httptools -from core import tmdb -from core import jsontools host = 'http://www.elreyx.com' @@ -17,11 +15,11 @@ def mainlist(item): logger.info() itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host + "/peliculasporno.html")) - itemlist.append( Item(channel=item.channel, title="Escenas" , action="escenas", url=host + "/index.html")) - itemlist.append( Item(channel=item.channel, title="Productora" , action="productora", url=host + "/index.html")) - itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html")) - itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) + itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/peliculasporno.html") ) + itemlist.append( Item(channel=item.channel, title="Escenas" , 
action="lista", url=host + "/index.html")) + itemlist.append( Item(channel=item.channel, title="Productora" , action="categorias", url=host + "/index.html") ) + itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/peliculasporno.html") ) + itemlist.append( Item(channel=item.channel, title="Buscar", action="search") ) return itemlist @@ -30,7 +28,7 @@ def search(item, texto): texto = texto.replace(" ", "+") item.url = host + "/search-%s" % texto + ".html" try: - return escenas(item) + return lista(item) except: import sys for line in sys.exc_info(): @@ -38,43 +36,33 @@ def search(item, texto): return [] -def productora(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - data = re.sub(r"\n|\r|\t| |
    ", "", data) - patron = '
    (.*?)' - matches = re.compile(patron,re.DOTALL).findall(data) - for scrapedurl,scrapedtitle,scrapedthumbnail in matches: - scrapedplot = "" - thumbnail="https:" + scrapedthumbnail - url="https:" + scrapedurl - itemlist.append( Item(channel=item.channel, action="escenas", title=scrapedtitle, url=url, thumbnail=thumbnail, - plot=scrapedplot) ) - return itemlist - - def categorias(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data data = re.sub(r"\n|\r|\t| |
    ", "", data) - patron = '.*?' + if item.title == "Categorias" : + patron = '.*?' + else: + patron = '.*?' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle in matches: scrapedplot = "" url="https:" + scrapedurl scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=url, + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=url, thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist -def escenas(item): +def lista(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data - patron = '
    .*?.*?»') if next_page!= "": next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel, action="escenas", title="Página Siguiente >>", text_color="blue", - url=next_page) ) - return itemlist - - -def peliculas(item): - logger.info() - itemlist = [] - data = httptools.downloadpage(item.url).data - patron = '
    »') - if next_page!="": - next_page = urlparse.urljoin(item.url,next_page) - itemlist.append( Item(channel=item.channel , action="peliculas", title="Página Siguiente >>", text_color="blue", - url=next_page) ) + itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) return itemlist diff --git a/plugin.video.alfa/channels/eroticage.py b/plugin.video.alfa/channels/eroticage.py index 042d54ea..3c18a5d4 100644 --- a/plugin.video.alfa/channels/eroticage.py +++ b/plugin.video.alfa/channels/eroticage.py @@ -2,13 +2,11 @@ #------------------------------------------------------------ import urlparse,urllib2,urllib,re import os, sys -from core import jsontools as json from core import scrapertools from core import servertools from core.item import Item from platformcode import config, logger from core import httptools -from core import tmdb host = 'http://www.eroticage.net' @@ -16,7 +14,7 @@ host = 'http://www.eroticage.net' def mainlist(item): logger.info() itemlist = [] - itemlist.append( Item(channel=item.channel, title="Novedades" , action="peliculas", url=host)) + itemlist.append( Item(channel=item.channel, title="Novedades" , action="lista", url=host)) itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) return itemlist @@ -27,7 +25,7 @@ def search(item, texto): texto = texto.replace(" ", "+") item.url = host + "/?s=%s" % texto try: - return peliculas(item) + return lista(item) except: import sys for line in sys.exc_info(): @@ -45,12 +43,12 @@ def categorias(item): for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle, url=scrapedurl, + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist -def peliculas(item): +def lista(item): logger.info() itemlist = [] data = httptools.downloadpage(item.url).data @@ -67,8 +65,7 @@ def peliculas(item): next_page = scrapertools.find_single_match(data,'
  • ') + if next_page!="": + next_page = urlparse.urljoin(item.url,next_page) + itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) ) return itemlist diff --git a/plugin.video.alfa/channels/filmoviXXX.py b/plugin.video.alfa/channels/filmoviXXX.py index 44b952c4..7308fb67 100644 --- a/plugin.video.alfa/channels/filmoviXXX.py +++ b/plugin.video.alfa/channels/filmoviXXX.py @@ -2,14 +2,11 @@ #------------------------------------------------------------ import urlparse,urllib2,urllib,re import os, sys -from core import jsontools as json from core import scrapertools from core import servertools from core.item import Item from platformcode import config, logger from core import httptools -from core import tmdb -from core import jsontools def mainlist(item): @@ -19,19 +16,21 @@ def mainlist(item): item.url = "http://www.filmovix.net/videoscategory/porno/" data = httptools.downloadpage(item.url).data data = scrapertools.get_match(data,'

    XXX

    (.*?)

    Novo dodato

    ') - patron = '
  • .*?src="([^"]+)".*?

    ' + patron = '

  • .*?' + patron += 'src="([^"]+)".*?' + patron += '

    ' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedthumbnail,scrapedurl,scrapedtitle in matches: contentTitle = scrapedtitle title = scrapedtitle thumbnail = scrapedthumbnail plot = "" - year = "" - itemlist.append( Item(channel=item.channel, action="play" , title=title , url=scrapedurl, thumbnail=thumbnail, plot=plot, contentTitle=contentTitle, infoLabels={'year':year} )) + itemlist.append( Item(channel=item.channel, action="play", title=title, url=scrapedurl, + thumbnail=thumbnail, plot=plot, contentTitle=contentTitle)) next_page_url = scrapertools.find_single_match(data,'Next') - if next_page_url!="": - next_page_url = urlparse.urljoin(item.url,next_page_url) - itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) ) + itemlist.append( Item(channel=item.channel, action="findvideos", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail , plot=plot , viewmode="movie") ) + next_page = scrapertools.find_single_match(data,'Next') + if next_page!="": + next_page = urlparse.urljoin(item.url,next_page) + itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) ) return itemlist diff --git a/plugin.video.alfa/channels/foxtube.py b/plugin.video.alfa/channels/foxtube.py index ad457b70..a1ce679e 100644 --- a/plugin.video.alfa/channels/foxtube.py +++ b/plugin.video.alfa/channels/foxtube.py @@ -7,15 +7,13 @@ from core import scrapertools from core.item import Item from core import servertools from core import httptools -from core import tmdb -from core import jsontools host = 'http://es.foxtube.com' def mainlist(item): logger.info() itemlist = [] - itemlist.append( Item(channel=item.channel, title="Peliculas" , action="peliculas", url=host)) + itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host)) itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host)) itemlist.append( Item(channel=item.channel, title="Buscar", action="search")) return itemlist @@ -26,7 +24,7 @@ def search(item, texto): texto = texto.replace(" ", "+") item.url = host + "/buscador/%s" % texto try: - return peliculas(item) + return lista(item) except: import sys for line in sys.exc_info(): @@ -45,29 +43,37 @@ def categorias(item): for scrapedurl,scrapedtitle in matches: scrapedplot = "" scrapedthumbnail = "" - scrapedurl = host + scrapedurl - itemlist.append( Item(channel=item.channel, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) ) + scrapedurl = urlparse.urljoin(item.url,scrapedurl) + itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl, + thumbnail=scrapedthumbnail, plot=scrapedplot) ) return itemlist -def peliculas(item): +def lista(item): logger.info() itemlist = [] data = scrapertools.cachePage(item.url) - patron = '.*?src="([^"]+)".*?alt="([^"]+)".*?(.*?)' + patron = '.*?' + patron += 'src="([^"]+)".*?' + patron += 'alt="([^"]+)".*?' 
+    patron += '(.*?)'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
-        url = host + scrapedurl
-        title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
-        contentTitle = title
+        url = urlparse.urljoin(item.url,scrapedurl)
+        contentTitle = scrapedtitle
+        time = scrapertools.find_single_match(duracion, '([^"]+)')
+        if not 'HD' in duracion :
+            title = "[COLOR yellow]" + time + "[/COLOR] " + scrapedtitle
+        else:
+            title = "[COLOR yellow]" + time + "[/COLOR] " + "[COLOR red]" + "HD" + "[/COLOR] " + scrapedtitle
         thumbnail = scrapedthumbnail + "|Referer=%s" %host
         plot = ""
-        year = ""
-        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail, plot=plot, contentTitle = contentTitle, infoLabels={'year':year} ))
-    next_page_url = scrapertools.find_single_match(data,'')
-    if next_page_url!="":
-        next_page_url = urlparse.urljoin(item.url,next_page_url)
-        itemlist.append( Item(channel=item.channel , action="peliculas" , title="Página Siguiente >>" , text_color="blue", url=next_page_url , folder=True) )
+        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
+                             plot=plot, contentTitle = contentTitle))
+    next_page = scrapertools.find_single_match(data,'')
+    if next_page!="":
+        next_page = urlparse.urljoin(item.url,next_page)
+        itemlist.append(item.clone(action="lista" , title="Página Siguiente >>", text_color="blue", url=next_page) )
     return itemlist
@@ -76,7 +82,7 @@ def play(item):
     itemlist = []
     url = scrapertools.find_single_match(scrapertools.cachePage(item.url),'
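
Note: the change repeated across all of these channels is the same refactor. The listing function (peliculas / escenas) is renamed to lista, the unused tmdb / jsontools imports and the empty year infoLabel are dropped, and the "Página Siguiente >>" entry is built with item.clone() instead of a new Item(). As a rough sketch only, a channel's lista() ends up shaped like the code below; the host, the scraping regex and the "time" / "next" class names are placeholders invented for the example, since each real channel keeps its own site-specific pattern.

# -*- coding: utf-8 -*-
# Illustrative sketch of the post-refactor lista() shape; not taken from any channel in this patch.
import re
import urlparse

from core import httptools
from core import scrapertools
from core.item import Item
from platformcode import logger

host = 'http://www.example.com'  # placeholder host, not a real channel


def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t", "", data)
    # Placeholder pattern: each real channel uses its own site-specific regex here.
    patron = '<a href="([^"]+)".*?<img src="([^"]+)".*?alt="([^"]+)".*?<span class="time">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedtime in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = "[COLOR yellow]" + scrapedtime + "[/COLOR] " + scrapedtitle
        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url,
                             thumbnail=scrapedthumbnail, plot="", contentTitle=scrapedtitle))
    # Pagination: clone the current item instead of building a new Item from scratch.
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" class="next">')  # placeholder pattern
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)
        itemlist.append(item.clone(action="lista", title="Página Siguiente >>",
                                   text_color="blue", url=next_page))
    return itemlist

The point of item.clone() in the pager entry is that it inherits whatever is already set on the current item (channel and the rest of its attributes), so only action, title, text_color and url need to be overridden.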