pandamovies: structure change
pornboss: structure change
porntrex: structure and thumbnail change
videosXYZ: structure change
xms: host change
@@ -14,9 +14,9 @@ host= 'https://pandamovies.pw'
 
 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/list-movies"))
-    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/list-movies"))
-    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/list-movies"))
+    itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies"))
+    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/movies"))
+    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/movies"))
     itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist
@@ -42,7 +42,7 @@ def categorias(item):
     else:
         data = scrapertools.get_match(data,'<a href="#">Studios</a>(.*?)</ul>')
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
-    patron = '<li><a title=".*?" href="([^"]+)">([^<]+)</a>'
+    patron = '<a href="([^"]+)">([^<]+)</a>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     for scrapedurl,scrapedtitle in matches:
         scrapedplot = ""
@@ -58,17 +58,19 @@ def lista(item):
     logger.info()
     itemlist = []
     data = scrapertools.cachePage(item.url)
-    patron = '<a class="clip-link" title="([^"]+)" href="([^"]+)".*?'
-    patron += 'src="([^"]+)"'
+    patron = '<div data-movie-id="\d+".*?'
+    patron += '<a href="([^"]+)".*?oldtitle="([^"]+)".*?'
+    patron += '<img src="([^"]+)".*?'
     matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedtitle,scrapedurl,scrapedthumbnail in matches:
+    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
         url = urlparse.urljoin(item.url,scrapedurl)
         title = scrapedtitle
         thumbnail = scrapedthumbnail
         plot = ""
         itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail,
                               plot=plot, contentTitle = title))
-    next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
+    # <li class='active'><a class=''>1</a></li><li><a rel='nofollow' class='page larger' href='https://pandamovies.pw/movies/page/2'>
+    next_page = scrapertools.find_single_match(data,'<li class=\'active\'>.*?href=\'([^\']+)\'>')
+    if next_page =="":
+        next_page = scrapertools.find_single_match(data,'<a.*?href="([^"]+)" >Next »</a>')
     if next_page!="":
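The pandamovies listing now scrapes `<div data-movie-id=...>` blocks instead of the old `clip-link` anchors, so the captures come back as (url, title, thumbnail) and pagination falls back from the `active` page marker to a plain "Next »" link. A minimal sketch of the new pattern against an invented markup fragment (the HTML below is an assumption, not copied from the site):

    # Minimal sketch; the HTML fragment is invented for illustration.
    import re

    data = ('<div data-movie-id="123" class="item">'
            '<a href="https://pandamovies.pw/movies/example" oldtitle="Example Movie">'
            '<img src="https://pandamovies.pw/thumbs/example.jpg"></a></div>')

    patron = '<div data-movie-id="\d+".*?'
    patron += '<a href="([^"]+)".*?oldtitle="([^"]+)".*?'
    patron += '<img src="([^"]+)".*?'

    for scrapedurl, scrapedtitle, scrapedthumbnail in re.compile(patron, re.DOTALL).findall(data):
        print(scrapedurl, scrapedtitle, scrapedthumbnail)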
@@ -19,7 +19,7 @@ def mainlist(item):
     itemlist.append( Item(channel=item.channel, title=" categorias" , action="categorias", url=host + "/category/movies/"))
 
     itemlist.append( Item(channel=item.channel, title="Videos" , action="lista", url=host + "/category/clips/"))
-    itemlist.append( Item(channel=item.channel, title=" categorias" , action="lista", url=host + "/category/clips/"))
+    itemlist.append( Item(channel=item.channel, title=" categorias" , action="categorias", url=host + "/category/clips/"))
     return itemlist
@@ -41,11 +41,11 @@ def categorias(item):
     itemlist = []
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>", "", data)
-    if item.url == host + "/category/movies/":
+    if "/category/movies/" in item.url:
         data = scrapertools.get_match(data,'>Movies</a>(.*?)</ul>')
     else:
         data = scrapertools.get_match(data,'>Clips</a>(.*?)</ul>')
-    patron = '<a href="([^"]+)">([^"]+)</a>'
+    patron = '<a href=([^"]+)>([^"]+)</a>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
     for scrapedurl,scrapedtitle in matches:
@@ -60,20 +60,21 @@ def lista(item):
     logger.info()
     itemlist = []
     data = scrapertools.cachePage(item.url)
-    patron = '<article id="post-\d+".*?'
-    patron += '<img class="center cover" src="([^"]+)" alt="([^"]+)".*?'
-    patron += '<blockquote>.*?<a href=\'([^\']+)\''
+    patron = '<article id=post-\d+.*?'
+    patron += '<img class="center cover" src=([^"]+) alt="([^"]+)".*?'
+    patron += '<blockquote>.*?<a href=(.*?) target=_blank>'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
     for scrapedthumbnail,scrapedtitle,scrapedurl in matches:
         scrapedplot = ""
         itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
                               thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
-    next_page = scrapertools.find_single_match(data,'<a class="nextpostslink" rel="next" href="([^"]+)">')
+    next_page = scrapertools.find_single_match(data,'<a class=nextpostslink rel=next href=(.*?)>')
     if next_page!="":
-        itemlist.append( Item(channel=item.channel, action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
+        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
     return itemlist
 
 
 def play(item):
     logger.info()
     itemlist = servertools.find_video_items(data=item.url)
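The pagination item is now built with `item.clone(...)` instead of a fresh `Item(...)`: clone keeps whatever the current item already carries (channel, extra, etc.) and only overrides the keyword arguments passed in. A rough illustration with a simplified stand-in class, not the real `Item` implementation:

    import copy

    class FakeItem(object):
        """Simplified stand-in used only to illustrate clone vs. building a new Item."""
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

        def clone(self, **kwargs):
            new = copy.deepcopy(self)    # keep channel, extra, ...
            new.__dict__.update(kwargs)  # override only what is passed
            return new

    item = FakeItem(channel="pornboss", extra="clips", title="Videos")
    nxt = item.clone(action="lista", title="Página Siguiente >>", url="https://example.org/page/2")
    print(nxt.channel, nxt.extra)  # inherited from the original item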
@@ -81,6 +81,7 @@ def lista(item):
         if not scrapedthumbnail.startswith("https"):
             scrapedthumbnail = "https:%s" % scrapedthumbnail
         scrapedtitle = "%s - [COLOR red]%s[/COLOR] %s" % (duration, quality, scrapedtitle)
+        scrapedthumbnail += "|Referer=https://www.porntrex.com/"
         itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                    contentThumbnail=scrapedthumbnail, fanart=scrapedthumbnail))
     # Extrae la marca de siguiente página
@@ -256,6 +257,7 @@ def menu_info(item):
         if i == 0:
             continue
         img = urlparse.urljoin(host, img)
+        img += "|Referer=https://www.porntrex.com/"
         title = "Imagen %s" % (str(i))
         itemlist.append(item.clone(action="", title=title, thumbnail=img, fanart=img))
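Both porntrex hunks append `|Referer=https://www.porntrex.com/` to image URLs; Kodi-style art URLs accept extra HTTP headers after a `|` separator, which lets the thumbnails load from a host that checks the referer. A small helper sketch (the helper name and the placeholder URL are assumptions, not part of the channel):

    def add_referer(url, referer="https://www.porntrex.com/"):
        """Append a Referer header to an art URL unless headers are already set."""
        if "|" in url:
            return url
        return "%s|Referer=%s" % (url, referer)

    # hypothetical thumbnail URL, for illustration only
    print(add_referer("https://cdn.example.com/thumb.jpg"))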
@@ -38,10 +38,10 @@ def lista(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    patron = '<article id="post-\d+".*?<a href="([^"]+)" title="([^"]+)">.*?data-src="([^"]+)"'
+    patron = '<article id="post-\d+".*?<a href="([^"]+)".*?data-src="([^"]+)".*?alt="([^"]+)"'
     matches = re.compile(patron,re.DOTALL).findall(data)
     scrapertools.printMatches(matches)
-    for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
+    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
         scrapedplot = ""
         scrapedtitle = scrapedtitle.replace("Permalink to Watch ", "").replace("Porn Online", "").replace("Permalink to ", "")
         itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
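The new videosXYZ pattern captures url, thumbnail and title in that order, which is why the unpacking in the for loop was reordered to match. A quick check against an invented fragment (the surrounding HTML is an assumption; the "Permalink to Watch ... Porn Online" prefix/suffix comes from the replace chain above):

    import re

    data = ('<article id="post-77"><a href="https://example.org/video-77">'
            '<img data-src="https://example.org/thumbs/77.jpg" '
            'alt="Permalink to Watch Some Title Porn Online"></a></article>')

    patron = '<article id="post-\d+".*?<a href="([^"]+)".*?data-src="([^"]+)".*?alt="([^"]+)"'
    for scrapedurl, scrapedthumbnail, scrapedtitle in re.compile(patron, re.DOTALL).findall(data):
        scrapedtitle = scrapedtitle.replace("Permalink to Watch ", "").replace("Porn Online", "")
        print(scrapedurl, scrapedthumbnail, scrapedtitle.strip())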
@@ -12,7 +12,7 @@ from platformcode import config, logger
 
 __channel__ = "xms"
 
-host = 'https://xxxmoviestream.com/'
+host = 'https://xtheatre.org'
 host1 = 'https://www.cam4.com/'
 try:
     __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -90,8 +90,6 @@ def peliculas(item):
 
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data)
-    patron_todos = '<div id="content">(.*?)<div id="footer"'
-    data = scrapertools.find_single_match(data, patron_todos)
     patron = 'src="([^"]+)" class="attachment-thumb_site.*?'  # img
     patron += '<a href="([^"]+)" title="([^"]+)".*?'  # url, title
     patron += '<div class="right"><p>([^<]+)</p>'  # plot
@@ -119,20 +117,19 @@ def peliculas(item):
 def webcam(item):
     logger.info()
     itemlist = []
 
     data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t| |<br>|#038;", "", data)
     patron = '<div class="profileBox">.*?<a href="/([^"]+)".*?'  # url
     patron += 'data-hls-preview-url="([^"]+)">.*?'  # video_url
     patron += 'data-username="([^"]+)".*?'  # username
     patron += 'title="([^"]+)".*?'  # title
-    patron += 'data-profile="([^"]+)" />'  # img
+    patron += 'data-profile="([^"]+)"'  # img
     matches = re.compile(patron, re.DOTALL).findall(data)
 
     for scrapedurl, video_url, username, scrapedtitle, scrapedthumbnail in matches:
         scrapedtitle = scrapedtitle.replace(' Chat gratis con webcam.', '')
 
-        itemlist.append(item.clone(channel=__channel__, action="play", title=scrapedtitle,
+        itemlist.append(item.clone(channel=__channel__, action="play", title=username,
                                    url=video_url, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail,
                                    viewmode="movie_with_plot", folder=True, contentTitle=scrapedtitle))
     # Extrae el paginador
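Dropping the trailing `" />"` from the `data-profile` pattern makes the cam4 scrape tolerant of markup where that attribute is no longer the last one in the tag, and the listing title switches from the page title to the username. A tiny check with two invented fragments (both markup samples are assumptions for illustration):

    import re

    patron = 'data-profile="([^"]+)"'
    old_markup = '<img title="Room" data-profile="https://example.org/a.jpg" />'
    new_markup = '<img data-profile="https://example.org/a.jpg" class="thumb">'

    for markup in (old_markup, new_markup):
        print(re.findall(patron, markup))  # both yield the profile image URL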