Merge pull request #422 from chivmalev/master
cumlouder y porntrex: correcciones
This commit is contained in:
30
plugin.video.alfa/channels/cumlouder.py
Executable file → Normal file
30
plugin.video.alfa/channels/cumlouder.py
Executable file → Normal file
@@ -18,6 +18,7 @@ def mainlist(item):
|
||||
itemlist.append(item.clone(title="Últimos videos", action="videos", url="https://www.cumlouder.com/"))
|
||||
itemlist.append(item.clone(title="Categorias", action="categorias", url="https://www.cumlouder.com/categories/"))
|
||||
itemlist.append(item.clone(title="Pornstars", action="pornstars_list", url="https://www.cumlouder.com/girls/"))
|
||||
itemlist.append(item.clone(title="Listas", action="series", url="https://www.cumlouder.com/series/"))
|
||||
itemlist.append(item.clone(title="Buscar", action="search", url="https://www.cumlouder.com/search?q=%s"))
|
||||
|
||||
return itemlist
|
||||
@@ -82,10 +83,8 @@ def categorias(item):
|
||||
itemlist = []
|
||||
|
||||
data = get_data(item.url)
|
||||
# logger.info("channels.cumlouder data="+data)
|
||||
patron = '<a tag-url="[^"]+" class="[^"]+" href="([^"]+)" title="([^"]+)">[^<]+'
|
||||
patron += '<img class="thumb" src="([^"]+)".*?<span class="cantidad">([^"]+)</span>'
|
||||
|
||||
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
|
||||
patron = '<a tag-url=.*?href="([^"]+)" title="([^"]+)".*?<img class="thumb" src="([^"]+)".*?<span class="cantidad">([^<]+)</span>'
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
for url, title, thumbnail, count in matches:
|
||||
if "go.php?" in url:
|
||||
@@ -109,6 +108,28 @@ def categorias(item):
|
||||
|
||||
return itemlist
|
||||
|
||||
def series(item):
    """Build the channel menu for the site's "series" (playlist) section.

    Downloads ``item.url``, scrapes one entry per series (title, video
    count, thumbnail) and appends a "Pagina Siguiente" entry when a
    "Next" pagination link is present.

    :param item: the menu Item being expanded; ``item.url`` points at
                 the /series/ listing page and each result is produced
                 via ``item.clone(...)``.
    :return: list of cloned Items with ``action="videos"`` (plus the
             optional pagination Item).
    """
    logger.info()
    itemlist = []

    data = get_data(item.url)
    # Collapse newlines/tabs/double-spaces so the single-line regex below
    # can match across the whole page.
    # NOTE(review): the final alternative looks like it was a literal
    # "&nbsp;" character in the original source — confirm it survived
    # copy/paste intact.
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    patron = '<a onclick=.*?href="([^"]+)".*?\<img src="([^"]+)".*?h2 itemprop="name">([^<]+).*?p>([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for url, thumbnail, title, count in matches:
        # Title shows the series name followed by its video count.
        itemlist.append(
            item.clone(title="%s (%s) " % (title, count), url=urlparse.urljoin(item.url, url), action="videos", thumbnail=thumbnail))

    # Pagination: look for the "Next" link at the bottom of the listing.
    matches = re.compile('<li[^<]+<a href="([^"]+)" rel="nofollow">Next[^<]+</a[^<]+</li>', re.DOTALL).findall(data)
    if matches:
        if "go.php?" in matches[0]:
            # Outbound-redirector link: unwrap the real target from the
            # "u=" query parameter.
            url = urllib.unquote(matches[0].split("/go.php?u=")[1].split("&")[0])
        else:
            url = urlparse.urljoin(item.url, matches[0])
        itemlist.append(item.clone(title="Pagina Siguiente", url=url))

    return itemlist
|
||||
|
||||
def videos(item):
|
||||
logger.info()
|
||||
@@ -116,7 +137,6 @@ def videos(item):
|
||||
|
||||
data = get_data(item.url)
|
||||
patron = '<a class="muestra-escena" href="([^"]+)" title="([^"]+)"[^<]+<img class="thumb" src="([^"]+)".*?<span class="minutos"> <span class="ico-minutos sprite"></span> ([^<]+)</span>'
|
||||
|
||||
matches = re.compile(patron, re.DOTALL).findall(data)
|
||||
for url, title, thumbnail, duration in matches:
|
||||
if "go.php?" in url:
|
||||
|
||||
59
plugin.video.alfa/channels/porntrex.py
Executable file → Normal file
59
plugin.video.alfa/channels/porntrex.py
Executable file → Normal file
@@ -10,6 +10,7 @@ from core.item import Item
|
||||
from platformcode import config, logger
|
||||
|
||||
host = "https://www.porntrex.com"
|
||||
perpage = 20
|
||||
|
||||
|
||||
def mainlist(item):
|
||||
@@ -73,12 +74,14 @@ def lista(item):
|
||||
scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
|
||||
else:
|
||||
scrapedurl = urlparse.urljoin(host, scrapedurl)
|
||||
if not scrapedthumbnail.startswith("https"):
|
||||
scrapedthumbnail = "https:%s" % scrapedthumbnail
|
||||
if duration:
|
||||
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
|
||||
if '>HD<' in quality:
|
||||
scrapedtitle += " [COLOR red][HD][/COLOR]"
|
||||
|
||||
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
|
||||
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentThumbnail=scrapedthumbnail,
|
||||
fanart=scrapedthumbnail))
|
||||
|
||||
# Extrae la marca de siguiente página
|
||||
@@ -128,6 +131,8 @@ def categorias(item):
|
||||
scrapedthumbnail = urllib.unquote(scrapedthumbnail.split("/go.php?u=")[1].split("&")[0])
|
||||
else:
|
||||
scrapedurl = urlparse.urljoin(host, scrapedurl)
|
||||
if not scrapedthumbnail.startswith("https"):
|
||||
scrapedthumbnail = "https:%s" % scrapedthumbnail
|
||||
if videos:
|
||||
scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
|
||||
itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
|
||||
@@ -148,10 +153,8 @@ def categorias(item):
|
||||
def playlists(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
|
||||
# Descarga la pagina
|
||||
data = get_data(item.url)
|
||||
|
||||
# Extrae las entradas
|
||||
patron = '<div class="item.*?href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?<div class="totalplaylist">([^<]+)<'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
@@ -161,12 +164,13 @@ def playlists(item):
|
||||
scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
|
||||
else:
|
||||
scrapedurl = urlparse.urljoin(host, scrapedurl)
|
||||
if not scrapedthumbnail.startswith("https"):
|
||||
scrapedthumbnail = "https:%s" % scrapedthumbnail
|
||||
if videos:
|
||||
scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
|
||||
scrapedtitle = "%s [COLOR red](%s)[/COLOR]" % (scrapedtitle, videos)
|
||||
itemlist.append(item.clone(action="videos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
|
||||
fanart=scrapedthumbnail))
|
||||
|
||||
# Extrae la marca de siguiente página
|
||||
#Extrae la marca de siguiente página
|
||||
next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]+)"')
|
||||
if next_page:
|
||||
if "go.php?" in next_page:
|
||||
@@ -180,38 +184,43 @@ def playlists(item):
|
||||
|
||||
def videos(item):
|
||||
logger.info()
|
||||
if not item.indexp:
|
||||
item.indexp = 1
|
||||
itemlist = []
|
||||
|
||||
# Descarga la pagina
|
||||
# Descarga la pagina
|
||||
data = get_data(item.url)
|
||||
|
||||
action = "play"
|
||||
if config.get_setting("menu_info", "porntrex"):
|
||||
action = "menu_info"
|
||||
# Extrae las entradas
|
||||
patron = '<a href="([^"]+)" class="item ".*?data-original="([^"]+)".*?<strong class="title">\s*([^<]+)<'
|
||||
patron = '<div class="video-item.*?href="([^"]+)".*?title="([^"]+)".*?src="([^"]+)"(.*?)<div class="durations">.*?</i>([^<]+)</div>'
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
|
||||
scrapedtitle = scrapedtitle.strip()
|
||||
count = 0
|
||||
for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
|
||||
count += 1
|
||||
if count < item.indexp:
|
||||
continue
|
||||
if "go.php?" in scrapedurl:
|
||||
scrapedurl = urllib.unquote(scrapedurl.split("/go.php?u=")[1].split("&")[0])
|
||||
scrapedthumbnail = urlparse.urljoin(host, scrapedthumbnail)
|
||||
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
|
||||
fanart=scrapedthumbnail))
|
||||
|
||||
# Extrae la marca de siguiente página
|
||||
next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
|
||||
if next_page:
|
||||
if "from=" in item.url:
|
||||
next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
|
||||
else:
|
||||
next_page = "%s?mode=async&function=get_block&block_id=playlist_view_playlist_view&sort_by" \
|
||||
"=added2fav_date&&from=%s" % (item.url, next_page)
|
||||
itemlist.append(item.clone(action="videos", title=">> Página Siguiente", url=next_page))
|
||||
scrapedurl = urlparse.urljoin(host, scrapedurl)
|
||||
if not scrapedthumbnail.startswith("https"):
|
||||
scrapedthumbnail = "https:%s" % scrapedthumbnail
|
||||
if duration:
|
||||
scrapedtitle = "%s - %s" % (duration, scrapedtitle)
|
||||
if '>HD<' in quality:
|
||||
scrapedtitle += " [COLOR red][HD][/COLOR]"
|
||||
if len(itemlist) >= perpage:
|
||||
break;
|
||||
itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, contentThumbnail=scrapedthumbnail,
|
||||
fanart=scrapedthumbnail))
|
||||
#Extrae la marca de siguiente página
|
||||
if item.channel and len(itemlist) >= perpage:
|
||||
itemlist.append( item.clone(title = "Página siguiente >>>", indexp = count + 1) )
|
||||
|
||||
return itemlist
|
||||
|
||||
|
||||
def play(item):
|
||||
logger.info()
|
||||
itemlist = []
|
||||
@@ -224,7 +233,7 @@ def play(item):
|
||||
patron = '<iframe.*?height="(\d+)".*?video_url\s*:\s*\'([^\']+)\''
|
||||
matches = scrapertools.find_multiple_matches(data, patron)
|
||||
for url, quality in matches:
|
||||
if "http" in quality:
|
||||
if "https" in quality:
|
||||
calidad = url
|
||||
url = quality
|
||||
quality = calidad + "p"
|
||||
|
||||
Reference in New Issue
Block a user