@@ -9,15 +9,16 @@ from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
-from channels import renumbertools,autoplay
+from channels import renumbertools, autoplay

CHANNEL_HOST = "https://www.animeid.tv/"

-IDIOMAS = {'Latino':'LAT', 'VOSE': 'VOSE'}
+IDIOMAS = {'Latino': 'LAT', 'VOSE': 'VOSE'}
list_language = IDIOMAS.values()
list_quality = []
list_servers = ['animeid']

+
def mainlist(item):
logger.info()
autoplay.init(item.channel, list_servers, list_quality)
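
Reviewer note: autoplay.init() in mainlist() and autoplay.start() in findvideos() (last hunk of this file, below) are the two ends of the AutoPlay wiring. A minimal sketch of how they fit together, assuming the addon's channel framework and using only calls that appear in this diff:

    from channels import autoplay
    from core.item import Item

    list_servers = ['animeid']  # hosters this channel can resolve
    list_quality = []           # the site exposes no quality labels

    def mainlist(item):
        # register the channel with AutoPlay before building the menu
        autoplay.init(item.channel, list_servers, list_quality)
        return [Item(channel=item.channel, action="novedades_series", title="Novedades")]

    def findvideos(item):
        itemlist = []  # filled with playable Items by the scraper
        autoplay.start(itemlist, item)  # hand the list to AutoPlay before returning
        return itemlist
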
@@ -69,7 +70,7 @@ def search(item, texto):
["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:19.0) Gecko/20100101 Firefox/19.0"])
headers.append(["Referer", CHANNEL_HOST])
headers.append(["X-Requested-With", "XMLHttpRequest"])
-data = scrapertools.cache_page(item.url, headers=headers)
+data = httptools.downloadpage(item.url, headers=headers).data
data = data.replace("\\", "")
patron = '{"id":"([^"]+)","text":"([^"]+)","date":"[^"]*","image":"([^"]+)","link":"([^"]+)"}'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -84,7 +85,7 @@ def search(item, texto):
context.extend(context2)
itemlist.append(
Item(channel=item.channel, action="episodios", title=title, url=url, thumbnail=thumbnail, plot=plot,
-context=context,show=title, viewmode="movie_with_plot"))
+context=context, show=title, viewmode="movie_with_plot"))

return itemlist
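
Reviewer note: the scrapertools.cache_page → httptools.downloadpage(...).data swap above is the recurring change throughout this whole diff. A minimal before/after sketch of the pattern, assuming the addon's core modules as imported in these files:

    from core import httptools

    # old style: cache_page() returned the page body as a string
    # data = scrapertools.cache_page(item.url, headers=headers)
    # new style: downloadpage() returns a response object; .data is the body
    data = httptools.downloadpage(item.url, headers=headers).data
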
@@ -110,7 +111,7 @@ def novedades_series(item):
context2 = autoplay.context
context.extend(context2)
itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=scrapedurl,
-context=context,show=title, viewmode="movie_with_plot"))
+context=context, show=title, viewmode="movie_with_plot"))
return itemlist


@@ -118,7 +119,7 @@ def novedades_episodios(item):
logger.info()
data = httptools.downloadpage(item.url).data
data = scrapertools.find_single_match(data, '<section class="lastcap">(.*?)</section>')
-patronvideos = '(?s)<a href="([^"]+)">[^<]+<header>([^<]+).*?src="([^"]+)"[\s\S]+?<p>(.+?)</p>'
+patronvideos = '(?s)<a href="([^"]+)">[^<]+<header>([^<]+).*?src="([^"]+)"[\s\S]+?<p>(.+?)</p>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
itemlist = []
for url, title, thumbnail, plot in matches:
@@ -204,13 +205,13 @@ def episodios(item, final=True):
data = httptools.downloadpage(item.url).data
data_id = scrapertools.find_single_match(data, 'data-id="([^"]+)')
CHANNEL_HEADERS = [
-["Host", "m.animeid.tv"],
-["X-Requested-With", "XMLHttpRequest"]
+["Host", "m.animeid.tv"],
+["X-Requested-With", "XMLHttpRequest"]
]
page = 0
while True:
page += 1
-u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" %(data_id, page)
+u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" % (data_id, page)
data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
# Cuando ya no hay datos devuelve: "list":[]
if '"list":[]' in data:
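
Reviewer note: episodios() pages through the site's AJAX endpoint until it answers with an empty list. A condensed sketch of the loop, with data_id and CHANNEL_HEADERS as defined in the hunk above:

    from core import httptools, jsontools

    episodes = []
    page = 0
    while True:
        page += 1
        u = "https://m.animeid.tv/ajax/caps?id=%s&ord=DESC&pag=%s" % (data_id, page)
        data = httptools.downloadpage(u, headers=CHANNEL_HEADERS).data
        if '"list":[]' in data:  # no more pages
            break
        episodes.extend(jsontools.load(data)['list'])
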
@@ -218,21 +219,25 @@ def episodios(item, final=True):
dict_data = jsontools.load(data)
list = dict_data['list'][::-1]
for dict in list:
-season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1, int(dict["numero"]))
-title = "%sx%s - %s" % (season, str(episode).zfill(2),dict["date"])
-itemlist.append(Item(action = "findvideos",
-channel = item.channel,
-title = title,
-url = CHANNEL_HOST + dict['href'],
-thumbnail = item.thumbnail,
-show = item.show,
-viewmode = "movie_with_plot"
-))
+season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1,
+int(dict["numero"]))
+title = "%sx%s - %s" % (season, str(episode).zfill(2), dict["date"])
+itemlist.append(Item(action="findvideos",
+channel=item.channel,
+title=title,
+url=CHANNEL_HOST + dict['href'],
+thumbnail=item.thumbnail,
+show=item.show,
+viewmode="movie_with_plot"
+))
if config.get_videolibrary_support():
-itemlist.append(Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
-action="add_serie_to_library", extra="episodios", show=item.show))
-itemlist.append(Item(channel=item.channel, title="[COLOR white]Descargar todos los episodios de la serie[/COLOR]", url=item.url,
-action="download_all_episodes", extra="episodios", show=item.show))
+itemlist.append(
+Item(channel=item.channel, title="[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]", url=item.url,
+action="add_serie_to_library", extra="episodios", show=item.show))
+itemlist.append(
+Item(channel=item.channel, title="[COLOR white]Descargar todos los episodios de la serie[/COLOR]",
+url=item.url,
+action="download_all_episodes", extra="episodios", show=item.show))
return itemlist

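
Reviewer note: numbered_for_tratk() maps the site's absolute episode number onto a season/episode pair using the channel's renumbering data; the wrapped call above only changes line length. How the result is used here (names from this hunk):

    season, episode = renumbertools.numbered_for_tratk(item.channel, item.contentSerieName, 1,
                                                       int(dict["numero"]))
    # e.g. season 2, episode 3 -> "2x03 - <date>"
    title = "%sx%s - %s" % (season, str(episode).zfill(2), dict["date"])
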
@@ -270,8 +275,8 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, action="findvideos", title="Siguiente: " + title_siguiente,
url=CHANNEL_HOST + url_siguiente, thumbnail=item.thumbnail, plot=item.plot, show=item.show,
fanart=item.thumbnail, folder=True))

# Requerido para AutoPlay
autoplay.start(itemlist, item)

return itemlist

@@ -3,7 +3,7 @@
import re
import urllib

-from core import jsontools as json
+from core import jsontools as json, httptools
from core import scrapertools
from core.item import Item
from platformcode import logger
@@ -12,6 +12,7 @@ url_api = ""
beeg_salt = ""
Host = "https://beeg.com"

+
def get_api_url():
global url_api
global beeg_salt
@@ -53,7 +54,7 @@ def mainlist(item):
itemlist = []
itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=url_api + "/index/main/0/pc",
viewmode="movie"))
-#itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
+# itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias Populares",
# url=url_api + "/index/main/0/pc", extra="popular"))
itemlist.append(Item(channel=item.channel, action="listcategorias", title="Listado categorias completo",
url=url_api + "/index/main/0/pc", extra="nonpopular"))
@@ -65,7 +66,7 @@ def mainlist(item):
def videos(item):
logger.info()
itemlist = []
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
JSONData = json.load(data)

for Video in JSONData["videos"]:
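
Reviewer note: this channel consumes a JSON API instead of scraping HTML, so the listing is a parse-and-iterate. A minimal sketch with the module aliases imported above (the "videos" key is taken from this hunk):

    from core import httptools
    from core import jsontools as json

    data = httptools.downloadpage(item.url).data
    JSONData = json.load(data)        # parse the API response body
    for Video in JSONData["videos"]:  # one dict per video
        pass  # build an Item from each entry, as the function does below this line
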
@@ -90,14 +91,14 @@ def videos(item):
def listcategorias(item):
logger.info()
itemlist = []
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
JSONData = json.load(data)

-#for Tag in JSONData["tags"][item.extra]:
+# for Tag in JSONData["tags"][item.extra]:
for Tag in JSONData["tags"]:
url = url_api + "/index/tag/0/pc?tag=" + Tag["tag"]
title = '%s - %s' % (str(Tag["tag"]), str(Tag["videos"]))
-#title = title[:1].upper() + title[1:]
+# title = title[:1].upper() + title[1:]
itemlist.append(
Item(channel=item.channel, action="videos", title=title, url=url, folder=True, viewmode="movie"))

@@ -109,7 +110,7 @@ def search(item, texto):

texto = texto.replace(" ", "+")
item.url = item.url % (texto)

try:
return videos(item)
# Se captura la excepción, para no interrumpir al buscador global si un canal falla
@@ -136,7 +137,8 @@ def play(item):
viedokey = re.compile("key=(.*?)%2Cend=", re.DOTALL).findall(url)[0]

url = url.replace(viedokey, decode(viedokey))
-if not url.startswith("https:"): url = "https:" + url
+if not url.startswith("https:"):
+url = "https:" + url
title = videourl
itemlist.append(["%s %s [directo]" % (title, url[-4:]), url])

@@ -40,7 +40,6 @@ else:
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]

-
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -105,7 +104,6 @@ def sub_search(item):
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedthumbnail, scrapedtitle, tipo, year in matches:
-
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", infoLabels={"year": year},
thumbnail=scrapedthumbnail, text_color=color3, page=0))
@@ -167,7 +165,6 @@ def peliculas(item):

for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches[item.page:item.page + 30]:
if 'Próximamente' not in quality and '-XXX.jpg' not in scrapedthumbnail:
-
scrapedtitle = scrapedtitle.replace('Ver ', '').strip()
contentTitle = scrapedtitle.partition(':')[0].partition(',')[0]
title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
@@ -212,7 +209,7 @@ def generos(item):
logger.info()
itemlist = []

-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

patron = '<li class="cat-item cat-item-[^"]+"><a href="([^"]+)" title="[^"]+">([^<]+)</a> <i>([^<]+)</i></li>'
@@ -231,14 +228,13 @@ def year_release(item):
logger.info()
itemlist = []

-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
# logger.info(data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'  # url, title
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle in matches:
-
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

@@ -365,7 +361,7 @@ def episodios(item):
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadirselo al titulo del item
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
-'episode'], i.infoLabels['title'])
+'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# Si el capitulo tiene imagen propia remplazar al poster
i.thumbnail = i.infoLabels['poster_path']
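
Reviewer note: infoLabels.has_key() (kept unchanged above) is Python 2 only. These channels still target Python 2, but the equivalent membership test reads the same in both versions; a hedged alternative, not part of this diff:

    if 'poster_path' in i.infoLabels:
        i.thumbnail = i.infoLabels['poster_path']
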
@@ -41,7 +41,6 @@ else:
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]

-
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -121,10 +120,10 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
data = scrapertools.decodeHtmlentities(data)
patron = '<article id="[^"]+" class="TPost[^<]+<a href="([^"]+)">.*?'  # url
-patron += '<img src="([^"]+)".*?'  # img
-patron += '</figure>(.*?)'  # tipo
-patron += '<h3 class="Title">([^<]+)</h3>.*?'  # title
-patron += '<span class="Year">([^<]+)</span>.*?'  # year
+patron += '<img src="([^"]+)".*?'  # img
+patron += '</figure>(.*?)'  # tipo
+patron += '<h3 class="Title">([^<]+)</h3>.*?'  # title
+patron += '<span class="Year">([^<]+)</span>.*?'  # year

matches = scrapertools.find_multiple_matches(data, patron)

@@ -173,7 +172,6 @@ def genresYears(item):
matches = scrapertools.find_multiple_matches(data, patron)

for scrapedurl, scrapedtitle in matches:
-
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="peliculas"))

return itemlist
@@ -183,13 +181,12 @@ def year_release(item):
logger.info()
itemlist = []

-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'  # url, title
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle in matches:
-
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

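
Reviewer note: these channels build one long regex out of annotated fragments and run it once over the flattened page, so each capture group lines up with one field of the listing. A compact sketch of the idiom (fragments copied from the series() hunk below; assumes scrapertools as imported in these files):

    from core import scrapertools

    patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?'  # url
    patron += '<img src="([^"]+)".*?'                                    # img
    patron += '<h3 class="Title">([^<]+)</h3>'                           # title
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        pass  # one tuple per matched article
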
@@ -203,13 +200,12 @@ def series(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\(.*?\)| |<br>", "", data)
patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?'  # url
-patron += '<img src="([^"]+)".*?'  # img
-patron += '<h3 class="Title">([^<]+)</h3>'  # title
+patron += '<img src="([^"]+)".*?'  # img
+patron += '<h3 class="Title">([^<]+)</h3>'  # title

matches = scrapertools.find_multiple_matches(data, patron)

for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
-
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
@@ -274,7 +270,7 @@ def episodios(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
-patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
+patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>'  # title de episodios

matches = scrapertools.find_multiple_matches(data, patron)
@@ -307,7 +303,7 @@ def episodios(item):
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadirselo al titulo del item
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
-'episode'], i.infoLabels['title'])
+'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# Si el capitulo tiene imagen propia remplazar al poster
i.thumbnail = i.infoLabels['poster_path']
@@ -340,7 +336,8 @@ def findvideos(item):
lang, quality = match[0]
quality = quality.strip()
headers = {'Referer': item.url}
-url_1 = scrapertools.find_single_match(data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
+url_1 = scrapertools.find_single_match(data,
+'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
new_data = httptools.downloadpage(url_1, headers=headers).data
new_data = re.sub(r"\n|\r|\t|amp;|\(.*?\)|\s{2}| ", "", new_data)
new_data = scrapertools.decodeHtmlentities(new_data)
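
Reviewer note: findvideos() resolves each player option by pulling that option's iframe URL and fetching it with the page itself as Referer. A trimmed sketch (regex and headers exactly as in the hunk above; data and option come from the surrounding function):

    from core import httptools, scrapertools

    headers = {'Referer': item.url}
    url_1 = scrapertools.find_single_match(
        data, 'id="Opt%s"><iframe width="560" height="315" src="([^"]+)"' % option)
    new_data = httptools.downloadpage(url_1, headers=headers).data
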
@@ -5,6 +5,7 @@ import re

from core import scrapertools
from core import servertools
+from core import httptools
from core.item import Item
from platformcode import config, logger

@@ -43,7 +44,7 @@ def lista(item):
itemlist = []

# Descarga la pagina
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data

# Extrae las entradas (carpetas)
patronvideos = '<img .*?src="(.*?)"'
@@ -92,7 +93,7 @@ def detail(item):
itemlist = []

# Descarga la pagina
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data

data = data.replace("%3A", ":")
data = data.replace("%2F", "/")

@@ -5,6 +5,7 @@ import urlparse

from core import scrapertools
from core import servertools
+from core import httptools
from core.item import Item
from platformcode import logger

@@ -30,7 +31,7 @@ def DocuSeries(item):
itemlist = []

# Descarga la página
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data

# Extrae las entradas (carpetas)
patronvideos = '<li><b><a href="([^"]+)" target="_blank">([^<]+)</a></b></li>'
@@ -54,7 +55,7 @@ def DocuTag(item):
itemlist = []

# Descarga la página
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span class='label-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
scrapertools.printMatches(matches)
@@ -76,7 +77,7 @@ def DocuARCHIVO(item):
itemlist = []

# Descarga la página
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
patronvideos = "<a class='post-count-link' href='([^']+)'>([^<]+)</a>[^<]+"
patronvideos += "<span class='post-count' dir='ltr'>(.+?)</span>"
matches = re.compile(patronvideos, re.DOTALL).findall(data)
@@ -102,7 +103,7 @@ def listvideos(item):
scrapedplot = ""

# Descarga la página
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
patronvideos = "<h3 class='post-title entry-title'[^<]+"
patronvideos += "<a href='([^']+)'>([^<]+)</a>.*?"
patronvideos += "<div class='post-body entry-content'(.*?)<div class='post-footer'>"
@@ -156,7 +157,7 @@ def findvideos(item):
itemlist = []

# Descarga la página
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
data = scrapertools.get_match(data, "<div class='post-body entry-content'(.*?)<div class='post-footer'>")

# Busca los enlaces a los videos

@@ -179,7 +179,7 @@ def genres(item):
logger.info()
itemlist = []

-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)

patron = '<li class="myli"><a href="/([^"]+)">([^<]+)</a>'

@@ -1,23 +1,27 @@
# -*- coding: utf-8 -*-
-#------------------------------------------------------------
-import urlparse,urllib2,urllib,re
-import os, sys
+# ------------------------------------------------------------
+import urlparse
+import urllib2
+import urllib
+import re
+import os
+import sys
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import config, logger
from core import httptools

-host= 'https://pandamovies.pw'
+host = 'https://pandamovies.pw'


def mainlist(item):
logger.info()
itemlist = []
-itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/movies"))
-itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/movies"))
-itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/movies"))
-itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
+itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/movies"))
+itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/movies"))
+itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host + "/movies"))
+itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist

@@ -35,46 +39,45 @@ def search(item, texto):


def categorias(item):
-itemlist = []
-data = scrapertools.cache_page(item.url)
-if item.title == "Categorias" :
-data = scrapertools.get_match(data,'<a href="#">Genres</a>(.*?)</ul>')
-else:
-data = scrapertools.get_match(data,'<a href="#">Studios</a>(.*?)</ul>')
-data = re.sub(r"\n|\r|\t| |<br>", "", data)
-patron = '<a href="([^"]+)">([^<]+)</a>'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedtitle in matches:
-scrapedplot = ""
-scrapedthumbnail = ""
-scrapedurl = scrapedurl.replace("https:", "")
-scrapedurl = "https:" + scrapedurl
-itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-thumbnail=scrapedthumbnail , plot=scrapedplot) )
-return itemlist
+itemlist = []
+data = httptools.downloadpage(item.url).data
+if item.title == "Categorias":
+data = scrapertools.get_match(data, '<a href="#">Genres</a>(.*?)</ul>')
+else:
+data = scrapertools.get_match(data, '<a href="#">Studios</a>(.*?)</ul>')
+data = re.sub(r"\n|\r|\t| |<br>", "", data)
+patron = '<a href="([^"]+)">([^<]+)</a>'
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedurl, scrapedtitle in matches:
+scrapedplot = ""
+scrapedthumbnail = ""
+scrapedurl = scrapedurl.replace("https:", "")
+scrapedurl = "https:" + scrapedurl
+itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+thumbnail=scrapedthumbnail, plot=scrapedplot))
+return itemlist


def lista(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
patron = '<div data-movie-id="\d+".*?'
patron += '<a href="([^"]+)".*?oldtitle="([^"]+)".*?'
patron += '<img src="([^"]+)".*?'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
-url = urlparse.urljoin(item.url,scrapedurl)
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
+url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle
thumbnail = scrapedthumbnail
plot = ""
-itemlist.append( Item(channel=item.channel, action="findvideos" , title=title , url=url, thumbnail=thumbnail,
-plot=plot, contentTitle = title))
-# <li class='active'><a class=''>1</a></li><li><a rel='nofollow' class='page larger' href='https://pandamovies.pw/movies/page/2'>
-next_page = scrapertools.find_single_match(data,'<li class=\'active\'>.*?href=\'([^\']+)\'>')
-if next_page =="":
-next_page = scrapertools.find_single_match(data,'<a.*?href="([^"]+)" >Next »</a>')
-if next_page!="":
-next_page = urlparse.urljoin(item.url,next_page)
-itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
+itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
+plot=plot, contentTitle=title))
+# <li class='active'><a class=''>1</a></li><li><a rel='nofollow' class='page larger' href='https://pandamovies.pw/movies/page/2'>
+next_page = scrapertools.find_single_match(data, '<li class=\'active\'>.*?href=\'([^\']+)\'>')
+if next_page == "":
+next_page = scrapertools.find_single_match(data, '<a.*?href="([^"]+)" >Next »</a>')
+if next_page != "":
+next_page = urlparse.urljoin(item.url, next_page)
+itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist

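
Reviewer note: lista() tries two regexes for the next-page link because the site renders two different pagers. A condensed sketch of the fallback (patterns copied from the hunk above):

    import urlparse
    from core import scrapertools

    next_page = scrapertools.find_single_match(data, '<li class=\'active\'>.*?href=\'([^\']+)\'>')
    if next_page == "":  # fall back to the alternate pager markup
        next_page = scrapertools.find_single_match(data, '<a.*?href="([^"]+)" >Next »</a>')
    if next_page != "":
        next_page = urlparse.urljoin(item.url, next_page)  # resolve relative links
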
@@ -42,7 +42,6 @@ else:
headers = [['User-Agent', 'Mozilla/50.0 (Windows NT 10.0; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0'],
['Referer', host]]

-
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
@@ -103,7 +102,7 @@ def sub_search(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
data = scrapertools.find_single_match(data, 'Archivos (.*?)resppages')
-patron = 'img alt="([^"]+)".*?'
+patron = 'img alt="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'href="([^"]+)".*?'
patron += 'fechaestreno">([^<]+)'
@@ -111,7 +110,6 @@ def sub_search(item):

for scrapedtitle, scrapedthumbnail, scrapedurl, year in matches:
if 'tvshows' not in scrapedurl:
-
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, contentTitle=scrapedtitle,
action="findvideos", infoLabels={"year": year},
thumbnail=scrapedthumbnail, text_color=color3))
@@ -137,7 +135,7 @@ def peliculas(item):
# logger.info(data)

# img, title
-patron = '(?is)movie-img img-box.*?alt="([^"]+)".*?'
+patron = '(?is)movie-img img-box.*?alt="([^"]+)".*?'
patron += 'src="([^"]+)".*?'
patron += 'href="([^"]+)".*?'
patron += 'fechaestreno">([^<]+)<.*?'
@@ -187,7 +185,7 @@ def genresYears(item):

for scrapedurl, scrapedtitle in matches:
title = '%s' % (scrapedtitle)
-title = title.replace("Peliculas de ","").replace(" Online","")
+title = title.replace("Peliculas de ", "").replace(" Online", "")
itemlist.append(item.clone(title=title, url=scrapedurl, action="peliculas"))
return itemlist

@@ -196,14 +194,13 @@ def year_release(item):
logger.info()
itemlist = []

-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
# logger.info(data)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'  # url, title
matches = re.compile(patron, re.DOTALL).findall(data)

for scrapedurl, scrapedtitle in matches:
-
itemlist.append(item.clone(channel=item.channel, action="peliculas", title=scrapedtitle, page=0,
url=scrapedurl, text_color=color3, viewmode="movie_with_plot", extra='next'))

@@ -219,13 +216,12 @@ def series(item):
# logger.info(data)

patron = '<article class="TPost C TPostd">\s*<a href="([^"]+)">.*?'  # url
-patron += '<img src="([^"]+)".*?'  # img
-patron += '<h3 class="Title">([^<]+)</h3>'  # title
+patron += '<img src="([^"]+)".*?'  # img
+patron += '<h3 class="Title">([^<]+)</h3>'  # title

matches = scrapertools.find_multiple_matches(data, patron)

for scrapedurl, scrapedthumbnail, scrapedtitle in matches[item.page:item.page + 30]:
-
itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="temporadas",
contentSerieName=scrapedtitle, show=scrapedtitle,
thumbnail='https:' + scrapedthumbnail, contentType='tvshow'))
@@ -291,7 +287,7 @@ def episodios(item):

data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
-patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
+patron = '<td class="MvTbImg B"><a href="([^"]+)".*?'  # url
patron += '<td class="MvTbTtl"><a href="https://cine24h.net/episode/(.*?)/">([^<]+)</a>'  # title de episodios

matches = scrapertools.find_multiple_matches(data, patron)
@@ -324,7 +320,7 @@ def episodios(item):
if i.infoLabels['title']:
# Si el capitulo tiene nombre propio añadirselo al titulo del item
i.title = "%sx%s %s" % (i.infoLabels['season'], i.infoLabels[
-'episode'], i.infoLabels['title'])
+'episode'], i.infoLabels['title'])
if i.infoLabels.has_key('poster_path'):
# Si el capitulo tiene imagen propia remplazar al poster
i.thumbnail = i.infoLabels['poster_path']
@@ -369,7 +365,8 @@ def findvideos(item):
lang = languages[lang]

server = servertools.get_server_from_url(url)
-title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (server.title(), item.quality, lang)
+title = "»» [COLOR yellow](%s)[/COLOR] [COLOR goldenrod](%s)[/COLOR] %s ««" % (
+server.title(), item.quality, lang)
# if 'google' not in url and 'directo' not in server:

itemlist.append(item.clone(action='play', url=url, title=title, language=lang, text_color=color3))

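
Reviewer note: servertools.get_server_from_url() infers the hoster from a link so the entry can be labelled before playback, as in the title built above. A minimal sketch (the example URL is hypothetical):

    from core import servertools

    url = "https://example-hoster.com/embed/abc123"  # hypothetical link
    server = servertools.get_server_from_url(url)
    title = "»» [COLOR yellow](%s)[/COLOR] ««" % server.title()
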
@@ -200,7 +200,6 @@ def peliculas(item):

paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
if paginacion:
-
itemlist.append(Item(channel=item.channel, action="peliculas",
title="» Siguiente »", url=paginacion, plot="Página Siguiente",
thumbnail='https://raw.githubusercontent.com/Inter95/tvguia/master/thumbnails/next.png'))
@@ -219,7 +218,7 @@ def generos(item):
logger.info()
itemlist = []

-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
# logger.info(data)
patron = '<div class="todos">.*?'
@@ -270,8 +269,9 @@ def findvideos(item):
server = servertools.get_server_from_url(scrapedurl)
quality = scrapertools.find_single_match(
datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
-title = "Ver en: [COLOR yellowgreen][{}][/COLOR] [COLOR yellow][{}][/COLOR]".format(servidores.capitalize(),
-quality.upper())
+title = "Ver en: [COLOR yellowgreen][{}][/COLOR] [COLOR yellow][{}][/COLOR]".format(
+servidores.capitalize(),
+quality.upper())

itemlist.append(item.clone(action='play', title=title, url=scrapedurl, quality=item.quality,
server=server, language=lang.replace('Español ', ''),

@@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
-#------------------------------------------------------------
-import urlparse,urllib2,urllib,re
-import os, sys
+# ------------------------------------------------------------
+import urlparse
+import urllib2
+import urllib
+import re
+import os
+import sys
from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,14 +14,15 @@ from core import httptools

host = 'http://porneq.com'


def mainlist(item):
logger.info()
itemlist = []
-itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host + "/videos/browse/"))
-itemlist.append( Item(channel=item.channel, title="Mas Vistos" , action="lista", url=host + "/videos/most-viewed/"))
-itemlist.append( Item(channel=item.channel, title="Mas Votado" , action="lista", url=host + "/videos/most-liked/"))
-itemlist.append( Item(channel=item.channel, title="Big Tits" , action="lista", url=host + "/show/big+tits&sort=w"))
-itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
+itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host + "/videos/browse/"))
+itemlist.append(Item(channel=item.channel, title="Mas Vistos", action="lista", url=host + "/videos/most-viewed/"))
+itemlist.append(Item(channel=item.channel, title="Mas Votado", action="lista", url=host + "/videos/most-liked/"))
+itemlist.append(Item(channel=item.channel, title="Big Tits", action="lista", url=host + "/show/big+tits&sort=w"))
+itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist


@@ -37,20 +42,20 @@ def search(item, texto):
def lista(item):
logger.info()
itemlist = []
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
patron = '<a class="clip-link" data-id="\d+" title="([^"]+)" href="([^"]+)">.*?'
patron += '<img src="([^"]+)".*?'
patron += '<span class="timer">(.*?)</span></div>'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedtitle,scrapedurl,scrapedthumbnail,scrapedtime in matches:
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedtime in matches:
scrapedplot = ""
scrapedtitle = "[COLOR yellow]" + (scrapedtime) + "[/COLOR] " + scrapedtitle
-itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
-thumbnail=scrapedthumbnail, plot=scrapedplot) )
-next_page = scrapertools.find_single_match(data,'<nav id="page_nav"><a href="(.*?)"')
-if next_page !="":
-itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
+itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
+thumbnail=scrapedthumbnail, plot=scrapedplot))
+next_page = scrapertools.find_single_match(data, '<nav id="page_nav"><a href="(.*?)"')
+if next_page != "":
+itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist


@@ -58,8 +63,8 @@ def play(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
-scrapedurl = scrapertools.find_single_match(data,'<source src="([^"]+)"')
-itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
-thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
+scrapedurl = scrapertools.find_single_match(data, '<source src="([^"]+)"')
+itemlist.append(
+Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=scrapedurl,
+thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
return itemlist

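
Reviewer note: play() in this channel resolves the page itself: it grabs the <source src> of the HTML5 player and wraps it as a "directo" item. A minimal sketch (regex and fields from the hunk above):

    from core import httptools, scrapertools
    from core.item import Item

    data = httptools.downloadpage(item.url).data
    scrapedurl = scrapertools.find_single_match(data, '<source src="([^"]+)"')
    itemlist.append(Item(channel=item.channel, action="play", title=item.title,
                         url=scrapedurl, server="directo", folder=False))
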
@@ -14,11 +14,11 @@ host = 'http://sexgalaxy.net'
def mainlist(item):
logger.info()
itemlist = []
-itemlist.append( Item(channel=item.channel, title="Ultimos" , action="lista", url=host + "/new-releases/"))
-itemlist.append( Item(channel=item.channel, title="Peliculas" , action="lista", url=host + "/full-movies/"))
-itemlist.append( Item(channel=item.channel, title="Canales" , action="canales", url=host))
-itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
-itemlist.append( Item(channel=item.channel, title="Buscar" , action="search"))
+itemlist.append(Item(channel=item.channel, title="Ultimos", action="lista", url=host + "/new-releases/"))
+itemlist.append(Item(channel=item.channel, title="Peliculas", action="lista", url=host + "/full-movies/"))
+itemlist.append(Item(channel=item.channel, title="Canales", action="canales", url=host))
+itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
+itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist


@@ -35,20 +35,20 @@ def search(item, texto):
return []


-def canales (item):
+def canales(item):
logger.info()
itemlist = []
-data = scrapertools.cache_page(host)
-data = scrapertools.get_match(data,'Top Networks</a>(.*?)</ul>')
-patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedtitle in matches:
+data = httptools.downloadpage(host).data
+data = scrapertools.get_match(data, 'Top Networks</a>(.*?)</ul>')
+patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
-thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
-itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-thumbnail=scrapedthumbnail, plot=scrapedplot) )
+thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
+itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist


@@ -56,16 +56,16 @@ def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
-data = scrapertools.get_match(data,'More Categories</a>(.*?)</ul>')
-patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedtitle in matches:
+data = scrapertools.get_match(data, 'More Categories</a>(.*?)</ul>')
+patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
scrapedtitle = str(scrapedtitle)
-thumbnail = urlparse.urljoin(item.url,scrapedthumbnail)
-itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-thumbnail=scrapedthumbnail, plot=scrapedplot) )
+thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
+itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist


@@ -73,24 +73,24 @@ def lista(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
-patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedtitle,scrapedthumbnail in matches:
+patron = '<div class="post-img small-post-img">.*?<a href="(.*?)" title="(.*?)">.*?<img src="(.*?)"'
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
scrapedplot = ""
-calidad = scrapertools.find_single_match(scrapedtitle,'\(.*?/(\w+)\)')
+calidad = scrapertools.find_single_match(scrapedtitle, '\(.*?/(\w+)\)')
if calidad:
scrapedtitle = "[COLOR red]" + calidad + "[/COLOR] " + scrapedtitle
-itemlist.append( Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
-thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot) )
-next_page = scrapertools.find_single_match(data,'<a class="next page-numbers" href="([^"]+)"')
-if next_page!="":
-itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page) )
+itemlist.append(Item(channel=item.channel, action="play", title=scrapedtitle, url=scrapedurl,
+thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, plot=scrapedplot))
+next_page = scrapertools.find_single_match(data, '<a class="next page-numbers" href="([^"]+)"')
+if next_page != "":
+itemlist.append(item.clone(action="lista", title="Next page >>", text_color="blue", url=next_page))
return itemlist


def play(item):
logger.info()
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title
@@ -98,4 +98,3 @@ def play(item):
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
-
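
Reviewer note: scrapertools.get_match() narrows scraping to one block of the page before the item regex runs, which keeps the generic <li> pattern from matching the whole document. A minimal sketch (patterns from the canales() hunk above):

    import re
    from core import httptools, scrapertools

    data = httptools.downloadpage(host).data
    data = scrapertools.get_match(data, 'Top Networks</a>(.*?)</ul>')  # scope to the menu
    patron = '<li id=.*?<a href="(.*?)">(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
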
@@ -1,7 +1,11 @@
# -*- coding: utf-8 -*-
-#------------------------------------------------------------
-import urlparse,urllib2,urllib,re
-import os, sys
+# ------------------------------------------------------------
+import urlparse
+import urllib2
+import urllib
+import re
+import os
+import sys
from core import scrapertools
from core import servertools
from core.item import Item
@@ -10,14 +14,17 @@ from core import httptools

host = 'http://www.webpeliculasporno.com'


def mainlist(item):
logger.info()
itemlist = []
-itemlist.append( Item(channel=item.channel, title="Ultimas" , action="lista", url= host))
-itemlist.append( Item(channel=item.channel, title="Mas vistas" , action="lista", url= host + "/?display=tube&filtre=views"))
-itemlist.append( Item(channel=item.channel, title="Mejor valoradas" , action="lista", url= host + "/?display=tube&filtre=rate"))
-itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url= host))
-itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
+itemlist.append(Item(channel=item.channel, title="Ultimas", action="lista", url=host))
+itemlist.append(
+Item(channel=item.channel, title="Mas vistas", action="lista", url=host + "/?display=tube&filtre=views"))
+itemlist.append(
+Item(channel=item.channel, title="Mejor valoradas", action="lista", url=host + "/?display=tube&filtre=rate"))
+itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
+itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
return itemlist


@@ -36,36 +43,35 @@ def search(item, texto):

def categorias(item):
itemlist = []
-data = scrapertools.cache_page(item.url)
-patron = '<li class="cat-item [^>]+><a href="([^"]+)" >([^<]+)'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedtitle in matches:
+data = httptools.downloadpage(item.url).data
+patron = '<li class="cat-item [^>]+><a href="([^"]+)" >([^<]+)'
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
-itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-thumbnail=scrapedthumbnail, plot=scrapedplot) )
+itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+thumbnail=scrapedthumbnail, plot=scrapedplot))
return itemlist


def lista(item):
logger.info()
itemlist = []
-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
patron = '<li class="border-radius-5 box-shadow">.*?'
patron += 'src="([^"]+)".*?'
patron += '<a href="([^"]+)" title="([^"]+)">'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedthumbnail,scrapedurl,scrapedtitle in matches:
-url = urlparse.urljoin(item.url,scrapedurl)
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
+url = urlparse.urljoin(item.url, scrapedurl)
title = scrapedtitle
contentTitle = title
thumbnail = scrapedthumbnail
plot = ""
-itemlist.append( Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
-fanart=thumbnail, plot=plot, contentTitle = contentTitle))
-next_page = scrapertools.find_single_match(data,'<li><a class="next page-numbers" href="([^"]+)">Next')
-if next_page!="":
-next_page = urlparse.urljoin(item.url,next_page)
-itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
+itemlist.append(Item(channel=item.channel, action="findvideos", title=title, url=url, thumbnail=thumbnail,
+fanart=thumbnail, plot=plot, contentTitle=contentTitle))
+next_page = scrapertools.find_single_match(data, '<li><a class="next page-numbers" href="([^"]+)">Next')
+if next_page != "":
+next_page = urlparse.urljoin(item.url, next_page)
+itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
return itemlist

@@ -5,28 +5,34 @@ import sys
import urlparse

from platformcode import logger
-from core import scrapertools
+from core import scrapertools, httptools
from core.item import Item

HOST = "http://es.xhamster.com/"


def mainlist(item):
logger.info()

itemlist = []
-itemlist.append( Item(channel=item.channel, action="videos" , title="Útimos videos" , url=HOST, viewmode="movie"))
-itemlist.append( Item(channel=item.channel, action="categorias" , title="Categorías", url=HOST))
-itemlist.append( Item(channel=item.channel, action="votados" , title="Lo mejor"))
-itemlist.append( Item(channel=item.channel, action="vistos" , title="Los mas vistos"))
-itemlist.append( Item(channel=item.channel, action="videos" , title="Recomendados", url=urlparse.urljoin(HOST,"/videos/recommended")))
-itemlist.append( Item(channel=item.channel, action="search" , title="Buscar", url=urlparse.urljoin(HOST,"/search?q=%s")))
+itemlist.append(Item(channel=item.channel, action="videos", title="Útimos videos", url=HOST, viewmode="movie"))
+itemlist.append(Item(channel=item.channel, action="categorias", title="Categorías", url=HOST))
+itemlist.append(Item(channel=item.channel, action="votados", title="Lo mejor"))
+itemlist.append(Item(channel=item.channel, action="vistos", title="Los mas vistos"))
+itemlist.append(Item(channel=item.channel, action="videos", title="Recomendados",
+url=urlparse.urljoin(HOST, "/videos/recommended")))
+itemlist.append(
+Item(channel=item.channel, action="search", title="Buscar", url=urlparse.urljoin(HOST, "/search?q=%s")))

return itemlist


# REALMENTE PASA LA DIRECCION DE BUSQUEDA
-def search(item,texto):
+def search(item, texto):
logger.info()
-tecleado = texto.replace( " ", "+" )
+tecleado = texto.replace(" ", "+")
item.url = item.url % tecleado
item.extra = "buscar"
try:
@@ -37,71 +43,95 @@ def search(item,texto):
for line in sys.exc_info():
logger.error("%s" % line)
return []


# SECCION ENCARGADA DE BUSCAR
def videos(item):
logger.info()
-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data
itemlist = []

-data = scrapertools.get_match(data,'<article.+?>(.*?)</article>')
-
-#Patron
+data = scrapertools.get_match(data, '<article.+?>(.*?)</article>')
+
+# Patron
patron = '(?s)<div class="thumb-list__item.*?href="([^"]+)".*?src="([^"]+)".*?alt="([^"]+)">.*?'
patron += '<div class="thumb-image-container__duration">(.+?)</div>'
-matches = re.compile(patron,re.DOTALL).findall(data)
+matches = re.compile(patron, re.DOTALL).findall(data)

-for scrapedurl,scrapedthumbnail,scrapedtitle,duration in matches:
-#logger.debug("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
+for scrapedurl, scrapedthumbnail, scrapedtitle, duration in matches:
+# logger.debug("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
fullTitle = scrapedtitle.strip() + " [" + duration + "]"
-itemlist.append( Item(channel=item.channel, action="play" , title=fullTitle , url=scrapedurl, thumbnail=scrapedthumbnail, folder=True))
+itemlist.append(
+Item(channel=item.channel, action="play", title=fullTitle, url=scrapedurl, thumbnail=scrapedthumbnail,
+folder=True))

-#Paginador
+# Paginador
patron = '(?s)<div class="pager-container".*?<li class="next">.*?href="([^"]+)"'
-matches = re.compile(patron,re.DOTALL).findall(data)
-if len(matches) >0:
-itemlist.append( Item(channel=item.channel, action="videos", title="Página Siguiente" , url=matches[0] , thumbnail="" , folder=True, viewmode="movie") )
+matches = re.compile(patron, re.DOTALL).findall(data)
+if len(matches) > 0:
+itemlist.append(
+Item(channel=item.channel, action="videos", title="Página Siguiente", url=matches[0], thumbnail="",
+folder=True, viewmode="movie"))

return itemlist


# SECCION ENCARGADA DE VOLCAR EL LISTADO DE CATEGORIAS CON EL LINK CORRESPONDIENTE A CADA PAGINA
def categorias(item):
logger.info()
itemlist = []

-data = scrapertools.cache_page(item.url)
+data = httptools.downloadpage(item.url).data

-data = scrapertools.get_match(data,'(?s)<div class="all-categories">(.*?)</aside>')
+data = scrapertools.get_match(data, '(?s)<div class="all-categories">(.*?)</aside>')

patron = '(?s)<li>.*?<a href="([^"]+)".*?>([^<]+).*?</a></li>'
-matches = re.compile(patron,re.DOTALL).findall(data)
-for scrapedurl,scrapedtitle in matches:
+matches = re.compile(patron, re.DOTALL).findall(data)
+for scrapedurl, scrapedtitle in matches:
fullTitle = scrapedtitle.strip()
-itemlist.append( Item(channel=item.channel, action="videos" , title=fullTitle , url=scrapedurl))
+itemlist.append(Item(channel=item.channel, action="videos", title=fullTitle, url=scrapedurl))

return itemlist


def votados(item):
logger.info()
itemlist = []

-itemlist.append( Item(channel=item.channel, action="videos" , title="Día", url=urlparse.urljoin(HOST,"/best/daily"), viewmode="movie"))
-itemlist.append( Item(channel=item.channel, action="videos" , title="Semana" , url=urlparse.urljoin(HOST,"/best/weekly"), viewmode="movie"))
-itemlist.append( Item(channel=item.channel, action="videos" , title="Mes" , url=urlparse.urljoin(HOST,"/best/monthly"), viewmode="movie"))
-itemlist.append( Item(channel=item.channel, action="videos" , title="De siempre" , url=urlparse.urljoin(HOST,"/best/"), viewmode="movie"))
+itemlist.append(Item(channel=item.channel, action="videos", title="Día", url=urlparse.urljoin(HOST, "/best/daily"),
+viewmode="movie"))
+itemlist.append(
+Item(channel=item.channel, action="videos", title="Semana", url=urlparse.urljoin(HOST, "/best/weekly"),
+viewmode="movie"))
+itemlist.append(
+Item(channel=item.channel, action="videos", title="Mes", url=urlparse.urljoin(HOST, "/best/monthly"),
+viewmode="movie"))
+itemlist.append(
+Item(channel=item.channel, action="videos", title="De siempre", url=urlparse.urljoin(HOST, "/best/"),
+viewmode="movie"))
return itemlist


def vistos(item):
logger.info()
itemlist = []

-itemlist.append( Item(channel=item.channel, action="videos" , title="Día", url=urlparse.urljoin(HOST,"/most-viewed/daily"), viewmode="movie"))
-itemlist.append( Item(channel=item.channel, action="videos" , title="Semana" , url=urlparse.urljoin(HOST,"/most-viewed/weekly"), viewmode="movie"))
-itemlist.append( Item(channel=item.channel, action="videos" , title="Mes" , url=urlparse.urljoin(HOST,"/most-viewed/monthly"), viewmode="movie"))
-itemlist.append( Item(channel=item.channel, action="videos" , title="De siempre" , url=urlparse.urljoin(HOST,"/most-viewed/"), viewmode="movie"))
+itemlist.append(
+Item(channel=item.channel, action="videos", title="Día", url=urlparse.urljoin(HOST, "/most-viewed/daily"),
+viewmode="movie"))
+itemlist.append(
+Item(channel=item.channel, action="videos", title="Semana", url=urlparse.urljoin(HOST, "/most-viewed/weekly"),
+viewmode="movie"))
+itemlist.append(
+Item(channel=item.channel, action="videos", title="Mes", url=urlparse.urljoin(HOST, "/most-viewed/monthly"),
+viewmode="movie"))
+itemlist.append(
+Item(channel=item.channel, action="videos", title="De siempre", url=urlparse.urljoin(HOST, "/most-viewed/"),
+viewmode="movie"))

return itemlist

@@ -111,15 +141,15 @@ def play(item):
logger.info()
itemlist = []

-data = scrapertools.cachePage(item.url)
+data = httptools.downloadpage(item.url).data
logger.debug(data)

patron = '"([0-9]+p)":"([^"]+)"'
-matches = re.compile(patron,re.DOTALL).findall(data)
+matches = re.compile(patron, re.DOTALL).findall(data)

for res, url in matches:
url = url.replace("\\", "")
-logger.debug("url="+url)
-itemlist.append(["%s %s [directo]" % (res, scrapertools.get_filename_from_url(url)[-4:]), url])
+logger.debug("url=" + url)
+itemlist.append(["%s %s [directo]" % (res, scrapertools.get_filename_from_url(url)[-4:]), url])

return itemlist

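
Reviewer note: votados() and vistos() repeat one append per period. The same menus could be built from a small table, which would also shrink reformatting diffs like this one; a hedged alternative, not part of this change (names from the hunks above):

    for title, path in [("Día", "daily"), ("Semana", "weekly"),
                        ("Mes", "monthly"), ("De siempre", "")]:
        itemlist.append(Item(channel=item.channel, action="videos", title=title,
                             url=urlparse.urljoin(HOST, "/best/" + path), viewmode="movie"))
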
@@ -1,7 +1,11 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#------------------------------------------------------------
|
||||
import urlparse,urllib2,urllib,re
|
||||
import os, sys
|
||||
# ------------------------------------------------------------
|
||||
import urlparse
|
||||
import urllib2
|
||||
import urllib
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
|
||||
from platformcode import config, logger
|
||||
from core import scrapertools

@@ -11,17 +15,18 @@ from core import httptools

 host = 'https://www.xozilla.com'


 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/latest-updates/"))
-    itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/"))
-    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated/"))
+    itemlist.append(Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/latest-updates/"))
+    itemlist.append(Item(channel=item.channel, title="Popular", action="lista", url=host + "/most-popular/"))
+    itemlist.append(Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top-rated/"))

-    itemlist.append( Item(channel=item.channel, title="PornStar" , action="categorias", url=host + "/models/"))
-    itemlist.append( Item(channel=item.channel, title="Canal" , action="categorias", url=host + "/channels/"))
-    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host + "/categories/"))
-    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
+    itemlist.append(Item(channel=item.channel, title="PornStar", action="categorias", url=host + "/models/"))
+    itemlist.append(Item(channel=item.channel, title="Canal", action="categorias", url=host + "/channels/"))
+    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host + "/categories/"))
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist

@@ -46,64 +51,62 @@ def categorias(item):
     patron = '<a class="item" href="([^"]+)" title="([^"]+)">.*?'
     patron += '<img class="thumb" src="([^"]+)".*?'
     patron += '(.*?)</a>'
-    matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedtitle,scrapedthumbnail,cantidad in matches:
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedtitle, scrapedthumbnail, cantidad in matches:
         scrapedplot = ""
-        cantidad = scrapertools.find_single_match(cantidad,'(\d+) videos</div>')
+        cantidad = scrapertools.find_single_match(cantidad, '(\d+) videos</div>')
         if cantidad:
             scrapedtitle += " (" + cantidad + ")"
-        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot) )
-    next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
-    if next_page!="#videos":
-        next_page = urlparse.urljoin(item.url,next_page)
-        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
-    if next_page=="#videos":
-        next_page = scrapertools.find_single_match(data,'from:(\d+)">Next</a>')
-        next_page = urlparse.urljoin(item.url,next_page) + "/"
-        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page) )
+        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+                             thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot))
+    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
+    if next_page != "#videos":
+        next_page = urlparse.urljoin(item.url, next_page)
+        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page))
+    if next_page == "#videos":
+        next_page = scrapertools.find_single_match(data, 'from:(\d+)">Next</a>')
+        next_page = urlparse.urljoin(item.url, next_page) + "/"
+        itemlist.append(item.clone(action="categorias", title="Página Siguiente >>", text_color="blue", url=next_page))
     return itemlist
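
Both categorias() and lista() share the same two-step pager: when the <li class="next"> link is a real URL it is followed directly, and when the site instead emits the "#videos" AJAX placeholder, the numeric from: offset is scraped and joined onto the current URL. A minimal sketch of that decision, factored out (note that an empty match also falls into the first branch, so a page with no pager still yields a "next" item — behavior preserved from the code above):

    # Minimal sketch of the two-step pager used by categorias() and lista().
    import urlparse
    from core import scrapertools

    def next_page_url(data, current_url):
        next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
        if next_page != "#videos":
            return urlparse.urljoin(current_url, next_page)        # ordinary link
        offset = scrapertools.find_single_match(data, 'from:(\d+)">Next</a>')
        return urlparse.urljoin(current_url, offset) + "/"         # AJAX "from:N" offset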


 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
     patron = '<a href="([^"]+)" class="item.*?'
     patron += 'data-original="([^"]+)".*?'
     patron += 'alt="([^"]+)".*?'
     patron += '<div class="duration">(.*?)</div>'
-    matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
         url = scrapedurl
         title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
         contentTitle = title
         thumbnail = scrapedthumbnail
         plot = ""
         year = ""
-        itemlist.append( Item(channel=item.channel, action="play" , title=title , url=url, thumbnail=thumbnail,
-                              fanart=thumbnail, plot=plot, contentTitle = contentTitle))
-    next_page = scrapertools.find_single_match(data,'<li class="next"><a href="([^"]+)"')
-    if next_page!="#videos":
-        next_page = urlparse.urljoin(item.url,next_page)
-        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
-    if next_page=="#videos":
-        next_page = scrapertools.find_single_match(data,'from:(\d+)">Next</a>')
-        next_page = urlparse.urljoin(item.url,next_page) + "/"
-        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page) )
+        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
+                             fanart=thumbnail, plot=plot, contentTitle=contentTitle))
+    next_page = scrapertools.find_single_match(data, '<li class="next"><a href="([^"]+)"')
+    if next_page != "#videos":
+        next_page = urlparse.urljoin(item.url, next_page)
+        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
+    if next_page == "#videos":
+        next_page = scrapertools.find_single_match(data, 'from:(\d+)">Next</a>')
+        next_page = urlparse.urljoin(item.url, next_page) + "/"
+        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
     return itemlist


 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cache_page(item.url)
+    data = httptools.downloadpage(item.url).data
     media_url = scrapertools.find_single_match(data, 'video_alt_url: \'([^\']+)/\'')
     if media_url == "":
         media_url = scrapertools.find_single_match(data, 'video_url: \'([^\']+)/\'')
     itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
                          thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
     return itemlist
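
play() prefers the higher-quality video_alt_url and only falls back to video_url when the first pattern misses (find_single_match returns "" on no match). A sketch of that fallback with an invented page snippet:

    # Sketch of the alt-url fallback in play(); the page snippet is invented.
    from core import scrapertools

    data = "player.setup({video_url: 'https://cdn.example/video_lq.mp4/'});"
    media_url = scrapertools.find_single_match(data, "video_alt_url: '([^']+)/'")
    if media_url == "":
        media_url = scrapertools.find_single_match(data, "video_url: '([^']+)/'")
    # media_url is now 'https://cdn.example/video_lq.mp4' (no alt URL in the sample)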

@@ -1,7 +1,11 @@
 # -*- coding: utf-8 -*-
-#------------------------------------------------------------
-import urlparse,urllib2,urllib,re
-import os, sys
+# ------------------------------------------------------------
+import urlparse
+import urllib2
+import urllib
+import re
+import os
+import sys
 from platformcode import config, logger
 from core import scrapertools
 from core.item import Item

@@ -10,14 +14,16 @@ from core import httptools

 host = 'https://www.youjizz.com'


 def mainlist(item):
     logger.info()
     itemlist = []
-    itemlist.append( Item(channel=item.channel, title="Nuevas" , action="lista", url=host + "/newest-clips/1.html"))
-    itemlist.append( Item(channel=item.channel, title="Popular" , action="lista", url=host + "/most-popular/1.html"))
-    itemlist.append( Item(channel=item.channel, title="Mejor valorada" , action="lista", url=host + "/top-rated-week/1.html"))
-    itemlist.append( Item(channel=item.channel, title="Categorias" , action="categorias", url=host))
-    itemlist.append( Item(channel=item.channel, title="Buscar", action="search"))
+    itemlist.append(Item(channel=item.channel, title="Nuevas", action="lista", url=host + "/newest-clips/1.html"))
+    itemlist.append(Item(channel=item.channel, title="Popular", action="lista", url=host + "/most-popular/1.html"))
+    itemlist.append(
+        Item(channel=item.channel, title="Mejor valorada", action="lista", url=host + "/top-rated-week/1.html"))
+    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
     return itemlist

@@ -38,67 +44,68 @@ def categorias(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    data = scrapertools.get_match(data,'<h4>Trending(.*?)</ul>')
+    data = scrapertools.get_match(data, '<h4>Trending(.*?)</ul>')
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    patron = '<li><a href="([^"]+)">([^"]+)</a>'
-    matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedtitle in matches:
+    patron = '<li><a href="([^"]+)">([^"]+)</a>'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedtitle in matches:
         scrapedplot = ""
         scrapedthumbnail = ""
         scrapedtitle = scrapedtitle
-        scrapedurl = urlparse.urljoin(item.url,scrapedurl)
-        itemlist.append( Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
-                              thumbnail=scrapedthumbnail , plot=scrapedplot) )
+        scrapedurl = urlparse.urljoin(item.url, scrapedurl)
+        itemlist.append(Item(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+                             thumbnail=scrapedthumbnail, plot=scrapedplot))
     return itemlist


 def lista(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cachePage(item.url)
+    data = httptools.downloadpage(item.url).data
     data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    patron = '<div class="video-item">.*?'
+    patron = '<div class="video-item">.*?'
     patron += 'class="frame image" href="([^"]+)".*?'
     patron += 'data-original="([^"]+)" />.*?'
     patron += '<div class="video-title">.*?'
     patron += '>(.*?)</a>.*?'
     patron += '<span class="time">(.*?)</span>'
-    matches = re.compile(patron,re.DOTALL).findall(data)
-    for scrapedurl,scrapedthumbnail,scrapedtitle,duracion in matches:
-        url = urlparse.urljoin(item.url,scrapedurl)
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedthumbnail, scrapedtitle, duracion in matches:
+        url = urlparse.urljoin(item.url, scrapedurl)
         title = "[COLOR yellow]" + duracion + "[/COLOR] " + scrapedtitle
-        quality= ""
-        if '-720-' in scrapedthumbnail : quality = "720"
-        if '-1080-' in scrapedthumbnail : quality = "1080"
+        quality = ""
+        if '-720-' in scrapedthumbnail:
+            quality = "720"
+        if '-1080-' in scrapedthumbnail:
+            quality = "1080"
         if quality:
-            title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "p[/COLOR] " + scrapedtitle
+            title = "[COLOR yellow]" + duracion + "[/COLOR] " + "[COLOR red]" + quality + "p[/COLOR] " + scrapedtitle
         contentTitle = title
         thumbnail = "http:" + scrapedthumbnail
         plot = ""
         year = ""
-        itemlist.append( Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
-                              plot=plot, quality= quality, contentTitle = contentTitle))
-    next_page = scrapertools.find_single_match(data,'<li><a class="pagination-next" href="([^"]+)">Next »</a>')
-    if next_page!="":
-        next_page = urlparse.urljoin(item.url,next_page)
-        itemlist.append(item.clone(action="lista", title="Página Siguiente >>" , text_color="blue", url=next_page) )
+        itemlist.append(Item(channel=item.channel, action="play", title=title, url=url, thumbnail=thumbnail,
+                             plot=plot, quality=quality, contentTitle=contentTitle))
+    next_page = scrapertools.find_single_match(data, '<li><a class="pagination-next" href="([^"]+)">Next »</a>')
+    if next_page != "":
+        next_page = urlparse.urljoin(item.url, next_page)
+        itemlist.append(item.clone(action="lista", title="Página Siguiente >>", text_color="blue", url=next_page))
     return itemlist
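
Here the stream quality is inferred from markers embedded in the thumbnail filename (-720- / -1080-) and, when found, echoed into the title in red. The same check in isolation (filenames invented):

    # Sketch of the thumbnail-based quality detection above; filenames invented.
    def quality_from_thumb(scrapedthumbnail):
        quality = ""
        if '-720-' in scrapedthumbnail:
            quality = "720"
        if '-1080-' in scrapedthumbnail:
            quality = "1080"
        return quality

    print quality_from_thumb("//cdn.example/thumbs/abc-1080-3.jpg")   # -> 1080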


 def play(item):
     logger.info()
     itemlist = []
-    data = scrapertools.cache_page(item.url)
-    data = scrapertools.get_match(data,'var encodings(.*?)var')
-    if '360' in data:
+    data = httptools.downloadpage(item.url).data
+    data = scrapertools.get_match(data, 'var encodings(.*?)var')
+    if '360' in data:
         patron = '"360".*?"filename"\:"(.*?)"'
-    if '720' in data:
+    if '720' in data:
         patron = '"720".*?"filename"\:"(.*?)"'
-    if '1080' in data:
+    if '1080' in data:
         patron = '"1080".*?"filename"\:"(.*?)"'
     media_url = scrapertools.find_single_match(data, patron)
     media_url = "https:" + media_url.replace("\\", "")
     itemlist.append(Item(channel=item.channel, action="play", title=item.title, fulltitle=item.fulltitle, url=media_url,
                          thumbnail=item.thumbnail, plot=item.plot, show=item.title, server="directo", folder=False))
     return itemlist
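
The three if blocks deliberately overwrite patron so the best quality present in the "var encodings" block wins: 360 is the baseline, 720 overrides it, 1080 overrides both. (Note patron is only bound when at least one of the three quality strings occurs, so a page with none of them would raise a NameError.) A sketch with an invented encodings block:

    # Sketch of the last-match-wins quality cascade; the data string is invented.
    from core import scrapertools

    data = '"360","filename":"\\/\\/cdn.example\\/v360.mp4","720","filename":"\\/\\/cdn.example\\/v720.mp4"'
    patron = '"360".*?"filename"\:"(.*?)"'
    if '720' in data:
        patron = '"720".*?"filename"\:"(.*?)"'
    if '1080' in data:
        patron = '"1080".*?"filename"\:"(.*?)"'
    media_url = "https:" + scrapertools.find_single_match(data, patron).replace("\\", "")
    # -> https://cdn.example/v720.mp4 (720 beats 360; 1080 is absent in this sample)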

@@ -19,7 +19,7 @@ def youtube_api_call(method, parameters):
     url = "https://www.googleapis.com/youtube/v3/" + method + "?" + encoded_parameters + "&key=" + YOUTUBE_V3_API_KEY;
     logger.info("url=" + url)

-    data = scrapertools.cache_page(url)
+    data = httptools.downloadpage(url).data
     logger.info("data=" + data)

     json_object = jsontools.load(data)
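
The recurring change across this diff is the same one-line migration: the deprecated scrapertools.cache_page / cachePage helpers are replaced by httptools.downloadpage(url).data. The shape of the new call, for reference (.data is the only attribute of the response object that this diff itself relies on):

    # The call shape this PR migrates to, as used throughout the diff.
    from core import httptools

    url = "https://example.com/page"         # illustrative URL
    response = httptools.downloadpage(url)   # response object from Alfa's core
    data = response.data                     # raw page body used by the scrapers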

@@ -37,7 +37,7 @@ def youtube_get_user_playlists(user_id, pageToken=""):
                                   {"part": "snippet,contentDetails", "channelId": channel_id, "maxResults": 50,
                                    "pageToken": pageToken})

-    return json_object;
+    return json_object


 def youtube_get_playlist_items(playlist_id, pageToken=""):

@@ -38,5 +38,5 @@
             "visible": false
         }
     ],
-    "thumbnail": ""
+    "thumbnail": "http://thevid.net/imgs/thevid.png"
 }

plugin.video.alfa/servers/thevideobee.json (new file, 42 lines)
@@ -0,0 +1,42 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "(https://thevideobee.to/embed-[A-z0-9]+.html)",
+                "url": "\\1"
+            }
+        ]
+    },
+    "free": true,
+    "id": "thevideobee",
+    "name": "thevideobee",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://thevideobee.to/img/logo.png"
+}
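
Each server JSON registers a find_videos entry: Alfa's server discovery scans page text with each pattern and expands the url template, where \1 is the first capture group (the actual lookup lives in the core's servertools; this is an assumption about its behavior, not a quote of it). A standalone sketch of that matching step with invented sample text:

    # Sketch of how a find_videos pattern resolves an embed URL; sample text invented.
    import re

    pattern = "(https://thevideobee.to/embed-[A-z0-9]+.html)"
    url_template = "\\1"
    page_text = '<iframe src="https://thevideobee.to/embed-ab12CD.html"></iframe>'
    for match in re.compile(pattern).findall(page_text):
        print re.sub(pattern, url_template, match)   # -> the resolved server URL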

plugin.video.alfa/servers/thevideobee.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector thevideobee By Alfa development Group
+# --------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    if "no longer exists" in data or "to copyright issues" in data:
+        return False, "[thevideobee] El video ha sido borrado"
+    return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    video_urls = []
+    videourl = scrapertools.find_single_match(data, 'src: "([^"]+)')
+    video_urls.append([".MP4 [thevideobee]", videourl])
+
+    return video_urls

plugin.video.alfa/servers/tusfiles.json (new file, 42 lines)
@@ -0,0 +1,42 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "(https://tusfiles.com/embed-[A-z0-9]+.html)",
+                "url": "\\1"
+            }
+        ]
+    },
+    "free": true,
+    "id": "tusfiles",
+    "name": "tusfiles",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://tusfiles.com/i/TFLOGO.png"
+}

plugin.video.alfa/servers/tusfiles.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector tusfiles By Alfa development Group
+# --------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    if "no longer exists" in data or "to copyright issues" in data:
+        return False, "[tusfiles] El video ha sido borrado"
+    return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    video_urls = []
+    videourl = scrapertools.find_single_match(data, 'source src="([^"]+)')
+    video_urls.append([".MP4 [tusfiles]", videourl])
+
+    return video_urls

plugin.video.alfa/servers/vup.json (new file, 42 lines)
@@ -0,0 +1,42 @@
+{
+    "active": true,
+    "find_videos": {
+        "ignore_urls": [],
+        "patterns": [
+            {
+                "pattern": "(https://vup.to/embed-[A-z0-9]+.html)",
+                "url": "\\1"
+            }
+        ]
+    },
+    "free": true,
+    "id": "vup",
+    "name": "vup",
+    "settings": [
+        {
+            "default": false,
+            "enabled": true,
+            "id": "black_list",
+            "label": "@60654",
+            "type": "bool",
+            "visible": true
+        },
+        {
+            "default": 0,
+            "enabled": true,
+            "id": "favorites_servers_list",
+            "label": "@60655",
+            "lvalues": [
+                "No",
+                "1",
+                "2",
+                "3",
+                "4",
+                "5"
+            ],
+            "type": "list",
+            "visible": false
+        }
+    ],
+    "thumbnail": "https://i.postimg.cc/ZKjvqXxj/vup.png"
+}

plugin.video.alfa/servers/vup.py (new file, 28 lines)
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector vup By Alfa development Group
+# --------------------------------------------------------
+
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    if "no longer exists" in data or "to copyright issues" in data:
+        return False, "[vup] El video ha sido borrado"
+    return True, ""
+
+
+def get_video_url(page_url, user="", password="", video_password=""):
+    logger.info("(page_url='%s')" % page_url)
+    data = httptools.downloadpage(page_url).data
+    bloque = scrapertools.find_single_match(data, 'sources:.*?\]')
+    video_urls = []
+    videourl = scrapertools.find_multiple_matches(bloque, '"(http[^"]+)')
+    for video in videourl:
+        video_urls.append([".MP4 [vup]", video])
+    video_urls = video_urls[::-1]
+    return video_urls
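
All three new connectors share the same contract: test_video_exists() probes the page for takedown markers, and get_video_url() returns a list of [label, url] pairs; vup additionally collects every entry in its sources: [...] block and reverses the list so the last-listed source is offered first. A hedged usage sketch (the page URL is invented):

    # Hedged usage sketch for the connectors above; the page URL is invented.
    page_url = "https://vup.to/embed-ab12CD.html"
    exists, message = test_video_exists(page_url)
    if exists:
        for label, url in get_video_url(page_url):
            print label, url     # e.g. ".MP4 [vup]" and the stream URL
    else:
        print message            # "[vup] El video ha sido borrado"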