updated
@@ -19,7 +19,6 @@ if __perfil__ - 1 >= 0:
    color1, color2, color3, color4, color5 = perfil[__perfil__ - 1]
else:
    color1 = color2 = color3 = color4 = color5 = ""

host = "http://www.crunchyroll.com"
proxy_u = "http://anonymouse.org/cgi-bin/anon-www.cgi/"
proxy_e = "http://proxyanonimo.es/browse.php?u="
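# proxy_u (anonymouse.org) and proxy_e (proxyanonimo.es) are public web proxies; mainlist routes
# requests through them when the channel's USA/Spain proxy settings are enabled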
@@ -27,24 +26,20 @@ proxy_e = "http://proxyanonimo.es/browse.php?u="

def login():
    logger.info()

    langs = ['deDE', 'ptPT', 'frFR', 'itIT', 'enUS', 'esLA', 'esES']
    lang = langs[config.get_setting("crunchyrollidioma", "crunchyroll")]
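    # "crunchyrollidioma" stores an integer index into langs (0 = deDE ... 6 = esES)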
-    httptools.downloadpage("http://www.crunchyroll.com/ajax/", "req=RpcApiTranslation_SetLang&locale=%s" % lang)
-
-    login_page = "https://www.crunchyroll.com/login"
+    httptools.downloadpage(host + "/ajax/", "req=RpcApiTranslation_SetLang&locale=%s" % lang)
+    login_page = host.replace("http","https") + "/login"
    user = config.get_setting("crunchyrolluser", "crunchyroll")
    password = config.get_setting("crunchyrollpassword", "crunchyroll")
    if not user or not password:
        return False, "", ""
    data = httptools.downloadpage(login_page).data

    if not "<title>Redirecting" in data:
        token = scrapertools.find_single_match(data, 'name="login_form\[_token\]" value="([^"]+)"')
        redirect_url = scrapertools.find_single_match(data, 'name="login_form\[redirect_url\]" value="([^"]+)"')
        post = "login_form%5Bname%5D=" + user + "&login_form%5Bpassword%5D=" + password + \
               "&login_form%5Bredirect_url%5D=" + redirect_url + "&login_form%5B_token%5D=" + token
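        # replays the scraped form: the CSRF _token and redirect_url taken from the login page are
        # posted back along with the url-encoded login_form[...] credentials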

        data = httptools.downloadpage(login_page, post).data
        if not "<title>Redirecting" in data:
            if "Usuario %s no disponible" % user in data:
@@ -53,7 +48,6 @@ def login():
                return False, "Es necesario resolver un captcha. Loguéate desde un navegador y vuelve a intentarlo", ""
            else:
                return False, "No se ha podido realizar el login.", ""

    data = httptools.downloadpage(host).data
    premium = scrapertools.find_single_match(data, ',"premium_status":"([^"]+)"')
    premium = premium.replace("_", " ").replace("free trial", "Prueba Gratuita").capitalize()
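    # "premium_status" is scraped from a JSON blob embedded in the page; e.g. "free_trial" is shown as "Prueba Gratuita"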
@@ -67,7 +61,6 @@ def mainlist(item):
    logger.info()
    itemlist = []
    item.text_color = color1

    proxy_usa = config.get_setting("proxy_usa", "crunchyroll")
    proxy_spain = config.get_setting("proxy_spain", "crunchyroll")
    item.login = False
@@ -82,14 +75,12 @@ def mainlist(item):
        httptools.downloadpage("http://proxyanonimo.es/")
        item.proxy = "spain"
        host = proxy_e + host

    if not item.login and error_message:
        itemlist.append(item.clone(title=error_message, action="configuracion", folder=False, text_color=color4))
    elif item.login:
        itemlist.append(item.clone(title="Tipo de cuenta: %s" % premium, action="", text_color=color4))
    elif item.proxy:
        itemlist.append(item.clone(title="Usando proxy: %s" % item.proxy.capitalize(), action="", text_color=color4))

    itemlist.append(item.clone(title="Anime", action="", text_color=color2))
    item.contentType = "tvshow"
    itemlist.append(
@@ -104,39 +95,32 @@ def mainlist(item):
        item.clone(title="  Popular", action="lista", url=host + "/videos/drama/popular/ajax_page?pg=0", page=0))
    itemlist.append(item.clone(title="  Índice Alfabético", action="indices",
-                              url="http://www.crunchyroll.com/videos/drama/alpha"))
-
+                              url=host + "/videos/drama/alpha"))
    if item.proxy != "usa":
        itemlist.append(item.clone(action="calendario", title="Calendario de Estrenos Anime", text_color=color4,
                                   url=host + "/simulcastcalendar"))
    itemlist.append(item.clone(title="Configuración del canal", action="configuracion", text_color="gold"))

    return itemlist


def configuracion(item):
    from platformcode import platformtools
    ret = platformtools.show_channel_settings()

    user = config.get_setting("crunchyrolluser", "crunchyroll")
    password = config.get_setting("crunchyrollpassword", "crunchyroll")
    sub = config.get_setting("crunchyrollsub", "crunchyroll")

    config.set_setting("crunchyrolluser", user)
    config.set_setting("crunchyrollpassword", password)
    values = [6, 5, 4, 3, 2, 1, 0]
    config.set_setting("crunchyrollsub", str(values[sub]))
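    # the lookup table reverses the stored index, presumably because the settings dialog lists the
    # subtitle options in the opposite order to the ids the site expects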
    platformtools.itemlist_refresh()

    return ret


def lista(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    next = item.url.replace("?pg=%s" % item.page, "?pg=%s" % str(item.page + 1))
    data_next = httptools.downloadpage(next).data
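    # the following page is fetched ahead of time: ">> Página Siguiente" is only appended below
    # if data_next actually contains another media_group entry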
    patron = '<li id="media_group_(\d+)".*?title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)"' \
@@ -154,18 +138,15 @@ def lista(item):
        itemlist.append(item.clone(action="episodios", url=url, title=scrapedtitle, thumbnail=thumb,
                                   contentTitle=title, contentSerieName=title, infoLabels={'plot': plot},
                                   text_color=color2))

    if '<li id="media_group' in data_next:
        itemlist.append(item.clone(action="lista", url=next, title=">> Página Siguiente", page=item.page + 1,
                                   text_color=""))

    return itemlist


def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r'\n|\t|\s{2,}', '', data)
    patron = '<li id="showview_videos.*?href="([^"]+)".*?(?:src|data-thumbnailUrl)="([^"]+)".*?media_id="([^"]+)"' \
@@ -215,12 +196,10 @@ def episodios(item):
def indices(item):
    logger.info()
    itemlist = []

    if not item.url:
-        itemlist.append(item.clone(title="Alfabético", url="http://www.crunchyroll.com/videos/anime/alpha"))
-        itemlist.append(item.clone(title="Géneros", url="http://www.crunchyroll.com/videos/anime"))
-        itemlist.append(item.clone(title="Temporadas", url="http://www.crunchyroll.com/videos/anime"))
-
+        itemlist.append(item.clone(title="Alfabético", url=host + "/videos/anime/alpha"))
+        itemlist.append(item.clone(title="Géneros", url=host + "/videos/anime"))
+        itemlist.append(item.clone(title="Temporadas", url=host + "/videos/anime"))
    else:
        data = httptools.downloadpage(item.url).data
        if "Alfabético" in item.title:
@@ -235,7 +214,6 @@ def indices(item):
                url = proxy_u + host + url
            else:
                url = host + url

            itemlist.append(item.clone(action="alpha", title=title, url=url, page=0))
        elif "Temporadas" in item.title:
            bloque = scrapertools.find_single_match(data,
@@ -249,7 +227,6 @@ def indices(item):
                url = proxy_u + host + url
            else:
                url = host + url

            itemlist.append(item.clone(action="lista", title=title, url=url, page=0))
        else:
            bloque = scrapertools.find_single_match(data, '<div class="genre-selectors selectors">(.*?)</div>')
@@ -260,18 +237,14 @@ def indices(item):
                url = proxy_e + url.replace("&", "%26")
            elif item.proxy == "usa":
                url = proxy_u + url

            itemlist.append(item.clone(action="lista", title=title, url=url, page=0))

    return itemlist


def alpha(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<div class="wrapper hover-toggle-queue.*?title="([^"]+)".*?href="([^"]+)".*?src="([^"]+)"' \
             '.*?<span class="series-data.*?>\s*([^<]+)</span>.*?<p.*?>(.*?)</p>'
    matches = scrapertools.find_multiple_matches(data, patron)
@@ -285,16 +258,13 @@ def alpha(item):
        itemlist.append(item.clone(action="episodios", url=url, title=scrapedtitle, thumbnail=thumb,
                                   contentTitle=title, contentSerieName=title, infoLabels={'plot': plot},
                                   text_color=color2))

    return itemlist


def calendario(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data

    patron = '<div class="specific-date">.*?datetime="\d+-(\d+)-(\d+).*?class="day-name">.*?>\s*([^<]+)</time>(.*?)</section>'
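    # one match per calendar day: month and day number from the datetime attribute, the day name,
    # and that day's block of episodes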
    bloques = scrapertools.find_multiple_matches(data, patron)
    for mes, dia, title, b in bloques:
@@ -309,10 +279,8 @@ def calendario(item):
            subt = re.sub(r"\s{2,}", " ", subt)
            if "<time" in subt:
                subt = re.sub(r"<time.*?>", "", subt).replace("</time>", "")

            scrapedtitle = " [%s] %s - %s" % (hora, scrapertools.htmlclean(title), subt)
            scrapedtitle = re.sub(r"\[email protected\]|\[email\xc2\xa0protected\]", "Idolm@ster", scrapedtitle)
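            # presumably undoes Cloudflare's e-mail obfuscation, which replaces the "@" in titles
            # like Idolm@ster with an "[email protected]" placeholder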

            if "Disponible" in scrapedtitle:
                if item.proxy == "spain":
                    url = urllib.unquote(url.replace("/browse.php?u=", "").replace("&b=12", ""))
@@ -329,7 +297,6 @@ def calendario(item):
                                   .replace("&b=12", "").replace("_large", "_full"))
            itemlist.append(item.clone(action=action, url=url, title=scrapedtitle, contentTitle=title, thumbnail=thumb,
                                       text_color=color2, contentSerieName=title, server=server))

    next = scrapertools.find_single_match(data, 'js-pagination-next"\s*href="([^"]+)"')
    if next:
        if item.proxy == "spain":
@@ -344,7 +311,6 @@ def calendario(item):
        else:
            prev = host + prev
        itemlist.append(item.clone(action="calendario", url=prev, title="<< Semana Anterior"))

    return itemlist


@@ -353,6 +319,5 @@ def play(item):
    if item.login and not "[V]" in item.title:
        post = "cbelapsed=60&h=&media_id=%s" % item.media_id + "&req=RpcApiVideo_VideoView&cbcallcount=1&ht=0" \
               "&media_type=1&video_encode_id=0&playhead=10000"
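        # reports a playhead of 10000s via RpcApiVideo_VideoView, which appears to make Crunchyroll
        # mark the episode as watched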
-        httptools.downloadpage("http://www.crunchyroll.com/ajax/", post)
-
+        httptools.downloadpage(host + "/ajax/", post)
    return [item]

@@ -21,7 +21,7 @@ from channelselector import get_thumb

__channel__ = "pedropolis"

-host = "http://pedropolis.tv/"
+host = "https://pedropolis.tv/"

try:
    __modo_grafico__ = config.get_setting('modo_grafico', __channel__)
@@ -74,7 +74,9 @@ def menumovies(item):
    itemlist = [item.clone(title="Todas", action="peliculas", text_blod=True, url=host + 'pelicula/',
                           viewcontent='movies', viewmode="movie_with_plot"),
                item.clone(title="Más Vistas", action="peliculas", text_blod=True,
-                          viewcontent='movies', url=host + 'tendencias/?get=movie', viewmode="movie_with_plot"),
+                          viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
+               item.clone(title="Mejor Valoradas", action="peliculas", text_blod=True,
+                          viewcontent='movies', url=host + 'tendencias/?get=movies', viewmode="movie_with_plot"),
                item.clone(title="Por año", action="p_portipo", text_blod=True, extra="Películas Por año",
                           viewcontent='movies', url=host, viewmode="movie_with_plot"),
                item.clone(title="Por género", action="p_portipo", text_blod=True, extra="Categorías",
@@ -115,41 +117,32 @@ def p_portipo(item):
def peliculas(item):
    logger.info()
    itemlist = []
-    url_next_page = ''
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}|&nbsp;", "", data)
    # logger.info(data)
-    patron = '<div class="poster"> <img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
-    patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?'  # rating
-    patron += '<span class="quality">([^<]+)</span></div> <a href="([^"]+)">.*?'  # quality, url
-    patron += '<span>([^<]+)</span>'  # year
+    patron = '<div class="poster"> <img src="([^"]+)" alt="([^"]+)">.*?'  # img, title
+    patron += '<div class="rating"><span class="[^"]+"></span>([^<]+).*?'  # rating
+    patron += '<span class="quality">([^<]+)</span></div> <a href="([^"]+)">.*?'  # quality, url
+    patron += '<span>([^<]+)</span>.*?'  # year

    matches = scrapertools.find_multiple_matches(data, patron)
    # Pagination
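    # the removed block below split each result page into two halves (the first 19 matches, then the
    # rest) toggled via item.next_page ('a'/'b'); the new code instead scrapes the numbered-pagination
    # link once, after the loop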
-    if item.next_page != 'b':
-        if len(matches) > 19:
-            url_next_page = item.url
-            matches = matches[:19]
-            next_page = 'b'
-        else:
-            matches = matches[19:]
-            next_page = 'a'
-            patron_next_page = "<span class=\"current\">\d+</span><a href='([^']+)'"
-            matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
-            if len(matches_next_page) > 0:
-                url_next_page = urlparse.urljoin(item.url, matches_next_page[0])

    for scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year in matches:
        if 'Proximamente' not in quality:
-            scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
-                'Español Latino', '').strip()
-            title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)
-            itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
-                                 infoLabels={"year": year, "rating": rating}, thumbnail=scrapedthumbnail,
-                                 url=scrapedurl, next_page=next_page, quality=quality, title=title))
+            scrapedtitle = scrapedtitle.replace('Ver ', '').partition(' /')[0].partition(':')[0].replace(
+                'Español Latino', '').strip()
+            title = "%s [COLOR green][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (scrapedtitle, year, quality)
+
+            itemlist.append(Item(channel=item.channel, action="findvideos", contentTitle=scrapedtitle,
+                                 infoLabels={"year": year, "rating": rating}, thumbnail=scrapedthumbnail,
+                                 url=scrapedurl, quality=quality, title=title))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    if url_next_page:
+
+    pagination = scrapertools.find_single_match(data, "<span class=\"current\">\d+</span><a href='([^']+)'")
+
+    if pagination:
        itemlist.append(Item(channel=__channel__, action="peliculas", title="» Siguiente »",
-                             url=url_next_page, next_page=next_page, folder=True, text_blod=True,
-                             thumbnail=get_thumb("next.png")))
+                             url=pagination, folder=True, text_blod=True, thumbnail=get_thumb("next.png")))


    return itemlist


@@ -191,11 +184,13 @@ def sub_search(item):
        itemlist.append(item.clone(title=title, url=scrapedurl, contentTitle=scrapedtitle, extra='buscar',
                                   action=action, infoLabels={"year": year}, contentType=contentType,
                                   thumbnail=scrapedthumbnail, text_color=color1, contentSerieName=scrapedtitle))

    tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
-    paginacion = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
-    if paginacion:
+
+    pagination = scrapertools.find_single_match(data, '<link rel="next" href="([^"]+)" />')
+    if pagination:
        itemlist.append(Item(channel=item.channel, action="sub_search",
-                             title="» Siguiente »", url=paginacion, thumbnail=get_thumb("next.png")))
+                             title="» Siguiente »", url=pagination, thumbnail=get_thumb("next.png")))
    return itemlist


@@ -229,37 +224,26 @@ def newest(categoria):

def series(item):
    logger.info()
-    url_next_page = ''
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<div class="poster"> <img src="([^"]+)"'
    patron += ' alt="([^"]+)">.*?'
    patron += '<a href="([^"]+)">'
    matches = scrapertools.find_multiple_matches(data, patron)
-    if item.next_page != 'b':
-        if len(matches) > 19:
-            url_next_page = item.url
-            matches = matches[:19]
-            next_page = 'b'
-        else:
-            matches = matches[19:]
-            next_page = 'a'
-            patron_next_page = '<link rel="next" href="([^"]+)" />'
-            matches_next_page = re.compile(patron_next_page, re.DOTALL).findall(data)
-            if len(matches_next_page) > 0:
-                url_next_page = urlparse.urljoin(item.url, matches_next_page[0])

    for scrapedthumbnail, scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('’', "'")
        itemlist.append(Item(channel=__channel__, title=scrapedtitle, extra='serie',
                             url=scrapedurl, thumbnail=scrapedthumbnail,
                             contentSerieName=scrapedtitle, show=scrapedtitle,
-                             next_page=next_page, action="temporadas", contentType='tvshow'))
+                             action="temporadas", contentType='tvshow'))
    tmdb.set_infoLabels(itemlist, __modo_grafico__)
-    if url_next_page:
-        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=url_next_page,
-                             next_page=next_page, thumbnail=get_thumb("next.png")))
+
+    pagination = scrapertools.find_single_match(data, "<link rel='next' href='([^']+)' />")
+
+    if pagination:
+        itemlist.append(Item(channel=__channel__, action="series", title="» Siguiente »", url=pagination,
+                             thumbnail=get_thumb("next.png")))
    return itemlist


@@ -267,8 +251,6 @@ def temporadas(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<span class="title">([^<]+)<i>.*?'  # season
    patron += '<img src="([^"]+)"></a></div>'  # img
    matches = scrapertools.find_multiple_matches(data, patron)
@@ -303,8 +285,6 @@ def episodios(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
    # logger.info(data)
    patron = '<div class="imagen"><a href="([^"]+)">.*?'  # url
    patron += '<div class="numerando">(.*?)</div>.*?'  # episode numbering
    patron += '<a href="[^"]+">([^<]+)</a>'  # episode title
@@ -350,16 +330,15 @@ def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # logger.info(data)
    patron = '<div id="option-(\d+)".*?<iframe.*?src="([^"]+)".*?</iframe>'  # lang, url
    matches = re.compile(patron, re.DOTALL).findall(data)
    for option, url in matches:
-        lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?</b>-->(\w+)' % option)
-        lang = lang.lower()
+        lang = scrapertools.find_single_match(data, '<li><a class="options" href="#option-%s">.*?</b>(.*?)<span' % option)
+        lang = lang.lower().strip()
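        # the new capture takes the language label between </b> and <span (the old pattern read it
        # from the word after an HTML comment); strip() trims the surrounding whitespace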
idioma = {'latino': '[COLOR cornflowerblue](LAT)[/COLOR]',
|
||||
'drive': '[COLOR cornflowerblue](LAT)[/COLOR]',
|
||||
'castellano': '[COLOR green](CAST)[/COLOR]',
|
||||
'español': '[COLOR green](CAST)[/COLOR]',
|
||||
'subtitulado': '[COLOR red](VOS)[/COLOR]',
|
||||
'ingles': '[COLOR red](VOS)[/COLOR]'}
|
||||
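        # assumption: 'drive' entries are Google Drive mirrors carrying the Latino audio, hence the
        # shared (LAT) tag; labels missing from idioma presumably get no colour tag at all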
        if lang in idioma: