@@ -10,8 +10,8 @@
 <extension point="xbmc.addon.metadata">
 <summary lang="es">Navega con Kodi por páginas web.</summary>
 <assets>
-<icon>logo-n.jpg</icon>
-<fanart>fanart-xmas.jpg</fanart>
+<icon>logo-cumple.jpg</icon>
+<fanart>fanart1.jpg</fanart>
 <screenshot>resources/media/themes/ss/1.jpg</screenshot>
 <screenshot>resources/media/themes/ss/2.jpg</screenshot>
 <screenshot>resources/media/themes/ss/3.jpg</screenshot>

@@ -165,7 +165,7 @@ def findvideos(item):
     if contentTitle != "":
         item.contentTitle = contentTitle
     bloque = scrapertools.find_single_match(data, '(?s)<div class="bottomPlayer">(.*?)<script>')
-    match = scrapertools.find_multiple_matches(bloque, '(?is)data-Url="([^"]+).*?data-postId="([^"]+)')
+    match = scrapertools.find_multiple_matches(bloque, '(?is)data-Url="([^"]+).*?data-postId="([^"]*)')
     for dataurl, datapostid in match:
         page_url = host + "wp-admin/admin-ajax.php"
         post = "action=get_more_top_news&postID=%s&dataurl=%s" %(datapostid, dataurl)

@@ -178,7 +178,7 @@ def findvideos(item):
         if "youtube" in url:
             titulo = "Ver trailer: %s"
             text_color = "yellow"
-        if "ad.js" in url or "script" in url or "jstags.js" in url:
+        if "ad.js" in url or "script" in url or "jstags.js" in url or not datapostid:
            continue
         elif "vimeo" in url:
             url += "|" + "http://www.allcalidad.com"

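Reviewer note: in the allcalidad channel these two hunks work as a pair. With '[^"]+' a player entry whose data-postId attribute is empty never matched, so it was dropped silently and could desynchronize the (dataurl, datapostid) tuples; '[^"]*' keeps every entry in the result, and the added "or not datapostid" then skips the unplayable ones explicitly. A minimal stand-alone check, with sample markup invented for illustration:

    import re

    # Hypothetical player block in the shape the channel scrapes:
    bloque = ('<div data-Url="http://example.com/a" data-postId="123"></div>'
              '<div data-Url="http://example.com/b" data-postId=""></div>')

    patron = '(?is)data-Url="([^"]+).*?data-postId="([^"]*)'
    for dataurl, datapostid in re.findall(patron, bloque):
        if not datapostid:          # the same guard the patch adds
            continue                # an entry without a post id is unplayable
        print(dataurl, datapostid)  # -> http://example.com/a 123
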
@@ -12,6 +12,7 @@ from core import tmdb
 from core.item import Item
 from platformcode import config, logger


 host = "http://www.asialiveaction.com"

+IDIOMAS = {'Japones': 'Japones'}
@@ -66,7 +67,7 @@ def search_results(item):
     patron +=">(\d{4})</a>.*?<h6>([^<]+)<a href='([^']+)"
     matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedtype, scrapedthumbnail, scrapedyear, scrapedtitle ,scrapedurl in matches:
-        title="%s [%s]" % (scrapedtitle,scrapedyear)
+        title="%s [%s]" % (scrapedtitle, scrapedyear)
         new_item= Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail)
         if scrapedtype.strip() == 'Serie':
             new_item.contentSerieName = scrapedtitle

@@ -191,6 +192,8 @@ def findvideos(item):
         if "spotify" in url:
             continue
         data = httptools.downloadpage(url).data
+        language = scrapertools.find_single_match(data, '(?:ɥɔɐәlq|lɐʇәɯllnɟ) (\w+)')
+        if not language: language = "VOS"
         bloque = scrapertools.find_single_match(data, "description articleBody(.*)/div")
         urls = scrapertools.find_multiple_matches(bloque, "iframe src='([^']+)")
         if urls:

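Reviewer note: the new language sniff in the asialiveaction channel keys off upside-down marker words embedded in the page source: 'ɥɔɐәlq' and 'lɐʇәɯllnɟ' appear to be flipped renderings of "bleach" and "fullmetal". The word that follows the marker is captured as the language tag, with "VOS" as the fallback. A quick sanity check against invented page text:

    import re

    # Invented sample: a flipped marker word followed by the language label.
    data = 'lɐʇәɯllnɟ Castellano'
    match = re.search(r'(?:ɥɔɐәlq|lɐʇәɯllnɟ) (\w+)', data)
    language = match.group(1) if match else "VOS"
    print(language)  # -> Castellano
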
@@ -199,17 +202,17 @@ def findvideos(item):
             if "luis" in url1:
                 data = httptools.downloadpage(url1).data
                 url1 = scrapertools.find_single_match(data, 'file: "([^"]+)')
-            itemlist.append(item.clone(action = "play", title = "Ver en %s", url = url1))
+            itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url1))
         else:
             # cuando es descarga
             bloque = bloque.replace('"',"'")
             urls = scrapertools.find_multiple_matches(bloque, "href='([^']+)")
             for url2 in urls:
-                itemlist.append(item.clone(action = "play", title = "Ver en %s", url = url2))
+                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = url2))
         if "data-video" in bloque:
-            urls = scrapertools.find_multiple_matches(bloque, 'data-video="([^"]+)')
+            urls = scrapertools.find_multiple_matches(bloque, "data-video='([^']+)")
             for url2 in urls:
-                itemlist.append(item.clone(action = "play", title = "Ver en %s", url = "https://tinyurl.com/%s" %url2 ))
+                itemlist.append(item.clone(action = "play", title = "Ver en %s (" + language + ")", language = language, url = "https://tinyurl.com/%s" %url2 ))
         for item1 in itemlist:
             if "tinyurl" in item1.url:
                 item1.url = httptools.downloadpage(item1.url, follow_redirects=False, only_headers=True).headers.get("location", "")

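Reviewer note: two details in this hunk are easy to miss. The data-video pattern switches to single quotes because bloque has already had every double quote rewritten to a single quote a few lines earlier, and the tinyurl links are resolved without downloading any page body: for a redirect, the target is already in the response's location header. That resolution step in isolation, sketched with the requests library rather than the project's httptools wrapper:

    import requests

    def resolve_shortener(url):
        # Ask only for the headers and do not follow the redirect ourselves:
        # the shortener announces the real target in its Location header.
        resp = requests.head(url, allow_redirects=False, timeout=10)
        return resp.headers.get("location", "")

    # resolve_shortener("https://tinyurl.com/example")  # -> the expanded URL
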
@@ -38,14 +38,13 @@ def genero(item):
     logger.info()
     itemlist = list()
     data = httptools.downloadpage(host).data
     data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
-    patron_generos = '<ul id="menu-submenu" class=""><li id="menu-item-.+?"(.+)<\/li><\/ul>'
-    data_generos = scrapertools.find_single_match(data, patron_generos)
-    patron = 'class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-.*?"><a href="(.*?)">(.*?)<\/a><\/li>'
-    matches = scrapertools.find_multiple_matches(data_generos, patron)
+    patron = 'level-0.*?value="([^"]+)"'
+    patron += '>([^<]+)'
+    matches = scrapertools.find_multiple_matches(data, patron)
     for scrapedurl, scrapedtitle in matches:
-        if scrapedtitle != 'Próximas Películas':
-            itemlist.append(item.clone(action='lista', title=scrapedtitle, url=scrapedurl))
+        if 'Próximas Películas' in scrapedtitle:
+            continue
+        itemlist.append(item.clone(action='lista', title=scrapedtitle, cat=scrapedurl))
     return itemlist

@@ -90,13 +89,18 @@ def proximas(item):
 def lista(item):
     logger.info()
     itemlist = []
-    data = httptools.downloadpage(item.url).data
+    if not item.cat:
+        data = httptools.downloadpage(item.url).data
+    else:
+        url = httptools.downloadpage("%s?cat=%s" %(host, item.cat), follow_redirects=False, only_headers=True).headers.get("location", "")
+        data = httptools.downloadpage(url).data
+    bloque = scrapertools.find_single_match(data, """class="item_1 items.*?id="paginador">""")
     patron = 'class="item">.*?' # Todos los items de peliculas (en esta web) empiezan con esto
     patron += '<a href="([^"]+).*?' # scrapedurl
     patron += '<img src="([^"]+).*?' # scrapedthumbnail
     patron += 'alt="([^"]+).*?' # scrapedtitle
     patron += '<div class="fixyear">(.*?)</span></div><' # scrapedfixyear
-    matches = scrapertools.find_multiple_matches(data, patron)
+    matches = scrapertools.find_multiple_matches(bloque, patron)
     for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedfixyear in matches:
         patron = '<span class="year">([^<]+)' # scrapedyear
         scrapedyear = scrapertools.find_single_match(scrapedfixyear, patron)

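Reviewer note: genero() now takes category ids from the page's 'level-0' select options and passes them as item.cat, and lista() turns an id back into the category's canonical URL by reading the location header of host + '?cat=ID', the same redirect trick used elsewhere in this patch. The new bloque line is the usual two-stage scrape: cut the page down to the listing container first, then run the per-item pattern only inside it, so sidebar and widget markup can no longer produce false matches. The shape of that technique, with invented sample HTML:

    import re

    # Invented sample page: an ad link outside the listing container.
    html = ('<div class="widget"><a href="/ad">Ad</a></div>'
            '<div class="item_1 items"><a href="/movie-1">One</a>'
            '<a href="/movie-2">Two</a></div><div id="paginador"></div>')

    # Stage 1: isolate the listing container.
    m = re.search('class="item_1 items(.*?)id="paginador">', html, re.S)
    bloque = m.group(1) if m else ""
    # Stage 2: run the per-item pattern only inside it.
    print(re.findall('<a href="([^"]+)', bloque))  # -> ['/movie-1', '/movie-2']
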
@@ -113,14 +117,8 @@ def lista(item):
             item.clone(title=title, url=scrapedurl, action="findvideos", extra=scrapedtitle,
                        contentTitle=scrapedtitle, thumbnail=scrapedthumbnail, contentType="movie", context=["buscar_trailer"]))
     tmdb.set_infoLabels(itemlist)
-    scrapertools.printMatches(itemlist)
     # Paginacion
-    patron_genero = '<h1>([^"]+)<\/h1>'
-    genero = scrapertools.find_single_match(data, patron_genero)
-    if genero == "Romance" or genero == "Drama":
-        patron = "<a rel='nofollow' class=previouspostslink' href='([^']+)'>Siguiente "
-    else:
-        patron = "<span class='current'>.+?href='(.+?)'>"
+    patron = 'rel="next" href="([^"]+)'
     next_page_url = scrapertools.find_single_match(data, patron)
     if next_page_url != "":
         item.url = next_page_url

@@ -11,7 +11,7 @@ from core.item import Item
 from platformcode import config, platformtools, logger
 from channelselector import get_thumb

-host = "http://www.clasicofilm.com/"
+host = "http://www.classicofilm.com/"
 # Configuracion del canal
 __modo_grafico__ = config.get_setting('modo_grafico', 'clasicofilm')
 __perfil__ = config.get_setting('perfil', 'clasicofilm')

@@ -34,8 +34,8 @@ def mainlist(item):
     itemlist.append(item.clone(action="peliculas", title=" Novedades",
                     url = host + "feeds/posts/summary?start-index=1&max-results=20&alt=json-in-script&callback=finddatepost",
                     thumbnail=get_thumb('newest', auto=True), text_color=color1))
-    itemlist.append(item.clone(action="generos", title=" Por géneros", url=host,
-                    thumbnail=get_thumb('genres', auto=True), text_color=color1))
+    #itemlist.append(item.clone(action="generos", title=" Por géneros", url=host,
+    #                thumbnail=get_thumb('genres', auto=True), text_color=color1))
     itemlist.append(item.clone(title="", action=""))
     itemlist.append(item.clone(action="search", title="Buscar...", text_color=color3,
                     thumbnail=get_thumb('search', auto=True)))

@@ -92,12 +92,16 @@ def peliculas(item):
     data = scrapertools.find_single_match(data, 'finddatepost\((\{.*?\]\}\})\);')
     data = jsontools.load(data)["feed"]
     for entry in data["entry"]:
+        bb = jsontools.dump(entry["author"])
+        aa = scrapertools.find_single_match(bb, '(?s)src": "([^"]+)')
+        if "Enviar comentarios" in entry: continue
         for link in entry["link"]:
             if link["rel"] == "alternate":
                 title = link["title"]
                 url = link["href"]
                 break
-        thumbnail = entry["media$thumbnail"]["url"].replace("s72-c/", "")
+        thumbnail = "https:" + aa
+        thumbnail = thumbnail.replace("s72-c/", "")
         try:
             title_split = re.split(r"\s*\((\d)", title, 1)
             year = title_split[1] + scrapertools.find_single_match(title_split[2], '(\d{3})\)')

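Reviewer note: peliculas() reads the Blogger JSON-in-script feed (alt=json-in-script&callback=finddatepost), so the body arrives wrapped in a finddatepost(...) call that has to be stripped before parsing. The thumbnail now comes from the entry author's avatar: its src is extracted out of the dumped author JSON (aa above) and given an explicit "https:" prefix, since Blogger serves it scheme-relative. A minimal stand-alone version of the unwrap-and-walk step, assuming only the feed shape used above (the function name is illustrative):

    import json
    import re

    def parse_blogger_feed(raw):
        # Strip the JSON-P wrapper: finddatepost({ ... });
        payload = re.search(r'finddatepost\((\{.*\})\);', raw, re.S).group(1)
        feed = json.loads(payload)["feed"]
        for entry in feed.get("entry", []):
            # Each entry's "alternate" link points at the post itself.
            for link in entry["link"]:
                if link["rel"] == "alternate":
                    yield link["title"], link["href"]
                    break
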
@@ -160,17 +160,15 @@ def findvideos(item):
     logger.info()
     itemlist = []
     data = httptools.downloadpage(item.url).data
-    patron = 'data-type="(tv).*?'
-    patron += 'data-post="([^"]+).*?'
-    patron += 'data-nume="([^"]+).*?'
-    patron += 'server">([^<]+).*?'
+    patron = 'player-option-\d+.*?'
+    patron += 'data-sv="([^"]+).*?'
+    patron += 'data-user="([^"]+)'
     matches = scrapertools.find_multiple_matches(data, patron)
     headers = {"X-Requested-With":"XMLHttpRequest"}
-    for scrapedtype, scrapedpost, scrapednume, scrapedserver in matches:
-        post = "action=doo_player_ajax&type=%s&post=%s&nume=%s" %(scrapedtype, scrapedpost, scrapednume)
-        data1 = httptools.downloadpage(host + "wp-admin/admin-ajax.php", headers=headers, post=post).data
-        url1 = scrapertools.find_single_match(data1, "src='([^']+)")
-        url1 = devuelve_enlace(url1)
+    for scrapedserver, scrapeduser in matches:
+        data1 = httptools.downloadpage("https://space.danimados.space/gilberto.php?id=%s&sv=mp4" %scrapeduser).data
+        url = base64.b64decode(scrapertools.find_single_match(data1, 'hashUser = "([^"]+)'))
+        url1 = devuelve_enlace(url)
         if "drive.google" in url1:
             url1 = url1.replace("view","preview")
         if url1:

@@ -192,11 +190,11 @@ def play(item):

 def devuelve_enlace(url1):
     if 'danimados' in url1:
-        url = 'https:'+url1.replace('stream/', 'stream_iframe/')
-        id = scrapertools.find_single_match(url, 'iframe/(.*)')
-        url = url.replace(id, base64.b64encode(id))
+        url = 'https:' + url1
         new_data = httptools.downloadpage(url).data
+        new_data = new_data.replace('"',"'")
+        url1 = scrapertools.find_single_match(new_data, "iframe src='([^']+)")
         new_data = httptools.downloadpage(url1).data
         url = scrapertools.find_single_match(new_data, "sources:\s*\[\{file:\s*'([^']+)")
     if "zkstream" in url or "cloudup" in url:
         url1 = httptools.downloadpage(url, follow_redirects=False, only_headers=True).headers.get("location", "")

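Reviewer note: the danimados channel drops the WordPress doo_player_ajax POST and instead queries the site's gilberto.php resolver, whose response hides the real stream URL base64-encoded in a hashUser variable. The decode step in isolation (the response body below is invented for illustration):

    import base64
    import re

    # Hypothetical resolver response carrying the encoded target URL.
    data1 = 'var hashUser = "aHR0cHM6Ly9leGFtcGxlLmNvbS92aWRlby5tcDQ=";'

    encoded = re.search('hashUser = "([^"]+)', data1).group(1)
    url = base64.b64decode(encoded).decode("utf-8")
    print(url)  # -> https://example.com/video.mp4
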
@@ -3,8 +3,6 @@
 # -*- Created for Alfa-addon -*-
 # -*- By the Alfa Develop Group -*-

-import urllib
-
 from channelselector import get_thumb
 from channels import autoplay
 from channels import filtertools

@@ -188,6 +186,7 @@ def findvideos(item):
     for datos in dict:
         url1 = datos["url"]
         hostname = scrapertools.find_single_match(datos["hostname"].replace("www.",""), "(.*?)\.")
+        if "repelisgo" in hostname: continue
         if hostname == "my": hostname = "mailru"
         titulo = "Ver en: " + hostname.capitalize() + " (" + cali[datos["quality"]] + ") (" + idio[datos["audio"]] + ")"
         itemlist.append(

@@ -226,8 +225,6 @@ def play(item):
     logger.info()
     itemlist = []
     url1 = httptools.downloadpage(host + item.url, follow_redirects=False, only_headers=True).headers.get("location", "")
-    if "storage" in url1:
-        url1 = scrapertools.find_single_match(url1, "src=(.*mp4)").replace("%3A",":").replace("%2F","/")
     itemlist.append(item.clone(url=url1))
     itemlist = servertools.get_servers_itemlist(itemlist)
     itemlist[0].thumbnail = item.contentThumbnail

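Reviewer note: play() in this channel resolves its internal link entirely from the redirect's location header; the removed storage branch additionally undid percent-encoding by hand ('%3A' to ':', '%2F' to '/'). If that case ever needs to come back, urllib's unquote is the more robust tool. A sketch of both steps, using requests instead of httptools; the function name is illustrative, not from the codebase:

    import re
    from urllib.parse import unquote

    import requests

    def resolve_play_url(page_url):
        # The real target is announced in the Location header of the redirect.
        location = requests.head(page_url, allow_redirects=False,
                                 timeout=10).headers.get("location", "")
        if "storage" in location:
            # What the removed branch did with chained .replace() calls.
            m = re.search("src=(.*mp4)", location)
            if m:
                return unquote(m.group(1))
        return location
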
@@ -1,7 +1,7 @@
 {
     "id": "rexpelis",
     "name": "Rexpelis",
-    "active": false,
+    "active": true,
     "adult": false,
     "language": ["lat","cast"],
     "thumbnail": "https://i.postimg.cc/MMJ5g9Y1/rexpelis1.png",

@@ -15,7 +15,7 @@ from platformcode import config, logger, platformtools


 idio = {'https://cdn.yape.nu//languajes/la.png': 'LAT','https://cdn.yape.nu//languajes/es.png': 'ESP','https://cdn.yape.nu//languajes/en_es.png': 'VOSE'}
-cali = {'TS Screnner': 'TS Screnner', 'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner', 'DVD Rip':'DVD Rip'}
+cali = {'TS Screnner': 'TS Screnner', 'HD 1080p': 'HD 1080p','TS Screener HQ':'TS Screener HQ', 'BR Screnner':'BR Screnner','HD Rip':'HD Rip','DVD Screnner':'DVD Screnner', 'DVD Rip':'DVD Rip', 'HD 720':'HD 720'}

 list_language = idio.values()
 list_quality = cali.values()

@@ -26,7 +26,7 @@ def find_in_text(regex, text, flags=re.IGNORECASE | re.DOTALL):


 class UnshortenIt(object):
-    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net'
+    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|u\.bb|ay\.gy|atominik\.com|tinyium\.com|microify\.com|threadsphere\.bid|clearload\.bid|activetect\.net|swiftviz\.net|briskgram\.net|activetect\.net|baymaleti\.net'
     _linkbucks_regex = r'linkbucks\.com|any\.gs|cash4links\.co|cash4files\.co|dyo\.gs|filesonthe\.net|goneviral\.com|megaline\.co|miniurls\.co|qqc\.co|seriousdeals\.net|theseblogs\.com|theseforums\.com|tinylinks\.co|tubeviral\.com|ultrafiles\.net|urlbeat\.net|whackyvidz\.com|yyv\.co'
     _adfocus_regex = r'adfoc\.us'
     _lnxlu_regex = r'lnx\.lu'

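Reviewer note: the updated alternation adds baymaleti\.net as a recognized ad-shortener domain (and incidentally repeats activetect\.net, which is harmless but redundant). These class attributes are plain regex alternations matched against a URL to pick an unshortening strategy, along these lines (the helper name here is illustrative):

    import re

    _adfly_regex = r'adf\.ly|j\.gs|q\.gs|baymaleti\.net'  # abbreviated here

    def needs_adfly_unshorten(url):
        # Case-insensitive substring match, as UnshortenIt's find_in_text does.
        return re.search(_adfly_regex, url, re.IGNORECASE) is not None

    print(needs_adfly_unshorten("http://baymaleti.net/abc123"))  # -> True
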
@@ -214,7 +214,7 @@ def render_items(itemlist, parent_item):
         if item.fanart:
             fanart = item.fanart
         else:
-            fanart = os.path.join(config.get_runtime_path(), "fanart-xmas.jpg")
+            fanart = os.path.join(config.get_runtime_path(), "fanart1.jpg")

         # Creamos el listitem
         #listitem = xbmcgui.ListItem(item.title)

@@ -1,8 +1,7 @@
 # -*- coding: utf-8 -*-

-import re

 from core import httptools
 from core import scrapertools
 from platformcode import logger

@@ -21,7 +20,10 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
     video_urls = []
     data = httptools.downloadpage(page_url).data
     patron = "DownloadButtonAd-startDownload gbtnSecondary.*?href='([^']+)'"
-    matches = re.compile(patron, re.DOTALL).findall(data)
+    matches = scrapertools.find_multiple_matches(data, patron)
+    if len(matches) == 0:
+        patron = 'Download file.*?href="([^"]+)"'
+        matches = scrapertools.find_multiple_matches(data, patron)
     if len(matches) > 0:
         video_urls.append([matches[0][-4:] + " [mediafire]", matches[0]])
     for video_url in video_urls:

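Reviewer note: besides swapping raw re.compile(...).findall for the project's scrapertools.find_multiple_matches helper (which is why "import re" could be dropped above), the mediafire server gains a fallback pattern for pages that render a plain "Download file" link. The matches[0][-4:] slice labels the entry with the file extension taken from the URL's last four characters. The control flow in isolation, with an invented sample page:

    import re

    def extract_download(data):
        # Primary pattern: the styled download button.
        matches = re.findall("DownloadButtonAd-startDownload gbtnSecondary.*?href='([^']+)'",
                             data, re.S)
        if not matches:
            # Fallback: a plain "Download file" anchor.
            matches = re.findall('Download file.*?href="([^"]+)"', data, re.S)
        if matches:
            # Label the entry with the extension from the URL's last 4 chars.
            return [matches[0][-4:] + " [mediafire]", matches[0]]
        return None

    print(extract_download('Download file <a href="http://example.com/clip.mp4">'))
    # -> ['.mp4 [mediafire]', 'http://example.com/clip.mp4']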