Merge remote-tracking branch 'alfa-addon/master' into channels
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<addon id="plugin.video.alfa" name="Alfa" version="2.0.1" provider-name="Alfa Addon">
<addon id="plugin.video.alfa" name="Alfa" version="2.0.2" provider-name="Alfa Addon">
<requires>
<import addon="xbmc.python" version="2.1.0"/>
<import addon="script.module.libtorrent" optional="true"/>
@@ -19,16 +19,15 @@
</assets>
<news>[B]Estos son los cambios para esta versión:[/B]
[COLOR green][B]Canales agregados y arreglos[/B][/COLOR]
[I]- peliculasrey
- pelis24
- peliscity
- peliscon
- pelisgratis
- pelispad
- pelismagnet
- rapidvideo
- streammango
- fix internos[/I]
» anitoonstv » qserie
» cartoonlatino » repelis
» seriesblanco » seodiv
» serieslan » pelisplanet
» seriesmeme » pepecine
» peliscity » pelisporno
» okru » vimpleru
¤ fix internos
[COLOR green]Gracias a [COLOR yellow]danielr460[/COLOR] por su colaboración en esta versión[/COLOR]
</news>
<description lang="es">Navega con Kodi por páginas web para ver sus videos de manera fácil.</description>
<summary lang="en">Browse web pages using Kodi</summary>

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
@@ -10,6 +10,17 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'okru',
'netutv',
'rapidvideo'
]
list_quality = ['default']
host = "http://www.anitoonstv.com"
@@ -17,6 +28,7 @@ host = "http://www.anitoonstv.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
@@ -29,6 +41,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="lista", title="Pokemon", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -73,10 +86,10 @@ def lista(item):
if "&" in show:
cad = title.split("xy")
show = cad[0]
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=show, action="episodios", show=show,
context=renumbertools.context(item)))
context=context1))
tmdb.set_infoLabels(itemlist)
return itemlist
@@ -144,6 +157,8 @@ def findvideos(item):
itemlist.append(item.clone(url=url, action="play", server=server, contentQuality=quality,
thumbnail=scrapedthumbnail, plot=scrapedplot,
title="Enlace encontrado en %s: [%s]" % (server.capitalize(), quality)))
autoplay.start(itemlist, item)
return itemlist
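
Note: the hunks above and the analogous channel hunks below all wire in the shared autoplay module the same way: declare per-channel list_servers/list_quality, call autoplay.init() from mainlist(), expose the configuration entry with autoplay.show_option(), attach autoplay.context to listed items alongside renumbertools.context(), and hand the final links to autoplay.start() in findvideos(). A minimal sketch of that pattern, assuming an Alfa channel module with a placeholder host URL (illustrative, not a verbatim excerpt from the diff):

    # -*- coding: utf-8 -*-
    # Sketch of the autoplay wiring this merge adds to each channel (assumed layout).
    from channels import autoplay, renumbertools
    from core.item import Item

    host = "http://www.example-channel.com"            # hypothetical channel URL
    list_servers = ['openload', 'okru', 'rapidvideo']  # per-channel server whitelist
    list_quality = ['default']

    def mainlist(item):
        autoplay.init(item.channel, list_servers, list_quality)    # register channel settings
        itemlist = [Item(channel=item.channel, action="lista", title="Series", url=host)]
        itemlist = renumbertools.show_option(item.channel, itemlist)
        autoplay.show_option(item.channel, itemlist)                # add the autoplay config entry
        return itemlist

    def lista(item):
        itemlist = []
        # each series item carries both context menus so renumbering and autoplay work on it
        context1 = [renumbertools.context(item), autoplay.context]
        itemlist.append(item.clone(title="Serie", action="episodios", context=context1))
        return itemlist

    def findvideos(item):
        itemlist = []                   # scraping of the episode page omitted in this sketch
        autoplay.start(itemlist, item)  # let autoplay pick and launch the preferred link
        return itemlist
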
@@ -12,18 +12,29 @@ from core.item import Item
from platformcode import config, logger
host = "http://www.cartoon-latino.com/"
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'vimple',
'gvideo',
'rapidvideo'
]
list_quality = ['default']
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista", title="Series", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -87,9 +98,10 @@ def lista(item):
for link, name in matches:
title = name + " [Latino]"
url = link
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
item.clone(title=title, url=url, plot=title, action="episodios", show=title,
context=renumbertools.context(item)))
context=context1))
tmdb.set_infoLabels(itemlist)
return itemlist
@@ -171,11 +183,13 @@ def findvideos(item):
if server in link:
url = link.replace('" + ID' + server + ' + "', str(id))
if "drive" in server:
server1 = 'googlevideo'
server1 = 'Gvideo'
else:
server1 = server
itemlist.append(item.clone(url=url, action="play", server=server1,
title="Enlace encontrado en %s " % (server1.capitalize())))
autoplay.start(itemlist, item)
return itemlist

@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
import urlparse
@@ -136,33 +136,26 @@ def findvideos(item):
# Descarga la pagina
data = scrapertools.cache_page(item.url)
patron = 'class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span.*?cursor: hand" rel="(.*?)"'
patron = 'cursor: hand" rel="(.*?)".*?class="optxt"><span>(.*?)<.*?width.*?class="q">(.*?)</span'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedidioma, scrapedcalidad, scrapedurl in matches:
for scrapedurl, scrapedidioma, scrapedcalidad in matches:
idioma = ""
title = item.title + " [" + scrapedcalidad + "][" + scrapedidioma +"]"
title = "%s [" + scrapedcalidad + "][" + scrapedidioma +"]"
if "youtube" in scrapedurl:
scrapedurl += "&"
quality = scrapedcalidad
language = scrapedidioma
if not ("omina.farlante1" in scrapedurl or "404" in scrapedurl):
itemlist.append(
Item(channel=item.channel, action="play", title=title, fulltitle=title, url=scrapedurl,
thumbnail="", plot=plot, show=item.show, quality= quality, language=language))
itemlist=servertools.get_servers_itemlist(itemlist)
Item(channel=item.channel, action="play", title=title, fulltitle=item.title, url=scrapedurl,
thumbnail=item.thumbnail, plot=plot, show=item.show, quality= quality, language=language, extra = item.thumbnail))
itemlist=servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
return itemlist
def play(item):
logger.info()
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
item.thumbnail = item.extra
return [item]

@@ -97,8 +97,8 @@ def sub_search(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t| |<br>", "", data)
# logger.info(data)
patron = '<div class="img">.*?<a href="(?P<url>[^"]+)" title="(?P<name>[^"]+)".*?'
patron += '<img.+?src="(?P<img>[^"]+)".*?\(([^\)]+)\)"> </a></div>.*?'
patron = '<div class="img">.*?<a href="([^"]+)" title="([^"]+)".*?'
patron += '<img.+?src="([^"]+)".*?\(([^\)]+)\)"> </a></div>.*?'
patron += 'Ver\s(.*?)\sOnline'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -165,11 +165,11 @@ def peliculas(item):
data = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", data)
patron_todas = '<div class="home-movies">(.*?)<footer>'
data = scrapertools.find_single_match(data, patron_todas)
patron = 'col-sm-5"><a href="(?P<scrapedurl>[^"]+)".+?'
patron += 'browse-movie-link-qd.*?>(?P<calidad>[^>]+)</.+?'
patron += '<p>(?P<year>[^>]+)</p>.+?'
patron += 'title one-line">(?P<scrapedtitle>[^>]+)</h2>.+?'
patron += 'img-responsive" src="(?P<scrapedthumbnail>[^"]+)".*?'
patron = 'col-sm-5"><a href="([^"]+)".+?'
patron += 'browse-movie-link-qd.*?>([^>]+)</.+?'
patron += '<p>([^>]+)</p>.+?'
patron += 'title one-line">([^>]+)</h2>.+?'
patron += 'img-responsive" src="([^"]+)".*?'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -185,13 +185,16 @@ def peliculas(item):
director = scrapertools.find_single_match(datas, '<div class="list-cast-info tableCell"><a href="[^"]+" rel="tag">([^<]+)</a></div>')
title = "%s [COLOR yellow][%s][/COLOR]" % (scrapedtitle.strip(), calidad.upper())
itemlist.append(Item(channel=item.channel, action="findvideos", title=title, plot='',
new_item = Item(channel=item.channel, action="findvideos", title=title, plot='', contentType='movie',
url=scrapedurl, contentQuality=calidad, thumbnail=scrapedthumbnail,
contentTitle=contentTitle, infoLabels={"year": year, 'rating': rating, 'director': director},
text_color=color3))
text_color=color3)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
tmdb.set_infoLabels(itemlist, __modo_grafico__)
# tmdb.set_infoLabels(itemlist, __modo_grafico__)
# tmdb.set_infoLabels(itemlist, __modo_grafico__)
if year:
tmdb.set_infoLabels_item(new_item, __modo_grafico__)
itemlist.append(new_item)
paginacion = scrapertools.find_single_match(data, '<a class="nextpostslink" rel="next" href="([^"]+)">')
if paginacion:
@@ -237,70 +240,77 @@ def findvideos(item):
datas = httptools.downloadpage(item.url).data
datas = re.sub(r"\n|\r|\t|\(.*?\)|\s{2}| ", "", datas)
# logger.info(data)
patron = '<a style="cursor:pointer; cursor: hand;" rel="([^"]+)".*?'
patron += 'clearfix colores title_calidad">.*?<span>([^<]+)</span></a>'
# logger.info(datas)
patron = '<a id="[^"]+" style="cursor:pointer; cursor: hand" rel="([^"]+)".*?'
patron += '<span class="optxt"><span>([^<]+)</span>.*?'
patron += '<span class="q">([^<]+)</span>'
matches = re.compile(patron, re.DOTALL).findall(datas)
for scrapedurl, servidores, in matches:
if 'youtube' in scrapedurl:
video_urls = []
for scrapedurl, lang, servidores in matches:
# doc_url = ''
doc_id = ''
video_urls = []
if 'drive' in scrapedurl:
doc_id = httptools.downloadpage(scrapedurl).data
doc_id = scrapertools.find_single_match(doc_id, "docid=(\w+)")
elif 'youtube' in scrapedurl:
doc_id = scrapertools.find_single_match(scrapedurl, "docid=(\w+)")
doc_url = "http://docs.google.com/get_video_info?docid=%s" % doc_id
response = httptools.downloadpage(doc_url, cookies=False)
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18':'360p', '22':'720p', '34':'360p', '35':'480p', '37':'1080p', '43':'360p', '59':'480p'}
for itag, video_url in streams:
video_url += headers_string
video_urls.append([video_url, itags[itag]])
doc_url = "http://docs.google.com/get_video_info?docid=%s" % doc_id
response = httptools.downloadpage(doc_url, cookies=False)
cookies = ""
cookie = response.headers["set-cookie"].split("HttpOnly, ")
for c in cookie:
cookies += c.split(";", 1)[0] + "; "
data = response.data.decode('unicode-escape')
data = urllib.unquote_plus(urllib.unquote_plus(data))
headers_string = "|Cookie=" + cookies
url_streams = scrapertools.find_single_match(data, 'url_encoded_fmt_stream_map=(.*)')
streams = scrapertools.find_multiple_matches(url_streams,
'itag=(\d+)&url=(.*?)(?:;.*?quality=.*?(?:,|&)|&quality=.*?(?:,|&))')
itags = {'18':'360p', '22':'720p', '34':'360p', '35':'480p', '37':'1080p', '43':'360p', '59':'480p'}
for itag, video_url in streams:
video_url += headers_string
video_urls.append([video_url, itags[itag]])
for video_item in video_urls:
calidad = video_item[1]
title = '%s [COLOR green](%s)[/COLOR] [COLOR green]([/COLOR][COLOR black]You[/COLOR][COLOR red]tube[/COLOR][COLOR green])[/COLOR]'%(item.contentTitle, calidad)
url = video_item[0]
for video_item in video_urls:
calidad = video_item[1]
title = '%s [COLOR green](%s)[/COLOR] [COLOR green]([/COLOR][COLOR black]You[/COLOR][COLOR red]tube[/COLOR][COLOR green])[/COLOR]'%(item.contentTitle, calidad)
url = video_item[0]
itemlist.append(
item.clone(channel=item.channel,
action='play',
title=title,
url= url,
thumbnail=item.thumbnail,
plot=item.plot,
fanart=item.fanart,
contentTitle=item.contentTitle,
server='directo',
context = item.context
))
itemlist.sort(key=lambda it: it.title, reverse=True)
if 'pelispp.com' or 'ultrapelis' in scrapedurl:
itemlist.append(
item.clone(channel=item.channel,
action='play',
title=title,
url= url,
thumbnail=item.thumbnail,
quality = calidad,
plot=item.plot,
fanart=item.fanart,
contentTitle=item.contentTitle,
language=lang.replace('Español ', ''),
server='directo',
context = item.context
))
itemlist.sort(key=lambda it: it.title, reverse=True)
if 'pelispp.com' in scrapedurl or 'ultrapelis' in scrapedurl:
data = httptools.downloadpage(scrapedurl, headers=headers).data
patronr = 'file: "([^"]+)",label:"([^"]+)",type'
matchesr = re.compile(patronr, re.DOTALL).findall(data)
for scrapedurl, label in matchesr:
url = scrapedurl.replace('\\', '')
language = 'latino'
quality = label.decode('cp1252').encode('utf8')
title = item.contentTitle + ' (' + str(label) + ') ([COLOR blue]G[/COLOR][COLOR red]o[/COLOR][COLOR yellow]o[/COLOR][COLOR blue]g[/COLOR][COLOR green]l[/COLOR][COLOR red]e[/COLOR])'
thumbnail = item.thumbnail
fanart = item.fanart
itemlist.append(item.clone(action="play", title=title, url=url, server='directo',
thumbnail=thumbnail, fanart=fanart, extra='directo',
quality=quality, language=language,))
quality=quality, language=lang.replace('Español ', '')))
itemlist.sort(key=lambda it: it.title, reverse=True)
# if 'youtube' not in scrapedurl:
servidores.lower()
if 'youtube' not in scrapedurl and 'pelispp.com' not in scrapedurl and 'streamplus' not in servidores:
if 'drive' not in scrapedurl and 'pelispp.com' not in scrapedurl and 'youtube' not in scrapedurl and 'streamplus' not in servidores:
quality = scrapertools.find_single_match(
datas, '<p class="hidden-xs hidden-sm">.*?class="magnet-download">([^<]+)p</a>')
title = "[COLOR green]%s[/COLOR] [COLOR yellow][%s][/COLOR] [COLOR yellow][%s][/COLOR]" % (
@@ -309,14 +319,9 @@ def findvideos(item):
thumbnail = item.thumbnail
server = servertools.get_server_from_url(url)
itemlist.append(item.clone(action='play', title=title, url=url, quality=quality,
itemlist.append(item.clone(action='play', title=title, url=url, quality=quality, language=lang.replace('Español ', ''),
server=server, text_color=color3, thumbnail=thumbnail))
for videoitem in itemlist:
videoitem.infoLabels = item.infoLabels
videoitem.channel = item.channel
videoitem.action = 'play'
videoitem.fulltitle = item.title
if config.get_videolibrary_support() and len(itemlist) > 0:
itemlist.append(Item(channel=item.channel,
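
Note: the rewritten findvideos() above resolves Google Drive / YouTube hosted files through docs.google.com/get_video_info: it collects the response cookies into a "|Cookie=..." suffix (Kodi's convention for passing request headers appended to a URL), URL-decodes url_encoded_fmt_stream_map, and maps Google's itag codes to display qualities. A minimal sketch of that last labelling step, assuming the same itag table and already-parsed (itag, url) pairs; the helper name is illustrative, not from the diff:

    # Turn (itag, url) pairs into [playable_url, quality] entries, as the new findvideos() does.
    ITAGS = {'18': '360p', '22': '720p', '34': '360p', '35': '480p',
             '37': '1080p', '43': '360p', '59': '480p'}

    def label_streams(streams, cookie_header):
        video_urls = []
        for itag, video_url in streams:
            # append the cookie header so Kodi sends it when requesting the stream
            video_urls.append([video_url + cookie_header, ITAGS.get(itag, 'SD')])
        return video_urls
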
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import re
import urlparse
@@ -10,11 +10,28 @@ from core import scrapertoolsV2
from core import servertools
from core.item import Item
from platformcode import config, logger
from channels import autoplay
HOST = "https://seriesblanco.com/"
IDIOMAS = {'es': 'Español', 'en': 'Inglés', 'la': 'Latino', 'vo': 'VO', 'vos': 'VOS', 'vosi': 'VOSI', 'otro': 'OVOS'}
list_idiomas = IDIOMAS.values()
list_language = ['default']
CALIDADES = ['SD', 'HDiTunes', 'Micro-HD-720p', 'Micro-HD-1080p', '1080p', '720p']
list_quality = CALIDADES
list_servers = ['streamix',
'powvideo',
'streamcloud',
'openload',
'flashx',
'streamplay',
'nowvideo',
'gamovideo',
'kingvid',
'vidabc'
]
def mainlist(item):
@@ -25,6 +42,8 @@ def mainlist(item):
thumb_buscar = get_thumb("search.png")
itemlist = list()
autoplay.init(item.channel, list_servers, list_quality)
itemlist.append(Item(channel=item.channel, title="Listado alfabético", action="series_listado_alfabetico",
thumbnail=thumb_series_az))
itemlist.append(Item(channel=item.channel, title="Todas las series", action="series",
@@ -45,6 +64,7 @@ def mainlist(item):
itemlist = filtertools.show_option(itemlist, item.channel, list_idiomas, CALIDADES)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -83,10 +103,11 @@ def extract_series_from_data(item, data):
else:
action = "findvideos"
context1=[filtertools.context(item, list_idiomas, CALIDADES), autoplay.context]
itemlist.append(item.clone(title=name, url=urlparse.urljoin(HOST, url),
action=action, show=name,
thumbnail=img,
context=filtertools.context(item, list_idiomas, CALIDADES)))
context=context1))
more_pages = re.search('pagina=([0-9]+)">>>', data)
if more_pages:
@@ -268,7 +289,6 @@ def findvideos(item):
# logger.info(data)
online = extract_videos_section(data)
try:
filtro_enlaces = config.get_setting("filterlinks", item.channel)
except:
@@ -284,6 +304,16 @@ def findvideos(item):
list_links = filtertools.get_links(list_links, item, list_idiomas, CALIDADES)
for i in range(len(list_links)):
a=list_links[i].title
b=a.lstrip('Ver en')
c=b.split('[')
d=c[0].rstrip( )
d=d.lstrip( )
list_links[i].server=d
autoplay.start(list_links, item)
return list_links
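
Note: the loop added above derives a server name for autoplay from seriesblanco link titles of the form "Ver en <servidor> [...]". Since str.lstrip('Ver en') strips a character set rather than the literal prefix, an equivalent extraction is sketched here with a regex; the title format is assumed from the diff:

    import re

    def server_from_title(title):
        # "Ver en Streamcloud [Esp] [HD-720p]" -> "Streamcloud"
        match = re.match(r"\s*Ver en\s+([^\[]+)", title)
        return match.group(1).strip() if match else ""

    # applied to each filtered link before autoplay.start(), e.g.:
    # link.server = server_from_title(link.title)
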
@@ -10,6 +10,17 @@ from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
IDIOMAS = {'latino': 'Latino'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'okru',
'netutv',
'rapidvideo'
]
list_quality = ['default']
host = "https://serieslan.com"
@@ -17,12 +28,14 @@ host = "https://serieslan.com"
def mainlist(item):
logger.info()
thumb_series = get_thumb("channels_tvshow.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(
Item(channel=item.channel, action="lista", title="Series", url=host, thumbnail=thumb_series, page=0))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -48,8 +61,9 @@ def lista(item):
title = name
url = host + link
scrapedthumbnail = host + img
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,
context=renumbertools.context(item)))
context=context1))
itemlist.append(
Item(channel=item.channel, title="Página Siguiente >>", url=item.url, action="lista", page=item.page + 1))
@@ -76,7 +90,7 @@ def episodios(item):
for cap, link, name in matches:
title = ""
pat = "as/sd"
pat = "/"
# varios episodios en un enlace
if len(name.split(pat)) > 1:
i = 0
@@ -164,6 +178,7 @@ def findvideos(item):
itemlist.append(Item(channel=item.channel, action="play", title=title, show=show, url=video_url, plot=item.plot,
thumbnail=thumbnail, server=server, folder=False))
autoplay.start(itemlist, item)
return itemlist
else:
return []

@@ -7,9 +7,20 @@ from channels import renumbertools
from channelselector import get_thumb
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger
from channels import autoplay
IDIOMAS = {'latino': 'Latino', 'español':'Español'}
list_language = IDIOMAS.values()
list_servers = ['openload',
'sendvid',
'netutv',
'rapidvideo'
]
list_quality = ['default']
host = "https://seriesmeme.com/"
@@ -19,7 +30,7 @@ def mainlist(item):
thumb_series = get_thumb("channels_tvshow.png")
thumb_series_az = get_thumb("channels_tvshow_az.png")
autoplay.init(item.channel, list_servers, list_quality)
itemlist = list()
itemlist.append(Item(channel=item.channel, action="lista_gen", title="Novedades", url=host,
@@ -33,6 +44,7 @@ def mainlist(item):
itemlist.append(Item(channel=item.channel, action="top", title="Top Series", url=host,
thumbnail=thumb_series))
itemlist = renumbertools.show_option(item.channel, itemlist)
autoplay.show_option(item.channel, itemlist)
return itemlist
@@ -129,9 +141,10 @@ def lista_gen(item):
if 'HD' in scrapedlang:
scrapedlang = scrapedlang.replace('HD', '')
title = scrapedtitle + " [ " + scrapedlang + "]"
context1=[renumbertools.context(item), autoplay.context]
itemlist.append(
Item(channel=item.channel, title=title, url=scrapedurl, thumbnail=scrapedthumbnail, action="episodios",
show=scrapedtitle, context=renumbertools.context(item)))
show=scrapedtitle, context=context1))
tmdb.set_infoLabels(itemlist)
# Paginacion
@@ -194,3 +207,18 @@ def episodios(item):
action="add_serie_to_library", extra="episodios", show=show))
return itemlist
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.channel=item.channel
autoplay.start(itemlist, item)
return itemlist