.+?
.+?
'
- if item.url==host or item.url==host+"/liveaction":
- a=1
- else:
- num=(item.url).split('-')
- a=int(num[1])
+ full_data = httptools.downloadpage(item.url).data
+ full_data = re.sub(r"\n|\r|\t|\s{2}| ", "", full_data)
+ data = scrapertools.find_single_match(full_data, 'class="sl">(.*?)')
+ patron = '
'
+
matches = scrapertools.find_multiple_matches(data, patron)
- # Paginacion
- num_items_x_pagina = 30
- min = item.page * num_items_x_pagina
- min=min-item.page
- max = min + num_items_x_pagina - 1
- b=0
- for link, img, name in matches[min:max]:
- b=b+1
+
+ for link, img, name in matches:
if " y " in name:
title=name.replace(" y "," & ")
else:
@@ -80,17 +78,15 @@ def lista(item):
context2 = autoplay.context
context.extend(context2)
- itemlist.append(item.clone(title=title, url=url, action="episodios", thumbnail=scrapedthumbnail, show=title,contentSerieName=title,
- context=context))
- if b<29:
- a=a+1
- url=host+"/p/pag-"+str(a)
- if b>10:
- itemlist.append(
- Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=url, action="lista", page=0))
- else:
- itemlist.append(
- Item(channel=item.channel, contentSerieName=item.contentSerieName, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=item.url, action="lista", page=item.page + 1))
+ itemlist.append(Item(channel=item.channel, title=title, url=url, action="episodios", thumbnail=scrapedthumbnail,
+ contentSerieName=title, context=context))
+
+ # Paginacion
+
+ next_page = scrapertools.find_single_match(full_data, '\d+\d+')
+ if next_page != '':
+ itemlist.append(Item(channel=item.channel, contentSerieName=item.contentSerieName,
+ title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=host+next_page, action="lista"))
tmdb.set_infoLabels(itemlist)
return itemlist
@@ -211,6 +207,48 @@ def findvideos(item):
return itemlist
+def search_results(item):
+ logger.info()
+ itemlist = []
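+ # b.php returns a Python-style list literal; each entry supplies the thumbnail id, title and path slug used below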
+ data = httptools.downloadpage(item.url, post=item.post).data
+ if len(data) > 0:
+ results = eval(data)
+ else:
+ return itemlist
+
+ for result in results:
+ try:
+ thumbnail = host + "/tb/%s.jpg" % result[0]
+ title = u'%s' % result[1]
+ logger.debug(title)
+ url = host + "/s/%s" % result[2]
+ itemlist.append(Item(channel=item.channel, thumbnail=thumbnail, title=title, url=url, contentSerieName=title,
+ action='episodios'))
+ except:
+ pass
+
+ tmdb.set_infoLabels(itemlist, seekTmdb=True)
+ return itemlist
+
+def search(item, texto):
+ logger.info()
+ import urllib
+
+ if texto != "":
+ texto = texto.replace(" ", "+")
+ item.url = host+"/b.php"
+ post = {'k':texto, "pe":"", "te":""}
+ item.post = urllib.urlencode(post)
+
+ try:
+ return search_results(item)
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("%s" % line)
+ return []
+
+
def golink(ida,sl):
a=ida
b=[3,10,5,22,31]
diff --git a/plugin.video.alfa/channels/homecine.json b/plugin.video.alfa/channels/homecine.json
new file mode 100644
index 00000000..81bef526
--- /dev/null
+++ b/plugin.video.alfa/channels/homecine.json
@@ -0,0 +1,70 @@
+{
+ "id": "homecine",
+ "name": "HomeCine",
+ "active": true,
+ "adult": false,
+ "language": ["lat","cast"],
+ "thumbnail": "https://homecine.net/wp-content/uploads/2018/05/homedark-1-3.png",
+ "banner": "",
+ "version": 1,
+ "categories": [
+ "movie",
+ "direct"
+ ],
+ "settings": [
+ {
+ "id": "include_in_global_search",
+ "type": "bool",
+ "label": "Incluir en busqueda global",
+ "default": false,
+ "enabled": false,
+ "visible": false
+ },
+ {
+ "id": "filter_languages",
+ "type": "list",
+ "label": "Mostrar enlaces en idioma...",
+ "default": 0,
+ "enabled": true,
+ "visible": true,
+ "lvalues": [
+ "No filtrar",
+ "LAT",
+ "CAST",
+ "VOSE"
+ ]
+ },
+ {
+ "id": "include_in_newest_peliculas",
+ "type": "bool",
+ "label": "Incluir en Novedades - Peliculas",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_infantiles",
+ "type": "bool",
+ "label": "Incluir en Novedades - Infantiles",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_terror",
+ "type": "bool",
+ "label": "Incluir en Novedades - terror",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ },
+ {
+ "id": "include_in_newest_latino",
+ "type": "bool",
+ "label": "Incluir en Novedades - Latino",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ }
+ ]
+}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/homecine.py b/plugin.video.alfa/channels/homecine.py
new file mode 100644
index 00000000..b6fd9274
--- /dev/null
+++ b/plugin.video.alfa/channels/homecine.py
@@ -0,0 +1,358 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urllib
+import urlparse
+
+from channels import autoplay
+from channels import filtertools
+from core import httptools
+from core import jsontools
+from core import scrapertools
+from core import servertools
+from core import tmdb
+from core.item import Item
+from platformcode import config, logger
+from channelselector import get_thumb
+
+IDIOMAS = {'Latino': 'LAT', 'Castellano': 'CAST', 'Subtitulado': 'VOSE'}
+list_language = IDIOMAS.values()
+list_quality = ['HD 720p', 'HD 1080p', '480p', '360p']
+list_servers = ['cinemaupload']
+
+host = 'https://homecine.net'
+
+
+def mainlist(item):
+ logger.info()
+
+ autoplay.init(item.channel, list_servers, list_quality)
+
+ itemlist = []
+
+ itemlist.append(Item(channel=item.channel, title="Ultimas",
+ action="list_all",
+ thumbnail=get_thumb('last', auto=True),
+ url='%s%s' % (host, '/release-year/2019'),
+ first=0
+ ))
+
+ itemlist.append(Item(channel=item.channel,title="Películas",
+ action="sub_menu",
+ thumbnail=get_thumb('movies', auto=True),
+ ))
+
+ itemlist.append(Item(channel=item.channel,title="Series",
+ action="list_all",
+ thumbnail=get_thumb('tvshows', auto=True),
+ url='%s%s'%(host,'/series/'),
+ first=0
+ ))
+
+ itemlist.append(Item(channel=item.channel, title="Documentales",
+ action="list_all",
+ thumbnail=get_thumb('documentaries', auto=True),
+ url='%s%s' % (host, '/documentales/'),
+ first=0
+ ))
+
+ itemlist.append(Item(channel=item.channel,title="Buscar",
+ action="search",
+ url=host+'/?s=',
+ thumbnail=get_thumb('search', auto=True),
+ ))
+
+ autoplay.show_option(item.channel, itemlist)
+
+ return itemlist
+
+def sub_menu(item):
+ logger.info()
+
+ itemlist = []
+
+ itemlist.append(Item(channel=item.channel,title="Todas",
+ action="list_all",
+ thumbnail=get_thumb('all', auto=True),
+ url='%s%s' % (host, '/peliculas/'),
+ first=0
+ ))
+
+ itemlist.append(Item(channel=item.channel, title="Mas vistas",
+ action="list_all",
+ thumbnail=get_thumb('more watched', auto=True),
+ url='%s%s' % (host, '/most-viewed/'),
+ first=0
+ ))
+
+ itemlist.append(Item(channel=item.channel,title="Generos",
+ action="seccion",
+ thumbnail=get_thumb('genres', auto=True),
+ fanart='https://s3.postimg.cc/5s9jg2wtf/generos.png',
+ url=host,
+ ))
+
+ return itemlist
+
+def get_source(url, referer=None):
+ logger.info()
+ if referer is None:
+ data = httptools.downloadpage(url).data
+ else:
+ data = httptools.downloadpage(url, headers={'Referer':referer}).data
+ data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
+ return data
+
+def list_all(item):
+ logger.info()
+
+ itemlist = []
+ next = False
+
+ data = get_source(item.url)
+ patron = 'movie-id="\d+".*?
([^<]+).*?jtip(.*?)clearfix'
+
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
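+ # Local pagination: matches are consumed in first/last windows; once the window reaches the end of the list, the site's own 'page larger' link is followed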
+ first = item.first
+ last = first + 19
+ if last > len(matches):
+ last = len(matches)
+ next = True
+
+ for scrapedurl, scrapedthumbnail, scrapedtitle, extra_info in matches[first:last]:
+
+ year = scrapertools.find_single_match(extra_info, '"tag">(\d{4})<')
+ url = host+scrapedurl
+ thumbnail = host+scrapedthumbnail.strip()
+ title = scrapedtitle
+ new_item = Item(channel=item.channel,
+ title=title,
+ url=url,
+ thumbnail=thumbnail,
+ infoLabels = {'year': year}
+ )
+ if 'series' in scrapedurl:
+ new_item.action = 'seasons'
+ new_item.contentSerieName = title
+ else:
+ new_item.action = 'findvideos'
+ new_item.contentTitle = title
+
+ itemlist.append(new_item)
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb = True)
+
+ if not next:
+ url_next_page = item.url
+ first = last
+ else:
+ url_next_page = scrapertools.find_single_match(data, ".*?class='page larger' href='([^']+)'")
+ url_next_page = host+url_next_page
+ first = 0
+
+ if url_next_page:
+ itemlist.append(Item(channel=item.channel,title="Siguiente >>", url=url_next_page, action='list_all',
+ first=first))
+
+ return itemlist
+
+
+def seccion(item):
+ logger.info()
+
+ itemlist = []
+ duplicado = []
+ data = get_source(item.url)
+
+ patron = 'menu-item-object-category menu-item-\d+"><a href="([^"]+)">([^<]+)<\/a><\/li>'
+
+ matches = re.compile(patron, re.DOTALL).findall(data)
+
+ for scrapedurl, scrapedtitle in matches:
+ url = host+scrapedurl
+ title = scrapedtitle
+ thumbnail = ''
+ if url not in duplicado:
+ itemlist.append(Item(channel=item.channel,
+ action='list_all',
+ title=title,
+ url=url,
+ thumbnail=thumbnail,
+ first=0
+ ))
+ return itemlist
+
+
+def seasons(item):
+ logger.info()
+ itemlist = []
+
+ data = get_source(item.url)
+
+ patron = 'Season (\d+)'
+
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ infoLabels = item.infoLabels
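+ # One season item per 'Season N' match; the show URL is reused by episodesxseason to scrape that season's episodes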
+ for scrapedseason in matches:
+ contentSeasonNumber = scrapedseason
+ title = 'Temporada %s' % scrapedseason
+ infoLabels['season'] = contentSeasonNumber
+
+ itemlist.append(Item(channel=item.channel,
+ action='episodesxseason',
+ url=item.url,
+ title=title,
+ contentSeasonNumber=contentSeasonNumber,
+ infoLabels=infoLabels
+ ))
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+
+ if config.get_videolibrary_support() and len(itemlist) > 0:
+ itemlist.append(Item(channel=item.channel,
+ title='[COLOR yellow]Añadir esta serie a la videoteca[/COLOR]',
+ url=item.url,
+ action="add_serie_to_library",
+ extra="episodios",
+ contentSerieName=item.contentSerieName,
+ extra1='library'
+ ))
+
+ return itemlist
+
+def episodios(item):
+ logger.info()
+ itemlist = []
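+ # Entry point used by the videolibrary (extra="episodios"): flattens every season into a single episode list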
+ templist = seasons(item)
+ for tempitem in templist:
+ itemlist += episodesxseason(tempitem)
+ return itemlist
+
+def episodesxseason(item):
+ logger.info()
+ itemlist = []
+ season = item.contentSeasonNumber
+ data = get_source(item.url)
+ data = scrapertools.find_single_match(data, 'Season %s.*?class="les-content"(.*?) ' % season)
+ patron = '<a href="([^"]+)">Episode (\d+)'
+ matches = re.compile(patron, re.DOTALL).findall(data)
+ infoLabels = item.infoLabels
+ for scrapedurl, dataep in matches:
+ url = host+scrapedurl
+ contentEpisodeNumber = dataep
+ try:
+ title = '%sx%s - Episodio %s' % (season, dataep, dataep)
+ except:
+ title = 'episodio %s' % dataep
+ infoLabels['episode'] = dataep
+
+ itemlist.append(Item(channel=item.channel,
+ action="findvideos",
+ title=title,
+ url=url,
+ contentEpisodeNumber=contentEpisodeNumber,
+ infoLabels=infoLabels
+ ))
+
+ tmdb.set_infoLabels_itemlist(itemlist, seekTmdb=True)
+ return itemlist
+
+
+
+def search(item, texto):
+ logger.info()
+ texto = texto.replace(" ", "+")
+ item.url = item.url + texto
+ item.first=0
+ if texto != '':
+ return list_all(item)
+
+
+def newest(categoria):
+ logger.info()
+ itemlist = []
+ item = Item()
+ try:
+ if categoria in ['peliculas']:
+ item.url = host +'/peliculas'
+ elif categoria == 'infantiles':
+ item.url = host + '/animacion/'
+ elif categoria == 'terror':
+ item.url = host + '/terror/'
+ item.first=0
+ itemlist = list_all(item)
+ if itemlist and itemlist[-1].title == 'Siguiente >>':
+ itemlist.pop()
+ except:
+ import sys
+ for line in sys.exc_info():
+ logger.error("{0}".format(line))
+ return []
+
+ return itemlist
+
+def findvideos(item):
+ logger.info()
+ itemlist = []
+
+ data = get_source(item.url)
+ patron = '(.*?)<' % option)
+ if '-' in extra_info:
+ quality, language = scrapertools.find_single_match(extra_info, '(.*?) - (.*)')
+ else:
+ language = ''
+ quality = extra_info
+
+ if 'https:' not in url:
+ url = 'https:'+url
+ title = ''
+ if not config.get_setting('unify'):
+ if language != '':
+ title += ' [%s]' % IDIOMAS[language]
+ if quality != '':
+ title += ' [%s]' % quality
+
+ new_item = Item(channel=item.channel,
+ url=url,
+ title='%s' + title,
+ contentTitle=item.title,
+ action='play',
+ infoLabels = item.infoLabels
+ )
+ if language != '':
+ new_item.language = IDIOMAS[language]
+ if quality != '':
+ new_item.quality = quality
+
+ itemlist.append(new_item)
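+ # The '%s' placeholder in each title is later filled in with the detected server name by get_servers_itemlist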
+ itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+
+ # Requerido para FilterTools
+
+ itemlist = filtertools.get_links(itemlist, item, list_language)
+
+ # Requerido para AutoPlay
+
+ autoplay.start(itemlist, item)
+
+ if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+ itemlist.append(
+ Item(channel=item.channel,
+ title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]',
+ url=item.url,
+ action="add_pelicula_to_library",
+ extra="findvideos",
+ contentTitle=item.contentTitle,
+ ))
+
+
+ return itemlist
diff --git a/plugin.video.alfa/channels/mixtoon.py b/plugin.video.alfa/channels/mixtoon.py
index 215b144c..52520ddb 100644
--- a/plugin.video.alfa/channels/mixtoon.py
+++ b/plugin.video.alfa/channels/mixtoon.py
@@ -137,14 +137,16 @@ def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
+
_sl = scrapertools.find_single_match(data, 'var _dt=([^;]+);')
sl = eval(_sl)
buttons = [0,1]
for id in buttons:
new_url = "https://videoeb.xyz/" + "eb/" + sl[0] + "/" + sl[1] + "/" + str(id) + "/" + sl[2]
- data_new = httptools.downloadpage(new_url).data
- valor1, valor2 = scrapertools.find_single_match(data_new, 'var x0x = \["[^"]*","([^"]+)","[^"]*","[^"]*","([^"]+)')
+ data_new = httptools.downloadpage(new_url, headers={'Referer': item.url}).data
try:
+ valor1, valor2 = scrapertools.find_single_match(data_new,
+ 'var x0x = \["[^"]*","([^"]+)","[^"]*","[^"]*","([^"]+)')
url = base64.b64decode(gktools.transforma_gsv(valor2, base64.b64decode(valor1)))
if 'download' in url:
url = url.replace('download', 'preview')
From 3fad1e3566eb92450e56e1527d12becd160bc56e Mon Sep 17 00:00:00 2001
From: Alfa-beto <30815244+Alfa-beto@users.noreply.github.com>
Date: Wed, 27 Mar 2019 15:29:23 -0300
Subject: [PATCH 2/2] Correcciones y novedades
---
plugin.video.alfa/servers/archiveorg.py | 1 -
plugin.video.alfa/servers/cinemaupload.json | 42 +++++++++++++++++++++
plugin.video.alfa/servers/cinemaupload.py | 28 ++++++++++++++
3 files changed, 70 insertions(+), 1 deletion(-)
create mode 100644 plugin.video.alfa/servers/cinemaupload.json
create mode 100644 plugin.video.alfa/servers/cinemaupload.py
diff --git a/plugin.video.alfa/servers/archiveorg.py b/plugin.video.alfa/servers/archiveorg.py
index a954cc35..2d93aa79 100644
--- a/plugin.video.alfa/servers/archiveorg.py
+++ b/plugin.video.alfa/servers/archiveorg.py
@@ -20,7 +20,6 @@ def get_video_url(page_url, premium=False, user="", password="", video_password=
logger.info("url=" + page_url)
video_urls = []
data = httptools.downloadpage(page_url).data
- logger.debug(data)
patron = ''
matches = scrapertools.find_multiple_matches(data, patron)
for url in matches:
diff --git a/plugin.video.alfa/servers/cinemaupload.json b/plugin.video.alfa/servers/cinemaupload.json
new file mode 100644
index 00000000..99bc9e2b
--- /dev/null
+++ b/plugin.video.alfa/servers/cinemaupload.json
@@ -0,0 +1,42 @@
+{
+ "active": true,
+ "find_videos": {
+ "ignore_urls": [],
+ "patterns": [
+ {
+ "pattern": "https://cinemaupload.com/embed/([a-zA-Z0-9]+)",
+ "url": "https://cinemaupload.com/embed/\\1/"
+ }
+ ]
+ },
+ "free": true,
+ "id": "cinemaupload",
+ "name": "cinemaupload",
+ "settings": [
+ {
+ "default": false,
+ "enabled": true,
+ "id": "black_list",
+ "label": "@60654",
+ "type": "bool",
+ "visible": true
+ },
+ {
+ "default": 0,
+ "enabled": true,
+ "id": "favorites_servers_list",
+ "label": "@60655",
+ "lvalues": [
+ "No",
+ "1",
+ "2",
+ "3",
+ "4",
+ "5"
+ ],
+ "type": "list",
+ "visible": false
+ }
+ ],
+ "thumbnail": "https://cinemaupload.com/static/img/logo1.png"
+}
diff --git a/plugin.video.alfa/servers/cinemaupload.py b/plugin.video.alfa/servers/cinemaupload.py
new file mode 100644
index 00000000..69191999
--- /dev/null
+++ b/plugin.video.alfa/servers/cinemaupload.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+# --------------------------------------------------------
+# Conector Cinemaupload By Alfa development Group
+# --------------------------------------------------------
+import re
+from core import httptools
+from core import scrapertools
+from platformcode import logger
+
+
+def test_video_exists(page_url):
+ logger.info("(page_url='%s')" % page_url)
+ data = httptools.downloadpage(page_url)
+ if data.code == 404:
+ return False, "[CinemaUpload] El archivo no existe o ha sido borrado"
+ return True, ""
+
+
+def get_video_url(page_url, premium=False, user="", password="", video_password=""):
+ logger.info("url=" + page_url)
+ video_urls = []
+ data = httptools.downloadpage(page_url).data
+ data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data)
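+ # The embed page declares its stream as source: '<url>'; every match is returned as an .m3u8 entry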
+ patron = "source: '([^']+)',"
+ matches = scrapertools.find_multiple_matches(data, patron)
+ for url in matches:
+ video_urls.append(['.m3u8 [CinemaUpload]', url])
+ return video_urls