.*?.*?')
-
-    if paginacion:
-        itemlist.append(Item(channel=item.channel, action="sub_search", title="Next page >>", url=paginacion))
-
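+    # parse the category menu; each entry opens lista() with the category URL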
+    data = scrapertools.get_match(data, 'CATEGORÍAS(.*?)')
+    patron = '([^"]+)'
+    matches = re.compile(patron, re.DOTALL).findall(data)
+    for scrapedurl, scrapedtitle in matches:
+        scrapedplot = ""
+        scrapedthumbnail = ""
+        itemlist.append(item.clone(channel=item.channel, action="lista", title=scrapedtitle, url=scrapedurl,
+                                   thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist
-def peliculas(item):
+def lista(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
-    patron = '.*?href="(.*?)" title="(.*?)".*?<.*?src="(.*?)"'
+    patron = '(.*?).*?'
+    patron += '"Title">([^"]+).*?'
+    patron += '"Year">(\d+).*?'
+    patron += '\w+ \(([^"]+)\)'
    matches = scrapertools.find_multiple_matches(data, patron)
-    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
-        filter_thumb = scrapedthumbnail.replace("http://image.tmdb.org/t/p/w300", "")
-        filter_list = {"poster_path": filter_thumb}
-        filter_list = filter_list.items()
-        itemlist.append(Item(channel=item.channel,
-                             action="findvideos",
-                             title=scrapedtitle,
-                             fulltitle=scrapedtitle,
-                             url=scrapedurl,
-                             thumbnail=scrapedthumbnail,
-                             infoLabels={'filtro': filter_list}))
-
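+    # build one item per result, skipping entries whose thumbnail block is tagged '>TV<'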
+    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedyear, calidad in matches:
+        thumbnail = scrapertools.find_single_match(scrapedthumbnail, 'src="([^"]+)"')
+        scrapedtitle = scrapedtitle.replace("(%s)" % scrapedyear, "")
+        if not config.get_setting('unify'):
+            title = '%s [COLOR red] %s [/COLOR] (%s)' % (scrapedtitle, calidad, scrapedyear)
+        else:
+            title = ''
+        if not '>TV<' in scrapedthumbnail:
+            itemlist.append(item.clone(action="findvideos", title=title, url=scrapedurl, thumbnail=thumbnail,
+                                       contentTitle=scrapedtitle, quality=calidad, infoLabels={'year': scrapedyear}))
    tmdb.set_infoLabels(itemlist, True)
-    next_page_url = scrapertools.find_single_match(data, '([^<]+)')
+ patron = '"server":"[^"]+",'
+ patron += '"lang":"([^"]+)",'
+ patron += '"quality":"\w+ \(([^"]+)\)",'
+ patron += '"link":"https:.*?=([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
-    for url, language in matches:
-        url = url.replace("&amp;", "&")
-        response = httptools.downloadpage(url, follow_redirects=False, add_referer=True)
-        if response.data:
-            url = scrapertools.find_single_match(response.data, 'src="([^"]+)"')
+    for lang, quality, url in matches:
+        if lang in IDIOMAS:
+            lang = IDIOMAS[lang]
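+        # extra '=' padding guards against encoded values that arrive without it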
+        url = base64.b64decode(url + "==")
+        if not config.get_setting('unify'):
+            title = '[COLOR red] %s [/COLOR] (%s)' % (quality, lang)
        else:
-            url = response.headers.get("location", "")
-        url = url.replace("&quot;", "")
-        titulo = "Ver en %s (" + language + ")"
-        itemlist.append(item.clone(
-            action = "play",
-            title = titulo,
-            url = url,
-            language = language))
-    tmdb.set_infoLabels(itemlist, True)
-    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+            title = ''
+        itemlist.append(item.clone(action="play", title='%s' + title, url=url, language=lang, quality=quality,
+                                   fulltitle=item.title))
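+    # the '%s' placeholder in each title is filled in with the server name by get_servers_itemlist below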
+    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
+    # Required to filter links
+    if __comprueba_enlaces__:
+        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
+    # Required for FilterTools
+    itemlist = filtertools.get_links(itemlist, item, list_language)
+    # Required for AutoPlay
+    autoplay.start(itemlist, item)
+
+    if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos':
+        itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library",
+                             title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url,
+                             extra="findvideos", contentTitle=item.contentTitle))
    return itemlist
def play(item):
-    item.thumbnail = item.contentThumbnail
-    return [item]
-
-def newest(categoria):
    logger.info()
-    itemlist = []
-    item = Item()
-    try:
-        if categoria == 'peliculas':
-            item.url = host
-        elif categoria == 'castellano':
-            item.url = host + 'tag/espanol/'
-        elif categoria == 'latino':
-            item.url = host + 'tag/latino/'
-        itemlist = peliculas(item)
-        if "Pagina" in itemlist[-1].title:
-            itemlist.pop()
-    except:
-        import sys
-        for line in sys.exc_info():
-            logger.error("{0}".format(line))
-        return []
-
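+    # servertools.find_video_items scans the downloaded page for known video-host URLs and returns playable items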
+    data = scrapertools.cachePage(item.url)
+    itemlist = servertools.find_video_items(data=data)
+    for videoitem in itemlist:
+        videoitem.title = item.fulltitle
+        videoitem.fulltitle = item.fulltitle
+        videoitem.thumbnail = item.thumbnail
+        videoitem.channel = item.channel
    return itemlist
+
+
diff --git a/plugin.video.alfa/channels/mirapeliculas.py b/plugin.video.alfa/channels/mirapeliculas.py
index 7ccd39d9..a5515b54 100644
--- a/plugin.video.alfa/channels/mirapeliculas.py
+++ b/plugin.video.alfa/channels/mirapeliculas.py
@@ -13,7 +13,7 @@ host = 'http://mirapeliculas.net'
IDIOMAS = {'Latino': 'LAT', 'Español': 'ESP', 'Subtitulado': 'VOSE'}
list_language = IDIOMAS.values()
-list_servers = ['Streamango', 'Streamplay', 'Openload', 'Okru']
+list_servers = ['streamango', 'streamplay', 'openload', 'okru']
list_quality = ['BR-Rip', 'HD-Rip', 'DVD-Rip', 'TS-HQ', 'TS-Screner', 'Cam']
__channel__='mirapeliculas'
@@ -67,7 +67,6 @@ def categorias(item):
    data = httptools.downloadpage(item.url).data
    patron = ''
    matches = re.compile(patron, re.DOTALL).findall(data)
-    scrapertools.printMatches(matches)  # remove this once the channel is finished
    for scrapedurl, scrapedtitle in matches:
        scrapedplot = ""
        scrapedthumbnail = ""