.*?Next ').replace("&amp;", "&")
-
- # Link to the next page
- if url:
- itemlist.append(Item(channel=item.channel, action="videos", title=">> Página Siguiente", url=url))
-
- return itemlist
-
-
-def videos_2(item):
- logger.info()
- itemlist = []
- url_limpia = item.url.split("?")[0]
- url = item.url
- while url and len(itemlist) < 25:
- data = scrapertools.downloadpage(url)
- patronvideos = 'data-link="([^"]+)" data-title="([^"]+)" src="([^"]+)" border="0" />'
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
- for url, title, thumbnail in matches:
- itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
- thumbnail=thumbnail))
-
- url = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]*>Next</a>').replace("&amp;", "&")
-
- # Link to the next page
- if url:
- itemlist.append(Item(channel=item.channel, action="videos_2", title=">> Página Siguiente", url=url))
-
- return itemlist
-
-
-def videos_3(item):
- logger.info()
- itemlist = []
-
- url = item.url
- url_limpia = item.url.split("?")[0]
- while url and len(itemlist) < 25:
- data = scrapertools.downloadpage(url)
- patronvideos = '<a href="([^"]+)"[^>]*><img src="([^"]+)"[^>]*>(.*?)</a> '
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
- for url, thumbnail, title in matches:
- itemlist.append(Item(channel=item.channel, action="detail_2", title=title, fulltitle=title, url=url,
- thumbnail=thumbnail))
-
- url = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]*> → </a>').replace("&amp;", "&")
-
- # Link to the next page
- if url:
- itemlist.append(
- Item(channel=item.channel, action="videos_3", title=">> Página Siguiente", url=url_limpia + url))
-
- return itemlist
-
-
-def detail(item):
- logger.info()
- itemlist = []
-
- data = scrapertools.downloadpage(item.url)
- patronvideos = '["|\'](http\://filesmonster.com/download.php\?[^"\']+)["|\']'
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
-
- for url in matches:
- title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
- itemlist.append(
- Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
- url=url, thumbnail=item.thumbnail, folder=False))
- itemlist.append(Item(channel=item.channel, action="anadir_favorito",
- title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=url,
- thumbnail=item.thumbnail, plot="el archivo", folder=True))
- itemlist.append(Item(channel=item.channel, title=""));
-
- patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
- for url in matches:
- if not url == item.url:
- logger.info(url)
- logger.info(item.url)
- title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
- itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
- thumbnail=item.thumbnail, folder=True))
- itemlist.append(Item(channel=item.channel, action="anadir_favorito",
- title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
- thumbnail=item.thumbnail, plot="la carpeta", folder=True))
- itemlist.append(Item(channel=item.channel, title=""));
-
- return itemlist
-
-
-def detail_2(item):
- logger.info()
- itemlist = []
-
- # Download the page
- data = scrapertools.downloadpageGzip(item.url)
- data = data.split('Download from Filesmonster ')
- data = data[0]
- # Find the download URL
- patronvideos = 'href="http://filesmonster.com/download.php(.*?)".(.*?)</a>'
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
- for match2 in matches:
- url = "http://filesmonster.com/download.php" + match2[0]
- title = "Archivo %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
- itemlist.append(
- Item(channel=item.channel, action="play", server="filesmonster", title=title, fulltitle=item.fulltitle,
- url=url, thumbnail=item.thumbnail, folder=False))
- itemlist.append(Item(channel=item.channel, action="anadir_favorito",
- title="(+) Añadir el vídeo a tus favoritos en filesmonster", url=match2[0],
- thumbnail=item.thumbnail, plot="el archivo", folder=True))
- itemlist.append(Item(channel=item.channel, title=""));
-
- patronvideos = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
- matches = re.compile(patronvideos, re.DOTALL).findall(data)
- for url in matches:
- if not url == item.url:
- logger.info(url)
- logger.info(item.url)
- title = "Carpeta %d: %s [filesmonster]" % (len(itemlist) + 1, item.fulltitle)
- itemlist.append(Item(channel=item.channel, action="detail", title=title, fulltitle=item.fulltitle, url=url,
- thumbnail=item.thumbnail, folder=True))
- itemlist.append(Item(channel=item.channel, action="anadir_favorito",
- title="(+) Añadir la carpeta a tus favoritos en filesmonster", url=url,
- thumbnail=item.thumbnail, plot="la carpeta", folder=True))
- itemlist.append(Item(channel=item.channel, title=""));
-
- return itemlist
diff --git a/plugin.video.alfa/channels/freecambay.json b/plugin.video.alfa/channels/freecambay.json
deleted file mode 100755
index b27ede33..00000000
--- a/plugin.video.alfa/channels/freecambay.json
+++ /dev/null
@@ -1,21 +0,0 @@
-{
- "id": "freecambay",
- "name": "FreeCamBay",
- "language": ["*"],
- "active": true,
- "adult": true,
- "thumbnail": "http://i.imgur.com/wuzhOCt.png?1",
- "categories": [
- "adult"
- ],
- "settings": [
- {
- "id": "menu_info",
- "type": "bool",
- "label": "Mostrar menú antes de reproducir con imágenes",
- "default": true,
- "enabled": true,
- "visible": true
- }
- ]
-}
\ No newline at end of file
diff --git a/plugin.video.alfa/channels/freecambay.py b/plugin.video.alfa/channels/freecambay.py
deleted file mode 100755
index f3cde98a..00000000
--- a/plugin.video.alfa/channels/freecambay.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-import urlparse
-
-from core import httptools
-from core import scrapertools
-from core.item import Item
-from platformcode import config, logger
-
-host = "http://www.freecambay.com"
-
-
-def mainlist(item):
- logger.info()
- itemlist = []
-
- itemlist.append(item.clone(action="lista", title="Nuevos Vídeos", url=host + "/latest-updates/"))
- itemlist.append(item.clone(action="lista", title="Mejor Valorados", url=host + "/top-rated/"))
- itemlist.append(item.clone(action="lista", title="Más Vistos", url=host + "/most-popular/"))
- itemlist.append(item.clone(action="categorias", title="Categorías", url=host + "/categories/"))
- itemlist.append(item.clone(action="categorias", title="Modelos",
- url=host + "/models/?mode=async&function=get_block&block_id=list_models_models" \
- "_list&sort_by=total_videos"))
- itemlist.append(item.clone(action="playlists", title="Listas", url=host + "/playlists/"))
- itemlist.append(item.clone(action="tags", title="Tags", url=host + "/tags/"))
- itemlist.append(item.clone(title="Buscar...", action="search"))
- itemlist.append(item.clone(action="configuracion", title="Configurar canal...", text_color="gold", folder=False))
-
- return itemlist
-
-
-def configuracion(item):
- from platformcode import platformtools
- ret = platformtools.show_channel_settings()
- platformtools.itemlist_refresh()
- return ret
-
-
-def search(item, texto):
- logger.info()
- item.url = "%s/search/%s/" % (host, texto.replace("+", "-"))
- item.extra = texto
- try:
- return lista(item)
- # Catch the exception so a failing channel does not break the global search
- except:
- import sys
- for line in sys.exc_info():
- logger.error("%s" % line)
- return []
-
-
-def lista(item):
- logger.info()
- itemlist = []
-
- # Download the page
- data = httptools.downloadpage(item.url).data
-
- action = "play"
- if config.get_setting("menu_info", "freecambay"):
- action = "menu_info"
-
- # Extract the entries
- patron = '<a href="([^"]+)" title="([^"]+)"[^>]*>.*?<img src="([^"]+)"(.*?)<div class="duration">([^<]+)<'
- matches = scrapertools.find_multiple_matches(data, patron)
- for scrapedurl, scrapedtitle, scrapedthumbnail, quality, duration in matches:
- if duration:
- scrapedtitle = "%s - %s" % (duration, scrapedtitle)
- if '>HD<' in quality:
- scrapedtitle += " [COLOR red][HD][/COLOR]"
-
- itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
- fanart=scrapedthumbnail))
-
- # Extract the next-page marker
- if item.extra:
- next_page = scrapertools.find_single_match(data, '<li class="next">.*?from_videos\+from_albums:(\d+)')
- if next_page:
- if "from_videos=" in item.url:
- next_page = re.sub(r'&from_videos=(\d+)', '&from_videos=%s' % next_page, item.url)
- else:
- next_page = "%s?mode=async&function=get_block&block_id=list_videos_videos_list_search_result" \
- "&q=%s&category_ids=&sort_by=post_date&from_videos=%s" % (item.url, item.extra, next_page)
- itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
- else:
- next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]*)"')
- if next_page and not next_page.startswith("#"):
- next_page = urlparse.urljoin(host, next_page)
- itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
- else:
- next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
- if next_page:
- if "from=" in item.url:
- next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
- else:
- next_page = "%s?mode=async&function=get_block&block_id=list_videos_common_videos_list&sort_by=post_date&from=%s" % (
- item.url, next_page)
- itemlist.append(item.clone(action="lista", title=">> Página Siguiente", url=next_page))
-
- return itemlist
-
-
-def categorias(item):
- logger.info()
- itemlist = []
-
- # Download the page
- data = httptools.downloadpage(item.url).data
-
- # Extract the entries
- patron = '<a href="([^"]+)" title="([^"]+)"[^>]*>.*?<img src="([^"]+)".*?<span class="videos">([^<]+)<'
- matches = scrapertools.find_multiple_matches(data, patron)
- for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches:
- if videos:
- scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
- itemlist.append(item.clone(action="lista", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
- fanart=scrapedthumbnail))
-
- # Extract the next-page marker
- next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
- if next_page:
- if "from=" in item.url:
- next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
- else:
- next_page = "%s&from=%s" % (item.url, next_page)
- itemlist.append(item.clone(action="categorias", title=">> Página Siguiente", url=next_page))
-
- return itemlist
-
-
-def playlists(item):
- logger.info()
- itemlist = []
-
- # Download the page
- data = httptools.downloadpage(item.url).data
-
- # Extract the entries
- patron = '<a href="([^"]+)" title="([^"]+)"[^>]*>.*?<img src="([^"]+)".*?<span class="videos">([^<]+)<'
- matches = scrapertools.find_multiple_matches(data, patron)
- for scrapedurl, scrapedtitle, scrapedthumbnail, videos in matches:
- if videos:
- scrapedtitle = "%s (%s)" % (scrapedtitle, videos)
- itemlist.append(item.clone(action="videos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
- fanart=scrapedthumbnail))
-
- # Extract the next-page marker
- next_page = scrapertools.find_single_match(data, '<li class="next">.*?href="([^"]+)"')
- if next_page:
- next_page = urlparse.urljoin(host, next_page)
- itemlist.append(item.clone(action="playlists", title=">> Página Siguiente", url=next_page))
-
- return itemlist
-
-
-def videos(item):
- logger.info()
- itemlist = []
-
- # Download the page
- data = httptools.downloadpage(item.url).data
-
- action = "play"
- if config.get_setting("menu_info", "freecambay"):
- action = "menu_info"
- # Extract the entries
- patron = '<a href="([^"]+)"[^>]*><img src="([^"]+)"[^>]*>\s*([^<]+)<'
- matches = scrapertools.find_multiple_matches(data, patron)
- for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
- scrapedtitle = scrapedtitle.strip()
- itemlist.append(item.clone(action=action, title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
- fanart=scrapedthumbnail))
-
- # Extract the next-page marker
- next_page = scrapertools.find_single_match(data, '<li class="next">.*?from:(\d+)')
- if next_page:
- if "from=" in item.url:
- next_page = re.sub(r'&from=(\d+)', '&from=%s' % next_page, item.url)
- else:
- next_page = "%s?mode=async&function=get_block&block_id=playlist_view_playlist_view&sort_by" \
- "=added2fav_date&&from=%s" % (item.url, next_page)
- itemlist.append(item.clone(action="videos", title=">> Página Siguiente", url=next_page))
-
- return itemlist
-
-
-def play(item):
- logger.info()
- itemlist = []
-
- data = httptools.downloadpage(item.url).data
-
- patron = '(?:video_url|video_alt_url[0-9]*)\s*:\s*\'([^\']+)\'.*?(?:video_url_text|video_alt_url[0-9]*_text)\s*:\s*\'([^\']+)\''
- matches = scrapertools.find_multiple_matches(data, patron)
- if not matches:
- patron = '<source src="([^"]+)" type="video/([^"]+)"'
- matches = scrapertools.find_multiple_matches(data, patron)
- for media_url, calidad in matches:
- itemlist.append(item.clone(action="play", title=calidad, url=media_url, server="directo", folder=False))
-
- return itemlist
-
-
-def tags(item):
- logger.info()
- itemlist = []
- letras = []
-
- data = httptools.downloadpage(item.url).data
- if not item.extra:
- bloque = scrapertools.find_single_match(data, '<div class="letters">(.*?)</div>')
- matches = scrapertools.find_multiple_matches(bloque, '<a[^>]*>\s*(.*?)</a>')
- for title in matches:
- title = title.strip()
- if title not in letras:
- letras.append(title)
- itemlist.append(Item(channel=item.channel, action="tags", url=item.url, title=title, extra=title))
- else:
- if not item.length:
- item.length = 0
-
- bloque = scrapertools.find_single_match(data,
- '>%s(.*?)(?:(?!%s)(?!#)[A-Z#]{1}|