Add files via upload
33  plugin.video.alfa/channels/torrentrapid.json  Normal file
@@ -0,0 +1,33 @@
{
    "id": "torrentrapid",
    "name": "Torrentrapid",
    "active": true,
    "adult": false,
    "language": ["cast", "lat"],
    "thumbnail": "torrentrapid.png",
    "banner": "torrentrapid.png",
    "categories": [
        "movie",
        "tvshow",
        "anime",
        "torrent"
    ],
    "settings": [
        {
            "id": "include_in_global_search",
            "type": "bool",
            "label": "Incluir en búsqueda global",
            "default": false,
            "enabled": true,
            "visible": true
        },
        {
            "id": "include_in_newest_torrent",
            "type": "bool",
            "label": "Incluir en Novedades - Torrent",
            "default": true,
            "enabled": true,
            "visible": true
        }
    ]
}
117  plugin.video.alfa/channels/torrentrapid.py  Normal file
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-

import re

import requests

from core import httptools
from core import scrapertools
from core import servertools
from core.item import Item
from platformcode import logger

Host = 'http://torrentrapid.com'


def mainlist(item):
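    # Main menu: the movie and series sections plus a search entry.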
    logger.info()

    itemlist = []
    itemlist.append(Item(channel=item.channel, action="submenu", title="Películas", url=Host + "/peliculas/"))
    itemlist.append(Item(channel=item.channel, action="submenu", title="Series", url=Host + "/series/"))
    itemlist.append(Item(channel=item.channel, action="search", title="Buscar"))
    return itemlist


def search(item, texto):
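    # POST the query to the site's search endpoint and scrape the result list.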
    logger.info()
    itemlist = []

    payload = {'q': texto}
    response = requests.post(Host + "/buscar", data=payload)

    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", response.text)
    patron_data = '<ul class="buscar-list">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)

    # Strip the "Descargar ..." prefixes the site prepends to every title
    data_listado = re.sub("Descargar Todas ", "", data_listado)
    data_listado = re.sub(u"Descargar Pel\xedculas ", "", data_listado)
    data_listado = re.sub("Descargar ", "", data_listado)
    patron_listado = '<li><a href="(.+?)" title="(.+?)"><img src="(.+?)"'

    matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
    for scrapedurl, scrapedtitle, scrapedimg in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedimg, action="findvideos"))

    return itemlist


def submenu(item):
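    # Scrape the dropdown of categories under the selected section of the site menu.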
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<li><a href="' + item.url + '"><i.+?<ul>(.+?)</ul>'  # Select the menu block by its url
    data_cat = scrapertools.find_single_match(data, patron)
    patron_cat = '<li><a href="(.+?)" title="(.+?)".+?</a></li>'
    matches = scrapertools.find_multiple_matches(data_cat, patron_cat)
    for scrapedurl, scrapedtitle in matches:
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, action="listado"))
    return itemlist


def listado(item):
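    # List the titles on a category page and queue the next results page.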
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_data = '<ul class="pelilist">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron_listado = '<li><a href="(.+?)" title=".+?"><img src="(.+?)".+?><h2'
    # Series listings wrap the title in one extra tag
    if 'Serie' in item.title:
        patron_listado += '.+?>'
    else:
        patron_listado += '>'
    patron_listado += '(.+?)</h2><span>(.+?)</span></a></li>'
    matches = scrapertools.find_multiple_matches(data_listado, patron_listado)
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedquality in matches:
        if 'Serie' in item.title:
            action = "episodios"
        else:
            action = "findvideos"
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   action=action, quality=scrapedquality, show=scrapedtitle))

    # Next page
    patron_pag = '<ul class="pagination"><li><a class="current" href=".+?">.+?</a>.+?<a href="(.+?)">'
    siguiente = scrapertools.find_single_match(data, patron_pag)
    itemlist.append(
        Item(channel=item.channel, title="[COLOR cyan]Página Siguiente >>[/COLOR]", url=siguiente, action="listado"))
    return itemlist


def episodios(item):
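    # List the episodes available for the selected series.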
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron_data = '<ul class="buscar-list">(.+?)</ul>'
    data_listado = scrapertools.find_single_match(data, patron_data)
    patron = '<img src="(.+?)" alt=".+?">.+?<div class=".+?">.+?<a href="(.+?)" title=".+?">.+?>Serie.+?>(.+?)<'
    matches = scrapertools.find_multiple_matches(data_listado, patron)
    for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
        # Batch entries ("x al y") scrape a doubled href; keep only the first url
        if " al " in scrapedtitle:
            scrapedurl = "http" + scrapedurl.split('http')[1]
        itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail,
                                   action="findvideos", show=scrapedtitle))
    return itemlist


def findvideos(item):
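    # Collect hoster links found in the page plus the direct torrent link.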
    logger.info()

    data = httptools.downloadpage(item.url).data
    itemlist = servertools.find_video_items(data=data)

    # The torrent link is exposed through an inline javascript redirect
    url = scrapertools.find_single_match(data, 'location.href = "([^"]+)"')
    itemlist.append(Item(url=url, title="Torrent", server="torrent", action="play"))

    for it in itemlist:
        it.channel = item.channel
    return itemlist