(.*?)ha sido descargada'
- matchesenlaces = re.compile(patronbloque_enlaces, re.DOTALL).findall(data)
- for enlaces in matchesenlaces:
- if "serie" in item.url:
- try:
- temp_check = scrapertools.find_single_match(enlaces,
- 'icono_.*?png".*?alt=".*?".*?| (\d+\d+;\d+)<\/td>.*? | .*?<\/td>')
- if temp_check == "":
- temp_check = scrapertools.find_single_match(enlaces,
- 'icono_.*?png".*?alt=".*?".*? | (\d+\d+;\d+-\d+)<\/td>.*? | .*?<\/td>')
- if temp_check == "":
- check = ""
- else:
- check = "yes"
- else:
- check = "yes"
- except:
- check = ""
-
- else:
- check = "pelicula"
-
- if "Completa" in item.title and check == "" or not "Completa" in item.title and check == "":
- if krypton:
- patron = 'icono_.*?png" title="(.*?)".*? | .*?<.*? | (.*?)<.*?(\d+\d+;\d+)<\/td>.*? | .*?<\/td>')
- if temp_check == "":
- check = ""
- else:
- check = "yes"
- except:
- check = ""
-
- idioma = re.sub(r'\(Contra.*?\)', '', idioma)
- if "Completa" in peso and check == "":
- continue
- if krypton:
- url = url
- else:
- url = "http://" + url
- torrents_path = config.get_videolibrary_path() + '/torrents'
- if not os.path.exists(torrents_path):
- os.mkdir(torrents_path)
- try:
- urllib.urlretrieve("http://anonymouse.org/cgi-bin/anon-www.cgi/" + url,
- torrents_path + "/temp.torrent")
- pepe = open(torrents_path + "/temp.torrent", "rb").read()
- except:
- pepe = ""
- torrent = decode(pepe)
- try:
- name = torrent["info"]["name"]
- sizet = torrent["info"]['length']
- sizet = convert_size(sizet)
- except:
- name = "no disponible"
- try:
- check_video = scrapertools.find_multiple_matches(str(torrent["info"]["files"]), "'length': (\d+)}")
- size = max([int(i) for i in check_video])
- for file in torrent["info"]["files"]:
- manolo = "%r - %d bytes" % ("/".join(file["path"]), file["length"])
- if str(size) in manolo:
- video = manolo
- size = convert_size(size)
- ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\[.*?\]|\(.*?\)|.*?\.", "", video)
- try:
- os.remove(torrents_path + "/temp.torrent")
- except:
- pass
- except:
- try:
- size = sizet
- ext_v = re.sub(r"-.*? bytes|.*?\[.*?\].|'|.*?COM.|.*?\.es.|.*?\[.*?\]|.*?\(.*?\)\.|.*?\.", "",
- name)
- except:
- size = "NO REPRODUCIBLE"
- ext_v = ""
- try:
- os.remove(torrents_path + "/temp.torrent")
- except:
- pass
- if "rar" in ext_v:
- ext_v = ext_v + " -- No reproducible"
- size = ""
-
- title = "[COLOR gold][B]" + idioma + "[/B][/COLOR]" + "-" + "[COLOR lemonchiffon][B]" + calidad + "[/B][/COLOR]" + "-" + "[COLOR khaki] ( Video" + "[/COLOR]" + " " + "[COLOR khaki]" + ext_v + "[/COLOR]" + " " + "[COLOR khaki]" + size + " )" + "[/COLOR]"
-
- if "series" in item.url and not "Completa" in item.title or check != "" and check != "pelicula":
- year = item.extra.split("|")[1]
- # idioma= re.sub(r"-.*","",idioma)
- check = calidad + "|" + peso + "|" + idioma
- temp_epi = re.compile('(\d).*?;(\d+)', re.DOTALL).findall(check)
-
- for temp, epi in temp_epi:
- url_tmdb2 = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[
- 5] + "/season/" + temp + "/images?api_key=" + api_key + ""
- data = httptools.downloadpage(url_tmdb2).data
- data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
- patron = '{"id".*?"file_path":"(.*?)","height"'
- matches = re.compile(patron, re.DOTALL).findall(data)
- if len(matches) == 0:
- thumbnail = item.thumbnail
- for thumtemp in matches:
- thumbnail = "https://image.tmdb.org/t/p/original" + thumtemp
-
- if check_epi == epi and check_calidad != peso and not "Especial:" in idioma or "Especial:" in idioma and check_especial == "yes":
- check_info = "no"
- title = " [COLOR mediumslateblue][B]Versión[/B][/COLOR]" + " " + "[COLOR royalblue][B]" + peso + "[/B][/COLOR]" + "[COLOR turquoise] ( Video" + "[/COLOR]" + " " + "[COLOR turquoise]" + ext_v + "[/COLOR]" + " " + "[COLOR turquoise]" + size + " )" + "[/COLOR]"
- else:
- check_info = "yes"
- if "Especial:" in idioma:
- check_especial = "yes"
- title = "[COLOR steelblue][B]" + idioma + "[/B][/COLOR]" + "-" + "[COLOR lightskyblue][B]" + calidad + "[/B][/COLOR]" + "-" + "[COLOR royalblue][B]" + peso + "[/B][/COLOR]" + "[COLOR turquoise] ( Video" + "[/COLOR]" + " " + "[COLOR turquoise]" + ext_v + "[/COLOR]" + " " + "[COLOR turquoise]" + size + " )" + "[/COLOR]"
-
- check_epi = epi
- check_calidad = peso
-
- itemlist.append(Item(channel=item.channel, title=title, action="play", url=url, server="torrent",
- thumbnail=thumbnail, extra=item.extra, show=item.show,
- fanart=item.show.split("|")[0], folder=False))
-
- if "series" in item.url:
- if check_info == "yes":
- extra = item.extra + "|" + temp + "|" + epi
- if "-" in idioma:
- temp_epi2 = re.compile('\d.*?;\d+-(\d+)', re.DOTALL).findall(check)
- for epi2 in temp_epi2:
- len_epis = int(epi2) - int(epi)
- if len_epis == 1:
- check_iepi2 = "ok"
- title_info = " Info Cap." + epi
- title_info = "[COLOR skyblue]" + title_info + "[/COLOR]"
- itemlist.append(
- Item(channel=item.channel, action="info_capitulos", title=title_info,
- url=item.url, thumbnail=thumbnail, fanart=item.show.split("|")[0],
- extra=extra, show=item.show, category=item.category, folder=False))
- else:
- epis_len = range(int(epi), int(epi2) + 1)
- extra = item.extra + "|" + temp + "|" + str(epis_len)
- title_info = " Info Capítulos"
- title_info = "[COLOR skyblue]" + title_info + "[/COLOR]"
- itemlist.append(
- Item(channel=item.channel, action="capitulos", title=title_info, url=item.url,
- thumbnail=thumbnail, fanart=item.show.split("|")[0], extra=extra,
- show=item.show, category=item.category, folder=True))
- check_iepi2 = ""
- else:
- title_info = " Info"
- title_info = "[COLOR skyblue]" + title_info + "[/COLOR]"
- itemlist.append(
- Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url,
- thumbnail=thumbnail, fanart=item.show.split("|")[0], extra=extra, show=item.show,
- category=item.category, folder=False))
-
- if check_iepi2 == "ok":
- extra = item.extra + "|" + temp + "|" + epi2
- title_info = " Info Cap." + epi2
- title_info = "[COLOR skyblue]" + title_info + "[/COLOR]"
- itemlist.append(
- Item(channel=item.channel, action="info_capitulos", title=title_info, url=item.url,
- thumbnail=thumbnail, fanart=item.show.split("|")[0], extra=extra, show=item.show,
- category=item.category, folder=False))
+ else:
+ for index, url in enumerate(item.url):
+ title = "%sx%s [%s] [%s]" % (item.contentSeason, item.contentEpisodeNumber, item.lang, item.quality[index])
+ itemlist.append(item.clone(action="play", title=title, url=url, thumbnail=item.thumbnail, server="torrent",
+ quality=item.quality[index]))
return itemlist
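
For reference, a minimal sketch of the new per-URL loop added above. Item and clone() are stand-ins here (in the addon they come from core.item); the season, language, qualities and URLs are made up:

class Item(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
    def clone(self, **kwargs):
        # copy the item's attributes, then override with the new ones
        attrs = dict(self.__dict__)
        attrs.update(kwargs)
        return Item(**attrs)

item = Item(contentSeason=1, contentEpisodeNumber=5, lang="ES",
            quality=["720p", "1080p"],
            url=["http://host/a.torrent", "http://host/b.torrent"])
itemlist = []
for index, url in enumerate(item.url):
    title = "%sx%s [%s] [%s]" % (item.contentSeason, item.contentEpisodeNumber,
                                 item.lang, item.quality[index])
    itemlist.append(item.clone(action="play", title=title, url=url,
                               server="torrent", quality=item.quality[index]))
print([i.title for i in itemlist])  # -> ['1x5 [ES] [720p]', '1x5 [ES] [1080p]']
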
-
-
-def capitulos(item):
- logger.info()
- itemlist = []
- url = item.url
- capis = item.extra.split("|")[3]
- capis = re.sub(r'\[|\]', '', capis)
- capis = [int(k) for k in capis.split(',')]
- for i in capis:
- extra = item.extra.split("|")[0] + "|" + item.extra.split("|")[1] + "|" + item.extra.split("|")[2] + "|" + str(
- i)
- itemlist.append(
- Item(channel=item.channel, action="info_capitulos", title="[COLOR skyblue]Info Cap." + str(i) + "[/COLOR]",
- url=item.url, thumbnail=item.thumbnail, fanart=item.show.split("|")[0], extra=extra, show=item.show,
- category=item.category, folder=False))
- return itemlist
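
The deleted capitulos() round-trips the episode range as the string form of a Python list ("[3, 4, 5]") inside the pipe-separated extra field. A small sketch of that parsing, with a hypothetical extra value:

import re

extra = "fanart.jpg|2017|3|[3, 4, 5]"        # hypothetical extra value
capis = extra.split("|")[3]                  # "[3, 4, 5]"
capis = re.sub(r'\[|\]', '', capis)          # "3, 4, 5"
capis = [int(k) for k in capis.split(',')]   # [3, 4, 5]
print(capis)
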
-
-
-def info(item):
- logger.info()
- itemlist = []
- url = item.url
- id = item.extra
-
- if "serie" in item.url:
- try:
- rating_tmdba_tvdb = item.extra.split("|")[6]
- if item.extra.split("|")[6] == "":
- rating_tmdba_tvdb = "Sin puntuación"
- except:
- rating_tmdba_tvdb = "Sin puntuación"
- else:
- rating_tmdba_tvdb = item.extra.split("|")[3]
- rating_filma = item.extra.split("|")[4]
- print "eztoquee"
- print rating_filma
- print rating_tmdba_tvdb
-
- filma = "http://s6.postimg.org/6yhe5fgy9/filma.png"
-
- try:
- if "serie" in item.url:
- title = item.extra.split("|")[8]
-
- else:
- title = item.extra.split("|")[6]
- title = title.replace("%20", " ")
- title = "[COLOR yellow][B]" + title + "[/B][/COLOR]"
- except:
- title = item.title
-
- try:
- if "." in rating_tmdba_tvdb:
- check_rat_tmdba = scrapertools.get_match(rating_tmdba_tvdb, '(\d+).')
- else:
- check_rat_tmdba = rating_tmdba_tvdb
- if int(check_rat_tmdba) >= 5 and int(check_rat_tmdba) < 8:
- rating = "[COLOR springgreen][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
- elif int(check_rat_tmdba) >= 8 or rating_tmdba_tvdb == 10:
- rating = "[COLOR yellow][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
- else:
- rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
- print "lolaymaue"
- except:
- rating = "[COLOR crimson][B]" + rating_tmdba_tvdb + "[/B][/COLOR]"
- if "10." in rating:
- rating = re.sub(r'10\.\d+', '10', rating)
- try:
- check_rat_filma = scrapertools.get_match(rating_filma, '(\d)')
- print "paco"
- print check_rat_filma
- if int(check_rat_filma) >= 5 and int(check_rat_filma) < 8:
- print "dios"
- print check_rat_filma
- rating_filma = "[COLOR springgreen][B]" + rating_filma + "[/B][/COLOR]"
- elif int(check_rat_filma) >= 8:
-
- print check_rat_filma
- rating_filma = "[COLOR yellow][B]" + rating_filma + "[/B][/COLOR]"
- else:
- rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
- print "rojo??"
- print check_rat_filma
- except:
- rating_filma = "[COLOR crimson][B]" + rating_filma + "[/B][/COLOR]"
-
- try:
- if not "serie" in item.url:
- url_plot = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
- 1] + "?api_key=" + api_key + "&append_to_response=credits&language=es"
- data_plot = scrapertools.cache_page(url_plot)
- plot, tagline = scrapertools.find_single_match(data_plot, '"overview":"(.*?)",.*?"tagline":(".*?")')
- if plot == "":
- plot = item.show.split("|")[2]
-
- plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
- plot = re.sub(r"\\", "", plot)
-
- else:
- plot = item.show.split("|")[2]
- plot = "[COLOR moccasin][B]" + plot + "[/B][/COLOR]"
- plot = re.sub(r"\\", "", plot)
-
- if item.extra.split("|")[7] != "":
- tagline = item.extra.split("|")[7]
- # tagline= re.sub(r',','.',tagline)
- else:
- tagline = ""
- except:
- title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
- plot = "Esta pelicula no tiene informacion..."
- plot = plot.replace(plot, "[COLOR yellow][B]" + plot + "[/B][/COLOR]")
- photo = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
- foto = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
- info = ""
-
- if "serie" in item.url:
- check2 = "serie"
- thumb_busqueda = "http://imgur.com/84pleyQ.png"
- icon = "http://s6.postimg.org/hzcjag975/tvdb.png"
- foto = item.show.split("|")[1]
- if item.extra.split("|")[5] != "":
- critica = item.extra.split("|")[5]
- else:
- critica = "Esta serie no tiene críticas..."
-
- photo = item.extra.split("|")[0].replace(" ", "%20")
- try:
- tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
- except:
- tagline = ""
-
- else:
- thumb_busqueda = "http://imgur.com/Slbtn28.png"
- critica = item.extra.split("|")[5]
- if "%20" in critica:
- critica = "No hay críticas"
- icon = "http://imgur.com/SenkyxF.png"
-
- photo = item.extra.split("|")[0].replace(" ", "%20")
- foto = item.show.split("|")[1]
- if foto == item.thumbnail:
- foto = "http://imgur.com/5jEL62c.jpg"
-
- try:
- if tagline == "\"\"":
- tagline = " "
- except:
- tagline = " "
- tagline = "[COLOR aquamarine][B]" + tagline + "[/B][/COLOR]"
- check2 = "pelicula"
- # You may also be interested in
- peliculas = []
- if "serie" in item.url:
-
- url_tpi = "http://api.themoviedb.org/3/tv/" + item.show.split("|")[
- 5] + "/recommendations?api_key=" + api_key + "&language=es"
- data_tpi = scrapertools.cachePage(url_tpi)
- tpi = scrapertools.find_multiple_matches(data_tpi,
- 'id":(.*?),.*?"original_name":"(.*?)",.*?"poster_path":(.*?),')
-
- else:
- url_tpi = "http://api.themoviedb.org/3/movie/" + item.extra.split("|")[
- 1] + "/recommendations?api_key=" + api_key + "&language=es"
- data_tpi = scrapertools.cachePage(url_tpi)
- tpi = scrapertools.find_multiple_matches(data_tpi,
- 'id":(.*?),.*?"original_title":"(.*?)",.*?"poster_path":(.*?),')
-
- for idp, peli, thumb in tpi:
-
- thumb = re.sub(r'"|}', '', thumb)
- if "null" in thumb:
- thumb = "http://s6.postimg.org/tw1vhymj5/noposter.png"
- else:
- thumb = "https://image.tmdb.org/t/p/original" + thumb
- peliculas.append([idp, peli, thumb])
-
- check2 = check2.replace("pelicula", "movie").replace("serie", "tvshow")
- infoLabels = {'title': title, 'plot': plot, 'thumbnail': photo, 'fanart': foto, 'tagline': tagline,
- 'rating': rating}
- item_info = item.clone(info=infoLabels, icon=icon, extra=id, rating=rating, rating_filma=rating_filma,
- critica=critica, contentType=check2, thumb_busqueda=thumb_busqueda)
- from channels import infoplus
- infoplus.start(item_info, peliculas)
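
The recommendations block above scrapes the raw TMDB response with regexes. A hedged sketch of the same /recommendations call using the documented JSON fields instead (the API key is a placeholder; movie id 550 is just an example):

import json
import urllib2

API_KEY = "YOUR_TMDB_API_KEY"   # placeholder, not a real key
url = ("http://api.themoviedb.org/3/movie/550/recommendations"
       "?api_key=%s&language=es" % API_KEY)
data = json.loads(urllib2.urlopen(url).read())
for movie in data.get("results", []):
    poster = movie.get("poster_path")
    thumb = ("https://image.tmdb.org/t/p/original" + poster) if poster \
        else "http://s6.postimg.org/tw1vhymj5/noposter.png"
    print("%s | %s | %s" % (movie["id"], movie["title"], thumb))
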
-
-
-def info_capitulos(item):
- logger.info()
- url = "https://api.themoviedb.org/3/tv/" + item.show.split("|")[5] + "/season/" + item.extra.split("|")[
- 2] + "/episode/" + item.extra.split("|")[3] + "?api_key=" + api_key + "&language=es"
-
- if "/0" in url:
- url = url.replace("/0", "/")
-
- data = httptools.downloadpage(url).data
- data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
-
- patron = '],"name":"(.*?)","overview":"(.*?)".*?"still_path":(.*?),"vote_average":(\d+\.\d).*?,"'
- matches = re.compile(patron, re.DOTALL).findall(data)
-
- if len(matches) == 0:
-
- url = "http://thetvdb.com/api/1D62F2F90030C444/series/" + item.category + "/default/" + item.extra.split("|")[
- 2] + "/" + item.extra.split("|")[3] + "/es.xml"
- if "/0" in url:
- url = url.replace("/0", "/")
- data = httptools.downloadpage(url).data
- data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
-
- patron = '<EpisodeName>([^<]+)</EpisodeName>.*?<Overview>(.*?)</Overview>.*?<Rating>(.*?)</Rating>'
-
- matches = re.compile(patron, re.DOTALL).findall(data)
-
- if len(matches) == 0:
-
- title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
- plot = "Este capitulo no tiene informacion..."
- plot = "[COLOR yellow][B]" + plot + "[/B][/COLOR]"
- image = "http://s6.postimg.org/ub7pb76c1/noinfo.png"
- foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
- rating = ""
-
-
- else:
-
- for name_epi, info, rating in matches:
- if "episodes" in data:
- foto = scrapertools.get_match(data, '<filename>(.*?)</filename>')
- fanart = "http://thetvdb.com/banners/" + foto
- else:
- fanart = "http://imgur.com/ZiEAVOD.png"
- plot = info
- plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]"
- title = name_epi.upper()
- title = "[COLOR bisque][B]" + title + "[/B][/COLOR]"
- image = fanart
- foto = item.extra.split("|")[0]
- if not ".png" in foto:
- if "serie" in item.url:
- foto = "http://imgur.com/6uXGkrz.png"
- else:
- foto = "http://i.imgur.com/5jEL62c.png"
- foto = re.sub(r'\(.*?\)|" "|" "', '', foto)
- foto = re.sub(r' ', '', foto)
- try:
-
- check_rating = scrapertools.get_match(rating, '(\d+).')
-
- if int(check_rating) >= 5 and int(check_rating) < 8:
- rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]"
- elif int(check_rating) >= 8 and int(check_rating) < 10:
- rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]"
- elif int(check_rating) == 10:
- rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]"
- else:
- rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
-
- except:
- rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
- if "10." in rating:
- rating = re.sub(r'10\.\d+', '10', rating)
- else:
- for name_epi, info, fanart, rating in matches:
- if info == "" or info == "\\":
- info = "Sin informacion del capítulo aún..."
- plot = info
- plot = re.sub(r'/n', '', plot)
- plot = "[COLOR peachpuff][B]" + plot + "[/B][/COLOR]"
- title = name_epi.upper()
- title = "[COLOR bisque][B]" + title + "[/B][/COLOR]"
- image = fanart
- image = re.sub(r'"|}', '', image)
- if "null" in image:
- image = "http://imgur.com/ZiEAVOD.png"
- else:
- image = "https://image.tmdb.org/t/p/original" + image
- foto = item.extra.split("|")[0]
- if not ".png" in foto:
- if "serie" in item.url:
- foto = "http://imgur.com/6uXGkrz.png"
- else:
- foto = "http://i.imgur.com/5jEL62c.png"
- foto = re.sub(r'\(.*?\)|" "|" "', '', foto)
- foto = re.sub(r' ', '', foto)
- try:
-
- check_rating = scrapertools.get_match(rating, '(\d+).')
-
- if int(check_rating) >= 5 and int(check_rating) < 8:
- rating = "Puntuación " + "[COLOR springgreen][B]" + rating + "[/B][/COLOR]"
- elif int(check_rating) >= 8 and int(check_rating) < 10:
- rating = "Puntuación " + "[COLOR yellow][B]" + rating + "[/B][/COLOR]"
- elif int(check_rating) == 10:
- rating = "Puntuación " + "[COLOR orangered][B]" + rating + "[/B][/COLOR]"
- else:
- rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
-
- except:
- rating = "Puntuación " + "[COLOR crimson][B]" + rating + "[/B][/COLOR]"
- if "10." in rating:
- rating = re.sub(r'10\.\d+', '10', rating)
- ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto, rating=rating)
- ventana.doModal()
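
A hedged sketch of the episode lookup that info_capitulos() performs against TMDB, parsing the JSON instead of regex-matching the body (the key is a placeholder; tv id 1396, season 1, episode 1 are just example values):

import json
import urllib2

API_KEY = "YOUR_TMDB_API_KEY"   # placeholder
url = ("https://api.themoviedb.org/3/tv/1396/season/1/episode/1"
       "?api_key=%s&language=es" % API_KEY)
epi = json.loads(urllib2.urlopen(url).read())
print(epi["name"])                # episode title
print(epi["overview"])            # plot
print(epi.get("vote_average"))    # rating
if epi.get("still_path"):
    print("https://image.tmdb.org/t/p/original" + epi["still_path"])
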
-
-
-class TextBox2(xbmcgui.WindowDialog):
- """ Create a skinned textbox window """
-
- def __init__(self, *args, **kwargs):
- self.getTitle = kwargs.get('title')
- self.getPlot = kwargs.get('plot')
- self.getThumbnail = kwargs.get('thumbnail')
- self.getFanart = kwargs.get('fanart')
- self.getRating = kwargs.get('rating')
-
- self.background = xbmcgui.ControlImage(70, 20, 1150, 630, 'http://imgur.com/gh1GShA.jpg')
- self.title = xbmcgui.ControlTextBox(120, 60, 430, 50)
- self.rating = xbmcgui.ControlTextBox(145, 112, 1030, 45)
- self.plot = xbmcgui.ControlTextBox(120, 150, 1056, 100)
- self.thumbnail = xbmcgui.ControlImage(120, 300, 1056, 300, self.getThumbnail)
- self.fanart = xbmcgui.ControlImage(780, 43, 390, 100, self.getFanart)
-
- self.addControl(self.background)
- self.background.setAnimations(
- [('conditional', 'effect=slide start=1000% end=0% time=1500 condition=true tween=bounce',),
- ('WindowClose', 'effect=slide delay=800 start=0% end=1000% time=800 condition=true',)])
- self.addControl(self.thumbnail)
- self.thumbnail.setAnimations([('conditional',
- 'effect=zoom start=0% end=100% delay=2700 time=1500 condition=true tween=elastic easing=inout',),
- ('WindowClose', 'effect=slide end=0,700% time=300 condition=true',)])
- self.addControl(self.plot)
- self.plot.setAnimations(
- [('conditional', 'effect=zoom delay=2000 center=auto start=0 end=100 time=800 condition=true ',), (
- 'conditional',
- 'effect=rotate delay=2000 center=auto acceleration=6000 start=0% end=360% time=800 condition=true',),
- ('WindowClose', 'effect=zoom center=auto start=100% end=-0% time=600 condition=true',)])
- self.addControl(self.fanart)
- self.fanart.setAnimations(
- [('WindowOpen', 'effect=slide start=0,-700 delay=1000 time=2500 tween=bounce condition=true',), (
- 'conditional',
- 'effect=rotate center=auto start=0% end=360% delay=3000 time=2500 tween=bounce condition=true',),
- ('WindowClose', 'effect=slide end=0,-700% time=1000 condition=true',)])
- self.addControl(self.title)
- self.title.setText(self.getTitle)
- self.title.setAnimations(
- [('conditional', 'effect=slide start=-1500% end=0% delay=1000 time=2000 condition=true tween=elastic',),
- ('WindowClose', 'effect=slide start=0% end=-1500% time=800 condition=true',)])
- self.addControl(self.rating)
- self.rating.setText(self.getRating)
- self.rating.setAnimations(
- [('conditional', 'effect=fade start=0% end=100% delay=3000 time=1500 condition=true',),
- ('WindowClose', 'effect=slide end=0,-700% time=600 condition=true',)])
- xbmc.sleep(200)
-
- try:
- self.plot.autoScroll(7000, 6000, 30000)
- except:
-
- xbmc.executebuiltin(
- 'Notification([COLOR red][B]Actualiza Kodi a su última versión[/B][/COLOR], [COLOR skyblue]para mejor info[/COLOR],8000,"https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/kodi-icon.png")')
- self.plot.setText(self.getPlot)
-
- def get(self):
- self.show()
-
- def onAction(self, action):
- if action == ACTION_PREVIOUS_MENU or action.getId() == ACTION_GESTURE_SWIPE_LEFT or action == 110 or action == 92:
- self.close()
-
-
-def test():
- return True
-
-
-def translate(to_translate, to_langage="auto", langage="auto"):
- '''Return the translation using Google Translate.
- Use the short code of the target language (French = fr, English = en, Spanish = es, etc.);
- if you don't define anything, the source is auto-detected and English is used by default.
- Example:
- print(translate("salut tu vas bien?", "en"))
- hello you alright?'''
- agents = {
- 'User-Agent': "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"}
- before_trans = 'class="t0">'
- link = "http://translate.google.com/m?hl=%s&sl=%s&q=%s" % (to_langage, langage, to_translate.replace(" ", "+"))
- import urllib2
- request = urllib2.Request(link, headers=agents)
- page = urllib2.urlopen(request).read()
- result = page[page.find(before_trans) + len(before_trans):]
- result = result.split("<")[0]
- return result
-
-
-if __name__ == '__main__':
- to_translate = 'Hola como estas?'
- print("%s >> %s" % (to_translate, translate(to_translate)))
- print("%s >> %s" % (to_translate, translate(to_translate, 'fr')))
-
-
-# should print Hola como estas >> Hello how are you
-# and Hola como estas? >> Bonjour comment allez-vous?
-
-
-
-def tokenize(text, match=re.compile("([idel])|(\d+):|(-?\d+)").match):
- i = 0
- while i < len(text):
- m = match(text, i)
- s = m.group(m.lastindex)
- i = m.end()
- if m.lastindex == 2:
- yield "s"
- yield text[i:i + int(s)]
- i = i + int(s)
- else:
- yield s
-
-
-def decode_item(next, token):
- if token == "i":
- # integer: "i" value "e"
- data = int(next())
- if next() != "e":
- raise ValueError
- elif token == "s":
- # string: "s" value (virtual tokens)
- data = next()
- elif token == "l" or token == "d":
- # container: "l" (or "d") values "e"
- data = []
- tok = next()
- while tok != "e":
- data.append(decode_item(next, tok))
- tok = next()
- if token == "d":
- data = dict(zip(data[0::2], data[1::2]))
- else:
- raise ValueError
- return data
-
-
-def decode(text):
- try:
- src = tokenize(text)
- data = decode_item(src.next, src.next())
- for token in src: # look for more tokens
- data = data
- except (AttributeError, ValueError, StopIteration):
- try:
- data = data
- except:
- data = src
-
- return data
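
Usage sketch for the bencode decoder above: decode() takes the raw contents of a .torrent file (here a small inline literal) and returns nested dicts/lists:

sample = "d4:infod4:name8:demo.mkv6:lengthi1500000eee"
torrent = decode(sample)
print(torrent["info"]["name"])     # -> demo.mkv
print(torrent["info"]["length"])   # -> 1500000
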
-
-
-def convert_size(size):
- import math
- if (size == 0):
- return '0B'
- size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
- i = int(math.floor(math.log(size, 1024)))
- p = math.pow(1024, i)
- s = round(size / p, 2)
- return '%s %s' % (s, size_name[i])
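
Quick worked examples for convert_size(): floor(log1024(size)) selects the unit, and the mantissa is rounded to two decimals:

print(convert_size(0))            # -> 0B
print(convert_size(1536))         # -> 1.5 KB
print(convert_size(734003200))    # -> 700.0 MB
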
-
-
-def busqueda(item):
- logger.info()
- cat = [item.extra.split("|")[0].replace("tv", "serie"), 'torrent']
- new_item = Item()
- new_item.extra = item.extra.split("|")[1].replace("+", " ")
- new_item.category = item.extra.split("|")[0]
-
- from channels import search
- return search.do_search(new_item, cat)
From c7a87b50059d127394b545ca0ef47b688ba2f2dd Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sun, 1 Oct 2017 08:59:24 -0500
Subject: [PATCH 06/14] Update allcalidad.py
---
plugin.video.alfa/channels/allcalidad.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/plugin.video.alfa/channels/allcalidad.py b/plugin.video.alfa/channels/allcalidad.py
index 0894a0a5..ede804b0 100755
--- a/plugin.video.alfa/channels/allcalidad.py
+++ b/plugin.video.alfa/channels/allcalidad.py
@@ -138,8 +138,8 @@ def findvideos(item):
if config.get_videolibrary_support():
itemlist.append(Item(channel=item.channel, title="Añadir a la videoteca", text_color="green",
filtro=True, action="add_pelicula_to_library", url=item.url, thumbnail = item.thumbnail,
- infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle,
- extra="library"))
+ infoLabels={'title': item.fulltitle}, fulltitle=item.fulltitle
+ ))
return itemlist
From 5ba56221b0474331e84d49e92e39dff29a072ac4 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sun, 1 Oct 2017 09:05:30 -0500
Subject: [PATCH 07/14] Update okru.json
---
plugin.video.alfa/servers/okru.json | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/plugin.video.alfa/servers/okru.json b/plugin.video.alfa/servers/okru.json
index 8985c4ab..12e4973e 100755
--- a/plugin.video.alfa/servers/okru.json
+++ b/plugin.video.alfa/servers/okru.json
@@ -22,7 +22,7 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "//(?:www.)?ok.../(?:videoembed|video)/(\\d+)",
+ "pattern": "(?:www.)?ok.../(?:videoembed|video)/(\\d+)",
"url": "http://ok.ru/videoembed/\\1"
}
]
@@ -63,4 +63,4 @@
],
"thumbnail": "server_okru.png",
"version": 1
-}
\ No newline at end of file
+}
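
A quick check of the loosened okru pattern; dropping the leading "//" lets scheme-less and subdomain embeds match too (the test URLs are made up):

import re

pattern = r"(?:www.)?ok.../(?:videoembed|video)/(\d+)"
for url in ["https://ok.ru/video/123456",
            "m.ok.ru/videoembed/987654"]:   # no scheme: now matches as well
    m = re.search(pattern, url)
    print(m.group(1) if m else "no match")
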
From 410512fa8ff3187be3e170f70b20bf234731a0ee Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sun, 1 Oct 2017 09:37:00 -0500
Subject: [PATCH 08/14] Update openload.json
---
plugin.video.alfa/servers/openload.json | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/plugin.video.alfa/servers/openload.json b/plugin.video.alfa/servers/openload.json
index 4097199e..0fb0d7ef 100755
--- a/plugin.video.alfa/servers/openload.json
+++ b/plugin.video.alfa/servers/openload.json
@@ -18,7 +18,7 @@
"ignore_urls": [],
"patterns": [
{
- "pattern": "(?:openload|oload).../(?:embed|f)/([0-9a-zA-Z-_]+)",
+ "pattern": "(?:openload|oload).*?/(?:embed|f)/([0-9a-zA-Z-_]+)",
"url": "https://openload.co/embed/\\1/"
}
]
@@ -57,4 +57,4 @@
],
"thumbnail": "server_openload.png",
"version": 1
-}
\ No newline at end of file
+}
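
The openload change swaps the fixed three-character "..." for a lazy ".*?", so mirrors with longer TLDs also match; a sketch with a made-up URL:

import re

old = r"(?:openload|oload).../(?:embed|f)/([0-9a-zA-Z-_]+)"
new = r"(?:openload|oload).*?/(?:embed|f)/([0-9a-zA-Z-_]+)"
url = "https://oload.stream/embed/AbC123/"
print(re.search(old, url))              # None: "..." spans exactly 3 chars
print(re.search(new, url).group(1))     # -> AbC123
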
From 427ae87549e6baa97eef48dff897d676c9dd82e5 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sun, 1 Oct 2017 09:38:49 -0500
Subject: [PATCH 09/14] Update vidz7.py
---
plugin.video.alfa/channels/vidz7.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/plugin.video.alfa/channels/vidz7.py b/plugin.video.alfa/channels/vidz7.py
index adc38e80..410ac9f7 100755
--- a/plugin.video.alfa/channels/vidz7.py
+++ b/plugin.video.alfa/channels/vidz7.py
@@ -3,7 +3,9 @@
import re
import urlparse
+from core import httptools
from core import scrapertools
+from core import servertools
from core.item import Item
from platformcode import logger
@@ -38,7 +40,7 @@ def search(item, texto):
def categorias(item):
logger.info()
itemlist = []
- data = scrapertools.cache_page(item.url)
+ data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
patron = '(.*?)'
matches = re.compile(patron, re.DOTALL).findall(data)
@@ -52,7 +54,7 @@ def lista(item):
logger.info()
# Download the page
- data = scrapertools.cache_page(item.url)
+ data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}", "", data)
# Extract the entries from the selected page
@@ -83,10 +85,8 @@ def play(item):
logger.info()
itemlist = []
# Download the page
- data = scrapertools.cachePage(item.url)
+ data = httptools.downloadpage(item.url).data
data = scrapertools.unescape(data)
- logger.info(data)
- from core import servertools
itemlist.extend(servertools.find_video_items(data=data))
for videoitem in itemlist:
videoitem.thumbnail = item.thumbnail
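
The vidz7 edits follow the repo-wide migration from scrapertools.cache_page()/cachePage() to httptools.downloadpage(). A minimal sketch of the resulting fetch-and-extract flow (it only runs inside the addon, where the core modules are importable):

from core import httptools, scrapertools, servertools

def fetch_videos(url):
    # downloadpage() replaces the deprecated cache_page()/cachePage() calls
    data = httptools.downloadpage(url).data
    data = scrapertools.unescape(data)
    return servertools.find_video_items(data=data)
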
From 8b96d4d9623fa2e412b839e89d411660b368d2b7 Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sun, 1 Oct 2017 10:16:12 -0500
Subject: [PATCH 10/14] Update gvideo.json
---
plugin.video.alfa/servers/gvideo.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/plugin.video.alfa/servers/gvideo.json b/plugin.video.alfa/servers/gvideo.json
index c185fcf8..a5d78fe4 100644
--- a/plugin.video.alfa/servers/gvideo.json
+++ b/plugin.video.alfa/servers/gvideo.json
@@ -14,7 +14,7 @@
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
- "pattern": "(?s)https://drive.google.com/file/d/([^/]+)/preview",
+ "pattern": "(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/preview",
"url": "http://docs.google.com/get_video_info?docid=\\1"
},
{
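
The gvideo pattern now accepts both docs.google.com and drive.google.com preview URLs; a quick check (the file ids are placeholders):

import re

pattern = r"(?s)https://(?:docs|drive).google.com/file/d/([^/]+)/preview"
for url in ["https://drive.google.com/file/d/FILE_ID_1/preview",
            "https://docs.google.com/file/d/FILE_ID_2/preview"]:
    print(re.search(pattern, url).group(1))
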
From 70d1f45117192287f8cb7d5bf0ad29d0eddd0e6f Mon Sep 17 00:00:00 2001
From: Intel1 <25161862+Intel11@users.noreply.github.com>
Date: Sun, 1 Oct 2017 10:20:21 -0500
Subject: [PATCH 11/14] Update cartoonlatino.py
---
plugin.video.alfa/channels/cartoonlatino.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/plugin.video.alfa/channels/cartoonlatino.py b/plugin.video.alfa/channels/cartoonlatino.py
index 1cc9d289..4ed18ee4 100644
--- a/plugin.video.alfa/channels/cartoonlatino.py
+++ b/plugin.video.alfa/channels/cartoonlatino.py
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-
+# -*- coding: utf-8 -*-
import re
@@ -174,10 +174,11 @@ def findvideos(item):
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
data_function = scrapertools.find_single_match(data, "\(adsbygoogle = window\.adsbygoogle \|\| \[\]\)\.push\({}\);<\/script><\/div> (.+?)<\/ins>")
+ data_id = scrapertools.find_single_match(data, " .*?")
itemla = scrapertools.find_multiple_matches(data_function, "src='(.+?)'")
serverid = scrapertools.find_multiple_matches(data_id, ' |