diff --git a/plugin.video.alfa/channels/maxipelis.json b/plugin.video.alfa/channels/maxipelis.json
new file mode 100644
index 00000000..293a229d
--- /dev/null
+++ b/plugin.video.alfa/channels/maxipelis.json
@@ -0,0 +1,30 @@
+{
+ "id": "maxipelis",
+ "name": "Maxipelis",
+ "active": true,
+ "adult": false,
+ "language": "es",
+ "version": 1,
+ "thumbnail": "http://www.maxipelis.net/wp-content/uploads/2016/12/applogo.png",
+ "banner": "",
+ "changes": [
+ {
+ "date": "25/08/2017",
+ "description": "Nuevo canal"
+ }
+ ],
+ "categories": [
+ "movie",
+ "adult"
+ ],
+ "settings": [
+ {
+ "id": "include_in_global_search",
+ "type": "bool",
+ "label": "Incluir en busqueda global",
+ "default": true,
+ "enabled": true,
+ "visible": true
+ }
+ ]
+}
diff --git a/plugin.video.alfa/channels/maxipelis.py b/plugin.video.alfa/channels/maxipelis.py
new file mode 100644
index 00000000..5a16c2c7
--- /dev/null
+++ b/plugin.video.alfa/channels/maxipelis.py
@@ -0,0 +1,149 @@
+# -*- coding: iso-8859-1 -*-
+#------------------------------------------------------------
+# Alfa
+#------------------------------------------------------------
+import urlparse,urllib2,urllib,re
+import os, sys
+
+from core import jsontools as json
+from core import scrapertools
+from core import servertools
+from core.item import Item
+from platformcode import config, logger
+from core import httptools
+from core import tmdb
+
+
+
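+# Base URL of the site this channel scrapes.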
+host = 'http://www.maxipelis.net'
+
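+# mainlist builds the channel's root menu: latest releases, categories and search.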
+def mainlist(item):
+    logger.info()
+    itemlist = []
+    itemlist.append(Item(channel=item.channel, title="Novedades", action="peliculas", url=host + "/pelicula"))
+
+    itemlist.append(Item(channel=item.channel, title="Categorias", action="categorias", url=host))
+    itemlist.append(Item(channel=item.channel, title="Buscar", action="search"))
+    return itemlist
+
+
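+# Entry point for Alfa's global search; "texto" carries the user's query.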
+def search(item, texto):
+    logger.info()
+    texto = texto.replace(" ", "+")
+    item.url = host + "/?s=%s" % texto
+
+    try:
+        return sub_search(item)
+
+    except:
+        # Log the exception details and fail soft so global search keeps working.
+        for line in sys.exc_info():
+            logger.error("%s" % line)
+        return []
+
+
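+# Scrapes a results/listing page and turns every match into a "findvideos" item.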
+def sub_search(item):
+    logger.info()
+    itemlist = []
+    data = httptools.downloadpage(item.url).data
+    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
+
+    patron = '(.*?)'
+    matches = scrapertools.find_multiple_matches(data, patron)
+
+    for url, img, name, plot in matches:
+        itemlist.append(item.clone(channel=item.channel, action="findvideos", title=name, url=url, plot=plot,
+                                   thumbnail=img))
+
+    paginacion = scrapertools.find_single_match(data, '