diff options
Diffstat (limited to 'module/plugins/internal/SimpleCrypter.py')
| -rw-r--r-- | module/plugins/internal/SimpleCrypter.py | 159 | 
1 file changed, 0 insertions, 159 deletions
# -*- coding: utf-8 -*-

import re

from urlparse import urlparse

from module.plugins.Crypter import Crypter
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns, set_cookies
from module.utils import fixup


class SimpleCrypter(Crypter, SimpleHoster):
    __name__    = "SimpleCrypter"
    __type__    = "crypter"
    __version__ = "0.38"  #: bumped: checkNameSize url-fallback fix, narrowed except in handlePages

    __pattern__ = r'^unmatchable$'
    __config__  = [("use_subfolder", "bool", "Save package to subfolder", True),  #: Overrides core.config['general']['folder_per_package']
                   ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]

    __description__ = """Simple decrypter plugin"""
    __license__     = "GPLv3"
    __authors__     = [("stickell", "l.stickell@yahoo.it"),
                       ("zoidberg", "zoidberg@mujmail.cz"),
                       ("Walter Purcaro", "vuolter@gmail.com")]


    """
    Following patterns should be defined by each crypter:

      LINK_PATTERN: Download link or regex to catch links in group(1)
        example: LINK_PATTERN = r'<div class="link"><a href="(.+?)"'

      NAME_PATTERN: (optional) folder name or page title
        example: NAME_PATTERN = r'<title>Files of: (?P<N>[^<]+) folder</title>'

      OFFLINE_PATTERN: (optional) Checks if the page is unreachable
        example: OFFLINE_PATTERN = r'File (deleted|not found)'

      TEMP_OFFLINE_PATTERN: (optional) Checks if the page is temporarily unreachable
        example: TEMP_OFFLINE_PATTERN = r'Server maintainance'


    You can override the getLinks method if you need a more sophisticated way to extract the links.


    If the links are splitted on multiple pages you can define the PAGES_PATTERN regex:

      PAGES_PATTERN: (optional) group(1) should be the number of overall pages containing the links
        example: PAGES_PATTERN = r'Pages: (\d+)'

    and its loadPage method:


      def loadPage(self, page_n):
          return the html of the page number page_n
    """

    LINK_PATTERN = None

    NAME_REPLACEMENTS = [(r"&#?\w+;", fixup)]  #: unescape HTML entities found in folder names
    URL_REPLACEMENTS  = []

    TEXT_ENCODING = False  #: Set to True or encoding name if encoding in http header is not correct
    COOKIES       = True  #: or False or list of tuples [(domain, name, value)]

    LOGIN_ACCOUNT = False  #: Set to True if decrypting requires a valid account
    LOGIN_PREMIUM = False  #: Set to True if decrypting requires a premium account


    #@TODO: Remove in 0.4.10
    def init(self):
        """
        Attach a usable account plugin (if one exists for this crypter) so that
        requests are made with its session and its premium status is honored.
        """
        #: Crypter plugins are conventionally named "<Hoster>Folder"; strip the
        #: suffix to find the matching account plugin name.
        account_name = (self.__name__ + ".py").replace("Folder.py", "").replace(".py", "")
        account = self.core.accountManager.getAccountPlugin(account_name)

        if account and account.canUse():
            self.user, data = account.selectAccount()
            self.req = account.getAccountRequest(self.user)
            self.premium = account.isPremium(self.user)

            self.account = account


    def prepare(self):
        """
        Reset per-run state, enforce LOGIN_ACCOUNT/LOGIN_PREMIUM requirements,
        install cookies and normalize the pyfile url.
        """
        self.info  = {}
        self.links = []  #@TODO: Move to hoster class in 0.4.10

        if self.LOGIN_PREMIUM and not self.premium:
            self.fail(_("Required premium account not found"))

        if self.LOGIN_ACCOUNT and not self.account:
            self.fail(_("Required account not found"))

        self.req.setOption("timeout", 120)

        if isinstance(self.COOKIES, list):
            set_cookies(self.req.cj, self.COOKIES)

        self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS)


    def decrypt(self, pyfile):
        """
        Entry point: load the page, extract every link (following extra result
        pages when PAGES_PATTERN/loadPage are defined) and build the package.
        """
        self.prepare()

        self.preload()
        self.checkInfo()

        self.links = self.getLinks()

        if hasattr(self, 'PAGES_PATTERN') and hasattr(self, 'loadPage'):
            self.handlePages(pyfile)

        self.logDebug("Package has %d links" % len(self.links))

        if self.links:
            self.packages = [(self.info['name'], self.links, self.info['folder'])]

        elif not self.urls and not self.packages:  #@TODO: Remove in 0.4.10
            self.fail(_("No link grabbed"))  #: wrapped in _() for consistency with the other fail messages


    def checkNameSize(self, getinfo=True):
        """
        Update pyfile.name and the package folder from self.info, falling back
        to the url's path basename when no usable name was extracted.
        """
        if getinfo:
            self.logDebug("File info (BEFORE): %s" % self.info)
            self.info.update(self.getInfo(self.pyfile.url, self.html))
            self.logDebug("File info (AFTER): %s"  % self.info)

        name = self.info['name']
        url  = self.info['url']

        if name and name != url:
            self.pyfile.name = name
        else:
            #: This branch runs when name is empty or identical to the url, so the
            #: fallback basename must be derived from the url (was: urlparse(name),
            #: which yields an empty name for an empty input).
            self.pyfile.name = self.info['name'] = urlparse(url).path.split('/')[-1]

        folder = self.info['folder'] = self.pyfile.name

        self.logDebug("File name: %s"   % self.pyfile.name,
                      "File folder: %s" % folder)


    def getLinks(self):
        """
        Returns the links extracted from self.html
        You should override this only if it's impossible to extract links using only the LINK_PATTERN.
        """
        return re.findall(self.LINK_PATTERN, self.html)


    def handlePages(self, pyfile):
        """Walk the remaining result pages (2..n) and collect their links too."""
        try:
            pages = int(re.search(self.PAGES_PATTERN, self.html).group(1))
        except Exception:  #: no match (AttributeError) or unparsable count (ValueError) -> assume a single page
            pages = 1

        for p in xrange(2, pages + 1):
            self.html = self.loadPage(p)
            self.links += self.getLinks()
