Diffstat (limited to 'module/plugins/internal')
| -rw-r--r-- | module/plugins/internal/AbstractExtractor.py | 109 |
| -rw-r--r-- | module/plugins/internal/CaptchaService.py | 213 |
| -rw-r--r-- | module/plugins/internal/DeadCrypter.py | 32 |
| -rw-r--r-- | module/plugins/internal/DeadHoster.py | 32 |
| -rw-r--r-- | module/plugins/internal/MultiHoster.py | 206 |
| -rw-r--r-- | module/plugins/internal/SimpleCrypter.py | 165 |
| -rw-r--r-- | module/plugins/internal/SimpleHoster.py | 569 |
| -rw-r--r-- | module/plugins/internal/UnRar.py | 221 |
| -rw-r--r-- | module/plugins/internal/UnZip.py | 41 |
| -rw-r--r-- | module/plugins/internal/XFSAccount.py | 160 |
| -rw-r--r-- | module/plugins/internal/XFSCrypter.py | 29 |
| -rw-r--r-- | module/plugins/internal/XFSHoster.py | 345 |
| -rw-r--r-- | module/plugins/internal/__init__.py | 0 |
13 files changed, 0 insertions, 2122 deletions
diff --git a/module/plugins/internal/AbstractExtractor.py b/module/plugins/internal/AbstractExtractor.py
deleted file mode 100644
index 8a69ebb56..000000000
--- a/module/plugins/internal/AbstractExtractor.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# -*- coding: utf-8 -*-
-
-class ArchiveError(Exception):
-    pass
-
-
-class CRCError(Exception):
-    pass
-
-
-class WrongPassword(Exception):
-    pass
-
-
-class AbtractExtractor:
-    __name__    = "AbtractExtractor"
-    __version__ = "0.1"
-
-    __description__ = """Abtract extractor plugin"""
-    __license__     = "GPLv3"
-    __authors__     = [("pyLoad Team", "admin@pyload.org")]
-
-
-    @staticmethod
-    def checkDeps():
-        """ Check if system statisfy dependencies
-        :return: boolean
-        """
-        return True
-
-
-    @staticmethod
-    def getTargets(files_ids):
-        """ Filter suited targets from list of filename id tuple list
-        :param files_ids: List of filepathes
-        :return: List of targets, id tuple list
-        """
-        raise NotImplementedError
-
-
-    def __init__(self, m, file, out, fullpath, overwrite, excludefiles, renice):
-        """Initialize extractor for specific file
-
-        :param m: ExtractArchive Hook plugin
-        :param file: Absolute filepath
-        :param out: Absolute path to destination directory
-        :param fullpath: extract to fullpath
-        :param overwrite: Overwrite existing archives
-        :param renice: Renice value
-        """
-        self.m = m
-        self.file = file
-        self.out = out
-        self.fullpath = fullpath
-        self.overwrite = overwrite
-        self.excludefiles = excludefiles
-        self.renice = renice
-        self.files = []  #: Store extracted files here
-
-
-    def init(self):
-        """ Initialize additional data structures """
-        pass
-
-
-    def checkArchive(self):
-        """Check if password if needed. Raise ArchiveError if integrity is
-        questionable.
-
-        :return: boolean
-        :raises ArchiveError
-        """
-        return False
-
-
-    def checkPassword(self, password):
-        """ Check if the given password is/might be correct.
-        If it can not be decided at this point return true.
-
-        :param password:
-        :return: boolean
-        """
-        return True
-
-
-    def extract(self, progress, password=None):
-        """Extract the archive. Raise specific errors in case of failure.
-
-        :param progress: Progress function, call this to update status
-        :param password password to use
-        :raises WrongPassword
-        :raises CRCError
-        :raises ArchiveError
-        :return:
-        """
-        raise NotImplementedError
-
-
-    def getDeleteFiles(self):
-        """Return list of files to delete, do *not* delete them here.
- -        :return: List with paths of files to delete -        """ -        raise NotImplementedError - - -    def getExtractedFiles(self): -        """Populate self.files at some point while extracting""" -        return self.files diff --git a/module/plugins/internal/CaptchaService.py b/module/plugins/internal/CaptchaService.py deleted file mode 100644 index 7009e6986..000000000 --- a/module/plugins/internal/CaptchaService.py +++ /dev/null @@ -1,213 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from random import random - - -class CaptchaService: -    __name__    = "CaptchaService" -    __version__ = "0.15" - -    __description__ = """Base captcha service plugin""" -    __license__     = "GPLv3" -    __authors__     = [("pyLoad Team", "admin@pyload.org")] - - -    KEY_PATTERN = None - -    key = None  #: last key detected - - -    def __init__(self, plugin): -        self.plugin = plugin - - -    def detect_key(self, html=None): -        if not html: -            if hasattr(self.plugin, "html") and self.plugin.html: -                html = self.plugin.html -            else: -                errmsg = _("%s html not found") % self.__name__ -                self.plugin.fail(errmsg)  #@TODO: replace all plugin.fail(errmsg) with plugin.error(errmsg) in 0.4.10 -                raise TypeError(errmsg) - -        m = re.search(self.KEY_PATTERN, html) -        if m: -            self.key = m.group("KEY") -            self.plugin.logDebug("%s key: %s" % (self.__name__, self.key)) -            return self.key -        else: -            self.plugin.logDebug("%s key not found" % self.__name__) -            return None - - -    def challenge(self, key=None): -        raise NotImplementedError - - -    def result(self, server, challenge): -        raise NotImplementedError - - -class ReCaptcha(CaptchaService): -    __name__    = "ReCaptcha" -    __version__ = "0.08" - -    __description__ = """ReCaptcha captcha service plugin""" -    __license__     = "GPLv3" -    __authors__     = [("pyLoad Team", "admin@pyload.org")] - - -    KEY_PATTERN = r'recaptcha(/api|\.net)/(challenge|noscript)\?k=(?P<KEY>[\w-]+)' -    KEY_AJAX_PATTERN = r'Recaptcha\.create\s*\(\s*["\'](?P<KEY>[\w-]+)' - - -    def detect_key(self, html=None): -        if not html: -            if hasattr(self.plugin, "html") and self.plugin.html: -                html = self.plugin.html -            else: -                errmsg = _("ReCaptcha html not found") -                self.plugin.fail(errmsg) -                raise TypeError(errmsg) - -        m = re.search(self.KEY_PATTERN, html) or re.search(self.KEY_AJAX_PATTERN, html) -        if m: -            self.key = m.group("KEY") -            self.plugin.logDebug("ReCaptcha key: %s" % self.key) -            return self.key -        else: -            self.plugin.logDebug("ReCaptcha key not found") -            return None - - -    def challenge(self, key=None): -        if not key: -            if self.detect_key(): -                key = self.key -            else: -                errmsg = _("ReCaptcha key not found") -                self.plugin.fail(errmsg) -                raise TypeError(errmsg) - -        js = self.plugin.req.load("http://www.google.com/recaptcha/api/challenge", get={'k': key}) -        try: -            challenge = re.search("challenge : '(.+?)',", js).group(1) -            server = re.search("server : '(.+?)',", js).group(1) -        except: -            self.plugin.error("ReCaptcha challenge pattern not found") - -        result = self.result(server, 
challenge) - -        self.plugin.logDebug("ReCaptcha result: %s" % result, "challenge: %s" % challenge) - -        return challenge, result - - -    def result(self, server, challenge): -        return self.plugin.decryptCaptcha("%simage" % server, get={'c': challenge}, -                                          cookies=True, forceUser=True, imgtype="jpg") - - -class AdsCaptcha(CaptchaService): -    __name__    = "AdsCaptcha" -    __version__ = "0.05" - -    __description__ = """AdsCaptcha captcha service plugin""" -    __license__     = "GPLv3" -    __authors__     = [("pyLoad Team", "admin@pyload.org")] - - -    ID_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?[^"\']*CaptchaId=(?P<ID>\d+)' -    KEY_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?[^"\']*PublicKey=(?P<KEY>[\w-]+)' - - -    def detect_key(self, html=None): -        if not html: -            if hasattr(self.plugin, "html") and self.plugin.html: -                html = self.plugin.html -            else: -                errmsg = _("AdsCaptcha html not found") -                self.plugin.fail(errmsg) -                raise TypeError(errmsg) - -        m = re.search(self.ID_PATTERN, html) -        n = re.search(self.KEY_PATTERN, html) -        if m and n: -            self.key = (m.group("ID"), m.group("KEY")) -            self.plugin.logDebug("AdsCaptcha id|key: %s | %s" % self.key) -            return self.key -        else: -            self.plugin.logDebug("AdsCaptcha id or key not found") -            return None - - -    def challenge(self, key=None):  #: key is a tuple(CaptchaId, PublicKey) -        if not key: -            if self.detect_key(): -                key = self.key -            else: -                errmsg = _("AdsCaptcha key not found") -                self.plugin.fail(errmsg) -                raise TypeError(errmsg) - -        CaptchaId, PublicKey = key - -        js = self.plugin.req.load("http://api.adscaptcha.com/Get.aspx", get={'CaptchaId': CaptchaId, 'PublicKey': PublicKey}) -        try: -            challenge = re.search("challenge: '(.+?)',", js).group(1) -            server = re.search("server: '(.+?)',", js).group(1) -        except: -            self.plugin.error("AdsCaptcha challenge pattern not found") - -        result = self.result(server, challenge) - -        self.plugin.logDebug("AdsCaptcha result: %s" % result, "challenge: %s" % challenge) - -        return challenge, result - - -    def result(self, server, challenge): -        return self.plugin.decryptCaptcha("%sChallenge.aspx" % server, get={'cid': challenge, 'dummy': random()}, -                                          cookies=True, imgtype="jpg") - - -class SolveMedia(CaptchaService): -    __name__    = "SolveMedia" -    __version__ = "0.06" - -    __description__ = """SolveMedia captcha service plugin""" -    __license__     = "GPLv3" -    __authors__     = [("pyLoad Team", "admin@pyload.org")] - - -    KEY_PATTERN = r'api\.solvemedia\.com/papi/challenge\.(no)?script\?k=(?P<KEY>.+?)["\']' - - -    def challenge(self, key=None): -        if not key: -            if self.detect_key(): -                key = self.key -            else: -                errmsg = _("SolveMedia key not found") -                self.plugin.fail(errmsg) -                raise TypeError(errmsg) - -        html = self.plugin.req.load("http://api.solvemedia.com/papi/challenge.noscript", get={'k': key}) -        try: -            challenge = re.search(r'<input type=hidden name="adcopy_challenge" id="adcopy_challenge" value="([^"]+)">', -                        
          html).group(1) -            server = "http://api.solvemedia.com/papi/media" -        except: -            self.plugin.error("SolveMedia challenge pattern not found") - -        result = self.result(server, challenge) - -        self.plugin.logDebug("SolveMedia result: %s" % result, "challenge: %s" % challenge) - -        return challenge, result - - -    def result(self, server, challenge): -        return self.plugin.decryptCaptcha(server, get={'c': challenge}, imgtype="gif") diff --git a/module/plugins/internal/DeadCrypter.py b/module/plugins/internal/DeadCrypter.py deleted file mode 100644 index 07c5c3881..000000000 --- a/module/plugins/internal/DeadCrypter.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- - -from urllib import unquote -from urlparse import urlparse - -from module.plugins.internal.SimpleCrypter import create_getInfo -from module.plugins.Crypter import Crypter as _Crypter - - -class DeadCrypter(_Crypter): -    __name__    = "DeadCrypter" -    __type__    = "crypter" -    __version__ = "0.04" - -    __pattern__ = r'^unmatchable$' - -    __description__ = """ Crypter is no longer available """ -    __license__     = "GPLv3" -    __authors__     = [("stickell", "l.stickell@yahoo.it")] - - -    @classmethod -    def getInfo(cls, url="", html=""): -        return {'name': urlparse(unquote(url)).path.split('/')[-1] or _("Unknown"), 'size': 0, 'status': 1, 'url': url} - - -    def setup(self): -        self.pyfile.error = "Crypter is no longer available" -        self.offline()  #@TODO: self.offline("Crypter is no longer available") - - -getInfo = create_getInfo(DeadCrypter) diff --git a/module/plugins/internal/DeadHoster.py b/module/plugins/internal/DeadHoster.py deleted file mode 100644 index 6f3252f70..000000000 --- a/module/plugins/internal/DeadHoster.py +++ /dev/null @@ -1,32 +0,0 @@ -# -*- coding: utf-8 -*- - -from urllib import unquote -from urlparse import urlparse - -from module.plugins.internal.SimpleHoster import create_getInfo -from module.plugins.Hoster import Hoster as _Hoster - - -class DeadHoster(_Hoster): -    __name__    = "DeadHoster" -    __type__    = "hoster" -    __version__ = "0.14" - -    __pattern__ = r'^unmatchable$' - -    __description__ = """ Hoster is no longer available """ -    __license__     = "GPLv3" -    __authors__     = [("zoidberg", "zoidberg@mujmail.cz")] - - -    @classmethod -    def getInfo(cls, url="", html=""): -        return {'name': urlparse(unquote(url)).path.split('/')[-1] or _("Unknown"), 'size': 0, 'status': 1, 'url': url} - - -    def setup(self): -        self.pyfile.error = "Hoster is no longer available" -        self.offline()  #@TODO: self.offline("Hoster is no longer available") - - -getInfo = create_getInfo(DeadHoster) diff --git a/module/plugins/internal/MultiHoster.py b/module/plugins/internal/MultiHoster.py deleted file mode 100644 index 6ec2e4b82..000000000 --- a/module/plugins/internal/MultiHoster.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from module.plugins.Hook import Hook -from module.utils import remove_chars - - -class MultiHoster(Hook): -    __name__    = "MultiHoster" -    __type__    = "hook" -    __version__ = "0.20" - -    __description__ = """Generic MultiHoster plugin""" -    __license__     = "GPLv3" -    __authors__     = [("pyLoad Team", "admin@pyload.org")] - - -    interval = 12 * 60 * 60  #: reload hosters every 12h - -    HOSTER_REPLACEMENTS = [("1fichier.com", "onefichier.com"), ("2shared.com", "twoshared.com"), -                           
("4shared.com", "fourshared.com"), ("cloudnator.com", "shragle.com"), -                           ("easy-share.com", "crocko.com"), ("freakshare.net", "freakshare.com"), -                           ("hellshare.com", "hellshare.cz"), ("ifile.it", "filecloud.io"), -                           ("putlocker.com", "firedrive.com"), ("share-rapid.cz", "multishare.cz"), -                           ("sharerapid.cz", "multishare.cz"), ("ul.to", "uploaded.to"), -                           ("uploaded.net", "uploaded.to")] -    HOSTER_EXCLUDED     = [] - - -    def setup(self): -        self.hosters       = [] -        self.supported     = [] -        self.new_supported = [] - - -    def getConfig(self, option, default=''): -        """getConfig with default value - sublass may not implements all config options""" -        try: -            return self.getConf(option) -        except KeyError: -            return default - - -    def getHosterCached(self): -        if not self.hosters: -            try: -                hosterSet = self.toHosterSet(self.getHoster()) - set(self.HOSTER_EXCLUDED) -            except Exception, e: -                self.logError(e) -                return [] - -            try: -                configMode = self.getConfig('hosterListMode', 'all') -                if configMode in ("listed", "unlisted"): -                    configSet = self.toHosterSet(self.getConfig('hosterList', '').replace('|', ',').replace(';', ',').split(',')) - -                    if configMode == "listed": -                        hosterSet &= configSet -                    else: -                        hosterSet -= configSet - -            except Exception, e: -                self.logError(e) - -            self.hosters = list(hosterSet) - -        return self.hosters - - -    def toHosterSet(self, hosters): -        hosters = set((str(x).strip().lower() for x in hosters)) - -        for rep in self.HOSTER_REPLACEMENTS: -            if rep[0] in hosters: -                hosters.remove(rep[0]) -                hosters.add(rep[1]) - -        hosters.discard('') -        return hosters - - -    def getHoster(self): -        """Load list of supported hoster - -        :return: List of domain names -        """ -        raise NotImplementedError - - -    def coreReady(self): -        if self.cb: -            self.core.scheduler.removeJob(self.cb) - -        self.setConfig("activated", True)  #: config not in sync after plugin reload - -        cfg_interval = self.getConfig("interval", None)  #: reload interval in hours -        if cfg_interval is not None: -            self.interval = cfg_interval * 60 * 60 - -        if self.interval: -            self._periodical() -        else: -            self.periodical() - - -    def initPeriodical(self): -        pass - - -    def periodical(self): -        """reload hoster list periodically""" -        self.logInfo(_("Reloading supported hoster list")) - -        old_supported      = self.supported -        self.supported     = [] -        self.new_supported = [] -        self.hosters       = [] - -        self.overridePlugins() - -        old_supported = [hoster for hoster in old_supported if hoster not in self.supported] -        if old_supported: -            self.logDebug("UNLOAD", ", ".join(old_supported)) -            for hoster in old_supported: -                self.unloadHoster(hoster) - - -    def overridePlugins(self): -        pluginMap    = dict((name.lower(), name) for name in self.core.pluginManager.hosterPlugins.keys()) -        accountList  
= [name.lower() for name, data in self.core.accountManager.accounts.iteritems() if data] -        excludedList = [] - -        for hoster in self.getHosterCached(): -            name = remove_chars(hoster.lower(), "-.") - -            if name in accountList: -                excludedList.append(hoster) -            else: -                if name in pluginMap: -                    self.supported.append(pluginMap[name]) -                else: -                    self.new_supported.append(hoster) - -        if not self.supported and not self.new_supported: -            self.logError(_("No Hoster loaded")) -            return - -        module = self.core.pluginManager.getPlugin(self.__name__) -        klass  = getattr(module, self.__name__) - -        # inject plugin plugin -        self.logDebug("Overwritten Hosters", ", ".join(sorted(self.supported))) -        for hoster in self.supported: -            dict = self.core.pluginManager.hosterPlugins[hoster] -            dict['new_module'] = module -            dict['new_name']   = self.__name__ - -        if excludedList: -            self.logInfo(_("The following hosters were not overwritten - account exists"), ", ".join(sorted(excludedList))) - -        if self.new_supported: -            self.logDebug("New Hosters", ", ".join(sorted(self.new_supported))) - -            # create new regexp -            regexp = r'.*(%s).*' % "|".join([x.replace(".", "\.") for x in self.new_supported]) -            if hasattr(klass, "__pattern__") and isinstance(klass.__pattern__, basestring) and '://' in klass.__pattern__: -                regexp = r'%s|%s' % (klass.__pattern__, regexp) - -            self.logDebug("Regexp", regexp) - -            dict = self.core.pluginManager.hosterPlugins[self.__name__] -            dict['pattern'] = regexp -            dict['re']      = re.compile(regexp) - - -    def unloadHoster(self, hoster): -        dict = self.core.pluginManager.hosterPlugins[hoster] -        if "module" in dict: -            del dict['module'] - -        if "new_module" in dict: -            del dict['new_module'] -            del dict['new_name'] - - -    def unload(self): -        """Remove override for all hosters. 
Scheduler job is removed by hookmanager""" -        for hoster in self.supported: -            self.unloadHoster(hoster) - -        # reset pattern -        klass = getattr(self.core.pluginManager.getPlugin(self.__name__), self.__name__) -        dict  = self.core.pluginManager.hosterPlugins[self.__name__] -        dict['pattern'] = getattr(klass, "__pattern__", r'^unmatchable$') -        dict['re']      = re.compile(dict['pattern']) - - -    def downloadFailed(self, pyfile): -        """remove plugin override if download fails but not if file is offline/temp.offline""" -        if pyfile.hasStatus("failed") and self.getConfig("unloadFailing", True): -            hdict = self.core.pluginManager.hosterPlugins[pyfile.pluginname] -            if "new_name" in hdict and hdict['new_name'] == self.__name__: -                self.logDebug("Unload MultiHoster", pyfile.pluginname, hdict) -                self.unloadHoster(pyfile.pluginname) -                pyfile.setStatus("queued") diff --git a/module/plugins/internal/SimpleCrypter.py b/module/plugins/internal/SimpleCrypter.py deleted file mode 100644 index 53ffaf4a6..000000000 --- a/module/plugins/internal/SimpleCrypter.py +++ /dev/null @@ -1,165 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from urlparse import urlparse - -from module.plugins.Crypter import Crypter -from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns, set_cookies -from module.utils import fixup - - -class SimpleCrypter(Crypter, SimpleHoster): -    __name__    = "SimpleCrypter" -    __type__    = "crypter" -    __version__ = "0.32" - -    __pattern__ = r'^unmatchable$' -    __config__  = [("use_subfolder", "bool", "Save package to subfolder", True),  #: Overrides core.config['general']['folder_per_package'] -                   ("subfolder_per_package", "bool", "Create a subfolder for each package", True)] - -    __description__ = """Simple decrypter plugin""" -    __license__     = "GPLv3" -    __authors__     = [("stickell", "l.stickell@yahoo.it"), -                       ("zoidberg", "zoidberg@mujmail.cz"), -                       ("Walter Purcaro", "vuolter@gmail.com")] - - -    """ -    Following patterns should be defined by each crypter: - -      LINK_PATTERN: group(1) must be a download link or a regex to catch more links -        example: LINK_PATTERN = r'<div class="link"><a href="(.+?)"' - -      NAME_PATTERN: (optional) folder name or webpage title -        example: NAME_PATTERN = r'<title>Files of: (?P<N>[^<]+) folder</title>' - -      OFFLINE_PATTERN: (optional) Checks if the file is yet available online -        example: OFFLINE_PATTERN = r'File (deleted|not found)' - -      TEMP_OFFLINE_PATTERN: (optional) Checks if the file is temporarily offline -        example: TEMP_OFFLINE_PATTERN = r'Server maintainance' - - -    You can override the getLinks method if you need a more sophisticated way to extract the links. 
- - -    If the links are splitted on multiple pages you can define the PAGES_PATTERN regex: - -      PAGES_PATTERN: (optional) group(1) should be the number of overall pages containing the links -        example: PAGES_PATTERN = r'Pages: (\d+)' - -    and its loadPage method: - - -      def loadPage(self, page_n): -          return the html of the page number page_n -    """ - -    LINK_PATTERN = None - -    NAME_REPLACEMENTS = [("&#?\w+;", fixup)] -    URL_REPLACEMENTS  = [] - -    TEXT_ENCODING = False  #: Set to True or encoding name if encoding in http header is not correct -    COOKIES       = True  #: or False or list of tuples [(domain, name, value)] - -    LOGIN_ACCOUNT = False -    LOGIN_PREMIUM = False - - -    #@TODO: Remove in 0.4.10 -    def init(self): -        account_name = (self.__name__ + ".py").replace("Folder.py", "").replace(".py", "") -        account = self.core.accountManager.getAccountPlugin(account_name) - -        if account and account.canUse(): -            self.user, data = account.selectAccount() -            self.req = account.getAccountRequest(self.user) -            self.premium = account.isPremium(self.user) - -            self.account = account - - -    def prepare(self): -        if self.LOGIN_ACCOUNT and not self.account: -            self.fail(_("Required account not found")) - -        if self.LOGIN_PREMIUM and not self.premium: -            self.fail(_("Required premium account not found")) - -        self.info  = {} -        self.links = [] - -        self.req.setOption("timeout", 120) - -        if isinstance(self.COOKIES, list): -            set_cookies(self.req.cj, self.COOKIES) - -        self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS) - - -    def decrypt(self, pyfile): -        self.prepare() - -        self.preload() - -        if self.html is None: -            self.fail(_("No html retrieved")) - -        self.checkInfo() - -        self.links = self.getLinks() - -        if hasattr(self, 'PAGES_PATTERN') and hasattr(self, 'loadPage'): -            self.handleMultiPages() - -        self.logDebug("Package has %d links" % len(self.links)) - -        if self.links: -            self.packages = [(self.info['name'], self.links, self.info['folder'])] - - -    def checkStatus(self): -        status = self.info['status'] - -        if status is 1: -            self.offline() - -        elif status is 6: -            self.tempOffline() - - -    def checkNameSize(self): -        name = self.info['name'] -        url  = self.info['url'] - -        if name and name != url: -            self.pyfile.name = name -        else: -            self.pyfile.name = self.info['name'] = urlparse(name).path.split('/')[-1] - -        folder = self.info['folder'] = self.pyfile.name - -        self.logDebug("File name: %s" % self.pyfile.name, -                      "File folder: %s" % folder) - - -    def getLinks(self): -        """ -        Returns the links extracted from self.html -        You should override this only if it's impossible to extract links using only the LINK_PATTERN. 
-        """ -        return re.findall(self.LINK_PATTERN, self.html) - - -    def handleMultiPages(self): -        try: -            m = re.search(self.PAGES_PATTERN, self.html) -            pages = int(m.group(1)) -        except: -            pages = 1 - -        for p in xrange(2, pages + 1): -            self.html = self.loadPage(p) -            self.links += self.getLinks() diff --git a/module/plugins/internal/SimpleHoster.py b/module/plugins/internal/SimpleHoster.py deleted file mode 100644 index 530b67692..000000000 --- a/module/plugins/internal/SimpleHoster.py +++ /dev/null @@ -1,569 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from time import time -from urllib import unquote -from urlparse import urljoin, urlparse - -from module.PyFile import statusMap as _statusMap -from module.network.CookieJar import CookieJar -from module.network.RequestFactory import getURL -from module.plugins.Hoster import Hoster -from module.plugins.Plugin import Fail -from module.utils import fixup, parseFileSize - - -#@TODO: Adapt and move to PyFile in 0.4.10 -statusMap = dict((v, k) for k, v in _statusMap.iteritems()) - - -#@TODO: Remove in 0.4.10 and redirect to self.error instead -def _error(self, reason, type): -        if not reason and not type: -            type = "unknown" - -        msg  = _("%s error") % type.strip().capitalize() if type else _("Error") -        msg += ": %s" % reason.strip() if reason else "" -        msg += _(" | Plugin may be out of date") - -        raise Fail(msg) - - -#@TODO: Remove in 0.4.10 -def _wait(self, seconds, reconnect): -    if seconds: -        self.setWait(int(seconds) + 1) - -    if reconnect is not None: -        self.wantReconnect = reconnect - -    super(SimpleHoster, self).wait() - - -def replace_patterns(string, ruleslist): -    for r in ruleslist: -        rf, rt = r -        string = re.sub(rf, rt, string) -    return string - - -def set_cookies(cj, cookies): -    for cookie in cookies: -        if isinstance(cookie, tuple) and len(cookie) == 3: -            domain, name, value = cookie -            cj.setCookie(domain, name, value) - - -def parseHtmlTagAttrValue(attr_name, tag): -    m = re.search(r"%s\s*=\s*([\"']?)((?<=\")[^\"]+|(?<=')[^']+|[^>\s\"'][^>\s]*)\1" % attr_name, tag, re.I) -    return m.group(2) if m else None - - -def parseHtmlForm(attr_str, html, input_names={}): -    for form in re.finditer(r"(?P<TAG><form[^>]*%s[^>]*>)(?P<CONTENT>.*?)</?(form|body|html)[^>]*>" % attr_str, -                            html, re.S | re.I): -        inputs = {} -        action = parseHtmlTagAttrValue("action", form.group('TAG')) - -        for inputtag in re.finditer(r'(<(input|textarea)[^>]*>)([^<]*(?=</\2)|)', form.group('CONTENT'), re.S | re.I): -            name = parseHtmlTagAttrValue("name", inputtag.group(1)) -            if name: -                value = parseHtmlTagAttrValue("value", inputtag.group(1)) -                if not value: -                    inputs[name] = inputtag.group(3) or '' -                else: -                    inputs[name] = value - -        if input_names: -            # check input attributes -            for key, val in input_names.iteritems(): -                if key in inputs: -                    if isinstance(val, basestring) and inputs[key] == val: -                        continue -                    elif isinstance(val, tuple) and inputs[key] in val: -                        continue -                    elif hasattr(val, "search") and re.match(val, inputs[key]): -                        continue -       
             break  #: attibute value does not match -                else: -                    break  #: attibute name does not match -            else: -                return action, inputs  #: passed attribute check -        else: -            # no attribute check -            return action, inputs - -    return {}, None  #: no matching form found - - -#: Deprecated -def parseFileInfo(plugin, url="", html=""): -    info = plugin.getInfo(url, html) -    return info['name'], info['size'], info['status'], info['url'] - - -#@TODO: Remove in 0.4.10 -#@NOTE: Every plugin must have own parseInfo classmethod to work with 0.4.10 -def create_getInfo(plugin): -    return lambda urls: [(info['name'], info['size'], info['status'], info['url']) for info in plugin.parseInfo(urls)] - - -def timestamp(): -    return int(time() * 1000) - - -#@TODO: Move to hoster class in 0.4.10 -def _isDirectLink(self, url, resumable=True): -    header = self.load(url, ref=True, just_header=True, decode=True) - -    if not 'location' in header or not header['location']: -        return "" - -    location = header['location'] - -    resumable = False  #@NOTE: Testing... - -    if resumable:  #: sometimes http code may be wrong... -        if 'location' in self.load(location, ref=True, cookies=True, just_header=True, decode=True): -            return "" -    else: -        if not 'code' in header or header['code'] != 302: -            return "" - -    if urlparse(location).scheme: -        link = location -    else: -        p = urlparse(url) -        base = "%s://%s" % (p.scheme, p.netloc) -        link = urljoin(base, location) - -    return link - - -class SimpleHoster(Hoster): -    __name__    = "SimpleHoster" -    __type__    = "hoster" -    __version__ = "0.71" - -    __pattern__ = r'^unmatchable$' - -    __description__ = """Simple hoster plugin""" -    __license__     = "GPLv3" -    __authors__     = [("zoidberg", "zoidberg@mujmail.cz"), -                       ("stickell", "l.stickell@yahoo.it"), -                       ("Walter Purcaro", "vuolter@gmail.com")] - - -    """ -    Info patterns should be defined by each hoster: - -      INFO_PATTERN: (optional) Name and Size of the file -        example: INFO_PATTERN = r'(?P<N>file_name) (?P<S>file_size) (?P<U>size_unit)' -      or -        NAME_PATTERN: (optional) Name that will be set for the file -          example: NAME_PATTERN = r'(?P<N>file_name)' -        SIZE_PATTERN: (optional) Size that will be checked for the file -          example: SIZE_PATTERN = r'(?P<S>file_size) (?P<U>size_unit)' - -      HASHSUM_PATTERN: (optional) Hash code and type of the file -        example: HASHSUM_PATTERN = r'(?P<H>hash_code) (?P<T>MD5)' - -      OFFLINE_PATTERN: (optional) Check if the file is yet available online -        example: OFFLINE_PATTERN = r'File (deleted|not found)' - -      TEMP_OFFLINE_PATTERN: (optional) Check if the file is temporarily offline -        example: TEMP_OFFLINE_PATTERN = r'Server (maintenance|maintainance)' - - -    Error handling patterns are all optional: - -      WAIT_PATTERN: (optional) Detect waiting time -        example: WAIT_PATTERN = r'' - -      PREMIUM_ONLY_PATTERN: (optional) Check if the file can be downloaded only with a premium account -        example: PREMIUM_ONLY_PATTERN = r'Premium account required' - -      ERROR_PATTERN: (optional) Detect any error preventing download -        example: ERROR_PATTERN = r'' - - -    Instead overriding handleFree and handlePremium methods you can define the following patterns for direct 
download: - -      LINK_FREE_PATTERN: (optional) group(1) should be the direct link for free download -        example: LINK_FREE_PATTERN = r'<div class="link"><a href="(.+?)"' - -      LINK_PREMIUM_PATTERN: (optional) group(1) should be the direct link for premium download -        example: LINK_PREMIUM_PATTERN = r'<div class="link"><a href="(.+?)"' -    """ - -    NAME_REPLACEMENTS = [("&#?\w+;", fixup)] -    SIZE_REPLACEMENTS = [] -    URL_REPLACEMENTS  = [] - -    TEXT_ENCODING       = False  #: Set to True or encoding name if encoding value in http header is not correct -    COOKIES             = True   #: or False or list of tuples [(domain, name, value)] -    FORCE_CHECK_TRAFFIC = False  #: Set to True to force checking traffic left for premium account -    CHECK_DIRECT_LINK   = None   #: Set to True to check for direct link, set to None to do it only if self.account is True -    MULTI_HOSTER        = False  #: Set to True to leech other hoster link (according its multihoster hook if available) -    CONTENT_DISPOSITION = False  #: Set to True to replace file name with content-disposition value from http header - - -    @classmethod -    def parseInfo(cls, urls): -        for url in urls: -            url = replace_patterns(url, cls.FILE_URL_REPLACEMENTS if hasattr(cls, "FILE_URL_REPLACEMENTS") else cls.URL_REPLACEMENTS)  #@TODO: Remove FILE_URL_REPLACEMENTS check in 0.4.10 -            yield cls.getInfo(url) - - -    @classmethod -    def getInfo(cls, url="", html=""): -        info = {'name': urlparse(unquote(url)).path.split('/')[-1] or _("Unknown"), 'size': 0, 'status': 3, 'url': url} - -        if not html: -            try: -                if not url: -                    info['error']  = "missing url" -                    info['status'] = 1 -                    raise - -                try: -                    html = getURL(url, cookies=cls.COOKIES, decode=not cls.TEXT_ENCODING) - -                    if isinstance(cls.TEXT_ENCODING, basestring): -                        html = unicode(html, cls.TEXT_ENCODING) - -                except BadHeader, e: -                    info['error'] = "%d: %s" % (e.code, e.content) - -                    if e.code is 404: -                        info['status'] = 1 -                        raise - -                    if e.code is 503: -                        info['status'] = 6 -                        raise -            except: -                return info - -        online = False - -        if hasattr(cls, "OFFLINE_PATTERN") and re.search(cls.OFFLINE_PATTERN, html): -            info['status'] = 1 - -        elif hasattr(cls, "FILE_OFFLINE_PATTERN") and re.search(cls.FILE_OFFLINE_PATTERN, html):  #@TODO: Remove in 0.4.10 -            info['status'] = 1 - -        elif hasattr(cls, "TEMP_OFFLINE_PATTERN") and re.search(cls.TEMP_OFFLINE_PATTERN, html): -            info['status'] = 6 - -        else: -            try: -                info['pattern'] = re.match(cls.__pattern__, url).groupdict()  #: pattern groups will be saved here, please save api stuff to info['api'] -            except: -                pass - -            for pattern in ("FILE_INFO_PATTERN", "INFO_PATTERN", -                            "FILE_NAME_PATTERN", "NAME_PATTERN", -                            "FILE_SIZE_PATTERN", "SIZE_PATTERN", -                            "HASHSUM_PATTERN"):  #@TODO: Remove old patterns starting with "FILE_" in 0.4.10 -                try: -                    attr = getattr(cls, pattern) -                    dict = re.search(attr, 
html).groupdict() - -                    if all(True for k in dict if k not in info['pattern']): -                        info['pattern'].update(dict) - -                except AttributeError: -                    continue - -                else: -                    online = True - -        if online: -            info['status'] = 2 - -            if 'N' in info['pattern']: -                info['name'] = replace_patterns(unquote(info['pattern']['N'].strip()), -                                                cls.FILE_NAME_REPLACEMENTS if hasattr(cls, "FILE_NAME_REPLACEMENTS") else cls.NAME_REPLACEMENTS)  #@TODO: Remove FILE_NAME_REPLACEMENTS check in 0.4.10 - -            if 'S' in info['pattern']: -                size = replace_patterns(info['pattern']['S'] + info['pattern']['U'] if 'U' in info else info['pattern']['S'], -                                        cls.FILE_SIZE_REPLACEMENTS if hasattr(cls, "FILE_SIZE_REPLACEMENTS") else cls.SIZE_REPLACEMENTS)  #@TODO: Remove FILE_SIZE_REPLACEMENTS check in 0.4.10 -                info['size'] = parseFileSize(size) - -            elif isinstance(info['size'], basestring): -                unit = info['units'] if 'units' in info else None -                info['size'] = parseFileSize(info['size'], unit) - -            if 'H' in info['pattern']: -                hashtype = info['pattern']['T'] if 'T' in info['pattern'] else "hash" -                info[hashtype] = info['pattern']['H'] - -        return info - - -    def setup(self): -        self.resumeDownload = self.multiDL = self.premium - - -    def prepare(self): -        self.info      = {} -        self.link      = ""     #@TODO: Move to hoster class in 0.4.10 -        self.directDL  = False  #@TODO: Move to hoster class in 0.4.10 -        self.multihost = False  #@TODO: Move to hoster class in 0.4.10 - -        self.req.setOption("timeout", 120) - -        if isinstance(self.COOKIES, list): -            set_cookies(self.req.cj, self.COOKIES) - -        if (self.MULTI_HOSTER -            and (self.__pattern__ != self.core.pluginManager.hosterPlugins[self.__name__]['pattern'] -                 or re.match(self.__pattern__, self.pyfile.url) is None)): - -            self.logInfo("Multi hoster detected") - -            if self.account: -                self.multihost = True -                return -            else: -                self.fail(_("Only registered or premium users can use url leech feature")) - -        if self.CHECK_DIRECT_LINK is None: -            self.directDL = bool(self.account) - -        self.pyfile.url = replace_patterns(self.pyfile.url, -                                           self.FILE_URL_REPLACEMENTS if hasattr(self, "FILE_URL_REPLACEMENTS") else self.URL_REPLACEMENTS)  #@TODO: Remove FILE_URL_REPLACEMENTS check in 0.4.10 - - -    def preload(self): -        self.html = self.load(self.pyfile.url, cookies=bool(self.COOKIES), decode=not self.TEXT_ENCODING) - -        if isinstance(self.TEXT_ENCODING, basestring): -            self.html = unicode(self.html, self.TEXT_ENCODING) - - -    def process(self, pyfile): -        self.prepare() - -        if self.multihost: -            self.logDebug("Looking for leeched download link...") -            self.handleMulti() - -        elif self.directDL: -            self.logDebug("Looking for direct download link...") -            self.handleDirect() - -        if not self.link: -            self.preload() - -            if self.html is None: -                self.fail(_("No html retrieved")) - -            
self.checkErrors() - -            premium_only = 'error' in self.info and self.info['error'] == "premium-only" - -            self._updateInfo(self.getInfo(pyfile.url, self.html)) - -            self.checkNameSize() - -            #: Usually premium only pages doesn't show any file information -            if not premium_only: -                self.checkStatus() - -            if self.premium and (not self.FORCE_CHECK_TRAFFIC or self.checkTrafficLeft()): -                self.logDebug("Handled as premium download") -                self.handlePremium() - -            elif premium_only: -                self.fail(_("Link require a premium account to be handled")) - -            else: -                self.logDebug("Handled as free download") -                self.handleFree() - -        self.downloadLink(self.link) -        self.checkFile() - - -    def downloadLink(self, link): -        if not link: -            return - -        self.download(link, disposition=self.CONTENT_DISPOSITION) - - -    def checkFile(self): -        if self.checkDownload({'empty': re.compile(r"^$")}) is "empty":  #@TODO: Move to hoster in 0.4.10 -            self.fail(_("Empty file")) - - -    def checkErrors(self): -        if hasattr(self, 'ERROR_PATTERN'): -            m = re.search(self.ERROR_PATTERN, self.html) -            if m: -                errmsg = self.info['error'] = m.group(1) -                self.error(errmsg) - -        if hasattr(self, 'PREMIUM_ONLY_PATTERN'): -            m = re.search(self.PREMIUM_ONLY_PATTERN, self.html) -            if m: -                self.info['error'] = "premium-only" -                return - -        if hasattr(self, 'WAIT_PATTERN'): -            m = re.search(self.WAIT_PATTERN, self.html) -            if m: -                wait_time = sum([int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1}[u.lower()] for v, u in -                                 re.findall(r'(\d+)\s*(hr|hour|min|sec)', m, re.I)]) -                self.wait(wait_time, False) -                return - -        self.info.pop('error', None) - - -    def checkStatus(self): -        status = self.info['status'] - -        if status is 1: -            self.offline() - -        elif status is 6: -            self.tempOffline() - -        elif status is not 2: -            self.logInfo(_("File status: %s") % statusMap[status], -                         _("File info: %s")   % self.info) -            self.error(_("No file info retrieved")) - - -    def checkNameSize(self): -        name = self.info['name'] -        size = self.info['size'] -        url  = self.info['url'] - -        if name and name != url: -            self.pyfile.name = name -        else: -            self.pyfile.name = name = self.info['name'] = urlparse(name).path.split('/')[-1] - -        if size > 0: -            self.pyfile.size = size -        else: -            size = "Unknown" - -        self.logDebug("File name: %s" % name, -                      "File size: %s" % size) - - -    def checkInfo(self): -        self.checkErrors() - -        self._updateInfo(self.getInfo(self.pyfile.url, self.html or "")) - -        self.checkNameSize() -        self.checkStatus() - - -    #: Deprecated -    def getFileInfo(self): -        self.info = {} -        self.checkInfo() -        return self.info - - -    def _updateInfo(self, info): -        self.logDebug(_("File info (before update): %s") % self.info) -        self.info.update(info) -        self.logDebug(_("File info (after update): %s")  % self.info) - - -    def handleDirect(self): 
-        link = _isDirectLink(self, self.pyfile.url, self.resumeDownload) - -        if link: -            self.logInfo(_("Direct download link detected")) - -            self.link = link - -            self._updateInfo(self.getInfo(self.pyfile.url)) -            self.checkNameSize() -        else: -            self.logDebug(_("Direct download link not found")) - - -    def handleMulti(self):  #: Multi-hoster handler -        pass - - -    def handleFree(self): -        if not hasattr(self, 'LINK_FREE_PATTERN'): -            self.fail(_("Free download not implemented")) - -        try: -            m = re.search(self.LINK_FREE_PATTERN, self.html) -            if m is None: -                self.error(_("Free download link not found")) - -            self.link = m.group(1) - -        except Exception, e: -            self.fail(e) - - -    def handlePremium(self): -        if not hasattr(self, 'LINK_PREMIUM_PATTERN'): -            self.fail(_("Premium download not implemented")) - -        try: -            m = re.search(self.LINK_PREMIUM_PATTERN, self.html) -            if m is None: -                self.error(_("Premium download link not found")) - -            self.link = m.group(1) - -        except Exception, e: -            self.fail(e) - - -    def longWait(self, wait_time=None, max_tries=3): -        if wait_time and isinstance(wait_time, (int, long, float)): -            time_str  = "%dh %dm" % divmod(wait_time / 60, 60) -        else: -            wait_time = 900 -            time_str  = _("(unknown time)") -            max_tries = 100 - -        self.logInfo(_("Download limit reached, reconnect or wait %s") % time_str) - -        self.setWait(wait_time, True) -        self.wait() -        self.retry(max_tries=max_tries, reason=_("Download limit reached")) - - -    def parseHtmlForm(self, attr_str="", input_names={}): -        return parseHtmlForm(attr_str, self.html, input_names) - - -    def checkTrafficLeft(self): -        traffic = self.account.getAccountInfo(self.user, True)['trafficleft'] - -        if traffic is None: -            return False -        elif traffic == -1: -            return True -        else: -            size = self.pyfile.size / 1024 -            self.logInfo(_("Filesize: %i KiB, Traffic left for user %s: %i KiB") % (size, self.user, traffic)) -            return size <= traffic - - -    #@TODO: Remove in 0.4.10 -    def wait(self, seconds=0, reconnect=None): -        return _wait(self, seconds, reconnect) - - -    def error(self, reason="", type="parse"): -        return _error(self, reason, type) diff --git a/module/plugins/internal/UnRar.py b/module/plugins/internal/UnRar.py deleted file mode 100644 index c15a4c96e..000000000 --- a/module/plugins/internal/UnRar.py +++ /dev/null @@ -1,221 +0,0 @@ -# -*- coding: utf-8 -*- - -import os -import re - -from glob import glob -from os.path import basename, join -from string import digits -from subprocess import Popen, PIPE - -from module.plugins.internal.AbstractExtractor import AbtractExtractor, WrongPassword, ArchiveError, CRCError -from module.utils import save_join, decode - - -def renice(pid, value): -    if os.name != "nt" and value: -        try: -            Popen(["renice", str(value), str(pid)], stdout=PIPE, stderr=PIPE, bufsize=-1) -        except: -            print "Renice failed" - - -class UnRar(AbtractExtractor): -    __name__    = "UnRar" -    __version__ = "0.19" - -    __description__ = """Rar extractor plugin""" -    __license__     = "GPLv3" -    __authors__     = [("RaNaN", 
"RaNaN@pyload.org")] - - -    CMD = "unrar" - -    # there are some more uncovered rar formats -    re_version   = re.compile(r'UNRAR ([\w .]+?) freeware') -    re_splitfile = re.compile(r'(.*)\.part(\d+)\.rar$', re.I) -    re_partfiles = re.compile(r'.*\.(rar|r\d+)', re.I) -    re_filelist  = re.compile(r'(.+)\s+(\d+)\s+(\d+)\s+') -    re_filelist5 = re.compile(r'(.+)\s+(\d+)\s+\d\d-\d\d-\d\d\s+\d\d:\d\d\s+(.+)') -    re_wrongpwd  = re.compile(r'(Corrupt file or wrong password|password incorrect)', re.I) - - -    @staticmethod -    def checkDeps(): -        if os.name == "nt": -            UnRar.CMD = join(pypath, "UnRAR.exe") -            p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE) -            p.communicate() -        else: -            try: -                p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE) -                p.communicate() -            except OSError: - -                # fallback to rar -                UnRar.CMD = "rar" -                p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE) -                p.communicate() - -        return True - - -    @staticmethod -    def getTargets(files_ids): -        result = [] - -        for file, id in files_ids: -            if not file.endswith(".rar"): -                continue - -            match = UnRar.re_splitfile.findall(file) -            if match: -                # only add first parts -                if int(match[0][1]) == 1: -                    result.append((file, id)) -            else: -                result.append((file, id)) - -        return result - - -    def init(self): -        self.passwordProtected = False -        self.headerProtected = False  #: list files will not work without password -        self.smallestFile = None  #: small file to test passwords -        self.password = ""  #: save the correct password - - -    def checkArchive(self): -        p = self.call_unrar("l", "-v", self.file) -        out, err = p.communicate() -        if self.re_wrongpwd.search(err): -            self.passwordProtected = True -            self.headerProtected = True -            return True - -        # output only used to check if passworded files are present -        if self.re_version.search(out): -            for attr, size, name in self.re_filelist5.findall(out): -                if attr.startswith("*"): -                    self.passwordProtected = True -                    return True -        else: -            for name, size, packed in self.re_filelist.findall(out): -                if name.startswith("*"): -                    self.passwordProtected = True -                    return True - -        self.listContent() -        if not self.files: -            raise ArchiveError("Empty Archive") - -        return False - - -    def checkPassword(self, password): -        # at this point we can only verify header protected files -        if self.headerProtected: -            p = self.call_unrar("l", "-v", self.file, password=password) -            out, err = p.communicate() -            if self.re_wrongpwd.search(err): -                return False - -        return True - - -    def extract(self, progress, password=None): -        command = "x" if self.fullpath else "e" - -        p = self.call_unrar(command, self.file, self.out, password=password) -        renice(p.pid, self.renice) - -        progress(0) -        progressstring = "" -        while True: -            c = p.stdout.read(1) -            # quit loop on eof -            if not c: -                break -            # reading a percentage 
sign -> set progress and restart -            if c == '%': -                progress(int(progressstring)) -                progressstring = "" -            # not reading a digit -> therefore restart -            elif c not in digits: -                progressstring = "" -            # add digit to progressstring -            else: -                progressstring = progressstring + c -        progress(100) - -        # retrieve stderr -        err = p.stderr.read() - -        if "CRC failed" in err and not password and not self.passwordProtected: -            raise CRCError -        elif "CRC failed" in err: -            raise WrongPassword -        if err.strip():  #: raise error if anything is on stderr -            raise ArchiveError(err.strip()) -        if p.returncode: -            raise ArchiveError("Process terminated") - -        if not self.files: -            self.password = password -            self.listContent() - - -    def getDeleteFiles(self): -        if ".part" in basename(self.file): -            return glob(re.sub("(?<=\.part)([01]+)", "*", self.file, re.I)) -        # get files which matches .r* and filter unsuited files out -        parts = glob(re.sub(r"(?<=\.r)ar$", "*", self.file, re.I)) -        return filter(lambda x: self.re_partfiles.match(x), parts) - - -    def listContent(self): -        command = "vb" if self.fullpath else "lb" -        p = self.call_unrar(command, "-v", self.file, password=self.password) -        out, err = p.communicate() - -        if "Cannot open" in err: -            raise ArchiveError("Cannot open file") - -        if err.strip():  #: only log error at this point -            self.m.logError(err.strip()) - -        result = set() - -        for f in decode(out).splitlines(): -            f = f.strip() -            result.add(save_join(self.out, f)) - -        self.files = result - - -    def call_unrar(self, command, *xargs, **kwargs): -        args = [] -        # overwrite flag -        args.append("-o+") if self.overwrite else args.append("-o-") - -        if self.excludefiles: -            for word in self.excludefiles.split(';'): -                args.append("-x%s" % word) - -        # assume yes on all queries -        args.append("-y") - -        # set a password -        if "password" in kwargs and kwargs['password']: -            args.append("-p%s" % kwargs['password']) -        else: -            args.append("-p-") - -        # NOTE: return codes are not reliable, some kind of threading, cleanup whatever issue -        call = [self.CMD, command] + args + list(xargs) -        self.m.logDebug(" ".join(call)) - -        p = Popen(call, stdout=PIPE, stderr=PIPE) - -        return p diff --git a/module/plugins/internal/UnZip.py b/module/plugins/internal/UnZip.py deleted file mode 100644 index e754141a1..000000000 --- a/module/plugins/internal/UnZip.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- - -import sys -import zipfile - -from module.plugins.internal.AbstractExtractor import AbtractExtractor - - -class UnZip(AbtractExtractor): -    __name__    = "UnZip" -    __version__ = "0.1" - -    __description__ = """Zip extractor plugin""" -    __license__     = "GPLv3" -    __authors__     = [("RaNaN", "RaNaN@pyload.org")] - - -    @staticmethod -    def checkDeps(): -        return sys.version_info[:2] >= (2, 6) - - -    @staticmethod -    def getTargets(files_ids): -        result = [] - -        for file, id in files_ids: -            if file.endswith(".zip"): -                result.append((file, id)) - -        return 
result - - -    def extract(self, progress, password=None): -        z = zipfile.ZipFile(self.file) -        self.files = z.namelist() -        z.extractall(self.out) - - -    def getDeleteFiles(self): -        return [self.file] diff --git a/module/plugins/internal/XFSAccount.py b/module/plugins/internal/XFSAccount.py deleted file mode 100644 index 2094b1480..000000000 --- a/module/plugins/internal/XFSAccount.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- coding: utf-8 -*- - -import re - -from time import gmtime, mktime, strptime -from urlparse import urljoin - -from module.plugins.Account import Account -from module.plugins.internal.SimpleHoster import parseHtmlForm, set_cookies - - -class XFSAccount(Account): -    __name__    = "XFSAccount" -    __type__    = "account" -    __version__ = "0.32" - -    __description__ = """XFileSharing account plugin""" -    __license__     = "GPLv3" -    __authors__     = [("zoidberg", "zoidberg@mujmail.cz"), -                       ("Walter Purcaro", "vuolter@gmail.com")] - - -    HOSTER_DOMAIN = None -    HOSTER_URL    = None - -    COOKIES = [(HOSTER_DOMAIN, "lang", "english")] - -    PREMIUM_PATTERN = r'\(Premium only\)' - -    VALID_UNTIL_PATTERN = r'Premium.[Aa]ccount expire:.*?(\d{1,2} [\w^_]+ \d{4})' - -    TRAFFIC_LEFT_PATTERN = r'Traffic available today:.*?<b>\s*(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>' -    TRAFFIC_LEFT_UNIT    = "MB"  #: used only if no group <U> was found - -    LEECH_TRAFFIC_PATTERN = r'Leech Traffic left:<b>.*?(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>' -    LEECH_TRAFFIC_UNIT    = "MB"  #: used only if no group <U> was found - -    LOGIN_FAIL_PATTERN = r'>\s*(Incorrect Login or Password|Error<)' - - -    def __init__(self, manager, accounts):  #@TODO: remove in 0.4.10 -        self.init() -        return super(XFSAccount, self).__init__(manager, accounts) - - -    def init(self): -        # if not self.HOSTER_DOMAIN: -            # self.fail(_("Missing HOSTER_DOMAIN")) - -        if not self.HOSTER_URL: -            self.HOSTER_URL = "http://www.%s/" % self.HOSTER_DOMAIN - - -    def loadAccountInfo(self, user, req): -        validuntil   = None -        trafficleft  = None -        leechtraffic = None -        premium      = None - -        html = req.load(self.HOSTER_URL, get={'op': "my_account"}, decode=True) - -        premium = True if re.search(self.PREMIUM_PATTERN, html) else False - -        m = re.search(self.VALID_UNTIL_PATTERN, html) -        if m: -            expiredate = m.group(1).strip() -            self.logDebug("Expire date: " + expiredate) - -            try: -                validuntil = mktime(strptime(expiredate, "%d %B %Y")) - -            except Exception, e: -                self.logError(e) - -            else: -                self.logDebug("Valid until: %s" % validuntil) - -                if validuntil > mktime(gmtime()): -                    premium = True -                    trafficleft = -1 -                else: -                    premium = False -                    validuntil = None  #: registered account type (not premium) -        else: -            self.logDebug("VALID_UNTIL_PATTERN not found") - -        m = re.search(self.TRAFFIC_LEFT_PATTERN, html) -        if m: -            try: -                traffic = m.groupdict() -                size    = traffic['S'] - -                if "nlimited" in size: -                    trafficleft = -1 -                    if validuntil is None: -                        validuntil = -1 -                else: -       
diff --git a/module/plugins/internal/XFSAccount.py b/module/plugins/internal/XFSAccount.py
deleted file mode 100644
index 2094b1480..000000000
--- a/module/plugins/internal/XFSAccount.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-
-from time import gmtime, mktime, strptime
-from urlparse import urljoin
-
-from module.plugins.Account import Account
-from module.plugins.internal.SimpleHoster import parseHtmlForm, set_cookies
-
-
-class XFSAccount(Account):
-    __name__    = "XFSAccount"
-    __type__    = "account"
-    __version__ = "0.32"
-
-    __description__ = """XFileSharing account plugin"""
-    __license__     = "GPLv3"
-    __authors__     = [("zoidberg", "zoidberg@mujmail.cz"),
-                       ("Walter Purcaro", "vuolter@gmail.com")]
-
-
-    HOSTER_DOMAIN = None
-    HOSTER_URL    = None
-
-    COOKIES = [(HOSTER_DOMAIN, "lang", "english")]
-
-    PREMIUM_PATTERN = r'\(Premium only\)'
-
-    VALID_UNTIL_PATTERN = r'Premium.[Aa]ccount expire:.*?(\d{1,2} [\w^_]+ \d{4})'
-
-    TRAFFIC_LEFT_PATTERN = r'Traffic available today:.*?<b>\s*(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>'
-    TRAFFIC_LEFT_UNIT    = "MB"  #: used only if no group <U> was found
-
-    LEECH_TRAFFIC_PATTERN = r'Leech Traffic left:<b>.*?(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>'
-    LEECH_TRAFFIC_UNIT    = "MB"  #: used only if no group <U> was found
-
-    LOGIN_FAIL_PATTERN = r'>\s*(Incorrect Login or Password|Error<)'
-
-
-    def __init__(self, manager, accounts):  #@TODO: remove in 0.4.10
-        self.init()
-        return super(XFSAccount, self).__init__(manager, accounts)
-
-
-    def init(self):
-        # if not self.HOSTER_DOMAIN:
-            # self.fail(_("Missing HOSTER_DOMAIN"))
-
-        if not self.HOSTER_URL:
-            self.HOSTER_URL = "http://www.%s/" % self.HOSTER_DOMAIN
-
-
-    def loadAccountInfo(self, user, req):
-        validuntil   = None
-        trafficleft  = None
-        leechtraffic = None
-        premium      = None
-
-        html = req.load(self.HOSTER_URL, get={'op': "my_account"}, decode=True)
-
-        premium = True if re.search(self.PREMIUM_PATTERN, html) else False
-
-        m = re.search(self.VALID_UNTIL_PATTERN, html)
-        if m:
-            expiredate = m.group(1).strip()
-            self.logDebug("Expire date: " + expiredate)
-
-            try:
-                validuntil = mktime(strptime(expiredate, "%d %B %Y"))
-
-            except Exception, e:
-                self.logError(e)
-
-            else:
-                self.logDebug("Valid until: %s" % validuntil)
-
-                if validuntil > mktime(gmtime()):
-                    premium = True
-                    trafficleft = -1
-                else:
-                    premium = False
-                    validuntil = None  #: registered account type (not premium)
-        else:
-            self.logDebug("VALID_UNTIL_PATTERN not found")
-
-        m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
-        if m:
-            try:
-                traffic = m.groupdict()
-                size    = traffic['S']
-
-                if "nlimited" in size:
-                    trafficleft = -1
-                    if validuntil is None:
-                        validuntil = -1
-                else:
-                    if 'U' in traffic:
-                        unit = traffic['U']
-                    elif isinstance(self.TRAFFIC_LEFT_UNIT, basestring):
-                        unit = self.TRAFFIC_LEFT_UNIT
-                    else:
-                        unit = ""
-
-                    trafficleft = self.parseTraffic(size + unit)
-
-            except Exception, e:
-                self.logError(e)
-        else:
-            self.logDebug("TRAFFIC_LEFT_PATTERN not found")
-
-        leech = [m.groupdict() for m in re.finditer(self.LEECH_TRAFFIC_PATTERN, html)]
-        if leech:
-            leechtraffic = 0
-            try:
-                for traffic in leech:
-                    size = traffic['S']
-
-                    if "nlimited" in size:
-                        leechtraffic = -1
-                        if validuntil is None:
-                            validuntil = -1
-                        break
-                    else:
-                        if 'U' in traffic:
-                            unit = traffic['U']
-                        elif isinstance(self.LEECH_TRAFFIC_UNIT, basestring):
-                            unit = self.LEECH_TRAFFIC_UNIT
-                        else:
-                            unit = ""
-
-                        leechtraffic += self.parseTraffic(size + unit)
-
-            except Exception, e:
-                self.logError(e)
-        else:
-            self.logDebug("LEECH_TRAFFIC_PATTERN not found")
-
-        return {'validuntil': validuntil, 'trafficleft': trafficleft, 'leechtraffic': leechtraffic, 'premium': premium}
-
-
-    def login(self, user, data, req):
-        if isinstance(self.COOKIES, list):
-            set_cookies(req.cj, self.COOKIES)
-
-        url = urljoin(self.HOSTER_URL, "login.html")
-        html = req.load(url, decode=True)
-
-        action, inputs = parseHtmlForm('name="FL"', html)
-        if not inputs:
-            inputs = {'op': "login",
-                      'redirect': self.HOSTER_URL}
-
-        inputs.update({'login': user,
-                       'password': data['password']})
-
-        html = req.load(self.HOSTER_URL, post=inputs, decode=True)
-
-        if re.search(self.LOGIN_FAIL_PATTERN, html):
-            self.wrongPassword()
diff --git a/module/plugins/internal/XFSCrypter.py b/module/plugins/internal/XFSCrypter.py
deleted file mode 100644
index 62fd8c017..000000000
--- a/module/plugins/internal/XFSCrypter.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from module.plugins.internal.SimpleCrypter import SimpleCrypter
-
-
-class XFSCrypter(SimpleCrypter):
-    __name__    = "XFSCrypter"
-    __type__    = "crypter"
-    __version__ = "0.04"
-
-    __pattern__ = r'^unmatchable$'
-
-    __description__ = """XFileSharing decrypter plugin"""
-    __license__     = "GPLv3"
-    __authors__     = [("Walter Purcaro", "vuolter@gmail.com")]
-
-
-    HOSTER_DOMAIN = None
-    HOSTER_NAME = None
-
-    URL_REPLACEMENTS = [(r'&?per_page=\d+', ""), (r'[?/&]+$', ""), (r'(.+/[^?]+)$', r'\1?'), (r'$', r'&per_page=10000')]
-
-    COOKIES = [(HOSTER_DOMAIN, "lang", "english")]
-
-    LINK_PATTERN = r'<(?:td|TD).*?>\s*<a href="(.+?)".*?>.+?(?:</a>)?\s*</(?:td|TD)>'
-    NAME_PATTERN = r'<[tT]itle>.*?\: (?P<N>.+) folder</[tT]itle>'
-
-    OFFLINE_PATTERN      = r'>\s*\w+ (Not Found|file (was|has been) removed)'
-    TEMP_OFFLINE_PATTERN = r'>\s*\w+ server (is in )?(maintenance|maintainance)'
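XFSAccount and XFSCrypter above are base classes that concrete XFileSharing plugins configured almost entirely through class attributes (HOSTER_DOMAIN plus, where a site deviated, the various *_PATTERN overrides). A minimal sketch of such a subclass, with the plugin name and domain invented for illustration:

# Illustrative only: plugin name and domain are invented.
from module.plugins.internal.XFSAccount import XFSAccount


class ExampleShareCom(XFSAccount):
    __name__    = "ExampleShareCom"
    __type__    = "account"
    __version__ = "0.01"

    __description__ = """Exampleshare.com account plugin (sketch)"""
    __license__     = "GPLv3"
    __authors__     = [("anonymous", "user@example.com")]

    HOSTER_DOMAIN = "exampleshare.com"  #: the only attribute most XFS accounts needed to set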
diff --git a/module/plugins/internal/XFSHoster.py b/module/plugins/internal/XFSHoster.py
deleted file mode 100644
index 3ae0692dc..000000000
--- a/module/plugins/internal/XFSHoster.py
+++ /dev/null
@@ -1,345 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import re
-
-from random import random
-from time import sleep
-
-from pycurl import FOLLOWLOCATION, LOW_SPEED_TIME
-
-from module.plugins.hoster.UnrestrictLi import secondsToMidnight
-from module.plugins.internal.CaptchaService import ReCaptcha, SolveMedia
-from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
-from module.utils import html_unescape
-
-
-class XFSHoster(SimpleHoster):
-    __name__    = "XFSHoster"
-    __type__    = "hoster"
-    __version__ = "0.27"
-
-    __pattern__ = r'^unmatchable$'
-
-    __description__ = """XFileSharing hoster plugin"""
-    __license__     = "GPLv3"
-    __authors__     = [("zoidberg", "zoidberg@mujmail.cz"),
-                       ("stickell", "l.stickell@yahoo.it"),
-                       ("Walter Purcaro", "vuolter@gmail.com")]
-
-
-    HOSTER_DOMAIN = None
-    HOSTER_NAME   = None
-
-    TEXT_ENCODING     = False
-    COOKIES           = [(HOSTER_DOMAIN, "lang", "english")]
-    CHECK_DIRECT_LINK = None
-    MULTI_HOSTER      = True  #@NOTE: Should be default to False for safe, but I'm lazy...
-
-    NAME_PATTERN = r'(>Filename:</b></td><td nowrap>|name="fname" value="|<span class="name">)(?P<N>.+?)(\s*<|")'
-    SIZE_PATTERN = r'(>Size:</b></td><td>|>File:.*>|<span class="size">)(?P<S>[\d.,]+)\s*(?P<U>[\w^_]+)'
-
-    OFFLINE_PATTERN      = r'>\s*\w+ (Not Found|file (was|has been) removed)'
-    TEMP_OFFLINE_PATTERN = r'>\s*\w+ server (is in )?(maintenance|maintainance)'
-
-    WAIT_PATTERN         = r'<span id="countdown_str">.*?>(\d+)</span>|id="countdown" value=".*?(\d+).*?"'
-    PREMIUM_ONLY_PATTERN = r'>This file is available for Premium Users only'
-    ERROR_PATTERN        = r'(?:class=["\']err["\'].*?>|<[Cc]enter><b>|>Error</td>|>\(ERROR:)(?:\s*<.+?>\s*)*(.+?)(?:["\']|<|\))'
-
-    LEECH_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
-    LINK_PATTERN       = None  #: final download url pattern
-
-    CAPTCHA_PATTERN       = r'(https?://[^"\']+?/captchas?/[^"\']+)'
-    CAPTCHA_BLOCK_PATTERN = r'>Enter code.*?<div.*?>(.+?)</div>'
-    RECAPTCHA_PATTERN     = None
-    SOLVEMEDIA_PATTERN    = None
-
-    FORM_PATTERN    = None
-    FORM_INPUTS_MAP = None  #: dict passed as input_names to parseHtmlForm
-
-
-    def setup(self):
-        self.chunkLimit = 1
-        self.resumeDownload = self.multiDL = self.premium
-
-
-    def prepare(self):
-        """ Initialize important variables """
-        if not self.HOSTER_DOMAIN:
-            self.fail(_("Missing HOSTER_DOMAIN"))
-
-        if not self.HOSTER_NAME:
-            self.HOSTER_NAME = "".join([str.capitalize() for str in self.HOSTER_DOMAIN.split('.')])
-
-        if not self.LINK_PATTERN:
-            pattern = r'(https?://(www\.)?([^/]*?%s|\d+\.\d+\.\d+\.\d+)(\:\d+)?(/d/|(/files)?/\d+/\w+/).+?)["\'<]'
-            self.LINK_PATTERN = pattern % self.HOSTER_DOMAIN.replace('.', '\.')
-
-        self.captcha   = None
-        self.errmsg    = None
-        self.passwords = self.getPassword().splitlines()
-
-        super(XFSHoster, self).prepare()
-
-        if self.CHECK_DIRECT_LINK is None:
-            self.directDL = bool(self.premium)
-
-
-    def handleFree(self):
-        link = self.getDownloadLink()
-
-        if link:
-            if self.captcha:
-                self.correctCaptcha()
-
-            self.download(link, ref=True, cookies=True, disposition=True)
-
-        elif self.errmsg:
-            if 'captcha' in self.errmsg:
-                self.fail(_("No valid captcha code entered"))
-            else:
-                self.fail(self.errmsg)
-
-        else:
-            self.fail(_("Download link not found"))
-
-
-    def handlePremium(self):
-        return self.handleFree()
-
-
-    def getDownloadLink(self):
-        for i in xrange(1, 6):
-            self.logDebug("Getting download link: #%d" % i)
-
-            self.checkErrors()
-
-            m = re.search(self.LINK_PATTERN, self.html, re.S)
-            if m:
-                break
-
-            data = self.getPostParameters()
-
-            self.req.http.c.setopt(FOLLOWLOCATION, 0)
-
-            self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
-
-            self.req.http.c.setopt(FOLLOWLOCATION, 1)
-
-            m = re.search(r'Location\s*:\s*(.+)', self.req.http.header, re.I)
-            if m and not "op=" in m.group(1):
-                break
-
-            m = re.search(self.LINK_PATTERN, self.html, re.S)
-            if m:
-                break
-        else:
-            self.logError(data['op'] if 'op' in data else _("UNKNOWN"))
-            return ""
-
-        self.errmsg = None
-
-        return m.group(1).strip()  #@TODO: Remove .strip() in 0.4.10
-
-
-    def handleMulti(self):
-        #only tested with easybytez.com
-        self.html = self.load("http://www.%s/" % self.HOSTER_DOMAIN)
-
-        action, inputs = self.parseHtmlForm()
-
-        upload_id = "%012d" % int(random() * 10 ** 12)
-        action += upload_id + "&js_on=1&utype=prem&upload_type=url"
-
-        inputs['tos'] = '1'
-        inputs['url_mass'] = self.pyfile.url
-        inputs['up1oad_type'] = 'url'
-
-        self.logDebug(action, inputs)
-
-        self.req.setOption("timeout", 600)  #: wait for file to upload to easybytez.com
-
-        self.html = self.load(action, post=inputs)
-
-        self.checkErrors()
-
-        action, inputs = self.parseHtmlForm('F1')
-        if not inputs:
-            if self.errmsg:
-                self.retry(reason=self.errmsg)
-            else:
-                self.error(_("TEXTAREA F1 not found"))
-
-        self.logDebug(inputs)
-
-        stmsg = inputs['st']
-
-        if stmsg == 'OK':
-            self.html = self.load(action, post=inputs)
-
-        elif 'Can not leech file' in stmsg:
-            self.retry(20, 3 * 60, _("Can not leech file"))
-
-        elif 'today' in stmsg:
-            self.retry(wait_time=secondsToMidnight(gmt=2), reason=_("You've used all Leech traffic today"))
-
-        else:
-            self.fail(stmsg)
-
-        #get easybytez.com link for uploaded file
-        m = re.search(self.LEECH_LINK_PATTERN, self.html)
-        if m is None:
-            self.error(_("LEECH_LINK_PATTERN not found"))
-
-        header = self.load(m.group(1), just_header=True, decode=True)
-
-        if 'location' in header:  #: Direct download link
-            self.link = header['location']
-        else:
-            self.fail(_("Download link not found"))
-
-
-    def checkErrors(self):
-        m = re.search(self.PREMIUM_ONLY_PATTERN, self.html)
-        if m:
-            self.info['error'] = "premium-only"
-            return
-
-        m = re.search(self.ERROR_PATTERN, self.html)
-
-        if m is None:
-            self.errmsg = None
-        else:
-            self.errmsg = m.group(1).strip()
-
-            self.logWarning(re.sub(r"<.*?>", " ", self.errmsg))
-
-            if 'wait' in self.errmsg:
-                wait_time = sum([int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1}[u.lower()] for v, u in
-                                 re.findall(r'(\d+)\s*(hr|hour|min|sec)', self.errmsg, re.I)])
-                self.wait(wait_time, True)
-
-            elif 'country' in self.errmsg:
-                self.fail(_("Downloads are disabled for your country"))
-
-            elif 'captcha' in self.errmsg:
-                self.invalidCaptcha()
-
-            elif 'premium' in self.errmsg and 'require' in self.errmsg:
-                self.fail(_("File can be downloaded by premium users only"))
-
-            elif 'limit' in self.errmsg:
-                if 'days' in self.errmsg:
-                    delay = secondsToMidnight(gmt=2)
-                    retries = 3
-                else:
-                    delay = 1 * 60 * 60
-                    retries = 24
-
-                self.wantReconnect = True
-                self.retry(retries, delay, _("Download limit exceeded"))
-
-            elif 'countdown' in self.errmsg or 'Expired' in self.errmsg:
-                self.retry(reason=_("Link expired"))
-
-            elif 'maintenance' in self.errmsg or 'maintainance' in self.errmsg:
-                self.tempOffline()
-
-            elif 'download files up to' in self.errmsg:
-                self.fail(_("File too large for free download"))
-
-            else:
-                self.wantReconnect = True
-                self.retry(wait_time=60, reason=self.errmsg)
-
-        if self.errmsg:
-            self.info['error'] = self.errmsg
-        else:
-            self.info.pop('error', None)
-
-
-    def getPostParameters(self):
-        if self.FORM_PATTERN or self.FORM_INPUTS_MAP:
-            action, inputs = self.parseHtmlForm(self.FORM_PATTERN or "", self.FORM_INPUTS_MAP or {})
-        else:
-            action, inputs = self.parseHtmlForm(input_names={'op': re.compile(r'^download')})
-
-        if not inputs:
-            action, inputs = self.parseHtmlForm('F1')
-            if not inputs:
-                if self.errmsg:
-                    self.retry(reason=self.errmsg)
-                else:
-                    self.error(_("TEXTAREA F1 not found"))
-
-        self.logDebug(inputs)
-
-        if 'op' in inputs:
-            if "password" in inputs:
-                if self.passwords:
-                    inputs['password'] = self.passwords.pop(0)
-                else:
-                    self.fail(_("Missing password"))
-
-            if not self.premium:
-                m = re.search(self.WAIT_PATTERN, self.html)
-                if m:
-                    wait_time = int(m.group(1))
-                    self.setWait(wait_time, False)
-
-                self.captcha = self.handleCaptcha(inputs)
-
-                self.wait()
-        else:
-            inputs['referer'] = self.pyfile.url
-
-        if self.premium:
-            inputs['method_premium'] = "Premium Download"
-            inputs.pop('method_free', None)
-        else:
-            inputs['method_free'] = "Free Download"
-            inputs.pop('method_premium', None)
-
-        return inputs
-
-
-    def handleCaptcha(self, inputs):
-        m = re.search(self.CAPTCHA_PATTERN, self.html)
-        if m:
-            captcha_url = m.group(1)
-            inputs['code'] = self.decryptCaptcha(captcha_url)
-            return 1
-
-        m = re.search(self.CAPTCHA_BLOCK_PATTERN, self.html, re.S)
-        if m:
-            captcha_div = m.group(1)
-            numerals    = re.findall(r'<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div))
-            self.logDebug(captcha_div)
-            inputs['code'] = "".join([a[1] for a in sorted(numerals, key=lambda num: int(num[0]))])
-            self.logDebug("Captcha code: %s" % inputs['code'], numerals)
-            return 2
-
-        recaptcha = ReCaptcha(self)
-        try:
-            captcha_key = re.search(self.RECAPTCHA_PATTERN, self.html).group(1)
-        except:
-            captcha_key = recaptcha.detect_key()
-        else:
-            self.logDebug("ReCaptcha key: %s" % captcha_key)
-
-        if captcha_key:
-            inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(captcha_key)
-            return 3
-
-        solvemedia = SolveMedia(self)
-        try:
-            captcha_key = re.search(self.SOLVEMEDIA_PATTERN, self.html).group(1)
-        except:
-            captcha_key = solvemedia.detect_key()
-        else:
-            self.logDebug("SolveMedia key: %s" % captcha_key)
-
-        if captcha_key:
-            inputs['adcopy_challenge'], inputs['adcopy_response'] = solvemedia.challenge(captcha_key)
-            return 4
-
-        return 0
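Concrete XFileSharing hosters subclassed XFSHoster in the same attribute-driven way: set HOSTER_DOMAIN and a real __pattern__, and only override LINK_PATTERN or the captcha patterns when a site deviated from the defaults. A minimal sketch under that assumption (plugin name, domain and URL pattern are invented; getInfo uses the create_getInfo helper imported above from SimpleHoster):

# Illustrative only: plugin name, domain and URL pattern are invented.
from module.plugins.internal.XFSHoster import XFSHoster
from module.plugins.internal.SimpleHoster import create_getInfo


class ExampleShareCom(XFSHoster):
    __name__    = "ExampleShareCom"
    __type__    = "hoster"
    __version__ = "0.01"

    __pattern__ = r'https?://(?:www\.)?exampleshare\.com/\w{12}'

    __description__ = """Exampleshare.com hoster plugin (sketch)"""
    __license__     = "GPLv3"
    __authors__     = [("anonymous", "user@example.com")]

    HOSTER_DOMAIN = "exampleshare.com"  #: drives the default LINK_PATTERN built in prepare()


getInfo = create_getInfo(ExampleShareCom)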
diff --git a/module/plugins/internal/__init__.py b/module/plugins/internal/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/module/plugins/internal/__init__.py
+++ /dev/null
